drivers/gpu/drm/amd/amdkfd: use in_compat_syscall to check open() caller type
[deliverable/linux.git] / fs / btrfs / ctree.c
1 /*
2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "print-tree.h"
26 #include "locking.h"
27
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 int level, int slot);
42 static int tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
44
45 struct btrfs_path *btrfs_alloc_path(void)
46 {
47 struct btrfs_path *path;
48 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
49 return path;
50 }
51
52 /*
53 * set all locked nodes in the path to blocking locks. This should
54 * be done before scheduling
55 */
56 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
57 {
58 int i;
59 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
60 if (!p->nodes[i] || !p->locks[i])
61 continue;
62 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
63 if (p->locks[i] == BTRFS_READ_LOCK)
64 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
65 else if (p->locks[i] == BTRFS_WRITE_LOCK)
66 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
67 }
68 }
69
70 /*
71 * reset all the locked nodes in the patch to spinning locks.
72 *
73 * held is used to keep lockdep happy, when lockdep is enabled
74 * we set held to a blocking lock before we go around and
75 * retake all the spinlocks in the path. You can safely use NULL
76 * for held
77 */
78 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
79 struct extent_buffer *held, int held_rw)
80 {
81 int i;
82
83 if (held) {
84 btrfs_set_lock_blocking_rw(held, held_rw);
85 if (held_rw == BTRFS_WRITE_LOCK)
86 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
87 else if (held_rw == BTRFS_READ_LOCK)
88 held_rw = BTRFS_READ_LOCK_BLOCKING;
89 }
90 btrfs_set_path_blocking(p);
91
92 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
93 if (p->nodes[i] && p->locks[i]) {
94 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
95 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
96 p->locks[i] = BTRFS_WRITE_LOCK;
97 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
98 p->locks[i] = BTRFS_READ_LOCK;
99 }
100 }
101
102 if (held)
103 btrfs_clear_lock_blocking_rw(held, held_rw);
104 }
105
106 /* this also releases the path */
107 void btrfs_free_path(struct btrfs_path *p)
108 {
109 if (!p)
110 return;
111 btrfs_release_path(p);
112 kmem_cache_free(btrfs_path_cachep, p);
113 }
114
115 /*
116 * path release drops references on the extent buffers in the path
117 * and it drops any locks held by this path
118 *
119 * It is safe to call this on paths that no locks or extent buffers held.
120 */
121 noinline void btrfs_release_path(struct btrfs_path *p)
122 {
123 int i;
124
125 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
126 p->slots[i] = 0;
127 if (!p->nodes[i])
128 continue;
129 if (p->locks[i]) {
130 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
131 p->locks[i] = 0;
132 }
133 free_extent_buffer(p->nodes[i]);
134 p->nodes[i] = NULL;
135 }
136 }
137
138 /*
139 * safely gets a reference on the root node of a tree. A lock
140 * is not taken, so a concurrent writer may put a different node
141 * at the root of the tree. See btrfs_lock_root_node for the
142 * looping required.
143 *
144 * The extent buffer returned by this has a reference taken, so
145 * it won't disappear. It may stop being the root of the tree
146 * at any time because there are no locks held.
147 */
148 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
149 {
150 struct extent_buffer *eb;
151
152 while (1) {
153 rcu_read_lock();
154 eb = rcu_dereference(root->node);
155
156 /*
157 * RCU really hurts here, we could free up the root node because
158 * it was cow'ed but we may not get the new root node yet so do
159 * the inc_not_zero dance and if it doesn't work then
160 * synchronize_rcu and try again.
161 */
162 if (atomic_inc_not_zero(&eb->refs)) {
163 rcu_read_unlock();
164 break;
165 }
166 rcu_read_unlock();
167 synchronize_rcu();
168 }
169 return eb;
170 }
171
172 /* loop around taking references on and locking the root node of the
173 * tree until you end up with a lock on the root. A locked buffer
174 * is returned, with a reference held.
175 */
176 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
177 {
178 struct extent_buffer *eb;
179
180 while (1) {
181 eb = btrfs_root_node(root);
182 btrfs_tree_lock(eb);
183 if (eb == root->node)
184 break;
185 btrfs_tree_unlock(eb);
186 free_extent_buffer(eb);
187 }
188 return eb;
189 }
190
191 /* loop around taking references on and locking the root node of the
192 * tree until you end up with a lock on the root. A locked buffer
193 * is returned, with a reference held.
194 */
195 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
196 {
197 struct extent_buffer *eb;
198
199 while (1) {
200 eb = btrfs_root_node(root);
201 btrfs_tree_read_lock(eb);
202 if (eb == root->node)
203 break;
204 btrfs_tree_read_unlock(eb);
205 free_extent_buffer(eb);
206 }
207 return eb;
208 }
209
210 /* cowonly root (everything not a reference counted cow subvolume), just get
211 * put onto a simple dirty list. transaction.c walks this to make sure they
212 * get properly updated on disk.
213 */
214 static void add_root_to_dirty_list(struct btrfs_root *root)
215 {
216 if (test_bit(BTRFS_ROOT_DIRTY, &root->state) ||
217 !test_bit(BTRFS_ROOT_TRACK_DIRTY, &root->state))
218 return;
219
220 spin_lock(&root->fs_info->trans_lock);
221 if (!test_and_set_bit(BTRFS_ROOT_DIRTY, &root->state)) {
222 /* Want the extent tree to be the last on the list */
223 if (root->objectid == BTRFS_EXTENT_TREE_OBJECTID)
224 list_move_tail(&root->dirty_list,
225 &root->fs_info->dirty_cowonly_roots);
226 else
227 list_move(&root->dirty_list,
228 &root->fs_info->dirty_cowonly_roots);
229 }
230 spin_unlock(&root->fs_info->trans_lock);
231 }
232
233 /*
234 * used by snapshot creation to make a copy of a root for a tree with
235 * a given objectid. The buffer with the new root node is returned in
236 * cow_ret, and this func returns zero on success or a negative error code.
237 */
238 int btrfs_copy_root(struct btrfs_trans_handle *trans,
239 struct btrfs_root *root,
240 struct extent_buffer *buf,
241 struct extent_buffer **cow_ret, u64 new_root_objectid)
242 {
243 struct extent_buffer *cow;
244 int ret = 0;
245 int level;
246 struct btrfs_disk_key disk_key;
247
248 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
249 trans->transid != root->fs_info->running_transaction->transid);
250 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
251 trans->transid != root->last_trans);
252
253 level = btrfs_header_level(buf);
254 if (level == 0)
255 btrfs_item_key(buf, &disk_key, 0);
256 else
257 btrfs_node_key(buf, &disk_key, 0);
258
259 cow = btrfs_alloc_tree_block(trans, root, 0, new_root_objectid,
260 &disk_key, level, buf->start, 0);
261 if (IS_ERR(cow))
262 return PTR_ERR(cow);
263
264 copy_extent_buffer(cow, buf, 0, 0, cow->len);
265 btrfs_set_header_bytenr(cow, cow->start);
266 btrfs_set_header_generation(cow, trans->transid);
267 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
268 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
269 BTRFS_HEADER_FLAG_RELOC);
270 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
271 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
272 else
273 btrfs_set_header_owner(cow, new_root_objectid);
274
275 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
276 BTRFS_FSID_SIZE);
277
278 WARN_ON(btrfs_header_generation(buf) > trans->transid);
279 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
280 ret = btrfs_inc_ref(trans, root, cow, 1);
281 else
282 ret = btrfs_inc_ref(trans, root, cow, 0);
283
284 if (ret)
285 return ret;
286
287 btrfs_mark_buffer_dirty(cow);
288 *cow_ret = cow;
289 return 0;
290 }
291
292 enum mod_log_op {
293 MOD_LOG_KEY_REPLACE,
294 MOD_LOG_KEY_ADD,
295 MOD_LOG_KEY_REMOVE,
296 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
297 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
298 MOD_LOG_MOVE_KEYS,
299 MOD_LOG_ROOT_REPLACE,
300 };
301
302 struct tree_mod_move {
303 int dst_slot;
304 int nr_items;
305 };
306
307 struct tree_mod_root {
308 u64 logical;
309 u8 level;
310 };
311
312 struct tree_mod_elem {
313 struct rb_node node;
314 u64 logical;
315 u64 seq;
316 enum mod_log_op op;
317
318 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
319 int slot;
320
321 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
322 u64 generation;
323
324 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
325 struct btrfs_disk_key key;
326 u64 blockptr;
327
328 /* this is used for op == MOD_LOG_MOVE_KEYS */
329 struct tree_mod_move move;
330
331 /* this is used for op == MOD_LOG_ROOT_REPLACE */
332 struct tree_mod_root old_root;
333 };
334
335 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
336 {
337 read_lock(&fs_info->tree_mod_log_lock);
338 }
339
340 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
341 {
342 read_unlock(&fs_info->tree_mod_log_lock);
343 }
344
345 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
346 {
347 write_lock(&fs_info->tree_mod_log_lock);
348 }
349
350 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
351 {
352 write_unlock(&fs_info->tree_mod_log_lock);
353 }
354
355 /*
356 * Pull a new tree mod seq number for our operation.
357 */
358 static inline u64 btrfs_inc_tree_mod_seq(struct btrfs_fs_info *fs_info)
359 {
360 return atomic64_inc_return(&fs_info->tree_mod_seq);
361 }
362
363 /*
364 * This adds a new blocker to the tree mod log's blocker list if the @elem
365 * passed does not already have a sequence number set. So when a caller expects
366 * to record tree modifications, it should ensure to set elem->seq to zero
367 * before calling btrfs_get_tree_mod_seq.
368 * Returns a fresh, unused tree log modification sequence number, even if no new
369 * blocker was added.
370 */
371 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
372 struct seq_list *elem)
373 {
374 tree_mod_log_write_lock(fs_info);
375 spin_lock(&fs_info->tree_mod_seq_lock);
376 if (!elem->seq) {
377 elem->seq = btrfs_inc_tree_mod_seq(fs_info);
378 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
379 }
380 spin_unlock(&fs_info->tree_mod_seq_lock);
381 tree_mod_log_write_unlock(fs_info);
382
383 return elem->seq;
384 }
385
386 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
387 struct seq_list *elem)
388 {
389 struct rb_root *tm_root;
390 struct rb_node *node;
391 struct rb_node *next;
392 struct seq_list *cur_elem;
393 struct tree_mod_elem *tm;
394 u64 min_seq = (u64)-1;
395 u64 seq_putting = elem->seq;
396
397 if (!seq_putting)
398 return;
399
400 spin_lock(&fs_info->tree_mod_seq_lock);
401 list_del(&elem->list);
402 elem->seq = 0;
403
404 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
405 if (cur_elem->seq < min_seq) {
406 if (seq_putting > cur_elem->seq) {
407 /*
408 * blocker with lower sequence number exists, we
409 * cannot remove anything from the log
410 */
411 spin_unlock(&fs_info->tree_mod_seq_lock);
412 return;
413 }
414 min_seq = cur_elem->seq;
415 }
416 }
417 spin_unlock(&fs_info->tree_mod_seq_lock);
418
419 /*
420 * anything that's lower than the lowest existing (read: blocked)
421 * sequence number can be removed from the tree.
422 */
423 tree_mod_log_write_lock(fs_info);
424 tm_root = &fs_info->tree_mod_log;
425 for (node = rb_first(tm_root); node; node = next) {
426 next = rb_next(node);
427 tm = container_of(node, struct tree_mod_elem, node);
428 if (tm->seq > min_seq)
429 continue;
430 rb_erase(node, tm_root);
431 kfree(tm);
432 }
433 tree_mod_log_write_unlock(fs_info);
434 }
435
436 /*
437 * key order of the log:
438 * node/leaf start address -> sequence
439 *
440 * The 'start address' is the logical address of the *new* root node
441 * for root replace operations, or the logical address of the affected
442 * block for all other operations.
443 *
444 * Note: must be called with write lock (tree_mod_log_write_lock).
445 */
446 static noinline int
447 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
448 {
449 struct rb_root *tm_root;
450 struct rb_node **new;
451 struct rb_node *parent = NULL;
452 struct tree_mod_elem *cur;
453
454 BUG_ON(!tm);
455
456 tm->seq = btrfs_inc_tree_mod_seq(fs_info);
457
458 tm_root = &fs_info->tree_mod_log;
459 new = &tm_root->rb_node;
460 while (*new) {
461 cur = container_of(*new, struct tree_mod_elem, node);
462 parent = *new;
463 if (cur->logical < tm->logical)
464 new = &((*new)->rb_left);
465 else if (cur->logical > tm->logical)
466 new = &((*new)->rb_right);
467 else if (cur->seq < tm->seq)
468 new = &((*new)->rb_left);
469 else if (cur->seq > tm->seq)
470 new = &((*new)->rb_right);
471 else
472 return -EEXIST;
473 }
474
475 rb_link_node(&tm->node, parent, new);
476 rb_insert_color(&tm->node, tm_root);
477 return 0;
478 }
479
480 /*
481 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
482 * returns zero with the tree_mod_log_lock acquired. The caller must hold
483 * this until all tree mod log insertions are recorded in the rb tree and then
484 * call tree_mod_log_write_unlock() to release.
485 */
486 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
487 struct extent_buffer *eb) {
488 smp_mb();
489 if (list_empty(&(fs_info)->tree_mod_seq_list))
490 return 1;
491 if (eb && btrfs_header_level(eb) == 0)
492 return 1;
493
494 tree_mod_log_write_lock(fs_info);
495 if (list_empty(&(fs_info)->tree_mod_seq_list)) {
496 tree_mod_log_write_unlock(fs_info);
497 return 1;
498 }
499
500 return 0;
501 }
502
503 /* Similar to tree_mod_dont_log, but doesn't acquire any locks. */
504 static inline int tree_mod_need_log(const struct btrfs_fs_info *fs_info,
505 struct extent_buffer *eb)
506 {
507 smp_mb();
508 if (list_empty(&(fs_info)->tree_mod_seq_list))
509 return 0;
510 if (eb && btrfs_header_level(eb) == 0)
511 return 0;
512
513 return 1;
514 }
515
516 static struct tree_mod_elem *
517 alloc_tree_mod_elem(struct extent_buffer *eb, int slot,
518 enum mod_log_op op, gfp_t flags)
519 {
520 struct tree_mod_elem *tm;
521
522 tm = kzalloc(sizeof(*tm), flags);
523 if (!tm)
524 return NULL;
525
526 tm->logical = eb->start;
527 if (op != MOD_LOG_KEY_ADD) {
528 btrfs_node_key(eb, &tm->key, slot);
529 tm->blockptr = btrfs_node_blockptr(eb, slot);
530 }
531 tm->op = op;
532 tm->slot = slot;
533 tm->generation = btrfs_node_ptr_generation(eb, slot);
534 RB_CLEAR_NODE(&tm->node);
535
536 return tm;
537 }
538
539 static noinline int
540 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
541 struct extent_buffer *eb, int slot,
542 enum mod_log_op op, gfp_t flags)
543 {
544 struct tree_mod_elem *tm;
545 int ret;
546
547 if (!tree_mod_need_log(fs_info, eb))
548 return 0;
549
550 tm = alloc_tree_mod_elem(eb, slot, op, flags);
551 if (!tm)
552 return -ENOMEM;
553
554 if (tree_mod_dont_log(fs_info, eb)) {
555 kfree(tm);
556 return 0;
557 }
558
559 ret = __tree_mod_log_insert(fs_info, tm);
560 tree_mod_log_write_unlock(fs_info);
561 if (ret)
562 kfree(tm);
563
564 return ret;
565 }
566
567 static noinline int
568 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
569 struct extent_buffer *eb, int dst_slot, int src_slot,
570 int nr_items, gfp_t flags)
571 {
572 struct tree_mod_elem *tm = NULL;
573 struct tree_mod_elem **tm_list = NULL;
574 int ret = 0;
575 int i;
576 int locked = 0;
577
578 if (!tree_mod_need_log(fs_info, eb))
579 return 0;
580
581 tm_list = kcalloc(nr_items, sizeof(struct tree_mod_elem *), flags);
582 if (!tm_list)
583 return -ENOMEM;
584
585 tm = kzalloc(sizeof(*tm), flags);
586 if (!tm) {
587 ret = -ENOMEM;
588 goto free_tms;
589 }
590
591 tm->logical = eb->start;
592 tm->slot = src_slot;
593 tm->move.dst_slot = dst_slot;
594 tm->move.nr_items = nr_items;
595 tm->op = MOD_LOG_MOVE_KEYS;
596
597 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
598 tm_list[i] = alloc_tree_mod_elem(eb, i + dst_slot,
599 MOD_LOG_KEY_REMOVE_WHILE_MOVING, flags);
600 if (!tm_list[i]) {
601 ret = -ENOMEM;
602 goto free_tms;
603 }
604 }
605
606 if (tree_mod_dont_log(fs_info, eb))
607 goto free_tms;
608 locked = 1;
609
610 /*
611 * When we override something during the move, we log these removals.
612 * This can only happen when we move towards the beginning of the
613 * buffer, i.e. dst_slot < src_slot.
614 */
615 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
616 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
617 if (ret)
618 goto free_tms;
619 }
620
621 ret = __tree_mod_log_insert(fs_info, tm);
622 if (ret)
623 goto free_tms;
624 tree_mod_log_write_unlock(fs_info);
625 kfree(tm_list);
626
627 return 0;
628 free_tms:
629 for (i = 0; i < nr_items; i++) {
630 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
631 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
632 kfree(tm_list[i]);
633 }
634 if (locked)
635 tree_mod_log_write_unlock(fs_info);
636 kfree(tm_list);
637 kfree(tm);
638
639 return ret;
640 }
641
642 static inline int
643 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
644 struct tree_mod_elem **tm_list,
645 int nritems)
646 {
647 int i, j;
648 int ret;
649
650 for (i = nritems - 1; i >= 0; i--) {
651 ret = __tree_mod_log_insert(fs_info, tm_list[i]);
652 if (ret) {
653 for (j = nritems - 1; j > i; j--)
654 rb_erase(&tm_list[j]->node,
655 &fs_info->tree_mod_log);
656 return ret;
657 }
658 }
659
660 return 0;
661 }
662
663 static noinline int
664 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
665 struct extent_buffer *old_root,
666 struct extent_buffer *new_root, gfp_t flags,
667 int log_removal)
668 {
669 struct tree_mod_elem *tm = NULL;
670 struct tree_mod_elem **tm_list = NULL;
671 int nritems = 0;
672 int ret = 0;
673 int i;
674
675 if (!tree_mod_need_log(fs_info, NULL))
676 return 0;
677
678 if (log_removal && btrfs_header_level(old_root) > 0) {
679 nritems = btrfs_header_nritems(old_root);
680 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *),
681 flags);
682 if (!tm_list) {
683 ret = -ENOMEM;
684 goto free_tms;
685 }
686 for (i = 0; i < nritems; i++) {
687 tm_list[i] = alloc_tree_mod_elem(old_root, i,
688 MOD_LOG_KEY_REMOVE_WHILE_FREEING, flags);
689 if (!tm_list[i]) {
690 ret = -ENOMEM;
691 goto free_tms;
692 }
693 }
694 }
695
696 tm = kzalloc(sizeof(*tm), flags);
697 if (!tm) {
698 ret = -ENOMEM;
699 goto free_tms;
700 }
701
702 tm->logical = new_root->start;
703 tm->old_root.logical = old_root->start;
704 tm->old_root.level = btrfs_header_level(old_root);
705 tm->generation = btrfs_header_generation(old_root);
706 tm->op = MOD_LOG_ROOT_REPLACE;
707
708 if (tree_mod_dont_log(fs_info, NULL))
709 goto free_tms;
710
711 if (tm_list)
712 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
713 if (!ret)
714 ret = __tree_mod_log_insert(fs_info, tm);
715
716 tree_mod_log_write_unlock(fs_info);
717 if (ret)
718 goto free_tms;
719 kfree(tm_list);
720
721 return ret;
722
723 free_tms:
724 if (tm_list) {
725 for (i = 0; i < nritems; i++)
726 kfree(tm_list[i]);
727 kfree(tm_list);
728 }
729 kfree(tm);
730
731 return ret;
732 }
733
734 static struct tree_mod_elem *
735 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
736 int smallest)
737 {
738 struct rb_root *tm_root;
739 struct rb_node *node;
740 struct tree_mod_elem *cur = NULL;
741 struct tree_mod_elem *found = NULL;
742
743 tree_mod_log_read_lock(fs_info);
744 tm_root = &fs_info->tree_mod_log;
745 node = tm_root->rb_node;
746 while (node) {
747 cur = container_of(node, struct tree_mod_elem, node);
748 if (cur->logical < start) {
749 node = node->rb_left;
750 } else if (cur->logical > start) {
751 node = node->rb_right;
752 } else if (cur->seq < min_seq) {
753 node = node->rb_left;
754 } else if (!smallest) {
755 /* we want the node with the highest seq */
756 if (found)
757 BUG_ON(found->seq > cur->seq);
758 found = cur;
759 node = node->rb_left;
760 } else if (cur->seq > min_seq) {
761 /* we want the node with the smallest seq */
762 if (found)
763 BUG_ON(found->seq < cur->seq);
764 found = cur;
765 node = node->rb_right;
766 } else {
767 found = cur;
768 break;
769 }
770 }
771 tree_mod_log_read_unlock(fs_info);
772
773 return found;
774 }
775
776 /*
777 * this returns the element from the log with the smallest time sequence
778 * value that's in the log (the oldest log item). any element with a time
779 * sequence lower than min_seq will be ignored.
780 */
781 static struct tree_mod_elem *
782 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
783 u64 min_seq)
784 {
785 return __tree_mod_log_search(fs_info, start, min_seq, 1);
786 }
787
788 /*
789 * this returns the element from the log with the largest time sequence
790 * value that's in the log (the most recent log item). any element with
791 * a time sequence lower than min_seq will be ignored.
792 */
793 static struct tree_mod_elem *
794 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
795 {
796 return __tree_mod_log_search(fs_info, start, min_seq, 0);
797 }
798
799 static noinline int
800 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
801 struct extent_buffer *src, unsigned long dst_offset,
802 unsigned long src_offset, int nr_items)
803 {
804 int ret = 0;
805 struct tree_mod_elem **tm_list = NULL;
806 struct tree_mod_elem **tm_list_add, **tm_list_rem;
807 int i;
808 int locked = 0;
809
810 if (!tree_mod_need_log(fs_info, NULL))
811 return 0;
812
813 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0)
814 return 0;
815
816 tm_list = kcalloc(nr_items * 2, sizeof(struct tree_mod_elem *),
817 GFP_NOFS);
818 if (!tm_list)
819 return -ENOMEM;
820
821 tm_list_add = tm_list;
822 tm_list_rem = tm_list + nr_items;
823 for (i = 0; i < nr_items; i++) {
824 tm_list_rem[i] = alloc_tree_mod_elem(src, i + src_offset,
825 MOD_LOG_KEY_REMOVE, GFP_NOFS);
826 if (!tm_list_rem[i]) {
827 ret = -ENOMEM;
828 goto free_tms;
829 }
830
831 tm_list_add[i] = alloc_tree_mod_elem(dst, i + dst_offset,
832 MOD_LOG_KEY_ADD, GFP_NOFS);
833 if (!tm_list_add[i]) {
834 ret = -ENOMEM;
835 goto free_tms;
836 }
837 }
838
839 if (tree_mod_dont_log(fs_info, NULL))
840 goto free_tms;
841 locked = 1;
842
843 for (i = 0; i < nr_items; i++) {
844 ret = __tree_mod_log_insert(fs_info, tm_list_rem[i]);
845 if (ret)
846 goto free_tms;
847 ret = __tree_mod_log_insert(fs_info, tm_list_add[i]);
848 if (ret)
849 goto free_tms;
850 }
851
852 tree_mod_log_write_unlock(fs_info);
853 kfree(tm_list);
854
855 return 0;
856
857 free_tms:
858 for (i = 0; i < nr_items * 2; i++) {
859 if (tm_list[i] && !RB_EMPTY_NODE(&tm_list[i]->node))
860 rb_erase(&tm_list[i]->node, &fs_info->tree_mod_log);
861 kfree(tm_list[i]);
862 }
863 if (locked)
864 tree_mod_log_write_unlock(fs_info);
865 kfree(tm_list);
866
867 return ret;
868 }
869
870 static inline void
871 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
872 int dst_offset, int src_offset, int nr_items)
873 {
874 int ret;
875 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
876 nr_items, GFP_NOFS);
877 BUG_ON(ret < 0);
878 }
879
880 static noinline void
881 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
882 struct extent_buffer *eb, int slot, int atomic)
883 {
884 int ret;
885
886 ret = tree_mod_log_insert_key(fs_info, eb, slot,
887 MOD_LOG_KEY_REPLACE,
888 atomic ? GFP_ATOMIC : GFP_NOFS);
889 BUG_ON(ret < 0);
890 }
891
892 static noinline int
893 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
894 {
895 struct tree_mod_elem **tm_list = NULL;
896 int nritems = 0;
897 int i;
898 int ret = 0;
899
900 if (btrfs_header_level(eb) == 0)
901 return 0;
902
903 if (!tree_mod_need_log(fs_info, NULL))
904 return 0;
905
906 nritems = btrfs_header_nritems(eb);
907 tm_list = kcalloc(nritems, sizeof(struct tree_mod_elem *), GFP_NOFS);
908 if (!tm_list)
909 return -ENOMEM;
910
911 for (i = 0; i < nritems; i++) {
912 tm_list[i] = alloc_tree_mod_elem(eb, i,
913 MOD_LOG_KEY_REMOVE_WHILE_FREEING, GFP_NOFS);
914 if (!tm_list[i]) {
915 ret = -ENOMEM;
916 goto free_tms;
917 }
918 }
919
920 if (tree_mod_dont_log(fs_info, eb))
921 goto free_tms;
922
923 ret = __tree_mod_log_free_eb(fs_info, tm_list, nritems);
924 tree_mod_log_write_unlock(fs_info);
925 if (ret)
926 goto free_tms;
927 kfree(tm_list);
928
929 return 0;
930
931 free_tms:
932 for (i = 0; i < nritems; i++)
933 kfree(tm_list[i]);
934 kfree(tm_list);
935
936 return ret;
937 }
938
939 static noinline void
940 tree_mod_log_set_root_pointer(struct btrfs_root *root,
941 struct extent_buffer *new_root_node,
942 int log_removal)
943 {
944 int ret;
945 ret = tree_mod_log_insert_root(root->fs_info, root->node,
946 new_root_node, GFP_NOFS, log_removal);
947 BUG_ON(ret < 0);
948 }
949
950 /*
951 * check if the tree block can be shared by multiple trees
952 */
953 int btrfs_block_can_be_shared(struct btrfs_root *root,
954 struct extent_buffer *buf)
955 {
956 /*
957 * Tree blocks not in refernece counted trees and tree roots
958 * are never shared. If a block was allocated after the last
959 * snapshot and the block was not allocated by tree relocation,
960 * we know the block is not shared.
961 */
962 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
963 buf != root->node && buf != root->commit_root &&
964 (btrfs_header_generation(buf) <=
965 btrfs_root_last_snapshot(&root->root_item) ||
966 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
967 return 1;
968 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
969 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
970 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
971 return 1;
972 #endif
973 return 0;
974 }
975
976 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
977 struct btrfs_root *root,
978 struct extent_buffer *buf,
979 struct extent_buffer *cow,
980 int *last_ref)
981 {
982 u64 refs;
983 u64 owner;
984 u64 flags;
985 u64 new_flags = 0;
986 int ret;
987
988 /*
989 * Backrefs update rules:
990 *
991 * Always use full backrefs for extent pointers in tree block
992 * allocated by tree relocation.
993 *
994 * If a shared tree block is no longer referenced by its owner
995 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
996 * use full backrefs for extent pointers in tree block.
997 *
998 * If a tree block is been relocating
999 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
1000 * use full backrefs for extent pointers in tree block.
1001 * The reason for this is some operations (such as drop tree)
1002 * are only allowed for blocks use full backrefs.
1003 */
1004
1005 if (btrfs_block_can_be_shared(root, buf)) {
1006 ret = btrfs_lookup_extent_info(trans, root, buf->start,
1007 btrfs_header_level(buf), 1,
1008 &refs, &flags);
1009 if (ret)
1010 return ret;
1011 if (refs == 0) {
1012 ret = -EROFS;
1013 btrfs_std_error(root->fs_info, ret, NULL);
1014 return ret;
1015 }
1016 } else {
1017 refs = 1;
1018 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1019 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1020 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
1021 else
1022 flags = 0;
1023 }
1024
1025 owner = btrfs_header_owner(buf);
1026 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
1027 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
1028
1029 if (refs > 1) {
1030 if ((owner == root->root_key.objectid ||
1031 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
1032 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
1033 ret = btrfs_inc_ref(trans, root, buf, 1);
1034 BUG_ON(ret); /* -ENOMEM */
1035
1036 if (root->root_key.objectid ==
1037 BTRFS_TREE_RELOC_OBJECTID) {
1038 ret = btrfs_dec_ref(trans, root, buf, 0);
1039 BUG_ON(ret); /* -ENOMEM */
1040 ret = btrfs_inc_ref(trans, root, cow, 1);
1041 BUG_ON(ret); /* -ENOMEM */
1042 }
1043 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
1044 } else {
1045
1046 if (root->root_key.objectid ==
1047 BTRFS_TREE_RELOC_OBJECTID)
1048 ret = btrfs_inc_ref(trans, root, cow, 1);
1049 else
1050 ret = btrfs_inc_ref(trans, root, cow, 0);
1051 BUG_ON(ret); /* -ENOMEM */
1052 }
1053 if (new_flags != 0) {
1054 int level = btrfs_header_level(buf);
1055
1056 ret = btrfs_set_disk_extent_flags(trans, root,
1057 buf->start,
1058 buf->len,
1059 new_flags, level, 0);
1060 if (ret)
1061 return ret;
1062 }
1063 } else {
1064 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
1065 if (root->root_key.objectid ==
1066 BTRFS_TREE_RELOC_OBJECTID)
1067 ret = btrfs_inc_ref(trans, root, cow, 1);
1068 else
1069 ret = btrfs_inc_ref(trans, root, cow, 0);
1070 BUG_ON(ret); /* -ENOMEM */
1071 ret = btrfs_dec_ref(trans, root, buf, 1);
1072 BUG_ON(ret); /* -ENOMEM */
1073 }
1074 clean_tree_block(trans, root->fs_info, buf);
1075 *last_ref = 1;
1076 }
1077 return 0;
1078 }
1079
1080 /*
1081 * does the dirty work in cow of a single block. The parent block (if
1082 * supplied) is updated to point to the new cow copy. The new buffer is marked
1083 * dirty and returned locked. If you modify the block it needs to be marked
1084 * dirty again.
1085 *
1086 * search_start -- an allocation hint for the new block
1087 *
1088 * empty_size -- a hint that you plan on doing more cow. This is the size in
1089 * bytes the allocator should try to find free next to the block it returns.
1090 * This is just a hint and may be ignored by the allocator.
1091 */
1092 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
1093 struct btrfs_root *root,
1094 struct extent_buffer *buf,
1095 struct extent_buffer *parent, int parent_slot,
1096 struct extent_buffer **cow_ret,
1097 u64 search_start, u64 empty_size)
1098 {
1099 struct btrfs_disk_key disk_key;
1100 struct extent_buffer *cow;
1101 int level, ret;
1102 int last_ref = 0;
1103 int unlock_orig = 0;
1104 u64 parent_start;
1105
1106 if (*cow_ret == buf)
1107 unlock_orig = 1;
1108
1109 btrfs_assert_tree_locked(buf);
1110
1111 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1112 trans->transid != root->fs_info->running_transaction->transid);
1113 WARN_ON(test_bit(BTRFS_ROOT_REF_COWS, &root->state) &&
1114 trans->transid != root->last_trans);
1115
1116 level = btrfs_header_level(buf);
1117
1118 if (level == 0)
1119 btrfs_item_key(buf, &disk_key, 0);
1120 else
1121 btrfs_node_key(buf, &disk_key, 0);
1122
1123 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1124 if (parent)
1125 parent_start = parent->start;
1126 else
1127 parent_start = 0;
1128 } else
1129 parent_start = 0;
1130
1131 cow = btrfs_alloc_tree_block(trans, root, parent_start,
1132 root->root_key.objectid, &disk_key, level,
1133 search_start, empty_size);
1134 if (IS_ERR(cow))
1135 return PTR_ERR(cow);
1136
1137 /* cow is set to blocking by btrfs_init_new_buffer */
1138
1139 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1140 btrfs_set_header_bytenr(cow, cow->start);
1141 btrfs_set_header_generation(cow, trans->transid);
1142 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1143 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1144 BTRFS_HEADER_FLAG_RELOC);
1145 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1146 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1147 else
1148 btrfs_set_header_owner(cow, root->root_key.objectid);
1149
1150 write_extent_buffer(cow, root->fs_info->fsid, btrfs_header_fsid(),
1151 BTRFS_FSID_SIZE);
1152
1153 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1154 if (ret) {
1155 btrfs_abort_transaction(trans, root, ret);
1156 return ret;
1157 }
1158
1159 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state)) {
1160 ret = btrfs_reloc_cow_block(trans, root, buf, cow);
1161 if (ret) {
1162 btrfs_abort_transaction(trans, root, ret);
1163 return ret;
1164 }
1165 }
1166
1167 if (buf == root->node) {
1168 WARN_ON(parent && parent != buf);
1169 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1170 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1171 parent_start = buf->start;
1172 else
1173 parent_start = 0;
1174
1175 extent_buffer_get(cow);
1176 tree_mod_log_set_root_pointer(root, cow, 1);
1177 rcu_assign_pointer(root->node, cow);
1178
1179 btrfs_free_tree_block(trans, root, buf, parent_start,
1180 last_ref);
1181 free_extent_buffer(buf);
1182 add_root_to_dirty_list(root);
1183 } else {
1184 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1185 parent_start = parent->start;
1186 else
1187 parent_start = 0;
1188
1189 WARN_ON(trans->transid != btrfs_header_generation(parent));
1190 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1191 MOD_LOG_KEY_REPLACE, GFP_NOFS);
1192 btrfs_set_node_blockptr(parent, parent_slot,
1193 cow->start);
1194 btrfs_set_node_ptr_generation(parent, parent_slot,
1195 trans->transid);
1196 btrfs_mark_buffer_dirty(parent);
1197 if (last_ref) {
1198 ret = tree_mod_log_free_eb(root->fs_info, buf);
1199 if (ret) {
1200 btrfs_abort_transaction(trans, root, ret);
1201 return ret;
1202 }
1203 }
1204 btrfs_free_tree_block(trans, root, buf, parent_start,
1205 last_ref);
1206 }
1207 if (unlock_orig)
1208 btrfs_tree_unlock(buf);
1209 free_extent_buffer_stale(buf);
1210 btrfs_mark_buffer_dirty(cow);
1211 *cow_ret = cow;
1212 return 0;
1213 }
1214
1215 /*
1216 * returns the logical address of the oldest predecessor of the given root.
1217 * entries older than time_seq are ignored.
1218 */
1219 static struct tree_mod_elem *
1220 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1221 struct extent_buffer *eb_root, u64 time_seq)
1222 {
1223 struct tree_mod_elem *tm;
1224 struct tree_mod_elem *found = NULL;
1225 u64 root_logical = eb_root->start;
1226 int looped = 0;
1227
1228 if (!time_seq)
1229 return NULL;
1230
1231 /*
1232 * the very last operation that's logged for a root is the
1233 * replacement operation (if it is replaced at all). this has
1234 * the logical address of the *new* root, making it the very
1235 * first operation that's logged for this root.
1236 */
1237 while (1) {
1238 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1239 time_seq);
1240 if (!looped && !tm)
1241 return NULL;
1242 /*
1243 * if there are no tree operation for the oldest root, we simply
1244 * return it. this should only happen if that (old) root is at
1245 * level 0.
1246 */
1247 if (!tm)
1248 break;
1249
1250 /*
1251 * if there's an operation that's not a root replacement, we
1252 * found the oldest version of our root. normally, we'll find a
1253 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1254 */
1255 if (tm->op != MOD_LOG_ROOT_REPLACE)
1256 break;
1257
1258 found = tm;
1259 root_logical = tm->old_root.logical;
1260 looped = 1;
1261 }
1262
1263 /* if there's no old root to return, return what we found instead */
1264 if (!found)
1265 found = tm;
1266
1267 return found;
1268 }
1269
1270 /*
1271 * tm is a pointer to the first operation to rewind within eb. then, all
1272 * previous operations will be rewinded (until we reach something older than
1273 * time_seq).
1274 */
1275 static void
1276 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1277 u64 time_seq, struct tree_mod_elem *first_tm)
1278 {
1279 u32 n;
1280 struct rb_node *next;
1281 struct tree_mod_elem *tm = first_tm;
1282 unsigned long o_dst;
1283 unsigned long o_src;
1284 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1285
1286 n = btrfs_header_nritems(eb);
1287 tree_mod_log_read_lock(fs_info);
1288 while (tm && tm->seq >= time_seq) {
1289 /*
1290 * all the operations are recorded with the operator used for
1291 * the modification. as we're going backwards, we do the
1292 * opposite of each operation here.
1293 */
1294 switch (tm->op) {
1295 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1296 BUG_ON(tm->slot < n);
1297 /* Fallthrough */
1298 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1299 case MOD_LOG_KEY_REMOVE:
1300 btrfs_set_node_key(eb, &tm->key, tm->slot);
1301 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1302 btrfs_set_node_ptr_generation(eb, tm->slot,
1303 tm->generation);
1304 n++;
1305 break;
1306 case MOD_LOG_KEY_REPLACE:
1307 BUG_ON(tm->slot >= n);
1308 btrfs_set_node_key(eb, &tm->key, tm->slot);
1309 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1310 btrfs_set_node_ptr_generation(eb, tm->slot,
1311 tm->generation);
1312 break;
1313 case MOD_LOG_KEY_ADD:
1314 /* if a move operation is needed it's in the log */
1315 n--;
1316 break;
1317 case MOD_LOG_MOVE_KEYS:
1318 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1319 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1320 memmove_extent_buffer(eb, o_dst, o_src,
1321 tm->move.nr_items * p_size);
1322 break;
1323 case MOD_LOG_ROOT_REPLACE:
1324 /*
1325 * this operation is special. for roots, this must be
1326 * handled explicitly before rewinding.
1327 * for non-roots, this operation may exist if the node
1328 * was a root: root A -> child B; then A gets empty and
1329 * B is promoted to the new root. in the mod log, we'll
1330 * have a root-replace operation for B, a tree block
1331 * that is no root. we simply ignore that operation.
1332 */
1333 break;
1334 }
1335 next = rb_next(&tm->node);
1336 if (!next)
1337 break;
1338 tm = container_of(next, struct tree_mod_elem, node);
1339 if (tm->logical != first_tm->logical)
1340 break;
1341 }
1342 tree_mod_log_read_unlock(fs_info);
1343 btrfs_set_header_nritems(eb, n);
1344 }
1345
1346 /*
1347 * Called with eb read locked. If the buffer cannot be rewinded, the same buffer
1348 * is returned. If rewind operations happen, a fresh buffer is returned. The
1349 * returned buffer is always read-locked. If the returned buffer is not the
1350 * input buffer, the lock on the input buffer is released and the input buffer
1351 * is freed (its refcount is decremented).
1352 */
1353 static struct extent_buffer *
1354 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct btrfs_path *path,
1355 struct extent_buffer *eb, u64 time_seq)
1356 {
1357 struct extent_buffer *eb_rewin;
1358 struct tree_mod_elem *tm;
1359
1360 if (!time_seq)
1361 return eb;
1362
1363 if (btrfs_header_level(eb) == 0)
1364 return eb;
1365
1366 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1367 if (!tm)
1368 return eb;
1369
1370 btrfs_set_path_blocking(path);
1371 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
1372
1373 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1374 BUG_ON(tm->slot != 0);
1375 eb_rewin = alloc_dummy_extent_buffer(fs_info, eb->start);
1376 if (!eb_rewin) {
1377 btrfs_tree_read_unlock_blocking(eb);
1378 free_extent_buffer(eb);
1379 return NULL;
1380 }
1381 btrfs_set_header_bytenr(eb_rewin, eb->start);
1382 btrfs_set_header_backref_rev(eb_rewin,
1383 btrfs_header_backref_rev(eb));
1384 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1385 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1386 } else {
1387 eb_rewin = btrfs_clone_extent_buffer(eb);
1388 if (!eb_rewin) {
1389 btrfs_tree_read_unlock_blocking(eb);
1390 free_extent_buffer(eb);
1391 return NULL;
1392 }
1393 }
1394
1395 btrfs_clear_path_blocking(path, NULL, BTRFS_READ_LOCK);
1396 btrfs_tree_read_unlock_blocking(eb);
1397 free_extent_buffer(eb);
1398
1399 extent_buffer_get(eb_rewin);
1400 btrfs_tree_read_lock(eb_rewin);
1401 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1402 WARN_ON(btrfs_header_nritems(eb_rewin) >
1403 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1404
1405 return eb_rewin;
1406 }
1407
1408 /*
1409 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1410 * value. If there are no changes, the current root->root_node is returned. If
1411 * anything changed in between, there's a fresh buffer allocated on which the
1412 * rewind operations are done. In any case, the returned buffer is read locked.
1413 * Returns NULL on error (with no locks held).
1414 */
1415 static inline struct extent_buffer *
1416 get_old_root(struct btrfs_root *root, u64 time_seq)
1417 {
1418 struct tree_mod_elem *tm;
1419 struct extent_buffer *eb = NULL;
1420 struct extent_buffer *eb_root;
1421 struct extent_buffer *old;
1422 struct tree_mod_root *old_root = NULL;
1423 u64 old_generation = 0;
1424 u64 logical;
1425
1426 eb_root = btrfs_read_lock_root_node(root);
1427 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1428 if (!tm)
1429 return eb_root;
1430
1431 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1432 old_root = &tm->old_root;
1433 old_generation = tm->generation;
1434 logical = old_root->logical;
1435 } else {
1436 logical = eb_root->start;
1437 }
1438
1439 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1440 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1441 btrfs_tree_read_unlock(eb_root);
1442 free_extent_buffer(eb_root);
1443 old = read_tree_block(root, logical, 0);
1444 if (WARN_ON(IS_ERR(old) || !extent_buffer_uptodate(old))) {
1445 if (!IS_ERR(old))
1446 free_extent_buffer(old);
1447 btrfs_warn(root->fs_info,
1448 "failed to read tree block %llu from get_old_root", logical);
1449 } else {
1450 eb = btrfs_clone_extent_buffer(old);
1451 free_extent_buffer(old);
1452 }
1453 } else if (old_root) {
1454 btrfs_tree_read_unlock(eb_root);
1455 free_extent_buffer(eb_root);
1456 eb = alloc_dummy_extent_buffer(root->fs_info, logical);
1457 } else {
1458 btrfs_set_lock_blocking_rw(eb_root, BTRFS_READ_LOCK);
1459 eb = btrfs_clone_extent_buffer(eb_root);
1460 btrfs_tree_read_unlock_blocking(eb_root);
1461 free_extent_buffer(eb_root);
1462 }
1463
1464 if (!eb)
1465 return NULL;
1466 extent_buffer_get(eb);
1467 btrfs_tree_read_lock(eb);
1468 if (old_root) {
1469 btrfs_set_header_bytenr(eb, eb->start);
1470 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1471 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1472 btrfs_set_header_level(eb, old_root->level);
1473 btrfs_set_header_generation(eb, old_generation);
1474 }
1475 if (tm)
1476 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1477 else
1478 WARN_ON(btrfs_header_level(eb) != 0);
1479 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1480
1481 return eb;
1482 }
1483
1484 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1485 {
1486 struct tree_mod_elem *tm;
1487 int level;
1488 struct extent_buffer *eb_root = btrfs_root_node(root);
1489
1490 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1491 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1492 level = tm->old_root.level;
1493 } else {
1494 level = btrfs_header_level(eb_root);
1495 }
1496 free_extent_buffer(eb_root);
1497
1498 return level;
1499 }
1500
1501 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1502 struct btrfs_root *root,
1503 struct extent_buffer *buf)
1504 {
1505 if (btrfs_test_is_dummy_root(root))
1506 return 0;
1507
1508 /* ensure we can see the force_cow */
1509 smp_rmb();
1510
1511 /*
1512 * We do not need to cow a block if
1513 * 1) this block is not created or changed in this transaction;
1514 * 2) this block does not belong to TREE_RELOC tree;
1515 * 3) the root is not forced COW.
1516 *
1517 * What is forced COW:
1518 * when we create snapshot during commiting the transaction,
1519 * after we've finished coping src root, we must COW the shared
1520 * block to ensure the metadata consistency.
1521 */
1522 if (btrfs_header_generation(buf) == trans->transid &&
1523 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1524 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1525 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1526 !test_bit(BTRFS_ROOT_FORCE_COW, &root->state))
1527 return 0;
1528 return 1;
1529 }
1530
1531 /*
1532 * cows a single block, see __btrfs_cow_block for the real work.
1533 * This version of it has extra checks so that a block isn't cow'd more than
1534 * once per transaction, as long as it hasn't been written yet
1535 */
1536 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1537 struct btrfs_root *root, struct extent_buffer *buf,
1538 struct extent_buffer *parent, int parent_slot,
1539 struct extent_buffer **cow_ret)
1540 {
1541 u64 search_start;
1542 int ret;
1543
1544 if (trans->transaction != root->fs_info->running_transaction)
1545 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1546 trans->transid,
1547 root->fs_info->running_transaction->transid);
1548
1549 if (trans->transid != root->fs_info->generation)
1550 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1551 trans->transid, root->fs_info->generation);
1552
1553 if (!should_cow_block(trans, root, buf)) {
1554 *cow_ret = buf;
1555 return 0;
1556 }
1557
1558 search_start = buf->start & ~((u64)SZ_1G - 1);
1559
1560 if (parent)
1561 btrfs_set_lock_blocking(parent);
1562 btrfs_set_lock_blocking(buf);
1563
1564 ret = __btrfs_cow_block(trans, root, buf, parent,
1565 parent_slot, cow_ret, search_start, 0);
1566
1567 trace_btrfs_cow_block(root, buf, *cow_ret);
1568
1569 return ret;
1570 }
1571
1572 /*
1573 * helper function for defrag to decide if two blocks pointed to by a
1574 * node are actually close by
1575 */
1576 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1577 {
1578 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1579 return 1;
1580 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1581 return 1;
1582 return 0;
1583 }
1584
1585 /*
1586 * compare two keys in a memcmp fashion
1587 */
1588 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1589 {
1590 struct btrfs_key k1;
1591
1592 btrfs_disk_key_to_cpu(&k1, disk);
1593
1594 return btrfs_comp_cpu_keys(&k1, k2);
1595 }
1596
1597 /*
1598 * same as comp_keys only with two btrfs_key's
1599 */
1600 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1601 {
1602 if (k1->objectid > k2->objectid)
1603 return 1;
1604 if (k1->objectid < k2->objectid)
1605 return -1;
1606 if (k1->type > k2->type)
1607 return 1;
1608 if (k1->type < k2->type)
1609 return -1;
1610 if (k1->offset > k2->offset)
1611 return 1;
1612 if (k1->offset < k2->offset)
1613 return -1;
1614 return 0;
1615 }
1616
1617 /*
1618 * this is used by the defrag code to go through all the
1619 * leaves pointed to by a node and reallocate them so that
1620 * disk order is close to key order
1621 */
1622 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1623 struct btrfs_root *root, struct extent_buffer *parent,
1624 int start_slot, u64 *last_ret,
1625 struct btrfs_key *progress)
1626 {
1627 struct extent_buffer *cur;
1628 u64 blocknr;
1629 u64 gen;
1630 u64 search_start = *last_ret;
1631 u64 last_block = 0;
1632 u64 other;
1633 u32 parent_nritems;
1634 int end_slot;
1635 int i;
1636 int err = 0;
1637 int parent_level;
1638 int uptodate;
1639 u32 blocksize;
1640 int progress_passed = 0;
1641 struct btrfs_disk_key disk_key;
1642
1643 parent_level = btrfs_header_level(parent);
1644
1645 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1646 WARN_ON(trans->transid != root->fs_info->generation);
1647
1648 parent_nritems = btrfs_header_nritems(parent);
1649 blocksize = root->nodesize;
1650 end_slot = parent_nritems - 1;
1651
1652 if (parent_nritems <= 1)
1653 return 0;
1654
1655 btrfs_set_lock_blocking(parent);
1656
1657 for (i = start_slot; i <= end_slot; i++) {
1658 int close = 1;
1659
1660 btrfs_node_key(parent, &disk_key, i);
1661 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1662 continue;
1663
1664 progress_passed = 1;
1665 blocknr = btrfs_node_blockptr(parent, i);
1666 gen = btrfs_node_ptr_generation(parent, i);
1667 if (last_block == 0)
1668 last_block = blocknr;
1669
1670 if (i > 0) {
1671 other = btrfs_node_blockptr(parent, i - 1);
1672 close = close_blocks(blocknr, other, blocksize);
1673 }
1674 if (!close && i < end_slot) {
1675 other = btrfs_node_blockptr(parent, i + 1);
1676 close = close_blocks(blocknr, other, blocksize);
1677 }
1678 if (close) {
1679 last_block = blocknr;
1680 continue;
1681 }
1682
1683 cur = btrfs_find_tree_block(root->fs_info, blocknr);
1684 if (cur)
1685 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1686 else
1687 uptodate = 0;
1688 if (!cur || !uptodate) {
1689 if (!cur) {
1690 cur = read_tree_block(root, blocknr, gen);
1691 if (IS_ERR(cur)) {
1692 return PTR_ERR(cur);
1693 } else if (!extent_buffer_uptodate(cur)) {
1694 free_extent_buffer(cur);
1695 return -EIO;
1696 }
1697 } else if (!uptodate) {
1698 err = btrfs_read_buffer(cur, gen);
1699 if (err) {
1700 free_extent_buffer(cur);
1701 return err;
1702 }
1703 }
1704 }
1705 if (search_start == 0)
1706 search_start = last_block;
1707
1708 btrfs_tree_lock(cur);
1709 btrfs_set_lock_blocking(cur);
1710 err = __btrfs_cow_block(trans, root, cur, parent, i,
1711 &cur, search_start,
1712 min(16 * blocksize,
1713 (end_slot - i) * blocksize));
1714 if (err) {
1715 btrfs_tree_unlock(cur);
1716 free_extent_buffer(cur);
1717 break;
1718 }
1719 search_start = cur->start;
1720 last_block = cur->start;
1721 *last_ret = search_start;
1722 btrfs_tree_unlock(cur);
1723 free_extent_buffer(cur);
1724 }
1725 return err;
1726 }
1727
1728 /*
1729 * The leaf data grows from end-to-front in the node.
1730 * this returns the address of the start of the last item,
1731 * which is the stop of the leaf data stack
1732 */
1733 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1734 struct extent_buffer *leaf)
1735 {
1736 u32 nr = btrfs_header_nritems(leaf);
1737 if (nr == 0)
1738 return BTRFS_LEAF_DATA_SIZE(root);
1739 return btrfs_item_offset_nr(leaf, nr - 1);
1740 }
1741
1742
1743 /*
1744 * search for key in the extent_buffer. The items start at offset p,
1745 * and they are item_size apart. There are 'max' items in p.
1746 *
1747 * the slot in the array is returned via slot, and it points to
1748 * the place where you would insert key if it is not found in
1749 * the array.
1750 *
1751 * slot may point to max if the key is bigger than all of the keys
1752 */
1753 static noinline int generic_bin_search(struct extent_buffer *eb,
1754 unsigned long p,
1755 int item_size, struct btrfs_key *key,
1756 int max, int *slot)
1757 {
1758 int low = 0;
1759 int high = max;
1760 int mid;
1761 int ret;
1762 struct btrfs_disk_key *tmp = NULL;
1763 struct btrfs_disk_key unaligned;
1764 unsigned long offset;
1765 char *kaddr = NULL;
1766 unsigned long map_start = 0;
1767 unsigned long map_len = 0;
1768 int err;
1769
1770 while (low < high) {
1771 mid = (low + high) / 2;
1772 offset = p + mid * item_size;
1773
1774 if (!kaddr || offset < map_start ||
1775 (offset + sizeof(struct btrfs_disk_key)) >
1776 map_start + map_len) {
1777
1778 err = map_private_extent_buffer(eb, offset,
1779 sizeof(struct btrfs_disk_key),
1780 &kaddr, &map_start, &map_len);
1781
1782 if (!err) {
1783 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1784 map_start);
1785 } else {
1786 read_extent_buffer(eb, &unaligned,
1787 offset, sizeof(unaligned));
1788 tmp = &unaligned;
1789 }
1790
1791 } else {
1792 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1793 map_start);
1794 }
1795 ret = comp_keys(tmp, key);
1796
1797 if (ret < 0)
1798 low = mid + 1;
1799 else if (ret > 0)
1800 high = mid;
1801 else {
1802 *slot = mid;
1803 return 0;
1804 }
1805 }
1806 *slot = low;
1807 return 1;
1808 }
1809
1810 /*
1811 * simple bin_search frontend that does the right thing for
1812 * leaves vs nodes
1813 */
1814 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1815 int level, int *slot)
1816 {
1817 if (level == 0)
1818 return generic_bin_search(eb,
1819 offsetof(struct btrfs_leaf, items),
1820 sizeof(struct btrfs_item),
1821 key, btrfs_header_nritems(eb),
1822 slot);
1823 else
1824 return generic_bin_search(eb,
1825 offsetof(struct btrfs_node, ptrs),
1826 sizeof(struct btrfs_key_ptr),
1827 key, btrfs_header_nritems(eb),
1828 slot);
1829 }
1830
1831 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1832 int level, int *slot)
1833 {
1834 return bin_search(eb, key, level, slot);
1835 }
1836
1837 static void root_add_used(struct btrfs_root *root, u32 size)
1838 {
1839 spin_lock(&root->accounting_lock);
1840 btrfs_set_root_used(&root->root_item,
1841 btrfs_root_used(&root->root_item) + size);
1842 spin_unlock(&root->accounting_lock);
1843 }
1844
1845 static void root_sub_used(struct btrfs_root *root, u32 size)
1846 {
1847 spin_lock(&root->accounting_lock);
1848 btrfs_set_root_used(&root->root_item,
1849 btrfs_root_used(&root->root_item) - size);
1850 spin_unlock(&root->accounting_lock);
1851 }
1852
1853 /* given a node and slot number, this reads the blocks it points to. The
1854 * extent buffer is returned with a reference taken (but unlocked).
1855 * NULL is returned on error.
1856 */
1857 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1858 struct extent_buffer *parent, int slot)
1859 {
1860 int level = btrfs_header_level(parent);
1861 struct extent_buffer *eb;
1862
1863 if (slot < 0)
1864 return NULL;
1865 if (slot >= btrfs_header_nritems(parent))
1866 return NULL;
1867
1868 BUG_ON(level == 0);
1869
1870 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1871 btrfs_node_ptr_generation(parent, slot));
1872 if (IS_ERR(eb) || !extent_buffer_uptodate(eb)) {
1873 if (!IS_ERR(eb))
1874 free_extent_buffer(eb);
1875 eb = NULL;
1876 }
1877
1878 return eb;
1879 }
1880
1881 /*
1882 * node level balancing, used to make sure nodes are in proper order for
1883 * item deletion. We balance from the top down, so we have to make sure
1884 * that a deletion won't leave an node completely empty later on.
1885 */
1886 static noinline int balance_level(struct btrfs_trans_handle *trans,
1887 struct btrfs_root *root,
1888 struct btrfs_path *path, int level)
1889 {
1890 struct extent_buffer *right = NULL;
1891 struct extent_buffer *mid;
1892 struct extent_buffer *left = NULL;
1893 struct extent_buffer *parent = NULL;
1894 int ret = 0;
1895 int wret;
1896 int pslot;
1897 int orig_slot = path->slots[level];
1898 u64 orig_ptr;
1899
1900 if (level == 0)
1901 return 0;
1902
1903 mid = path->nodes[level];
1904
1905 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1906 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1907 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1908
1909 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1910
1911 if (level < BTRFS_MAX_LEVEL - 1) {
1912 parent = path->nodes[level + 1];
1913 pslot = path->slots[level + 1];
1914 }
1915
1916 /*
1917 * deal with the case where there is only one pointer in the root
1918 * by promoting the node below to a root
1919 */
1920 if (!parent) {
1921 struct extent_buffer *child;
1922
1923 if (btrfs_header_nritems(mid) != 1)
1924 return 0;
1925
1926 /* promote the child to a root */
1927 child = read_node_slot(root, mid, 0);
1928 if (!child) {
1929 ret = -EROFS;
1930 btrfs_std_error(root->fs_info, ret, NULL);
1931 goto enospc;
1932 }
1933
1934 btrfs_tree_lock(child);
1935 btrfs_set_lock_blocking(child);
1936 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1937 if (ret) {
1938 btrfs_tree_unlock(child);
1939 free_extent_buffer(child);
1940 goto enospc;
1941 }
1942
1943 tree_mod_log_set_root_pointer(root, child, 1);
1944 rcu_assign_pointer(root->node, child);
1945
1946 add_root_to_dirty_list(root);
1947 btrfs_tree_unlock(child);
1948
1949 path->locks[level] = 0;
1950 path->nodes[level] = NULL;
1951 clean_tree_block(trans, root->fs_info, mid);
1952 btrfs_tree_unlock(mid);
1953 /* once for the path */
1954 free_extent_buffer(mid);
1955
1956 root_sub_used(root, mid->len);
1957 btrfs_free_tree_block(trans, root, mid, 0, 1);
1958 /* once for the root ptr */
1959 free_extent_buffer_stale(mid);
1960 return 0;
1961 }
1962 if (btrfs_header_nritems(mid) >
1963 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1964 return 0;
1965
1966 left = read_node_slot(root, parent, pslot - 1);
1967 if (left) {
1968 btrfs_tree_lock(left);
1969 btrfs_set_lock_blocking(left);
1970 wret = btrfs_cow_block(trans, root, left,
1971 parent, pslot - 1, &left);
1972 if (wret) {
1973 ret = wret;
1974 goto enospc;
1975 }
1976 }
1977 right = read_node_slot(root, parent, pslot + 1);
1978 if (right) {
1979 btrfs_tree_lock(right);
1980 btrfs_set_lock_blocking(right);
1981 wret = btrfs_cow_block(trans, root, right,
1982 parent, pslot + 1, &right);
1983 if (wret) {
1984 ret = wret;
1985 goto enospc;
1986 }
1987 }
1988
1989 /* first, try to make some room in the middle buffer */
1990 if (left) {
1991 orig_slot += btrfs_header_nritems(left);
1992 wret = push_node_left(trans, root, left, mid, 1);
1993 if (wret < 0)
1994 ret = wret;
1995 }
1996
1997 /*
1998 * then try to empty the right most buffer into the middle
1999 */
2000 if (right) {
2001 wret = push_node_left(trans, root, mid, right, 1);
2002 if (wret < 0 && wret != -ENOSPC)
2003 ret = wret;
2004 if (btrfs_header_nritems(right) == 0) {
2005 clean_tree_block(trans, root->fs_info, right);
2006 btrfs_tree_unlock(right);
2007 del_ptr(root, path, level + 1, pslot + 1);
2008 root_sub_used(root, right->len);
2009 btrfs_free_tree_block(trans, root, right, 0, 1);
2010 free_extent_buffer_stale(right);
2011 right = NULL;
2012 } else {
2013 struct btrfs_disk_key right_key;
2014 btrfs_node_key(right, &right_key, 0);
2015 tree_mod_log_set_node_key(root->fs_info, parent,
2016 pslot + 1, 0);
2017 btrfs_set_node_key(parent, &right_key, pslot + 1);
2018 btrfs_mark_buffer_dirty(parent);
2019 }
2020 }
2021 if (btrfs_header_nritems(mid) == 1) {
2022 /*
2023 * we're not allowed to leave a node with one item in the
2024 * tree during a delete. A deletion from lower in the tree
2025 * could try to delete the only pointer in this node.
2026 * So, pull some keys from the left.
2027 * There has to be a left pointer at this point because
2028 * otherwise we would have pulled some pointers from the
2029 * right
2030 */
2031 if (!left) {
2032 ret = -EROFS;
2033 btrfs_std_error(root->fs_info, ret, NULL);
2034 goto enospc;
2035 }
2036 wret = balance_node_right(trans, root, mid, left);
2037 if (wret < 0) {
2038 ret = wret;
2039 goto enospc;
2040 }
2041 if (wret == 1) {
2042 wret = push_node_left(trans, root, left, mid, 1);
2043 if (wret < 0)
2044 ret = wret;
2045 }
2046 BUG_ON(wret == 1);
2047 }
2048 if (btrfs_header_nritems(mid) == 0) {
2049 clean_tree_block(trans, root->fs_info, mid);
2050 btrfs_tree_unlock(mid);
2051 del_ptr(root, path, level + 1, pslot);
2052 root_sub_used(root, mid->len);
2053 btrfs_free_tree_block(trans, root, mid, 0, 1);
2054 free_extent_buffer_stale(mid);
2055 mid = NULL;
2056 } else {
2057 /* update the parent key to reflect our changes */
2058 struct btrfs_disk_key mid_key;
2059 btrfs_node_key(mid, &mid_key, 0);
2060 tree_mod_log_set_node_key(root->fs_info, parent,
2061 pslot, 0);
2062 btrfs_set_node_key(parent, &mid_key, pslot);
2063 btrfs_mark_buffer_dirty(parent);
2064 }
2065
2066 /* update the path */
2067 if (left) {
2068 if (btrfs_header_nritems(left) > orig_slot) {
2069 extent_buffer_get(left);
2070 /* left was locked after cow */
2071 path->nodes[level] = left;
2072 path->slots[level + 1] -= 1;
2073 path->slots[level] = orig_slot;
2074 if (mid) {
2075 btrfs_tree_unlock(mid);
2076 free_extent_buffer(mid);
2077 }
2078 } else {
2079 orig_slot -= btrfs_header_nritems(left);
2080 path->slots[level] = orig_slot;
2081 }
2082 }
2083 /* double check we haven't messed things up */
2084 if (orig_ptr !=
2085 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
2086 BUG();
2087 enospc:
2088 if (right) {
2089 btrfs_tree_unlock(right);
2090 free_extent_buffer(right);
2091 }
2092 if (left) {
2093 if (path->nodes[level] != left)
2094 btrfs_tree_unlock(left);
2095 free_extent_buffer(left);
2096 }
2097 return ret;
2098 }
2099
2100 /* Node balancing for insertion. Here we only split or push nodes around
2101 * when they are completely full. This is also done top down, so we
2102 * have to be pessimistic.
2103 */
2104 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
2105 struct btrfs_root *root,
2106 struct btrfs_path *path, int level)
2107 {
2108 struct extent_buffer *right = NULL;
2109 struct extent_buffer *mid;
2110 struct extent_buffer *left = NULL;
2111 struct extent_buffer *parent = NULL;
2112 int ret = 0;
2113 int wret;
2114 int pslot;
2115 int orig_slot = path->slots[level];
2116
2117 if (level == 0)
2118 return 1;
2119
2120 mid = path->nodes[level];
2121 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2122
2123 if (level < BTRFS_MAX_LEVEL - 1) {
2124 parent = path->nodes[level + 1];
2125 pslot = path->slots[level + 1];
2126 }
2127
2128 if (!parent)
2129 return 1;
2130
2131 left = read_node_slot(root, parent, pslot - 1);
2132
2133 /* first, try to make some room in the middle buffer */
2134 if (left) {
2135 u32 left_nr;
2136
2137 btrfs_tree_lock(left);
2138 btrfs_set_lock_blocking(left);
2139
2140 left_nr = btrfs_header_nritems(left);
2141 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2142 wret = 1;
2143 } else {
2144 ret = btrfs_cow_block(trans, root, left, parent,
2145 pslot - 1, &left);
2146 if (ret)
2147 wret = 1;
2148 else {
2149 wret = push_node_left(trans, root,
2150 left, mid, 0);
2151 }
2152 }
2153 if (wret < 0)
2154 ret = wret;
2155 if (wret == 0) {
2156 struct btrfs_disk_key disk_key;
2157 orig_slot += left_nr;
2158 btrfs_node_key(mid, &disk_key, 0);
2159 tree_mod_log_set_node_key(root->fs_info, parent,
2160 pslot, 0);
2161 btrfs_set_node_key(parent, &disk_key, pslot);
2162 btrfs_mark_buffer_dirty(parent);
2163 if (btrfs_header_nritems(left) > orig_slot) {
2164 path->nodes[level] = left;
2165 path->slots[level + 1] -= 1;
2166 path->slots[level] = orig_slot;
2167 btrfs_tree_unlock(mid);
2168 free_extent_buffer(mid);
2169 } else {
2170 orig_slot -=
2171 btrfs_header_nritems(left);
2172 path->slots[level] = orig_slot;
2173 btrfs_tree_unlock(left);
2174 free_extent_buffer(left);
2175 }
2176 return 0;
2177 }
2178 btrfs_tree_unlock(left);
2179 free_extent_buffer(left);
2180 }
2181 right = read_node_slot(root, parent, pslot + 1);
2182
2183 /*
2184 * then try to empty the right most buffer into the middle
2185 */
2186 if (right) {
2187 u32 right_nr;
2188
2189 btrfs_tree_lock(right);
2190 btrfs_set_lock_blocking(right);
2191
2192 right_nr = btrfs_header_nritems(right);
2193 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2194 wret = 1;
2195 } else {
2196 ret = btrfs_cow_block(trans, root, right,
2197 parent, pslot + 1,
2198 &right);
2199 if (ret)
2200 wret = 1;
2201 else {
2202 wret = balance_node_right(trans, root,
2203 right, mid);
2204 }
2205 }
2206 if (wret < 0)
2207 ret = wret;
2208 if (wret == 0) {
2209 struct btrfs_disk_key disk_key;
2210
2211 btrfs_node_key(right, &disk_key, 0);
2212 tree_mod_log_set_node_key(root->fs_info, parent,
2213 pslot + 1, 0);
2214 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2215 btrfs_mark_buffer_dirty(parent);
2216
2217 if (btrfs_header_nritems(mid) <= orig_slot) {
2218 path->nodes[level] = right;
2219 path->slots[level + 1] += 1;
2220 path->slots[level] = orig_slot -
2221 btrfs_header_nritems(mid);
2222 btrfs_tree_unlock(mid);
2223 free_extent_buffer(mid);
2224 } else {
2225 btrfs_tree_unlock(right);
2226 free_extent_buffer(right);
2227 }
2228 return 0;
2229 }
2230 btrfs_tree_unlock(right);
2231 free_extent_buffer(right);
2232 }
2233 return 1;
2234 }
2235
2236 /*
2237 * readahead one full node of leaves, finding things that are close
2238 * to the block in 'slot', and triggering ra on them.
2239 */
2240 static void reada_for_search(struct btrfs_root *root,
2241 struct btrfs_path *path,
2242 int level, int slot, u64 objectid)
2243 {
2244 struct extent_buffer *node;
2245 struct btrfs_disk_key disk_key;
2246 u32 nritems;
2247 u64 search;
2248 u64 target;
2249 u64 nread = 0;
2250 u64 gen;
2251 struct extent_buffer *eb;
2252 u32 nr;
2253 u32 blocksize;
2254 u32 nscan = 0;
2255
2256 if (level != 1)
2257 return;
2258
2259 if (!path->nodes[level])
2260 return;
2261
2262 node = path->nodes[level];
2263
2264 search = btrfs_node_blockptr(node, slot);
2265 blocksize = root->nodesize;
2266 eb = btrfs_find_tree_block(root->fs_info, search);
2267 if (eb) {
2268 free_extent_buffer(eb);
2269 return;
2270 }
2271
2272 target = search;
2273
2274 nritems = btrfs_header_nritems(node);
2275 nr = slot;
2276
2277 while (1) {
2278 if (path->reada == READA_BACK) {
2279 if (nr == 0)
2280 break;
2281 nr--;
2282 } else if (path->reada == READA_FORWARD) {
2283 nr++;
2284 if (nr >= nritems)
2285 break;
2286 }
2287 if (path->reada == READA_BACK && objectid) {
2288 btrfs_node_key(node, &disk_key, nr);
2289 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2290 break;
2291 }
2292 search = btrfs_node_blockptr(node, nr);
2293 if ((search <= target && target - search <= 65536) ||
2294 (search > target && search - target <= 65536)) {
2295 gen = btrfs_node_ptr_generation(node, nr);
2296 readahead_tree_block(root, search);
2297 nread += blocksize;
2298 }
2299 nscan++;
2300 if ((nread > 65536 || nscan > 32))
2301 break;
2302 }
2303 }
2304
2305 static noinline void reada_for_balance(struct btrfs_root *root,
2306 struct btrfs_path *path, int level)
2307 {
2308 int slot;
2309 int nritems;
2310 struct extent_buffer *parent;
2311 struct extent_buffer *eb;
2312 u64 gen;
2313 u64 block1 = 0;
2314 u64 block2 = 0;
2315
2316 parent = path->nodes[level + 1];
2317 if (!parent)
2318 return;
2319
2320 nritems = btrfs_header_nritems(parent);
2321 slot = path->slots[level + 1];
2322
2323 if (slot > 0) {
2324 block1 = btrfs_node_blockptr(parent, slot - 1);
2325 gen = btrfs_node_ptr_generation(parent, slot - 1);
2326 eb = btrfs_find_tree_block(root->fs_info, block1);
2327 /*
2328 * if we get -eagain from btrfs_buffer_uptodate, we
2329 * don't want to return eagain here. That will loop
2330 * forever
2331 */
2332 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2333 block1 = 0;
2334 free_extent_buffer(eb);
2335 }
2336 if (slot + 1 < nritems) {
2337 block2 = btrfs_node_blockptr(parent, slot + 1);
2338 gen = btrfs_node_ptr_generation(parent, slot + 1);
2339 eb = btrfs_find_tree_block(root->fs_info, block2);
2340 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2341 block2 = 0;
2342 free_extent_buffer(eb);
2343 }
2344
2345 if (block1)
2346 readahead_tree_block(root, block1);
2347 if (block2)
2348 readahead_tree_block(root, block2);
2349 }
2350
2351
2352 /*
2353 * when we walk down the tree, it is usually safe to unlock the higher layers
2354 * in the tree. The exceptions are when our path goes through slot 0, because
2355 * operations on the tree might require changing key pointers higher up in the
2356 * tree.
2357 *
2358 * callers might also have set path->keep_locks, which tells this code to keep
2359 * the lock if the path points to the last slot in the block. This is part of
2360 * walking through the tree, and selecting the next slot in the higher block.
2361 *
2362 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
2363 * if lowest_unlock is 1, level 0 won't be unlocked
2364 */
2365 static noinline void unlock_up(struct btrfs_path *path, int level,
2366 int lowest_unlock, int min_write_lock_level,
2367 int *write_lock_level)
2368 {
2369 int i;
2370 int skip_level = level;
2371 int no_skips = 0;
2372 struct extent_buffer *t;
2373
2374 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2375 if (!path->nodes[i])
2376 break;
2377 if (!path->locks[i])
2378 break;
2379 if (!no_skips && path->slots[i] == 0) {
2380 skip_level = i + 1;
2381 continue;
2382 }
2383 if (!no_skips && path->keep_locks) {
2384 u32 nritems;
2385 t = path->nodes[i];
2386 nritems = btrfs_header_nritems(t);
2387 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2388 skip_level = i + 1;
2389 continue;
2390 }
2391 }
2392 if (skip_level < i && i >= lowest_unlock)
2393 no_skips = 1;
2394
2395 t = path->nodes[i];
2396 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2397 btrfs_tree_unlock_rw(t, path->locks[i]);
2398 path->locks[i] = 0;
2399 if (write_lock_level &&
2400 i > min_write_lock_level &&
2401 i <= *write_lock_level) {
2402 *write_lock_level = i - 1;
2403 }
2404 }
2405 }
2406 }
2407
2408 /*
2409 * This releases any locks held in the path starting at level and
2410 * going all the way up to the root.
2411 *
2412 * btrfs_search_slot will keep the lock held on higher nodes in a few
2413 * corner cases, such as COW of the block at slot zero in the node. This
2414 * ignores those rules, and it should only be called when there are no
2415 * more updates to be done higher up in the tree.
2416 */
2417 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2418 {
2419 int i;
2420
2421 if (path->keep_locks)
2422 return;
2423
2424 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2425 if (!path->nodes[i])
2426 continue;
2427 if (!path->locks[i])
2428 continue;
2429 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2430 path->locks[i] = 0;
2431 }
2432 }
2433
2434 /*
2435 * helper function for btrfs_search_slot. The goal is to find a block
2436 * in cache without setting the path to blocking. If we find the block
2437 * we return zero and the path is unchanged.
2438 *
2439 * If we can't find the block, we set the path blocking and do some
2440 * reada. -EAGAIN is returned and the search must be repeated.
2441 */
2442 static int
2443 read_block_for_search(struct btrfs_trans_handle *trans,
2444 struct btrfs_root *root, struct btrfs_path *p,
2445 struct extent_buffer **eb_ret, int level, int slot,
2446 struct btrfs_key *key, u64 time_seq)
2447 {
2448 u64 blocknr;
2449 u64 gen;
2450 struct extent_buffer *b = *eb_ret;
2451 struct extent_buffer *tmp;
2452 int ret;
2453
2454 blocknr = btrfs_node_blockptr(b, slot);
2455 gen = btrfs_node_ptr_generation(b, slot);
2456
2457 tmp = btrfs_find_tree_block(root->fs_info, blocknr);
2458 if (tmp) {
2459 /* first we do an atomic uptodate check */
2460 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2461 *eb_ret = tmp;
2462 return 0;
2463 }
2464
2465 /* the pages were up to date, but we failed
2466 * the generation number check. Do a full
2467 * read for the generation number that is correct.
2468 * We must do this without dropping locks so
2469 * we can trust our generation number
2470 */
2471 btrfs_set_path_blocking(p);
2472
2473 /* now we're allowed to do a blocking uptodate check */
2474 ret = btrfs_read_buffer(tmp, gen);
2475 if (!ret) {
2476 *eb_ret = tmp;
2477 return 0;
2478 }
2479 free_extent_buffer(tmp);
2480 btrfs_release_path(p);
2481 return -EIO;
2482 }
2483
2484 /*
2485 * reduce lock contention at high levels
2486 * of the btree by dropping locks before
2487 * we read. Don't release the lock on the current
2488 * level because we need to walk this node to figure
2489 * out which blocks to read.
2490 */
2491 btrfs_unlock_up_safe(p, level + 1);
2492 btrfs_set_path_blocking(p);
2493
2494 free_extent_buffer(tmp);
2495 if (p->reada != READA_NONE)
2496 reada_for_search(root, p, level, slot, key->objectid);
2497
2498 btrfs_release_path(p);
2499
2500 ret = -EAGAIN;
2501 tmp = read_tree_block(root, blocknr, 0);
2502 if (!IS_ERR(tmp)) {
2503 /*
2504 * If the read above didn't mark this buffer up to date,
2505 * it will never end up being up to date. Set ret to EIO now
2506 * and give up so that our caller doesn't loop forever
2507 * on our EAGAINs.
2508 */
2509 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2510 ret = -EIO;
2511 free_extent_buffer(tmp);
2512 }
2513 return ret;
2514 }
2515
2516 /*
2517 * helper function for btrfs_search_slot. This does all of the checks
2518 * for node-level blocks and does any balancing required based on
2519 * the ins_len.
2520 *
2521 * If no extra work was required, zero is returned. If we had to
2522 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2523 * start over
2524 */
2525 static int
2526 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2527 struct btrfs_root *root, struct btrfs_path *p,
2528 struct extent_buffer *b, int level, int ins_len,
2529 int *write_lock_level)
2530 {
2531 int ret;
2532 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2533 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2534 int sret;
2535
2536 if (*write_lock_level < level + 1) {
2537 *write_lock_level = level + 1;
2538 btrfs_release_path(p);
2539 goto again;
2540 }
2541
2542 btrfs_set_path_blocking(p);
2543 reada_for_balance(root, p, level);
2544 sret = split_node(trans, root, p, level);
2545 btrfs_clear_path_blocking(p, NULL, 0);
2546
2547 BUG_ON(sret > 0);
2548 if (sret) {
2549 ret = sret;
2550 goto done;
2551 }
2552 b = p->nodes[level];
2553 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2554 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2555 int sret;
2556
2557 if (*write_lock_level < level + 1) {
2558 *write_lock_level = level + 1;
2559 btrfs_release_path(p);
2560 goto again;
2561 }
2562
2563 btrfs_set_path_blocking(p);
2564 reada_for_balance(root, p, level);
2565 sret = balance_level(trans, root, p, level);
2566 btrfs_clear_path_blocking(p, NULL, 0);
2567
2568 if (sret) {
2569 ret = sret;
2570 goto done;
2571 }
2572 b = p->nodes[level];
2573 if (!b) {
2574 btrfs_release_path(p);
2575 goto again;
2576 }
2577 BUG_ON(btrfs_header_nritems(b) == 1);
2578 }
2579 return 0;
2580
2581 again:
2582 ret = -EAGAIN;
2583 done:
2584 return ret;
2585 }
2586
2587 static void key_search_validate(struct extent_buffer *b,
2588 struct btrfs_key *key,
2589 int level)
2590 {
2591 #ifdef CONFIG_BTRFS_ASSERT
2592 struct btrfs_disk_key disk_key;
2593
2594 btrfs_cpu_key_to_disk(&disk_key, key);
2595
2596 if (level == 0)
2597 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2598 offsetof(struct btrfs_leaf, items[0].key),
2599 sizeof(disk_key)));
2600 else
2601 ASSERT(!memcmp_extent_buffer(b, &disk_key,
2602 offsetof(struct btrfs_node, ptrs[0].key),
2603 sizeof(disk_key)));
2604 #endif
2605 }
2606
2607 static int key_search(struct extent_buffer *b, struct btrfs_key *key,
2608 int level, int *prev_cmp, int *slot)
2609 {
2610 if (*prev_cmp != 0) {
2611 *prev_cmp = bin_search(b, key, level, slot);
2612 return *prev_cmp;
2613 }
2614
2615 key_search_validate(b, key, level);
2616 *slot = 0;
2617
2618 return 0;
2619 }
2620
2621 int btrfs_find_item(struct btrfs_root *fs_root, struct btrfs_path *path,
2622 u64 iobjectid, u64 ioff, u8 key_type,
2623 struct btrfs_key *found_key)
2624 {
2625 int ret;
2626 struct btrfs_key key;
2627 struct extent_buffer *eb;
2628
2629 ASSERT(path);
2630 ASSERT(found_key);
2631
2632 key.type = key_type;
2633 key.objectid = iobjectid;
2634 key.offset = ioff;
2635
2636 ret = btrfs_search_slot(NULL, fs_root, &key, path, 0, 0);
2637 if (ret < 0)
2638 return ret;
2639
2640 eb = path->nodes[0];
2641 if (ret && path->slots[0] >= btrfs_header_nritems(eb)) {
2642 ret = btrfs_next_leaf(fs_root, path);
2643 if (ret)
2644 return ret;
2645 eb = path->nodes[0];
2646 }
2647
2648 btrfs_item_key_to_cpu(eb, found_key, path->slots[0]);
2649 if (found_key->type != key.type ||
2650 found_key->objectid != key.objectid)
2651 return 1;
2652
2653 return 0;
2654 }
2655
2656 /*
2657 * look for key in the tree. path is filled in with nodes along the way
2658 * if key is found, we return zero and you can find the item in the leaf
2659 * level of the path (level 0)
2660 *
2661 * If the key isn't found, the path points to the slot where it should
2662 * be inserted, and 1 is returned. If there are other errors during the
2663 * search a negative error number is returned.
2664 *
2665 * if ins_len > 0, nodes and leaves will be split as we walk down the
2666 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2667 * possible)
2668 */
2669 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2670 *root, struct btrfs_key *key, struct btrfs_path *p, int
2671 ins_len, int cow)
2672 {
2673 struct extent_buffer *b;
2674 int slot;
2675 int ret;
2676 int err;
2677 int level;
2678 int lowest_unlock = 1;
2679 int root_lock;
2680 /* everything at write_lock_level or lower must be write locked */
2681 int write_lock_level = 0;
2682 u8 lowest_level = 0;
2683 int min_write_lock_level;
2684 int prev_cmp;
2685
2686 lowest_level = p->lowest_level;
2687 WARN_ON(lowest_level && ins_len > 0);
2688 WARN_ON(p->nodes[0] != NULL);
2689 BUG_ON(!cow && ins_len);
2690
2691 if (ins_len < 0) {
2692 lowest_unlock = 2;
2693
2694 /* when we are removing items, we might have to go up to level
2695 * two as we update tree pointers Make sure we keep write
2696 * for those levels as well
2697 */
2698 write_lock_level = 2;
2699 } else if (ins_len > 0) {
2700 /*
2701 * for inserting items, make sure we have a write lock on
2702 * level 1 so we can update keys
2703 */
2704 write_lock_level = 1;
2705 }
2706
2707 if (!cow)
2708 write_lock_level = -1;
2709
2710 if (cow && (p->keep_locks || p->lowest_level))
2711 write_lock_level = BTRFS_MAX_LEVEL;
2712
2713 min_write_lock_level = write_lock_level;
2714
2715 again:
2716 prev_cmp = -1;
2717 /*
2718 * we try very hard to do read locks on the root
2719 */
2720 root_lock = BTRFS_READ_LOCK;
2721 level = 0;
2722 if (p->search_commit_root) {
2723 /*
2724 * the commit roots are read only
2725 * so we always do read locks
2726 */
2727 if (p->need_commit_sem)
2728 down_read(&root->fs_info->commit_root_sem);
2729 b = root->commit_root;
2730 extent_buffer_get(b);
2731 level = btrfs_header_level(b);
2732 if (p->need_commit_sem)
2733 up_read(&root->fs_info->commit_root_sem);
2734 if (!p->skip_locking)
2735 btrfs_tree_read_lock(b);
2736 } else {
2737 if (p->skip_locking) {
2738 b = btrfs_root_node(root);
2739 level = btrfs_header_level(b);
2740 } else {
2741 /* we don't know the level of the root node
2742 * until we actually have it read locked
2743 */
2744 b = btrfs_read_lock_root_node(root);
2745 level = btrfs_header_level(b);
2746 if (level <= write_lock_level) {
2747 /* whoops, must trade for write lock */
2748 btrfs_tree_read_unlock(b);
2749 free_extent_buffer(b);
2750 b = btrfs_lock_root_node(root);
2751 root_lock = BTRFS_WRITE_LOCK;
2752
2753 /* the level might have changed, check again */
2754 level = btrfs_header_level(b);
2755 }
2756 }
2757 }
2758 p->nodes[level] = b;
2759 if (!p->skip_locking)
2760 p->locks[level] = root_lock;
2761
2762 while (b) {
2763 level = btrfs_header_level(b);
2764
2765 /*
2766 * setup the path here so we can release it under lock
2767 * contention with the cow code
2768 */
2769 if (cow) {
2770 /*
2771 * if we don't really need to cow this block
2772 * then we don't want to set the path blocking,
2773 * so we test it here
2774 */
2775 if (!should_cow_block(trans, root, b))
2776 goto cow_done;
2777
2778 /*
2779 * must have write locks on this node and the
2780 * parent
2781 */
2782 if (level > write_lock_level ||
2783 (level + 1 > write_lock_level &&
2784 level + 1 < BTRFS_MAX_LEVEL &&
2785 p->nodes[level + 1])) {
2786 write_lock_level = level + 1;
2787 btrfs_release_path(p);
2788 goto again;
2789 }
2790
2791 btrfs_set_path_blocking(p);
2792 err = btrfs_cow_block(trans, root, b,
2793 p->nodes[level + 1],
2794 p->slots[level + 1], &b);
2795 if (err) {
2796 ret = err;
2797 goto done;
2798 }
2799 }
2800 cow_done:
2801 p->nodes[level] = b;
2802 btrfs_clear_path_blocking(p, NULL, 0);
2803
2804 /*
2805 * we have a lock on b and as long as we aren't changing
2806 * the tree, there is no way to for the items in b to change.
2807 * It is safe to drop the lock on our parent before we
2808 * go through the expensive btree search on b.
2809 *
2810 * If we're inserting or deleting (ins_len != 0), then we might
2811 * be changing slot zero, which may require changing the parent.
2812 * So, we can't drop the lock until after we know which slot
2813 * we're operating on.
2814 */
2815 if (!ins_len && !p->keep_locks) {
2816 int u = level + 1;
2817
2818 if (u < BTRFS_MAX_LEVEL && p->locks[u]) {
2819 btrfs_tree_unlock_rw(p->nodes[u], p->locks[u]);
2820 p->locks[u] = 0;
2821 }
2822 }
2823
2824 ret = key_search(b, key, level, &prev_cmp, &slot);
2825
2826 if (level != 0) {
2827 int dec = 0;
2828 if (ret && slot > 0) {
2829 dec = 1;
2830 slot -= 1;
2831 }
2832 p->slots[level] = slot;
2833 err = setup_nodes_for_search(trans, root, p, b, level,
2834 ins_len, &write_lock_level);
2835 if (err == -EAGAIN)
2836 goto again;
2837 if (err) {
2838 ret = err;
2839 goto done;
2840 }
2841 b = p->nodes[level];
2842 slot = p->slots[level];
2843
2844 /*
2845 * slot 0 is special, if we change the key
2846 * we have to update the parent pointer
2847 * which means we must have a write lock
2848 * on the parent
2849 */
2850 if (slot == 0 && ins_len &&
2851 write_lock_level < level + 1) {
2852 write_lock_level = level + 1;
2853 btrfs_release_path(p);
2854 goto again;
2855 }
2856
2857 unlock_up(p, level, lowest_unlock,
2858 min_write_lock_level, &write_lock_level);
2859
2860 if (level == lowest_level) {
2861 if (dec)
2862 p->slots[level]++;
2863 goto done;
2864 }
2865
2866 err = read_block_for_search(trans, root, p,
2867 &b, level, slot, key, 0);
2868 if (err == -EAGAIN)
2869 goto again;
2870 if (err) {
2871 ret = err;
2872 goto done;
2873 }
2874
2875 if (!p->skip_locking) {
2876 level = btrfs_header_level(b);
2877 if (level <= write_lock_level) {
2878 err = btrfs_try_tree_write_lock(b);
2879 if (!err) {
2880 btrfs_set_path_blocking(p);
2881 btrfs_tree_lock(b);
2882 btrfs_clear_path_blocking(p, b,
2883 BTRFS_WRITE_LOCK);
2884 }
2885 p->locks[level] = BTRFS_WRITE_LOCK;
2886 } else {
2887 err = btrfs_tree_read_lock_atomic(b);
2888 if (!err) {
2889 btrfs_set_path_blocking(p);
2890 btrfs_tree_read_lock(b);
2891 btrfs_clear_path_blocking(p, b,
2892 BTRFS_READ_LOCK);
2893 }
2894 p->locks[level] = BTRFS_READ_LOCK;
2895 }
2896 p->nodes[level] = b;
2897 }
2898 } else {
2899 p->slots[level] = slot;
2900 if (ins_len > 0 &&
2901 btrfs_leaf_free_space(root, b) < ins_len) {
2902 if (write_lock_level < 1) {
2903 write_lock_level = 1;
2904 btrfs_release_path(p);
2905 goto again;
2906 }
2907
2908 btrfs_set_path_blocking(p);
2909 err = split_leaf(trans, root, key,
2910 p, ins_len, ret == 0);
2911 btrfs_clear_path_blocking(p, NULL, 0);
2912
2913 BUG_ON(err > 0);
2914 if (err) {
2915 ret = err;
2916 goto done;
2917 }
2918 }
2919 if (!p->search_for_split)
2920 unlock_up(p, level, lowest_unlock,
2921 min_write_lock_level, &write_lock_level);
2922 goto done;
2923 }
2924 }
2925 ret = 1;
2926 done:
2927 /*
2928 * we don't really know what they plan on doing with the path
2929 * from here on, so for now just mark it as blocking
2930 */
2931 if (!p->leave_spinning)
2932 btrfs_set_path_blocking(p);
2933 if (ret < 0 && !p->skip_release_on_error)
2934 btrfs_release_path(p);
2935 return ret;
2936 }
2937
2938 /*
2939 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2940 * current state of the tree together with the operations recorded in the tree
2941 * modification log to search for the key in a previous version of this tree, as
2942 * denoted by the time_seq parameter.
2943 *
2944 * Naturally, there is no support for insert, delete or cow operations.
2945 *
2946 * The resulting path and return value will be set up as if we called
2947 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2948 */
2949 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2950 struct btrfs_path *p, u64 time_seq)
2951 {
2952 struct extent_buffer *b;
2953 int slot;
2954 int ret;
2955 int err;
2956 int level;
2957 int lowest_unlock = 1;
2958 u8 lowest_level = 0;
2959 int prev_cmp = -1;
2960
2961 lowest_level = p->lowest_level;
2962 WARN_ON(p->nodes[0] != NULL);
2963
2964 if (p->search_commit_root) {
2965 BUG_ON(time_seq);
2966 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2967 }
2968
2969 again:
2970 b = get_old_root(root, time_seq);
2971 level = btrfs_header_level(b);
2972 p->locks[level] = BTRFS_READ_LOCK;
2973
2974 while (b) {
2975 level = btrfs_header_level(b);
2976 p->nodes[level] = b;
2977 btrfs_clear_path_blocking(p, NULL, 0);
2978
2979 /*
2980 * we have a lock on b and as long as we aren't changing
2981 * the tree, there is no way to for the items in b to change.
2982 * It is safe to drop the lock on our parent before we
2983 * go through the expensive btree search on b.
2984 */
2985 btrfs_unlock_up_safe(p, level + 1);
2986
2987 /*
2988 * Since we can unwind eb's we want to do a real search every
2989 * time.
2990 */
2991 prev_cmp = -1;
2992 ret = key_search(b, key, level, &prev_cmp, &slot);
2993
2994 if (level != 0) {
2995 int dec = 0;
2996 if (ret && slot > 0) {
2997 dec = 1;
2998 slot -= 1;
2999 }
3000 p->slots[level] = slot;
3001 unlock_up(p, level, lowest_unlock, 0, NULL);
3002
3003 if (level == lowest_level) {
3004 if (dec)
3005 p->slots[level]++;
3006 goto done;
3007 }
3008
3009 err = read_block_for_search(NULL, root, p, &b, level,
3010 slot, key, time_seq);
3011 if (err == -EAGAIN)
3012 goto again;
3013 if (err) {
3014 ret = err;
3015 goto done;
3016 }
3017
3018 level = btrfs_header_level(b);
3019 err = btrfs_tree_read_lock_atomic(b);
3020 if (!err) {
3021 btrfs_set_path_blocking(p);
3022 btrfs_tree_read_lock(b);
3023 btrfs_clear_path_blocking(p, b,
3024 BTRFS_READ_LOCK);
3025 }
3026 b = tree_mod_log_rewind(root->fs_info, p, b, time_seq);
3027 if (!b) {
3028 ret = -ENOMEM;
3029 goto done;
3030 }
3031 p->locks[level] = BTRFS_READ_LOCK;
3032 p->nodes[level] = b;
3033 } else {
3034 p->slots[level] = slot;
3035 unlock_up(p, level, lowest_unlock, 0, NULL);
3036 goto done;
3037 }
3038 }
3039 ret = 1;
3040 done:
3041 if (!p->leave_spinning)
3042 btrfs_set_path_blocking(p);
3043 if (ret < 0)
3044 btrfs_release_path(p);
3045
3046 return ret;
3047 }
3048
3049 /*
3050 * helper to use instead of search slot if no exact match is needed but
3051 * instead the next or previous item should be returned.
3052 * When find_higher is true, the next higher item is returned, the next lower
3053 * otherwise.
3054 * When return_any and find_higher are both true, and no higher item is found,
3055 * return the next lower instead.
3056 * When return_any is true and find_higher is false, and no lower item is found,
3057 * return the next higher instead.
3058 * It returns 0 if any item is found, 1 if none is found (tree empty), and
3059 * < 0 on error
3060 */
3061 int btrfs_search_slot_for_read(struct btrfs_root *root,
3062 struct btrfs_key *key, struct btrfs_path *p,
3063 int find_higher, int return_any)
3064 {
3065 int ret;
3066 struct extent_buffer *leaf;
3067
3068 again:
3069 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
3070 if (ret <= 0)
3071 return ret;
3072 /*
3073 * a return value of 1 means the path is at the position where the
3074 * item should be inserted. Normally this is the next bigger item,
3075 * but in case the previous item is the last in a leaf, path points
3076 * to the first free slot in the previous leaf, i.e. at an invalid
3077 * item.
3078 */
3079 leaf = p->nodes[0];
3080
3081 if (find_higher) {
3082 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
3083 ret = btrfs_next_leaf(root, p);
3084 if (ret <= 0)
3085 return ret;
3086 if (!return_any)
3087 return 1;
3088 /*
3089 * no higher item found, return the next
3090 * lower instead
3091 */
3092 return_any = 0;
3093 find_higher = 0;
3094 btrfs_release_path(p);
3095 goto again;
3096 }
3097 } else {
3098 if (p->slots[0] == 0) {
3099 ret = btrfs_prev_leaf(root, p);
3100 if (ret < 0)
3101 return ret;
3102 if (!ret) {
3103 leaf = p->nodes[0];
3104 if (p->slots[0] == btrfs_header_nritems(leaf))
3105 p->slots[0]--;
3106 return 0;
3107 }
3108 if (!return_any)
3109 return 1;
3110 /*
3111 * no lower item found, return the next
3112 * higher instead
3113 */
3114 return_any = 0;
3115 find_higher = 1;
3116 btrfs_release_path(p);
3117 goto again;
3118 } else {
3119 --p->slots[0];
3120 }
3121 }
3122 return 0;
3123 }
3124
3125 /*
3126 * adjust the pointers going up the tree, starting at level
3127 * making sure the right key of each node is points to 'key'.
3128 * This is used after shifting pointers to the left, so it stops
3129 * fixing up pointers when a given leaf/node is not in slot 0 of the
3130 * higher levels
3131 *
3132 */
3133 static void fixup_low_keys(struct btrfs_fs_info *fs_info,
3134 struct btrfs_path *path,
3135 struct btrfs_disk_key *key, int level)
3136 {
3137 int i;
3138 struct extent_buffer *t;
3139
3140 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
3141 int tslot = path->slots[i];
3142 if (!path->nodes[i])
3143 break;
3144 t = path->nodes[i];
3145 tree_mod_log_set_node_key(fs_info, t, tslot, 1);
3146 btrfs_set_node_key(t, key, tslot);
3147 btrfs_mark_buffer_dirty(path->nodes[i]);
3148 if (tslot != 0)
3149 break;
3150 }
3151 }
3152
3153 /*
3154 * update item key.
3155 *
3156 * This function isn't completely safe. It's the caller's responsibility
3157 * that the new key won't break the order
3158 */
3159 void btrfs_set_item_key_safe(struct btrfs_fs_info *fs_info,
3160 struct btrfs_path *path,
3161 struct btrfs_key *new_key)
3162 {
3163 struct btrfs_disk_key disk_key;
3164 struct extent_buffer *eb;
3165 int slot;
3166
3167 eb = path->nodes[0];
3168 slot = path->slots[0];
3169 if (slot > 0) {
3170 btrfs_item_key(eb, &disk_key, slot - 1);
3171 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
3172 }
3173 if (slot < btrfs_header_nritems(eb) - 1) {
3174 btrfs_item_key(eb, &disk_key, slot + 1);
3175 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
3176 }
3177
3178 btrfs_cpu_key_to_disk(&disk_key, new_key);
3179 btrfs_set_item_key(eb, &disk_key, slot);
3180 btrfs_mark_buffer_dirty(eb);
3181 if (slot == 0)
3182 fixup_low_keys(fs_info, path, &disk_key, 1);
3183 }
3184
3185 /*
3186 * try to push data from one node into the next node left in the
3187 * tree.
3188 *
3189 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
3190 * error, and > 0 if there was no room in the left hand block.
3191 */
3192 static int push_node_left(struct btrfs_trans_handle *trans,
3193 struct btrfs_root *root, struct extent_buffer *dst,
3194 struct extent_buffer *src, int empty)
3195 {
3196 int push_items = 0;
3197 int src_nritems;
3198 int dst_nritems;
3199 int ret = 0;
3200
3201 src_nritems = btrfs_header_nritems(src);
3202 dst_nritems = btrfs_header_nritems(dst);
3203 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3204 WARN_ON(btrfs_header_generation(src) != trans->transid);
3205 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3206
3207 if (!empty && src_nritems <= 8)
3208 return 1;
3209
3210 if (push_items <= 0)
3211 return 1;
3212
3213 if (empty) {
3214 push_items = min(src_nritems, push_items);
3215 if (push_items < src_nritems) {
3216 /* leave at least 8 pointers in the node if
3217 * we aren't going to empty it
3218 */
3219 if (src_nritems - push_items < 8) {
3220 if (push_items <= 8)
3221 return 1;
3222 push_items -= 8;
3223 }
3224 }
3225 } else
3226 push_items = min(src_nritems - 8, push_items);
3227
3228 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3229 push_items);
3230 if (ret) {
3231 btrfs_abort_transaction(trans, root, ret);
3232 return ret;
3233 }
3234 copy_extent_buffer(dst, src,
3235 btrfs_node_key_ptr_offset(dst_nritems),
3236 btrfs_node_key_ptr_offset(0),
3237 push_items * sizeof(struct btrfs_key_ptr));
3238
3239 if (push_items < src_nritems) {
3240 /*
3241 * don't call tree_mod_log_eb_move here, key removal was already
3242 * fully logged by tree_mod_log_eb_copy above.
3243 */
3244 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3245 btrfs_node_key_ptr_offset(push_items),
3246 (src_nritems - push_items) *
3247 sizeof(struct btrfs_key_ptr));
3248 }
3249 btrfs_set_header_nritems(src, src_nritems - push_items);
3250 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3251 btrfs_mark_buffer_dirty(src);
3252 btrfs_mark_buffer_dirty(dst);
3253
3254 return ret;
3255 }
3256
3257 /*
3258 * try to push data from one node into the next node right in the
3259 * tree.
3260 *
3261 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3262 * error, and > 0 if there was no room in the right hand block.
3263 *
3264 * this will only push up to 1/2 the contents of the left node over
3265 */
3266 static int balance_node_right(struct btrfs_trans_handle *trans,
3267 struct btrfs_root *root,
3268 struct extent_buffer *dst,
3269 struct extent_buffer *src)
3270 {
3271 int push_items = 0;
3272 int max_push;
3273 int src_nritems;
3274 int dst_nritems;
3275 int ret = 0;
3276
3277 WARN_ON(btrfs_header_generation(src) != trans->transid);
3278 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3279
3280 src_nritems = btrfs_header_nritems(src);
3281 dst_nritems = btrfs_header_nritems(dst);
3282 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3283 if (push_items <= 0)
3284 return 1;
3285
3286 if (src_nritems < 4)
3287 return 1;
3288
3289 max_push = src_nritems / 2 + 1;
3290 /* don't try to empty the node */
3291 if (max_push >= src_nritems)
3292 return 1;
3293
3294 if (max_push < push_items)
3295 push_items = max_push;
3296
3297 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3298 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3299 btrfs_node_key_ptr_offset(0),
3300 (dst_nritems) *
3301 sizeof(struct btrfs_key_ptr));
3302
3303 ret = tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3304 src_nritems - push_items, push_items);
3305 if (ret) {
3306 btrfs_abort_transaction(trans, root, ret);
3307 return ret;
3308 }
3309 copy_extent_buffer(dst, src,
3310 btrfs_node_key_ptr_offset(0),
3311 btrfs_node_key_ptr_offset(src_nritems - push_items),
3312 push_items * sizeof(struct btrfs_key_ptr));
3313
3314 btrfs_set_header_nritems(src, src_nritems - push_items);
3315 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3316
3317 btrfs_mark_buffer_dirty(src);
3318 btrfs_mark_buffer_dirty(dst);
3319
3320 return ret;
3321 }
3322
3323 /*
3324 * helper function to insert a new root level in the tree.
3325 * A new node is allocated, and a single item is inserted to
3326 * point to the existing root
3327 *
3328 * returns zero on success or < 0 on failure.
3329 */
3330 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3331 struct btrfs_root *root,
3332 struct btrfs_path *path, int level)
3333 {
3334 u64 lower_gen;
3335 struct extent_buffer *lower;
3336 struct extent_buffer *c;
3337 struct extent_buffer *old;
3338 struct btrfs_disk_key lower_key;
3339
3340 BUG_ON(path->nodes[level]);
3341 BUG_ON(path->nodes[level-1] != root->node);
3342
3343 lower = path->nodes[level-1];
3344 if (level == 1)
3345 btrfs_item_key(lower, &lower_key, 0);
3346 else
3347 btrfs_node_key(lower, &lower_key, 0);
3348
3349 c = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3350 &lower_key, level, root->node->start, 0);
3351 if (IS_ERR(c))
3352 return PTR_ERR(c);
3353
3354 root_add_used(root, root->nodesize);
3355
3356 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3357 btrfs_set_header_nritems(c, 1);
3358 btrfs_set_header_level(c, level);
3359 btrfs_set_header_bytenr(c, c->start);
3360 btrfs_set_header_generation(c, trans->transid);
3361 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3362 btrfs_set_header_owner(c, root->root_key.objectid);
3363
3364 write_extent_buffer(c, root->fs_info->fsid, btrfs_header_fsid(),
3365 BTRFS_FSID_SIZE);
3366
3367 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3368 btrfs_header_chunk_tree_uuid(c), BTRFS_UUID_SIZE);
3369
3370 btrfs_set_node_key(c, &lower_key, 0);
3371 btrfs_set_node_blockptr(c, 0, lower->start);
3372 lower_gen = btrfs_header_generation(lower);
3373 WARN_ON(lower_gen != trans->transid);
3374
3375 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3376
3377 btrfs_mark_buffer_dirty(c);
3378
3379 old = root->node;
3380 tree_mod_log_set_root_pointer(root, c, 0);
3381 rcu_assign_pointer(root->node, c);
3382
3383 /* the super has an extra ref to root->node */
3384 free_extent_buffer(old);
3385
3386 add_root_to_dirty_list(root);
3387 extent_buffer_get(c);
3388 path->nodes[level] = c;
3389 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
3390 path->slots[level] = 0;
3391 return 0;
3392 }
3393
3394 /*
3395 * worker function to insert a single pointer in a node.
3396 * the node should have enough room for the pointer already
3397 *
3398 * slot and level indicate where you want the key to go, and
3399 * blocknr is the block the key points to.
3400 */
3401 static void insert_ptr(struct btrfs_trans_handle *trans,
3402 struct btrfs_root *root, struct btrfs_path *path,
3403 struct btrfs_disk_key *key, u64 bytenr,
3404 int slot, int level)
3405 {
3406 struct extent_buffer *lower;
3407 int nritems;
3408 int ret;
3409
3410 BUG_ON(!path->nodes[level]);
3411 btrfs_assert_tree_locked(path->nodes[level]);
3412 lower = path->nodes[level];
3413 nritems = btrfs_header_nritems(lower);
3414 BUG_ON(slot > nritems);
3415 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3416 if (slot != nritems) {
3417 if (level)
3418 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3419 slot, nritems - slot);
3420 memmove_extent_buffer(lower,
3421 btrfs_node_key_ptr_offset(slot + 1),
3422 btrfs_node_key_ptr_offset(slot),
3423 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3424 }
3425 if (level) {
3426 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3427 MOD_LOG_KEY_ADD, GFP_NOFS);
3428 BUG_ON(ret < 0);
3429 }
3430 btrfs_set_node_key(lower, key, slot);
3431 btrfs_set_node_blockptr(lower, slot, bytenr);
3432 WARN_ON(trans->transid == 0);
3433 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3434 btrfs_set_header_nritems(lower, nritems + 1);
3435 btrfs_mark_buffer_dirty(lower);
3436 }
3437
3438 /*
3439 * split the node at the specified level in path in two.
3440 * The path is corrected to point to the appropriate node after the split
3441 *
3442 * Before splitting this tries to make some room in the node by pushing
3443 * left and right, if either one works, it returns right away.
3444 *
3445 * returns 0 on success and < 0 on failure
3446 */
3447 static noinline int split_node(struct btrfs_trans_handle *trans,
3448 struct btrfs_root *root,
3449 struct btrfs_path *path, int level)
3450 {
3451 struct extent_buffer *c;
3452 struct extent_buffer *split;
3453 struct btrfs_disk_key disk_key;
3454 int mid;
3455 int ret;
3456 u32 c_nritems;
3457
3458 c = path->nodes[level];
3459 WARN_ON(btrfs_header_generation(c) != trans->transid);
3460 if (c == root->node) {
3461 /*
3462 * trying to split the root, lets make a new one
3463 *
3464 * tree mod log: We don't log_removal old root in
3465 * insert_new_root, because that root buffer will be kept as a
3466 * normal node. We are going to log removal of half of the
3467 * elements below with tree_mod_log_eb_copy. We're holding a
3468 * tree lock on the buffer, which is why we cannot race with
3469 * other tree_mod_log users.
3470 */
3471 ret = insert_new_root(trans, root, path, level + 1);
3472 if (ret)
3473 return ret;
3474 } else {
3475 ret = push_nodes_for_insert(trans, root, path, level);
3476 c = path->nodes[level];
3477 if (!ret && btrfs_header_nritems(c) <
3478 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3479 return 0;
3480 if (ret < 0)
3481 return ret;
3482 }
3483
3484 c_nritems = btrfs_header_nritems(c);
3485 mid = (c_nritems + 1) / 2;
3486 btrfs_node_key(c, &disk_key, mid);
3487
3488 split = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
3489 &disk_key, level, c->start, 0);
3490 if (IS_ERR(split))
3491 return PTR_ERR(split);
3492
3493 root_add_used(root, root->nodesize);
3494
3495 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3496 btrfs_set_header_level(split, btrfs_header_level(c));
3497 btrfs_set_header_bytenr(split, split->start);
3498 btrfs_set_header_generation(split, trans->transid);
3499 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3500 btrfs_set_header_owner(split, root->root_key.objectid);
3501 write_extent_buffer(split, root->fs_info->fsid,
3502 btrfs_header_fsid(), BTRFS_FSID_SIZE);
3503 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3504 btrfs_header_chunk_tree_uuid(split),
3505 BTRFS_UUID_SIZE);
3506
3507 ret = tree_mod_log_eb_copy(root->fs_info, split, c, 0,
3508 mid, c_nritems - mid);
3509 if (ret) {
3510 btrfs_abort_transaction(trans, root, ret);
3511 return ret;
3512 }
3513 copy_extent_buffer(split, c,
3514 btrfs_node_key_ptr_offset(0),
3515 btrfs_node_key_ptr_offset(mid),
3516 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3517 btrfs_set_header_nritems(split, c_nritems - mid);
3518 btrfs_set_header_nritems(c, mid);
3519 ret = 0;
3520
3521 btrfs_mark_buffer_dirty(c);
3522 btrfs_mark_buffer_dirty(split);
3523
3524 insert_ptr(trans, root, path, &disk_key, split->start,
3525 path->slots[level + 1] + 1, level + 1);
3526
3527 if (path->slots[level] >= mid) {
3528 path->slots[level] -= mid;
3529 btrfs_tree_unlock(c);
3530 free_extent_buffer(c);
3531 path->nodes[level] = split;
3532 path->slots[level + 1] += 1;
3533 } else {
3534 btrfs_tree_unlock(split);
3535 free_extent_buffer(split);
3536 }
3537 return ret;
3538 }
3539
3540 /*
3541 * how many bytes are required to store the items in a leaf. start
3542 * and nr indicate which items in the leaf to check. This totals up the
3543 * space used both by the item structs and the item data
3544 */
3545 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3546 {
3547 struct btrfs_item *start_item;
3548 struct btrfs_item *end_item;
3549 struct btrfs_map_token token;
3550 int data_len;
3551 int nritems = btrfs_header_nritems(l);
3552 int end = min(nritems, start + nr) - 1;
3553
3554 if (!nr)
3555 return 0;
3556 btrfs_init_map_token(&token);
3557 start_item = btrfs_item_nr(start);
3558 end_item = btrfs_item_nr(end);
3559 data_len = btrfs_token_item_offset(l, start_item, &token) +
3560 btrfs_token_item_size(l, start_item, &token);
3561 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3562 data_len += sizeof(struct btrfs_item) * nr;
3563 WARN_ON(data_len < 0);
3564 return data_len;
3565 }
3566
3567 /*
3568 * The space between the end of the leaf items and
3569 * the start of the leaf data. IOW, how much room
3570 * the leaf has left for both items and data
3571 */
3572 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3573 struct extent_buffer *leaf)
3574 {
3575 int nritems = btrfs_header_nritems(leaf);
3576 int ret;
3577 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3578 if (ret < 0) {
3579 btrfs_crit(root->fs_info,
3580 "leaf free space ret %d, leaf data size %lu, used %d nritems %d",
3581 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3582 leaf_space_used(leaf, 0, nritems), nritems);
3583 }
3584 return ret;
3585 }
3586
3587 /*
3588 * min slot controls the lowest index we're willing to push to the
3589 * right. We'll push up to and including min_slot, but no lower
3590 */
3591 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3592 struct btrfs_root *root,
3593 struct btrfs_path *path,
3594 int data_size, int empty,
3595 struct extent_buffer *right,
3596 int free_space, u32 left_nritems,
3597 u32 min_slot)
3598 {
3599 struct extent_buffer *left = path->nodes[0];
3600 struct extent_buffer *upper = path->nodes[1];
3601 struct btrfs_map_token token;
3602 struct btrfs_disk_key disk_key;
3603 int slot;
3604 u32 i;
3605 int push_space = 0;
3606 int push_items = 0;
3607 struct btrfs_item *item;
3608 u32 nr;
3609 u32 right_nritems;
3610 u32 data_end;
3611 u32 this_item_size;
3612
3613 btrfs_init_map_token(&token);
3614
3615 if (empty)
3616 nr = 0;
3617 else
3618 nr = max_t(u32, 1, min_slot);
3619
3620 if (path->slots[0] >= left_nritems)
3621 push_space += data_size;
3622
3623 slot = path->slots[1];
3624 i = left_nritems - 1;
3625 while (i >= nr) {
3626 item = btrfs_item_nr(i);
3627
3628 if (!empty && push_items > 0) {
3629 if (path->slots[0] > i)
3630 break;
3631 if (path->slots[0] == i) {
3632 int space = btrfs_leaf_free_space(root, left);
3633 if (space + push_space * 2 > free_space)
3634 break;
3635 }
3636 }
3637
3638 if (path->slots[0] == i)
3639 push_space += data_size;
3640
3641 this_item_size = btrfs_item_size(left, item);
3642 if (this_item_size + sizeof(*item) + push_space > free_space)
3643 break;
3644
3645 push_items++;
3646 push_space += this_item_size + sizeof(*item);
3647 if (i == 0)
3648 break;
3649 i--;
3650 }
3651
3652 if (push_items == 0)
3653 goto out_unlock;
3654
3655 WARN_ON(!empty && push_items == left_nritems);
3656
3657 /* push left to right */
3658 right_nritems = btrfs_header_nritems(right);
3659
3660 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3661 push_space -= leaf_data_end(root, left);
3662
3663 /* make room in the right data area */
3664 data_end = leaf_data_end(root, right);
3665 memmove_extent_buffer(right,
3666 btrfs_leaf_data(right) + data_end - push_space,
3667 btrfs_leaf_data(right) + data_end,
3668 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3669
3670 /* copy from the left data area */
3671 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3672 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3673 btrfs_leaf_data(left) + leaf_data_end(root, left),
3674 push_space);
3675
3676 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3677 btrfs_item_nr_offset(0),
3678 right_nritems * sizeof(struct btrfs_item));
3679
3680 /* copy the items from left to right */
3681 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3682 btrfs_item_nr_offset(left_nritems - push_items),
3683 push_items * sizeof(struct btrfs_item));
3684
3685 /* update the item pointers */
3686 right_nritems += push_items;
3687 btrfs_set_header_nritems(right, right_nritems);
3688 push_space = BTRFS_LEAF_DATA_SIZE(root);
3689 for (i = 0; i < right_nritems; i++) {
3690 item = btrfs_item_nr(i);
3691 push_space -= btrfs_token_item_size(right, item, &token);
3692 btrfs_set_token_item_offset(right, item, push_space, &token);
3693 }
3694
3695 left_nritems -= push_items;
3696 btrfs_set_header_nritems(left, left_nritems);
3697
3698 if (left_nritems)
3699 btrfs_mark_buffer_dirty(left);
3700 else
3701 clean_tree_block(trans, root->fs_info, left);
3702
3703 btrfs_mark_buffer_dirty(right);
3704
3705 btrfs_item_key(right, &disk_key, 0);
3706 btrfs_set_node_key(upper, &disk_key, slot + 1);
3707 btrfs_mark_buffer_dirty(upper);
3708
3709 /* then fixup the leaf pointer in the path */
3710 if (path->slots[0] >= left_nritems) {
3711 path->slots[0] -= left_nritems;
3712 if (btrfs_header_nritems(path->nodes[0]) == 0)
3713 clean_tree_block(trans, root->fs_info, path->nodes[0]);
3714 btrfs_tree_unlock(path->nodes[0]);
3715 free_extent_buffer(path->nodes[0]);
3716 path->nodes[0] = right;
3717 path->slots[1] += 1;
3718 } else {
3719 btrfs_tree_unlock(right);
3720 free_extent_buffer(right);
3721 }
3722 return 0;
3723
3724 out_unlock:
3725 btrfs_tree_unlock(right);
3726 free_extent_buffer(right);
3727 return 1;
3728 }
3729
3730 /*
3731 * push some data in the path leaf to the right, trying to free up at
3732 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3733 *
3734 * returns 1 if the push failed because the other node didn't have enough
3735 * room, 0 if everything worked out and < 0 if there were major errors.
3736 *
3737 * this will push starting from min_slot to the end of the leaf. It won't
3738 * push any slot lower than min_slot
3739 */
3740 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3741 *root, struct btrfs_path *path,
3742 int min_data_size, int data_size,
3743 int empty, u32 min_slot)
3744 {
3745 struct extent_buffer *left = path->nodes[0];
3746 struct extent_buffer *right;
3747 struct extent_buffer *upper;
3748 int slot;
3749 int free_space;
3750 u32 left_nritems;
3751 int ret;
3752
3753 if (!path->nodes[1])
3754 return 1;
3755
3756 slot = path->slots[1];
3757 upper = path->nodes[1];
3758 if (slot >= btrfs_header_nritems(upper) - 1)
3759 return 1;
3760
3761 btrfs_assert_tree_locked(path->nodes[1]);
3762
3763 right = read_node_slot(root, upper, slot + 1);
3764 if (right == NULL)
3765 return 1;
3766
3767 btrfs_tree_lock(right);
3768 btrfs_set_lock_blocking(right);
3769
3770 free_space = btrfs_leaf_free_space(root, right);
3771 if (free_space < data_size)
3772 goto out_unlock;
3773
3774 /* cow and double check */
3775 ret = btrfs_cow_block(trans, root, right, upper,
3776 slot + 1, &right);
3777 if (ret)
3778 goto out_unlock;
3779
3780 free_space = btrfs_leaf_free_space(root, right);
3781 if (free_space < data_size)
3782 goto out_unlock;
3783
3784 left_nritems = btrfs_header_nritems(left);
3785 if (left_nritems == 0)
3786 goto out_unlock;
3787
3788 if (path->slots[0] == left_nritems && !empty) {
3789 /* Key greater than all keys in the leaf, right neighbor has
3790 * enough room for it and we're not emptying our leaf to delete
3791 * it, therefore use right neighbor to insert the new item and
3792 * no need to touch/dirty our left leaft. */
3793 btrfs_tree_unlock(left);
3794 free_extent_buffer(left);
3795 path->nodes[0] = right;
3796 path->slots[0] = 0;
3797 path->slots[1]++;
3798 return 0;
3799 }
3800
3801 return __push_leaf_right(trans, root, path, min_data_size, empty,
3802 right, free_space, left_nritems, min_slot);
3803 out_unlock:
3804 btrfs_tree_unlock(right);
3805 free_extent_buffer(right);
3806 return 1;
3807 }
3808
3809 /*
3810 * push some data in the path leaf to the left, trying to free up at
3811 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3812 *
3813 * max_slot can put a limit on how far into the leaf we'll push items. The
3814 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
3815 * items
3816 */
3817 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3818 struct btrfs_root *root,
3819 struct btrfs_path *path, int data_size,
3820 int empty, struct extent_buffer *left,
3821 int free_space, u32 right_nritems,
3822 u32 max_slot)
3823 {
3824 struct btrfs_disk_key disk_key;
3825 struct extent_buffer *right = path->nodes[0];
3826 int i;
3827 int push_space = 0;
3828 int push_items = 0;
3829 struct btrfs_item *item;
3830 u32 old_left_nritems;
3831 u32 nr;
3832 int ret = 0;
3833 u32 this_item_size;
3834 u32 old_left_item_size;
3835 struct btrfs_map_token token;
3836
3837 btrfs_init_map_token(&token);
3838
3839 if (empty)
3840 nr = min(right_nritems, max_slot);
3841 else
3842 nr = min(right_nritems - 1, max_slot);
3843
3844 for (i = 0; i < nr; i++) {
3845 item = btrfs_item_nr(i);
3846
3847 if (!empty && push_items > 0) {
3848 if (path->slots[0] < i)
3849 break;
3850 if (path->slots[0] == i) {
3851 int space = btrfs_leaf_free_space(root, right);
3852 if (space + push_space * 2 > free_space)
3853 break;
3854 }
3855 }
3856
3857 if (path->slots[0] == i)
3858 push_space += data_size;
3859
3860 this_item_size = btrfs_item_size(right, item);
3861 if (this_item_size + sizeof(*item) + push_space > free_space)
3862 break;
3863
3864 push_items++;
3865 push_space += this_item_size + sizeof(*item);
3866 }
3867
3868 if (push_items == 0) {
3869 ret = 1;
3870 goto out;
3871 }
3872 WARN_ON(!empty && push_items == btrfs_header_nritems(right));
3873
3874 /* push data from right to left */
3875 copy_extent_buffer(left, right,
3876 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3877 btrfs_item_nr_offset(0),
3878 push_items * sizeof(struct btrfs_item));
3879
3880 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3881 btrfs_item_offset_nr(right, push_items - 1);
3882
3883 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3884 leaf_data_end(root, left) - push_space,
3885 btrfs_leaf_data(right) +
3886 btrfs_item_offset_nr(right, push_items - 1),
3887 push_space);
3888 old_left_nritems = btrfs_header_nritems(left);
3889 BUG_ON(old_left_nritems <= 0);
3890
3891 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3892 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3893 u32 ioff;
3894
3895 item = btrfs_item_nr(i);
3896
3897 ioff = btrfs_token_item_offset(left, item, &token);
3898 btrfs_set_token_item_offset(left, item,
3899 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3900 &token);
3901 }
3902 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3903
3904 /* fixup right node */
3905 if (push_items > right_nritems)
3906 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3907 right_nritems);
3908
3909 if (push_items < right_nritems) {
3910 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3911 leaf_data_end(root, right);
3912 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3913 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3914 btrfs_leaf_data(right) +
3915 leaf_data_end(root, right), push_space);
3916
3917 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3918 btrfs_item_nr_offset(push_items),
3919 (btrfs_header_nritems(right) - push_items) *
3920 sizeof(struct btrfs_item));
3921 }
3922 right_nritems -= push_items;
3923 btrfs_set_header_nritems(right, right_nritems);
3924 push_space = BTRFS_LEAF_DATA_SIZE(root);
3925 for (i = 0; i < right_nritems; i++) {
3926 item = btrfs_item_nr(i);
3927
3928 push_space = push_space - btrfs_token_item_size(right,
3929 item, &token);
3930 btrfs_set_token_item_offset(right, item, push_space, &token);
3931 }
3932
3933 btrfs_mark_buffer_dirty(left);
3934 if (right_nritems)
3935 btrfs_mark_buffer_dirty(right);
3936 else
3937 clean_tree_block(trans, root->fs_info, right);
3938
3939 btrfs_item_key(right, &disk_key, 0);
3940 fixup_low_keys(root->fs_info, path, &disk_key, 1);
3941
3942 /* then fixup the leaf pointer in the path */
3943 if (path->slots[0] < push_items) {
3944 path->slots[0] += old_left_nritems;
3945 btrfs_tree_unlock(path->nodes[0]);
3946 free_extent_buffer(path->nodes[0]);
3947 path->nodes[0] = left;
3948 path->slots[1] -= 1;
3949 } else {
3950 btrfs_tree_unlock(left);
3951 free_extent_buffer(left);
3952 path->slots[0] -= push_items;
3953 }
3954 BUG_ON(path->slots[0] < 0);
3955 return ret;
3956 out:
3957 btrfs_tree_unlock(left);
3958 free_extent_buffer(left);
3959 return ret;
3960 }
3961
3962 /*
3963 * push some data in the path leaf to the left, trying to free up at
3964 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3965 *
3966 * max_slot can put a limit on how far into the leaf we'll push items. The
3967 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3968 * items
3969 */
3970 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3971 *root, struct btrfs_path *path, int min_data_size,
3972 int data_size, int empty, u32 max_slot)
3973 {
3974 struct extent_buffer *right = path->nodes[0];
3975 struct extent_buffer *left;
3976 int slot;
3977 int free_space;
3978 u32 right_nritems;
3979 int ret = 0;
3980
3981 slot = path->slots[1];
3982 if (slot == 0)
3983 return 1;
3984 if (!path->nodes[1])
3985 return 1;
3986
3987 right_nritems = btrfs_header_nritems(right);
3988 if (right_nritems == 0)
3989 return 1;
3990
3991 btrfs_assert_tree_locked(path->nodes[1]);
3992
3993 left = read_node_slot(root, path->nodes[1], slot - 1);
3994 if (left == NULL)
3995 return 1;
3996
3997 btrfs_tree_lock(left);
3998 btrfs_set_lock_blocking(left);
3999
4000 free_space = btrfs_leaf_free_space(root, left);
4001 if (free_space < data_size) {
4002 ret = 1;
4003 goto out;
4004 }
4005
4006 /* cow and double check */
4007 ret = btrfs_cow_block(trans, root, left,
4008 path->nodes[1], slot - 1, &left);
4009 if (ret) {
4010 /* we hit -ENOSPC, but it isn't fatal here */
4011 if (ret == -ENOSPC)
4012 ret = 1;
4013 goto out;
4014 }
4015
4016 free_space = btrfs_leaf_free_space(root, left);
4017 if (free_space < data_size) {
4018 ret = 1;
4019 goto out;
4020 }
4021
4022 return __push_leaf_left(trans, root, path, min_data_size,
4023 empty, left, free_space, right_nritems,
4024 max_slot);
4025 out:
4026 btrfs_tree_unlock(left);
4027 free_extent_buffer(left);
4028 return ret;
4029 }
4030
4031 /*
4032 * split the path's leaf in two, making sure there is at least data_size
4033 * available for the resulting leaf level of the path.
4034 */
4035 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
4036 struct btrfs_root *root,
4037 struct btrfs_path *path,
4038 struct extent_buffer *l,
4039 struct extent_buffer *right,
4040 int slot, int mid, int nritems)
4041 {
4042 int data_copy_size;
4043 int rt_data_off;
4044 int i;
4045 struct btrfs_disk_key disk_key;
4046 struct btrfs_map_token token;
4047
4048 btrfs_init_map_token(&token);
4049
4050 nritems = nritems - mid;
4051 btrfs_set_header_nritems(right, nritems);
4052 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
4053
4054 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
4055 btrfs_item_nr_offset(mid),
4056 nritems * sizeof(struct btrfs_item));
4057
4058 copy_extent_buffer(right, l,
4059 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
4060 data_copy_size, btrfs_leaf_data(l) +
4061 leaf_data_end(root, l), data_copy_size);
4062
4063 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
4064 btrfs_item_end_nr(l, mid);
4065
4066 for (i = 0; i < nritems; i++) {
4067 struct btrfs_item *item = btrfs_item_nr(i);
4068 u32 ioff;
4069
4070 ioff = btrfs_token_item_offset(right, item, &token);
4071 btrfs_set_token_item_offset(right, item,
4072 ioff + rt_data_off, &token);
4073 }
4074
4075 btrfs_set_header_nritems(l, mid);
4076 btrfs_item_key(right, &disk_key, 0);
4077 insert_ptr(trans, root, path, &disk_key, right->start,
4078 path->slots[1] + 1, 1);
4079
4080 btrfs_mark_buffer_dirty(right);
4081 btrfs_mark_buffer_dirty(l);
4082 BUG_ON(path->slots[0] != slot);
4083
4084 if (mid <= slot) {
4085 btrfs_tree_unlock(path->nodes[0]);
4086 free_extent_buffer(path->nodes[0]);
4087 path->nodes[0] = right;
4088 path->slots[0] -= mid;
4089 path->slots[1] += 1;
4090 } else {
4091 btrfs_tree_unlock(right);
4092 free_extent_buffer(right);
4093 }
4094
4095 BUG_ON(path->slots[0] < 0);
4096 }
4097
4098 /*
4099 * double splits happen when we need to insert a big item in the middle
4100 * of a leaf. A double split can leave us with 3 mostly empty leaves:
4101 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
4102 * A B C
4103 *
4104 * We avoid this by trying to push the items on either side of our target
4105 * into the adjacent leaves. If all goes well we can avoid the double split
4106 * completely.
4107 */
4108 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
4109 struct btrfs_root *root,
4110 struct btrfs_path *path,
4111 int data_size)
4112 {
4113 int ret;
4114 int progress = 0;
4115 int slot;
4116 u32 nritems;
4117 int space_needed = data_size;
4118
4119 slot = path->slots[0];
4120 if (slot < btrfs_header_nritems(path->nodes[0]))
4121 space_needed -= btrfs_leaf_free_space(root, path->nodes[0]);
4122
4123 /*
4124 * try to push all the items after our slot into the
4125 * right leaf
4126 */
4127 ret = push_leaf_right(trans, root, path, 1, space_needed, 0, slot);
4128 if (ret < 0)
4129 return ret;
4130
4131 if (ret == 0)
4132 progress++;
4133
4134 nritems = btrfs_header_nritems(path->nodes[0]);
4135 /*
4136 * our goal is to get our slot at the start or end of a leaf. If
4137 * we've done so we're done
4138 */
4139 if (path->slots[0] == 0 || path->slots[0] == nritems)
4140 return 0;
4141
4142 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4143 return 0;
4144
4145 /* try to push all the items before our slot into the next leaf */
4146 slot = path->slots[0];
4147 ret = push_leaf_left(trans, root, path, 1, space_needed, 0, slot);
4148 if (ret < 0)
4149 return ret;
4150
4151 if (ret == 0)
4152 progress++;
4153
4154 if (progress)
4155 return 0;
4156 return 1;
4157 }
4158
4159 /*
4160 * split the path's leaf in two, making sure there is at least data_size
4161 * available for the resulting leaf level of the path.
4162 *
4163 * returns 0 if all went well and < 0 on failure.
4164 */
4165 static noinline int split_leaf(struct btrfs_trans_handle *trans,
4166 struct btrfs_root *root,
4167 struct btrfs_key *ins_key,
4168 struct btrfs_path *path, int data_size,
4169 int extend)
4170 {
4171 struct btrfs_disk_key disk_key;
4172 struct extent_buffer *l;
4173 u32 nritems;
4174 int mid;
4175 int slot;
4176 struct extent_buffer *right;
4177 struct btrfs_fs_info *fs_info = root->fs_info;
4178 int ret = 0;
4179 int wret;
4180 int split;
4181 int num_doubles = 0;
4182 int tried_avoid_double = 0;
4183
4184 l = path->nodes[0];
4185 slot = path->slots[0];
4186 if (extend && data_size + btrfs_item_size_nr(l, slot) +
4187 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
4188 return -EOVERFLOW;
4189
4190 /* first try to make some room by pushing left and right */
4191 if (data_size && path->nodes[1]) {
4192 int space_needed = data_size;
4193
4194 if (slot < btrfs_header_nritems(l))
4195 space_needed -= btrfs_leaf_free_space(root, l);
4196
4197 wret = push_leaf_right(trans, root, path, space_needed,
4198 space_needed, 0, 0);
4199 if (wret < 0)
4200 return wret;
4201 if (wret) {
4202 wret = push_leaf_left(trans, root, path, space_needed,
4203 space_needed, 0, (u32)-1);
4204 if (wret < 0)
4205 return wret;
4206 }
4207 l = path->nodes[0];
4208
4209 /* did the pushes work? */
4210 if (btrfs_leaf_free_space(root, l) >= data_size)
4211 return 0;
4212 }
4213
4214 if (!path->nodes[1]) {
4215 ret = insert_new_root(trans, root, path, 1);
4216 if (ret)
4217 return ret;
4218 }
4219 again:
4220 split = 1;
4221 l = path->nodes[0];
4222 slot = path->slots[0];
4223 nritems = btrfs_header_nritems(l);
4224 mid = (nritems + 1) / 2;
4225
4226 if (mid <= slot) {
4227 if (nritems == 1 ||
4228 leaf_space_used(l, mid, nritems - mid) + data_size >
4229 BTRFS_LEAF_DATA_SIZE(root)) {
4230 if (slot >= nritems) {
4231 split = 0;
4232 } else {
4233 mid = slot;
4234 if (mid != nritems &&
4235 leaf_space_used(l, mid, nritems - mid) +
4236 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4237 if (data_size && !tried_avoid_double)
4238 goto push_for_double;
4239 split = 2;
4240 }
4241 }
4242 }
4243 } else {
4244 if (leaf_space_used(l, 0, mid) + data_size >
4245 BTRFS_LEAF_DATA_SIZE(root)) {
4246 if (!extend && data_size && slot == 0) {
4247 split = 0;
4248 } else if ((extend || !data_size) && slot == 0) {
4249 mid = 1;
4250 } else {
4251 mid = slot;
4252 if (mid != nritems &&
4253 leaf_space_used(l, mid, nritems - mid) +
4254 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4255 if (data_size && !tried_avoid_double)
4256 goto push_for_double;
4257 split = 2;
4258 }
4259 }
4260 }
4261 }
4262
4263 if (split == 0)
4264 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4265 else
4266 btrfs_item_key(l, &disk_key, mid);
4267
4268 right = btrfs_alloc_tree_block(trans, root, 0, root->root_key.objectid,
4269 &disk_key, 0, l->start, 0);
4270 if (IS_ERR(right))
4271 return PTR_ERR(right);
4272
4273 root_add_used(root, root->nodesize);
4274
4275 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4276 btrfs_set_header_bytenr(right, right->start);
4277 btrfs_set_header_generation(right, trans->transid);
4278 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4279 btrfs_set_header_owner(right, root->root_key.objectid);
4280 btrfs_set_header_level(right, 0);
4281 write_extent_buffer(right, fs_info->fsid,
4282 btrfs_header_fsid(), BTRFS_FSID_SIZE);
4283
4284 write_extent_buffer(right, fs_info->chunk_tree_uuid,
4285 btrfs_header_chunk_tree_uuid(right),
4286 BTRFS_UUID_SIZE);
4287
4288 if (split == 0) {
4289 if (mid <= slot) {
4290 btrfs_set_header_nritems(right, 0);
4291 insert_ptr(trans, root, path, &disk_key, right->start,
4292 path->slots[1] + 1, 1);
4293 btrfs_tree_unlock(path->nodes[0]);
4294 free_extent_buffer(path->nodes[0]);
4295 path->nodes[0] = right;
4296 path->slots[0] = 0;
4297 path->slots[1] += 1;
4298 } else {
4299 btrfs_set_header_nritems(right, 0);
4300 insert_ptr(trans, root, path, &disk_key, right->start,
4301 path->slots[1], 1);
4302 btrfs_tree_unlock(path->nodes[0]);
4303 free_extent_buffer(path->nodes[0]);
4304 path->nodes[0] = right;
4305 path->slots[0] = 0;
4306 if (path->slots[1] == 0)
4307 fixup_low_keys(fs_info, path, &disk_key, 1);
4308 }
4309 btrfs_mark_buffer_dirty(right);
4310 return ret;
4311 }
4312
4313 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4314
4315 if (split == 2) {
4316 BUG_ON(num_doubles != 0);
4317 num_doubles++;
4318 goto again;
4319 }
4320
4321 return 0;
4322
4323 push_for_double:
4324 push_for_double_split(trans, root, path, data_size);
4325 tried_avoid_double = 1;
4326 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4327 return 0;
4328 goto again;
4329 }
4330
4331 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4332 struct btrfs_root *root,
4333 struct btrfs_path *path, int ins_len)
4334 {
4335 struct btrfs_key key;
4336 struct extent_buffer *leaf;
4337 struct btrfs_file_extent_item *fi;
4338 u64 extent_len = 0;
4339 u32 item_size;
4340 int ret;
4341
4342 leaf = path->nodes[0];
4343 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4344
4345 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4346 key.type != BTRFS_EXTENT_CSUM_KEY);
4347
4348 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4349 return 0;
4350
4351 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4352 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4353 fi = btrfs_item_ptr(leaf, path->slots[0],
4354 struct btrfs_file_extent_item);
4355 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4356 }
4357 btrfs_release_path(path);
4358
4359 path->keep_locks = 1;
4360 path->search_for_split = 1;
4361 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4362 path->search_for_split = 0;
4363 if (ret > 0)
4364 ret = -EAGAIN;
4365 if (ret < 0)
4366 goto err;
4367
4368 ret = -EAGAIN;
4369 leaf = path->nodes[0];
4370 /* if our item isn't there, return now */
4371 if (item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4372 goto err;
4373
4374 /* the leaf has changed, it now has room. return now */
4375 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4376 goto err;
4377
4378 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4379 fi = btrfs_item_ptr(leaf, path->slots[0],
4380 struct btrfs_file_extent_item);
4381 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4382 goto err;
4383 }
4384
4385 btrfs_set_path_blocking(path);
4386 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4387 if (ret)
4388 goto err;
4389
4390 path->keep_locks = 0;
4391 btrfs_unlock_up_safe(path, 1);
4392 return 0;
4393 err:
4394 path->keep_locks = 0;
4395 return ret;
4396 }
4397
4398 static noinline int split_item(struct btrfs_trans_handle *trans,
4399 struct btrfs_root *root,
4400 struct btrfs_path *path,
4401 struct btrfs_key *new_key,
4402 unsigned long split_offset)
4403 {
4404 struct extent_buffer *leaf;
4405 struct btrfs_item *item;
4406 struct btrfs_item *new_item;
4407 int slot;
4408 char *buf;
4409 u32 nritems;
4410 u32 item_size;
4411 u32 orig_offset;
4412 struct btrfs_disk_key disk_key;
4413
4414 leaf = path->nodes[0];
4415 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4416
4417 btrfs_set_path_blocking(path);
4418
4419 item = btrfs_item_nr(path->slots[0]);
4420 orig_offset = btrfs_item_offset(leaf, item);
4421 item_size = btrfs_item_size(leaf, item);
4422
4423 buf = kmalloc(item_size, GFP_NOFS);
4424 if (!buf)
4425 return -ENOMEM;
4426
4427 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4428 path->slots[0]), item_size);
4429
4430 slot = path->slots[0] + 1;
4431 nritems = btrfs_header_nritems(leaf);
4432 if (slot != nritems) {
4433 /* shift the items */
4434 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4435 btrfs_item_nr_offset(slot),
4436 (nritems - slot) * sizeof(struct btrfs_item));
4437 }
4438
4439 btrfs_cpu_key_to_disk(&disk_key, new_key);
4440 btrfs_set_item_key(leaf, &disk_key, slot);
4441
4442 new_item = btrfs_item_nr(slot);
4443
4444 btrfs_set_item_offset(leaf, new_item, orig_offset);
4445 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4446
4447 btrfs_set_item_offset(leaf, item,
4448 orig_offset + item_size - split_offset);
4449 btrfs_set_item_size(leaf, item, split_offset);
4450
4451 btrfs_set_header_nritems(leaf, nritems + 1);
4452
4453 /* write the data for the start of the original item */
4454 write_extent_buffer(leaf, buf,
4455 btrfs_item_ptr_offset(leaf, path->slots[0]),
4456 split_offset);
4457
4458 /* write the data for the new item */
4459 write_extent_buffer(leaf, buf + split_offset,
4460 btrfs_item_ptr_offset(leaf, slot),
4461 item_size - split_offset);
4462 btrfs_mark_buffer_dirty(leaf);
4463
4464 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4465 kfree(buf);
4466 return 0;
4467 }
4468
4469 /*
4470 * This function splits a single item into two items,
4471 * giving 'new_key' to the new item and splitting the
4472 * old one at split_offset (from the start of the item).
4473 *
4474 * The path may be released by this operation. After
4475 * the split, the path is pointing to the old item. The
4476 * new item is going to be in the same node as the old one.
4477 *
4478 * Note, the item being split must be smaller enough to live alone on
4479 * a tree block with room for one extra struct btrfs_item
4480 *
4481 * This allows us to split the item in place, keeping a lock on the
4482 * leaf the entire time.
4483 */
4484 int btrfs_split_item(struct btrfs_trans_handle *trans,
4485 struct btrfs_root *root,
4486 struct btrfs_path *path,
4487 struct btrfs_key *new_key,
4488 unsigned long split_offset)
4489 {
4490 int ret;
4491 ret = setup_leaf_for_split(trans, root, path,
4492 sizeof(struct btrfs_item));
4493 if (ret)
4494 return ret;
4495
4496 ret = split_item(trans, root, path, new_key, split_offset);
4497 return ret;
4498 }
4499
4500 /*
4501 * This function duplicate a item, giving 'new_key' to the new item.
4502 * It guarantees both items live in the same tree leaf and the new item
4503 * is contiguous with the original item.
4504 *
4505 * This allows us to split file extent in place, keeping a lock on the
4506 * leaf the entire time.
4507 */
4508 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4509 struct btrfs_root *root,
4510 struct btrfs_path *path,
4511 struct btrfs_key *new_key)
4512 {
4513 struct extent_buffer *leaf;
4514 int ret;
4515 u32 item_size;
4516
4517 leaf = path->nodes[0];
4518 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4519 ret = setup_leaf_for_split(trans, root, path,
4520 item_size + sizeof(struct btrfs_item));
4521 if (ret)
4522 return ret;
4523
4524 path->slots[0]++;
4525 setup_items_for_insert(root, path, new_key, &item_size,
4526 item_size, item_size +
4527 sizeof(struct btrfs_item), 1);
4528 leaf = path->nodes[0];
4529 memcpy_extent_buffer(leaf,
4530 btrfs_item_ptr_offset(leaf, path->slots[0]),
4531 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4532 item_size);
4533 return 0;
4534 }
4535
4536 /*
4537 * make the item pointed to by the path smaller. new_size indicates
4538 * how small to make it, and from_end tells us if we just chop bytes
4539 * off the end of the item or if we shift the item to chop bytes off
4540 * the front.
4541 */
4542 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4543 u32 new_size, int from_end)
4544 {
4545 int slot;
4546 struct extent_buffer *leaf;
4547 struct btrfs_item *item;
4548 u32 nritems;
4549 unsigned int data_end;
4550 unsigned int old_data_start;
4551 unsigned int old_size;
4552 unsigned int size_diff;
4553 int i;
4554 struct btrfs_map_token token;
4555
4556 btrfs_init_map_token(&token);
4557
4558 leaf = path->nodes[0];
4559 slot = path->slots[0];
4560
4561 old_size = btrfs_item_size_nr(leaf, slot);
4562 if (old_size == new_size)
4563 return;
4564
4565 nritems = btrfs_header_nritems(leaf);
4566 data_end = leaf_data_end(root, leaf);
4567
4568 old_data_start = btrfs_item_offset_nr(leaf, slot);
4569
4570 size_diff = old_size - new_size;
4571
4572 BUG_ON(slot < 0);
4573 BUG_ON(slot >= nritems);
4574
4575 /*
4576 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4577 */
4578 /* first correct the data pointers */
4579 for (i = slot; i < nritems; i++) {
4580 u32 ioff;
4581 item = btrfs_item_nr(i);
4582
4583 ioff = btrfs_token_item_offset(leaf, item, &token);
4584 btrfs_set_token_item_offset(leaf, item,
4585 ioff + size_diff, &token);
4586 }
4587
4588 /* shift the data */
4589 if (from_end) {
4590 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4591 data_end + size_diff, btrfs_leaf_data(leaf) +
4592 data_end, old_data_start + new_size - data_end);
4593 } else {
4594 struct btrfs_disk_key disk_key;
4595 u64 offset;
4596
4597 btrfs_item_key(leaf, &disk_key, slot);
4598
4599 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4600 unsigned long ptr;
4601 struct btrfs_file_extent_item *fi;
4602
4603 fi = btrfs_item_ptr(leaf, slot,
4604 struct btrfs_file_extent_item);
4605 fi = (struct btrfs_file_extent_item *)(
4606 (unsigned long)fi - size_diff);
4607
4608 if (btrfs_file_extent_type(leaf, fi) ==
4609 BTRFS_FILE_EXTENT_INLINE) {
4610 ptr = btrfs_item_ptr_offset(leaf, slot);
4611 memmove_extent_buffer(leaf, ptr,
4612 (unsigned long)fi,
4613 BTRFS_FILE_EXTENT_INLINE_DATA_START);
4614 }
4615 }
4616
4617 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4618 data_end + size_diff, btrfs_leaf_data(leaf) +
4619 data_end, old_data_start - data_end);
4620
4621 offset = btrfs_disk_key_offset(&disk_key);
4622 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4623 btrfs_set_item_key(leaf, &disk_key, slot);
4624 if (slot == 0)
4625 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4626 }
4627
4628 item = btrfs_item_nr(slot);
4629 btrfs_set_item_size(leaf, item, new_size);
4630 btrfs_mark_buffer_dirty(leaf);
4631
4632 if (btrfs_leaf_free_space(root, leaf) < 0) {
4633 btrfs_print_leaf(root, leaf);
4634 BUG();
4635 }
4636 }
4637
4638 /*
4639 * make the item pointed to by the path bigger, data_size is the added size.
4640 */
4641 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4642 u32 data_size)
4643 {
4644 int slot;
4645 struct extent_buffer *leaf;
4646 struct btrfs_item *item;
4647 u32 nritems;
4648 unsigned int data_end;
4649 unsigned int old_data;
4650 unsigned int old_size;
4651 int i;
4652 struct btrfs_map_token token;
4653
4654 btrfs_init_map_token(&token);
4655
4656 leaf = path->nodes[0];
4657
4658 nritems = btrfs_header_nritems(leaf);
4659 data_end = leaf_data_end(root, leaf);
4660
4661 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4662 btrfs_print_leaf(root, leaf);
4663 BUG();
4664 }
4665 slot = path->slots[0];
4666 old_data = btrfs_item_end_nr(leaf, slot);
4667
4668 BUG_ON(slot < 0);
4669 if (slot >= nritems) {
4670 btrfs_print_leaf(root, leaf);
4671 btrfs_crit(root->fs_info, "slot %d too large, nritems %d",
4672 slot, nritems);
4673 BUG_ON(1);
4674 }
4675
4676 /*
4677 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4678 */
4679 /* first correct the data pointers */
4680 for (i = slot; i < nritems; i++) {
4681 u32 ioff;
4682 item = btrfs_item_nr(i);
4683
4684 ioff = btrfs_token_item_offset(leaf, item, &token);
4685 btrfs_set_token_item_offset(leaf, item,
4686 ioff - data_size, &token);
4687 }
4688
4689 /* shift the data */
4690 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4691 data_end - data_size, btrfs_leaf_data(leaf) +
4692 data_end, old_data - data_end);
4693
4694 data_end = old_data;
4695 old_size = btrfs_item_size_nr(leaf, slot);
4696 item = btrfs_item_nr(slot);
4697 btrfs_set_item_size(leaf, item, old_size + data_size);
4698 btrfs_mark_buffer_dirty(leaf);
4699
4700 if (btrfs_leaf_free_space(root, leaf) < 0) {
4701 btrfs_print_leaf(root, leaf);
4702 BUG();
4703 }
4704 }
4705
4706 /*
4707 * this is a helper for btrfs_insert_empty_items, the main goal here is
4708 * to save stack depth by doing the bulk of the work in a function
4709 * that doesn't call btrfs_search_slot
4710 */
4711 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4712 struct btrfs_key *cpu_key, u32 *data_size,
4713 u32 total_data, u32 total_size, int nr)
4714 {
4715 struct btrfs_item *item;
4716 int i;
4717 u32 nritems;
4718 unsigned int data_end;
4719 struct btrfs_disk_key disk_key;
4720 struct extent_buffer *leaf;
4721 int slot;
4722 struct btrfs_map_token token;
4723
4724 if (path->slots[0] == 0) {
4725 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4726 fixup_low_keys(root->fs_info, path, &disk_key, 1);
4727 }
4728 btrfs_unlock_up_safe(path, 1);
4729
4730 btrfs_init_map_token(&token);
4731
4732 leaf = path->nodes[0];
4733 slot = path->slots[0];
4734
4735 nritems = btrfs_header_nritems(leaf);
4736 data_end = leaf_data_end(root, leaf);
4737
4738 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4739 btrfs_print_leaf(root, leaf);
4740 btrfs_crit(root->fs_info, "not enough freespace need %u have %d",
4741 total_size, btrfs_leaf_free_space(root, leaf));
4742 BUG();
4743 }
4744
4745 if (slot != nritems) {
4746 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4747
4748 if (old_data < data_end) {
4749 btrfs_print_leaf(root, leaf);
4750 btrfs_crit(root->fs_info, "slot %d old_data %d data_end %d",
4751 slot, old_data, data_end);
4752 BUG_ON(1);
4753 }
4754 /*
4755 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4756 */
4757 /* first correct the data pointers */
4758 for (i = slot; i < nritems; i++) {
4759 u32 ioff;
4760
4761 item = btrfs_item_nr( i);
4762 ioff = btrfs_token_item_offset(leaf, item, &token);
4763 btrfs_set_token_item_offset(leaf, item,
4764 ioff - total_data, &token);
4765 }
4766 /* shift the items */
4767 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4768 btrfs_item_nr_offset(slot),
4769 (nritems - slot) * sizeof(struct btrfs_item));
4770
4771 /* shift the data */
4772 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4773 data_end - total_data, btrfs_leaf_data(leaf) +
4774 data_end, old_data - data_end);
4775 data_end = old_data;
4776 }
4777
4778 /* setup the item for the new data */
4779 for (i = 0; i < nr; i++) {
4780 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4781 btrfs_set_item_key(leaf, &disk_key, slot + i);
4782 item = btrfs_item_nr(slot + i);
4783 btrfs_set_token_item_offset(leaf, item,
4784 data_end - data_size[i], &token);
4785 data_end -= data_size[i];
4786 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4787 }
4788
4789 btrfs_set_header_nritems(leaf, nritems + nr);
4790 btrfs_mark_buffer_dirty(leaf);
4791
4792 if (btrfs_leaf_free_space(root, leaf) < 0) {
4793 btrfs_print_leaf(root, leaf);
4794 BUG();
4795 }
4796 }
4797
4798 /*
4799 * Given a key and some data, insert items into the tree.
4800 * This does all the path init required, making room in the tree if needed.
4801 */
4802 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4803 struct btrfs_root *root,
4804 struct btrfs_path *path,
4805 struct btrfs_key *cpu_key, u32 *data_size,
4806 int nr)
4807 {
4808 int ret = 0;
4809 int slot;
4810 int i;
4811 u32 total_size = 0;
4812 u32 total_data = 0;
4813
4814 for (i = 0; i < nr; i++)
4815 total_data += data_size[i];
4816
4817 total_size = total_data + (nr * sizeof(struct btrfs_item));
4818 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4819 if (ret == 0)
4820 return -EEXIST;
4821 if (ret < 0)
4822 return ret;
4823
4824 slot = path->slots[0];
4825 BUG_ON(slot < 0);
4826
4827 setup_items_for_insert(root, path, cpu_key, data_size,
4828 total_data, total_size, nr);
4829 return 0;
4830 }
4831
4832 /*
4833 * Given a key and some data, insert an item into the tree.
4834 * This does all the path init required, making room in the tree if needed.
4835 */
4836 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4837 *root, struct btrfs_key *cpu_key, void *data, u32
4838 data_size)
4839 {
4840 int ret = 0;
4841 struct btrfs_path *path;
4842 struct extent_buffer *leaf;
4843 unsigned long ptr;
4844
4845 path = btrfs_alloc_path();
4846 if (!path)
4847 return -ENOMEM;
4848 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4849 if (!ret) {
4850 leaf = path->nodes[0];
4851 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4852 write_extent_buffer(leaf, data, ptr, data_size);
4853 btrfs_mark_buffer_dirty(leaf);
4854 }
4855 btrfs_free_path(path);
4856 return ret;
4857 }
4858
4859 /*
4860 * delete the pointer from a given node.
4861 *
4862 * the tree should have been previously balanced so the deletion does not
4863 * empty a node.
4864 */
4865 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4866 int level, int slot)
4867 {
4868 struct extent_buffer *parent = path->nodes[level];
4869 u32 nritems;
4870 int ret;
4871
4872 nritems = btrfs_header_nritems(parent);
4873 if (slot != nritems - 1) {
4874 if (level)
4875 tree_mod_log_eb_move(root->fs_info, parent, slot,
4876 slot + 1, nritems - slot - 1);
4877 memmove_extent_buffer(parent,
4878 btrfs_node_key_ptr_offset(slot),
4879 btrfs_node_key_ptr_offset(slot + 1),
4880 sizeof(struct btrfs_key_ptr) *
4881 (nritems - slot - 1));
4882 } else if (level) {
4883 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4884 MOD_LOG_KEY_REMOVE, GFP_NOFS);
4885 BUG_ON(ret < 0);
4886 }
4887
4888 nritems--;
4889 btrfs_set_header_nritems(parent, nritems);
4890 if (nritems == 0 && parent == root->node) {
4891 BUG_ON(btrfs_header_level(root->node) != 1);
4892 /* just turn the root into a leaf and break */
4893 btrfs_set_header_level(root->node, 0);
4894 } else if (slot == 0) {
4895 struct btrfs_disk_key disk_key;
4896
4897 btrfs_node_key(parent, &disk_key, 0);
4898 fixup_low_keys(root->fs_info, path, &disk_key, level + 1);
4899 }
4900 btrfs_mark_buffer_dirty(parent);
4901 }
4902
4903 /*
4904 * a helper function to delete the leaf pointed to by path->slots[1] and
4905 * path->nodes[1].
4906 *
4907 * This deletes the pointer in path->nodes[1] and frees the leaf
4908 * block extent. zero is returned if it all worked out, < 0 otherwise.
4909 *
4910 * The path must have already been setup for deleting the leaf, including
4911 * all the proper balancing. path->nodes[1] must be locked.
4912 */
4913 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4914 struct btrfs_root *root,
4915 struct btrfs_path *path,
4916 struct extent_buffer *leaf)
4917 {
4918 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4919 del_ptr(root, path, 1, path->slots[1]);
4920
4921 /*
4922 * btrfs_free_extent is expensive, we want to make sure we
4923 * aren't holding any locks when we call it
4924 */
4925 btrfs_unlock_up_safe(path, 0);
4926
4927 root_sub_used(root, leaf->len);
4928
4929 extent_buffer_get(leaf);
4930 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4931 free_extent_buffer_stale(leaf);
4932 }
4933 /*
4934 * delete the item at the leaf level in path. If that empties
4935 * the leaf, remove it from the tree
4936 */
4937 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4938 struct btrfs_path *path, int slot, int nr)
4939 {
4940 struct extent_buffer *leaf;
4941 struct btrfs_item *item;
4942 u32 last_off;
4943 u32 dsize = 0;
4944 int ret = 0;
4945 int wret;
4946 int i;
4947 u32 nritems;
4948 struct btrfs_map_token token;
4949
4950 btrfs_init_map_token(&token);
4951
4952 leaf = path->nodes[0];
4953 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4954
4955 for (i = 0; i < nr; i++)
4956 dsize += btrfs_item_size_nr(leaf, slot + i);
4957
4958 nritems = btrfs_header_nritems(leaf);
4959
4960 if (slot + nr != nritems) {
4961 int data_end = leaf_data_end(root, leaf);
4962
4963 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4964 data_end + dsize,
4965 btrfs_leaf_data(leaf) + data_end,
4966 last_off - data_end);
4967
4968 for (i = slot + nr; i < nritems; i++) {
4969 u32 ioff;
4970
4971 item = btrfs_item_nr(i);
4972 ioff = btrfs_token_item_offset(leaf, item, &token);
4973 btrfs_set_token_item_offset(leaf, item,
4974 ioff + dsize, &token);
4975 }
4976
4977 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4978 btrfs_item_nr_offset(slot + nr),
4979 sizeof(struct btrfs_item) *
4980 (nritems - slot - nr));
4981 }
4982 btrfs_set_header_nritems(leaf, nritems - nr);
4983 nritems -= nr;
4984
4985 /* delete the leaf if we've emptied it */
4986 if (nritems == 0) {
4987 if (leaf == root->node) {
4988 btrfs_set_header_level(leaf, 0);
4989 } else {
4990 btrfs_set_path_blocking(path);
4991 clean_tree_block(trans, root->fs_info, leaf);
4992 btrfs_del_leaf(trans, root, path, leaf);
4993 }
4994 } else {
4995 int used = leaf_space_used(leaf, 0, nritems);
4996 if (slot == 0) {
4997 struct btrfs_disk_key disk_key;
4998
4999 btrfs_item_key(leaf, &disk_key, 0);
5000 fixup_low_keys(root->fs_info, path, &disk_key, 1);
5001 }
5002
5003 /* delete the leaf if it is mostly empty */
5004 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
5005 /* push_leaf_left fixes the path.
5006 * make sure the path still points to our leaf
5007 * for possible call to del_ptr below
5008 */
5009 slot = path->slots[1];
5010 extent_buffer_get(leaf);
5011
5012 btrfs_set_path_blocking(path);
5013 wret = push_leaf_left(trans, root, path, 1, 1,
5014 1, (u32)-1);
5015 if (wret < 0 && wret != -ENOSPC)
5016 ret = wret;
5017
5018 if (path->nodes[0] == leaf &&
5019 btrfs_header_nritems(leaf)) {
5020 wret = push_leaf_right(trans, root, path, 1,
5021 1, 1, 0);
5022 if (wret < 0 && wret != -ENOSPC)
5023 ret = wret;
5024 }
5025
5026 if (btrfs_header_nritems(leaf) == 0) {
5027 path->slots[1] = slot;
5028 btrfs_del_leaf(trans, root, path, leaf);
5029 free_extent_buffer(leaf);
5030 ret = 0;
5031 } else {
5032 /* if we're still in the path, make sure
5033 * we're dirty. Otherwise, one of the
5034 * push_leaf functions must have already
5035 * dirtied this buffer
5036 */
5037 if (path->nodes[0] == leaf)
5038 btrfs_mark_buffer_dirty(leaf);
5039 free_extent_buffer(leaf);
5040 }
5041 } else {
5042 btrfs_mark_buffer_dirty(leaf);
5043 }
5044 }
5045 return ret;
5046 }
5047
5048 /*
5049 * search the tree again to find a leaf with lesser keys
5050 * returns 0 if it found something or 1 if there are no lesser leaves.
5051 * returns < 0 on io errors.
5052 *
5053 * This may release the path, and so you may lose any locks held at the
5054 * time you call it.
5055 */
5056 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
5057 {
5058 struct btrfs_key key;
5059 struct btrfs_disk_key found_key;
5060 int ret;
5061
5062 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
5063
5064 if (key.offset > 0) {
5065 key.offset--;
5066 } else if (key.type > 0) {
5067 key.type--;
5068 key.offset = (u64)-1;
5069 } else if (key.objectid > 0) {
5070 key.objectid--;
5071 key.type = (u8)-1;
5072 key.offset = (u64)-1;
5073 } else {
5074 return 1;
5075 }
5076
5077 btrfs_release_path(path);
5078 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5079 if (ret < 0)
5080 return ret;
5081 btrfs_item_key(path->nodes[0], &found_key, 0);
5082 ret = comp_keys(&found_key, &key);
5083 /*
5084 * We might have had an item with the previous key in the tree right
5085 * before we released our path. And after we released our path, that
5086 * item might have been pushed to the first slot (0) of the leaf we
5087 * were holding due to a tree balance. Alternatively, an item with the
5088 * previous key can exist as the only element of a leaf (big fat item).
5089 * Therefore account for these 2 cases, so that our callers (like
5090 * btrfs_previous_item) don't miss an existing item with a key matching
5091 * the previous key we computed above.
5092 */
5093 if (ret <= 0)
5094 return 0;
5095 return 1;
5096 }
5097
5098 /*
5099 * A helper function to walk down the tree starting at min_key, and looking
5100 * for nodes or leaves that are have a minimum transaction id.
5101 * This is used by the btree defrag code, and tree logging
5102 *
5103 * This does not cow, but it does stuff the starting key it finds back
5104 * into min_key, so you can call btrfs_search_slot with cow=1 on the
5105 * key and get a writable path.
5106 *
5107 * This does lock as it descends, and path->keep_locks should be set
5108 * to 1 by the caller.
5109 *
5110 * This honors path->lowest_level to prevent descent past a given level
5111 * of the tree.
5112 *
5113 * min_trans indicates the oldest transaction that you are interested
5114 * in walking through. Any nodes or leaves older than min_trans are
5115 * skipped over (without reading them).
5116 *
5117 * returns zero if something useful was found, < 0 on error and 1 if there
5118 * was nothing in the tree that matched the search criteria.
5119 */
5120 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
5121 struct btrfs_path *path,
5122 u64 min_trans)
5123 {
5124 struct extent_buffer *cur;
5125 struct btrfs_key found_key;
5126 int slot;
5127 int sret;
5128 u32 nritems;
5129 int level;
5130 int ret = 1;
5131 int keep_locks = path->keep_locks;
5132
5133 path->keep_locks = 1;
5134 again:
5135 cur = btrfs_read_lock_root_node(root);
5136 level = btrfs_header_level(cur);
5137 WARN_ON(path->nodes[level]);
5138 path->nodes[level] = cur;
5139 path->locks[level] = BTRFS_READ_LOCK;
5140
5141 if (btrfs_header_generation(cur) < min_trans) {
5142 ret = 1;
5143 goto out;
5144 }
5145 while (1) {
5146 nritems = btrfs_header_nritems(cur);
5147 level = btrfs_header_level(cur);
5148 sret = bin_search(cur, min_key, level, &slot);
5149
5150 /* at the lowest level, we're done, setup the path and exit */
5151 if (level == path->lowest_level) {
5152 if (slot >= nritems)
5153 goto find_next_key;
5154 ret = 0;
5155 path->slots[level] = slot;
5156 btrfs_item_key_to_cpu(cur, &found_key, slot);
5157 goto out;
5158 }
5159 if (sret && slot > 0)
5160 slot--;
5161 /*
5162 * check this node pointer against the min_trans parameters.
5163 * If it is too old, old, skip to the next one.
5164 */
5165 while (slot < nritems) {
5166 u64 gen;
5167
5168 gen = btrfs_node_ptr_generation(cur, slot);
5169 if (gen < min_trans) {
5170 slot++;
5171 continue;
5172 }
5173 break;
5174 }
5175 find_next_key:
5176 /*
5177 * we didn't find a candidate key in this node, walk forward
5178 * and find another one
5179 */
5180 if (slot >= nritems) {
5181 path->slots[level] = slot;
5182 btrfs_set_path_blocking(path);
5183 sret = btrfs_find_next_key(root, path, min_key, level,
5184 min_trans);
5185 if (sret == 0) {
5186 btrfs_release_path(path);
5187 goto again;
5188 } else {
5189 goto out;
5190 }
5191 }
5192 /* save our key for returning back */
5193 btrfs_node_key_to_cpu(cur, &found_key, slot);
5194 path->slots[level] = slot;
5195 if (level == path->lowest_level) {
5196 ret = 0;
5197 goto out;
5198 }
5199 btrfs_set_path_blocking(path);
5200 cur = read_node_slot(root, cur, slot);
5201 BUG_ON(!cur); /* -ENOMEM */
5202
5203 btrfs_tree_read_lock(cur);
5204
5205 path->locks[level - 1] = BTRFS_READ_LOCK;
5206 path->nodes[level - 1] = cur;
5207 unlock_up(path, level, 1, 0, NULL);
5208 btrfs_clear_path_blocking(path, NULL, 0);
5209 }
5210 out:
5211 path->keep_locks = keep_locks;
5212 if (ret == 0) {
5213 btrfs_unlock_up_safe(path, path->lowest_level + 1);
5214 btrfs_set_path_blocking(path);
5215 memcpy(min_key, &found_key, sizeof(found_key));
5216 }
5217 return ret;
5218 }
5219
5220 static void tree_move_down(struct btrfs_root *root,
5221 struct btrfs_path *path,
5222 int *level, int root_level)
5223 {
5224 BUG_ON(*level == 0);
5225 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
5226 path->slots[*level]);
5227 path->slots[*level - 1] = 0;
5228 (*level)--;
5229 }
5230
5231 static int tree_move_next_or_upnext(struct btrfs_root *root,
5232 struct btrfs_path *path,
5233 int *level, int root_level)
5234 {
5235 int ret = 0;
5236 int nritems;
5237 nritems = btrfs_header_nritems(path->nodes[*level]);
5238
5239 path->slots[*level]++;
5240
5241 while (path->slots[*level] >= nritems) {
5242 if (*level == root_level)
5243 return -1;
5244
5245 /* move upnext */
5246 path->slots[*level] = 0;
5247 free_extent_buffer(path->nodes[*level]);
5248 path->nodes[*level] = NULL;
5249 (*level)++;
5250 path->slots[*level]++;
5251
5252 nritems = btrfs_header_nritems(path->nodes[*level]);
5253 ret = 1;
5254 }
5255 return ret;
5256 }
5257
5258 /*
5259 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5260 * or down.
5261 */
5262 static int tree_advance(struct btrfs_root *root,
5263 struct btrfs_path *path,
5264 int *level, int root_level,
5265 int allow_down,
5266 struct btrfs_key *key)
5267 {
5268 int ret;
5269
5270 if (*level == 0 || !allow_down) {
5271 ret = tree_move_next_or_upnext(root, path, level, root_level);
5272 } else {
5273 tree_move_down(root, path, level, root_level);
5274 ret = 0;
5275 }
5276 if (ret >= 0) {
5277 if (*level == 0)
5278 btrfs_item_key_to_cpu(path->nodes[*level], key,
5279 path->slots[*level]);
5280 else
5281 btrfs_node_key_to_cpu(path->nodes[*level], key,
5282 path->slots[*level]);
5283 }
5284 return ret;
5285 }
5286
5287 static int tree_compare_item(struct btrfs_root *left_root,
5288 struct btrfs_path *left_path,
5289 struct btrfs_path *right_path,
5290 char *tmp_buf)
5291 {
5292 int cmp;
5293 int len1, len2;
5294 unsigned long off1, off2;
5295
5296 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5297 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5298 if (len1 != len2)
5299 return 1;
5300
5301 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5302 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5303 right_path->slots[0]);
5304
5305 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5306
5307 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5308 if (cmp)
5309 return 1;
5310 return 0;
5311 }
5312
5313 #define ADVANCE 1
5314 #define ADVANCE_ONLY_NEXT -1
5315
5316 /*
5317 * This function compares two trees and calls the provided callback for
5318 * every changed/new/deleted item it finds.
5319 * If shared tree blocks are encountered, whole subtrees are skipped, making
5320 * the compare pretty fast on snapshotted subvolumes.
5321 *
5322 * This currently works on commit roots only. As commit roots are read only,
5323 * we don't do any locking. The commit roots are protected with transactions.
5324 * Transactions are ended and rejoined when a commit is tried in between.
5325 *
5326 * This function checks for modifications done to the trees while comparing.
5327 * If it detects a change, it aborts immediately.
5328 */
5329 int btrfs_compare_trees(struct btrfs_root *left_root,
5330 struct btrfs_root *right_root,
5331 btrfs_changed_cb_t changed_cb, void *ctx)
5332 {
5333 int ret;
5334 int cmp;
5335 struct btrfs_path *left_path = NULL;
5336 struct btrfs_path *right_path = NULL;
5337 struct btrfs_key left_key;
5338 struct btrfs_key right_key;
5339 char *tmp_buf = NULL;
5340 int left_root_level;
5341 int right_root_level;
5342 int left_level;
5343 int right_level;
5344 int left_end_reached;
5345 int right_end_reached;
5346 int advance_left;
5347 int advance_right;
5348 u64 left_blockptr;
5349 u64 right_blockptr;
5350 u64 left_gen;
5351 u64 right_gen;
5352
5353 left_path = btrfs_alloc_path();
5354 if (!left_path) {
5355 ret = -ENOMEM;
5356 goto out;
5357 }
5358 right_path = btrfs_alloc_path();
5359 if (!right_path) {
5360 ret = -ENOMEM;
5361 goto out;
5362 }
5363
5364 tmp_buf = kmalloc(left_root->nodesize, GFP_KERNEL);
5365 if (!tmp_buf) {
5366 ret = -ENOMEM;
5367 goto out;
5368 }
5369
5370 left_path->search_commit_root = 1;
5371 left_path->skip_locking = 1;
5372 right_path->search_commit_root = 1;
5373 right_path->skip_locking = 1;
5374
5375 /*
5376 * Strategy: Go to the first items of both trees. Then do
5377 *
5378 * If both trees are at level 0
5379 * Compare keys of current items
5380 * If left < right treat left item as new, advance left tree
5381 * and repeat
5382 * If left > right treat right item as deleted, advance right tree
5383 * and repeat
5384 * If left == right do deep compare of items, treat as changed if
5385 * needed, advance both trees and repeat
5386 * If both trees are at the same level but not at level 0
5387 * Compare keys of current nodes/leafs
5388 * If left < right advance left tree and repeat
5389 * If left > right advance right tree and repeat
5390 * If left == right compare blockptrs of the next nodes/leafs
5391 * If they match advance both trees but stay at the same level
5392 * and repeat
5393 * If they don't match advance both trees while allowing to go
5394 * deeper and repeat
5395 * If tree levels are different
5396 * Advance the tree that needs it and repeat
5397 *
5398 * Advancing a tree means:
5399 * If we are at level 0, try to go to the next slot. If that's not
5400 * possible, go one level up and repeat. Stop when we found a level
5401 * where we could go to the next slot. We may at this point be on a
5402 * node or a leaf.
5403 *
5404 * If we are not at level 0 and not on shared tree blocks, go one
5405 * level deeper.
5406 *
5407 * If we are not at level 0 and on shared tree blocks, go one slot to
5408 * the right if possible or go up and right.
5409 */
5410
5411 down_read(&left_root->fs_info->commit_root_sem);
5412 left_level = btrfs_header_level(left_root->commit_root);
5413 left_root_level = left_level;
5414 left_path->nodes[left_level] = left_root->commit_root;
5415 extent_buffer_get(left_path->nodes[left_level]);
5416
5417 right_level = btrfs_header_level(right_root->commit_root);
5418 right_root_level = right_level;
5419 right_path->nodes[right_level] = right_root->commit_root;
5420 extent_buffer_get(right_path->nodes[right_level]);
5421 up_read(&left_root->fs_info->commit_root_sem);
5422
5423 if (left_level == 0)
5424 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5425 &left_key, left_path->slots[left_level]);
5426 else
5427 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5428 &left_key, left_path->slots[left_level]);
5429 if (right_level == 0)
5430 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5431 &right_key, right_path->slots[right_level]);
5432 else
5433 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5434 &right_key, right_path->slots[right_level]);
5435
5436 left_end_reached = right_end_reached = 0;
5437 advance_left = advance_right = 0;
5438
5439 while (1) {
5440 if (advance_left && !left_end_reached) {
5441 ret = tree_advance(left_root, left_path, &left_level,
5442 left_root_level,
5443 advance_left != ADVANCE_ONLY_NEXT,
5444 &left_key);
5445 if (ret < 0)
5446 left_end_reached = ADVANCE;
5447 advance_left = 0;
5448 }
5449 if (advance_right && !right_end_reached) {
5450 ret = tree_advance(right_root, right_path, &right_level,
5451 right_root_level,
5452 advance_right != ADVANCE_ONLY_NEXT,
5453 &right_key);
5454 if (ret < 0)
5455 right_end_reached = ADVANCE;
5456 advance_right = 0;
5457 }
5458
5459 if (left_end_reached && right_end_reached) {
5460 ret = 0;
5461 goto out;
5462 } else if (left_end_reached) {
5463 if (right_level == 0) {
5464 ret = changed_cb(left_root, right_root,
5465 left_path, right_path,
5466 &right_key,
5467 BTRFS_COMPARE_TREE_DELETED,
5468 ctx);
5469 if (ret < 0)
5470 goto out;
5471 }
5472 advance_right = ADVANCE;
5473 continue;
5474 } else if (right_end_reached) {
5475 if (left_level == 0) {
5476 ret = changed_cb(left_root, right_root,
5477 left_path, right_path,
5478 &left_key,
5479 BTRFS_COMPARE_TREE_NEW,
5480 ctx);
5481 if (ret < 0)
5482 goto out;
5483 }
5484 advance_left = ADVANCE;
5485 continue;
5486 }
5487
5488 if (left_level == 0 && right_level == 0) {
5489 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5490 if (cmp < 0) {
5491 ret = changed_cb(left_root, right_root,
5492 left_path, right_path,
5493 &left_key,
5494 BTRFS_COMPARE_TREE_NEW,
5495 ctx);
5496 if (ret < 0)
5497 goto out;
5498 advance_left = ADVANCE;
5499 } else if (cmp > 0) {
5500 ret = changed_cb(left_root, right_root,
5501 left_path, right_path,
5502 &right_key,
5503 BTRFS_COMPARE_TREE_DELETED,
5504 ctx);
5505 if (ret < 0)
5506 goto out;
5507 advance_right = ADVANCE;
5508 } else {
5509 enum btrfs_compare_tree_result result;
5510
5511 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5512 ret = tree_compare_item(left_root, left_path,
5513 right_path, tmp_buf);
5514 if (ret)
5515 result = BTRFS_COMPARE_TREE_CHANGED;
5516 else
5517 result = BTRFS_COMPARE_TREE_SAME;
5518 ret = changed_cb(left_root, right_root,
5519 left_path, right_path,
5520 &left_key, result, ctx);
5521 if (ret < 0)
5522 goto out;
5523 advance_left = ADVANCE;
5524 advance_right = ADVANCE;
5525 }
5526 } else if (left_level == right_level) {
5527 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5528 if (cmp < 0) {
5529 advance_left = ADVANCE;
5530 } else if (cmp > 0) {
5531 advance_right = ADVANCE;
5532 } else {
5533 left_blockptr = btrfs_node_blockptr(
5534 left_path->nodes[left_level],
5535 left_path->slots[left_level]);
5536 right_blockptr = btrfs_node_blockptr(
5537 right_path->nodes[right_level],
5538 right_path->slots[right_level]);
5539 left_gen = btrfs_node_ptr_generation(
5540 left_path->nodes[left_level],
5541 left_path->slots[left_level]);
5542 right_gen = btrfs_node_ptr_generation(
5543 right_path->nodes[right_level],
5544 right_path->slots[right_level]);
5545 if (left_blockptr == right_blockptr &&
5546 left_gen == right_gen) {
5547 /*
5548 * As we're on a shared block, don't
5549 * allow to go deeper.
5550 */
5551 advance_left = ADVANCE_ONLY_NEXT;
5552 advance_right = ADVANCE_ONLY_NEXT;
5553 } else {
5554 advance_left = ADVANCE;
5555 advance_right = ADVANCE;
5556 }
5557 }
5558 } else if (left_level < right_level) {
5559 advance_right = ADVANCE;
5560 } else {
5561 advance_left = ADVANCE;
5562 }
5563 }
5564
5565 out:
5566 btrfs_free_path(left_path);
5567 btrfs_free_path(right_path);
5568 kfree(tmp_buf);
5569 return ret;
5570 }
5571
5572 /*
5573 * this is similar to btrfs_next_leaf, but does not try to preserve
5574 * and fixup the path. It looks for and returns the next key in the
5575 * tree based on the current path and the min_trans parameters.
5576 *
5577 * 0 is returned if another key is found, < 0 if there are any errors
5578 * and 1 is returned if there are no higher keys in the tree
5579 *
5580 * path->keep_locks should be set to 1 on the search made before
5581 * calling this function.
5582 */
5583 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5584 struct btrfs_key *key, int level, u64 min_trans)
5585 {
5586 int slot;
5587 struct extent_buffer *c;
5588
5589 WARN_ON(!path->keep_locks);
5590 while (level < BTRFS_MAX_LEVEL) {
5591 if (!path->nodes[level])
5592 return 1;
5593
5594 slot = path->slots[level] + 1;
5595 c = path->nodes[level];
5596 next:
5597 if (slot >= btrfs_header_nritems(c)) {
5598 int ret;
5599 int orig_lowest;
5600 struct btrfs_key cur_key;
5601 if (level + 1 >= BTRFS_MAX_LEVEL ||
5602 !path->nodes[level + 1])
5603 return 1;
5604
5605 if (path->locks[level + 1]) {
5606 level++;
5607 continue;
5608 }
5609
5610 slot = btrfs_header_nritems(c) - 1;
5611 if (level == 0)
5612 btrfs_item_key_to_cpu(c, &cur_key, slot);
5613 else
5614 btrfs_node_key_to_cpu(c, &cur_key, slot);
5615
5616 orig_lowest = path->lowest_level;
5617 btrfs_release_path(path);
5618 path->lowest_level = level;
5619 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5620 0, 0);
5621 path->lowest_level = orig_lowest;
5622 if (ret < 0)
5623 return ret;
5624
5625 c = path->nodes[level];
5626 slot = path->slots[level];
5627 if (ret == 0)
5628 slot++;
5629 goto next;
5630 }
5631
5632 if (level == 0)
5633 btrfs_item_key_to_cpu(c, key, slot);
5634 else {
5635 u64 gen = btrfs_node_ptr_generation(c, slot);
5636
5637 if (gen < min_trans) {
5638 slot++;
5639 goto next;
5640 }
5641 btrfs_node_key_to_cpu(c, key, slot);
5642 }
5643 return 0;
5644 }
5645 return 1;
5646 }
5647
5648 /*
5649 * search the tree again to find a leaf with greater keys
5650 * returns 0 if it found something or 1 if there are no greater leaves.
5651 * returns < 0 on io errors.
5652 */
5653 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5654 {
5655 return btrfs_next_old_leaf(root, path, 0);
5656 }
5657
5658 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5659 u64 time_seq)
5660 {
5661 int slot;
5662 int level;
5663 struct extent_buffer *c;
5664 struct extent_buffer *next;
5665 struct btrfs_key key;
5666 u32 nritems;
5667 int ret;
5668 int old_spinning = path->leave_spinning;
5669 int next_rw_lock = 0;
5670
5671 nritems = btrfs_header_nritems(path->nodes[0]);
5672 if (nritems == 0)
5673 return 1;
5674
5675 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5676 again:
5677 level = 1;
5678 next = NULL;
5679 next_rw_lock = 0;
5680 btrfs_release_path(path);
5681
5682 path->keep_locks = 1;
5683 path->leave_spinning = 1;
5684
5685 if (time_seq)
5686 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5687 else
5688 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5689 path->keep_locks = 0;
5690
5691 if (ret < 0)
5692 return ret;
5693
5694 nritems = btrfs_header_nritems(path->nodes[0]);
5695 /*
5696 * by releasing the path above we dropped all our locks. A balance
5697 * could have added more items next to the key that used to be
5698 * at the very end of the block. So, check again here and
5699 * advance the path if there are now more items available.
5700 */
5701 if (nritems > 0 && path->slots[0] < nritems - 1) {
5702 if (ret == 0)
5703 path->slots[0]++;
5704 ret = 0;
5705 goto done;
5706 }
5707 /*
5708 * So the above check misses one case:
5709 * - after releasing the path above, someone has removed the item that
5710 * used to be at the very end of the block, and balance between leafs
5711 * gets another one with bigger key.offset to replace it.
5712 *
5713 * This one should be returned as well, or we can get leaf corruption
5714 * later(esp. in __btrfs_drop_extents()).
5715 *
5716 * And a bit more explanation about this check,
5717 * with ret > 0, the key isn't found, the path points to the slot
5718 * where it should be inserted, so the path->slots[0] item must be the
5719 * bigger one.
5720 */
5721 if (nritems > 0 && ret > 0 && path->slots[0] == nritems - 1) {
5722 ret = 0;
5723 goto done;
5724 }
5725
5726 while (level < BTRFS_MAX_LEVEL) {
5727 if (!path->nodes[level]) {
5728 ret = 1;
5729 goto done;
5730 }
5731
5732 slot = path->slots[level] + 1;
5733 c = path->nodes[level];
5734 if (slot >= btrfs_header_nritems(c)) {
5735 level++;
5736 if (level == BTRFS_MAX_LEVEL) {
5737 ret = 1;
5738 goto done;
5739 }
5740 continue;
5741 }
5742
5743 if (next) {
5744 btrfs_tree_unlock_rw(next, next_rw_lock);
5745 free_extent_buffer(next);
5746 }
5747
5748 next = c;
5749 next_rw_lock = path->locks[level];
5750 ret = read_block_for_search(NULL, root, path, &next, level,
5751 slot, &key, 0);
5752 if (ret == -EAGAIN)
5753 goto again;
5754
5755 if (ret < 0) {
5756 btrfs_release_path(path);
5757 goto done;
5758 }
5759
5760 if (!path->skip_locking) {
5761 ret = btrfs_try_tree_read_lock(next);
5762 if (!ret && time_seq) {
5763 /*
5764 * If we don't get the lock, we may be racing
5765 * with push_leaf_left, holding that lock while
5766 * itself waiting for the leaf we've currently
5767 * locked. To solve this situation, we give up
5768 * on our lock and cycle.
5769 */
5770 free_extent_buffer(next);
5771 btrfs_release_path(path);
5772 cond_resched();
5773 goto again;
5774 }
5775 if (!ret) {
5776 btrfs_set_path_blocking(path);
5777 btrfs_tree_read_lock(next);
5778 btrfs_clear_path_blocking(path, next,
5779 BTRFS_READ_LOCK);
5780 }
5781 next_rw_lock = BTRFS_READ_LOCK;
5782 }
5783 break;
5784 }
5785 path->slots[level] = slot;
5786 while (1) {
5787 level--;
5788 c = path->nodes[level];
5789 if (path->locks[level])
5790 btrfs_tree_unlock_rw(c, path->locks[level]);
5791
5792 free_extent_buffer(c);
5793 path->nodes[level] = next;
5794 path->slots[level] = 0;
5795 if (!path->skip_locking)
5796 path->locks[level] = next_rw_lock;
5797 if (!level)
5798 break;
5799
5800 ret = read_block_for_search(NULL, root, path, &next, level,
5801 0, &key, 0);
5802 if (ret == -EAGAIN)
5803 goto again;
5804
5805 if (ret < 0) {
5806 btrfs_release_path(path);
5807 goto done;
5808 }
5809
5810 if (!path->skip_locking) {
5811 ret = btrfs_try_tree_read_lock(next);
5812 if (!ret) {
5813 btrfs_set_path_blocking(path);
5814 btrfs_tree_read_lock(next);
5815 btrfs_clear_path_blocking(path, next,
5816 BTRFS_READ_LOCK);
5817 }
5818 next_rw_lock = BTRFS_READ_LOCK;
5819 }
5820 }
5821 ret = 0;
5822 done:
5823 unlock_up(path, 0, 1, 0, NULL);
5824 path->leave_spinning = old_spinning;
5825 if (!old_spinning)
5826 btrfs_set_path_blocking(path);
5827
5828 return ret;
5829 }
5830
5831 /*
5832 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5833 * searching until it gets past min_objectid or finds an item of 'type'
5834 *
5835 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5836 */
5837 int btrfs_previous_item(struct btrfs_root *root,
5838 struct btrfs_path *path, u64 min_objectid,
5839 int type)
5840 {
5841 struct btrfs_key found_key;
5842 struct extent_buffer *leaf;
5843 u32 nritems;
5844 int ret;
5845
5846 while (1) {
5847 if (path->slots[0] == 0) {
5848 btrfs_set_path_blocking(path);
5849 ret = btrfs_prev_leaf(root, path);
5850 if (ret != 0)
5851 return ret;
5852 } else {
5853 path->slots[0]--;
5854 }
5855 leaf = path->nodes[0];
5856 nritems = btrfs_header_nritems(leaf);
5857 if (nritems == 0)
5858 return 1;
5859 if (path->slots[0] == nritems)
5860 path->slots[0]--;
5861
5862 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5863 if (found_key.objectid < min_objectid)
5864 break;
5865 if (found_key.type == type)
5866 return 0;
5867 if (found_key.objectid == min_objectid &&
5868 found_key.type < type)
5869 break;
5870 }
5871 return 1;
5872 }
5873
5874 /*
5875 * search in extent tree to find a previous Metadata/Data extent item with
5876 * min objecitd.
5877 *
5878 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5879 */
5880 int btrfs_previous_extent_item(struct btrfs_root *root,
5881 struct btrfs_path *path, u64 min_objectid)
5882 {
5883 struct btrfs_key found_key;
5884 struct extent_buffer *leaf;
5885 u32 nritems;
5886 int ret;
5887
5888 while (1) {
5889 if (path->slots[0] == 0) {
5890 btrfs_set_path_blocking(path);
5891 ret = btrfs_prev_leaf(root, path);
5892 if (ret != 0)
5893 return ret;
5894 } else {
5895 path->slots[0]--;
5896 }
5897 leaf = path->nodes[0];
5898 nritems = btrfs_header_nritems(leaf);
5899 if (nritems == 0)
5900 return 1;
5901 if (path->slots[0] == nritems)
5902 path->slots[0]--;
5903
5904 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5905 if (found_key.objectid < min_objectid)
5906 break;
5907 if (found_key.type == BTRFS_EXTENT_ITEM_KEY ||
5908 found_key.type == BTRFS_METADATA_ITEM_KEY)
5909 return 0;
5910 if (found_key.objectid == min_objectid &&
5911 found_key.type < BTRFS_EXTENT_ITEM_KEY)
5912 break;
5913 }
5914 return 1;
5915 }
This page took 0.155589 seconds and 5 git commands to generate.