fs/btrfs/ctree.c
1 /*
2 * Copyright (C) 2007,2008 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 021110-1307, USA.
17 */
18
19 #include <linux/sched.h>
20 #include <linux/slab.h>
21 #include <linux/rbtree.h>
22 #include "ctree.h"
23 #include "disk-io.h"
24 #include "transaction.h"
25 #include "print-tree.h"
26 #include "locking.h"
27
28 static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
29 *root, struct btrfs_path *path, int level);
30 static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
31 *root, struct btrfs_key *ins_key,
32 struct btrfs_path *path, int data_size, int extend);
33 static int push_node_left(struct btrfs_trans_handle *trans,
34 struct btrfs_root *root, struct extent_buffer *dst,
35 struct extent_buffer *src, int empty);
36 static int balance_node_right(struct btrfs_trans_handle *trans,
37 struct btrfs_root *root,
38 struct extent_buffer *dst_buf,
39 struct extent_buffer *src_buf);
40 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
41 int level, int slot);
42 static void tree_mod_log_free_eb(struct btrfs_fs_info *fs_info,
43 struct extent_buffer *eb);
44 static int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path);
45
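/*
 * allocate a new, zeroed btrfs_path from the btrfs_path_cachep slab cache.
 * Returns NULL if the allocation fails.
 */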
46 struct btrfs_path *btrfs_alloc_path(void)
47 {
48 struct btrfs_path *path;
49 path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
50 return path;
51 }
52
53 /*
54 * set all locked nodes in the path to blocking locks. This should
55 * be done before scheduling
56 */
57 noinline void btrfs_set_path_blocking(struct btrfs_path *p)
58 {
59 int i;
60 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
61 if (!p->nodes[i] || !p->locks[i])
62 continue;
63 btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
64 if (p->locks[i] == BTRFS_READ_LOCK)
65 p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
66 else if (p->locks[i] == BTRFS_WRITE_LOCK)
67 p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
68 }
69 }
70
71 /*
72 * reset all the locked nodes in the path to spinning locks.
73 *
74 * held is used to keep lockdep happy, when lockdep is enabled
75 * we set held to a blocking lock before we go around and
76 * retake all the spinlocks in the path. You can safely use NULL
77 * for held
78 */
79 noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
80 struct extent_buffer *held, int held_rw)
81 {
82 int i;
83
84 #ifdef CONFIG_DEBUG_LOCK_ALLOC
85 /* lockdep really cares that we take all of these spinlocks
86 * in the right order. If any of the locks in the path are not
87 * currently blocking, it is going to complain. So, make really
88 * really sure by forcing the path to blocking before we clear
89 * the path blocking.
90 */
91 if (held) {
92 btrfs_set_lock_blocking_rw(held, held_rw);
93 if (held_rw == BTRFS_WRITE_LOCK)
94 held_rw = BTRFS_WRITE_LOCK_BLOCKING;
95 else if (held_rw == BTRFS_READ_LOCK)
96 held_rw = BTRFS_READ_LOCK_BLOCKING;
97 }
98 btrfs_set_path_blocking(p);
99 #endif
100
101 for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
102 if (p->nodes[i] && p->locks[i]) {
103 btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
104 if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
105 p->locks[i] = BTRFS_WRITE_LOCK;
106 else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
107 p->locks[i] = BTRFS_READ_LOCK;
108 }
109 }
110
111 #ifdef CONFIG_DEBUG_LOCK_ALLOC
112 if (held)
113 btrfs_clear_lock_blocking_rw(held, held_rw);
114 #endif
115 }
116
117 /* this also releases the path */
118 void btrfs_free_path(struct btrfs_path *p)
119 {
120 if (!p)
121 return;
122 btrfs_release_path(p);
123 kmem_cache_free(btrfs_path_cachep, p);
124 }
125
126 /*
127 * path release drops references on the extent buffers in the path
128 * and it drops any locks held by this path
129 *
130 * It is safe to call this on paths that have no locks or extent buffers held.
131 */
132 noinline void btrfs_release_path(struct btrfs_path *p)
133 {
134 int i;
135
136 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
137 p->slots[i] = 0;
138 if (!p->nodes[i])
139 continue;
140 if (p->locks[i]) {
141 btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
142 p->locks[i] = 0;
143 }
144 free_extent_buffer(p->nodes[i]);
145 p->nodes[i] = NULL;
146 }
147 }
148
149 /*
150 * safely gets a reference on the root node of a tree. A lock
151 * is not taken, so a concurrent writer may put a different node
152 * at the root of the tree. See btrfs_lock_root_node for the
153 * looping required.
154 *
155 * The extent buffer returned by this has a reference taken, so
156 * it won't disappear. It may stop being the root of the tree
157 * at any time because there are no locks held.
158 */
159 struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
160 {
161 struct extent_buffer *eb;
162
163 while (1) {
164 rcu_read_lock();
165 eb = rcu_dereference(root->node);
166
167 /*
168 * RCU really hurts here: we could free up the root node because
169 * it was cow'ed, but we may not get the new root node yet, so do
170 * the inc_not_zero dance and if it doesn't work then
171 * synchronize_rcu and try again.
172 */
173 if (atomic_inc_not_zero(&eb->refs)) {
174 rcu_read_unlock();
175 break;
176 }
177 rcu_read_unlock();
178 synchronize_rcu();
179 }
180 return eb;
181 }
182
183 /* loop around taking references on and locking the root node of the
184 * tree until you end up with a lock on the root. A locked buffer
185 * is returned, with a reference held.
186 */
187 struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
188 {
189 struct extent_buffer *eb;
190
191 while (1) {
192 eb = btrfs_root_node(root);
193 btrfs_tree_lock(eb);
194 if (eb == root->node)
195 break;
196 btrfs_tree_unlock(eb);
197 free_extent_buffer(eb);
198 }
199 return eb;
200 }
201
202 /* loop around taking references on and read locking the root node of the
203 * tree until you end up with a read lock on the root. A read locked buffer
204 * is returned, with a reference held.
205 */
206 static struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
207 {
208 struct extent_buffer *eb;
209
210 while (1) {
211 eb = btrfs_root_node(root);
212 btrfs_tree_read_lock(eb);
213 if (eb == root->node)
214 break;
215 btrfs_tree_read_unlock(eb);
216 free_extent_buffer(eb);
217 }
218 return eb;
219 }
220
221 /* cowonly roots (everything that is not a reference counted cow subvolume)
222 * just get put onto a simple dirty list. transaction.c walks this list to
223 * make sure they get properly updated on disk.
224 */
225 static void add_root_to_dirty_list(struct btrfs_root *root)
226 {
227 spin_lock(&root->fs_info->trans_lock);
228 if (root->track_dirty && list_empty(&root->dirty_list)) {
229 list_add(&root->dirty_list,
230 &root->fs_info->dirty_cowonly_roots);
231 }
232 spin_unlock(&root->fs_info->trans_lock);
233 }
234
235 /*
236 * used by snapshot creation to make a copy of a root for a tree with
237 * a given objectid. The buffer with the new root node is returned in
238 * cow_ret, and this func returns zero on success or a negative error code.
239 */
240 int btrfs_copy_root(struct btrfs_trans_handle *trans,
241 struct btrfs_root *root,
242 struct extent_buffer *buf,
243 struct extent_buffer **cow_ret, u64 new_root_objectid)
244 {
245 struct extent_buffer *cow;
246 int ret = 0;
247 int level;
248 struct btrfs_disk_key disk_key;
249
250 WARN_ON(root->ref_cows && trans->transid !=
251 root->fs_info->running_transaction->transid);
252 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
253
254 level = btrfs_header_level(buf);
255 if (level == 0)
256 btrfs_item_key(buf, &disk_key, 0);
257 else
258 btrfs_node_key(buf, &disk_key, 0);
259
260 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
261 new_root_objectid, &disk_key, level,
262 buf->start, 0);
263 if (IS_ERR(cow))
264 return PTR_ERR(cow);
265
266 copy_extent_buffer(cow, buf, 0, 0, cow->len);
267 btrfs_set_header_bytenr(cow, cow->start);
268 btrfs_set_header_generation(cow, trans->transid);
269 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
270 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
271 BTRFS_HEADER_FLAG_RELOC);
272 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
273 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
274 else
275 btrfs_set_header_owner(cow, new_root_objectid);
276
277 write_extent_buffer(cow, root->fs_info->fsid,
278 (unsigned long)btrfs_header_fsid(cow),
279 BTRFS_FSID_SIZE);
280
281 WARN_ON(btrfs_header_generation(buf) > trans->transid);
282 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
283 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
284 else
285 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
286
287 if (ret)
288 return ret;
289
290 btrfs_mark_buffer_dirty(cow);
291 *cow_ret = cow;
292 return 0;
293 }
294
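/*
 * Operation types recorded in the tree mod log. Each element describes one
 * modification to a tree block so that the modification can be undone when
 * rewinding the block to an older tree mod sequence number.
 */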
295 enum mod_log_op {
296 MOD_LOG_KEY_REPLACE,
297 MOD_LOG_KEY_ADD,
298 MOD_LOG_KEY_REMOVE,
299 MOD_LOG_KEY_REMOVE_WHILE_FREEING,
300 MOD_LOG_KEY_REMOVE_WHILE_MOVING,
301 MOD_LOG_MOVE_KEYS,
302 MOD_LOG_ROOT_REPLACE,
303 };
304
305 struct tree_mod_move {
306 int dst_slot;
307 int nr_items;
308 };
309
310 struct tree_mod_root {
311 u64 logical;
312 u8 level;
313 };
314
315 struct tree_mod_elem {
316 struct rb_node node;
317 u64 index; /* shifted logical */
318 u64 seq;
319 enum mod_log_op op;
320
321 /* this is used for MOD_LOG_KEY_* and MOD_LOG_MOVE_KEYS operations */
322 int slot;
323
324 /* this is used for MOD_LOG_KEY* and MOD_LOG_ROOT_REPLACE */
325 u64 generation;
326
327 /* those are used for op == MOD_LOG_KEY_{REPLACE,REMOVE} */
328 struct btrfs_disk_key key;
329 u64 blockptr;
330
331 /* this is used for op == MOD_LOG_MOVE_KEYS */
332 struct tree_mod_move move;
333
334 /* this is used for op == MOD_LOG_ROOT_REPLACE */
335 struct tree_mod_root old_root;
336 };
337
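/*
 * trivial wrappers around fs_info->tree_mod_log_lock, the rwlock that
 * protects the tree mod log rb tree.
 */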
338 static inline void tree_mod_log_read_lock(struct btrfs_fs_info *fs_info)
339 {
340 read_lock(&fs_info->tree_mod_log_lock);
341 }
342
343 static inline void tree_mod_log_read_unlock(struct btrfs_fs_info *fs_info)
344 {
345 read_unlock(&fs_info->tree_mod_log_lock);
346 }
347
348 static inline void tree_mod_log_write_lock(struct btrfs_fs_info *fs_info)
349 {
350 write_lock(&fs_info->tree_mod_log_lock);
351 }
352
353 static inline void tree_mod_log_write_unlock(struct btrfs_fs_info *fs_info)
354 {
355 write_unlock(&fs_info->tree_mod_log_lock);
356 }
357
358 /*
359 * Increment the upper half of tree_mod_seq, set lower half zero.
360 *
361 * Must be called with fs_info->tree_mod_seq_lock held.
362 */
363 static inline u64 btrfs_inc_tree_mod_seq_major(struct btrfs_fs_info *fs_info)
364 {
365 u64 seq = atomic64_read(&fs_info->tree_mod_seq);
366 seq &= 0xffffffff00000000ull;
367 seq += 1ull << 32;
368 atomic64_set(&fs_info->tree_mod_seq, seq);
369 return seq;
370 }
371
372 /*
373 * Increment the lower half of tree_mod_seq.
374 *
375 * Must be called with fs_info->tree_mod_seq_lock held. The way major numbers
376 * are generated should not technically require a spin lock here. (Rationale:
377 * incrementing the minor while the major increment is between its
378 * atomic64_read and atomic64_set calls doesn't duplicate sequence numbers;
379 * it just returns a unique sequence number as usual.) We have decided to leave
380 * that requirement in here and rethink it once we notice it really imposes a
381 * problem on some workload.
382 */
383 static inline u64 btrfs_inc_tree_mod_seq_minor(struct btrfs_fs_info *fs_info)
384 {
385 return atomic64_inc_return(&fs_info->tree_mod_seq);
386 }
387
388 /*
389 * return the last minor in the previous major tree_mod_seq number
390 */
391 u64 btrfs_tree_mod_seq_prev(u64 seq)
392 {
393 return (seq & 0xffffffff00000000ull) - 1ull;
394 }
395
396 /*
397 * This adds a new blocker to the tree mod log's blocker list if the @elem
398 * passed does not already have a sequence number set. So when a caller expects
399 * to record tree modifications, it should make sure to set elem->seq to zero
400 * before calling btrfs_get_tree_mod_seq.
401 * Returns a fresh, unused tree log modification sequence number, even if no new
402 * blocker was added.
403 */
404 u64 btrfs_get_tree_mod_seq(struct btrfs_fs_info *fs_info,
405 struct seq_list *elem)
406 {
407 u64 seq;
408
409 tree_mod_log_write_lock(fs_info);
410 spin_lock(&fs_info->tree_mod_seq_lock);
411 if (!elem->seq) {
412 elem->seq = btrfs_inc_tree_mod_seq_major(fs_info);
413 list_add_tail(&elem->list, &fs_info->tree_mod_seq_list);
414 }
415 seq = btrfs_inc_tree_mod_seq_minor(fs_info);
416 spin_unlock(&fs_info->tree_mod_seq_lock);
417 tree_mod_log_write_unlock(fs_info);
418
419 return seq;
420 }
421
422 void btrfs_put_tree_mod_seq(struct btrfs_fs_info *fs_info,
423 struct seq_list *elem)
424 {
425 struct rb_root *tm_root;
426 struct rb_node *node;
427 struct rb_node *next;
428 struct seq_list *cur_elem;
429 struct tree_mod_elem *tm;
430 u64 min_seq = (u64)-1;
431 u64 seq_putting = elem->seq;
432
433 if (!seq_putting)
434 return;
435
436 spin_lock(&fs_info->tree_mod_seq_lock);
437 list_del(&elem->list);
438 elem->seq = 0;
439
440 list_for_each_entry(cur_elem, &fs_info->tree_mod_seq_list, list) {
441 if (cur_elem->seq < min_seq) {
442 if (seq_putting > cur_elem->seq) {
443 /*
444 * blocker with lower sequence number exists, we
445 * cannot remove anything from the log
446 */
447 spin_unlock(&fs_info->tree_mod_seq_lock);
448 return;
449 }
450 min_seq = cur_elem->seq;
451 }
452 }
453 spin_unlock(&fs_info->tree_mod_seq_lock);
454
455 /*
456 * anything that's lower than the lowest existing (read: blocked)
457 * sequence number can be removed from the tree.
458 */
459 tree_mod_log_write_lock(fs_info);
460 tm_root = &fs_info->tree_mod_log;
461 for (node = rb_first(tm_root); node; node = next) {
462 next = rb_next(node);
463 tm = container_of(node, struct tree_mod_elem, node);
464 if (tm->seq > min_seq)
465 continue;
466 rb_erase(node, tm_root);
467 kfree(tm);
468 }
469 tree_mod_log_write_unlock(fs_info);
470 }
471
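/*
 * Typical blocker usage (illustration only, the callers live outside this
 * file): initialize a seq_list with seq == 0, call btrfs_get_tree_mod_seq()
 * to block pruning of the log, do the time-travel lookups with the returned
 * sequence number, then call btrfs_put_tree_mod_seq() so that old log
 * entries may be dropped again.
 */
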
472 /*
473 * key order of the log:
474 * index -> sequence
475 *
476 * the index is the shifted logical of the *new* root node for root replace
477 * operations, or the shifted logical of the affected block for all other
478 * operations.
479 */
480 static noinline int
481 __tree_mod_log_insert(struct btrfs_fs_info *fs_info, struct tree_mod_elem *tm)
482 {
483 struct rb_root *tm_root;
484 struct rb_node **new;
485 struct rb_node *parent = NULL;
486 struct tree_mod_elem *cur;
487
488 BUG_ON(!tm || !tm->seq);
489
490 tm_root = &fs_info->tree_mod_log;
491 new = &tm_root->rb_node;
492 while (*new) {
493 cur = container_of(*new, struct tree_mod_elem, node);
494 parent = *new;
495 if (cur->index < tm->index)
496 new = &((*new)->rb_left);
497 else if (cur->index > tm->index)
498 new = &((*new)->rb_right);
499 else if (cur->seq < tm->seq)
500 new = &((*new)->rb_left);
501 else if (cur->seq > tm->seq)
502 new = &((*new)->rb_right);
503 else {
504 kfree(tm);
505 return -EEXIST;
506 }
507 }
508
509 rb_link_node(&tm->node, parent, new);
510 rb_insert_color(&tm->node, tm_root);
511 return 0;
512 }
513
514 /*
515 * Determines if logging can be omitted. Returns 1 if it can. Otherwise, it
516 * returns zero with the tree_mod_log_lock acquired. The caller must hold
517 * the lock until all tree mod log insertions are recorded in the rb tree and
518 * then call tree_mod_log_write_unlock() to release it.
519 */
520 static inline int tree_mod_dont_log(struct btrfs_fs_info *fs_info,
521 struct extent_buffer *eb) {
522 smp_mb();
523 if (list_empty(&(fs_info)->tree_mod_seq_list))
524 return 1;
525 if (eb && btrfs_header_level(eb) == 0)
526 return 1;
527
528 tree_mod_log_write_lock(fs_info);
529 if (list_empty(&fs_info->tree_mod_seq_list)) {
530 /*
531 * someone emptied the list while we were waiting for the lock.
532 * we must not add to the list when no blocker exists.
533 */
534 tree_mod_log_write_unlock(fs_info);
535 return 1;
536 }
537
538 return 0;
539 }
540
541 /*
542 * This allocates memory and gets a tree modification sequence number.
543 *
544 * Returns <0 on error.
545 * Returns >0 (the added sequence number) on success.
546 */
547 static inline int tree_mod_alloc(struct btrfs_fs_info *fs_info, gfp_t flags,
548 struct tree_mod_elem **tm_ret)
549 {
550 struct tree_mod_elem *tm;
551
552 /*
553 * once we switch from spin locks to something different, we should
554 * honor the flags parameter here.
555 */
556 tm = *tm_ret = kzalloc(sizeof(*tm), GFP_ATOMIC);
557 if (!tm)
558 return -ENOMEM;
559
560 spin_lock(&fs_info->tree_mod_seq_lock);
561 tm->seq = btrfs_inc_tree_mod_seq_minor(fs_info);
562 spin_unlock(&fs_info->tree_mod_seq_lock);
563
564 return tm->seq;
565 }
566
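/*
 * record a single key operation (@op) for slot @slot of @eb. The key and
 * blockptr are saved for everything but MOD_LOG_KEY_ADD so the operation can
 * be reversed during a rewind. Expects the tree mod log write lock to be
 * held by the caller.
 */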
567 static inline int
568 __tree_mod_log_insert_key(struct btrfs_fs_info *fs_info,
569 struct extent_buffer *eb, int slot,
570 enum mod_log_op op, gfp_t flags)
571 {
572 int ret;
573 struct tree_mod_elem *tm;
574
575 ret = tree_mod_alloc(fs_info, flags, &tm);
576 if (ret < 0)
577 return ret;
578
579 tm->index = eb->start >> PAGE_CACHE_SHIFT;
580 if (op != MOD_LOG_KEY_ADD) {
581 btrfs_node_key(eb, &tm->key, slot);
582 tm->blockptr = btrfs_node_blockptr(eb, slot);
583 }
584 tm->op = op;
585 tm->slot = slot;
586 tm->generation = btrfs_node_ptr_generation(eb, slot);
587
588 return __tree_mod_log_insert(fs_info, tm);
589 }
590
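/*
 * same as __tree_mod_log_insert_key, but takes and drops the tree mod log
 * write lock itself and is a no-op when no tree mod seq blocker exists.
 */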
591 static noinline int
592 tree_mod_log_insert_key_mask(struct btrfs_fs_info *fs_info,
593 struct extent_buffer *eb, int slot,
594 enum mod_log_op op, gfp_t flags)
595 {
596 int ret;
597
598 if (tree_mod_dont_log(fs_info, eb))
599 return 0;
600
601 ret = __tree_mod_log_insert_key(fs_info, eb, slot, op, flags);
602
603 tree_mod_log_write_unlock(fs_info);
604 return ret;
605 }
606
607 static noinline int
608 tree_mod_log_insert_key(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
609 int slot, enum mod_log_op op)
610 {
611 return tree_mod_log_insert_key_mask(fs_info, eb, slot, op, GFP_NOFS);
612 }
613
614 static noinline int
615 tree_mod_log_insert_key_locked(struct btrfs_fs_info *fs_info,
616 struct extent_buffer *eb, int slot,
617 enum mod_log_op op)
618 {
619 return __tree_mod_log_insert_key(fs_info, eb, slot, op, GFP_NOFS);
620 }
621
622 static noinline int
623 tree_mod_log_insert_move(struct btrfs_fs_info *fs_info,
624 struct extent_buffer *eb, int dst_slot, int src_slot,
625 int nr_items, gfp_t flags)
626 {
627 struct tree_mod_elem *tm;
628 int ret;
629 int i;
630
631 if (tree_mod_dont_log(fs_info, eb))
632 return 0;
633
634 /*
635 * When we overwrite something during the move, we log these removals.
636 * This can only happen when we move towards the beginning of the
637 * buffer, i.e. dst_slot < src_slot.
638 */
639 for (i = 0; i + dst_slot < src_slot && i < nr_items; i++) {
640 ret = tree_mod_log_insert_key_locked(fs_info, eb, i + dst_slot,
641 MOD_LOG_KEY_REMOVE_WHILE_MOVING);
642 BUG_ON(ret < 0);
643 }
644
645 ret = tree_mod_alloc(fs_info, flags, &tm);
646 if (ret < 0)
647 goto out;
648
649 tm->index = eb->start >> PAGE_CACHE_SHIFT;
650 tm->slot = src_slot;
651 tm->move.dst_slot = dst_slot;
652 tm->move.nr_items = nr_items;
653 tm->op = MOD_LOG_MOVE_KEYS;
654
655 ret = __tree_mod_log_insert(fs_info, tm);
656 out:
657 tree_mod_log_write_unlock(fs_info);
658 return ret;
659 }
660
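/*
 * log the removal of every pointer in a node that is about to be freed, so
 * that a rewind can restore its contents. Leaves are not logged. Expects the
 * tree mod log write lock to be held by the caller.
 */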
661 static inline void
662 __tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
663 {
664 int i;
665 u32 nritems;
666 int ret;
667
668 if (btrfs_header_level(eb) == 0)
669 return;
670
671 nritems = btrfs_header_nritems(eb);
672 for (i = nritems - 1; i >= 0; i--) {
673 ret = tree_mod_log_insert_key_locked(fs_info, eb, i,
674 MOD_LOG_KEY_REMOVE_WHILE_FREEING);
675 BUG_ON(ret < 0);
676 }
677 }
678
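/*
 * log a root replacement. The new root's logical address is used as the
 * index; the old root's logical address, level and generation are saved so
 * that get_old_root() can find and rebuild the previous root. When
 * @log_removal is set, the removal of the old root's pointers is logged
 * first.
 */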
679 static noinline int
680 tree_mod_log_insert_root(struct btrfs_fs_info *fs_info,
681 struct extent_buffer *old_root,
682 struct extent_buffer *new_root, gfp_t flags,
683 int log_removal)
684 {
685 struct tree_mod_elem *tm;
686 int ret;
687
688 if (tree_mod_dont_log(fs_info, NULL))
689 return 0;
690
691 if (log_removal)
692 __tree_mod_log_free_eb(fs_info, old_root);
693
694 ret = tree_mod_alloc(fs_info, flags, &tm);
695 if (ret < 0)
696 goto out;
697
698 tm->index = new_root->start >> PAGE_CACHE_SHIFT;
699 tm->old_root.logical = old_root->start;
700 tm->old_root.level = btrfs_header_level(old_root);
701 tm->generation = btrfs_header_generation(old_root);
702 tm->op = MOD_LOG_ROOT_REPLACE;
703
704 ret = __tree_mod_log_insert(fs_info, tm);
705 out:
706 tree_mod_log_write_unlock(fs_info);
707 return ret;
708 }
709
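/*
 * search the tree mod log for the block at @start. @smallest selects between
 * the oldest (smallest seq) and the newest (highest seq) matching element;
 * entries with a seq below @min_seq are ignored. Returns NULL if nothing
 * matches.
 */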
710 static struct tree_mod_elem *
711 __tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq,
712 int smallest)
713 {
714 struct rb_root *tm_root;
715 struct rb_node *node;
716 struct tree_mod_elem *cur = NULL;
717 struct tree_mod_elem *found = NULL;
718 u64 index = start >> PAGE_CACHE_SHIFT;
719
720 tree_mod_log_read_lock(fs_info);
721 tm_root = &fs_info->tree_mod_log;
722 node = tm_root->rb_node;
723 while (node) {
724 cur = container_of(node, struct tree_mod_elem, node);
725 if (cur->index < index) {
726 node = node->rb_left;
727 } else if (cur->index > index) {
728 node = node->rb_right;
729 } else if (cur->seq < min_seq) {
730 node = node->rb_left;
731 } else if (!smallest) {
732 /* we want the node with the highest seq */
733 if (found)
734 BUG_ON(found->seq > cur->seq);
735 found = cur;
736 node = node->rb_left;
737 } else if (cur->seq > min_seq) {
738 /* we want the node with the smallest seq */
739 if (found)
740 BUG_ON(found->seq < cur->seq);
741 found = cur;
742 node = node->rb_right;
743 } else {
744 found = cur;
745 break;
746 }
747 }
748 tree_mod_log_read_unlock(fs_info);
749
750 return found;
751 }
752
753 /*
754 * this returns the element from the log with the smallest time sequence
755 * value that's in the log (the oldest log item). any element with a time
756 * sequence lower than min_seq will be ignored.
757 */
758 static struct tree_mod_elem *
759 tree_mod_log_search_oldest(struct btrfs_fs_info *fs_info, u64 start,
760 u64 min_seq)
761 {
762 return __tree_mod_log_search(fs_info, start, min_seq, 1);
763 }
764
765 /*
766 * this returns the element from the log with the largest time sequence
767 * value that's in the log (the most recent log item). any element with
768 * a time sequence lower than min_seq will be ignored.
769 */
770 static struct tree_mod_elem *
771 tree_mod_log_search(struct btrfs_fs_info *fs_info, u64 start, u64 min_seq)
772 {
773 return __tree_mod_log_search(fs_info, start, min_seq, 0);
774 }
775
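/*
 * when pointers are copied from @src to @dst, log a MOD_LOG_KEY_REMOVE for
 * every source slot and a MOD_LOG_KEY_ADD for every destination slot, so the
 * copy can be undone during a rewind. Copies between two leaves are not
 * logged.
 */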
776 static noinline void
777 tree_mod_log_eb_copy(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
778 struct extent_buffer *src, unsigned long dst_offset,
779 unsigned long src_offset, int nr_items)
780 {
781 int ret;
782 int i;
783
784 if (tree_mod_dont_log(fs_info, NULL))
785 return;
786
787 if (btrfs_header_level(dst) == 0 && btrfs_header_level(src) == 0) {
788 tree_mod_log_write_unlock(fs_info);
789 return;
790 }
791
792 for (i = 0; i < nr_items; i++) {
793 ret = tree_mod_log_insert_key_locked(fs_info, src,
794 i + src_offset,
795 MOD_LOG_KEY_REMOVE);
796 BUG_ON(ret < 0);
797 ret = tree_mod_log_insert_key_locked(fs_info, dst,
798 i + dst_offset,
799 MOD_LOG_KEY_ADD);
800 BUG_ON(ret < 0);
801 }
802
803 tree_mod_log_write_unlock(fs_info);
804 }
805
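/*
 * convenience wrapper that logs a MOD_LOG_MOVE_KEYS operation for a move of
 * @nr_items pointers within a single node.
 */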
806 static inline void
807 tree_mod_log_eb_move(struct btrfs_fs_info *fs_info, struct extent_buffer *dst,
808 int dst_offset, int src_offset, int nr_items)
809 {
810 int ret;
811 ret = tree_mod_log_insert_move(fs_info, dst, dst_offset, src_offset,
812 nr_items, GFP_NOFS);
813 BUG_ON(ret < 0);
814 }
815
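/*
 * log the replacement of the key in @slot of @eb. @atomic is set by callers
 * that cannot sleep.
 */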
816 static noinline void
817 tree_mod_log_set_node_key(struct btrfs_fs_info *fs_info,
818 struct extent_buffer *eb, int slot, int atomic)
819 {
820 int ret;
821
822 ret = tree_mod_log_insert_key_mask(fs_info, eb, slot,
823 MOD_LOG_KEY_REPLACE,
824 atomic ? GFP_ATOMIC : GFP_NOFS);
825 BUG_ON(ret < 0);
826 }
827
828 static noinline void
829 tree_mod_log_free_eb(struct btrfs_fs_info *fs_info, struct extent_buffer *eb)
830 {
831 if (tree_mod_dont_log(fs_info, eb))
832 return;
833
834 __tree_mod_log_free_eb(fs_info, eb);
835
836 tree_mod_log_write_unlock(fs_info);
837 }
838
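/*
 * log that @root's node is being replaced by @new_root_node. @log_removal
 * controls whether the removal of the old root's pointers is logged as well.
 */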
839 static noinline void
840 tree_mod_log_set_root_pointer(struct btrfs_root *root,
841 struct extent_buffer *new_root_node,
842 int log_removal)
843 {
844 int ret;
845 ret = tree_mod_log_insert_root(root->fs_info, root->node,
846 new_root_node, GFP_NOFS, log_removal);
847 BUG_ON(ret < 0);
848 }
849
850 /*
851 * check if the tree block can be shared by multiple trees
852 */
853 int btrfs_block_can_be_shared(struct btrfs_root *root,
854 struct extent_buffer *buf)
855 {
856 /*
857 * Tree blocks not in reference counted trees and tree roots
858 * are never shared. If a block was allocated after the last
859 * snapshot and the block was not allocated by tree relocation,
860 * we know the block is not shared.
861 */
862 if (root->ref_cows &&
863 buf != root->node && buf != root->commit_root &&
864 (btrfs_header_generation(buf) <=
865 btrfs_root_last_snapshot(&root->root_item) ||
866 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
867 return 1;
868 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
869 if (root->ref_cows &&
870 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
871 return 1;
872 #endif
873 return 0;
874 }
875
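/*
 * update backrefs when @buf is cowed into @cow. Depending on whether the
 * block is shared and whether full backrefs are in use, references are added
 * for the new copy and possibly dropped for the old one. *last_ref is set
 * when the old block loses its last reference and can be freed.
 */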
876 static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
877 struct btrfs_root *root,
878 struct extent_buffer *buf,
879 struct extent_buffer *cow,
880 int *last_ref)
881 {
882 u64 refs;
883 u64 owner;
884 u64 flags;
885 u64 new_flags = 0;
886 int ret;
887
888 /*
889 * Backrefs update rules:
890 *
891 * Always use full backrefs for extent pointers in tree block
892 * allocated by tree relocation.
893 *
894 * If a shared tree block is no longer referenced by its owner
895 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
896 * use full backrefs for extent pointers in tree block.
897 *
898 * If a tree block is being relocated
899 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
900 * use full backrefs for extent pointers in tree block.
901 * The reason for this is some operations (such as drop tree)
902 * are only allowed for blocks that use full backrefs.
903 */
904
905 if (btrfs_block_can_be_shared(root, buf)) {
906 ret = btrfs_lookup_extent_info(trans, root, buf->start,
907 btrfs_header_level(buf), 1,
908 &refs, &flags);
909 if (ret)
910 return ret;
911 if (refs == 0) {
912 ret = -EROFS;
913 btrfs_std_error(root->fs_info, ret);
914 return ret;
915 }
916 } else {
917 refs = 1;
918 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
919 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
920 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
921 else
922 flags = 0;
923 }
924
925 owner = btrfs_header_owner(buf);
926 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
927 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
928
929 if (refs > 1) {
930 if ((owner == root->root_key.objectid ||
931 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
932 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
933 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
934 BUG_ON(ret); /* -ENOMEM */
935
936 if (root->root_key.objectid ==
937 BTRFS_TREE_RELOC_OBJECTID) {
938 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
939 BUG_ON(ret); /* -ENOMEM */
940 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
941 BUG_ON(ret); /* -ENOMEM */
942 }
943 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
944 } else {
945
946 if (root->root_key.objectid ==
947 BTRFS_TREE_RELOC_OBJECTID)
948 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
949 else
950 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
951 BUG_ON(ret); /* -ENOMEM */
952 }
953 if (new_flags != 0) {
954 int level = btrfs_header_level(buf);
955
956 ret = btrfs_set_disk_extent_flags(trans, root,
957 buf->start,
958 buf->len,
959 new_flags, level, 0);
960 if (ret)
961 return ret;
962 }
963 } else {
964 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
965 if (root->root_key.objectid ==
966 BTRFS_TREE_RELOC_OBJECTID)
967 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
968 else
969 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
970 BUG_ON(ret); /* -ENOMEM */
971 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
972 BUG_ON(ret); /* -ENOMEM */
973 }
974 clean_tree_block(trans, root, buf);
975 *last_ref = 1;
976 }
977 return 0;
978 }
979
980 /*
981 * does the dirty work in cow of a single block. The parent block (if
982 * supplied) is updated to point to the new cow copy. The new buffer is marked
983 * dirty and returned locked. If you modify the block it needs to be marked
984 * dirty again.
985 *
986 * search_start -- an allocation hint for the new block
987 *
988 * empty_size -- a hint that you plan on doing more cow. This is the size in
989 * bytes the allocator should try to find free next to the block it returns.
990 * This is just a hint and may be ignored by the allocator.
991 */
992 static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
993 struct btrfs_root *root,
994 struct extent_buffer *buf,
995 struct extent_buffer *parent, int parent_slot,
996 struct extent_buffer **cow_ret,
997 u64 search_start, u64 empty_size)
998 {
999 struct btrfs_disk_key disk_key;
1000 struct extent_buffer *cow;
1001 int level, ret;
1002 int last_ref = 0;
1003 int unlock_orig = 0;
1004 u64 parent_start;
1005
1006 if (*cow_ret == buf)
1007 unlock_orig = 1;
1008
1009 btrfs_assert_tree_locked(buf);
1010
1011 WARN_ON(root->ref_cows && trans->transid !=
1012 root->fs_info->running_transaction->transid);
1013 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
1014
1015 level = btrfs_header_level(buf);
1016
1017 if (level == 0)
1018 btrfs_item_key(buf, &disk_key, 0);
1019 else
1020 btrfs_node_key(buf, &disk_key, 0);
1021
1022 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
1023 if (parent)
1024 parent_start = parent->start;
1025 else
1026 parent_start = 0;
1027 } else
1028 parent_start = 0;
1029
1030 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
1031 root->root_key.objectid, &disk_key,
1032 level, search_start, empty_size);
1033 if (IS_ERR(cow))
1034 return PTR_ERR(cow);
1035
1036 /* cow is set to blocking by btrfs_init_new_buffer */
1037
1038 copy_extent_buffer(cow, buf, 0, 0, cow->len);
1039 btrfs_set_header_bytenr(cow, cow->start);
1040 btrfs_set_header_generation(cow, trans->transid);
1041 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
1042 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
1043 BTRFS_HEADER_FLAG_RELOC);
1044 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1045 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
1046 else
1047 btrfs_set_header_owner(cow, root->root_key.objectid);
1048
1049 write_extent_buffer(cow, root->fs_info->fsid,
1050 (unsigned long)btrfs_header_fsid(cow),
1051 BTRFS_FSID_SIZE);
1052
1053 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
1054 if (ret) {
1055 btrfs_abort_transaction(trans, root, ret);
1056 return ret;
1057 }
1058
1059 if (root->ref_cows)
1060 btrfs_reloc_cow_block(trans, root, buf, cow);
1061
1062 if (buf == root->node) {
1063 WARN_ON(parent && parent != buf);
1064 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
1065 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
1066 parent_start = buf->start;
1067 else
1068 parent_start = 0;
1069
1070 extent_buffer_get(cow);
1071 tree_mod_log_set_root_pointer(root, cow, 1);
1072 rcu_assign_pointer(root->node, cow);
1073
1074 btrfs_free_tree_block(trans, root, buf, parent_start,
1075 last_ref);
1076 free_extent_buffer(buf);
1077 add_root_to_dirty_list(root);
1078 } else {
1079 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
1080 parent_start = parent->start;
1081 else
1082 parent_start = 0;
1083
1084 WARN_ON(trans->transid != btrfs_header_generation(parent));
1085 tree_mod_log_insert_key(root->fs_info, parent, parent_slot,
1086 MOD_LOG_KEY_REPLACE);
1087 btrfs_set_node_blockptr(parent, parent_slot,
1088 cow->start);
1089 btrfs_set_node_ptr_generation(parent, parent_slot,
1090 trans->transid);
1091 btrfs_mark_buffer_dirty(parent);
1092 if (last_ref)
1093 tree_mod_log_free_eb(root->fs_info, buf);
1094 btrfs_free_tree_block(trans, root, buf, parent_start,
1095 last_ref);
1096 }
1097 if (unlock_orig)
1098 btrfs_tree_unlock(buf);
1099 free_extent_buffer_stale(buf);
1100 btrfs_mark_buffer_dirty(cow);
1101 *cow_ret = cow;
1102 return 0;
1103 }
1104
1105 /*
1106 * returns the logical address of the oldest predecessor of the given root.
1107 * entries older than time_seq are ignored.
1108 */
1109 static struct tree_mod_elem *
1110 __tree_mod_log_oldest_root(struct btrfs_fs_info *fs_info,
1111 struct extent_buffer *eb_root, u64 time_seq)
1112 {
1113 struct tree_mod_elem *tm;
1114 struct tree_mod_elem *found = NULL;
1115 u64 root_logical = eb_root->start;
1116 int looped = 0;
1117
1118 if (!time_seq)
1119 return 0;
1120
1121 /*
1122 * the very last operation that's logged for a root is the replacement
1123 * operation (if it is replaced at all). this has the index of the *new*
1124 * root, making it the very first operation that's logged for this root.
1125 */
1126 while (1) {
1127 tm = tree_mod_log_search_oldest(fs_info, root_logical,
1128 time_seq);
1129 if (!looped && !tm)
1130 return 0;
1131 /*
1132 * if there are no tree operations for the oldest root, we simply
1133 * return it. this should only happen if that (old) root is at
1134 * level 0.
1135 */
1136 if (!tm)
1137 break;
1138
1139 /*
1140 * if there's an operation that's not a root replacement, we
1141 * found the oldest version of our root. normally, we'll find a
1142 * MOD_LOG_KEY_REMOVE_WHILE_FREEING operation here.
1143 */
1144 if (tm->op != MOD_LOG_ROOT_REPLACE)
1145 break;
1146
1147 found = tm;
1148 root_logical = tm->old_root.logical;
1149 looped = 1;
1150 }
1151
1152 /* if there's no old root to return, return what we found instead */
1153 if (!found)
1154 found = tm;
1155
1156 return found;
1157 }
1158
1159 /*
1160 * tm is a pointer to the first operation to rewind within eb. then, all
1161 * previous operations will be rewound (until we reach something older than
1162 * time_seq).
1163 */
1164 static void
1165 __tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1166 u64 time_seq, struct tree_mod_elem *first_tm)
1167 {
1168 u32 n;
1169 struct rb_node *next;
1170 struct tree_mod_elem *tm = first_tm;
1171 unsigned long o_dst;
1172 unsigned long o_src;
1173 unsigned long p_size = sizeof(struct btrfs_key_ptr);
1174
1175 n = btrfs_header_nritems(eb);
1176 tree_mod_log_read_lock(fs_info);
1177 while (tm && tm->seq >= time_seq) {
1178 /*
1179 * all the operations are recorded with the operator used for
1180 * the modification. as we're going backwards, we do the
1181 * opposite of each operation here.
1182 */
1183 switch (tm->op) {
1184 case MOD_LOG_KEY_REMOVE_WHILE_FREEING:
1185 BUG_ON(tm->slot < n);
1186 /* Fallthrough */
1187 case MOD_LOG_KEY_REMOVE_WHILE_MOVING:
1188 case MOD_LOG_KEY_REMOVE:
1189 btrfs_set_node_key(eb, &tm->key, tm->slot);
1190 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1191 btrfs_set_node_ptr_generation(eb, tm->slot,
1192 tm->generation);
1193 n++;
1194 break;
1195 case MOD_LOG_KEY_REPLACE:
1196 BUG_ON(tm->slot >= n);
1197 btrfs_set_node_key(eb, &tm->key, tm->slot);
1198 btrfs_set_node_blockptr(eb, tm->slot, tm->blockptr);
1199 btrfs_set_node_ptr_generation(eb, tm->slot,
1200 tm->generation);
1201 break;
1202 case MOD_LOG_KEY_ADD:
1203 /* if a move operation is needed it's in the log */
1204 n--;
1205 break;
1206 case MOD_LOG_MOVE_KEYS:
1207 o_dst = btrfs_node_key_ptr_offset(tm->slot);
1208 o_src = btrfs_node_key_ptr_offset(tm->move.dst_slot);
1209 memmove_extent_buffer(eb, o_dst, o_src,
1210 tm->move.nr_items * p_size);
1211 break;
1212 case MOD_LOG_ROOT_REPLACE:
1213 /*
1214 * this operation is special. for roots, this must be
1215 * handled explicitly before rewinding.
1216 * for non-roots, this operation may exist if the node
1217 * was a root: root A -> child B; then A gets empty and
1218 * B is promoted to the new root. in the mod log, we'll
1219 * have a root-replace operation for B, a tree block
1220 * that is not a root. we simply ignore that operation.
1221 */
1222 break;
1223 }
1224 next = rb_next(&tm->node);
1225 if (!next)
1226 break;
1227 tm = container_of(next, struct tree_mod_elem, node);
1228 if (tm->index != first_tm->index)
1229 break;
1230 }
1231 tree_mod_log_read_unlock(fs_info);
1232 btrfs_set_header_nritems(eb, n);
1233 }
1234
1235 /*
1236 * Called with eb read locked. If the buffer cannot be rewound, the same buffer
1237 * is returned. If rewind operations happen, a fresh buffer is returned. The
1238 * returned buffer is always read-locked. If the returned buffer is not the
1239 * input buffer, the lock on the input buffer is released and the input buffer
1240 * is freed (its refcount is decremented).
1241 */
1242 static struct extent_buffer *
1243 tree_mod_log_rewind(struct btrfs_fs_info *fs_info, struct extent_buffer *eb,
1244 u64 time_seq)
1245 {
1246 struct extent_buffer *eb_rewin;
1247 struct tree_mod_elem *tm;
1248
1249 if (!time_seq)
1250 return eb;
1251
1252 if (btrfs_header_level(eb) == 0)
1253 return eb;
1254
1255 tm = tree_mod_log_search(fs_info, eb->start, time_seq);
1256 if (!tm)
1257 return eb;
1258
1259 if (tm->op == MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1260 BUG_ON(tm->slot != 0);
1261 eb_rewin = alloc_dummy_extent_buffer(eb->start,
1262 fs_info->tree_root->nodesize);
1263 BUG_ON(!eb_rewin);
1264 btrfs_set_header_bytenr(eb_rewin, eb->start);
1265 btrfs_set_header_backref_rev(eb_rewin,
1266 btrfs_header_backref_rev(eb));
1267 btrfs_set_header_owner(eb_rewin, btrfs_header_owner(eb));
1268 btrfs_set_header_level(eb_rewin, btrfs_header_level(eb));
1269 } else {
1270 eb_rewin = btrfs_clone_extent_buffer(eb);
1271 BUG_ON(!eb_rewin);
1272 }
1273
1274 extent_buffer_get(eb_rewin);
1275 btrfs_tree_read_unlock(eb);
1276 free_extent_buffer(eb);
1277
1278 extent_buffer_get(eb_rewin);
1279 btrfs_tree_read_lock(eb_rewin);
1280 __tree_mod_log_rewind(fs_info, eb_rewin, time_seq, tm);
1281 WARN_ON(btrfs_header_nritems(eb_rewin) >
1282 BTRFS_NODEPTRS_PER_BLOCK(fs_info->tree_root));
1283
1284 return eb_rewin;
1285 }
1286
1287 /*
1288 * get_old_root() rewinds the state of @root's root node to the given @time_seq
1289 * value. If there are no changes, the current root->root_node is returned. If
1290 * anything changed in between, there's a fresh buffer allocated on which the
1291 * rewind operations are done. In any case, the returned buffer is read locked.
1292 * Returns NULL on error (with no locks held).
1293 */
1294 static inline struct extent_buffer *
1295 get_old_root(struct btrfs_root *root, u64 time_seq)
1296 {
1297 struct tree_mod_elem *tm;
1298 struct extent_buffer *eb = NULL;
1299 struct extent_buffer *eb_root;
1300 struct extent_buffer *old;
1301 struct tree_mod_root *old_root = NULL;
1302 u64 old_generation = 0;
1303 u64 logical;
1304 u32 blocksize;
1305
1306 eb_root = btrfs_read_lock_root_node(root);
1307 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1308 if (!tm)
1309 return eb_root;
1310
1311 if (tm->op == MOD_LOG_ROOT_REPLACE) {
1312 old_root = &tm->old_root;
1313 old_generation = tm->generation;
1314 logical = old_root->logical;
1315 } else {
1316 logical = eb_root->start;
1317 }
1318
1319 tm = tree_mod_log_search(root->fs_info, logical, time_seq);
1320 if (old_root && tm && tm->op != MOD_LOG_KEY_REMOVE_WHILE_FREEING) {
1321 btrfs_tree_read_unlock(eb_root);
1322 free_extent_buffer(eb_root);
1323 blocksize = btrfs_level_size(root, old_root->level);
1324 old = read_tree_block(root, logical, blocksize, 0);
1325 if (!old || !extent_buffer_uptodate(old)) {
1326 free_extent_buffer(old);
1327 pr_warn("btrfs: failed to read tree block %llu from get_old_root\n",
1328 logical);
1329 WARN_ON(1);
1330 } else {
1331 eb = btrfs_clone_extent_buffer(old);
1332 free_extent_buffer(old);
1333 }
1334 } else if (old_root) {
1335 btrfs_tree_read_unlock(eb_root);
1336 free_extent_buffer(eb_root);
1337 eb = alloc_dummy_extent_buffer(logical, root->nodesize);
1338 } else {
1339 eb = btrfs_clone_extent_buffer(eb_root);
1340 btrfs_tree_read_unlock(eb_root);
1341 free_extent_buffer(eb_root);
1342 }
1343
1344 if (!eb)
1345 return NULL;
1346 extent_buffer_get(eb);
1347 btrfs_tree_read_lock(eb);
1348 if (old_root) {
1349 btrfs_set_header_bytenr(eb, eb->start);
1350 btrfs_set_header_backref_rev(eb, BTRFS_MIXED_BACKREF_REV);
1351 btrfs_set_header_owner(eb, btrfs_header_owner(eb_root));
1352 btrfs_set_header_level(eb, old_root->level);
1353 btrfs_set_header_generation(eb, old_generation);
1354 }
1355 if (tm)
1356 __tree_mod_log_rewind(root->fs_info, eb, time_seq, tm);
1357 else
1358 WARN_ON(btrfs_header_level(eb) != 0);
1359 WARN_ON(btrfs_header_nritems(eb) > BTRFS_NODEPTRS_PER_BLOCK(root));
1360
1361 return eb;
1362 }
1363
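/*
 * return the level @root had at @time_seq. If the root was replaced since
 * then, the level is taken from the logged MOD_LOG_ROOT_REPLACE element,
 * otherwise from the current root node.
 */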
1364 int btrfs_old_root_level(struct btrfs_root *root, u64 time_seq)
1365 {
1366 struct tree_mod_elem *tm;
1367 int level;
1368 struct extent_buffer *eb_root = btrfs_root_node(root);
1369
1370 tm = __tree_mod_log_oldest_root(root->fs_info, eb_root, time_seq);
1371 if (tm && tm->op == MOD_LOG_ROOT_REPLACE) {
1372 level = tm->old_root.level;
1373 } else {
1374 level = btrfs_header_level(eb_root);
1375 }
1376 free_extent_buffer(eb_root);
1377
1378 return level;
1379 }
1380
1381 static inline int should_cow_block(struct btrfs_trans_handle *trans,
1382 struct btrfs_root *root,
1383 struct extent_buffer *buf)
1384 {
1385 /* ensure we can see the force_cow */
1386 smp_rmb();
1387
1388 /*
1389 * We do not need to cow a block if
1390 * 1) this block is not created or changed in this transaction;
1391 * 2) this block does not belong to TREE_RELOC tree;
1392 * 3) the root is not forced COW.
1393 *
1394 * What is forced COW:
1395 * when we create a snapshot during committing of the transaction,
1396 * after we've finished copying the src root, we must COW the shared
1397 * block to ensure metadata consistency.
1398 */
1399 if (btrfs_header_generation(buf) == trans->transid &&
1400 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
1401 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
1402 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
1403 !root->force_cow)
1404 return 0;
1405 return 1;
1406 }
1407
1408 /*
1409 * cows a single block, see __btrfs_cow_block for the real work.
1410 * This version of it has extra checks so that a block isn't cow'd more than
1411 * once per transaction, as long as it hasn't been written yet
1412 */
1413 noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
1414 struct btrfs_root *root, struct extent_buffer *buf,
1415 struct extent_buffer *parent, int parent_slot,
1416 struct extent_buffer **cow_ret)
1417 {
1418 u64 search_start;
1419 int ret;
1420
1421 if (trans->transaction != root->fs_info->running_transaction)
1422 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1423 (unsigned long long)trans->transid,
1424 (unsigned long long)
1425 root->fs_info->running_transaction->transid);
1426
1427 if (trans->transid != root->fs_info->generation)
1428 WARN(1, KERN_CRIT "trans %llu running %llu\n",
1429 (unsigned long long)trans->transid,
1430 (unsigned long long)root->fs_info->generation);
1431
1432 if (!should_cow_block(trans, root, buf)) {
1433 *cow_ret = buf;
1434 return 0;
1435 }
1436
1437 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
1438
1439 if (parent)
1440 btrfs_set_lock_blocking(parent);
1441 btrfs_set_lock_blocking(buf);
1442
1443 ret = __btrfs_cow_block(trans, root, buf, parent,
1444 parent_slot, cow_ret, search_start, 0);
1445
1446 trace_btrfs_cow_block(root, buf, *cow_ret);
1447
1448 return ret;
1449 }
1450
1451 /*
1452 * helper function for defrag to decide if two blocks pointed to by a
1453 * node are actually close by
1454 */
1455 static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
1456 {
1457 if (blocknr < other && other - (blocknr + blocksize) < 32768)
1458 return 1;
1459 if (blocknr > other && blocknr - (other + blocksize) < 32768)
1460 return 1;
1461 return 0;
1462 }
1463
1464 /*
1465 * compare two keys in a memcmp fashion
1466 */
1467 static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
1468 {
1469 struct btrfs_key k1;
1470
1471 btrfs_disk_key_to_cpu(&k1, disk);
1472
1473 return btrfs_comp_cpu_keys(&k1, k2);
1474 }
1475
1476 /*
1477 * same as comp_keys only with two btrfs_key's
1478 */
1479 int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
1480 {
1481 if (k1->objectid > k2->objectid)
1482 return 1;
1483 if (k1->objectid < k2->objectid)
1484 return -1;
1485 if (k1->type > k2->type)
1486 return 1;
1487 if (k1->type < k2->type)
1488 return -1;
1489 if (k1->offset > k2->offset)
1490 return 1;
1491 if (k1->offset < k2->offset)
1492 return -1;
1493 return 0;
1494 }
1495
1496 /*
1497 * this is used by the defrag code to go through all the
1498 * leaves pointed to by a node and reallocate them so that
1499 * disk order is close to key order
1500 */
1501 int btrfs_realloc_node(struct btrfs_trans_handle *trans,
1502 struct btrfs_root *root, struct extent_buffer *parent,
1503 int start_slot, u64 *last_ret,
1504 struct btrfs_key *progress)
1505 {
1506 struct extent_buffer *cur;
1507 u64 blocknr;
1508 u64 gen;
1509 u64 search_start = *last_ret;
1510 u64 last_block = 0;
1511 u64 other;
1512 u32 parent_nritems;
1513 int end_slot;
1514 int i;
1515 int err = 0;
1516 int parent_level;
1517 int uptodate;
1518 u32 blocksize;
1519 int progress_passed = 0;
1520 struct btrfs_disk_key disk_key;
1521
1522 parent_level = btrfs_header_level(parent);
1523
1524 WARN_ON(trans->transaction != root->fs_info->running_transaction);
1525 WARN_ON(trans->transid != root->fs_info->generation);
1526
1527 parent_nritems = btrfs_header_nritems(parent);
1528 blocksize = btrfs_level_size(root, parent_level - 1);
1529 end_slot = parent_nritems;
1530
1531 if (parent_nritems == 1)
1532 return 0;
1533
1534 btrfs_set_lock_blocking(parent);
1535
1536 for (i = start_slot; i < end_slot; i++) {
1537 int close = 1;
1538
1539 btrfs_node_key(parent, &disk_key, i);
1540 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
1541 continue;
1542
1543 progress_passed = 1;
1544 blocknr = btrfs_node_blockptr(parent, i);
1545 gen = btrfs_node_ptr_generation(parent, i);
1546 if (last_block == 0)
1547 last_block = blocknr;
1548
1549 if (i > 0) {
1550 other = btrfs_node_blockptr(parent, i - 1);
1551 close = close_blocks(blocknr, other, blocksize);
1552 }
1553 if (!close && i < end_slot - 2) {
1554 other = btrfs_node_blockptr(parent, i + 1);
1555 close = close_blocks(blocknr, other, blocksize);
1556 }
1557 if (close) {
1558 last_block = blocknr;
1559 continue;
1560 }
1561
1562 cur = btrfs_find_tree_block(root, blocknr, blocksize);
1563 if (cur)
1564 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
1565 else
1566 uptodate = 0;
1567 if (!cur || !uptodate) {
1568 if (!cur) {
1569 cur = read_tree_block(root, blocknr,
1570 blocksize, gen);
1571 if (!cur || !extent_buffer_uptodate(cur)) {
1572 free_extent_buffer(cur);
1573 return -EIO;
1574 }
1575 } else if (!uptodate) {
1576 err = btrfs_read_buffer(cur, gen);
1577 if (err) {
1578 free_extent_buffer(cur);
1579 return err;
1580 }
1581 }
1582 }
1583 if (search_start == 0)
1584 search_start = last_block;
1585
1586 btrfs_tree_lock(cur);
1587 btrfs_set_lock_blocking(cur);
1588 err = __btrfs_cow_block(trans, root, cur, parent, i,
1589 &cur, search_start,
1590 min(16 * blocksize,
1591 (end_slot - i) * blocksize));
1592 if (err) {
1593 btrfs_tree_unlock(cur);
1594 free_extent_buffer(cur);
1595 break;
1596 }
1597 search_start = cur->start;
1598 last_block = cur->start;
1599 *last_ret = search_start;
1600 btrfs_tree_unlock(cur);
1601 free_extent_buffer(cur);
1602 }
1603 return err;
1604 }
1605
1606 /*
1607 * The leaf data grows from end-to-front in the node.
1608 * this returns the address of the start of the last item,
1609 * which is the stop of the leaf data stack
1610 */
1611 static inline unsigned int leaf_data_end(struct btrfs_root *root,
1612 struct extent_buffer *leaf)
1613 {
1614 u32 nr = btrfs_header_nritems(leaf);
1615 if (nr == 0)
1616 return BTRFS_LEAF_DATA_SIZE(root);
1617 return btrfs_item_offset_nr(leaf, nr - 1);
1618 }
1619
1620
1621 /*
1622 * search for key in the extent_buffer. The items start at offset p,
1623 * and they are item_size apart. There are 'max' items in p.
1624 *
1625 * the slot in the array is returned via slot, and it points to
1626 * the place where you would insert key if it is not found in
1627 * the array.
1628 *
1629 * slot may point to max if the key is bigger than all of the keys
1630 */
1631 static noinline int generic_bin_search(struct extent_buffer *eb,
1632 unsigned long p,
1633 int item_size, struct btrfs_key *key,
1634 int max, int *slot)
1635 {
1636 int low = 0;
1637 int high = max;
1638 int mid;
1639 int ret;
1640 struct btrfs_disk_key *tmp = NULL;
1641 struct btrfs_disk_key unaligned;
1642 unsigned long offset;
1643 char *kaddr = NULL;
1644 unsigned long map_start = 0;
1645 unsigned long map_len = 0;
1646 int err;
1647
1648 while (low < high) {
1649 mid = (low + high) / 2;
1650 offset = p + mid * item_size;
1651
1652 if (!kaddr || offset < map_start ||
1653 (offset + sizeof(struct btrfs_disk_key)) >
1654 map_start + map_len) {
1655
1656 err = map_private_extent_buffer(eb, offset,
1657 sizeof(struct btrfs_disk_key),
1658 &kaddr, &map_start, &map_len);
1659
1660 if (!err) {
1661 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1662 map_start);
1663 } else {
1664 read_extent_buffer(eb, &unaligned,
1665 offset, sizeof(unaligned));
1666 tmp = &unaligned;
1667 }
1668
1669 } else {
1670 tmp = (struct btrfs_disk_key *)(kaddr + offset -
1671 map_start);
1672 }
1673 ret = comp_keys(tmp, key);
1674
1675 if (ret < 0)
1676 low = mid + 1;
1677 else if (ret > 0)
1678 high = mid;
1679 else {
1680 *slot = mid;
1681 return 0;
1682 }
1683 }
1684 *slot = low;
1685 return 1;
1686 }
1687
1688 /*
1689 * simple bin_search frontend that does the right thing for
1690 * leaves vs nodes
1691 */
1692 static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1693 int level, int *slot)
1694 {
1695 if (level == 0)
1696 return generic_bin_search(eb,
1697 offsetof(struct btrfs_leaf, items),
1698 sizeof(struct btrfs_item),
1699 key, btrfs_header_nritems(eb),
1700 slot);
1701 else
1702 return generic_bin_search(eb,
1703 offsetof(struct btrfs_node, ptrs),
1704 sizeof(struct btrfs_key_ptr),
1705 key, btrfs_header_nritems(eb),
1706 slot);
1707 }
1708
1709 int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
1710 int level, int *slot)
1711 {
1712 return bin_search(eb, key, level, slot);
1713 }
1714
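/*
 * helpers to adjust the bytes used counter in the root item under the
 * accounting lock.
 */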
1715 static void root_add_used(struct btrfs_root *root, u32 size)
1716 {
1717 spin_lock(&root->accounting_lock);
1718 btrfs_set_root_used(&root->root_item,
1719 btrfs_root_used(&root->root_item) + size);
1720 spin_unlock(&root->accounting_lock);
1721 }
1722
1723 static void root_sub_used(struct btrfs_root *root, u32 size)
1724 {
1725 spin_lock(&root->accounting_lock);
1726 btrfs_set_root_used(&root->root_item,
1727 btrfs_root_used(&root->root_item) - size);
1728 spin_unlock(&root->accounting_lock);
1729 }
1730
1731 /* given a node and slot number, this reads the block it points to. The
1732 * extent buffer is returned with a reference taken (but unlocked).
1733 * NULL is returned on error.
1734 */
1735 static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
1736 struct extent_buffer *parent, int slot)
1737 {
1738 int level = btrfs_header_level(parent);
1739 struct extent_buffer *eb;
1740
1741 if (slot < 0)
1742 return NULL;
1743 if (slot >= btrfs_header_nritems(parent))
1744 return NULL;
1745
1746 BUG_ON(level == 0);
1747
1748 eb = read_tree_block(root, btrfs_node_blockptr(parent, slot),
1749 btrfs_level_size(root, level - 1),
1750 btrfs_node_ptr_generation(parent, slot));
1751 if (eb && !extent_buffer_uptodate(eb)) {
1752 free_extent_buffer(eb);
1753 eb = NULL;
1754 }
1755
1756 return eb;
1757 }
1758
1759 /*
1760 * node level balancing, used to make sure nodes are in proper order for
1761 * item deletion. We balance from the top down, so we have to make sure
1762 * that a deletion won't leave a node completely empty later on.
1763 */
1764 static noinline int balance_level(struct btrfs_trans_handle *trans,
1765 struct btrfs_root *root,
1766 struct btrfs_path *path, int level)
1767 {
1768 struct extent_buffer *right = NULL;
1769 struct extent_buffer *mid;
1770 struct extent_buffer *left = NULL;
1771 struct extent_buffer *parent = NULL;
1772 int ret = 0;
1773 int wret;
1774 int pslot;
1775 int orig_slot = path->slots[level];
1776 u64 orig_ptr;
1777
1778 if (level == 0)
1779 return 0;
1780
1781 mid = path->nodes[level];
1782
1783 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
1784 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
1785 WARN_ON(btrfs_header_generation(mid) != trans->transid);
1786
1787 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
1788
1789 if (level < BTRFS_MAX_LEVEL - 1) {
1790 parent = path->nodes[level + 1];
1791 pslot = path->slots[level + 1];
1792 }
1793
1794 /*
1795 * deal with the case where there is only one pointer in the root
1796 * by promoting the node below to a root
1797 */
1798 if (!parent) {
1799 struct extent_buffer *child;
1800
1801 if (btrfs_header_nritems(mid) != 1)
1802 return 0;
1803
1804 /* promote the child to a root */
1805 child = read_node_slot(root, mid, 0);
1806 if (!child) {
1807 ret = -EROFS;
1808 btrfs_std_error(root->fs_info, ret);
1809 goto enospc;
1810 }
1811
1812 btrfs_tree_lock(child);
1813 btrfs_set_lock_blocking(child);
1814 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
1815 if (ret) {
1816 btrfs_tree_unlock(child);
1817 free_extent_buffer(child);
1818 goto enospc;
1819 }
1820
1821 tree_mod_log_set_root_pointer(root, child, 1);
1822 rcu_assign_pointer(root->node, child);
1823
1824 add_root_to_dirty_list(root);
1825 btrfs_tree_unlock(child);
1826
1827 path->locks[level] = 0;
1828 path->nodes[level] = NULL;
1829 clean_tree_block(trans, root, mid);
1830 btrfs_tree_unlock(mid);
1831 /* once for the path */
1832 free_extent_buffer(mid);
1833
1834 root_sub_used(root, mid->len);
1835 btrfs_free_tree_block(trans, root, mid, 0, 1);
1836 /* once for the root ptr */
1837 free_extent_buffer_stale(mid);
1838 return 0;
1839 }
1840 if (btrfs_header_nritems(mid) >
1841 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
1842 return 0;
1843
1844 left = read_node_slot(root, parent, pslot - 1);
1845 if (left) {
1846 btrfs_tree_lock(left);
1847 btrfs_set_lock_blocking(left);
1848 wret = btrfs_cow_block(trans, root, left,
1849 parent, pslot - 1, &left);
1850 if (wret) {
1851 ret = wret;
1852 goto enospc;
1853 }
1854 }
1855 right = read_node_slot(root, parent, pslot + 1);
1856 if (right) {
1857 btrfs_tree_lock(right);
1858 btrfs_set_lock_blocking(right);
1859 wret = btrfs_cow_block(trans, root, right,
1860 parent, pslot + 1, &right);
1861 if (wret) {
1862 ret = wret;
1863 goto enospc;
1864 }
1865 }
1866
1867 /* first, try to make some room in the middle buffer */
1868 if (left) {
1869 orig_slot += btrfs_header_nritems(left);
1870 wret = push_node_left(trans, root, left, mid, 1);
1871 if (wret < 0)
1872 ret = wret;
1873 }
1874
1875 /*
1876 * then try to empty the right most buffer into the middle
1877 */
1878 if (right) {
1879 wret = push_node_left(trans, root, mid, right, 1);
1880 if (wret < 0 && wret != -ENOSPC)
1881 ret = wret;
1882 if (btrfs_header_nritems(right) == 0) {
1883 clean_tree_block(trans, root, right);
1884 btrfs_tree_unlock(right);
1885 del_ptr(root, path, level + 1, pslot + 1);
1886 root_sub_used(root, right->len);
1887 btrfs_free_tree_block(trans, root, right, 0, 1);
1888 free_extent_buffer_stale(right);
1889 right = NULL;
1890 } else {
1891 struct btrfs_disk_key right_key;
1892 btrfs_node_key(right, &right_key, 0);
1893 tree_mod_log_set_node_key(root->fs_info, parent,
1894 pslot + 1, 0);
1895 btrfs_set_node_key(parent, &right_key, pslot + 1);
1896 btrfs_mark_buffer_dirty(parent);
1897 }
1898 }
1899 if (btrfs_header_nritems(mid) == 1) {
1900 /*
1901 * we're not allowed to leave a node with one item in the
1902 * tree during a delete. A deletion from lower in the tree
1903 * could try to delete the only pointer in this node.
1904 * So, pull some keys from the left.
1905 * There has to be a left pointer at this point because
1906 * otherwise we would have pulled some pointers from the
1907 * right
1908 */
1909 if (!left) {
1910 ret = -EROFS;
1911 btrfs_std_error(root->fs_info, ret);
1912 goto enospc;
1913 }
1914 wret = balance_node_right(trans, root, mid, left);
1915 if (wret < 0) {
1916 ret = wret;
1917 goto enospc;
1918 }
1919 if (wret == 1) {
1920 wret = push_node_left(trans, root, left, mid, 1);
1921 if (wret < 0)
1922 ret = wret;
1923 }
1924 BUG_ON(wret == 1);
1925 }
1926 if (btrfs_header_nritems(mid) == 0) {
1927 clean_tree_block(trans, root, mid);
1928 btrfs_tree_unlock(mid);
1929 del_ptr(root, path, level + 1, pslot);
1930 root_sub_used(root, mid->len);
1931 btrfs_free_tree_block(trans, root, mid, 0, 1);
1932 free_extent_buffer_stale(mid);
1933 mid = NULL;
1934 } else {
1935 /* update the parent key to reflect our changes */
1936 struct btrfs_disk_key mid_key;
1937 btrfs_node_key(mid, &mid_key, 0);
1938 tree_mod_log_set_node_key(root->fs_info, parent,
1939 pslot, 0);
1940 btrfs_set_node_key(parent, &mid_key, pslot);
1941 btrfs_mark_buffer_dirty(parent);
1942 }
1943
1944 /* update the path */
1945 if (left) {
1946 if (btrfs_header_nritems(left) > orig_slot) {
1947 extent_buffer_get(left);
1948 /* left was locked after cow */
1949 path->nodes[level] = left;
1950 path->slots[level + 1] -= 1;
1951 path->slots[level] = orig_slot;
1952 if (mid) {
1953 btrfs_tree_unlock(mid);
1954 free_extent_buffer(mid);
1955 }
1956 } else {
1957 orig_slot -= btrfs_header_nritems(left);
1958 path->slots[level] = orig_slot;
1959 }
1960 }
1961 /* double check we haven't messed things up */
1962 if (orig_ptr !=
1963 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
1964 BUG();
1965 enospc:
1966 if (right) {
1967 btrfs_tree_unlock(right);
1968 free_extent_buffer(right);
1969 }
1970 if (left) {
1971 if (path->nodes[level] != left)
1972 btrfs_tree_unlock(left);
1973 free_extent_buffer(left);
1974 }
1975 return ret;
1976 }
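/*
 * Illustrative numbers for the rebalance threshold used by balance_level()
 * above (a sketch, assuming the common 4096-byte nodesize and the usual
 * on-disk sizes: a 101-byte btrfs_header and 33-byte key pointers):
 *
 *	BTRFS_NODEPTRS_PER_BLOCK(root) = (4096 - 101) / 33 = 121
 *	rebalance threshold            = 121 / 4          = 30
 *
 * So a node that still holds more than 30 pointers is left alone; below
 * that, pointers are pushed into the left or right sibling, and a node
 * that ends up empty is deleted from its parent and freed.
 */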
1977
1978 /* Node balancing for insertion. Here we only split or push nodes around
1979 * when they are completely full. This is also done top down, so we
1980 * have to be pessimistic.
1981 */
1982 static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
1983 struct btrfs_root *root,
1984 struct btrfs_path *path, int level)
1985 {
1986 struct extent_buffer *right = NULL;
1987 struct extent_buffer *mid;
1988 struct extent_buffer *left = NULL;
1989 struct extent_buffer *parent = NULL;
1990 int ret = 0;
1991 int wret;
1992 int pslot;
1993 int orig_slot = path->slots[level];
1994
1995 if (level == 0)
1996 return 1;
1997
1998 mid = path->nodes[level];
1999 WARN_ON(btrfs_header_generation(mid) != trans->transid);
2000
2001 if (level < BTRFS_MAX_LEVEL - 1) {
2002 parent = path->nodes[level + 1];
2003 pslot = path->slots[level + 1];
2004 }
2005
2006 if (!parent)
2007 return 1;
2008
2009 left = read_node_slot(root, parent, pslot - 1);
2010
2011 /* first, try to make some room in the middle buffer */
2012 if (left) {
2013 u32 left_nr;
2014
2015 btrfs_tree_lock(left);
2016 btrfs_set_lock_blocking(left);
2017
2018 left_nr = btrfs_header_nritems(left);
2019 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2020 wret = 1;
2021 } else {
2022 ret = btrfs_cow_block(trans, root, left, parent,
2023 pslot - 1, &left);
2024 if (ret)
2025 wret = 1;
2026 else {
2027 wret = push_node_left(trans, root,
2028 left, mid, 0);
2029 }
2030 }
2031 if (wret < 0)
2032 ret = wret;
2033 if (wret == 0) {
2034 struct btrfs_disk_key disk_key;
2035 orig_slot += left_nr;
2036 btrfs_node_key(mid, &disk_key, 0);
2037 tree_mod_log_set_node_key(root->fs_info, parent,
2038 pslot, 0);
2039 btrfs_set_node_key(parent, &disk_key, pslot);
2040 btrfs_mark_buffer_dirty(parent);
2041 if (btrfs_header_nritems(left) > orig_slot) {
2042 path->nodes[level] = left;
2043 path->slots[level + 1] -= 1;
2044 path->slots[level] = orig_slot;
2045 btrfs_tree_unlock(mid);
2046 free_extent_buffer(mid);
2047 } else {
2048 orig_slot -=
2049 btrfs_header_nritems(left);
2050 path->slots[level] = orig_slot;
2051 btrfs_tree_unlock(left);
2052 free_extent_buffer(left);
2053 }
2054 return 0;
2055 }
2056 btrfs_tree_unlock(left);
2057 free_extent_buffer(left);
2058 }
2059 right = read_node_slot(root, parent, pslot + 1);
2060
2061 /*
2062 * then try to empty the right most buffer into the middle
2063 */
2064 if (right) {
2065 u32 right_nr;
2066
2067 btrfs_tree_lock(right);
2068 btrfs_set_lock_blocking(right);
2069
2070 right_nr = btrfs_header_nritems(right);
2071 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
2072 wret = 1;
2073 } else {
2074 ret = btrfs_cow_block(trans, root, right,
2075 parent, pslot + 1,
2076 &right);
2077 if (ret)
2078 wret = 1;
2079 else {
2080 wret = balance_node_right(trans, root,
2081 right, mid);
2082 }
2083 }
2084 if (wret < 0)
2085 ret = wret;
2086 if (wret == 0) {
2087 struct btrfs_disk_key disk_key;
2088
2089 btrfs_node_key(right, &disk_key, 0);
2090 tree_mod_log_set_node_key(root->fs_info, parent,
2091 pslot + 1, 0);
2092 btrfs_set_node_key(parent, &disk_key, pslot + 1);
2093 btrfs_mark_buffer_dirty(parent);
2094
2095 if (btrfs_header_nritems(mid) <= orig_slot) {
2096 path->nodes[level] = right;
2097 path->slots[level + 1] += 1;
2098 path->slots[level] = orig_slot -
2099 btrfs_header_nritems(mid);
2100 btrfs_tree_unlock(mid);
2101 free_extent_buffer(mid);
2102 } else {
2103 btrfs_tree_unlock(right);
2104 free_extent_buffer(right);
2105 }
2106 return 0;
2107 }
2108 btrfs_tree_unlock(right);
2109 free_extent_buffer(right);
2110 }
2111 return 1;
2112 }
2113
2114 /*
2115 * readahead one full node of leaves, finding things that are close
2116 * to the block in 'slot', and triggering readahead on them.

2117 */
2118 static void reada_for_search(struct btrfs_root *root,
2119 struct btrfs_path *path,
2120 int level, int slot, u64 objectid)
2121 {
2122 struct extent_buffer *node;
2123 struct btrfs_disk_key disk_key;
2124 u32 nritems;
2125 u64 search;
2126 u64 target;
2127 u64 nread = 0;
2128 u64 gen;
2129 int direction = path->reada;
2130 struct extent_buffer *eb;
2131 u32 nr;
2132 u32 blocksize;
2133 u32 nscan = 0;
2134
2135 if (level != 1)
2136 return;
2137
2138 if (!path->nodes[level])
2139 return;
2140
2141 node = path->nodes[level];
2142
2143 search = btrfs_node_blockptr(node, slot);
2144 blocksize = btrfs_level_size(root, level - 1);
2145 eb = btrfs_find_tree_block(root, search, blocksize);
2146 if (eb) {
2147 free_extent_buffer(eb);
2148 return;
2149 }
2150
2151 target = search;
2152
2153 nritems = btrfs_header_nritems(node);
2154 nr = slot;
2155
2156 while (1) {
2157 if (direction < 0) {
2158 if (nr == 0)
2159 break;
2160 nr--;
2161 } else if (direction > 0) {
2162 nr++;
2163 if (nr >= nritems)
2164 break;
2165 }
2166 if (path->reada < 0 && objectid) {
2167 btrfs_node_key(node, &disk_key, nr);
2168 if (btrfs_disk_key_objectid(&disk_key) != objectid)
2169 break;
2170 }
2171 search = btrfs_node_blockptr(node, nr);
2172 if ((search <= target && target - search <= 65536) ||
2173 (search > target && search - target <= 65536)) {
2174 gen = btrfs_node_ptr_generation(node, nr);
2175 readahead_tree_block(root, search, blocksize, gen);
2176 nread += blocksize;
2177 }
2178 nscan++;
2179 if ((nread > 65536 || nscan > 32))
2180 break;
2181 }
2182 }
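/*
 * Rough numbers for the readahead window above: only blocks whose disk
 * address lies within 64KiB (65536 bytes) of the target are read ahead,
 * and the scan stops after queueing 64KiB of readahead or looking at 32
 * slots, whichever comes first. Assuming 4KiB leaves, that is roughly 16
 * extra leaf reads per lookup at most, so a single search never triggers
 * an unbounded amount of IO.
 */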
2183
2184 static noinline void reada_for_balance(struct btrfs_root *root,
2185 struct btrfs_path *path, int level)
2186 {
2187 int slot;
2188 int nritems;
2189 struct extent_buffer *parent;
2190 struct extent_buffer *eb;
2191 u64 gen;
2192 u64 block1 = 0;
2193 u64 block2 = 0;
2194 int blocksize;
2195
2196 parent = path->nodes[level + 1];
2197 if (!parent)
2198 return;
2199
2200 nritems = btrfs_header_nritems(parent);
2201 slot = path->slots[level + 1];
2202 blocksize = btrfs_level_size(root, level);
2203
2204 if (slot > 0) {
2205 block1 = btrfs_node_blockptr(parent, slot - 1);
2206 gen = btrfs_node_ptr_generation(parent, slot - 1);
2207 eb = btrfs_find_tree_block(root, block1, blocksize);
2208 /*
2209 * if we get -EAGAIN from btrfs_buffer_uptodate, we
2210 * don't want to return -EAGAIN here. That would loop
2211 * forever
2212 */
2213 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2214 block1 = 0;
2215 free_extent_buffer(eb);
2216 }
2217 if (slot + 1 < nritems) {
2218 block2 = btrfs_node_blockptr(parent, slot + 1);
2219 gen = btrfs_node_ptr_generation(parent, slot + 1);
2220 eb = btrfs_find_tree_block(root, block2, blocksize);
2221 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
2222 block2 = 0;
2223 free_extent_buffer(eb);
2224 }
2225
2226 if (block1)
2227 readahead_tree_block(root, block1, blocksize, 0);
2228 if (block2)
2229 readahead_tree_block(root, block2, blocksize, 0);
2230 }
2231
2232
2233 /*
2234 * when we walk down the tree, it is usually safe to unlock the higher layers
2235 * in the tree. The exceptions are when our path goes through slot 0, because
2236 * operations on the tree might require changing key pointers higher up in the
2237 * tree.
2238 *
2239 * callers might also have set path->keep_locks, which tells this code to keep
2240 * the lock if the path points to the last slot in the block. This is part of
2241 * walking through the tree, and selecting the next slot in the higher block.
2242 *
2243 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. So
2244 * if lowest_unlock is 1, level 0 won't be unlocked
2245 */
2246 static noinline void unlock_up(struct btrfs_path *path, int level,
2247 int lowest_unlock, int min_write_lock_level,
2248 int *write_lock_level)
2249 {
2250 int i;
2251 int skip_level = level;
2252 int no_skips = 0;
2253 struct extent_buffer *t;
2254
2255 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2256 if (!path->nodes[i])
2257 break;
2258 if (!path->locks[i])
2259 break;
2260 if (!no_skips && path->slots[i] == 0) {
2261 skip_level = i + 1;
2262 continue;
2263 }
2264 if (!no_skips && path->keep_locks) {
2265 u32 nritems;
2266 t = path->nodes[i];
2267 nritems = btrfs_header_nritems(t);
2268 if (nritems < 1 || path->slots[i] >= nritems - 1) {
2269 skip_level = i + 1;
2270 continue;
2271 }
2272 }
2273 if (skip_level < i && i >= lowest_unlock)
2274 no_skips = 1;
2275
2276 t = path->nodes[i];
2277 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
2278 btrfs_tree_unlock_rw(t, path->locks[i]);
2279 path->locks[i] = 0;
2280 if (write_lock_level &&
2281 i > min_write_lock_level &&
2282 i <= *write_lock_level) {
2283 *write_lock_level = i - 1;
2284 }
2285 }
2286 }
2287 }
2288
2289 /*
2290 * This releases any locks held in the path starting at level and
2291 * going all the way up to the root.
2292 *
2293 * btrfs_search_slot will keep the lock held on higher nodes in a few
2294 * corner cases, such as COW of the block at slot zero in the node. This
2295 * ignores those rules, and it should only be called when there are no
2296 * more updates to be done higher up in the tree.
2297 */
2298 noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
2299 {
2300 int i;
2301
2302 if (path->keep_locks)
2303 return;
2304
2305 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2306 if (!path->nodes[i])
2307 continue;
2308 if (!path->locks[i])
2309 continue;
2310 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
2311 path->locks[i] = 0;
2312 }
2313 }
2314
2315 /*
2316 * helper function for btrfs_search_slot. The goal is to find a block
2317 * in cache without setting the path to blocking. If we find the block
2318 * we return zero and the path is unchanged.
2319 *
2320 * If we can't find the block, we set the path blocking and do some
2321 * readahead. -EAGAIN is returned and the search must be repeated.
2322 */
2323 static int
2324 read_block_for_search(struct btrfs_trans_handle *trans,
2325 struct btrfs_root *root, struct btrfs_path *p,
2326 struct extent_buffer **eb_ret, int level, int slot,
2327 struct btrfs_key *key, u64 time_seq)
2328 {
2329 u64 blocknr;
2330 u64 gen;
2331 u32 blocksize;
2332 struct extent_buffer *b = *eb_ret;
2333 struct extent_buffer *tmp;
2334 int ret;
2335
2336 blocknr = btrfs_node_blockptr(b, slot);
2337 gen = btrfs_node_ptr_generation(b, slot);
2338 blocksize = btrfs_level_size(root, level - 1);
2339
2340 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
2341 if (tmp) {
2342 /* first we do an atomic uptodate check */
2343 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
2344 *eb_ret = tmp;
2345 return 0;
2346 }
2347
2348 /* the pages were up to date, but we failed
2349 * the generation number check. Do a full
2350 * read for the generation number that is correct.
2351 * We must do this without dropping locks so
2352 * we can trust our generation number
2353 */
2354 btrfs_set_path_blocking(p);
2355
2356 /* now we're allowed to do a blocking uptodate check */
2357 ret = btrfs_read_buffer(tmp, gen);
2358 if (!ret) {
2359 *eb_ret = tmp;
2360 return 0;
2361 }
2362 free_extent_buffer(tmp);
2363 btrfs_release_path(p);
2364 return -EIO;
2365 }
2366
2367 /*
2368 * reduce lock contention at high levels
2369 * of the btree by dropping locks before
2370 * we read. Don't release the lock on the current
2371 * level because we need to walk this node to figure
2372 * out which blocks to read.
2373 */
2374 btrfs_unlock_up_safe(p, level + 1);
2375 btrfs_set_path_blocking(p);
2376
2377 free_extent_buffer(tmp);
2378 if (p->reada)
2379 reada_for_search(root, p, level, slot, key->objectid);
2380
2381 btrfs_release_path(p);
2382
2383 ret = -EAGAIN;
2384 tmp = read_tree_block(root, blocknr, blocksize, 0);
2385 if (tmp) {
2386 /*
2387 * If the read above didn't mark this buffer up to date,
2388 * it will never end up being up to date. Set ret to -EIO now
2389 * and give up so that our caller doesn't loop forever
2390 * on our EAGAINs.
2391 */
2392 if (!btrfs_buffer_uptodate(tmp, 0, 0))
2393 ret = -EIO;
2394 free_extent_buffer(tmp);
2395 }
2396 return ret;
2397 }
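/*
 * The usual caller pattern for the -EAGAIN contract above (only a sketch;
 * btrfs_search_slot() below does exactly this):
 *
 *	err = read_block_for_search(trans, root, p, &b, level, slot,
 *				    key, 0);
 *	if (err == -EAGAIN)
 *		goto again;
 *	if (err) {
 *		ret = err;
 *		goto done;
 *	}
 *
 * On -EAGAIN the path has already been released, so the retry must
 * restart the walk from the root.
 */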
2398
2399 /*
2400 * helper function for btrfs_search_slot. This does all of the checks
2401 * for node-level blocks and does any balancing required based on
2402 * the ins_len.
2403 *
2404 * If no extra work was required, zero is returned. If we had to
2405 * drop the path, -EAGAIN is returned and btrfs_search_slot must
2406 * start over
2407 */
2408 static int
2409 setup_nodes_for_search(struct btrfs_trans_handle *trans,
2410 struct btrfs_root *root, struct btrfs_path *p,
2411 struct extent_buffer *b, int level, int ins_len,
2412 int *write_lock_level)
2413 {
2414 int ret;
2415 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
2416 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
2417 int sret;
2418
2419 if (*write_lock_level < level + 1) {
2420 *write_lock_level = level + 1;
2421 btrfs_release_path(p);
2422 goto again;
2423 }
2424
2425 btrfs_set_path_blocking(p);
2426 reada_for_balance(root, p, level);
2427 sret = split_node(trans, root, p, level);
2428 btrfs_clear_path_blocking(p, NULL, 0);
2429
2430 BUG_ON(sret > 0);
2431 if (sret) {
2432 ret = sret;
2433 goto done;
2434 }
2435 b = p->nodes[level];
2436 } else if (ins_len < 0 && btrfs_header_nritems(b) <
2437 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
2438 int sret;
2439
2440 if (*write_lock_level < level + 1) {
2441 *write_lock_level = level + 1;
2442 btrfs_release_path(p);
2443 goto again;
2444 }
2445
2446 btrfs_set_path_blocking(p);
2447 reada_for_balance(root, p, level);
2448 sret = balance_level(trans, root, p, level);
2449 btrfs_clear_path_blocking(p, NULL, 0);
2450
2451 if (sret) {
2452 ret = sret;
2453 goto done;
2454 }
2455 b = p->nodes[level];
2456 if (!b) {
2457 btrfs_release_path(p);
2458 goto again;
2459 }
2460 BUG_ON(btrfs_header_nritems(b) == 1);
2461 }
2462 return 0;
2463
2464 again:
2465 ret = -EAGAIN;
2466 done:
2467 return ret;
2468 }
2469
2470 /*
2471 * look for key in the tree. path is filled in with nodes along the way
2472 * if key is found, we return zero and you can find the item in the leaf
2473 * level of the path (level 0)
2474 *
2475 * If the key isn't found, the path points to the slot where it should
2476 * be inserted, and 1 is returned. If there are other errors during the
2477 * search a negative error number is returned.
2478 *
2479 * if ins_len > 0, nodes and leaves will be split as we walk down the
2480 * tree. if ins_len < 0, nodes will be merged as we walk down the tree (if
2481 * possible)
2482 */
2483 int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
2484 *root, struct btrfs_key *key, struct btrfs_path *p, int
2485 ins_len, int cow)
2486 {
2487 struct extent_buffer *b;
2488 int slot;
2489 int ret;
2490 int err;
2491 int level;
2492 int lowest_unlock = 1;
2493 int root_lock;
2494 /* everything at write_lock_level or lower must be write locked */
2495 int write_lock_level = 0;
2496 u8 lowest_level = 0;
2497 int min_write_lock_level;
2498
2499 lowest_level = p->lowest_level;
2500 WARN_ON(lowest_level && ins_len > 0);
2501 WARN_ON(p->nodes[0] != NULL);
2502
2503 if (ins_len < 0) {
2504 lowest_unlock = 2;
2505
2506 /* when we are removing items, we might have to go up to level
2507 * two as we update tree pointers Make sure we keep write
2508 * for those levels as well
2509 */
2510 write_lock_level = 2;
2511 } else if (ins_len > 0) {
2512 /*
2513 * for inserting items, make sure we have a write lock on
2514 * level 1 so we can update keys
2515 */
2516 write_lock_level = 1;
2517 }
2518
2519 if (!cow)
2520 write_lock_level = -1;
2521
2522 if (cow && (p->keep_locks || p->lowest_level))
2523 write_lock_level = BTRFS_MAX_LEVEL;
2524
2525 min_write_lock_level = write_lock_level;
2526
2527 again:
2528 /*
2529 * we try very hard to do read locks on the root
2530 */
2531 root_lock = BTRFS_READ_LOCK;
2532 level = 0;
2533 if (p->search_commit_root) {
2534 /*
2535 * the commit roots are read only
2536 * so we always do read locks
2537 */
2538 b = root->commit_root;
2539 extent_buffer_get(b);
2540 level = btrfs_header_level(b);
2541 if (!p->skip_locking)
2542 btrfs_tree_read_lock(b);
2543 } else {
2544 if (p->skip_locking) {
2545 b = btrfs_root_node(root);
2546 level = btrfs_header_level(b);
2547 } else {
2548 /* we don't know the level of the root node
2549 * until we actually have it read locked
2550 */
2551 b = btrfs_read_lock_root_node(root);
2552 level = btrfs_header_level(b);
2553 if (level <= write_lock_level) {
2554 /* whoops, must trade for write lock */
2555 btrfs_tree_read_unlock(b);
2556 free_extent_buffer(b);
2557 b = btrfs_lock_root_node(root);
2558 root_lock = BTRFS_WRITE_LOCK;
2559
2560 /* the level might have changed, check again */
2561 level = btrfs_header_level(b);
2562 }
2563 }
2564 }
2565 p->nodes[level] = b;
2566 if (!p->skip_locking)
2567 p->locks[level] = root_lock;
2568
2569 while (b) {
2570 level = btrfs_header_level(b);
2571
2572 /*
2573 * setup the path here so we can release it under lock
2574 * contention with the cow code
2575 */
2576 if (cow) {
2577 /*
2578 * if we don't really need to cow this block
2579 * then we don't want to set the path blocking,
2580 * so we test it here
2581 */
2582 if (!should_cow_block(trans, root, b))
2583 goto cow_done;
2584
2585 btrfs_set_path_blocking(p);
2586
2587 /*
2588 * must have write locks on this node and the
2589 * parent
2590 */
2591 if (level > write_lock_level ||
2592 (level + 1 > write_lock_level &&
2593 level + 1 < BTRFS_MAX_LEVEL &&
2594 p->nodes[level + 1])) {
2595 write_lock_level = level + 1;
2596 btrfs_release_path(p);
2597 goto again;
2598 }
2599
2600 err = btrfs_cow_block(trans, root, b,
2601 p->nodes[level + 1],
2602 p->slots[level + 1], &b);
2603 if (err) {
2604 ret = err;
2605 goto done;
2606 }
2607 }
2608 cow_done:
2609 BUG_ON(!cow && ins_len);
2610
2611 p->nodes[level] = b;
2612 btrfs_clear_path_blocking(p, NULL, 0);
2613
2614 /*
2615 * we have a lock on b and as long as we aren't changing
2616 * the tree, there is no way for the items in b to change.
2617 * It is safe to drop the lock on our parent before we
2618 * go through the expensive btree search on b.
2619 *
2620 * If cow is true, then we might be changing slot zero,
2621 * which may require changing the parent. So, we can't
2622 * drop the lock until after we know which slot we're
2623 * operating on.
2624 */
2625 if (!cow)
2626 btrfs_unlock_up_safe(p, level + 1);
2627
2628 ret = bin_search(b, key, level, &slot);
2629
2630 if (level != 0) {
2631 int dec = 0;
2632 if (ret && slot > 0) {
2633 dec = 1;
2634 slot -= 1;
2635 }
2636 p->slots[level] = slot;
2637 err = setup_nodes_for_search(trans, root, p, b, level,
2638 ins_len, &write_lock_level);
2639 if (err == -EAGAIN)
2640 goto again;
2641 if (err) {
2642 ret = err;
2643 goto done;
2644 }
2645 b = p->nodes[level];
2646 slot = p->slots[level];
2647
2648 /*
2649 * slot 0 is special, if we change the key
2650 * we have to update the parent pointer
2651 * which means we must have a write lock
2652 * on the parent
2653 */
2654 if (slot == 0 && cow &&
2655 write_lock_level < level + 1) {
2656 write_lock_level = level + 1;
2657 btrfs_release_path(p);
2658 goto again;
2659 }
2660
2661 unlock_up(p, level, lowest_unlock,
2662 min_write_lock_level, &write_lock_level);
2663
2664 if (level == lowest_level) {
2665 if (dec)
2666 p->slots[level]++;
2667 goto done;
2668 }
2669
2670 err = read_block_for_search(trans, root, p,
2671 &b, level, slot, key, 0);
2672 if (err == -EAGAIN)
2673 goto again;
2674 if (err) {
2675 ret = err;
2676 goto done;
2677 }
2678
2679 if (!p->skip_locking) {
2680 level = btrfs_header_level(b);
2681 if (level <= write_lock_level) {
2682 err = btrfs_try_tree_write_lock(b);
2683 if (!err) {
2684 btrfs_set_path_blocking(p);
2685 btrfs_tree_lock(b);
2686 btrfs_clear_path_blocking(p, b,
2687 BTRFS_WRITE_LOCK);
2688 }
2689 p->locks[level] = BTRFS_WRITE_LOCK;
2690 } else {
2691 err = btrfs_try_tree_read_lock(b);
2692 if (!err) {
2693 btrfs_set_path_blocking(p);
2694 btrfs_tree_read_lock(b);
2695 btrfs_clear_path_blocking(p, b,
2696 BTRFS_READ_LOCK);
2697 }
2698 p->locks[level] = BTRFS_READ_LOCK;
2699 }
2700 p->nodes[level] = b;
2701 }
2702 } else {
2703 p->slots[level] = slot;
2704 if (ins_len > 0 &&
2705 btrfs_leaf_free_space(root, b) < ins_len) {
2706 if (write_lock_level < 1) {
2707 write_lock_level = 1;
2708 btrfs_release_path(p);
2709 goto again;
2710 }
2711
2712 btrfs_set_path_blocking(p);
2713 err = split_leaf(trans, root, key,
2714 p, ins_len, ret == 0);
2715 btrfs_clear_path_blocking(p, NULL, 0);
2716
2717 BUG_ON(err > 0);
2718 if (err) {
2719 ret = err;
2720 goto done;
2721 }
2722 }
2723 if (!p->search_for_split)
2724 unlock_up(p, level, lowest_unlock,
2725 min_write_lock_level, &write_lock_level);
2726 goto done;
2727 }
2728 }
2729 ret = 1;
2730 done:
2731 /*
2732 * we don't really know what they plan on doing with the path
2733 * from here on, so for now just mark it as blocking
2734 */
2735 if (!p->leave_spinning)
2736 btrfs_set_path_blocking(p);
2737 if (ret < 0)
2738 btrfs_release_path(p);
2739 return ret;
2740 }
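/*
 * A minimal caller sketch for btrfs_search_slot() above, doing a
 * read-only lookup (no transaction, no cow). 'root' and 'ino' are
 * assumed to be in scope; the key type is only an example:
 *
 *	struct btrfs_path *path;
 *	struct btrfs_key key;
 *	int ret;
 *
 *	path = btrfs_alloc_path();
 *	if (!path)
 *		return -ENOMEM;
 *
 *	key.objectid = ino;
 *	key.type = BTRFS_INODE_ITEM_KEY;
 *	key.offset = 0;
 *
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret == 0) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *		int slot = path->slots[0];
 *		... read the item at (leaf, slot) ...
 *	}
 *	btrfs_free_path(path);
 *
 * ret == 1 means the key was not found and the path points at the slot
 * where it would be inserted; ret < 0 is an error.
 */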
2741
2742 /*
2743 * Like btrfs_search_slot, this looks for a key in the given tree. It uses the
2744 * current state of the tree together with the operations recorded in the tree
2745 * modification log to search for the key in a previous version of this tree, as
2746 * denoted by the time_seq parameter.
2747 *
2748 * Naturally, there is no support for insert, delete or cow operations.
2749 *
2750 * The resulting path and return value will be set up as if we called
2751 * btrfs_search_slot at that point in time with ins_len and cow both set to 0.
2752 */
2753 int btrfs_search_old_slot(struct btrfs_root *root, struct btrfs_key *key,
2754 struct btrfs_path *p, u64 time_seq)
2755 {
2756 struct extent_buffer *b;
2757 int slot;
2758 int ret;
2759 int err;
2760 int level;
2761 int lowest_unlock = 1;
2762 u8 lowest_level = 0;
2763
2764 lowest_level = p->lowest_level;
2765 WARN_ON(p->nodes[0] != NULL);
2766
2767 if (p->search_commit_root) {
2768 BUG_ON(time_seq);
2769 return btrfs_search_slot(NULL, root, key, p, 0, 0);
2770 }
2771
2772 again:
2773 b = get_old_root(root, time_seq);
2774 level = btrfs_header_level(b);
2775 p->locks[level] = BTRFS_READ_LOCK;
2776
2777 while (b) {
2778 level = btrfs_header_level(b);
2779 p->nodes[level] = b;
2780 btrfs_clear_path_blocking(p, NULL, 0);
2781
2782 /*
2783 * we have a lock on b and as long as we aren't changing
2784 * the tree, there is no way for the items in b to change.
2785 * It is safe to drop the lock on our parent before we
2786 * go through the expensive btree search on b.
2787 */
2788 btrfs_unlock_up_safe(p, level + 1);
2789
2790 ret = bin_search(b, key, level, &slot);
2791
2792 if (level != 0) {
2793 int dec = 0;
2794 if (ret && slot > 0) {
2795 dec = 1;
2796 slot -= 1;
2797 }
2798 p->slots[level] = slot;
2799 unlock_up(p, level, lowest_unlock, 0, NULL);
2800
2801 if (level == lowest_level) {
2802 if (dec)
2803 p->slots[level]++;
2804 goto done;
2805 }
2806
2807 err = read_block_for_search(NULL, root, p, &b, level,
2808 slot, key, time_seq);
2809 if (err == -EAGAIN)
2810 goto again;
2811 if (err) {
2812 ret = err;
2813 goto done;
2814 }
2815
2816 level = btrfs_header_level(b);
2817 err = btrfs_try_tree_read_lock(b);
2818 if (!err) {
2819 btrfs_set_path_blocking(p);
2820 btrfs_tree_read_lock(b);
2821 btrfs_clear_path_blocking(p, b,
2822 BTRFS_READ_LOCK);
2823 }
2824 b = tree_mod_log_rewind(root->fs_info, b, time_seq);
2825 p->locks[level] = BTRFS_READ_LOCK;
2826 p->nodes[level] = b;
2827 } else {
2828 p->slots[level] = slot;
2829 unlock_up(p, level, lowest_unlock, 0, NULL);
2830 goto done;
2831 }
2832 }
2833 ret = 1;
2834 done:
2835 if (!p->leave_spinning)
2836 btrfs_set_path_blocking(p);
2837 if (ret < 0)
2838 btrfs_release_path(p);
2839
2840 return ret;
2841 }
2842
2843 /*
2844 * helper to use instead of search slot if no exact match is needed but
2845 * instead the next or previous item should be returned.
2846 * When find_higher is true, the next higher item is returned, the next lower
2847 * otherwise.
2848 * When return_any and find_higher are both true, and no higher item is found,
2849 * return the next lower instead.
2850 * When return_any is true and find_higher is false, and no lower item is found,
2851 * return the next higher instead.
2852 * It returns 0 if any item is found, 1 if none is found (tree empty), and
2853 * < 0 on error
2854 */
2855 int btrfs_search_slot_for_read(struct btrfs_root *root,
2856 struct btrfs_key *key, struct btrfs_path *p,
2857 int find_higher, int return_any)
2858 {
2859 int ret;
2860 struct extent_buffer *leaf;
2861
2862 again:
2863 ret = btrfs_search_slot(NULL, root, key, p, 0, 0);
2864 if (ret <= 0)
2865 return ret;
2866 /*
2867 * a return value of 1 means the path is at the position where the
2868 * item should be inserted. Normally this is the next bigger item,
2869 * but in case the previous item is the last in a leaf, path points
2870 * to the first free slot in the previous leaf, i.e. at an invalid
2871 * item.
2872 */
2873 leaf = p->nodes[0];
2874
2875 if (find_higher) {
2876 if (p->slots[0] >= btrfs_header_nritems(leaf)) {
2877 ret = btrfs_next_leaf(root, p);
2878 if (ret <= 0)
2879 return ret;
2880 if (!return_any)
2881 return 1;
2882 /*
2883 * no higher item found, return the next
2884 * lower instead
2885 */
2886 return_any = 0;
2887 find_higher = 0;
2888 btrfs_release_path(p);
2889 goto again;
2890 }
2891 } else {
2892 if (p->slots[0] == 0) {
2893 ret = btrfs_prev_leaf(root, p);
2894 if (ret < 0)
2895 return ret;
2896 if (!ret) {
2897 p->slots[0] = btrfs_header_nritems(leaf) - 1;
2898 return 0;
2899 }
2900 if (!return_any)
2901 return 1;
2902 /*
2903 * no lower item found, return the next
2904 * higher instead
2905 */
2906 return_any = 0;
2907 find_higher = 1;
2908 btrfs_release_path(p);
2909 goto again;
2910 } else {
2911 --p->slots[0];
2912 }
2913 }
2914 return 0;
2915 }
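/*
 * Example use of btrfs_search_slot_for_read() above: find the first item
 * at or after 'key' (find_higher == 1), falling back to the next lower
 * item if nothing higher exists (return_any == 1). A sketch, with 'root',
 * 'path' and 'key' assumed to be set up by the caller:
 *
 *	ret = btrfs_search_slot_for_read(root, &key, path, 1, 1);
 *	if (ret == 0) {
 *		struct btrfs_key found;
 *
 *		btrfs_item_key_to_cpu(path->nodes[0], &found,
 *				      path->slots[0]);
 *		... use 'found' ...
 *	}
 *	btrfs_release_path(path);
 */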
2916
2917 /*
2918 * adjust the pointers going up the tree, starting at level
2919 * making sure the right key of each node points to 'key'.
2920 * This is used after shifting pointers to the left, so it stops
2921 * fixing up pointers when a given leaf/node is not in slot 0 of the
2922 * higher levels
2923 *
2924 */
2925 static void fixup_low_keys(struct btrfs_root *root, struct btrfs_path *path,
2926 struct btrfs_disk_key *key, int level)
2927 {
2928 int i;
2929 struct extent_buffer *t;
2930
2931 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
2932 int tslot = path->slots[i];
2933 if (!path->nodes[i])
2934 break;
2935 t = path->nodes[i];
2936 tree_mod_log_set_node_key(root->fs_info, t, tslot, 1);
2937 btrfs_set_node_key(t, key, tslot);
2938 btrfs_mark_buffer_dirty(path->nodes[i]);
2939 if (tslot != 0)
2940 break;
2941 }
2942 }
2943
2944 /*
2945 * update item key.
2946 *
2947 * This function isn't completely safe. It's the caller's responsibility
2948 * that the new key won't break the order
2949 */
2950 void btrfs_set_item_key_safe(struct btrfs_root *root, struct btrfs_path *path,
2951 struct btrfs_key *new_key)
2952 {
2953 struct btrfs_disk_key disk_key;
2954 struct extent_buffer *eb;
2955 int slot;
2956
2957 eb = path->nodes[0];
2958 slot = path->slots[0];
2959 if (slot > 0) {
2960 btrfs_item_key(eb, &disk_key, slot - 1);
2961 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
2962 }
2963 if (slot < btrfs_header_nritems(eb) - 1) {
2964 btrfs_item_key(eb, &disk_key, slot + 1);
2965 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
2966 }
2967
2968 btrfs_cpu_key_to_disk(&disk_key, new_key);
2969 btrfs_set_item_key(eb, &disk_key, slot);
2970 btrfs_mark_buffer_dirty(eb);
2971 if (slot == 0)
2972 fixup_low_keys(root, path, &disk_key, 1);
2973 }
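/*
 * Sketch of a typical btrfs_set_item_key_safe() call. The caller already
 * holds the leaf through a search; 'new_start' is a made-up value here
 * and must keep the key strictly between its neighbours:
 *
 *	struct btrfs_key new_key;
 *
 *	btrfs_item_key_to_cpu(path->nodes[0], &new_key, path->slots[0]);
 *	new_key.offset = new_start;
 *	btrfs_set_item_key_safe(root, path, &new_key);
 *
 * If the changed item sits in slot 0, fixup_low_keys() above rewrites the
 * matching node keys all the way up the path.
 */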
2974
2975 /*
2976 * try to push data from one node into the next node left in the
2977 * tree.
2978 *
2979 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
2980 * error, and > 0 if there was no room in the left hand block.
2981 */
2982 static int push_node_left(struct btrfs_trans_handle *trans,
2983 struct btrfs_root *root, struct extent_buffer *dst,
2984 struct extent_buffer *src, int empty)
2985 {
2986 int push_items = 0;
2987 int src_nritems;
2988 int dst_nritems;
2989 int ret = 0;
2990
2991 src_nritems = btrfs_header_nritems(src);
2992 dst_nritems = btrfs_header_nritems(dst);
2993 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
2994 WARN_ON(btrfs_header_generation(src) != trans->transid);
2995 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2996
2997 if (!empty && src_nritems <= 8)
2998 return 1;
2999
3000 if (push_items <= 0)
3001 return 1;
3002
3003 if (empty) {
3004 push_items = min(src_nritems, push_items);
3005 if (push_items < src_nritems) {
3006 /* leave at least 8 pointers in the node if
3007 * we aren't going to empty it
3008 */
3009 if (src_nritems - push_items < 8) {
3010 if (push_items <= 8)
3011 return 1;
3012 push_items -= 8;
3013 }
3014 }
3015 } else
3016 push_items = min(src_nritems - 8, push_items);
3017
3018 tree_mod_log_eb_copy(root->fs_info, dst, src, dst_nritems, 0,
3019 push_items);
3020 copy_extent_buffer(dst, src,
3021 btrfs_node_key_ptr_offset(dst_nritems),
3022 btrfs_node_key_ptr_offset(0),
3023 push_items * sizeof(struct btrfs_key_ptr));
3024
3025 if (push_items < src_nritems) {
3026 /*
3027 * don't call tree_mod_log_eb_move here, key removal was already
3028 * fully logged by tree_mod_log_eb_copy above.
3029 */
3030 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
3031 btrfs_node_key_ptr_offset(push_items),
3032 (src_nritems - push_items) *
3033 sizeof(struct btrfs_key_ptr));
3034 }
3035 btrfs_set_header_nritems(src, src_nritems - push_items);
3036 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3037 btrfs_mark_buffer_dirty(src);
3038 btrfs_mark_buffer_dirty(dst);
3039
3040 return ret;
3041 }
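/*
 * Worked example for push_node_left() above, assuming roughly 121
 * pointers fit per node (4K nodesize): if dst holds 100 pointers and src
 * holds 50, push_items starts at 121 - 100 = 21; in the non-empty case it
 * is then capped at src_nritems - 8 = 42, so 21 pointers move, dst ends
 * up full and src keeps 29.
 */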
3042
3043 /*
3044 * try to push data from one node into the next node right in the
3045 * tree.
3046 *
3047 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
3048 * error, and > 0 if there was no room in the right hand block.
3049 *
3050 * this will only push up to 1/2 the contents of the left node over
3051 */
3052 static int balance_node_right(struct btrfs_trans_handle *trans,
3053 struct btrfs_root *root,
3054 struct extent_buffer *dst,
3055 struct extent_buffer *src)
3056 {
3057 int push_items = 0;
3058 int max_push;
3059 int src_nritems;
3060 int dst_nritems;
3061 int ret = 0;
3062
3063 WARN_ON(btrfs_header_generation(src) != trans->transid);
3064 WARN_ON(btrfs_header_generation(dst) != trans->transid);
3065
3066 src_nritems = btrfs_header_nritems(src);
3067 dst_nritems = btrfs_header_nritems(dst);
3068 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
3069 if (push_items <= 0)
3070 return 1;
3071
3072 if (src_nritems < 4)
3073 return 1;
3074
3075 max_push = src_nritems / 2 + 1;
3076 /* don't try to empty the node */
3077 if (max_push >= src_nritems)
3078 return 1;
3079
3080 if (max_push < push_items)
3081 push_items = max_push;
3082
3083 tree_mod_log_eb_move(root->fs_info, dst, push_items, 0, dst_nritems);
3084 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
3085 btrfs_node_key_ptr_offset(0),
3086 (dst_nritems) *
3087 sizeof(struct btrfs_key_ptr));
3088
3089 tree_mod_log_eb_copy(root->fs_info, dst, src, 0,
3090 src_nritems - push_items, push_items);
3091 copy_extent_buffer(dst, src,
3092 btrfs_node_key_ptr_offset(0),
3093 btrfs_node_key_ptr_offset(src_nritems - push_items),
3094 push_items * sizeof(struct btrfs_key_ptr));
3095
3096 btrfs_set_header_nritems(src, src_nritems - push_items);
3097 btrfs_set_header_nritems(dst, dst_nritems + push_items);
3098
3099 btrfs_mark_buffer_dirty(src);
3100 btrfs_mark_buffer_dirty(dst);
3101
3102 return ret;
3103 }
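/*
 * Worked example for balance_node_right() above, with the same ~121
 * pointer capacity: if dst holds 70 pointers and src holds 50, push_items
 * starts at 121 - 70 = 51, but max_push = 50 / 2 + 1 = 26 limits it, so
 * 26 pointers move and src keeps 24. At most about half of the source is
 * ever moved, so the node is never emptied by this helper.
 */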
3104
3105 /*
3106 * helper function to insert a new root level in the tree.
3107 * A new node is allocated, and a single item is inserted to
3108 * point to the existing root
3109 *
3110 * returns zero on success or < 0 on failure.
3111 */
3112 static noinline int insert_new_root(struct btrfs_trans_handle *trans,
3113 struct btrfs_root *root,
3114 struct btrfs_path *path, int level)
3115 {
3116 u64 lower_gen;
3117 struct extent_buffer *lower;
3118 struct extent_buffer *c;
3119 struct extent_buffer *old;
3120 struct btrfs_disk_key lower_key;
3121
3122 BUG_ON(path->nodes[level]);
3123 BUG_ON(path->nodes[level-1] != root->node);
3124
3125 lower = path->nodes[level-1];
3126 if (level == 1)
3127 btrfs_item_key(lower, &lower_key, 0);
3128 else
3129 btrfs_node_key(lower, &lower_key, 0);
3130
3131 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3132 root->root_key.objectid, &lower_key,
3133 level, root->node->start, 0);
3134 if (IS_ERR(c))
3135 return PTR_ERR(c);
3136
3137 root_add_used(root, root->nodesize);
3138
3139 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
3140 btrfs_set_header_nritems(c, 1);
3141 btrfs_set_header_level(c, level);
3142 btrfs_set_header_bytenr(c, c->start);
3143 btrfs_set_header_generation(c, trans->transid);
3144 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
3145 btrfs_set_header_owner(c, root->root_key.objectid);
3146
3147 write_extent_buffer(c, root->fs_info->fsid,
3148 (unsigned long)btrfs_header_fsid(c),
3149 BTRFS_FSID_SIZE);
3150
3151 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
3152 (unsigned long)btrfs_header_chunk_tree_uuid(c),
3153 BTRFS_UUID_SIZE);
3154
3155 btrfs_set_node_key(c, &lower_key, 0);
3156 btrfs_set_node_blockptr(c, 0, lower->start);
3157 lower_gen = btrfs_header_generation(lower);
3158 WARN_ON(lower_gen != trans->transid);
3159
3160 btrfs_set_node_ptr_generation(c, 0, lower_gen);
3161
3162 btrfs_mark_buffer_dirty(c);
3163
3164 old = root->node;
3165 tree_mod_log_set_root_pointer(root, c, 0);
3166 rcu_assign_pointer(root->node, c);
3167
3168 /* the super has an extra ref to root->node */
3169 free_extent_buffer(old);
3170
3171 add_root_to_dirty_list(root);
3172 extent_buffer_get(c);
3173 path->nodes[level] = c;
3174 path->locks[level] = BTRFS_WRITE_LOCK;
3175 path->slots[level] = 0;
3176 return 0;
3177 }
3178
3179 /*
3180 * worker function to insert a single pointer in a node.
3181 * the node should have enough room for the pointer already
3182 *
3183 * slot and level indicate where you want the key to go, and
3184 * blocknr is the block the key points to.
3185 */
3186 static void insert_ptr(struct btrfs_trans_handle *trans,
3187 struct btrfs_root *root, struct btrfs_path *path,
3188 struct btrfs_disk_key *key, u64 bytenr,
3189 int slot, int level)
3190 {
3191 struct extent_buffer *lower;
3192 int nritems;
3193 int ret;
3194
3195 BUG_ON(!path->nodes[level]);
3196 btrfs_assert_tree_locked(path->nodes[level]);
3197 lower = path->nodes[level];
3198 nritems = btrfs_header_nritems(lower);
3199 BUG_ON(slot > nritems);
3200 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
3201 if (slot != nritems) {
3202 if (level)
3203 tree_mod_log_eb_move(root->fs_info, lower, slot + 1,
3204 slot, nritems - slot);
3205 memmove_extent_buffer(lower,
3206 btrfs_node_key_ptr_offset(slot + 1),
3207 btrfs_node_key_ptr_offset(slot),
3208 (nritems - slot) * sizeof(struct btrfs_key_ptr));
3209 }
3210 if (level) {
3211 ret = tree_mod_log_insert_key(root->fs_info, lower, slot,
3212 MOD_LOG_KEY_ADD);
3213 BUG_ON(ret < 0);
3214 }
3215 btrfs_set_node_key(lower, key, slot);
3216 btrfs_set_node_blockptr(lower, slot, bytenr);
3217 WARN_ON(trans->transid == 0);
3218 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
3219 btrfs_set_header_nritems(lower, nritems + 1);
3220 btrfs_mark_buffer_dirty(lower);
3221 }
3222
3223 /*
3224 * split the node at the specified level in path in two.
3225 * The path is corrected to point to the appropriate node after the split
3226 *
3227 * Before splitting this tries to make some room in the node by pushing
3228 * left and right, if either one works, it returns right away.
3229 *
3230 * returns 0 on success and < 0 on failure
3231 */
3232 static noinline int split_node(struct btrfs_trans_handle *trans,
3233 struct btrfs_root *root,
3234 struct btrfs_path *path, int level)
3235 {
3236 struct extent_buffer *c;
3237 struct extent_buffer *split;
3238 struct btrfs_disk_key disk_key;
3239 int mid;
3240 int ret;
3241 u32 c_nritems;
3242
3243 c = path->nodes[level];
3244 WARN_ON(btrfs_header_generation(c) != trans->transid);
3245 if (c == root->node) {
3246 /*
3247 * trying to split the root, let's make a new one
3248 *
3249 * tree mod log: We don't log the removal of the old root in
3250 * insert_new_root, because that root buffer will be kept as a
3251 * normal node. We are going to log removal of half of the
3252 * elements below with tree_mod_log_eb_copy. We're holding a
3253 * tree lock on the buffer, which is why we cannot race with
3254 * other tree_mod_log users.
3255 */
3256 ret = insert_new_root(trans, root, path, level + 1);
3257 if (ret)
3258 return ret;
3259 } else {
3260 ret = push_nodes_for_insert(trans, root, path, level);
3261 c = path->nodes[level];
3262 if (!ret && btrfs_header_nritems(c) <
3263 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
3264 return 0;
3265 if (ret < 0)
3266 return ret;
3267 }
3268
3269 c_nritems = btrfs_header_nritems(c);
3270 mid = (c_nritems + 1) / 2;
3271 btrfs_node_key(c, &disk_key, mid);
3272
3273 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
3274 root->root_key.objectid,
3275 &disk_key, level, c->start, 0);
3276 if (IS_ERR(split))
3277 return PTR_ERR(split);
3278
3279 root_add_used(root, root->nodesize);
3280
3281 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
3282 btrfs_set_header_level(split, btrfs_header_level(c));
3283 btrfs_set_header_bytenr(split, split->start);
3284 btrfs_set_header_generation(split, trans->transid);
3285 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
3286 btrfs_set_header_owner(split, root->root_key.objectid);
3287 write_extent_buffer(split, root->fs_info->fsid,
3288 (unsigned long)btrfs_header_fsid(split),
3289 BTRFS_FSID_SIZE);
3290 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
3291 (unsigned long)btrfs_header_chunk_tree_uuid(split),
3292 BTRFS_UUID_SIZE);
3293
3294 tree_mod_log_eb_copy(root->fs_info, split, c, 0, mid, c_nritems - mid);
3295 copy_extent_buffer(split, c,
3296 btrfs_node_key_ptr_offset(0),
3297 btrfs_node_key_ptr_offset(mid),
3298 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
3299 btrfs_set_header_nritems(split, c_nritems - mid);
3300 btrfs_set_header_nritems(c, mid);
3301 ret = 0;
3302
3303 btrfs_mark_buffer_dirty(c);
3304 btrfs_mark_buffer_dirty(split);
3305
3306 insert_ptr(trans, root, path, &disk_key, split->start,
3307 path->slots[level + 1] + 1, level + 1);
3308
3309 if (path->slots[level] >= mid) {
3310 path->slots[level] -= mid;
3311 btrfs_tree_unlock(c);
3312 free_extent_buffer(c);
3313 path->nodes[level] = split;
3314 path->slots[level + 1] += 1;
3315 } else {
3316 btrfs_tree_unlock(split);
3317 free_extent_buffer(split);
3318 }
3319 return ret;
3320 }
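/*
 * Worked example for split_node() above: with 121 pointers in 'c',
 * mid = (121 + 1) / 2 = 61, so the new 'split' node receives the upper
 * 60 pointers and 'c' keeps the lower 61. A path that pointed at slot 80
 * of 'c' ends up at slot 80 - 61 = 19 of 'split', with the parent slot
 * advanced by one.
 */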
3321
3322 /*
3323 * how many bytes are required to store the items in a leaf. start
3324 * and nr indicate which items in the leaf to check. This totals up the
3325 * space used both by the item structs and the item data
3326 */
3327 static int leaf_space_used(struct extent_buffer *l, int start, int nr)
3328 {
3329 struct btrfs_item *start_item;
3330 struct btrfs_item *end_item;
3331 struct btrfs_map_token token;
3332 int data_len;
3333 int nritems = btrfs_header_nritems(l);
3334 int end = min(nritems, start + nr) - 1;
3335
3336 if (!nr)
3337 return 0;
3338 btrfs_init_map_token(&token);
3339 start_item = btrfs_item_nr(l, start);
3340 end_item = btrfs_item_nr(l, end);
3341 data_len = btrfs_token_item_offset(l, start_item, &token) +
3342 btrfs_token_item_size(l, start_item, &token);
3343 data_len = data_len - btrfs_token_item_offset(l, end_item, &token);
3344 data_len += sizeof(struct btrfs_item) * nr;
3345 WARN_ON(data_len < 0);
3346 return data_len;
3347 }
3348
3349 /*
3350 * The space between the end of the leaf items and
3351 * the start of the leaf data. IOW, how much room
3352 * the leaf has left for both items and data
3353 */
3354 noinline int btrfs_leaf_free_space(struct btrfs_root *root,
3355 struct extent_buffer *leaf)
3356 {
3357 int nritems = btrfs_header_nritems(leaf);
3358 int ret;
3359 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
3360 if (ret < 0) {
3361 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
3362 "used %d nritems %d\n",
3363 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
3364 leaf_space_used(leaf, 0, nritems), nritems);
3365 }
3366 return ret;
3367 }
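/*
 * Illustrative numbers for the leaf space accounting above (assuming a
 * 4096-byte leaf and the usual 101-byte header, so
 * BTRFS_LEAF_DATA_SIZE(root) = 3995): a leaf holding 10 items with 100
 * bytes of data each uses 10 * sizeof(struct btrfs_item) + 10 * 100 =
 * 250 + 1000 = 1250 bytes, leaving 2745 bytes free for new items and
 * their data.
 */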
3368
3369 /*
3370 * min slot controls the lowest index we're willing to push to the
3371 * right. We'll push up to and including min_slot, but no lower
3372 */
3373 static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
3374 struct btrfs_root *root,
3375 struct btrfs_path *path,
3376 int data_size, int empty,
3377 struct extent_buffer *right,
3378 int free_space, u32 left_nritems,
3379 u32 min_slot)
3380 {
3381 struct extent_buffer *left = path->nodes[0];
3382 struct extent_buffer *upper = path->nodes[1];
3383 struct btrfs_map_token token;
3384 struct btrfs_disk_key disk_key;
3385 int slot;
3386 u32 i;
3387 int push_space = 0;
3388 int push_items = 0;
3389 struct btrfs_item *item;
3390 u32 nr;
3391 u32 right_nritems;
3392 u32 data_end;
3393 u32 this_item_size;
3394
3395 btrfs_init_map_token(&token);
3396
3397 if (empty)
3398 nr = 0;
3399 else
3400 nr = max_t(u32, 1, min_slot);
3401
3402 if (path->slots[0] >= left_nritems)
3403 push_space += data_size;
3404
3405 slot = path->slots[1];
3406 i = left_nritems - 1;
3407 while (i >= nr) {
3408 item = btrfs_item_nr(left, i);
3409
3410 if (!empty && push_items > 0) {
3411 if (path->slots[0] > i)
3412 break;
3413 if (path->slots[0] == i) {
3414 int space = btrfs_leaf_free_space(root, left);
3415 if (space + push_space * 2 > free_space)
3416 break;
3417 }
3418 }
3419
3420 if (path->slots[0] == i)
3421 push_space += data_size;
3422
3423 this_item_size = btrfs_item_size(left, item);
3424 if (this_item_size + sizeof(*item) + push_space > free_space)
3425 break;
3426
3427 push_items++;
3428 push_space += this_item_size + sizeof(*item);
3429 if (i == 0)
3430 break;
3431 i--;
3432 }
3433
3434 if (push_items == 0)
3435 goto out_unlock;
3436
3437 WARN_ON(!empty && push_items == left_nritems);
3438
3439 /* push left to right */
3440 right_nritems = btrfs_header_nritems(right);
3441
3442 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
3443 push_space -= leaf_data_end(root, left);
3444
3445 /* make room in the right data area */
3446 data_end = leaf_data_end(root, right);
3447 memmove_extent_buffer(right,
3448 btrfs_leaf_data(right) + data_end - push_space,
3449 btrfs_leaf_data(right) + data_end,
3450 BTRFS_LEAF_DATA_SIZE(root) - data_end);
3451
3452 /* copy from the left data area */
3453 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
3454 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3455 btrfs_leaf_data(left) + leaf_data_end(root, left),
3456 push_space);
3457
3458 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
3459 btrfs_item_nr_offset(0),
3460 right_nritems * sizeof(struct btrfs_item));
3461
3462 /* copy the items from left to right */
3463 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
3464 btrfs_item_nr_offset(left_nritems - push_items),
3465 push_items * sizeof(struct btrfs_item));
3466
3467 /* update the item pointers */
3468 right_nritems += push_items;
3469 btrfs_set_header_nritems(right, right_nritems);
3470 push_space = BTRFS_LEAF_DATA_SIZE(root);
3471 for (i = 0; i < right_nritems; i++) {
3472 item = btrfs_item_nr(right, i);
3473 push_space -= btrfs_token_item_size(right, item, &token);
3474 btrfs_set_token_item_offset(right, item, push_space, &token);
3475 }
3476
3477 left_nritems -= push_items;
3478 btrfs_set_header_nritems(left, left_nritems);
3479
3480 if (left_nritems)
3481 btrfs_mark_buffer_dirty(left);
3482 else
3483 clean_tree_block(trans, root, left);
3484
3485 btrfs_mark_buffer_dirty(right);
3486
3487 btrfs_item_key(right, &disk_key, 0);
3488 btrfs_set_node_key(upper, &disk_key, slot + 1);
3489 btrfs_mark_buffer_dirty(upper);
3490
3491 /* then fixup the leaf pointer in the path */
3492 if (path->slots[0] >= left_nritems) {
3493 path->slots[0] -= left_nritems;
3494 if (btrfs_header_nritems(path->nodes[0]) == 0)
3495 clean_tree_block(trans, root, path->nodes[0]);
3496 btrfs_tree_unlock(path->nodes[0]);
3497 free_extent_buffer(path->nodes[0]);
3498 path->nodes[0] = right;
3499 path->slots[1] += 1;
3500 } else {
3501 btrfs_tree_unlock(right);
3502 free_extent_buffer(right);
3503 }
3504 return 0;
3505
3506 out_unlock:
3507 btrfs_tree_unlock(right);
3508 free_extent_buffer(right);
3509 return 1;
3510 }
3511
3512 /*
3513 * push some data in the path leaf to the right, trying to free up at
3514 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3515 *
3516 * returns 1 if the push failed because the other node didn't have enough
3517 * room, 0 if everything worked out and < 0 if there were major errors.
3518 *
3519 * this will push starting from min_slot to the end of the leaf. It won't
3520 * push any slot lower than min_slot
3521 */
3522 static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
3523 *root, struct btrfs_path *path,
3524 int min_data_size, int data_size,
3525 int empty, u32 min_slot)
3526 {
3527 struct extent_buffer *left = path->nodes[0];
3528 struct extent_buffer *right;
3529 struct extent_buffer *upper;
3530 int slot;
3531 int free_space;
3532 u32 left_nritems;
3533 int ret;
3534
3535 if (!path->nodes[1])
3536 return 1;
3537
3538 slot = path->slots[1];
3539 upper = path->nodes[1];
3540 if (slot >= btrfs_header_nritems(upper) - 1)
3541 return 1;
3542
3543 btrfs_assert_tree_locked(path->nodes[1]);
3544
3545 right = read_node_slot(root, upper, slot + 1);
3546 if (right == NULL)
3547 return 1;
3548
3549 btrfs_tree_lock(right);
3550 btrfs_set_lock_blocking(right);
3551
3552 free_space = btrfs_leaf_free_space(root, right);
3553 if (free_space < data_size)
3554 goto out_unlock;
3555
3556 /* cow and double check */
3557 ret = btrfs_cow_block(trans, root, right, upper,
3558 slot + 1, &right);
3559 if (ret)
3560 goto out_unlock;
3561
3562 free_space = btrfs_leaf_free_space(root, right);
3563 if (free_space < data_size)
3564 goto out_unlock;
3565
3566 left_nritems = btrfs_header_nritems(left);
3567 if (left_nritems == 0)
3568 goto out_unlock;
3569
3570 return __push_leaf_right(trans, root, path, min_data_size, empty,
3571 right, free_space, left_nritems, min_slot);
3572 out_unlock:
3573 btrfs_tree_unlock(right);
3574 free_extent_buffer(right);
3575 return 1;
3576 }
3577
3578 /*
3579 * push some data in the path leaf to the left, trying to free up at
3580 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3581 *
3582 * max_slot can put a limit on how far into the leaf we'll push items. The
3583 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3584 * items
3585 */
3586 static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
3587 struct btrfs_root *root,
3588 struct btrfs_path *path, int data_size,
3589 int empty, struct extent_buffer *left,
3590 int free_space, u32 right_nritems,
3591 u32 max_slot)
3592 {
3593 struct btrfs_disk_key disk_key;
3594 struct extent_buffer *right = path->nodes[0];
3595 int i;
3596 int push_space = 0;
3597 int push_items = 0;
3598 struct btrfs_item *item;
3599 u32 old_left_nritems;
3600 u32 nr;
3601 int ret = 0;
3602 u32 this_item_size;
3603 u32 old_left_item_size;
3604 struct btrfs_map_token token;
3605
3606 btrfs_init_map_token(&token);
3607
3608 if (empty)
3609 nr = min(right_nritems, max_slot);
3610 else
3611 nr = min(right_nritems - 1, max_slot);
3612
3613 for (i = 0; i < nr; i++) {
3614 item = btrfs_item_nr(right, i);
3615
3616 if (!empty && push_items > 0) {
3617 if (path->slots[0] < i)
3618 break;
3619 if (path->slots[0] == i) {
3620 int space = btrfs_leaf_free_space(root, right);
3621 if (space + push_space * 2 > free_space)
3622 break;
3623 }
3624 }
3625
3626 if (path->slots[0] == i)
3627 push_space += data_size;
3628
3629 this_item_size = btrfs_item_size(right, item);
3630 if (this_item_size + sizeof(*item) + push_space > free_space)
3631 break;
3632
3633 push_items++;
3634 push_space += this_item_size + sizeof(*item);
3635 }
3636
3637 if (push_items == 0) {
3638 ret = 1;
3639 goto out;
3640 }
3641 if (!empty && push_items == btrfs_header_nritems(right))
3642 WARN_ON(1);
3643
3644 /* push data from right to left */
3645 copy_extent_buffer(left, right,
3646 btrfs_item_nr_offset(btrfs_header_nritems(left)),
3647 btrfs_item_nr_offset(0),
3648 push_items * sizeof(struct btrfs_item));
3649
3650 push_space = BTRFS_LEAF_DATA_SIZE(root) -
3651 btrfs_item_offset_nr(right, push_items - 1);
3652
3653 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
3654 leaf_data_end(root, left) - push_space,
3655 btrfs_leaf_data(right) +
3656 btrfs_item_offset_nr(right, push_items - 1),
3657 push_space);
3658 old_left_nritems = btrfs_header_nritems(left);
3659 BUG_ON(old_left_nritems <= 0);
3660
3661 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
3662 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
3663 u32 ioff;
3664
3665 item = btrfs_item_nr(left, i);
3666
3667 ioff = btrfs_token_item_offset(left, item, &token);
3668 btrfs_set_token_item_offset(left, item,
3669 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
3670 &token);
3671 }
3672 btrfs_set_header_nritems(left, old_left_nritems + push_items);
3673
3674 /* fixup right node */
3675 if (push_items > right_nritems)
3676 WARN(1, KERN_CRIT "push items %d nr %u\n", push_items,
3677 right_nritems);
3678
3679 if (push_items < right_nritems) {
3680 push_space = btrfs_item_offset_nr(right, push_items - 1) -
3681 leaf_data_end(root, right);
3682 memmove_extent_buffer(right, btrfs_leaf_data(right) +
3683 BTRFS_LEAF_DATA_SIZE(root) - push_space,
3684 btrfs_leaf_data(right) +
3685 leaf_data_end(root, right), push_space);
3686
3687 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
3688 btrfs_item_nr_offset(push_items),
3689 (btrfs_header_nritems(right) - push_items) *
3690 sizeof(struct btrfs_item));
3691 }
3692 right_nritems -= push_items;
3693 btrfs_set_header_nritems(right, right_nritems);
3694 push_space = BTRFS_LEAF_DATA_SIZE(root);
3695 for (i = 0; i < right_nritems; i++) {
3696 item = btrfs_item_nr(right, i);
3697
3698 push_space = push_space - btrfs_token_item_size(right,
3699 item, &token);
3700 btrfs_set_token_item_offset(right, item, push_space, &token);
3701 }
3702
3703 btrfs_mark_buffer_dirty(left);
3704 if (right_nritems)
3705 btrfs_mark_buffer_dirty(right);
3706 else
3707 clean_tree_block(trans, root, right);
3708
3709 btrfs_item_key(right, &disk_key, 0);
3710 fixup_low_keys(root, path, &disk_key, 1);
3711
3712 /* then fixup the leaf pointer in the path */
3713 if (path->slots[0] < push_items) {
3714 path->slots[0] += old_left_nritems;
3715 btrfs_tree_unlock(path->nodes[0]);
3716 free_extent_buffer(path->nodes[0]);
3717 path->nodes[0] = left;
3718 path->slots[1] -= 1;
3719 } else {
3720 btrfs_tree_unlock(left);
3721 free_extent_buffer(left);
3722 path->slots[0] -= push_items;
3723 }
3724 BUG_ON(path->slots[0] < 0);
3725 return ret;
3726 out:
3727 btrfs_tree_unlock(left);
3728 free_extent_buffer(left);
3729 return ret;
3730 }
3731
3732 /*
3733 * push some data in the path leaf to the left, trying to free up at
3734 * least data_size bytes. returns zero if the push worked, nonzero otherwise
3735 *
3736 * max_slot can put a limit on how far into the leaf we'll push items. The
3737 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
3738 * items
3739 */
3740 static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
3741 *root, struct btrfs_path *path, int min_data_size,
3742 int data_size, int empty, u32 max_slot)
3743 {
3744 struct extent_buffer *right = path->nodes[0];
3745 struct extent_buffer *left;
3746 int slot;
3747 int free_space;
3748 u32 right_nritems;
3749 int ret = 0;
3750
3751 slot = path->slots[1];
3752 if (slot == 0)
3753 return 1;
3754 if (!path->nodes[1])
3755 return 1;
3756
3757 right_nritems = btrfs_header_nritems(right);
3758 if (right_nritems == 0)
3759 return 1;
3760
3761 btrfs_assert_tree_locked(path->nodes[1]);
3762
3763 left = read_node_slot(root, path->nodes[1], slot - 1);
3764 if (left == NULL)
3765 return 1;
3766
3767 btrfs_tree_lock(left);
3768 btrfs_set_lock_blocking(left);
3769
3770 free_space = btrfs_leaf_free_space(root, left);
3771 if (free_space < data_size) {
3772 ret = 1;
3773 goto out;
3774 }
3775
3776 /* cow and double check */
3777 ret = btrfs_cow_block(trans, root, left,
3778 path->nodes[1], slot - 1, &left);
3779 if (ret) {
3780 /* we hit -ENOSPC, but it isn't fatal here */
3781 if (ret == -ENOSPC)
3782 ret = 1;
3783 goto out;
3784 }
3785
3786 free_space = btrfs_leaf_free_space(root, left);
3787 if (free_space < data_size) {
3788 ret = 1;
3789 goto out;
3790 }
3791
3792 return __push_leaf_left(trans, root, path, min_data_size,
3793 empty, left, free_space, right_nritems,
3794 max_slot);
3795 out:
3796 btrfs_tree_unlock(left);
3797 free_extent_buffer(left);
3798 return ret;
3799 }
3800
3801 /*
3802 * split the path's leaf in two, making sure there is at least data_size
3803 * available for the resulting leaf level of the path.
3804 */
3805 static noinline void copy_for_split(struct btrfs_trans_handle *trans,
3806 struct btrfs_root *root,
3807 struct btrfs_path *path,
3808 struct extent_buffer *l,
3809 struct extent_buffer *right,
3810 int slot, int mid, int nritems)
3811 {
3812 int data_copy_size;
3813 int rt_data_off;
3814 int i;
3815 struct btrfs_disk_key disk_key;
3816 struct btrfs_map_token token;
3817
3818 btrfs_init_map_token(&token);
3819
3820 nritems = nritems - mid;
3821 btrfs_set_header_nritems(right, nritems);
3822 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
3823
3824 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
3825 btrfs_item_nr_offset(mid),
3826 nritems * sizeof(struct btrfs_item));
3827
3828 copy_extent_buffer(right, l,
3829 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
3830 data_copy_size, btrfs_leaf_data(l) +
3831 leaf_data_end(root, l), data_copy_size);
3832
3833 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
3834 btrfs_item_end_nr(l, mid);
3835
3836 for (i = 0; i < nritems; i++) {
3837 struct btrfs_item *item = btrfs_item_nr(right, i);
3838 u32 ioff;
3839
3840 ioff = btrfs_token_item_offset(right, item, &token);
3841 btrfs_set_token_item_offset(right, item,
3842 ioff + rt_data_off, &token);
3843 }
3844
3845 btrfs_set_header_nritems(l, mid);
3846 btrfs_item_key(right, &disk_key, 0);
3847 insert_ptr(trans, root, path, &disk_key, right->start,
3848 path->slots[1] + 1, 1);
3849
3850 btrfs_mark_buffer_dirty(right);
3851 btrfs_mark_buffer_dirty(l);
3852 BUG_ON(path->slots[0] != slot);
3853
3854 if (mid <= slot) {
3855 btrfs_tree_unlock(path->nodes[0]);
3856 free_extent_buffer(path->nodes[0]);
3857 path->nodes[0] = right;
3858 path->slots[0] -= mid;
3859 path->slots[1] += 1;
3860 } else {
3861 btrfs_tree_unlock(right);
3862 free_extent_buffer(right);
3863 }
3864
3865 BUG_ON(path->slots[0] < 0);
3866 }
3867
3868 /*
3869 * double splits happen when we need to insert a big item in the middle
3870 * of a leaf. A double split can leave us with 3 mostly empty leaves:
3871 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
3872 * A B C
3873 *
3874 * We avoid this by trying to push the items on either side of our target
3875 * into the adjacent leaves. If all goes well we can avoid the double split
3876 * completely.
3877 */
3878 static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
3879 struct btrfs_root *root,
3880 struct btrfs_path *path,
3881 int data_size)
3882 {
3883 int ret;
3884 int progress = 0;
3885 int slot;
3886 u32 nritems;
3887
3888 slot = path->slots[0];
3889
3890 /*
3891 * try to push all the items after our slot into the
3892 * right leaf
3893 */
3894 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
3895 if (ret < 0)
3896 return ret;
3897
3898 if (ret == 0)
3899 progress++;
3900
3901 nritems = btrfs_header_nritems(path->nodes[0]);
3902 /*
3903 * our goal is to get our slot at the start or end of a leaf. If
3904 * we've done so, we're done.
3905 */
3906 if (path->slots[0] == 0 || path->slots[0] == nritems)
3907 return 0;
3908
3909 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3910 return 0;
3911
3912 /* try to push all the items before our slot into the left leaf */
3913 slot = path->slots[0];
3914 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
3915 if (ret < 0)
3916 return ret;
3917
3918 if (ret == 0)
3919 progress++;
3920
3921 if (progress)
3922 return 0;
3923 return 1;
3924 }
3925
3926 /*
3927 * split the path's leaf in two, making sure there is at least data_size
3928 * available for the resulting leaf level of the path.
3929 *
3930 * returns 0 if all went well and < 0 on failure.
3931 */
3932 static noinline int split_leaf(struct btrfs_trans_handle *trans,
3933 struct btrfs_root *root,
3934 struct btrfs_key *ins_key,
3935 struct btrfs_path *path, int data_size,
3936 int extend)
3937 {
3938 struct btrfs_disk_key disk_key;
3939 struct extent_buffer *l;
3940 u32 nritems;
3941 int mid;
3942 int slot;
3943 struct extent_buffer *right;
3944 int ret = 0;
3945 int wret;
3946 int split;
3947 int num_doubles = 0;
3948 int tried_avoid_double = 0;
3949
3950 l = path->nodes[0];
3951 slot = path->slots[0];
3952 if (extend && data_size + btrfs_item_size_nr(l, slot) +
3953 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
3954 return -EOVERFLOW;
3955
3956 /* first try to make some room by pushing left and right */
3957 if (data_size && path->nodes[1]) {
3958 wret = push_leaf_right(trans, root, path, data_size,
3959 data_size, 0, 0);
3960 if (wret < 0)
3961 return wret;
3962 if (wret) {
3963 wret = push_leaf_left(trans, root, path, data_size,
3964 data_size, 0, (u32)-1);
3965 if (wret < 0)
3966 return wret;
3967 }
3968 l = path->nodes[0];
3969
3970 /* did the pushes work? */
3971 if (btrfs_leaf_free_space(root, l) >= data_size)
3972 return 0;
3973 }
3974
3975 if (!path->nodes[1]) {
3976 ret = insert_new_root(trans, root, path, 1);
3977 if (ret)
3978 return ret;
3979 }
3980 again:
3981 split = 1;
3982 l = path->nodes[0];
3983 slot = path->slots[0];
3984 nritems = btrfs_header_nritems(l);
3985 mid = (nritems + 1) / 2;
3986
3987 if (mid <= slot) {
3988 if (nritems == 1 ||
3989 leaf_space_used(l, mid, nritems - mid) + data_size >
3990 BTRFS_LEAF_DATA_SIZE(root)) {
3991 if (slot >= nritems) {
3992 split = 0;
3993 } else {
3994 mid = slot;
3995 if (mid != nritems &&
3996 leaf_space_used(l, mid, nritems - mid) +
3997 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
3998 if (data_size && !tried_avoid_double)
3999 goto push_for_double;
4000 split = 2;
4001 }
4002 }
4003 }
4004 } else {
4005 if (leaf_space_used(l, 0, mid) + data_size >
4006 BTRFS_LEAF_DATA_SIZE(root)) {
4007 if (!extend && data_size && slot == 0) {
4008 split = 0;
4009 } else if ((extend || !data_size) && slot == 0) {
4010 mid = 1;
4011 } else {
4012 mid = slot;
4013 if (mid != nritems &&
4014 leaf_space_used(l, mid, nritems - mid) +
4015 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
4016 if (data_size && !tried_avoid_double)
4017 goto push_for_double;
4018 split = 2;
4019 }
4020 }
4021 }
4022 }
4023
4024 if (split == 0)
4025 btrfs_cpu_key_to_disk(&disk_key, ins_key);
4026 else
4027 btrfs_item_key(l, &disk_key, mid);
4028
4029 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
4030 root->root_key.objectid,
4031 &disk_key, 0, l->start, 0);
4032 if (IS_ERR(right))
4033 return PTR_ERR(right);
4034
4035 root_add_used(root, root->leafsize);
4036
4037 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
4038 btrfs_set_header_bytenr(right, right->start);
4039 btrfs_set_header_generation(right, trans->transid);
4040 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
4041 btrfs_set_header_owner(right, root->root_key.objectid);
4042 btrfs_set_header_level(right, 0);
4043 write_extent_buffer(right, root->fs_info->fsid,
4044 (unsigned long)btrfs_header_fsid(right),
4045 BTRFS_FSID_SIZE);
4046
4047 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
4048 (unsigned long)btrfs_header_chunk_tree_uuid(right),
4049 BTRFS_UUID_SIZE);
4050
4051 if (split == 0) {
4052 if (mid <= slot) {
4053 btrfs_set_header_nritems(right, 0);
4054 insert_ptr(trans, root, path, &disk_key, right->start,
4055 path->slots[1] + 1, 1);
4056 btrfs_tree_unlock(path->nodes[0]);
4057 free_extent_buffer(path->nodes[0]);
4058 path->nodes[0] = right;
4059 path->slots[0] = 0;
4060 path->slots[1] += 1;
4061 } else {
4062 btrfs_set_header_nritems(right, 0);
4063 insert_ptr(trans, root, path, &disk_key, right->start,
4064 path->slots[1], 1);
4065 btrfs_tree_unlock(path->nodes[0]);
4066 free_extent_buffer(path->nodes[0]);
4067 path->nodes[0] = right;
4068 path->slots[0] = 0;
4069 if (path->slots[1] == 0)
4070 fixup_low_keys(root, path, &disk_key, 1);
4071 }
4072 btrfs_mark_buffer_dirty(right);
4073 return ret;
4074 }
4075
4076 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
4077
4078 if (split == 2) {
4079 BUG_ON(num_doubles != 0);
4080 num_doubles++;
4081 goto again;
4082 }
4083
4084 return 0;
4085
4086 push_for_double:
4087 push_for_double_split(trans, root, path, data_size);
4088 tried_avoid_double = 1;
4089 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
4090 return 0;
4091 goto again;
4092 }
4093
4094 static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
4095 struct btrfs_root *root,
4096 struct btrfs_path *path, int ins_len)
4097 {
4098 struct btrfs_key key;
4099 struct extent_buffer *leaf;
4100 struct btrfs_file_extent_item *fi;
4101 u64 extent_len = 0;
4102 u32 item_size;
4103 int ret;
4104
4105 leaf = path->nodes[0];
4106 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4107
4108 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
4109 key.type != BTRFS_EXTENT_CSUM_KEY);
4110
4111 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
4112 return 0;
4113
4114 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4115 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4116 fi = btrfs_item_ptr(leaf, path->slots[0],
4117 struct btrfs_file_extent_item);
4118 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
4119 }
4120 btrfs_release_path(path);
4121
4122 path->keep_locks = 1;
4123 path->search_for_split = 1;
4124 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4125 path->search_for_split = 0;
4126 if (ret < 0)
4127 goto err;
4128
4129 ret = -EAGAIN;
4130 leaf = path->nodes[0];
4131 /* if our item isn't there or got smaller, return now */
4132 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
4133 goto err;
4134
4135 /* the leaf has changed, it now has room. return now */
4136 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
4137 goto err;
4138
4139 if (key.type == BTRFS_EXTENT_DATA_KEY) {
4140 fi = btrfs_item_ptr(leaf, path->slots[0],
4141 struct btrfs_file_extent_item);
4142 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
4143 goto err;
4144 }
4145
4146 btrfs_set_path_blocking(path);
4147 ret = split_leaf(trans, root, &key, path, ins_len, 1);
4148 if (ret)
4149 goto err;
4150
4151 path->keep_locks = 0;
4152 btrfs_unlock_up_safe(path, 1);
4153 return 0;
4154 err:
4155 path->keep_locks = 0;
4156 return ret;
4157 }
4158
4159 static noinline int split_item(struct btrfs_trans_handle *trans,
4160 struct btrfs_root *root,
4161 struct btrfs_path *path,
4162 struct btrfs_key *new_key,
4163 unsigned long split_offset)
4164 {
4165 struct extent_buffer *leaf;
4166 struct btrfs_item *item;
4167 struct btrfs_item *new_item;
4168 int slot;
4169 char *buf;
4170 u32 nritems;
4171 u32 item_size;
4172 u32 orig_offset;
4173 struct btrfs_disk_key disk_key;
4174
4175 leaf = path->nodes[0];
4176 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
4177
4178 btrfs_set_path_blocking(path);
4179
4180 item = btrfs_item_nr(leaf, path->slots[0]);
4181 orig_offset = btrfs_item_offset(leaf, item);
4182 item_size = btrfs_item_size(leaf, item);
4183
4184 buf = kmalloc(item_size, GFP_NOFS);
4185 if (!buf)
4186 return -ENOMEM;
4187
4188 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
4189 path->slots[0]), item_size);
4190
4191 slot = path->slots[0] + 1;
4192 nritems = btrfs_header_nritems(leaf);
4193 if (slot != nritems) {
4194 /* shift the items */
4195 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
4196 btrfs_item_nr_offset(slot),
4197 (nritems - slot) * sizeof(struct btrfs_item));
4198 }
4199
4200 btrfs_cpu_key_to_disk(&disk_key, new_key);
4201 btrfs_set_item_key(leaf, &disk_key, slot);
4202
4203 new_item = btrfs_item_nr(leaf, slot);
4204
4205 btrfs_set_item_offset(leaf, new_item, orig_offset);
4206 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
4207
4208 btrfs_set_item_offset(leaf, item,
4209 orig_offset + item_size - split_offset);
4210 btrfs_set_item_size(leaf, item, split_offset);
4211
4212 btrfs_set_header_nritems(leaf, nritems + 1);
4213
4214 /* write the data for the start of the original item */
4215 write_extent_buffer(leaf, buf,
4216 btrfs_item_ptr_offset(leaf, path->slots[0]),
4217 split_offset);
4218
4219 /* write the data for the new item */
4220 write_extent_buffer(leaf, buf + split_offset,
4221 btrfs_item_ptr_offset(leaf, slot),
4222 item_size - split_offset);
4223 btrfs_mark_buffer_dirty(leaf);
4224
4225 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
4226 kfree(buf);
4227 return 0;
4228 }
4229
4230 /*
4231 * This function splits a single item into two items,
4232 * giving 'new_key' to the new item and splitting the
4233 * old one at split_offset (from the start of the item).
4234 *
4235 * The path may be released by this operation. After
4236 * the split, the path is pointing to the old item. The
4237 * new item is going to be in the same node as the old one.
4238 *
4239 * Note, the item being split must be small enough to live alone on
4240 * a tree block with room for one extra struct btrfs_item.
4241 *
4242 * This allows us to split the item in place, keeping a lock on the
4243 * leaf the entire time.
4244 */
4245 int btrfs_split_item(struct btrfs_trans_handle *trans,
4246 struct btrfs_root *root,
4247 struct btrfs_path *path,
4248 struct btrfs_key *new_key,
4249 unsigned long split_offset)
4250 {
4251 int ret;
4252 ret = setup_leaf_for_split(trans, root, path,
4253 sizeof(struct btrfs_item));
4254 if (ret)
4255 return ret;
4256
4257 ret = split_item(trans, root, path, new_key, split_offset);
4258 return ret;
4259 }
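/*
 * Editor's illustrative sketch, not part of the original file: a typical
 * caller splits an item it has already located with btrfs_search_slot().
 * Per the BUG_ON in setup_leaf_for_split(), this only applies to
 * EXTENT_DATA and EXTENT_CSUM items.  'trans', 'root', 'path', 'found_key'
 * and 'split_offset' are assumed to come from the surrounding code, and how
 * new_key is derived from split_offset is item-type specific; the
 * assignments below are placeholders.
 *
 *	struct btrfs_key new_key;
 *	int ret;
 *
 *	new_key.objectid = found_key.objectid;
 *	new_key.type = found_key.type;
 *	new_key.offset = found_key.offset + split_offset;
 *
 *	ret = btrfs_split_item(trans, root, path, &new_key, split_offset);
 *	if (ret)
 *		return ret;
 *	afterwards path->slots[0] still points at the shortened front half
 */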
4260
4261 /*
4262 * This function duplicates an item, giving 'new_key' to the new item.
4263 * It guarantees both items live in the same tree leaf and the new item
4264 * is contiguous with the original item.
4265 *
4266 * This allows us to split a file extent in place, keeping a lock on the
4267 * leaf the entire time.
4268 */
4269 int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
4270 struct btrfs_root *root,
4271 struct btrfs_path *path,
4272 struct btrfs_key *new_key)
4273 {
4274 struct extent_buffer *leaf;
4275 int ret;
4276 u32 item_size;
4277
4278 leaf = path->nodes[0];
4279 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
4280 ret = setup_leaf_for_split(trans, root, path,
4281 item_size + sizeof(struct btrfs_item));
4282 if (ret)
4283 return ret;
4284
4285 path->slots[0]++;
4286 setup_items_for_insert(root, path, new_key, &item_size,
4287 item_size, item_size +
4288 sizeof(struct btrfs_item), 1);
4289 leaf = path->nodes[0];
4290 memcpy_extent_buffer(leaf,
4291 btrfs_item_ptr_offset(leaf, path->slots[0]),
4292 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
4293 item_size);
4294 return 0;
4295 }
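/*
 * Editor's illustrative sketch, not part of the original file: duplicating
 * the item under 'path' so a second copy exists under 'new_key', e.g. when
 * a file extent item must be split at a new file offset.  Like
 * btrfs_split_item(), this goes through setup_leaf_for_split(), so it only
 * applies to EXTENT_DATA/EXTENT_CSUM items.  'new_file_offset' and the
 * surrounding variables are placeholders.
 *
 *	struct btrfs_key new_key = found_key;
 *	int ret;
 *
 *	new_key.offset = new_file_offset;
 *	ret = btrfs_duplicate_item(trans, root, path, &new_key);
 *	if (ret)
 *		return ret;
 *	path->slots[0] now points at the copy; both items share the leaf
 */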
4296
4297 /*
4298 * make the item pointed to by the path smaller. new_size indicates
4299 * how small to make it, and from_end tells us if we just chop bytes
4300 * off the end of the item or if we shift the item to chop bytes off
4301 * the front.
4302 */
4303 void btrfs_truncate_item(struct btrfs_root *root, struct btrfs_path *path,
4304 u32 new_size, int from_end)
4305 {
4306 int slot;
4307 struct extent_buffer *leaf;
4308 struct btrfs_item *item;
4309 u32 nritems;
4310 unsigned int data_end;
4311 unsigned int old_data_start;
4312 unsigned int old_size;
4313 unsigned int size_diff;
4314 int i;
4315 struct btrfs_map_token token;
4316
4317 btrfs_init_map_token(&token);
4318
4319 leaf = path->nodes[0];
4320 slot = path->slots[0];
4321
4322 old_size = btrfs_item_size_nr(leaf, slot);
4323 if (old_size == new_size)
4324 return;
4325
4326 nritems = btrfs_header_nritems(leaf);
4327 data_end = leaf_data_end(root, leaf);
4328
4329 old_data_start = btrfs_item_offset_nr(leaf, slot);
4330
4331 size_diff = old_size - new_size;
4332
4333 BUG_ON(slot < 0);
4334 BUG_ON(slot >= nritems);
4335
4336 /*
4337 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4338 */
4339 /* first correct the data pointers */
4340 for (i = slot; i < nritems; i++) {
4341 u32 ioff;
4342 item = btrfs_item_nr(leaf, i);
4343
4344 ioff = btrfs_token_item_offset(leaf, item, &token);
4345 btrfs_set_token_item_offset(leaf, item,
4346 ioff + size_diff, &token);
4347 }
4348
4349 /* shift the data */
4350 if (from_end) {
4351 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4352 data_end + size_diff, btrfs_leaf_data(leaf) +
4353 data_end, old_data_start + new_size - data_end);
4354 } else {
4355 struct btrfs_disk_key disk_key;
4356 u64 offset;
4357
4358 btrfs_item_key(leaf, &disk_key, slot);
4359
4360 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
4361 unsigned long ptr;
4362 struct btrfs_file_extent_item *fi;
4363
4364 fi = btrfs_item_ptr(leaf, slot,
4365 struct btrfs_file_extent_item);
4366 fi = (struct btrfs_file_extent_item *)(
4367 (unsigned long)fi - size_diff);
4368
4369 if (btrfs_file_extent_type(leaf, fi) ==
4370 BTRFS_FILE_EXTENT_INLINE) {
4371 ptr = btrfs_item_ptr_offset(leaf, slot);
4372 memmove_extent_buffer(leaf, ptr,
4373 (unsigned long)fi,
4374 offsetof(struct btrfs_file_extent_item,
4375 disk_bytenr));
4376 }
4377 }
4378
4379 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4380 data_end + size_diff, btrfs_leaf_data(leaf) +
4381 data_end, old_data_start - data_end);
4382
4383 offset = btrfs_disk_key_offset(&disk_key);
4384 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
4385 btrfs_set_item_key(leaf, &disk_key, slot);
4386 if (slot == 0)
4387 fixup_low_keys(root, path, &disk_key, 1);
4388 }
4389
4390 item = btrfs_item_nr(leaf, slot);
4391 btrfs_set_item_size(leaf, item, new_size);
4392 btrfs_mark_buffer_dirty(leaf);
4393
4394 if (btrfs_leaf_free_space(root, leaf) < 0) {
4395 btrfs_print_leaf(root, leaf);
4396 BUG();
4397 }
4398 }
4399
4400 /*
4401 * make the item pointed to by the path bigger, data_size is the added size.
4402 */
4403 void btrfs_extend_item(struct btrfs_root *root, struct btrfs_path *path,
4404 u32 data_size)
4405 {
4406 int slot;
4407 struct extent_buffer *leaf;
4408 struct btrfs_item *item;
4409 u32 nritems;
4410 unsigned int data_end;
4411 unsigned int old_data;
4412 unsigned int old_size;
4413 int i;
4414 struct btrfs_map_token token;
4415
4416 btrfs_init_map_token(&token);
4417
4418 leaf = path->nodes[0];
4419
4420 nritems = btrfs_header_nritems(leaf);
4421 data_end = leaf_data_end(root, leaf);
4422
4423 if (btrfs_leaf_free_space(root, leaf) < data_size) {
4424 btrfs_print_leaf(root, leaf);
4425 BUG();
4426 }
4427 slot = path->slots[0];
4428 old_data = btrfs_item_end_nr(leaf, slot);
4429
4430 BUG_ON(slot < 0);
4431 if (slot >= nritems) {
4432 btrfs_print_leaf(root, leaf);
4433 printk(KERN_CRIT "slot %d too large, nritems %d\n",
4434 slot, nritems);
4435 BUG_ON(1);
4436 }
4437
4438 /*
4439 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4440 */
4441 /* first correct the data pointers */
4442 for (i = slot; i < nritems; i++) {
4443 u32 ioff;
4444 item = btrfs_item_nr(leaf, i);
4445
4446 ioff = btrfs_token_item_offset(leaf, item, &token);
4447 btrfs_set_token_item_offset(leaf, item,
4448 ioff - data_size, &token);
4449 }
4450
4451 /* shift the data */
4452 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4453 data_end - data_size, btrfs_leaf_data(leaf) +
4454 data_end, old_data - data_end);
4455
4456 data_end = old_data;
4457 old_size = btrfs_item_size_nr(leaf, slot);
4458 item = btrfs_item_nr(leaf, slot);
4459 btrfs_set_item_size(leaf, item, old_size + data_size);
4460 btrfs_mark_buffer_dirty(leaf);
4461
4462 if (btrfs_leaf_free_space(root, leaf) < 0) {
4463 btrfs_print_leaf(root, leaf);
4464 BUG();
4465 }
4466 }
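/*
 * Editor's illustrative sketch, not part of the original file: the usual
 * pattern around btrfs_truncate_item()/btrfs_extend_item() is to locate the
 * item with a cowing search, resize it in place and then rewrite whatever
 * part of the payload changed.  When growing, callers normally search with
 * a positive ins_len so the leaf is guaranteed to have room.  'key' and
 * 'new_size' are placeholders.
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
 *	if (ret)
 *		goto out;
 *	old_size = btrfs_item_size_nr(path->nodes[0], path->slots[0]);
 *	if (new_size < old_size)
 *		btrfs_truncate_item(root, path, new_size, 1);
 *	else if (new_size > old_size)
 *		btrfs_extend_item(root, path, new_size - old_size);
 */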
4467
4468 /*
4469 * this is a helper for btrfs_insert_empty_items, the main goal here is
4470 * to save stack depth by doing the bulk of the work in a function
4471 * that doesn't call btrfs_search_slot
4472 */
4473 void setup_items_for_insert(struct btrfs_root *root, struct btrfs_path *path,
4474 struct btrfs_key *cpu_key, u32 *data_size,
4475 u32 total_data, u32 total_size, int nr)
4476 {
4477 struct btrfs_item *item;
4478 int i;
4479 u32 nritems;
4480 unsigned int data_end;
4481 struct btrfs_disk_key disk_key;
4482 struct extent_buffer *leaf;
4483 int slot;
4484 struct btrfs_map_token token;
4485
4486 btrfs_init_map_token(&token);
4487
4488 leaf = path->nodes[0];
4489 slot = path->slots[0];
4490
4491 nritems = btrfs_header_nritems(leaf);
4492 data_end = leaf_data_end(root, leaf);
4493
4494 if (btrfs_leaf_free_space(root, leaf) < total_size) {
4495 btrfs_print_leaf(root, leaf);
4496 printk(KERN_CRIT "not enough freespace need %u have %d\n",
4497 total_size, btrfs_leaf_free_space(root, leaf));
4498 BUG();
4499 }
4500
4501 if (slot != nritems) {
4502 unsigned int old_data = btrfs_item_end_nr(leaf, slot);
4503
4504 if (old_data < data_end) {
4505 btrfs_print_leaf(root, leaf);
4506 printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
4507 slot, old_data, data_end);
4508 BUG_ON(1);
4509 }
4510 /*
4511 * item0..itemN ... dataN.offset..dataN.size .. data0.size
4512 */
4513 /* first correct the data pointers */
4514 for (i = slot; i < nritems; i++) {
4515 u32 ioff;
4516
4517 item = btrfs_item_nr(leaf, i);
4518 ioff = btrfs_token_item_offset(leaf, item, &token);
4519 btrfs_set_token_item_offset(leaf, item,
4520 ioff - total_data, &token);
4521 }
4522 /* shift the items */
4523 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
4524 btrfs_item_nr_offset(slot),
4525 (nritems - slot) * sizeof(struct btrfs_item));
4526
4527 /* shift the data */
4528 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4529 data_end - total_data, btrfs_leaf_data(leaf) +
4530 data_end, old_data - data_end);
4531 data_end = old_data;
4532 }
4533
4534 /* setup the item for the new data */
4535 for (i = 0; i < nr; i++) {
4536 btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
4537 btrfs_set_item_key(leaf, &disk_key, slot + i);
4538 item = btrfs_item_nr(leaf, slot + i);
4539 btrfs_set_token_item_offset(leaf, item,
4540 data_end - data_size[i], &token);
4541 data_end -= data_size[i];
4542 btrfs_set_token_item_size(leaf, item, data_size[i], &token);
4543 }
4544
4545 btrfs_set_header_nritems(leaf, nritems + nr);
4546
4547 if (slot == 0) {
4548 btrfs_cpu_key_to_disk(&disk_key, cpu_key);
4549 fixup_low_keys(root, path, &disk_key, 1);
4550 }
4551 btrfs_unlock_up_safe(path, 1);
4552 btrfs_mark_buffer_dirty(leaf);
4553
4554 if (btrfs_leaf_free_space(root, leaf) < 0) {
4555 btrfs_print_leaf(root, leaf);
4556 BUG();
4557 }
4558 }
4559
4560 /*
4561 * Given a key and some data, insert items into the tree.
4562 * This does all the path init required, making room in the tree if needed.
4563 */
4564 int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
4565 struct btrfs_root *root,
4566 struct btrfs_path *path,
4567 struct btrfs_key *cpu_key, u32 *data_size,
4568 int nr)
4569 {
4570 int ret = 0;
4571 int slot;
4572 int i;
4573 u32 total_size = 0;
4574 u32 total_data = 0;
4575
4576 for (i = 0; i < nr; i++)
4577 total_data += data_size[i];
4578
4579 total_size = total_data + (nr * sizeof(struct btrfs_item));
4580 ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
4581 if (ret == 0)
4582 return -EEXIST;
4583 if (ret < 0)
4584 return ret;
4585
4586 slot = path->slots[0];
4587 BUG_ON(slot < 0);
4588
4589 setup_items_for_insert(root, path, cpu_key, data_size,
4590 total_data, total_size, nr);
4591 return 0;
4592 }
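/*
 * Editor's illustrative sketch, not part of the original file: inserting two
 * adjacent empty items in one pass and filling their payloads afterwards,
 * which is cheaper than two separate searches.  keys[] must be filled in
 * ascending order before the call; the keys, sizes and source buffers
 * (buf_a/buf_b) are placeholders.
 *
 *	struct btrfs_key keys[2];
 *	u32 sizes[2] = { sizeof(buf_a), sizeof(buf_b) };
 *	struct extent_buffer *leaf;
 *	int ret;
 *
 *	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
 *	if (ret)
 *		return ret;
 *	leaf = path->nodes[0];
 *	write_extent_buffer(leaf, &buf_a,
 *			    btrfs_item_ptr_offset(leaf, path->slots[0]),
 *			    sizes[0]);
 *	write_extent_buffer(leaf, &buf_b,
 *			    btrfs_item_ptr_offset(leaf, path->slots[0] + 1),
 *			    sizes[1]);
 *	btrfs_mark_buffer_dirty(leaf);
 *	btrfs_release_path(path);
 */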
4593
4594 /*
4595 * Given a key and some data, insert an item into the tree.
4596 * This does all the path init required, making room in the tree if needed.
4597 */
4598 int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
4599 *root, struct btrfs_key *cpu_key, void *data, u32
4600 data_size)
4601 {
4602 int ret = 0;
4603 struct btrfs_path *path;
4604 struct extent_buffer *leaf;
4605 unsigned long ptr;
4606
4607 path = btrfs_alloc_path();
4608 if (!path)
4609 return -ENOMEM;
4610 ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
4611 if (!ret) {
4612 leaf = path->nodes[0];
4613 ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
4614 write_extent_buffer(leaf, data, ptr, data_size);
4615 btrfs_mark_buffer_dirty(leaf);
4616 }
4617 btrfs_free_path(path);
4618 return ret;
4619 }
4620
4621 /*
4622 * delete the pointer from a given node.
4623 *
4624 * the tree should have been previously balanced so the deletion does not
4625 * empty a node.
4626 */
4627 static void del_ptr(struct btrfs_root *root, struct btrfs_path *path,
4628 int level, int slot)
4629 {
4630 struct extent_buffer *parent = path->nodes[level];
4631 u32 nritems;
4632 int ret;
4633
4634 nritems = btrfs_header_nritems(parent);
4635 if (slot != nritems - 1) {
4636 if (level)
4637 tree_mod_log_eb_move(root->fs_info, parent, slot,
4638 slot + 1, nritems - slot - 1);
4639 memmove_extent_buffer(parent,
4640 btrfs_node_key_ptr_offset(slot),
4641 btrfs_node_key_ptr_offset(slot + 1),
4642 sizeof(struct btrfs_key_ptr) *
4643 (nritems - slot - 1));
4644 } else if (level) {
4645 ret = tree_mod_log_insert_key(root->fs_info, parent, slot,
4646 MOD_LOG_KEY_REMOVE);
4647 BUG_ON(ret < 0);
4648 }
4649
4650 nritems--;
4651 btrfs_set_header_nritems(parent, nritems);
4652 if (nritems == 0 && parent == root->node) {
4653 BUG_ON(btrfs_header_level(root->node) != 1);
4654 /* just turn the root into a leaf and break */
4655 btrfs_set_header_level(root->node, 0);
4656 } else if (slot == 0) {
4657 struct btrfs_disk_key disk_key;
4658
4659 btrfs_node_key(parent, &disk_key, 0);
4660 fixup_low_keys(root, path, &disk_key, level + 1);
4661 }
4662 btrfs_mark_buffer_dirty(parent);
4663 }
4664
4665 /*
4666 * a helper function to delete the leaf pointed to by path->slots[1] and
4667 * path->nodes[1].
4668 *
4669 * This deletes the pointer in path->nodes[1] and frees the leaf
4670 * block extent.
4671 *
4672 * The path must have already been setup for deleting the leaf, including
4673 * all the proper balancing. path->nodes[1] must be locked.
4674 */
4675 static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
4676 struct btrfs_root *root,
4677 struct btrfs_path *path,
4678 struct extent_buffer *leaf)
4679 {
4680 WARN_ON(btrfs_header_generation(leaf) != trans->transid);
4681 del_ptr(root, path, 1, path->slots[1]);
4682
4683 /*
4684 * btrfs_free_tree_block is expensive, we want to make sure we
4685 * aren't holding any locks when we call it
4686 */
4687 btrfs_unlock_up_safe(path, 0);
4688
4689 root_sub_used(root, leaf->len);
4690
4691 extent_buffer_get(leaf);
4692 btrfs_free_tree_block(trans, root, leaf, 0, 1);
4693 free_extent_buffer_stale(leaf);
4694 }
4695 /*
4696 * delete the item at the leaf level in path. If that empties
4697 * the leaf, remove it from the tree
4698 */
4699 int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
4700 struct btrfs_path *path, int slot, int nr)
4701 {
4702 struct extent_buffer *leaf;
4703 struct btrfs_item *item;
4704 int last_off;
4705 int dsize = 0;
4706 int ret = 0;
4707 int wret;
4708 int i;
4709 u32 nritems;
4710 struct btrfs_map_token token;
4711
4712 btrfs_init_map_token(&token);
4713
4714 leaf = path->nodes[0];
4715 last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);
4716
4717 for (i = 0; i < nr; i++)
4718 dsize += btrfs_item_size_nr(leaf, slot + i);
4719
4720 nritems = btrfs_header_nritems(leaf);
4721
4722 if (slot + nr != nritems) {
4723 int data_end = leaf_data_end(root, leaf);
4724
4725 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
4726 data_end + dsize,
4727 btrfs_leaf_data(leaf) + data_end,
4728 last_off - data_end);
4729
4730 for (i = slot + nr; i < nritems; i++) {
4731 u32 ioff;
4732
4733 item = btrfs_item_nr(leaf, i);
4734 ioff = btrfs_token_item_offset(leaf, item, &token);
4735 btrfs_set_token_item_offset(leaf, item,
4736 ioff + dsize, &token);
4737 }
4738
4739 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
4740 btrfs_item_nr_offset(slot + nr),
4741 sizeof(struct btrfs_item) *
4742 (nritems - slot - nr));
4743 }
4744 btrfs_set_header_nritems(leaf, nritems - nr);
4745 nritems -= nr;
4746
4747 /* delete the leaf if we've emptied it */
4748 if (nritems == 0) {
4749 if (leaf == root->node) {
4750 btrfs_set_header_level(leaf, 0);
4751 } else {
4752 btrfs_set_path_blocking(path);
4753 clean_tree_block(trans, root, leaf);
4754 btrfs_del_leaf(trans, root, path, leaf);
4755 }
4756 } else {
4757 int used = leaf_space_used(leaf, 0, nritems);
4758 if (slot == 0) {
4759 struct btrfs_disk_key disk_key;
4760
4761 btrfs_item_key(leaf, &disk_key, 0);
4762 fixup_low_keys(root, path, &disk_key, 1);
4763 }
4764
4765 /* delete the leaf if it is mostly empty */
4766 if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
4767 /* push_leaf_left fixes the path.
4768 * make sure the path still points to our leaf
4769 * for possible call to del_ptr below
4770 */
4771 slot = path->slots[1];
4772 extent_buffer_get(leaf);
4773
4774 btrfs_set_path_blocking(path);
4775 wret = push_leaf_left(trans, root, path, 1, 1,
4776 1, (u32)-1);
4777 if (wret < 0 && wret != -ENOSPC)
4778 ret = wret;
4779
4780 if (path->nodes[0] == leaf &&
4781 btrfs_header_nritems(leaf)) {
4782 wret = push_leaf_right(trans, root, path, 1,
4783 1, 1, 0);
4784 if (wret < 0 && wret != -ENOSPC)
4785 ret = wret;
4786 }
4787
4788 if (btrfs_header_nritems(leaf) == 0) {
4789 path->slots[1] = slot;
4790 btrfs_del_leaf(trans, root, path, leaf);
4791 free_extent_buffer(leaf);
4792 ret = 0;
4793 } else {
4794 /* if we're still in the path, make sure
4795 * we're dirty. Otherwise, one of the
4796 * push_leaf functions must have already
4797 * dirtied this buffer
4798 */
4799 if (path->nodes[0] == leaf)
4800 btrfs_mark_buffer_dirty(leaf);
4801 free_extent_buffer(leaf);
4802 }
4803 } else {
4804 btrfs_mark_buffer_dirty(leaf);
4805 }
4806 }
4807 return ret;
4808 }
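/*
 * Editor's illustrative sketch, not part of the original file: deleting a
 * single item located by an exact-match search.  Passing -1 as ins_len
 * tells btrfs_search_slot() this is a deletion, so the level above the leaf
 * stays locked for the rebalancing that may follow.  'key' is a
 * placeholder.
 *
 *	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
 *	if (ret)
 *		goto out;
 *	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
 *	btrfs_release_path(path);
 */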
4809
4810 /*
4811 * search the tree again to find a leaf with lesser keys
4812 * returns 0 if it found something or 1 if there are no lesser leaves.
4813 * returns < 0 on io errors.
4814 *
4815 * This may release the path, and so you may lose any locks held at the
4816 * time you call it.
4817 */
4818 int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
4819 {
4820 struct btrfs_key key;
4821 struct btrfs_disk_key found_key;
4822 int ret;
4823
4824 btrfs_item_key_to_cpu(path->nodes[0], &key, 0);
4825
4826 if (key.offset > 0)
4827 key.offset--;
4828 else if (key.type > 0)
4829 key.type--;
4830 else if (key.objectid > 0)
4831 key.objectid--;
4832 else
4833 return 1;
4834
4835 btrfs_release_path(path);
4836 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4837 if (ret < 0)
4838 return ret;
4839 btrfs_item_key(path->nodes[0], &found_key, 0);
4840 ret = comp_keys(&found_key, &key);
4841 if (ret < 0)
4842 return 0;
4843 return 1;
4844 }
4845
4846 /*
4847 * A helper function to walk down the tree starting at min_key, and looking
4848 * for nodes or leaves that have a minimum transaction id.
4849 * This is used by the btree defrag code and tree logging.
4850 *
4851 * This does not cow, but it does stuff the starting key it finds back
4852 * into min_key, so you can call btrfs_search_slot with cow=1 on the
4853 * key and get a writable path.
4854 *
4855 * This does lock as it descends, and path->keep_locks should be set
4856 * to 1 by the caller.
4857 *
4858 * This honors path->lowest_level to prevent descent past a given level
4859 * of the tree.
4860 *
4861 * min_trans indicates the oldest transaction that you are interested
4862 * in walking through. Any nodes or leaves older than min_trans are
4863 * skipped over (without reading them).
4864 *
4865 * returns zero if something useful was found, < 0 on error and 1 if there
4866 * was nothing in the tree that matched the search criteria.
4867 */
4868 int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
4869 struct btrfs_key *max_key,
4870 struct btrfs_path *path,
4871 u64 min_trans)
4872 {
4873 struct extent_buffer *cur;
4874 struct btrfs_key found_key;
4875 int slot;
4876 int sret;
4877 u32 nritems;
4878 int level;
4879 int ret = 1;
4880
4881 WARN_ON(!path->keep_locks);
4882 again:
4883 cur = btrfs_read_lock_root_node(root);
4884 level = btrfs_header_level(cur);
4885 WARN_ON(path->nodes[level]);
4886 path->nodes[level] = cur;
4887 path->locks[level] = BTRFS_READ_LOCK;
4888
4889 if (btrfs_header_generation(cur) < min_trans) {
4890 ret = 1;
4891 goto out;
4892 }
4893 while (1) {
4894 nritems = btrfs_header_nritems(cur);
4895 level = btrfs_header_level(cur);
4896 sret = bin_search(cur, min_key, level, &slot);
4897
4898 /* at the lowest level, we're done, setup the path and exit */
4899 if (level == path->lowest_level) {
4900 if (slot >= nritems)
4901 goto find_next_key;
4902 ret = 0;
4903 path->slots[level] = slot;
4904 btrfs_item_key_to_cpu(cur, &found_key, slot);
4905 goto out;
4906 }
4907 if (sret && slot > 0)
4908 slot--;
4909 /*
4910 * check this node pointer against the min_trans parameter.
4911 * If it is too old, skip to the next one.
4912 */
4913 while (slot < nritems) {
4914 u64 blockptr;
4915 u64 gen;
4916
4917 blockptr = btrfs_node_blockptr(cur, slot);
4918 gen = btrfs_node_ptr_generation(cur, slot);
4919 if (gen < min_trans) {
4920 slot++;
4921 continue;
4922 }
4923 break;
4924 }
4925 find_next_key:
4926 /*
4927 * we didn't find a candidate key in this node, walk forward
4928 * and find another one
4929 */
4930 if (slot >= nritems) {
4931 path->slots[level] = slot;
4932 btrfs_set_path_blocking(path);
4933 sret = btrfs_find_next_key(root, path, min_key, level,
4934 min_trans);
4935 if (sret == 0) {
4936 btrfs_release_path(path);
4937 goto again;
4938 } else {
4939 goto out;
4940 }
4941 }
4942 /* save our key for returning back */
4943 btrfs_node_key_to_cpu(cur, &found_key, slot);
4944 path->slots[level] = slot;
4945 if (level == path->lowest_level) {
4946 ret = 0;
4947 unlock_up(path, level, 1, 0, NULL);
4948 goto out;
4949 }
4950 btrfs_set_path_blocking(path);
4951 cur = read_node_slot(root, cur, slot);
4952 BUG_ON(!cur); /* -ENOMEM */
4953
4954 btrfs_tree_read_lock(cur);
4955
4956 path->locks[level - 1] = BTRFS_READ_LOCK;
4957 path->nodes[level - 1] = cur;
4958 unlock_up(path, level, 1, 0, NULL);
4959 btrfs_clear_path_blocking(path, NULL, 0);
4960 }
4961 out:
4962 if (ret == 0)
4963 memcpy(min_key, &found_key, sizeof(found_key));
4964 btrfs_set_path_blocking(path);
4965 return ret;
4966 }
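/*
 * Editor's illustrative sketch, not part of the original file: scanning for
 * tree blocks newer than a given generation, roughly how defrag and tree
 * logging drive this helper.  The body above never dereferences max_key, so
 * NULL is passed here for brevity; 'min_trans' and the simple key advance
 * are placeholders.
 *
 *	struct btrfs_key min_key = { 0 };
 *	int ret;
 *
 *	path->keep_locks = 1;
 *	while (1) {
 *		ret = btrfs_search_forward(root, &min_key, NULL, path,
 *					   min_trans);
 *		if (ret)
 *			break;
 *		process the item at path->nodes[path->lowest_level] here
 *		btrfs_release_path(path);
 *		if (min_key.offset == (u64)-1)
 *			break;
 *		min_key.offset++;
 *	}
 *	path->keep_locks = 0;
 *	btrfs_release_path(path);
 */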
4967
4968 static void tree_move_down(struct btrfs_root *root,
4969 struct btrfs_path *path,
4970 int *level, int root_level)
4971 {
4972 BUG_ON(*level == 0);
4973 path->nodes[*level - 1] = read_node_slot(root, path->nodes[*level],
4974 path->slots[*level]);
4975 path->slots[*level - 1] = 0;
4976 (*level)--;
4977 }
4978
4979 static int tree_move_next_or_upnext(struct btrfs_root *root,
4980 struct btrfs_path *path,
4981 int *level, int root_level)
4982 {
4983 int ret = 0;
4984 int nritems;
4985 nritems = btrfs_header_nritems(path->nodes[*level]);
4986
4987 path->slots[*level]++;
4988
4989 while (path->slots[*level] >= nritems) {
4990 if (*level == root_level)
4991 return -1;
4992
4993 /* move upnext */
4994 path->slots[*level] = 0;
4995 free_extent_buffer(path->nodes[*level]);
4996 path->nodes[*level] = NULL;
4997 (*level)++;
4998 path->slots[*level]++;
4999
5000 nritems = btrfs_header_nritems(path->nodes[*level]);
5001 ret = 1;
5002 }
5003 return ret;
5004 }
5005
5006 /*
5007 * Returns 1 if it had to move up and next. 0 is returned if it moved only next
5008 * or down.
5009 */
5010 static int tree_advance(struct btrfs_root *root,
5011 struct btrfs_path *path,
5012 int *level, int root_level,
5013 int allow_down,
5014 struct btrfs_key *key)
5015 {
5016 int ret;
5017
5018 if (*level == 0 || !allow_down) {
5019 ret = tree_move_next_or_upnext(root, path, level, root_level);
5020 } else {
5021 tree_move_down(root, path, level, root_level);
5022 ret = 0;
5023 }
5024 if (ret >= 0) {
5025 if (*level == 0)
5026 btrfs_item_key_to_cpu(path->nodes[*level], key,
5027 path->slots[*level]);
5028 else
5029 btrfs_node_key_to_cpu(path->nodes[*level], key,
5030 path->slots[*level]);
5031 }
5032 return ret;
5033 }
5034
5035 static int tree_compare_item(struct btrfs_root *left_root,
5036 struct btrfs_path *left_path,
5037 struct btrfs_path *right_path,
5038 char *tmp_buf)
5039 {
5040 int cmp;
5041 int len1, len2;
5042 unsigned long off1, off2;
5043
5044 len1 = btrfs_item_size_nr(left_path->nodes[0], left_path->slots[0]);
5045 len2 = btrfs_item_size_nr(right_path->nodes[0], right_path->slots[0]);
5046 if (len1 != len2)
5047 return 1;
5048
5049 off1 = btrfs_item_ptr_offset(left_path->nodes[0], left_path->slots[0]);
5050 off2 = btrfs_item_ptr_offset(right_path->nodes[0],
5051 right_path->slots[0]);
5052
5053 read_extent_buffer(left_path->nodes[0], tmp_buf, off1, len1);
5054
5055 cmp = memcmp_extent_buffer(right_path->nodes[0], tmp_buf, off2, len1);
5056 if (cmp)
5057 return 1;
5058 return 0;
5059 }
5060
5061 #define ADVANCE 1
5062 #define ADVANCE_ONLY_NEXT -1
5063
5064 /*
5065 * This function compares two trees and calls the provided callback for
5066 * every changed/new/deleted item it finds.
5067 * If shared tree blocks are encountered, whole subtrees are skipped, making
5068 * the compare pretty fast on snapshotted subvolumes.
5069 *
5070 * This currently works on commit roots only. As commit roots are read only,
5071 * we don't do any locking. The commit roots are protected with transactions.
5072 * Transactions are ended and rejoined when a commit is tried in between.
5073 *
5074 * This function checks for modifications done to the trees while comparing.
5075 * If it detects a change, it aborts immediately.
5076 */
5077 int btrfs_compare_trees(struct btrfs_root *left_root,
5078 struct btrfs_root *right_root,
5079 btrfs_changed_cb_t changed_cb, void *ctx)
5080 {
5081 int ret;
5082 int cmp;
5083 struct btrfs_trans_handle *trans = NULL;
5084 struct btrfs_path *left_path = NULL;
5085 struct btrfs_path *right_path = NULL;
5086 struct btrfs_key left_key;
5087 struct btrfs_key right_key;
5088 char *tmp_buf = NULL;
5089 int left_root_level;
5090 int right_root_level;
5091 int left_level;
5092 int right_level;
5093 int left_end_reached;
5094 int right_end_reached;
5095 int advance_left;
5096 int advance_right;
5097 u64 left_blockptr;
5098 u64 right_blockptr;
5099 u64 left_start_ctransid;
5100 u64 right_start_ctransid;
5101 u64 ctransid;
5102
5103 left_path = btrfs_alloc_path();
5104 if (!left_path) {
5105 ret = -ENOMEM;
5106 goto out;
5107 }
5108 right_path = btrfs_alloc_path();
5109 if (!right_path) {
5110 ret = -ENOMEM;
5111 goto out;
5112 }
5113
5114 tmp_buf = kmalloc(left_root->leafsize, GFP_NOFS);
5115 if (!tmp_buf) {
5116 ret = -ENOMEM;
5117 goto out;
5118 }
5119
5120 left_path->search_commit_root = 1;
5121 left_path->skip_locking = 1;
5122 right_path->search_commit_root = 1;
5123 right_path->skip_locking = 1;
5124
5125 spin_lock(&left_root->root_item_lock);
5126 left_start_ctransid = btrfs_root_ctransid(&left_root->root_item);
5127 spin_unlock(&left_root->root_item_lock);
5128
5129 spin_lock(&right_root->root_item_lock);
5130 right_start_ctransid = btrfs_root_ctransid(&right_root->root_item);
5131 spin_unlock(&right_root->root_item_lock);
5132
5133 trans = btrfs_join_transaction(left_root);
5134 if (IS_ERR(trans)) {
5135 ret = PTR_ERR(trans);
5136 trans = NULL;
5137 goto out;
5138 }
5139
5140 /*
5141 * Strategy: Go to the first items of both trees. Then do
5142 *
5143 * If both trees are at level 0
5144 * Compare keys of current items
5145 * If left < right treat left item as new, advance left tree
5146 * and repeat
5147 * If left > right treat right item as deleted, advance right tree
5148 * and repeat
5149 * If left == right do deep compare of items, treat as changed if
5150 * needed, advance both trees and repeat
5151 * If both trees are at the same level but not at level 0
5152 * Compare keys of current nodes/leaves
5153 * If left < right advance left tree and repeat
5154 * If left > right advance right tree and repeat
5155 * If left == right compare blockptrs of the next nodes/leaves
5156 * If they match advance both trees but stay at the same level
5157 * and repeat
5158 * If they don't match advance both trees while allowing to go
5159 * deeper and repeat
5160 * If tree levels are different
5161 * Advance the tree that needs it and repeat
5162 *
5163 * Advancing a tree means:
5164 * If we are at level 0, try to go to the next slot. If that's not
5165 * possible, go one level up and repeat. Stop when we found a level
5166 * where we could go to the next slot. We may at this point be on a
5167 * node or a leaf.
5168 *
5169 * If we are not at level 0 and not on shared tree blocks, go one
5170 * level deeper.
5171 *
5172 * If we are not at level 0 and on shared tree blocks, go one slot to
5173 * the right if possible or go up and right.
5174 */
5175
5176 left_level = btrfs_header_level(left_root->commit_root);
5177 left_root_level = left_level;
5178 left_path->nodes[left_level] = left_root->commit_root;
5179 extent_buffer_get(left_path->nodes[left_level]);
5180
5181 right_level = btrfs_header_level(right_root->commit_root);
5182 right_root_level = right_level;
5183 right_path->nodes[right_level] = right_root->commit_root;
5184 extent_buffer_get(right_path->nodes[right_level]);
5185
5186 if (left_level == 0)
5187 btrfs_item_key_to_cpu(left_path->nodes[left_level],
5188 &left_key, left_path->slots[left_level]);
5189 else
5190 btrfs_node_key_to_cpu(left_path->nodes[left_level],
5191 &left_key, left_path->slots[left_level]);
5192 if (right_level == 0)
5193 btrfs_item_key_to_cpu(right_path->nodes[right_level],
5194 &right_key, right_path->slots[right_level]);
5195 else
5196 btrfs_node_key_to_cpu(right_path->nodes[right_level],
5197 &right_key, right_path->slots[right_level]);
5198
5199 left_end_reached = right_end_reached = 0;
5200 advance_left = advance_right = 0;
5201
5202 while (1) {
5203 /*
5204 * We need to make sure the transaction does not get committed
5205 * while we do anything on commit roots. This means, we need to
5206 * join and leave transactions for every item that we process.
5207 */
5208 if (trans && btrfs_should_end_transaction(trans, left_root)) {
5209 btrfs_release_path(left_path);
5210 btrfs_release_path(right_path);
5211
5212 ret = btrfs_end_transaction(trans, left_root);
5213 trans = NULL;
5214 if (ret < 0)
5215 goto out;
5216 }
5217 /* now rejoin the transaction */
5218 if (!trans) {
5219 trans = btrfs_join_transaction(left_root);
5220 if (IS_ERR(trans)) {
5221 ret = PTR_ERR(trans);
5222 trans = NULL;
5223 goto out;
5224 }
5225
5226 spin_lock(&left_root->root_item_lock);
5227 ctransid = btrfs_root_ctransid(&left_root->root_item);
5228 spin_unlock(&left_root->root_item_lock);
5229 if (ctransid != left_start_ctransid)
5230 left_start_ctransid = 0;
5231
5232 spin_lock(&right_root->root_item_lock);
5233 ctransid = btrfs_root_ctransid(&right_root->root_item);
5234 spin_unlock(&right_root->root_item_lock);
5235 if (ctransid != right_start_ctransid)
5236 right_start_ctransid = 0;
5237
5238 if (!left_start_ctransid || !right_start_ctransid) {
5239 WARN(1, KERN_WARNING
5240 "btrfs: btrfs_compare_tree detected "
5241 "a change in one of the trees while "
5242 "iterating. This is probably a "
5243 "bug.\n");
5244 ret = -EIO;
5245 goto out;
5246 }
5247
5248 /*
5249 * the commit root may have changed, so start again
5250 * where we stopped
5251 */
5252 left_path->lowest_level = left_level;
5253 right_path->lowest_level = right_level;
5254 ret = btrfs_search_slot(NULL, left_root,
5255 &left_key, left_path, 0, 0);
5256 if (ret < 0)
5257 goto out;
5258 ret = btrfs_search_slot(NULL, right_root,
5259 &right_key, right_path, 0, 0);
5260 if (ret < 0)
5261 goto out;
5262 }
5263
5264 if (advance_left && !left_end_reached) {
5265 ret = tree_advance(left_root, left_path, &left_level,
5266 left_root_level,
5267 advance_left != ADVANCE_ONLY_NEXT,
5268 &left_key);
5269 if (ret < 0)
5270 left_end_reached = ADVANCE;
5271 advance_left = 0;
5272 }
5273 if (advance_right && !right_end_reached) {
5274 ret = tree_advance(right_root, right_path, &right_level,
5275 right_root_level,
5276 advance_right != ADVANCE_ONLY_NEXT,
5277 &right_key);
5278 if (ret < 0)
5279 right_end_reached = ADVANCE;
5280 advance_right = 0;
5281 }
5282
5283 if (left_end_reached && right_end_reached) {
5284 ret = 0;
5285 goto out;
5286 } else if (left_end_reached) {
5287 if (right_level == 0) {
5288 ret = changed_cb(left_root, right_root,
5289 left_path, right_path,
5290 &right_key,
5291 BTRFS_COMPARE_TREE_DELETED,
5292 ctx);
5293 if (ret < 0)
5294 goto out;
5295 }
5296 advance_right = ADVANCE;
5297 continue;
5298 } else if (right_end_reached) {
5299 if (left_level == 0) {
5300 ret = changed_cb(left_root, right_root,
5301 left_path, right_path,
5302 &left_key,
5303 BTRFS_COMPARE_TREE_NEW,
5304 ctx);
5305 if (ret < 0)
5306 goto out;
5307 }
5308 advance_left = ADVANCE;
5309 continue;
5310 }
5311
5312 if (left_level == 0 && right_level == 0) {
5313 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5314 if (cmp < 0) {
5315 ret = changed_cb(left_root, right_root,
5316 left_path, right_path,
5317 &left_key,
5318 BTRFS_COMPARE_TREE_NEW,
5319 ctx);
5320 if (ret < 0)
5321 goto out;
5322 advance_left = ADVANCE;
5323 } else if (cmp > 0) {
5324 ret = changed_cb(left_root, right_root,
5325 left_path, right_path,
5326 &right_key,
5327 BTRFS_COMPARE_TREE_DELETED,
5328 ctx);
5329 if (ret < 0)
5330 goto out;
5331 advance_right = ADVANCE;
5332 } else {
5333 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5334 ret = tree_compare_item(left_root, left_path,
5335 right_path, tmp_buf);
5336 if (ret) {
5337 WARN_ON(!extent_buffer_uptodate(left_path->nodes[0]));
5338 ret = changed_cb(left_root, right_root,
5339 left_path, right_path,
5340 &left_key,
5341 BTRFS_COMPARE_TREE_CHANGED,
5342 ctx);
5343 if (ret < 0)
5344 goto out;
5345 }
5346 advance_left = ADVANCE;
5347 advance_right = ADVANCE;
5348 }
5349 } else if (left_level == right_level) {
5350 cmp = btrfs_comp_cpu_keys(&left_key, &right_key);
5351 if (cmp < 0) {
5352 advance_left = ADVANCE;
5353 } else if (cmp > 0) {
5354 advance_right = ADVANCE;
5355 } else {
5356 left_blockptr = btrfs_node_blockptr(
5357 left_path->nodes[left_level],
5358 left_path->slots[left_level]);
5359 right_blockptr = btrfs_node_blockptr(
5360 right_path->nodes[right_level],
5361 right_path->slots[right_level]);
5362 if (left_blockptr == right_blockptr) {
5363 /*
5364 * As we're on a shared block, don't
5365 * allow to go deeper.
5366 */
5367 advance_left = ADVANCE_ONLY_NEXT;
5368 advance_right = ADVANCE_ONLY_NEXT;
5369 } else {
5370 advance_left = ADVANCE;
5371 advance_right = ADVANCE;
5372 }
5373 }
5374 } else if (left_level < right_level) {
5375 advance_right = ADVANCE;
5376 } else {
5377 advance_left = ADVANCE;
5378 }
5379 }
5380
5381 out:
5382 btrfs_free_path(left_path);
5383 btrfs_free_path(right_path);
5384 kfree(tmp_buf);
5385
5386 if (trans) {
5387 if (!ret)
5388 ret = btrfs_end_transaction(trans, left_root);
5389 else
5390 btrfs_end_transaction(trans, left_root);
5391 }
5392
5393 return ret;
5394 }
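/*
 * Editor's illustrative sketch, not part of the original file: a minimal
 * changed_cb and the call that drives the comparison, in the style of the
 * send code.  The callback's parameter list is inferred from the call sites
 * above; the exact typedef (btrfs_changed_cb_t) and the result enum live in
 * ctree.h, so treat the types shown here as assumptions.
 *
 *	static int record_change(struct btrfs_root *left_root,
 *				 struct btrfs_root *right_root,
 *				 struct btrfs_path *left_path,
 *				 struct btrfs_path *right_path,
 *				 struct btrfs_key *key,
 *				 enum btrfs_compare_tree_result result,
 *				 void *ctx)
 *	{
 *		stash the (key, result) pair somewhere reachable through ctx
 *		return 0;
 *	}
 *
 *	ret = btrfs_compare_trees(send_root, parent_root, record_change, &sctx);
 */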
5395
5396 /*
5397 * this is similar to btrfs_next_leaf, but does not try to preserve
5398 * and fixup the path. It looks for and returns the next key in the
5399 * tree based on the current path and the min_trans parameters.
5400 *
5401 * 0 is returned if another key is found, < 0 if there are any errors
5402 * and 1 is returned if there are no higher keys in the tree
5403 *
5404 * path->keep_locks should be set to 1 on the search made before
5405 * calling this function.
5406 */
5407 int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
5408 struct btrfs_key *key, int level, u64 min_trans)
5409 {
5410 int slot;
5411 struct extent_buffer *c;
5412
5413 WARN_ON(!path->keep_locks);
5414 while (level < BTRFS_MAX_LEVEL) {
5415 if (!path->nodes[level])
5416 return 1;
5417
5418 slot = path->slots[level] + 1;
5419 c = path->nodes[level];
5420 next:
5421 if (slot >= btrfs_header_nritems(c)) {
5422 int ret;
5423 int orig_lowest;
5424 struct btrfs_key cur_key;
5425 if (level + 1 >= BTRFS_MAX_LEVEL ||
5426 !path->nodes[level + 1])
5427 return 1;
5428
5429 if (path->locks[level + 1]) {
5430 level++;
5431 continue;
5432 }
5433
5434 slot = btrfs_header_nritems(c) - 1;
5435 if (level == 0)
5436 btrfs_item_key_to_cpu(c, &cur_key, slot);
5437 else
5438 btrfs_node_key_to_cpu(c, &cur_key, slot);
5439
5440 orig_lowest = path->lowest_level;
5441 btrfs_release_path(path);
5442 path->lowest_level = level;
5443 ret = btrfs_search_slot(NULL, root, &cur_key, path,
5444 0, 0);
5445 path->lowest_level = orig_lowest;
5446 if (ret < 0)
5447 return ret;
5448
5449 c = path->nodes[level];
5450 slot = path->slots[level];
5451 if (ret == 0)
5452 slot++;
5453 goto next;
5454 }
5455
5456 if (level == 0)
5457 btrfs_item_key_to_cpu(c, key, slot);
5458 else {
5459 u64 gen = btrfs_node_ptr_generation(c, slot);
5460
5461 if (gen < min_trans) {
5462 slot++;
5463 goto next;
5464 }
5465 btrfs_node_key_to_cpu(c, key, slot);
5466 }
5467 return 0;
5468 }
5469 return 1;
5470 }
5471
5472 /*
5473 * search the tree again to find a leaf with greater keys
5474 * returns 0 if it found something or 1 if there are no greater leaves.
5475 * returns < 0 on io errors.
5476 */
5477 int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
5478 {
5479 return btrfs_next_old_leaf(root, path, 0);
5480 }
5481
5482 int btrfs_next_old_leaf(struct btrfs_root *root, struct btrfs_path *path,
5483 u64 time_seq)
5484 {
5485 int slot;
5486 int level;
5487 struct extent_buffer *c;
5488 struct extent_buffer *next;
5489 struct btrfs_key key;
5490 u32 nritems;
5491 int ret;
5492 int old_spinning = path->leave_spinning;
5493 int next_rw_lock = 0;
5494
5495 nritems = btrfs_header_nritems(path->nodes[0]);
5496 if (nritems == 0)
5497 return 1;
5498
5499 btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
5500 again:
5501 level = 1;
5502 next = NULL;
5503 next_rw_lock = 0;
5504 btrfs_release_path(path);
5505
5506 path->keep_locks = 1;
5507 path->leave_spinning = 1;
5508
5509 if (time_seq)
5510 ret = btrfs_search_old_slot(root, &key, path, time_seq);
5511 else
5512 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5513 path->keep_locks = 0;
5514
5515 if (ret < 0)
5516 return ret;
5517
5518 nritems = btrfs_header_nritems(path->nodes[0]);
5519 /*
5520 * by releasing the path above we dropped all our locks. A balance
5521 * could have added more items next to the key that used to be
5522 * at the very end of the block. So, check again here and
5523 * advance the path if there are now more items available.
5524 */
5525 if (nritems > 0 && path->slots[0] < nritems - 1) {
5526 if (ret == 0)
5527 path->slots[0]++;
5528 ret = 0;
5529 goto done;
5530 }
5531
5532 while (level < BTRFS_MAX_LEVEL) {
5533 if (!path->nodes[level]) {
5534 ret = 1;
5535 goto done;
5536 }
5537
5538 slot = path->slots[level] + 1;
5539 c = path->nodes[level];
5540 if (slot >= btrfs_header_nritems(c)) {
5541 level++;
5542 if (level == BTRFS_MAX_LEVEL) {
5543 ret = 1;
5544 goto done;
5545 }
5546 continue;
5547 }
5548
5549 if (next) {
5550 btrfs_tree_unlock_rw(next, next_rw_lock);
5551 free_extent_buffer(next);
5552 }
5553
5554 next = c;
5555 next_rw_lock = path->locks[level];
5556 ret = read_block_for_search(NULL, root, path, &next, level,
5557 slot, &key, 0);
5558 if (ret == -EAGAIN)
5559 goto again;
5560
5561 if (ret < 0) {
5562 btrfs_release_path(path);
5563 goto done;
5564 }
5565
5566 if (!path->skip_locking) {
5567 ret = btrfs_try_tree_read_lock(next);
5568 if (!ret && time_seq) {
5569 /*
5570 * If we don't get the lock, we may be racing
5571 * with push_leaf_left, which holds that lock while
5572 * waiting for the leaf we currently have
5573 * locked. To resolve this, we give up
5574 * our lock and cycle.
5575 */
5576 free_extent_buffer(next);
5577 btrfs_release_path(path);
5578 cond_resched();
5579 goto again;
5580 }
5581 if (!ret) {
5582 btrfs_set_path_blocking(path);
5583 btrfs_tree_read_lock(next);
5584 btrfs_clear_path_blocking(path, next,
5585 BTRFS_READ_LOCK);
5586 }
5587 next_rw_lock = BTRFS_READ_LOCK;
5588 }
5589 break;
5590 }
5591 path->slots[level] = slot;
5592 while (1) {
5593 level--;
5594 c = path->nodes[level];
5595 if (path->locks[level])
5596 btrfs_tree_unlock_rw(c, path->locks[level]);
5597
5598 free_extent_buffer(c);
5599 path->nodes[level] = next;
5600 path->slots[level] = 0;
5601 if (!path->skip_locking)
5602 path->locks[level] = next_rw_lock;
5603 if (!level)
5604 break;
5605
5606 ret = read_block_for_search(NULL, root, path, &next, level,
5607 0, &key, 0);
5608 if (ret == -EAGAIN)
5609 goto again;
5610
5611 if (ret < 0) {
5612 btrfs_release_path(path);
5613 goto done;
5614 }
5615
5616 if (!path->skip_locking) {
5617 ret = btrfs_try_tree_read_lock(next);
5618 if (!ret) {
5619 btrfs_set_path_blocking(path);
5620 btrfs_tree_read_lock(next);
5621 btrfs_clear_path_blocking(path, next,
5622 BTRFS_READ_LOCK);
5623 }
5624 next_rw_lock = BTRFS_READ_LOCK;
5625 }
5626 }
5627 ret = 0;
5628 done:
5629 unlock_up(path, 0, 1, 0, NULL);
5630 path->leave_spinning = old_spinning;
5631 if (!old_spinning)
5632 btrfs_set_path_blocking(path);
5633
5634 return ret;
5635 }
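/*
 * Editor's illustrative sketch, not part of the original file: the common
 * "visit every item" loop built from btrfs_search_slot() plus
 * btrfs_next_leaf().  The starting key and the per-item work are
 * placeholders.
 *
 *	key.objectid = 0;
 *	key.type = 0;
 *	key.offset = 0;
 *	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
 *	if (ret < 0)
 *		goto out;
 *	while (1) {
 *		struct extent_buffer *leaf = path->nodes[0];
 *
 *		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
 *			ret = btrfs_next_leaf(root, path);
 *			if (ret)
 *				break;
 *			continue;
 *		}
 *		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
 *		inspect the item here
 *		path->slots[0]++;
 *	}
 *	btrfs_release_path(path);
 */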
5636
5637 /*
5638 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
5639 * searching until it gets past min_objectid or finds an item of 'type'
5640 *
5641 * returns 0 if something is found, 1 if nothing was found and < 0 on error
5642 */
5643 int btrfs_previous_item(struct btrfs_root *root,
5644 struct btrfs_path *path, u64 min_objectid,
5645 int type)
5646 {
5647 struct btrfs_key found_key;
5648 struct extent_buffer *leaf;
5649 u32 nritems;
5650 int ret;
5651
5652 while (1) {
5653 if (path->slots[0] == 0) {
5654 btrfs_set_path_blocking(path);
5655 ret = btrfs_prev_leaf(root, path);
5656 if (ret != 0)
5657 return ret;
5658 } else {
5659 path->slots[0]--;
5660 }
5661 leaf = path->nodes[0];
5662 nritems = btrfs_header_nritems(leaf);
5663 if (nritems == 0)
5664 return 1;
5665 if (path->slots[0] == nritems)
5666 path->slots[0]--;
5667
5668 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
5669 if (found_key.objectid < min_objectid)
5670 break;
5671 if (found_key.type == type)
5672 return 0;
5673 if (found_key.objectid == min_objectid &&
5674 found_key.type < type)
5675 break;
5676 }
5677 return 1;
5678 }
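/*
 * Editor's illustrative sketch, not part of the original file: stepping
 * backwards from a position set up by an earlier search to the previous
 * item of a given type, as the extent and chunk code does.  The objectid
 * bound and the item type are placeholders.
 *
 *	ret = btrfs_previous_item(root, path, objectid, BTRFS_EXTENT_ITEM_KEY);
 *	if (ret == 0)
 *		btrfs_item_key_to_cpu(path->nodes[0], &found_key,
 *				      path->slots[0]);
 */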