btrfs: Drop unused function btrfs_abort_devices()
[deliverable/linux.git] / fs / btrfs / ctree.c
/*
 * Copyright (C) 2007,2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "print-tree.h"
#include "locking.h"

static int split_node(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_path *path, int level);
static int split_leaf(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *ins_key,
		      struct btrfs_path *path, int data_size, int extend);
static int push_node_left(struct btrfs_trans_handle *trans,
			  struct btrfs_root *root, struct extent_buffer *dst,
			  struct extent_buffer *src, int empty);
static int balance_node_right(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      struct extent_buffer *dst_buf,
			      struct extent_buffer *src_buf);
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot);

struct btrfs_path *btrfs_alloc_path(void)
{
	struct btrfs_path *path;
	path = kmem_cache_zalloc(btrfs_path_cachep, GFP_NOFS);
	return path;
}

/*
 * set all locked nodes in the path to blocking locks.  This should
 * be done before scheduling
 */
noinline void btrfs_set_path_blocking(struct btrfs_path *p)
{
	int i;
	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		if (!p->nodes[i] || !p->locks[i])
			continue;
		btrfs_set_lock_blocking_rw(p->nodes[i], p->locks[i]);
		if (p->locks[i] == BTRFS_READ_LOCK)
			p->locks[i] = BTRFS_READ_LOCK_BLOCKING;
		else if (p->locks[i] == BTRFS_WRITE_LOCK)
			p->locks[i] = BTRFS_WRITE_LOCK_BLOCKING;
	}
}

/*
 * reset all the locked nodes in the path to spinning locks.
 *
 * held is used to keep lockdep happy, when lockdep is enabled
 * we set held to a blocking lock before we go around and
 * retake all the spinlocks in the path.  You can safely use NULL
 * for held
 */
noinline void btrfs_clear_path_blocking(struct btrfs_path *p,
					struct extent_buffer *held, int held_rw)
{
	int i;

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* lockdep really cares that we take all of these spinlocks
	 * in the right order.  If any of the locks in the path are not
	 * currently blocking, it is going to complain.  So, make really
	 * really sure by forcing the path to blocking before we clear
	 * the path blocking.
	 */
	if (held) {
		btrfs_set_lock_blocking_rw(held, held_rw);
		if (held_rw == BTRFS_WRITE_LOCK)
			held_rw = BTRFS_WRITE_LOCK_BLOCKING;
		else if (held_rw == BTRFS_READ_LOCK)
			held_rw = BTRFS_READ_LOCK_BLOCKING;
	}
	btrfs_set_path_blocking(p);
#endif

	for (i = BTRFS_MAX_LEVEL - 1; i >= 0; i--) {
		if (p->nodes[i] && p->locks[i]) {
			btrfs_clear_lock_blocking_rw(p->nodes[i], p->locks[i]);
			if (p->locks[i] == BTRFS_WRITE_LOCK_BLOCKING)
				p->locks[i] = BTRFS_WRITE_LOCK;
			else if (p->locks[i] == BTRFS_READ_LOCK_BLOCKING)
				p->locks[i] = BTRFS_READ_LOCK;
		}
	}

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (held)
		btrfs_clear_lock_blocking_rw(held, held_rw);
#endif
}

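/*
 * Illustrative sketch, not part of the original file: the usual pattern
 * around the two helpers above.  Before doing anything that may sleep
 * while tree locks are held (reading blocks from disk, GFP_NOFS
 * allocations), the path is switched to blocking locks; afterwards it
 * is switched back to spinning locks.  The function name is made up
 * for the example.
 */
static __maybe_unused void example_blocking_toggle(struct btrfs_path *p)
{
	btrfs_set_path_blocking(p);		/* safe to schedule from here */
	/* ... sleeping work such as read_tree_block() would go here ... */
	btrfs_clear_path_blocking(p, NULL, 0);	/* back to spinning locks */
}
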
/* this also releases the path */
void btrfs_free_path(struct btrfs_path *p)
{
	if (!p)
		return;
	btrfs_release_path(p);
	kmem_cache_free(btrfs_path_cachep, p);
}

/*
 * path release drops references on the extent buffers in the path
 * and it drops any locks held by this path
 *
 * It is safe to call this on paths where no locks or extent buffers
 * are held.
 */
noinline void btrfs_release_path(struct btrfs_path *p)
{
	int i;

	for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
		p->slots[i] = 0;
		if (!p->nodes[i])
			continue;
		if (p->locks[i]) {
			btrfs_tree_unlock_rw(p->nodes[i], p->locks[i]);
			p->locks[i] = 0;
		}
		free_extent_buffer(p->nodes[i]);
		p->nodes[i] = NULL;
	}
}

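/*
 * Illustrative sketch, not part of the original file: the typical
 * lifecycle of a btrfs_path.  Allocate, search, read the result out of
 * the leaf, then free (freeing also releases locks and references).
 * The function name is made up for the example.
 */
static __maybe_unused int example_path_lifecycle(struct btrfs_root *root,
						 struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* read-only lookup: no transaction, ins_len == 0, cow == 0 */
	ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
	if (ret == 0) {
		/* exact match is at path->nodes[0], path->slots[0] */
	} else if (ret > 0) {
		/* not found, the slot points at the insertion position */
	}

	btrfs_free_path(path);
	return ret < 0 ? ret : 0;
}
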
/*
 * safely gets a reference on the root node of a tree.  A lock
 * is not taken, so a concurrent writer may put a different node
 * at the root of the tree.  See btrfs_lock_root_node for the
 * looping required.
 *
 * The extent buffer returned by this has a reference taken, so
 * it won't disappear.  It may stop being the root of the tree
 * at any time because there are no locks held.
 */
struct extent_buffer *btrfs_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		rcu_read_lock();
		eb = rcu_dereference(root->node);

		/*
		 * RCU really hurts here, we could free up the root node because
		 * it was cow'ed but we may not get the new root node yet so do
		 * the inc_not_zero dance and if it doesn't work then
		 * synchronize_rcu and try again.
		 */
		if (atomic_inc_not_zero(&eb->refs)) {
			rcu_read_unlock();
			break;
		}
		rcu_read_unlock();
		synchronize_rcu();
	}
	return eb;
}

/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

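/*
 * Illustrative sketch, not part of the original file: a caller that
 * needs a stable view of the current root takes a locked reference,
 * works on it, then drops the lock and the reference.  The function
 * name is made up for the example.
 */
static __maybe_unused void example_peek_root(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	eb = btrfs_lock_root_node(root);
	/* eb is guaranteed to stay the root while the lock is held */
	btrfs_tree_unlock(eb);
	free_extent_buffer(eb);
}
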
/* loop around taking references on and locking the root node of the
 * tree until you end up with a lock on the root.  A locked buffer
 * is returned, with a reference held.
 */
struct extent_buffer *btrfs_read_lock_root_node(struct btrfs_root *root)
{
	struct extent_buffer *eb;

	while (1) {
		eb = btrfs_root_node(root);
		btrfs_tree_read_lock(eb);
		if (eb == root->node)
			break;
		btrfs_tree_read_unlock(eb);
		free_extent_buffer(eb);
	}
	return eb;
}

/* cowonly root (everything not a reference counted cow subvolume), just get
 * put onto a simple dirty list.  transaction.c walks this to make sure they
 * get properly updated on disk.
 */
static void add_root_to_dirty_list(struct btrfs_root *root)
{
	spin_lock(&root->fs_info->trans_lock);
	if (root->track_dirty && list_empty(&root->dirty_list)) {
		list_add(&root->dirty_list,
			 &root->fs_info->dirty_cowonly_roots);
	}
	spin_unlock(&root->fs_info->trans_lock);
}

d352ac68
CM
231/*
232 * used by snapshot creation to make a copy of a root for a tree with
233 * a given objectid. The buffer with the new root node is returned in
234 * cow_ret, and this func returns zero on success or a negative error code.
235 */
be20aa9d
CM
236int btrfs_copy_root(struct btrfs_trans_handle *trans,
237 struct btrfs_root *root,
238 struct extent_buffer *buf,
239 struct extent_buffer **cow_ret, u64 new_root_objectid)
240{
241 struct extent_buffer *cow;
be20aa9d
CM
242 int ret = 0;
243 int level;
5d4f98a2 244 struct btrfs_disk_key disk_key;
be20aa9d
CM
245
246 WARN_ON(root->ref_cows && trans->transid !=
247 root->fs_info->running_transaction->transid);
248 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
249
250 level = btrfs_header_level(buf);
5d4f98a2
YZ
251 if (level == 0)
252 btrfs_item_key(buf, &disk_key, 0);
253 else
254 btrfs_node_key(buf, &disk_key, 0);
31840ae1 255
5d4f98a2
YZ
256 cow = btrfs_alloc_free_block(trans, root, buf->len, 0,
257 new_root_objectid, &disk_key, level,
66d7e7f0 258 buf->start, 0, 1);
5d4f98a2 259 if (IS_ERR(cow))
be20aa9d
CM
260 return PTR_ERR(cow);
261
262 copy_extent_buffer(cow, buf, 0, 0, cow->len);
263 btrfs_set_header_bytenr(cow, cow->start);
264 btrfs_set_header_generation(cow, trans->transid);
5d4f98a2
YZ
265 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
266 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
267 BTRFS_HEADER_FLAG_RELOC);
268 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
269 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
270 else
271 btrfs_set_header_owner(cow, new_root_objectid);
be20aa9d 272
2b82032c
YZ
273 write_extent_buffer(cow, root->fs_info->fsid,
274 (unsigned long)btrfs_header_fsid(cow),
275 BTRFS_FSID_SIZE);
276
be20aa9d 277 WARN_ON(btrfs_header_generation(buf) > trans->transid);
5d4f98a2 278 if (new_root_objectid == BTRFS_TREE_RELOC_OBJECTID)
66d7e7f0 279 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
5d4f98a2 280 else
66d7e7f0 281 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
4aec2b52 282
be20aa9d
CM
283 if (ret)
284 return ret;
285
286 btrfs_mark_buffer_dirty(cow);
287 *cow_ret = cow;
288 return 0;
289}
290
/*
 * check if the tree block can be shared by multiple trees
 */
int btrfs_block_can_be_shared(struct btrfs_root *root,
			      struct extent_buffer *buf)
{
	/*
	 * Tree blocks not in reference counted trees and tree roots
	 * are never shared.  If a block was allocated after the last
	 * snapshot and the block was not allocated by tree relocation,
	 * we know the block is not shared.
	 */
	if (root->ref_cows &&
	    buf != root->node && buf != root->commit_root &&
	    (btrfs_header_generation(buf) <=
	     btrfs_root_last_snapshot(&root->root_item) ||
	     btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)))
		return 1;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (root->ref_cows &&
	    btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
		return 1;
#endif
	return 0;
}

317static noinline int update_ref_for_cow(struct btrfs_trans_handle *trans,
318 struct btrfs_root *root,
319 struct extent_buffer *buf,
f0486c68
YZ
320 struct extent_buffer *cow,
321 int *last_ref)
5d4f98a2
YZ
322{
323 u64 refs;
324 u64 owner;
325 u64 flags;
326 u64 new_flags = 0;
327 int ret;
328
	/*
	 * Backrefs update rules:
	 *
	 * Always use full backrefs for extent pointers in tree blocks
	 * allocated by tree relocation.
	 *
	 * If a shared tree block is no longer referenced by its owner
	 * tree (btrfs_header_owner(buf) == root->root_key.objectid),
	 * use full backrefs for extent pointers in the tree block.
	 *
	 * If a tree block is being relocated
	 * (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID),
	 * use full backrefs for extent pointers in the tree block.
	 * The reason for this is that some operations (such as drop tree)
	 * are only allowed for blocks that use full backrefs.
	 */
345
346 if (btrfs_block_can_be_shared(root, buf)) {
347 ret = btrfs_lookup_extent_info(trans, root, buf->start,
348 buf->len, &refs, &flags);
be1a5564
MF
349 if (ret)
350 return ret;
e5df9573
MF
351 if (refs == 0) {
352 ret = -EROFS;
353 btrfs_std_error(root->fs_info, ret);
354 return ret;
355 }
5d4f98a2
YZ
356 } else {
357 refs = 1;
358 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
359 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
360 flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
361 else
362 flags = 0;
363 }
364
365 owner = btrfs_header_owner(buf);
366 BUG_ON(owner == BTRFS_TREE_RELOC_OBJECTID &&
367 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
368
369 if (refs > 1) {
370 if ((owner == root->root_key.objectid ||
371 root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) &&
372 !(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF)) {
66d7e7f0 373 ret = btrfs_inc_ref(trans, root, buf, 1, 1);
79787eaa 374 BUG_ON(ret); /* -ENOMEM */
5d4f98a2
YZ
375
376 if (root->root_key.objectid ==
377 BTRFS_TREE_RELOC_OBJECTID) {
66d7e7f0 378 ret = btrfs_dec_ref(trans, root, buf, 0, 1);
79787eaa 379 BUG_ON(ret); /* -ENOMEM */
66d7e7f0 380 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
79787eaa 381 BUG_ON(ret); /* -ENOMEM */
5d4f98a2
YZ
382 }
383 new_flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
384 } else {
385
386 if (root->root_key.objectid ==
387 BTRFS_TREE_RELOC_OBJECTID)
66d7e7f0 388 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
5d4f98a2 389 else
66d7e7f0 390 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
79787eaa 391 BUG_ON(ret); /* -ENOMEM */
5d4f98a2
YZ
392 }
393 if (new_flags != 0) {
394 ret = btrfs_set_disk_extent_flags(trans, root,
395 buf->start,
396 buf->len,
397 new_flags, 0);
be1a5564
MF
398 if (ret)
399 return ret;
5d4f98a2
YZ
400 }
401 } else {
402 if (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
403 if (root->root_key.objectid ==
404 BTRFS_TREE_RELOC_OBJECTID)
66d7e7f0 405 ret = btrfs_inc_ref(trans, root, cow, 1, 1);
5d4f98a2 406 else
66d7e7f0 407 ret = btrfs_inc_ref(trans, root, cow, 0, 1);
79787eaa 408 BUG_ON(ret); /* -ENOMEM */
66d7e7f0 409 ret = btrfs_dec_ref(trans, root, buf, 1, 1);
79787eaa 410 BUG_ON(ret); /* -ENOMEM */
5d4f98a2
YZ
411 }
412 clean_tree_block(trans, root, buf);
f0486c68 413 *last_ref = 1;
5d4f98a2
YZ
414 }
415 return 0;
416}
417
/*
 * does the dirty work in cow of a single block.  The parent block (if
 * supplied) is updated to point to the new cow copy.  The new buffer is marked
 * dirty and returned locked.  If you modify the block it needs to be marked
 * dirty again.
 *
 * search_start -- an allocation hint for the new block
 *
 * empty_size -- a hint that you plan on doing more cow.  This is the size in
 * bytes the allocator should try to find free next to the block it returns.
 * This is just a hint and may be ignored by the allocator.
 */
d397712b 430static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
5f39d397
CM
431 struct btrfs_root *root,
432 struct extent_buffer *buf,
433 struct extent_buffer *parent, int parent_slot,
434 struct extent_buffer **cow_ret,
9fa8cfe7 435 u64 search_start, u64 empty_size)
02217ed2 436{
5d4f98a2 437 struct btrfs_disk_key disk_key;
5f39d397 438 struct extent_buffer *cow;
be1a5564 439 int level, ret;
f0486c68 440 int last_ref = 0;
925baedd 441 int unlock_orig = 0;
5d4f98a2 442 u64 parent_start;
7bb86316 443
925baedd
CM
444 if (*cow_ret == buf)
445 unlock_orig = 1;
446
b9447ef8 447 btrfs_assert_tree_locked(buf);
925baedd 448
7bb86316
CM
449 WARN_ON(root->ref_cows && trans->transid !=
450 root->fs_info->running_transaction->transid);
6702ed49 451 WARN_ON(root->ref_cows && trans->transid != root->last_trans);
5f39d397 452
7bb86316 453 level = btrfs_header_level(buf);
31840ae1 454
5d4f98a2
YZ
455 if (level == 0)
456 btrfs_item_key(buf, &disk_key, 0);
457 else
458 btrfs_node_key(buf, &disk_key, 0);
459
460 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
461 if (parent)
462 parent_start = parent->start;
463 else
464 parent_start = 0;
465 } else
466 parent_start = 0;
467
468 cow = btrfs_alloc_free_block(trans, root, buf->len, parent_start,
469 root->root_key.objectid, &disk_key,
66d7e7f0 470 level, search_start, empty_size, 1);
54aa1f4d
CM
471 if (IS_ERR(cow))
472 return PTR_ERR(cow);
6702ed49 473
b4ce94de
CM
474 /* cow is set to blocking by btrfs_init_new_buffer */
475
5f39d397 476 copy_extent_buffer(cow, buf, 0, 0, cow->len);
db94535d 477 btrfs_set_header_bytenr(cow, cow->start);
5f39d397 478 btrfs_set_header_generation(cow, trans->transid);
5d4f98a2
YZ
479 btrfs_set_header_backref_rev(cow, BTRFS_MIXED_BACKREF_REV);
480 btrfs_clear_header_flag(cow, BTRFS_HEADER_FLAG_WRITTEN |
481 BTRFS_HEADER_FLAG_RELOC);
482 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
483 btrfs_set_header_flag(cow, BTRFS_HEADER_FLAG_RELOC);
484 else
485 btrfs_set_header_owner(cow, root->root_key.objectid);
6702ed49 486
2b82032c
YZ
487 write_extent_buffer(cow, root->fs_info->fsid,
488 (unsigned long)btrfs_header_fsid(cow),
489 BTRFS_FSID_SIZE);
490
be1a5564 491 ret = update_ref_for_cow(trans, root, buf, cow, &last_ref);
b68dc2a9 492 if (ret) {
79787eaa 493 btrfs_abort_transaction(trans, root, ret);
b68dc2a9
MF
494 return ret;
495 }
1a40e23b 496
3fd0a558
YZ
497 if (root->ref_cows)
498 btrfs_reloc_cow_block(trans, root, buf, cow);
499
02217ed2 500 if (buf == root->node) {
925baedd 501 WARN_ON(parent && parent != buf);
5d4f98a2
YZ
502 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID ||
503 btrfs_header_backref_rev(buf) < BTRFS_MIXED_BACKREF_REV)
504 parent_start = buf->start;
505 else
506 parent_start = 0;
925baedd 507
5f39d397 508 extent_buffer_get(cow);
240f62c8 509 rcu_assign_pointer(root->node, cow);
925baedd 510
f0486c68 511 btrfs_free_tree_block(trans, root, buf, parent_start,
66d7e7f0 512 last_ref, 1);
5f39d397 513 free_extent_buffer(buf);
0b86a832 514 add_root_to_dirty_list(root);
02217ed2 515 } else {
5d4f98a2
YZ
516 if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
517 parent_start = parent->start;
518 else
519 parent_start = 0;
520
521 WARN_ON(trans->transid != btrfs_header_generation(parent));
5f39d397 522 btrfs_set_node_blockptr(parent, parent_slot,
db94535d 523 cow->start);
74493f7a
CM
524 btrfs_set_node_ptr_generation(parent, parent_slot,
525 trans->transid);
d6025579 526 btrfs_mark_buffer_dirty(parent);
f0486c68 527 btrfs_free_tree_block(trans, root, buf, parent_start,
66d7e7f0 528 last_ref, 1);
02217ed2 529 }
925baedd
CM
530 if (unlock_orig)
531 btrfs_tree_unlock(buf);
3083ee2e 532 free_extent_buffer_stale(buf);
ccd467d6 533 btrfs_mark_buffer_dirty(cow);
2c90e5d6 534 *cow_ret = cow;
02217ed2
CM
535 return 0;
536}
537
5d4f98a2
YZ
538static inline int should_cow_block(struct btrfs_trans_handle *trans,
539 struct btrfs_root *root,
540 struct extent_buffer *buf)
541{
f1ebcc74
LB
542 /* ensure we can see the force_cow */
543 smp_rmb();
544
	/*
	 * We do not need to cow a block if
	 * 1) this block is not created or changed in this transaction;
	 * 2) this block does not belong to TREE_RELOC tree;
	 * 3) the root is not forced COW.
	 *
	 * What is forced COW:
	 * when we create a snapshot during committing the transaction,
	 * after we've finished copying the src root, we must COW the shared
	 * block to ensure the metadata consistency.
	 */
5d4f98a2
YZ
556 if (btrfs_header_generation(buf) == trans->transid &&
557 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN) &&
558 !(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID &&
f1ebcc74
LB
559 btrfs_header_flag(buf, BTRFS_HEADER_FLAG_RELOC)) &&
560 !root->force_cow)
5d4f98a2
YZ
561 return 0;
562 return 1;
563}
564
d352ac68
CM
565/*
566 * cows a single block, see __btrfs_cow_block for the real work.
567 * This version of it has extra checks so that a block isn't cow'd more than
568 * once per transaction, as long as it hasn't been written yet
569 */
d397712b 570noinline int btrfs_cow_block(struct btrfs_trans_handle *trans,
5f39d397
CM
571 struct btrfs_root *root, struct extent_buffer *buf,
572 struct extent_buffer *parent, int parent_slot,
9fa8cfe7 573 struct extent_buffer **cow_ret)
6702ed49
CM
574{
575 u64 search_start;
f510cfec 576 int ret;
dc17ff8f 577
6702ed49 578 if (trans->transaction != root->fs_info->running_transaction) {
d397712b
CM
579 printk(KERN_CRIT "trans %llu running %llu\n",
580 (unsigned long long)trans->transid,
581 (unsigned long long)
6702ed49
CM
582 root->fs_info->running_transaction->transid);
583 WARN_ON(1);
584 }
585 if (trans->transid != root->fs_info->generation) {
d397712b
CM
586 printk(KERN_CRIT "trans %llu running %llu\n",
587 (unsigned long long)trans->transid,
588 (unsigned long long)root->fs_info->generation);
6702ed49
CM
589 WARN_ON(1);
590 }
dc17ff8f 591
5d4f98a2 592 if (!should_cow_block(trans, root, buf)) {
6702ed49
CM
593 *cow_ret = buf;
594 return 0;
595 }
c487685d 596
0b86a832 597 search_start = buf->start & ~((u64)(1024 * 1024 * 1024) - 1);
b4ce94de
CM
598
599 if (parent)
600 btrfs_set_lock_blocking(parent);
601 btrfs_set_lock_blocking(buf);
602
f510cfec 603 ret = __btrfs_cow_block(trans, root, buf, parent,
9fa8cfe7 604 parent_slot, cow_ret, search_start, 0);
1abe9b8a 605
606 trace_btrfs_cow_block(root, buf, *cow_ret);
607
f510cfec 608 return ret;
6702ed49
CM
609}
610
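/*
 * Illustrative sketch, not part of the original file: before dirtying
 * the leaf a path points to, callers COW it through its parent node;
 * path->nodes[0] is replaced by the new copy, which is returned locked
 * and dirty.  Assumes the path already holds write locks on levels 0
 * and 1 and that the leaf is not the tree root; the function name is
 * made up for the example.
 */
static __maybe_unused int example_cow_leaf(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path)
{
	return btrfs_cow_block(trans, root, path->nodes[0], path->nodes[1],
			       path->slots[1], &path->nodes[0]);
}
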
d352ac68
CM
611/*
612 * helper function for defrag to decide if two blocks pointed to by a
613 * node are actually close by
614 */
6b80053d 615static int close_blocks(u64 blocknr, u64 other, u32 blocksize)
6702ed49 616{
6b80053d 617 if (blocknr < other && other - (blocknr + blocksize) < 32768)
6702ed49 618 return 1;
6b80053d 619 if (blocknr > other && blocknr - (other + blocksize) < 32768)
6702ed49
CM
620 return 1;
621 return 0;
622}
623
081e9573
CM
624/*
625 * compare two keys in a memcmp fashion
626 */
627static int comp_keys(struct btrfs_disk_key *disk, struct btrfs_key *k2)
628{
629 struct btrfs_key k1;
630
631 btrfs_disk_key_to_cpu(&k1, disk);
632
20736aba 633 return btrfs_comp_cpu_keys(&k1, k2);
081e9573
CM
634}
635
f3465ca4
JB
636/*
637 * same as comp_keys only with two btrfs_key's
638 */
5d4f98a2 639int btrfs_comp_cpu_keys(struct btrfs_key *k1, struct btrfs_key *k2)
f3465ca4
JB
640{
641 if (k1->objectid > k2->objectid)
642 return 1;
643 if (k1->objectid < k2->objectid)
644 return -1;
645 if (k1->type > k2->type)
646 return 1;
647 if (k1->type < k2->type)
648 return -1;
649 if (k1->offset > k2->offset)
650 return 1;
651 if (k1->offset < k2->offset)
652 return -1;
653 return 0;
654}
081e9573 655
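/*
 * Illustrative sketch, not part of the original file: keys sort by
 * objectid, then type, then offset, so btrfs_comp_cpu_keys() behaves
 * like a three-field memcmp and returns <0, 0 or >0.  The function
 * name is made up for the example.
 */
static __maybe_unused void example_key_order(void)
{
	struct btrfs_key a = { .objectid = 256, .type = BTRFS_INODE_ITEM_KEY,
			       .offset = 0 };
	struct btrfs_key b = { .objectid = 256, .type = BTRFS_INODE_REF_KEY,
			       .offset = 0 };

	/* same objectid; BTRFS_INODE_ITEM_KEY sorts before BTRFS_INODE_REF_KEY */
	WARN_ON(btrfs_comp_cpu_keys(&a, &b) >= 0);
}
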
d352ac68
CM
656/*
657 * this is used by the defrag code to go through all the
658 * leaves pointed to by a node and reallocate them so that
659 * disk order is close to key order
660 */
6702ed49 661int btrfs_realloc_node(struct btrfs_trans_handle *trans,
5f39d397 662 struct btrfs_root *root, struct extent_buffer *parent,
a6b6e75e
CM
663 int start_slot, int cache_only, u64 *last_ret,
664 struct btrfs_key *progress)
6702ed49 665{
6b80053d 666 struct extent_buffer *cur;
6702ed49 667 u64 blocknr;
ca7a79ad 668 u64 gen;
e9d0b13b
CM
669 u64 search_start = *last_ret;
670 u64 last_block = 0;
6702ed49
CM
671 u64 other;
672 u32 parent_nritems;
6702ed49
CM
673 int end_slot;
674 int i;
675 int err = 0;
f2183bde 676 int parent_level;
6b80053d
CM
677 int uptodate;
678 u32 blocksize;
081e9573
CM
679 int progress_passed = 0;
680 struct btrfs_disk_key disk_key;
6702ed49 681
5708b959
CM
682 parent_level = btrfs_header_level(parent);
683 if (cache_only && parent_level != 1)
684 return 0;
685
d397712b 686 if (trans->transaction != root->fs_info->running_transaction)
6702ed49 687 WARN_ON(1);
d397712b 688 if (trans->transid != root->fs_info->generation)
6702ed49 689 WARN_ON(1);
86479a04 690
6b80053d 691 parent_nritems = btrfs_header_nritems(parent);
6b80053d 692 blocksize = btrfs_level_size(root, parent_level - 1);
6702ed49
CM
693 end_slot = parent_nritems;
694
695 if (parent_nritems == 1)
696 return 0;
697
b4ce94de
CM
698 btrfs_set_lock_blocking(parent);
699
6702ed49
CM
700 for (i = start_slot; i < end_slot; i++) {
701 int close = 1;
a6b6e75e 702
081e9573
CM
703 btrfs_node_key(parent, &disk_key, i);
704 if (!progress_passed && comp_keys(&disk_key, progress) < 0)
705 continue;
706
707 progress_passed = 1;
6b80053d 708 blocknr = btrfs_node_blockptr(parent, i);
ca7a79ad 709 gen = btrfs_node_ptr_generation(parent, i);
e9d0b13b
CM
710 if (last_block == 0)
711 last_block = blocknr;
5708b959 712
6702ed49 713 if (i > 0) {
6b80053d
CM
714 other = btrfs_node_blockptr(parent, i - 1);
715 close = close_blocks(blocknr, other, blocksize);
6702ed49 716 }
0ef3e66b 717 if (!close && i < end_slot - 2) {
6b80053d
CM
718 other = btrfs_node_blockptr(parent, i + 1);
719 close = close_blocks(blocknr, other, blocksize);
6702ed49 720 }
e9d0b13b
CM
721 if (close) {
722 last_block = blocknr;
6702ed49 723 continue;
e9d0b13b 724 }
6702ed49 725
6b80053d
CM
726 cur = btrfs_find_tree_block(root, blocknr, blocksize);
727 if (cur)
b9fab919 728 uptodate = btrfs_buffer_uptodate(cur, gen, 0);
6b80053d
CM
729 else
730 uptodate = 0;
5708b959 731 if (!cur || !uptodate) {
6702ed49 732 if (cache_only) {
6b80053d 733 free_extent_buffer(cur);
6702ed49
CM
734 continue;
735 }
6b80053d
CM
736 if (!cur) {
737 cur = read_tree_block(root, blocknr,
ca7a79ad 738 blocksize, gen);
97d9a8a4
TI
739 if (!cur)
740 return -EIO;
6b80053d 741 } else if (!uptodate) {
ca7a79ad 742 btrfs_read_buffer(cur, gen);
f2183bde 743 }
6702ed49 744 }
e9d0b13b 745 if (search_start == 0)
6b80053d 746 search_start = last_block;
e9d0b13b 747
e7a84565 748 btrfs_tree_lock(cur);
b4ce94de 749 btrfs_set_lock_blocking(cur);
6b80053d 750 err = __btrfs_cow_block(trans, root, cur, parent, i,
e7a84565 751 &cur, search_start,
6b80053d 752 min(16 * blocksize,
9fa8cfe7 753 (end_slot - i) * blocksize));
252c38f0 754 if (err) {
e7a84565 755 btrfs_tree_unlock(cur);
6b80053d 756 free_extent_buffer(cur);
6702ed49 757 break;
252c38f0 758 }
e7a84565
CM
759 search_start = cur->start;
760 last_block = cur->start;
f2183bde 761 *last_ret = search_start;
e7a84565
CM
762 btrfs_tree_unlock(cur);
763 free_extent_buffer(cur);
6702ed49
CM
764 }
765 return err;
766}
767
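/*
 * Illustrative sketch, not part of the original file: the defrag pass
 * above leaves a child block alone when its neighbour is already within
 * 32KB on disk; only "far" blocks are reallocated.  Assumes slot > 0;
 * the helper name is made up for the example.
 */
static __maybe_unused int example_needs_realloc(struct extent_buffer *parent,
						int slot, u32 blocksize)
{
	u64 blocknr = btrfs_node_blockptr(parent, slot);
	u64 other = btrfs_node_blockptr(parent, slot - 1);

	return !close_blocks(blocknr, other, blocksize);	/* 1 => realloc */
}
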
/*
 * The leaf data grows from end-to-front in the node.
 * This returns the address of the start of the last item,
 * which is the stop of the leaf data stack.
 */
static inline unsigned int leaf_data_end(struct btrfs_root *root,
					 struct extent_buffer *leaf)
{
	u32 nr = btrfs_header_nritems(leaf);
	if (nr == 0)
		return BTRFS_LEAF_DATA_SIZE(root);
	return btrfs_item_offset_nr(leaf, nr - 1);
}

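/*
 * Illustrative sketch, not part of the original file: item headers grow
 * from the front of a leaf while item data grows back from the end, so
 * the free space is the gap between the last item header and
 * leaf_data_end().  A helper with this meaning exists elsewhere in
 * btrfs; this version only sketches the arithmetic, and the name is
 * made up for the example.
 */
static __maybe_unused u32 example_leaf_free_space(struct btrfs_root *root,
						  struct extent_buffer *leaf)
{
	u32 nritems = btrfs_header_nritems(leaf);

	return leaf_data_end(root, leaf) -
	       nritems * sizeof(struct btrfs_item);
}
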
/*
 * search for key in the extent_buffer.  The items start at offset p,
 * and they are item_size apart.  There are 'max' items in p.
 *
 * the slot in the array is returned via slot, and it points to
 * the place where you would insert key if it is not found in
 * the array.
 *
 * slot may point to max if the key is bigger than all of the keys
 */
e02119d5
CM
793static noinline int generic_bin_search(struct extent_buffer *eb,
794 unsigned long p,
795 int item_size, struct btrfs_key *key,
796 int max, int *slot)
be0e5c09
CM
797{
798 int low = 0;
799 int high = max;
800 int mid;
801 int ret;
479965d6 802 struct btrfs_disk_key *tmp = NULL;
5f39d397
CM
803 struct btrfs_disk_key unaligned;
804 unsigned long offset;
5f39d397
CM
805 char *kaddr = NULL;
806 unsigned long map_start = 0;
807 unsigned long map_len = 0;
479965d6 808 int err;
be0e5c09 809
d397712b 810 while (low < high) {
be0e5c09 811 mid = (low + high) / 2;
5f39d397
CM
812 offset = p + mid * item_size;
813
a6591715 814 if (!kaddr || offset < map_start ||
5f39d397
CM
815 (offset + sizeof(struct btrfs_disk_key)) >
816 map_start + map_len) {
934d375b
CM
817
818 err = map_private_extent_buffer(eb, offset,
479965d6 819 sizeof(struct btrfs_disk_key),
a6591715 820 &kaddr, &map_start, &map_len);
479965d6
CM
821
822 if (!err) {
823 tmp = (struct btrfs_disk_key *)(kaddr + offset -
824 map_start);
825 } else {
826 read_extent_buffer(eb, &unaligned,
827 offset, sizeof(unaligned));
828 tmp = &unaligned;
829 }
5f39d397 830
5f39d397
CM
831 } else {
832 tmp = (struct btrfs_disk_key *)(kaddr + offset -
833 map_start);
834 }
be0e5c09
CM
835 ret = comp_keys(tmp, key);
836
837 if (ret < 0)
838 low = mid + 1;
839 else if (ret > 0)
840 high = mid;
841 else {
842 *slot = mid;
843 return 0;
844 }
845 }
846 *slot = low;
847 return 1;
848}
849
97571fd0
CM
850/*
851 * simple bin_search frontend that does the right thing for
852 * leaves vs nodes
853 */
5f39d397
CM
854static int bin_search(struct extent_buffer *eb, struct btrfs_key *key,
855 int level, int *slot)
be0e5c09 856{
f775738f 857 if (level == 0)
5f39d397
CM
858 return generic_bin_search(eb,
859 offsetof(struct btrfs_leaf, items),
0783fcfc 860 sizeof(struct btrfs_item),
5f39d397 861 key, btrfs_header_nritems(eb),
7518a238 862 slot);
f775738f 863 else
5f39d397
CM
864 return generic_bin_search(eb,
865 offsetof(struct btrfs_node, ptrs),
123abc88 866 sizeof(struct btrfs_key_ptr),
5f39d397 867 key, btrfs_header_nritems(eb),
7518a238 868 slot);
be0e5c09
CM
869}
870
5d4f98a2
YZ
871int btrfs_bin_search(struct extent_buffer *eb, struct btrfs_key *key,
872 int level, int *slot)
873{
874 return bin_search(eb, key, level, slot);
875}
876
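/*
 * Illustrative sketch, not part of the original file: bin_search()
 * returns 0 and fills *slot when the key is present, or 1 with *slot
 * pointing at the position where the key would be inserted.  The
 * function name is made up for the example.
 */
static __maybe_unused int example_find_slot(struct extent_buffer *leaf,
					    struct btrfs_key *key)
{
	int slot;
	int ret;

	ret = btrfs_bin_search(leaf, key, btrfs_header_level(leaf), &slot);
	if (ret == 0)
		return slot;	/* exact match */
	return -ENOENT;		/* key would be inserted at 'slot' */
}
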
f0486c68
YZ
877static void root_add_used(struct btrfs_root *root, u32 size)
878{
879 spin_lock(&root->accounting_lock);
880 btrfs_set_root_used(&root->root_item,
881 btrfs_root_used(&root->root_item) + size);
882 spin_unlock(&root->accounting_lock);
883}
884
885static void root_sub_used(struct btrfs_root *root, u32 size)
886{
887 spin_lock(&root->accounting_lock);
888 btrfs_set_root_used(&root->root_item,
889 btrfs_root_used(&root->root_item) - size);
890 spin_unlock(&root->accounting_lock);
891}
892
d352ac68
CM
893/* given a node and slot number, this reads the blocks it points to. The
894 * extent buffer is returned with a reference taken (but unlocked).
895 * NULL is returned on error.
896 */
e02119d5 897static noinline struct extent_buffer *read_node_slot(struct btrfs_root *root,
5f39d397 898 struct extent_buffer *parent, int slot)
bb803951 899{
ca7a79ad 900 int level = btrfs_header_level(parent);
bb803951
CM
901 if (slot < 0)
902 return NULL;
5f39d397 903 if (slot >= btrfs_header_nritems(parent))
bb803951 904 return NULL;
ca7a79ad
CM
905
906 BUG_ON(level == 0);
907
db94535d 908 return read_tree_block(root, btrfs_node_blockptr(parent, slot),
ca7a79ad
CM
909 btrfs_level_size(root, level - 1),
910 btrfs_node_ptr_generation(parent, slot));
bb803951
CM
911}
912
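/*
 * Illustrative sketch, not part of the original file: walk one level
 * down from a node by reading the child a slot points to.  The child
 * comes back with a reference but no lock, so the caller locks it
 * before modifying it.  The function name is made up for the example.
 */
static __maybe_unused struct extent_buffer *
example_read_child(struct btrfs_root *root, struct extent_buffer *node,
		   int slot)
{
	struct extent_buffer *child = read_node_slot(root, node, slot);

	if (child) {
		btrfs_tree_lock(child);
		btrfs_set_lock_blocking(child);
	}
	return child;
}
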
/*
 * node level balancing, used to make sure nodes are in proper order for
 * item deletion.  We balance from the top down, so we have to make sure
 * that a deletion won't leave a node completely empty later on.
 */
e02119d5 918static noinline int balance_level(struct btrfs_trans_handle *trans,
98ed5174
CM
919 struct btrfs_root *root,
920 struct btrfs_path *path, int level)
bb803951 921{
5f39d397
CM
922 struct extent_buffer *right = NULL;
923 struct extent_buffer *mid;
924 struct extent_buffer *left = NULL;
925 struct extent_buffer *parent = NULL;
bb803951
CM
926 int ret = 0;
927 int wret;
928 int pslot;
bb803951 929 int orig_slot = path->slots[level];
79f95c82 930 u64 orig_ptr;
bb803951
CM
931
932 if (level == 0)
933 return 0;
934
5f39d397 935 mid = path->nodes[level];
b4ce94de 936
bd681513
CM
937 WARN_ON(path->locks[level] != BTRFS_WRITE_LOCK &&
938 path->locks[level] != BTRFS_WRITE_LOCK_BLOCKING);
7bb86316
CM
939 WARN_ON(btrfs_header_generation(mid) != trans->transid);
940
1d4f8a0c 941 orig_ptr = btrfs_node_blockptr(mid, orig_slot);
79f95c82 942
a05a9bb1 943 if (level < BTRFS_MAX_LEVEL - 1) {
5f39d397 944 parent = path->nodes[level + 1];
a05a9bb1
LZ
945 pslot = path->slots[level + 1];
946 }
bb803951 947
40689478
CM
948 /*
949 * deal with the case where there is only one pointer in the root
950 * by promoting the node below to a root
951 */
5f39d397
CM
952 if (!parent) {
953 struct extent_buffer *child;
bb803951 954
5f39d397 955 if (btrfs_header_nritems(mid) != 1)
bb803951
CM
956 return 0;
957
958 /* promote the child to a root */
5f39d397 959 child = read_node_slot(root, mid, 0);
305a26af
MF
960 if (!child) {
961 ret = -EROFS;
962 btrfs_std_error(root->fs_info, ret);
963 goto enospc;
964 }
965
925baedd 966 btrfs_tree_lock(child);
b4ce94de 967 btrfs_set_lock_blocking(child);
9fa8cfe7 968 ret = btrfs_cow_block(trans, root, child, mid, 0, &child);
f0486c68
YZ
969 if (ret) {
970 btrfs_tree_unlock(child);
971 free_extent_buffer(child);
972 goto enospc;
973 }
2f375ab9 974
240f62c8 975 rcu_assign_pointer(root->node, child);
925baedd 976
0b86a832 977 add_root_to_dirty_list(root);
925baedd 978 btrfs_tree_unlock(child);
b4ce94de 979
925baedd 980 path->locks[level] = 0;
bb803951 981 path->nodes[level] = NULL;
5f39d397 982 clean_tree_block(trans, root, mid);
925baedd 983 btrfs_tree_unlock(mid);
bb803951 984 /* once for the path */
5f39d397 985 free_extent_buffer(mid);
f0486c68
YZ
986
987 root_sub_used(root, mid->len);
66d7e7f0 988 btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
bb803951 989 /* once for the root ptr */
3083ee2e 990 free_extent_buffer_stale(mid);
f0486c68 991 return 0;
bb803951 992 }
5f39d397 993 if (btrfs_header_nritems(mid) >
123abc88 994 BTRFS_NODEPTRS_PER_BLOCK(root) / 4)
bb803951
CM
995 return 0;
996
559af821 997 btrfs_header_nritems(mid);
54aa1f4d 998
5f39d397
CM
999 left = read_node_slot(root, parent, pslot - 1);
1000 if (left) {
925baedd 1001 btrfs_tree_lock(left);
b4ce94de 1002 btrfs_set_lock_blocking(left);
5f39d397 1003 wret = btrfs_cow_block(trans, root, left,
9fa8cfe7 1004 parent, pslot - 1, &left);
54aa1f4d
CM
1005 if (wret) {
1006 ret = wret;
1007 goto enospc;
1008 }
2cc58cf2 1009 }
5f39d397
CM
1010 right = read_node_slot(root, parent, pslot + 1);
1011 if (right) {
925baedd 1012 btrfs_tree_lock(right);
b4ce94de 1013 btrfs_set_lock_blocking(right);
5f39d397 1014 wret = btrfs_cow_block(trans, root, right,
9fa8cfe7 1015 parent, pslot + 1, &right);
2cc58cf2
CM
1016 if (wret) {
1017 ret = wret;
1018 goto enospc;
1019 }
1020 }
1021
1022 /* first, try to make some room in the middle buffer */
5f39d397
CM
1023 if (left) {
1024 orig_slot += btrfs_header_nritems(left);
bce4eae9 1025 wret = push_node_left(trans, root, left, mid, 1);
79f95c82
CM
1026 if (wret < 0)
1027 ret = wret;
559af821 1028 btrfs_header_nritems(mid);
bb803951 1029 }
79f95c82
CM
1030
1031 /*
1032 * then try to empty the right most buffer into the middle
1033 */
5f39d397 1034 if (right) {
971a1f66 1035 wret = push_node_left(trans, root, mid, right, 1);
54aa1f4d 1036 if (wret < 0 && wret != -ENOSPC)
79f95c82 1037 ret = wret;
5f39d397 1038 if (btrfs_header_nritems(right) == 0) {
5f39d397 1039 clean_tree_block(trans, root, right);
925baedd 1040 btrfs_tree_unlock(right);
143bede5 1041 del_ptr(trans, root, path, level + 1, pslot + 1);
f0486c68 1042 root_sub_used(root, right->len);
66d7e7f0 1043 btrfs_free_tree_block(trans, root, right, 0, 1, 0);
3083ee2e 1044 free_extent_buffer_stale(right);
f0486c68 1045 right = NULL;
bb803951 1046 } else {
5f39d397
CM
1047 struct btrfs_disk_key right_key;
1048 btrfs_node_key(right, &right_key, 0);
1049 btrfs_set_node_key(parent, &right_key, pslot + 1);
1050 btrfs_mark_buffer_dirty(parent);
bb803951
CM
1051 }
1052 }
5f39d397 1053 if (btrfs_header_nritems(mid) == 1) {
79f95c82
CM
1054 /*
1055 * we're not allowed to leave a node with one item in the
1056 * tree during a delete. A deletion from lower in the tree
1057 * could try to delete the only pointer in this node.
1058 * So, pull some keys from the left.
1059 * There has to be a left pointer at this point because
1060 * otherwise we would have pulled some pointers from the
1061 * right
1062 */
305a26af
MF
1063 if (!left) {
1064 ret = -EROFS;
1065 btrfs_std_error(root->fs_info, ret);
1066 goto enospc;
1067 }
5f39d397 1068 wret = balance_node_right(trans, root, mid, left);
54aa1f4d 1069 if (wret < 0) {
79f95c82 1070 ret = wret;
54aa1f4d
CM
1071 goto enospc;
1072 }
bce4eae9
CM
1073 if (wret == 1) {
1074 wret = push_node_left(trans, root, left, mid, 1);
1075 if (wret < 0)
1076 ret = wret;
1077 }
79f95c82
CM
1078 BUG_ON(wret == 1);
1079 }
5f39d397 1080 if (btrfs_header_nritems(mid) == 0) {
5f39d397 1081 clean_tree_block(trans, root, mid);
925baedd 1082 btrfs_tree_unlock(mid);
143bede5 1083 del_ptr(trans, root, path, level + 1, pslot);
f0486c68 1084 root_sub_used(root, mid->len);
66d7e7f0 1085 btrfs_free_tree_block(trans, root, mid, 0, 1, 0);
3083ee2e 1086 free_extent_buffer_stale(mid);
f0486c68 1087 mid = NULL;
79f95c82
CM
1088 } else {
1089 /* update the parent key to reflect our changes */
5f39d397
CM
1090 struct btrfs_disk_key mid_key;
1091 btrfs_node_key(mid, &mid_key, 0);
1092 btrfs_set_node_key(parent, &mid_key, pslot);
1093 btrfs_mark_buffer_dirty(parent);
79f95c82 1094 }
bb803951 1095
79f95c82 1096 /* update the path */
5f39d397
CM
1097 if (left) {
1098 if (btrfs_header_nritems(left) > orig_slot) {
1099 extent_buffer_get(left);
925baedd 1100 /* left was locked after cow */
5f39d397 1101 path->nodes[level] = left;
bb803951
CM
1102 path->slots[level + 1] -= 1;
1103 path->slots[level] = orig_slot;
925baedd
CM
1104 if (mid) {
1105 btrfs_tree_unlock(mid);
5f39d397 1106 free_extent_buffer(mid);
925baedd 1107 }
bb803951 1108 } else {
5f39d397 1109 orig_slot -= btrfs_header_nritems(left);
bb803951
CM
1110 path->slots[level] = orig_slot;
1111 }
1112 }
79f95c82 1113 /* double check we haven't messed things up */
e20d96d6 1114 if (orig_ptr !=
5f39d397 1115 btrfs_node_blockptr(path->nodes[level], path->slots[level]))
79f95c82 1116 BUG();
54aa1f4d 1117enospc:
925baedd
CM
1118 if (right) {
1119 btrfs_tree_unlock(right);
5f39d397 1120 free_extent_buffer(right);
925baedd
CM
1121 }
1122 if (left) {
1123 if (path->nodes[level] != left)
1124 btrfs_tree_unlock(left);
5f39d397 1125 free_extent_buffer(left);
925baedd 1126 }
bb803951
CM
1127 return ret;
1128}
1129
d352ac68
CM
1130/* Node balancing for insertion. Here we only split or push nodes around
1131 * when they are completely full. This is also done top down, so we
1132 * have to be pessimistic.
1133 */
d397712b 1134static noinline int push_nodes_for_insert(struct btrfs_trans_handle *trans,
98ed5174
CM
1135 struct btrfs_root *root,
1136 struct btrfs_path *path, int level)
e66f709b 1137{
5f39d397
CM
1138 struct extent_buffer *right = NULL;
1139 struct extent_buffer *mid;
1140 struct extent_buffer *left = NULL;
1141 struct extent_buffer *parent = NULL;
e66f709b
CM
1142 int ret = 0;
1143 int wret;
1144 int pslot;
1145 int orig_slot = path->slots[level];
e66f709b
CM
1146
1147 if (level == 0)
1148 return 1;
1149
5f39d397 1150 mid = path->nodes[level];
7bb86316 1151 WARN_ON(btrfs_header_generation(mid) != trans->transid);
e66f709b 1152
a05a9bb1 1153 if (level < BTRFS_MAX_LEVEL - 1) {
5f39d397 1154 parent = path->nodes[level + 1];
a05a9bb1
LZ
1155 pslot = path->slots[level + 1];
1156 }
e66f709b 1157
5f39d397 1158 if (!parent)
e66f709b 1159 return 1;
e66f709b 1160
5f39d397 1161 left = read_node_slot(root, parent, pslot - 1);
e66f709b
CM
1162
1163 /* first, try to make some room in the middle buffer */
5f39d397 1164 if (left) {
e66f709b 1165 u32 left_nr;
925baedd
CM
1166
1167 btrfs_tree_lock(left);
b4ce94de
CM
1168 btrfs_set_lock_blocking(left);
1169
5f39d397 1170 left_nr = btrfs_header_nritems(left);
33ade1f8
CM
1171 if (left_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1172 wret = 1;
1173 } else {
5f39d397 1174 ret = btrfs_cow_block(trans, root, left, parent,
9fa8cfe7 1175 pslot - 1, &left);
54aa1f4d
CM
1176 if (ret)
1177 wret = 1;
1178 else {
54aa1f4d 1179 wret = push_node_left(trans, root,
971a1f66 1180 left, mid, 0);
54aa1f4d 1181 }
33ade1f8 1182 }
e66f709b
CM
1183 if (wret < 0)
1184 ret = wret;
1185 if (wret == 0) {
5f39d397 1186 struct btrfs_disk_key disk_key;
e66f709b 1187 orig_slot += left_nr;
5f39d397
CM
1188 btrfs_node_key(mid, &disk_key, 0);
1189 btrfs_set_node_key(parent, &disk_key, pslot);
1190 btrfs_mark_buffer_dirty(parent);
1191 if (btrfs_header_nritems(left) > orig_slot) {
1192 path->nodes[level] = left;
e66f709b
CM
1193 path->slots[level + 1] -= 1;
1194 path->slots[level] = orig_slot;
925baedd 1195 btrfs_tree_unlock(mid);
5f39d397 1196 free_extent_buffer(mid);
e66f709b
CM
1197 } else {
1198 orig_slot -=
5f39d397 1199 btrfs_header_nritems(left);
e66f709b 1200 path->slots[level] = orig_slot;
925baedd 1201 btrfs_tree_unlock(left);
5f39d397 1202 free_extent_buffer(left);
e66f709b 1203 }
e66f709b
CM
1204 return 0;
1205 }
925baedd 1206 btrfs_tree_unlock(left);
5f39d397 1207 free_extent_buffer(left);
e66f709b 1208 }
925baedd 1209 right = read_node_slot(root, parent, pslot + 1);
e66f709b
CM
1210
1211 /*
1212 * then try to empty the right most buffer into the middle
1213 */
5f39d397 1214 if (right) {
33ade1f8 1215 u32 right_nr;
b4ce94de 1216
925baedd 1217 btrfs_tree_lock(right);
b4ce94de
CM
1218 btrfs_set_lock_blocking(right);
1219
5f39d397 1220 right_nr = btrfs_header_nritems(right);
33ade1f8
CM
1221 if (right_nr >= BTRFS_NODEPTRS_PER_BLOCK(root) - 1) {
1222 wret = 1;
1223 } else {
5f39d397
CM
1224 ret = btrfs_cow_block(trans, root, right,
1225 parent, pslot + 1,
9fa8cfe7 1226 &right);
54aa1f4d
CM
1227 if (ret)
1228 wret = 1;
1229 else {
54aa1f4d 1230 wret = balance_node_right(trans, root,
5f39d397 1231 right, mid);
54aa1f4d 1232 }
33ade1f8 1233 }
e66f709b
CM
1234 if (wret < 0)
1235 ret = wret;
1236 if (wret == 0) {
5f39d397
CM
1237 struct btrfs_disk_key disk_key;
1238
1239 btrfs_node_key(right, &disk_key, 0);
1240 btrfs_set_node_key(parent, &disk_key, pslot + 1);
1241 btrfs_mark_buffer_dirty(parent);
1242
1243 if (btrfs_header_nritems(mid) <= orig_slot) {
1244 path->nodes[level] = right;
e66f709b
CM
1245 path->slots[level + 1] += 1;
1246 path->slots[level] = orig_slot -
5f39d397 1247 btrfs_header_nritems(mid);
925baedd 1248 btrfs_tree_unlock(mid);
5f39d397 1249 free_extent_buffer(mid);
e66f709b 1250 } else {
925baedd 1251 btrfs_tree_unlock(right);
5f39d397 1252 free_extent_buffer(right);
e66f709b 1253 }
e66f709b
CM
1254 return 0;
1255 }
925baedd 1256 btrfs_tree_unlock(right);
5f39d397 1257 free_extent_buffer(right);
e66f709b 1258 }
e66f709b
CM
1259 return 1;
1260}
1261
3c69faec 1262/*
d352ac68
CM
1263 * readahead one full node of leaves, finding things that are close
1264 * to the block in 'slot', and triggering ra on them.
3c69faec 1265 */
c8c42864
CM
1266static void reada_for_search(struct btrfs_root *root,
1267 struct btrfs_path *path,
1268 int level, int slot, u64 objectid)
3c69faec 1269{
5f39d397 1270 struct extent_buffer *node;
01f46658 1271 struct btrfs_disk_key disk_key;
3c69faec 1272 u32 nritems;
3c69faec 1273 u64 search;
a7175319 1274 u64 target;
6b80053d 1275 u64 nread = 0;
cb25c2ea 1276 u64 gen;
3c69faec 1277 int direction = path->reada;
5f39d397 1278 struct extent_buffer *eb;
6b80053d
CM
1279 u32 nr;
1280 u32 blocksize;
1281 u32 nscan = 0;
db94535d 1282
a6b6e75e 1283 if (level != 1)
6702ed49
CM
1284 return;
1285
1286 if (!path->nodes[level])
3c69faec
CM
1287 return;
1288
5f39d397 1289 node = path->nodes[level];
925baedd 1290
3c69faec 1291 search = btrfs_node_blockptr(node, slot);
6b80053d
CM
1292 blocksize = btrfs_level_size(root, level - 1);
1293 eb = btrfs_find_tree_block(root, search, blocksize);
5f39d397
CM
1294 if (eb) {
1295 free_extent_buffer(eb);
3c69faec
CM
1296 return;
1297 }
1298
a7175319 1299 target = search;
6b80053d 1300
5f39d397 1301 nritems = btrfs_header_nritems(node);
6b80053d 1302 nr = slot;
25b8b936 1303
d397712b 1304 while (1) {
6b80053d
CM
1305 if (direction < 0) {
1306 if (nr == 0)
1307 break;
1308 nr--;
1309 } else if (direction > 0) {
1310 nr++;
1311 if (nr >= nritems)
1312 break;
3c69faec 1313 }
01f46658
CM
1314 if (path->reada < 0 && objectid) {
1315 btrfs_node_key(node, &disk_key, nr);
1316 if (btrfs_disk_key_objectid(&disk_key) != objectid)
1317 break;
1318 }
6b80053d 1319 search = btrfs_node_blockptr(node, nr);
a7175319
CM
1320 if ((search <= target && target - search <= 65536) ||
1321 (search > target && search - target <= 65536)) {
cb25c2ea 1322 gen = btrfs_node_ptr_generation(node, nr);
cb25c2ea 1323 readahead_tree_block(root, search, blocksize, gen);
6b80053d
CM
1324 nread += blocksize;
1325 }
1326 nscan++;
a7175319 1327 if ((nread > 65536 || nscan > 32))
6b80053d 1328 break;
3c69faec
CM
1329 }
1330}
925baedd 1331
b4ce94de
CM
1332/*
1333 * returns -EAGAIN if it had to drop the path, or zero if everything was in
1334 * cache
1335 */
1336static noinline int reada_for_balance(struct btrfs_root *root,
1337 struct btrfs_path *path, int level)
1338{
1339 int slot;
1340 int nritems;
1341 struct extent_buffer *parent;
1342 struct extent_buffer *eb;
1343 u64 gen;
1344 u64 block1 = 0;
1345 u64 block2 = 0;
1346 int ret = 0;
1347 int blocksize;
1348
8c594ea8 1349 parent = path->nodes[level + 1];
b4ce94de
CM
1350 if (!parent)
1351 return 0;
1352
1353 nritems = btrfs_header_nritems(parent);
8c594ea8 1354 slot = path->slots[level + 1];
b4ce94de
CM
1355 blocksize = btrfs_level_size(root, level);
1356
1357 if (slot > 0) {
1358 block1 = btrfs_node_blockptr(parent, slot - 1);
1359 gen = btrfs_node_ptr_generation(parent, slot - 1);
1360 eb = btrfs_find_tree_block(root, block1, blocksize);
b9fab919
CM
1361 /*
1362 * if we get -eagain from btrfs_buffer_uptodate, we
1363 * don't want to return eagain here. That will loop
1364 * forever
1365 */
1366 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
b4ce94de
CM
1367 block1 = 0;
1368 free_extent_buffer(eb);
1369 }
8c594ea8 1370 if (slot + 1 < nritems) {
b4ce94de
CM
1371 block2 = btrfs_node_blockptr(parent, slot + 1);
1372 gen = btrfs_node_ptr_generation(parent, slot + 1);
1373 eb = btrfs_find_tree_block(root, block2, blocksize);
b9fab919 1374 if (eb && btrfs_buffer_uptodate(eb, gen, 1) != 0)
b4ce94de
CM
1375 block2 = 0;
1376 free_extent_buffer(eb);
1377 }
1378 if (block1 || block2) {
1379 ret = -EAGAIN;
8c594ea8
CM
1380
1381 /* release the whole path */
b3b4aa74 1382 btrfs_release_path(path);
8c594ea8
CM
1383
1384 /* read the blocks */
b4ce94de
CM
1385 if (block1)
1386 readahead_tree_block(root, block1, blocksize, 0);
1387 if (block2)
1388 readahead_tree_block(root, block2, blocksize, 0);
1389
1390 if (block1) {
1391 eb = read_tree_block(root, block1, blocksize, 0);
1392 free_extent_buffer(eb);
1393 }
8c594ea8 1394 if (block2) {
b4ce94de
CM
1395 eb = read_tree_block(root, block2, blocksize, 0);
1396 free_extent_buffer(eb);
1397 }
1398 }
1399 return ret;
1400}
1401
1402
d352ac68 1403/*
d397712b
CM
1404 * when we walk down the tree, it is usually safe to unlock the higher layers
1405 * in the tree. The exceptions are when our path goes through slot 0, because
1406 * operations on the tree might require changing key pointers higher up in the
1407 * tree.
d352ac68 1408 *
d397712b
CM
1409 * callers might also have set path->keep_locks, which tells this code to keep
1410 * the lock if the path points to the last slot in the block. This is part of
1411 * walking through the tree, and selecting the next slot in the higher block.
d352ac68 1412 *
d397712b
CM
1413 * lowest_unlock sets the lowest level in the tree we're allowed to unlock. so
1414 * if lowest_unlock is 1, level 0 won't be unlocked
d352ac68 1415 */
e02119d5 1416static noinline void unlock_up(struct btrfs_path *path, int level,
f7c79f30
CM
1417 int lowest_unlock, int min_write_lock_level,
1418 int *write_lock_level)
925baedd
CM
1419{
1420 int i;
1421 int skip_level = level;
051e1b9f 1422 int no_skips = 0;
925baedd
CM
1423 struct extent_buffer *t;
1424
1425 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1426 if (!path->nodes[i])
1427 break;
1428 if (!path->locks[i])
1429 break;
051e1b9f 1430 if (!no_skips && path->slots[i] == 0) {
925baedd
CM
1431 skip_level = i + 1;
1432 continue;
1433 }
051e1b9f 1434 if (!no_skips && path->keep_locks) {
925baedd
CM
1435 u32 nritems;
1436 t = path->nodes[i];
1437 nritems = btrfs_header_nritems(t);
051e1b9f 1438 if (nritems < 1 || path->slots[i] >= nritems - 1) {
925baedd
CM
1439 skip_level = i + 1;
1440 continue;
1441 }
1442 }
051e1b9f
CM
1443 if (skip_level < i && i >= lowest_unlock)
1444 no_skips = 1;
1445
925baedd
CM
1446 t = path->nodes[i];
1447 if (i >= lowest_unlock && i > skip_level && path->locks[i]) {
bd681513 1448 btrfs_tree_unlock_rw(t, path->locks[i]);
925baedd 1449 path->locks[i] = 0;
f7c79f30
CM
1450 if (write_lock_level &&
1451 i > min_write_lock_level &&
1452 i <= *write_lock_level) {
1453 *write_lock_level = i - 1;
1454 }
925baedd
CM
1455 }
1456 }
1457}
1458
b4ce94de
CM
1459/*
1460 * This releases any locks held in the path starting at level and
1461 * going all the way up to the root.
1462 *
1463 * btrfs_search_slot will keep the lock held on higher nodes in a few
1464 * corner cases, such as COW of the block at slot zero in the node. This
1465 * ignores those rules, and it should only be called when there are no
1466 * more updates to be done higher up in the tree.
1467 */
1468noinline void btrfs_unlock_up_safe(struct btrfs_path *path, int level)
1469{
1470 int i;
1471
5d4f98a2 1472 if (path->keep_locks)
b4ce94de
CM
1473 return;
1474
1475 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
1476 if (!path->nodes[i])
12f4dacc 1477 continue;
b4ce94de 1478 if (!path->locks[i])
12f4dacc 1479 continue;
bd681513 1480 btrfs_tree_unlock_rw(path->nodes[i], path->locks[i]);
b4ce94de
CM
1481 path->locks[i] = 0;
1482 }
1483}
1484
c8c42864
CM
1485/*
1486 * helper function for btrfs_search_slot. The goal is to find a block
1487 * in cache without setting the path to blocking. If we find the block
1488 * we return zero and the path is unchanged.
1489 *
1490 * If we can't find the block, we set the path blocking and do some
1491 * reada. -EAGAIN is returned and the search must be repeated.
1492 */
1493static int
1494read_block_for_search(struct btrfs_trans_handle *trans,
1495 struct btrfs_root *root, struct btrfs_path *p,
1496 struct extent_buffer **eb_ret, int level, int slot,
1497 struct btrfs_key *key)
1498{
1499 u64 blocknr;
1500 u64 gen;
1501 u32 blocksize;
1502 struct extent_buffer *b = *eb_ret;
1503 struct extent_buffer *tmp;
76a05b35 1504 int ret;
c8c42864
CM
1505
1506 blocknr = btrfs_node_blockptr(b, slot);
1507 gen = btrfs_node_ptr_generation(b, slot);
1508 blocksize = btrfs_level_size(root, level - 1);
1509
1510 tmp = btrfs_find_tree_block(root, blocknr, blocksize);
cb44921a 1511 if (tmp) {
b9fab919
CM
1512 /* first we do an atomic uptodate check */
1513 if (btrfs_buffer_uptodate(tmp, 0, 1) > 0) {
1514 if (btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
cb44921a
CM
1515 /*
1516 * we found an up to date block without
1517 * sleeping, return
1518 * right away
1519 */
1520 *eb_ret = tmp;
1521 return 0;
1522 }
1523 /* the pages were up to date, but we failed
1524 * the generation number check. Do a full
1525 * read for the generation number that is correct.
1526 * We must do this without dropping locks so
1527 * we can trust our generation number
1528 */
1529 free_extent_buffer(tmp);
bd681513
CM
1530 btrfs_set_path_blocking(p);
1531
b9fab919 1532 /* now we're allowed to do a blocking uptodate check */
cb44921a 1533 tmp = read_tree_block(root, blocknr, blocksize, gen);
b9fab919 1534 if (tmp && btrfs_buffer_uptodate(tmp, gen, 0) > 0) {
cb44921a
CM
1535 *eb_ret = tmp;
1536 return 0;
1537 }
1538 free_extent_buffer(tmp);
b3b4aa74 1539 btrfs_release_path(p);
cb44921a
CM
1540 return -EIO;
1541 }
c8c42864
CM
1542 }
1543
1544 /*
1545 * reduce lock contention at high levels
1546 * of the btree by dropping locks before
76a05b35
CM
1547 * we read. Don't release the lock on the current
1548 * level because we need to walk this node to figure
1549 * out which blocks to read.
c8c42864 1550 */
8c594ea8
CM
1551 btrfs_unlock_up_safe(p, level + 1);
1552 btrfs_set_path_blocking(p);
1553
cb44921a 1554 free_extent_buffer(tmp);
c8c42864
CM
1555 if (p->reada)
1556 reada_for_search(root, p, level, slot, key->objectid);
1557
b3b4aa74 1558 btrfs_release_path(p);
76a05b35
CM
1559
1560 ret = -EAGAIN;
5bdd3536 1561 tmp = read_tree_block(root, blocknr, blocksize, 0);
76a05b35
CM
1562 if (tmp) {
1563 /*
1564 * If the read above didn't mark this buffer up to date,
1565 * it will never end up being up to date. Set ret to EIO now
1566 * and give up so that our caller doesn't loop forever
1567 * on our EAGAINs.
1568 */
b9fab919 1569 if (!btrfs_buffer_uptodate(tmp, 0, 0))
76a05b35 1570 ret = -EIO;
c8c42864 1571 free_extent_buffer(tmp);
76a05b35
CM
1572 }
1573 return ret;
c8c42864
CM
1574}
1575
1576/*
1577 * helper function for btrfs_search_slot. This does all of the checks
1578 * for node-level blocks and does any balancing required based on
1579 * the ins_len.
1580 *
1581 * If no extra work was required, zero is returned. If we had to
1582 * drop the path, -EAGAIN is returned and btrfs_search_slot must
1583 * start over
1584 */
1585static int
1586setup_nodes_for_search(struct btrfs_trans_handle *trans,
1587 struct btrfs_root *root, struct btrfs_path *p,
bd681513
CM
1588 struct extent_buffer *b, int level, int ins_len,
1589 int *write_lock_level)
c8c42864
CM
1590{
1591 int ret;
1592 if ((p->search_for_split || ins_len > 0) && btrfs_header_nritems(b) >=
1593 BTRFS_NODEPTRS_PER_BLOCK(root) - 3) {
1594 int sret;
1595
bd681513
CM
1596 if (*write_lock_level < level + 1) {
1597 *write_lock_level = level + 1;
1598 btrfs_release_path(p);
1599 goto again;
1600 }
1601
c8c42864
CM
1602 sret = reada_for_balance(root, p, level);
1603 if (sret)
1604 goto again;
1605
1606 btrfs_set_path_blocking(p);
1607 sret = split_node(trans, root, p, level);
bd681513 1608 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
1609
1610 BUG_ON(sret > 0);
1611 if (sret) {
1612 ret = sret;
1613 goto done;
1614 }
1615 b = p->nodes[level];
1616 } else if (ins_len < 0 && btrfs_header_nritems(b) <
cfbb9308 1617 BTRFS_NODEPTRS_PER_BLOCK(root) / 2) {
c8c42864
CM
1618 int sret;
1619
bd681513
CM
1620 if (*write_lock_level < level + 1) {
1621 *write_lock_level = level + 1;
1622 btrfs_release_path(p);
1623 goto again;
1624 }
1625
c8c42864
CM
1626 sret = reada_for_balance(root, p, level);
1627 if (sret)
1628 goto again;
1629
1630 btrfs_set_path_blocking(p);
1631 sret = balance_level(trans, root, p, level);
bd681513 1632 btrfs_clear_path_blocking(p, NULL, 0);
c8c42864
CM
1633
1634 if (sret) {
1635 ret = sret;
1636 goto done;
1637 }
1638 b = p->nodes[level];
1639 if (!b) {
b3b4aa74 1640 btrfs_release_path(p);
c8c42864
CM
1641 goto again;
1642 }
1643 BUG_ON(btrfs_header_nritems(b) == 1);
1644 }
1645 return 0;
1646
1647again:
1648 ret = -EAGAIN;
1649done:
1650 return ret;
1651}
1652
/*
 * look for key in the tree.  path is filled in with nodes along the way
 * if key is found, we return zero and you can find the item in the leaf
 * level of the path (level 0)
 *
 * If the key isn't found, the path points to the slot where it should
 * be inserted, and 1 is returned.  If there are other errors during the
 * search a negative error number is returned.
 *
 * if ins_len > 0, nodes and leaves will be split as we walk down the
 * tree.  if ins_len < 0, nodes will be merged as we walk down the tree (if
 * possible)
 */
e089f05c
CM
1666int btrfs_search_slot(struct btrfs_trans_handle *trans, struct btrfs_root
1667 *root, struct btrfs_key *key, struct btrfs_path *p, int
1668 ins_len, int cow)
be0e5c09 1669{
5f39d397 1670 struct extent_buffer *b;
be0e5c09
CM
1671 int slot;
1672 int ret;
33c66f43 1673 int err;
be0e5c09 1674 int level;
925baedd 1675 int lowest_unlock = 1;
bd681513
CM
1676 int root_lock;
1677 /* everything at write_lock_level or lower must be write locked */
1678 int write_lock_level = 0;
9f3a7427 1679 u8 lowest_level = 0;
f7c79f30 1680 int min_write_lock_level;
9f3a7427 1681
6702ed49 1682 lowest_level = p->lowest_level;
323ac95b 1683 WARN_ON(lowest_level && ins_len > 0);
22b0ebda 1684 WARN_ON(p->nodes[0] != NULL);
25179201 1685
bd681513 1686 if (ins_len < 0) {
925baedd 1687 lowest_unlock = 2;
65b51a00 1688
		/* when we are removing items, we might have to go up to level
		 * two as we update tree pointers.  Make sure we keep write
		 * locks for those levels as well.
		 */
1693 write_lock_level = 2;
1694 } else if (ins_len > 0) {
1695 /*
1696 * for inserting items, make sure we have a write lock on
1697 * level 1 so we can update keys
1698 */
1699 write_lock_level = 1;
1700 }
1701
1702 if (!cow)
1703 write_lock_level = -1;
1704
1705 if (cow && (p->keep_locks || p->lowest_level))
1706 write_lock_level = BTRFS_MAX_LEVEL;
1707
f7c79f30
CM
1708 min_write_lock_level = write_lock_level;
1709
bb803951 1710again:
bd681513
CM
1711 /*
1712 * we try very hard to do read locks on the root
1713 */
1714 root_lock = BTRFS_READ_LOCK;
1715 level = 0;
5d4f98a2 1716 if (p->search_commit_root) {
bd681513
CM
1717 /*
1718 * the commit roots are read only
1719 * so we always do read locks
1720 */
5d4f98a2
YZ
1721 b = root->commit_root;
1722 extent_buffer_get(b);
bd681513 1723 level = btrfs_header_level(b);
5d4f98a2 1724 if (!p->skip_locking)
bd681513 1725 btrfs_tree_read_lock(b);
5d4f98a2 1726 } else {
bd681513 1727 if (p->skip_locking) {
5d4f98a2 1728 b = btrfs_root_node(root);
bd681513
CM
1729 level = btrfs_header_level(b);
1730 } else {
1731 /* we don't know the level of the root node
1732 * until we actually have it read locked
1733 */
1734 b = btrfs_read_lock_root_node(root);
1735 level = btrfs_header_level(b);
1736 if (level <= write_lock_level) {
1737 /* whoops, must trade for write lock */
1738 btrfs_tree_read_unlock(b);
1739 free_extent_buffer(b);
1740 b = btrfs_lock_root_node(root);
1741 root_lock = BTRFS_WRITE_LOCK;
1742
1743 /* the level might have changed, check again */
1744 level = btrfs_header_level(b);
1745 }
1746 }
5d4f98a2 1747 }
bd681513
CM
1748 p->nodes[level] = b;
1749 if (!p->skip_locking)
1750 p->locks[level] = root_lock;
925baedd 1751
eb60ceac 1752 while (b) {
5f39d397 1753 level = btrfs_header_level(b);
65b51a00
CM
1754
1755 /*
1756 * setup the path here so we can release it under lock
1757 * contention with the cow code
1758 */
02217ed2 1759 if (cow) {
c8c42864
CM
1760 /*
1761 * if we don't really need to cow this block
1762 * then we don't want to set the path blocking,
1763 * so we test it here
1764 */
5d4f98a2 1765 if (!should_cow_block(trans, root, b))
65b51a00 1766 goto cow_done;
5d4f98a2 1767
b4ce94de
CM
1768 btrfs_set_path_blocking(p);
1769
bd681513
CM
1770 /*
1771 * must have write locks on this node and the
1772 * parent
1773 */
1774 if (level + 1 > write_lock_level) {
1775 write_lock_level = level + 1;
1776 btrfs_release_path(p);
1777 goto again;
1778 }
1779
33c66f43
YZ
1780 err = btrfs_cow_block(trans, root, b,
1781 p->nodes[level + 1],
1782 p->slots[level + 1], &b);
1783 if (err) {
33c66f43 1784 ret = err;
65b51a00 1785 goto done;
54aa1f4d 1786 }
02217ed2 1787 }
65b51a00 1788cow_done:
02217ed2 1789 BUG_ON(!cow && ins_len);
65b51a00 1790
eb60ceac 1791 p->nodes[level] = b;
bd681513 1792 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de
CM
1793
1794 /*
1795 * we have a lock on b and as long as we aren't changing
1796 * the tree, there is no way to for the items in b to change.
1797 * It is safe to drop the lock on our parent before we
1798 * go through the expensive btree search on b.
1799 *
1800 * If cow is true, then we might be changing slot zero,
1801 * which may require changing the parent. So, we can't
1802 * drop the lock until after we know which slot we're
1803 * operating on.
1804 */
1805 if (!cow)
1806 btrfs_unlock_up_safe(p, level + 1);
1807
5f39d397 1808 ret = bin_search(b, key, level, &slot);
b4ce94de 1809
5f39d397 1810 if (level != 0) {
33c66f43
YZ
1811 int dec = 0;
1812 if (ret && slot > 0) {
1813 dec = 1;
be0e5c09 1814 slot -= 1;
33c66f43 1815 }
be0e5c09 1816 p->slots[level] = slot;
33c66f43 1817 err = setup_nodes_for_search(trans, root, p, b, level,
bd681513 1818 ins_len, &write_lock_level);
33c66f43 1819 if (err == -EAGAIN)
c8c42864 1820 goto again;
33c66f43
YZ
1821 if (err) {
1822 ret = err;
c8c42864 1823 goto done;
33c66f43 1824 }
c8c42864
CM
1825 b = p->nodes[level];
1826 slot = p->slots[level];
b4ce94de 1827
bd681513
CM
1828 /*
1829 * slot 0 is special, if we change the key
1830 * we have to update the parent pointer
1831 * which means we must have a write lock
1832 * on the parent
1833 */
1834 if (slot == 0 && cow &&
1835 write_lock_level < level + 1) {
1836 write_lock_level = level + 1;
1837 btrfs_release_path(p);
1838 goto again;
1839 }
1840
f7c79f30
CM
1841 unlock_up(p, level, lowest_unlock,
1842 min_write_lock_level, &write_lock_level);
f9efa9c7 1843
925baedd 1844 if (level == lowest_level) {
33c66f43
YZ
1845 if (dec)
1846 p->slots[level]++;
5b21f2ed 1847 goto done;
925baedd 1848 }
ca7a79ad 1849
33c66f43 1850 err = read_block_for_search(trans, root, p,
c8c42864 1851 &b, level, slot, key);
33c66f43 1852 if (err == -EAGAIN)
c8c42864 1853 goto again;
33c66f43
YZ
1854 if (err) {
1855 ret = err;
76a05b35 1856 goto done;
33c66f43 1857 }
76a05b35 1858
b4ce94de 1859 if (!p->skip_locking) {
bd681513
CM
1860 level = btrfs_header_level(b);
1861 if (level <= write_lock_level) {
1862 err = btrfs_try_tree_write_lock(b);
1863 if (!err) {
1864 btrfs_set_path_blocking(p);
1865 btrfs_tree_lock(b);
1866 btrfs_clear_path_blocking(p, b,
1867 BTRFS_WRITE_LOCK);
1868 }
1869 p->locks[level] = BTRFS_WRITE_LOCK;
1870 } else {
1871 err = btrfs_try_tree_read_lock(b);
1872 if (!err) {
1873 btrfs_set_path_blocking(p);
1874 btrfs_tree_read_lock(b);
1875 btrfs_clear_path_blocking(p, b,
1876 BTRFS_READ_LOCK);
1877 }
1878 p->locks[level] = BTRFS_READ_LOCK;
b4ce94de 1879 }
bd681513 1880 p->nodes[level] = b;
b4ce94de 1881 }
be0e5c09
CM
1882 } else {
1883 p->slots[level] = slot;
87b29b20
YZ
1884 if (ins_len > 0 &&
1885 btrfs_leaf_free_space(root, b) < ins_len) {
bd681513
CM
1886 if (write_lock_level < 1) {
1887 write_lock_level = 1;
1888 btrfs_release_path(p);
1889 goto again;
1890 }
1891
b4ce94de 1892 btrfs_set_path_blocking(p);
33c66f43
YZ
1893 err = split_leaf(trans, root, key,
1894 p, ins_len, ret == 0);
bd681513 1895 btrfs_clear_path_blocking(p, NULL, 0);
b4ce94de 1896
33c66f43
YZ
1897 BUG_ON(err > 0);
1898 if (err) {
1899 ret = err;
65b51a00
CM
1900 goto done;
1901 }
5c680ed6 1902 }
459931ec 1903 if (!p->search_for_split)
f7c79f30
CM
1904 unlock_up(p, level, lowest_unlock,
1905 min_write_lock_level, &write_lock_level);
65b51a00 1906 goto done;
be0e5c09
CM
1907 }
1908 }
65b51a00
CM
1909 ret = 1;
1910done:
b4ce94de
CM
1911 /*
1912 * we don't really know what they plan on doing with the path
1913 * from here on, so for now just mark it as blocking
1914 */
b9473439
CM
1915 if (!p->leave_spinning)
1916 btrfs_set_path_blocking(p);
76a05b35 1917 if (ret < 0)
b3b4aa74 1918 btrfs_release_path(p);
65b51a00 1919 return ret;
be0e5c09
CM
1920}
1921
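/*
 * A minimal caller sketch for btrfs_search_slot() (illustrative only; the
 * helper name example_lookup_item and its error handling are assumptions,
 * not code taken from elsewhere in btrfs).  It follows the return
 * convention documented above: 0 means the key was found at
 * path->slots[0] of the leaf, 1 means it was not found and the path
 * points at the insertion slot, and a negative value is an error.
 */
static int example_lookup_item(struct btrfs_trans_handle *trans,
                               struct btrfs_root *root,
                               struct btrfs_key *key)
{
        struct btrfs_path *path;
        int ret;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        /* ins_len == 0 and cow == 0: read-only lookup, nothing is split */
        ret = btrfs_search_slot(trans, root, key, path, 0, 0);
        if (ret == 0) {
                struct btrfs_key found;

                /* the item lives in path->nodes[0] at path->slots[0] */
                btrfs_item_key_to_cpu(path->nodes[0], &found, path->slots[0]);
                WARN_ON(btrfs_comp_cpu_keys(&found, key) != 0);
        }
        btrfs_free_path(path);
        return ret;
}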
74123bd7
CM
1922/*
1923 * adjust the pointers going up the tree, starting at level
 1924 * making sure the right key of each node points to 'key'.
1925 * This is used after shifting pointers to the left, so it stops
1926 * fixing up pointers when a given leaf/node is not in slot 0 of the
1927 * higher levels
aa5d6bed 1928 *
74123bd7 1929 */
143bede5
JM
1930static void fixup_low_keys(struct btrfs_trans_handle *trans,
1931 struct btrfs_root *root, struct btrfs_path *path,
1932 struct btrfs_disk_key *key, int level)
be0e5c09
CM
1933{
1934 int i;
5f39d397
CM
1935 struct extent_buffer *t;
1936
234b63a0 1937 for (i = level; i < BTRFS_MAX_LEVEL; i++) {
be0e5c09 1938 int tslot = path->slots[i];
eb60ceac 1939 if (!path->nodes[i])
be0e5c09 1940 break;
5f39d397
CM
1941 t = path->nodes[i];
1942 btrfs_set_node_key(t, key, tslot);
d6025579 1943 btrfs_mark_buffer_dirty(path->nodes[i]);
be0e5c09
CM
1944 if (tslot != 0)
1945 break;
1946 }
1947}
1948
31840ae1
ZY
1949/*
1950 * update item key.
1951 *
1952 * This function isn't completely safe. It's the caller's responsibility
1953 * that the new key won't break the order
1954 */
143bede5
JM
1955void btrfs_set_item_key_safe(struct btrfs_trans_handle *trans,
1956 struct btrfs_root *root, struct btrfs_path *path,
1957 struct btrfs_key *new_key)
31840ae1
ZY
1958{
1959 struct btrfs_disk_key disk_key;
1960 struct extent_buffer *eb;
1961 int slot;
1962
1963 eb = path->nodes[0];
1964 slot = path->slots[0];
1965 if (slot > 0) {
1966 btrfs_item_key(eb, &disk_key, slot - 1);
143bede5 1967 BUG_ON(comp_keys(&disk_key, new_key) >= 0);
31840ae1
ZY
1968 }
1969 if (slot < btrfs_header_nritems(eb) - 1) {
1970 btrfs_item_key(eb, &disk_key, slot + 1);
143bede5 1971 BUG_ON(comp_keys(&disk_key, new_key) <= 0);
31840ae1
ZY
1972 }
1973
1974 btrfs_cpu_key_to_disk(&disk_key, new_key);
1975 btrfs_set_item_key(eb, &disk_key, slot);
1976 btrfs_mark_buffer_dirty(eb);
1977 if (slot == 0)
1978 fixup_low_keys(trans, root, path, &disk_key, 1);
31840ae1
ZY
1979}
1980
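/*
 * A sketch of the ordering rule described above, expressed with cpu keys
 * (the function itself performs the same checks on disk keys).  The helper
 * name is illustrative; 'leaf', 'slot' and 'new_key' are assumed to come
 * from the caller's path.  It returns 1 when the new key still sorts
 * strictly between its neighbours, which is what the BUG_ON()s require.
 */
static int example_new_key_keeps_order(struct extent_buffer *leaf, int slot,
                                       struct btrfs_key *new_key)
{
        struct btrfs_key neighbour;

        if (slot > 0) {
                btrfs_item_key_to_cpu(leaf, &neighbour, slot - 1);
                if (btrfs_comp_cpu_keys(&neighbour, new_key) >= 0)
                        return 0;
        }
        if (slot < btrfs_header_nritems(leaf) - 1) {
                btrfs_item_key_to_cpu(leaf, &neighbour, slot + 1);
                if (btrfs_comp_cpu_keys(&neighbour, new_key) <= 0)
                        return 0;
        }
        return 1;
}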
74123bd7
CM
1981/*
1982 * try to push data from one node into the next node left in the
79f95c82 1983 * tree.
aa5d6bed
CM
1984 *
1985 * returns 0 if some ptrs were pushed left, < 0 if there was some horrible
1986 * error, and > 0 if there was no room in the left hand block.
74123bd7 1987 */
98ed5174
CM
1988static int push_node_left(struct btrfs_trans_handle *trans,
1989 struct btrfs_root *root, struct extent_buffer *dst,
971a1f66 1990 struct extent_buffer *src, int empty)
be0e5c09 1991{
be0e5c09 1992 int push_items = 0;
bb803951
CM
1993 int src_nritems;
1994 int dst_nritems;
aa5d6bed 1995 int ret = 0;
be0e5c09 1996
5f39d397
CM
1997 src_nritems = btrfs_header_nritems(src);
1998 dst_nritems = btrfs_header_nritems(dst);
123abc88 1999 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
7bb86316
CM
2000 WARN_ON(btrfs_header_generation(src) != trans->transid);
2001 WARN_ON(btrfs_header_generation(dst) != trans->transid);
54aa1f4d 2002
bce4eae9 2003 if (!empty && src_nritems <= 8)
971a1f66
CM
2004 return 1;
2005
d397712b 2006 if (push_items <= 0)
be0e5c09
CM
2007 return 1;
2008
bce4eae9 2009 if (empty) {
971a1f66 2010 push_items = min(src_nritems, push_items);
bce4eae9
CM
2011 if (push_items < src_nritems) {
2012 /* leave at least 8 pointers in the node if
2013 * we aren't going to empty it
2014 */
2015 if (src_nritems - push_items < 8) {
2016 if (push_items <= 8)
2017 return 1;
2018 push_items -= 8;
2019 }
2020 }
2021 } else
2022 push_items = min(src_nritems - 8, push_items);
79f95c82 2023
5f39d397
CM
2024 copy_extent_buffer(dst, src,
2025 btrfs_node_key_ptr_offset(dst_nritems),
2026 btrfs_node_key_ptr_offset(0),
d397712b 2027 push_items * sizeof(struct btrfs_key_ptr));
5f39d397 2028
bb803951 2029 if (push_items < src_nritems) {
5f39d397
CM
2030 memmove_extent_buffer(src, btrfs_node_key_ptr_offset(0),
2031 btrfs_node_key_ptr_offset(push_items),
2032 (src_nritems - push_items) *
2033 sizeof(struct btrfs_key_ptr));
2034 }
2035 btrfs_set_header_nritems(src, src_nritems - push_items);
2036 btrfs_set_header_nritems(dst, dst_nritems + push_items);
2037 btrfs_mark_buffer_dirty(src);
2038 btrfs_mark_buffer_dirty(dst);
31840ae1 2039
79f95c82
CM
2040 return ret;
2041}
2042
2043/*
2044 * try to push data from one node into the next node right in the
2045 * tree.
2046 *
2047 * returns 0 if some ptrs were pushed, < 0 if there was some horrible
2048 * error, and > 0 if there was no room in the right hand block.
2049 *
2050 * this will only push up to 1/2 the contents of the left node over
2051 */
5f39d397
CM
2052static int balance_node_right(struct btrfs_trans_handle *trans,
2053 struct btrfs_root *root,
2054 struct extent_buffer *dst,
2055 struct extent_buffer *src)
79f95c82 2056{
79f95c82
CM
2057 int push_items = 0;
2058 int max_push;
2059 int src_nritems;
2060 int dst_nritems;
2061 int ret = 0;
79f95c82 2062
7bb86316
CM
2063 WARN_ON(btrfs_header_generation(src) != trans->transid);
2064 WARN_ON(btrfs_header_generation(dst) != trans->transid);
2065
5f39d397
CM
2066 src_nritems = btrfs_header_nritems(src);
2067 dst_nritems = btrfs_header_nritems(dst);
123abc88 2068 push_items = BTRFS_NODEPTRS_PER_BLOCK(root) - dst_nritems;
d397712b 2069 if (push_items <= 0)
79f95c82 2070 return 1;
bce4eae9 2071
d397712b 2072 if (src_nritems < 4)
bce4eae9 2073 return 1;
79f95c82
CM
2074
2075 max_push = src_nritems / 2 + 1;
2076 /* don't try to empty the node */
d397712b 2077 if (max_push >= src_nritems)
79f95c82 2078 return 1;
252c38f0 2079
79f95c82
CM
2080 if (max_push < push_items)
2081 push_items = max_push;
2082
5f39d397
CM
2083 memmove_extent_buffer(dst, btrfs_node_key_ptr_offset(push_items),
2084 btrfs_node_key_ptr_offset(0),
2085 (dst_nritems) *
2086 sizeof(struct btrfs_key_ptr));
d6025579 2087
5f39d397
CM
2088 copy_extent_buffer(dst, src,
2089 btrfs_node_key_ptr_offset(0),
2090 btrfs_node_key_ptr_offset(src_nritems - push_items),
d397712b 2091 push_items * sizeof(struct btrfs_key_ptr));
79f95c82 2092
5f39d397
CM
2093 btrfs_set_header_nritems(src, src_nritems - push_items);
2094 btrfs_set_header_nritems(dst, dst_nritems + push_items);
79f95c82 2095
5f39d397
CM
2096 btrfs_mark_buffer_dirty(src);
2097 btrfs_mark_buffer_dirty(dst);
31840ae1 2098
aa5d6bed 2099 return ret;
be0e5c09
CM
2100}
2101
97571fd0
CM
2102/*
2103 * helper function to insert a new root level in the tree.
2104 * A new node is allocated, and a single item is inserted to
2105 * point to the existing root
aa5d6bed
CM
2106 *
2107 * returns zero on success or < 0 on failure.
97571fd0 2108 */
d397712b 2109static noinline int insert_new_root(struct btrfs_trans_handle *trans,
5f39d397
CM
2110 struct btrfs_root *root,
2111 struct btrfs_path *path, int level)
5c680ed6 2112{
7bb86316 2113 u64 lower_gen;
5f39d397
CM
2114 struct extent_buffer *lower;
2115 struct extent_buffer *c;
925baedd 2116 struct extent_buffer *old;
5f39d397 2117 struct btrfs_disk_key lower_key;
5c680ed6
CM
2118
2119 BUG_ON(path->nodes[level]);
2120 BUG_ON(path->nodes[level-1] != root->node);
2121
7bb86316
CM
2122 lower = path->nodes[level-1];
2123 if (level == 1)
2124 btrfs_item_key(lower, &lower_key, 0);
2125 else
2126 btrfs_node_key(lower, &lower_key, 0);
2127
31840ae1 2128 c = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
5d4f98a2 2129 root->root_key.objectid, &lower_key,
66d7e7f0 2130 level, root->node->start, 0, 0);
5f39d397
CM
2131 if (IS_ERR(c))
2132 return PTR_ERR(c);
925baedd 2133
f0486c68
YZ
2134 root_add_used(root, root->nodesize);
2135
5d4f98a2 2136 memset_extent_buffer(c, 0, 0, sizeof(struct btrfs_header));
5f39d397
CM
2137 btrfs_set_header_nritems(c, 1);
2138 btrfs_set_header_level(c, level);
db94535d 2139 btrfs_set_header_bytenr(c, c->start);
5f39d397 2140 btrfs_set_header_generation(c, trans->transid);
5d4f98a2 2141 btrfs_set_header_backref_rev(c, BTRFS_MIXED_BACKREF_REV);
5f39d397 2142 btrfs_set_header_owner(c, root->root_key.objectid);
5f39d397
CM
2143
2144 write_extent_buffer(c, root->fs_info->fsid,
2145 (unsigned long)btrfs_header_fsid(c),
2146 BTRFS_FSID_SIZE);
e17cade2
CM
2147
2148 write_extent_buffer(c, root->fs_info->chunk_tree_uuid,
2149 (unsigned long)btrfs_header_chunk_tree_uuid(c),
2150 BTRFS_UUID_SIZE);
2151
5f39d397 2152 btrfs_set_node_key(c, &lower_key, 0);
db94535d 2153 btrfs_set_node_blockptr(c, 0, lower->start);
7bb86316 2154 lower_gen = btrfs_header_generation(lower);
31840ae1 2155 WARN_ON(lower_gen != trans->transid);
7bb86316
CM
2156
2157 btrfs_set_node_ptr_generation(c, 0, lower_gen);
d5719762 2158
5f39d397 2159 btrfs_mark_buffer_dirty(c);
d5719762 2160
925baedd 2161 old = root->node;
240f62c8 2162 rcu_assign_pointer(root->node, c);
925baedd
CM
2163
2164 /* the super has an extra ref to root->node */
2165 free_extent_buffer(old);
2166
0b86a832 2167 add_root_to_dirty_list(root);
5f39d397
CM
2168 extent_buffer_get(c);
2169 path->nodes[level] = c;
bd681513 2170 path->locks[level] = BTRFS_WRITE_LOCK;
5c680ed6
CM
2171 path->slots[level] = 0;
2172 return 0;
2173}
2174
74123bd7
CM
2175/*
2176 * worker function to insert a single pointer in a node.
2177 * the node should have enough room for the pointer already
97571fd0 2178 *
74123bd7
CM
2179 * slot and level indicate where you want the key to go, and
 2180 * bytenr is the block the key points to.
2181 */
143bede5
JM
2182static void insert_ptr(struct btrfs_trans_handle *trans,
2183 struct btrfs_root *root, struct btrfs_path *path,
2184 struct btrfs_disk_key *key, u64 bytenr,
2185 int slot, int level)
74123bd7 2186{
5f39d397 2187 struct extent_buffer *lower;
74123bd7 2188 int nritems;
5c680ed6
CM
2189
2190 BUG_ON(!path->nodes[level]);
f0486c68 2191 btrfs_assert_tree_locked(path->nodes[level]);
5f39d397
CM
2192 lower = path->nodes[level];
2193 nritems = btrfs_header_nritems(lower);
c293498b 2194 BUG_ON(slot > nritems);
143bede5 2195 BUG_ON(nritems == BTRFS_NODEPTRS_PER_BLOCK(root));
74123bd7 2196 if (slot != nritems) {
5f39d397
CM
2197 memmove_extent_buffer(lower,
2198 btrfs_node_key_ptr_offset(slot + 1),
2199 btrfs_node_key_ptr_offset(slot),
d6025579 2200 (nritems - slot) * sizeof(struct btrfs_key_ptr));
74123bd7 2201 }
5f39d397 2202 btrfs_set_node_key(lower, key, slot);
db94535d 2203 btrfs_set_node_blockptr(lower, slot, bytenr);
74493f7a
CM
2204 WARN_ON(trans->transid == 0);
2205 btrfs_set_node_ptr_generation(lower, slot, trans->transid);
5f39d397
CM
2206 btrfs_set_header_nritems(lower, nritems + 1);
2207 btrfs_mark_buffer_dirty(lower);
74123bd7
CM
2208}
2209
97571fd0
CM
2210/*
2211 * split the node at the specified level in path in two.
2212 * The path is corrected to point to the appropriate node after the split
2213 *
2214 * Before splitting this tries to make some room in the node by pushing
 2215 * left and right; if either one works, it returns right away.
aa5d6bed
CM
2216 *
2217 * returns 0 on success and < 0 on failure
97571fd0 2218 */
e02119d5
CM
2219static noinline int split_node(struct btrfs_trans_handle *trans,
2220 struct btrfs_root *root,
2221 struct btrfs_path *path, int level)
be0e5c09 2222{
5f39d397
CM
2223 struct extent_buffer *c;
2224 struct extent_buffer *split;
2225 struct btrfs_disk_key disk_key;
be0e5c09 2226 int mid;
5c680ed6 2227 int ret;
7518a238 2228 u32 c_nritems;
eb60ceac 2229
5f39d397 2230 c = path->nodes[level];
7bb86316 2231 WARN_ON(btrfs_header_generation(c) != trans->transid);
5f39d397 2232 if (c == root->node) {
5c680ed6 2233 /* trying to split the root, let's make a new one */
e089f05c 2234 ret = insert_new_root(trans, root, path, level + 1);
5c680ed6
CM
2235 if (ret)
2236 return ret;
b3612421 2237 } else {
e66f709b 2238 ret = push_nodes_for_insert(trans, root, path, level);
5f39d397
CM
2239 c = path->nodes[level];
2240 if (!ret && btrfs_header_nritems(c) <
c448acf0 2241 BTRFS_NODEPTRS_PER_BLOCK(root) - 3)
e66f709b 2242 return 0;
54aa1f4d
CM
2243 if (ret < 0)
2244 return ret;
be0e5c09 2245 }
e66f709b 2246
5f39d397 2247 c_nritems = btrfs_header_nritems(c);
5d4f98a2
YZ
2248 mid = (c_nritems + 1) / 2;
2249 btrfs_node_key(c, &disk_key, mid);
7bb86316 2250
5d4f98a2 2251 split = btrfs_alloc_free_block(trans, root, root->nodesize, 0,
31840ae1 2252 root->root_key.objectid,
66d7e7f0 2253 &disk_key, level, c->start, 0, 0);
5f39d397
CM
2254 if (IS_ERR(split))
2255 return PTR_ERR(split);
2256
f0486c68
YZ
2257 root_add_used(root, root->nodesize);
2258
5d4f98a2 2259 memset_extent_buffer(split, 0, 0, sizeof(struct btrfs_header));
5f39d397 2260 btrfs_set_header_level(split, btrfs_header_level(c));
db94535d 2261 btrfs_set_header_bytenr(split, split->start);
5f39d397 2262 btrfs_set_header_generation(split, trans->transid);
5d4f98a2 2263 btrfs_set_header_backref_rev(split, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
2264 btrfs_set_header_owner(split, root->root_key.objectid);
2265 write_extent_buffer(split, root->fs_info->fsid,
2266 (unsigned long)btrfs_header_fsid(split),
2267 BTRFS_FSID_SIZE);
e17cade2
CM
2268 write_extent_buffer(split, root->fs_info->chunk_tree_uuid,
2269 (unsigned long)btrfs_header_chunk_tree_uuid(split),
2270 BTRFS_UUID_SIZE);
54aa1f4d 2271
5f39d397
CM
2272
2273 copy_extent_buffer(split, c,
2274 btrfs_node_key_ptr_offset(0),
2275 btrfs_node_key_ptr_offset(mid),
2276 (c_nritems - mid) * sizeof(struct btrfs_key_ptr));
2277 btrfs_set_header_nritems(split, c_nritems - mid);
2278 btrfs_set_header_nritems(c, mid);
aa5d6bed
CM
2279 ret = 0;
2280
5f39d397
CM
2281 btrfs_mark_buffer_dirty(c);
2282 btrfs_mark_buffer_dirty(split);
2283
143bede5
JM
2284 insert_ptr(trans, root, path, &disk_key, split->start,
2285 path->slots[level + 1] + 1, level + 1);
aa5d6bed 2286
5de08d7d 2287 if (path->slots[level] >= mid) {
5c680ed6 2288 path->slots[level] -= mid;
925baedd 2289 btrfs_tree_unlock(c);
5f39d397
CM
2290 free_extent_buffer(c);
2291 path->nodes[level] = split;
5c680ed6
CM
2292 path->slots[level + 1] += 1;
2293 } else {
925baedd 2294 btrfs_tree_unlock(split);
5f39d397 2295 free_extent_buffer(split);
be0e5c09 2296 }
aa5d6bed 2297 return ret;
be0e5c09
CM
2298}
2299
74123bd7
CM
2300/*
2301 * how many bytes are required to store the items in a leaf. start
2302 * and nr indicate which items in the leaf to check. This totals up the
2303 * space used both by the item structs and the item data
2304 */
5f39d397 2305static int leaf_space_used(struct extent_buffer *l, int start, int nr)
be0e5c09
CM
2306{
2307 int data_len;
5f39d397 2308 int nritems = btrfs_header_nritems(l);
d4dbff95 2309 int end = min(nritems, start + nr) - 1;
be0e5c09
CM
2310
2311 if (!nr)
2312 return 0;
5f39d397
CM
2313 data_len = btrfs_item_end_nr(l, start);
2314 data_len = data_len - btrfs_item_offset_nr(l, end);
0783fcfc 2315 data_len += sizeof(struct btrfs_item) * nr;
d4dbff95 2316 WARN_ON(data_len < 0);
be0e5c09
CM
2317 return data_len;
2318}
2319
d4dbff95
CM
2320/*
2321 * The space between the end of the leaf items and
2322 * the start of the leaf data. IOW, how much room
2323 * the leaf has left for both items and data
2324 */
d397712b 2325noinline int btrfs_leaf_free_space(struct btrfs_root *root,
e02119d5 2326 struct extent_buffer *leaf)
d4dbff95 2327{
5f39d397
CM
2328 int nritems = btrfs_header_nritems(leaf);
2329 int ret;
2330 ret = BTRFS_LEAF_DATA_SIZE(root) - leaf_space_used(leaf, 0, nritems);
2331 if (ret < 0) {
d397712b
CM
2332 printk(KERN_CRIT "leaf free space ret %d, leaf data size %lu, "
2333 "used %d nritems %d\n",
ae2f5411 2334 ret, (unsigned long) BTRFS_LEAF_DATA_SIZE(root),
5f39d397
CM
2335 leaf_space_used(leaf, 0, nritems), nritems);
2336 }
2337 return ret;
d4dbff95
CM
2338}
2339
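/*
 * Sketch of how insertion callers size the space they ask
 * btrfs_search_slot() to guarantee: each new item is charged for its data
 * bytes plus one struct btrfs_item header, and that total is what gets
 * compared against btrfs_leaf_free_space() in the insert paths above.
 * The helper name and the 'data_size' parameter are illustrative
 * assumptions.
 */
static inline u32 example_ins_len_for_one_item(u32 data_size)
{
        return data_size + sizeof(struct btrfs_item);
}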
99d8f83c
CM
2340/*
2341 * min slot controls the lowest index we're willing to push to the
2342 * right. We'll push up to and including min_slot, but no lower
2343 */
44871b1b
CM
2344static noinline int __push_leaf_right(struct btrfs_trans_handle *trans,
2345 struct btrfs_root *root,
2346 struct btrfs_path *path,
2347 int data_size, int empty,
2348 struct extent_buffer *right,
99d8f83c
CM
2349 int free_space, u32 left_nritems,
2350 u32 min_slot)
00ec4c51 2351{
5f39d397 2352 struct extent_buffer *left = path->nodes[0];
44871b1b 2353 struct extent_buffer *upper = path->nodes[1];
cfed81a0 2354 struct btrfs_map_token token;
5f39d397 2355 struct btrfs_disk_key disk_key;
00ec4c51 2356 int slot;
34a38218 2357 u32 i;
00ec4c51
CM
2358 int push_space = 0;
2359 int push_items = 0;
0783fcfc 2360 struct btrfs_item *item;
34a38218 2361 u32 nr;
7518a238 2362 u32 right_nritems;
5f39d397 2363 u32 data_end;
db94535d 2364 u32 this_item_size;
00ec4c51 2365
cfed81a0
CM
2366 btrfs_init_map_token(&token);
2367
34a38218
CM
2368 if (empty)
2369 nr = 0;
2370 else
99d8f83c 2371 nr = max_t(u32, 1, min_slot);
34a38218 2372
31840ae1 2373 if (path->slots[0] >= left_nritems)
87b29b20 2374 push_space += data_size;
31840ae1 2375
44871b1b 2376 slot = path->slots[1];
34a38218
CM
2377 i = left_nritems - 1;
2378 while (i >= nr) {
5f39d397 2379 item = btrfs_item_nr(left, i);
db94535d 2380
31840ae1
ZY
2381 if (!empty && push_items > 0) {
2382 if (path->slots[0] > i)
2383 break;
2384 if (path->slots[0] == i) {
2385 int space = btrfs_leaf_free_space(root, left);
2386 if (space + push_space * 2 > free_space)
2387 break;
2388 }
2389 }
2390
00ec4c51 2391 if (path->slots[0] == i)
87b29b20 2392 push_space += data_size;
db94535d 2393
db94535d
CM
2394 this_item_size = btrfs_item_size(left, item);
2395 if (this_item_size + sizeof(*item) + push_space > free_space)
00ec4c51 2396 break;
31840ae1 2397
00ec4c51 2398 push_items++;
db94535d 2399 push_space += this_item_size + sizeof(*item);
34a38218
CM
2400 if (i == 0)
2401 break;
2402 i--;
db94535d 2403 }
5f39d397 2404
925baedd
CM
2405 if (push_items == 0)
2406 goto out_unlock;
5f39d397 2407
34a38218 2408 if (!empty && push_items == left_nritems)
a429e513 2409 WARN_ON(1);
5f39d397 2410
00ec4c51 2411 /* push left to right */
5f39d397 2412 right_nritems = btrfs_header_nritems(right);
34a38218 2413
5f39d397 2414 push_space = btrfs_item_end_nr(left, left_nritems - push_items);
123abc88 2415 push_space -= leaf_data_end(root, left);
5f39d397 2416
00ec4c51 2417 /* make room in the right data area */
5f39d397
CM
2418 data_end = leaf_data_end(root, right);
2419 memmove_extent_buffer(right,
2420 btrfs_leaf_data(right) + data_end - push_space,
2421 btrfs_leaf_data(right) + data_end,
2422 BTRFS_LEAF_DATA_SIZE(root) - data_end);
2423
00ec4c51 2424 /* copy from the left data area */
5f39d397 2425 copy_extent_buffer(right, left, btrfs_leaf_data(right) +
d6025579
CM
2426 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2427 btrfs_leaf_data(left) + leaf_data_end(root, left),
2428 push_space);
5f39d397
CM
2429
2430 memmove_extent_buffer(right, btrfs_item_nr_offset(push_items),
2431 btrfs_item_nr_offset(0),
2432 right_nritems * sizeof(struct btrfs_item));
2433
00ec4c51 2434 /* copy the items from left to right */
5f39d397
CM
2435 copy_extent_buffer(right, left, btrfs_item_nr_offset(0),
2436 btrfs_item_nr_offset(left_nritems - push_items),
2437 push_items * sizeof(struct btrfs_item));
00ec4c51
CM
2438
2439 /* update the item pointers */
7518a238 2440 right_nritems += push_items;
5f39d397 2441 btrfs_set_header_nritems(right, right_nritems);
123abc88 2442 push_space = BTRFS_LEAF_DATA_SIZE(root);
7518a238 2443 for (i = 0; i < right_nritems; i++) {
5f39d397 2444 item = btrfs_item_nr(right, i);
cfed81a0
CM
2445 push_space -= btrfs_token_item_size(right, item, &token);
2446 btrfs_set_token_item_offset(right, item, push_space, &token);
db94535d
CM
2447 }
2448
7518a238 2449 left_nritems -= push_items;
5f39d397 2450 btrfs_set_header_nritems(left, left_nritems);
00ec4c51 2451
34a38218
CM
2452 if (left_nritems)
2453 btrfs_mark_buffer_dirty(left);
f0486c68
YZ
2454 else
2455 clean_tree_block(trans, root, left);
2456
5f39d397 2457 btrfs_mark_buffer_dirty(right);
a429e513 2458
5f39d397
CM
2459 btrfs_item_key(right, &disk_key, 0);
2460 btrfs_set_node_key(upper, &disk_key, slot + 1);
d6025579 2461 btrfs_mark_buffer_dirty(upper);
02217ed2 2462
00ec4c51 2463 /* then fixup the leaf pointer in the path */
7518a238
CM
2464 if (path->slots[0] >= left_nritems) {
2465 path->slots[0] -= left_nritems;
925baedd
CM
2466 if (btrfs_header_nritems(path->nodes[0]) == 0)
2467 clean_tree_block(trans, root, path->nodes[0]);
2468 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
2469 free_extent_buffer(path->nodes[0]);
2470 path->nodes[0] = right;
00ec4c51
CM
2471 path->slots[1] += 1;
2472 } else {
925baedd 2473 btrfs_tree_unlock(right);
5f39d397 2474 free_extent_buffer(right);
00ec4c51
CM
2475 }
2476 return 0;
925baedd
CM
2477
2478out_unlock:
2479 btrfs_tree_unlock(right);
2480 free_extent_buffer(right);
2481 return 1;
00ec4c51 2482}
925baedd 2483
44871b1b
CM
2484/*
2485 * push some data in the path leaf to the right, trying to free up at
2486 * least data_size bytes. returns zero if the push worked, nonzero otherwise
2487 *
2488 * returns 1 if the push failed because the other node didn't have enough
2489 * room, 0 if everything worked out and < 0 if there were major errors.
99d8f83c
CM
2490 *
2491 * this will push starting from min_slot to the end of the leaf. It won't
2492 * push any slot lower than min_slot
44871b1b
CM
2493 */
2494static int push_leaf_right(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
2495 *root, struct btrfs_path *path,
2496 int min_data_size, int data_size,
2497 int empty, u32 min_slot)
44871b1b
CM
2498{
2499 struct extent_buffer *left = path->nodes[0];
2500 struct extent_buffer *right;
2501 struct extent_buffer *upper;
2502 int slot;
2503 int free_space;
2504 u32 left_nritems;
2505 int ret;
2506
2507 if (!path->nodes[1])
2508 return 1;
2509
2510 slot = path->slots[1];
2511 upper = path->nodes[1];
2512 if (slot >= btrfs_header_nritems(upper) - 1)
2513 return 1;
2514
2515 btrfs_assert_tree_locked(path->nodes[1]);
2516
2517 right = read_node_slot(root, upper, slot + 1);
91ca338d
TI
2518 if (right == NULL)
2519 return 1;
2520
44871b1b
CM
2521 btrfs_tree_lock(right);
2522 btrfs_set_lock_blocking(right);
2523
2524 free_space = btrfs_leaf_free_space(root, right);
2525 if (free_space < data_size)
2526 goto out_unlock;
2527
2528 /* cow and double check */
2529 ret = btrfs_cow_block(trans, root, right, upper,
2530 slot + 1, &right);
2531 if (ret)
2532 goto out_unlock;
2533
2534 free_space = btrfs_leaf_free_space(root, right);
2535 if (free_space < data_size)
2536 goto out_unlock;
2537
2538 left_nritems = btrfs_header_nritems(left);
2539 if (left_nritems == 0)
2540 goto out_unlock;
2541
99d8f83c
CM
2542 return __push_leaf_right(trans, root, path, min_data_size, empty,
2543 right, free_space, left_nritems, min_slot);
44871b1b
CM
2544out_unlock:
2545 btrfs_tree_unlock(right);
2546 free_extent_buffer(right);
2547 return 1;
2548}
2549
74123bd7
CM
2550/*
2551 * push some data in the path leaf to the left, trying to free up at
2552 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
2553 *
2554 * max_slot can put a limit on how far into the leaf we'll push items. The
2555 * item at 'max_slot' won't be touched. Use (u32)-1 to make us do all the
2556 * items
74123bd7 2557 */
44871b1b
CM
2558static noinline int __push_leaf_left(struct btrfs_trans_handle *trans,
2559 struct btrfs_root *root,
2560 struct btrfs_path *path, int data_size,
2561 int empty, struct extent_buffer *left,
99d8f83c
CM
2562 int free_space, u32 right_nritems,
2563 u32 max_slot)
be0e5c09 2564{
5f39d397
CM
2565 struct btrfs_disk_key disk_key;
2566 struct extent_buffer *right = path->nodes[0];
be0e5c09 2567 int i;
be0e5c09
CM
2568 int push_space = 0;
2569 int push_items = 0;
0783fcfc 2570 struct btrfs_item *item;
7518a238 2571 u32 old_left_nritems;
34a38218 2572 u32 nr;
aa5d6bed 2573 int ret = 0;
db94535d
CM
2574 u32 this_item_size;
2575 u32 old_left_item_size;
cfed81a0
CM
2576 struct btrfs_map_token token;
2577
2578 btrfs_init_map_token(&token);
be0e5c09 2579
34a38218 2580 if (empty)
99d8f83c 2581 nr = min(right_nritems, max_slot);
34a38218 2582 else
99d8f83c 2583 nr = min(right_nritems - 1, max_slot);
34a38218
CM
2584
2585 for (i = 0; i < nr; i++) {
5f39d397 2586 item = btrfs_item_nr(right, i);
db94535d 2587
31840ae1
ZY
2588 if (!empty && push_items > 0) {
2589 if (path->slots[0] < i)
2590 break;
2591 if (path->slots[0] == i) {
2592 int space = btrfs_leaf_free_space(root, right);
2593 if (space + push_space * 2 > free_space)
2594 break;
2595 }
2596 }
2597
be0e5c09 2598 if (path->slots[0] == i)
87b29b20 2599 push_space += data_size;
db94535d
CM
2600
2601 this_item_size = btrfs_item_size(right, item);
2602 if (this_item_size + sizeof(*item) + push_space > free_space)
be0e5c09 2603 break;
db94535d 2604
be0e5c09 2605 push_items++;
db94535d
CM
2606 push_space += this_item_size + sizeof(*item);
2607 }
2608
be0e5c09 2609 if (push_items == 0) {
925baedd
CM
2610 ret = 1;
2611 goto out;
be0e5c09 2612 }
34a38218 2613 if (!empty && push_items == btrfs_header_nritems(right))
a429e513 2614 WARN_ON(1);
5f39d397 2615
be0e5c09 2616 /* push data from right to left */
5f39d397
CM
2617 copy_extent_buffer(left, right,
2618 btrfs_item_nr_offset(btrfs_header_nritems(left)),
2619 btrfs_item_nr_offset(0),
2620 push_items * sizeof(struct btrfs_item));
2621
123abc88 2622 push_space = BTRFS_LEAF_DATA_SIZE(root) -
d397712b 2623 btrfs_item_offset_nr(right, push_items - 1);
5f39d397
CM
2624
2625 copy_extent_buffer(left, right, btrfs_leaf_data(left) +
d6025579
CM
2626 leaf_data_end(root, left) - push_space,
2627 btrfs_leaf_data(right) +
5f39d397 2628 btrfs_item_offset_nr(right, push_items - 1),
d6025579 2629 push_space);
5f39d397 2630 old_left_nritems = btrfs_header_nritems(left);
87b29b20 2631 BUG_ON(old_left_nritems <= 0);
eb60ceac 2632
db94535d 2633 old_left_item_size = btrfs_item_offset_nr(left, old_left_nritems - 1);
0783fcfc 2634 for (i = old_left_nritems; i < old_left_nritems + push_items; i++) {
5f39d397 2635 u32 ioff;
db94535d 2636
5f39d397 2637 item = btrfs_item_nr(left, i);
db94535d 2638
cfed81a0
CM
2639 ioff = btrfs_token_item_offset(left, item, &token);
2640 btrfs_set_token_item_offset(left, item,
2641 ioff - (BTRFS_LEAF_DATA_SIZE(root) - old_left_item_size),
2642 &token);
be0e5c09 2643 }
5f39d397 2644 btrfs_set_header_nritems(left, old_left_nritems + push_items);
be0e5c09
CM
2645
2646 /* fixup right node */
34a38218 2647 if (push_items > right_nritems) {
d397712b
CM
2648 printk(KERN_CRIT "push items %d nr %u\n", push_items,
2649 right_nritems);
34a38218
CM
2650 WARN_ON(1);
2651 }
2652
2653 if (push_items < right_nritems) {
2654 push_space = btrfs_item_offset_nr(right, push_items - 1) -
2655 leaf_data_end(root, right);
2656 memmove_extent_buffer(right, btrfs_leaf_data(right) +
2657 BTRFS_LEAF_DATA_SIZE(root) - push_space,
2658 btrfs_leaf_data(right) +
2659 leaf_data_end(root, right), push_space);
2660
2661 memmove_extent_buffer(right, btrfs_item_nr_offset(0),
5f39d397
CM
2662 btrfs_item_nr_offset(push_items),
2663 (btrfs_header_nritems(right) - push_items) *
2664 sizeof(struct btrfs_item));
34a38218 2665 }
eef1c494
Y
2666 right_nritems -= push_items;
2667 btrfs_set_header_nritems(right, right_nritems);
123abc88 2668 push_space = BTRFS_LEAF_DATA_SIZE(root);
5f39d397
CM
2669 for (i = 0; i < right_nritems; i++) {
2670 item = btrfs_item_nr(right, i);
db94535d 2671
cfed81a0
CM
2672 push_space = push_space - btrfs_token_item_size(right,
2673 item, &token);
2674 btrfs_set_token_item_offset(right, item, push_space, &token);
db94535d 2675 }
eb60ceac 2676
5f39d397 2677 btrfs_mark_buffer_dirty(left);
34a38218
CM
2678 if (right_nritems)
2679 btrfs_mark_buffer_dirty(right);
f0486c68
YZ
2680 else
2681 clean_tree_block(trans, root, right);
098f59c2 2682
5f39d397 2683 btrfs_item_key(right, &disk_key, 0);
143bede5 2684 fixup_low_keys(trans, root, path, &disk_key, 1);
be0e5c09
CM
2685
2686 /* then fixup the leaf pointer in the path */
2687 if (path->slots[0] < push_items) {
2688 path->slots[0] += old_left_nritems;
925baedd 2689 btrfs_tree_unlock(path->nodes[0]);
5f39d397
CM
2690 free_extent_buffer(path->nodes[0]);
2691 path->nodes[0] = left;
be0e5c09
CM
2692 path->slots[1] -= 1;
2693 } else {
925baedd 2694 btrfs_tree_unlock(left);
5f39d397 2695 free_extent_buffer(left);
be0e5c09
CM
2696 path->slots[0] -= push_items;
2697 }
eb60ceac 2698 BUG_ON(path->slots[0] < 0);
aa5d6bed 2699 return ret;
925baedd
CM
2700out:
2701 btrfs_tree_unlock(left);
2702 free_extent_buffer(left);
2703 return ret;
be0e5c09
CM
2704}
2705
44871b1b
CM
2706/*
2707 * push some data in the path leaf to the left, trying to free up at
2708 * least data_size bytes. returns zero if the push worked, nonzero otherwise
99d8f83c
CM
2709 *
2710 * max_slot can put a limit on how far into the leaf we'll push items. The
2711 * item at 'max_slot' won't be touched. Use (u32)-1 to make us push all the
2712 * items
44871b1b
CM
2713 */
2714static int push_leaf_left(struct btrfs_trans_handle *trans, struct btrfs_root
99d8f83c
CM
2715 *root, struct btrfs_path *path, int min_data_size,
2716 int data_size, int empty, u32 max_slot)
44871b1b
CM
2717{
2718 struct extent_buffer *right = path->nodes[0];
2719 struct extent_buffer *left;
2720 int slot;
2721 int free_space;
2722 u32 right_nritems;
2723 int ret = 0;
2724
2725 slot = path->slots[1];
2726 if (slot == 0)
2727 return 1;
2728 if (!path->nodes[1])
2729 return 1;
2730
2731 right_nritems = btrfs_header_nritems(right);
2732 if (right_nritems == 0)
2733 return 1;
2734
2735 btrfs_assert_tree_locked(path->nodes[1]);
2736
2737 left = read_node_slot(root, path->nodes[1], slot - 1);
91ca338d
TI
2738 if (left == NULL)
2739 return 1;
2740
44871b1b
CM
2741 btrfs_tree_lock(left);
2742 btrfs_set_lock_blocking(left);
2743
2744 free_space = btrfs_leaf_free_space(root, left);
2745 if (free_space < data_size) {
2746 ret = 1;
2747 goto out;
2748 }
2749
2750 /* cow and double check */
2751 ret = btrfs_cow_block(trans, root, left,
2752 path->nodes[1], slot - 1, &left);
2753 if (ret) {
2754 /* we hit -ENOSPC, but it isn't fatal here */
79787eaa
JM
2755 if (ret == -ENOSPC)
2756 ret = 1;
44871b1b
CM
2757 goto out;
2758 }
2759
2760 free_space = btrfs_leaf_free_space(root, left);
2761 if (free_space < data_size) {
2762 ret = 1;
2763 goto out;
2764 }
2765
99d8f83c
CM
2766 return __push_leaf_left(trans, root, path, min_data_size,
2767 empty, left, free_space, right_nritems,
2768 max_slot);
44871b1b
CM
2769out:
2770 btrfs_tree_unlock(left);
2771 free_extent_buffer(left);
2772 return ret;
2773}
2774
2775/*
2776 * split the path's leaf in two, making sure there is at least data_size
2777 * available for the resulting leaf level of the path.
44871b1b 2778 */
143bede5
JM
2779static noinline void copy_for_split(struct btrfs_trans_handle *trans,
2780 struct btrfs_root *root,
2781 struct btrfs_path *path,
2782 struct extent_buffer *l,
2783 struct extent_buffer *right,
2784 int slot, int mid, int nritems)
44871b1b
CM
2785{
2786 int data_copy_size;
2787 int rt_data_off;
2788 int i;
44871b1b 2789 struct btrfs_disk_key disk_key;
cfed81a0
CM
2790 struct btrfs_map_token token;
2791
2792 btrfs_init_map_token(&token);
44871b1b
CM
2793
2794 nritems = nritems - mid;
2795 btrfs_set_header_nritems(right, nritems);
2796 data_copy_size = btrfs_item_end_nr(l, mid) - leaf_data_end(root, l);
2797
2798 copy_extent_buffer(right, l, btrfs_item_nr_offset(0),
2799 btrfs_item_nr_offset(mid),
2800 nritems * sizeof(struct btrfs_item));
2801
2802 copy_extent_buffer(right, l,
2803 btrfs_leaf_data(right) + BTRFS_LEAF_DATA_SIZE(root) -
2804 data_copy_size, btrfs_leaf_data(l) +
2805 leaf_data_end(root, l), data_copy_size);
2806
2807 rt_data_off = BTRFS_LEAF_DATA_SIZE(root) -
2808 btrfs_item_end_nr(l, mid);
2809
2810 for (i = 0; i < nritems; i++) {
2811 struct btrfs_item *item = btrfs_item_nr(right, i);
2812 u32 ioff;
2813
cfed81a0
CM
2814 ioff = btrfs_token_item_offset(right, item, &token);
2815 btrfs_set_token_item_offset(right, item,
2816 ioff + rt_data_off, &token);
44871b1b
CM
2817 }
2818
44871b1b 2819 btrfs_set_header_nritems(l, mid);
44871b1b 2820 btrfs_item_key(right, &disk_key, 0);
143bede5
JM
2821 insert_ptr(trans, root, path, &disk_key, right->start,
2822 path->slots[1] + 1, 1);
44871b1b
CM
2823
2824 btrfs_mark_buffer_dirty(right);
2825 btrfs_mark_buffer_dirty(l);
2826 BUG_ON(path->slots[0] != slot);
2827
44871b1b
CM
2828 if (mid <= slot) {
2829 btrfs_tree_unlock(path->nodes[0]);
2830 free_extent_buffer(path->nodes[0]);
2831 path->nodes[0] = right;
2832 path->slots[0] -= mid;
2833 path->slots[1] += 1;
2834 } else {
2835 btrfs_tree_unlock(right);
2836 free_extent_buffer(right);
2837 }
2838
2839 BUG_ON(path->slots[0] < 0);
44871b1b
CM
2840}
2841
99d8f83c
CM
2842/*
2843 * double splits happen when we need to insert a big item in the middle
2844 * of a leaf. A double split can leave us with 3 mostly empty leaves:
2845 * leaf: [ slots 0 - N] [ our target ] [ N + 1 - total in leaf ]
2846 * A B C
2847 *
2848 * We avoid this by trying to push the items on either side of our target
2849 * into the adjacent leaves. If all goes well we can avoid the double split
2850 * completely.
2851 */
2852static noinline int push_for_double_split(struct btrfs_trans_handle *trans,
2853 struct btrfs_root *root,
2854 struct btrfs_path *path,
2855 int data_size)
2856{
2857 int ret;
2858 int progress = 0;
2859 int slot;
2860 u32 nritems;
2861
2862 slot = path->slots[0];
2863
2864 /*
2865 * try to push all the items after our slot into the
2866 * right leaf
2867 */
2868 ret = push_leaf_right(trans, root, path, 1, data_size, 0, slot);
2869 if (ret < 0)
2870 return ret;
2871
2872 if (ret == 0)
2873 progress++;
2874
2875 nritems = btrfs_header_nritems(path->nodes[0]);
2876 /*
2877 * our goal is to get our slot at the start or end of a leaf. If
2878 * we've done so we're done
2879 */
2880 if (path->slots[0] == 0 || path->slots[0] == nritems)
2881 return 0;
2882
2883 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
2884 return 0;
2885
2886 /* try to push all the items before our slot into the next leaf */
2887 slot = path->slots[0];
2888 ret = push_leaf_left(trans, root, path, 1, data_size, 0, slot);
2889 if (ret < 0)
2890 return ret;
2891
2892 if (ret == 0)
2893 progress++;
2894
2895 if (progress)
2896 return 0;
2897 return 1;
2898}
2899
74123bd7
CM
2900/*
2901 * split the path's leaf in two, making sure there is at least data_size
2902 * available for the resulting leaf level of the path.
aa5d6bed
CM
2903 *
2904 * returns 0 if all went well and < 0 on failure.
74123bd7 2905 */
e02119d5
CM
2906static noinline int split_leaf(struct btrfs_trans_handle *trans,
2907 struct btrfs_root *root,
2908 struct btrfs_key *ins_key,
2909 struct btrfs_path *path, int data_size,
2910 int extend)
be0e5c09 2911{
5d4f98a2 2912 struct btrfs_disk_key disk_key;
5f39d397 2913 struct extent_buffer *l;
7518a238 2914 u32 nritems;
eb60ceac
CM
2915 int mid;
2916 int slot;
5f39d397 2917 struct extent_buffer *right;
d4dbff95 2918 int ret = 0;
aa5d6bed 2919 int wret;
5d4f98a2 2920 int split;
cc0c5538 2921 int num_doubles = 0;
99d8f83c 2922 int tried_avoid_double = 0;
aa5d6bed 2923
a5719521
YZ
2924 l = path->nodes[0];
2925 slot = path->slots[0];
2926 if (extend && data_size + btrfs_item_size_nr(l, slot) +
2927 sizeof(struct btrfs_item) > BTRFS_LEAF_DATA_SIZE(root))
2928 return -EOVERFLOW;
2929
40689478 2930 /* first try to make some room by pushing left and right */
99d8f83c
CM
2931 if (data_size) {
2932 wret = push_leaf_right(trans, root, path, data_size,
2933 data_size, 0, 0);
d397712b 2934 if (wret < 0)
eaee50e8 2935 return wret;
3685f791 2936 if (wret) {
99d8f83c
CM
2937 wret = push_leaf_left(trans, root, path, data_size,
2938 data_size, 0, (u32)-1);
3685f791
CM
2939 if (wret < 0)
2940 return wret;
2941 }
2942 l = path->nodes[0];
aa5d6bed 2943
3685f791 2944 /* did the pushes work? */
87b29b20 2945 if (btrfs_leaf_free_space(root, l) >= data_size)
3685f791 2946 return 0;
3326d1b0 2947 }
aa5d6bed 2948
5c680ed6 2949 if (!path->nodes[1]) {
e089f05c 2950 ret = insert_new_root(trans, root, path, 1);
5c680ed6
CM
2951 if (ret)
2952 return ret;
2953 }
cc0c5538 2954again:
5d4f98a2 2955 split = 1;
cc0c5538 2956 l = path->nodes[0];
eb60ceac 2957 slot = path->slots[0];
5f39d397 2958 nritems = btrfs_header_nritems(l);
d397712b 2959 mid = (nritems + 1) / 2;
54aa1f4d 2960
5d4f98a2
YZ
2961 if (mid <= slot) {
2962 if (nritems == 1 ||
2963 leaf_space_used(l, mid, nritems - mid) + data_size >
2964 BTRFS_LEAF_DATA_SIZE(root)) {
2965 if (slot >= nritems) {
2966 split = 0;
2967 } else {
2968 mid = slot;
2969 if (mid != nritems &&
2970 leaf_space_used(l, mid, nritems - mid) +
2971 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
2972 if (data_size && !tried_avoid_double)
2973 goto push_for_double;
5d4f98a2
YZ
2974 split = 2;
2975 }
2976 }
2977 }
2978 } else {
2979 if (leaf_space_used(l, 0, mid) + data_size >
2980 BTRFS_LEAF_DATA_SIZE(root)) {
2981 if (!extend && data_size && slot == 0) {
2982 split = 0;
2983 } else if ((extend || !data_size) && slot == 0) {
2984 mid = 1;
2985 } else {
2986 mid = slot;
2987 if (mid != nritems &&
2988 leaf_space_used(l, mid, nritems - mid) +
2989 data_size > BTRFS_LEAF_DATA_SIZE(root)) {
99d8f83c
CM
2990 if (data_size && !tried_avoid_double)
2991 goto push_for_double;
5d4f98a2
YZ
 2992 split = 2;
2993 }
2994 }
2995 }
2996 }
2997
2998 if (split == 0)
2999 btrfs_cpu_key_to_disk(&disk_key, ins_key);
3000 else
3001 btrfs_item_key(l, &disk_key, mid);
3002
3003 right = btrfs_alloc_free_block(trans, root, root->leafsize, 0,
31840ae1 3004 root->root_key.objectid,
66d7e7f0 3005 &disk_key, 0, l->start, 0, 0);
f0486c68 3006 if (IS_ERR(right))
5f39d397 3007 return PTR_ERR(right);
f0486c68
YZ
3008
3009 root_add_used(root, root->leafsize);
5f39d397
CM
3010
3011 memset_extent_buffer(right, 0, 0, sizeof(struct btrfs_header));
db94535d 3012 btrfs_set_header_bytenr(right, right->start);
5f39d397 3013 btrfs_set_header_generation(right, trans->transid);
5d4f98a2 3014 btrfs_set_header_backref_rev(right, BTRFS_MIXED_BACKREF_REV);
5f39d397
CM
3015 btrfs_set_header_owner(right, root->root_key.objectid);
3016 btrfs_set_header_level(right, 0);
3017 write_extent_buffer(right, root->fs_info->fsid,
3018 (unsigned long)btrfs_header_fsid(right),
3019 BTRFS_FSID_SIZE);
e17cade2
CM
3020
3021 write_extent_buffer(right, root->fs_info->chunk_tree_uuid,
3022 (unsigned long)btrfs_header_chunk_tree_uuid(right),
3023 BTRFS_UUID_SIZE);
44871b1b 3024
5d4f98a2
YZ
3025 if (split == 0) {
3026 if (mid <= slot) {
3027 btrfs_set_header_nritems(right, 0);
143bede5
JM
3028 insert_ptr(trans, root, path, &disk_key, right->start,
3029 path->slots[1] + 1, 1);
5d4f98a2
YZ
3030 btrfs_tree_unlock(path->nodes[0]);
3031 free_extent_buffer(path->nodes[0]);
3032 path->nodes[0] = right;
3033 path->slots[0] = 0;
3034 path->slots[1] += 1;
3035 } else {
3036 btrfs_set_header_nritems(right, 0);
143bede5 3037 insert_ptr(trans, root, path, &disk_key, right->start,
5d4f98a2 3038 path->slots[1], 1);
5d4f98a2
YZ
3039 btrfs_tree_unlock(path->nodes[0]);
3040 free_extent_buffer(path->nodes[0]);
3041 path->nodes[0] = right;
3042 path->slots[0] = 0;
143bede5
JM
3043 if (path->slots[1] == 0)
3044 fixup_low_keys(trans, root, path,
3045 &disk_key, 1);
d4dbff95 3046 }
5d4f98a2
YZ
3047 btrfs_mark_buffer_dirty(right);
3048 return ret;
d4dbff95 3049 }
74123bd7 3050
143bede5 3051 copy_for_split(trans, root, path, l, right, slot, mid, nritems);
31840ae1 3052
5d4f98a2 3053 if (split == 2) {
cc0c5538
CM
3054 BUG_ON(num_doubles != 0);
3055 num_doubles++;
3056 goto again;
a429e513 3057 }
44871b1b 3058
143bede5 3059 return 0;
99d8f83c
CM
3060
3061push_for_double:
3062 push_for_double_split(trans, root, path, data_size);
3063 tried_avoid_double = 1;
3064 if (btrfs_leaf_free_space(root, path->nodes[0]) >= data_size)
3065 return 0;
3066 goto again;
be0e5c09
CM
3067}
3068
ad48fd75
YZ
3069static noinline int setup_leaf_for_split(struct btrfs_trans_handle *trans,
3070 struct btrfs_root *root,
3071 struct btrfs_path *path, int ins_len)
459931ec 3072{
ad48fd75 3073 struct btrfs_key key;
459931ec 3074 struct extent_buffer *leaf;
ad48fd75
YZ
3075 struct btrfs_file_extent_item *fi;
3076 u64 extent_len = 0;
3077 u32 item_size;
3078 int ret;
459931ec
CM
3079
3080 leaf = path->nodes[0];
ad48fd75
YZ
3081 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3082
3083 BUG_ON(key.type != BTRFS_EXTENT_DATA_KEY &&
3084 key.type != BTRFS_EXTENT_CSUM_KEY);
3085
3086 if (btrfs_leaf_free_space(root, leaf) >= ins_len)
3087 return 0;
459931ec
CM
3088
3089 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
ad48fd75
YZ
3090 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3091 fi = btrfs_item_ptr(leaf, path->slots[0],
3092 struct btrfs_file_extent_item);
3093 extent_len = btrfs_file_extent_num_bytes(leaf, fi);
3094 }
b3b4aa74 3095 btrfs_release_path(path);
459931ec 3096
459931ec 3097 path->keep_locks = 1;
ad48fd75
YZ
3098 path->search_for_split = 1;
3099 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
459931ec 3100 path->search_for_split = 0;
ad48fd75
YZ
3101 if (ret < 0)
3102 goto err;
459931ec 3103
ad48fd75
YZ
3104 ret = -EAGAIN;
3105 leaf = path->nodes[0];
459931ec 3106 /* if our item isn't there or got smaller, return now */
ad48fd75
YZ
3107 if (ret > 0 || item_size != btrfs_item_size_nr(leaf, path->slots[0]))
3108 goto err;
3109
109f6aef
CM
3110 /* the leaf has changed, it now has room. return now */
3111 if (btrfs_leaf_free_space(root, path->nodes[0]) >= ins_len)
3112 goto err;
3113
ad48fd75
YZ
3114 if (key.type == BTRFS_EXTENT_DATA_KEY) {
3115 fi = btrfs_item_ptr(leaf, path->slots[0],
3116 struct btrfs_file_extent_item);
3117 if (extent_len != btrfs_file_extent_num_bytes(leaf, fi))
3118 goto err;
459931ec
CM
3119 }
3120
b9473439 3121 btrfs_set_path_blocking(path);
ad48fd75 3122 ret = split_leaf(trans, root, &key, path, ins_len, 1);
f0486c68
YZ
3123 if (ret)
3124 goto err;
459931ec 3125
ad48fd75 3126 path->keep_locks = 0;
b9473439 3127 btrfs_unlock_up_safe(path, 1);
ad48fd75
YZ
3128 return 0;
3129err:
3130 path->keep_locks = 0;
3131 return ret;
3132}
3133
3134static noinline int split_item(struct btrfs_trans_handle *trans,
3135 struct btrfs_root *root,
3136 struct btrfs_path *path,
3137 struct btrfs_key *new_key,
3138 unsigned long split_offset)
3139{
3140 struct extent_buffer *leaf;
3141 struct btrfs_item *item;
3142 struct btrfs_item *new_item;
3143 int slot;
3144 char *buf;
3145 u32 nritems;
3146 u32 item_size;
3147 u32 orig_offset;
3148 struct btrfs_disk_key disk_key;
3149
b9473439
CM
3150 leaf = path->nodes[0];
3151 BUG_ON(btrfs_leaf_free_space(root, leaf) < sizeof(struct btrfs_item));
3152
b4ce94de
CM
3153 btrfs_set_path_blocking(path);
3154
459931ec
CM
3155 item = btrfs_item_nr(leaf, path->slots[0]);
3156 orig_offset = btrfs_item_offset(leaf, item);
3157 item_size = btrfs_item_size(leaf, item);
3158
459931ec 3159 buf = kmalloc(item_size, GFP_NOFS);
ad48fd75
YZ
3160 if (!buf)
3161 return -ENOMEM;
3162
459931ec
CM
3163 read_extent_buffer(leaf, buf, btrfs_item_ptr_offset(leaf,
3164 path->slots[0]), item_size);
459931ec 3165
ad48fd75 3166 slot = path->slots[0] + 1;
459931ec 3167 nritems = btrfs_header_nritems(leaf);
459931ec
CM
3168 if (slot != nritems) {
3169 /* shift the items */
3170 memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + 1),
ad48fd75
YZ
3171 btrfs_item_nr_offset(slot),
3172 (nritems - slot) * sizeof(struct btrfs_item));
459931ec
CM
3173 }
3174
3175 btrfs_cpu_key_to_disk(&disk_key, new_key);
3176 btrfs_set_item_key(leaf, &disk_key, slot);
3177
3178 new_item = btrfs_item_nr(leaf, slot);
3179
3180 btrfs_set_item_offset(leaf, new_item, orig_offset);
3181 btrfs_set_item_size(leaf, new_item, item_size - split_offset);
3182
3183 btrfs_set_item_offset(leaf, item,
3184 orig_offset + item_size - split_offset);
3185 btrfs_set_item_size(leaf, item, split_offset);
3186
3187 btrfs_set_header_nritems(leaf, nritems + 1);
3188
3189 /* write the data for the start of the original item */
3190 write_extent_buffer(leaf, buf,
3191 btrfs_item_ptr_offset(leaf, path->slots[0]),
3192 split_offset);
3193
3194 /* write the data for the new item */
3195 write_extent_buffer(leaf, buf + split_offset,
3196 btrfs_item_ptr_offset(leaf, slot),
3197 item_size - split_offset);
3198 btrfs_mark_buffer_dirty(leaf);
3199
ad48fd75 3200 BUG_ON(btrfs_leaf_free_space(root, leaf) < 0);
459931ec 3201 kfree(buf);
ad48fd75
YZ
3202 return 0;
3203}
3204
3205/*
3206 * This function splits a single item into two items,
3207 * giving 'new_key' to the new item and splitting the
3208 * old one at split_offset (from the start of the item).
3209 *
3210 * The path may be released by this operation. After
3211 * the split, the path is pointing to the old item. The
3212 * new item is going to be in the same node as the old one.
3213 *
 3214 * Note, the item being split must be small enough to live alone on
3215 * a tree block with room for one extra struct btrfs_item
3216 *
3217 * This allows us to split the item in place, keeping a lock on the
3218 * leaf the entire time.
3219 */
3220int btrfs_split_item(struct btrfs_trans_handle *trans,
3221 struct btrfs_root *root,
3222 struct btrfs_path *path,
3223 struct btrfs_key *new_key,
3224 unsigned long split_offset)
3225{
3226 int ret;
3227 ret = setup_leaf_for_split(trans, root, path,
3228 sizeof(struct btrfs_item));
3229 if (ret)
3230 return ret;
3231
3232 ret = split_item(trans, root, path, new_key, split_offset);
459931ec
CM
3233 return ret;
3234}
3235
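/*
 * Illustrative caller for btrfs_split_item() (the helper name and the key
 * arithmetic are assumptions, not code from elsewhere in btrfs).  The
 * first 'split_offset' bytes stay in the original item, which the path
 * still points to afterwards; the remaining bytes move into a new item
 * whose key offset is set to 'new_offset'.
 */
static int example_split_at(struct btrfs_trans_handle *trans,
                            struct btrfs_root *root,
                            struct btrfs_path *path,
                            u64 new_offset, unsigned long split_offset)
{
        struct btrfs_key key;

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
        key.offset = new_offset;        /* key for the second half of the item */

        return btrfs_split_item(trans, root, path, &key, split_offset);
}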
ad48fd75
YZ
3236/*
 3237 * This function duplicates an item, giving 'new_key' to the new item.
3238 * It guarantees both items live in the same tree leaf and the new item
3239 * is contiguous with the original item.
3240 *
 3241 * This allows us to split a file extent in place, keeping a lock on the
3242 * leaf the entire time.
3243 */
3244int btrfs_duplicate_item(struct btrfs_trans_handle *trans,
3245 struct btrfs_root *root,
3246 struct btrfs_path *path,
3247 struct btrfs_key *new_key)
3248{
3249 struct extent_buffer *leaf;
3250 int ret;
3251 u32 item_size;
3252
3253 leaf = path->nodes[0];
3254 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3255 ret = setup_leaf_for_split(trans, root, path,
3256 item_size + sizeof(struct btrfs_item));
3257 if (ret)
3258 return ret;
3259
3260 path->slots[0]++;
143bede5
JM
3261 setup_items_for_insert(trans, root, path, new_key, &item_size,
3262 item_size, item_size +
3263 sizeof(struct btrfs_item), 1);
ad48fd75
YZ
3264 leaf = path->nodes[0];
3265 memcpy_extent_buffer(leaf,
3266 btrfs_item_ptr_offset(leaf, path->slots[0]),
3267 btrfs_item_ptr_offset(leaf, path->slots[0] - 1),
3268 item_size);
3269 return 0;
3270}
3271
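/*
 * Illustrative caller for btrfs_duplicate_item() (the helper name and the
 * key choice are assumptions).  On success the path points at the new
 * copy, which carries 'new_offset' in its key and the same data bytes as
 * the original item in the previous slot.
 */
static int example_duplicate_at(struct btrfs_trans_handle *trans,
                                struct btrfs_root *root,
                                struct btrfs_path *path, u64 new_offset)
{
        struct btrfs_key key;

        btrfs_item_key_to_cpu(path->nodes[0], &key, path->slots[0]);
        key.offset = new_offset;

        return btrfs_duplicate_item(trans, root, path, &key);
}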
d352ac68
CM
3272/*
3273 * make the item pointed to by the path smaller. new_size indicates
3274 * how small to make it, and from_end tells us if we just chop bytes
3275 * off the end of the item or if we shift the item to chop bytes off
3276 * the front.
3277 */
143bede5
JM
3278void btrfs_truncate_item(struct btrfs_trans_handle *trans,
3279 struct btrfs_root *root,
3280 struct btrfs_path *path,
3281 u32 new_size, int from_end)
b18c6685 3282{
b18c6685 3283 int slot;
5f39d397
CM
3284 struct extent_buffer *leaf;
3285 struct btrfs_item *item;
b18c6685
CM
3286 u32 nritems;
3287 unsigned int data_end;
3288 unsigned int old_data_start;
3289 unsigned int old_size;
3290 unsigned int size_diff;
3291 int i;
cfed81a0
CM
3292 struct btrfs_map_token token;
3293
3294 btrfs_init_map_token(&token);
b18c6685 3295
5f39d397 3296 leaf = path->nodes[0];
179e29e4
CM
3297 slot = path->slots[0];
3298
3299 old_size = btrfs_item_size_nr(leaf, slot);
3300 if (old_size == new_size)
143bede5 3301 return;
b18c6685 3302
5f39d397 3303 nritems = btrfs_header_nritems(leaf);
b18c6685
CM
3304 data_end = leaf_data_end(root, leaf);
3305
5f39d397 3306 old_data_start = btrfs_item_offset_nr(leaf, slot);
179e29e4 3307
b18c6685
CM
3308 size_diff = old_size - new_size;
3309
3310 BUG_ON(slot < 0);
3311 BUG_ON(slot >= nritems);
3312
3313 /*
3314 * item0..itemN ... dataN.offset..dataN.size .. data0.size
3315 */
3316 /* first correct the data pointers */
3317 for (i = slot; i < nritems; i++) {
5f39d397
CM
3318 u32 ioff;
3319 item = btrfs_item_nr(leaf, i);
db94535d 3320
cfed81a0
CM
3321 ioff = btrfs_token_item_offset(leaf, item, &token);
3322 btrfs_set_token_item_offset(leaf, item,
3323 ioff + size_diff, &token);
b18c6685 3324 }
db94535d 3325
b18c6685 3326 /* shift the data */
179e29e4
CM
3327 if (from_end) {
3328 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3329 data_end + size_diff, btrfs_leaf_data(leaf) +
3330 data_end, old_data_start + new_size - data_end);
3331 } else {
3332 struct btrfs_disk_key disk_key;
3333 u64 offset;
3334
3335 btrfs_item_key(leaf, &disk_key, slot);
3336
3337 if (btrfs_disk_key_type(&disk_key) == BTRFS_EXTENT_DATA_KEY) {
3338 unsigned long ptr;
3339 struct btrfs_file_extent_item *fi;
3340
3341 fi = btrfs_item_ptr(leaf, slot,
3342 struct btrfs_file_extent_item);
3343 fi = (struct btrfs_file_extent_item *)(
3344 (unsigned long)fi - size_diff);
3345
3346 if (btrfs_file_extent_type(leaf, fi) ==
3347 BTRFS_FILE_EXTENT_INLINE) {
3348 ptr = btrfs_item_ptr_offset(leaf, slot);
3349 memmove_extent_buffer(leaf, ptr,
d397712b
CM
3350 (unsigned long)fi,
3351 offsetof(struct btrfs_file_extent_item,
179e29e4
CM
3352 disk_bytenr));
3353 }
3354 }
3355
3356 memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
3357 data_end + size_diff, btrfs_leaf_data(leaf) +
3358 data_end, old_data_start - data_end);
3359
3360 offset = btrfs_disk_key_offset(&disk_key);
3361 btrfs_set_disk_key_offset(&disk_key, offset + size_diff);
3362 btrfs_set_item_key(leaf, &disk_key, slot);
3363 if (slot == 0)
3364 fixup_low_keys(trans, root, path, &disk_key, 1);
3365 }
5f39d397
CM
3366
3367 item = btrfs_item_nr(leaf, slot);
3368 btrfs_set_item_size(leaf, item, new_size);
3369 btrfs_mark_buffer_dirty(leaf);
b18c6685 3370
5f39d397
CM
3371 if (btrfs_leaf_free_space(root, leaf) < 0) {
3372 btrfs_print_leaf(root, leaf);
b18c6685 3373 BUG();
5f39d397 3374 }
b18c6685
CM
3375}
3376
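/*
 * Illustrative caller for btrfs_truncate_item() (the helper name and the
 * 'drop_bytes' parameter are assumptions).  Passing from_end == 1 chops
 * bytes off the end of the item, as described above; the caller is
 * responsible for not shrinking the item below the bytes it still needs.
 */
static void example_drop_item_tail(struct btrfs_trans_handle *trans,
                                   struct btrfs_root *root,
                                   struct btrfs_path *path, u32 drop_bytes)
{
        struct extent_buffer *leaf = path->nodes[0];
        u32 old_size = btrfs_item_size_nr(leaf, path->slots[0]);

        if (drop_bytes && drop_bytes < old_size)
                btrfs_truncate_item(trans, root, path,
                                    old_size - drop_bytes, 1);
}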
/*
 * make the item pointed to by the path bigger, data_size is the number of
 * bytes to add to the item.
 */
void btrfs_extend_item(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct btrfs_path *path,
		       u32 data_size)
{
	int slot;
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	u32 nritems;
	unsigned int data_end;
	unsigned int old_data;
	unsigned int old_size;
	int i;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < data_size) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
	slot = path->slots[0];
	old_data = btrfs_item_end_nr(leaf, slot);

	BUG_ON(slot < 0);
	if (slot >= nritems) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "slot %d too large, nritems %d\n",
		       slot, nritems);
		BUG_ON(1);
	}

	/*
	 * item0..itemN ... dataN.offset..dataN.size .. data0.size
	 */
	/* first correct the data pointers */
	for (i = slot; i < nritems; i++) {
		u32 ioff;
		item = btrfs_item_nr(leaf, i);

		ioff = btrfs_token_item_offset(leaf, item, &token);
		btrfs_set_token_item_offset(leaf, item,
					    ioff - data_size, &token);
	}

	/* shift the data */
	memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
		      data_end - data_size, btrfs_leaf_data(leaf) +
		      data_end, old_data - data_end);

	data_end = old_data;
	old_size = btrfs_item_size_nr(leaf, slot);
	item = btrfs_item_nr(leaf, slot);
	btrfs_set_item_size(leaf, item, old_size + data_size);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}

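/*
 * Usage sketch for btrfs_extend_item(): grow an existing item and append
 * bytes to its payload.  This is a minimal illustration rather than a copy
 * of any in-tree caller; the helper name is invented and a transaction
 * handle is assumed to be held.  Passing 'len' as ins_len to
 * btrfs_search_slot() makes the search reserve room in the leaf so the
 * extend below cannot run out of space.
 */
#if 0
static int example_append_to_item(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_key *key,
				  const void *data, u32 len)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	u32 old_size;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	/* cow the leaf and make sure it has 'len' bytes of free space */
	ret = btrfs_search_slot(trans, root, key, path, len, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto out;

	leaf = path->nodes[0];
	old_size = btrfs_item_size_nr(leaf, path->slots[0]);
	btrfs_extend_item(trans, root, path, len);

	/* the new bytes live right after the old end of the item */
	ptr = btrfs_item_ptr_offset(leaf, path->slots[0]) + old_size;
	write_extent_buffer(leaf, data, ptr, len);
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
#endif
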
/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 * Returns the number of keys that were inserted.
 */
int btrfs_insert_some_items(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root,
			    struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int ret = 0;
	int slot;
	int i;
	u32 nritems;
	u32 total_data = 0;
	u32 total_size = 0;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct btrfs_key found_key;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	for (i = 0; i < nr; i++) {
		if (total_size + data_size[i] + sizeof(struct btrfs_item) >
		    BTRFS_LEAF_DATA_SIZE(root)) {
			nr = i;
			break;
		}
		total_data += data_size[i];
		total_size += data_size[i] + sizeof(struct btrfs_item);
	}
	BUG_ON(nr == 0);

	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		for (i = nr - 1; i >= 0; i--) {
			total_data -= data_size[i];
			total_size -= data_size[i] + sizeof(struct btrfs_item);
			if (total_size < btrfs_leaf_free_space(root, leaf))
				break;
		}
		nr = i;
	}

	slot = path->slots[0];
	BUG_ON(slot < 0);

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		item = btrfs_item_nr(leaf, slot);
		btrfs_item_key_to_cpu(leaf, &found_key, slot);

		/* figure out how many keys we can insert in here */
		total_data = data_size[0];
		for (i = 1; i < nr; i++) {
			if (btrfs_comp_cpu_keys(&found_key, cpu_key + i) <= 0)
				break;
			total_data += data_size[i];
		}
		nr = i;

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	} else {
		/*
		 * this sucks but it has to be done, if we are inserting at
		 * the end of the leaf only insert 1 of the items, since we
		 * have no way of knowing what's on the next leaf and we'd
		 * have to drop our current locks to figure it out
		 */
		nr = 1;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}
	btrfs_set_header_nritems(leaf, nritems + nr);
	btrfs_mark_buffer_dirty(leaf);

	ret = 0;
	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(trans, root, path, &disk_key, 1);
	}

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
out:
	if (!ret)
		ret = nr;
	return ret;
}

/*
 * this is a helper for btrfs_insert_empty_items, the main goal here is
 * to save stack depth by doing the bulk of the work in a function
 * that doesn't call btrfs_search_slot
 */
void setup_items_for_insert(struct btrfs_trans_handle *trans,
			    struct btrfs_root *root, struct btrfs_path *path,
			    struct btrfs_key *cpu_key, u32 *data_size,
			    u32 total_data, u32 total_size, int nr)
{
	struct btrfs_item *item;
	int i;
	u32 nritems;
	unsigned int data_end;
	struct btrfs_disk_key disk_key;
	struct extent_buffer *leaf;
	int slot;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	slot = path->slots[0];

	nritems = btrfs_header_nritems(leaf);
	data_end = leaf_data_end(root, leaf);

	if (btrfs_leaf_free_space(root, leaf) < total_size) {
		btrfs_print_leaf(root, leaf);
		printk(KERN_CRIT "not enough freespace need %u have %d\n",
		       total_size, btrfs_leaf_free_space(root, leaf));
		BUG();
	}

	if (slot != nritems) {
		unsigned int old_data = btrfs_item_end_nr(leaf, slot);

		if (old_data < data_end) {
			btrfs_print_leaf(root, leaf);
			printk(KERN_CRIT "slot %d old_data %d data_end %d\n",
			       slot, old_data, data_end);
			BUG_ON(1);
		}
		/*
		 * item0..itemN ... dataN.offset..dataN.size .. data0.size
		 */
		/* first correct the data pointers */
		for (i = slot; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff - total_data, &token);
		}
		/* shift the items */
		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot + nr),
			      btrfs_item_nr_offset(slot),
			      (nritems - slot) * sizeof(struct btrfs_item));

		/* shift the data */
		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end - total_data, btrfs_leaf_data(leaf) +
			      data_end, old_data - data_end);
		data_end = old_data;
	}

	/* setup the item for the new data */
	for (i = 0; i < nr; i++) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key + i);
		btrfs_set_item_key(leaf, &disk_key, slot + i);
		item = btrfs_item_nr(leaf, slot + i);
		btrfs_set_token_item_offset(leaf, item,
					    data_end - data_size[i], &token);
		data_end -= data_size[i];
		btrfs_set_token_item_size(leaf, item, data_size[i], &token);
	}

	btrfs_set_header_nritems(leaf, nritems + nr);

	if (slot == 0) {
		btrfs_cpu_key_to_disk(&disk_key, cpu_key);
		fixup_low_keys(trans, root, path, &disk_key, 1);
	}
	btrfs_unlock_up_safe(path, 1);
	btrfs_mark_buffer_dirty(leaf);

	if (btrfs_leaf_free_space(root, leaf) < 0) {
		btrfs_print_leaf(root, leaf);
		BUG();
	}
}

/*
 * Given a key and some data, insert items into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_empty_items(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path,
			     struct btrfs_key *cpu_key, u32 *data_size,
			     int nr)
{
	int ret = 0;
	int slot;
	int i;
	u32 total_size = 0;
	u32 total_data = 0;

	for (i = 0; i < nr; i++)
		total_data += data_size[i];

	total_size = total_data + (nr * sizeof(struct btrfs_item));
	ret = btrfs_search_slot(trans, root, cpu_key, path, total_size, 1);
	if (ret == 0)
		return -EEXIST;
	if (ret < 0)
		return ret;

	slot = path->slots[0];
	BUG_ON(slot < 0);

	setup_items_for_insert(trans, root, path, cpu_key, data_size,
			       total_data, total_size, nr);
	return 0;
}

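/*
 * Usage sketch for btrfs_insert_empty_items(): reserve two adjacent items
 * with a single tree search, then copy the payloads in by hand.  The keys
 * are assumed to be sorted and supplied by the caller together with the
 * sizes and buffers; the helper name is invented for illustration.
 */
#if 0
static int example_insert_two_items(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_key *keys,
				    void *bufs[2], u32 sizes[2])
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;
	int i;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_insert_empty_items(trans, root, path, keys, sizes, 2);
	if (ret)
		goto out;

	/* path->slots[0] points at the first of the new, empty items */
	leaf = path->nodes[0];
	for (i = 0; i < 2; i++) {
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0] + i);
		write_extent_buffer(leaf, bufs[i], ptr, sizes[i]);
	}
	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
#endif
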
/*
 * Given a key and some data, insert an item into the tree.
 * This does all the path init required, making room in the tree if needed.
 */
int btrfs_insert_item(struct btrfs_trans_handle *trans, struct btrfs_root
		      *root, struct btrfs_key *cpu_key, void *data, u32
		      data_size)
{
	int ret = 0;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	unsigned long ptr;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = btrfs_insert_empty_item(trans, root, path, cpu_key, data_size);
	if (!ret) {
		leaf = path->nodes[0];
		ptr = btrfs_item_ptr_offset(leaf, path->slots[0]);
		write_extent_buffer(leaf, data, ptr, data_size);
		btrfs_mark_buffer_dirty(leaf);
	}
	btrfs_free_path(path);
	return ret;
}

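/*
 * Usage sketch for btrfs_insert_item(): the one-shot path for callers that
 * just want a key plus a flat blob of data in the tree and do not need the
 * path afterwards.  The key fields come from the caller and the helper
 * name is invented.
 */
#if 0
static int example_insert_blob(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 objectid, u8 type, u64 offset,
			       void *data, u32 len)
{
	struct btrfs_key key;

	key.objectid = objectid;
	key.type = type;
	key.offset = offset;

	/* allocates a path internally, makes room and copies 'data' in */
	return btrfs_insert_item(trans, root, &key, data, len);
}
#endif
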
/*
 * delete the pointer from a given node.
 *
 * the tree should have been previously balanced so the deletion does not
 * empty a node.
 */
static void del_ptr(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int level, int slot)
{
	struct extent_buffer *parent = path->nodes[level];
	u32 nritems;

	nritems = btrfs_header_nritems(parent);
	if (slot != nritems - 1) {
		memmove_extent_buffer(parent,
			      btrfs_node_key_ptr_offset(slot),
			      btrfs_node_key_ptr_offset(slot + 1),
			      sizeof(struct btrfs_key_ptr) *
			      (nritems - slot - 1));
	}
	nritems--;
	btrfs_set_header_nritems(parent, nritems);
	if (nritems == 0 && parent == root->node) {
		BUG_ON(btrfs_header_level(root->node) != 1);
		/* just turn the root into a leaf and break */
		btrfs_set_header_level(root->node, 0);
	} else if (slot == 0) {
		struct btrfs_disk_key disk_key;

		btrfs_node_key(parent, &disk_key, 0);
		fixup_low_keys(trans, root, path, &disk_key, level + 1);
	}
	btrfs_mark_buffer_dirty(parent);
}

/*
 * a helper function to delete the leaf pointed to by path->slots[1] and
 * path->nodes[1].
 *
 * This deletes the pointer in path->nodes[1] and frees the leaf
 * block extent.
 *
 * The path must have already been setup for deleting the leaf, including
 * all the proper balancing.  path->nodes[1] must be locked.
 */
static noinline void btrfs_del_leaf(struct btrfs_trans_handle *trans,
				    struct btrfs_root *root,
				    struct btrfs_path *path,
				    struct extent_buffer *leaf)
{
	WARN_ON(btrfs_header_generation(leaf) != trans->transid);
	del_ptr(trans, root, path, 1, path->slots[1]);

	/*
	 * btrfs_free_extent is expensive, we want to make sure we
	 * aren't holding any locks when we call it
	 */
	btrfs_unlock_up_safe(path, 0);

	root_sub_used(root, leaf->len);

	extent_buffer_get(leaf);
	btrfs_free_tree_block(trans, root, leaf, 0, 1, 0);
	free_extent_buffer_stale(leaf);
}
/*
 * delete the item at the leaf level in path.  If that empties
 * the leaf, remove it from the tree
 */
int btrfs_del_items(struct btrfs_trans_handle *trans, struct btrfs_root *root,
		    struct btrfs_path *path, int slot, int nr)
{
	struct extent_buffer *leaf;
	struct btrfs_item *item;
	int last_off;
	int dsize = 0;
	int ret = 0;
	int wret;
	int i;
	u32 nritems;
	struct btrfs_map_token token;

	btrfs_init_map_token(&token);

	leaf = path->nodes[0];
	last_off = btrfs_item_offset_nr(leaf, slot + nr - 1);

	for (i = 0; i < nr; i++)
		dsize += btrfs_item_size_nr(leaf, slot + i);

	nritems = btrfs_header_nritems(leaf);

	if (slot + nr != nritems) {
		int data_end = leaf_data_end(root, leaf);

		memmove_extent_buffer(leaf, btrfs_leaf_data(leaf) +
			      data_end + dsize,
			      btrfs_leaf_data(leaf) + data_end,
			      last_off - data_end);

		for (i = slot + nr; i < nritems; i++) {
			u32 ioff;

			item = btrfs_item_nr(leaf, i);
			ioff = btrfs_token_item_offset(leaf, item, &token);
			btrfs_set_token_item_offset(leaf, item,
						    ioff + dsize, &token);
		}

		memmove_extent_buffer(leaf, btrfs_item_nr_offset(slot),
			      btrfs_item_nr_offset(slot + nr),
			      sizeof(struct btrfs_item) *
			      (nritems - slot - nr));
	}
	btrfs_set_header_nritems(leaf, nritems - nr);
	nritems -= nr;

	/* delete the leaf if we've emptied it */
	if (nritems == 0) {
		if (leaf == root->node) {
			btrfs_set_header_level(leaf, 0);
		} else {
			btrfs_set_path_blocking(path);
			clean_tree_block(trans, root, leaf);
			btrfs_del_leaf(trans, root, path, leaf);
		}
	} else {
		int used = leaf_space_used(leaf, 0, nritems);
		if (slot == 0) {
			struct btrfs_disk_key disk_key;

			btrfs_item_key(leaf, &disk_key, 0);
			fixup_low_keys(trans, root, path, &disk_key, 1);
		}

		/* delete the leaf if it is mostly empty */
		if (used < BTRFS_LEAF_DATA_SIZE(root) / 3) {
			/* push_leaf_left fixes the path.
			 * make sure the path still points to our leaf
			 * for possible call to del_ptr below
			 */
			slot = path->slots[1];
			extent_buffer_get(leaf);

			btrfs_set_path_blocking(path);
			wret = push_leaf_left(trans, root, path, 1, 1,
					      1, (u32)-1);
			if (wret < 0 && wret != -ENOSPC)
				ret = wret;

			if (path->nodes[0] == leaf &&
			    btrfs_header_nritems(leaf)) {
				wret = push_leaf_right(trans, root, path, 1,
						       1, 1, 0);
				if (wret < 0 && wret != -ENOSPC)
					ret = wret;
			}

			if (btrfs_header_nritems(leaf) == 0) {
				path->slots[1] = slot;
				btrfs_del_leaf(trans, root, path, leaf);
				free_extent_buffer(leaf);
				ret = 0;
			} else {
				/* if we're still in the path, make sure
				 * we're dirty.  Otherwise, one of the
				 * push_leaf functions must have already
				 * dirtied this buffer
				 */
				if (path->nodes[0] == leaf)
					btrfs_mark_buffer_dirty(leaf);
				free_extent_buffer(leaf);
			}
		} else {
			btrfs_mark_buffer_dirty(leaf);
		}
	}
	return ret;
}
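
/*
 * Usage sketch for btrfs_del_items(): delete a single item by key.  This is
 * roughly what the btrfs_del_item() wrapper does for callers that already
 * hold a path; the helper name here is invented.  An ins_len of -1 tells
 * btrfs_search_slot() that a deletion is coming so it can rebalance on the
 * way down.
 */
#if 0
static int example_delete_one_item(struct btrfs_trans_handle *trans,
				   struct btrfs_root *root,
				   struct btrfs_key *key)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	ret = btrfs_search_slot(trans, root, key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
	if (ret)
		goto out;

	/* drops the item, and the leaf too if it ends up empty */
	ret = btrfs_del_items(trans, root, path, path->slots[0], 1);
out:
	btrfs_free_path(path);
	return ret;
}
#endif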

/*
 * search the tree again to find a leaf with lesser keys
 * returns 0 if it found something or 1 if there are no lesser leaves.
 * returns < 0 on io errors.
 *
 * This may release the path, and so you may lose any locks held at the
 * time you call it.
 */
int btrfs_prev_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	struct btrfs_key key;
	struct btrfs_disk_key found_key;
	int ret;

	btrfs_item_key_to_cpu(path->nodes[0], &key, 0);

	if (key.offset > 0)
		key.offset--;
	else if (key.type > 0)
		key.type--;
	else if (key.objectid > 0)
		key.objectid--;
	else
		return 1;

	btrfs_release_path(path);
	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ret;
	btrfs_item_key(path->nodes[0], &found_key, 0);
	ret = comp_keys(&found_key, &key);
	if (ret < 0)
		return 0;
	return 1;
}

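/*
 * Usage sketch for btrfs_prev_leaf(): walk the leaves of a tree from right
 * to left.  The starting key is simply "the largest possible key"; the
 * helper name is invented and the per-leaf work is left as a comment.
 */
#if 0
static int example_walk_leaves_backwards(struct btrfs_root *root)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = (u64)-1;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		/* ... look at the items in path->nodes[0] here ... */

		ret = btrfs_prev_leaf(root, path);
		if (ret != 0)
			break;
	}
	if (ret > 0)
		ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
#endif
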
/*
 * A helper function to walk down the tree starting at min_key, and looking
 * for nodes or leaves that are either in cache or have a minimum
 * transaction id.  This is used by the btree defrag code, and tree logging.
 *
 * This does not cow, but it does stuff the starting key it finds back
 * into min_key, so you can call btrfs_search_slot with cow=1 on the
 * key and get a writable path.
 *
 * This does lock as it descends, and path->keep_locks should be set
 * to 1 by the caller.
 *
 * This honors path->lowest_level to prevent descent past a given level
 * of the tree.
 *
 * min_trans indicates the oldest transaction that you are interested
 * in walking through.  Any nodes or leaves older than min_trans are
 * skipped over (without reading them).
 *
 * returns zero if something useful was found, < 0 on error and 1 if there
 * was nothing in the tree that matched the search criteria.
 */
int btrfs_search_forward(struct btrfs_root *root, struct btrfs_key *min_key,
			 struct btrfs_key *max_key,
			 struct btrfs_path *path, int cache_only,
			 u64 min_trans)
{
	struct extent_buffer *cur;
	struct btrfs_key found_key;
	int slot;
	int sret;
	u32 nritems;
	int level;
	int ret = 1;

	WARN_ON(!path->keep_locks);
again:
	cur = btrfs_read_lock_root_node(root);
	level = btrfs_header_level(cur);
	WARN_ON(path->nodes[level]);
	path->nodes[level] = cur;
	path->locks[level] = BTRFS_READ_LOCK;

	if (btrfs_header_generation(cur) < min_trans) {
		ret = 1;
		goto out;
	}
	while (1) {
		nritems = btrfs_header_nritems(cur);
		level = btrfs_header_level(cur);
		sret = bin_search(cur, min_key, level, &slot);

		/* at the lowest level, we're done, setup the path and exit */
		if (level == path->lowest_level) {
			if (slot >= nritems)
				goto find_next_key;
			ret = 0;
			path->slots[level] = slot;
			btrfs_item_key_to_cpu(cur, &found_key, slot);
			goto out;
		}
		if (sret && slot > 0)
			slot--;
		/*
		 * check this node pointer against the cache_only and
		 * min_trans parameters.  If it isn't in cache or is too
		 * old, skip to the next one.
		 */
		while (slot < nritems) {
			u64 blockptr;
			u64 gen;
			struct extent_buffer *tmp;
			struct btrfs_disk_key disk_key;

			blockptr = btrfs_node_blockptr(cur, slot);
			gen = btrfs_node_ptr_generation(cur, slot);
			if (gen < min_trans) {
				slot++;
				continue;
			}
			if (!cache_only)
				break;

			if (max_key) {
				btrfs_node_key(cur, &disk_key, slot);
				if (comp_keys(&disk_key, max_key) >= 0) {
					ret = 1;
					goto out;
				}
			}

			tmp = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));

			if (tmp && btrfs_buffer_uptodate(tmp, gen, 1) > 0) {
				free_extent_buffer(tmp);
				break;
			}
			if (tmp)
				free_extent_buffer(tmp);
			slot++;
		}
find_next_key:
		/*
		 * we didn't find a candidate key in this node, walk forward
		 * and find another one
		 */
		if (slot >= nritems) {
			path->slots[level] = slot;
			btrfs_set_path_blocking(path);
			sret = btrfs_find_next_key(root, path, min_key, level,
						   cache_only, min_trans);
			if (sret == 0) {
				btrfs_release_path(path);
				goto again;
			} else {
				goto out;
			}
		}
		/* save our key for returning back */
		btrfs_node_key_to_cpu(cur, &found_key, slot);
		path->slots[level] = slot;
		if (level == path->lowest_level) {
			ret = 0;
			unlock_up(path, level, 1, 0, NULL);
			goto out;
		}
		btrfs_set_path_blocking(path);
		cur = read_node_slot(root, cur, slot);
		BUG_ON(!cur); /* -ENOMEM */

		btrfs_tree_read_lock(cur);

		path->locks[level - 1] = BTRFS_READ_LOCK;
		path->nodes[level - 1] = cur;
		unlock_up(path, level, 1, 0, NULL);
		btrfs_clear_path_blocking(path, NULL, 0);
	}
out:
	if (ret == 0)
		memcpy(min_key, &found_key, sizeof(found_key));
	btrfs_set_path_blocking(path);
	return ret;
}

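/*
 * Usage sketch of the btrfs_search_forward() calling convention described
 * above: keep_locks is set on the path, min_key is advanced by hand after
 * every hit, and the path is dropped between iterations.  This mirrors the
 * defrag/logging style of scan only in spirit; the helper name and the
 * empty "use it here" step are placeholders.
 */
#if 0
static int example_scan_newer_than(struct btrfs_root *root, u64 min_trans)
{
	struct btrfs_path *path;
	struct btrfs_key min_key;
	struct btrfs_key max_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	path->keep_locks = 1;

	min_key.objectid = 0;
	min_key.type = 0;
	min_key.offset = 0;
	max_key.objectid = (u64)-1;
	max_key.type = (u8)-1;
	max_key.offset = (u64)-1;

	while (1) {
		ret = btrfs_search_forward(root, &min_key, &max_key,
					   path, 0, min_trans);
		if (ret != 0)
			break;

		/* min_key now holds the key that was found; use it here */

		btrfs_release_path(path);

		/* step past the key we just processed */
		if (min_key.offset < (u64)-1) {
			min_key.offset++;
		} else if (min_key.type < (u8)-1) {
			min_key.type++;
			min_key.offset = 0;
		} else if (min_key.objectid < (u64)-1) {
			min_key.objectid++;
			min_key.type = 0;
			min_key.offset = 0;
		} else {
			break;
		}
	}
	if (ret > 0)
		ret = 0;
	btrfs_free_path(path);
	return ret;
}
#endif
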
/*
 * this is similar to btrfs_next_leaf, but does not try to preserve
 * and fixup the path.  It looks for and returns the next key in the
 * tree based on the current path and the cache_only and min_trans
 * parameters.
 *
 * 0 is returned if another key is found, < 0 if there are any errors
 * and 1 is returned if there are no higher keys in the tree
 *
 * path->keep_locks should be set to 1 on the search made before
 * calling this function.
 */
int btrfs_find_next_key(struct btrfs_root *root, struct btrfs_path *path,
			struct btrfs_key *key, int level,
			int cache_only, u64 min_trans)
{
	int slot;
	struct extent_buffer *c;

	WARN_ON(!path->keep_locks);
	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level])
			return 1;

		slot = path->slots[level] + 1;
		c = path->nodes[level];
next:
		if (slot >= btrfs_header_nritems(c)) {
			int ret;
			int orig_lowest;
			struct btrfs_key cur_key;
			if (level + 1 >= BTRFS_MAX_LEVEL ||
			    !path->nodes[level + 1])
				return 1;

			if (path->locks[level + 1]) {
				level++;
				continue;
			}

			slot = btrfs_header_nritems(c) - 1;
			if (level == 0)
				btrfs_item_key_to_cpu(c, &cur_key, slot);
			else
				btrfs_node_key_to_cpu(c, &cur_key, slot);

			orig_lowest = path->lowest_level;
			btrfs_release_path(path);
			path->lowest_level = level;
			ret = btrfs_search_slot(NULL, root, &cur_key, path,
						0, 0);
			path->lowest_level = orig_lowest;
			if (ret < 0)
				return ret;

			c = path->nodes[level];
			slot = path->slots[level];
			if (ret == 0)
				slot++;
			goto next;
		}

		if (level == 0)
			btrfs_item_key_to_cpu(c, key, slot);
		else {
			u64 blockptr = btrfs_node_blockptr(c, slot);
			u64 gen = btrfs_node_ptr_generation(c, slot);

			if (cache_only) {
				struct extent_buffer *cur;
				cur = btrfs_find_tree_block(root, blockptr,
					    btrfs_level_size(root, level - 1));
				if (!cur ||
				    btrfs_buffer_uptodate(cur, gen, 1) <= 0) {
					slot++;
					if (cur)
						free_extent_buffer(cur);
					goto next;
				}
				free_extent_buffer(cur);
			}
			if (gen < min_trans) {
				slot++;
				goto next;
			}
			btrfs_node_key_to_cpu(c, key, slot);
		}
		return 0;
	}
	return 1;
}

/*
 * search the tree again to find a leaf with greater keys
 * returns 0 if it found something or 1 if there are no greater leaves.
 * returns < 0 on io errors.
 */
int btrfs_next_leaf(struct btrfs_root *root, struct btrfs_path *path)
{
	int slot;
	int level;
	struct extent_buffer *c;
	struct extent_buffer *next;
	struct btrfs_key key;
	u32 nritems;
	int ret;
	int old_spinning = path->leave_spinning;
	int next_rw_lock = 0;

	nritems = btrfs_header_nritems(path->nodes[0]);
	if (nritems == 0)
		return 1;

	btrfs_item_key_to_cpu(path->nodes[0], &key, nritems - 1);
again:
	level = 1;
	next = NULL;
	next_rw_lock = 0;
	btrfs_release_path(path);

	path->keep_locks = 1;
	path->leave_spinning = 1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	path->keep_locks = 0;

	if (ret < 0)
		return ret;

	nritems = btrfs_header_nritems(path->nodes[0]);
	/*
	 * by releasing the path above we dropped all our locks.  A balance
	 * could have added more items next to the key that used to be
	 * at the very end of the block.  So, check again here and
	 * advance the path if there are now more items available.
	 */
	if (nritems > 0 && path->slots[0] < nritems - 1) {
		if (ret == 0)
			path->slots[0]++;
		ret = 0;
		goto done;
	}

	while (level < BTRFS_MAX_LEVEL) {
		if (!path->nodes[level]) {
			ret = 1;
			goto done;
		}

		slot = path->slots[level] + 1;
		c = path->nodes[level];
		if (slot >= btrfs_header_nritems(c)) {
			level++;
			if (level == BTRFS_MAX_LEVEL) {
				ret = 1;
				goto done;
			}
			continue;
		}

		if (next) {
			btrfs_tree_unlock_rw(next, next_rw_lock);
			free_extent_buffer(next);
		}

		next = c;
		next_rw_lock = path->locks[level];
		ret = read_block_for_search(NULL, root, path, &next, level,
					    slot, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
		break;
	}
	path->slots[level] = slot;
	while (1) {
		level--;
		c = path->nodes[level];
		if (path->locks[level])
			btrfs_tree_unlock_rw(c, path->locks[level]);

		free_extent_buffer(c);
		path->nodes[level] = next;
		path->slots[level] = 0;
		if (!path->skip_locking)
			path->locks[level] = next_rw_lock;
		if (!level)
			break;

		ret = read_block_for_search(NULL, root, path, &next, level,
					    0, &key);
		if (ret == -EAGAIN)
			goto again;

		if (ret < 0) {
			btrfs_release_path(path);
			goto done;
		}

		if (!path->skip_locking) {
			ret = btrfs_try_tree_read_lock(next);
			if (!ret) {
				btrfs_set_path_blocking(path);
				btrfs_tree_read_lock(next);
				btrfs_clear_path_blocking(path, next,
							  BTRFS_READ_LOCK);
			}
			next_rw_lock = BTRFS_READ_LOCK;
		}
	}
	ret = 0;
done:
	unlock_up(path, 0, 1, 0, NULL);
	path->leave_spinning = old_spinning;
	if (!old_spinning)
		btrfs_set_path_blocking(path);

	return ret;
}

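/*
 * Usage sketch of the standard forward iteration that btrfs_next_leaf()
 * makes possible: bump path->slots[0] within the current leaf and let
 * btrfs_next_leaf() pull in the following leaf once the slot runs off the
 * end.  The starting key and the helper name are placeholders.
 */
#if 0
static int example_walk_items(struct btrfs_root *root, u64 objectid)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = 0;
	key.offset = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	while (1) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto out;
			if (ret > 0)
				break;		/* no more leaves */
			continue;
		}
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != objectid)
			break;

		/* ... process the item at path->slots[0] here ... */

		path->slots[0]++;
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
#endif
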
/*
 * this uses btrfs_prev_leaf to walk backwards in the tree, and keeps
 * searching until it gets past min_objectid or finds an item of 'type'
 *
 * returns 0 if something is found, 1 if nothing was found and < 0 on error
 */
int btrfs_previous_item(struct btrfs_root *root,
			struct btrfs_path *path, u64 min_objectid,
			int type)
{
	struct btrfs_key found_key;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;

	while (1) {
		if (path->slots[0] == 0) {
			btrfs_set_path_blocking(path);
			ret = btrfs_prev_leaf(root, path);
			if (ret != 0)
				return ret;
		} else {
			path->slots[0]--;
		}
		leaf = path->nodes[0];
		nritems = btrfs_header_nritems(leaf);
		if (nritems == 0)
			return 1;
		if (path->slots[0] == nritems)
			path->slots[0]--;

		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid < min_objectid)
			break;
		if (found_key.type == type)
			return 0;
		if (found_key.objectid == min_objectid &&
		    found_key.type < type)
			break;
	}
	return 1;
}
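
/*
 * Usage sketch for btrfs_previous_item(): position a path at the last key
 * of an objectid and back up to the closest preceding inode item.  The
 * helper name and the choice of BTRFS_INODE_ITEM_KEY are illustrative only.
 */
#if 0
static int example_find_prev_inode_item(struct btrfs_root *root,
					u64 objectid)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = objectid;
	key.type = (u8)-1;
	key.offset = (u64)-1;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	/* walk backwards until an inode item at or below 'objectid' */
	ret = btrfs_previous_item(root, path, objectid, BTRFS_INODE_ITEM_KEY);
out:
	btrfs_free_path(path);
	return ret;
}
#endif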