/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include "compat.h"
#include "hash.h"
#include "ctree.h"
#include "disk-io.h"
#include "print-tree.h"
#include "transaction.h"
#include "volumes.h"
#include "locking.h"
#include "free-space-cache.h"

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root,
			      u64 bytenr, u64 num_bytes, int alloc);
static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
				 u64 num_bytes, int reserve, int sinfo);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       u64 bytenr, u64 num_bytes, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 alloc_bytes,
			  u64 flags, int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		WARN_ON(cache->reserved_pinned > 0);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);
	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret)
		btrfs_get_block_group(ret);
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
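
/*
 * The returned group, if any, holds a reference taken above under
 * block_group_cache_lock; callers must drop it with
 * btrfs_put_block_group().  The btrfs_lookup_first_block_group() and
 * btrfs_lookup_block_group() wrappers below cover the two search modes.
 */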

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		BUG_ON(ret);
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		BUG_ON(ret);

		while (nr--) {
			cache->bytes_super += stripe_len;
			ret = add_excluded_extent(root, logical[nr],
						  stripe_len);
			BUG_ON(ret);
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_STARTED) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	/* We're loading it the fast way, so we don't have a caching_ctl. */
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

/*
 * This is only called by cache_block_group, since we could have freed
 * extents: we need to check the pinned_extents tree for any extents
 * that can't be used yet, because their free space will only be
 * released when the transaction commits.
 */
static u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
			      struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret);
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret);
	}

	return total_added;
}
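
/*
 * A worked example with hypothetical numbers: for start == 0,
 * end == 4096 and a single pinned extent covering [1024, 2047], the
 * loop above adds [0, 1023] as free space and skips past the pinned
 * range, and the final block adds the tail [2048, 4095], returning
 * 3072 bytes in total.
 */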

static int caching_kthread(void *data)
{
	struct btrfs_block_group_cache *block_group = data;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	struct btrfs_caching_control *caching_ctl = block_group->caching_ctl;
	struct btrfs_root *extent_root = fs_info->extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = 2;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;
again:
	mutex_lock(&caching_ctl->mutex);
	/* need to make sure the commit_root doesn't disappear */
	down_read(&fs_info->extent_commit_sem);

	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto err;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		smp_mb();
		if (fs_info->closing > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			caching_ctl->progress = last;
			btrfs_release_path(extent_root, path);
			up_read(&fs_info->extent_commit_sem);
			mutex_unlock(&caching_ctl->mutex);
			if (btrfs_transaction_in_commit(fs_info))
				schedule_timeout(1);
			else
				cond_resched();
			goto again;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			last = key.objectid + key.offset;

			if (total_found > (1024 * 1024 * 2)) {
				total_found = 0;
				wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

err:
	btrfs_free_path(path);
	up_read(&fs_info->extent_commit_sem);

	free_excluded_extents(extent_root, block_group);

	mutex_unlock(&caching_ctl->mutex);
	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	atomic_dec(&block_group->space_info->caching_threads);
	btrfs_put_block_group(block_group);

	return 0;
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     int load_cache_only)
{
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct task_struct *tsk;
	int ret = 0;

	smp_mb();
	if (cache->cached != BTRFS_CACHE_NO)
		return 0;

	/*
	 * We can't do the read from on-disk cache during a commit since we need
	 * to have the normal tree locking.  Also if we are currently trying to
	 * allocate blocks for the tree root we can't do the fast caching since
	 * we likely hold important locks.
	 */
	if (!trans->transaction->in_commit &&
	    (root && root != root->fs_info->tree_root)) {
		spin_lock(&cache->lock);
		if (cache->cached != BTRFS_CACHE_NO) {
			spin_unlock(&cache->lock);
			return 0;
		}
		cache->cached = BTRFS_CACHE_STARTED;
		spin_unlock(&cache->lock);

		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
		} else {
			cache->cached = BTRFS_CACHE_NO;
		}
		spin_unlock(&cache->lock);
		if (ret == 1) {
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	}

	if (load_cache_only)
		return 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_KERNEL);
	BUG_ON(!caching_ctl);

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	/* one for caching kthread, one for caching block group list */
	atomic_set(&caching_ctl->count, 2);

	spin_lock(&cache->lock);
	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_STARTED;
	spin_unlock(&cache->lock);

	down_write(&fs_info->extent_commit_sem);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->extent_commit_sem);

	atomic_inc(&cache->space_info->caching_threads);
	btrfs_get_block_group(cache);

	tsk = kthread_run(caching_kthread, cache, "btrfs-cache-%llu\n",
			  cache->key.objectid);
	if (IS_ERR(tsk)) {
		ret = PTR_ERR(tsk);
		printk(KERN_ERR "error running thread %d\n", ret);
		BUG();
	}

	return ret;
}

/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_SYSTEM |
		 BTRFS_BLOCK_GROUP_METADATA;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

static u64 div_factor(u64 num, int factor)
{
	if (factor == 10)
		return num;
	num *= factor;
	do_div(num, 10);
	return num;
}

static u64 div_factor_fine(u64 num, int factor)
{
	if (factor == 100)
		return num;
	num *= factor;
	do_div(num, 100);
	return num;
}
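
/*
 * For example, div_factor(1000, 9) == 900 and
 * div_factor_fine(1000, 75) == 750.  do_div() is used so the 64-bit
 * division also works on 32-bit hosts.
 */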

u64 btrfs_find_block_group(struct btrfs_root *root,
			   u64 search_start, u64 search_hint, int owner)
{
	struct btrfs_block_group_cache *cache;
	u64 used;
	u64 last = max(search_hint, search_start);
	u64 group_start = 0;
	int full_search = 0;
	int factor = 9;
	int wrapped = 0;
again:
	while (1) {
		cache = btrfs_lookup_first_block_group(root->fs_info, last);
		if (!cache)
			break;

		spin_lock(&cache->lock);
		last = cache->key.objectid + cache->key.offset;
		used = btrfs_block_group_used(&cache->item);

		if ((full_search || !cache->ro) &&
		    block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
			if (used + cache->pinned + cache->reserved <
			    div_factor(cache->key.offset, factor)) {
				group_start = cache->key.objectid;
				spin_unlock(&cache->lock);
				btrfs_put_block_group(cache);
				goto found;
			}
		}
		spin_unlock(&cache->lock);
		btrfs_put_block_group(cache);
		cond_resched();
	}
	if (!wrapped) {
		last = search_start;
		wrapped = 1;
		goto again;
	}
	if (!full_search && factor < 10) {
		last = search_start;
		full_search = 1;
		factor = 10;
		goto again;
	}
found:
	return group_start;
}

/* simple helper to search for an existing extent at a given offset */
int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	BUG_ON(!path);
	key.objectid = start;
	key.offset = len;
	btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to look up the reference count and flags of an extent.
 *
 * The head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree.  The head
 * node may also store the extent flags to set.  This way you can check
 * what the reference count and extent flags will be once all of the
 * queued delayed refs are processed, without actually running them.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 num_bytes, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;
	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}
again:
	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(root->fs_info->extent_root, path);

			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto again;
		}
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
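
/*
 * A minimal usage sketch (hypothetical caller): to see what the
 * reference count of a tree block will be once its queued delayed
 * refs are applied:
 *
 *	u64 refs, flags;
 *	int ret;
 *
 *	ret = btrfs_lookup_extent_info(trans, root, eb->start, eb->len,
 *				       &refs, &flags);
 */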

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  Implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  Full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually full back refs are generic, and can
 * be used in all cases where implicit back refs are used.  The major
 * shortcoming of full back refs is their overhead: every time a tree block
 * gets COWed, we have to update the back refs entry for all pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, and add implicit back refs
 * for every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, and increase the lower level extents' reference counts.  The
 * original implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, and increase the lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * the key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf.
 *
 * When a file extent is allocated, implicit back refs are used.
 * The fields are filled in as:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed during file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * consist only of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */
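
/*
 * For illustration only (all values hypothetical): a file extent at
 * bytenr 12582912 referenced by subvolume 5, inode 257, file offset 0
 * gets an implicit back ref item keyed as
 *
 *	key.objectid = 12582912;
 *	key.type = BTRFS_EXTENT_DATA_REF_KEY;
 *	key.offset = hash_extent_data_ref(5, 257, 0);
 *
 * while a shared (full) back ref for the same extent would use
 * BTRFS_SHARED_DATA_REF_KEY with the parent block's bytenr as the key
 * offset, exactly as composed in lookup_extent_data_ref() below.
 */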

#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0);
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(root, path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret);

	ret = btrfs_extend_item(trans, root, path, new_size);
	BUG_ON(ret);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(root, path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(root, path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(root, path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(root, path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_root *root,
					  struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(root, path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(root, path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
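
/*
 * The mapping above, spelled out: a tree block (owner below
 * BTRFS_FIRST_FREE_OBJECTID) gets BTRFS_SHARED_BLOCK_REF_KEY when a
 * parent is given, otherwise BTRFS_TREE_BLOCK_REF_KEY; a data extent
 * gets BTRFS_SHARED_DATA_REF_KEY when a parent is given, otherwise
 * BTRFS_EXTENT_DATA_REF_KEY.
 */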

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * Look for an inline back ref.  If the back ref is found, *ref_ret is set
 * to the address of the inline back ref, and 0 is returned.
 *
 * If the back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * If insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}
	BUG_ON(ret);

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	} else {
		BUG_ON(!(flags & BTRFS_EXTENT_FLAG_DATA));
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add a new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add a new inline back
		 * ref if there is any kind of item for this block.
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}
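
/*
 * This return convention is what the callers below rely on: 0 means
 * update the existing inline ref in place, -ENOENT means insert a new
 * inline ref at *ref_ret (see insert_inline_extent_backref()), and
 * -EAGAIN makes __btrfs_inc_extent_ref() fall back to a separate
 * keyed back ref item.
 */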

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
int setup_inline_extent_backref(struct btrfs_trans_handle *trans,
				struct btrfs_root *root,
				struct btrfs_path *path,
				struct btrfs_extent_inline_ref *iref,
				u64 parent, u64 root_objectid,
				u64 owner, u64 offset, int refs_to_add,
				struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;
	int ret;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	ret = btrfs_extend_item(trans, root, path, size);
	BUG_ON(ret);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(root, path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
int update_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_mod,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	int ret;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		ret = btrfs_truncate_item(trans, root, path, item_size, 1);
		BUG_ON(ret);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		ret = update_inline_extent_backref(trans, root, path, iref,
						   refs_to_add, extent_op);
	} else if (ret == -ENOENT) {
		ret = setup_inline_extent_backref(trans, root, path, iref,
						  parent, root_objectid,
						  owner, offset, refs_to_add,
						  extent_op);
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data)
{
	int ret;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		ret = update_inline_extent_backref(trans, root, path, iref,
						   -refs_to_drop, NULL);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop);
	} else {
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

static void btrfs_issue_discard(struct block_device *bdev,
				u64 start, u64 len)
{
	blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL, 0);
}

static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
				u64 num_bytes)
{
	int ret;
	u64 map_length = num_bytes;
	struct btrfs_multi_bio *multi = NULL;

	if (!btrfs_test_opt(root, DISCARD))
		return 0;

	/* Tell the block device(s) that the sectors can be discarded */
	ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
			      bytenr, &map_length, &multi, 0);
	if (!ret) {
		struct btrfs_bio_stripe *stripe = multi->stripes;
		int i;

		if (map_length > num_bytes)
			map_length = num_bytes;

		for (i = 0; i < multi->num_stripes; i++, stripe++) {
			btrfs_issue_discard(stripe->dev->bdev,
					    stripe->physical,
					    map_length);
		}
		kfree(multi);
	}

	return ret;
}

int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root,
			 u64 bytenr, u64 num_bytes, u64 parent,
			 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;
	BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
	       root_objectid == BTRFS_TREE_LOG_OBJECTID);

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
					parent, root_objectid, (int)owner,
					BTRFS_ADD_DELAYED_REF, NULL);
	} else {
		ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
					parent, root_objectid, owner, offset,
					BTRFS_ADD_DELAYED_REF, NULL);
	}
	return ret;
}

static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  u64 bytenr, u64 num_bytes,
				  u64 parent, u64 root_objectid,
				  u64 owner, u64 offset, int refs_to_add,
				  struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *item;
	u64 refs;
	int ret;
	int err = 0;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	path->reada = 1;
	path->leave_spinning = 1;
	/* this will set up the path even if it fails to insert the back ref */
1822 ret = insert_inline_extent_backref(trans, root->fs_info->extent_root,
1823 path, bytenr, num_bytes, parent,
1824 root_objectid, owner, offset,
1825 refs_to_add, extent_op);
1826 if (ret == 0)
1827 goto out;
1828
1829 if (ret != -EAGAIN) {
1830 err = ret;
1831 goto out;
1832 }
1833
1834 leaf = path->nodes[0];
1835 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1836 refs = btrfs_extent_refs(leaf, item);
1837 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
1838 if (extent_op)
1839 __run_delayed_extent_op(extent_op, leaf, item);
1840
1841 btrfs_mark_buffer_dirty(leaf);
1842 btrfs_release_path(root->fs_info->extent_root, path);
1843
1844 path->reada = 1;
1845 path->leave_spinning = 1;
1846
1847 /* now insert the actual backref */
1848 ret = insert_extent_backref(trans, root->fs_info->extent_root,
1849 path, bytenr, parent, root_objectid,
1850 owner, offset, refs_to_add);
1851 BUG_ON(ret);
1852 out:
1853 btrfs_free_path(path);
1854 return err;
1855 }
1856
1857 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
1858 struct btrfs_root *root,
1859 struct btrfs_delayed_ref_node *node,
1860 struct btrfs_delayed_extent_op *extent_op,
1861 int insert_reserved)
1862 {
1863 int ret = 0;
1864 struct btrfs_delayed_data_ref *ref;
1865 struct btrfs_key ins;
1866 u64 parent = 0;
1867 u64 ref_root = 0;
1868 u64 flags = 0;
1869
1870 ins.objectid = node->bytenr;
1871 ins.offset = node->num_bytes;
1872 ins.type = BTRFS_EXTENT_ITEM_KEY;
1873
1874 ref = btrfs_delayed_node_to_data_ref(node);
1875 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
1876 parent = ref->parent;
1877 else
1878 ref_root = ref->root;
1879
1880 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
1881 if (extent_op) {
1882 BUG_ON(extent_op->update_key);
1883 flags |= extent_op->flags_to_set;
1884 }
1885 ret = alloc_reserved_file_extent(trans, root,
1886 parent, ref_root, flags,
1887 ref->objectid, ref->offset,
1888 &ins, node->ref_mod);
1889 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
1890 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
1891 node->num_bytes, parent,
1892 ref_root, ref->objectid,
1893 ref->offset, node->ref_mod,
1894 extent_op);
1895 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
1896 ret = __btrfs_free_extent(trans, root, node->bytenr,
1897 node->num_bytes, parent,
1898 ref_root, ref->objectid,
1899 ref->offset, node->ref_mod,
1900 extent_op);
1901 } else {
1902 BUG();
1903 }
1904 return ret;
1905 }
1906
1907 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
1908 struct extent_buffer *leaf,
1909 struct btrfs_extent_item *ei)
1910 {
1911 u64 flags = btrfs_extent_flags(leaf, ei);
1912 if (extent_op->update_flags) {
1913 flags |= extent_op->flags_to_set;
1914 btrfs_set_extent_flags(leaf, ei, flags);
1915 }
1916
1917 if (extent_op->update_key) {
1918 struct btrfs_tree_block_info *bi;
1919 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
1920 bi = (struct btrfs_tree_block_info *)(ei + 1);
1921 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
1922 }
1923 }
1924
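/*
 * look up the extent item a delayed extent op refers to and apply the op
 * to it, converting from the v0 item format first if compat support is
 * built in.
 */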
1925 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
1926 struct btrfs_root *root,
1927 struct btrfs_delayed_ref_node *node,
1928 struct btrfs_delayed_extent_op *extent_op)
1929 {
1930 struct btrfs_key key;
1931 struct btrfs_path *path;
1932 struct btrfs_extent_item *ei;
1933 struct extent_buffer *leaf;
1934 u32 item_size;
1935 int ret;
1936 int err = 0;
1937
1938 path = btrfs_alloc_path();
1939 if (!path)
1940 return -ENOMEM;
1941
1942 key.objectid = node->bytenr;
1943 key.type = BTRFS_EXTENT_ITEM_KEY;
1944 key.offset = node->num_bytes;
1945
1946 path->reada = 1;
1947 path->leave_spinning = 1;
1948 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
1949 path, 0, 1);
1950 if (ret < 0) {
1951 err = ret;
1952 goto out;
1953 }
1954 if (ret > 0) {
1955 err = -EIO;
1956 goto out;
1957 }
1958
1959 leaf = path->nodes[0];
1960 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1961 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
1962 if (item_size < sizeof(*ei)) {
1963 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
1964 path, (u64)-1, 0);
1965 if (ret < 0) {
1966 err = ret;
1967 goto out;
1968 }
1969 leaf = path->nodes[0];
1970 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
1971 }
1972 #endif
1973 BUG_ON(item_size < sizeof(*ei));
1974 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
1975 __run_delayed_extent_op(extent_op, leaf, ei);
1976
1977 btrfs_mark_buffer_dirty(leaf);
1978 out:
1979 btrfs_free_path(path);
1980 return err;
1981 }
1982
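/*
 * process one delayed tree block ref, the tree block counterpart of
 * run_delayed_data_ref().  tree refs always carry a ref_mod of exactly
 * one.
 */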
1983 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
1984 struct btrfs_root *root,
1985 struct btrfs_delayed_ref_node *node,
1986 struct btrfs_delayed_extent_op *extent_op,
1987 int insert_reserved)
1988 {
1989 int ret = 0;
1990 struct btrfs_delayed_tree_ref *ref;
1991 struct btrfs_key ins;
1992 u64 parent = 0;
1993 u64 ref_root = 0;
1994
1995 ins.objectid = node->bytenr;
1996 ins.offset = node->num_bytes;
1997 ins.type = BTRFS_EXTENT_ITEM_KEY;
1998
1999 ref = btrfs_delayed_node_to_tree_ref(node);
2000 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2001 parent = ref->parent;
2002 else
2003 ref_root = ref->root;
2004
2005 BUG_ON(node->ref_mod != 1);
2006 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2007 BUG_ON(!extent_op || !extent_op->update_flags ||
2008 !extent_op->update_key);
2009 ret = alloc_reserved_tree_block(trans, root,
2010 parent, ref_root,
2011 extent_op->flags_to_set,
2012 &extent_op->key,
2013 ref->level, &ins);
2014 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2015 ret = __btrfs_inc_extent_ref(trans, root, node->bytenr,
2016 node->num_bytes, parent, ref_root,
2017 ref->level, 0, 1, extent_op);
2018 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2019 ret = __btrfs_free_extent(trans, root, node->bytenr,
2020 node->num_bytes, parent, ref_root,
2021 ref->level, 0, 1, extent_op);
2022 } else {
2023 BUG();
2024 }
2025 return ret;
2026 }
2027
2028 /* helper function to actually process a single delayed ref entry */
2029 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2030 struct btrfs_root *root,
2031 struct btrfs_delayed_ref_node *node,
2032 struct btrfs_delayed_extent_op *extent_op,
2033 int insert_reserved)
2034 {
2035 int ret;
2036 if (btrfs_delayed_ref_is_head(node)) {
2037 struct btrfs_delayed_ref_head *head;
2038 /*
2039 * we've hit the end of the chain and we were supposed
2040 		 * to insert this extent into the tree. But it got
2041 * deleted before we ever needed to insert it, so all
2042 * we have to do is clean up the accounting
2043 */
2044 BUG_ON(extent_op);
2045 head = btrfs_delayed_node_to_head(node);
2046 if (insert_reserved) {
2047 btrfs_pin_extent(root, node->bytenr,
2048 node->num_bytes, 1);
2049 if (head->is_data) {
2050 ret = btrfs_del_csums(trans, root,
2051 node->bytenr,
2052 node->num_bytes);
2053 BUG_ON(ret);
2054 }
2055 }
2056 mutex_unlock(&head->mutex);
2057 return 0;
2058 }
2059
2060 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2061 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2062 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2063 insert_reserved);
2064 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2065 node->type == BTRFS_SHARED_DATA_REF_KEY)
2066 ret = run_delayed_data_ref(trans, root, node, extent_op,
2067 insert_reserved);
2068 else
2069 BUG();
2070 return ret;
2071 }
2072
2073 static noinline struct btrfs_delayed_ref_node *
2074 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2075 {
2076 struct rb_node *node;
2077 struct btrfs_delayed_ref_node *ref;
2078 int action = BTRFS_ADD_DELAYED_REF;
2079 again:
2080 /*
2081 * select delayed ref of type BTRFS_ADD_DELAYED_REF first.
2082 	 * this prevents the ref count from going down to zero while
2083 	 * there are still pending delayed refs.
2084 */
2085 node = rb_prev(&head->node.rb_node);
2086 while (1) {
2087 if (!node)
2088 break;
2089 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2090 rb_node);
2091 if (ref->bytenr != head->node.bytenr)
2092 break;
2093 if (ref->action == action)
2094 return ref;
2095 node = rb_prev(node);
2096 }
2097 if (action == BTRFS_ADD_DELAYED_REF) {
2098 action = BTRFS_DROP_DELAYED_REF;
2099 goto again;
2100 }
2101 return NULL;
2102 }
2103
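/*
 * run the delayed refs queued behind every head in the cluster.  the
 * delayed_refs spinlock is held on entry and on exit, but it is dropped
 * around each actual tree modification.  returns the number of refs
 * processed.
 */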
2104 static noinline int run_clustered_refs(struct btrfs_trans_handle *trans,
2105 struct btrfs_root *root,
2106 struct list_head *cluster)
2107 {
2108 struct btrfs_delayed_ref_root *delayed_refs;
2109 struct btrfs_delayed_ref_node *ref;
2110 struct btrfs_delayed_ref_head *locked_ref = NULL;
2111 struct btrfs_delayed_extent_op *extent_op;
2112 int ret;
2113 int count = 0;
2114 int must_insert_reserved = 0;
2115
2116 delayed_refs = &trans->transaction->delayed_refs;
2117 while (1) {
2118 if (!locked_ref) {
2119 /* pick a new head ref from the cluster list */
2120 if (list_empty(cluster))
2121 break;
2122
2123 locked_ref = list_entry(cluster->next,
2124 struct btrfs_delayed_ref_head, cluster);
2125
2126 /* grab the lock that says we are going to process
2127 * all the refs for this head */
2128 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2129
2130 /*
2131 * we may have dropped the spin lock to get the head
2132 * mutex lock, and that might have given someone else
2133 * time to free the head. If that's true, it has been
2134 * removed from our list and we can move on.
2135 */
2136 if (ret == -EAGAIN) {
2137 locked_ref = NULL;
2138 count++;
2139 continue;
2140 }
2141 }
2142
2143 /*
2144 * record the must insert reserved flag before we
2145 * drop the spin lock.
2146 */
2147 must_insert_reserved = locked_ref->must_insert_reserved;
2148 locked_ref->must_insert_reserved = 0;
2149
2150 extent_op = locked_ref->extent_op;
2151 locked_ref->extent_op = NULL;
2152
2153 /*
2154 * locked_ref is the head node, so we have to go one
2155 * node back for any delayed ref updates
2156 */
2157 ref = select_delayed_ref(locked_ref);
2158 if (!ref) {
2159 			/* All delayed refs have been processed. Go ahead
2160 * and send the head node to run_one_delayed_ref,
2161 * so that any accounting fixes can happen
2162 */
2163 ref = &locked_ref->node;
2164
2165 if (extent_op && must_insert_reserved) {
2166 kfree(extent_op);
2167 extent_op = NULL;
2168 }
2169
2170 if (extent_op) {
2171 spin_unlock(&delayed_refs->lock);
2172
2173 ret = run_delayed_extent_op(trans, root,
2174 ref, extent_op);
2175 BUG_ON(ret);
2176 kfree(extent_op);
2177
2178 cond_resched();
2179 spin_lock(&delayed_refs->lock);
2180 continue;
2181 }
2182
2183 list_del_init(&locked_ref->cluster);
2184 locked_ref = NULL;
2185 }
2186
2187 ref->in_tree = 0;
2188 rb_erase(&ref->rb_node, &delayed_refs->root);
2189 delayed_refs->num_entries--;
2190
2191 spin_unlock(&delayed_refs->lock);
2192
2193 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2194 must_insert_reserved);
2195 BUG_ON(ret);
2196
2197 btrfs_put_delayed_ref(ref);
2198 kfree(extent_op);
2199 count++;
2200
2201 cond_resched();
2202 spin_lock(&delayed_refs->lock);
2203 }
2204 return count;
2205 }
2206
2207 /*
2208 * this starts processing the delayed reference count updates and
2209  * extent insertions we have queued up so far. count can be 0,
2210  * which means to process everything in the tree at the start of
2211  * the run (but not newly added entries), some target number of
2212  * refs to process, or (unsigned long)-1 to run them all.
2213 */
2214 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2215 struct btrfs_root *root, unsigned long count)
2216 {
2217 struct rb_node *node;
2218 struct btrfs_delayed_ref_root *delayed_refs;
2219 struct btrfs_delayed_ref_node *ref;
2220 struct list_head cluster;
2221 int ret;
2222 int run_all = count == (unsigned long)-1;
2223 int run_most = 0;
2224
2225 if (root == root->fs_info->extent_root)
2226 root = root->fs_info->tree_root;
2227
2228 delayed_refs = &trans->transaction->delayed_refs;
2229 INIT_LIST_HEAD(&cluster);
2230 again:
2231 spin_lock(&delayed_refs->lock);
2232 if (count == 0) {
2233 count = delayed_refs->num_entries * 2;
2234 run_most = 1;
2235 }
2236 while (1) {
2237 if (!(run_all || run_most) &&
2238 delayed_refs->num_heads_ready < 64)
2239 break;
2240
2241 /*
2242 * go find something we can process in the rbtree. We start at
2243 * the beginning of the tree, and then build a cluster
2244 * of refs to process starting at the first one we are able to
2245 * lock
2246 */
2247 ret = btrfs_find_ref_cluster(trans, &cluster,
2248 delayed_refs->run_delayed_start);
2249 if (ret)
2250 break;
2251
2252 ret = run_clustered_refs(trans, root, &cluster);
2253 BUG_ON(ret < 0);
2254
2255 count -= min_t(unsigned long, ret, count);
2256
2257 if (count == 0)
2258 break;
2259 }
2260
2261 if (run_all) {
2262 node = rb_first(&delayed_refs->root);
2263 if (!node)
2264 goto out;
2265 count = (unsigned long)-1;
2266
2267 while (node) {
2268 ref = rb_entry(node, struct btrfs_delayed_ref_node,
2269 rb_node);
2270 if (btrfs_delayed_ref_is_head(ref)) {
2271 struct btrfs_delayed_ref_head *head;
2272
2273 head = btrfs_delayed_node_to_head(ref);
2274 atomic_inc(&ref->refs);
2275
2276 spin_unlock(&delayed_refs->lock);
2277 mutex_lock(&head->mutex);
2278 mutex_unlock(&head->mutex);
2279
2280 btrfs_put_delayed_ref(ref);
2281 cond_resched();
2282 goto again;
2283 }
2284 node = rb_next(node);
2285 }
2286 spin_unlock(&delayed_refs->lock);
2287 schedule_timeout(1);
2288 goto again;
2289 }
2290 out:
2291 spin_unlock(&delayed_refs->lock);
2292 return 0;
2293 }
2294
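/*
 * queue a delayed extent op that sets the given flags on the extent item
 * covering this range.  the actual update happens when the delayed refs
 * are run.
 */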
2295 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2296 struct btrfs_root *root,
2297 u64 bytenr, u64 num_bytes, u64 flags,
2298 int is_data)
2299 {
2300 struct btrfs_delayed_extent_op *extent_op;
2301 int ret;
2302
2303 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2304 if (!extent_op)
2305 return -ENOMEM;
2306
2307 extent_op->flags_to_set = flags;
2308 extent_op->update_flags = 1;
2309 extent_op->update_key = 0;
2310 extent_op->is_data = is_data ? 1 : 0;
2311
2312 ret = btrfs_add_delayed_extent_op(trans, bytenr, num_bytes, extent_op);
2313 if (ret)
2314 kfree(extent_op);
2315 return ret;
2316 }
2317
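/*
 * look through the delayed refs pending for this data extent.  returns 0
 * if the only queued ref belongs to the given root/inode/offset, 1 if
 * some other reference is queued, -ENOENT if nothing is pending, and
 * -EAGAIN if the head was contended and the caller should retry.
 */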
2318 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
2319 struct btrfs_root *root,
2320 struct btrfs_path *path,
2321 u64 objectid, u64 offset, u64 bytenr)
2322 {
2323 struct btrfs_delayed_ref_head *head;
2324 struct btrfs_delayed_ref_node *ref;
2325 struct btrfs_delayed_data_ref *data_ref;
2326 struct btrfs_delayed_ref_root *delayed_refs;
2327 struct rb_node *node;
2328 int ret = 0;
2329
2330 ret = -ENOENT;
2331 delayed_refs = &trans->transaction->delayed_refs;
2332 spin_lock(&delayed_refs->lock);
2333 head = btrfs_find_delayed_ref_head(trans, bytenr);
2334 if (!head)
2335 goto out;
2336
2337 if (!mutex_trylock(&head->mutex)) {
2338 atomic_inc(&head->node.refs);
2339 spin_unlock(&delayed_refs->lock);
2340
2341 btrfs_release_path(root->fs_info->extent_root, path);
2342
2343 mutex_lock(&head->mutex);
2344 mutex_unlock(&head->mutex);
2345 btrfs_put_delayed_ref(&head->node);
2346 return -EAGAIN;
2347 }
2348
2349 node = rb_prev(&head->node.rb_node);
2350 if (!node)
2351 goto out_unlock;
2352
2353 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2354
2355 if (ref->bytenr != bytenr)
2356 goto out_unlock;
2357
2358 ret = 1;
2359 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY)
2360 goto out_unlock;
2361
2362 data_ref = btrfs_delayed_node_to_data_ref(ref);
2363
2364 node = rb_prev(node);
2365 if (node) {
2366 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
2367 if (ref->bytenr == bytenr)
2368 goto out_unlock;
2369 }
2370
2371 if (data_ref->root != root->root_key.objectid ||
2372 data_ref->objectid != objectid || data_ref->offset != offset)
2373 goto out_unlock;
2374
2375 ret = 0;
2376 out_unlock:
2377 mutex_unlock(&head->mutex);
2378 out:
2379 spin_unlock(&delayed_refs->lock);
2380 return ret;
2381 }
2382
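/*
 * look for this data extent in the committed extent tree.  returns 0
 * only when the extent has a single inline data ref that belongs to the
 * given root/inode/offset and the extent is newer than the root's last
 * snapshot, 1 when it may be cross referenced, and -ENOENT when the
 * extent item isn't there.
 */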
2383 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
2384 struct btrfs_root *root,
2385 struct btrfs_path *path,
2386 u64 objectid, u64 offset, u64 bytenr)
2387 {
2388 struct btrfs_root *extent_root = root->fs_info->extent_root;
2389 struct extent_buffer *leaf;
2390 struct btrfs_extent_data_ref *ref;
2391 struct btrfs_extent_inline_ref *iref;
2392 struct btrfs_extent_item *ei;
2393 struct btrfs_key key;
2394 u32 item_size;
2395 int ret;
2396
2397 key.objectid = bytenr;
2398 key.offset = (u64)-1;
2399 key.type = BTRFS_EXTENT_ITEM_KEY;
2400
2401 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
2402 if (ret < 0)
2403 goto out;
2404 BUG_ON(ret == 0);
2405
2406 ret = -ENOENT;
2407 if (path->slots[0] == 0)
2408 goto out;
2409
2410 path->slots[0]--;
2411 leaf = path->nodes[0];
2412 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2413
2414 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
2415 goto out;
2416
2417 ret = 1;
2418 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2419 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2420 if (item_size < sizeof(*ei)) {
2421 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
2422 goto out;
2423 }
2424 #endif
2425 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2426
2427 if (item_size != sizeof(*ei) +
2428 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
2429 goto out;
2430
2431 if (btrfs_extent_generation(leaf, ei) <=
2432 btrfs_root_last_snapshot(&root->root_item))
2433 goto out;
2434
2435 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
2436 if (btrfs_extent_inline_ref_type(leaf, iref) !=
2437 BTRFS_EXTENT_DATA_REF_KEY)
2438 goto out;
2439
2440 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
2441 if (btrfs_extent_refs(leaf, ei) !=
2442 btrfs_extent_data_ref_count(leaf, ref) ||
2443 btrfs_extent_data_ref_root(leaf, ref) !=
2444 root->root_key.objectid ||
2445 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
2446 btrfs_extent_data_ref_offset(leaf, ref) != offset)
2447 goto out;
2448
2449 ret = 0;
2450 out:
2451 return ret;
2452 }
2453
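/*
 * check whether a data extent may be referenced by a root/inode other
 * than the one given, consulting both the committed extent tree and the
 * pending delayed refs.  returns 0 when we provably own the only
 * reference, a positive value when a cross reference may exist, and a
 * negative errno on failure.
 */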
2454 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
2455 struct btrfs_root *root,
2456 u64 objectid, u64 offset, u64 bytenr)
2457 {
2458 struct btrfs_path *path;
2459 int ret;
2460 int ret2;
2461
2462 path = btrfs_alloc_path();
2463 if (!path)
2464 		return -ENOMEM;
2465
2466 do {
2467 ret = check_committed_ref(trans, root, path, objectid,
2468 offset, bytenr);
2469 if (ret && ret != -ENOENT)
2470 goto out;
2471
2472 ret2 = check_delayed_ref(trans, root, path, objectid,
2473 offset, bytenr);
2474 } while (ret2 == -EAGAIN);
2475
2476 if (ret2 && ret2 != -ENOENT) {
2477 ret = ret2;
2478 goto out;
2479 }
2480
2481 if (ret != -ENOENT || ret2 != -ENOENT)
2482 ret = 0;
2483 out:
2484 btrfs_free_path(path);
2485 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
2486 WARN_ON(ret > 0);
2487 return ret;
2488 }
2489
2490 #if 0
2491 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2492 struct extent_buffer *buf, u32 nr_extents)
2493 {
2494 struct btrfs_key key;
2495 struct btrfs_file_extent_item *fi;
2496 u64 root_gen;
2497 u32 nritems;
2498 int i;
2499 int level;
2500 int ret = 0;
2501 int shared = 0;
2502
2503 if (!root->ref_cows)
2504 return 0;
2505
2506 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
2507 shared = 0;
2508 root_gen = root->root_key.offset;
2509 } else {
2510 shared = 1;
2511 root_gen = trans->transid - 1;
2512 }
2513
2514 level = btrfs_header_level(buf);
2515 nritems = btrfs_header_nritems(buf);
2516
2517 if (level == 0) {
2518 struct btrfs_leaf_ref *ref;
2519 struct btrfs_extent_info *info;
2520
2521 ref = btrfs_alloc_leaf_ref(root, nr_extents);
2522 if (!ref) {
2523 ret = -ENOMEM;
2524 goto out;
2525 }
2526
2527 ref->root_gen = root_gen;
2528 ref->bytenr = buf->start;
2529 ref->owner = btrfs_header_owner(buf);
2530 ref->generation = btrfs_header_generation(buf);
2531 ref->nritems = nr_extents;
2532 info = ref->extents;
2533
2534 for (i = 0; nr_extents > 0 && i < nritems; i++) {
2535 u64 disk_bytenr;
2536 btrfs_item_key_to_cpu(buf, &key, i);
2537 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2538 continue;
2539 fi = btrfs_item_ptr(buf, i,
2540 struct btrfs_file_extent_item);
2541 if (btrfs_file_extent_type(buf, fi) ==
2542 BTRFS_FILE_EXTENT_INLINE)
2543 continue;
2544 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2545 if (disk_bytenr == 0)
2546 continue;
2547
2548 info->bytenr = disk_bytenr;
2549 info->num_bytes =
2550 btrfs_file_extent_disk_num_bytes(buf, fi);
2551 info->objectid = key.objectid;
2552 info->offset = key.offset;
2553 info++;
2554 }
2555
2556 ret = btrfs_add_leaf_ref(root, ref, shared);
2557 if (ret == -EEXIST && shared) {
2558 struct btrfs_leaf_ref *old;
2559 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
2560 BUG_ON(!old);
2561 btrfs_remove_leaf_ref(root, old);
2562 btrfs_free_leaf_ref(root, old);
2563 ret = btrfs_add_leaf_ref(root, ref, shared);
2564 }
2565 WARN_ON(ret);
2566 btrfs_free_leaf_ref(root, ref);
2567 }
2568 out:
2569 return ret;
2570 }
2571
2572 /* when a block goes through cow, we update the reference counts of
2573 * everything that block points to. The internal pointers of the block
2574 * can be in just about any order, and it is likely to have clusters of
2575 * things that are close together and clusters of things that are not.
2576 *
2577 * To help reduce the seeks that come with updating all of these reference
2578 * counts, sort them by byte number before actual updates are done.
2579 *
2580 * struct refsort is used to match byte number to slot in the btree block.
2581 * we sort based on the byte number and then use the slot to actually
2582 * find the item.
2583 *
2584  * struct refsort is smaller than struct btrfs_item and smaller than
2585 * struct btrfs_key_ptr. Since we're currently limited to the page size
2586 * for a btree block, there's no way for a kmalloc of refsorts for a
2587 * single node to be bigger than a page.
2588 */
2589 struct refsort {
2590 u64 bytenr;
2591 u32 slot;
2592 };
2593
2594 /*
2595 * for passing into sort()
2596 */
2597 static int refsort_cmp(const void *a_void, const void *b_void)
2598 {
2599 const struct refsort *a = a_void;
2600 const struct refsort *b = b_void;
2601
2602 if (a->bytenr < b->bytenr)
2603 return -1;
2604 if (a->bytenr > b->bytenr)
2605 return 1;
2606 return 0;
2607 }
2608 #endif
2609
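/*
 * walk all the pointers in a leaf or node and add or drop (depending on
 * inc) one reference for each extent the buffer points to: file extents
 * for a leaf, child blocks for a node.  full_backref selects whether the
 * refs are keyed on the parent block instead of the root objectid.
 */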
2610 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
2611 struct btrfs_root *root,
2612 struct extent_buffer *buf,
2613 int full_backref, int inc)
2614 {
2615 u64 bytenr;
2616 u64 num_bytes;
2617 u64 parent;
2618 u64 ref_root;
2619 u32 nritems;
2620 struct btrfs_key key;
2621 struct btrfs_file_extent_item *fi;
2622 int i;
2623 int level;
2624 int ret = 0;
2625 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
2626 u64, u64, u64, u64, u64, u64);
2627
2628 ref_root = btrfs_header_owner(buf);
2629 nritems = btrfs_header_nritems(buf);
2630 level = btrfs_header_level(buf);
2631
2632 if (!root->ref_cows && level == 0)
2633 return 0;
2634
2635 if (inc)
2636 process_func = btrfs_inc_extent_ref;
2637 else
2638 process_func = btrfs_free_extent;
2639
2640 if (full_backref)
2641 parent = buf->start;
2642 else
2643 parent = 0;
2644
2645 for (i = 0; i < nritems; i++) {
2646 if (level == 0) {
2647 btrfs_item_key_to_cpu(buf, &key, i);
2648 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
2649 continue;
2650 fi = btrfs_item_ptr(buf, i,
2651 struct btrfs_file_extent_item);
2652 if (btrfs_file_extent_type(buf, fi) ==
2653 BTRFS_FILE_EXTENT_INLINE)
2654 continue;
2655 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
2656 if (bytenr == 0)
2657 continue;
2658
2659 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
2660 key.offset -= btrfs_file_extent_offset(buf, fi);
2661 ret = process_func(trans, root, bytenr, num_bytes,
2662 parent, ref_root, key.objectid,
2663 key.offset);
2664 if (ret)
2665 goto fail;
2666 } else {
2667 bytenr = btrfs_node_blockptr(buf, i);
2668 num_bytes = btrfs_level_size(root, level - 1);
2669 ret = process_func(trans, root, bytenr, num_bytes,
2670 parent, ref_root, level - 1, 0);
2671 if (ret)
2672 goto fail;
2673 }
2674 }
2675 return 0;
2676 fail:
2677 BUG();
2678 return ret;
2679 }
2680
2681 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2682 struct extent_buffer *buf, int full_backref)
2683 {
2684 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
2685 }
2686
2687 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
2688 struct extent_buffer *buf, int full_backref)
2689 {
2690 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
2691 }
2692
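/*
 * copy the in-memory block group item back into its slot in the extent
 * tree.  the item must already exist; a search miss here is a bug.
 */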
2693 static int write_one_cache_group(struct btrfs_trans_handle *trans,
2694 struct btrfs_root *root,
2695 struct btrfs_path *path,
2696 struct btrfs_block_group_cache *cache)
2697 {
2698 int ret;
2699 struct btrfs_root *extent_root = root->fs_info->extent_root;
2700 unsigned long bi;
2701 struct extent_buffer *leaf;
2702
2703 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
2704 if (ret < 0)
2705 goto fail;
2706 BUG_ON(ret);
2707
2708 leaf = path->nodes[0];
2709 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
2710 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
2711 btrfs_mark_buffer_dirty(leaf);
2712 btrfs_release_path(extent_root, path);
2713 fail:
2714 if (ret)
2715 return ret;
2716 return 0;
2717
2718 }
2719
2720 static struct btrfs_block_group_cache *
2721 next_block_group(struct btrfs_root *root,
2722 struct btrfs_block_group_cache *cache)
2723 {
2724 struct rb_node *node;
2725 spin_lock(&root->fs_info->block_group_cache_lock);
2726 node = rb_next(&cache->cache_node);
2727 btrfs_put_block_group(cache);
2728 if (node) {
2729 cache = rb_entry(node, struct btrfs_block_group_cache,
2730 cache_node);
2731 btrfs_get_block_group(cache);
2732 } else
2733 cache = NULL;
2734 spin_unlock(&root->fs_info->block_group_cache_lock);
2735 return cache;
2736 }
2737
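/*
 * get the free space cache inode for a block group ready for writeout:
 * look it up or create it, truncate any stale contents and preallocate
 * enough pages to hold the cache.  on success the group is marked
 * BTRFS_DC_SETUP, otherwise BTRFS_DC_ERROR so the cache won't be
 * trusted (groups under 100MB are simply marked written and skipped).
 */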
2738 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
2739 struct btrfs_trans_handle *trans,
2740 struct btrfs_path *path)
2741 {
2742 struct btrfs_root *root = block_group->fs_info->tree_root;
2743 struct inode *inode = NULL;
2744 u64 alloc_hint = 0;
2745 int dcs = BTRFS_DC_ERROR;
2746 int num_pages = 0;
2747 int retries = 0;
2748 int ret = 0;
2749
2750 /*
2751 * If this block group is smaller than 100 megs don't bother caching the
2752 * block group.
2753 */
2754 if (block_group->key.offset < (100 * 1024 * 1024)) {
2755 spin_lock(&block_group->lock);
2756 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
2757 spin_unlock(&block_group->lock);
2758 return 0;
2759 }
2760
2761 again:
2762 inode = lookup_free_space_inode(root, block_group, path);
2763 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
2764 ret = PTR_ERR(inode);
2765 btrfs_release_path(root, path);
2766 goto out;
2767 }
2768
2769 if (IS_ERR(inode)) {
2770 BUG_ON(retries);
2771 retries++;
2772
2773 if (block_group->ro)
2774 goto out_free;
2775
2776 ret = create_free_space_inode(root, trans, block_group, path);
2777 if (ret)
2778 goto out_free;
2779 goto again;
2780 }
2781
2782 /*
2783 	 * We want to set the generation to 0 so that if anything goes wrong
2784 	 * from here on out we know not to trust this cache when we load it
2785 	 * up next time.
2786 */
2787 BTRFS_I(inode)->generation = 0;
2788 ret = btrfs_update_inode(trans, root, inode);
2789 WARN_ON(ret);
2790
2791 if (i_size_read(inode) > 0) {
2792 ret = btrfs_truncate_free_space_cache(root, trans, path,
2793 inode);
2794 if (ret)
2795 goto out_put;
2796 }
2797
2798 spin_lock(&block_group->lock);
2799 if (block_group->cached != BTRFS_CACHE_FINISHED) {
2800 /* We're not cached, don't bother trying to write stuff out */
2801 dcs = BTRFS_DC_WRITTEN;
2802 spin_unlock(&block_group->lock);
2803 goto out_put;
2804 }
2805 spin_unlock(&block_group->lock);
2806
2807 num_pages = (int)div64_u64(block_group->key.offset, 1024 * 1024 * 1024);
2808 if (!num_pages)
2809 num_pages = 1;
2810
2811 /*
2812 * Just to make absolutely sure we have enough space, we're going to
2813 	 * preallocate 16 pages worth of space for each block group. In
2814 * practice we ought to use at most 8, but we need extra space so we can
2815 * add our header and have a terminator between the extents and the
2816 * bitmaps.
2817 */
2818 num_pages *= 16;
2819 num_pages *= PAGE_CACHE_SIZE;
2820
2821 ret = btrfs_check_data_free_space(inode, num_pages);
2822 if (ret)
2823 goto out_put;
2824
2825 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
2826 num_pages, num_pages,
2827 &alloc_hint);
2828 if (!ret)
2829 dcs = BTRFS_DC_SETUP;
2830 btrfs_free_reserved_data_space(inode, num_pages);
2831 out_put:
2832 iput(inode);
2833 out_free:
2834 btrfs_release_path(root, path);
2835 out:
2836 spin_lock(&block_group->lock);
2837 block_group->disk_cache_state = dcs;
2838 spin_unlock(&block_group->lock);
2839
2840 return ret;
2841 }
2842
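/*
 * write out all dirty block group items in three passes: first set up
 * the free space cache inodes for groups marked BTRFS_DC_CLEAR, then
 * write the dirty block group items themselves, and finally write out
 * the free space caches that were set up in the first pass.
 */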
2843 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
2844 struct btrfs_root *root)
2845 {
2846 struct btrfs_block_group_cache *cache;
2847 int err = 0;
2848 struct btrfs_path *path;
2849 u64 last = 0;
2850
2851 path = btrfs_alloc_path();
2852 if (!path)
2853 return -ENOMEM;
2854
2855 again:
2856 while (1) {
2857 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2858 while (cache) {
2859 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
2860 break;
2861 cache = next_block_group(root, cache);
2862 }
2863 if (!cache) {
2864 if (last == 0)
2865 break;
2866 last = 0;
2867 continue;
2868 }
2869 err = cache_save_setup(cache, trans, path);
2870 last = cache->key.objectid + cache->key.offset;
2871 btrfs_put_block_group(cache);
2872 }
2873
2874 while (1) {
2875 if (last == 0) {
2876 err = btrfs_run_delayed_refs(trans, root,
2877 (unsigned long)-1);
2878 BUG_ON(err);
2879 }
2880
2881 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2882 while (cache) {
2883 if (cache->disk_cache_state == BTRFS_DC_CLEAR) {
2884 btrfs_put_block_group(cache);
2885 goto again;
2886 }
2887
2888 if (cache->dirty)
2889 break;
2890 cache = next_block_group(root, cache);
2891 }
2892 if (!cache) {
2893 if (last == 0)
2894 break;
2895 last = 0;
2896 continue;
2897 }
2898
2899 if (cache->disk_cache_state == BTRFS_DC_SETUP)
2900 cache->disk_cache_state = BTRFS_DC_NEED_WRITE;
2901 cache->dirty = 0;
2902 last = cache->key.objectid + cache->key.offset;
2903
2904 err = write_one_cache_group(trans, root, path, cache);
2905 BUG_ON(err);
2906 btrfs_put_block_group(cache);
2907 }
2908
2909 while (1) {
2910 /*
2911 * I don't think this is needed since we're just marking our
2912 		 * preallocated extent as written, but it can't hurt, just in
2913 		 * case.
2914 */
2915 if (last == 0) {
2916 err = btrfs_run_delayed_refs(trans, root,
2917 (unsigned long)-1);
2918 BUG_ON(err);
2919 }
2920
2921 cache = btrfs_lookup_first_block_group(root->fs_info, last);
2922 while (cache) {
2923 /*
2924 * Really this shouldn't happen, but it could if we
2925 * couldn't write the entire preallocated extent and
2926 * splitting the extent resulted in a new block.
2927 */
2928 if (cache->dirty) {
2929 btrfs_put_block_group(cache);
2930 goto again;
2931 }
2932 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2933 break;
2934 cache = next_block_group(root, cache);
2935 }
2936 if (!cache) {
2937 if (last == 0)
2938 break;
2939 last = 0;
2940 continue;
2941 }
2942
2943 btrfs_write_out_cache(root, trans, cache, path);
2944
2945 /*
2946 * If we didn't have an error then the cache state is still
2947 * NEED_WRITE, so we can set it to WRITTEN.
2948 */
2949 if (cache->disk_cache_state == BTRFS_DC_NEED_WRITE)
2950 cache->disk_cache_state = BTRFS_DC_WRITTEN;
2951 last = cache->key.objectid + cache->key.offset;
2952 btrfs_put_block_group(cache);
2953 }
2954
2955 btrfs_free_path(path);
2956 return 0;
2957 }
2958
2959 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
2960 {
2961 struct btrfs_block_group_cache *block_group;
2962 int readonly = 0;
2963
2964 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
2965 if (!block_group || block_group->ro)
2966 readonly = 1;
2967 if (block_group)
2968 btrfs_put_block_group(block_group);
2969 return readonly;
2970 }
2971
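/*
 * account total_bytes/bytes_used to the space info matching flags,
 * allocating and initializing a new one for the first block group of a
 * kind.  mirrored profiles (DUP/RAID1/RAID10) count twice toward the
 * on-disk totals.
 */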
2972 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
2973 u64 total_bytes, u64 bytes_used,
2974 struct btrfs_space_info **space_info)
2975 {
2976 struct btrfs_space_info *found;
2977 int i;
2978 int factor;
2979
2980 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
2981 BTRFS_BLOCK_GROUP_RAID10))
2982 factor = 2;
2983 else
2984 factor = 1;
2985
2986 found = __find_space_info(info, flags);
2987 if (found) {
2988 spin_lock(&found->lock);
2989 found->total_bytes += total_bytes;
2990 found->disk_total += total_bytes * factor;
2991 found->bytes_used += bytes_used;
2992 found->disk_used += bytes_used * factor;
2993 found->full = 0;
2994 spin_unlock(&found->lock);
2995 *space_info = found;
2996 return 0;
2997 }
2998 found = kzalloc(sizeof(*found), GFP_NOFS);
2999 if (!found)
3000 return -ENOMEM;
3001
3002 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3003 INIT_LIST_HEAD(&found->block_groups[i]);
3004 init_rwsem(&found->groups_sem);
3005 spin_lock_init(&found->lock);
3006 found->flags = flags & (BTRFS_BLOCK_GROUP_DATA |
3007 BTRFS_BLOCK_GROUP_SYSTEM |
3008 BTRFS_BLOCK_GROUP_METADATA);
3009 found->total_bytes = total_bytes;
3010 found->disk_total = total_bytes * factor;
3011 found->bytes_used = bytes_used;
3012 found->disk_used = bytes_used * factor;
3013 found->bytes_pinned = 0;
3014 found->bytes_reserved = 0;
3015 found->bytes_readonly = 0;
3016 found->bytes_may_use = 0;
3017 found->full = 0;
3018 found->force_alloc = 0;
3019 *space_info = found;
3020 list_add_rcu(&found->list, &info->space_info);
3021 atomic_set(&found->caching_threads, 0);
3022 return 0;
3023 }
3024
3025 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3026 {
3027 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
3028 BTRFS_BLOCK_GROUP_RAID1 |
3029 BTRFS_BLOCK_GROUP_RAID10 |
3030 BTRFS_BLOCK_GROUP_DUP);
3031 if (extra_flags) {
3032 if (flags & BTRFS_BLOCK_GROUP_DATA)
3033 fs_info->avail_data_alloc_bits |= extra_flags;
3034 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3035 fs_info->avail_metadata_alloc_bits |= extra_flags;
3036 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3037 fs_info->avail_system_alloc_bits |= extra_flags;
3038 }
3039 }
3040
3041 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3042 {
3043 /*
3044 * we add in the count of missing devices because we want
3045 * to make sure that any RAID levels on a degraded FS
3046 * continue to be honored.
3047 */
3048 u64 num_devices = root->fs_info->fs_devices->rw_devices +
3049 root->fs_info->fs_devices->missing_devices;
3050
3051 if (num_devices == 1)
3052 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
3053 if (num_devices < 4)
3054 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
3055
3056 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
3057 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
3058 BTRFS_BLOCK_GROUP_RAID10))) {
3059 flags &= ~BTRFS_BLOCK_GROUP_DUP;
3060 }
3061
3062 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
3063 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
3064 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
3065 }
3066
3067 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
3068 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
3069 (flags & BTRFS_BLOCK_GROUP_RAID10) |
3070 (flags & BTRFS_BLOCK_GROUP_DUP)))
3071 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
3072 return flags;
3073 }
3074
3075 static u64 get_alloc_profile(struct btrfs_root *root, u64 flags)
3076 {
3077 if (flags & BTRFS_BLOCK_GROUP_DATA)
3078 flags |= root->fs_info->avail_data_alloc_bits &
3079 root->fs_info->data_alloc_profile;
3080 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3081 flags |= root->fs_info->avail_system_alloc_bits &
3082 root->fs_info->system_alloc_profile;
3083 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
3084 flags |= root->fs_info->avail_metadata_alloc_bits &
3085 root->fs_info->metadata_alloc_profile;
3086 return btrfs_reduce_alloc_profile(root, flags);
3087 }
3088
3089 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
3090 {
3091 u64 flags;
3092
3093 if (data)
3094 flags = BTRFS_BLOCK_GROUP_DATA;
3095 else if (root == root->fs_info->chunk_root)
3096 flags = BTRFS_BLOCK_GROUP_SYSTEM;
3097 else
3098 flags = BTRFS_BLOCK_GROUP_METADATA;
3099
3100 return get_alloc_profile(root, flags);
3101 }
3102
3103 void btrfs_set_inode_space_info(struct btrfs_root *root, struct inode *inode)
3104 {
3105 BTRFS_I(inode)->space_info = __find_space_info(root->fs_info,
3106 BTRFS_BLOCK_GROUP_DATA);
3107 }
3108
3109 /*
3110 * This will check the space that the inode allocates from to make sure we have
3111 * enough space for bytes.
3112 */
3113 int btrfs_check_data_free_space(struct inode *inode, u64 bytes)
3114 {
3115 struct btrfs_space_info *data_sinfo;
3116 struct btrfs_root *root = BTRFS_I(inode)->root;
3117 u64 used;
3118 int ret = 0, committed = 0, alloc_chunk = 1;
3119
3120 /* make sure bytes are sectorsize aligned */
3121 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3122
3123 if (root == root->fs_info->tree_root) {
3124 alloc_chunk = 0;
3125 committed = 1;
3126 }
3127
3128 data_sinfo = BTRFS_I(inode)->space_info;
3129 if (!data_sinfo)
3130 goto alloc;
3131
3132 again:
3133 /* make sure we have enough space to handle the data first */
3134 spin_lock(&data_sinfo->lock);
3135 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
3136 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
3137 data_sinfo->bytes_may_use;
3138
3139 if (used + bytes > data_sinfo->total_bytes) {
3140 struct btrfs_trans_handle *trans;
3141
3142 /*
3143 * if we don't have enough free bytes in this space then we need
3144 * to alloc a new chunk.
3145 */
3146 if (!data_sinfo->full && alloc_chunk) {
3147 u64 alloc_target;
3148
3149 data_sinfo->force_alloc = 1;
3150 spin_unlock(&data_sinfo->lock);
3151 alloc:
3152 alloc_target = btrfs_get_alloc_profile(root, 1);
3153 trans = btrfs_join_transaction(root, 1);
3154 if (IS_ERR(trans))
3155 return PTR_ERR(trans);
3156
3157 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3158 bytes + 2 * 1024 * 1024,
3159 alloc_target, 0);
3160 btrfs_end_transaction(trans, root);
3161 if (ret < 0) {
3162 if (ret != -ENOSPC)
3163 return ret;
3164 else
3165 goto commit_trans;
3166 }
3167
3168 if (!data_sinfo) {
3169 btrfs_set_inode_space_info(root, inode);
3170 data_sinfo = BTRFS_I(inode)->space_info;
3171 }
3172 goto again;
3173 }
3174 spin_unlock(&data_sinfo->lock);
3175
3176 /* commit the current transaction and try again */
3177 commit_trans:
3178 if (!committed && !root->fs_info->open_ioctl_trans) {
3179 committed = 1;
3180 trans = btrfs_join_transaction(root, 1);
3181 if (IS_ERR(trans))
3182 return PTR_ERR(trans);
3183 ret = btrfs_commit_transaction(trans, root);
3184 if (ret)
3185 return ret;
3186 goto again;
3187 }
3188
3189 #if 0 /* I hope we never need this code again, just in case */
3190 printk(KERN_ERR "no space left, need %llu, %llu bytes_used, "
3191 "%llu bytes_reserved, " "%llu bytes_pinned, "
3192 "%llu bytes_readonly, %llu may use %llu total\n",
3193 (unsigned long long)bytes,
3194 (unsigned long long)data_sinfo->bytes_used,
3195 (unsigned long long)data_sinfo->bytes_reserved,
3196 (unsigned long long)data_sinfo->bytes_pinned,
3197 (unsigned long long)data_sinfo->bytes_readonly,
3198 (unsigned long long)data_sinfo->bytes_may_use,
3199 (unsigned long long)data_sinfo->total_bytes);
3200 #endif
3201 return -ENOSPC;
3202 }
3203 data_sinfo->bytes_may_use += bytes;
3204 BTRFS_I(inode)->reserved_bytes += bytes;
3205 spin_unlock(&data_sinfo->lock);
3206
3207 return 0;
3208 }
3209
3210 /*
3211  * called when we are clearing a delalloc extent from the
3212 * inode's io_tree or there was an error for whatever reason
3213 * after calling btrfs_check_data_free_space
3214 */
3215 void btrfs_free_reserved_data_space(struct inode *inode, u64 bytes)
3216 {
3217 struct btrfs_root *root = BTRFS_I(inode)->root;
3218 struct btrfs_space_info *data_sinfo;
3219
3220 /* make sure bytes are sectorsize aligned */
3221 bytes = (bytes + root->sectorsize - 1) & ~((u64)root->sectorsize - 1);
3222
3223 data_sinfo = BTRFS_I(inode)->space_info;
3224 spin_lock(&data_sinfo->lock);
3225 data_sinfo->bytes_may_use -= bytes;
3226 BTRFS_I(inode)->reserved_bytes -= bytes;
3227 spin_unlock(&data_sinfo->lock);
3228 }
3229
3230 static void force_metadata_allocation(struct btrfs_fs_info *info)
3231 {
3232 struct list_head *head = &info->space_info;
3233 struct btrfs_space_info *found;
3234
3235 rcu_read_lock();
3236 list_for_each_entry_rcu(found, head, list) {
3237 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
3238 found->force_alloc = 1;
3239 }
3240 rcu_read_unlock();
3241 }
3242
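/*
 * heuristic for deciding whether a new chunk is worth allocating: skip
 * the allocation while there is still at least 256MB of headroom, while
 * less than ~80% of the space is committed, or (once the space is
 * already large) while less than ~30% of it is actually used.
 */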
3243 static int should_alloc_chunk(struct btrfs_root *root,
3244 struct btrfs_space_info *sinfo, u64 alloc_bytes)
3245 {
3246 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
3247 u64 thresh;
3248
3249 if (sinfo->bytes_used + sinfo->bytes_reserved +
3250 alloc_bytes + 256 * 1024 * 1024 < num_bytes)
3251 return 0;
3252
3253 if (sinfo->bytes_used + sinfo->bytes_reserved +
3254 alloc_bytes < div_factor(num_bytes, 8))
3255 return 0;
3256
3257 thresh = btrfs_super_total_bytes(&root->fs_info->super_copy);
3258 thresh = max_t(u64, 256 * 1024 * 1024, div_factor_fine(thresh, 5));
3259
3260 if (num_bytes > thresh && sinfo->bytes_used < div_factor(num_bytes, 3))
3261 return 0;
3262
3263 return 1;
3264 }
3265
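/*
 * allocate a new chunk of the given type unless the space info is
 * already full or should_alloc_chunk() says it isn't needed yet (force
 * overrides the heuristic).  returns 1 if a chunk was allocated, 0 if
 * the allocation was skipped, or a negative errno (in which case the
 * space info is marked full).
 */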
3266 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
3267 struct btrfs_root *extent_root, u64 alloc_bytes,
3268 u64 flags, int force)
3269 {
3270 struct btrfs_space_info *space_info;
3271 struct btrfs_fs_info *fs_info = extent_root->fs_info;
3272 int ret = 0;
3273
3274 mutex_lock(&fs_info->chunk_mutex);
3275
3276 flags = btrfs_reduce_alloc_profile(extent_root, flags);
3277
3278 space_info = __find_space_info(extent_root->fs_info, flags);
3279 if (!space_info) {
3280 ret = update_space_info(extent_root->fs_info, flags,
3281 0, 0, &space_info);
3282 BUG_ON(ret);
3283 }
3284 BUG_ON(!space_info);
3285
3286 spin_lock(&space_info->lock);
3287 if (space_info->force_alloc)
3288 force = 1;
3289 if (space_info->full) {
3290 spin_unlock(&space_info->lock);
3291 goto out;
3292 }
3293
3294 if (!force && !should_alloc_chunk(extent_root, space_info,
3295 alloc_bytes)) {
3296 spin_unlock(&space_info->lock);
3297 goto out;
3298 }
3299 spin_unlock(&space_info->lock);
3300
3301 /*
3302 * If we have mixed data/metadata chunks we want to make sure we keep
3303 * allocating mixed chunks instead of individual chunks.
3304 */
3305 if (btrfs_mixed_space_info(space_info))
3306 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
3307
3308 /*
3309 * if we're doing a data chunk, go ahead and make sure that
3310 * we keep a reasonable number of metadata chunks allocated in the
3311 * FS as well.
3312 */
3313 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
3314 fs_info->data_chunk_allocations++;
3315 if (!(fs_info->data_chunk_allocations %
3316 fs_info->metadata_ratio))
3317 force_metadata_allocation(fs_info);
3318 }
3319
3320 ret = btrfs_alloc_chunk(trans, extent_root, flags);
3321 spin_lock(&space_info->lock);
3322 if (ret)
3323 space_info->full = 1;
3324 else
3325 ret = 1;
3326 space_info->force_alloc = 0;
3327 spin_unlock(&space_info->lock);
3328 out:
3329 mutex_unlock(&extent_root->fs_info->chunk_mutex);
3330 return ret;
3331 }
3332
3333 /*
3334 * shrink metadata reservation for delalloc
3335 */
3336 static int shrink_delalloc(struct btrfs_trans_handle *trans,
3337 struct btrfs_root *root, u64 to_reclaim, int sync)
3338 {
3339 struct btrfs_block_rsv *block_rsv;
3340 struct btrfs_space_info *space_info;
3341 u64 reserved;
3342 u64 max_reclaim;
3343 u64 reclaimed = 0;
3344 long time_left;
3345 int pause = 1;
3346 int nr_pages = (2 * 1024 * 1024) >> PAGE_CACHE_SHIFT;
3347 int loops = 0;
3348
3349 block_rsv = &root->fs_info->delalloc_block_rsv;
3350 space_info = block_rsv->space_info;
3351
3352 smp_mb();
3353 reserved = space_info->bytes_reserved;
3354
3355 if (reserved == 0)
3356 return 0;
3357
3358 max_reclaim = min(reserved, to_reclaim);
3359
3360 while (loops < 1024) {
3361 /* have the flusher threads jump in and do some IO */
3362 smp_mb();
3363 nr_pages = min_t(unsigned long, nr_pages,
3364 root->fs_info->delalloc_bytes >> PAGE_CACHE_SHIFT);
3365 writeback_inodes_sb_nr_if_idle(root->fs_info->sb, nr_pages);
3366
3367 spin_lock(&space_info->lock);
3368 if (reserved > space_info->bytes_reserved) {
3369 loops = 0;
3370 reclaimed += reserved - space_info->bytes_reserved;
3371 } else {
3372 loops++;
3373 }
3374 reserved = space_info->bytes_reserved;
3375 spin_unlock(&space_info->lock);
3376
3377 if (reserved == 0 || reclaimed >= max_reclaim)
3378 break;
3379
3380 if (trans && trans->transaction->blocked)
3381 return -EAGAIN;
3382
3383 __set_current_state(TASK_INTERRUPTIBLE);
3384 time_left = schedule_timeout(pause);
3385
3386 /* We were interrupted, exit */
3387 if (time_left)
3388 break;
3389
3390 pause <<= 1;
3391 if (pause > HZ / 10)
3392 pause = HZ / 10;
3393
3394 }
3395 return reclaimed >= to_reclaim;
3396 }
3397
3398 /*
3399  * retries counts how many times we've been through the reservation
3400  * loop below. The idea is that on the first pass (retries == 0) we
3401  * add to our reserved count if we can't make the allocation, in order
3402  * to hold our place while we go and try to free up space. On later
3403  * passes we don't add space again, we just check whether the amount
3404  * of unused space is >= the total space, meaning our reservation is
3405  * valid.
3406  *
3407  * If flush == 0 we skip the reclaim logic and fail with -ENOSPC.
3408 */
3409 static int reserve_metadata_bytes(struct btrfs_trans_handle *trans,
3410 struct btrfs_root *root,
3411 struct btrfs_block_rsv *block_rsv,
3412 u64 orig_bytes, int flush)
3413 {
3414 struct btrfs_space_info *space_info = block_rsv->space_info;
3415 u64 unused;
3416 u64 num_bytes = orig_bytes;
3417 int retries = 0;
3418 int ret = 0;
3419 bool reserved = false;
3420 bool committed = false;
3421
3422 again:
3423 ret = -ENOSPC;
3424 if (reserved)
3425 num_bytes = 0;
3426
3427 spin_lock(&space_info->lock);
3428 unused = space_info->bytes_used + space_info->bytes_reserved +
3429 space_info->bytes_pinned + space_info->bytes_readonly +
3430 space_info->bytes_may_use;
3431
3432 /*
3433 	 * The idea here is that if we've not already over-reserved the space
3434 	 * then we can go ahead and save our reservation first and then start
3435 	 * flushing if we need to. Otherwise if we've already overcommitted,
3436 	 * let's start flushing stuff first and then come back and try to make
3437 * our reservation.
3438 */
3439 if (unused <= space_info->total_bytes) {
3440 unused = space_info->total_bytes - unused;
3441 if (unused >= num_bytes) {
3442 if (!reserved)
3443 space_info->bytes_reserved += orig_bytes;
3444 ret = 0;
3445 } else {
3446 /*
3447 * Ok set num_bytes to orig_bytes since we aren't
3448 			 * overcommitted, this way we only try to reclaim what
3449 * we need.
3450 */
3451 num_bytes = orig_bytes;
3452 }
3453 } else {
3454 /*
3455 * Ok we're over committed, set num_bytes to the overcommitted
3456 * amount plus the amount of bytes that we need for this
3457 * reservation.
3458 */
3459 num_bytes = unused - space_info->total_bytes +
3460 (orig_bytes * (retries + 1));
3461 }
3462
3463 /*
3464 * Couldn't make our reservation, save our place so while we're trying
3465 * to reclaim space we can actually use it instead of somebody else
3466 * stealing it from us.
3467 */
3468 if (ret && !reserved) {
3469 space_info->bytes_reserved += orig_bytes;
3470 reserved = true;
3471 }
3472
3473 spin_unlock(&space_info->lock);
3474
3475 if (!ret)
3476 return 0;
3477
3478 if (!flush)
3479 goto out;
3480
3481 /*
3482 * We do synchronous shrinking since we don't actually unreserve
3483 * metadata until after the IO is completed.
3484 */
3485 ret = shrink_delalloc(trans, root, num_bytes, 1);
3486 if (ret > 0)
3487 return 0;
3488 else if (ret < 0)
3489 goto out;
3490
3491 /*
3492 * So if we were overcommitted it's possible that somebody else flushed
3493 * out enough space and we simply didn't have enough space to reclaim,
3494 * so go back around and try again.
3495 */
3496 if (retries < 2) {
3497 retries++;
3498 goto again;
3499 }
3500
3501 spin_lock(&space_info->lock);
3502 /*
3503 * Not enough space to be reclaimed, don't bother committing the
3504 * transaction.
3505 */
3506 if (space_info->bytes_pinned < orig_bytes)
3507 ret = -ENOSPC;
3508 spin_unlock(&space_info->lock);
3509 if (ret)
3510 goto out;
3511
3512 ret = -EAGAIN;
3513 if (trans || committed)
3514 goto out;
3515
3516 ret = -ENOSPC;
3517 trans = btrfs_join_transaction(root, 1);
3518 if (IS_ERR(trans))
3519 goto out;
3520 ret = btrfs_commit_transaction(trans, root);
3521 if (!ret) {
3522 trans = NULL;
3523 committed = true;
3524 goto again;
3525 }
3526
3527 out:
3528 if (reserved) {
3529 spin_lock(&space_info->lock);
3530 space_info->bytes_reserved -= orig_bytes;
3531 spin_unlock(&space_info->lock);
3532 }
3533
3534 return ret;
3535 }
3536
3537 static struct btrfs_block_rsv *get_block_rsv(struct btrfs_trans_handle *trans,
3538 struct btrfs_root *root)
3539 {
3540 struct btrfs_block_rsv *block_rsv;
3541 if (root->ref_cows)
3542 block_rsv = trans->block_rsv;
3543 else
3544 block_rsv = root->block_rsv;
3545
3546 if (!block_rsv)
3547 block_rsv = &root->fs_info->empty_block_rsv;
3548
3549 return block_rsv;
3550 }
3551
3552 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
3553 u64 num_bytes)
3554 {
3555 int ret = -ENOSPC;
3556 spin_lock(&block_rsv->lock);
3557 if (block_rsv->reserved >= num_bytes) {
3558 block_rsv->reserved -= num_bytes;
3559 if (block_rsv->reserved < block_rsv->size)
3560 block_rsv->full = 0;
3561 ret = 0;
3562 }
3563 spin_unlock(&block_rsv->lock);
3564 return ret;
3565 }
3566
3567 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
3568 u64 num_bytes, int update_size)
3569 {
3570 spin_lock(&block_rsv->lock);
3571 block_rsv->reserved += num_bytes;
3572 if (update_size)
3573 block_rsv->size += num_bytes;
3574 else if (block_rsv->reserved >= block_rsv->size)
3575 block_rsv->full = 1;
3576 spin_unlock(&block_rsv->lock);
3577 }
3578
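/*
 * shrink a block reservation's size by num_bytes ((u64)-1 releases the
 * whole thing).  reserved space freed up by the shrink is moved into the
 * destination rsv if one was given and it has room; whatever is left
 * over goes back to the space info.
 */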
3579 void block_rsv_release_bytes(struct btrfs_block_rsv *block_rsv,
3580 struct btrfs_block_rsv *dest, u64 num_bytes)
3581 {
3582 struct btrfs_space_info *space_info = block_rsv->space_info;
3583
3584 spin_lock(&block_rsv->lock);
3585 if (num_bytes == (u64)-1)
3586 num_bytes = block_rsv->size;
3587 block_rsv->size -= num_bytes;
3588 if (block_rsv->reserved >= block_rsv->size) {
3589 num_bytes = block_rsv->reserved - block_rsv->size;
3590 block_rsv->reserved = block_rsv->size;
3591 block_rsv->full = 1;
3592 } else {
3593 num_bytes = 0;
3594 }
3595 spin_unlock(&block_rsv->lock);
3596
3597 if (num_bytes > 0) {
3598 if (dest) {
3599 spin_lock(&dest->lock);
3600 if (!dest->full) {
3601 u64 bytes_to_add;
3602
3603 bytes_to_add = dest->size - dest->reserved;
3604 bytes_to_add = min(num_bytes, bytes_to_add);
3605 dest->reserved += bytes_to_add;
3606 if (dest->reserved >= dest->size)
3607 dest->full = 1;
3608 num_bytes -= bytes_to_add;
3609 }
3610 spin_unlock(&dest->lock);
3611 }
3612 if (num_bytes) {
3613 spin_lock(&space_info->lock);
3614 space_info->bytes_reserved -= num_bytes;
3615 spin_unlock(&space_info->lock);
3616 }
3617 }
3618 }
3619
3620 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
3621 struct btrfs_block_rsv *dst, u64 num_bytes)
3622 {
3623 int ret;
3624
3625 ret = block_rsv_use_bytes(src, num_bytes);
3626 if (ret)
3627 return ret;
3628
3629 block_rsv_add_bytes(dst, num_bytes, 1);
3630 return 0;
3631 }
3632
3633 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv)
3634 {
3635 memset(rsv, 0, sizeof(*rsv));
3636 spin_lock_init(&rsv->lock);
3637 atomic_set(&rsv->usage, 1);
3638 rsv->priority = 6;
3639 INIT_LIST_HEAD(&rsv->list);
3640 }
3641
3642 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root)
3643 {
3644 struct btrfs_block_rsv *block_rsv;
3645 struct btrfs_fs_info *fs_info = root->fs_info;
3646
3647 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
3648 if (!block_rsv)
3649 return NULL;
3650
3651 btrfs_init_block_rsv(block_rsv);
3652 block_rsv->space_info = __find_space_info(fs_info,
3653 BTRFS_BLOCK_GROUP_METADATA);
3654 return block_rsv;
3655 }
3656
3657 void btrfs_free_block_rsv(struct btrfs_root *root,
3658 struct btrfs_block_rsv *rsv)
3659 {
3660 if (rsv && atomic_dec_and_test(&rsv->usage)) {
3661 btrfs_block_rsv_release(root, rsv, (u64)-1);
3662 if (!rsv->durable)
3663 kfree(rsv);
3664 }
3665 }
3666
3667 /*
3668  * make the block_rsv struct able to capture freed space.
3669  * the captured space will be re-added to the block_rsv struct
3670 * after transaction commit
3671 */
3672 void btrfs_add_durable_block_rsv(struct btrfs_fs_info *fs_info,
3673 struct btrfs_block_rsv *block_rsv)
3674 {
3675 block_rsv->durable = 1;
3676 mutex_lock(&fs_info->durable_block_rsv_mutex);
3677 list_add_tail(&block_rsv->list, &fs_info->durable_block_rsv_list);
3678 mutex_unlock(&fs_info->durable_block_rsv_mutex);
3679 }
3680
3681 int btrfs_block_rsv_add(struct btrfs_trans_handle *trans,
3682 struct btrfs_root *root,
3683 struct btrfs_block_rsv *block_rsv,
3684 u64 num_bytes)
3685 {
3686 int ret;
3687
3688 if (num_bytes == 0)
3689 return 0;
3690
3691 ret = reserve_metadata_bytes(trans, root, block_rsv, num_bytes, 1);
3692 if (!ret) {
3693 block_rsv_add_bytes(block_rsv, num_bytes, 1);
3694 return 0;
3695 }
3696
3697 return ret;
3698 }
3699
3700 int btrfs_block_rsv_check(struct btrfs_trans_handle *trans,
3701 struct btrfs_root *root,
3702 struct btrfs_block_rsv *block_rsv,
3703 u64 min_reserved, int min_factor)
3704 {
3705 u64 num_bytes = 0;
3706 int commit_trans = 0;
3707 int ret = -ENOSPC;
3708
3709 if (!block_rsv)
3710 return 0;
3711
3712 spin_lock(&block_rsv->lock);
3713 if (min_factor > 0)
3714 num_bytes = div_factor(block_rsv->size, min_factor);
3715 if (min_reserved > num_bytes)
3716 num_bytes = min_reserved;
3717
3718 if (block_rsv->reserved >= num_bytes) {
3719 ret = 0;
3720 } else {
3721 num_bytes -= block_rsv->reserved;
3722 if (block_rsv->durable &&
3723 block_rsv->freed[0] + block_rsv->freed[1] >= num_bytes)
3724 commit_trans = 1;
3725 }
3726 spin_unlock(&block_rsv->lock);
3727 if (!ret)
3728 return 0;
3729
3730 if (block_rsv->refill_used) {
3731 ret = reserve_metadata_bytes(trans, root, block_rsv,
3732 num_bytes, 0);
3733 if (!ret) {
3734 block_rsv_add_bytes(block_rsv, num_bytes, 0);
3735 return 0;
3736 }
3737 }
3738
3739 if (commit_trans) {
3740 if (trans)
3741 return -EAGAIN;
3742
3743 trans = btrfs_join_transaction(root, 1);
3744 BUG_ON(IS_ERR(trans));
3745 ret = btrfs_commit_transaction(trans, root);
3746 return 0;
3747 }
3748
3749 return -ENOSPC;
3750 }
3751
3752 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
3753 struct btrfs_block_rsv *dst_rsv,
3754 u64 num_bytes)
3755 {
3756 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3757 }
3758
3759 void btrfs_block_rsv_release(struct btrfs_root *root,
3760 struct btrfs_block_rsv *block_rsv,
3761 u64 num_bytes)
3762 {
3763 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
3764 if (global_rsv->full || global_rsv == block_rsv ||
3765 block_rsv->space_info != global_rsv->space_info)
3766 global_rsv = NULL;
3767 block_rsv_release_bytes(block_rsv, global_rsv, num_bytes);
3768 }
3769
3770 /*
3771 * helper to calculate size of global block reservation.
3772 * the desired value is sum of space used by extent tree,
3773 * checksum tree and root tree
3774 */
3775 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
3776 {
3777 struct btrfs_space_info *sinfo;
3778 u64 num_bytes;
3779 u64 meta_used;
3780 u64 data_used;
3781 int csum_size = btrfs_super_csum_size(&fs_info->super_copy);
3782 #if 0
3783 /*
3784 	 * per tree used space accounting can be inaccurate, so we
3785 * can't rely on it.
3786 */
3787 spin_lock(&fs_info->extent_root->accounting_lock);
3788 num_bytes = btrfs_root_used(&fs_info->extent_root->root_item);
3789 spin_unlock(&fs_info->extent_root->accounting_lock);
3790
3791 spin_lock(&fs_info->csum_root->accounting_lock);
3792 num_bytes += btrfs_root_used(&fs_info->csum_root->root_item);
3793 spin_unlock(&fs_info->csum_root->accounting_lock);
3794
3795 spin_lock(&fs_info->tree_root->accounting_lock);
3796 num_bytes += btrfs_root_used(&fs_info->tree_root->root_item);
3797 spin_unlock(&fs_info->tree_root->accounting_lock);
3798 #endif
3799 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
3800 spin_lock(&sinfo->lock);
3801 data_used = sinfo->bytes_used;
3802 spin_unlock(&sinfo->lock);
3803
3804 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3805 spin_lock(&sinfo->lock);
3806 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
3807 data_used = 0;
3808 meta_used = sinfo->bytes_used;
3809 spin_unlock(&sinfo->lock);
3810
3811 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
3812 csum_size * 2;
3813 num_bytes += div64_u64(data_used + meta_used, 50);
3814
3815 if (num_bytes * 3 > meta_used)
3816 num_bytes = div64_u64(meta_used, 3);
3817
3818 return ALIGN(num_bytes, fs_info->extent_root->leafsize << 10);
3819 }
3820
3821 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
3822 {
3823 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
3824 struct btrfs_space_info *sinfo = block_rsv->space_info;
3825 u64 num_bytes;
3826
3827 num_bytes = calc_global_metadata_size(fs_info);
3828
3829 spin_lock(&block_rsv->lock);
3830 spin_lock(&sinfo->lock);
3831
3832 block_rsv->size = num_bytes;
3833
3834 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
3835 sinfo->bytes_reserved + sinfo->bytes_readonly +
3836 sinfo->bytes_may_use;
3837
3838 if (sinfo->total_bytes > num_bytes) {
3839 num_bytes = sinfo->total_bytes - num_bytes;
3840 block_rsv->reserved += num_bytes;
3841 sinfo->bytes_reserved += num_bytes;
3842 }
3843
3844 if (block_rsv->reserved >= block_rsv->size) {
3845 num_bytes = block_rsv->reserved - block_rsv->size;
3846 sinfo->bytes_reserved -= num_bytes;
3847 block_rsv->reserved = block_rsv->size;
3848 block_rsv->full = 1;
3849 }
3850 #if 0
3851 printk(KERN_INFO"global block rsv size %llu reserved %llu\n",
3852 block_rsv->size, block_rsv->reserved);
3853 #endif
3854 spin_unlock(&sinfo->lock);
3855 spin_unlock(&block_rsv->lock);
3856 }
3857
3858 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
3859 {
3860 struct btrfs_space_info *space_info;
3861
3862 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
3863 fs_info->chunk_block_rsv.space_info = space_info;
3864 fs_info->chunk_block_rsv.priority = 10;
3865
3866 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
3867 fs_info->global_block_rsv.space_info = space_info;
3868 fs_info->global_block_rsv.priority = 10;
3869 fs_info->global_block_rsv.refill_used = 1;
3870 fs_info->delalloc_block_rsv.space_info = space_info;
3871 fs_info->trans_block_rsv.space_info = space_info;
3872 fs_info->empty_block_rsv.space_info = space_info;
3873 fs_info->empty_block_rsv.priority = 10;
3874
3875 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
3876 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
3877 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
3878 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
3879 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
3880
3881 btrfs_add_durable_block_rsv(fs_info, &fs_info->global_block_rsv);
3882
3883 btrfs_add_durable_block_rsv(fs_info, &fs_info->delalloc_block_rsv);
3884
3885 update_global_block_rsv(fs_info);
3886 }
3887
3888 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
3889 {
3890 block_rsv_release_bytes(&fs_info->global_block_rsv, NULL, (u64)-1);
3891 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
3892 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
3893 WARN_ON(fs_info->trans_block_rsv.size > 0);
3894 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
3895 WARN_ON(fs_info->chunk_block_rsv.size > 0);
3896 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
3897 }
3898
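/*
 * worst-case bytes needed to update num_items items: one full
 * root-to-leaf path (a leaf plus BTRFS_MAX_LEVEL - 1 nodes) per item,
 * tripled as headroom for CoW and splits.  e.g. with 4KiB leaves and
 * nodes and BTRFS_MAX_LEVEL == 8, one item reserves
 * (4096 + 4096 * 7) * 3 = 96KiB.
 */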
3899 static u64 calc_trans_metadata_size(struct btrfs_root *root, int num_items)
3900 {
3901 return (root->leafsize + root->nodesize * (BTRFS_MAX_LEVEL - 1)) *
3902 3 * num_items;
3903 }
3904
3905 int btrfs_trans_reserve_metadata(struct btrfs_trans_handle *trans,
3906 struct btrfs_root *root,
3907 int num_items)
3908 {
3909 u64 num_bytes;
3910 int ret;
3911
3912 if (num_items == 0 || root->fs_info->chunk_root == root)
3913 return 0;
3914
3915 num_bytes = calc_trans_metadata_size(root, num_items);
3916 ret = btrfs_block_rsv_add(trans, root, &root->fs_info->trans_block_rsv,
3917 num_bytes);
3918 if (!ret) {
3919 trans->bytes_reserved += num_bytes;
3920 trans->block_rsv = &root->fs_info->trans_block_rsv;
3921 }
3922 return ret;
3923 }
3924
3925 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
3926 struct btrfs_root *root)
3927 {
3928 if (!trans->bytes_reserved)
3929 return;
3930
3931 BUG_ON(trans->block_rsv != &root->fs_info->trans_block_rsv);
3932 btrfs_block_rsv_release(root, trans->block_rsv,
3933 trans->bytes_reserved);
3934 trans->bytes_reserved = 0;
3935 }
3936
3937 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
3938 struct inode *inode)
3939 {
3940 struct btrfs_root *root = BTRFS_I(inode)->root;
3941 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3942 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
3943
3944 /*
3945 * one for deleting the orphan item, one for updating the inode and
3946 * two for calling btrfs_truncate_inode_items.
3947 *
3948 * btrfs_truncate_inode_items is a delete operation; it frees
3949 * more space than it uses in most cases. So two units of
3950 * metadata space should be enough for calling it many times.
3951 * If all of the metadata space is used, we can commit the
3952 * transaction and use the space it freed.
3953 */
3954 u64 num_bytes = calc_trans_metadata_size(root, 4);
3955 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3956 }
3957
3958 void btrfs_orphan_release_metadata(struct inode *inode)
3959 {
3960 struct btrfs_root *root = BTRFS_I(inode)->root;
3961 u64 num_bytes = calc_trans_metadata_size(root, 4);
3962 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
3963 }
3964
3965 int btrfs_snap_reserve_metadata(struct btrfs_trans_handle *trans,
3966 struct btrfs_pending_snapshot *pending)
3967 {
3968 struct btrfs_root *root = pending->root;
3969 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
3970 struct btrfs_block_rsv *dst_rsv = &pending->block_rsv;
3971 /*
3972 * two for root back/forward refs, two for directory entries
3973 * and one for the root of the snapshot.
3974 */
3975 u64 num_bytes = calc_trans_metadata_size(root, 5);
3976 dst_rsv->space_info = src_rsv->space_info;
3977 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
3978 }
3979
3980 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes)
3981 {
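/* reserve 1/8th of the data range for csum tree updates, e.g. a
 * 1MiB delalloc reservation sets aside 128KiB here.
 */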
3982 return num_bytes >> 3;
3983 }
3984
3985 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
3986 {
3987 struct btrfs_root *root = BTRFS_I(inode)->root;
3988 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
3989 u64 to_reserve;
3990 int nr_extents;
3991 int ret;
3992
3993 if (btrfs_transaction_in_commit(root->fs_info))
3994 schedule_timeout(1);
3995
3996 num_bytes = ALIGN(num_bytes, root->sectorsize);
3997
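/*
 * reserve worst-case tree space only for the extents not already
 * covered: outstanding extents plus one for the range being added,
 * minus whatever earlier calls have reserved.
 */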
3998 spin_lock(&BTRFS_I(inode)->accounting_lock);
3999 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents) + 1;
4000 if (nr_extents > BTRFS_I(inode)->reserved_extents) {
4001 nr_extents -= BTRFS_I(inode)->reserved_extents;
4002 to_reserve = calc_trans_metadata_size(root, nr_extents);
4003 } else {
4004 nr_extents = 0;
4005 to_reserve = 0;
4006 }
4007 spin_unlock(&BTRFS_I(inode)->accounting_lock);
4008
4009 to_reserve += calc_csum_metadata_size(inode, num_bytes);
4010 ret = reserve_metadata_bytes(NULL, root, block_rsv, to_reserve, 1);
4011 if (ret)
4012 return ret;
4013
4014 spin_lock(&BTRFS_I(inode)->accounting_lock);
4015 BTRFS_I(inode)->reserved_extents += nr_extents;
4016 atomic_inc(&BTRFS_I(inode)->outstanding_extents);
4017 spin_unlock(&BTRFS_I(inode)->accounting_lock);
4018
4019 block_rsv_add_bytes(block_rsv, to_reserve, 1);
4020
4021 if (block_rsv->size > 512 * 1024 * 1024)
4022 shrink_delalloc(NULL, root, to_reserve, 0);
4023
4024 return 0;
4025 }
4026
4027 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
4028 {
4029 struct btrfs_root *root = BTRFS_I(inode)->root;
4030 u64 to_free;
4031 int nr_extents;
4032
4033 num_bytes = ALIGN(num_bytes, root->sectorsize);
4034 atomic_dec(&BTRFS_I(inode)->outstanding_extents);
4035 WARN_ON(atomic_read(&BTRFS_I(inode)->outstanding_extents) < 0);
4036
4037 spin_lock(&BTRFS_I(inode)->accounting_lock);
4038 nr_extents = atomic_read(&BTRFS_I(inode)->outstanding_extents);
4039 if (nr_extents < BTRFS_I(inode)->reserved_extents) {
4040 nr_extents = BTRFS_I(inode)->reserved_extents - nr_extents;
4041 BTRFS_I(inode)->reserved_extents -= nr_extents;
4042 } else {
4043 nr_extents = 0;
4044 }
4045 spin_unlock(&BTRFS_I(inode)->accounting_lock);
4046
4047 to_free = calc_csum_metadata_size(inode, num_bytes);
4048 if (nr_extents > 0)
4049 to_free += calc_trans_metadata_size(root, nr_extents);
4050
4051 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
4052 to_free);
4053 }
4054
4055 int btrfs_delalloc_reserve_space(struct inode *inode, u64 num_bytes)
4056 {
4057 int ret;
4058
4059 ret = btrfs_check_data_free_space(inode, num_bytes);
4060 if (ret)
4061 return ret;
4062
4063 ret = btrfs_delalloc_reserve_metadata(inode, num_bytes);
4064 if (ret) {
4065 btrfs_free_reserved_data_space(inode, num_bytes);
4066 return ret;
4067 }
4068
4069 return 0;
4070 }
4071
4072 void btrfs_delalloc_release_space(struct inode *inode, u64 num_bytes)
4073 {
4074 btrfs_delalloc_release_metadata(inode, num_bytes);
4075 btrfs_free_reserved_data_space(inode, num_bytes);
4076 }
4077
4078 static int update_block_group(struct btrfs_trans_handle *trans,
4079 struct btrfs_root *root,
4080 u64 bytenr, u64 num_bytes, int alloc)
4081 {
4082 struct btrfs_block_group_cache *cache = NULL;
4083 struct btrfs_fs_info *info = root->fs_info;
4084 u64 total = num_bytes;
4085 u64 old_val;
4086 u64 byte_in_group;
4087 int factor;
4088
4089 /* block accounting for super block */
4090 spin_lock(&info->delalloc_lock);
4091 old_val = btrfs_super_bytes_used(&info->super_copy);
4092 if (alloc)
4093 old_val += num_bytes;
4094 else
4095 old_val -= num_bytes;
4096 btrfs_set_super_bytes_used(&info->super_copy, old_val);
4097 spin_unlock(&info->delalloc_lock);
4098
4099 while (total) {
4100 cache = btrfs_lookup_block_group(info, bytenr);
4101 if (!cache)
4102 return -1;
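/*
 * DUP/RAID1/RAID10 keep two copies of every extent, so disk_used
 * must move twice as fast as the logical byte count.
 */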
4103 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
4104 BTRFS_BLOCK_GROUP_RAID1 |
4105 BTRFS_BLOCK_GROUP_RAID10))
4106 factor = 2;
4107 else
4108 factor = 1;
4109 /*
4110 * If this block group has free space cache written out, we
4111 * need to make sure to load it if we are removing space. This
4112 * is because we need the unpinning stage to actually add the
4113 * space back to the block group, otherwise we will leak space.
4114 */
4115 if (!alloc && cache->cached == BTRFS_CACHE_NO)
4116 cache_block_group(cache, trans, NULL, 1);
4117
4118 byte_in_group = bytenr - cache->key.objectid;
4119 WARN_ON(byte_in_group > cache->key.offset);
4120
4121 spin_lock(&cache->space_info->lock);
4122 spin_lock(&cache->lock);
4123
4124 if (btrfs_super_cache_generation(&info->super_copy) != 0 &&
4125 cache->disk_cache_state < BTRFS_DC_CLEAR)
4126 cache->disk_cache_state = BTRFS_DC_CLEAR;
4127
4128 cache->dirty = 1;
4129 old_val = btrfs_block_group_used(&cache->item);
4130 num_bytes = min(total, cache->key.offset - byte_in_group);
4131 if (alloc) {
4132 old_val += num_bytes;
4133 btrfs_set_block_group_used(&cache->item, old_val);
4134 cache->reserved -= num_bytes;
4135 cache->space_info->bytes_reserved -= num_bytes;
4136 cache->space_info->bytes_used += num_bytes;
4137 cache->space_info->disk_used += num_bytes * factor;
4138 spin_unlock(&cache->lock);
4139 spin_unlock(&cache->space_info->lock);
4140 } else {
4141 old_val -= num_bytes;
4142 btrfs_set_block_group_used(&cache->item, old_val);
4143 cache->pinned += num_bytes;
4144 cache->space_info->bytes_pinned += num_bytes;
4145 cache->space_info->bytes_used -= num_bytes;
4146 cache->space_info->disk_used -= num_bytes * factor;
4147 spin_unlock(&cache->lock);
4148 spin_unlock(&cache->space_info->lock);
4149
4150 set_extent_dirty(info->pinned_extents,
4151 bytenr, bytenr + num_bytes - 1,
4152 GFP_NOFS | __GFP_NOFAIL);
4153 }
4154 btrfs_put_block_group(cache);
4155 total -= num_bytes;
4156 bytenr += num_bytes;
4157 }
4158 return 0;
4159 }
4160
4161 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
4162 {
4163 struct btrfs_block_group_cache *cache;
4164 u64 bytenr;
4165
4166 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
4167 if (!cache)
4168 return 0;
4169
4170 bytenr = cache->key.objectid;
4171 btrfs_put_block_group(cache);
4172
4173 return bytenr;
4174 }
4175
4176 static int pin_down_extent(struct btrfs_root *root,
4177 struct btrfs_block_group_cache *cache,
4178 u64 bytenr, u64 num_bytes, int reserved)
4179 {
4180 spin_lock(&cache->space_info->lock);
4181 spin_lock(&cache->lock);
4182 cache->pinned += num_bytes;
4183 cache->space_info->bytes_pinned += num_bytes;
4184 if (reserved) {
4185 cache->reserved -= num_bytes;
4186 cache->space_info->bytes_reserved -= num_bytes;
4187 }
4188 spin_unlock(&cache->lock);
4189 spin_unlock(&cache->space_info->lock);
4190
4191 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
4192 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
4193 return 0;
4194 }
4195
4196 /*
4197 * this function must be called within transaction
4198 */
4199 int btrfs_pin_extent(struct btrfs_root *root,
4200 u64 bytenr, u64 num_bytes, int reserved)
4201 {
4202 struct btrfs_block_group_cache *cache;
4203
4204 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
4205 BUG_ON(!cache);
4206
4207 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
4208
4209 btrfs_put_block_group(cache);
4210 return 0;
4211 }
4212
4213 /*
4214 * update size of reserved extents. this function may return -EAGAIN
4215 * if 'reserve' is true or 'sinfo' is false.
4216 */
4217 static int update_reserved_bytes(struct btrfs_block_group_cache *cache,
4218 u64 num_bytes, int reserve, int sinfo)
4219 {
4220 int ret = 0;
4221 if (sinfo) {
4222 struct btrfs_space_info *space_info = cache->space_info;
4223 spin_lock(&space_info->lock);
4224 spin_lock(&cache->lock);
4225 if (reserve) {
4226 if (cache->ro) {
4227 ret = -EAGAIN;
4228 } else {
4229 cache->reserved += num_bytes;
4230 space_info->bytes_reserved += num_bytes;
4231 }
4232 } else {
4233 if (cache->ro)
4234 space_info->bytes_readonly += num_bytes;
4235 cache->reserved -= num_bytes;
4236 space_info->bytes_reserved -= num_bytes;
4237 }
4238 spin_unlock(&cache->lock);
4239 spin_unlock(&space_info->lock);
4240 } else {
4241 spin_lock(&cache->lock);
4242 if (cache->ro) {
4243 ret = -EAGAIN;
4244 } else {
4245 if (reserve)
4246 cache->reserved += num_bytes;
4247 else
4248 cache->reserved -= num_bytes;
4249 }
4250 spin_unlock(&cache->lock);
4251 }
4252 return ret;
4253 }
4254
4255 int btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
4256 struct btrfs_root *root)
4257 {
4258 struct btrfs_fs_info *fs_info = root->fs_info;
4259 struct btrfs_caching_control *next;
4260 struct btrfs_caching_control *caching_ctl;
4261 struct btrfs_block_group_cache *cache;
4262
4263 down_write(&fs_info->extent_commit_sem);
4264
4265 list_for_each_entry_safe(caching_ctl, next,
4266 &fs_info->caching_block_groups, list) {
4267 cache = caching_ctl->block_group;
4268 if (block_group_cache_done(cache)) {
4269 cache->last_byte_to_unpin = (u64)-1;
4270 list_del_init(&caching_ctl->list);
4271 put_caching_control(caching_ctl);
4272 } else {
4273 cache->last_byte_to_unpin = caching_ctl->progress;
4274 }
4275 }
4276
4277 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4278 fs_info->pinned_extents = &fs_info->freed_extents[1];
4279 else
4280 fs_info->pinned_extents = &fs_info->freed_extents[0];
4281
4282 up_write(&fs_info->extent_commit_sem);
4283
4284 update_global_block_rsv(fs_info);
4285 return 0;
4286 }
4287
4288 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
4289 {
4290 struct btrfs_fs_info *fs_info = root->fs_info;
4291 struct btrfs_block_group_cache *cache = NULL;
4292 u64 len;
4293
4294 while (start <= end) {
4295 if (!cache ||
4296 start >= cache->key.objectid + cache->key.offset) {
4297 if (cache)
4298 btrfs_put_block_group(cache);
4299 cache = btrfs_lookup_block_group(fs_info, start);
4300 BUG_ON(!cache);
4301 }
4302
4303 len = cache->key.objectid + cache->key.offset - start;
4304 len = min(len, end + 1 - start);
4305
4306 if (start < cache->last_byte_to_unpin) {
4307 len = min(len, cache->last_byte_to_unpin - start);
4308 btrfs_add_free_space(cache, start, len);
4309 }
4310
4311 start += len;
4312
4313 spin_lock(&cache->space_info->lock);
4314 spin_lock(&cache->lock);
4315 cache->pinned -= len;
4316 cache->space_info->bytes_pinned -= len;
4317 if (cache->ro) {
4318 cache->space_info->bytes_readonly += len;
4319 } else if (cache->reserved_pinned > 0) {
4320 len = min(len, cache->reserved_pinned);
4321 cache->reserved_pinned -= len;
4322 cache->space_info->bytes_reserved += len;
4323 }
4324 spin_unlock(&cache->lock);
4325 spin_unlock(&cache->space_info->lock);
4326 }
4327
4328 if (cache)
4329 btrfs_put_block_group(cache);
4330 return 0;
4331 }
4332
4333 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
4334 struct btrfs_root *root)
4335 {
4336 struct btrfs_fs_info *fs_info = root->fs_info;
4337 struct extent_io_tree *unpin;
4338 struct btrfs_block_rsv *block_rsv;
4339 struct btrfs_block_rsv *next_rsv;
4340 u64 start;
4341 u64 end;
4342 int idx;
4343 int ret;
4344
4345 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
4346 unpin = &fs_info->freed_extents[1];
4347 else
4348 unpin = &fs_info->freed_extents[0];
4349
4350 while (1) {
4351 ret = find_first_extent_bit(unpin, 0, &start, &end,
4352 EXTENT_DIRTY);
4353 if (ret)
4354 break;
4355
4356 ret = btrfs_discard_extent(root, start, end + 1 - start);
4357
4358 clear_extent_dirty(unpin, start, end, GFP_NOFS);
4359 unpin_extent_range(root, start, end);
4360 cond_resched();
4361 }
4362
4363 mutex_lock(&fs_info->durable_block_rsv_mutex);
4364 list_for_each_entry_safe(block_rsv, next_rsv,
4365 &fs_info->durable_block_rsv_list, list) {
4366
4367 idx = trans->transid & 0x1;
4368 if (block_rsv->freed[idx] > 0) {
4369 block_rsv_add_bytes(block_rsv,
4370 block_rsv->freed[idx], 0);
4371 block_rsv->freed[idx] = 0;
4372 }
4373 if (atomic_read(&block_rsv->usage) == 0) {
4374 btrfs_block_rsv_release(root, block_rsv, (u64)-1);
4375
4376 if (block_rsv->freed[0] == 0 &&
4377 block_rsv->freed[1] == 0) {
4378 list_del_init(&block_rsv->list);
4379 kfree(block_rsv);
4380 }
4381 } else {
4382 btrfs_block_rsv_release(root, block_rsv, 0);
4383 }
4384 }
4385 mutex_unlock(&fs_info->durable_block_rsv_mutex);
4386
4387 return 0;
4388 }
4389
4390 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
4391 struct btrfs_root *root,
4392 u64 bytenr, u64 num_bytes, u64 parent,
4393 u64 root_objectid, u64 owner_objectid,
4394 u64 owner_offset, int refs_to_drop,
4395 struct btrfs_delayed_extent_op *extent_op)
4396 {
4397 struct btrfs_key key;
4398 struct btrfs_path *path;
4399 struct btrfs_fs_info *info = root->fs_info;
4400 struct btrfs_root *extent_root = info->extent_root;
4401 struct extent_buffer *leaf;
4402 struct btrfs_extent_item *ei;
4403 struct btrfs_extent_inline_ref *iref;
4404 int ret;
4405 int is_data;
4406 int extent_slot = 0;
4407 int found_extent = 0;
4408 int num_to_del = 1;
4409 u32 item_size;
4410 u64 refs;
4411
4412 path = btrfs_alloc_path();
4413 if (!path)
4414 return -ENOMEM;
4415
4416 path->reada = 1;
4417 path->leave_spinning = 1;
4418
4419 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
4420 BUG_ON(!is_data && refs_to_drop != 1);
4421
4422 ret = lookup_extent_backref(trans, extent_root, path, &iref,
4423 bytenr, num_bytes, parent,
4424 root_objectid, owner_objectid,
4425 owner_offset);
4426 if (ret == 0) {
4427 extent_slot = path->slots[0];
4428 while (extent_slot >= 0) {
4429 btrfs_item_key_to_cpu(path->nodes[0], &key,
4430 extent_slot);
4431 if (key.objectid != bytenr)
4432 break;
4433 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
4434 key.offset == num_bytes) {
4435 found_extent = 1;
4436 break;
4437 }
4438 if (path->slots[0] - extent_slot > 5)
4439 break;
4440 extent_slot--;
4441 }
4442 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4443 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
4444 if (found_extent && item_size < sizeof(*ei))
4445 found_extent = 0;
4446 #endif
4447 if (!found_extent) {
4448 BUG_ON(iref);
4449 ret = remove_extent_backref(trans, extent_root, path,
4450 NULL, refs_to_drop,
4451 is_data);
4452 BUG_ON(ret);
4453 btrfs_release_path(extent_root, path);
4454 path->leave_spinning = 1;
4455
4456 key.objectid = bytenr;
4457 key.type = BTRFS_EXTENT_ITEM_KEY;
4458 key.offset = num_bytes;
4459
4460 ret = btrfs_search_slot(trans, extent_root,
4461 &key, path, -1, 1);
4462 if (ret) {
4463 printk(KERN_ERR "umm, got %d back from search"
4464 ", was looking for %llu\n", ret,
4465 (unsigned long long)bytenr);
4466 btrfs_print_leaf(extent_root, path->nodes[0]);
4467 }
4468 BUG_ON(ret);
4469 extent_slot = path->slots[0];
4470 }
4471 } else {
4472 btrfs_print_leaf(extent_root, path->nodes[0]);
4473 WARN_ON(1);
4474 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
4475 "parent %llu root %llu owner %llu offset %llu\n",
4476 (unsigned long long)bytenr,
4477 (unsigned long long)parent,
4478 (unsigned long long)root_objectid,
4479 (unsigned long long)owner_objectid,
4480 (unsigned long long)owner_offset);
4481 }
4482
4483 leaf = path->nodes[0];
4484 item_size = btrfs_item_size_nr(leaf, extent_slot);
4485 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
4486 if (item_size < sizeof(*ei)) {
4487 BUG_ON(found_extent || extent_slot != path->slots[0]);
4488 ret = convert_extent_item_v0(trans, extent_root, path,
4489 owner_objectid, 0);
4490 BUG_ON(ret < 0);
4491
4492 btrfs_release_path(extent_root, path);
4493 path->leave_spinning = 1;
4494
4495 key.objectid = bytenr;
4496 key.type = BTRFS_EXTENT_ITEM_KEY;
4497 key.offset = num_bytes;
4498
4499 ret = btrfs_search_slot(trans, extent_root, &key, path,
4500 -1, 1);
4501 if (ret) {
4502 printk(KERN_ERR "umm, got %d back from search"
4503 ", was looking for %llu\n", ret,
4504 (unsigned long long)bytenr);
4505 btrfs_print_leaf(extent_root, path->nodes[0]);
4506 }
4507 BUG_ON(ret);
4508 extent_slot = path->slots[0];
4509 leaf = path->nodes[0];
4510 item_size = btrfs_item_size_nr(leaf, extent_slot);
4511 }
4512 #endif
4513 BUG_ON(item_size < sizeof(*ei));
4514 ei = btrfs_item_ptr(leaf, extent_slot,
4515 struct btrfs_extent_item);
4516 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4517 struct btrfs_tree_block_info *bi;
4518 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
4519 bi = (struct btrfs_tree_block_info *)(ei + 1);
4520 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
4521 }
4522
4523 refs = btrfs_extent_refs(leaf, ei);
4524 BUG_ON(refs < refs_to_drop);
4525 refs -= refs_to_drop;
4526
4527 if (refs > 0) {
4528 if (extent_op)
4529 __run_delayed_extent_op(extent_op, leaf, ei);
4530 /*
4531 * In the case of an inline back ref, the reference count will
4532 * be updated by remove_extent_backref
4533 */
4534 if (iref) {
4535 BUG_ON(!found_extent);
4536 } else {
4537 btrfs_set_extent_refs(leaf, ei, refs);
4538 btrfs_mark_buffer_dirty(leaf);
4539 }
4540 if (found_extent) {
4541 ret = remove_extent_backref(trans, extent_root, path,
4542 iref, refs_to_drop,
4543 is_data);
4544 BUG_ON(ret);
4545 }
4546 } else {
4547 if (found_extent) {
4548 BUG_ON(is_data && refs_to_drop !=
4549 extent_data_ref_count(root, path, iref));
4550 if (iref) {
4551 BUG_ON(path->slots[0] != extent_slot);
4552 } else {
4553 BUG_ON(path->slots[0] != extent_slot + 1);
4554 path->slots[0] = extent_slot;
4555 num_to_del = 2;
4556 }
4557 }
4558
4559 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
4560 num_to_del);
4561 BUG_ON(ret);
4562 btrfs_release_path(extent_root, path);
4563
4564 if (is_data) {
4565 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
4566 BUG_ON(ret);
4567 } else {
4568 invalidate_mapping_pages(info->btree_inode->i_mapping,
4569 bytenr >> PAGE_CACHE_SHIFT,
4570 (bytenr + num_bytes - 1) >> PAGE_CACHE_SHIFT);
4571 }
4572
4573 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
4574 BUG_ON(ret);
4575 }
4576 btrfs_free_path(path);
4577 return ret;
4578 }
4579
4580 /*
4581 * when we free a block, it is possible (and likely) that we free the last
4582 * delayed ref for that extent as well. This searches the delayed ref tree for
4583 * a given extent, and if there are no other delayed refs to be processed, it
4584 * removes it from the tree.
4585 */
4586 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
4587 struct btrfs_root *root, u64 bytenr)
4588 {
4589 struct btrfs_delayed_ref_head *head;
4590 struct btrfs_delayed_ref_root *delayed_refs;
4591 struct btrfs_delayed_ref_node *ref;
4592 struct rb_node *node;
4593 int ret = 0;
4594
4595 delayed_refs = &trans->transaction->delayed_refs;
4596 spin_lock(&delayed_refs->lock);
4597 head = btrfs_find_delayed_ref_head(trans, bytenr);
4598 if (!head)
4599 goto out;
4600
4601 node = rb_prev(&head->node.rb_node);
4602 if (!node)
4603 goto out;
4604
4605 ref = rb_entry(node, struct btrfs_delayed_ref_node, rb_node);
4606
4607 /* there are still entries for this ref, we can't drop it */
4608 if (ref->bytenr == bytenr)
4609 goto out;
4610
4611 if (head->extent_op) {
4612 if (!head->must_insert_reserved)
4613 goto out;
4614 kfree(head->extent_op);
4615 head->extent_op = NULL;
4616 }
4617
4618 /*
4619 * waiting for the lock here would deadlock. If someone else has it
4620 * locked, they are already in the process of dropping it anyway
4621 */
4622 if (!mutex_trylock(&head->mutex))
4623 goto out;
4624
4625 /*
4626 * at this point we have a head with no other entries. Go
4627 * ahead and process it.
4628 */
4629 head->node.in_tree = 0;
4630 rb_erase(&head->node.rb_node, &delayed_refs->root);
4631
4632 delayed_refs->num_entries--;
4633
4634 /*
4635 * we don't take a ref on the node because we're removing it from the
4636 * tree, so we just steal the ref the tree was holding.
4637 */
4638 delayed_refs->num_heads--;
4639 if (list_empty(&head->cluster))
4640 delayed_refs->num_heads_ready--;
4641
4642 list_del_init(&head->cluster);
4643 spin_unlock(&delayed_refs->lock);
4644
4645 BUG_ON(head->extent_op);
4646 if (head->must_insert_reserved)
4647 ret = 1;
4648
4649 mutex_unlock(&head->mutex);
4650 btrfs_put_delayed_ref(&head->node);
4651 return ret;
4652 out:
4653 spin_unlock(&delayed_refs->lock);
4654 return 0;
4655 }
4656
4657 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
4658 struct btrfs_root *root,
4659 struct extent_buffer *buf,
4660 u64 parent, int last_ref)
4661 {
4662 struct btrfs_block_rsv *block_rsv;
4663 struct btrfs_block_group_cache *cache = NULL;
4664 int ret;
4665
4666 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4667 ret = btrfs_add_delayed_tree_ref(trans, buf->start, buf->len,
4668 parent, root->root_key.objectid,
4669 btrfs_header_level(buf),
4670 BTRFS_DROP_DELAYED_REF, NULL);
4671 BUG_ON(ret);
4672 }
4673
4674 if (!last_ref)
4675 return;
4676
4677 block_rsv = get_block_rsv(trans, root);
4678 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
4679 if (block_rsv->space_info != cache->space_info)
4680 goto out;
4681
4682 if (btrfs_header_generation(buf) == trans->transid) {
4683 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
4684 ret = check_ref_cleanup(trans, root, buf->start);
4685 if (!ret)
4686 goto pin;
4687 }
4688
4689 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
4690 pin_down_extent(root, cache, buf->start, buf->len, 1);
4691 goto pin;
4692 }
4693
4694 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
4695
4696 btrfs_add_free_space(cache, buf->start, buf->len);
4697 ret = update_reserved_bytes(cache, buf->len, 0, 0);
4698 if (ret == -EAGAIN) {
4699 /* block group became read-only */
4700 update_reserved_bytes(cache, buf->len, 0, 1);
4701 goto out;
4702 }
4703
4704 ret = 1;
4705 spin_lock(&block_rsv->lock);
4706 if (block_rsv->reserved < block_rsv->size) {
4707 block_rsv->reserved += buf->len;
4708 ret = 0;
4709 }
4710 spin_unlock(&block_rsv->lock);
4711
4712 if (ret) {
4713 spin_lock(&cache->space_info->lock);
4714 cache->space_info->bytes_reserved -= buf->len;
4715 spin_unlock(&cache->space_info->lock);
4716 }
4717 goto out;
4718 }
4719 pin:
4720 if (block_rsv->durable && !cache->ro) {
4721 ret = 0;
4722 spin_lock(&cache->lock);
4723 if (!cache->ro) {
4724 cache->reserved_pinned += buf->len;
4725 ret = 1;
4726 }
4727 spin_unlock(&cache->lock);
4728
4729 if (ret) {
4730 spin_lock(&block_rsv->lock);
4731 block_rsv->freed[trans->transid & 0x1] += buf->len;
4732 spin_unlock(&block_rsv->lock);
4733 }
4734 }
4735 out:
4736 btrfs_put_block_group(cache);
4737 }
4738
4739 int btrfs_free_extent(struct btrfs_trans_handle *trans,
4740 struct btrfs_root *root,
4741 u64 bytenr, u64 num_bytes, u64 parent,
4742 u64 root_objectid, u64 owner, u64 offset)
4743 {
4744 int ret;
4745
4746 /*
4747 * tree log blocks never actually go into the extent allocation
4748 * tree, just update pinning info and exit early.
4749 */
4750 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
4751 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
4752 /* unlocks the pinned mutex */
4753 btrfs_pin_extent(root, bytenr, num_bytes, 1);
4754 ret = 0;
4755 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
4756 ret = btrfs_add_delayed_tree_ref(trans, bytenr, num_bytes,
4757 parent, root_objectid, (int)owner,
4758 BTRFS_DROP_DELAYED_REF, NULL);
4759 BUG_ON(ret);
4760 } else {
4761 ret = btrfs_add_delayed_data_ref(trans, bytenr, num_bytes,
4762 parent, root_objectid, owner,
4763 offset, BTRFS_DROP_DELAYED_REF, NULL);
4764 BUG_ON(ret);
4765 }
4766 return ret;
4767 }
4768
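/*
 * round val up to the next stripesize boundary, e.g. with a 4KiB
 * stripesize a val of 6000 is aligned up to 8192.
 */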
4769 static u64 stripe_align(struct btrfs_root *root, u64 val)
4770 {
4771 u64 mask = ((u64)root->stripesize - 1);
4772 u64 ret = (val + mask) & ~mask;
4773 return ret;
4774 }
4775
4776 /*
4777 * when we wait for progress in the block group caching, it's because
4778 * our allocation attempt failed at least once. So, we must sleep
4779 * and let some progress happen before we try again.
4780 *
4781 * This function will sleep at least once waiting for new free space to
4782 * show up, and then it will check the block group free space numbers
4783 * for our min num_bytes. Another option is to have it go ahead
4784 * and look in the rbtree for a free extent of a given size, but this
4785 * is a good start.
4786 */
4787 static noinline int
4788 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
4789 u64 num_bytes)
4790 {
4791 struct btrfs_caching_control *caching_ctl;
4792 DEFINE_WAIT(wait);
4793
4794 caching_ctl = get_caching_control(cache);
4795 if (!caching_ctl)
4796 return 0;
4797
4798 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
4799 (cache->free_space >= num_bytes));
4800
4801 put_caching_control(caching_ctl);
4802 return 0;
4803 }
4804
4805 static noinline int
4806 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
4807 {
4808 struct btrfs_caching_control *caching_ctl;
4809 DEFINE_WAIT(wait);
4810
4811 caching_ctl = get_caching_control(cache);
4812 if (!caching_ctl)
4813 return 0;
4814
4815 wait_event(caching_ctl->wait, block_group_cache_done(cache));
4816
4817 put_caching_control(caching_ctl);
4818 return 0;
4819 }
4820
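/*
 * map a block group's redundancy profile to its slot in
 * space_info->block_groups[]; the allocator then scans RAID10,
 * RAID1, DUP, RAID0 and finally single block groups in that order.
 */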
4821 static int get_block_group_index(struct btrfs_block_group_cache *cache)
4822 {
4823 int index;
4824 if (cache->flags & BTRFS_BLOCK_GROUP_RAID10)
4825 index = 0;
4826 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID1)
4827 index = 1;
4828 else if (cache->flags & BTRFS_BLOCK_GROUP_DUP)
4829 index = 2;
4830 else if (cache->flags & BTRFS_BLOCK_GROUP_RAID0)
4831 index = 3;
4832 else
4833 index = 4;
4834 return index;
4835 }
4836
4837 enum btrfs_loop_type {
4838 LOOP_FIND_IDEAL = 0,
4839 LOOP_CACHING_NOWAIT = 1,
4840 LOOP_CACHING_WAIT = 2,
4841 LOOP_ALLOC_CHUNK = 3,
4842 LOOP_NO_EMPTY_SIZE = 4,
4843 };
4844
4845 /*
4846 * walks the btree of allocated extents and finds a hole of a given size.
4847 * The key ins is changed to record the hole:
4848 * ins->objectid == block start
4849 * ins->flags = BTRFS_EXTENT_ITEM_KEY
4850 * ins->offset == number of blocks
4851 * Any available blocks before search_start are skipped.
4852 */
4853 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
4854 struct btrfs_root *orig_root,
4855 u64 num_bytes, u64 empty_size,
4856 u64 search_start, u64 search_end,
4857 u64 hint_byte, struct btrfs_key *ins,
4858 int data)
4859 {
4860 int ret = 0;
4861 struct btrfs_root *root = orig_root->fs_info->extent_root;
4862 struct btrfs_free_cluster *last_ptr = NULL;
4863 struct btrfs_block_group_cache *block_group = NULL;
4864 int empty_cluster = 2 * 1024 * 1024;
4865 int allowed_chunk_alloc = 0;
4866 int done_chunk_alloc = 0;
4867 struct btrfs_space_info *space_info;
4868 int last_ptr_loop = 0;
4869 int loop = 0;
4870 int index = 0;
4871 bool found_uncached_bg = false;
4872 bool failed_cluster_refill = false;
4873 bool failed_alloc = false;
4874 bool use_cluster = true;
4875 u64 ideal_cache_percent = 0;
4876 u64 ideal_cache_offset = 0;
4877
4878 WARN_ON(num_bytes < root->sectorsize);
4879 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
4880 ins->objectid = 0;
4881 ins->offset = 0;
4882
4883 space_info = __find_space_info(root->fs_info, data);
4884 if (!space_info) {
4885 printk(KERN_ERR "No space info for %d\n", data);
4886 return -ENOSPC;
4887 }
4888
4889 /*
4890 * If the space info is for both data and metadata it means we have a
4891 * small filesystem and we can't use the clustering stuff.
4892 */
4893 if (btrfs_mixed_space_info(space_info))
4894 use_cluster = false;
4895
4896 if (orig_root->ref_cows || empty_size)
4897 allowed_chunk_alloc = 1;
4898
4899 if (data & BTRFS_BLOCK_GROUP_METADATA && use_cluster) {
4900 last_ptr = &root->fs_info->meta_alloc_cluster;
4901 if (!btrfs_test_opt(root, SSD))
4902 empty_cluster = 64 * 1024;
4903 }
4904
4905 if ((data & BTRFS_BLOCK_GROUP_DATA) && use_cluster &&
4906 btrfs_test_opt(root, SSD)) {
4907 last_ptr = &root->fs_info->data_alloc_cluster;
4908 }
4909
4910 if (last_ptr) {
4911 spin_lock(&last_ptr->lock);
4912 if (last_ptr->block_group)
4913 hint_byte = last_ptr->window_start;
4914 spin_unlock(&last_ptr->lock);
4915 }
4916
4917 search_start = max(search_start, first_logical_byte(root, 0));
4918 search_start = max(search_start, hint_byte);
4919
4920 if (!last_ptr)
4921 empty_cluster = 0;
4922
4923 if (search_start == hint_byte) {
4924 ideal_cache:
4925 block_group = btrfs_lookup_block_group(root->fs_info,
4926 search_start);
4927 /*
4928 * we don't want to use the block group if it doesn't match our
4929 * allocation bits, or if it's not cached.
4930 *
4931 * However if we are re-searching with an ideal block group
4932 * picked out then we don't care that the block group is cached.
4933 */
4934 if (block_group && block_group_bits(block_group, data) &&
4935 (block_group->cached != BTRFS_CACHE_NO ||
4936 search_start == ideal_cache_offset)) {
4937 down_read(&space_info->groups_sem);
4938 if (list_empty(&block_group->list) ||
4939 block_group->ro) {
4940 /*
4941 * someone is removing this block group,
4942 * we can't jump into the have_block_group
4943 * target because our list pointers are not
4944 * valid
4945 */
4946 btrfs_put_block_group(block_group);
4947 up_read(&space_info->groups_sem);
4948 } else {
4949 index = get_block_group_index(block_group);
4950 goto have_block_group;
4951 }
4952 } else if (block_group) {
4953 btrfs_put_block_group(block_group);
4954 }
4955 }
4956 search:
4957 down_read(&space_info->groups_sem);
4958 list_for_each_entry(block_group, &space_info->block_groups[index],
4959 list) {
4960 u64 offset;
4961 int cached;
4962
4963 btrfs_get_block_group(block_group);
4964 search_start = block_group->key.objectid;
4965
4966 /*
4967 * this can happen if we end up cycling through all the
4968 * raid types, but we want to make sure we only allocate
4969 * for the proper type.
4970 */
4971 if (!block_group_bits(block_group, data)) {
4972 u64 extra = BTRFS_BLOCK_GROUP_DUP |
4973 BTRFS_BLOCK_GROUP_RAID1 |
4974 BTRFS_BLOCK_GROUP_RAID10;
4975
4976 /*
4977 * if they asked for extra copies and this block group
4978 * doesn't provide them, bail. This does allow us to
4979 * fill raid0 from raid1.
4980 */
4981 if ((data & extra) && !(block_group->flags & extra))
4982 goto loop;
4983 }
4984
4985 have_block_group:
4986 if (unlikely(block_group->cached == BTRFS_CACHE_NO)) {
4987 u64 free_percent;
4988
4989 ret = cache_block_group(block_group, trans,
4990 orig_root, 1);
4991 if (block_group->cached == BTRFS_CACHE_FINISHED)
4992 goto have_block_group;
4993
4994 free_percent = btrfs_block_group_used(&block_group->item);
4995 free_percent *= 100;
4996 free_percent = div64_u64(free_percent,
4997 block_group->key.offset);
4998 free_percent = 100 - free_percent;
4999 if (free_percent > ideal_cache_percent &&
5000 likely(!block_group->ro)) {
5001 ideal_cache_offset = block_group->key.objectid;
5002 ideal_cache_percent = free_percent;
5003 }
5004
5005 /*
5006 * We only want to start kthread caching if we are at
5007 * the point where we will wait for caching to make
5008 * progress, or if our ideal search is over and we've
5009 * found somebody to start caching.
5010 */
5011 if (loop > LOOP_CACHING_NOWAIT ||
5012 (loop > LOOP_FIND_IDEAL &&
5013 atomic_read(&space_info->caching_threads) < 2)) {
5014 ret = cache_block_group(block_group, trans,
5015 orig_root, 0);
5016 BUG_ON(ret);
5017 }
5018 found_uncached_bg = true;
5019
5020 /*
5021 * If loop is set for cached only, try the next block
5022 * group.
5023 */
5024 if (loop == LOOP_FIND_IDEAL)
5025 goto loop;
5026 }
5027
5028 cached = block_group_cache_done(block_group);
5029 if (unlikely(!cached))
5030 found_uncached_bg = true;
5031
5032 if (unlikely(block_group->ro))
5033 goto loop;
5034
5035 /*
5036 * Ok, we want to try and use the cluster allocator, so let's look
5037 * there, unless we are on LOOP_NO_EMPTY_SIZE.  By that point we
5038 * will have tried the cluster allocator plenty of times and not
5039 * found anything, so we are likely way too fragmented for the
5040 * clustering stuff to find anything; just skip it and let the
5041 * allocator find whatever block it can.
5043 */
5044 if (last_ptr && loop < LOOP_NO_EMPTY_SIZE) {
5045 /*
5046 * the refill lock keeps out other
5047 * people trying to start a new cluster
5048 */
5049 spin_lock(&last_ptr->refill_lock);
5050 if (last_ptr->block_group &&
5051 (last_ptr->block_group->ro ||
5052 !block_group_bits(last_ptr->block_group, data))) {
5053 offset = 0;
5054 goto refill_cluster;
5055 }
5056
5057 offset = btrfs_alloc_from_cluster(block_group, last_ptr,
5058 num_bytes, search_start);
5059 if (offset) {
5060 /* we have a block, we're done */
5061 spin_unlock(&last_ptr->refill_lock);
5062 goto checks;
5063 }
5064
5065 spin_lock(&last_ptr->lock);
5066 /*
5067 * whoops, this cluster doesn't actually point to
5068 * this block group. Get a ref on the block
5069 * group it does point to and try again
5070 */
5071 if (!last_ptr_loop && last_ptr->block_group &&
5072 last_ptr->block_group != block_group) {
5073
5074 btrfs_put_block_group(block_group);
5075 block_group = last_ptr->block_group;
5076 btrfs_get_block_group(block_group);
5077 spin_unlock(&last_ptr->lock);
5078 spin_unlock(&last_ptr->refill_lock);
5079
5080 last_ptr_loop = 1;
5081 search_start = block_group->key.objectid;
5082 /*
5083 * we know this block group is properly
5084 * in the list because
5085 * btrfs_remove_block_group, drops the
5086 * cluster before it removes the block
5087 * group from the list
5088 */
5089 goto have_block_group;
5090 }
5091 spin_unlock(&last_ptr->lock);
5092 refill_cluster:
5093 /*
5094 * this cluster didn't work out, free it and
5095 * start over
5096 */
5097 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5098
5099 last_ptr_loop = 0;
5100
5101 /* allocate a cluster in this block group */
5102 ret = btrfs_find_space_cluster(trans, root,
5103 block_group, last_ptr,
5104 offset, num_bytes,
5105 empty_cluster + empty_size);
5106 if (ret == 0) {
5107 /*
5108 * now pull our allocation out of this
5109 * cluster
5110 */
5111 offset = btrfs_alloc_from_cluster(block_group,
5112 last_ptr, num_bytes,
5113 search_start);
5114 if (offset) {
5115 /* we found one, proceed */
5116 spin_unlock(&last_ptr->refill_lock);
5117 goto checks;
5118 }
5119 } else if (!cached && loop > LOOP_CACHING_NOWAIT
5120 && !failed_cluster_refill) {
5121 spin_unlock(&last_ptr->refill_lock);
5122
5123 failed_cluster_refill = true;
5124 wait_block_group_cache_progress(block_group,
5125 num_bytes + empty_cluster + empty_size);
5126 goto have_block_group;
5127 }
5128
5129 /*
5130 * at this point we either didn't find a cluster
5131 * or we weren't able to allocate a block from our
5132 * cluster. Free the cluster we've been trying
5133 * to use, and go to the next block group
5134 */
5135 btrfs_return_cluster_to_free_space(NULL, last_ptr);
5136 spin_unlock(&last_ptr->refill_lock);
5137 goto loop;
5138 }
5139
5140 offset = btrfs_find_space_for_alloc(block_group, search_start,
5141 num_bytes, empty_size);
5142 /*
5143 * If we didn't find a chunk, and we haven't failed on this
5144 * block group before, and this block group is in the middle of
5145 * caching and we are ok with waiting, then go ahead and wait
5146 * for progress to be made, and set failed_alloc to true.
5147 *
5148 * If failed_alloc is true then we've already waited on this
5149 * block group once and should move on to the next block group.
5150 */
5151 if (!offset && !failed_alloc && !cached &&
5152 loop > LOOP_CACHING_NOWAIT) {
5153 wait_block_group_cache_progress(block_group,
5154 num_bytes + empty_size);
5155 failed_alloc = true;
5156 goto have_block_group;
5157 } else if (!offset) {
5158 goto loop;
5159 }
5160 checks:
5161 search_start = stripe_align(root, offset);
5162 /* move on to the next group */
5163 if (search_start + num_bytes >= search_end) {
5164 btrfs_add_free_space(block_group, offset, num_bytes);
5165 goto loop;
5166 }
5167
5168 /* move on to the next group */
5169 if (search_start + num_bytes >
5170 block_group->key.objectid + block_group->key.offset) {
5171 btrfs_add_free_space(block_group, offset, num_bytes);
5172 goto loop;
5173 }
5174
5175 ins->objectid = search_start;
5176 ins->offset = num_bytes;
5177
5178 if (offset < search_start)
5179 btrfs_add_free_space(block_group, offset,
5180 search_start - offset);
5181 BUG_ON(offset > search_start);
5182
5183 ret = update_reserved_bytes(block_group, num_bytes, 1,
5184 (data & BTRFS_BLOCK_GROUP_DATA));
5185 if (ret == -EAGAIN) {
5186 btrfs_add_free_space(block_group, offset, num_bytes);
5187 goto loop;
5188 }
5189
5190 /* we are all good, let's return */
5191 ins->objectid = search_start;
5192 ins->offset = num_bytes;
5193
5194 if (offset < search_start)
5195 btrfs_add_free_space(block_group, offset,
5196 search_start - offset);
5197 BUG_ON(offset > search_start);
5198 break;
5199 loop:
5200 failed_cluster_refill = false;
5201 failed_alloc = false;
5202 BUG_ON(index != get_block_group_index(block_group));
5203 btrfs_put_block_group(block_group);
5204 }
5205 up_read(&space_info->groups_sem);
5206
5207 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
5208 goto search;
5209
5210 /* LOOP_FIND_IDEAL, only search caching/cached bg's, and don't wait
5211 * for them to make caching progress. Also
5212 * determine the best possible bg to cache
5213 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
5214 * caching kthreads as we move along
5215 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
5216 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
5217 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
5218 * again
5219 */
5220 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE &&
5221 (found_uncached_bg || empty_size || empty_cluster ||
5222 allowed_chunk_alloc)) {
5223 index = 0;
5224 if (loop == LOOP_FIND_IDEAL && found_uncached_bg) {
5225 found_uncached_bg = false;
5226 loop++;
5227 if (!ideal_cache_percent &&
5228 atomic_read(&space_info->caching_threads))
5229 goto search;
5230
5231 /*
5232 * One of the following two things has happened so far:
5233 *
5234 * 1) We found an ideal block group for caching that
5235 * is mostly full and will cache quickly, so we might
5236 * as well wait for it.
5237 *
5238 * 2) We searched for cached only and we didn't find
5239 * anything, and we didn't start any caching kthreads
5240 * either, so chances are we will loop through and
5241 * start a couple caching kthreads, and then come back
5242 * around and just wait for them. This will be slower
5243 * because we will have 2 caching kthreads reading at
5244 * the same time when we could have just started one
5245 * and waited for it to get far enough to give us an
5246 * allocation, so go ahead and go to the wait caching
5247 * loop.
5248 */
5249 loop = LOOP_CACHING_WAIT;
5250 search_start = ideal_cache_offset;
5251 ideal_cache_percent = 0;
5252 goto ideal_cache;
5253 } else if (loop == LOOP_FIND_IDEAL) {
5254 /*
5255 * Didn't find an uncached bg, wait on anything we find
5256 * next.
5257 */
5258 loop = LOOP_CACHING_WAIT;
5259 goto search;
5260 }
5261
5262 if (loop < LOOP_CACHING_WAIT) {
5263 loop++;
5264 goto search;
5265 }
5266
5267 if (loop == LOOP_ALLOC_CHUNK) {
5268 empty_size = 0;
5269 empty_cluster = 0;
5270 }
5271
5272 if (allowed_chunk_alloc) {
5273 ret = do_chunk_alloc(trans, root, num_bytes +
5274 2 * 1024 * 1024, data, 1);
5275 allowed_chunk_alloc = 0;
5276 done_chunk_alloc = 1;
5277 } else if (!done_chunk_alloc) {
5278 space_info->force_alloc = 1;
5279 }
5280
5281 if (loop < LOOP_NO_EMPTY_SIZE) {
5282 loop++;
5283 goto search;
5284 }
5285 ret = -ENOSPC;
5286 } else if (!ins->objectid) {
5287 ret = -ENOSPC;
5288 }
5289
5290 /* we found what we needed */
5291 if (ins->objectid) {
5292 if (!(data & BTRFS_BLOCK_GROUP_DATA))
5293 trans->block_group = block_group->key.objectid;
5294
5295 btrfs_put_block_group(block_group);
5296 ret = 0;
5297 }
5298
5299 return ret;
5300 }
5301
5302 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
5303 int dump_block_groups)
5304 {
5305 struct btrfs_block_group_cache *cache;
5306 int index = 0;
5307
5308 spin_lock(&info->lock);
5309 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
5310 (unsigned long long)(info->total_bytes - info->bytes_used -
5311 info->bytes_pinned - info->bytes_reserved -
5312 info->bytes_readonly),
5313 (info->full) ? "" : "not ");
5314 printk(KERN_INFO "space_info total=%llu, used=%llu, pinned=%llu, "
5315 "reserved=%llu, may_use=%llu, readonly=%llu\n",
5316 (unsigned long long)info->total_bytes,
5317 (unsigned long long)info->bytes_used,
5318 (unsigned long long)info->bytes_pinned,
5319 (unsigned long long)info->bytes_reserved,
5320 (unsigned long long)info->bytes_may_use,
5321 (unsigned long long)info->bytes_readonly);
5322 spin_unlock(&info->lock);
5323
5324 if (!dump_block_groups)
5325 return;
5326
5327 down_read(&info->groups_sem);
5328 again:
5329 list_for_each_entry(cache, &info->block_groups[index], list) {
5330 spin_lock(&cache->lock);
5331 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
5332 "%llu pinned %llu reserved\n",
5333 (unsigned long long)cache->key.objectid,
5334 (unsigned long long)cache->key.offset,
5335 (unsigned long long)btrfs_block_group_used(&cache->item),
5336 (unsigned long long)cache->pinned,
5337 (unsigned long long)cache->reserved);
5338 btrfs_dump_free_space(cache, bytes);
5339 spin_unlock(&cache->lock);
5340 }
5341 if (++index < BTRFS_NR_RAID_TYPES)
5342 goto again;
5343 up_read(&info->groups_sem);
5344 }
5345
5346 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
5347 struct btrfs_root *root,
5348 u64 num_bytes, u64 min_alloc_size,
5349 u64 empty_size, u64 hint_byte,
5350 u64 search_end, struct btrfs_key *ins,
5351 u64 data)
5352 {
5353 int ret;
5354 u64 search_start = 0;
5355
5356 data = btrfs_get_alloc_profile(root, data);
5357 again:
5358 /*
5359 * the only place that sets empty_size is btrfs_realloc_node, which
5360 * is not called recursively on allocations
5361 */
5362 if (empty_size || root->ref_cows)
5363 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
5364 num_bytes + 2 * 1024 * 1024, data, 0);
5365
5366 WARN_ON(num_bytes < root->sectorsize);
5367 ret = find_free_extent(trans, root, num_bytes, empty_size,
5368 search_start, search_end, hint_byte,
5369 ins, data);
5370
5371 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
5372 num_bytes = num_bytes >> 1;
5373 num_bytes = num_bytes & ~(root->sectorsize - 1);
5374 num_bytes = max(num_bytes, min_alloc_size);
5375 do_chunk_alloc(trans, root->fs_info->extent_root,
5376 num_bytes, data, 1);
5377 goto again;
5378 }
5379 if (ret == -ENOSPC) {
5380 struct btrfs_space_info *sinfo;
5381
5382 sinfo = __find_space_info(root->fs_info, data);
5383 printk(KERN_ERR "btrfs allocation failed flags %llu, "
5384 "wanted %llu\n", (unsigned long long)data,
5385 (unsigned long long)num_bytes);
5386 dump_space_info(sinfo, num_bytes, 1);
5387 }
5388
5389 return ret;
5390 }
5391
5392 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
5393 {
5394 struct btrfs_block_group_cache *cache;
5395 int ret = 0;
5396
5397 cache = btrfs_lookup_block_group(root->fs_info, start);
5398 if (!cache) {
5399 printk(KERN_ERR "Unable to find block group for %llu\n",
5400 (unsigned long long)start);
5401 return -ENOSPC;
5402 }
5403
5404 ret = btrfs_discard_extent(root, start, len);
5405
5406 btrfs_add_free_space(cache, start, len);
5407 update_reserved_bytes(cache, len, 0, 1);
5408 btrfs_put_block_group(cache);
5409
5410 return ret;
5411 }
5412
5413 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5414 struct btrfs_root *root,
5415 u64 parent, u64 root_objectid,
5416 u64 flags, u64 owner, u64 offset,
5417 struct btrfs_key *ins, int ref_mod)
5418 {
5419 int ret;
5420 struct btrfs_fs_info *fs_info = root->fs_info;
5421 struct btrfs_extent_item *extent_item;
5422 struct btrfs_extent_inline_ref *iref;
5423 struct btrfs_path *path;
5424 struct extent_buffer *leaf;
5425 int type;
5426 u32 size;
5427
5428 if (parent > 0)
5429 type = BTRFS_SHARED_DATA_REF_KEY;
5430 else
5431 type = BTRFS_EXTENT_DATA_REF_KEY;
5432
5433 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
5434
5435 path = btrfs_alloc_path();
5436 BUG_ON(!path);
5437
5438 path->leave_spinning = 1;
5439 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5440 ins, size);
5441 BUG_ON(ret);
5442
5443 leaf = path->nodes[0];
5444 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5445 struct btrfs_extent_item);
5446 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
5447 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5448 btrfs_set_extent_flags(leaf, extent_item,
5449 flags | BTRFS_EXTENT_FLAG_DATA);
5450
5451 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
5452 btrfs_set_extent_inline_ref_type(leaf, iref, type);
5453 if (parent > 0) {
5454 struct btrfs_shared_data_ref *ref;
5455 ref = (struct btrfs_shared_data_ref *)(iref + 1);
5456 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5457 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
5458 } else {
5459 struct btrfs_extent_data_ref *ref;
5460 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
5461 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
5462 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
5463 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
5464 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
5465 }
5466
5467 btrfs_mark_buffer_dirty(path->nodes[0]);
5468 btrfs_free_path(path);
5469
5470 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5471 if (ret) {
5472 printk(KERN_ERR "btrfs update block group failed for %llu "
5473 "%llu\n", (unsigned long long)ins->objectid,
5474 (unsigned long long)ins->offset);
5475 BUG();
5476 }
5477 return ret;
5478 }
5479
5480 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
5481 struct btrfs_root *root,
5482 u64 parent, u64 root_objectid,
5483 u64 flags, struct btrfs_disk_key *key,
5484 int level, struct btrfs_key *ins)
5485 {
5486 int ret;
5487 struct btrfs_fs_info *fs_info = root->fs_info;
5488 struct btrfs_extent_item *extent_item;
5489 struct btrfs_tree_block_info *block_info;
5490 struct btrfs_extent_inline_ref *iref;
5491 struct btrfs_path *path;
5492 struct extent_buffer *leaf;
5493 u32 size = sizeof(*extent_item) + sizeof(*block_info) + sizeof(*iref);
5494
5495 path = btrfs_alloc_path();
5496 BUG_ON(!path);
5497
5498 path->leave_spinning = 1;
5499 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
5500 ins, size);
5501 BUG_ON(ret);
5502
5503 leaf = path->nodes[0];
5504 extent_item = btrfs_item_ptr(leaf, path->slots[0],
5505 struct btrfs_extent_item);
5506 btrfs_set_extent_refs(leaf, extent_item, 1);
5507 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
5508 btrfs_set_extent_flags(leaf, extent_item,
5509 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
5510 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
5511
5512 btrfs_set_tree_block_key(leaf, block_info, key);
5513 btrfs_set_tree_block_level(leaf, block_info, level);
5514
5515 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
5516 if (parent > 0) {
5517 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
5518 btrfs_set_extent_inline_ref_type(leaf, iref,
5519 BTRFS_SHARED_BLOCK_REF_KEY);
5520 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
5521 } else {
5522 btrfs_set_extent_inline_ref_type(leaf, iref,
5523 BTRFS_TREE_BLOCK_REF_KEY);
5524 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
5525 }
5526
5527 btrfs_mark_buffer_dirty(leaf);
5528 btrfs_free_path(path);
5529
5530 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
5531 if (ret) {
5532 printk(KERN_ERR "btrfs update block group failed for %llu "
5533 "%llu\n", (unsigned long long)ins->objectid,
5534 (unsigned long long)ins->offset);
5535 BUG();
5536 }
5537 return ret;
5538 }
5539
5540 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
5541 struct btrfs_root *root,
5542 u64 root_objectid, u64 owner,
5543 u64 offset, struct btrfs_key *ins)
5544 {
5545 int ret;
5546
5547 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
5548
5549 ret = btrfs_add_delayed_data_ref(trans, ins->objectid, ins->offset,
5550 0, root_objectid, owner, offset,
5551 BTRFS_ADD_DELAYED_EXTENT, NULL);
5552 return ret;
5553 }
5554
5555 /*
5556 * this is used by the tree logging recovery code. It records that
5557 * an extent has been allocated and makes sure to clear the free
5558 * space cache bits as well
5559 */
5560 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
5561 struct btrfs_root *root,
5562 u64 root_objectid, u64 owner, u64 offset,
5563 struct btrfs_key *ins)
5564 {
5565 int ret;
5566 struct btrfs_block_group_cache *block_group;
5567 struct btrfs_caching_control *caching_ctl;
5568 u64 start = ins->objectid;
5569 u64 num_bytes = ins->offset;
5570
5571 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
5572 cache_block_group(block_group, trans, NULL, 0);
5573 caching_ctl = get_caching_control(block_group);
5574
5575 if (!caching_ctl) {
5576 BUG_ON(!block_group_cache_done(block_group));
5577 ret = btrfs_remove_free_space(block_group, start, num_bytes);
5578 BUG_ON(ret);
5579 } else {
5580 mutex_lock(&caching_ctl->mutex);
5581
5582 if (start >= caching_ctl->progress) {
5583 ret = add_excluded_extent(root, start, num_bytes);
5584 BUG_ON(ret);
5585 } else if (start + num_bytes <= caching_ctl->progress) {
5586 ret = btrfs_remove_free_space(block_group,
5587 start, num_bytes);
5588 BUG_ON(ret);
5589 } else {
5590 num_bytes = caching_ctl->progress - start;
5591 ret = btrfs_remove_free_space(block_group,
5592 start, num_bytes);
5593 BUG_ON(ret);
5594
5595 start = caching_ctl->progress;
5596 num_bytes = ins->objectid + ins->offset -
5597 caching_ctl->progress;
5598 ret = add_excluded_extent(root, start, num_bytes);
5599 BUG_ON(ret);
5600 }
5601
5602 mutex_unlock(&caching_ctl->mutex);
5603 put_caching_control(caching_ctl);
5604 }
5605
5606 ret = update_reserved_bytes(block_group, ins->offset, 1, 1);
5607 BUG_ON(ret);
5608 btrfs_put_block_group(block_group);
5609 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
5610 0, owner, offset, ins, 1);
5611 return ret;
5612 }
5613
5614 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
5615 struct btrfs_root *root,
5616 u64 bytenr, u32 blocksize,
5617 int level)
5618 {
5619 struct extent_buffer *buf;
5620
5621 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
5622 if (!buf)
5623 return ERR_PTR(-ENOMEM);
5624 btrfs_set_header_generation(buf, trans->transid);
5625 btrfs_set_buffer_lockdep_class(buf, level);
5626 btrfs_tree_lock(buf);
5627 clean_tree_block(trans, root, buf);
5628
5629 btrfs_set_lock_blocking(buf);
5630 btrfs_set_buffer_uptodate(buf);
5631
5632 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
5633 /*
5634 * we allow two log transactions at a time, use different
5635 * EXTENT bit to differentiate dirty pages.
5636 */
5637 if (root->log_transid % 2 == 0)
5638 set_extent_dirty(&root->dirty_log_pages, buf->start,
5639 buf->start + buf->len - 1, GFP_NOFS);
5640 else
5641 set_extent_new(&root->dirty_log_pages, buf->start,
5642 buf->start + buf->len - 1, GFP_NOFS);
5643 } else {
5644 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
5645 buf->start + buf->len - 1, GFP_NOFS);
5646 }
5647 trans->blocks_used++;
5648 /* this returns a buffer locked for blocking */
5649 return buf;
5650 }
5651
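/*
 * pick the rsv to charge a new tree block to: try the root's own
 * rsv first, falling back to the global reserve so COW can still
 * make progress when a reservation runs dry.
 */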
5652 static struct btrfs_block_rsv *
5653 use_block_rsv(struct btrfs_trans_handle *trans,
5654 struct btrfs_root *root, u32 blocksize)
5655 {
5656 struct btrfs_block_rsv *block_rsv;
5657 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5658 int ret;
5659
5660 block_rsv = get_block_rsv(trans, root);
5661
5662 if (block_rsv->size == 0) {
5663 ret = reserve_metadata_bytes(trans, root, block_rsv,
5664 blocksize, 0);
5665 /*
5666 * If we couldn't reserve metadata bytes try and use some from
5667 * the global reserve.
5668 */
5669 if (ret && block_rsv != global_rsv) {
5670 ret = block_rsv_use_bytes(global_rsv, blocksize);
5671 if (!ret)
5672 return global_rsv;
5673 return ERR_PTR(ret);
5674 } else if (ret) {
5675 return ERR_PTR(ret);
5676 }
5677 return block_rsv;
5678 }
5679
5680 ret = block_rsv_use_bytes(block_rsv, blocksize);
5681 if (!ret)
5682 return block_rsv;
5683 if (ret) {
5684 WARN_ON(1);
5685 ret = reserve_metadata_bytes(trans, root, block_rsv, blocksize,
5686 0);
5687 if (!ret) {
5688 spin_lock(&block_rsv->lock);
5689 block_rsv->size += blocksize;
5690 spin_unlock(&block_rsv->lock);
5691 return block_rsv;
5692 } else if (ret && block_rsv != global_rsv) {
5693 ret = block_rsv_use_bytes(global_rsv, blocksize);
5694 if (!ret)
5695 return global_rsv;
5696 }
5697 }
5698
5699 return ERR_PTR(-ENOSPC);
5700 }
5701
5702 static void unuse_block_rsv(struct btrfs_block_rsv *block_rsv, u32 blocksize)
5703 {
5704 block_rsv_add_bytes(block_rsv, blocksize, 0);
5705 block_rsv_release_bytes(block_rsv, NULL, 0);
5706 }
5707
5708 /*
5709 * finds a free extent and does all the dirty work required for allocation.
5710 * returns the key for the extent through ins, and a tree buffer for
5711 * the first block of the extent through buf.
5712 *
5713 * returns the tree buffer or NULL.
5714 */
5715 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
5716 struct btrfs_root *root, u32 blocksize,
5717 u64 parent, u64 root_objectid,
5718 struct btrfs_disk_key *key, int level,
5719 u64 hint, u64 empty_size)
5720 {
5721 struct btrfs_key ins;
5722 struct btrfs_block_rsv *block_rsv;
5723 struct extent_buffer *buf;
5724 u64 flags = 0;
5725 int ret;
5726
5727
5728 block_rsv = use_block_rsv(trans, root, blocksize);
5729 if (IS_ERR(block_rsv))
5730 return ERR_CAST(block_rsv);
5731
5732 ret = btrfs_reserve_extent(trans, root, blocksize, blocksize,
5733 empty_size, hint, (u64)-1, &ins, 0);
5734 if (ret) {
5735 unuse_block_rsv(block_rsv, blocksize);
5736 return ERR_PTR(ret);
5737 }
5738
5739 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
5740 blocksize, level);
5741 BUG_ON(IS_ERR(buf));
5742
5743 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
5744 if (parent == 0)
5745 parent = ins.objectid;
5746 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
5747 } else
5748 BUG_ON(parent > 0);
5749
5750 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
5751 struct btrfs_delayed_extent_op *extent_op;
5752 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
5753 BUG_ON(!extent_op);
5754 if (key)
5755 memcpy(&extent_op->key, key, sizeof(extent_op->key));
5756 else
5757 memset(&extent_op->key, 0, sizeof(extent_op->key));
5758 extent_op->flags_to_set = flags;
5759 extent_op->update_key = 1;
5760 extent_op->update_flags = 1;
5761 extent_op->is_data = 0;
5762
5763 ret = btrfs_add_delayed_tree_ref(trans, ins.objectid,
5764 ins.offset, parent, root_objectid,
5765 level, BTRFS_ADD_DELAYED_EXTENT,
5766 extent_op);
5767 BUG_ON(ret);
5768 }
5769 return buf;
5770 }
5771
5772 struct walk_control {
5773 u64 refs[BTRFS_MAX_LEVEL];
5774 u64 flags[BTRFS_MAX_LEVEL];
5775 struct btrfs_key update_progress;
5776 int stage;
5777 int level;
5778 int shared_level;
5779 int update_ref;
5780 int keep_locks;
5781 int reada_slot;
5782 int reada_count;
5783 };
5784
5785 #define DROP_REFERENCE 1
5786 #define UPDATE_BACKREF 2
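/*
 * A minimal sketch (compiled out; example_init_walk_control is a
 * hypothetical helper) of setting up a walk_control for the two-stage
 * walk.  The field values mirror what btrfs_drop_snapshot() uses further
 * below: the walk starts in DROP_REFERENCE, and do_walk_down() flips it
 * to UPDATE_BACKREF when a shared subtree needs its back refs updated.
 */
#if 0
static void example_init_walk_control(struct walk_control *wc,
				      struct btrfs_root *root,
				      int level, int update_ref)
{
	memset(wc, 0, sizeof(*wc));
	wc->level = level;
	wc->shared_level = -1;
	wc->stage = DROP_REFERENCE;
	wc->update_ref = update_ref;
	wc->keep_locks = 0;
	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
}
#endif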
5787
5788 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
5789 struct btrfs_root *root,
5790 struct walk_control *wc,
5791 struct btrfs_path *path)
5792 {
5793 u64 bytenr;
5794 u64 generation;
5795 u64 refs;
5796 u64 flags;
5797 u32 nritems;
5798 u32 blocksize;
5799 struct btrfs_key key;
5800 struct extent_buffer *eb;
5801 int ret;
5802 int slot;
5803 int nread = 0;
5804
5805 if (path->slots[wc->level] < wc->reada_slot) {
5806 wc->reada_count = wc->reada_count * 2 / 3;
5807 wc->reada_count = max(wc->reada_count, 2);
5808 } else {
5809 wc->reada_count = wc->reada_count * 3 / 2;
5810 wc->reada_count = min_t(int, wc->reada_count,
5811 BTRFS_NODEPTRS_PER_BLOCK(root));
5812 }
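	/*
	 * Example: with reada_count == 32, a walker that has not yet
	 * consumed the previous window (slot < reada_slot) shrinks it to
	 * 32 * 2 / 3 = 21 (never below 2); one that has consumed it grows
	 * the window to 32 * 3 / 2 = 48, capped at
	 * BTRFS_NODEPTRS_PER_BLOCK(root).
	 */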
5813
5814 eb = path->nodes[wc->level];
5815 nritems = btrfs_header_nritems(eb);
5816 blocksize = btrfs_level_size(root, wc->level - 1);
5817
5818 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
5819 if (nread >= wc->reada_count)
5820 break;
5821
5822 cond_resched();
5823 bytenr = btrfs_node_blockptr(eb, slot);
5824 generation = btrfs_node_ptr_generation(eb, slot);
5825
5826 if (slot == path->slots[wc->level])
5827 goto reada;
5828
5829 if (wc->stage == UPDATE_BACKREF &&
5830 generation <= root->root_key.offset)
5831 continue;
5832
5833 /* We don't lock the tree block, it's OK to be racy here */
5834 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5835 &refs, &flags);
5836 BUG_ON(ret);
5837 BUG_ON(refs == 0);
5838
5839 if (wc->stage == DROP_REFERENCE) {
5840 if (refs == 1)
5841 goto reada;
5842
5843 if (wc->level == 1 &&
5844 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5845 continue;
5846 if (!wc->update_ref ||
5847 generation <= root->root_key.offset)
5848 continue;
5849 btrfs_node_key_to_cpu(eb, &key, slot);
5850 ret = btrfs_comp_cpu_keys(&key,
5851 &wc->update_progress);
5852 if (ret < 0)
5853 continue;
5854 } else {
5855 if (wc->level == 1 &&
5856 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
5857 continue;
5858 }
5859 reada:
5860 ret = readahead_tree_block(root, bytenr, blocksize,
5861 generation);
5862 if (ret)
5863 break;
5864 nread++;
5865 }
5866 wc->reada_slot = slot;
5867 }
5868
5869 /*
5870 * helper to process tree block while walking down the tree.
5871 *
5872 * when wc->stage == UPDATE_BACKREF, this function updates
5873 * back refs for pointers in the block.
5874 *
5875 * NOTE: return value 1 means we should stop walking down.
5876 */
5877 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
5878 struct btrfs_root *root,
5879 struct btrfs_path *path,
5880 struct walk_control *wc, int lookup_info)
5881 {
5882 int level = wc->level;
5883 struct extent_buffer *eb = path->nodes[level];
5884 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
5885 int ret;
5886
5887 if (wc->stage == UPDATE_BACKREF &&
5888 btrfs_header_owner(eb) != root->root_key.objectid)
5889 return 1;
5890
5891 /*
5892 * when the reference count of a tree block is 1, it won't increase
5893 * again. once the full backref flag is set, we never clear it.
5894 */
5895 if (lookup_info &&
5896 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
5897 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
5898 BUG_ON(!path->locks[level]);
5899 ret = btrfs_lookup_extent_info(trans, root,
5900 eb->start, eb->len,
5901 &wc->refs[level],
5902 &wc->flags[level]);
5903 BUG_ON(ret);
5904 BUG_ON(wc->refs[level] == 0);
5905 }
5906
5907 if (wc->stage == DROP_REFERENCE) {
5908 if (wc->refs[level] > 1)
5909 return 1;
5910
5911 if (path->locks[level] && !wc->keep_locks) {
5912 btrfs_tree_unlock(eb);
5913 path->locks[level] = 0;
5914 }
5915 return 0;
5916 }
5917
5918 /* wc->stage == UPDATE_BACKREF */
5919 if (!(wc->flags[level] & flag)) {
5920 BUG_ON(!path->locks[level]);
5921 ret = btrfs_inc_ref(trans, root, eb, 1);
5922 BUG_ON(ret);
5923 ret = btrfs_dec_ref(trans, root, eb, 0);
5924 BUG_ON(ret);
5925 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
5926 eb->len, flag, 0);
5927 BUG_ON(ret);
5928 wc->flags[level] |= flag;
5929 }
5930
5931 /*
5932 * the block is shared by multiple trees, so it's not good to
5933 * keep the tree lock
5934 */
5935 if (path->locks[level] && level > 0) {
5936 btrfs_tree_unlock(eb);
5937 path->locks[level] = 0;
5938 }
5939 return 0;
5940 }
5941
5942 /*
5943 * helper to process tree block pointer.
5944 *
5945 * when wc->stage == DROP_REFERENCE, this function checks
5946 * reference count of the block pointed to. if the block
5947 * is shared and we need to update back refs for the subtree
5948 * rooted at the block, this function changes wc->stage to
5949 * UPDATE_BACKREF. if the block is shared and there is no
5950 * need to update back refs, this function drops the reference
5951 * to the block.
5952 *
5953 * NOTE: return value 1 means we should stop walking down.
5954 */
5955 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
5956 struct btrfs_root *root,
5957 struct btrfs_path *path,
5958 struct walk_control *wc, int *lookup_info)
5959 {
5960 u64 bytenr;
5961 u64 generation;
5962 u64 parent;
5963 u32 blocksize;
5964 struct btrfs_key key;
5965 struct extent_buffer *next;
5966 int level = wc->level;
5967 int reada = 0;
5968 int ret = 0;
5969
5970 generation = btrfs_node_ptr_generation(path->nodes[level],
5971 path->slots[level]);
5972 /*
5973 * if the lower level block was created before the snapshot
5974 * was created, we know there is no need to update back refs
5975 * for the subtree
5976 */
5977 if (wc->stage == UPDATE_BACKREF &&
5978 generation <= root->root_key.offset) {
5979 *lookup_info = 1;
5980 return 1;
5981 }
5982
5983 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
5984 blocksize = btrfs_level_size(root, level - 1);
5985
5986 next = btrfs_find_tree_block(root, bytenr, blocksize);
5987 if (!next) {
5988 next = btrfs_find_create_tree_block(root, bytenr, blocksize);
5989 if (!next)
5990 return -ENOMEM;
5991 reada = 1;
5992 }
5993 btrfs_tree_lock(next);
5994 btrfs_set_lock_blocking(next);
5995
5996 ret = btrfs_lookup_extent_info(trans, root, bytenr, blocksize,
5997 &wc->refs[level - 1],
5998 &wc->flags[level - 1]);
5999 BUG_ON(ret);
6000 BUG_ON(wc->refs[level - 1] == 0);
6001 *lookup_info = 0;
6002
6003 if (wc->stage == DROP_REFERENCE) {
6004 if (wc->refs[level - 1] > 1) {
6005 if (level == 1 &&
6006 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6007 goto skip;
6008
6009 if (!wc->update_ref ||
6010 generation <= root->root_key.offset)
6011 goto skip;
6012
6013 btrfs_node_key_to_cpu(path->nodes[level], &key,
6014 path->slots[level]);
6015 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
6016 if (ret < 0)
6017 goto skip;
6018
6019 wc->stage = UPDATE_BACKREF;
6020 wc->shared_level = level - 1;
6021 }
6022 } else {
6023 if (level == 1 &&
6024 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
6025 goto skip;
6026 }
6027
6028 if (!btrfs_buffer_uptodate(next, generation)) {
6029 btrfs_tree_unlock(next);
6030 free_extent_buffer(next);
6031 next = NULL;
6032 *lookup_info = 1;
6033 }
6034
6035 if (!next) {
6036 if (reada && level == 1)
6037 reada_walk_down(trans, root, wc, path);
6038 next = read_tree_block(root, bytenr, blocksize, generation);
		if (!next)
			return -EIO;
6039 btrfs_tree_lock(next);
6040 btrfs_set_lock_blocking(next);
6041 }
6042
6043 level--;
6044 BUG_ON(level != btrfs_header_level(next));
6045 path->nodes[level] = next;
6046 path->slots[level] = 0;
6047 path->locks[level] = 1;
6048 wc->level = level;
6049 if (wc->level == 1)
6050 wc->reada_slot = 0;
6051 return 0;
6052 skip:
6053 wc->refs[level - 1] = 0;
6054 wc->flags[level - 1] = 0;
6055 if (wc->stage == DROP_REFERENCE) {
6056 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
6057 parent = path->nodes[level]->start;
6058 } else {
6059 BUG_ON(root->root_key.objectid !=
6060 btrfs_header_owner(path->nodes[level]));
6061 parent = 0;
6062 }
6063
6064 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
6065 root->root_key.objectid, level - 1, 0);
6066 BUG_ON(ret);
6067 }
6068 btrfs_tree_unlock(next);
6069 free_extent_buffer(next);
6070 *lookup_info = 1;
6071 return 1;
6072 }
6073
6074 /*
6075 * helper to process tree block while walking up the tree.
6076 *
6077 * when wc->stage == DROP_REFERENCE, this function drops
6078 * reference count on the block.
6079 *
6080 * when wc->stage == UPDATE_BACKREF, this function changes
6081 * wc->stage back to DROP_REFERENCE if we changed wc->stage
6082 * to UPDATE_BACKREF previously while processing the block.
6083 *
6084 * NOTE: return value 1 means we should stop walking up.
6085 */
6086 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
6087 struct btrfs_root *root,
6088 struct btrfs_path *path,
6089 struct walk_control *wc)
6090 {
6091 int ret;
6092 int level = wc->level;
6093 struct extent_buffer *eb = path->nodes[level];
6094 u64 parent = 0;
6095
6096 if (wc->stage == UPDATE_BACKREF) {
6097 BUG_ON(wc->shared_level < level);
6098 if (level < wc->shared_level)
6099 goto out;
6100
6101 ret = find_next_key(path, level + 1, &wc->update_progress);
6102 if (ret > 0)
6103 wc->update_ref = 0;
6104
6105 wc->stage = DROP_REFERENCE;
6106 wc->shared_level = -1;
6107 path->slots[level] = 0;
6108
6109 /*
6110 * check reference count again if the block isn't locked.
6111 * we should start walking down the tree again if reference
6112 * count is one.
6113 */
6114 if (!path->locks[level]) {
6115 BUG_ON(level == 0);
6116 btrfs_tree_lock(eb);
6117 btrfs_set_lock_blocking(eb);
6118 path->locks[level] = 1;
6119
6120 ret = btrfs_lookup_extent_info(trans, root,
6121 eb->start, eb->len,
6122 &wc->refs[level],
6123 &wc->flags[level]);
6124 BUG_ON(ret);
6125 BUG_ON(wc->refs[level] == 0);
6126 if (wc->refs[level] == 1) {
6127 btrfs_tree_unlock(eb);
6128 path->locks[level] = 0;
6129 return 1;
6130 }
6131 }
6132 }
6133
6134 /* wc->stage == DROP_REFERENCE */
6135 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
6136
6137 if (wc->refs[level] == 1) {
6138 if (level == 0) {
6139 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6140 ret = btrfs_dec_ref(trans, root, eb, 1);
6141 else
6142 ret = btrfs_dec_ref(trans, root, eb, 0);
6143 BUG_ON(ret);
6144 }
6145 /* make block locked assertion in clean_tree_block happy */
6146 if (!path->locks[level] &&
6147 btrfs_header_generation(eb) == trans->transid) {
6148 btrfs_tree_lock(eb);
6149 btrfs_set_lock_blocking(eb);
6150 path->locks[level] = 1;
6151 }
6152 clean_tree_block(trans, root, eb);
6153 }
6154
6155 if (eb == root->node) {
6156 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6157 parent = eb->start;
6158 else
6159 BUG_ON(root->root_key.objectid !=
6160 btrfs_header_owner(eb));
6161 } else {
6162 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
6163 parent = path->nodes[level + 1]->start;
6164 else
6165 BUG_ON(root->root_key.objectid !=
6166 btrfs_header_owner(path->nodes[level + 1]));
6167 }
6168
6169 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
6170 out:
6171 wc->refs[level] = 0;
6172 wc->flags[level] = 0;
6173 return 0;
6174 }
6175
6176 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
6177 struct btrfs_root *root,
6178 struct btrfs_path *path,
6179 struct walk_control *wc)
6180 {
6181 int level = wc->level;
6182 int lookup_info = 1;
6183 int ret;
6184
6185 while (level >= 0) {
6186 ret = walk_down_proc(trans, root, path, wc, lookup_info);
6187 if (ret > 0)
6188 break;
6189
6190 if (level == 0)
6191 break;
6192
6193 if (path->slots[level] >=
6194 btrfs_header_nritems(path->nodes[level]))
6195 break;
6196
6197 ret = do_walk_down(trans, root, path, wc, &lookup_info);
6198 if (ret > 0) {
6199 path->slots[level]++;
6200 continue;
6201 } else if (ret < 0)
6202 return ret;
6203 level = wc->level;
6204 }
6205 return 0;
6206 }
6207
6208 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
6209 struct btrfs_root *root,
6210 struct btrfs_path *path,
6211 struct walk_control *wc, int max_level)
6212 {
6213 int level = wc->level;
6214 int ret;
6215
6216 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
6217 while (level < max_level && path->nodes[level]) {
6218 wc->level = level;
6219 if (path->slots[level] + 1 <
6220 btrfs_header_nritems(path->nodes[level])) {
6221 path->slots[level]++;
6222 return 0;
6223 } else {
6224 ret = walk_up_proc(trans, root, path, wc);
6225 if (ret > 0)
6226 return 0;
6227
6228 if (path->locks[level]) {
6229 btrfs_tree_unlock(path->nodes[level]);
6230 path->locks[level] = 0;
6231 }
6232 free_extent_buffer(path->nodes[level]);
6233 path->nodes[level] = NULL;
6234 level++;
6235 }
6236 }
6237 return 1;
6238 }
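/*
 * A minimal sketch (compiled out) of how the two walkers above cooperate:
 * descend until walk_down_proc()/do_walk_down() stop us, then climb back
 * up freeing blocks, until walk_up_tree() reports the root has been
 * processed.  btrfs_drop_snapshot() below drives the same loop with
 * restart points and transaction management added.
 */
#if 0
	while (1) {
		ret = walk_down_tree(trans, root, path, wc);
		if (ret < 0)
			break;
		ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
		if (ret != 0)
			break;	/* > 0: whole tree processed, < 0: error */
	}
#endif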
6239
6240 /*
6241 * drop a subvolume tree.
6242 *
6243 * this function traverses the tree freeing any blocks that are only
6244 * referenced by the tree.
6245 *
6246 * when a shared tree block is found, this function decreases its
6247 * reference count by one. if update_ref is true, this function
6248 * also makes sure backrefs for the shared block and all lower level
6249 * blocks are properly updated.
6250 */
6251 int btrfs_drop_snapshot(struct btrfs_root *root,
6252 struct btrfs_block_rsv *block_rsv, int update_ref)
6253 {
6254 struct btrfs_path *path;
6255 struct btrfs_trans_handle *trans;
6256 struct btrfs_root *tree_root = root->fs_info->tree_root;
6257 struct btrfs_root_item *root_item = &root->root_item;
6258 struct walk_control *wc;
6259 struct btrfs_key key;
6260 int err = 0;
6261 int ret;
6262 int level;
6263
6264 path = btrfs_alloc_path();
6265 BUG_ON(!path);
6266
6267 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6268 BUG_ON(!wc);
6269
6270 trans = btrfs_start_transaction(tree_root, 0);
6271 BUG_ON(IS_ERR(trans));
6272
6273 if (block_rsv)
6274 trans->block_rsv = block_rsv;
6275
6276 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
6277 level = btrfs_header_level(root->node);
6278 path->nodes[level] = btrfs_lock_root_node(root);
6279 btrfs_set_lock_blocking(path->nodes[level]);
6280 path->slots[level] = 0;
6281 path->locks[level] = 1;
6282 memset(&wc->update_progress, 0,
6283 sizeof(wc->update_progress));
6284 } else {
6285 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
6286 memcpy(&wc->update_progress, &key,
6287 sizeof(wc->update_progress));
6288
6289 level = root_item->drop_level;
6290 BUG_ON(level == 0);
6291 path->lowest_level = level;
6292 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
6293 path->lowest_level = 0;
6294 if (ret < 0) {
6295 err = ret;
6296 goto out;
6297 }
6298 WARN_ON(ret > 0);
6299
6300 /*
6301 * unlock our path, this is safe because only this
6302 * function is allowed to delete this snapshot
6303 */
6304 btrfs_unlock_up_safe(path, 0);
6305
6306 level = btrfs_header_level(root->node);
6307 while (1) {
6308 btrfs_tree_lock(path->nodes[level]);
6309 btrfs_set_lock_blocking(path->nodes[level]);
6310
6311 ret = btrfs_lookup_extent_info(trans, root,
6312 path->nodes[level]->start,
6313 path->nodes[level]->len,
6314 &wc->refs[level],
6315 &wc->flags[level]);
6316 BUG_ON(ret);
6317 BUG_ON(wc->refs[level] == 0);
6318
6319 if (level == root_item->drop_level)
6320 break;
6321
6322 btrfs_tree_unlock(path->nodes[level]);
6323 WARN_ON(wc->refs[level] != 1);
6324 level--;
6325 }
6326 }
6327
6328 wc->level = level;
6329 wc->shared_level = -1;
6330 wc->stage = DROP_REFERENCE;
6331 wc->update_ref = update_ref;
6332 wc->keep_locks = 0;
6333 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6334
6335 while (1) {
6336 ret = walk_down_tree(trans, root, path, wc);
6337 if (ret < 0) {
6338 err = ret;
6339 break;
6340 }
6341
6342 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
6343 if (ret < 0) {
6344 err = ret;
6345 break;
6346 }
6347
6348 if (ret > 0) {
6349 BUG_ON(wc->stage != DROP_REFERENCE);
6350 break;
6351 }
6352
6353 if (wc->stage == DROP_REFERENCE) {
6354 level = wc->level;
6355 btrfs_node_key(path->nodes[level],
6356 &root_item->drop_progress,
6357 path->slots[level]);
6358 root_item->drop_level = level;
6359 }
6360
6361 BUG_ON(wc->level == 0);
6362 if (btrfs_should_end_transaction(trans, tree_root)) {
6363 ret = btrfs_update_root(trans, tree_root,
6364 &root->root_key,
6365 root_item);
6366 BUG_ON(ret);
6367
6368 btrfs_end_transaction_throttle(trans, tree_root);
6369 trans = btrfs_start_transaction(tree_root, 0);
6370 BUG_ON(IS_ERR(trans));
6371 if (block_rsv)
6372 trans->block_rsv = block_rsv;
6373 }
6374 }
6375 btrfs_release_path(root, path);
6376 BUG_ON(err);
6377
6378 ret = btrfs_del_root(trans, tree_root, &root->root_key);
6379 BUG_ON(ret);
6380
6381 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
6382 ret = btrfs_find_last_root(tree_root, root->root_key.objectid,
6383 NULL, NULL);
6384 BUG_ON(ret < 0);
6385 if (ret > 0) {
6386 /* if we fail to delete the orphan item this time
6387 * around, it'll get picked up the next time.
6388 *
6389 * The most common failure here is just -ENOENT.
6390 */
6391 btrfs_del_orphan_item(trans, tree_root,
6392 root->root_key.objectid);
6393 }
6394 }
6395
6396 if (root->in_radix) {
6397 btrfs_free_fs_root(tree_root->fs_info, root);
6398 } else {
6399 free_extent_buffer(root->node);
6400 free_extent_buffer(root->commit_root);
6401 kfree(root);
6402 }
6403 out:
6404 btrfs_end_transaction_throttle(trans, tree_root);
6405 kfree(wc);
6406 btrfs_free_path(path);
6407 return err;
6408 }
6409
6410 /*
6411 * drop subtree rooted at tree block 'node'.
6412 *
6413 * NOTE: this function will unlock and release tree block 'node'
6414 */
6415 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
6416 struct btrfs_root *root,
6417 struct extent_buffer *node,
6418 struct extent_buffer *parent)
6419 {
6420 struct btrfs_path *path;
6421 struct walk_control *wc;
6422 int level;
6423 int parent_level;
6424 int ret = 0;
6425 int wret;
6426
6427 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
6428
6429 path = btrfs_alloc_path();
6430 BUG_ON(!path);
6431
6432 wc = kzalloc(sizeof(*wc), GFP_NOFS);
6433 BUG_ON(!wc);
6434
6435 btrfs_assert_tree_locked(parent);
6436 parent_level = btrfs_header_level(parent);
6437 extent_buffer_get(parent);
6438 path->nodes[parent_level] = parent;
6439 path->slots[parent_level] = btrfs_header_nritems(parent);
6440
6441 btrfs_assert_tree_locked(node);
6442 level = btrfs_header_level(node);
6443 path->nodes[level] = node;
6444 path->slots[level] = 0;
6445 path->locks[level] = 1;
6446
6447 wc->refs[parent_level] = 1;
6448 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
6449 wc->level = level;
6450 wc->shared_level = -1;
6451 wc->stage = DROP_REFERENCE;
6452 wc->update_ref = 0;
6453 wc->keep_locks = 1;
6454 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
6455
6456 while (1) {
6457 wret = walk_down_tree(trans, root, path, wc);
6458 if (wret < 0) {
6459 ret = wret;
6460 break;
6461 }
6462
6463 wret = walk_up_tree(trans, root, path, wc, parent_level);
6464 if (wret < 0)
6465 ret = wret;
6466 if (wret != 0)
6467 break;
6468 }
6469
6470 kfree(wc);
6471 btrfs_free_path(path);
6472 return ret;
6473 }
6474
6475 #if 0
6476 static unsigned long calc_ra(unsigned long start, unsigned long last,
6477 unsigned long nr)
6478 {
6479 return min(last, start + nr - 1);
6480 }
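/*
 * Example: calc_ra(10, 20, 32) == min(20, 10 + 32 - 1) == 20, i.e. the
 * readahead window is clamped so it never runs past the last index.
 */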
6481
6482 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
6483 u64 len)
6484 {
6485 u64 page_start;
6486 u64 page_end;
6487 unsigned long first_index;
6488 unsigned long last_index;
6489 unsigned long i;
6490 struct page *page;
6491 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
6492 struct file_ra_state *ra;
6493 struct btrfs_ordered_extent *ordered;
6494 unsigned int total_read = 0;
6495 unsigned int total_dirty = 0;
6496 int ret = 0;
6497
6498 ra = kzalloc(sizeof(*ra), GFP_NOFS);
6499 if (!ra)
6500 return -ENOMEM;
6501
6502 mutex_lock(&inode->i_mutex);
6503 first_index = start >> PAGE_CACHE_SHIFT;
6504 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
6505
6506 /* make sure the dirty trick played by the caller works */
6507 ret = invalidate_inode_pages2_range(inode->i_mapping,
6508 first_index, last_index);
6509 if (ret)
6510 goto out_unlock;
6511
6512 file_ra_state_init(ra, inode->i_mapping);
6513
6514 for (i = first_index; i <= last_index; i++) {
6515 if (total_read % ra->ra_pages == 0) {
6516 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
6517 calc_ra(i, last_index, ra->ra_pages));
6518 }
6519 total_read++;
6520 again:
6521 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
6522 BUG();
6523 page = grab_cache_page(inode->i_mapping, i);
6524 if (!page) {
6525 ret = -ENOMEM;
6526 goto out_unlock;
6527 }
6528 if (!PageUptodate(page)) {
6529 btrfs_readpage(NULL, page);
6530 lock_page(page);
6531 if (!PageUptodate(page)) {
6532 unlock_page(page);
6533 page_cache_release(page);
6534 ret = -EIO;
6535 goto out_unlock;
6536 }
6537 }
6538 wait_on_page_writeback(page);
6539
6540 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
6541 page_end = page_start + PAGE_CACHE_SIZE - 1;
6542 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
6543
6544 ordered = btrfs_lookup_ordered_extent(inode, page_start);
6545 if (ordered) {
6546 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6547 unlock_page(page);
6548 page_cache_release(page);
6549 btrfs_start_ordered_extent(inode, ordered, 1);
6550 btrfs_put_ordered_extent(ordered);
6551 goto again;
6552 }
6553 set_page_extent_mapped(page);
6554
6555 if (i == first_index)
6556 set_extent_bits(io_tree, page_start, page_end,
6557 EXTENT_BOUNDARY, GFP_NOFS);
6558 btrfs_set_extent_delalloc(inode, page_start, page_end);
6559
6560 set_page_dirty(page);
6561 total_dirty++;
6562
6563 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
6564 unlock_page(page);
6565 page_cache_release(page);
6566 }
6567
6568 out_unlock:
6569 kfree(ra);
6570 mutex_unlock(&inode->i_mutex);
6571 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
6572 return ret;
6573 }
6574
6575 static noinline int relocate_data_extent(struct inode *reloc_inode,
6576 struct btrfs_key *extent_key,
6577 u64 offset)
6578 {
6579 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6580 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
6581 struct extent_map *em;
6582 u64 start = extent_key->objectid - offset;
6583 u64 end = start + extent_key->offset - 1;
6584
6585 em = alloc_extent_map(GFP_NOFS);
6586 BUG_ON(!em);
6587
6588 em->start = start;
6589 em->len = extent_key->offset;
6590 em->block_len = extent_key->offset;
6591 em->block_start = extent_key->objectid;
6592 em->bdev = root->fs_info->fs_devices->latest_bdev;
6593 set_bit(EXTENT_FLAG_PINNED, &em->flags);
6594
6595 /* set up the extent map to cheat btrfs_readpage */
6596 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6597 while (1) {
6598 int ret;
6599 write_lock(&em_tree->lock);
6600 ret = add_extent_mapping(em_tree, em);
6601 write_unlock(&em_tree->lock);
6602 if (ret != -EEXIST) {
6603 free_extent_map(em);
6604 break;
6605 }
6606 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
6607 }
6608 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
6609
6610 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
6611 }
6612
6613 struct btrfs_ref_path {
6614 u64 extent_start;
6615 u64 nodes[BTRFS_MAX_LEVEL];
6616 u64 root_objectid;
6617 u64 root_generation;
6618 u64 owner_objectid;
6619 u32 num_refs;
6620 int lowest_level;
6621 int current_level;
6622 int shared_level;
6623
6624 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
6625 u64 new_nodes[BTRFS_MAX_LEVEL];
6626 };
6627
6628 struct disk_extent {
6629 u64 ram_bytes;
6630 u64 disk_bytenr;
6631 u64 disk_num_bytes;
6632 u64 offset;
6633 u64 num_bytes;
6634 u8 compression;
6635 u8 encryption;
6636 u16 other_encoding;
6637 };
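/*
 * disk_extent caches the fields of an on-disk btrfs_file_extent_item:
 * disk_bytenr/disk_num_bytes locate the extent on disk, while
 * offset/num_bytes describe the slice of it that the file actually
 * references.  get_new_locations() below fills one entry per file
 * extent it finds.
 */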
6638
6639 static int is_cowonly_root(u64 root_objectid)
6640 {
6641 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
6642 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
6643 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
6644 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
6645 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
6646 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
6647 return 1;
6648 return 0;
6649 }
6650
6651 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
6652 struct btrfs_root *extent_root,
6653 struct btrfs_ref_path *ref_path,
6654 int first_time)
6655 {
6656 struct extent_buffer *leaf;
6657 struct btrfs_path *path;
6658 struct btrfs_extent_ref *ref;
6659 struct btrfs_key key;
6660 struct btrfs_key found_key;
6661 u64 bytenr;
6662 u32 nritems;
6663 int level;
6664 int ret = 1;
6665
6666 path = btrfs_alloc_path();
6667 if (!path)
6668 return -ENOMEM;
6669
6670 if (first_time) {
6671 ref_path->lowest_level = -1;
6672 ref_path->current_level = -1;
6673 ref_path->shared_level = -1;
6674 goto walk_up;
6675 }
6676 walk_down:
6677 level = ref_path->current_level - 1;
6678 while (level >= -1) {
6679 u64 parent;
6680 if (level < ref_path->lowest_level)
6681 break;
6682
6683 if (level >= 0)
6684 bytenr = ref_path->nodes[level];
6685 else
6686 bytenr = ref_path->extent_start;
6687 BUG_ON(bytenr == 0);
6688
6689 parent = ref_path->nodes[level + 1];
6690 ref_path->nodes[level + 1] = 0;
6691 ref_path->current_level = level;
6692 BUG_ON(parent == 0);
6693
6694 key.objectid = bytenr;
6695 key.offset = parent + 1;
6696 key.type = BTRFS_EXTENT_REF_KEY;
6697
6698 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6699 if (ret < 0)
6700 goto out;
6701 BUG_ON(ret == 0);
6702
6703 leaf = path->nodes[0];
6704 nritems = btrfs_header_nritems(leaf);
6705 if (path->slots[0] >= nritems) {
6706 ret = btrfs_next_leaf(extent_root, path);
6707 if (ret < 0)
6708 goto out;
6709 if (ret > 0)
6710 goto next;
6711 leaf = path->nodes[0];
6712 }
6713
6714 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6715 if (found_key.objectid == bytenr &&
6716 found_key.type == BTRFS_EXTENT_REF_KEY) {
6717 if (level < ref_path->shared_level)
6718 ref_path->shared_level = level;
6719 goto found;
6720 }
6721 next:
6722 level--;
6723 btrfs_release_path(extent_root, path);
6724 cond_resched();
6725 }
6726 /* reached lowest level */
6727 ret = 1;
6728 goto out;
6729 walk_up:
6730 level = ref_path->current_level;
6731 while (level < BTRFS_MAX_LEVEL - 1) {
6732 u64 ref_objectid;
6733
6734 if (level >= 0)
6735 bytenr = ref_path->nodes[level];
6736 else
6737 bytenr = ref_path->extent_start;
6738
6739 BUG_ON(bytenr == 0);
6740
6741 key.objectid = bytenr;
6742 key.offset = 0;
6743 key.type = BTRFS_EXTENT_REF_KEY;
6744
6745 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
6746 if (ret < 0)
6747 goto out;
6748
6749 leaf = path->nodes[0];
6750 nritems = btrfs_header_nritems(leaf);
6751 if (path->slots[0] >= nritems) {
6752 ret = btrfs_next_leaf(extent_root, path);
6753 if (ret < 0)
6754 goto out;
6755 if (ret > 0) {
6756 /* the extent was freed by someone */
6757 if (ref_path->lowest_level == level)
6758 goto out;
6759 btrfs_release_path(extent_root, path);
6760 goto walk_down;
6761 }
6762 leaf = path->nodes[0];
6763 }
6764
6765 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6766 if (found_key.objectid != bytenr ||
6767 found_key.type != BTRFS_EXTENT_REF_KEY) {
6768 /* the extent was freed by someone */
6769 if (ref_path->lowest_level == level) {
6770 ret = 1;
6771 goto out;
6772 }
6773 btrfs_release_path(extent_root, path);
6774 goto walk_down;
6775 }
6776 found:
6777 ref = btrfs_item_ptr(leaf, path->slots[0],
6778 struct btrfs_extent_ref);
6779 ref_objectid = btrfs_ref_objectid(leaf, ref);
6780 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
6781 if (first_time) {
6782 level = (int)ref_objectid;
6783 BUG_ON(level >= BTRFS_MAX_LEVEL);
6784 ref_path->lowest_level = level;
6785 ref_path->current_level = level;
6786 ref_path->nodes[level] = bytenr;
6787 } else {
6788 WARN_ON(ref_objectid != level);
6789 }
6790 } else {
6791 WARN_ON(level != -1);
6792 }
6793 first_time = 0;
6794
6795 if (ref_path->lowest_level == level) {
6796 ref_path->owner_objectid = ref_objectid;
6797 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
6798 }
6799
6800 /*
6801 * the block is a tree root or the block isn't in a reference
6802 * counted tree.
6803 */
6804 if (found_key.objectid == found_key.offset ||
6805 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
6806 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6807 ref_path->root_generation =
6808 btrfs_ref_generation(leaf, ref);
6809 if (level < 0) {
6810 /* special reference from the tree log */
6811 ref_path->nodes[0] = found_key.offset;
6812 ref_path->current_level = 0;
6813 }
6814 ret = 0;
6815 goto out;
6816 }
6817
6818 level++;
6819 BUG_ON(ref_path->nodes[level] != 0);
6820 ref_path->nodes[level] = found_key.offset;
6821 ref_path->current_level = level;
6822
6823 /*
6824 * the reference was created in the running transaction,
6825 * no need to continue walking up.
6826 */
6827 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
6828 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
6829 ref_path->root_generation =
6830 btrfs_ref_generation(leaf, ref);
6831 ret = 0;
6832 goto out;
6833 }
6834
6835 btrfs_release_path(extent_root, path);
6836 cond_resched();
6837 }
6838 /* reached max tree level, but no tree root found. */
6839 BUG();
6840 out:
6841 btrfs_free_path(path);
6842 return ret;
6843 }
6844
6845 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
6846 struct btrfs_root *extent_root,
6847 struct btrfs_ref_path *ref_path,
6848 u64 extent_start)
6849 {
6850 memset(ref_path, 0, sizeof(*ref_path));
6851 ref_path->extent_start = extent_start;
6852
6853 return __next_ref_path(trans, extent_root, ref_path, 1);
6854 }
6855
6856 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
6857 struct btrfs_root *extent_root,
6858 struct btrfs_ref_path *ref_path)
6859 {
6860 return __next_ref_path(trans, extent_root, ref_path, 0);
6861 }
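/*
 * A minimal sketch (compiled out) of walking every reference path of an
 * extent with the two helpers above; relocate_one_extent() at the end of
 * this file uses the same first/next pattern.
 */
#if 0
	ret = btrfs_first_ref_path(trans, extent_root, ref_path, bytenr);
	while (ret == 0) {
		/* ref_path->root_objectid now names one referencing root */
		ret = btrfs_next_ref_path(trans, extent_root, ref_path);
	}
	/* ret > 0 means no more paths, ret < 0 means error */
#endif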
6862
6863 static noinline int get_new_locations(struct inode *reloc_inode,
6864 struct btrfs_key *extent_key,
6865 u64 offset, int no_fragment,
6866 struct disk_extent **extents,
6867 int *nr_extents)
6868 {
6869 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
6870 struct btrfs_path *path;
6871 struct btrfs_file_extent_item *fi;
6872 struct extent_buffer *leaf;
6873 struct disk_extent *exts = *extents;
6874 struct btrfs_key found_key;
6875 u64 cur_pos;
6876 u64 last_byte;
6877 u32 nritems;
6878 int nr = 0;
6879 int max = *nr_extents;
6880 int ret;
6881
6882 WARN_ON(!no_fragment && *extents);
6883 if (!exts) {
6884 max = 1;
6885 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
6886 if (!exts)
6887 return -ENOMEM;
6888 }
6889
6890 path = btrfs_alloc_path();
6891 BUG_ON(!path);
6892
6893 cur_pos = extent_key->objectid - offset;
6894 last_byte = extent_key->objectid + extent_key->offset;
6895 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
6896 cur_pos, 0);
6897 if (ret < 0)
6898 goto out;
6899 if (ret > 0) {
6900 ret = -ENOENT;
6901 goto out;
6902 }
6903
6904 while (1) {
6905 leaf = path->nodes[0];
6906 nritems = btrfs_header_nritems(leaf);
6907 if (path->slots[0] >= nritems) {
6908 ret = btrfs_next_leaf(root, path);
6909 if (ret < 0)
6910 goto out;
6911 if (ret > 0)
6912 break;
6913 leaf = path->nodes[0];
6914 }
6915
6916 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6917 if (found_key.offset != cur_pos ||
6918 found_key.type != BTRFS_EXTENT_DATA_KEY ||
6919 found_key.objectid != reloc_inode->i_ino)
6920 break;
6921
6922 fi = btrfs_item_ptr(leaf, path->slots[0],
6923 struct btrfs_file_extent_item);
6924 if (btrfs_file_extent_type(leaf, fi) !=
6925 BTRFS_FILE_EXTENT_REG ||
6926 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
6927 break;
6928
6929 if (nr == max) {
6930 struct disk_extent *old = exts;
6931 max *= 2;
6932 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
			if (!exts) {
				/* keep the old array so the error path frees it */
				exts = old;
				ret = -ENOMEM;
				goto out;
			}
6933 memcpy(exts, old, sizeof(*exts) * nr);
6934 if (old != *extents)
6935 kfree(old);
6936 }
6937
6938 exts[nr].disk_bytenr =
6939 btrfs_file_extent_disk_bytenr(leaf, fi);
6940 exts[nr].disk_num_bytes =
6941 btrfs_file_extent_disk_num_bytes(leaf, fi);
6942 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
6943 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
6944 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
6945 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
6946 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
6947 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
6948 fi);
6949 BUG_ON(exts[nr].offset > 0);
6950 BUG_ON(exts[nr].compression || exts[nr].encryption);
6951 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
6952
6953 cur_pos += exts[nr].num_bytes;
6954 nr++;
6955
6956 if (cur_pos + offset >= last_byte)
6957 break;
6958
6959 if (no_fragment) {
6960 ret = 1;
6961 goto out;
6962 }
6963 path->slots[0]++;
6964 }
6965
6966 BUG_ON(cur_pos + offset > last_byte);
6967 if (cur_pos + offset < last_byte) {
6968 ret = -ENOENT;
6969 goto out;
6970 }
6971 ret = 0;
6972 out:
6973 btrfs_free_path(path);
6974 if (ret) {
6975 if (exts != *extents)
6976 kfree(exts);
6977 } else {
6978 *extents = exts;
6979 *nr_extents = nr;
6980 }
6981 return ret;
6982 }
6983
6984 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
6985 struct btrfs_root *root,
6986 struct btrfs_path *path,
6987 struct btrfs_key *extent_key,
6988 struct btrfs_key *leaf_key,
6989 struct btrfs_ref_path *ref_path,
6990 struct disk_extent *new_extents,
6991 int nr_extents)
6992 {
6993 struct extent_buffer *leaf;
6994 struct btrfs_file_extent_item *fi;
6995 struct inode *inode = NULL;
6996 struct btrfs_key key;
6997 u64 lock_start = 0;
6998 u64 lock_end = 0;
6999 u64 num_bytes;
7000 u64 ext_offset;
7001 u64 search_end = (u64)-1;
7002 u32 nritems;
7003 int nr_scanned = 0;
7004 int extent_locked = 0;
7005 int extent_type;
7006 int ret;
7007
7008 memcpy(&key, leaf_key, sizeof(key));
7009 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
7010 if (key.objectid < ref_path->owner_objectid ||
7011 (key.objectid == ref_path->owner_objectid &&
7012 key.type < BTRFS_EXTENT_DATA_KEY)) {
7013 key.objectid = ref_path->owner_objectid;
7014 key.type = BTRFS_EXTENT_DATA_KEY;
7015 key.offset = 0;
7016 }
7017 }
7018
7019 while (1) {
7020 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
7021 if (ret < 0)
7022 goto out;
7023
7024 leaf = path->nodes[0];
7025 nritems = btrfs_header_nritems(leaf);
7026 next:
7027 if (extent_locked && ret > 0) {
7028 /*
7029 * the file extent item was modified by someone
7030 * before the extent got locked.
7031 */
7032 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7033 lock_end, GFP_NOFS);
7034 extent_locked = 0;
7035 }
7036
7037 if (path->slots[0] >= nritems) {
7038 if (++nr_scanned > 2)
7039 break;
7040
7041 BUG_ON(extent_locked);
7042 ret = btrfs_next_leaf(root, path);
7043 if (ret < 0)
7044 goto out;
7045 if (ret > 0)
7046 break;
7047 leaf = path->nodes[0];
7048 nritems = btrfs_header_nritems(leaf);
7049 }
7050
7051 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
7052
7053 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
7054 if ((key.objectid > ref_path->owner_objectid) ||
7055 (key.objectid == ref_path->owner_objectid &&
7056 key.type > BTRFS_EXTENT_DATA_KEY) ||
7057 key.offset >= search_end)
7058 break;
7059 }
7060
7061 if (inode && key.objectid != inode->i_ino) {
7062 BUG_ON(extent_locked);
7063 btrfs_release_path(root, path);
7064 mutex_unlock(&inode->i_mutex);
7065 iput(inode);
7066 inode = NULL;
7067 continue;
7068 }
7069
7070 if (key.type != BTRFS_EXTENT_DATA_KEY) {
7071 path->slots[0]++;
7072 ret = 1;
7073 goto next;
7074 }
7075 fi = btrfs_item_ptr(leaf, path->slots[0],
7076 struct btrfs_file_extent_item);
7077 extent_type = btrfs_file_extent_type(leaf, fi);
7078 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
7079 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
7080 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
7081 extent_key->objectid)) {
7082 path->slots[0]++;
7083 ret = 1;
7084 goto next;
7085 }
7086
7087 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7088 ext_offset = btrfs_file_extent_offset(leaf, fi);
7089
7090 if (search_end == (u64)-1) {
7091 search_end = key.offset - ext_offset +
7092 btrfs_file_extent_ram_bytes(leaf, fi);
7093 }
7094
7095 if (!extent_locked) {
7096 lock_start = key.offset;
7097 lock_end = lock_start + num_bytes - 1;
7098 } else {
7099 if (lock_start > key.offset ||
7100 lock_end + 1 < key.offset + num_bytes) {
7101 unlock_extent(&BTRFS_I(inode)->io_tree,
7102 lock_start, lock_end, GFP_NOFS);
7103 extent_locked = 0;
7104 }
7105 }
7106
7107 if (!inode) {
7108 btrfs_release_path(root, path);
7109
7110 inode = btrfs_iget_locked(root->fs_info->sb,
7111 key.objectid, root);
			if (!inode) {
				key.offset = (u64)-1;
				goto skip;
			}
7112 if (inode->i_state & I_NEW) {
7113 BTRFS_I(inode)->root = root;
7114 BTRFS_I(inode)->location.objectid =
7115 key.objectid;
7116 BTRFS_I(inode)->location.type =
7117 BTRFS_INODE_ITEM_KEY;
7118 BTRFS_I(inode)->location.offset = 0;
7119 btrfs_read_locked_inode(inode);
7120 unlock_new_inode(inode);
7121 }
7122 /*
7123 * some code calls btrfs_commit_transaction while
7124 * holding the i_mutex, so we can't use mutex_lock
7125 * here.
7126 */
7127 if (is_bad_inode(inode) ||
7128 !mutex_trylock(&inode->i_mutex)) {
7129 iput(inode);
7130 inode = NULL;
7131 key.offset = (u64)-1;
7132 goto skip;
7133 }
7134 }
7135
7136 if (!extent_locked) {
7137 struct btrfs_ordered_extent *ordered;
7138
7139 btrfs_release_path(root, path);
7140
7141 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7142 lock_end, GFP_NOFS);
7143 ordered = btrfs_lookup_first_ordered_extent(inode,
7144 lock_end);
7145 if (ordered &&
7146 ordered->file_offset <= lock_end &&
7147 ordered->file_offset + ordered->len > lock_start) {
7148 unlock_extent(&BTRFS_I(inode)->io_tree,
7149 lock_start, lock_end, GFP_NOFS);
7150 btrfs_start_ordered_extent(inode, ordered, 1);
7151 btrfs_put_ordered_extent(ordered);
7152 key.offset += num_bytes;
7153 goto skip;
7154 }
7155 if (ordered)
7156 btrfs_put_ordered_extent(ordered);
7157
7158 extent_locked = 1;
7159 continue;
7160 }
7161
7162 if (nr_extents == 1) {
7163 /* update extent pointer in place */
7164 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7165 new_extents[0].disk_bytenr);
7166 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7167 new_extents[0].disk_num_bytes);
7168 btrfs_mark_buffer_dirty(leaf);
7169
7170 btrfs_drop_extent_cache(inode, key.offset,
7171 key.offset + num_bytes - 1, 0);
7172
7173 ret = btrfs_inc_extent_ref(trans, root,
7174 new_extents[0].disk_bytenr,
7175 new_extents[0].disk_num_bytes,
7176 leaf->start,
7177 root->root_key.objectid,
7178 trans->transid,
7179 key.objectid);
7180 BUG_ON(ret);
7181
7182 ret = btrfs_free_extent(trans, root,
7183 extent_key->objectid,
7184 extent_key->offset,
7185 leaf->start,
7186 btrfs_header_owner(leaf),
7187 btrfs_header_generation(leaf),
7188 key.objectid, 0);
7189 BUG_ON(ret);
7190
7191 btrfs_release_path(root, path);
7192 key.offset += num_bytes;
7193 } else {
7194 BUG();
7195 #if 0
7196 u64 alloc_hint;
7197 u64 extent_len;
7198 int i;
7199 /*
7200 * drop the old extent pointer first, then insert the
7201 * new pointers one by one
7202 */
7203 btrfs_release_path(root, path);
7204 ret = btrfs_drop_extents(trans, root, inode, key.offset,
7205 key.offset + num_bytes,
7206 key.offset, &alloc_hint);
7207 BUG_ON(ret);
7208
7209 for (i = 0; i < nr_extents; i++) {
7210 if (ext_offset >= new_extents[i].num_bytes) {
7211 ext_offset -= new_extents[i].num_bytes;
7212 continue;
7213 }
7214 extent_len = min(new_extents[i].num_bytes -
7215 ext_offset, num_bytes);
7216
7217 ret = btrfs_insert_empty_item(trans, root,
7218 path, &key,
7219 sizeof(*fi));
7220 BUG_ON(ret);
7221
7222 leaf = path->nodes[0];
7223 fi = btrfs_item_ptr(leaf, path->slots[0],
7224 struct btrfs_file_extent_item);
7225 btrfs_set_file_extent_generation(leaf, fi,
7226 trans->transid);
7227 btrfs_set_file_extent_type(leaf, fi,
7228 BTRFS_FILE_EXTENT_REG);
7229 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7230 new_extents[i].disk_bytenr);
7231 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7232 new_extents[i].disk_num_bytes);
7233 btrfs_set_file_extent_ram_bytes(leaf, fi,
7234 new_extents[i].ram_bytes);
7235
7236 btrfs_set_file_extent_compression(leaf, fi,
7237 new_extents[i].compression);
7238 btrfs_set_file_extent_encryption(leaf, fi,
7239 new_extents[i].encryption);
7240 btrfs_set_file_extent_other_encoding(leaf, fi,
7241 new_extents[i].other_encoding);
7242
7243 btrfs_set_file_extent_num_bytes(leaf, fi,
7244 extent_len);
7245 ext_offset += new_extents[i].offset;
7246 btrfs_set_file_extent_offset(leaf, fi,
7247 ext_offset);
7248 btrfs_mark_buffer_dirty(leaf);
7249
7250 btrfs_drop_extent_cache(inode, key.offset,
7251 key.offset + extent_len - 1, 0);
7252
7253 ret = btrfs_inc_extent_ref(trans, root,
7254 new_extents[i].disk_bytenr,
7255 new_extents[i].disk_num_bytes,
7256 leaf->start,
7257 root->root_key.objectid,
7258 trans->transid, key.objectid);
7259 BUG_ON(ret);
7260 btrfs_release_path(root, path);
7261
7262 inode_add_bytes(inode, extent_len);
7263
7264 ext_offset = 0;
7265 num_bytes -= extent_len;
7266 key.offset += extent_len;
7267
7268 if (num_bytes == 0)
7269 break;
7270 }
7271 BUG_ON(i >= nr_extents);
7272 #endif
7273 }
7274
7275 if (extent_locked) {
7276 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7277 lock_end, GFP_NOFS);
7278 extent_locked = 0;
7279 }
7280 skip:
7281 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
7282 key.offset >= search_end)
7283 break;
7284
7285 cond_resched();
7286 }
7287 ret = 0;
7288 out:
7289 btrfs_release_path(root, path);
7290 if (inode) {
7291 mutex_unlock(&inode->i_mutex);
7292 if (extent_locked) {
7293 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
7294 lock_end, GFP_NOFS);
7295 }
7296 iput(inode);
7297 }
7298 return ret;
7299 }
7300
7301 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
7302 struct btrfs_root *root,
7303 struct extent_buffer *buf, u64 orig_start)
7304 {
7305 int level;
7306 int ret;
7307
7308 BUG_ON(btrfs_header_generation(buf) != trans->transid);
7309 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
7310
7311 level = btrfs_header_level(buf);
7312 if (level == 0) {
7313 struct btrfs_leaf_ref *ref;
7314 struct btrfs_leaf_ref *orig_ref;
7315
7316 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
7317 if (!orig_ref)
7318 return -ENOENT;
7319
7320 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
7321 if (!ref) {
7322 btrfs_free_leaf_ref(root, orig_ref);
7323 return -ENOMEM;
7324 }
7325
7326 ref->nritems = orig_ref->nritems;
7327 memcpy(ref->extents, orig_ref->extents,
7328 sizeof(ref->extents[0]) * ref->nritems);
7329
7330 btrfs_free_leaf_ref(root, orig_ref);
7331
7332 ref->root_gen = trans->transid;
7333 ref->bytenr = buf->start;
7334 ref->owner = btrfs_header_owner(buf);
7335 ref->generation = btrfs_header_generation(buf);
7336
7337 ret = btrfs_add_leaf_ref(root, ref, 0);
7338 WARN_ON(ret);
7339 btrfs_free_leaf_ref(root, ref);
7340 }
7341 return 0;
7342 }
7343
7344 static noinline int invalidate_extent_cache(struct btrfs_root *root,
7345 struct extent_buffer *leaf,
7346 struct btrfs_block_group_cache *group,
7347 struct btrfs_root *target_root)
7348 {
7349 struct btrfs_key key;
7350 struct inode *inode = NULL;
7351 struct btrfs_file_extent_item *fi;
7352 struct extent_state *cached_state = NULL;
7353 u64 num_bytes;
7354 u64 skip_objectid = 0;
7355 u32 nritems;
7356 u32 i;
7357
7358 nritems = btrfs_header_nritems(leaf);
7359 for (i = 0; i < nritems; i++) {
7360 btrfs_item_key_to_cpu(leaf, &key, i);
7361 if (key.objectid == skip_objectid ||
7362 key.type != BTRFS_EXTENT_DATA_KEY)
7363 continue;
7364 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7365 if (btrfs_file_extent_type(leaf, fi) ==
7366 BTRFS_FILE_EXTENT_INLINE)
7367 continue;
7368 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
7369 continue;
7370 if (!inode || inode->i_ino != key.objectid) {
7371 iput(inode);
7372 inode = btrfs_ilookup(target_root->fs_info->sb,
7373 key.objectid, target_root, 1);
7374 }
7375 if (!inode) {
7376 skip_objectid = key.objectid;
7377 continue;
7378 }
7379 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
7380
7381 lock_extent_bits(&BTRFS_I(inode)->io_tree, key.offset,
7382 key.offset + num_bytes - 1, 0, &cached_state,
7383 GFP_NOFS);
7384 btrfs_drop_extent_cache(inode, key.offset,
7385 key.offset + num_bytes - 1, 1);
7386 unlock_extent_cached(&BTRFS_I(inode)->io_tree, key.offset,
7387 key.offset + num_bytes - 1, &cached_state,
7388 GFP_NOFS);
7389 cond_resched();
7390 }
7391 iput(inode);
7392 return 0;
7393 }
7394
7395 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
7396 struct btrfs_root *root,
7397 struct extent_buffer *leaf,
7398 struct btrfs_block_group_cache *group,
7399 struct inode *reloc_inode)
7400 {
7401 struct btrfs_key key;
7402 struct btrfs_key extent_key;
7403 struct btrfs_file_extent_item *fi;
7404 struct btrfs_leaf_ref *ref;
7405 struct disk_extent *new_extent;
7406 u64 bytenr;
7407 u64 num_bytes;
7408 u32 nritems;
7409 u32 i;
7410 int ext_index;
7411 int nr_extent;
7412 int ret;
7413
7414 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
7415 BUG_ON(!new_extent);
7416
7417 ref = btrfs_lookup_leaf_ref(root, leaf->start);
7418 BUG_ON(!ref);
7419
7420 ext_index = -1;
7421 nritems = btrfs_header_nritems(leaf);
7422 for (i = 0; i < nritems; i++) {
7423 btrfs_item_key_to_cpu(leaf, &key, i);
7424 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
7425 continue;
7426 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
7427 if (btrfs_file_extent_type(leaf, fi) ==
7428 BTRFS_FILE_EXTENT_INLINE)
7429 continue;
7430 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
7431 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
7432 if (bytenr == 0)
7433 continue;
7434
7435 ext_index++;
7436 if (bytenr >= group->key.objectid + group->key.offset ||
7437 bytenr + num_bytes <= group->key.objectid)
7438 continue;
7439
7440 extent_key.objectid = bytenr;
7441 extent_key.offset = num_bytes;
7442 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
7443 nr_extent = 1;
7444 ret = get_new_locations(reloc_inode, &extent_key,
7445 group->key.objectid, 1,
7446 &new_extent, &nr_extent);
7447 if (ret > 0)
7448 continue;
7449 BUG_ON(ret < 0);
7450
7451 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
7452 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
7453 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
7454 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
7455
7456 btrfs_set_file_extent_disk_bytenr(leaf, fi,
7457 new_extent->disk_bytenr);
7458 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
7459 new_extent->disk_num_bytes);
7460 btrfs_mark_buffer_dirty(leaf);
7461
7462 ret = btrfs_inc_extent_ref(trans, root,
7463 new_extent->disk_bytenr,
7464 new_extent->disk_num_bytes,
7465 leaf->start,
7466 root->root_key.objectid,
7467 trans->transid, key.objectid);
7468 BUG_ON(ret);
7469
7470 ret = btrfs_free_extent(trans, root,
7471 bytenr, num_bytes, leaf->start,
7472 btrfs_header_owner(leaf),
7473 btrfs_header_generation(leaf),
7474 key.objectid, 0);
7475 BUG_ON(ret);
7476 cond_resched();
7477 }
7478 kfree(new_extent);
7479 BUG_ON(ext_index + 1 != ref->nritems);
7480 btrfs_free_leaf_ref(root, ref);
7481 return 0;
7482 }
7483
7484 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
7485 struct btrfs_root *root)
7486 {
7487 struct btrfs_root *reloc_root;
7488 int ret;
7489
7490 if (root->reloc_root) {
7491 reloc_root = root->reloc_root;
7492 root->reloc_root = NULL;
7493 list_add(&reloc_root->dead_list,
7494 &root->fs_info->dead_reloc_roots);
7495
7496 btrfs_set_root_bytenr(&reloc_root->root_item,
7497 reloc_root->node->start);
7498 btrfs_set_root_level(&reloc_root->root_item,
7499 btrfs_header_level(reloc_root->node));
7500 memset(&reloc_root->root_item.drop_progress, 0,
7501 sizeof(struct btrfs_disk_key));
7502 reloc_root->root_item.drop_level = 0;
7503
7504 ret = btrfs_update_root(trans, root->fs_info->tree_root,
7505 &reloc_root->root_key,
7506 &reloc_root->root_item);
7507 BUG_ON(ret);
7508 }
7509 return 0;
7510 }
7511
7512 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
7513 {
7514 struct btrfs_trans_handle *trans;
7515 struct btrfs_root *reloc_root;
7516 struct btrfs_root *prev_root = NULL;
7517 struct list_head dead_roots;
7518 int ret;
7519 unsigned long nr;
7520
7521 INIT_LIST_HEAD(&dead_roots);
7522 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
7523
7524 while (!list_empty(&dead_roots)) {
7525 reloc_root = list_entry(dead_roots.prev,
7526 struct btrfs_root, dead_list);
7527 list_del_init(&reloc_root->dead_list);
7528
7529 BUG_ON(reloc_root->commit_root != NULL);
7530 while (1) {
7531 trans = btrfs_join_transaction(root, 1);
7532 BUG_ON(IS_ERR(trans));
7533
7534 mutex_lock(&root->fs_info->drop_mutex);
7535 ret = btrfs_drop_snapshot(trans, reloc_root);
7536 if (ret != -EAGAIN)
7537 break;
7538 mutex_unlock(&root->fs_info->drop_mutex);
7539
7540 nr = trans->blocks_used;
7541 ret = btrfs_end_transaction(trans, root);
7542 BUG_ON(ret);
7543 btrfs_btree_balance_dirty(root, nr);
7544 }
7545
7546 free_extent_buffer(reloc_root->node);
7547
7548 ret = btrfs_del_root(trans, root->fs_info->tree_root,
7549 &reloc_root->root_key);
7550 BUG_ON(ret);
7551 mutex_unlock(&root->fs_info->drop_mutex);
7552
7553 nr = trans->blocks_used;
7554 ret = btrfs_end_transaction(trans, root);
7555 BUG_ON(ret);
7556 btrfs_btree_balance_dirty(root, nr);
7557
7558 kfree(prev_root);
7559 prev_root = reloc_root;
7560 }
7561 if (prev_root) {
7562 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
7563 kfree(prev_root);
7564 }
7565 return 0;
7566 }
7567
7568 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
7569 {
7570 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
7571 return 0;
7572 }
7573
7574 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
7575 {
7576 struct btrfs_root *reloc_root;
7577 struct btrfs_trans_handle *trans;
7578 struct btrfs_key location;
7579 int found;
7580 int ret;
7581
7582 mutex_lock(&root->fs_info->tree_reloc_mutex);
7583 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
7584 BUG_ON(ret);
7585 found = !list_empty(&root->fs_info->dead_reloc_roots);
7586 mutex_unlock(&root->fs_info->tree_reloc_mutex);
7587
7588 if (found) {
7589 trans = btrfs_start_transaction(root, 1);
7590 BUG_ON(IS_ERR(trans));
7591 ret = btrfs_commit_transaction(trans, root);
7592 BUG_ON(ret);
7593 }
7594
7595 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
7596 location.offset = (u64)-1;
7597 location.type = BTRFS_ROOT_ITEM_KEY;
7598
7599 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
7600 BUG_ON(!reloc_root);
7601 btrfs_orphan_cleanup(reloc_root);
7602 return 0;
7603 }
7604
7605 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
7606 struct btrfs_root *root)
7607 {
7608 struct btrfs_root *reloc_root;
7609 struct extent_buffer *eb;
7610 struct btrfs_root_item *root_item;
7611 struct btrfs_key root_key;
7612 int ret;
7613
7614 BUG_ON(!root->ref_cows);
7615 if (root->reloc_root)
7616 return 0;
7617
7618 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
7619 BUG_ON(!root_item);
7620
7621 ret = btrfs_copy_root(trans, root, root->commit_root,
7622 &eb, BTRFS_TREE_RELOC_OBJECTID);
7623 BUG_ON(ret);
7624
7625 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
7626 root_key.offset = root->root_key.objectid;
7627 root_key.type = BTRFS_ROOT_ITEM_KEY;
7628
7629 memcpy(root_item, &root->root_item, sizeof(*root_item));
7630 btrfs_set_root_refs(root_item, 0);
7631 btrfs_set_root_bytenr(root_item, eb->start);
7632 btrfs_set_root_level(root_item, btrfs_header_level(eb));
7633 btrfs_set_root_generation(root_item, trans->transid);
7634
7635 btrfs_tree_unlock(eb);
7636 free_extent_buffer(eb);
7637
7638 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
7639 &root_key, root_item);
7640 BUG_ON(ret);
7641 kfree(root_item);
7642
7643 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
7644 &root_key);
7645 BUG_ON(!reloc_root);
7646 reloc_root->last_trans = trans->transid;
7647 reloc_root->commit_root = NULL;
7648 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
7649
7650 root->reloc_root = reloc_root;
7651 return 0;
7652 }
7653
7654 /*
7655 * Core function of space balance.
7656 *
7657 * The idea is to use reloc trees to relocate tree blocks in reference
7658 * counted roots. There is one reloc tree for each subvol, and all
7659 * reloc trees share the same root key objectid. Reloc trees are snapshots
7660 * of the latest committed roots of subvols (root->commit_root).
7661 *
7662 * To relocate a tree block referenced by a subvol, there are two steps:
7663 * COW the block through the subvol's reloc tree, then update the block
7664 * pointer in the subvol to point to the new block. Since all reloc trees
7665 * share the same root key objectid, special handling for tree blocks
7666 * owned by them is easy. Once a tree block has been COWed in one reloc
7667 * tree, we can use the resulting new block directly when the same block
7668 * needs to be COWed again through other reloc trees. This way, relocated
7669 * tree blocks are shared between reloc trees, so they are also shared
7670 * between subvols.
7671 */
7672 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
7673 struct btrfs_root *root,
7674 struct btrfs_path *path,
7675 struct btrfs_key *first_key,
7676 struct btrfs_ref_path *ref_path,
7677 struct btrfs_block_group_cache *group,
7678 struct inode *reloc_inode)
7679 {
7680 struct btrfs_root *reloc_root;
7681 struct extent_buffer *eb = NULL;
7682 struct btrfs_key *keys;
7683 u64 *nodes;
7684 int level;
7685 int shared_level;
7686 int lowest_level = 0;
7687 int ret;
7688
7689 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
7690 lowest_level = ref_path->owner_objectid;
7691
7692 if (!root->ref_cows) {
7693 path->lowest_level = lowest_level;
7694 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
7695 BUG_ON(ret < 0);
7696 path->lowest_level = 0;
7697 btrfs_release_path(root, path);
7698 return 0;
7699 }
7700
7701 mutex_lock(&root->fs_info->tree_reloc_mutex);
7702 ret = init_reloc_tree(trans, root);
7703 BUG_ON(ret);
7704 reloc_root = root->reloc_root;
7705
7706 shared_level = ref_path->shared_level;
7707 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
7708
7709 keys = ref_path->node_keys;
7710 nodes = ref_path->new_nodes;
7711 memset(&keys[shared_level + 1], 0,
7712 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
7713 memset(&nodes[shared_level + 1], 0,
7714 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
7715
7716 if (nodes[lowest_level] == 0) {
7717 path->lowest_level = lowest_level;
7718 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7719 0, 1);
7720 BUG_ON(ret);
7721 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
7722 eb = path->nodes[level];
7723 if (!eb || eb == reloc_root->node)
7724 break;
7725 nodes[level] = eb->start;
7726 if (level == 0)
7727 btrfs_item_key_to_cpu(eb, &keys[level], 0);
7728 else
7729 btrfs_node_key_to_cpu(eb, &keys[level], 0);
7730 }
7731 if (nodes[0] &&
7732 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7733 eb = path->nodes[0];
7734 ret = replace_extents_in_leaf(trans, reloc_root, eb,
7735 group, reloc_inode);
7736 BUG_ON(ret);
7737 }
7738 btrfs_release_path(reloc_root, path);
7739 } else {
7740 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
7741 lowest_level);
7742 BUG_ON(ret);
7743 }
7744
7745 /*
7746 * replace tree blocks in the fs tree with tree blocks in
7747 * the reloc tree.
7748 */
7749 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
7750 BUG_ON(ret < 0);
7751
7752 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7753 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
7754 0, 0);
7755 BUG_ON(ret);
7756 extent_buffer_get(path->nodes[0]);
7757 eb = path->nodes[0];
7758 btrfs_release_path(reloc_root, path);
7759 ret = invalidate_extent_cache(reloc_root, eb, group, root);
7760 BUG_ON(ret);
7761 free_extent_buffer(eb);
7762 }
7763
7764 mutex_unlock(&root->fs_info->tree_reloc_mutex);
7765 path->lowest_level = 0;
7766 return 0;
7767 }
7768
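/*
 * Convenience wrapper for the metadata case: relocate_one_path() runs
 * with no block group or reloc inode, since tree blocks have no data
 * extents to copy.
 */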
7769 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
7770 struct btrfs_root *root,
7771 struct btrfs_path *path,
7772 struct btrfs_key *first_key,
7773 struct btrfs_ref_path *ref_path)
7774 {
7775 int ret;
7776
7777 ret = relocate_one_path(trans, root, path, first_key,
7778 ref_path, NULL, NULL);
7779 BUG_ON(ret);
7780
7781 return 0;
7782 }
7783
7784 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
7785 struct btrfs_root *extent_root,
7786 struct btrfs_path *path,
7787 struct btrfs_key *extent_key)
7788 {
7789 int ret;
7790
7791 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
7792 if (ret)
7793 goto out;
7794 ret = btrfs_del_item(trans, extent_root, path);
7795 out:
7796 btrfs_release_path(extent_root, path);
7797 return ret;
7798 }
7799
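/*
 * Look up the root that a reference path starts from. COW-only roots
 * (extent tree, chunk tree, etc.) are keyed at offset 0; for reference
 * counted subvol roots, the (u64)-1 offset presumably lets the lookup
 * settle on the most recent root item with that objectid.
 */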
7800 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
7801 struct btrfs_ref_path *ref_path)
7802 {
7803 struct btrfs_key root_key;
7804
7805 root_key.objectid = ref_path->root_objectid;
7806 root_key.type = BTRFS_ROOT_ITEM_KEY;
7807 if (is_cowonly_root(ref_path->root_objectid))
7808 root_key.offset = 0;
7809 else
7810 root_key.offset = (u64)-1;
7811
7812 return btrfs_read_fs_root_no_name(fs_info, &root_key);
7813 }
7814
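/*
 * Relocate all references to a single extent. The "pass" argument picks
 * the strategy for data extents: on pass 0 the data is copied into the
 * reloc inode via relocate_data_extent(), on pass 1 references are
 * updated through reloc trees, and any references still left are handled
 * by the replace_one_extent() fallback with freshly allocated locations.
 */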
7815 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
7816 struct btrfs_path *path,
7817 struct btrfs_key *extent_key,
7818 struct btrfs_block_group_cache *group,
7819 struct inode *reloc_inode, int pass)
7820 {
7821 struct btrfs_trans_handle *trans;
7822 struct btrfs_root *found_root;
7823 struct btrfs_ref_path *ref_path = NULL;
7824 struct disk_extent *new_extents = NULL;
7825 int nr_extents = 0;
7826 int loops;
7827 int ret;
7828 int level;
7829 struct btrfs_key first_key;
7830 u64 prev_block = 0;
7831
7832
7833 trans = btrfs_start_transaction(extent_root, 1);
7834 BUG_ON(IS_ERR(trans));
7835
7836 if (extent_key->objectid == 0) {
7837 ret = del_extent_zero(trans, extent_root, path, extent_key);
7838 goto out;
7839 }
7840
7841 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
7842 if (!ref_path) {
7843 ret = -ENOMEM;
7844 goto out;
7845 }
7846
7847 for (loops = 0; ; loops++) {
7848 if (loops == 0) {
7849 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
7850 extent_key->objectid);
7851 } else {
7852 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
7853 }
7854 if (ret < 0)
7855 goto out;
7856 if (ret > 0)
7857 break;
7858
7859 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
7860 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
7861 continue;
7862
7863 found_root = read_ref_root(extent_root->fs_info, ref_path);
7864 BUG_ON(!found_root);
7865 /*
7866 * for reference counted trees, only process reference paths
7867 * rooted at the latest committed root.
7868 */
7869 if (found_root->ref_cows &&
7870 ref_path->root_generation != found_root->root_key.offset)
7871 continue;
7872
7873 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7874 if (pass == 0) {
7875 /*
7876 * copy data extents to new locations
7877 */
7878 u64 group_start = group->key.objectid;
7879 ret = relocate_data_extent(reloc_inode,
7880 extent_key,
7881 group_start);
7882 if (ret < 0)
7883 goto out;
7884 break;
7885 }
7886 level = 0;
7887 } else {
7888 level = ref_path->owner_objectid;
7889 }
7890
7891 if (prev_block != ref_path->nodes[level]) {
7892 struct extent_buffer *eb;
7893 u64 block_start = ref_path->nodes[level];
7894 u64 block_size = btrfs_level_size(found_root, level);
7895
7896 eb = read_tree_block(found_root, block_start,
7897 block_size, 0);
7898 btrfs_tree_lock(eb);
7899 BUG_ON(level != btrfs_header_level(eb));
7900
7901 if (level == 0)
7902 btrfs_item_key_to_cpu(eb, &first_key, 0);
7903 else
7904 btrfs_node_key_to_cpu(eb, &first_key, 0);
7905
7906 btrfs_tree_unlock(eb);
7907 free_extent_buffer(eb);
7908 prev_block = block_start;
7909 }
7910
7911 mutex_lock(&extent_root->fs_info->trans_mutex);
7912 btrfs_record_root_in_trans(found_root);
7913 mutex_unlock(&extent_root->fs_info->trans_mutex);
7914 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
7915 /*
7916 * try to update data extent references while
7917 * keeping metadata shared between snapshots.
7918 */
7919 if (pass == 1) {
7920 ret = relocate_one_path(trans, found_root,
7921 path, &first_key, ref_path,
7922 group, reloc_inode);
7923 if (ret < 0)
7924 goto out;
7925 continue;
7926 }
7927 /*
7928 * use fallback method to process the remaining
7929 * references.
7930 */
7931 if (!new_extents) {
7932 u64 group_start = group->key.objectid;
7933 new_extents = kmalloc(sizeof(*new_extents),
7934 GFP_NOFS);
if (!new_extents) {
ret = -ENOMEM;
goto out;
}
7935 nr_extents = 1;
7936 ret = get_new_locations(reloc_inode,
7937 extent_key,
7938 group_start, 1,
7939 &new_extents,
7940 &nr_extents);
7941 if (ret)
7942 goto out;
7943 }
7944 ret = replace_one_extent(trans, found_root,
7945 path, extent_key,
7946 &first_key, ref_path,
7947 new_extents, nr_extents);
7948 } else {
7949 ret = relocate_tree_block(trans, found_root, path,
7950 &first_key, ref_path);
7951 }
7952 if (ret < 0)
7953 goto out;
7954 }
7955 ret = 0;
7956 out:
7957 btrfs_end_transaction(trans, extent_root);
7958 kfree(new_extents);
7959 kfree(ref_path);
7960 return ret;
7961 }
7962 #endif
7963
7964 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
7965 {
7966 u64 num_devices;
7967 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
7968 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
7969
7970 /*
7971 * we add in the count of missing devices because we want
7972 * to make sure that any RAID levels on a degraded FS
7973 * continue to be honored.
7974 */
7975 num_devices = root->fs_info->fs_devices->rw_devices +
7976 root->fs_info->fs_devices->missing_devices;
7977
7978 if (num_devices == 1) {
7979 stripped |= BTRFS_BLOCK_GROUP_DUP;
7980 stripped = flags & ~stripped;
7981
7982 /* turn raid0 into single device chunks */
7983 if (flags & BTRFS_BLOCK_GROUP_RAID0)
7984 return stripped;
7985
7986 /* turn mirroring into duplication */
7987 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
7988 BTRFS_BLOCK_GROUP_RAID10))
7989 return stripped | BTRFS_BLOCK_GROUP_DUP;
7990 return flags;
7991 } else {
7992 /* they already had raid on here, just return */
7993 if (flags & stripped)
7994 return flags;
7995
7996 stripped |= BTRFS_BLOCK_GROUP_DUP;
7997 stripped = flags & ~stripped;
7998
7999 /* switch duplicated blocks with raid1 */
8000 if (flags & BTRFS_BLOCK_GROUP_DUP)
8001 return stripped | BTRFS_BLOCK_GROUP_RAID1;
8002
8003 /* turn single device chunks into raid0 */
8004 return stripped | BTRFS_BLOCK_GROUP_RAID0;
8005 }
8006 return flags;
8007 }
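/*
 * Illustrative example: balancing RAID1 metadata on a filesystem that is
 * down to a single usable device converts it to DUP, and RAID0 data to
 * plain single-device chunks; with two or more devices the mapping runs
 * the other way, DUP -> RAID1 and single -> RAID0.
 */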
8008
8009 static int set_block_group_ro(struct btrfs_block_group_cache *cache)
8010 {
8011 struct btrfs_space_info *sinfo = cache->space_info;
8012 u64 num_bytes;
8013 int ret = -ENOSPC;
8014
8015 if (cache->ro)
8016 return 0;
8017
8018 spin_lock(&sinfo->lock);
8019 spin_lock(&cache->lock);
8020 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8021 cache->bytes_super - btrfs_block_group_used(&cache->item);
8022
8023 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
8024 sinfo->bytes_may_use + sinfo->bytes_readonly +
8025 cache->reserved_pinned + num_bytes <= sinfo->total_bytes) {
8026 sinfo->bytes_readonly += num_bytes;
8027 sinfo->bytes_reserved += cache->reserved_pinned;
8028 cache->reserved_pinned = 0;
8029 cache->ro = 1;
8030 ret = 0;
8031 }
8032
8033 spin_unlock(&cache->lock);
8034 spin_unlock(&sinfo->lock);
8035 return ret;
8036 }
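/*
 * Worked example with made-up numbers: a 1GB block group with 600MB used
 * and nothing reserved or pinned leaves num_bytes at roughly 424MB
 * (1024MB - 600MB, less bytes_super). The group only goes read-only if
 * the space_info can absorb those bytes without exceeding total_bytes.
 */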
8037
8038 int btrfs_set_block_group_ro(struct btrfs_root *root,
8039 struct btrfs_block_group_cache *cache)
8041 {
8042 struct btrfs_trans_handle *trans;
8043 u64 alloc_flags;
8044 int ret;
8045
8046 BUG_ON(cache->ro);
8047
8048 trans = btrfs_join_transaction(root, 1);
8049 BUG_ON(IS_ERR(trans));
8050
8051 alloc_flags = update_block_group_flags(root, cache->flags);
8052 if (alloc_flags != cache->flags)
8053 do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
8054
8055 ret = set_block_group_ro(cache);
8056 if (!ret)
8057 goto out;
8058 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
8059 ret = do_chunk_alloc(trans, root, 2 * 1024 * 1024, alloc_flags, 1);
8060 if (ret < 0)
8061 goto out;
8062 ret = set_block_group_ro(cache);
8063 out:
8064 btrfs_end_transaction(trans, root);
8065 return ret;
8066 }
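/*
 * Note the retry pattern above: if flipping the group read-only fails
 * for lack of space, a chunk allocation in the space_info's current
 * profile is forced and set_block_group_ro() is attempted once more.
 */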
8067
8068 /*
8069 * helper to account the unused space of all the readonly block groups in
8070 * the list, taking mirrors into account.
8071 */
8072 static u64 __btrfs_get_ro_block_group_free_space(struct list_head *groups_list)
8073 {
8074 struct btrfs_block_group_cache *block_group;
8075 u64 free_bytes = 0;
8076 int factor;
8077
8078 list_for_each_entry(block_group, groups_list, list) {
8079 spin_lock(&block_group->lock);
8080
8081 if (!block_group->ro) {
8082 spin_unlock(&block_group->lock);
8083 continue;
8084 }
8085
8086 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
8087 BTRFS_BLOCK_GROUP_RAID10 |
8088 BTRFS_BLOCK_GROUP_DUP))
8089 factor = 2;
8090 else
8091 factor = 1;
8092
8093 free_bytes += (block_group->key.offset -
8094 btrfs_block_group_used(&block_group->item)) *
8095 factor;
8096
8097 spin_unlock(&block_group->lock);
8098 }
8099
8100 return free_bytes;
8101 }
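/*
 * With factor 2, e.g. a read-only RAID1 block group with 100MB of unused
 * space accounts for 200MB of raw disk space, since the unused area
 * occupies both mirrors.
 */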
8102
8103 /*
8104 * helper to account the unused space of all the readonly block groups in
8105 * the space_info, taking mirrors into account.
8106 */
8107 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
8108 {
8109 int i;
8110 u64 free_bytes = 0;
8111
8112 spin_lock(&sinfo->lock);
8113
8114 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
8115 if (!list_empty(&sinfo->block_groups[i]))
8116 free_bytes += __btrfs_get_ro_block_group_free_space(
8117 &sinfo->block_groups[i]);
8118
8119 spin_unlock(&sinfo->lock);
8120
8121 return free_bytes;
8122 }
8123
8124 int btrfs_set_block_group_rw(struct btrfs_root *root,
8125 struct btrfs_block_group_cache *cache)
8126 {
8127 struct btrfs_space_info *sinfo = cache->space_info;
8128 u64 num_bytes;
8129
8130 BUG_ON(!cache->ro);
8131
8132 spin_lock(&sinfo->lock);
8133 spin_lock(&cache->lock);
8134 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
8135 cache->bytes_super - btrfs_block_group_used(&cache->item);
8136 sinfo->bytes_readonly -= num_bytes;
8137 cache->ro = 0;
8138 spin_unlock(&cache->lock);
8139 spin_unlock(&sinfo->lock);
8140 return 0;
8141 }
8142
8143 /*
8144 * checks to see if it's even possible to relocate this block group.
8145 *
8146 * @return - -1 if it's not a good idea to relocate this block group, 0 if
8147 * it's ok to go ahead and try.
8148 */
8149 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
8150 {
8151 struct btrfs_block_group_cache *block_group;
8152 struct btrfs_space_info *space_info;
8153 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
8154 struct btrfs_device *device;
8155 int full = 0;
8156 int ret = 0;
8157
8158 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
8159
8160 /* odd, couldn't find the block group, leave it alone */
8161 if (!block_group)
8162 return -1;
8163
8164 /* no bytes used, we're good */
8165 if (!btrfs_block_group_used(&block_group->item))
8166 goto out;
8167
8168 space_info = block_group->space_info;
8169 spin_lock(&space_info->lock);
8170
8171 full = space_info->full;
8172
8173 /*
8174 * if this is the last block group we have in this space, we can't
8175 * relocate it unless we're able to allocate a new chunk below.
8176 *
8177 * Otherwise, we need to make sure we have room in the space to handle
8178 * all of the extents from this block group. If we can, we're good
8179 */
8180 if ((space_info->total_bytes != block_group->key.offset) &&
8181 (space_info->bytes_used + space_info->bytes_reserved +
8182 space_info->bytes_pinned + space_info->bytes_readonly +
8183 btrfs_block_group_used(&block_group->item) <
8184 space_info->total_bytes)) {
8185 spin_unlock(&space_info->lock);
8186 goto out;
8187 }
8188 spin_unlock(&space_info->lock);
8189
8190 /*
8191 * ok we don't have enough space, but maybe we have free space on our
8192 * devices to allocate new chunks for relocation, so loop through our
8193 * alloc devices and guess if we have enough space. However, if we
8194 * were marked as full, then we know there aren't enough chunks, and we
8195 * can just return.
8196 */
8197 ret = -1;
8198 if (full)
8199 goto out;
8200
8201 mutex_lock(&root->fs_info->chunk_mutex);
8202 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
8203 u64 min_free = btrfs_block_group_used(&block_group->item);
8204 u64 dev_offset;
8205
8206 /*
8207 * check to make sure we can actually find a chunk with enough
8208 * space to fit our block group in.
8209 */
8210 if (device->total_bytes > device->bytes_used + min_free) {
8211 ret = find_free_dev_extent(NULL, device, min_free,
8212 &dev_offset, NULL);
8213 if (!ret)
8214 break;
8215 ret = -1;
8216 }
8217 }
8218 mutex_unlock(&root->fs_info->chunk_mutex);
8219 out:
8220 btrfs_put_block_group(block_group);
8221 return ret;
8222 }
8223
8224 static int find_first_block_group(struct btrfs_root *root,
8225 struct btrfs_path *path, struct btrfs_key *key)
8226 {
8227 int ret = 0;
8228 struct btrfs_key found_key;
8229 struct extent_buffer *leaf;
8230 int slot;
8231
8232 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
8233 if (ret < 0)
8234 goto out;
8235
8236 while (1) {
8237 slot = path->slots[0];
8238 leaf = path->nodes[0];
8239 if (slot >= btrfs_header_nritems(leaf)) {
8240 ret = btrfs_next_leaf(root, path);
8241 if (ret == 0)
8242 continue;
8243 if (ret < 0)
8244 goto out;
8245 break;
8246 }
8247 btrfs_item_key_to_cpu(leaf, &found_key, slot);
8248
8249 if (found_key.objectid >= key->objectid &&
8250 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
8251 ret = 0;
8252 goto out;
8253 }
8254 path->slots[0]++;
8255 }
8256 out:
8257 return ret;
8258 }
8259
8260 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
8261 {
8262 struct btrfs_block_group_cache *block_group;
8263 u64 last = 0;
8264
8265 while (1) {
8266 struct inode *inode;
8267
8268 block_group = btrfs_lookup_first_block_group(info, last);
8269 while (block_group) {
8270 spin_lock(&block_group->lock);
8271 if (block_group->iref)
8272 break;
8273 spin_unlock(&block_group->lock);
8274 block_group = next_block_group(info->tree_root,
8275 block_group);
8276 }
8277 if (!block_group) {
8278 if (last == 0)
8279 break;
8280 last = 0;
8281 continue;
8282 }
8283
8284 inode = block_group->inode;
8285 block_group->iref = 0;
8286 block_group->inode = NULL;
8287 spin_unlock(&block_group->lock);
8288 iput(inode);
8289 last = block_group->key.objectid + block_group->key.offset;
8290 btrfs_put_block_group(block_group);
8291 }
8292 }
8293
8294 int btrfs_free_block_groups(struct btrfs_fs_info *info)
8295 {
8296 struct btrfs_block_group_cache *block_group;
8297 struct btrfs_space_info *space_info;
8298 struct btrfs_caching_control *caching_ctl;
8299 struct rb_node *n;
8300
8301 down_write(&info->extent_commit_sem);
8302 while (!list_empty(&info->caching_block_groups)) {
8303 caching_ctl = list_entry(info->caching_block_groups.next,
8304 struct btrfs_caching_control, list);
8305 list_del(&caching_ctl->list);
8306 put_caching_control(caching_ctl);
8307 }
8308 up_write(&info->extent_commit_sem);
8309
8310 spin_lock(&info->block_group_cache_lock);
8311 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
8312 block_group = rb_entry(n, struct btrfs_block_group_cache,
8313 cache_node);
8314 rb_erase(&block_group->cache_node,
8315 &info->block_group_cache_tree);
8316 spin_unlock(&info->block_group_cache_lock);
8317
8318 down_write(&block_group->space_info->groups_sem);
8319 list_del(&block_group->list);
8320 up_write(&block_group->space_info->groups_sem);
8321
8322 if (block_group->cached == BTRFS_CACHE_STARTED)
8323 wait_block_group_cache_done(block_group);
8324
8325 /*
8326 * We haven't cached this block group, which means we could
8327 * possibly have excluded extents on this block group.
8328 */
8329 if (block_group->cached == BTRFS_CACHE_NO)
8330 free_excluded_extents(info->extent_root, block_group);
8331
8332 btrfs_remove_free_space_cache(block_group);
8333 btrfs_put_block_group(block_group);
8334
8335 spin_lock(&info->block_group_cache_lock);
8336 }
8337 spin_unlock(&info->block_group_cache_lock);
8338
8339 /* now that all the block groups are freed, go through and
8340 * free all the space_info structs. This is only called during
8341 * the final stages of unmount, and so we know nobody is
8342 * using them. We call synchronize_rcu() once before we start,
8343 * just to be on the safe side.
8344 */
8345 synchronize_rcu();
8346
8347 release_global_block_rsv(info);
8348
8349 while (!list_empty(&info->space_info)) {
8350 space_info = list_entry(info->space_info.next,
8351 struct btrfs_space_info,
8352 list);
8353 if (space_info->bytes_pinned > 0 ||
8354 space_info->bytes_reserved > 0) {
8355 WARN_ON(1);
8356 dump_space_info(space_info, 0, 0);
8357 }
8358 list_del(&space_info->list);
8359 kfree(space_info);
8360 }
8361 return 0;
8362 }
8363
8364 static void __link_block_group(struct btrfs_space_info *space_info,
8365 struct btrfs_block_group_cache *cache)
8366 {
8367 int index = get_block_group_index(cache);
8368
8369 down_write(&space_info->groups_sem);
8370 list_add_tail(&cache->list, &space_info->block_groups[index]);
8371 up_write(&space_info->groups_sem);
8372 }
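/*
 * get_block_group_index() buckets block groups by raid level; at the
 * time of writing the mapping is raid10, raid1, dup, raid0, single for
 * indices 0-4, which is what the bare 3 and 4 in
 * btrfs_read_block_groups() below refer to.
 */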
8373
8374 int btrfs_read_block_groups(struct btrfs_root *root)
8375 {
8376 struct btrfs_path *path;
8377 int ret;
8378 struct btrfs_block_group_cache *cache;
8379 struct btrfs_fs_info *info = root->fs_info;
8380 struct btrfs_space_info *space_info;
8381 struct btrfs_key key;
8382 struct btrfs_key found_key;
8383 struct extent_buffer *leaf;
8384 int need_clear = 0;
8385 u64 cache_gen;
8386
8387 root = info->extent_root;
8388 key.objectid = 0;
8389 key.offset = 0;
8390 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
8391 path = btrfs_alloc_path();
8392 if (!path)
8393 return -ENOMEM;
8394
8395 cache_gen = btrfs_super_cache_generation(&root->fs_info->super_copy);
8396 if (cache_gen != 0 &&
8397 btrfs_super_generation(&root->fs_info->super_copy) != cache_gen)
8398 need_clear = 1;
8399 if (btrfs_test_opt(root, CLEAR_CACHE))
8400 need_clear = 1;
8401 if (!btrfs_test_opt(root, SPACE_CACHE) && cache_gen)
8402 printk(KERN_INFO "btrfs: disk space caching is enabled\n");
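/*
 * a non-zero cache_gen means an on-disk space cache already exists
 * and will still be used, which is presumably why the message is
 * printed even though the space_cache mount option wasn't given
 */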
8403
8404 while (1) {
8405 ret = find_first_block_group(root, path, &key);
8406 if (ret > 0)
8407 break;
8408 if (ret != 0)
8409 goto error;
8410 leaf = path->nodes[0];
8411 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
8412 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8413 if (!cache) {
8414 ret = -ENOMEM;
8415 goto error;
8416 }
8417
8418 atomic_set(&cache->count, 1);
8419 spin_lock_init(&cache->lock);
8420 spin_lock_init(&cache->tree_lock);
8421 cache->fs_info = info;
8422 INIT_LIST_HEAD(&cache->list);
8423 INIT_LIST_HEAD(&cache->cluster_list);
8424
8425 if (need_clear)
8426 cache->disk_cache_state = BTRFS_DC_CLEAR;
8427
8428 /*
8429 * we only want to have 32k of ram per block group for keeping
8430 * track of free space, and if we pass 1/2 of that we want to
8431 * start converting things over to using bitmaps
8432 */
8433 cache->extents_thresh = ((1024 * 32) / 2) /
8434 sizeof(struct btrfs_free_space);
8435
8436 read_extent_buffer(leaf, &cache->item,
8437 btrfs_item_ptr_offset(leaf, path->slots[0]),
8438 sizeof(cache->item));
8439 memcpy(&cache->key, &found_key, sizeof(found_key));
8440
8441 key.objectid = found_key.objectid + found_key.offset;
8442 btrfs_release_path(root, path);
8443 cache->flags = btrfs_block_group_flags(&cache->item);
8444 cache->sectorsize = root->sectorsize;
8445
8446 /*
8447 * We need to exclude the super stripes now so that the space
8448 * info has super bytes accounted for, otherwise we'll think
8449 * we have more space than we actually do.
8450 */
8451 exclude_super_stripes(root, cache);
8452
8453 /*
8454 * check for two cases: either we are full, and therefore
8455 * don't need to bother with the caching work since we won't
8456 * find any space, or we are empty, and we can just add all
8457 * the space in and be done with it. This saves us a lot of
8458 * time, particularly in the full case.
8459 */
8460 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
8461 cache->last_byte_to_unpin = (u64)-1;
8462 cache->cached = BTRFS_CACHE_FINISHED;
8463 free_excluded_extents(root, cache);
8464 } else if (btrfs_block_group_used(&cache->item) == 0) {
8465 cache->last_byte_to_unpin = (u64)-1;
8466 cache->cached = BTRFS_CACHE_FINISHED;
8467 add_new_free_space(cache, root->fs_info,
8468 found_key.objectid,
8469 found_key.objectid +
8470 found_key.offset);
8471 free_excluded_extents(root, cache);
8472 }
8473
8474 ret = update_space_info(info, cache->flags, found_key.offset,
8475 btrfs_block_group_used(&cache->item),
8476 &space_info);
8477 BUG_ON(ret);
8478 cache->space_info = space_info;
8479 spin_lock(&cache->space_info->lock);
8480 cache->space_info->bytes_readonly += cache->bytes_super;
8481 spin_unlock(&cache->space_info->lock);
8482
8483 __link_block_group(space_info, cache);
8484
8485 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8486 BUG_ON(ret);
8487
8488 set_avail_alloc_bits(root->fs_info, cache->flags);
8489 if (btrfs_chunk_readonly(root, cache->key.objectid))
8490 set_block_group_ro(cache);
8491 }
8492
8493 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
8494 if (!(get_alloc_profile(root, space_info->flags) &
8495 (BTRFS_BLOCK_GROUP_RAID10 |
8496 BTRFS_BLOCK_GROUP_RAID1 |
8497 BTRFS_BLOCK_GROUP_DUP)))
8498 continue;
8499 /*
8500 * avoid allocating from un-mirrored block groups if there are
8501 * mirrored block groups.
8502 */
8503 list_for_each_entry(cache, &space_info->block_groups[3], list)
8504 set_block_group_ro(cache);
8505 list_for_each_entry(cache, &space_info->block_groups[4], list)
8506 set_block_group_ro(cache);
8507 }
8508
8509 init_global_block_rsv(info);
8510 ret = 0;
8511 error:
8512 btrfs_free_path(path);
8513 return ret;
8514 }
8515
8516 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
8517 struct btrfs_root *root, u64 bytes_used,
8518 u64 type, u64 chunk_objectid, u64 chunk_offset,
8519 u64 size)
8520 {
8521 int ret;
8522 struct btrfs_root *extent_root;
8523 struct btrfs_block_group_cache *cache;
8524
8525 extent_root = root->fs_info->extent_root;
8526
8527 root->fs_info->last_trans_log_full_commit = trans->transid;
8528
8529 cache = kzalloc(sizeof(*cache), GFP_NOFS);
8530 if (!cache)
8531 return -ENOMEM;
8532
8533 cache->key.objectid = chunk_offset;
8534 cache->key.offset = size;
8535 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
8536 cache->sectorsize = root->sectorsize;
8537 cache->fs_info = root->fs_info;
8538
8539 /*
8540 * we only want to have 32k of ram per block group for keeping track
8541 * of free space, and if we pass 1/2 of that we want to start
8542 * converting things over to using bitmaps
8543 */
8544 cache->extents_thresh = ((1024 * 32) / 2) /
8545 sizeof(struct btrfs_free_space);
8546 atomic_set(&cache->count, 1);
8547 spin_lock_init(&cache->lock);
8548 spin_lock_init(&cache->tree_lock);
8549 INIT_LIST_HEAD(&cache->list);
8550 INIT_LIST_HEAD(&cache->cluster_list);
8551
8552 btrfs_set_block_group_used(&cache->item, bytes_used);
8553 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
8554 cache->flags = type;
8555 btrfs_set_block_group_flags(&cache->item, type);
8556
8557 cache->last_byte_to_unpin = (u64)-1;
8558 cache->cached = BTRFS_CACHE_FINISHED;
8559 exclude_super_stripes(root, cache);
8560
8561 add_new_free_space(cache, root->fs_info, chunk_offset,
8562 chunk_offset + size);
8563
8564 free_excluded_extents(root, cache);
8565
8566 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
8567 &cache->space_info);
8568 BUG_ON(ret);
8569
8570 spin_lock(&cache->space_info->lock);
8571 cache->space_info->bytes_readonly += cache->bytes_super;
8572 spin_unlock(&cache->space_info->lock);
8573
8574 __link_block_group(cache->space_info, cache);
8575
8576 ret = btrfs_add_block_group_cache(root->fs_info, cache);
8577 BUG_ON(ret);
8578
8579 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
8580 sizeof(cache->item));
8581 BUG_ON(ret);
8582
8583 set_avail_alloc_bits(extent_root->fs_info, type);
8584
8585 return 0;
8586 }
8587
8588 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
8589 struct btrfs_root *root, u64 group_start)
8590 {
8591 struct btrfs_path *path;
8592 struct btrfs_block_group_cache *block_group;
8593 struct btrfs_free_cluster *cluster;
8594 struct btrfs_root *tree_root = root->fs_info->tree_root;
8595 struct btrfs_key key;
8596 struct inode *inode;
8597 int ret;
8598 int factor;
8599
8600 root = root->fs_info->extent_root;
8601
8602 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
8603 BUG_ON(!block_group);
8604 BUG_ON(!block_group->ro);
8605
8606 memcpy(&key, &block_group->key, sizeof(key));
8607 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
8608 BTRFS_BLOCK_GROUP_RAID1 |
8609 BTRFS_BLOCK_GROUP_RAID10))
8610 factor = 2;
8611 else
8612 factor = 1;
8613
8614 /* make sure this block group isn't part of an allocation cluster */
8615 cluster = &root->fs_info->data_alloc_cluster;
8616 spin_lock(&cluster->refill_lock);
8617 btrfs_return_cluster_to_free_space(block_group, cluster);
8618 spin_unlock(&cluster->refill_lock);
8619
8620 /*
8621 * make sure this block group isn't part of a metadata
8622 * allocation cluster
8623 */
8624 cluster = &root->fs_info->meta_alloc_cluster;
8625 spin_lock(&cluster->refill_lock);
8626 btrfs_return_cluster_to_free_space(block_group, cluster);
8627 spin_unlock(&cluster->refill_lock);
8628
8629 path = btrfs_alloc_path();
8630 BUG_ON(!path);
8631
8632 inode = lookup_free_space_inode(root, block_group, path);
8633 if (!IS_ERR(inode)) {
8634 btrfs_orphan_add(trans, inode);
8635 clear_nlink(inode);
8636 /* One for the block group's ref */
8637 spin_lock(&block_group->lock);
8638 if (block_group->iref) {
8639 block_group->iref = 0;
8640 block_group->inode = NULL;
8641 spin_unlock(&block_group->lock);
8642 iput(inode);
8643 } else {
8644 spin_unlock(&block_group->lock);
8645 }
8646 /* One for our lookup ref */
8647 iput(inode);
8648 }
8649
8650 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
8651 key.offset = block_group->key.objectid;
8652 key.type = 0;
8653
8654 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
8655 if (ret < 0)
8656 goto out;
8657 if (ret > 0)
8658 btrfs_release_path(tree_root, path);
8659 if (ret == 0) {
8660 ret = btrfs_del_item(trans, tree_root, path);
8661 if (ret)
8662 goto out;
8663 btrfs_release_path(tree_root, path);
8664 }
8665
8666 spin_lock(&root->fs_info->block_group_cache_lock);
8667 rb_erase(&block_group->cache_node,
8668 &root->fs_info->block_group_cache_tree);
8669 spin_unlock(&root->fs_info->block_group_cache_lock);
8670
8671 down_write(&block_group->space_info->groups_sem);
8672 /*
8673 * we must use list_del_init so people can check to see if they
8674 * are still on the list after taking the semaphore
8675 */
8676 list_del_init(&block_group->list);
8677 up_write(&block_group->space_info->groups_sem);
8678
8679 if (block_group->cached == BTRFS_CACHE_STARTED)
8680 wait_block_group_cache_done(block_group);
8681
8682 btrfs_remove_free_space_cache(block_group);
8683
8684 spin_lock(&block_group->space_info->lock);
8685 block_group->space_info->total_bytes -= block_group->key.offset;
8686 block_group->space_info->bytes_readonly -= block_group->key.offset;
8687 block_group->space_info->disk_total -= block_group->key.offset * factor;
8688 spin_unlock(&block_group->space_info->lock);
8689
8690 memcpy(&key, &block_group->key, sizeof(key));
8691
8692 btrfs_clear_space_info_full(root->fs_info);
8693
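/* one put for the lookup reference taken above, one for the reference
 * held by the block group cache tree we just erased this group from */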
8694 btrfs_put_block_group(block_group);
8695 btrfs_put_block_group(block_group);
8696
8697 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
8698 if (ret > 0)
8699 ret = -EIO;
8700 if (ret < 0)
8701 goto out;
8702
8703 ret = btrfs_del_item(trans, root, path);
8704 out:
8705 btrfs_free_path(path);
8706 return ret;
8707 }
8708
8709 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
8710 {
8711 return unpin_extent_range(root, start, end);
8712 }
8713
8714 int btrfs_error_discard_extent(struct btrfs_root *root, u64 bytenr,
8715 u64 num_bytes)
8716 {
8717 return btrfs_discard_extent(root, bytenr, num_bytes);
8718 }