fs/btrfs/extent-tree.c
/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */
#include <linux/sched.h>
#include <linux/pagemap.h>
#include <linux/writeback.h>
#include <linux/blkdev.h>
#include <linux/sort.h>
#include <linux/rcupdate.h>
#include <linux/kthread.h>
#include <linux/slab.h>
#include <linux/ratelimit.h>
#include <linux/percpu_counter.h>
#include "hash.h"
#include "tree-log.h"
#include "disk-io.h"
#include "print-tree.h"
#include "volumes.h"
#include "raid56.h"
#include "locking.h"
#include "free-space-cache.h"
#include "free-space-tree.h"
#include "math.h"
#include "sysfs.h"
#include "qgroup.h"

#undef SCRAMBLE_DELAYED_REFS

/*
 * Control flags for do_chunk_alloc()'s force field.
 *
 * CHUNK_ALLOC_NO_FORCE means to only allocate a chunk
 * if we really need one.
 *
 * CHUNK_ALLOC_LIMITED means to only try and allocate one
 * if we have very few chunks already allocated.  This is
 * used as part of the clustering code to help make sure
 * we have a good pool of storage to cluster in, without
 * filling the FS with empty chunks.
 *
 * CHUNK_ALLOC_FORCE means it must try to allocate one.
 */
enum {
	CHUNK_ALLOC_NO_FORCE = 0,
	CHUNK_ALLOC_LIMITED = 1,
	CHUNK_ALLOC_FORCE = 2,
};
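
/*
 * Editor's illustration (not part of the original file): a caller that
 * has just failed a reservation might retry with a forced allocation,
 * while the clustering code passes CHUNK_ALLOC_LIMITED.  A minimal
 * sketch, assuming a transaction handle and the extent root are in
 * scope:
 *
 *	ret = do_chunk_alloc(trans, extent_root,
 *			     btrfs_get_alloc_profile(extent_root, 0),
 *			     CHUNK_ALLOC_FORCE);
 *
 * do_chunk_alloc() itself decides whether the request may be ignored
 * (NO_FORCE), rate-limited (LIMITED), or must be honored (FORCE).
 */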

/*
 * Control how reservations are dealt with.
 *
 * RESERVE_FREE - freeing a reservation.
 * RESERVE_ALLOC - allocating space and we need to update bytes_may_use for
 *   ENOSPC accounting
 * RESERVE_ALLOC_NO_ACCOUNT - allocating space and we should not update
 *   bytes_may_use as the ENOSPC accounting is done elsewhere
 */
enum {
	RESERVE_FREE = 0,
	RESERVE_ALLOC = 1,
	RESERVE_ALLOC_NO_ACCOUNT = 2,
};

static int update_block_group(struct btrfs_trans_handle *trans,
			      struct btrfs_root *root, u64 bytenr,
			      u64 num_bytes, int alloc);
static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
			       struct btrfs_root *root,
			       struct btrfs_delayed_ref_node *node, u64 parent,
			       u64 root_objectid, u64 owner_objectid,
			       u64 owner_offset, int refs_to_drop,
			       struct btrfs_delayed_extent_op *extra_op);
static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
				    struct extent_buffer *leaf,
				    struct btrfs_extent_item *ei);
static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
				      struct btrfs_root *root,
				      u64 parent, u64 root_objectid,
				      u64 flags, u64 owner, u64 offset,
				      struct btrfs_key *ins, int ref_mod);
static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
				     struct btrfs_root *root,
				     u64 parent, u64 root_objectid,
				     u64 flags, struct btrfs_disk_key *key,
				     int level, struct btrfs_key *ins);
static int do_chunk_alloc(struct btrfs_trans_handle *trans,
			  struct btrfs_root *extent_root, u64 flags,
			  int force);
static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key);
static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
			    int dump_block_groups);
static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
				       u64 num_bytes, int reserve,
				       int delalloc);
static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
			       u64 num_bytes);
int btrfs_pin_extent(struct btrfs_root *root,
		     u64 bytenr, u64 num_bytes, int reserved);

static noinline int
block_group_cache_done(struct btrfs_block_group_cache *cache)
{
	smp_mb();
	return cache->cached == BTRFS_CACHE_FINISHED ||
		cache->cached == BTRFS_CACHE_ERROR;
}

static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
{
	return (cache->flags & bits) == bits;
}

void btrfs_get_block_group(struct btrfs_block_group_cache *cache)
{
	atomic_inc(&cache->count);
}

void btrfs_put_block_group(struct btrfs_block_group_cache *cache)
{
	if (atomic_dec_and_test(&cache->count)) {
		WARN_ON(cache->pinned > 0);
		WARN_ON(cache->reserved > 0);
		kfree(cache->free_space_ctl);
		kfree(cache);
	}
}

/*
 * this adds the block group to the fs_info rb tree for the block group
 * cache
 */
static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
				struct btrfs_block_group_cache *block_group)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct btrfs_block_group_cache *cache;

	spin_lock(&info->block_group_cache_lock);
	p = &info->block_group_cache_tree.rb_node;

	while (*p) {
		parent = *p;
		cache = rb_entry(parent, struct btrfs_block_group_cache,
				 cache_node);
		if (block_group->key.objectid < cache->key.objectid) {
			p = &(*p)->rb_left;
		} else if (block_group->key.objectid > cache->key.objectid) {
			p = &(*p)->rb_right;
		} else {
			spin_unlock(&info->block_group_cache_lock);
			return -EEXIST;
		}
	}

	rb_link_node(&block_group->cache_node, parent, p);
	rb_insert_color(&block_group->cache_node,
			&info->block_group_cache_tree);

	if (info->first_logical_byte > block_group->key.objectid)
		info->first_logical_byte = block_group->key.objectid;

	spin_unlock(&info->block_group_cache_lock);

	return 0;
}

/*
 * This will return the block group at or after bytenr if contains is 0, else
 * it will return the block group that contains the bytenr
 */
static struct btrfs_block_group_cache *
block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
			      int contains)
{
	struct btrfs_block_group_cache *cache, *ret = NULL;
	struct rb_node *n;
	u64 end, start;

	spin_lock(&info->block_group_cache_lock);
	n = info->block_group_cache_tree.rb_node;

	while (n) {
		cache = rb_entry(n, struct btrfs_block_group_cache,
				 cache_node);
		end = cache->key.objectid + cache->key.offset - 1;
		start = cache->key.objectid;

		if (bytenr < start) {
			if (!contains && (!ret || start < ret->key.objectid))
				ret = cache;
			n = n->rb_left;
		} else if (bytenr > start) {
			if (contains && bytenr <= end) {
				ret = cache;
				break;
			}
			n = n->rb_right;
		} else {
			ret = cache;
			break;
		}
	}
	if (ret) {
		btrfs_get_block_group(ret);
		if (bytenr == 0 && info->first_logical_byte > ret->key.objectid)
			info->first_logical_byte = ret->key.objectid;
	}
	spin_unlock(&info->block_group_cache_lock);

	return ret;
}
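
/*
 * Editor's illustration (not from the original file): with block groups
 * starting at 0 and 1GiB, block_group_cache_tree_search(info, 4096, 1)
 * returns the group at 0, since it contains byte 4096, while a search
 * with contains == 0 for a bytenr past the last group returns NULL.
 */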

static int add_excluded_extent(struct btrfs_root *root,
			       u64 start, u64 num_bytes)
{
	u64 end = start + num_bytes - 1;
	set_extent_bits(&root->fs_info->freed_extents[0],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	set_extent_bits(&root->fs_info->freed_extents[1],
			start, end, EXTENT_UPTODATE, GFP_NOFS);
	return 0;
}

static void free_excluded_extents(struct btrfs_root *root,
				  struct btrfs_block_group_cache *cache)
{
	u64 start, end;

	start = cache->key.objectid;
	end = start + cache->key.offset - 1;

	clear_extent_bits(&root->fs_info->freed_extents[0],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
	clear_extent_bits(&root->fs_info->freed_extents[1],
			  start, end, EXTENT_UPTODATE, GFP_NOFS);
}

static int exclude_super_stripes(struct btrfs_root *root,
				 struct btrfs_block_group_cache *cache)
{
	u64 bytenr;
	u64 *logical;
	int stripe_len;
	int i, nr, ret;

	if (cache->key.objectid < BTRFS_SUPER_INFO_OFFSET) {
		stripe_len = BTRFS_SUPER_INFO_OFFSET - cache->key.objectid;
		cache->bytes_super += stripe_len;
		ret = add_excluded_extent(root, cache->key.objectid,
					  stripe_len);
		if (ret)
			return ret;
	}

	for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
		bytenr = btrfs_sb_offset(i);
		ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
				       cache->key.objectid, bytenr,
				       0, &logical, &nr, &stripe_len);
		if (ret)
			return ret;

		while (nr--) {
			u64 start, len;

			if (logical[nr] > cache->key.objectid +
			    cache->key.offset)
				continue;

			if (logical[nr] + stripe_len <= cache->key.objectid)
				continue;

			start = logical[nr];
			if (start < cache->key.objectid) {
				start = cache->key.objectid;
				len = (logical[nr] + stripe_len) - start;
			} else {
				len = min_t(u64, stripe_len,
					    cache->key.objectid +
					    cache->key.offset - start);
			}

			cache->bytes_super += len;
			ret = add_excluded_extent(root, start, len);
			if (ret) {
				kfree(logical);
				return ret;
			}
		}

		kfree(logical);
	}
	return 0;
}

static struct btrfs_caching_control *
get_caching_control(struct btrfs_block_group_cache *cache)
{
	struct btrfs_caching_control *ctl;

	spin_lock(&cache->lock);
	if (!cache->caching_ctl) {
		spin_unlock(&cache->lock);
		return NULL;
	}

	ctl = cache->caching_ctl;
	atomic_inc(&ctl->count);
	spin_unlock(&cache->lock);
	return ctl;
}

static void put_caching_control(struct btrfs_caching_control *ctl)
{
	if (atomic_dec_and_test(&ctl->count))
		kfree(ctl);
}

#ifdef CONFIG_BTRFS_DEBUG
static void fragment_free_space(struct btrfs_root *root,
				struct btrfs_block_group_cache *block_group)
{
	u64 start = block_group->key.objectid;
	u64 len = block_group->key.offset;
	u64 chunk = block_group->flags & BTRFS_BLOCK_GROUP_METADATA ?
		root->nodesize : root->sectorsize;
	u64 step = chunk << 1;

	while (len > chunk) {
		btrfs_remove_free_space(block_group, start, chunk);
		start += step;
		if (len < step)
			len = 0;
		else
			len -= step;
	}
}
#endif

/*
 * This is only called by cache_block_group.  Since we could have freed
 * extents, we need to check the pinned_extents tree for any extents
 * that can't be used yet, because their free space will not be released
 * until the transaction commits.
 */
u64 add_new_free_space(struct btrfs_block_group_cache *block_group,
		       struct btrfs_fs_info *info, u64 start, u64 end)
{
	u64 extent_start, extent_end, size, total_added = 0;
	int ret;

	while (start < end) {
		ret = find_first_extent_bit(info->pinned_extents, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY | EXTENT_UPTODATE,
					    NULL);
		if (ret)
			break;

		if (extent_start <= start) {
			start = extent_end + 1;
		} else if (extent_start > start && extent_start < end) {
			size = extent_start - start;
			total_added += size;
			ret = btrfs_add_free_space(block_group, start,
						   size);
			BUG_ON(ret); /* -ENOMEM or logic error */
			start = extent_end + 1;
		} else {
			break;
		}
	}

	if (start < end) {
		size = end - start;
		total_added += size;
		ret = btrfs_add_free_space(block_group, start, size);
		BUG_ON(ret); /* -ENOMEM or logic error */
	}

	return total_added;
}
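
/*
 * Worked example (editor's illustration, not from the original file):
 * suppose the caller passes [start=0K, end=100K) and pinned_extents
 * contains [30K, 40K) and [70K, 80K).  The loop adds [0K, 30K) and
 * [40K, 70K) as free space, skipping the pinned ranges, and the final
 * block adds the tail [80K, 100K); total_added is then 80K.
 */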

static int load_extent_tree_free(struct btrfs_caching_control *caching_ctl)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_root *extent_root;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u64 total_found = 0;
	u64 last = 0;
	u32 nritems;
	int ret;
	bool wakeup = true;

	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);

#ifdef CONFIG_BTRFS_DEBUG
	/*
	 * If we're fragmenting we don't want to make anybody think we can
	 * allocate from this block group until we've had a chance to fragment
	 * the free space.
	 */
	if (btrfs_should_fragment_free_space(extent_root, block_group))
		wakeup = false;
#endif
	/*
	 * We don't want to deadlock with somebody trying to allocate a new
	 * extent for the extent root while also trying to search the extent
	 * root to add free space.  So we skip locking and search the commit
	 * root, since it's read-only.
	 */
	path->skip_locking = 1;
	path->search_commit_root = 1;
	path->reada = READA_FORWARD;

	key.objectid = last;
	key.offset = 0;
	key.type = BTRFS_EXTENT_ITEM_KEY;

next:
	ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
	if (ret < 0)
		goto out;

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);

	while (1) {
		if (btrfs_fs_closing(fs_info) > 1) {
			last = (u64)-1;
			break;
		}

		if (path->slots[0] < nritems) {
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		} else {
			ret = find_next_key(path, 0, &key);
			if (ret)
				break;

			if (need_resched() ||
			    rwsem_is_contended(&fs_info->commit_root_sem)) {
				if (wakeup)
					caching_ctl->progress = last;
				btrfs_release_path(path);
				up_read(&fs_info->commit_root_sem);
				mutex_unlock(&caching_ctl->mutex);
				cond_resched();
				mutex_lock(&caching_ctl->mutex);
				down_read(&fs_info->commit_root_sem);
				goto next;
			}

			ret = btrfs_next_leaf(extent_root, path);
			if (ret < 0)
				goto out;
			if (ret)
				break;
			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			continue;
		}

		if (key.objectid < last) {
			key.objectid = last;
			key.offset = 0;
			key.type = BTRFS_EXTENT_ITEM_KEY;

			if (wakeup)
				caching_ctl->progress = last;
			btrfs_release_path(path);
			goto next;
		}

		if (key.objectid < block_group->key.objectid) {
			path->slots[0]++;
			continue;
		}

		if (key.objectid >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		if (key.type == BTRFS_EXTENT_ITEM_KEY ||
		    key.type == BTRFS_METADATA_ITEM_KEY) {
			total_found += add_new_free_space(block_group,
							  fs_info, last,
							  key.objectid);
			if (key.type == BTRFS_METADATA_ITEM_KEY)
				last = key.objectid +
					fs_info->tree_root->nodesize;
			else
				last = key.objectid + key.offset;

			if (total_found > CACHING_CTL_WAKE_UP) {
				total_found = 0;
				if (wakeup)
					wake_up(&caching_ctl->wait);
			}
		}
		path->slots[0]++;
	}
	ret = 0;

	total_found += add_new_free_space(block_group, fs_info, last,
					  block_group->key.objectid +
					  block_group->key.offset);
	caching_ctl->progress = (u64)-1;

out:
	btrfs_free_path(path);
	return ret;
}

static noinline void caching_thread(struct btrfs_work *work)
{
	struct btrfs_block_group_cache *block_group;
	struct btrfs_fs_info *fs_info;
	struct btrfs_caching_control *caching_ctl;
	struct btrfs_root *extent_root;
	int ret;

	caching_ctl = container_of(work, struct btrfs_caching_control, work);
	block_group = caching_ctl->block_group;
	fs_info = block_group->fs_info;
	extent_root = fs_info->extent_root;

	mutex_lock(&caching_ctl->mutex);
	down_read(&fs_info->commit_root_sem);

	if (btrfs_fs_compat_ro(fs_info, FREE_SPACE_TREE))
		ret = load_free_space_tree(caching_ctl);
	else
		ret = load_extent_tree_free(caching_ctl);

	spin_lock(&block_group->lock);
	block_group->caching_ctl = NULL;
	block_group->cached = ret ? BTRFS_CACHE_ERROR : BTRFS_CACHE_FINISHED;
	spin_unlock(&block_group->lock);

#ifdef CONFIG_BTRFS_DEBUG
	if (btrfs_should_fragment_free_space(extent_root, block_group)) {
		u64 bytes_used;

		spin_lock(&block_group->space_info->lock);
		spin_lock(&block_group->lock);
		bytes_used = block_group->key.offset -
			btrfs_block_group_used(&block_group->item);
		block_group->space_info->bytes_used += bytes_used >> 1;
		spin_unlock(&block_group->lock);
		spin_unlock(&block_group->space_info->lock);
		fragment_free_space(extent_root, block_group);
	}
#endif

	caching_ctl->progress = (u64)-1;

	up_read(&fs_info->commit_root_sem);
	free_excluded_extents(fs_info->extent_root, block_group);
	mutex_unlock(&caching_ctl->mutex);

	wake_up(&caching_ctl->wait);

	put_caching_control(caching_ctl);
	btrfs_put_block_group(block_group);
}

static int cache_block_group(struct btrfs_block_group_cache *cache,
			     int load_cache_only)
{
	DEFINE_WAIT(wait);
	struct btrfs_fs_info *fs_info = cache->fs_info;
	struct btrfs_caching_control *caching_ctl;
	int ret = 0;

	caching_ctl = kzalloc(sizeof(*caching_ctl), GFP_NOFS);
	if (!caching_ctl)
		return -ENOMEM;

	INIT_LIST_HEAD(&caching_ctl->list);
	mutex_init(&caching_ctl->mutex);
	init_waitqueue_head(&caching_ctl->wait);
	caching_ctl->block_group = cache;
	caching_ctl->progress = cache->key.objectid;
	atomic_set(&caching_ctl->count, 1);
	btrfs_init_work(&caching_ctl->work, btrfs_cache_helper,
			caching_thread, NULL, NULL);

	spin_lock(&cache->lock);
	/*
	 * This should be a rare occasion, but it can happen when one thread
	 * starts to load the space cache info and then some other thread
	 * starts a transaction commit which tries to do an allocation while
	 * the first thread is still loading the space cache info.  The
	 * previous loop should have kept us from choosing this block group,
	 * but if we've moved to the state where we will wait on caching
	 * block groups we need to first check if we're doing a fast load
	 * here, so we can wait for it to finish; otherwise we could end up
	 * allocating from a block group whose cache gets evicted for one
	 * reason or another.
	 */
	while (cache->cached == BTRFS_CACHE_FAST) {
		struct btrfs_caching_control *ctl;

		ctl = cache->caching_ctl;
		atomic_inc(&ctl->count);
		prepare_to_wait(&ctl->wait, &wait, TASK_UNINTERRUPTIBLE);
		spin_unlock(&cache->lock);

		schedule();

		finish_wait(&ctl->wait, &wait);
		put_caching_control(ctl);
		spin_lock(&cache->lock);
	}

	if (cache->cached != BTRFS_CACHE_NO) {
		spin_unlock(&cache->lock);
		kfree(caching_ctl);
		return 0;
	}
	WARN_ON(cache->caching_ctl);
	cache->caching_ctl = caching_ctl;
	cache->cached = BTRFS_CACHE_FAST;
	spin_unlock(&cache->lock);

	if (fs_info->mount_opt & BTRFS_MOUNT_SPACE_CACHE) {
		mutex_lock(&caching_ctl->mutex);
		ret = load_free_space_cache(fs_info, cache);

		spin_lock(&cache->lock);
		if (ret == 1) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_FINISHED;
			cache->last_byte_to_unpin = (u64)-1;
			caching_ctl->progress = (u64)-1;
		} else {
			if (load_cache_only) {
				cache->caching_ctl = NULL;
				cache->cached = BTRFS_CACHE_NO;
			} else {
				cache->cached = BTRFS_CACHE_STARTED;
				cache->has_caching_ctl = 1;
			}
		}
		spin_unlock(&cache->lock);
#ifdef CONFIG_BTRFS_DEBUG
		if (ret == 1 &&
		    btrfs_should_fragment_free_space(fs_info->extent_root,
						     cache)) {
			u64 bytes_used;

			spin_lock(&cache->space_info->lock);
			spin_lock(&cache->lock);
			bytes_used = cache->key.offset -
				btrfs_block_group_used(&cache->item);
			cache->space_info->bytes_used += bytes_used >> 1;
			spin_unlock(&cache->lock);
			spin_unlock(&cache->space_info->lock);
			fragment_free_space(fs_info->extent_root, cache);
		}
#endif
		mutex_unlock(&caching_ctl->mutex);

		wake_up(&caching_ctl->wait);
		if (ret == 1) {
			put_caching_control(caching_ctl);
			free_excluded_extents(fs_info->extent_root, cache);
			return 0;
		}
	} else {
		/*
		 * We're either using the free space tree or no caching at all.
		 * Set cached to the appropriate value and wakeup any waiters.
		 */
		spin_lock(&cache->lock);
		if (load_cache_only) {
			cache->caching_ctl = NULL;
			cache->cached = BTRFS_CACHE_NO;
		} else {
			cache->cached = BTRFS_CACHE_STARTED;
			cache->has_caching_ctl = 1;
		}
		spin_unlock(&cache->lock);
		wake_up(&caching_ctl->wait);
	}

	if (load_cache_only) {
		put_caching_control(caching_ctl);
		return 0;
	}

	down_write(&fs_info->commit_root_sem);
	atomic_inc(&caching_ctl->count);
	list_add_tail(&caching_ctl->list, &fs_info->caching_block_groups);
	up_write(&fs_info->commit_root_sem);

	btrfs_get_block_group(cache);

	btrfs_queue_work(fs_info->caching_workers, &caching_ctl->work);

	return ret;
}

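/*
 * Editor's summary (not from the original file): a block group's cache
 * state moves NO -> FAST (trying the on-disk space cache) and then either
 * straight to FINISHED on a successful fast load, back to NO when only a
 * fast load was requested, or to STARTED while caching_thread() scans the
 * extent tree (or free space tree); the thread finishes in FINISHED or
 * ERROR.
 */
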
/*
 * return the block group that starts at or after bytenr
 */
static struct btrfs_block_group_cache *
btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 0);

	return cache;
}

/*
 * return the block group that contains the given bytenr
 */
struct btrfs_block_group_cache *btrfs_lookup_block_group(
						 struct btrfs_fs_info *info,
						 u64 bytenr)
{
	struct btrfs_block_group_cache *cache;

	cache = block_group_cache_tree_search(info, bytenr, 1);

	return cache;
}

static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
						  u64 flags)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	flags &= BTRFS_BLOCK_GROUP_TYPE_MASK;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list) {
		if (found->flags & flags) {
			rcu_read_unlock();
			return found;
		}
	}
	rcu_read_unlock();
	return NULL;
}

/*
 * after adding space to the filesystem, we need to clear the full flags
 * on all the space infos.
 */
void btrfs_clear_space_info_full(struct btrfs_fs_info *info)
{
	struct list_head *head = &info->space_info;
	struct btrfs_space_info *found;

	rcu_read_lock();
	list_for_each_entry_rcu(found, head, list)
		found->full = 0;
	rcu_read_unlock();
}

/* simple helper to search for an existing data extent at a given offset */
int btrfs_lookup_data_extent(struct btrfs_root *root, u64 start, u64 len)
{
	int ret;
	struct btrfs_key key;
	struct btrfs_path *path;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	key.objectid = start;
	key.offset = len;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
				0, 0);
	btrfs_free_path(path);
	return ret;
}

/*
 * helper function to lookup reference count and flags of a tree block.
 *
 * the head node for a delayed ref is used to store the sum of all the
 * reference count modifications queued up in the rbtree. the head
 * node may also store the extent flags to set. This way you can check
 * to see what the reference count and extent flags will be once all of
 * the delayed refs are processed.
 */
int btrfs_lookup_extent_info(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root, u64 bytenr,
			     u64 offset, int metadata, u64 *refs, u64 *flags)
{
	struct btrfs_delayed_ref_head *head;
	struct btrfs_delayed_ref_root *delayed_refs;
	struct btrfs_path *path;
	struct btrfs_extent_item *ei;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	u32 item_size;
	u64 num_refs;
	u64 extent_flags;
	int ret;

	/*
	 * If we don't have skinny metadata, don't bother doing anything
	 * different
	 */
	if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA)) {
		offset = root->nodesize;
		metadata = 0;
	}

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (!trans) {
		path->skip_locking = 1;
		path->search_commit_root = 1;
	}

search_again:
	key.objectid = bytenr;
	key.offset = offset;
	if (metadata)
		key.type = BTRFS_METADATA_ITEM_KEY;
	else
		key.type = BTRFS_EXTENT_ITEM_KEY;

	ret = btrfs_search_slot(trans, root->fs_info->extent_root,
				&key, path, 0, 0);
	if (ret < 0)
		goto out_free;

	if (ret > 0 && metadata && key.type == BTRFS_METADATA_ITEM_KEY) {
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == root->nodesize)
				ret = 0;
		}
	}

	if (ret == 0) {
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if (item_size >= sizeof(*ei)) {
			ei = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_extent_item);
			num_refs = btrfs_extent_refs(leaf, ei);
			extent_flags = btrfs_extent_flags(leaf, ei);
		} else {
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
			struct btrfs_extent_item_v0 *ei0;
			BUG_ON(item_size != sizeof(*ei0));
			ei0 = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_item_v0);
			num_refs = btrfs_extent_refs_v0(leaf, ei0);
			/* FIXME: this isn't correct for data */
			extent_flags = BTRFS_BLOCK_FLAG_FULL_BACKREF;
#else
			BUG();
#endif
		}
		BUG_ON(num_refs == 0);
	} else {
		num_refs = 0;
		extent_flags = 0;
		ret = 0;
	}

	if (!trans)
		goto out;

	delayed_refs = &trans->transaction->delayed_refs;
	spin_lock(&delayed_refs->lock);
	head = btrfs_find_delayed_ref_head(trans, bytenr);
	if (head) {
		if (!mutex_trylock(&head->mutex)) {
			atomic_inc(&head->node.refs);
			spin_unlock(&delayed_refs->lock);

			btrfs_release_path(path);

			/*
			 * Mutex was contended, block until it's released and
			 * try again
			 */
			mutex_lock(&head->mutex);
			mutex_unlock(&head->mutex);
			btrfs_put_delayed_ref(&head->node);
			goto search_again;
		}
		spin_lock(&head->lock);
		if (head->extent_op && head->extent_op->update_flags)
			extent_flags |= head->extent_op->flags_to_set;
		else
			BUG_ON(num_refs == 0);

		num_refs += head->node.ref_mod;
		spin_unlock(&head->lock);
		mutex_unlock(&head->mutex);
	}
	spin_unlock(&delayed_refs->lock);
out:
	WARN_ON(num_refs == 0);
	if (refs)
		*refs = num_refs;
	if (flags)
		*flags = extent_flags;
out_free:
	btrfs_free_path(path);
	return ret;
}
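
/*
 * Editor's illustration (not from the original file): if the extent item
 * on disk records 3 references and the delayed ref head carries
 * ref_mod == -1 (one queued drop), btrfs_lookup_extent_info() reports
 * *refs == 2, the value the extent tree will hold once the delayed refs
 * are run.
 */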

/*
 * Back reference rules.  Back refs have three main goals:
 *
 * 1) differentiate between all holders of references to an extent so that
 *    when a reference is dropped we can make sure it was a valid reference
 *    before freeing the extent.
 *
 * 2) Provide enough information to quickly find the holders of an extent
 *    if we notice a given block is corrupted or bad.
 *
 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
 *    maintenance.  This is actually the same as #2, but with a slightly
 *    different use case.
 *
 * There are two kinds of back refs.  The implicit back refs are optimized
 * for pointers in non-shared tree blocks.  For a given pointer in a block,
 * back refs of this kind provide information about the block's owner tree
 * and the pointer's key.  This information allows us to find the block by
 * b-tree searching.  The full back refs are for pointers in tree blocks not
 * referenced by their owner trees.  The location of the tree block is
 * recorded in the back refs.  Actually the full back refs are generic, and
 * can be used in all cases where the implicit back refs are used.  The
 * major shortcoming of the full back refs is their overhead.  Every time a
 * tree block gets COWed, we have to update the back refs entry for all
 * pointers in it.
 *
 * For a newly allocated tree block, we use implicit back refs for
 * pointers in it.  This means most tree related operations only involve
 * implicit back refs.  For a tree block created in an old transaction, the
 * only way to drop a reference to it is to COW it.  So we can detect the
 * event that a tree block loses its owner tree's reference and do the
 * back refs conversion.
 *
 * When a tree block is COWed through a tree, there are four cases:
 *
 * The reference count of the block is one and the tree is the block's
 * owner tree.  Nothing to do in this case.
 *
 * The reference count of the block is one and the tree is not the
 * block's owner tree.  In this case, full back refs are used for pointers
 * in the block.  Remove these full back refs, add implicit back refs for
 * every pointer in the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * the block's owner tree.  In this case, implicit back refs are used for
 * pointers in the block.  Add full back refs for every pointer in the
 * block, increase lower level extents' reference counts.  The original
 * implicit back refs are carried over to the new block.
 *
 * The reference count of the block is greater than one and the tree is
 * not the block's owner tree.  Add implicit back refs for every pointer in
 * the new block, increase lower level extents' reference counts.
 *
 * Back Reference Key composing:
 *
 * The key objectid corresponds to the first byte in the extent,
 * The key type is used to differentiate between types of back refs.
 * There are different meanings of the key offset for different types
 * of back refs.
 *
 * File extents can be referenced by:
 *
 * - multiple snapshots, subvolumes, or different generations in one subvol
 * - different files inside a single subvolume
 * - different offsets inside a file (bookend extents in file.c)
 *
 * The extent ref structure for the implicit back refs has fields for:
 *
 * - Objectid of the subvolume root
 * - objectid of the file holding the reference
 * - original offset in the file
 * - how many bookend extents
 *
 * The key offset for the implicit back refs is the hash of the first
 * three fields.
 *
 * The extent ref structure for the full back refs has a field for:
 *
 * - number of pointers in the tree leaf
 *
 * The key offset for the full back refs is the first byte of
 * the tree leaf
 *
 * When a file extent is allocated, the implicit back refs are used.
 * The fields are filled in:
 *
 *     (root_key.objectid, inode objectid, offset in file, 1)
 *
 * When a file extent is removed by file truncation, we find the
 * corresponding implicit back refs and check the following fields:
 *
 *     (btrfs_header_owner(leaf), inode objectid, offset in file)
 *
 * Btree extents can be referenced by:
 *
 * - Different subvolumes
 *
 * Both the implicit back refs and the full back refs for tree blocks
 * only consist of a key.  The key offset for the implicit back refs is
 * the objectid of the block's owner tree.  The key offset for the full
 * back refs is the first byte of the parent block.
 *
 * When implicit back refs are used, information about the lowest key and
 * level of the tree block is required.  This information is stored in
 * the tree block info structure.
 */

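/*
 * Editor's illustration (not from the original file): the key for an
 * implicit data back ref held by root 5, inode 257, file offset 0 on an
 * extent starting at byte 12582912 would be composed as
 *
 *     key.objectid = 12582912;
 *     key.type     = BTRFS_EXTENT_DATA_REF_KEY;
 *     key.offset   = hash_extent_data_ref(5, 257, 0);
 *
 * while the full (shared) variant keys on the parent block instead:
 *
 *     key.type     = BTRFS_SHARED_DATA_REF_KEY;
 *     key.offset   = <bytenr of the leaf holding the file extent item>;
 */
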
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
static int convert_extent_item_v0(struct btrfs_trans_handle *trans,
				  struct btrfs_root *root,
				  struct btrfs_path *path,
				  u64 owner, u32 extra_size)
{
	struct btrfs_extent_item *item;
	struct btrfs_extent_item_v0 *ei0;
	struct btrfs_extent_ref_v0 *ref0;
	struct btrfs_tree_block_info *bi;
	struct extent_buffer *leaf;
	struct btrfs_key key;
	struct btrfs_key found_key;
	u32 new_size = sizeof(*item);
	u64 refs;
	int ret;

	leaf = path->nodes[0];
	BUG_ON(btrfs_item_size_nr(leaf, path->slots[0]) != sizeof(*ei0));

	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	ei0 = btrfs_item_ptr(leaf, path->slots[0],
			     struct btrfs_extent_item_v0);
	refs = btrfs_extent_refs_v0(leaf, ei0);

	if (owner == (u64)-1) {
		while (1) {
			if (path->slots[0] >= btrfs_header_nritems(leaf)) {
				ret = btrfs_next_leaf(root, path);
				if (ret < 0)
					return ret;
				BUG_ON(ret > 0); /* Corruption */
				leaf = path->nodes[0];
			}
			btrfs_item_key_to_cpu(leaf, &found_key,
					      path->slots[0]);
			BUG_ON(key.objectid != found_key.objectid);
			if (found_key.type != BTRFS_EXTENT_REF_V0_KEY) {
				path->slots[0]++;
				continue;
			}
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			owner = btrfs_ref_objectid_v0(leaf, ref0);
			break;
		}
	}
	btrfs_release_path(path);

	if (owner < BTRFS_FIRST_FREE_OBJECTID)
		new_size += sizeof(*bi);

	new_size -= sizeof(*ei0);
	ret = btrfs_search_slot(trans, root, &key, path,
				new_size + extra_size, 1);
	if (ret < 0)
		return ret;
	BUG_ON(ret); /* Corruption */

	btrfs_extend_item(root, path, new_size);

	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	btrfs_set_extent_refs(leaf, item, refs);
	/* FIXME: get real generation */
	btrfs_set_extent_generation(leaf, item, 0);
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		btrfs_set_extent_flags(leaf, item,
				       BTRFS_EXTENT_FLAG_TREE_BLOCK |
				       BTRFS_BLOCK_FLAG_FULL_BACKREF);
		bi = (struct btrfs_tree_block_info *)(item + 1);
		/* FIXME: get first key of the block */
		memset_extent_buffer(leaf, 0, (unsigned long)bi, sizeof(*bi));
		btrfs_set_tree_block_level(leaf, bi, (int)owner);
	} else {
		btrfs_set_extent_flags(leaf, item, BTRFS_EXTENT_FLAG_DATA);
	}
	btrfs_mark_buffer_dirty(leaf);
	return 0;
}
#endif

static u64 hash_extent_data_ref(u64 root_objectid, u64 owner, u64 offset)
{
	u32 high_crc = ~(u32)0;
	u32 low_crc = ~(u32)0;
	__le64 lenum;

	lenum = cpu_to_le64(root_objectid);
	high_crc = btrfs_crc32c(high_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(owner);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));
	lenum = cpu_to_le64(offset);
	low_crc = btrfs_crc32c(low_crc, &lenum, sizeof(lenum));

	return ((u64)high_crc << 31) ^ (u64)low_crc;
}
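
/*
 * Editor's note (not from the original file): the shift above is 31, not
 * 32, so the two CRCs overlap by one bit.  The result is stored on disk
 * as the key offset for extent data refs, so the computation must stay
 * exactly as it is for compatibility.
 */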

static u64 hash_extent_data_ref_item(struct extent_buffer *leaf,
				     struct btrfs_extent_data_ref *ref)
{
	return hash_extent_data_ref(btrfs_extent_data_ref_root(leaf, ref),
				    btrfs_extent_data_ref_objectid(leaf, ref),
				    btrfs_extent_data_ref_offset(leaf, ref));
}

static int match_extent_data_ref(struct extent_buffer *leaf,
				 struct btrfs_extent_data_ref *ref,
				 u64 root_objectid, u64 owner, u64 offset)
{
	if (btrfs_extent_data_ref_root(leaf, ref) != root_objectid ||
	    btrfs_extent_data_ref_objectid(leaf, ref) != owner ||
	    btrfs_extent_data_ref_offset(leaf, ref) != offset)
		return 0;
	return 1;
}

static noinline int lookup_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid,
					   u64 owner, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref;
	struct extent_buffer *leaf;
	u32 nritems;
	int ret;
	int recow;
	int err = -ENOENT;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
	}
again:
	recow = 0;
	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0) {
		err = ret;
		goto fail;
	}

	if (parent) {
		if (!ret)
			return 0;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		btrfs_release_path(path);
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret < 0) {
			err = ret;
			goto fail;
		}
		if (!ret)
			return 0;
#endif
		goto fail;
	}

	leaf = path->nodes[0];
	nritems = btrfs_header_nritems(leaf);
	while (1) {
		if (path->slots[0] >= nritems) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				err = ret;
			if (ret)
				goto fail;

			leaf = path->nodes[0];
			nritems = btrfs_header_nritems(leaf);
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != bytenr ||
		    key.type != BTRFS_EXTENT_DATA_REF_KEY)
			goto fail;

		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);

		if (match_extent_data_ref(leaf, ref, root_objectid,
					  owner, offset)) {
			if (recow) {
				btrfs_release_path(path);
				goto again;
			}
			err = 0;
			break;
		}
		path->slots[0]++;
	}
fail:
	return err;
}

static noinline int insert_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   u64 bytenr, u64 parent,
					   u64 root_objectid, u64 owner,
					   u64 offset, int refs_to_add)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	u32 size;
	u32 num_refs;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_DATA_REF_KEY;
		key.offset = parent;
		size = sizeof(struct btrfs_shared_data_ref);
	} else {
		key.type = BTRFS_EXTENT_DATA_REF_KEY;
		key.offset = hash_extent_data_ref(root_objectid,
						  owner, offset);
		size = sizeof(struct btrfs_extent_data_ref);
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, size);
	if (ret && ret != -EEXIST)
		goto fail;

	leaf = path->nodes[0];
	if (parent) {
		struct btrfs_shared_data_ref *ref;
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_shared_data_ref);
		if (ret == 0) {
			btrfs_set_shared_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_shared_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_shared_data_ref_count(leaf, ref, num_refs);
		}
	} else {
		struct btrfs_extent_data_ref *ref;
		while (ret == -EEXIST) {
			ref = btrfs_item_ptr(leaf, path->slots[0],
					     struct btrfs_extent_data_ref);
			if (match_extent_data_ref(leaf, ref, root_objectid,
						  owner, offset))
				break;
			btrfs_release_path(path);
			key.offset++;
			ret = btrfs_insert_empty_item(trans, root, path, &key,
						      size);
			if (ret && ret != -EEXIST)
				goto fail;

			leaf = path->nodes[0];
		}
		ref = btrfs_item_ptr(leaf, path->slots[0],
				     struct btrfs_extent_data_ref);
		if (ret == 0) {
			btrfs_set_extent_data_ref_root(leaf, ref,
						       root_objectid);
			btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
			btrfs_set_extent_data_ref_offset(leaf, ref, offset);
			btrfs_set_extent_data_ref_count(leaf, ref, refs_to_add);
		} else {
			num_refs = btrfs_extent_data_ref_count(leaf, ref);
			num_refs += refs_to_add;
			btrfs_set_extent_data_ref_count(leaf, ref, num_refs);
		}
	}
	btrfs_mark_buffer_dirty(leaf);
	ret = 0;
fail:
	btrfs_release_path(path);
	return ret;
}

static noinline int remove_extent_data_ref(struct btrfs_trans_handle *trans,
					   struct btrfs_root *root,
					   struct btrfs_path *path,
					   int refs_to_drop, int *last_ref)
{
	struct btrfs_key key;
	struct btrfs_extent_data_ref *ref1 = NULL;
	struct btrfs_shared_data_ref *ref2 = NULL;
	struct extent_buffer *leaf;
	u32 num_refs = 0;
	int ret = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

	if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		BUG();
	}

	BUG_ON(num_refs < refs_to_drop);
	num_refs -= refs_to_drop;

	if (num_refs == 0) {
		ret = btrfs_del_item(trans, root, path);
		*last_ref = 1;
	} else {
		if (key.type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, ref1, num_refs);
		else if (key.type == BTRFS_SHARED_DATA_REF_KEY)
			btrfs_set_shared_data_ref_count(leaf, ref2, num_refs);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
		else {
			struct btrfs_extent_ref_v0 *ref0;
			ref0 = btrfs_item_ptr(leaf, path->slots[0],
					      struct btrfs_extent_ref_v0);
			btrfs_set_ref_count_v0(leaf, ref0, num_refs);
		}
#endif
		btrfs_mark_buffer_dirty(leaf);
	}
	return ret;
}

static noinline u32 extent_data_ref_count(struct btrfs_path *path,
					  struct btrfs_extent_inline_ref *iref)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_data_ref *ref1;
	struct btrfs_shared_data_ref *ref2;
	u32 num_refs = 0;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	if (iref) {
		if (btrfs_extent_inline_ref_type(leaf, iref) ==
		    BTRFS_EXTENT_DATA_REF_KEY) {
			ref1 = (struct btrfs_extent_data_ref *)(&iref->offset);
			num_refs = btrfs_extent_data_ref_count(leaf, ref1);
		} else {
			ref2 = (struct btrfs_shared_data_ref *)(iref + 1);
			num_refs = btrfs_shared_data_ref_count(leaf, ref2);
		}
	} else if (key.type == BTRFS_EXTENT_DATA_REF_KEY) {
		ref1 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_data_ref);
		num_refs = btrfs_extent_data_ref_count(leaf, ref1);
	} else if (key.type == BTRFS_SHARED_DATA_REF_KEY) {
		ref2 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_shared_data_ref);
		num_refs = btrfs_shared_data_ref_count(leaf, ref2);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	} else if (key.type == BTRFS_EXTENT_REF_V0_KEY) {
		struct btrfs_extent_ref_v0 *ref0;
		ref0 = btrfs_item_ptr(leaf, path->slots[0],
				      struct btrfs_extent_ref_v0);
		num_refs = btrfs_ref_count_v0(leaf, ref0);
#endif
	} else {
		WARN_ON(1);
	}
	return num_refs;
}

static noinline int lookup_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret > 0)
		ret = -ENOENT;
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (ret == -ENOENT && parent) {
		btrfs_release_path(path);
		key.type = BTRFS_EXTENT_REF_V0_KEY;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0)
			ret = -ENOENT;
	}
#endif
	return ret;
}

static noinline int insert_tree_block_ref(struct btrfs_trans_handle *trans,
					  struct btrfs_root *root,
					  struct btrfs_path *path,
					  u64 bytenr, u64 parent,
					  u64 root_objectid)
{
	struct btrfs_key key;
	int ret;

	key.objectid = bytenr;
	if (parent) {
		key.type = BTRFS_SHARED_BLOCK_REF_KEY;
		key.offset = parent;
	} else {
		key.type = BTRFS_TREE_BLOCK_REF_KEY;
		key.offset = root_objectid;
	}

	ret = btrfs_insert_empty_item(trans, root, path, &key, 0);
	btrfs_release_path(path);
	return ret;
}

static inline int extent_ref_type(u64 parent, u64 owner)
{
	int type;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		if (parent > 0)
			type = BTRFS_SHARED_BLOCK_REF_KEY;
		else
			type = BTRFS_TREE_BLOCK_REF_KEY;
	} else {
		if (parent > 0)
			type = BTRFS_SHARED_DATA_REF_KEY;
		else
			type = BTRFS_EXTENT_DATA_REF_KEY;
	}
	return type;
}
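
/*
 * Editor's summary of the mapping above (not from the original file):
 *
 *	owner < FIRST_FREE (metadata):	parent != 0 -> SHARED_BLOCK_REF
 *					parent == 0 -> TREE_BLOCK_REF
 *	owner >= FIRST_FREE (data):	parent != 0 -> SHARED_DATA_REF
 *					parent == 0 -> EXTENT_DATA_REF
 */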

static int find_next_key(struct btrfs_path *path, int level,
			 struct btrfs_key *key)
{
	for (; level < BTRFS_MAX_LEVEL; level++) {
		if (!path->nodes[level])
			break;
		if (path->slots[level] + 1 >=
		    btrfs_header_nritems(path->nodes[level]))
			continue;
		if (level == 0)
			btrfs_item_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		else
			btrfs_node_key_to_cpu(path->nodes[level], key,
					      path->slots[level] + 1);
		return 0;
	}
	return 1;
}

/*
 * look for inline back ref. if back ref is found, *ref_ret is set
 * to the address of inline back ref, and 0 is returned.
 *
 * if back ref isn't found, *ref_ret is set to the address where it
 * should be inserted, and -ENOENT is returned.
 *
 * if insert is true and there are too many inline back refs, the path
 * points to the extent item, and -EAGAIN is returned.
 *
 * NOTE: inline back refs are ordered in the same way that back ref
 *	 items in the tree are ordered.
 */
static noinline_for_stack
int lookup_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int insert)
{
	struct btrfs_key key;
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_inline_ref *iref;
	u64 flags;
	u64 item_size;
	unsigned long ptr;
	unsigned long end;
	int extra_size;
	int type;
	int want;
	int ret;
	int err = 0;
	bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
						 SKINNY_METADATA);

	key.objectid = bytenr;
	key.type = BTRFS_EXTENT_ITEM_KEY;
	key.offset = num_bytes;

	want = extent_ref_type(parent, owner);
	if (insert) {
		extra_size = btrfs_extent_inline_ref_size(want);
		path->keep_locks = 1;
	} else
		extra_size = -1;

	/*
	 * Owner is our parent level, so we can just add one to get the level
	 * for the block we are interested in.
	 */
	if (skinny_metadata && owner < BTRFS_FIRST_FREE_OBJECTID) {
		key.type = BTRFS_METADATA_ITEM_KEY;
		key.offset = owner;
	}

again:
	ret = btrfs_search_slot(trans, root, &key, path, extra_size, 1);
	if (ret < 0) {
		err = ret;
		goto out;
	}

	/*
	 * We may be a newly converted file system which still has the old fat
	 * extent entries for metadata, so try and see if we have one of those.
	 */
	if (ret > 0 && skinny_metadata) {
		skinny_metadata = false;
		if (path->slots[0]) {
			path->slots[0]--;
			btrfs_item_key_to_cpu(path->nodes[0], &key,
					      path->slots[0]);
			if (key.objectid == bytenr &&
			    key.type == BTRFS_EXTENT_ITEM_KEY &&
			    key.offset == num_bytes)
				ret = 0;
		}
		if (ret) {
			key.objectid = bytenr;
			key.type = BTRFS_EXTENT_ITEM_KEY;
			key.offset = num_bytes;
			btrfs_release_path(path);
			goto again;
		}
	}

	if (ret && !insert) {
		err = -ENOENT;
		goto out;
	} else if (WARN_ON(ret)) {
		err = -EIO;
		goto out;
	}

	leaf = path->nodes[0];
	item_size = btrfs_item_size_nr(leaf, path->slots[0]);
#ifdef BTRFS_COMPAT_EXTENT_TREE_V0
	if (item_size < sizeof(*ei)) {
		if (!insert) {
			err = -ENOENT;
			goto out;
		}
		ret = convert_extent_item_v0(trans, root, path, owner,
					     extra_size);
		if (ret < 0) {
			err = ret;
			goto out;
		}
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
	}
#endif
	BUG_ON(item_size < sizeof(*ei));

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	flags = btrfs_extent_flags(leaf, ei);

	ptr = (unsigned long)(ei + 1);
	end = (unsigned long)ei + item_size;

	if (flags & BTRFS_EXTENT_FLAG_TREE_BLOCK && !skinny_metadata) {
		ptr += sizeof(struct btrfs_tree_block_info);
		BUG_ON(ptr > end);
	}

	err = -ENOENT;
	while (1) {
		if (ptr >= end) {
			WARN_ON(ptr > end);
			break;
		}
		iref = (struct btrfs_extent_inline_ref *)ptr;
		type = btrfs_extent_inline_ref_type(leaf, iref);
		if (want < type)
			break;
		if (want > type) {
			ptr += btrfs_extent_inline_ref_size(type);
			continue;
		}

		if (type == BTRFS_EXTENT_DATA_REF_KEY) {
			struct btrfs_extent_data_ref *dref;
			dref = (struct btrfs_extent_data_ref *)(&iref->offset);
			if (match_extent_data_ref(leaf, dref, root_objectid,
						  owner, offset)) {
				err = 0;
				break;
			}
			if (hash_extent_data_ref_item(leaf, dref) <
			    hash_extent_data_ref(root_objectid, owner, offset))
				break;
		} else {
			u64 ref_offset;
			ref_offset = btrfs_extent_inline_ref_offset(leaf, iref);
			if (parent > 0) {
				if (parent == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < parent)
					break;
			} else {
				if (root_objectid == ref_offset) {
					err = 0;
					break;
				}
				if (ref_offset < root_objectid)
					break;
			}
		}
		ptr += btrfs_extent_inline_ref_size(type);
	}
	if (err == -ENOENT && insert) {
		if (item_size + extra_size >=
		    BTRFS_MAX_EXTENT_ITEM_SIZE(root)) {
			err = -EAGAIN;
			goto out;
		}
		/*
		 * To add new inline back ref, we have to make sure
		 * there is no corresponding back ref item.
		 * For simplicity, we just do not add new inline back
		 * ref if there is any kind of item for this block
		 */
		if (find_next_key(path, 0, &key) == 0 &&
		    key.objectid == bytenr &&
		    key.type < BTRFS_BLOCK_GROUP_ITEM_KEY) {
			err = -EAGAIN;
			goto out;
		}
	}
	*ref_ret = (struct btrfs_extent_inline_ref *)ptr;
out:
	if (insert) {
		path->keep_locks = 0;
		btrfs_unlock_up_safe(path, 1);
	}
	return err;
}

/*
 * helper to add new inline back ref
 */
static noinline_for_stack
void setup_inline_extent_backref(struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	unsigned long ptr;
	unsigned long end;
	unsigned long item_offset;
	u64 refs;
	int size;
	int type;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	item_offset = (unsigned long)iref - (unsigned long)ei;

	type = extent_ref_type(parent, owner);
	size = btrfs_extent_inline_ref_size(type);

	btrfs_extend_item(root, path, size);

	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	refs += refs_to_add;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	ptr = (unsigned long)ei + item_offset;
	end = (unsigned long)ei + btrfs_item_size_nr(leaf, path->slots[0]);
	if (ptr < end - size)
		memmove_extent_buffer(leaf, ptr + size, ptr,
				      end - size - ptr);

	iref = (struct btrfs_extent_inline_ref *)ptr;
	btrfs_set_extent_inline_ref_type(leaf, iref, type);
	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		struct btrfs_extent_data_ref *dref;
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		btrfs_set_extent_data_ref_root(leaf, dref, root_objectid);
		btrfs_set_extent_data_ref_objectid(leaf, dref, owner);
		btrfs_set_extent_data_ref_offset(leaf, dref, offset);
		btrfs_set_extent_data_ref_count(leaf, dref, refs_to_add);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		struct btrfs_shared_data_ref *sref;
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		btrfs_set_shared_data_ref_count(leaf, sref, refs_to_add);
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else if (type == BTRFS_SHARED_BLOCK_REF_KEY) {
		btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
	} else {
		btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static int lookup_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref **ref_ret,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner, u64 offset)
{
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, ref_ret,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 0);
	if (ret != -ENOENT)
		return ret;

	btrfs_release_path(path);
	*ref_ret = NULL;

	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		ret = lookup_tree_block_ref(trans, root, path, bytenr, parent,
					    root_objectid);
	} else {
		ret = lookup_extent_data_ref(trans, root, path, bytenr, parent,
					     root_objectid, owner, offset);
	}
	return ret;
}

/*
 * helper to update/remove inline back ref
 */
static noinline_for_stack
void update_inline_extent_backref(struct btrfs_root *root,
				  struct btrfs_path *path,
				  struct btrfs_extent_inline_ref *iref,
				  int refs_to_mod,
				  struct btrfs_delayed_extent_op *extent_op,
				  int *last_ref)
{
	struct extent_buffer *leaf;
	struct btrfs_extent_item *ei;
	struct btrfs_extent_data_ref *dref = NULL;
	struct btrfs_shared_data_ref *sref = NULL;
	unsigned long ptr;
	unsigned long end;
	u32 item_size;
	int size;
	int type;
	u64 refs;

	leaf = path->nodes[0];
	ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
	refs = btrfs_extent_refs(leaf, ei);
	WARN_ON(refs_to_mod < 0 && refs + refs_to_mod <= 0);
	refs += refs_to_mod;
	btrfs_set_extent_refs(leaf, ei, refs);
	if (extent_op)
		__run_delayed_extent_op(extent_op, leaf, ei);

	type = btrfs_extent_inline_ref_type(leaf, iref);

	if (type == BTRFS_EXTENT_DATA_REF_KEY) {
		dref = (struct btrfs_extent_data_ref *)(&iref->offset);
		refs = btrfs_extent_data_ref_count(leaf, dref);
	} else if (type == BTRFS_SHARED_DATA_REF_KEY) {
		sref = (struct btrfs_shared_data_ref *)(iref + 1);
		refs = btrfs_shared_data_ref_count(leaf, sref);
	} else {
		refs = 1;
		BUG_ON(refs_to_mod != -1);
	}

	BUG_ON(refs_to_mod < 0 && refs < -refs_to_mod);
	refs += refs_to_mod;

	if (refs > 0) {
		if (type == BTRFS_EXTENT_DATA_REF_KEY)
			btrfs_set_extent_data_ref_count(leaf, dref, refs);
		else
			btrfs_set_shared_data_ref_count(leaf, sref, refs);
	} else {
		*last_ref = 1;
		size = btrfs_extent_inline_ref_size(type);
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		ptr = (unsigned long)iref;
		end = (unsigned long)ei + item_size;
		if (ptr + size < end)
			memmove_extent_buffer(leaf, ptr, ptr + size,
					      end - ptr - size);
		item_size -= size;
		btrfs_truncate_item(root, path, item_size, 1);
	}
	btrfs_mark_buffer_dirty(leaf);
}

static noinline_for_stack
int insert_inline_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 num_bytes, u64 parent,
				 u64 root_objectid, u64 owner,
				 u64 offset, int refs_to_add,
				 struct btrfs_delayed_extent_op *extent_op)
{
	struct btrfs_extent_inline_ref *iref;
	int ret;

	ret = lookup_inline_extent_backref(trans, root, path, &iref,
					   bytenr, num_bytes, parent,
					   root_objectid, owner, offset, 1);
	if (ret == 0) {
		BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID);
		update_inline_extent_backref(root, path, iref,
					     refs_to_add, extent_op, NULL);
	} else if (ret == -ENOENT) {
		setup_inline_extent_backref(root, path, iref, parent,
					    root_objectid, owner, offset,
					    refs_to_add, extent_op);
		ret = 0;
	}
	return ret;
}

static int insert_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 u64 bytenr, u64 parent, u64 root_objectid,
				 u64 owner, u64 offset, int refs_to_add)
{
	int ret;
	if (owner < BTRFS_FIRST_FREE_OBJECTID) {
		BUG_ON(refs_to_add != 1);
		ret = insert_tree_block_ref(trans, root, path, bytenr,
					    parent, root_objectid);
	} else {
		ret = insert_extent_data_ref(trans, root, path, bytenr,
					     parent, root_objectid,
					     owner, offset, refs_to_add);
	}
	return ret;
}

static int remove_extent_backref(struct btrfs_trans_handle *trans,
				 struct btrfs_root *root,
				 struct btrfs_path *path,
				 struct btrfs_extent_inline_ref *iref,
				 int refs_to_drop, int is_data, int *last_ref)
{
	int ret = 0;

	BUG_ON(!is_data && refs_to_drop != 1);
	if (iref) {
		update_inline_extent_backref(root, path, iref,
					     -refs_to_drop, NULL, last_ref);
	} else if (is_data) {
		ret = remove_extent_data_ref(trans, root, path, refs_to_drop,
					     last_ref);
	} else {
		*last_ref = 1;
		ret = btrfs_del_item(trans, root, path);
	}
	return ret;
}

#define in_range(b, first, len)	((b) >= (first) && (b) < (first) + (len))
1965 static int btrfs_issue_discard(struct block_device *bdev, u64 start, u64 len,
1966 u64 *discarded_bytes)
1967 {
1968 int j, ret = 0;
1969 u64 bytes_left, end;
1970 u64 aligned_start = ALIGN(start, 1 << 9);
1971
1972 if (WARN_ON(start != aligned_start)) {
1973 len -= aligned_start - start;
1974 len = round_down(len, 1 << 9);
1975 start = aligned_start;
1976 }
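/*
 * Illustrative alignment math: with start = 4196 and len = 8192, the
 * 512-byte ALIGN above moves start up to 4608, shrinks len by 412 to
 * 7780 and then rounds it down to 7680 (15 whole sectors).
 */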
1977
1978 *discarded_bytes = 0;
1979
1980 if (!len)
1981 return 0;
1982
1983 end = start + len;
1984 bytes_left = len;
1985
1986 /* Skip any superblocks on this device. */
1987 for (j = 0; j < BTRFS_SUPER_MIRROR_MAX; j++) {
1988 u64 sb_start = btrfs_sb_offset(j);
1989 u64 sb_end = sb_start + BTRFS_SUPER_INFO_SIZE;
1990 u64 size = sb_start - start;
1991
1992 if (!in_range(sb_start, start, bytes_left) &&
1993 !in_range(sb_end, start, bytes_left) &&
1994 !in_range(start, sb_start, BTRFS_SUPER_INFO_SIZE))
1995 continue;
1996
1997 /*
1998 * Superblock spans beginning of range. Adjust start and
1999 * try again.
2000 */
2001 if (sb_start <= start) {
2002 start += sb_end - start;
2003 if (start > end) {
2004 bytes_left = 0;
2005 break;
2006 }
2007 bytes_left = end - start;
2008 continue;
2009 }
2010
2011 if (size) {
2012 ret = blkdev_issue_discard(bdev, start >> 9, size >> 9,
2013 GFP_NOFS, 0);
2014 if (!ret)
2015 *discarded_bytes += size;
2016 else if (ret != -EOPNOTSUPP)
2017 return ret;
2018 }
2019
2020 start = sb_end;
2021 if (start > end) {
2022 bytes_left = 0;
2023 break;
2024 }
2025 bytes_left = end - start;
2026 }
2027
2028 if (bytes_left) {
2029 ret = blkdev_issue_discard(bdev, start >> 9, bytes_left >> 9,
2030 GFP_NOFS, 0);
2031 if (!ret)
2032 *discarded_bytes += bytes_left;
2033 }
2034 return ret;
2035 }
2036
2037 int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
2038 u64 num_bytes, u64 *actual_bytes)
2039 {
2040 int ret;
2041 u64 discarded_bytes = 0;
2042 struct btrfs_bio *bbio = NULL;
2043
2044
2045 /* Tell the block device(s) that the sectors can be discarded */
2046 ret = btrfs_map_block(root->fs_info, REQ_DISCARD,
2047 bytenr, &num_bytes, &bbio, 0);
2048 /* Error condition is -ENOMEM */
2049 if (!ret) {
2050 struct btrfs_bio_stripe *stripe = bbio->stripes;
2051 int i;
2052
2053
2054 for (i = 0; i < bbio->num_stripes; i++, stripe++) {
2055 u64 bytes;
2056 if (!stripe->dev->can_discard)
2057 continue;
2058
2059 ret = btrfs_issue_discard(stripe->dev->bdev,
2060 stripe->physical,
2061 stripe->length,
2062 &bytes);
2063 if (!ret)
2064 discarded_bytes += bytes;
2065 else if (ret != -EOPNOTSUPP)
2066 break; /* Logic errors or -ENOMEM, or -EIO but I don't know how that could happen JDM */
2067
2068 /*
2069 * Just in case we get back EOPNOTSUPP for some reason,
2070 * ignore the return value so we don't screw up
2071 * people calling discard_extent.
2072 */
2073 ret = 0;
2074 }
2075 btrfs_put_bbio(bbio);
2076 }
2077
2078 if (actual_bytes)
2079 *actual_bytes = discarded_bytes;
2080
2081
2082 if (ret == -EOPNOTSUPP)
2083 ret = 0;
2084 return ret;
2085 }
2086
2087 /* Can return -ENOMEM */
2088 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2089 struct btrfs_root *root,
2090 u64 bytenr, u64 num_bytes, u64 parent,
2091 u64 root_objectid, u64 owner, u64 offset)
2092 {
2093 int ret;
2094 struct btrfs_fs_info *fs_info = root->fs_info;
2095
2096 BUG_ON(owner < BTRFS_FIRST_FREE_OBJECTID &&
2097 root_objectid == BTRFS_TREE_LOG_OBJECTID);
2098
2099 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
2100 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
2101 num_bytes,
2102 parent, root_objectid, (int)owner,
2103 BTRFS_ADD_DELAYED_REF, NULL);
2104 } else {
2105 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
2106 num_bytes, parent, root_objectid,
2107 owner, offset, 0,
2108 BTRFS_ADD_DELAYED_REF, NULL);
2109 }
2110 return ret;
2111 }
2112
2113 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
2114 struct btrfs_root *root,
2115 struct btrfs_delayed_ref_node *node,
2116 u64 parent, u64 root_objectid,
2117 u64 owner, u64 offset, int refs_to_add,
2118 struct btrfs_delayed_extent_op *extent_op)
2119 {
2120 struct btrfs_fs_info *fs_info = root->fs_info;
2121 struct btrfs_path *path;
2122 struct extent_buffer *leaf;
2123 struct btrfs_extent_item *item;
2124 struct btrfs_key key;
2125 u64 bytenr = node->bytenr;
2126 u64 num_bytes = node->num_bytes;
2127 u64 refs;
2128 int ret;
2129
2130 path = btrfs_alloc_path();
2131 if (!path)
2132 return -ENOMEM;
2133
2134 path->reada = READA_FORWARD;
2135 path->leave_spinning = 1;
2136 /* this will set up the path even if it fails to insert the back ref */
2137 ret = insert_inline_extent_backref(trans, fs_info->extent_root, path,
2138 bytenr, num_bytes, parent,
2139 root_objectid, owner, offset,
2140 refs_to_add, extent_op);
2141 if ((ret < 0 && ret != -EAGAIN) || !ret)
2142 goto out;
2143
2144 /*
2145 * Ok we had -EAGAIN, which means we didn't have space to insert an
2146 * inline extent ref, so just update the reference count and add a
2147 * normal backref.
2148 */
2149 leaf = path->nodes[0];
2150 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
2151 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2152 refs = btrfs_extent_refs(leaf, item);
2153 btrfs_set_extent_refs(leaf, item, refs + refs_to_add);
2154 if (extent_op)
2155 __run_delayed_extent_op(extent_op, leaf, item);
2156
2157 btrfs_mark_buffer_dirty(leaf);
2158 btrfs_release_path(path);
2159
2160 path->reada = READA_FORWARD;
2161 path->leave_spinning = 1;
2162 /* now insert the actual backref */
2163 ret = insert_extent_backref(trans, root->fs_info->extent_root,
2164 path, bytenr, parent, root_objectid,
2165 owner, offset, refs_to_add);
2166 if (ret)
2167 btrfs_abort_transaction(trans, root, ret);
2168 out:
2169 btrfs_free_path(path);
2170 return ret;
2171 }
2172
2173 static int run_delayed_data_ref(struct btrfs_trans_handle *trans,
2174 struct btrfs_root *root,
2175 struct btrfs_delayed_ref_node *node,
2176 struct btrfs_delayed_extent_op *extent_op,
2177 int insert_reserved)
2178 {
2179 int ret = 0;
2180 struct btrfs_delayed_data_ref *ref;
2181 struct btrfs_key ins;
2182 u64 parent = 0;
2183 u64 ref_root = 0;
2184 u64 flags = 0;
2185
2186 ins.objectid = node->bytenr;
2187 ins.offset = node->num_bytes;
2188 ins.type = BTRFS_EXTENT_ITEM_KEY;
2189
2190 ref = btrfs_delayed_node_to_data_ref(node);
2191 trace_run_delayed_data_ref(node, ref, node->action);
2192
2193 if (node->type == BTRFS_SHARED_DATA_REF_KEY)
2194 parent = ref->parent;
2195 ref_root = ref->root;
2196
2197 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2198 if (extent_op)
2199 flags |= extent_op->flags_to_set;
2200 ret = alloc_reserved_file_extent(trans, root,
2201 parent, ref_root, flags,
2202 ref->objectid, ref->offset,
2203 &ins, node->ref_mod);
2204 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2205 ret = __btrfs_inc_extent_ref(trans, root, node, parent,
2206 ref_root, ref->objectid,
2207 ref->offset, node->ref_mod,
2208 extent_op);
2209 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2210 ret = __btrfs_free_extent(trans, root, node, parent,
2211 ref_root, ref->objectid,
2212 ref->offset, node->ref_mod,
2213 extent_op);
2214 } else {
2215 BUG();
2216 }
2217 return ret;
2218 }
2219
2220 static void __run_delayed_extent_op(struct btrfs_delayed_extent_op *extent_op,
2221 struct extent_buffer *leaf,
2222 struct btrfs_extent_item *ei)
2223 {
2224 u64 flags = btrfs_extent_flags(leaf, ei);
2225 if (extent_op->update_flags) {
2226 flags |= extent_op->flags_to_set;
2227 btrfs_set_extent_flags(leaf, ei, flags);
2228 }
2229
2230 if (extent_op->update_key) {
2231 struct btrfs_tree_block_info *bi;
2232 BUG_ON(!(flags & BTRFS_EXTENT_FLAG_TREE_BLOCK));
2233 bi = (struct btrfs_tree_block_info *)(ei + 1);
2234 btrfs_set_tree_block_key(leaf, bi, &extent_op->key);
2235 }
2236 }
2237
2238 static int run_delayed_extent_op(struct btrfs_trans_handle *trans,
2239 struct btrfs_root *root,
2240 struct btrfs_delayed_ref_node *node,
2241 struct btrfs_delayed_extent_op *extent_op)
2242 {
2243 struct btrfs_key key;
2244 struct btrfs_path *path;
2245 struct btrfs_extent_item *ei;
2246 struct extent_buffer *leaf;
2247 u32 item_size;
2248 int ret;
2249 int err = 0;
2250 int metadata = !extent_op->is_data;
2251
2252 if (trans->aborted)
2253 return 0;
2254
2255 if (metadata && !btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2256 metadata = 0;
2257
2258 path = btrfs_alloc_path();
2259 if (!path)
2260 return -ENOMEM;
2261
2262 key.objectid = node->bytenr;
2263
2264 if (metadata) {
2265 key.type = BTRFS_METADATA_ITEM_KEY;
2266 key.offset = extent_op->level;
2267 } else {
2268 key.type = BTRFS_EXTENT_ITEM_KEY;
2269 key.offset = node->num_bytes;
2270 }
2271
2272 again:
2273 path->reada = READA_FORWARD;
2274 path->leave_spinning = 1;
2275 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key,
2276 path, 0, 1);
2277 if (ret < 0) {
2278 err = ret;
2279 goto out;
2280 }
2281 if (ret > 0) {
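/*
 * The search overshot the key. With skinny metadata the extent may
 * still be recorded as an old-style EXTENT_ITEM: either it sits in
 * the previous slot, or we redo the search with the full
 * (bytenr, EXTENT_ITEM, num_bytes) key.
 */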
2282 if (metadata) {
2283 if (path->slots[0] > 0) {
2284 path->slots[0]--;
2285 btrfs_item_key_to_cpu(path->nodes[0], &key,
2286 path->slots[0]);
2287 if (key.objectid == node->bytenr &&
2288 key.type == BTRFS_EXTENT_ITEM_KEY &&
2289 key.offset == node->num_bytes)
2290 ret = 0;
2291 }
2292 if (ret > 0) {
2293 btrfs_release_path(path);
2294 metadata = 0;
2295
2296 key.objectid = node->bytenr;
2297 key.offset = node->num_bytes;
2298 key.type = BTRFS_EXTENT_ITEM_KEY;
2299 goto again;
2300 }
2301 } else {
2302 err = -EIO;
2303 goto out;
2304 }
2305 }
2306
2307 leaf = path->nodes[0];
2308 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2309 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
2310 if (item_size < sizeof(*ei)) {
2311 ret = convert_extent_item_v0(trans, root->fs_info->extent_root,
2312 path, (u64)-1, 0);
2313 if (ret < 0) {
2314 err = ret;
2315 goto out;
2316 }
2317 leaf = path->nodes[0];
2318 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
2319 }
2320 #endif
2321 BUG_ON(item_size < sizeof(*ei));
2322 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
2323 __run_delayed_extent_op(extent_op, leaf, ei);
2324
2325 btrfs_mark_buffer_dirty(leaf);
2326 out:
2327 btrfs_free_path(path);
2328 return err;
2329 }
2330
2331 static int run_delayed_tree_ref(struct btrfs_trans_handle *trans,
2332 struct btrfs_root *root,
2333 struct btrfs_delayed_ref_node *node,
2334 struct btrfs_delayed_extent_op *extent_op,
2335 int insert_reserved)
2336 {
2337 int ret = 0;
2338 struct btrfs_delayed_tree_ref *ref;
2339 struct btrfs_key ins;
2340 u64 parent = 0;
2341 u64 ref_root = 0;
2342 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
2343 SKINNY_METADATA);
2344
2345 ref = btrfs_delayed_node_to_tree_ref(node);
2346 trace_run_delayed_tree_ref(node, ref, node->action);
2347
2348 if (node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2349 parent = ref->parent;
2350 ref_root = ref->root;
2351
2352 ins.objectid = node->bytenr;
2353 if (skinny_metadata) {
2354 ins.offset = ref->level;
2355 ins.type = BTRFS_METADATA_ITEM_KEY;
2356 } else {
2357 ins.offset = node->num_bytes;
2358 ins.type = BTRFS_EXTENT_ITEM_KEY;
2359 }
2360
2361 BUG_ON(node->ref_mod != 1);
2362 if (node->action == BTRFS_ADD_DELAYED_REF && insert_reserved) {
2363 BUG_ON(!extent_op || !extent_op->update_flags);
2364 ret = alloc_reserved_tree_block(trans, root,
2365 parent, ref_root,
2366 extent_op->flags_to_set,
2367 &extent_op->key,
2368 ref->level, &ins);
2369 } else if (node->action == BTRFS_ADD_DELAYED_REF) {
2370 ret = __btrfs_inc_extent_ref(trans, root, node,
2371 parent, ref_root,
2372 ref->level, 0, 1,
2373 extent_op);
2374 } else if (node->action == BTRFS_DROP_DELAYED_REF) {
2375 ret = __btrfs_free_extent(trans, root, node,
2376 parent, ref_root,
2377 ref->level, 0, 1, extent_op);
2378 } else {
2379 BUG();
2380 }
2381 return ret;
2382 }
2383
2384 /* helper function to actually process a single delayed ref entry */
2385 static int run_one_delayed_ref(struct btrfs_trans_handle *trans,
2386 struct btrfs_root *root,
2387 struct btrfs_delayed_ref_node *node,
2388 struct btrfs_delayed_extent_op *extent_op,
2389 int insert_reserved)
2390 {
2391 int ret = 0;
2392
2393 if (trans->aborted) {
2394 if (insert_reserved)
2395 btrfs_pin_extent(root, node->bytenr,
2396 node->num_bytes, 1);
2397 return 0;
2398 }
2399
2400 if (btrfs_delayed_ref_is_head(node)) {
2401 struct btrfs_delayed_ref_head *head;
2402 /*
2403 * we've hit the end of the chain and we were supposed
2404 * to insert this extent into the tree. But it got
2405 * deleted before we ever needed to insert it, so all
2406 * we have to do is clean up the accounting
2407 */
2408 BUG_ON(extent_op);
2409 head = btrfs_delayed_node_to_head(node);
2410 trace_run_delayed_ref_head(node, head, node->action);
2411
2412 if (insert_reserved) {
2413 btrfs_pin_extent(root, node->bytenr,
2414 node->num_bytes, 1);
2415 if (head->is_data) {
2416 ret = btrfs_del_csums(trans, root,
2417 node->bytenr,
2418 node->num_bytes);
2419 }
2420 }
2421
2422 /* Also free its reserved qgroup space */
2423 btrfs_qgroup_free_delayed_ref(root->fs_info,
2424 head->qgroup_ref_root,
2425 head->qgroup_reserved);
2426 return ret;
2427 }
2428
2429 if (node->type == BTRFS_TREE_BLOCK_REF_KEY ||
2430 node->type == BTRFS_SHARED_BLOCK_REF_KEY)
2431 ret = run_delayed_tree_ref(trans, root, node, extent_op,
2432 insert_reserved);
2433 else if (node->type == BTRFS_EXTENT_DATA_REF_KEY ||
2434 node->type == BTRFS_SHARED_DATA_REF_KEY)
2435 ret = run_delayed_data_ref(trans, root, node, extent_op,
2436 insert_reserved);
2437 else
2438 BUG();
2439 return ret;
2440 }
2441
2442 static inline struct btrfs_delayed_ref_node *
2443 select_delayed_ref(struct btrfs_delayed_ref_head *head)
2444 {
2445 struct btrfs_delayed_ref_node *ref;
2446
2447 if (list_empty(&head->ref_list))
2448 return NULL;
2449
2450 /*
2451 * Select a delayed ref of type BTRFS_ADD_DELAYED_REF first.
2452 * This is to prevent a ref count from going down to zero while there are
2453 * still references to add; that would delete the extent item from the
2454 * extent tree and the pending adds would fail to find it.
2455 */
2456 list_for_each_entry(ref, &head->ref_list, list) {
2457 if (ref->action == BTRFS_ADD_DELAYED_REF)
2458 return ref;
2459 }
2460
2461 return list_entry(head->ref_list.next, struct btrfs_delayed_ref_node,
2462 list);
2463 }
2464
2465 /*
2466 * Returns 0 on success or if called with an already aborted transaction.
2467 * Returns -ENOMEM or -EIO on failure and will abort the transaction.
2468 */
2469 static noinline int __btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2470 struct btrfs_root *root,
2471 unsigned long nr)
2472 {
2473 struct btrfs_delayed_ref_root *delayed_refs;
2474 struct btrfs_delayed_ref_node *ref;
2475 struct btrfs_delayed_ref_head *locked_ref = NULL;
2476 struct btrfs_delayed_extent_op *extent_op;
2477 struct btrfs_fs_info *fs_info = root->fs_info;
2478 ktime_t start = ktime_get();
2479 int ret;
2480 unsigned long count = 0;
2481 unsigned long actual_count = 0;
2482 int must_insert_reserved = 0;
2483
2484 delayed_refs = &trans->transaction->delayed_refs;
2485 while (1) {
2486 if (!locked_ref) {
2487 if (count >= nr)
2488 break;
2489
2490 spin_lock(&delayed_refs->lock);
2491 locked_ref = btrfs_select_ref_head(trans);
2492 if (!locked_ref) {
2493 spin_unlock(&delayed_refs->lock);
2494 break;
2495 }
2496
2497 /* grab the lock that says we are going to process
2498 * all the refs for this head */
2499 ret = btrfs_delayed_ref_lock(trans, locked_ref);
2500 spin_unlock(&delayed_refs->lock);
2501 /*
2502 * we may have dropped the spin lock to get the head
2503 * mutex lock, and that might have given someone else
2504 * time to free the head. If that's true, it has been
2505 * removed from our list and we can move on.
2506 */
2507 if (ret == -EAGAIN) {
2508 locked_ref = NULL;
2509 count++;
2510 continue;
2511 }
2512 }
2513
2514 /*
2515 * We need to try and merge add/drops of the same ref since we
2516 * can run into issues with relocate dropping the implicit ref
2517 * and then it being added back again before the drop can
2518 * finish. If we merged anything we need to re-loop so we can
2519 * get a good ref.
2520 * Or we can get node references of the same type that weren't
2521 * merged when created due to bumps in the tree mod seq, and
2522 * we need to merge them to prevent adding an inline extent
2523 * backref before dropping it (triggering a BUG_ON at
2524 * insert_inline_extent_backref()).
2525 */
2526 spin_lock(&locked_ref->lock);
2527 btrfs_merge_delayed_refs(trans, fs_info, delayed_refs,
2528 locked_ref);
2529
2530 /*
2531 * locked_ref is the head node, so we have to go one
2532 * node back for any delayed ref updates
2533 */
2534 ref = select_delayed_ref(locked_ref);
2535
2536 if (ref && ref->seq &&
2537 btrfs_check_delayed_seq(fs_info, delayed_refs, ref->seq)) {
2538 spin_unlock(&locked_ref->lock);
2539 btrfs_delayed_ref_unlock(locked_ref);
2540 spin_lock(&delayed_refs->lock);
2541 locked_ref->processing = 0;
2542 delayed_refs->num_heads_ready++;
2543 spin_unlock(&delayed_refs->lock);
2544 locked_ref = NULL;
2545 cond_resched();
2546 count++;
2547 continue;
2548 }
2549
2550 /*
2551 * record the must insert reserved flag before we
2552 * drop the spin lock.
2553 */
2554 must_insert_reserved = locked_ref->must_insert_reserved;
2555 locked_ref->must_insert_reserved = 0;
2556
2557 extent_op = locked_ref->extent_op;
2558 locked_ref->extent_op = NULL;
2559
2560 if (!ref) {
2561
2562
2563 /* All delayed refs have been processed, go ahead
2564 * and send the head node to run_one_delayed_ref,
2565 * so that any accounting fixes can happen
2566 */
2567 ref = &locked_ref->node;
2568
2569 if (extent_op && must_insert_reserved) {
2570 btrfs_free_delayed_extent_op(extent_op);
2571 extent_op = NULL;
2572 }
2573
2574 if (extent_op) {
2575 spin_unlock(&locked_ref->lock);
2576 ret = run_delayed_extent_op(trans, root,
2577 ref, extent_op);
2578 btrfs_free_delayed_extent_op(extent_op);
2579
2580 if (ret) {
2581 /*
2582 * Need to reset must_insert_reserved if
2583 * there was an error so the abort stuff
2584 * can cleanup the reserved space
2585 * properly.
2586 */
2587 if (must_insert_reserved)
2588 locked_ref->must_insert_reserved = 1;
2589 locked_ref->processing = 0;
2590 btrfs_debug(fs_info, "run_delayed_extent_op returned %d", ret);
2591 btrfs_delayed_ref_unlock(locked_ref);
2592 return ret;
2593 }
2594 continue;
2595 }
2596
2597 /*
2598 * Need to drop our head ref lock and re-acquire the
2599 * delayed ref lock and then re-check to make sure
2600 * nobody got added.
2601 */
2602 spin_unlock(&locked_ref->lock);
2603 spin_lock(&delayed_refs->lock);
2604 spin_lock(&locked_ref->lock);
2605 if (!list_empty(&locked_ref->ref_list) ||
2606 locked_ref->extent_op) {
2607 spin_unlock(&locked_ref->lock);
2608 spin_unlock(&delayed_refs->lock);
2609 continue;
2610 }
2611 ref->in_tree = 0;
2612 delayed_refs->num_heads--;
2613 rb_erase(&locked_ref->href_node,
2614 &delayed_refs->href_root);
2615 spin_unlock(&delayed_refs->lock);
2616 } else {
2617 actual_count++;
2618 ref->in_tree = 0;
2619 list_del(&ref->list);
2620 }
2621 atomic_dec(&delayed_refs->num_entries);
2622
2623 if (!btrfs_delayed_ref_is_head(ref)) {
2624 /*
2625 * when we play the delayed ref, also correct the
2626 * ref_mod on head
2627 */
2628 switch (ref->action) {
2629 case BTRFS_ADD_DELAYED_REF:
2630 case BTRFS_ADD_DELAYED_EXTENT:
2631 locked_ref->node.ref_mod -= ref->ref_mod;
2632 break;
2633 case BTRFS_DROP_DELAYED_REF:
2634 locked_ref->node.ref_mod += ref->ref_mod;
2635 break;
2636 default:
2637 WARN_ON(1);
2638 }
2639 }
2640 spin_unlock(&locked_ref->lock);
2641
2642 ret = run_one_delayed_ref(trans, root, ref, extent_op,
2643 must_insert_reserved);
2644
2645 btrfs_free_delayed_extent_op(extent_op);
2646 if (ret) {
2647 locked_ref->processing = 0;
2648 btrfs_delayed_ref_unlock(locked_ref);
2649 btrfs_put_delayed_ref(ref);
2650 btrfs_debug(fs_info, "run_one_delayed_ref returned %d", ret);
2651 return ret;
2652 }
2653
2654 /*
2655 * If this node is a head, that means all the refs in this head
2656 * have been dealt with, and we will pick the next head to deal
2657 * with, so we must unlock the head and drop it from the cluster
2658 * list before we release it.
2659 */
2660 if (btrfs_delayed_ref_is_head(ref)) {
2661 if (locked_ref->is_data &&
2662 locked_ref->total_ref_mod < 0) {
2663 spin_lock(&delayed_refs->lock);
2664 delayed_refs->pending_csums -= ref->num_bytes;
2665 spin_unlock(&delayed_refs->lock);
2666 }
2667 btrfs_delayed_ref_unlock(locked_ref);
2668 locked_ref = NULL;
2669 }
2670 btrfs_put_delayed_ref(ref);
2671 count++;
2672 cond_resched();
2673 }
2674
2675 /*
2676 * We don't want to include ref heads since we can have empty ref heads
2677 * and those would drastically skew our average runtime down, since for
2678 * them we only do accounting, no actual extent tree updates.
2679 */
2680 if (actual_count > 0) {
2681 u64 runtime = ktime_to_ns(ktime_sub(ktime_get(), start));
2682 u64 avg;
2683
2684 /*
2685 * We weigh the current average higher than our current runtime
2686 * to avoid large swings in the average.
2687 */
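/*
 * I.e. new_avg = (3 * old_avg + runtime) / 4, a simple exponential
 * moving average: e.g. an old average of 80us and one 200us run
 * move the average to just 110us.
 */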
2688 spin_lock(&delayed_refs->lock);
2689 avg = fs_info->avg_delayed_ref_runtime * 3 + runtime;
2690 fs_info->avg_delayed_ref_runtime = avg >> 2; /* div by 4 */
2691 spin_unlock(&delayed_refs->lock);
2692 }
2693 return 0;
2694 }
2695
2696 #ifdef SCRAMBLE_DELAYED_REFS
2697 /*
2698 * Normally delayed refs get processed in ascending bytenr order. This
2699 * correlates in most cases to the order added. To expose dependencies on this
2700 * order, we start to process the tree in the middle instead of the beginning
2701 */
2702 static u64 find_middle(struct rb_root *root)
2703 {
2704 struct rb_node *n = root->rb_node;
2705 struct btrfs_delayed_ref_node *entry;
2706 int alt = 1;
2707 u64 middle;
2708 u64 first = 0, last = 0;
2709
2710 n = rb_first(root);
2711 if (n) {
2712 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2713 first = entry->bytenr;
2714 }
2715 n = rb_last(root);
2716 if (n) {
2717 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2718 last = entry->bytenr;
2719 }
2720 n = root->rb_node;
2721
2722 while (n) {
2723 entry = rb_entry(n, struct btrfs_delayed_ref_node, rb_node);
2724 WARN_ON(!entry->in_tree);
2725
2726 middle = entry->bytenr;
2727
2728 if (alt)
2729 n = n->rb_left;
2730 else
2731 n = n->rb_right;
2732
2733 alt = 1 - alt;
2734 }
2735 return middle;
2736 }
2737 #endif
2738
2739 static inline u64 heads_to_leaves(struct btrfs_root *root, u64 heads)
2740 {
2741 u64 num_bytes;
2742
2743 num_bytes = heads * (sizeof(struct btrfs_extent_item) +
2744 sizeof(struct btrfs_extent_inline_ref));
2745 if (!btrfs_fs_incompat(root->fs_info, SKINNY_METADATA))
2746 num_bytes += heads * sizeof(struct btrfs_tree_block_info);
2747
2748 /*
2749 * We don't ever fill up leaves all the way so multiply by 2 just to be
2750 * closer to what we're really going to want to use.
2751 */
2752 return div_u64(num_bytes, BTRFS_LEAF_DATA_SIZE(root));
2753 }
2754
2755 /*
2756 * Takes the number of bytes to be checksummed and figures out how many leaves it
2757 * would require to store the csums for that many bytes.
2758 */
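/*
 * Rough illustrative numbers, assuming 16K nodes, 4K sectors and 4-byte
 * crc32c csums: a leaf holds on the order of 4000 csums, so it covers
 * roughly 16MB of data and 1GB of csum_bytes needs about 65 leaves.
 */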
2759 u64 btrfs_csum_bytes_to_leaves(struct btrfs_root *root, u64 csum_bytes)
2760 {
2761 u64 csum_size;
2762 u64 num_csums_per_leaf;
2763 u64 num_csums;
2764
2765 csum_size = BTRFS_LEAF_DATA_SIZE(root) - sizeof(struct btrfs_item);
2766 num_csums_per_leaf = div64_u64(csum_size,
2767 (u64)btrfs_super_csum_size(root->fs_info->super_copy));
2768 num_csums = div64_u64(csum_bytes, root->sectorsize);
2769 num_csums += num_csums_per_leaf - 1;
2770 num_csums = div64_u64(num_csums, num_csums_per_leaf);
2771 return num_csums;
2772 }
2773
2774 int btrfs_check_space_for_delayed_refs(struct btrfs_trans_handle *trans,
2775 struct btrfs_root *root)
2776 {
2777 struct btrfs_block_rsv *global_rsv;
2778 u64 num_heads = trans->transaction->delayed_refs.num_heads_ready;
2779 u64 csum_bytes = trans->transaction->delayed_refs.pending_csums;
2780 u64 num_dirty_bgs = trans->transaction->num_dirty_bgs;
2781 u64 num_bytes, num_dirty_bgs_bytes;
2782 int ret = 0;
2783
2784 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
2785 num_heads = heads_to_leaves(root, num_heads);
2786 if (num_heads > 1)
2787 num_bytes += (num_heads - 1) * root->nodesize;
2788 num_bytes <<= 1;
2789 num_bytes += btrfs_csum_bytes_to_leaves(root, csum_bytes) * root->nodesize;
2790 num_dirty_bgs_bytes = btrfs_calc_trans_metadata_size(root,
2791 num_dirty_bgs);
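/*
 * Rough shape of the estimate: one item's worth of reservation, plus a
 * node per extra leaf the heads may touch, doubled as a safety margin,
 * plus leaves for the pending csums and the reservation needed to
 * update the dirty block groups.
 */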
2792 global_rsv = &root->fs_info->global_block_rsv;
2793
2794 /*
2795 * If we can't allocate any more chunks, let's make sure we have _lots_ of
2796 * wiggle room since running delayed refs can create more delayed refs.
2797 */
2798 if (global_rsv->space_info->full) {
2799 num_dirty_bgs_bytes <<= 1;
2800 num_bytes <<= 1;
2801 }
2802
2803 spin_lock(&global_rsv->lock);
2804 if (global_rsv->reserved <= num_bytes + num_dirty_bgs_bytes)
2805 ret = 1;
2806 spin_unlock(&global_rsv->lock);
2807 return ret;
2808 }
2809
2810 int btrfs_should_throttle_delayed_refs(struct btrfs_trans_handle *trans,
2811 struct btrfs_root *root)
2812 {
2813 struct btrfs_fs_info *fs_info = root->fs_info;
2814 u64 num_entries =
2815 atomic_read(&trans->transaction->delayed_refs.num_entries);
2816 u64 avg_runtime;
2817 u64 val;
2818
2819 smp_mb();
2820 avg_runtime = fs_info->avg_delayed_ref_runtime;
2821 val = num_entries * avg_runtime;
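/*
 * Illustrative thresholds: with a 100us (100000ns) average runtime,
 * 10000 or more queued entries crosses the one-second mark below and
 * 5000 or more the half-second one.
 */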
2822 if (val >= NSEC_PER_SEC)
2823 return 1;
2824 if (val >= NSEC_PER_SEC / 2)
2825 return 2;
2826
2827 return btrfs_check_space_for_delayed_refs(trans, root);
2828 }
2829
2830 struct async_delayed_refs {
2831 struct btrfs_root *root;
2832 int count;
2833 int error;
2834 int sync;
2835 struct completion wait;
2836 struct btrfs_work work;
2837 };
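/*
 * Lifetime note: when sync is set the submitter waits on the completion
 * and frees the struct itself; otherwise the worker kfree()s it when
 * delayed_ref_async_start finishes.
 */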
2838
2839 static void delayed_ref_async_start(struct btrfs_work *work)
2840 {
2841 struct async_delayed_refs *async;
2842 struct btrfs_trans_handle *trans;
2843 int ret;
2844
2845 async = container_of(work, struct async_delayed_refs, work);
2846
2847 trans = btrfs_join_transaction(async->root);
2848 if (IS_ERR(trans)) {
2849 async->error = PTR_ERR(trans);
2850 goto done;
2851 }
2852
2853 /*
2854 * trans->sync means that when we call end_transaction, we won't
2855 * wait on delayed refs
2856 */
2857 trans->sync = true;
2858 ret = btrfs_run_delayed_refs(trans, async->root, async->count);
2859 if (ret)
2860 async->error = ret;
2861
2862 ret = btrfs_end_transaction(trans, async->root);
2863 if (ret && !async->error)
2864 async->error = ret;
2865 done:
2866 if (async->sync)
2867 complete(&async->wait);
2868 else
2869 kfree(async);
2870 }
2871
2872 int btrfs_async_run_delayed_refs(struct btrfs_root *root,
2873 unsigned long count, int wait)
2874 {
2875 struct async_delayed_refs *async;
2876 int ret;
2877
2878 async = kmalloc(sizeof(*async), GFP_NOFS);
2879 if (!async)
2880 return -ENOMEM;
2881
2882 async->root = root->fs_info->tree_root;
2883 async->count = count;
2884 async->error = 0;
2885 if (wait)
2886 async->sync = 1;
2887 else
2888 async->sync = 0;
2889 init_completion(&async->wait);
2890
2891 btrfs_init_work(&async->work, btrfs_extent_refs_helper,
2892 delayed_ref_async_start, NULL, NULL);
2893
2894 btrfs_queue_work(root->fs_info->extent_workers, &async->work);
2895
2896 if (wait) {
2897 wait_for_completion(&async->wait);
2898 ret = async->error;
2899 kfree(async);
2900 return ret;
2901 }
2902 return 0;
2903 }
2904
2905 /*
2906 * this starts processing the delayed reference count updates and
2907 * extent insertions we have queued up so far. count can be
2908 * 0, which means to process everything in the tree at the start
2909 * of the run (but not newly added entries), or it can be some target
2910 * number you'd like to process.
2911 *
2912 * Returns 0 on success or if called with an aborted transaction
2913 * Returns <0 on error and aborts the transaction
2914 */
2915 int btrfs_run_delayed_refs(struct btrfs_trans_handle *trans,
2916 struct btrfs_root *root, unsigned long count)
2917 {
2918 struct rb_node *node;
2919 struct btrfs_delayed_ref_root *delayed_refs;
2920 struct btrfs_delayed_ref_head *head;
2921 int ret;
2922 int run_all = count == (unsigned long)-1;
2923 bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
2924
2925 /* We'll clean this up in btrfs_cleanup_transaction */
2926 if (trans->aborted)
2927 return 0;
2928
2929 if (root->fs_info->creating_free_space_tree)
2930 return 0;
2931
2932 if (root == root->fs_info->extent_root)
2933 root = root->fs_info->tree_root;
2934
2935 delayed_refs = &trans->transaction->delayed_refs;
2936 if (count == 0)
2937 count = atomic_read(&delayed_refs->num_entries) * 2;
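/*
 * The doubling above is headroom: running a ref can queue more refs
 * (modifying the extent tree COWs its own blocks), so we allow up to
 * twice the currently queued entries before bailing out of
 * __btrfs_run_delayed_refs.
 */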
2938
2939 again:
2940 #ifdef SCRAMBLE_DELAYED_REFS
2941 delayed_refs->run_delayed_start = find_middle(&delayed_refs->root);
2942 #endif
2943 trans->can_flush_pending_bgs = false;
2944 ret = __btrfs_run_delayed_refs(trans, root, count);
2945 if (ret < 0) {
2946 btrfs_abort_transaction(trans, root, ret);
2947 return ret;
2948 }
2949
2950 if (run_all) {
2951 if (!list_empty(&trans->new_bgs))
2952 btrfs_create_pending_block_groups(trans, root);
2953
2954 spin_lock(&delayed_refs->lock);
2955 node = rb_first(&delayed_refs->href_root);
2956 if (!node) {
2957 spin_unlock(&delayed_refs->lock);
2958 goto out;
2959 }
2960 count = (unsigned long)-1;
2961
2962 while (node) {
2963 head = rb_entry(node, struct btrfs_delayed_ref_head,
2964 href_node);
2965 if (btrfs_delayed_ref_is_head(&head->node)) {
2966 struct btrfs_delayed_ref_node *ref;
2967
2968 ref = &head->node;
2969 atomic_inc(&ref->refs);
2970
2971 spin_unlock(&delayed_refs->lock);
2972 /*
2973 * Mutex was contended, block until it's
2974 * released and try again
2975 */
2976 mutex_lock(&head->mutex);
2977 mutex_unlock(&head->mutex);
2978
2979 btrfs_put_delayed_ref(ref);
2980 cond_resched();
2981 goto again;
2982 } else {
2983 WARN_ON(1);
2984 }
2985 node = rb_next(node);
2986 }
2987 spin_unlock(&delayed_refs->lock);
2988 cond_resched();
2989 goto again;
2990 }
2991 out:
2992 assert_qgroups_uptodate(trans);
2993 trans->can_flush_pending_bgs = can_flush_pending_bgs;
2994 return 0;
2995 }
2996
2997 int btrfs_set_disk_extent_flags(struct btrfs_trans_handle *trans,
2998 struct btrfs_root *root,
2999 u64 bytenr, u64 num_bytes, u64 flags,
3000 int level, int is_data)
3001 {
3002 struct btrfs_delayed_extent_op *extent_op;
3003 int ret;
3004
3005 extent_op = btrfs_alloc_delayed_extent_op();
3006 if (!extent_op)
3007 return -ENOMEM;
3008
3009 extent_op->flags_to_set = flags;
3010 extent_op->update_flags = true;
3011 extent_op->update_key = false;
3012 extent_op->is_data = is_data ? true : false;
3013 extent_op->level = level;
3014
3015 ret = btrfs_add_delayed_extent_op(root->fs_info, trans, bytenr,
3016 num_bytes, extent_op);
3017 if (ret)
3018 btrfs_free_delayed_extent_op(extent_op);
3019 return ret;
3020 }
3021
3022 static noinline int check_delayed_ref(struct btrfs_trans_handle *trans,
3023 struct btrfs_root *root,
3024 struct btrfs_path *path,
3025 u64 objectid, u64 offset, u64 bytenr)
3026 {
3027 struct btrfs_delayed_ref_head *head;
3028 struct btrfs_delayed_ref_node *ref;
3029 struct btrfs_delayed_data_ref *data_ref;
3030 struct btrfs_delayed_ref_root *delayed_refs;
3031 int ret = 0;
3032
3033 delayed_refs = &trans->transaction->delayed_refs;
3034 spin_lock(&delayed_refs->lock);
3035 head = btrfs_find_delayed_ref_head(trans, bytenr);
3036 if (!head) {
3037 spin_unlock(&delayed_refs->lock);
3038 return 0;
3039 }
3040
3041 if (!mutex_trylock(&head->mutex)) {
3042 atomic_inc(&head->node.refs);
3043 spin_unlock(&delayed_refs->lock);
3044
3045 btrfs_release_path(path);
3046
3047 /*
3048 * Mutex was contended, block until it's released and let
3049 * caller try again
3050 */
3051 mutex_lock(&head->mutex);
3052 mutex_unlock(&head->mutex);
3053 btrfs_put_delayed_ref(&head->node);
3054 return -EAGAIN;
3055 }
3056 spin_unlock(&delayed_refs->lock);
3057
3058 spin_lock(&head->lock);
3059 list_for_each_entry(ref, &head->ref_list, list) {
3060 /* If it's a shared ref we know a cross reference exists */
3061 if (ref->type != BTRFS_EXTENT_DATA_REF_KEY) {
3062 ret = 1;
3063 break;
3064 }
3065
3066 data_ref = btrfs_delayed_node_to_data_ref(ref);
3067
3068 /*
3069 * If our ref doesn't match the one we're currently looking at
3070 * then we have a cross reference.
3071 */
3072 if (data_ref->root != root->root_key.objectid ||
3073 data_ref->objectid != objectid ||
3074 data_ref->offset != offset) {
3075 ret = 1;
3076 break;
3077 }
3078 }
3079 spin_unlock(&head->lock);
3080 mutex_unlock(&head->mutex);
3081 return ret;
3082 }
3083
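/*
 * Check the committed extent tree for cross references. Returns 0 only
 * when the extent is referenced exactly once, through a single inline
 * EXTENT_DATA_REF matching this root/objectid/offset, and is younger
 * than the root's last snapshot; 1 when a cross ref may exist, -ENOENT
 * when no matching extent item is found.
 */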
3084 static noinline int check_committed_ref(struct btrfs_trans_handle *trans,
3085 struct btrfs_root *root,
3086 struct btrfs_path *path,
3087 u64 objectid, u64 offset, u64 bytenr)
3088 {
3089 struct btrfs_root *extent_root = root->fs_info->extent_root;
3090 struct extent_buffer *leaf;
3091 struct btrfs_extent_data_ref *ref;
3092 struct btrfs_extent_inline_ref *iref;
3093 struct btrfs_extent_item *ei;
3094 struct btrfs_key key;
3095 u32 item_size;
3096 int ret;
3097
3098 key.objectid = bytenr;
3099 key.offset = (u64)-1;
3100 key.type = BTRFS_EXTENT_ITEM_KEY;
3101
3102 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
3103 if (ret < 0)
3104 goto out;
3105 BUG_ON(ret == 0); /* Corruption */
3106
3107 ret = -ENOENT;
3108 if (path->slots[0] == 0)
3109 goto out;
3110
3111 path->slots[0]--;
3112 leaf = path->nodes[0];
3113 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
3114
3115 if (key.objectid != bytenr || key.type != BTRFS_EXTENT_ITEM_KEY)
3116 goto out;
3117
3118 ret = 1;
3119 item_size = btrfs_item_size_nr(leaf, path->slots[0]);
3120 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
3121 if (item_size < sizeof(*ei)) {
3122 WARN_ON(item_size != sizeof(struct btrfs_extent_item_v0));
3123 goto out;
3124 }
3125 #endif
3126 ei = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_item);
3127
3128 if (item_size != sizeof(*ei) +
3129 btrfs_extent_inline_ref_size(BTRFS_EXTENT_DATA_REF_KEY))
3130 goto out;
3131
3132 if (btrfs_extent_generation(leaf, ei) <=
3133 btrfs_root_last_snapshot(&root->root_item))
3134 goto out;
3135
3136 iref = (struct btrfs_extent_inline_ref *)(ei + 1);
3137 if (btrfs_extent_inline_ref_type(leaf, iref) !=
3138 BTRFS_EXTENT_DATA_REF_KEY)
3139 goto out;
3140
3141 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
3142 if (btrfs_extent_refs(leaf, ei) !=
3143 btrfs_extent_data_ref_count(leaf, ref) ||
3144 btrfs_extent_data_ref_root(leaf, ref) !=
3145 root->root_key.objectid ||
3146 btrfs_extent_data_ref_objectid(leaf, ref) != objectid ||
3147 btrfs_extent_data_ref_offset(leaf, ref) != offset)
3148 goto out;
3149
3150 ret = 0;
3151 out:
3152 return ret;
3153 }
3154
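/*
 * Combined check of the committed tree and the delayed ref queue.
 * Returns 0 when the given root/objectid/offset is provably the only
 * reference to the extent; any non-zero result means the caller must
 * assume the extent is shared.
 */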
3155 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
3156 struct btrfs_root *root,
3157 u64 objectid, u64 offset, u64 bytenr)
3158 {
3159 struct btrfs_path *path;
3160 int ret;
3161 int ret2;
3162
3163 path = btrfs_alloc_path();
3164 if (!path)
3165 return -ENOENT;
3166
3167 do {
3168 ret = check_committed_ref(trans, root, path, objectid,
3169 offset, bytenr);
3170 if (ret && ret != -ENOENT)
3171 goto out;
3172
3173 ret2 = check_delayed_ref(trans, root, path, objectid,
3174 offset, bytenr);
3175 } while (ret2 == -EAGAIN);
3176
3177 if (ret2 && ret2 != -ENOENT) {
3178 ret = ret2;
3179 goto out;
3180 }
3181
3182 if (ret != -ENOENT || ret2 != -ENOENT)
3183 ret = 0;
3184 out:
3185 btrfs_free_path(path);
3186 if (root->root_key.objectid == BTRFS_DATA_RELOC_TREE_OBJECTID)
3187 WARN_ON(ret > 0);
3188 return ret;
3189 }
3190
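/*
 * Add (inc=1) or drop (inc=0) one reference for everything a tree block
 * points to: the disk extents behind regular file extent items when buf
 * is a leaf, the child blocks when it is a node. full_backref selects
 * parent-based (shared) backrefs instead of tree-owner ones.
 */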
3191 static int __btrfs_mod_ref(struct btrfs_trans_handle *trans,
3192 struct btrfs_root *root,
3193 struct extent_buffer *buf,
3194 int full_backref, int inc)
3195 {
3196 u64 bytenr;
3197 u64 num_bytes;
3198 u64 parent;
3199 u64 ref_root;
3200 u32 nritems;
3201 struct btrfs_key key;
3202 struct btrfs_file_extent_item *fi;
3203 int i;
3204 int level;
3205 int ret = 0;
3206 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
3207 u64, u64, u64, u64, u64, u64);
3208
3209
3210 if (btrfs_test_is_dummy_root(root))
3211 return 0;
3212
3213 ref_root = btrfs_header_owner(buf);
3214 nritems = btrfs_header_nritems(buf);
3215 level = btrfs_header_level(buf);
3216
3217 if (!test_bit(BTRFS_ROOT_REF_COWS, &root->state) && level == 0)
3218 return 0;
3219
3220 if (inc)
3221 process_func = btrfs_inc_extent_ref;
3222 else
3223 process_func = btrfs_free_extent;
3224
3225 if (full_backref)
3226 parent = buf->start;
3227 else
3228 parent = 0;
3229
3230 for (i = 0; i < nritems; i++) {
3231 if (level == 0) {
3232 btrfs_item_key_to_cpu(buf, &key, i);
3233 if (key.type != BTRFS_EXTENT_DATA_KEY)
3234 continue;
3235 fi = btrfs_item_ptr(buf, i,
3236 struct btrfs_file_extent_item);
3237 if (btrfs_file_extent_type(buf, fi) ==
3238 BTRFS_FILE_EXTENT_INLINE)
3239 continue;
3240 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
3241 if (bytenr == 0)
3242 continue;
3243
3244 num_bytes = btrfs_file_extent_disk_num_bytes(buf, fi);
3245 key.offset -= btrfs_file_extent_offset(buf, fi);
3246 ret = process_func(trans, root, bytenr, num_bytes,
3247 parent, ref_root, key.objectid,
3248 key.offset);
3249 if (ret)
3250 goto fail;
3251 } else {
3252 bytenr = btrfs_node_blockptr(buf, i);
3253 num_bytes = root->nodesize;
3254 ret = process_func(trans, root, bytenr, num_bytes,
3255 parent, ref_root, level - 1, 0);
3256 if (ret)
3257 goto fail;
3258 }
3259 }
3260 return 0;
3261 fail:
3262 return ret;
3263 }
3264
3265 int btrfs_inc_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3266 struct extent_buffer *buf, int full_backref)
3267 {
3268 return __btrfs_mod_ref(trans, root, buf, full_backref, 1);
3269 }
3270
3271 int btrfs_dec_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
3272 struct extent_buffer *buf, int full_backref)
3273 {
3274 return __btrfs_mod_ref(trans, root, buf, full_backref, 0);
3275 }
3276
3277 static int write_one_cache_group(struct btrfs_trans_handle *trans,
3278 struct btrfs_root *root,
3279 struct btrfs_path *path,
3280 struct btrfs_block_group_cache *cache)
3281 {
3282 int ret;
3283 struct btrfs_root *extent_root = root->fs_info->extent_root;
3284 unsigned long bi;
3285 struct extent_buffer *leaf;
3286
3287 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
3288 if (ret) {
3289 if (ret > 0)
3290 ret = -ENOENT;
3291 goto fail;
3292 }
3293
3294 leaf = path->nodes[0];
3295 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
3296 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
3297 btrfs_mark_buffer_dirty(leaf);
3298 fail:
3299 btrfs_release_path(path);
3300 return ret;
3301
3302 }
3303
3304 static struct btrfs_block_group_cache *
3305 next_block_group(struct btrfs_root *root,
3306 struct btrfs_block_group_cache *cache)
3307 {
3308 struct rb_node *node;
3309
3310 spin_lock(&root->fs_info->block_group_cache_lock);
3311
3312 /* If our block group was removed, we need a full search. */
3313 if (RB_EMPTY_NODE(&cache->cache_node)) {
3314 const u64 next_bytenr = cache->key.objectid + cache->key.offset;
3315
3316 spin_unlock(&root->fs_info->block_group_cache_lock);
3317 btrfs_put_block_group(cache);
3318 cache = btrfs_lookup_first_block_group(root->fs_info,
3319 next_bytenr);
3320 return cache;
3321 }
3322 node = rb_next(&cache->cache_node);
3323 btrfs_put_block_group(cache);
3324 if (node) {
3325 cache = rb_entry(node, struct btrfs_block_group_cache,
3326 cache_node);
3327 btrfs_get_block_group(cache);
3328 } else
3329 cache = NULL;
3330 spin_unlock(&root->fs_info->block_group_cache_lock);
3331 return cache;
3332 }
3333
3334 static int cache_save_setup(struct btrfs_block_group_cache *block_group,
3335 struct btrfs_trans_handle *trans,
3336 struct btrfs_path *path)
3337 {
3338 struct btrfs_root *root = block_group->fs_info->tree_root;
3339 struct inode *inode = NULL;
3340 u64 alloc_hint = 0;
3341 int dcs = BTRFS_DC_ERROR;
3342 u64 num_pages = 0;
3343 int retries = 0;
3344 int ret = 0;
3345
3346 /*
3347 * If this block group is smaller than 100 megs, don't bother caching the
3348 * block group.
3349 */
3350 if (block_group->key.offset < (100 * SZ_1M)) {
3351 spin_lock(&block_group->lock);
3352 block_group->disk_cache_state = BTRFS_DC_WRITTEN;
3353 spin_unlock(&block_group->lock);
3354 return 0;
3355 }
3356
3357 if (trans->aborted)
3358 return 0;
3359 again:
3360 inode = lookup_free_space_inode(root, block_group, path);
3361 if (IS_ERR(inode) && PTR_ERR(inode) != -ENOENT) {
3362 ret = PTR_ERR(inode);
3363 btrfs_release_path(path);
3364 goto out;
3365 }
3366
3367 if (IS_ERR(inode)) {
3368 BUG_ON(retries);
3369 retries++;
3370
3371 if (block_group->ro)
3372 goto out_free;
3373
3374 ret = create_free_space_inode(root, trans, block_group, path);
3375 if (ret)
3376 goto out_free;
3377 goto again;
3378 }
3379
3380 /* We've already setup this transaction, go ahead and exit */
3381 if (block_group->cache_generation == trans->transid &&
3382 i_size_read(inode)) {
3383 dcs = BTRFS_DC_SETUP;
3384 goto out_put;
3385 }
3386
3387 /*
3388 * We want to set the generation to 0 so that if anything goes wrong
3389 * from here on out we know not to trust this cache when we load up next
3390 * time.
3391 */
3392 BTRFS_I(inode)->generation = 0;
3393 ret = btrfs_update_inode(trans, root, inode);
3394 if (ret) {
3395 /*
3396 * So theoretically we could recover from this: simply set the
3397 * super cache generation to 0 so we know to invalidate the
3398 * cache. But then we'd have to keep track of the block groups
3399 * that fail this way, so we'd know we _have_ to reset this cache
3400 * before the next commit or risk reading a stale cache. To
3401 * limit our exposure to horrible edge cases, let's just abort the
3402 * transaction; this only happens in really bad situations
3403 * anyway.
3404 */
3405 btrfs_abort_transaction(trans, root, ret);
3406 goto out_put;
3407 }
3408 WARN_ON(ret);
3409
3410 if (i_size_read(inode) > 0) {
3411 ret = btrfs_check_trunc_cache_free_space(root,
3412 &root->fs_info->global_block_rsv);
3413 if (ret)
3414 goto out_put;
3415
3416 ret = btrfs_truncate_free_space_cache(root, trans, NULL, inode);
3417 if (ret)
3418 goto out_put;
3419 }
3420
3421 spin_lock(&block_group->lock);
3422 if (block_group->cached != BTRFS_CACHE_FINISHED ||
3423 !btrfs_test_opt(root, SPACE_CACHE)) {
3424 /*
3425 * don't bother trying to write stuff out _if_
3426 * a) we're not cached,
3427 * b) we're mounted with the nospace_cache option.
3428 */
3429 dcs = BTRFS_DC_WRITTEN;
3430 spin_unlock(&block_group->lock);
3431 goto out_put;
3432 }
3433 spin_unlock(&block_group->lock);
3434
3435 /*
3436 * We hit an ENOSPC when setting up the cache in this transaction, just
3437 * skip doing the setup, we've already cleared the cache so we're safe.
3438 */
3439 if (test_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags)) {
3440 ret = -ENOSPC;
3441 goto out_put;
3442 }
3443
3444 /*
3445 * Try to preallocate enough space based on how big the block group is.
3446 * Keep in mind this has to include any pinned space which could end up
3447 * taking up quite a bit since it's not folded into the other space
3448 * cache.
3449 */
3450 num_pages = div_u64(block_group->key.offset, SZ_256M);
3451 if (!num_pages)
3452 num_pages = 1;
3453
3454 num_pages *= 16;
3455 num_pages *= PAGE_CACHE_SIZE;
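/*
 * Illustrative sizing, assuming 4K pages: a 1GB block group yields
 * div_u64(1G, 256M) = 4, so 4 * 16 = 64 pages, i.e. a 256K
 * preallocation for the cache file.
 */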
3456
3457 ret = btrfs_check_data_free_space(inode, 0, num_pages);
3458 if (ret)
3459 goto out_put;
3460
3461 ret = btrfs_prealloc_file_range_trans(inode, trans, 0, 0, num_pages,
3462 num_pages, num_pages,
3463 &alloc_hint);
3464 /*
3465 * Our cache requires contiguous chunks so that we don't modify a bunch
3466 * of metadata or split extents when writing the cache out, which means
3467 * we can hit ENOSPC if we are heavily fragmented, in addition to normal
3468 * out-of-space conditions. So if we hit this, just skip setting up any
3469 * other block groups for this transaction, maybe we'll unpin enough
3470 * space the next time around.
3471 */
3472 if (!ret)
3473 dcs = BTRFS_DC_SETUP;
3474 else if (ret == -ENOSPC)
3475 set_bit(BTRFS_TRANS_CACHE_ENOSPC, &trans->transaction->flags);
3476 btrfs_free_reserved_data_space(inode, 0, num_pages);
3477
3478 out_put:
3479 iput(inode);
3480 out_free:
3481 btrfs_release_path(path);
3482 out:
3483 spin_lock(&block_group->lock);
3484 if (!ret && dcs == BTRFS_DC_SETUP)
3485 block_group->cache_generation = trans->transid;
3486 block_group->disk_cache_state = dcs;
3487 spin_unlock(&block_group->lock);
3488
3489 return ret;
3490 }
3491
3492 int btrfs_setup_space_cache(struct btrfs_trans_handle *trans,
3493 struct btrfs_root *root)
3494 {
3495 struct btrfs_block_group_cache *cache, *tmp;
3496 struct btrfs_transaction *cur_trans = trans->transaction;
3497 struct btrfs_path *path;
3498
3499 if (list_empty(&cur_trans->dirty_bgs) ||
3500 !btrfs_test_opt(root, SPACE_CACHE))
3501 return 0;
3502
3503 path = btrfs_alloc_path();
3504 if (!path)
3505 return -ENOMEM;
3506
3507 /* Could add new block groups, use _safe just in case */
3508 list_for_each_entry_safe(cache, tmp, &cur_trans->dirty_bgs,
3509 dirty_list) {
3510 if (cache->disk_cache_state == BTRFS_DC_CLEAR)
3511 cache_save_setup(cache, trans, path);
3512 }
3513
3514 btrfs_free_path(path);
3515 return 0;
3516 }
3517
3518 /*
3519 * transaction commit does final block group cache writeback during a
3520 * critical section where nothing is allowed to change the FS. This is
3521 * required in order for the cache to actually match the block group,
3522 * but can introduce a lot of latency into the commit.
3523 *
3524 * So, btrfs_start_dirty_block_groups is here to kick off block group
3525 * cache IO. There's a chance we'll have to redo some of it if the
3526 * block group changes again during the commit, but it greatly reduces
3527 * the commit latency by getting rid of the easy block groups while
3528 * we're still allowing others to join the commit.
3529 */
3530 int btrfs_start_dirty_block_groups(struct btrfs_trans_handle *trans,
3531 struct btrfs_root *root)
3532 {
3533 struct btrfs_block_group_cache *cache;
3534 struct btrfs_transaction *cur_trans = trans->transaction;
3535 int ret = 0;
3536 int should_put;
3537 struct btrfs_path *path = NULL;
3538 LIST_HEAD(dirty);
3539 struct list_head *io = &cur_trans->io_bgs;
3540 int num_started = 0;
3541 int loops = 0;
3542
3543 spin_lock(&cur_trans->dirty_bgs_lock);
3544 if (list_empty(&cur_trans->dirty_bgs)) {
3545 spin_unlock(&cur_trans->dirty_bgs_lock);
3546 return 0;
3547 }
3548 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3549 spin_unlock(&cur_trans->dirty_bgs_lock);
3550
3551 again:
3552 /*
3553 * make sure all the block groups on our dirty list actually
3554 * exist
3555 */
3556 btrfs_create_pending_block_groups(trans, root);
3557
3558 if (!path) {
3559 path = btrfs_alloc_path();
3560 if (!path)
3561 return -ENOMEM;
3562 }
3563
3564 /*
3565 * cache_write_mutex is here only to save us from balance or automatic
3566 * removal of empty block groups deleting this block group while we are
3567 * writing out the cache
3568 */
3569 mutex_lock(&trans->transaction->cache_write_mutex);
3570 while (!list_empty(&dirty)) {
3571 cache = list_first_entry(&dirty,
3572 struct btrfs_block_group_cache,
3573 dirty_list);
3574 /*
3575 * this can happen if something re-dirties a block
3576 * group that is already under IO. Just wait for it to
3577 * finish and then do it all again
3578 */
3579 if (!list_empty(&cache->io_list)) {
3580 list_del_init(&cache->io_list);
3581 btrfs_wait_cache_io(root, trans, cache,
3582 &cache->io_ctl, path,
3583 cache->key.objectid);
3584 btrfs_put_block_group(cache);
3585 }
3586
3587
3588 /*
3589 * btrfs_wait_cache_io uses the cache->dirty_list to decide
3590 * if it should update the cache_state. Don't delete it from
3591 * the list until after we wait.
3592 *
3593 * Since we're not running in the commit critical section
3594 * we need the dirty_bgs_lock to protect from update_block_group
3595 */
3596 spin_lock(&cur_trans->dirty_bgs_lock);
3597 list_del_init(&cache->dirty_list);
3598 spin_unlock(&cur_trans->dirty_bgs_lock);
3599
3600 should_put = 1;
3601
3602 cache_save_setup(cache, trans, path);
3603
3604 if (cache->disk_cache_state == BTRFS_DC_SETUP) {
3605 cache->io_ctl.inode = NULL;
3606 ret = btrfs_write_out_cache(root, trans, cache, path);
3607 if (ret == 0 && cache->io_ctl.inode) {
3608 num_started++;
3609 should_put = 0;
3610
3611 /*
3612 * the cache_write_mutex is protecting
3613 * the io_list
3614 */
3615 list_add_tail(&cache->io_list, io);
3616 } else {
3617 /*
3618 * if we failed to write the cache, the
3619 * generation will be bad and life goes on
3620 */
3621 ret = 0;
3622 }
3623 }
3624 if (!ret) {
3625 ret = write_one_cache_group(trans, root, path, cache);
3626 /*
3627 * Our block group might still be attached to the list
3628 * of new block groups in the transaction handle of some
3629 * other task (struct btrfs_trans_handle->new_bgs). This
3630 * means its block group item isn't yet in the extent
3631 * tree. If this happens ignore the error, as we will
3632 * try again later in the critical section of the
3633 * transaction commit.
3634 */
3635 if (ret == -ENOENT) {
3636 ret = 0;
3637 spin_lock(&cur_trans->dirty_bgs_lock);
3638 if (list_empty(&cache->dirty_list)) {
3639 list_add_tail(&cache->dirty_list,
3640 &cur_trans->dirty_bgs);
3641 btrfs_get_block_group(cache);
3642 }
3643 spin_unlock(&cur_trans->dirty_bgs_lock);
3644 } else if (ret) {
3645 btrfs_abort_transaction(trans, root, ret);
3646 }
3647 }
3648
3649 /* if it's not on the io list, we need to put the block group */
3650 if (should_put)
3651 btrfs_put_block_group(cache);
3652
3653 if (ret)
3654 break;
3655
3656 /*
3657 * Avoid blocking other tasks for too long. It might even save
3658 * us from writing caches for block groups that are going to be
3659 * removed.
3660 */
3661 mutex_unlock(&trans->transaction->cache_write_mutex);
3662 mutex_lock(&trans->transaction->cache_write_mutex);
3663 }
3664 mutex_unlock(&trans->transaction->cache_write_mutex);
3665
3666 /*
3667 * go through delayed refs for all the stuff we've just kicked off
3668 * and then loop back (just once)
3669 */
3670 ret = btrfs_run_delayed_refs(trans, root, 0);
3671 if (!ret && loops == 0) {
3672 loops++;
3673 spin_lock(&cur_trans->dirty_bgs_lock);
3674 list_splice_init(&cur_trans->dirty_bgs, &dirty);
3675 /*
3676 * dirty_bgs_lock protects us from concurrent block group
3677 * deletes too (not just cache_write_mutex).
3678 */
3679 if (!list_empty(&dirty)) {
3680 spin_unlock(&cur_trans->dirty_bgs_lock);
3681 goto again;
3682 }
3683 spin_unlock(&cur_trans->dirty_bgs_lock);
3684 }
3685
3686 btrfs_free_path(path);
3687 return ret;
3688 }
3689
3690 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
3691 struct btrfs_root *root)
3692 {
3693 struct btrfs_block_group_cache *cache;
3694 struct btrfs_transaction *cur_trans = trans->transaction;
3695 int ret = 0;
3696 int should_put;
3697 struct btrfs_path *path;
3698 struct list_head *io = &cur_trans->io_bgs;
3699 int num_started = 0;
3700
3701 path = btrfs_alloc_path();
3702 if (!path)
3703 return -ENOMEM;
3704
3705 /*
3706 * Even though we are in the critical section of the transaction commit,
3707 * we can still have concurrent tasks adding elements to this
3708 * transaction's list of dirty block groups. These tasks correspond to
3709 * endio free space workers started when writeback finishes for a
3710 * space cache, which run inode.c:btrfs_finish_ordered_io(), and can
3711 * allocate new block groups as a result of COWing nodes of the root
3712 * tree when updating the free space inode. The writeback for the space
3713 * caches is triggered by an earlier call to
3714 * btrfs_start_dirty_block_groups() and iterations of the following
3715 * loop.
3716 * Also we want to do the cache_save_setup first and then run the
3717 * delayed refs to make sure we have the best chance at doing this all
3718 * in one shot.
3719 */
3720 spin_lock(&cur_trans->dirty_bgs_lock);
3721 while (!list_empty(&cur_trans->dirty_bgs)) {
3722 cache = list_first_entry(&cur_trans->dirty_bgs,
3723 struct btrfs_block_group_cache,
3724 dirty_list);
3725
3726 /*
3727 * this can happen if cache_save_setup re-dirties a block
3728 * group that is already under IO. Just wait for it to
3729 * finish and then do it all again
3730 */
3731 if (!list_empty(&cache->io_list)) {
3732 spin_unlock(&cur_trans->dirty_bgs_lock);
3733 list_del_init(&cache->io_list);
3734 btrfs_wait_cache_io(root, trans, cache,
3735 &cache->io_ctl, path,
3736 cache->key.objectid);
3737 btrfs_put_block_group(cache);
3738 spin_lock(&cur_trans->dirty_bgs_lock);
3739 }
3740
3741 /*
3742 * don't remove from the dirty list until after we've waited
3743 * on any pending IO
3744 */
3745 list_del_init(&cache->dirty_list);
3746 spin_unlock(&cur_trans->dirty_bgs_lock);
3747 should_put = 1;
3748
3749 cache_save_setup(cache, trans, path);
3750
3751 if (!ret)
3752 ret = btrfs_run_delayed_refs(trans, root, (unsigned long) -1);
3753
3754 if (!ret && cache->disk_cache_state == BTRFS_DC_SETUP) {
3755 cache->io_ctl.inode = NULL;
3756 ret = btrfs_write_out_cache(root, trans, cache, path);
3757 if (ret == 0 && cache->io_ctl.inode) {
3758 num_started++;
3759 should_put = 0;
3760 list_add_tail(&cache->io_list, io);
3761 } else {
3762 /*
3763 * if we failed to write the cache, the
3764 * generation will be bad and life goes on
3765 */
3766 ret = 0;
3767 }
3768 }
3769 if (!ret) {
3770 ret = write_one_cache_group(trans, root, path, cache);
3771 /*
3772 * One of the free space endio workers might have
3773 * created a new block group while updating a free space
3774 * cache's inode (at inode.c:btrfs_finish_ordered_io())
3775 * and hasn't released its transaction handle yet, in
3776 * which case the new block group is still attached to
3777 * its transaction handle and its creation has not
3778 * finished yet (no block group item in the extent tree
3779 * yet, etc). If this is the case, wait for all free
3780 * space endio workers to finish and retry. This is a
3781 * very rare case, so no need for a more efficient and
3782 * complex approach.
3783 */
3784 if (ret == -ENOENT) {
3785 wait_event(cur_trans->writer_wait,
3786 atomic_read(&cur_trans->num_writers) == 1);
3787 ret = write_one_cache_group(trans, root, path,
3788 cache);
3789 }
3790 if (ret)
3791 btrfs_abort_transaction(trans, root, ret);
3792 }
3793
3794 /* if it's not on the io list, we need to put the block group */
3795 if (should_put)
3796 btrfs_put_block_group(cache);
3797 spin_lock(&cur_trans->dirty_bgs_lock);
3798 }
3799 spin_unlock(&cur_trans->dirty_bgs_lock);
3800
3801 while (!list_empty(io)) {
3802 cache = list_first_entry(io, struct btrfs_block_group_cache,
3803 io_list);
3804 list_del_init(&cache->io_list);
3805 btrfs_wait_cache_io(root, trans, cache,
3806 &cache->io_ctl, path, cache->key.objectid);
3807 btrfs_put_block_group(cache);
3808 }
3809
3810 btrfs_free_path(path);
3811 return ret;
3812 }
3813
3814 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
3815 {
3816 struct btrfs_block_group_cache *block_group;
3817 int readonly = 0;
3818
3819 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
3820 if (!block_group || block_group->ro)
3821 readonly = 1;
3822 if (block_group)
3823 btrfs_put_block_group(block_group);
3824 return readonly;
3825 }
3826
3827 static const char *alloc_name(u64 flags)
3828 {
3829 switch (flags) {
3830 case BTRFS_BLOCK_GROUP_METADATA|BTRFS_BLOCK_GROUP_DATA:
3831 return "mixed";
3832 case BTRFS_BLOCK_GROUP_METADATA:
3833 return "metadata";
3834 case BTRFS_BLOCK_GROUP_DATA:
3835 return "data";
3836 case BTRFS_BLOCK_GROUP_SYSTEM:
3837 return "system";
3838 default:
3839 WARN_ON(1);
3840 return "invalid-combination";
3841 }
3842 }
3843
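/*
 * Find the space_info for @flags, or create it if this is the first block
 * group of that type, and fold @total_bytes/@bytes_used into its counters.
 * The on-disk factor is 2 for DUP/RAID1/RAID10 because those profiles
 * store every byte twice, and 1 for everything else.
 */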
3844 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
3845 u64 total_bytes, u64 bytes_used,
3846 struct btrfs_space_info **space_info)
3847 {
3848 struct btrfs_space_info *found;
3849 int i;
3850 int factor;
3851 int ret;
3852
3853 if (flags & (BTRFS_BLOCK_GROUP_DUP | BTRFS_BLOCK_GROUP_RAID1 |
3854 BTRFS_BLOCK_GROUP_RAID10))
3855 factor = 2;
3856 else
3857 factor = 1;
3858
3859 found = __find_space_info(info, flags);
3860 if (found) {
3861 spin_lock(&found->lock);
3862 found->total_bytes += total_bytes;
3863 found->disk_total += total_bytes * factor;
3864 found->bytes_used += bytes_used;
3865 found->disk_used += bytes_used * factor;
3866 if (total_bytes > 0)
3867 found->full = 0;
3868 spin_unlock(&found->lock);
3869 *space_info = found;
3870 return 0;
3871 }
3872 found = kzalloc(sizeof(*found), GFP_NOFS);
3873 if (!found)
3874 return -ENOMEM;
3875
3876 ret = percpu_counter_init(&found->total_bytes_pinned, 0, GFP_KERNEL);
3877 if (ret) {
3878 kfree(found);
3879 return ret;
3880 }
3881
3882 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++)
3883 INIT_LIST_HEAD(&found->block_groups[i]);
3884 init_rwsem(&found->groups_sem);
3885 spin_lock_init(&found->lock);
3886 found->flags = flags & BTRFS_BLOCK_GROUP_TYPE_MASK;
3887 found->total_bytes = total_bytes;
3888 found->disk_total = total_bytes * factor;
3889 found->bytes_used = bytes_used;
3890 found->disk_used = bytes_used * factor;
3891 found->bytes_pinned = 0;
3892 found->bytes_reserved = 0;
3893 found->bytes_readonly = 0;
3894 found->bytes_may_use = 0;
3895 found->full = 0;
3896 found->max_extent_size = 0;
3897 found->force_alloc = CHUNK_ALLOC_NO_FORCE;
3898 found->chunk_alloc = 0;
3899 found->flush = 0;
3900 init_waitqueue_head(&found->wait);
3901 INIT_LIST_HEAD(&found->ro_bgs);
3902
3903 ret = kobject_init_and_add(&found->kobj, &space_info_ktype,
3904 info->space_info_kobj, "%s",
3905 alloc_name(found->flags));
3906 if (ret) {
3907 kfree(found);
3908 return ret;
3909 }
3910
3911 *space_info = found;
3912 list_add_rcu(&found->list, &info->space_info);
3913 if (flags & BTRFS_BLOCK_GROUP_DATA)
3914 info->data_sinfo = found;
3915
3916 return ret;
3917 }
3918
3919 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
3920 {
3921 u64 extra_flags = chunk_to_extended(flags) &
3922 BTRFS_EXTENDED_PROFILE_MASK;
3923
3924 write_seqlock(&fs_info->profiles_lock);
3925 if (flags & BTRFS_BLOCK_GROUP_DATA)
3926 fs_info->avail_data_alloc_bits |= extra_flags;
3927 if (flags & BTRFS_BLOCK_GROUP_METADATA)
3928 fs_info->avail_metadata_alloc_bits |= extra_flags;
3929 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
3930 fs_info->avail_system_alloc_bits |= extra_flags;
3931 write_sequnlock(&fs_info->profiles_lock);
3932 }
3933
3934 /*
3935 * returns target flags in extended format or 0 if restripe for this
3936 * chunk_type is not in progress
3937 *
3938 * should be called with either volume_mutex or balance_lock held
3939 */
3940 static u64 get_restripe_target(struct btrfs_fs_info *fs_info, u64 flags)
3941 {
3942 struct btrfs_balance_control *bctl = fs_info->balance_ctl;
3943 u64 target = 0;
3944
3945 if (!bctl)
3946 return 0;
3947
3948 if (flags & BTRFS_BLOCK_GROUP_DATA &&
3949 bctl->data.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3950 target = BTRFS_BLOCK_GROUP_DATA | bctl->data.target;
3951 } else if (flags & BTRFS_BLOCK_GROUP_SYSTEM &&
3952 bctl->sys.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3953 target = BTRFS_BLOCK_GROUP_SYSTEM | bctl->sys.target;
3954 } else if (flags & BTRFS_BLOCK_GROUP_METADATA &&
3955 bctl->meta.flags & BTRFS_BALANCE_ARGS_CONVERT) {
3956 target = BTRFS_BLOCK_GROUP_METADATA | bctl->meta.target;
3957 }
3958
3959 return target;
3960 }
3961
3962 /*
3963 * @flags: available profiles in extended format (see ctree.h)
3964 *
3965 * Returns reduced profile in chunk format. If profile changing is in
3966 * progress (either running or paused), picks the target profile (if it's
3967 * already available), otherwise falls back to plain reducing.
3968 */
3969 static u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
3970 {
3971 u64 num_devices = root->fs_info->fs_devices->rw_devices;
3972 u64 target;
3973 u64 raid_type;
3974 u64 allowed = 0;
3975
3976 /*
3977 * see if restripe for this chunk_type is in progress, if so
3978 * try to reduce to the target profile
3979 */
3980 spin_lock(&root->fs_info->balance_lock);
3981 target = get_restripe_target(root->fs_info, flags);
3982 if (target) {
3983 /* pick target profile only if it's already available */
3984 if ((flags & target) & BTRFS_EXTENDED_PROFILE_MASK) {
3985 spin_unlock(&root->fs_info->balance_lock);
3986 return extended_to_chunk(target);
3987 }
3988 }
3989 spin_unlock(&root->fs_info->balance_lock);
3990
3991 /* First, mask out the RAID levels which aren't possible */
3992 for (raid_type = 0; raid_type < BTRFS_NR_RAID_TYPES; raid_type++) {
3993 if (num_devices >= btrfs_raid_array[raid_type].devs_min)
3994 allowed |= btrfs_raid_group[raid_type];
3995 }
3996 allowed &= flags;
3997
3998 if (allowed & BTRFS_BLOCK_GROUP_RAID6)
3999 allowed = BTRFS_BLOCK_GROUP_RAID6;
4000 else if (allowed & BTRFS_BLOCK_GROUP_RAID5)
4001 allowed = BTRFS_BLOCK_GROUP_RAID5;
4002 else if (allowed & BTRFS_BLOCK_GROUP_RAID10)
4003 allowed = BTRFS_BLOCK_GROUP_RAID10;
4004 else if (allowed & BTRFS_BLOCK_GROUP_RAID1)
4005 allowed = BTRFS_BLOCK_GROUP_RAID1;
4006 else if (allowed & BTRFS_BLOCK_GROUP_RAID0)
4007 allowed = BTRFS_BLOCK_GROUP_RAID0;
4008
4009 flags &= ~BTRFS_BLOCK_GROUP_PROFILE_MASK;
4010
4011 return extended_to_chunk(flags | allowed);
4012 }
4013
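/*
 * Fold the currently available allocation bits for this chunk type into
 * @orig_flags and reduce the result to a single profile. The seqlock read
 * loop retries if a concurrent writer updated the avail_*_alloc_bits under
 * profiles_lock while we were sampling them.
 */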
4014 static u64 get_alloc_profile(struct btrfs_root *root, u64 orig_flags)
4015 {
4016 unsigned seq;
4017 u64 flags;
4018
4019 do {
4020 flags = orig_flags;
4021 seq = read_seqbegin(&root->fs_info->profiles_lock);
4022
4023 if (flags & BTRFS_BLOCK_GROUP_DATA)
4024 flags |= root->fs_info->avail_data_alloc_bits;
4025 else if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
4026 flags |= root->fs_info->avail_system_alloc_bits;
4027 else if (flags & BTRFS_BLOCK_GROUP_METADATA)
4028 flags |= root->fs_info->avail_metadata_alloc_bits;
4029 } while (read_seqretry(&root->fs_info->profiles_lock, seq));
4030
4031 return btrfs_reduce_alloc_profile(root, flags);
4032 }
4033
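/*
 * Pick the allocation profile for a new chunk: data if @data is set,
 * system for the chunk root, metadata for everything else.
 *
 * Typical usage (as in btrfs_alloc_data_chunk_ondemand() below):
 *
 *	alloc_target = btrfs_get_alloc_profile(root, 1);
 *	ret = do_chunk_alloc(trans, root->fs_info->extent_root,
 *			     alloc_target, CHUNK_ALLOC_NO_FORCE);
 */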
4034 u64 btrfs_get_alloc_profile(struct btrfs_root *root, int data)
4035 {
4036 u64 flags;
4037 u64 ret;
4038
4039 if (data)
4040 flags = BTRFS_BLOCK_GROUP_DATA;
4041 else if (root == root->fs_info->chunk_root)
4042 flags = BTRFS_BLOCK_GROUP_SYSTEM;
4043 else
4044 flags = BTRFS_BLOCK_GROUP_METADATA;
4045
4046 ret = get_alloc_profile(root, flags);
4047 return ret;
4048 }
4049
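/*
 * Make sure @bytes of data space are reserved, sectorsize aligned. If the
 * data space_info falls short, first try to allocate a new data chunk,
 * then fall back to committing the transaction (up to twice) so that
 * pinned space gets freed. Returns 0 on success, -ENOSPC if nothing
 * helped, or another negative errno from the transaction code.
 */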
4050 int btrfs_alloc_data_chunk_ondemand(struct inode *inode, u64 bytes)
4051 {
4052 struct btrfs_space_info *data_sinfo;
4053 struct btrfs_root *root = BTRFS_I(inode)->root;
4054 struct btrfs_fs_info *fs_info = root->fs_info;
4055 u64 used;
4056 int ret = 0;
4057 int need_commit = 2;
4058 int have_pinned_space;
4059
4060 /* make sure bytes are sectorsize aligned */
4061 bytes = ALIGN(bytes, root->sectorsize);
4062
4063 if (btrfs_is_free_space_inode(inode)) {
4064 need_commit = 0;
4065 ASSERT(current->journal_info);
4066 }
4067
4068 data_sinfo = fs_info->data_sinfo;
4069 if (!data_sinfo)
4070 goto alloc;
4071
4072 again:
4073 /* make sure we have enough space to handle the data first */
4074 spin_lock(&data_sinfo->lock);
4075 used = data_sinfo->bytes_used + data_sinfo->bytes_reserved +
4076 data_sinfo->bytes_pinned + data_sinfo->bytes_readonly +
4077 data_sinfo->bytes_may_use;
4078
4079 if (used + bytes > data_sinfo->total_bytes) {
4080 struct btrfs_trans_handle *trans;
4081
4082 /*
4083 * if we don't have enough free bytes in this space then we need
4084 * to alloc a new chunk.
4085 */
4086 if (!data_sinfo->full) {
4087 u64 alloc_target;
4088
4089 data_sinfo->force_alloc = CHUNK_ALLOC_FORCE;
4090 spin_unlock(&data_sinfo->lock);
4091 alloc:
4092 alloc_target = btrfs_get_alloc_profile(root, 1);
4093 /*
4094 * It is ugly that we don't call a nolock join
4095 * transaction for the free space inode case here.
4096 * But it is safe because we only do the data space
4097 * reservation for the free space cache in the
4098 * transaction context: the common join transaction
4099 * just increases the counter of the current transaction
4100 * handle and doesn't try to acquire the trans_lock of
4101 * the fs.
4102 */
4103 trans = btrfs_join_transaction(root);
4104 if (IS_ERR(trans))
4105 return PTR_ERR(trans);
4106
4107 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4108 alloc_target,
4109 CHUNK_ALLOC_NO_FORCE);
4110 btrfs_end_transaction(trans, root);
4111 if (ret < 0) {
4112 if (ret != -ENOSPC)
4113 return ret;
4114 else {
4115 have_pinned_space = 1;
4116 goto commit_trans;
4117 }
4118 }
4119
4120 if (!data_sinfo)
4121 data_sinfo = fs_info->data_sinfo;
4122
4123 goto again;
4124 }
4125
4126 /*
4127 * If we don't have enough pinned space to deal with this
4128 * allocation, and no chunk was removed in the current
4129 * transaction, don't bother committing the transaction.
4130 */
4131 have_pinned_space = percpu_counter_compare(
4132 &data_sinfo->total_bytes_pinned,
4133 used + bytes - data_sinfo->total_bytes);
4134 spin_unlock(&data_sinfo->lock);
4135
4136 /* commit the current transaction and try again */
4137 commit_trans:
4138 if (need_commit &&
4139 !atomic_read(&root->fs_info->open_ioctl_trans)) {
4140 need_commit--;
4141
4142 if (need_commit > 0) {
4143 btrfs_start_delalloc_roots(fs_info, 0, -1);
4144 btrfs_wait_ordered_roots(fs_info, -1);
4145 }
4146
4147 trans = btrfs_join_transaction(root);
4148 if (IS_ERR(trans))
4149 return PTR_ERR(trans);
4150 if (have_pinned_space >= 0 ||
4151 test_bit(BTRFS_TRANS_HAVE_FREE_BGS,
4152 &trans->transaction->flags) ||
4153 need_commit > 0) {
4154 ret = btrfs_commit_transaction(trans, root);
4155 if (ret)
4156 return ret;
4157 /*
4158 * The cleaner kthread might still be doing iput
4159 * operations. Wait for it to finish so that
4160 * more space is released.
4161 */
4162 mutex_lock(&root->fs_info->cleaner_delayed_iput_mutex);
4163 mutex_unlock(&root->fs_info->cleaner_delayed_iput_mutex);
4164 goto again;
4165 } else {
4166 btrfs_end_transaction(trans, root);
4167 }
4168 }
4169
4170 trace_btrfs_space_reservation(root->fs_info,
4171 "space_info:enospc",
4172 data_sinfo->flags, bytes, 1);
4173 return -ENOSPC;
4174 }
4175 data_sinfo->bytes_may_use += bytes;
4176 trace_btrfs_space_reservation(root->fs_info, "space_info",
4177 data_sinfo->flags, bytes, 1);
4178 spin_unlock(&data_sinfo->lock);
4179
4180 return ret;
4181 }
4182
4183 /*
4184 * New check_data_free_space() with the ability for precise data reservation.
4185 * Will replace the old btrfs_check_data_free_space(), but for ease of
4186 * patch splitting, add the new function first and then replace it.
4187 */
4188 int btrfs_check_data_free_space(struct inode *inode, u64 start, u64 len)
4189 {
4190 struct btrfs_root *root = BTRFS_I(inode)->root;
4191 int ret;
4192
4193 /* align the range */
4194 len = round_up(start + len, root->sectorsize) -
4195 round_down(start, root->sectorsize);
4196 start = round_down(start, root->sectorsize);
4197
4198 ret = btrfs_alloc_data_chunk_ondemand(inode, len);
4199 if (ret < 0)
4200 return ret;
4201
4202 /*
4203 * Use the new btrfs_qgroup_reserve_data() to reserve precise data space
4204 *
4205 * TODO: Find a good method to avoid reserve data space for NOCOW
4206 * range, but don't impact performance on quota disable case.
4207 */
4208 ret = btrfs_qgroup_reserve_data(inode, start, len);
4209 return ret;
4210 }
4211
4212 /*
4213 * Called if we need to clear a data reservation for this inode
4214 * Normally in an error case.
4215 *
4216 * This one will *NOT* use the accurate qgroup reserved space API; it is
4217 * only for cases where we can't sleep and are sure it won't affect the
4218 * qgroup reserved space, like clear_bit_hook().
4219 */
4220 void btrfs_free_reserved_data_space_noquota(struct inode *inode, u64 start,
4221 u64 len)
4222 {
4223 struct btrfs_root *root = BTRFS_I(inode)->root;
4224 struct btrfs_space_info *data_sinfo;
4225
4226 /* Make sure the range is aligned to sectorsize */
4227 len = round_up(start + len, root->sectorsize) -
4228 round_down(start, root->sectorsize);
4229 start = round_down(start, root->sectorsize);
4230
4231 data_sinfo = root->fs_info->data_sinfo;
4232 spin_lock(&data_sinfo->lock);
4233 if (WARN_ON(data_sinfo->bytes_may_use < len))
4234 data_sinfo->bytes_may_use = 0;
4235 else
4236 data_sinfo->bytes_may_use -= len;
4237 trace_btrfs_space_reservation(root->fs_info, "space_info",
4238 data_sinfo->flags, len, 0);
4239 spin_unlock(&data_sinfo->lock);
4240 }
4241
4242 /*
4243 * Called if we need to clear a data reservation for this inode
4244 * Normally in an error case.
4245 *
4246 * This one will handle the per-inode data rsv map for the accurate
4247 * reserved space framework.
4248 */
4249 void btrfs_free_reserved_data_space(struct inode *inode, u64 start, u64 len)
4250 {
4251 btrfs_free_reserved_data_space_noquota(inode, start, len);
4252 btrfs_qgroup_free_data(inode, start, len);
4253 }
4254
4255 static void force_metadata_allocation(struct btrfs_fs_info *info)
4256 {
4257 struct list_head *head = &info->space_info;
4258 struct btrfs_space_info *found;
4259
4260 rcu_read_lock();
4261 list_for_each_entry_rcu(found, head, list) {
4262 if (found->flags & BTRFS_BLOCK_GROUP_METADATA)
4263 found->force_alloc = CHUNK_ALLOC_FORCE;
4264 }
4265 rcu_read_unlock();
4266 }
4267
4268 static inline u64 calc_global_rsv_need_space(struct btrfs_block_rsv *global)
4269 {
4270 return (global->size << 1);
4271 }
4272
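/*
 * Decide whether a new chunk of this type is worth allocating.
 * CHUNK_ALLOC_FORCE always says yes. For metadata the global reserve is
 * counted as allocated space. CHUNK_ALLOC_LIMITED keeps roughly 1% of the
 * FS size (at least 64M) free; otherwise we only allocate once the
 * existing chunks are about 80% used.
 */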
4273 static int should_alloc_chunk(struct btrfs_root *root,
4274 struct btrfs_space_info *sinfo, int force)
4275 {
4276 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4277 u64 num_bytes = sinfo->total_bytes - sinfo->bytes_readonly;
4278 u64 num_allocated = sinfo->bytes_used + sinfo->bytes_reserved;
4279 u64 thresh;
4280
4281 if (force == CHUNK_ALLOC_FORCE)
4282 return 1;
4283
4284 /*
4285 * We need to take into account the global rsv because for all intents
4286 * and purposes it's used space. Don't worry about locking the
4287 * global_rsv, it doesn't change except when the transaction commits.
4288 */
4289 if (sinfo->flags & BTRFS_BLOCK_GROUP_METADATA)
4290 num_allocated += calc_global_rsv_need_space(global_rsv);
4291
4292 /*
4293 * in limited mode, we want to have some free space up to
4294 * about 1% of the FS size.
4295 */
4296 if (force == CHUNK_ALLOC_LIMITED) {
4297 thresh = btrfs_super_total_bytes(root->fs_info->super_copy);
4298 thresh = max_t(u64, SZ_64M, div_factor_fine(thresh, 1));
4299
4300 if (num_bytes - num_allocated < thresh)
4301 return 1;
4302 }
4303
4304 if (num_allocated + SZ_2M < div_factor(num_bytes, 8))
4305 return 0;
4306 return 1;
4307 }
4308
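/*
 * How many device items a chunk of @type will touch: every rw device for
 * the striped/parity profiles, 2 for RAID1, and 1 for DUP or single.
 */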
4309 static u64 get_profile_num_devs(struct btrfs_root *root, u64 type)
4310 {
4311 u64 num_dev;
4312
4313 if (type & (BTRFS_BLOCK_GROUP_RAID10 |
4314 BTRFS_BLOCK_GROUP_RAID0 |
4315 BTRFS_BLOCK_GROUP_RAID5 |
4316 BTRFS_BLOCK_GROUP_RAID6))
4317 num_dev = root->fs_info->fs_devices->rw_devices;
4318 else if (type & BTRFS_BLOCK_GROUP_RAID1)
4319 num_dev = 2;
4320 else
4321 num_dev = 1; /* DUP or single */
4322
4323 return num_dev;
4324 }
4325
4326 /*
4327 * Reserve space in the system space_info for the device items and the
4328 * chunk item that must be updated or added when allocating or removing a
4329 * chunk of the given @type.
4330 */
4331 void check_system_chunk(struct btrfs_trans_handle *trans,
4332 struct btrfs_root *root,
4333 u64 type)
4334 {
4335 struct btrfs_space_info *info;
4336 u64 left;
4337 u64 thresh;
4338 int ret = 0;
4339 u64 num_devs;
4340
4341 /*
4342 * Needed because we can end up allocating a system chunk and need an
4343 * atomic and race-free space reservation in the chunk block reserve.
4344 */
4345 ASSERT(mutex_is_locked(&root->fs_info->chunk_mutex));
4346
4347 info = __find_space_info(root->fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
4348 spin_lock(&info->lock);
4349 left = info->total_bytes - info->bytes_used - info->bytes_pinned -
4350 info->bytes_reserved - info->bytes_readonly -
4351 info->bytes_may_use;
4352 spin_unlock(&info->lock);
4353
4354 num_devs = get_profile_num_devs(root, type);
4355
4356 /* num_devs device items to update and 1 chunk item to add or remove */
4357 thresh = btrfs_calc_trunc_metadata_size(root, num_devs) +
4358 btrfs_calc_trans_metadata_size(root, 1);
4359
4360 if (left < thresh && btrfs_test_opt(root, ENOSPC_DEBUG)) {
4361 btrfs_info(root->fs_info, "left=%llu, need=%llu, flags=%llu",
4362 left, thresh, type);
4363 dump_space_info(info, 0, 0);
4364 }
4365
4366 if (left < thresh) {
4367 u64 flags;
4368
4369 flags = btrfs_get_alloc_profile(root->fs_info->chunk_root, 0);
4370 /*
4371 * Ignore failure to create system chunk. We might end up not
4372 * needing it, as we might not need to COW all nodes/leafs from
4373 * the paths we visit in the chunk tree (they were already COWed
4374 * or created in the current transaction for example).
4375 */
4376 ret = btrfs_alloc_chunk(trans, root, flags);
4377 }
4378
4379 if (!ret) {
4380 ret = btrfs_block_rsv_add(root->fs_info->chunk_root,
4381 &root->fs_info->chunk_block_rsv,
4382 thresh, BTRFS_RESERVE_NO_FLUSH);
4383 if (!ret)
4384 trans->chunk_bytes_reserved += thresh;
4385 }
4386 }
4387
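/*
 * Allocate a chunk for @flags if needed, honoring the force levels
 * described at the top of this file. Returns 1 if a chunk was allocated,
 * 0 if no allocation was needed, -ENOSPC if the devices are full, or
 * another negative errno on failure.
 */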
4388 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
4389 struct btrfs_root *extent_root, u64 flags, int force)
4390 {
4391 struct btrfs_space_info *space_info;
4392 struct btrfs_fs_info *fs_info = extent_root->fs_info;
4393 int wait_for_alloc = 0;
4394 int ret = 0;
4395
4396 /* Don't re-enter if we're already allocating a chunk */
4397 if (trans->allocating_chunk)
4398 return -ENOSPC;
4399
4400 space_info = __find_space_info(extent_root->fs_info, flags);
4401 if (!space_info) {
4402 ret = update_space_info(extent_root->fs_info, flags,
4403 0, 0, &space_info);
4404 BUG_ON(ret); /* -ENOMEM */
4405 }
4406 BUG_ON(!space_info); /* Logic error */
4407
4408 again:
4409 spin_lock(&space_info->lock);
4410 if (force < space_info->force_alloc)
4411 force = space_info->force_alloc;
4412 if (space_info->full) {
4413 if (should_alloc_chunk(extent_root, space_info, force))
4414 ret = -ENOSPC;
4415 else
4416 ret = 0;
4417 spin_unlock(&space_info->lock);
4418 return ret;
4419 }
4420
4421 if (!should_alloc_chunk(extent_root, space_info, force)) {
4422 spin_unlock(&space_info->lock);
4423 return 0;
4424 } else if (space_info->chunk_alloc) {
4425 wait_for_alloc = 1;
4426 } else {
4427 space_info->chunk_alloc = 1;
4428 }
4429
4430 spin_unlock(&space_info->lock);
4431
4432 mutex_lock(&fs_info->chunk_mutex);
4433
4434 /*
4435 * The chunk_mutex is held throughout the entirety of a chunk
4436 * allocation, so once we've acquired the chunk_mutex we know that the
4437 * other guy is done and we need to recheck and see if we should
4438 * allocate.
4439 */
4440 if (wait_for_alloc) {
4441 mutex_unlock(&fs_info->chunk_mutex);
4442 wait_for_alloc = 0;
4443 goto again;
4444 }
4445
4446 trans->allocating_chunk = true;
4447
4448 /*
4449 * If we have mixed data/metadata chunks we want to make sure we keep
4450 * allocating mixed chunks instead of individual chunks.
4451 */
4452 if (btrfs_mixed_space_info(space_info))
4453 flags |= (BTRFS_BLOCK_GROUP_DATA | BTRFS_BLOCK_GROUP_METADATA);
4454
4455 /*
4456 * if we're doing a data chunk, go ahead and make sure that
4457 * we keep a reasonable number of metadata chunks allocated in the
4458 * FS as well.
4459 */
4460 if (flags & BTRFS_BLOCK_GROUP_DATA && fs_info->metadata_ratio) {
4461 fs_info->data_chunk_allocations++;
4462 if (!(fs_info->data_chunk_allocations %
4463 fs_info->metadata_ratio))
4464 force_metadata_allocation(fs_info);
4465 }
4466
4467 /*
4468 * Check if we have enough space in SYSTEM chunk because we may need
4469 * to update devices.
4470 */
4471 check_system_chunk(trans, extent_root, flags);
4472
4473 ret = btrfs_alloc_chunk(trans, extent_root, flags);
4474 trans->allocating_chunk = false;
4475
4476 spin_lock(&space_info->lock);
4477 if (ret < 0 && ret != -ENOSPC)
4478 goto out;
4479 if (ret)
4480 space_info->full = 1;
4481 else
4482 ret = 1;
4483
4484 space_info->force_alloc = CHUNK_ALLOC_NO_FORCE;
4485 out:
4486 space_info->chunk_alloc = 0;
4487 spin_unlock(&space_info->lock);
4488 mutex_unlock(&fs_info->chunk_mutex);
4489 /*
4490 * When we allocate a new chunk we reserve space in the chunk block
4491 * reserve to make sure we can COW nodes/leafs in the chunk tree or
4492 * add new nodes/leafs to it if we end up needing to do it when
4493 * inserting the chunk item and updating device items as part of the
4494 * second phase of chunk allocation, performed by
4495 * btrfs_finish_chunk_alloc(). So make sure we don't accumulate a
4496 * large number of new block groups to create in our transaction
4497 * handle's new_bgs list to avoid exhausting the chunk block reserve
4498 * in extreme cases - like having a single transaction create many new
4499 * block groups when starting to write out the free space caches of all
4500 * the block groups that were made dirty during the lifetime of the
4501 * transaction.
4502 */
4503 if (trans->can_flush_pending_bgs &&
4504 trans->chunk_bytes_reserved >= (u64)SZ_2M) {
4505 btrfs_create_pending_block_groups(trans, trans->root);
4506 btrfs_trans_release_chunk_metadata(trans);
4507 }
4508 return ret;
4509 }
4510
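/*
 * Decide whether a reservation of @bytes may overcommit the space_info.
 * We refuse outright if the used space plus the global reserve already
 * covers total_bytes. Otherwise we allow overcommitting by a slice of the
 * unallocated device space: 1/8 of it when we can flush everything, 1/2
 * when we can't (halved once more for DUP/RAID1/RAID10, since those
 * profiles consume two bytes of raw space per byte stored).
 */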
4511 static int can_overcommit(struct btrfs_root *root,
4512 struct btrfs_space_info *space_info, u64 bytes,
4513 enum btrfs_reserve_flush_enum flush)
4514 {
4515 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
4516 u64 profile = btrfs_get_alloc_profile(root, 0);
4517 u64 space_size;
4518 u64 avail;
4519 u64 used;
4520
4521 used = space_info->bytes_used + space_info->bytes_reserved +
4522 space_info->bytes_pinned + space_info->bytes_readonly;
4523
4524 /*
4525 * We only want to allow overcommitting if we have lots of actual space
4526 * free, but if we don't have enough space to handle the global reserve
4527 * space then we could end up having a real enospc problem when trying
4528 * to allocate a chunk or some other such important allocation.
4529 */
4530 spin_lock(&global_rsv->lock);
4531 space_size = calc_global_rsv_need_space(global_rsv);
4532 spin_unlock(&global_rsv->lock);
4533 if (used + space_size >= space_info->total_bytes)
4534 return 0;
4535
4536 used += space_info->bytes_may_use;
4537
4538 spin_lock(&root->fs_info->free_chunk_lock);
4539 avail = root->fs_info->free_chunk_space;
4540 spin_unlock(&root->fs_info->free_chunk_lock);
4541
4542 /*
4543 * If we have dup, raid1 or raid10 then only half of the free
4544 * space is actually usable. For raid56, the space info used
4545 * doesn't include the parity drive, so we don't have to
4546 * change the math
4547 */
4548 if (profile & (BTRFS_BLOCK_GROUP_DUP |
4549 BTRFS_BLOCK_GROUP_RAID1 |
4550 BTRFS_BLOCK_GROUP_RAID10))
4551 avail >>= 1;
4552
4553 /*
4554 * If we aren't flushing all things, let us overcommit up to
4555 * 1/2 of the space. If we can flush, don't let us overcommit
4556 * too much; let it overcommit up to 1/8 of the space.
4557 */
4558 if (flush == BTRFS_RESERVE_FLUSH_ALL)
4559 avail >>= 3;
4560 else
4561 avail >>= 1;
4562
4563 if (used + bytes < space_info->total_bytes + avail)
4564 return 1;
4565 return 0;
4566 }
4567
4568 static void btrfs_writeback_inodes_sb_nr(struct btrfs_root *root,
4569 unsigned long nr_pages, int nr_items)
4570 {
4571 struct super_block *sb = root->fs_info->sb;
4572
4573 if (down_read_trylock(&sb->s_umount)) {
4574 writeback_inodes_sb_nr(sb, nr_pages, WB_REASON_FS_FREE_SPACE);
4575 up_read(&sb->s_umount);
4576 } else {
4577 /*
4578 * We needn't worry about the filesystem going from r/w to r/o
4579 * even though we don't acquire the ->s_umount mutex, because the
4580 * filesystem guarantees that the delalloc inode list is empty
4581 * after the filesystem becomes read-only (all dirty pages are
4582 * written to the disk).
4583 */
4584 btrfs_start_delalloc_roots(root->fs_info, 0, nr_items);
4585 if (!current->journal_info)
4586 btrfs_wait_ordered_roots(root->fs_info, nr_items);
4587 }
4588 }
4589
4590 static inline int calc_reclaim_items_nr(struct btrfs_root *root, u64 to_reclaim)
4591 {
4592 u64 bytes;
4593 int nr;
4594
4595 bytes = btrfs_calc_trans_metadata_size(root, 1);
4596 nr = (int)div64_u64(to_reclaim, bytes);
4597 if (!nr)
4598 nr = 1;
4599 return nr;
4600 }
4601
4602 #define EXTENT_SIZE_PER_ITEM SZ_256K
4603
4604 /*
4605 * shrink metadata reservation for delalloc
4606 */
4607 static void shrink_delalloc(struct btrfs_root *root, u64 to_reclaim, u64 orig,
4608 bool wait_ordered)
4609 {
4610 struct btrfs_block_rsv *block_rsv;
4611 struct btrfs_space_info *space_info;
4612 struct btrfs_trans_handle *trans;
4613 u64 delalloc_bytes;
4614 u64 max_reclaim;
4615 long time_left;
4616 unsigned long nr_pages;
4617 int loops;
4618 int items;
4619 enum btrfs_reserve_flush_enum flush;
4620
4621 /* Calc the number of items we need to flush for this space reservation */
4622 items = calc_reclaim_items_nr(root, to_reclaim);
4623 to_reclaim = items * EXTENT_SIZE_PER_ITEM;
4624
4625 trans = (struct btrfs_trans_handle *)current->journal_info;
4626 block_rsv = &root->fs_info->delalloc_block_rsv;
4627 space_info = block_rsv->space_info;
4628
4629 delalloc_bytes = percpu_counter_sum_positive(
4630 &root->fs_info->delalloc_bytes);
4631 if (delalloc_bytes == 0) {
4632 if (trans)
4633 return;
4634 if (wait_ordered)
4635 btrfs_wait_ordered_roots(root->fs_info, items);
4636 return;
4637 }
4638
4639 loops = 0;
4640 while (delalloc_bytes && loops < 3) {
4641 max_reclaim = min(delalloc_bytes, to_reclaim);
4642 nr_pages = max_reclaim >> PAGE_CACHE_SHIFT;
4643 btrfs_writeback_inodes_sb_nr(root, nr_pages, items);
4644 /*
4645 * We need to wait for the async pages to actually start before
4646 * we do anything.
4647 */
4648 max_reclaim = atomic_read(&root->fs_info->async_delalloc_pages);
4649 if (!max_reclaim)
4650 goto skip_async;
4651
4652 if (max_reclaim <= nr_pages)
4653 max_reclaim = 0;
4654 else
4655 max_reclaim -= nr_pages;
4656
4657 wait_event(root->fs_info->async_submit_wait,
4658 atomic_read(&root->fs_info->async_delalloc_pages) <=
4659 (int)max_reclaim);
4660 skip_async:
4661 if (!trans)
4662 flush = BTRFS_RESERVE_FLUSH_ALL;
4663 else
4664 flush = BTRFS_RESERVE_NO_FLUSH;
4665 spin_lock(&space_info->lock);
4666 if (can_overcommit(root, space_info, orig, flush)) {
4667 spin_unlock(&space_info->lock);
4668 break;
4669 }
4670 spin_unlock(&space_info->lock);
4671
4672 loops++;
4673 if (wait_ordered && !trans) {
4674 btrfs_wait_ordered_roots(root->fs_info, items);
4675 } else {
4676 time_left = schedule_timeout_killable(1);
4677 if (time_left)
4678 break;
4679 }
4680 delalloc_bytes = percpu_counter_sum_positive(
4681 &root->fs_info->delalloc_bytes);
4682 }
4683 }
4684
4685 /**
4686 * may_commit_transaction - possibly commit the transaction if it's OK to
4687 * @root - the root we're allocating for
4688 * @bytes - the number of bytes we want to reserve
4689 * @force - force the commit
4690 *
4691 * This will check to make sure that committing the transaction will actually
4692 * get us somewhere and then commit the transaction if it does. Otherwise it
4693 * will return -ENOSPC.
4694 */
4695 static int may_commit_transaction(struct btrfs_root *root,
4696 struct btrfs_space_info *space_info,
4697 u64 bytes, int force)
4698 {
4699 struct btrfs_block_rsv *delayed_rsv = &root->fs_info->delayed_block_rsv;
4700 struct btrfs_trans_handle *trans;
4701
4702 trans = (struct btrfs_trans_handle *)current->journal_info;
4703 if (trans)
4704 return -EAGAIN;
4705
4706 if (force)
4707 goto commit;
4708
4709 /* See if there is enough pinned space to make this reservation */
4710 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4711 bytes) >= 0)
4712 goto commit;
4713
4714 /*
4715 * See if there is some space in the delayed insertion reservation for
4716 * this reservation.
4717 */
4718 if (space_info != delayed_rsv->space_info)
4719 return -ENOSPC;
4720
4721 spin_lock(&delayed_rsv->lock);
4722 if (percpu_counter_compare(&space_info->total_bytes_pinned,
4723 bytes - delayed_rsv->size) >= 0) {
4724 spin_unlock(&delayed_rsv->lock);
4725 return -ENOSPC;
4726 }
4727 spin_unlock(&delayed_rsv->lock);
4728
4729 commit:
4730 trans = btrfs_join_transaction(root);
4731 if (IS_ERR(trans))
4732 return -ENOSPC;
4733
4734 return btrfs_commit_transaction(trans, root);
4735 }
4736
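/*
 * The flushing escalation ladder: flush_space() is called with these
 * states in increasing order of cost, from flushing a few delayed items
 * all the way up to committing the transaction.
 */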
4737 enum flush_state {
4738 FLUSH_DELAYED_ITEMS_NR = 1,
4739 FLUSH_DELAYED_ITEMS = 2,
4740 FLUSH_DELALLOC = 3,
4741 FLUSH_DELALLOC_WAIT = 4,
4742 ALLOC_CHUNK = 5,
4743 COMMIT_TRANS = 6,
4744 };
4745
4746 static int flush_space(struct btrfs_root *root,
4747 struct btrfs_space_info *space_info, u64 num_bytes,
4748 u64 orig_bytes, int state)
4749 {
4750 struct btrfs_trans_handle *trans;
4751 int nr;
4752 int ret = 0;
4753
4754 switch (state) {
4755 case FLUSH_DELAYED_ITEMS_NR:
4756 case FLUSH_DELAYED_ITEMS:
4757 if (state == FLUSH_DELAYED_ITEMS_NR)
4758 nr = calc_reclaim_items_nr(root, num_bytes) * 2;
4759 else
4760 nr = -1;
4761
4762 trans = btrfs_join_transaction(root);
4763 if (IS_ERR(trans)) {
4764 ret = PTR_ERR(trans);
4765 break;
4766 }
4767 ret = btrfs_run_delayed_items_nr(trans, root, nr);
4768 btrfs_end_transaction(trans, root);
4769 break;
4770 case FLUSH_DELALLOC:
4771 case FLUSH_DELALLOC_WAIT:
4772 shrink_delalloc(root, num_bytes * 2, orig_bytes,
4773 state == FLUSH_DELALLOC_WAIT);
4774 break;
4775 case ALLOC_CHUNK:
4776 trans = btrfs_join_transaction(root);
4777 if (IS_ERR(trans)) {
4778 ret = PTR_ERR(trans);
4779 break;
4780 }
4781 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
4782 btrfs_get_alloc_profile(root, 0),
4783 CHUNK_ALLOC_NO_FORCE);
4784 btrfs_end_transaction(trans, root);
4785 if (ret == -ENOSPC)
4786 ret = 0;
4787 break;
4788 case COMMIT_TRANS:
4789 ret = may_commit_transaction(root, space_info, orig_bytes, 0);
4790 break;
4791 default:
4792 ret = -ENOSPC;
4793 break;
4794 }
4795
4796 return ret;
4797 }
4798
4799 static inline u64
4800 btrfs_calc_reclaim_metadata_size(struct btrfs_root *root,
4801 struct btrfs_space_info *space_info)
4802 {
4803 u64 used;
4804 u64 expected;
4805 u64 to_reclaim;
4806
4807 to_reclaim = min_t(u64, num_online_cpus() * SZ_1M, SZ_16M);
4808 spin_lock(&space_info->lock);
4809 if (can_overcommit(root, space_info, to_reclaim,
4810 BTRFS_RESERVE_FLUSH_ALL)) {
4811 to_reclaim = 0;
4812 goto out;
4813 }
4814
4815 used = space_info->bytes_used + space_info->bytes_reserved +
4816 space_info->bytes_pinned + space_info->bytes_readonly +
4817 space_info->bytes_may_use;
4818 if (can_overcommit(root, space_info, SZ_1M, BTRFS_RESERVE_FLUSH_ALL))
4819 expected = div_factor_fine(space_info->total_bytes, 95);
4820 else
4821 expected = div_factor_fine(space_info->total_bytes, 90);
4822
4823 if (used > expected)
4824 to_reclaim = used - expected;
4825 else
4826 to_reclaim = 0;
4827 to_reclaim = min(to_reclaim, space_info->bytes_may_use +
4828 space_info->bytes_reserved);
4829 out:
4830 spin_unlock(&space_info->lock);
4831
4832 return to_reclaim;
4833 }
4834
4835 static inline int need_do_async_reclaim(struct btrfs_space_info *space_info,
4836 struct btrfs_fs_info *fs_info, u64 used)
4837 {
4838 u64 thresh = div_factor_fine(space_info->total_bytes, 98);
4839
4840 /* If we're just plain full then async reclaim just slows us down. */
4841 if (space_info->bytes_used >= thresh)
4842 return 0;
4843
4844 return (used >= thresh && !btrfs_fs_closing(fs_info) &&
4845 !test_bit(BTRFS_FS_STATE_REMOUNTING, &fs_info->fs_state));
4846 }
4847
4848 static int btrfs_need_do_async_reclaim(struct btrfs_space_info *space_info,
4849 struct btrfs_fs_info *fs_info,
4850 int flush_state)
4851 {
4852 u64 used;
4853
4854 spin_lock(&space_info->lock);
4855 /*
4856 * We've run out of space and haven't gotten any free space via
4857 * flush_space, so don't bother doing async reclaim.
4858 */
4859 if (flush_state > COMMIT_TRANS && space_info->full) {
4860 spin_unlock(&space_info->lock);
4861 return 0;
4862 }
4863
4864 used = space_info->bytes_used + space_info->bytes_reserved +
4865 space_info->bytes_pinned + space_info->bytes_readonly +
4866 space_info->bytes_may_use;
4867 if (need_do_async_reclaim(space_info, fs_info, used)) {
4868 spin_unlock(&space_info->lock);
4869 return 1;
4870 }
4871 spin_unlock(&space_info->lock);
4872
4873 return 0;
4874 }
4875
4876 static void btrfs_async_reclaim_metadata_space(struct work_struct *work)
4877 {
4878 struct btrfs_fs_info *fs_info;
4879 struct btrfs_space_info *space_info;
4880 u64 to_reclaim;
4881 int flush_state;
4882
4883 fs_info = container_of(work, struct btrfs_fs_info, async_reclaim_work);
4884 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
4885
4886 to_reclaim = btrfs_calc_reclaim_metadata_size(fs_info->fs_root,
4887 space_info);
4888 if (!to_reclaim)
4889 return;
4890
4891 flush_state = FLUSH_DELAYED_ITEMS_NR;
4892 do {
4893 flush_space(fs_info->fs_root, space_info, to_reclaim,
4894 to_reclaim, flush_state);
4895 flush_state++;
4896 if (!btrfs_need_do_async_reclaim(space_info, fs_info,
4897 flush_state))
4898 return;
4899 } while (flush_state < COMMIT_TRANS);
4900 }
4901
4902 void btrfs_init_async_reclaim_work(struct work_struct *work)
4903 {
4904 INIT_WORK(work, btrfs_async_reclaim_metadata_space);
4905 }
4906
4907 /**
4908 * reserve_metadata_bytes - try to reserve bytes from the block_rsv's space
4909 * @root - the root we're allocating for
4910 * @block_rsv - the block_rsv we're allocating for
4911 * @orig_bytes - the number of bytes we want
4912 * @flush - whether or not we can flush to make our reservation
4913 *
4914 * This will reserve orig_bytes number of bytes from the space info associated
4915 * with the block_rsv. If there is not enough space it will make an attempt to
4916 * flush out space to make room. It will do this by flushing delalloc if
4917 * possible or committing the transaction. If flush is 0 then no attempts to
4918 * regain reservations will be made and this will fail if there is not enough
4919 * space already.
4920 */
4921 static int reserve_metadata_bytes(struct btrfs_root *root,
4922 struct btrfs_block_rsv *block_rsv,
4923 u64 orig_bytes,
4924 enum btrfs_reserve_flush_enum flush)
4925 {
4926 struct btrfs_space_info *space_info = block_rsv->space_info;
4927 u64 used;
4928 u64 num_bytes = orig_bytes;
4929 int flush_state = FLUSH_DELAYED_ITEMS_NR;
4930 int ret = 0;
4931 bool flushing = false;
4932
4933 again:
4934 ret = 0;
4935 spin_lock(&space_info->lock);
4936 /*
4937 * We only want to wait if somebody other than us is flushing and we
4938 * are actually allowed to flush all things.
4939 */
4940 while (flush == BTRFS_RESERVE_FLUSH_ALL && !flushing &&
4941 space_info->flush) {
4942 spin_unlock(&space_info->lock);
4943 /*
4944 * If we have a trans handle we can't wait because the flusher
4945 * may have to commit the transaction, which would mean we would
4946 * deadlock since we are waiting for the flusher to finish, but
4947 * hold the current transaction open.
4948 */
4949 if (current->journal_info)
4950 return -EAGAIN;
4951 ret = wait_event_killable(space_info->wait, !space_info->flush);
4952 /* Must have been killed, return */
4953 if (ret)
4954 return -EINTR;
4955
4956 spin_lock(&space_info->lock);
4957 }
4958
4959 ret = -ENOSPC;
4960 used = space_info->bytes_used + space_info->bytes_reserved +
4961 space_info->bytes_pinned + space_info->bytes_readonly +
4962 space_info->bytes_may_use;
4963
4964 /*
4965 * The idea here is that if we've not already over-reserved the
4966 * block group then we can go ahead and save our reservation first
4967 * and then start flushing if we need to. Otherwise, if we've
4968 * already overcommitted, let's start flushing stuff first and
4969 * then come back and try to make our reservation.
4970 */
4971 if (used <= space_info->total_bytes) {
4972 if (used + orig_bytes <= space_info->total_bytes) {
4973 space_info->bytes_may_use += orig_bytes;
4974 trace_btrfs_space_reservation(root->fs_info,
4975 "space_info", space_info->flags, orig_bytes, 1);
4976 ret = 0;
4977 } else {
4978 /*
4979 * Ok, set num_bytes to orig_bytes since we aren't
4980 * overcommitted; this way we only try to reclaim what
4981 * we need.
4982 */
4983 num_bytes = orig_bytes;
4984 }
4985 } else {
4986 /*
4987 * Ok, we're overcommitted; set num_bytes to the overcommitted
4988 * amount plus the amount of bytes that we need for this
4989 * reservation.
4990 */
4991 num_bytes = used - space_info->total_bytes +
4992 (orig_bytes * 2);
4993 }
4994
4995 if (ret && can_overcommit(root, space_info, orig_bytes, flush)) {
4996 space_info->bytes_may_use += orig_bytes;
4997 trace_btrfs_space_reservation(root->fs_info, "space_info",
4998 space_info->flags, orig_bytes,
4999 1);
5000 ret = 0;
5001 }
5002
5003 /*
5004 * Couldn't make our reservation, save our place so while we're trying
5005 * to reclaim space we can actually use it instead of somebody else
5006 * stealing it from us.
5007 *
5008 * We make the other tasks wait for the flush only when we can flush
5009 * all things.
5010 */
5011 if (ret && flush != BTRFS_RESERVE_NO_FLUSH) {
5012 flushing = true;
5013 space_info->flush = 1;
5014 } else if (!ret && space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
5015 used += orig_bytes;
5016 /*
5017 * We will do the space reservation dance during log replay,
5018 * which means we won't have fs_info->fs_root set, so don't do
5019 * the async reclaim as we will panic.
5020 */
5021 if (!root->fs_info->log_root_recovering &&
5022 need_do_async_reclaim(space_info, root->fs_info, used) &&
5023 !work_busy(&root->fs_info->async_reclaim_work))
5024 queue_work(system_unbound_wq,
5025 &root->fs_info->async_reclaim_work);
5026 }
5027 spin_unlock(&space_info->lock);
5028
5029 if (!ret || flush == BTRFS_RESERVE_NO_FLUSH)
5030 goto out;
5031
5032 ret = flush_space(root, space_info, num_bytes, orig_bytes,
5033 flush_state);
5034 flush_state++;
5035
5036 /*
5037 * If we are FLUSH_LIMIT, we can not flush delalloc, or a deadlock
5038 * would happen. So skip the delalloc flush.
5039 */
5040 if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
5041 (flush_state == FLUSH_DELALLOC ||
5042 flush_state == FLUSH_DELALLOC_WAIT))
5043 flush_state = ALLOC_CHUNK;
5044
5045 if (!ret)
5046 goto again;
5047 else if (flush == BTRFS_RESERVE_FLUSH_LIMIT &&
5048 flush_state < COMMIT_TRANS)
5049 goto again;
5050 else if (flush == BTRFS_RESERVE_FLUSH_ALL &&
5051 flush_state <= COMMIT_TRANS)
5052 goto again;
5053
5054 out:
5055 if (ret == -ENOSPC &&
5056 unlikely(root->orphan_cleanup_state == ORPHAN_CLEANUP_STARTED)) {
5057 struct btrfs_block_rsv *global_rsv =
5058 &root->fs_info->global_block_rsv;
5059
5060 if (block_rsv != global_rsv &&
5061 !block_rsv_use_bytes(global_rsv, orig_bytes))
5062 ret = 0;
5063 }
5064 if (ret == -ENOSPC)
5065 trace_btrfs_space_reservation(root->fs_info,
5066 "space_info:enospc",
5067 space_info->flags, orig_bytes, 1);
5068 if (flushing) {
5069 spin_lock(&space_info->lock);
5070 space_info->flush = 0;
5071 wake_up_all(&space_info->wait);
5072 spin_unlock(&space_info->lock);
5073 }
5074 return ret;
5075 }
5076
5077 static struct btrfs_block_rsv *get_block_rsv(
5078 const struct btrfs_trans_handle *trans,
5079 const struct btrfs_root *root)
5080 {
5081 struct btrfs_block_rsv *block_rsv = NULL;
5082
5083 if (test_bit(BTRFS_ROOT_REF_COWS, &root->state) ||
5084 (root == root->fs_info->csum_root && trans->adding_csums) ||
5085 (root == root->fs_info->uuid_root))
5086 block_rsv = trans->block_rsv;
5087
5088 if (!block_rsv)
5089 block_rsv = root->block_rsv;
5090
5091 if (!block_rsv)
5092 block_rsv = &root->fs_info->empty_block_rsv;
5093
5094 return block_rsv;
5095 }
5096
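/*
 * Take @num_bytes out of a block reserve's reserved space, or fail with
 * -ENOSPC if the reserve doesn't hold that much. The add_bytes helper
 * below is the inverse; with @update_size it grows the target size too.
 */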
5097 static int block_rsv_use_bytes(struct btrfs_block_rsv *block_rsv,
5098 u64 num_bytes)
5099 {
5100 int ret = -ENOSPC;
5101 spin_lock(&block_rsv->lock);
5102 if (block_rsv->reserved >= num_bytes) {
5103 block_rsv->reserved -= num_bytes;
5104 if (block_rsv->reserved < block_rsv->size)
5105 block_rsv->full = 0;
5106 ret = 0;
5107 }
5108 spin_unlock(&block_rsv->lock);
5109 return ret;
5110 }
5111
5112 static void block_rsv_add_bytes(struct btrfs_block_rsv *block_rsv,
5113 u64 num_bytes, int update_size)
5114 {
5115 spin_lock(&block_rsv->lock);
5116 block_rsv->reserved += num_bytes;
5117 if (update_size)
5118 block_rsv->size += num_bytes;
5119 else if (block_rsv->reserved >= block_rsv->size)
5120 block_rsv->full = 1;
5121 spin_unlock(&block_rsv->lock);
5122 }
5123
5124 int btrfs_cond_migrate_bytes(struct btrfs_fs_info *fs_info,
5125 struct btrfs_block_rsv *dest, u64 num_bytes,
5126 int min_factor)
5127 {
5128 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
5129 u64 min_bytes;
5130
5131 if (global_rsv->space_info != dest->space_info)
5132 return -ENOSPC;
5133
5134 spin_lock(&global_rsv->lock);
5135 min_bytes = div_factor(global_rsv->size, min_factor);
5136 if (global_rsv->reserved < min_bytes + num_bytes) {
5137 spin_unlock(&global_rsv->lock);
5138 return -ENOSPC;
5139 }
5140 global_rsv->reserved -= num_bytes;
5141 if (global_rsv->reserved < global_rsv->size)
5142 global_rsv->full = 0;
5143 spin_unlock(&global_rsv->lock);
5144
5145 block_rsv_add_bytes(dest, num_bytes, 1);
5146 return 0;
5147 }
5148
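/*
 * Shrink @block_rsv->size by @num_bytes ((u64)-1 means the whole reserve)
 * and hand any excess reserved bytes first to @dest (usually the global
 * reserve, until it is full) and then back to the space_info's
 * bytes_may_use.
 */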
5149 static void block_rsv_release_bytes(struct btrfs_fs_info *fs_info,
5150 struct btrfs_block_rsv *block_rsv,
5151 struct btrfs_block_rsv *dest, u64 num_bytes)
5152 {
5153 struct btrfs_space_info *space_info = block_rsv->space_info;
5154
5155 spin_lock(&block_rsv->lock);
5156 if (num_bytes == (u64)-1)
5157 num_bytes = block_rsv->size;
5158 block_rsv->size -= num_bytes;
5159 if (block_rsv->reserved >= block_rsv->size) {
5160 num_bytes = block_rsv->reserved - block_rsv->size;
5161 block_rsv->reserved = block_rsv->size;
5162 block_rsv->full = 1;
5163 } else {
5164 num_bytes = 0;
5165 }
5166 spin_unlock(&block_rsv->lock);
5167
5168 if (num_bytes > 0) {
5169 if (dest) {
5170 spin_lock(&dest->lock);
5171 if (!dest->full) {
5172 u64 bytes_to_add;
5173
5174 bytes_to_add = dest->size - dest->reserved;
5175 bytes_to_add = min(num_bytes, bytes_to_add);
5176 dest->reserved += bytes_to_add;
5177 if (dest->reserved >= dest->size)
5178 dest->full = 1;
5179 num_bytes -= bytes_to_add;
5180 }
5181 spin_unlock(&dest->lock);
5182 }
5183 if (num_bytes) {
5184 spin_lock(&space_info->lock);
5185 space_info->bytes_may_use -= num_bytes;
5186 trace_btrfs_space_reservation(fs_info, "space_info",
5187 space_info->flags, num_bytes, 0);
5188 spin_unlock(&space_info->lock);
5189 }
5190 }
5191 }
5192
5193 static int block_rsv_migrate_bytes(struct btrfs_block_rsv *src,
5194 struct btrfs_block_rsv *dst, u64 num_bytes)
5195 {
5196 int ret;
5197
5198 ret = block_rsv_use_bytes(src, num_bytes);
5199 if (ret)
5200 return ret;
5201
5202 block_rsv_add_bytes(dst, num_bytes, 1);
5203 return 0;
5204 }
5205
5206 void btrfs_init_block_rsv(struct btrfs_block_rsv *rsv, unsigned short type)
5207 {
5208 memset(rsv, 0, sizeof(*rsv));
5209 spin_lock_init(&rsv->lock);
5210 rsv->type = type;
5211 }
5212
5213 struct btrfs_block_rsv *btrfs_alloc_block_rsv(struct btrfs_root *root,
5214 unsigned short type)
5215 {
5216 struct btrfs_block_rsv *block_rsv;
5217 struct btrfs_fs_info *fs_info = root->fs_info;
5218
5219 block_rsv = kmalloc(sizeof(*block_rsv), GFP_NOFS);
5220 if (!block_rsv)
5221 return NULL;
5222
5223 btrfs_init_block_rsv(block_rsv, type);
5224 block_rsv->space_info = __find_space_info(fs_info,
5225 BTRFS_BLOCK_GROUP_METADATA);
5226 return block_rsv;
5227 }
5228
5229 void btrfs_free_block_rsv(struct btrfs_root *root,
5230 struct btrfs_block_rsv *rsv)
5231 {
5232 if (!rsv)
5233 return;
5234 btrfs_block_rsv_release(root, rsv, (u64)-1);
5235 kfree(rsv);
5236 }
5237
5238 void __btrfs_free_block_rsv(struct btrfs_block_rsv *rsv)
5239 {
5240 kfree(rsv);
5241 }
5242
5243 int btrfs_block_rsv_add(struct btrfs_root *root,
5244 struct btrfs_block_rsv *block_rsv, u64 num_bytes,
5245 enum btrfs_reserve_flush_enum flush)
5246 {
5247 int ret;
5248
5249 if (num_bytes == 0)
5250 return 0;
5251
5252 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5253 if (!ret) {
5254 block_rsv_add_bytes(block_rsv, num_bytes, 1);
5255 return 0;
5256 }
5257
5258 return ret;
5259 }
5260
5261 int btrfs_block_rsv_check(struct btrfs_root *root,
5262 struct btrfs_block_rsv *block_rsv, int min_factor)
5263 {
5264 u64 num_bytes = 0;
5265 int ret = -ENOSPC;
5266
5267 if (!block_rsv)
5268 return 0;
5269
5270 spin_lock(&block_rsv->lock);
5271 num_bytes = div_factor(block_rsv->size, min_factor);
5272 if (block_rsv->reserved >= num_bytes)
5273 ret = 0;
5274 spin_unlock(&block_rsv->lock);
5275
5276 return ret;
5277 }
5278
5279 int btrfs_block_rsv_refill(struct btrfs_root *root,
5280 struct btrfs_block_rsv *block_rsv, u64 min_reserved,
5281 enum btrfs_reserve_flush_enum flush)
5282 {
5283 u64 num_bytes = 0;
5284 int ret = -ENOSPC;
5285
5286 if (!block_rsv)
5287 return 0;
5288
5289 spin_lock(&block_rsv->lock);
5290 num_bytes = min_reserved;
5291 if (block_rsv->reserved >= num_bytes)
5292 ret = 0;
5293 else
5294 num_bytes -= block_rsv->reserved;
5295 spin_unlock(&block_rsv->lock);
5296
5297 if (!ret)
5298 return 0;
5299
5300 ret = reserve_metadata_bytes(root, block_rsv, num_bytes, flush);
5301 if (!ret) {
5302 block_rsv_add_bytes(block_rsv, num_bytes, 0);
5303 return 0;
5304 }
5305
5306 return ret;
5307 }
5308
5309 int btrfs_block_rsv_migrate(struct btrfs_block_rsv *src_rsv,
5310 struct btrfs_block_rsv *dst_rsv,
5311 u64 num_bytes)
5312 {
5313 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5314 }
5315
5316 void btrfs_block_rsv_release(struct btrfs_root *root,
5317 struct btrfs_block_rsv *block_rsv,
5318 u64 num_bytes)
5319 {
5320 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5321 if (global_rsv == block_rsv ||
5322 block_rsv->space_info != global_rsv->space_info)
5323 global_rsv = NULL;
5324 block_rsv_release_bytes(root->fs_info, block_rsv, global_rsv,
5325 num_bytes);
5326 }
5327
5328 /*
5329 * helper to calculate size of global block reservation.
5330 * the desired value is the sum of the space used by the extent tree,
5331 * checksum tree and root tree
5332 */
5333 static u64 calc_global_metadata_size(struct btrfs_fs_info *fs_info)
5334 {
5335 struct btrfs_space_info *sinfo;
5336 u64 num_bytes;
5337 u64 meta_used;
5338 u64 data_used;
5339 int csum_size = btrfs_super_csum_size(fs_info->super_copy);
5340
5341 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_DATA);
5342 spin_lock(&sinfo->lock);
5343 data_used = sinfo->bytes_used;
5344 spin_unlock(&sinfo->lock);
5345
5346 sinfo = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5347 spin_lock(&sinfo->lock);
5348 if (sinfo->flags & BTRFS_BLOCK_GROUP_DATA)
5349 data_used = 0;
5350 meta_used = sinfo->bytes_used;
5351 spin_unlock(&sinfo->lock);
5352
5353 num_bytes = (data_used >> fs_info->sb->s_blocksize_bits) *
5354 csum_size * 2;
5355 num_bytes += div_u64(data_used + meta_used, 50);
5356
5357 if (num_bytes * 3 > meta_used)
5358 num_bytes = div_u64(meta_used, 3);
5359
5360 return ALIGN(num_bytes, fs_info->extent_root->nodesize << 10);
5361 }
5362
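/*
 * A worked example of the sizing above, assuming 4K blocks
 * (s_blocksize_bits == 12), crc32c checksums (csum_size == 4) and 16K
 * nodes: with 1G of data and 1G of metadata used, the csum term is
 * (1G >> 12) * 4 * 2 = 2M, the 2% term adds 2G / 50 = ~41M, the
 * meta_used / 3 cap (~341M) doesn't kick in, and the total rounds up
 * to 48M once aligned to nodesize << 10 (16M).
 */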
5363 static void update_global_block_rsv(struct btrfs_fs_info *fs_info)
5364 {
5365 struct btrfs_block_rsv *block_rsv = &fs_info->global_block_rsv;
5366 struct btrfs_space_info *sinfo = block_rsv->space_info;
5367 u64 num_bytes;
5368
5369 num_bytes = calc_global_metadata_size(fs_info);
5370
5371 spin_lock(&sinfo->lock);
5372 spin_lock(&block_rsv->lock);
5373
5374 block_rsv->size = min_t(u64, num_bytes, SZ_512M);
5375
5376 num_bytes = sinfo->bytes_used + sinfo->bytes_pinned +
5377 sinfo->bytes_reserved + sinfo->bytes_readonly +
5378 sinfo->bytes_may_use;
5379
5380 if (sinfo->total_bytes > num_bytes) {
5381 num_bytes = sinfo->total_bytes - num_bytes;
5382 block_rsv->reserved += num_bytes;
5383 sinfo->bytes_may_use += num_bytes;
5384 trace_btrfs_space_reservation(fs_info, "space_info",
5385 sinfo->flags, num_bytes, 1);
5386 }
5387
5388 if (block_rsv->reserved >= block_rsv->size) {
5389 num_bytes = block_rsv->reserved - block_rsv->size;
5390 sinfo->bytes_may_use -= num_bytes;
5391 trace_btrfs_space_reservation(fs_info, "space_info",
5392 sinfo->flags, num_bytes, 0);
5393 block_rsv->reserved = block_rsv->size;
5394 block_rsv->full = 1;
5395 }
5396
5397 spin_unlock(&block_rsv->lock);
5398 spin_unlock(&sinfo->lock);
5399 }
5400
5401 static void init_global_block_rsv(struct btrfs_fs_info *fs_info)
5402 {
5403 struct btrfs_space_info *space_info;
5404
5405 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_SYSTEM);
5406 fs_info->chunk_block_rsv.space_info = space_info;
5407
5408 space_info = __find_space_info(fs_info, BTRFS_BLOCK_GROUP_METADATA);
5409 fs_info->global_block_rsv.space_info = space_info;
5410 fs_info->delalloc_block_rsv.space_info = space_info;
5411 fs_info->trans_block_rsv.space_info = space_info;
5412 fs_info->empty_block_rsv.space_info = space_info;
5413 fs_info->delayed_block_rsv.space_info = space_info;
5414
5415 fs_info->extent_root->block_rsv = &fs_info->global_block_rsv;
5416 fs_info->csum_root->block_rsv = &fs_info->global_block_rsv;
5417 fs_info->dev_root->block_rsv = &fs_info->global_block_rsv;
5418 fs_info->tree_root->block_rsv = &fs_info->global_block_rsv;
5419 if (fs_info->quota_root)
5420 fs_info->quota_root->block_rsv = &fs_info->global_block_rsv;
5421 fs_info->chunk_root->block_rsv = &fs_info->chunk_block_rsv;
5422
5423 update_global_block_rsv(fs_info);
5424 }
5425
5426 static void release_global_block_rsv(struct btrfs_fs_info *fs_info)
5427 {
5428 block_rsv_release_bytes(fs_info, &fs_info->global_block_rsv, NULL,
5429 (u64)-1);
5430 WARN_ON(fs_info->delalloc_block_rsv.size > 0);
5431 WARN_ON(fs_info->delalloc_block_rsv.reserved > 0);
5432 WARN_ON(fs_info->trans_block_rsv.size > 0);
5433 WARN_ON(fs_info->trans_block_rsv.reserved > 0);
5434 WARN_ON(fs_info->chunk_block_rsv.size > 0);
5435 WARN_ON(fs_info->chunk_block_rsv.reserved > 0);
5436 WARN_ON(fs_info->delayed_block_rsv.size > 0);
5437 WARN_ON(fs_info->delayed_block_rsv.reserved > 0);
5438 }
5439
5440 void btrfs_trans_release_metadata(struct btrfs_trans_handle *trans,
5441 struct btrfs_root *root)
5442 {
5443 if (!trans->block_rsv)
5444 return;
5445
5446 if (!trans->bytes_reserved)
5447 return;
5448
5449 trace_btrfs_space_reservation(root->fs_info, "transaction",
5450 trans->transid, trans->bytes_reserved, 0);
5451 btrfs_block_rsv_release(root, trans->block_rsv, trans->bytes_reserved);
5452 trans->bytes_reserved = 0;
5453 }
5454
5455 /*
5456 * To be called after all the new block groups attached to the transaction
5457 * handle have been created (btrfs_create_pending_block_groups()).
5458 */
5459 void btrfs_trans_release_chunk_metadata(struct btrfs_trans_handle *trans)
5460 {
5461 struct btrfs_fs_info *fs_info = trans->root->fs_info;
5462
5463 if (!trans->chunk_bytes_reserved)
5464 return;
5465
5466 WARN_ON_ONCE(!list_empty(&trans->new_bgs));
5467
5468 block_rsv_release_bytes(fs_info, &fs_info->chunk_block_rsv, NULL,
5469 trans->chunk_bytes_reserved);
5470 trans->chunk_bytes_reserved = 0;
5471 }
5472
5473 /* Can only return 0 or -ENOSPC */
5474 int btrfs_orphan_reserve_metadata(struct btrfs_trans_handle *trans,
5475 struct inode *inode)
5476 {
5477 struct btrfs_root *root = BTRFS_I(inode)->root;
5478 struct btrfs_block_rsv *src_rsv = get_block_rsv(trans, root);
5479 struct btrfs_block_rsv *dst_rsv = root->orphan_block_rsv;
5480
5481 /*
5482 * We need to hold space in order to delete our orphan item once we've
5483 * added it, so this takes the reservation so we can release it later
5484 * when we are truly done with the orphan item.
5485 */
5486 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5487 trace_btrfs_space_reservation(root->fs_info, "orphan",
5488 btrfs_ino(inode), num_bytes, 1);
5489 return block_rsv_migrate_bytes(src_rsv, dst_rsv, num_bytes);
5490 }
5491
5492 void btrfs_orphan_release_metadata(struct inode *inode)
5493 {
5494 struct btrfs_root *root = BTRFS_I(inode)->root;
5495 u64 num_bytes = btrfs_calc_trans_metadata_size(root, 1);
5496 trace_btrfs_space_reservation(root->fs_info, "orphan",
5497 btrfs_ino(inode), num_bytes, 0);
5498 btrfs_block_rsv_release(root, root->orphan_block_rsv, num_bytes);
5499 }
5500
5501 /*
5502 * btrfs_subvolume_reserve_metadata() - reserve space for subvolume operation
5503 * root: the root of the parent directory
5504 * rsv: block reservation
5505 * items: the number of items that we need to reserve space for
5506 * qgroup_reserved: used to return the reserved size in qgroup
5507 *
5508 * This function is used to reserve the space for snapshot/subvolume
5509 * creation and deletion. Those operations are different from the
5510 * common file/directory operations: they change two fs/file trees
5511 * and the root tree, and the number of items that the qgroup reserves
5512 * is different from the free space reservation. So we can not use
5513 * the space reservation mechanism in start_transaction().
5514 */
5515 int btrfs_subvolume_reserve_metadata(struct btrfs_root *root,
5516 struct btrfs_block_rsv *rsv,
5517 int items,
5518 u64 *qgroup_reserved,
5519 bool use_global_rsv)
5520 {
5521 u64 num_bytes;
5522 int ret;
5523 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
5524
5525 if (root->fs_info->quota_enabled) {
5526 /* One for parent inode, two for dir entries */
5527 num_bytes = 3 * root->nodesize;
5528 ret = btrfs_qgroup_reserve_meta(root, num_bytes);
5529 if (ret)
5530 return ret;
5531 } else {
5532 num_bytes = 0;
5533 }
5534
5535 *qgroup_reserved = num_bytes;
5536
5537 num_bytes = btrfs_calc_trans_metadata_size(root, items);
5538 rsv->space_info = __find_space_info(root->fs_info,
5539 BTRFS_BLOCK_GROUP_METADATA);
5540 ret = btrfs_block_rsv_add(root, rsv, num_bytes,
5541 BTRFS_RESERVE_FLUSH_ALL);
5542
5543 if (ret == -ENOSPC && use_global_rsv)
5544 ret = btrfs_block_rsv_migrate(global_rsv, rsv, num_bytes);
5545
5546 if (ret && *qgroup_reserved)
5547 btrfs_qgroup_free_meta(root, *qgroup_reserved);
5548
5549 return ret;
5550 }
5551
5552 void btrfs_subvolume_release_metadata(struct btrfs_root *root,
5553 struct btrfs_block_rsv *rsv,
5554 u64 qgroup_reserved)
5555 {
5556 btrfs_block_rsv_release(root, rsv, (u64)-1);
5557 }
5558
5559 /**
5560 * drop_outstanding_extent - drop an outstanding extent
5561 * @inode: the inode we're dropping the extent for
5562 * @num_bytes: the number of bytes we're releasing.
5563 *
5564 * This is called when we are freeing up an outstanding extent, either called
5565 * after an error or after an extent is written. This will return the number of
5566 * reserved extents that need to be freed. This must be called with
5567 * BTRFS_I(inode)->lock held.
5568 */
5569 static unsigned drop_outstanding_extent(struct inode *inode, u64 num_bytes)
5570 {
5571 unsigned drop_inode_space = 0;
5572 unsigned dropped_extents = 0;
5573 unsigned num_extents = 0;
5574
5575 num_extents = (unsigned)div64_u64(num_bytes +
5576 BTRFS_MAX_EXTENT_SIZE - 1,
5577 BTRFS_MAX_EXTENT_SIZE);
5578 ASSERT(num_extents);
5579 ASSERT(BTRFS_I(inode)->outstanding_extents >= num_extents);
5580 BTRFS_I(inode)->outstanding_extents -= num_extents;
5581
5582 if (BTRFS_I(inode)->outstanding_extents == 0 &&
5583 test_and_clear_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5584 &BTRFS_I(inode)->runtime_flags))
5585 drop_inode_space = 1;
5586
5587 /*
5588 * If we have more than or the same number of outstanding extents as we
5589 * have reserved then we need to leave the reserved extents count alone.
5590 */
5591 if (BTRFS_I(inode)->outstanding_extents >=
5592 BTRFS_I(inode)->reserved_extents)
5593 return drop_inode_space;
5594
5595 dropped_extents = BTRFS_I(inode)->reserved_extents -
5596 BTRFS_I(inode)->outstanding_extents;
5597 BTRFS_I(inode)->reserved_extents -= dropped_extents;
5598 return dropped_extents + drop_inode_space;
5599 }
5600
5601 /**
5602 * calc_csum_metadata_size - return the amount of metadata space that must
5603 * be reserved/freed for the given bytes.
5604 * @inode: the inode we're manipulating
5605 * @num_bytes: the number of bytes in question
5606 * @reserve: 1 if we are reserving space, 0 if we are freeing space
5607 *
5608 * This adjusts the number of csum_bytes in the inode and then returns the
5609 * correct amount of metadata that must either be reserved or freed. We
5610 * calculate how many checksums we can fit into one leaf and then divide the
5611 * number of bytes that will need to be checksummed by this value to figure out
5612 * how many checksums will be required. If we are adding bytes then the number
5613 * may go up and we will return the number of additional bytes that must be
5614 * reserved. If it is going down we will return the number of bytes that must
5615 * be freed.
5616 *
5617 * This must be called with BTRFS_I(inode)->lock held.
5618 */
5619 static u64 calc_csum_metadata_size(struct inode *inode, u64 num_bytes,
5620 int reserve)
5621 {
5622 struct btrfs_root *root = BTRFS_I(inode)->root;
5623 u64 old_csums, num_csums;
5624
5625 if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM &&
5626 BTRFS_I(inode)->csum_bytes == 0)
5627 return 0;
5628
5629 old_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5630 if (reserve)
5631 BTRFS_I(inode)->csum_bytes += num_bytes;
5632 else
5633 BTRFS_I(inode)->csum_bytes -= num_bytes;
5634 num_csums = btrfs_csum_bytes_to_leaves(root, BTRFS_I(inode)->csum_bytes);
5635
5636 /* No change, no need to reserve more */
5637 if (old_csums == num_csums)
5638 return 0;
5639
5640 if (reserve)
5641 return btrfs_calc_trans_metadata_size(root,
5642 num_csums - old_csums);
5643
5644 return btrfs_calc_trans_metadata_size(root, old_csums - num_csums);
5645 }
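/*
 * Worked example (leaf counts assumed for illustration): if the
 * inode's current csum_bytes needs 4 leaves of checksum items and
 * adding num_bytes pushes that to 6 leaves, reserving returns
 * btrfs_calc_trans_metadata_size(root, 2); only the delta in leaves
 * is charged, and a call that does not change the leaf count costs
 * nothing.
 */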
5646
5647 int btrfs_delalloc_reserve_metadata(struct inode *inode, u64 num_bytes)
5648 {
5649 struct btrfs_root *root = BTRFS_I(inode)->root;
5650 struct btrfs_block_rsv *block_rsv = &root->fs_info->delalloc_block_rsv;
5651 u64 to_reserve = 0;
5652 u64 csum_bytes;
5653 unsigned nr_extents = 0;
5654 int extra_reserve = 0;
5655 enum btrfs_reserve_flush_enum flush = BTRFS_RESERVE_FLUSH_ALL;
5656 int ret = 0;
5657 bool delalloc_lock = true;
5658 u64 to_free = 0;
5659 unsigned dropped;
5660
5661 /* If we are a free space inode we need to not flush since we will be in
5662 * the middle of a transaction commit. We also don't need the delalloc
5663 * mutex since we won't race with anybody. We need this mostly to make
5664 * lockdep shut its filthy mouth.
5665 */
5666 if (btrfs_is_free_space_inode(inode)) {
5667 flush = BTRFS_RESERVE_NO_FLUSH;
5668 delalloc_lock = false;
5669 }
5670
5671 if (flush != BTRFS_RESERVE_NO_FLUSH &&
5672 btrfs_transaction_in_commit(root->fs_info))
5673 schedule_timeout(1);
5674
5675 if (delalloc_lock)
5676 mutex_lock(&BTRFS_I(inode)->delalloc_mutex);
5677
5678 num_bytes = ALIGN(num_bytes, root->sectorsize);
5679
5680 spin_lock(&BTRFS_I(inode)->lock);
5681 nr_extents = (unsigned)div64_u64(num_bytes +
5682 BTRFS_MAX_EXTENT_SIZE - 1,
5683 BTRFS_MAX_EXTENT_SIZE);
5684 BTRFS_I(inode)->outstanding_extents += nr_extents;
5685 nr_extents = 0;
5686
5687 if (BTRFS_I(inode)->outstanding_extents >
5688 BTRFS_I(inode)->reserved_extents)
5689 nr_extents = BTRFS_I(inode)->outstanding_extents -
5690 BTRFS_I(inode)->reserved_extents;
5691
5692 /*
5693 * Add an item to reserve for updating the inode when we complete the
5694 * delalloc io.
5695 */
5696 if (!test_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5697 &BTRFS_I(inode)->runtime_flags)) {
5698 nr_extents++;
5699 extra_reserve = 1;
5700 }
5701
5702 to_reserve = btrfs_calc_trans_metadata_size(root, nr_extents);
5703 to_reserve += calc_csum_metadata_size(inode, num_bytes, 1);
5704 csum_bytes = BTRFS_I(inode)->csum_bytes;
5705 spin_unlock(&BTRFS_I(inode)->lock);
5706
5707 if (root->fs_info->quota_enabled) {
5708 ret = btrfs_qgroup_reserve_meta(root,
5709 nr_extents * root->nodesize);
5710 if (ret)
5711 goto out_fail;
5712 }
5713
5714 ret = reserve_metadata_bytes(root, block_rsv, to_reserve, flush);
5715 if (unlikely(ret)) {
5716 btrfs_qgroup_free_meta(root, nr_extents * root->nodesize);
5717 goto out_fail;
5718 }
5719
5720 spin_lock(&BTRFS_I(inode)->lock);
5721 if (extra_reserve) {
5722 set_bit(BTRFS_INODE_DELALLOC_META_RESERVED,
5723 &BTRFS_I(inode)->runtime_flags);
5724 nr_extents--;
5725 }
5726 BTRFS_I(inode)->reserved_extents += nr_extents;
5727 spin_unlock(&BTRFS_I(inode)->lock);
5728
5729 if (delalloc_lock)
5730 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5731
5732 if (to_reserve)
5733 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5734 btrfs_ino(inode), to_reserve, 1);
5735 block_rsv_add_bytes(block_rsv, to_reserve, 1);
5736
5737 return 0;
5738
5739 out_fail:
5740 spin_lock(&BTRFS_I(inode)->lock);
5741 dropped = drop_outstanding_extent(inode, num_bytes);
5742 /*
5743 * If the inodes csum_bytes is the same as the original
5744 * csum_bytes then we know we haven't raced with any free()ers
5745 * so we can just reduce our inodes csum bytes and carry on.
5746 */
5747 if (BTRFS_I(inode)->csum_bytes == csum_bytes) {
5748 calc_csum_metadata_size(inode, num_bytes, 0);
5749 } else {
5750 u64 orig_csum_bytes = BTRFS_I(inode)->csum_bytes;
5751 u64 bytes;
5752
5753 /*
5754 * This is tricky, but first we need to figure out how much we
5755 * freed by any freers that occurred during this
5756 * reservation, so we reset ->csum_bytes to the csum_bytes
5757 * before we dropped our lock, and then call the free for the
5758 * number of bytes that were freed while we were trying our
5759 * reservation.
5760 */
5761 bytes = csum_bytes - BTRFS_I(inode)->csum_bytes;
5762 BTRFS_I(inode)->csum_bytes = csum_bytes;
5763 to_free = calc_csum_metadata_size(inode, bytes, 0);
5764
5766 /*
5767 * Now we need to see how much we would have freed had we not
5768 * been making this reservation and our ->csum_bytes were not
5769 * artificially inflated.
5770 */
5771 BTRFS_I(inode)->csum_bytes = csum_bytes - num_bytes;
5772 bytes = csum_bytes - orig_csum_bytes;
5773 bytes = calc_csum_metadata_size(inode, bytes, 0);
5774
5775 /*
5776 * Now reset ->csum_bytes to what it should be. If bytes is
5777 * more than to_free then we would have freed more space had we
5778 * not had an artificially high ->csum_bytes, so we need to free
5779 * the remainder. If bytes is the same or less then we don't
5780 * need to do anything, the other freers did the correct
5781 * thing.
5782 */
5783 BTRFS_I(inode)->csum_bytes = orig_csum_bytes - num_bytes;
5784 if (bytes > to_free)
5785 to_free = bytes - to_free;
5786 else
5787 to_free = 0;
5788 }
5789 spin_unlock(&BTRFS_I(inode)->lock);
5790 if (dropped)
5791 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5792
5793 if (to_free) {
5794 btrfs_block_rsv_release(root, block_rsv, to_free);
5795 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5796 btrfs_ino(inode), to_free, 0);
5797 }
5798 if (delalloc_lock)
5799 mutex_unlock(&BTRFS_I(inode)->delalloc_mutex);
5800 return ret;
5801 }
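/*
 * Sketch of the out_fail csum recovery above with assumed numbers: say
 * we sampled csum_bytes = 14M (which includes our own 4M inflation)
 * and racing frees brought ->csum_bytes down to 12M, so the racers
 * freed 2M worth of csums.  to_free measures freeing those 2M against
 * the inflated 14M base, while the second calc_csum_metadata_size()
 * call measures the same 2M against the uninflated 10M base.  If the
 * uninflated measurement is larger, our inflation caused the racers
 * to release too little metadata, and we release the difference here.
 */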
5802
5803 /**
5804 * btrfs_delalloc_release_metadata - release a metadata reservation for an inode
5805 * @inode: the inode to release the reservation for
5806 * @num_bytes: the number of bytes we're releasing
5807 *
5808 * This will release the metadata reservation for an inode. This can be called
5809 * once we complete IO for a given set of bytes to release their metadata
5810 * reservations.
5811 */
5812 void btrfs_delalloc_release_metadata(struct inode *inode, u64 num_bytes)
5813 {
5814 struct btrfs_root *root = BTRFS_I(inode)->root;
5815 u64 to_free = 0;
5816 unsigned dropped;
5817
5818 num_bytes = ALIGN(num_bytes, root->sectorsize);
5819 spin_lock(&BTRFS_I(inode)->lock);
5820 dropped = drop_outstanding_extent(inode, num_bytes);
5821
5822 if (num_bytes)
5823 to_free = calc_csum_metadata_size(inode, num_bytes, 0);
5824 spin_unlock(&BTRFS_I(inode)->lock);
5825 if (dropped > 0)
5826 to_free += btrfs_calc_trans_metadata_size(root, dropped);
5827
5828 if (btrfs_test_is_dummy_root(root))
5829 return;
5830
5831 trace_btrfs_space_reservation(root->fs_info, "delalloc",
5832 btrfs_ino(inode), to_free, 0);
5833
5834 btrfs_block_rsv_release(root, &root->fs_info->delalloc_block_rsv,
5835 to_free);
5836 }
5837
5838 /**
5839 * btrfs_delalloc_reserve_space - reserve data and metadata space for
5840 * delalloc
5841 * @inode: inode we're writing to
5842 * @start: start range we are writing to
5843 * @len: how long the range we are writing to
5844 *
5845 * TODO: This function will finally replace old btrfs_delalloc_reserve_space()
5846 *
5847 * This will do the following things
5848 *
5849 * o reserve space in data space info for num bytes
5850 * and reserve the corresponding qgroup space
5851 * (Done in check_data_free_space)
5852 *
5853 * o reserve space for metadata space, based on the number of outstanding
5854 * extents and how much csums will be needed
5855 * also reserve metadata space in a per root over-reserve method.
5856 * o add to the inode's ->delalloc_bytes
5857 * o add it to the fs_info's delalloc inodes list.
5858 * (Above 3 all done in delalloc_reserve_metadata)
5859 *
5860 * Return 0 for success
5861 * Return <0 for error (-ENOSPC or -EDQUOT)
5862 */
5863 int btrfs_delalloc_reserve_space(struct inode *inode, u64 start, u64 len)
5864 {
5865 int ret;
5866
5867 ret = btrfs_check_data_free_space(inode, start, len);
5868 if (ret < 0)
5869 return ret;
5870 ret = btrfs_delalloc_reserve_metadata(inode, len);
5871 if (ret < 0)
5872 btrfs_free_reserved_data_space(inode, start, len);
5873 return ret;
5874 }
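/*
 * Illustrative pairing (hypothetical write path, not taken from this
 * file): a buffered write would reserve before dirtying pages and give
 * the reservation back if nothing was copied:
 *
 *	ret = btrfs_delalloc_reserve_space(inode, pos, write_bytes);
 *	if (ret)
 *		return ret;
 *	... copy data into the page cache ...
 *	if (copied == 0)
 *		btrfs_delalloc_release_space(inode, pos, write_bytes);
 */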
5875
5876 /**
5877 * btrfs_delalloc_release_space - release data and metadata space for delalloc
5878 * @inode: inode we're releasing space for
5879 * @start: start position of the space already reserved
5880 * @len: the len of the space already reserved
5881 *
5882 * This must be matched with a call to btrfs_delalloc_reserve_space. This is
5883 * called in the case that we don't need the metadata AND data reservations
5884 * anymore, e.g. if there is an error or we insert an inline extent.
5885 *
5886 * This function will release the metadata space that was not used and will
5887 * decrement ->delalloc_bytes and remove it from the fs_info delalloc_inodes
5888 * list if there are no delalloc bytes left.
5889 * Also it will handle the qgroup reserved space.
5890 */
5891 void btrfs_delalloc_release_space(struct inode *inode, u64 start, u64 len)
5892 {
5893 btrfs_delalloc_release_metadata(inode, len);
5894 btrfs_free_reserved_data_space(inode, start, len);
5895 }
5896
5897 static int update_block_group(struct btrfs_trans_handle *trans,
5898 struct btrfs_root *root, u64 bytenr,
5899 u64 num_bytes, int alloc)
5900 {
5901 struct btrfs_block_group_cache *cache = NULL;
5902 struct btrfs_fs_info *info = root->fs_info;
5903 u64 total = num_bytes;
5904 u64 old_val;
5905 u64 byte_in_group;
5906 int factor;
5907
5908 /* block accounting for super block */
5909 spin_lock(&info->delalloc_root_lock);
5910 old_val = btrfs_super_bytes_used(info->super_copy);
5911 if (alloc)
5912 old_val += num_bytes;
5913 else
5914 old_val -= num_bytes;
5915 btrfs_set_super_bytes_used(info->super_copy, old_val);
5916 spin_unlock(&info->delalloc_root_lock);
5917
5918 while (total) {
5919 cache = btrfs_lookup_block_group(info, bytenr);
5920 if (!cache)
5921 return -ENOENT;
5922 if (cache->flags & (BTRFS_BLOCK_GROUP_DUP |
5923 BTRFS_BLOCK_GROUP_RAID1 |
5924 BTRFS_BLOCK_GROUP_RAID10))
5925 factor = 2;
5926 else
5927 factor = 1;
5928 /*
5929 * If this block group has free space cache written out, we
5930 * need to make sure to load it if we are removing space. This
5931 * is because we need the unpinning stage to actually add the
5932 * space back to the block group, otherwise we will leak space.
5933 */
5934 if (!alloc && cache->cached == BTRFS_CACHE_NO)
5935 cache_block_group(cache, 1);
5936
5937 byte_in_group = bytenr - cache->key.objectid;
5938 WARN_ON(byte_in_group > cache->key.offset);
5939
5940 spin_lock(&cache->space_info->lock);
5941 spin_lock(&cache->lock);
5942
5943 if (btrfs_test_opt(root, SPACE_CACHE) &&
5944 cache->disk_cache_state < BTRFS_DC_CLEAR)
5945 cache->disk_cache_state = BTRFS_DC_CLEAR;
5946
5947 old_val = btrfs_block_group_used(&cache->item);
5948 num_bytes = min(total, cache->key.offset - byte_in_group);
5949 if (alloc) {
5950 old_val += num_bytes;
5951 btrfs_set_block_group_used(&cache->item, old_val);
5952 cache->reserved -= num_bytes;
5953 cache->space_info->bytes_reserved -= num_bytes;
5954 cache->space_info->bytes_used += num_bytes;
5955 cache->space_info->disk_used += num_bytes * factor;
5956 spin_unlock(&cache->lock);
5957 spin_unlock(&cache->space_info->lock);
5958 } else {
5959 old_val -= num_bytes;
5960 btrfs_set_block_group_used(&cache->item, old_val);
5961 cache->pinned += num_bytes;
5962 cache->space_info->bytes_pinned += num_bytes;
5963 cache->space_info->bytes_used -= num_bytes;
5964 cache->space_info->disk_used -= num_bytes * factor;
5965 spin_unlock(&cache->lock);
5966 spin_unlock(&cache->space_info->lock);
5967
5968 set_extent_dirty(info->pinned_extents,
5969 bytenr, bytenr + num_bytes - 1,
5970 GFP_NOFS | __GFP_NOFAIL);
5971 }
5972
5973 spin_lock(&trans->transaction->dirty_bgs_lock);
5974 if (list_empty(&cache->dirty_list)) {
5975 list_add_tail(&cache->dirty_list,
5976 &trans->transaction->dirty_bgs);
5977 trans->transaction->num_dirty_bgs++;
5978 btrfs_get_block_group(cache);
5979 }
5980 spin_unlock(&trans->transaction->dirty_bgs_lock);
5981
5982 /*
5983 * No longer have used bytes in this block group, queue it for
5984 * deletion. We do this after adding the block group to the
5985 * dirty list to avoid races between cleaner kthread and space
5986 * cache writeout.
5987 */
5988 if (!alloc && old_val == 0) {
5989 spin_lock(&info->unused_bgs_lock);
5990 if (list_empty(&cache->bg_list)) {
5991 btrfs_get_block_group(cache);
5992 list_add_tail(&cache->bg_list,
5993 &info->unused_bgs);
5994 }
5995 spin_unlock(&info->unused_bgs_lock);
5996 }
5997
5998 btrfs_put_block_group(cache);
5999 total -= num_bytes;
6000 bytenr += num_bytes;
6001 }
6002 return 0;
6003 }
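/*
 * Example of the factor accounting above (numbers assumed): allocating
 * 1M from a RAID1 block group moves 1M from ->reserved to ->bytes_used
 * but adds 2M to ->disk_used, since DUP/RAID1/RAID10 store two copies
 * of every byte; single and RAID0 groups use factor 1.
 */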
6004
6005 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
6006 {
6007 struct btrfs_block_group_cache *cache;
6008 u64 bytenr;
6009
6010 spin_lock(&root->fs_info->block_group_cache_lock);
6011 bytenr = root->fs_info->first_logical_byte;
6012 spin_unlock(&root->fs_info->block_group_cache_lock);
6013
6014 if (bytenr < (u64)-1)
6015 return bytenr;
6016
6017 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
6018 if (!cache)
6019 return 0;
6020
6021 bytenr = cache->key.objectid;
6022 btrfs_put_block_group(cache);
6023
6024 return bytenr;
6025 }
6026
6027 static int pin_down_extent(struct btrfs_root *root,
6028 struct btrfs_block_group_cache *cache,
6029 u64 bytenr, u64 num_bytes, int reserved)
6030 {
6031 spin_lock(&cache->space_info->lock);
6032 spin_lock(&cache->lock);
6033 cache->pinned += num_bytes;
6034 cache->space_info->bytes_pinned += num_bytes;
6035 if (reserved) {
6036 cache->reserved -= num_bytes;
6037 cache->space_info->bytes_reserved -= num_bytes;
6038 }
6039 spin_unlock(&cache->lock);
6040 spin_unlock(&cache->space_info->lock);
6041
6042 set_extent_dirty(root->fs_info->pinned_extents, bytenr,
6043 bytenr + num_bytes - 1, GFP_NOFS | __GFP_NOFAIL);
6044 if (reserved)
6045 trace_btrfs_reserved_extent_free(root, bytenr, num_bytes);
6046 return 0;
6047 }
6048
6049 /*
6050 * this function must be called within a transaction
6051 */
6052 int btrfs_pin_extent(struct btrfs_root *root,
6053 u64 bytenr, u64 num_bytes, int reserved)
6054 {
6055 struct btrfs_block_group_cache *cache;
6056
6057 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6058 BUG_ON(!cache); /* Logic error */
6059
6060 pin_down_extent(root, cache, bytenr, num_bytes, reserved);
6061
6062 btrfs_put_block_group(cache);
6063 return 0;
6064 }
6065
6066 /*
6067 * this function must be called within transaction
6068 */
6069 int btrfs_pin_extent_for_log_replay(struct btrfs_root *root,
6070 u64 bytenr, u64 num_bytes)
6071 {
6072 struct btrfs_block_group_cache *cache;
6073 int ret;
6074
6075 cache = btrfs_lookup_block_group(root->fs_info, bytenr);
6076 if (!cache)
6077 return -EINVAL;
6078
6079 /*
6080 * pull in the free space cache (if any) so that our pin
6081 * removes the free space from the cache. We have load_only set
6082 * to one because the slow code to read in the free extents does check
6083 * the pinned extents.
6084 */
6085 cache_block_group(cache, 1);
6086
6087 pin_down_extent(root, cache, bytenr, num_bytes, 0);
6088
6089 /* remove us from the free space cache (if we're there at all) */
6090 ret = btrfs_remove_free_space(cache, bytenr, num_bytes);
6091 btrfs_put_block_group(cache);
6092 return ret;
6093 }
6094
6095 static int __exclude_logged_extent(struct btrfs_root *root, u64 start, u64 num_bytes)
6096 {
6097 int ret;
6098 struct btrfs_block_group_cache *block_group;
6099 struct btrfs_caching_control *caching_ctl;
6100
6101 block_group = btrfs_lookup_block_group(root->fs_info, start);
6102 if (!block_group)
6103 return -EINVAL;
6104
6105 cache_block_group(block_group, 0);
6106 caching_ctl = get_caching_control(block_group);
6107
6108 if (!caching_ctl) {
6109 /* Logic error */
6110 BUG_ON(!block_group_cache_done(block_group));
6111 ret = btrfs_remove_free_space(block_group, start, num_bytes);
6112 } else {
6113 mutex_lock(&caching_ctl->mutex);
6114
6115 if (start >= caching_ctl->progress) {
6116 ret = add_excluded_extent(root, start, num_bytes);
6117 } else if (start + num_bytes <= caching_ctl->progress) {
6118 ret = btrfs_remove_free_space(block_group,
6119 start, num_bytes);
6120 } else {
6121 num_bytes = caching_ctl->progress - start;
6122 ret = btrfs_remove_free_space(block_group,
6123 start, num_bytes);
6124 if (ret)
6125 goto out_lock;
6126
6127 num_bytes = (start + num_bytes) -
6128 caching_ctl->progress;
6129 start = caching_ctl->progress;
6130 ret = add_excluded_extent(root, start, num_bytes);
6131 }
6132 out_lock:
6133 mutex_unlock(&caching_ctl->mutex);
6134 put_caching_control(caching_ctl);
6135 }
6136 btrfs_put_block_group(block_group);
6137 return ret;
6138 }
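/*
 * Example of the progress split above (offsets assumed): with
 * caching_ctl->progress at 100M, excluding [96M, 104M) is handled in
 * two halves: [96M, 100M) has already been scanned into the free space
 * cache and is removed from it directly, while [100M, 104M) has not
 * been scanned yet and is recorded as an excluded extent so the
 * caching thread will skip it.
 */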
6139
6140 int btrfs_exclude_logged_extents(struct btrfs_root *log,
6141 struct extent_buffer *eb)
6142 {
6143 struct btrfs_file_extent_item *item;
6144 struct btrfs_key key;
6145 int found_type;
6146 int i;
6147
6148 if (!btrfs_fs_incompat(log->fs_info, MIXED_GROUPS))
6149 return 0;
6150
6151 for (i = 0; i < btrfs_header_nritems(eb); i++) {
6152 btrfs_item_key_to_cpu(eb, &key, i);
6153 if (key.type != BTRFS_EXTENT_DATA_KEY)
6154 continue;
6155 item = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
6156 found_type = btrfs_file_extent_type(eb, item);
6157 if (found_type == BTRFS_FILE_EXTENT_INLINE)
6158 continue;
6159 if (btrfs_file_extent_disk_bytenr(eb, item) == 0)
6160 continue;
6161 key.objectid = btrfs_file_extent_disk_bytenr(eb, item);
6162 key.offset = btrfs_file_extent_disk_num_bytes(eb, item);
6163 __exclude_logged_extent(log, key.objectid, key.offset);
6164 }
6165
6166 return 0;
6167 }
6168
6169 /**
6170 * btrfs_update_reserved_bytes - update the block_group and space info counters
6171 * @cache: The cache we are manipulating
6172 * @num_bytes: The number of bytes in question
6173 * @reserve: One of the reservation enums
6174 * @delalloc: Whether the blocks are allocated for a delalloc write
6175 *
6176 * This is called by the allocator when it reserves space, or by somebody who is
6177 * freeing space that was never actually used on disk. For example if you
6178 * reserve some space for a new leaf in transaction A and before transaction A
6179 * commits you free that leaf, you call this with reserve set to 0 in order to
6180 * clear the reservation.
6181 *
6182 * Metadata reservations should be called with RESERVE_ALLOC so we do the proper
6183 * ENOSPC accounting. For data we handle the reservation through clearing the
6184 * delalloc bits in the io_tree. We have to do this since we could end up
6185 * allocating less disk space for the amount of data we have reserved in the
6186 * case of compression.
6187 *
6188 * If this is a reservation and the block group has become read only we cannot
6189 * make the reservation and return -EAGAIN, otherwise this function always
6190 * succeeds.
6191 */
6192 static int btrfs_update_reserved_bytes(struct btrfs_block_group_cache *cache,
6193 u64 num_bytes, int reserve, int delalloc)
6194 {
6195 struct btrfs_space_info *space_info = cache->space_info;
6196 int ret = 0;
6197
6198 spin_lock(&space_info->lock);
6199 spin_lock(&cache->lock);
6200 if (reserve != RESERVE_FREE) {
6201 if (cache->ro) {
6202 ret = -EAGAIN;
6203 } else {
6204 cache->reserved += num_bytes;
6205 space_info->bytes_reserved += num_bytes;
6206 if (reserve == RESERVE_ALLOC) {
6207 trace_btrfs_space_reservation(cache->fs_info,
6208 "space_info", space_info->flags,
6209 num_bytes, 0);
6210 space_info->bytes_may_use -= num_bytes;
6211 }
6212
6213 if (delalloc)
6214 cache->delalloc_bytes += num_bytes;
6215 }
6216 } else {
6217 if (cache->ro)
6218 space_info->bytes_readonly += num_bytes;
6219 cache->reserved -= num_bytes;
6220 space_info->bytes_reserved -= num_bytes;
6221
6222 if (delalloc)
6223 cache->delalloc_bytes -= num_bytes;
6224 }
6225 spin_unlock(&cache->lock);
6226 spin_unlock(&space_info->lock);
6227 return ret;
6228 }
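/*
 * Illustrative transitions for the enums above (sizes assumed):
 * reserving 16K with RESERVE_ALLOC moves 16K from
 * space_info->bytes_may_use to space_info->bytes_reserved;
 * RESERVE_ALLOC_NO_ACCOUNT adds to bytes_reserved without touching
 * bytes_may_use; RESERVE_FREE undoes a reservation, crediting
 * bytes_readonly instead if the block group has since gone read-only.
 */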
6229
6230 void btrfs_prepare_extent_commit(struct btrfs_trans_handle *trans,
6231 struct btrfs_root *root)
6232 {
6233 struct btrfs_fs_info *fs_info = root->fs_info;
6234 struct btrfs_caching_control *next;
6235 struct btrfs_caching_control *caching_ctl;
6236 struct btrfs_block_group_cache *cache;
6237
6238 down_write(&fs_info->commit_root_sem);
6239
6240 list_for_each_entry_safe(caching_ctl, next,
6241 &fs_info->caching_block_groups, list) {
6242 cache = caching_ctl->block_group;
6243 if (block_group_cache_done(cache)) {
6244 cache->last_byte_to_unpin = (u64)-1;
6245 list_del_init(&caching_ctl->list);
6246 put_caching_control(caching_ctl);
6247 } else {
6248 cache->last_byte_to_unpin = caching_ctl->progress;
6249 }
6250 }
6251
6252 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6253 fs_info->pinned_extents = &fs_info->freed_extents[1];
6254 else
6255 fs_info->pinned_extents = &fs_info->freed_extents[0];
6256
6257 up_write(&fs_info->commit_root_sem);
6258
6259 update_global_block_rsv(fs_info);
6260 }
6261
6262 /*
6263 * Returns the free cluster for the given space info and sets empty_cluster to
6264 * what it should be based on the mount options.
6265 */
6266 static struct btrfs_free_cluster *
6267 fetch_cluster_info(struct btrfs_root *root, struct btrfs_space_info *space_info,
6268 u64 *empty_cluster)
6269 {
6270 struct btrfs_free_cluster *ret = NULL;
6271 bool ssd = btrfs_test_opt(root, SSD);
6272
6273 *empty_cluster = 0;
6274 if (btrfs_mixed_space_info(space_info))
6275 return ret;
6276
6277 if (ssd)
6278 *empty_cluster = SZ_2M;
6279 if (space_info->flags & BTRFS_BLOCK_GROUP_METADATA) {
6280 ret = &root->fs_info->meta_alloc_cluster;
6281 if (!ssd)
6282 *empty_cluster = SZ_64K;
6283 } else if ((space_info->flags & BTRFS_BLOCK_GROUP_DATA) && ssd) {
6284 ret = &root->fs_info->data_alloc_cluster;
6285 }
6286
6287 return ret;
6288 }
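/*
 * Resulting cluster sizes from the logic above: metadata uses
 * meta_alloc_cluster with a 2M empty_cluster on SSD and 64K otherwise;
 * data uses data_alloc_cluster (2M) only on SSD; mixed block groups
 * never cluster.  The unpin path below doubles this value for its
 * fragmentation check.
 */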
6289
6290 static int unpin_extent_range(struct btrfs_root *root, u64 start, u64 end,
6291 const bool return_free_space)
6292 {
6293 struct btrfs_fs_info *fs_info = root->fs_info;
6294 struct btrfs_block_group_cache *cache = NULL;
6295 struct btrfs_space_info *space_info;
6296 struct btrfs_block_rsv *global_rsv = &fs_info->global_block_rsv;
6297 struct btrfs_free_cluster *cluster = NULL;
6298 u64 len;
6299 u64 total_unpinned = 0;
6300 u64 empty_cluster = 0;
6301 bool readonly;
6302
6303 while (start <= end) {
6304 readonly = false;
6305 if (!cache ||
6306 start >= cache->key.objectid + cache->key.offset) {
6307 if (cache)
6308 btrfs_put_block_group(cache);
6309 total_unpinned = 0;
6310 cache = btrfs_lookup_block_group(fs_info, start);
6311 BUG_ON(!cache); /* Logic error */
6312
6313 cluster = fetch_cluster_info(root,
6314 cache->space_info,
6315 &empty_cluster);
6316 empty_cluster <<= 1;
6317 }
6318
6319 len = cache->key.objectid + cache->key.offset - start;
6320 len = min(len, end + 1 - start);
6321
6322 if (start < cache->last_byte_to_unpin) {
6323 len = min(len, cache->last_byte_to_unpin - start);
6324 if (return_free_space)
6325 btrfs_add_free_space(cache, start, len);
6326 }
6327
6328 start += len;
6329 total_unpinned += len;
6330 space_info = cache->space_info;
6331
6332 /*
6333 * If this space cluster has been marked as fragmented and we've
6334 * unpinned enough in this block group to potentially allow a
6335 * cluster to be created inside of it go ahead and clear the
6336 * fragmented check.
6337 */
6338 if (cluster && cluster->fragmented &&
6339 total_unpinned > empty_cluster) {
6340 spin_lock(&cluster->lock);
6341 cluster->fragmented = 0;
6342 spin_unlock(&cluster->lock);
6343 }
6344
6345 spin_lock(&space_info->lock);
6346 spin_lock(&cache->lock);
6347 cache->pinned -= len;
6348 space_info->bytes_pinned -= len;
6349 space_info->max_extent_size = 0;
6350 percpu_counter_add(&space_info->total_bytes_pinned, -len);
6351 if (cache->ro) {
6352 space_info->bytes_readonly += len;
6353 readonly = true;
6354 }
6355 spin_unlock(&cache->lock);
6356 if (!readonly && global_rsv->space_info == space_info) {
6357 spin_lock(&global_rsv->lock);
6358 if (!global_rsv->full) {
6359 len = min(len, global_rsv->size -
6360 global_rsv->reserved);
6361 global_rsv->reserved += len;
6362 space_info->bytes_may_use += len;
6363 if (global_rsv->reserved >= global_rsv->size)
6364 global_rsv->full = 1;
6365 }
6366 spin_unlock(&global_rsv->lock);
6367 }
6368 spin_unlock(&space_info->lock);
6369 }
6370
6371 if (cache)
6372 btrfs_put_block_group(cache);
6373 return 0;
6374 }
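/*
 * Example of the global reserve refill above (numbers assumed): if the
 * global block reserve is 32M short of its target size and we unpin
 * 48M of metadata, 32M is credited back to global_rsv->reserved (and
 * counted in bytes_may_use) while the remaining 16M simply becomes
 * free space.
 */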
6375
6376 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
6377 struct btrfs_root *root)
6378 {
6379 struct btrfs_fs_info *fs_info = root->fs_info;
6380 struct btrfs_block_group_cache *block_group, *tmp;
6381 struct list_head *deleted_bgs;
6382 struct extent_io_tree *unpin;
6383 u64 start;
6384 u64 end;
6385 int ret;
6386
6387 if (fs_info->pinned_extents == &fs_info->freed_extents[0])
6388 unpin = &fs_info->freed_extents[1];
6389 else
6390 unpin = &fs_info->freed_extents[0];
6391
6392 while (!trans->aborted) {
6393 mutex_lock(&fs_info->unused_bg_unpin_mutex);
6394 ret = find_first_extent_bit(unpin, 0, &start, &end,
6395 EXTENT_DIRTY, NULL);
6396 if (ret) {
6397 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6398 break;
6399 }
6400
6401 if (btrfs_test_opt(root, DISCARD))
6402 ret = btrfs_discard_extent(root, start,
6403 end + 1 - start, NULL);
6404
6405 clear_extent_dirty(unpin, start, end, GFP_NOFS);
6406 unpin_extent_range(root, start, end, true);
6407 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
6408 cond_resched();
6409 }
6410
6411 /*
6412 * Transaction is finished. We don't need the lock anymore. We
6413 * do need to clean up the block groups in case of a transaction
6414 * abort.
6415 */
6416 deleted_bgs = &trans->transaction->deleted_bgs;
6417 list_for_each_entry_safe(block_group, tmp, deleted_bgs, bg_list) {
6418 u64 trimmed = 0;
6419
6420 ret = -EROFS;
6421 if (!trans->aborted)
6422 ret = btrfs_discard_extent(root,
6423 block_group->key.objectid,
6424 block_group->key.offset,
6425 &trimmed);
6426
6427 list_del_init(&block_group->bg_list);
6428 btrfs_put_block_group_trimming(block_group);
6429 btrfs_put_block_group(block_group);
6430
6431 if (ret) {
6432 const char *errstr = btrfs_decode_error(ret);
6433 btrfs_warn(fs_info,
6434 "Discard failed while removing blockgroup: errno=%d %s\n",
6435 ret, errstr);
6436 }
6437 }
6438
6439 return 0;
6440 }
6441
6442 static void add_pinned_bytes(struct btrfs_fs_info *fs_info, u64 num_bytes,
6443 u64 owner, u64 root_objectid)
6444 {
6445 struct btrfs_space_info *space_info;
6446 u64 flags;
6447
6448 if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6449 if (root_objectid == BTRFS_CHUNK_TREE_OBJECTID)
6450 flags = BTRFS_BLOCK_GROUP_SYSTEM;
6451 else
6452 flags = BTRFS_BLOCK_GROUP_METADATA;
6453 } else {
6454 flags = BTRFS_BLOCK_GROUP_DATA;
6455 }
6456
6457 space_info = __find_space_info(fs_info, flags);
6458 BUG_ON(!space_info); /* Logic bug */
6459 percpu_counter_add(&space_info->total_bytes_pinned, num_bytes);
6460 }
6461
6462
6463 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
6464 struct btrfs_root *root,
6465 struct btrfs_delayed_ref_node *node, u64 parent,
6466 u64 root_objectid, u64 owner_objectid,
6467 u64 owner_offset, int refs_to_drop,
6468 struct btrfs_delayed_extent_op *extent_op)
6469 {
6470 struct btrfs_key key;
6471 struct btrfs_path *path;
6472 struct btrfs_fs_info *info = root->fs_info;
6473 struct btrfs_root *extent_root = info->extent_root;
6474 struct extent_buffer *leaf;
6475 struct btrfs_extent_item *ei;
6476 struct btrfs_extent_inline_ref *iref;
6477 int ret;
6478 int is_data;
6479 int extent_slot = 0;
6480 int found_extent = 0;
6481 int num_to_del = 1;
6482 u32 item_size;
6483 u64 refs;
6484 u64 bytenr = node->bytenr;
6485 u64 num_bytes = node->num_bytes;
6486 int last_ref = 0;
6487 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
6488 SKINNY_METADATA);
6489
6490 path = btrfs_alloc_path();
6491 if (!path)
6492 return -ENOMEM;
6493
6494 path->reada = READA_FORWARD;
6495 path->leave_spinning = 1;
6496
6497 is_data = owner_objectid >= BTRFS_FIRST_FREE_OBJECTID;
6498 BUG_ON(!is_data && refs_to_drop != 1);
6499
6500 if (is_data)
6501 skinny_metadata = 0;
6502
6503 ret = lookup_extent_backref(trans, extent_root, path, &iref,
6504 bytenr, num_bytes, parent,
6505 root_objectid, owner_objectid,
6506 owner_offset);
6507 if (ret == 0) {
6508 extent_slot = path->slots[0];
6509 while (extent_slot >= 0) {
6510 btrfs_item_key_to_cpu(path->nodes[0], &key,
6511 extent_slot);
6512 if (key.objectid != bytenr)
6513 break;
6514 if (key.type == BTRFS_EXTENT_ITEM_KEY &&
6515 key.offset == num_bytes) {
6516 found_extent = 1;
6517 break;
6518 }
6519 if (key.type == BTRFS_METADATA_ITEM_KEY &&
6520 key.offset == owner_objectid) {
6521 found_extent = 1;
6522 break;
6523 }
6524 if (path->slots[0] - extent_slot > 5)
6525 break;
6526 extent_slot--;
6527 }
6528 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6529 item_size = btrfs_item_size_nr(path->nodes[0], extent_slot);
6530 if (found_extent && item_size < sizeof(*ei))
6531 found_extent = 0;
6532 #endif
6533 if (!found_extent) {
6534 BUG_ON(iref);
6535 ret = remove_extent_backref(trans, extent_root, path,
6536 NULL, refs_to_drop,
6537 is_data, &last_ref);
6538 if (ret) {
6539 btrfs_abort_transaction(trans, extent_root, ret);
6540 goto out;
6541 }
6542 btrfs_release_path(path);
6543 path->leave_spinning = 1;
6544
6545 key.objectid = bytenr;
6546 key.type = BTRFS_EXTENT_ITEM_KEY;
6547 key.offset = num_bytes;
6548
6549 if (!is_data && skinny_metadata) {
6550 key.type = BTRFS_METADATA_ITEM_KEY;
6551 key.offset = owner_objectid;
6552 }
6553
6554 ret = btrfs_search_slot(trans, extent_root,
6555 &key, path, -1, 1);
6556 if (ret > 0 && skinny_metadata && path->slots[0]) {
6557 /*
6558 * Couldn't find our skinny metadata item,
6559 * see if we have ye olde extent item.
6560 */
6561 path->slots[0]--;
6562 btrfs_item_key_to_cpu(path->nodes[0], &key,
6563 path->slots[0]);
6564 if (key.objectid == bytenr &&
6565 key.type == BTRFS_EXTENT_ITEM_KEY &&
6566 key.offset == num_bytes)
6567 ret = 0;
6568 }
6569
6570 if (ret > 0 && skinny_metadata) {
6571 skinny_metadata = false;
6572 key.objectid = bytenr;
6573 key.type = BTRFS_EXTENT_ITEM_KEY;
6574 key.offset = num_bytes;
6575 btrfs_release_path(path);
6576 ret = btrfs_search_slot(trans, extent_root,
6577 &key, path, -1, 1);
6578 }
6579
6580 if (ret) {
6581 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6582 ret, bytenr);
6583 if (ret > 0)
6584 btrfs_print_leaf(extent_root,
6585 path->nodes[0]);
6586 }
6587 if (ret < 0) {
6588 btrfs_abort_transaction(trans, extent_root, ret);
6589 goto out;
6590 }
6591 extent_slot = path->slots[0];
6592 }
6593 } else if (WARN_ON(ret == -ENOENT)) {
6594 btrfs_print_leaf(extent_root, path->nodes[0]);
6595 btrfs_err(info,
6596 "unable to find ref byte nr %llu parent %llu root %llu owner %llu offset %llu",
6597 bytenr, parent, root_objectid, owner_objectid,
6598 owner_offset);
6599 btrfs_abort_transaction(trans, extent_root, ret);
6600 goto out;
6601 } else {
6602 btrfs_abort_transaction(trans, extent_root, ret);
6603 goto out;
6604 }
6605
6606 leaf = path->nodes[0];
6607 item_size = btrfs_item_size_nr(leaf, extent_slot);
6608 #ifdef BTRFS_COMPAT_EXTENT_TREE_V0
6609 if (item_size < sizeof(*ei)) {
6610 BUG_ON(found_extent || extent_slot != path->slots[0]);
6611 ret = convert_extent_item_v0(trans, extent_root, path,
6612 owner_objectid, 0);
6613 if (ret < 0) {
6614 btrfs_abort_transaction(trans, extent_root, ret);
6615 goto out;
6616 }
6617
6618 btrfs_release_path(path);
6619 path->leave_spinning = 1;
6620
6621 key.objectid = bytenr;
6622 key.type = BTRFS_EXTENT_ITEM_KEY;
6623 key.offset = num_bytes;
6624
6625 ret = btrfs_search_slot(trans, extent_root, &key, path,
6626 -1, 1);
6627 if (ret) {
6628 btrfs_err(info, "umm, got %d back from search, was looking for %llu",
6629 ret, bytenr);
6630 btrfs_print_leaf(extent_root, path->nodes[0]);
6631 }
6632 if (ret < 0) {
6633 btrfs_abort_transaction(trans, extent_root, ret);
6634 goto out;
6635 }
6636
6637 extent_slot = path->slots[0];
6638 leaf = path->nodes[0];
6639 item_size = btrfs_item_size_nr(leaf, extent_slot);
6640 }
6641 #endif
6642 BUG_ON(item_size < sizeof(*ei));
6643 ei = btrfs_item_ptr(leaf, extent_slot,
6644 struct btrfs_extent_item);
6645 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID &&
6646 key.type == BTRFS_EXTENT_ITEM_KEY) {
6647 struct btrfs_tree_block_info *bi;
6648 BUG_ON(item_size < sizeof(*ei) + sizeof(*bi));
6649 bi = (struct btrfs_tree_block_info *)(ei + 1);
6650 WARN_ON(owner_objectid != btrfs_tree_block_level(leaf, bi));
6651 }
6652
6653 refs = btrfs_extent_refs(leaf, ei);
6654 if (refs < refs_to_drop) {
6655 btrfs_err(info, "trying to drop %d refs but we only have %Lu "
6656 "for bytenr %Lu", refs_to_drop, refs, bytenr);
6657 ret = -EINVAL;
6658 btrfs_abort_transaction(trans, extent_root, ret);
6659 goto out;
6660 }
6661 refs -= refs_to_drop;
6662
6663 if (refs > 0) {
6664 if (extent_op)
6665 __run_delayed_extent_op(extent_op, leaf, ei);
6666 /*
6667 * In the case of inline back ref, reference count will
6668 * be updated by remove_extent_backref
6669 */
6670 if (iref) {
6671 BUG_ON(!found_extent);
6672 } else {
6673 btrfs_set_extent_refs(leaf, ei, refs);
6674 btrfs_mark_buffer_dirty(leaf);
6675 }
6676 if (found_extent) {
6677 ret = remove_extent_backref(trans, extent_root, path,
6678 iref, refs_to_drop,
6679 is_data, &last_ref);
6680 if (ret) {
6681 btrfs_abort_transaction(trans, extent_root, ret);
6682 goto out;
6683 }
6684 }
6685 add_pinned_bytes(root->fs_info, -num_bytes, owner_objectid,
6686 root_objectid);
6687 } else {
6688 if (found_extent) {
6689 BUG_ON(is_data && refs_to_drop !=
6690 extent_data_ref_count(path, iref));
6691 if (iref) {
6692 BUG_ON(path->slots[0] != extent_slot);
6693 } else {
6694 BUG_ON(path->slots[0] != extent_slot + 1);
6695 path->slots[0] = extent_slot;
6696 num_to_del = 2;
6697 }
6698 }
6699
6700 last_ref = 1;
6701 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
6702 num_to_del);
6703 if (ret) {
6704 btrfs_abort_transaction(trans, extent_root, ret);
6705 goto out;
6706 }
6707 btrfs_release_path(path);
6708
6709 if (is_data) {
6710 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
6711 if (ret) {
6712 btrfs_abort_transaction(trans, extent_root, ret);
6713 goto out;
6714 }
6715 }
6716
6717 ret = add_to_free_space_tree(trans, root->fs_info, bytenr,
6718 num_bytes);
6719 if (ret) {
6720 btrfs_abort_transaction(trans, extent_root, ret);
6721 goto out;
6722 }
6723
6724 ret = update_block_group(trans, root, bytenr, num_bytes, 0);
6725 if (ret) {
6726 btrfs_abort_transaction(trans, extent_root, ret);
6727 goto out;
6728 }
6729 }
6730 btrfs_release_path(path);
6731
6732 out:
6733 btrfs_free_path(path);
6734 return ret;
6735 }
6736
6737 /*
6738 * when we free a block, it is possible (and likely) that we free the last
6739 * delayed ref for that extent as well. This searches the delayed ref tree for
6740 * a given extent, and if there are no other delayed refs to be processed, it
6741 * removes it from the tree.
6742 */
6743 static noinline int check_ref_cleanup(struct btrfs_trans_handle *trans,
6744 struct btrfs_root *root, u64 bytenr)
6745 {
6746 struct btrfs_delayed_ref_head *head;
6747 struct btrfs_delayed_ref_root *delayed_refs;
6748 int ret = 0;
6749
6750 delayed_refs = &trans->transaction->delayed_refs;
6751 spin_lock(&delayed_refs->lock);
6752 head = btrfs_find_delayed_ref_head(trans, bytenr);
6753 if (!head)
6754 goto out_delayed_unlock;
6755
6756 spin_lock(&head->lock);
6757 if (!list_empty(&head->ref_list))
6758 goto out;
6759
6760 if (head->extent_op) {
6761 if (!head->must_insert_reserved)
6762 goto out;
6763 btrfs_free_delayed_extent_op(head->extent_op);
6764 head->extent_op = NULL;
6765 }
6766
6767 /*
6768 * waiting for the lock here would deadlock. If someone else has it
6769 * locked they are already in the process of dropping it anyway
6770 */
6771 if (!mutex_trylock(&head->mutex))
6772 goto out;
6773
6774 /*
6775 * at this point we have a head with no other entries. Go
6776 * ahead and process it.
6777 */
6778 head->node.in_tree = 0;
6779 rb_erase(&head->href_node, &delayed_refs->href_root);
6780
6781 atomic_dec(&delayed_refs->num_entries);
6782
6783 /*
6784 * we don't take a ref on the node because we're removing it from the
6785 * tree, so we just steal the ref the tree was holding.
6786 */
6787 delayed_refs->num_heads--;
6788 if (head->processing == 0)
6789 delayed_refs->num_heads_ready--;
6790 head->processing = 0;
6791 spin_unlock(&head->lock);
6792 spin_unlock(&delayed_refs->lock);
6793
6794 BUG_ON(head->extent_op);
6795 if (head->must_insert_reserved)
6796 ret = 1;
6797
6798 mutex_unlock(&head->mutex);
6799 btrfs_put_delayed_ref(&head->node);
6800 return ret;
6801 out:
6802 spin_unlock(&head->lock);
6803
6804 out_delayed_unlock:
6805 spin_unlock(&delayed_refs->lock);
6806 return 0;
6807 }
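/*
 * Return semantics of check_ref_cleanup() above: 1 means the last
 * delayed ref head was removed and the extent was never inserted into
 * the extent tree (must_insert_reserved), so the caller may hand its
 * space straight back; 0 means other delayed refs still exist, the
 * head was contended, or normal delayed-ref processing must run, so
 * the caller falls back to pinning the extent.
 */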
6808
6809 void btrfs_free_tree_block(struct btrfs_trans_handle *trans,
6810 struct btrfs_root *root,
6811 struct extent_buffer *buf,
6812 u64 parent, int last_ref)
6813 {
6814 int pin = 1;
6815 int ret;
6816
6817 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6818 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
6819 buf->start, buf->len,
6820 parent, root->root_key.objectid,
6821 btrfs_header_level(buf),
6822 BTRFS_DROP_DELAYED_REF, NULL);
6823 BUG_ON(ret); /* -ENOMEM */
6824 }
6825
6826 if (!last_ref)
6827 return;
6828
6829 if (btrfs_header_generation(buf) == trans->transid) {
6830 struct btrfs_block_group_cache *cache;
6831
6832 if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) {
6833 ret = check_ref_cleanup(trans, root, buf->start);
6834 if (!ret)
6835 goto out;
6836 }
6837
6838 cache = btrfs_lookup_block_group(root->fs_info, buf->start);
6839
6840 if (btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
6841 pin_down_extent(root, cache, buf->start, buf->len, 1);
6842 btrfs_put_block_group(cache);
6843 goto out;
6844 }
6845
6846 WARN_ON(test_bit(EXTENT_BUFFER_DIRTY, &buf->bflags));
6847
6848 btrfs_add_free_space(cache, buf->start, buf->len);
6849 btrfs_update_reserved_bytes(cache, buf->len, RESERVE_FREE, 0);
6850 btrfs_put_block_group(cache);
6851 trace_btrfs_reserved_extent_free(root, buf->start, buf->len);
6852 pin = 0;
6853 }
6854 out:
6855 if (pin)
6856 add_pinned_bytes(root->fs_info, buf->len,
6857 btrfs_header_level(buf),
6858 root->root_key.objectid);
6859
6860 /*
6861 * Deleting the buffer, clear the corrupt flag since it doesn't matter
6862 * anymore.
6863 */
6864 clear_bit(EXTENT_BUFFER_CORRUPT, &buf->bflags);
6865 }
6866
6867 /* Can return -ENOMEM */
6868 int btrfs_free_extent(struct btrfs_trans_handle *trans, struct btrfs_root *root,
6869 u64 bytenr, u64 num_bytes, u64 parent, u64 root_objectid,
6870 u64 owner, u64 offset)
6871 {
6872 int ret;
6873 struct btrfs_fs_info *fs_info = root->fs_info;
6874
6875 if (btrfs_test_is_dummy_root(root))
6876 return 0;
6877
6878 add_pinned_bytes(root->fs_info, num_bytes, owner, root_objectid);
6879
6880 /*
6881 * tree log blocks never actually go into the extent allocation
6882 * tree, just update pinning info and exit early.
6883 */
6884 if (root_objectid == BTRFS_TREE_LOG_OBJECTID) {
6885 WARN_ON(owner >= BTRFS_FIRST_FREE_OBJECTID);
6886 /* unlocks the pinned mutex */
6887 btrfs_pin_extent(root, bytenr, num_bytes, 1);
6888 ret = 0;
6889 } else if (owner < BTRFS_FIRST_FREE_OBJECTID) {
6890 ret = btrfs_add_delayed_tree_ref(fs_info, trans, bytenr,
6891 num_bytes,
6892 parent, root_objectid, (int)owner,
6893 BTRFS_DROP_DELAYED_REF, NULL);
6894 } else {
6895 ret = btrfs_add_delayed_data_ref(fs_info, trans, bytenr,
6896 num_bytes,
6897 parent, root_objectid, owner,
6898 offset, 0,
6899 BTRFS_DROP_DELAYED_REF, NULL);
6900 }
6901 return ret;
6902 }
6903
6904 /*
6905 * when we wait for progress in the block group caching, it's because
6906 * our allocation attempt failed at least once. So, we must sleep
6907 * and let some progress happen before we try again.
6908 *
6909 * This function will sleep at least once waiting for new free space to
6910 * show up, and then it will check the block group free space numbers
6911 * for our min num_bytes. Another option is to have it go ahead
6912 * and look in the rbtree for a free extent of a given size, but this
6913 * is a good start.
6914 *
6915 * Callers of this must check if cache->cached == BTRFS_CACHE_ERROR before using
6916 * any of the information in this block group.
6917 */
6918 static noinline void
6919 wait_block_group_cache_progress(struct btrfs_block_group_cache *cache,
6920 u64 num_bytes)
6921 {
6922 struct btrfs_caching_control *caching_ctl;
6923
6924 caching_ctl = get_caching_control(cache);
6925 if (!caching_ctl)
6926 return;
6927
6928 wait_event(caching_ctl->wait, block_group_cache_done(cache) ||
6929 (cache->free_space_ctl->free_space >= num_bytes));
6930
6931 put_caching_control(caching_ctl);
6932 }
6933
6934 static noinline int
6935 wait_block_group_cache_done(struct btrfs_block_group_cache *cache)
6936 {
6937 struct btrfs_caching_control *caching_ctl;
6938 int ret = 0;
6939
6940 caching_ctl = get_caching_control(cache);
6941 if (!caching_ctl)
6942 return (cache->cached == BTRFS_CACHE_ERROR) ? -EIO : 0;
6943
6944 wait_event(caching_ctl->wait, block_group_cache_done(cache));
6945 if (cache->cached == BTRFS_CACHE_ERROR)
6946 ret = -EIO;
6947 put_caching_control(caching_ctl);
6948 return ret;
6949 }
6950
6951 int __get_raid_index(u64 flags)
6952 {
6953 if (flags & BTRFS_BLOCK_GROUP_RAID10)
6954 return BTRFS_RAID_RAID10;
6955 else if (flags & BTRFS_BLOCK_GROUP_RAID1)
6956 return BTRFS_RAID_RAID1;
6957 else if (flags & BTRFS_BLOCK_GROUP_DUP)
6958 return BTRFS_RAID_DUP;
6959 else if (flags & BTRFS_BLOCK_GROUP_RAID0)
6960 return BTRFS_RAID_RAID0;
6961 else if (flags & BTRFS_BLOCK_GROUP_RAID5)
6962 return BTRFS_RAID_RAID5;
6963 else if (flags & BTRFS_BLOCK_GROUP_RAID6)
6964 return BTRFS_RAID_RAID6;
6965
6966 return BTRFS_RAID_SINGLE; /* BTRFS_BLOCK_GROUP_SINGLE */
6967 }
6968
6969 int get_block_group_index(struct btrfs_block_group_cache *cache)
6970 {
6971 return __get_raid_index(cache->flags);
6972 }
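/*
 * The returned index selects a list in space_info->block_groups[]; a
 * RAID1 block group, for instance, lives on
 * space_info->block_groups[BTRFS_RAID_RAID1], and find_free_extent()
 * below walks these lists in index order when searching for space.
 */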
6973
6974 static const char *btrfs_raid_type_names[BTRFS_NR_RAID_TYPES] = {
6975 [BTRFS_RAID_RAID10] = "raid10",
6976 [BTRFS_RAID_RAID1] = "raid1",
6977 [BTRFS_RAID_DUP] = "dup",
6978 [BTRFS_RAID_RAID0] = "raid0",
6979 [BTRFS_RAID_SINGLE] = "single",
6980 [BTRFS_RAID_RAID5] = "raid5",
6981 [BTRFS_RAID_RAID6] = "raid6",
6982 };
6983
6984 static const char *get_raid_name(enum btrfs_raid_types type)
6985 {
6986 if (type >= BTRFS_NR_RAID_TYPES)
6987 return NULL;
6988
6989 return btrfs_raid_type_names[type];
6990 }
6991
6992 enum btrfs_loop_type {
6993 LOOP_CACHING_NOWAIT = 0,
6994 LOOP_CACHING_WAIT = 1,
6995 LOOP_ALLOC_CHUNK = 2,
6996 LOOP_NO_EMPTY_SIZE = 3,
6997 };
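/*
 * find_free_extent() escalates through these stages in order: scan
 * only cached block groups, then also wait on groups that are still
 * caching, then force a chunk allocation, and finally retry with
 * empty_size and empty_cluster forced to 0 before giving up with
 * -ENOSPC.
 */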
6998
6999 static inline void
7000 btrfs_lock_block_group(struct btrfs_block_group_cache *cache,
7001 int delalloc)
7002 {
7003 if (delalloc)
7004 down_read(&cache->data_rwsem);
7005 }
7006
7007 static inline void
7008 btrfs_grab_block_group(struct btrfs_block_group_cache *cache,
7009 int delalloc)
7010 {
7011 btrfs_get_block_group(cache);
7012 if (delalloc)
7013 down_read(&cache->data_rwsem);
7014 }
7015
7016 static struct btrfs_block_group_cache *
7017 btrfs_lock_cluster(struct btrfs_block_group_cache *block_group,
7018 struct btrfs_free_cluster *cluster,
7019 int delalloc)
7020 {
7021 struct btrfs_block_group_cache *used_bg;
7022 bool locked = false;
7023 again:
7024 spin_lock(&cluster->refill_lock);
7025 if (locked) {
7026 if (used_bg == cluster->block_group)
7027 return used_bg;
7028
7029 up_read(&used_bg->data_rwsem);
7030 btrfs_put_block_group(used_bg);
7031 }
7032
7033 used_bg = cluster->block_group;
7034 if (!used_bg)
7035 return NULL;
7036
7037 if (used_bg == block_group)
7038 return used_bg;
7039
7040 btrfs_get_block_group(used_bg);
7041
7042 if (!delalloc)
7043 return used_bg;
7044
7045 if (down_read_trylock(&used_bg->data_rwsem))
7046 return used_bg;
7047
7048 spin_unlock(&cluster->refill_lock);
7049 down_read(&used_bg->data_rwsem);
7050 locked = true;
7051 goto again;
7052 }
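/*
 * The retry loop above resolves a lock-order inversion: we may not
 * block on used_bg->data_rwsem while holding cluster->refill_lock, so
 * on trylock failure we drop the spinlock, sleep on the rwsem, then
 * re-take the spinlock and verify the cluster still points at the same
 * block group before trusting the reference we hold.
 */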
7053
7054 static inline void
7055 btrfs_release_block_group(struct btrfs_block_group_cache *cache,
7056 int delalloc)
7057 {
7058 if (delalloc)
7059 up_read(&cache->data_rwsem);
7060 btrfs_put_block_group(cache);
7061 }
7062
7063 /*
7064 * walks the btree of allocated extents and finds a hole of a given size.
7065 * The key ins is changed to record the hole:
7066 * ins->objectid == start position
7067 * ins->flags = BTRFS_EXTENT_ITEM_KEY
7068 * ins->offset == the size of the hole.
7069 * Any available blocks before search_start are skipped.
7070 *
7071 * If there is no suitable free space, we will record the size of the
7072 * largest free space extent seen during the search.
7073 */
7074 static noinline int find_free_extent(struct btrfs_root *orig_root,
7075 u64 num_bytes, u64 empty_size,
7076 u64 hint_byte, struct btrfs_key *ins,
7077 u64 flags, int delalloc)
7078 {
7079 int ret = 0;
7080 struct btrfs_root *root = orig_root->fs_info->extent_root;
7081 struct btrfs_free_cluster *last_ptr = NULL;
7082 struct btrfs_block_group_cache *block_group = NULL;
7083 u64 search_start = 0;
7084 u64 max_extent_size = 0;
7085 u64 empty_cluster = 0;
7086 struct btrfs_space_info *space_info;
7087 int loop = 0;
7088 int index = __get_raid_index(flags);
7089 int alloc_type = (flags & BTRFS_BLOCK_GROUP_DATA) ?
7090 RESERVE_ALLOC_NO_ACCOUNT : RESERVE_ALLOC;
7091 bool failed_cluster_refill = false;
7092 bool failed_alloc = false;
7093 bool use_cluster = true;
7094 bool have_caching_bg = false;
7095 bool orig_have_caching_bg = false;
7096 bool full_search = false;
7097
7098 WARN_ON(num_bytes < root->sectorsize);
7099 ins->type = BTRFS_EXTENT_ITEM_KEY;
7100 ins->objectid = 0;
7101 ins->offset = 0;
7102
7103 trace_find_free_extent(orig_root, num_bytes, empty_size, flags);
7104
7105 space_info = __find_space_info(root->fs_info, flags);
7106 if (!space_info) {
7107 btrfs_err(root->fs_info, "No space info for %llu", flags);
7108 return -ENOSPC;
7109 }
7110
7111 /*
7112 * If our free space is heavily fragmented we may not be able to make
7113 * big contiguous allocations, so instead of doing the expensive search
7114 * for free space, simply return ENOSPC with our max_extent_size so we
7115 * can go ahead and search for a more manageable chunk.
7116 *
7117 * If our max_extent_size is large enough for our allocation simply
7118 * disable clustering since we will likely not be able to find enough
7119 * space to create a cluster and induce latency trying.
7120 */
7121 if (unlikely(space_info->max_extent_size)) {
7122 spin_lock(&space_info->lock);
7123 if (space_info->max_extent_size &&
7124 num_bytes > space_info->max_extent_size) {
7125 ins->offset = space_info->max_extent_size;
7126 spin_unlock(&space_info->lock);
7127 return -ENOSPC;
7128 } else if (space_info->max_extent_size) {
7129 use_cluster = false;
7130 }
7131 spin_unlock(&space_info->lock);
7132 }
7133
7134 last_ptr = fetch_cluster_info(orig_root, space_info, &empty_cluster);
7135 if (last_ptr) {
7136 spin_lock(&last_ptr->lock);
7137 if (last_ptr->block_group)
7138 hint_byte = last_ptr->window_start;
7139 if (last_ptr->fragmented) {
7140 /*
7141 * We still set window_start so we can keep track of the
7142 * last place we found an allocation to try and save
7143 * some time.
7144 */
7145 hint_byte = last_ptr->window_start;
7146 use_cluster = false;
7147 }
7148 spin_unlock(&last_ptr->lock);
7149 }
7150
7151 search_start = max(search_start, first_logical_byte(root, 0));
7152 search_start = max(search_start, hint_byte);
7153 if (search_start == hint_byte) {
7154 block_group = btrfs_lookup_block_group(root->fs_info,
7155 search_start);
7156 /*
7157 * we don't want to use the block group if it doesn't match our
7158 * allocation bits, or if its not cached.
7159 *
7160 * However if we are re-searching with an ideal block group
7161 * picked out then we don't care that the block group is cached.
7162 */
7163 if (block_group && block_group_bits(block_group, flags) &&
7164 block_group->cached != BTRFS_CACHE_NO) {
7165 down_read(&space_info->groups_sem);
7166 if (list_empty(&block_group->list) ||
7167 block_group->ro) {
7168 /*
7169 * someone is removing this block group,
7170 * we can't jump into the have_block_group
7171 * target because our list pointers are not
7172 * valid
7173 */
7174 btrfs_put_block_group(block_group);
7175 up_read(&space_info->groups_sem);
7176 } else {
7177 index = get_block_group_index(block_group);
7178 btrfs_lock_block_group(block_group, delalloc);
7179 goto have_block_group;
7180 }
7181 } else if (block_group) {
7182 btrfs_put_block_group(block_group);
7183 }
7184 }
7185 search:
7186 have_caching_bg = false;
7187 if (index == 0 || index == __get_raid_index(flags))
7188 full_search = true;
7189 down_read(&space_info->groups_sem);
7190 list_for_each_entry(block_group, &space_info->block_groups[index],
7191 list) {
7192 u64 offset;
7193 int cached;
7194
7195 btrfs_grab_block_group(block_group, delalloc);
7196 search_start = block_group->key.objectid;
7197
7198 /*
7199 * this can happen if we end up cycling through all the
7200 * raid types, but we want to make sure we only allocate
7201 * for the proper type.
7202 */
7203 if (!block_group_bits(block_group, flags)) {
7204 u64 extra = BTRFS_BLOCK_GROUP_DUP |
7205 BTRFS_BLOCK_GROUP_RAID1 |
7206 BTRFS_BLOCK_GROUP_RAID5 |
7207 BTRFS_BLOCK_GROUP_RAID6 |
7208 BTRFS_BLOCK_GROUP_RAID10;
7209
7210 /*
7211 * if they asked for extra copies and this block group
7212 * doesn't provide them, bail. This does allow us to
7213 * fill raid0 from raid1.
7214 */
7215 if ((flags & extra) && !(block_group->flags & extra))
7216 goto loop;
7217 }
7218
7219 have_block_group:
7220 cached = block_group_cache_done(block_group);
7221 if (unlikely(!cached)) {
7222 have_caching_bg = true;
7223 ret = cache_block_group(block_group, 0);
7224 BUG_ON(ret < 0);
7225 ret = 0;
7226 }
7227
7228 if (unlikely(block_group->cached == BTRFS_CACHE_ERROR))
7229 goto loop;
7230 if (unlikely(block_group->ro))
7231 goto loop;
7232
7233 /*
7234 * Ok we want to try and use the cluster allocator, so
7235 * let's look there
7236 */
7237 if (last_ptr && use_cluster) {
7238 struct btrfs_block_group_cache *used_block_group;
7239 unsigned long aligned_cluster;
7240 /*
7241 * the refill lock keeps out other
7242 * people trying to start a new cluster
7243 */
7244 used_block_group = btrfs_lock_cluster(block_group,
7245 last_ptr,
7246 delalloc);
7247 if (!used_block_group)
7248 goto refill_cluster;
7249
7250 if (used_block_group != block_group &&
7251 (used_block_group->ro ||
7252 !block_group_bits(used_block_group, flags)))
7253 goto release_cluster;
7254
7255 offset = btrfs_alloc_from_cluster(used_block_group,
7256 last_ptr,
7257 num_bytes,
7258 used_block_group->key.objectid,
7259 &max_extent_size);
7260 if (offset) {
7261 /* we have a block, we're done */
7262 spin_unlock(&last_ptr->refill_lock);
7263 trace_btrfs_reserve_extent_cluster(root,
7264 used_block_group,
7265 search_start, num_bytes);
7266 if (used_block_group != block_group) {
7267 btrfs_release_block_group(block_group,
7268 delalloc);
7269 block_group = used_block_group;
7270 }
7271 goto checks;
7272 }
7273
7274 WARN_ON(last_ptr->block_group != used_block_group);
7275 release_cluster:
7276 /* If we are on LOOP_NO_EMPTY_SIZE, we can't
7277 * set up a new cluster, so let's just skip it
7278 * and let the allocator find whatever block
7279 * it can find. If we reach this point, we
7280 * will have tried the cluster allocator
7281 * plenty of times and not have found
7282 * anything, so we are likely way too
7283 * fragmented for the clustering stuff to find
7284 * anything.
7285 *
7286 * However, if the cluster is taken from the
7287 * current block group, release the cluster
7288 * first, so that we stand a better chance of
7289 * succeeding in the unclustered
7290 * allocation. */
7291 if (loop >= LOOP_NO_EMPTY_SIZE &&
7292 used_block_group != block_group) {
7293 spin_unlock(&last_ptr->refill_lock);
7294 btrfs_release_block_group(used_block_group,
7295 delalloc);
7296 goto unclustered_alloc;
7297 }
7298
7299 /*
7300 * this cluster didn't work out, free it and
7301 * start over
7302 */
7303 btrfs_return_cluster_to_free_space(NULL, last_ptr);
7304
7305 if (used_block_group != block_group)
7306 btrfs_release_block_group(used_block_group,
7307 delalloc);
7308 refill_cluster:
7309 if (loop >= LOOP_NO_EMPTY_SIZE) {
7310 spin_unlock(&last_ptr->refill_lock);
7311 goto unclustered_alloc;
7312 }
7313
7314 aligned_cluster = max_t(unsigned long,
7315 empty_cluster + empty_size,
7316 block_group->full_stripe_len);
7317
7318 /* allocate a cluster in this block group */
7319 ret = btrfs_find_space_cluster(root, block_group,
7320 last_ptr, search_start,
7321 num_bytes,
7322 aligned_cluster);
7323 if (ret == 0) {
7324 /*
7325 * now pull our allocation out of this
7326 * cluster
7327 */
7328 offset = btrfs_alloc_from_cluster(block_group,
7329 last_ptr,
7330 num_bytes,
7331 search_start,
7332 &max_extent_size);
7333 if (offset) {
7334 /* we found one, proceed */
7335 spin_unlock(&last_ptr->refill_lock);
7336 trace_btrfs_reserve_extent_cluster(root,
7337 block_group, search_start,
7338 num_bytes);
7339 goto checks;
7340 }
7341 } else if (!cached && loop > LOOP_CACHING_NOWAIT
7342 && !failed_cluster_refill) {
7343 spin_unlock(&last_ptr->refill_lock);
7344
7345 failed_cluster_refill = true;
7346 wait_block_group_cache_progress(block_group,
7347 num_bytes + empty_cluster + empty_size);
7348 goto have_block_group;
7349 }
7350
7351 /*
7352 * at this point we either didn't find a cluster
7353 * or we weren't able to allocate a block from our
7354 * cluster. Free the cluster we've been trying
7355 * to use, and go to the next block group
7356 */
7357 btrfs_return_cluster_to_free_space(NULL, last_ptr);
7358 spin_unlock(&last_ptr->refill_lock);
7359 goto loop;
7360 }
7361
7362 unclustered_alloc:
7363 /*
7364 * We are doing an unclustered alloc, set the fragmented flag so
7365 * we don't bother trying to set up a cluster again until we get
7366 * more space.
7367 */
7368 if (unlikely(last_ptr)) {
7369 spin_lock(&last_ptr->lock);
7370 last_ptr->fragmented = 1;
7371 spin_unlock(&last_ptr->lock);
7372 }
7373 spin_lock(&block_group->free_space_ctl->tree_lock);
7374 if (cached &&
7375 block_group->free_space_ctl->free_space <
7376 num_bytes + empty_cluster + empty_size) {
7377 if (block_group->free_space_ctl->free_space >
7378 max_extent_size)
7379 max_extent_size =
7380 block_group->free_space_ctl->free_space;
7381 spin_unlock(&block_group->free_space_ctl->tree_lock);
7382 goto loop;
7383 }
7384 spin_unlock(&block_group->free_space_ctl->tree_lock);
7385
7386 offset = btrfs_find_space_for_alloc(block_group, search_start,
7387 num_bytes, empty_size,
7388 &max_extent_size);
7389 /*
7390 * If we didn't find a chunk, and we haven't failed on this
7391 * block group before, and this block group is in the middle of
7392 * caching and we are ok with waiting, then go ahead and wait
7393 * for progress to be made, and set failed_alloc to true.
7394 *
7395 * If failed_alloc is true then we've already waited on this
7396 * block group once and should move on to the next block group.
7397 */
7398 if (!offset && !failed_alloc && !cached &&
7399 loop > LOOP_CACHING_NOWAIT) {
7400 wait_block_group_cache_progress(block_group,
7401 num_bytes + empty_size);
7402 failed_alloc = true;
7403 goto have_block_group;
7404 } else if (!offset) {
7405 goto loop;
7406 }
7407 checks:
7408 search_start = ALIGN(offset, root->stripesize);
7409
7410 /* move on to the next group */
7411 if (search_start + num_bytes >
7412 block_group->key.objectid + block_group->key.offset) {
7413 btrfs_add_free_space(block_group, offset, num_bytes);
7414 goto loop;
7415 }
7416
7417 if (offset < search_start)
7418 btrfs_add_free_space(block_group, offset,
7419 search_start - offset);
7420 BUG_ON(offset > search_start);
7421
7422 ret = btrfs_update_reserved_bytes(block_group, num_bytes,
7423 alloc_type, delalloc);
7424 if (ret == -EAGAIN) {
7425 btrfs_add_free_space(block_group, offset, num_bytes);
7426 goto loop;
7427 }
7428
7429 		/* we are all good, let's return */
7430 ins->objectid = search_start;
7431 ins->offset = num_bytes;
7432
7433 trace_btrfs_reserve_extent(orig_root, block_group,
7434 search_start, num_bytes);
7435 btrfs_release_block_group(block_group, delalloc);
7436 break;
7437 loop:
7438 failed_cluster_refill = false;
7439 failed_alloc = false;
7440 BUG_ON(index != get_block_group_index(block_group));
7441 btrfs_release_block_group(block_group, delalloc);
7442 }
7443 up_read(&space_info->groups_sem);
7444
7445 if ((loop == LOOP_CACHING_NOWAIT) && have_caching_bg
7446 && !orig_have_caching_bg)
7447 orig_have_caching_bg = true;
7448
7449 if (!ins->objectid && loop >= LOOP_CACHING_WAIT && have_caching_bg)
7450 goto search;
7451
7452 if (!ins->objectid && ++index < BTRFS_NR_RAID_TYPES)
7453 goto search;
7454
7455 /*
7456 * LOOP_CACHING_NOWAIT, search partially cached block groups, kicking
7457 * caching kthreads as we move along
7458 * LOOP_CACHING_WAIT, search everything, and wait if our bg is caching
7459 * LOOP_ALLOC_CHUNK, force a chunk allocation and try again
7460 * LOOP_NO_EMPTY_SIZE, set empty_size and empty_cluster to 0 and try
7461 * again
7462 */
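	/*
	 * Example walk-through: a first pass that finds nothing at
	 * LOOP_CACHING_NOWAIT moves to LOOP_CACHING_WAIT (if there were
	 * caching block groups, or the pass wasn't a full search) or
	 * straight to LOOP_ALLOC_CHUNK, then to LOOP_NO_EMPTY_SIZE, which
	 * retries with empty_size and empty_cluster forced to 0 before
	 * finally giving up with -ENOSPC.
	 */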
7463 if (!ins->objectid && loop < LOOP_NO_EMPTY_SIZE) {
7464 index = 0;
7465 if (loop == LOOP_CACHING_NOWAIT) {
7466 /*
7467 * We want to skip the LOOP_CACHING_WAIT step if we
7468 			 * don't have any uncached bgs and we've already done a
7469 * full search through.
7470 */
7471 if (orig_have_caching_bg || !full_search)
7472 loop = LOOP_CACHING_WAIT;
7473 else
7474 loop = LOOP_ALLOC_CHUNK;
7475 } else {
7476 loop++;
7477 }
7478
7479 if (loop == LOOP_ALLOC_CHUNK) {
7480 struct btrfs_trans_handle *trans;
7481 int exist = 0;
7482
7483 trans = current->journal_info;
7484 if (trans)
7485 exist = 1;
7486 else
7487 trans = btrfs_join_transaction(root);
7488
7489 if (IS_ERR(trans)) {
7490 ret = PTR_ERR(trans);
7491 goto out;
7492 }
7493
7494 ret = do_chunk_alloc(trans, root, flags,
7495 CHUNK_ALLOC_FORCE);
7496
7497 /*
7498 * If we can't allocate a new chunk we've already looped
7499 * through at least once, move on to the NO_EMPTY_SIZE
7500 * case.
7501 */
7502 if (ret == -ENOSPC)
7503 loop = LOOP_NO_EMPTY_SIZE;
7504
7505 /*
7506 * Do not bail out on ENOSPC since we
7507 * can do more things.
7508 */
7509 if (ret < 0 && ret != -ENOSPC)
7510 btrfs_abort_transaction(trans,
7511 root, ret);
7512 else
7513 ret = 0;
7514 if (!exist)
7515 btrfs_end_transaction(trans, root);
7516 if (ret)
7517 goto out;
7518 }
7519
7520 if (loop == LOOP_NO_EMPTY_SIZE) {
7521 /*
7522 * Don't loop again if we already have no empty_size and
7523 * no empty_cluster.
7524 */
7525 if (empty_size == 0 &&
7526 empty_cluster == 0) {
7527 ret = -ENOSPC;
7528 goto out;
7529 }
7530 empty_size = 0;
7531 empty_cluster = 0;
7532 }
7533
7534 goto search;
7535 } else if (!ins->objectid) {
7536 ret = -ENOSPC;
7537 } else if (ins->objectid) {
7538 if (!use_cluster && last_ptr) {
7539 spin_lock(&last_ptr->lock);
7540 last_ptr->window_start = ins->objectid;
7541 spin_unlock(&last_ptr->lock);
7542 }
7543 ret = 0;
7544 }
7545 out:
7546 if (ret == -ENOSPC) {
7547 spin_lock(&space_info->lock);
7548 space_info->max_extent_size = max_extent_size;
7549 spin_unlock(&space_info->lock);
7550 ins->offset = max_extent_size;
7551 }
7552 return ret;
7553 }
7554
7555 static void dump_space_info(struct btrfs_space_info *info, u64 bytes,
7556 int dump_block_groups)
7557 {
7558 struct btrfs_block_group_cache *cache;
7559 int index = 0;
7560
7561 spin_lock(&info->lock);
7562 printk(KERN_INFO "BTRFS: space_info %llu has %llu free, is %sfull\n",
7563 info->flags,
7564 info->total_bytes - info->bytes_used - info->bytes_pinned -
7565 info->bytes_reserved - info->bytes_readonly,
7566 (info->full) ? "" : "not ");
7567 printk(KERN_INFO "BTRFS: space_info total=%llu, used=%llu, pinned=%llu, "
7568 "reserved=%llu, may_use=%llu, readonly=%llu\n",
7569 info->total_bytes, info->bytes_used, info->bytes_pinned,
7570 info->bytes_reserved, info->bytes_may_use,
7571 info->bytes_readonly);
7572 spin_unlock(&info->lock);
7573
7574 if (!dump_block_groups)
7575 return;
7576
7577 down_read(&info->groups_sem);
7578 again:
7579 list_for_each_entry(cache, &info->block_groups[index], list) {
7580 spin_lock(&cache->lock);
7581 printk(KERN_INFO "BTRFS: "
7582 "block group %llu has %llu bytes, "
7583 "%llu used %llu pinned %llu reserved %s\n",
7584 cache->key.objectid, cache->key.offset,
7585 btrfs_block_group_used(&cache->item), cache->pinned,
7586 cache->reserved, cache->ro ? "[readonly]" : "");
7587 btrfs_dump_free_space(cache, bytes);
7588 spin_unlock(&cache->lock);
7589 }
7590 if (++index < BTRFS_NR_RAID_TYPES)
7591 goto again;
7592 up_read(&info->groups_sem);
7593 }
7594
7595 int btrfs_reserve_extent(struct btrfs_root *root,
7596 u64 num_bytes, u64 min_alloc_size,
7597 u64 empty_size, u64 hint_byte,
7598 struct btrfs_key *ins, int is_data, int delalloc)
7599 {
7600 bool final_tried = num_bytes == min_alloc_size;
7601 u64 flags;
7602 int ret;
7603
7604 flags = btrfs_get_alloc_profile(root, is_data);
7605 again:
7606 WARN_ON(num_bytes < root->sectorsize);
7607 ret = find_free_extent(root, num_bytes, empty_size, hint_byte, ins,
7608 flags, delalloc);
7609
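	/*
	 * Worked example (illustrative numbers): a 1M request with a 64K
	 * min_alloc_size that fails with ins->offset (max_extent_size) at
	 * 320K retries with num_bytes = min(1M >> 1, 320K) = 320K, rounded
	 * down to the sectorsize; only once num_bytes has shrunk all the
	 * way to min_alloc_size does final_tried stop further retries.
	 */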
7610 if (ret == -ENOSPC) {
7611 if (!final_tried && ins->offset) {
7612 num_bytes = min(num_bytes >> 1, ins->offset);
7613 num_bytes = round_down(num_bytes, root->sectorsize);
7614 num_bytes = max(num_bytes, min_alloc_size);
7615 if (num_bytes == min_alloc_size)
7616 final_tried = true;
7617 goto again;
7618 } else if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7619 struct btrfs_space_info *sinfo;
7620
7621 sinfo = __find_space_info(root->fs_info, flags);
7622 btrfs_err(root->fs_info, "allocation failed flags %llu, wanted %llu",
7623 flags, num_bytes);
7624 if (sinfo)
7625 dump_space_info(sinfo, num_bytes, 1);
7626 }
7627 }
7628
7629 return ret;
7630 }
7631
7632 static int __btrfs_free_reserved_extent(struct btrfs_root *root,
7633 u64 start, u64 len,
7634 int pin, int delalloc)
7635 {
7636 struct btrfs_block_group_cache *cache;
7637 int ret = 0;
7638
7639 cache = btrfs_lookup_block_group(root->fs_info, start);
7640 if (!cache) {
7641 btrfs_err(root->fs_info, "Unable to find block group for %llu",
7642 start);
7643 return -ENOSPC;
7644 }
7645
7646 if (pin)
7647 pin_down_extent(root, cache, start, len, 1);
7648 else {
7649 if (btrfs_test_opt(root, DISCARD))
7650 ret = btrfs_discard_extent(root, start, len, NULL);
7651 btrfs_add_free_space(cache, start, len);
7652 btrfs_update_reserved_bytes(cache, len, RESERVE_FREE, delalloc);
7653 }
7654
7655 btrfs_put_block_group(cache);
7656
7657 trace_btrfs_reserved_extent_free(root, start, len);
7658
7659 return ret;
7660 }
7661
7662 int btrfs_free_reserved_extent(struct btrfs_root *root,
7663 u64 start, u64 len, int delalloc)
7664 {
7665 return __btrfs_free_reserved_extent(root, start, len, 0, delalloc);
7666 }
7667
7668 int btrfs_free_and_pin_reserved_extent(struct btrfs_root *root,
7669 u64 start, u64 len)
7670 {
7671 return __btrfs_free_reserved_extent(root, start, len, 1, 0);
7672 }
7673
7674 static int alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7675 struct btrfs_root *root,
7676 u64 parent, u64 root_objectid,
7677 u64 flags, u64 owner, u64 offset,
7678 struct btrfs_key *ins, int ref_mod)
7679 {
7680 int ret;
7681 struct btrfs_fs_info *fs_info = root->fs_info;
7682 struct btrfs_extent_item *extent_item;
7683 struct btrfs_extent_inline_ref *iref;
7684 struct btrfs_path *path;
7685 struct extent_buffer *leaf;
7686 int type;
7687 u32 size;
7688
7689 if (parent > 0)
7690 type = BTRFS_SHARED_DATA_REF_KEY;
7691 else
7692 type = BTRFS_EXTENT_DATA_REF_KEY;
7693
7694 size = sizeof(*extent_item) + btrfs_extent_inline_ref_size(type);
7695
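	/*
	 * Layout of the item inserted below: a btrfs_extent_item followed
	 * by one inline backref.  For a shared ref the inline ref's offset
	 * is the parent and a btrfs_shared_data_ref (the count) follows;
	 * for a normal ref a btrfs_extent_data_ref (root, objectid, offset,
	 * count) overlays the inline ref's offset field, which is why size
	 * is sizeof(*extent_item) + btrfs_extent_inline_ref_size(type).
	 */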
7696 path = btrfs_alloc_path();
7697 if (!path)
7698 return -ENOMEM;
7699
7700 path->leave_spinning = 1;
7701 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7702 ins, size);
7703 if (ret) {
7704 btrfs_free_path(path);
7705 return ret;
7706 }
7707
7708 leaf = path->nodes[0];
7709 extent_item = btrfs_item_ptr(leaf, path->slots[0],
7710 struct btrfs_extent_item);
7711 btrfs_set_extent_refs(leaf, extent_item, ref_mod);
7712 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7713 btrfs_set_extent_flags(leaf, extent_item,
7714 flags | BTRFS_EXTENT_FLAG_DATA);
7715
7716 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7717 btrfs_set_extent_inline_ref_type(leaf, iref, type);
7718 if (parent > 0) {
7719 struct btrfs_shared_data_ref *ref;
7720 ref = (struct btrfs_shared_data_ref *)(iref + 1);
7721 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7722 btrfs_set_shared_data_ref_count(leaf, ref, ref_mod);
7723 } else {
7724 struct btrfs_extent_data_ref *ref;
7725 ref = (struct btrfs_extent_data_ref *)(&iref->offset);
7726 btrfs_set_extent_data_ref_root(leaf, ref, root_objectid);
7727 btrfs_set_extent_data_ref_objectid(leaf, ref, owner);
7728 btrfs_set_extent_data_ref_offset(leaf, ref, offset);
7729 btrfs_set_extent_data_ref_count(leaf, ref, ref_mod);
7730 }
7731
7732 btrfs_mark_buffer_dirty(path->nodes[0]);
7733 btrfs_free_path(path);
7734
7735 ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
7736 ins->offset);
7737 if (ret)
7738 return ret;
7739
7740 ret = update_block_group(trans, root, ins->objectid, ins->offset, 1);
7741 if (ret) { /* -ENOENT, logic error */
7742 btrfs_err(fs_info, "update block group failed for %llu %llu",
7743 ins->objectid, ins->offset);
7744 BUG();
7745 }
7746 trace_btrfs_reserved_extent_alloc(root, ins->objectid, ins->offset);
7747 return ret;
7748 }
7749
7750 static int alloc_reserved_tree_block(struct btrfs_trans_handle *trans,
7751 struct btrfs_root *root,
7752 u64 parent, u64 root_objectid,
7753 u64 flags, struct btrfs_disk_key *key,
7754 int level, struct btrfs_key *ins)
7755 {
7756 int ret;
7757 struct btrfs_fs_info *fs_info = root->fs_info;
7758 struct btrfs_extent_item *extent_item;
7759 struct btrfs_tree_block_info *block_info;
7760 struct btrfs_extent_inline_ref *iref;
7761 struct btrfs_path *path;
7762 struct extent_buffer *leaf;
7763 u32 size = sizeof(*extent_item) + sizeof(*iref);
7764 u64 num_bytes = ins->offset;
7765 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
7766 SKINNY_METADATA);
7767
7768 if (!skinny_metadata)
7769 size += sizeof(*block_info);
7770
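	/*
	 * Layout note: with skinny metadata the key's offset carries the
	 * level rather than a byte count (which is why num_bytes is reset
	 * to root->nodesize below) and no tree_block_info is stored;
	 * otherwise a btrfs_tree_block_info holding the first key and the
	 * level sits between the extent item and the inline ref.
	 */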
7771 path = btrfs_alloc_path();
7772 if (!path) {
7773 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7774 root->nodesize);
7775 return -ENOMEM;
7776 }
7777
7778 path->leave_spinning = 1;
7779 ret = btrfs_insert_empty_item(trans, fs_info->extent_root, path,
7780 ins, size);
7781 if (ret) {
7782 btrfs_free_path(path);
7783 btrfs_free_and_pin_reserved_extent(root, ins->objectid,
7784 root->nodesize);
7785 return ret;
7786 }
7787
7788 leaf = path->nodes[0];
7789 extent_item = btrfs_item_ptr(leaf, path->slots[0],
7790 struct btrfs_extent_item);
7791 btrfs_set_extent_refs(leaf, extent_item, 1);
7792 btrfs_set_extent_generation(leaf, extent_item, trans->transid);
7793 btrfs_set_extent_flags(leaf, extent_item,
7794 flags | BTRFS_EXTENT_FLAG_TREE_BLOCK);
7795
7796 if (skinny_metadata) {
7797 iref = (struct btrfs_extent_inline_ref *)(extent_item + 1);
7798 num_bytes = root->nodesize;
7799 } else {
7800 block_info = (struct btrfs_tree_block_info *)(extent_item + 1);
7801 btrfs_set_tree_block_key(leaf, block_info, key);
7802 btrfs_set_tree_block_level(leaf, block_info, level);
7803 iref = (struct btrfs_extent_inline_ref *)(block_info + 1);
7804 }
7805
7806 if (parent > 0) {
7807 BUG_ON(!(flags & BTRFS_BLOCK_FLAG_FULL_BACKREF));
7808 btrfs_set_extent_inline_ref_type(leaf, iref,
7809 BTRFS_SHARED_BLOCK_REF_KEY);
7810 btrfs_set_extent_inline_ref_offset(leaf, iref, parent);
7811 } else {
7812 btrfs_set_extent_inline_ref_type(leaf, iref,
7813 BTRFS_TREE_BLOCK_REF_KEY);
7814 btrfs_set_extent_inline_ref_offset(leaf, iref, root_objectid);
7815 }
7816
7817 btrfs_mark_buffer_dirty(leaf);
7818 btrfs_free_path(path);
7819
7820 ret = remove_from_free_space_tree(trans, fs_info, ins->objectid,
7821 num_bytes);
7822 if (ret)
7823 return ret;
7824
7825 ret = update_block_group(trans, root, ins->objectid, root->nodesize,
7826 1);
7827 if (ret) { /* -ENOENT, logic error */
7828 btrfs_err(fs_info, "update block group failed for %llu %llu",
7829 ins->objectid, ins->offset);
7830 BUG();
7831 }
7832
7833 trace_btrfs_reserved_extent_alloc(root, ins->objectid, root->nodesize);
7834 return ret;
7835 }
7836
7837 int btrfs_alloc_reserved_file_extent(struct btrfs_trans_handle *trans,
7838 struct btrfs_root *root,
7839 u64 root_objectid, u64 owner,
7840 u64 offset, u64 ram_bytes,
7841 struct btrfs_key *ins)
7842 {
7843 int ret;
7844
7845 BUG_ON(root_objectid == BTRFS_TREE_LOG_OBJECTID);
7846
7847 ret = btrfs_add_delayed_data_ref(root->fs_info, trans, ins->objectid,
7848 ins->offset, 0,
7849 root_objectid, owner, offset,
7850 ram_bytes, BTRFS_ADD_DELAYED_EXTENT,
7851 NULL);
7852 return ret;
7853 }
7854
7855 /*
7856 * this is used by the tree logging recovery code. It records that
7857 * an extent has been allocated and makes sure to clear the free
7858 * space cache bits as well
7859 */
7860 int btrfs_alloc_logged_file_extent(struct btrfs_trans_handle *trans,
7861 struct btrfs_root *root,
7862 u64 root_objectid, u64 owner, u64 offset,
7863 struct btrfs_key *ins)
7864 {
7865 int ret;
7866 struct btrfs_block_group_cache *block_group;
7867
7868 /*
7869 * Mixed block groups will exclude before processing the log so we only
7870 	 * need to do the exclude dance if this fs isn't mixed.
7871 */
7872 if (!btrfs_fs_incompat(root->fs_info, MIXED_GROUPS)) {
7873 ret = __exclude_logged_extent(root, ins->objectid, ins->offset);
7874 if (ret)
7875 return ret;
7876 }
7877
7878 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
7879 if (!block_group)
7880 return -EINVAL;
7881
7882 ret = btrfs_update_reserved_bytes(block_group, ins->offset,
7883 RESERVE_ALLOC_NO_ACCOUNT, 0);
7884 BUG_ON(ret); /* logic error */
7885 ret = alloc_reserved_file_extent(trans, root, 0, root_objectid,
7886 0, owner, offset, ins, 1);
7887 btrfs_put_block_group(block_group);
7888 return ret;
7889 }
7890
7891 static struct extent_buffer *
7892 btrfs_init_new_buffer(struct btrfs_trans_handle *trans, struct btrfs_root *root,
7893 u64 bytenr, int level)
7894 {
7895 struct extent_buffer *buf;
7896
7897 buf = btrfs_find_create_tree_block(root, bytenr);
7898 if (!buf)
7899 return ERR_PTR(-ENOMEM);
7900 btrfs_set_header_generation(buf, trans->transid);
7901 btrfs_set_buffer_lockdep_class(root->root_key.objectid, buf, level);
7902 btrfs_tree_lock(buf);
7903 clean_tree_block(trans, root->fs_info, buf);
7904 clear_bit(EXTENT_BUFFER_STALE, &buf->bflags);
7905
7906 btrfs_set_lock_blocking(buf);
7907 set_extent_buffer_uptodate(buf);
7908
7909 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
7910 buf->log_index = root->log_transid % 2;
7911 /*
7912 * we allow two log transactions at a time, use different
7913 		 * EXTENT bits to differentiate dirty pages.
7914 */
7915 if (buf->log_index == 0)
7916 set_extent_dirty(&root->dirty_log_pages, buf->start,
7917 buf->start + buf->len - 1, GFP_NOFS);
7918 else
7919 set_extent_new(&root->dirty_log_pages, buf->start,
7920 buf->start + buf->len - 1, GFP_NOFS);
7921 } else {
7922 buf->log_index = -1;
7923 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
7924 buf->start + buf->len - 1, GFP_NOFS);
7925 }
7926 trans->blocks_used++;
7927 /* this returns a buffer locked for blocking */
7928 return buf;
7929 }
7930
7931 static struct btrfs_block_rsv *
7932 use_block_rsv(struct btrfs_trans_handle *trans,
7933 struct btrfs_root *root, u32 blocksize)
7934 {
7935 struct btrfs_block_rsv *block_rsv;
7936 struct btrfs_block_rsv *global_rsv = &root->fs_info->global_block_rsv;
7937 int ret;
7938 bool global_updated = false;
7939
7940 block_rsv = get_block_rsv(trans, root);
7941
7942 if (unlikely(block_rsv->size == 0))
7943 goto try_reserve;
7944 again:
7945 ret = block_rsv_use_bytes(block_rsv, blocksize);
7946 if (!ret)
7947 return block_rsv;
7948
7949 if (block_rsv->failfast)
7950 return ERR_PTR(ret);
7951
7952 if (block_rsv->type == BTRFS_BLOCK_RSV_GLOBAL && !global_updated) {
7953 global_updated = true;
7954 update_global_block_rsv(root->fs_info);
7955 goto again;
7956 }
7957
7958 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
7959 static DEFINE_RATELIMIT_STATE(_rs,
7960 DEFAULT_RATELIMIT_INTERVAL * 10,
7961 /*DEFAULT_RATELIMIT_BURST*/ 1);
7962 if (__ratelimit(&_rs))
7963 WARN(1, KERN_DEBUG
7964 "BTRFS: block rsv returned %d\n", ret);
7965 }
7966 try_reserve:
7967 ret = reserve_metadata_bytes(root, block_rsv, blocksize,
7968 BTRFS_RESERVE_NO_FLUSH);
7969 if (!ret)
7970 return block_rsv;
7971 /*
7972 * If we couldn't reserve metadata bytes try and use some from
7973 	 * the global reserve if its space info is the same as the global
7974 * reservation.
7975 */
7976 if (block_rsv->type != BTRFS_BLOCK_RSV_GLOBAL &&
7977 block_rsv->space_info == global_rsv->space_info) {
7978 ret = block_rsv_use_bytes(global_rsv, blocksize);
7979 if (!ret)
7980 return global_rsv;
7981 }
7982 return ERR_PTR(ret);
7983 }
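/*
 * To summarize the fallback order in use_block_rsv() above: charge the
 * transaction's own block rsv first, refresh and retry the global rsv once,
 * then attempt a fresh no-flush reservation, and finally steal from the
 * global reserve when the space_info matches; an ERR_PTR is returned only
 * after all of those attempts fail.
 */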
7984
7985 static void unuse_block_rsv(struct btrfs_fs_info *fs_info,
7986 struct btrfs_block_rsv *block_rsv, u32 blocksize)
7987 {
7988 block_rsv_add_bytes(block_rsv, blocksize, 0);
7989 block_rsv_release_bytes(fs_info, block_rsv, NULL, 0);
7990 }
7991
7992 /*
7993  * finds a free extent and does all the dirty work required for allocation;
7994 * returns the tree buffer or an ERR_PTR on error.
7995 */
7996 struct extent_buffer *btrfs_alloc_tree_block(struct btrfs_trans_handle *trans,
7997 struct btrfs_root *root,
7998 u64 parent, u64 root_objectid,
7999 struct btrfs_disk_key *key, int level,
8000 u64 hint, u64 empty_size)
8001 {
8002 struct btrfs_key ins;
8003 struct btrfs_block_rsv *block_rsv;
8004 struct extent_buffer *buf;
8005 struct btrfs_delayed_extent_op *extent_op;
8006 u64 flags = 0;
8007 int ret;
8008 u32 blocksize = root->nodesize;
8009 bool skinny_metadata = btrfs_fs_incompat(root->fs_info,
8010 SKINNY_METADATA);
8011
8012 if (btrfs_test_is_dummy_root(root)) {
8013 buf = btrfs_init_new_buffer(trans, root, root->alloc_bytenr,
8014 level);
8015 if (!IS_ERR(buf))
8016 root->alloc_bytenr += blocksize;
8017 return buf;
8018 }
8019
8020 block_rsv = use_block_rsv(trans, root, blocksize);
8021 if (IS_ERR(block_rsv))
8022 return ERR_CAST(block_rsv);
8023
8024 ret = btrfs_reserve_extent(root, blocksize, blocksize,
8025 empty_size, hint, &ins, 0, 0);
8026 if (ret)
8027 goto out_unuse;
8028
8029 buf = btrfs_init_new_buffer(trans, root, ins.objectid, level);
8030 if (IS_ERR(buf)) {
8031 ret = PTR_ERR(buf);
8032 goto out_free_reserved;
8033 }
8034
8035 if (root_objectid == BTRFS_TREE_RELOC_OBJECTID) {
8036 if (parent == 0)
8037 parent = ins.objectid;
8038 flags |= BTRFS_BLOCK_FLAG_FULL_BACKREF;
8039 } else
8040 BUG_ON(parent > 0);
8041
8042 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
8043 extent_op = btrfs_alloc_delayed_extent_op();
8044 if (!extent_op) {
8045 ret = -ENOMEM;
8046 goto out_free_buf;
8047 }
8048 if (key)
8049 memcpy(&extent_op->key, key, sizeof(extent_op->key));
8050 else
8051 memset(&extent_op->key, 0, sizeof(extent_op->key));
8052 extent_op->flags_to_set = flags;
8053 extent_op->update_key = skinny_metadata ? false : true;
8054 extent_op->update_flags = true;
8055 extent_op->is_data = false;
8056 extent_op->level = level;
8057
8058 ret = btrfs_add_delayed_tree_ref(root->fs_info, trans,
8059 ins.objectid, ins.offset,
8060 parent, root_objectid, level,
8061 BTRFS_ADD_DELAYED_EXTENT,
8062 extent_op);
8063 if (ret)
8064 goto out_free_delayed;
8065 }
8066 return buf;
8067
8068 out_free_delayed:
8069 btrfs_free_delayed_extent_op(extent_op);
8070 out_free_buf:
8071 free_extent_buffer(buf);
8072 out_free_reserved:
8073 btrfs_free_reserved_extent(root, ins.objectid, ins.offset, 0);
8074 out_unuse:
8075 unuse_block_rsv(root->fs_info, block_rsv, blocksize);
8076 return ERR_PTR(ret);
8077 }
8078
8079 struct walk_control {
8080 u64 refs[BTRFS_MAX_LEVEL];
8081 u64 flags[BTRFS_MAX_LEVEL];
8082 struct btrfs_key update_progress;
8083 int stage;
8084 int level;
8085 int shared_level;
8086 int update_ref;
8087 int keep_locks;
8088 int reada_slot;
8089 int reada_count;
8090 int for_reloc;
8091 };
8092
8093 #define DROP_REFERENCE 1
8094 #define UPDATE_BACKREF 2
8095
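/*
 * A minimal sketch of how a caller seeds a walk_control for a full drop
 * (mirroring btrfs_drop_snapshot() further below); the values are
 * illustrative, not a new API:
 *
 *	wc->level = btrfs_header_level(root->node);
 *	wc->shared_level = -1;
 *	wc->stage = DROP_REFERENCE;
 *	wc->update_ref = update_ref;
 *	wc->keep_locks = 0;
 *	wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
 *
 * do_walk_down() flips wc->stage to UPDATE_BACKREF when it meets a shared
 * block whose backrefs need rewriting, and walk_up_proc() flips it back to
 * DROP_REFERENCE once that subtree has been processed.
 */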
8096 static noinline void reada_walk_down(struct btrfs_trans_handle *trans,
8097 struct btrfs_root *root,
8098 struct walk_control *wc,
8099 struct btrfs_path *path)
8100 {
8101 u64 bytenr;
8102 u64 generation;
8103 u64 refs;
8104 u64 flags;
8105 u32 nritems;
8106 u32 blocksize;
8107 struct btrfs_key key;
8108 struct extent_buffer *eb;
8109 int ret;
8110 int slot;
8111 int nread = 0;
8112
8113 if (path->slots[wc->level] < wc->reada_slot) {
8114 wc->reada_count = wc->reada_count * 2 / 3;
8115 wc->reada_count = max(wc->reada_count, 2);
8116 } else {
8117 wc->reada_count = wc->reada_count * 3 / 2;
8118 wc->reada_count = min_t(int, wc->reada_count,
8119 BTRFS_NODEPTRS_PER_BLOCK(root));
8120 }
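	/*
	 * Worked example: with reada_count at 32, a walker still inside
	 * the previously read-ahead window shrinks the next window to
	 * 32 * 2 / 3 = 21 (never below 2), while one that has kept up
	 * grows it to 32 * 3 / 2 = 48, capped at
	 * BTRFS_NODEPTRS_PER_BLOCK(root).
	 */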
8121
8122 eb = path->nodes[wc->level];
8123 nritems = btrfs_header_nritems(eb);
8124 blocksize = root->nodesize;
8125
8126 for (slot = path->slots[wc->level]; slot < nritems; slot++) {
8127 if (nread >= wc->reada_count)
8128 break;
8129
8130 cond_resched();
8131 bytenr = btrfs_node_blockptr(eb, slot);
8132 generation = btrfs_node_ptr_generation(eb, slot);
8133
8134 if (slot == path->slots[wc->level])
8135 goto reada;
8136
8137 if (wc->stage == UPDATE_BACKREF &&
8138 generation <= root->root_key.offset)
8139 continue;
8140
8141 /* We don't lock the tree block, it's OK to be racy here */
8142 ret = btrfs_lookup_extent_info(trans, root, bytenr,
8143 wc->level - 1, 1, &refs,
8144 &flags);
8145 /* We don't care about errors in readahead. */
8146 if (ret < 0)
8147 continue;
8148 BUG_ON(refs == 0);
8149
8150 if (wc->stage == DROP_REFERENCE) {
8151 if (refs == 1)
8152 goto reada;
8153
8154 if (wc->level == 1 &&
8155 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8156 continue;
8157 if (!wc->update_ref ||
8158 generation <= root->root_key.offset)
8159 continue;
8160 btrfs_node_key_to_cpu(eb, &key, slot);
8161 ret = btrfs_comp_cpu_keys(&key,
8162 &wc->update_progress);
8163 if (ret < 0)
8164 continue;
8165 } else {
8166 if (wc->level == 1 &&
8167 (flags & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8168 continue;
8169 }
8170 reada:
8171 readahead_tree_block(root, bytenr);
8172 nread++;
8173 }
8174 wc->reada_slot = slot;
8175 }
8176
8177 /*
8178 * These may not be seen by the usual inc/dec ref code so we have to
8179 * add them here.
8180 */
8181 static int record_one_subtree_extent(struct btrfs_trans_handle *trans,
8182 struct btrfs_root *root, u64 bytenr,
8183 u64 num_bytes)
8184 {
8185 struct btrfs_qgroup_extent_record *qrecord;
8186 struct btrfs_delayed_ref_root *delayed_refs;
8187
8188 qrecord = kmalloc(sizeof(*qrecord), GFP_NOFS);
8189 if (!qrecord)
8190 return -ENOMEM;
8191
8192 qrecord->bytenr = bytenr;
8193 qrecord->num_bytes = num_bytes;
8194 qrecord->old_roots = NULL;
8195
8196 delayed_refs = &trans->transaction->delayed_refs;
8197 spin_lock(&delayed_refs->lock);
8198 if (btrfs_qgroup_insert_dirty_extent(delayed_refs, qrecord))
8199 kfree(qrecord);
8200 spin_unlock(&delayed_refs->lock);
8201
8202 return 0;
8203 }
8204
8205 static int account_leaf_items(struct btrfs_trans_handle *trans,
8206 struct btrfs_root *root,
8207 struct extent_buffer *eb)
8208 {
8209 int nr = btrfs_header_nritems(eb);
8210 int i, extent_type, ret;
8211 struct btrfs_key key;
8212 struct btrfs_file_extent_item *fi;
8213 u64 bytenr, num_bytes;
8214
8215 /* We can be called directly from walk_up_proc() */
8216 if (!root->fs_info->quota_enabled)
8217 return 0;
8218
8219 for (i = 0; i < nr; i++) {
8220 btrfs_item_key_to_cpu(eb, &key, i);
8221
8222 if (key.type != BTRFS_EXTENT_DATA_KEY)
8223 continue;
8224
8225 fi = btrfs_item_ptr(eb, i, struct btrfs_file_extent_item);
8226 /* filter out non qgroup-accountable extents */
8227 extent_type = btrfs_file_extent_type(eb, fi);
8228
8229 if (extent_type == BTRFS_FILE_EXTENT_INLINE)
8230 continue;
8231
8232 bytenr = btrfs_file_extent_disk_bytenr(eb, fi);
8233 if (!bytenr)
8234 continue;
8235
8236 num_bytes = btrfs_file_extent_disk_num_bytes(eb, fi);
8237
8238 ret = record_one_subtree_extent(trans, root, bytenr, num_bytes);
8239 if (ret)
8240 return ret;
8241 }
8242 return 0;
8243 }
8244
8245 /*
8246 * Walk up the tree from the bottom, freeing leaves and any interior
8247 * nodes which have had all slots visited. If a node (leaf or
8248  * interior) is freed, the node above it will have its slot
8249 * incremented. The root node will never be freed.
8250 *
8251 * At the end of this function, we should have a path which has all
8252 * slots incremented to the next position for a search. If we need to
8253 * read a new node it will be NULL and the node above it will have the
8254 * correct slot selected for a later read.
8255 *
8256 * If we increment the root nodes slot counter past the number of
8257 * elements, 1 is returned to signal completion of the search.
8258 */
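/*
 * Worked example: with a 3-level path whose slots are [nr-1, nr-1, 5] (leaf
 * and level-1 node both exhausted), the loop frees the leaf and the level-1
 * node and leaves slots [0, 0, 6] with only the level-2 node populated, so
 * the caller re-reads downward starting from slot 6.
 */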
8259 static int adjust_slots_upwards(struct btrfs_root *root,
8260 struct btrfs_path *path, int root_level)
8261 {
8262 int level = 0;
8263 int nr, slot;
8264 struct extent_buffer *eb;
8265
8266 if (root_level == 0)
8267 return 1;
8268
8269 while (level <= root_level) {
8270 eb = path->nodes[level];
8271 nr = btrfs_header_nritems(eb);
8272 path->slots[level]++;
8273 slot = path->slots[level];
8274 if (slot >= nr || level == 0) {
8275 /*
8276 * Don't free the root - we will detect this
8277 * condition after our loop and return a
8278 * positive value for caller to stop walking the tree.
8279 */
8280 if (level != root_level) {
8281 btrfs_tree_unlock_rw(eb, path->locks[level]);
8282 path->locks[level] = 0;
8283
8284 free_extent_buffer(eb);
8285 path->nodes[level] = NULL;
8286 path->slots[level] = 0;
8287 }
8288 } else {
8289 /*
8290 * We have a valid slot to walk back down
8291 * from. Stop here so caller can process these
8292 * new nodes.
8293 */
8294 break;
8295 }
8296
8297 level++;
8298 }
8299
8300 eb = path->nodes[root_level];
8301 if (path->slots[root_level] >= btrfs_header_nritems(eb))
8302 return 1;
8303
8304 return 0;
8305 }
8306
8307 /*
8308 * root_eb is the subtree root and is locked before this function is called.
8309 */
8310 static int account_shared_subtree(struct btrfs_trans_handle *trans,
8311 struct btrfs_root *root,
8312 struct extent_buffer *root_eb,
8313 u64 root_gen,
8314 int root_level)
8315 {
8316 int ret = 0;
8317 int level;
8318 struct extent_buffer *eb = root_eb;
8319 struct btrfs_path *path = NULL;
8320
8321 BUG_ON(root_level < 0 || root_level > BTRFS_MAX_LEVEL);
8322 BUG_ON(root_eb == NULL);
8323
8324 if (!root->fs_info->quota_enabled)
8325 return 0;
8326
8327 if (!extent_buffer_uptodate(root_eb)) {
8328 ret = btrfs_read_buffer(root_eb, root_gen);
8329 if (ret)
8330 goto out;
8331 }
8332
8333 if (root_level == 0) {
8334 ret = account_leaf_items(trans, root, root_eb);
8335 goto out;
8336 }
8337
8338 path = btrfs_alloc_path();
8339 if (!path)
8340 return -ENOMEM;
8341
8342 /*
8343 * Walk down the tree. Missing extent blocks are filled in as
8344 * we go. Metadata is accounted every time we read a new
8345 * extent block.
8346 *
8347 * When we reach a leaf, we account for file extent items in it,
8348 * walk back up the tree (adjusting slot pointers as we go)
8349 * and restart the search process.
8350 */
8351 extent_buffer_get(root_eb); /* For path */
8352 path->nodes[root_level] = root_eb;
8353 path->slots[root_level] = 0;
8354 path->locks[root_level] = 0; /* so release_path doesn't try to unlock */
8355 walk_down:
8356 level = root_level;
8357 while (level >= 0) {
8358 if (path->nodes[level] == NULL) {
8359 int parent_slot;
8360 u64 child_gen;
8361 u64 child_bytenr;
8362
8363 /* We need to get child blockptr/gen from
8364 * parent before we can read it. */
8365 eb = path->nodes[level + 1];
8366 parent_slot = path->slots[level + 1];
8367 child_bytenr = btrfs_node_blockptr(eb, parent_slot);
8368 child_gen = btrfs_node_ptr_generation(eb, parent_slot);
8369
8370 eb = read_tree_block(root, child_bytenr, child_gen);
8371 if (IS_ERR(eb)) {
8372 ret = PTR_ERR(eb);
8373 goto out;
8374 } else if (!extent_buffer_uptodate(eb)) {
8375 free_extent_buffer(eb);
8376 ret = -EIO;
8377 goto out;
8378 }
8379
8380 path->nodes[level] = eb;
8381 path->slots[level] = 0;
8382
8383 btrfs_tree_read_lock(eb);
8384 btrfs_set_lock_blocking_rw(eb, BTRFS_READ_LOCK);
8385 path->locks[level] = BTRFS_READ_LOCK_BLOCKING;
8386
8387 ret = record_one_subtree_extent(trans, root, child_bytenr,
8388 root->nodesize);
8389 if (ret)
8390 goto out;
8391 }
8392
8393 if (level == 0) {
8394 ret = account_leaf_items(trans, root, path->nodes[level]);
8395 if (ret)
8396 goto out;
8397
8398 /* Nonzero return here means we completed our search */
8399 ret = adjust_slots_upwards(root, path, root_level);
8400 if (ret)
8401 break;
8402
8403 /* Restart search with new slots */
8404 goto walk_down;
8405 }
8406
8407 level--;
8408 }
8409
8410 ret = 0;
8411 out:
8412 btrfs_free_path(path);
8413
8414 return ret;
8415 }
8416
8417 /*
8418 * helper to process tree block while walking down the tree.
8419 *
8420 * when wc->stage == UPDATE_BACKREF, this function updates
8421 * back refs for pointers in the block.
8422 *
8423 * NOTE: return value 1 means we should stop walking down.
8424 */
8425 static noinline int walk_down_proc(struct btrfs_trans_handle *trans,
8426 struct btrfs_root *root,
8427 struct btrfs_path *path,
8428 struct walk_control *wc, int lookup_info)
8429 {
8430 int level = wc->level;
8431 struct extent_buffer *eb = path->nodes[level];
8432 u64 flag = BTRFS_BLOCK_FLAG_FULL_BACKREF;
8433 int ret;
8434
8435 if (wc->stage == UPDATE_BACKREF &&
8436 btrfs_header_owner(eb) != root->root_key.objectid)
8437 return 1;
8438
8439 /*
8440 	 * when the reference count of a tree block is 1, it won't increase
8441 	 * again. once the full backref flag is set, we never clear it.
8442 */
8443 if (lookup_info &&
8444 ((wc->stage == DROP_REFERENCE && wc->refs[level] != 1) ||
8445 (wc->stage == UPDATE_BACKREF && !(wc->flags[level] & flag)))) {
8446 BUG_ON(!path->locks[level]);
8447 ret = btrfs_lookup_extent_info(trans, root,
8448 eb->start, level, 1,
8449 &wc->refs[level],
8450 &wc->flags[level]);
8451 BUG_ON(ret == -ENOMEM);
8452 if (ret)
8453 return ret;
8454 BUG_ON(wc->refs[level] == 0);
8455 }
8456
8457 if (wc->stage == DROP_REFERENCE) {
8458 if (wc->refs[level] > 1)
8459 return 1;
8460
8461 if (path->locks[level] && !wc->keep_locks) {
8462 btrfs_tree_unlock_rw(eb, path->locks[level]);
8463 path->locks[level] = 0;
8464 }
8465 return 0;
8466 }
8467
8468 /* wc->stage == UPDATE_BACKREF */
8469 if (!(wc->flags[level] & flag)) {
8470 BUG_ON(!path->locks[level]);
8471 ret = btrfs_inc_ref(trans, root, eb, 1);
8472 BUG_ON(ret); /* -ENOMEM */
8473 ret = btrfs_dec_ref(trans, root, eb, 0);
8474 BUG_ON(ret); /* -ENOMEM */
8475 ret = btrfs_set_disk_extent_flags(trans, root, eb->start,
8476 eb->len, flag,
8477 btrfs_header_level(eb), 0);
8478 BUG_ON(ret); /* -ENOMEM */
8479 wc->flags[level] |= flag;
8480 }
8481
8482 /*
8483 * the block is shared by multiple trees, so it's not good to
8484 * keep the tree lock
8485 */
8486 if (path->locks[level] && level > 0) {
8487 btrfs_tree_unlock_rw(eb, path->locks[level]);
8488 path->locks[level] = 0;
8489 }
8490 return 0;
8491 }
8492
8493 /*
8494 * helper to process tree block pointer.
8495 *
8496 * when wc->stage == DROP_REFERENCE, this function checks
8497 * reference count of the block pointed to. if the block
8498 * is shared and we need update back refs for the subtree
8499 * rooted at the block, this function changes wc->stage to
8500 * UPDATE_BACKREF. if the block is shared and there is no
8501 * need to update back, this function drops the reference
8502 * to the block.
8503 *
8504 * NOTE: return value 1 means we should stop walking down.
8505 */
8506 static noinline int do_walk_down(struct btrfs_trans_handle *trans,
8507 struct btrfs_root *root,
8508 struct btrfs_path *path,
8509 struct walk_control *wc, int *lookup_info)
8510 {
8511 u64 bytenr;
8512 u64 generation;
8513 u64 parent;
8514 u32 blocksize;
8515 struct btrfs_key key;
8516 struct extent_buffer *next;
8517 int level = wc->level;
8518 int reada = 0;
8519 int ret = 0;
8520 bool need_account = false;
8521
8522 generation = btrfs_node_ptr_generation(path->nodes[level],
8523 path->slots[level]);
8524 /*
8525 * if the lower level block was created before the snapshot
8526 * was created, we know there is no need to update back refs
8527 * for the subtree
8528 */
8529 if (wc->stage == UPDATE_BACKREF &&
8530 generation <= root->root_key.offset) {
8531 *lookup_info = 1;
8532 return 1;
8533 }
8534
8535 bytenr = btrfs_node_blockptr(path->nodes[level], path->slots[level]);
8536 blocksize = root->nodesize;
8537
8538 next = btrfs_find_tree_block(root->fs_info, bytenr);
8539 if (!next) {
8540 next = btrfs_find_create_tree_block(root, bytenr);
8541 if (!next)
8542 return -ENOMEM;
8543 btrfs_set_buffer_lockdep_class(root->root_key.objectid, next,
8544 level - 1);
8545 reada = 1;
8546 }
8547 btrfs_tree_lock(next);
8548 btrfs_set_lock_blocking(next);
8549
8550 ret = btrfs_lookup_extent_info(trans, root, bytenr, level - 1, 1,
8551 &wc->refs[level - 1],
8552 &wc->flags[level - 1]);
8553 if (ret < 0) {
8554 btrfs_tree_unlock(next);
8555 return ret;
8556 }
8557
8558 if (unlikely(wc->refs[level - 1] == 0)) {
8559 btrfs_err(root->fs_info, "Missing references.");
8560 BUG();
8561 }
8562 *lookup_info = 0;
8563
8564 if (wc->stage == DROP_REFERENCE) {
8565 if (wc->refs[level - 1] > 1) {
8566 need_account = true;
8567 if (level == 1 &&
8568 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8569 goto skip;
8570
8571 if (!wc->update_ref ||
8572 generation <= root->root_key.offset)
8573 goto skip;
8574
8575 btrfs_node_key_to_cpu(path->nodes[level], &key,
8576 path->slots[level]);
8577 ret = btrfs_comp_cpu_keys(&key, &wc->update_progress);
8578 if (ret < 0)
8579 goto skip;
8580
8581 wc->stage = UPDATE_BACKREF;
8582 wc->shared_level = level - 1;
8583 }
8584 } else {
8585 if (level == 1 &&
8586 (wc->flags[0] & BTRFS_BLOCK_FLAG_FULL_BACKREF))
8587 goto skip;
8588 }
8589
8590 if (!btrfs_buffer_uptodate(next, generation, 0)) {
8591 btrfs_tree_unlock(next);
8592 free_extent_buffer(next);
8593 next = NULL;
8594 *lookup_info = 1;
8595 }
8596
8597 if (!next) {
8598 if (reada && level == 1)
8599 reada_walk_down(trans, root, wc, path);
8600 next = read_tree_block(root, bytenr, generation);
8601 if (IS_ERR(next)) {
8602 return PTR_ERR(next);
8603 } else if (!extent_buffer_uptodate(next)) {
8604 free_extent_buffer(next);
8605 return -EIO;
8606 }
8607 btrfs_tree_lock(next);
8608 btrfs_set_lock_blocking(next);
8609 }
8610
8611 level--;
8612 BUG_ON(level != btrfs_header_level(next));
8613 path->nodes[level] = next;
8614 path->slots[level] = 0;
8615 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8616 wc->level = level;
8617 if (wc->level == 1)
8618 wc->reada_slot = 0;
8619 return 0;
8620 skip:
8621 wc->refs[level - 1] = 0;
8622 wc->flags[level - 1] = 0;
8623 if (wc->stage == DROP_REFERENCE) {
8624 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF) {
8625 parent = path->nodes[level]->start;
8626 } else {
8627 BUG_ON(root->root_key.objectid !=
8628 btrfs_header_owner(path->nodes[level]));
8629 parent = 0;
8630 }
8631
8632 if (need_account) {
8633 ret = account_shared_subtree(trans, root, next,
8634 generation, level - 1);
8635 if (ret) {
8636 btrfs_err_rl(root->fs_info,
8637 "Error "
8638 "%d accounting shared subtree. Quota "
8639 "is out of sync, rescan required.",
8640 ret);
8641 }
8642 }
8643 ret = btrfs_free_extent(trans, root, bytenr, blocksize, parent,
8644 root->root_key.objectid, level - 1, 0);
8645 BUG_ON(ret); /* -ENOMEM */
8646 }
8647 btrfs_tree_unlock(next);
8648 free_extent_buffer(next);
8649 *lookup_info = 1;
8650 return 1;
8651 }
8652
8653 /*
8654 * helper to process tree block while walking up the tree.
8655 *
8656 * when wc->stage == DROP_REFERENCE, this function drops
8657 * reference count on the block.
8658 *
8659 * when wc->stage == UPDATE_BACKREF, this function changes
8660 * wc->stage back to DROP_REFERENCE if we changed wc->stage
8661 * to UPDATE_BACKREF previously while processing the block.
8662 *
8663 * NOTE: return value 1 means we should stop walking up.
8664 */
8665 static noinline int walk_up_proc(struct btrfs_trans_handle *trans,
8666 struct btrfs_root *root,
8667 struct btrfs_path *path,
8668 struct walk_control *wc)
8669 {
8670 int ret;
8671 int level = wc->level;
8672 struct extent_buffer *eb = path->nodes[level];
8673 u64 parent = 0;
8674
8675 if (wc->stage == UPDATE_BACKREF) {
8676 BUG_ON(wc->shared_level < level);
8677 if (level < wc->shared_level)
8678 goto out;
8679
8680 ret = find_next_key(path, level + 1, &wc->update_progress);
8681 if (ret > 0)
8682 wc->update_ref = 0;
8683
8684 wc->stage = DROP_REFERENCE;
8685 wc->shared_level = -1;
8686 path->slots[level] = 0;
8687
8688 /*
8689 * check reference count again if the block isn't locked.
8690 * we should start walking down the tree again if reference
8691 * count is one.
8692 */
8693 if (!path->locks[level]) {
8694 BUG_ON(level == 0);
8695 btrfs_tree_lock(eb);
8696 btrfs_set_lock_blocking(eb);
8697 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8698
8699 ret = btrfs_lookup_extent_info(trans, root,
8700 eb->start, level, 1,
8701 &wc->refs[level],
8702 &wc->flags[level]);
8703 if (ret < 0) {
8704 btrfs_tree_unlock_rw(eb, path->locks[level]);
8705 path->locks[level] = 0;
8706 return ret;
8707 }
8708 BUG_ON(wc->refs[level] == 0);
8709 if (wc->refs[level] == 1) {
8710 btrfs_tree_unlock_rw(eb, path->locks[level]);
8711 path->locks[level] = 0;
8712 return 1;
8713 }
8714 }
8715 }
8716
8717 /* wc->stage == DROP_REFERENCE */
8718 BUG_ON(wc->refs[level] > 1 && !path->locks[level]);
8719
8720 if (wc->refs[level] == 1) {
8721 if (level == 0) {
8722 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8723 ret = btrfs_dec_ref(trans, root, eb, 1);
8724 else
8725 ret = btrfs_dec_ref(trans, root, eb, 0);
8726 BUG_ON(ret); /* -ENOMEM */
8727 ret = account_leaf_items(trans, root, eb);
8728 if (ret) {
8729 btrfs_err_rl(root->fs_info,
8730 "error "
8731 "%d accounting leaf items. Quota "
8732 "is out of sync, rescan required.",
8733 ret);
8734 }
8735 }
8736 /* make block locked assertion in clean_tree_block happy */
8737 if (!path->locks[level] &&
8738 btrfs_header_generation(eb) == trans->transid) {
8739 btrfs_tree_lock(eb);
8740 btrfs_set_lock_blocking(eb);
8741 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8742 }
8743 clean_tree_block(trans, root->fs_info, eb);
8744 }
8745
8746 if (eb == root->node) {
8747 if (wc->flags[level] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8748 parent = eb->start;
8749 else
8750 BUG_ON(root->root_key.objectid !=
8751 btrfs_header_owner(eb));
8752 } else {
8753 if (wc->flags[level + 1] & BTRFS_BLOCK_FLAG_FULL_BACKREF)
8754 parent = path->nodes[level + 1]->start;
8755 else
8756 BUG_ON(root->root_key.objectid !=
8757 btrfs_header_owner(path->nodes[level + 1]));
8758 }
8759
8760 btrfs_free_tree_block(trans, root, eb, parent, wc->refs[level] == 1);
8761 out:
8762 wc->refs[level] = 0;
8763 wc->flags[level] = 0;
8764 return 0;
8765 }
8766
8767 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
8768 struct btrfs_root *root,
8769 struct btrfs_path *path,
8770 struct walk_control *wc)
8771 {
8772 int level = wc->level;
8773 int lookup_info = 1;
8774 int ret;
8775
8776 while (level >= 0) {
8777 ret = walk_down_proc(trans, root, path, wc, lookup_info);
8778 if (ret > 0)
8779 break;
8780
8781 if (level == 0)
8782 break;
8783
8784 if (path->slots[level] >=
8785 btrfs_header_nritems(path->nodes[level]))
8786 break;
8787
8788 ret = do_walk_down(trans, root, path, wc, &lookup_info);
8789 if (ret > 0) {
8790 path->slots[level]++;
8791 continue;
8792 } else if (ret < 0)
8793 return ret;
8794 level = wc->level;
8795 }
8796 return 0;
8797 }
8798
8799 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
8800 struct btrfs_root *root,
8801 struct btrfs_path *path,
8802 struct walk_control *wc, int max_level)
8803 {
8804 int level = wc->level;
8805 int ret;
8806
8807 path->slots[level] = btrfs_header_nritems(path->nodes[level]);
8808 while (level < max_level && path->nodes[level]) {
8809 wc->level = level;
8810 if (path->slots[level] + 1 <
8811 btrfs_header_nritems(path->nodes[level])) {
8812 path->slots[level]++;
8813 return 0;
8814 } else {
8815 ret = walk_up_proc(trans, root, path, wc);
8816 if (ret > 0)
8817 return 0;
8818
8819 if (path->locks[level]) {
8820 btrfs_tree_unlock_rw(path->nodes[level],
8821 path->locks[level]);
8822 path->locks[level] = 0;
8823 }
8824 free_extent_buffer(path->nodes[level]);
8825 path->nodes[level] = NULL;
8826 level++;
8827 }
8828 }
8829 return 1;
8830 }
8831
8832 /*
8833 * drop a subvolume tree.
8834 *
8835  * this function traverses the tree, freeing any blocks that are only
8836  * referenced by the tree.
8837  *
8838  * when a shared tree block is found, this function decreases its
8839  * reference count by one. if update_ref is true, this function
8840  * also makes sure backrefs for the shared block and all lower level
8841  * blocks are properly updated.
8842 *
8843 * If called with for_reloc == 0, may exit early with -EAGAIN
8844 */
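/*
 * Typical invocation (illustrative, based on the in-tree callers): the
 * cleaner thread drops a deleted subvolume with
 * btrfs_drop_snapshot(root, NULL, 0, 0), letting the -EAGAIN early exit
 * happen, while relocation passes for_reloc == 1 to disable it.
 */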
8845 int btrfs_drop_snapshot(struct btrfs_root *root,
8846 struct btrfs_block_rsv *block_rsv, int update_ref,
8847 int for_reloc)
8848 {
8849 struct btrfs_path *path;
8850 struct btrfs_trans_handle *trans;
8851 struct btrfs_root *tree_root = root->fs_info->tree_root;
8852 struct btrfs_root_item *root_item = &root->root_item;
8853 struct walk_control *wc;
8854 struct btrfs_key key;
8855 int err = 0;
8856 int ret;
8857 int level;
8858 bool root_dropped = false;
8859
8860 btrfs_debug(root->fs_info, "Drop subvolume %llu", root->objectid);
8861
8862 path = btrfs_alloc_path();
8863 if (!path) {
8864 err = -ENOMEM;
8865 goto out;
8866 }
8867
8868 wc = kzalloc(sizeof(*wc), GFP_NOFS);
8869 if (!wc) {
8870 btrfs_free_path(path);
8871 err = -ENOMEM;
8872 goto out;
8873 }
8874
8875 trans = btrfs_start_transaction(tree_root, 0);
8876 if (IS_ERR(trans)) {
8877 err = PTR_ERR(trans);
8878 goto out_free;
8879 }
8880
8881 if (block_rsv)
8882 trans->block_rsv = block_rsv;
8883
8884 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
8885 level = btrfs_header_level(root->node);
8886 path->nodes[level] = btrfs_lock_root_node(root);
8887 btrfs_set_lock_blocking(path->nodes[level]);
8888 path->slots[level] = 0;
8889 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8890 memset(&wc->update_progress, 0,
8891 sizeof(wc->update_progress));
8892 } else {
8893 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
8894 memcpy(&wc->update_progress, &key,
8895 sizeof(wc->update_progress));
8896
8897 level = root_item->drop_level;
8898 BUG_ON(level == 0);
8899 path->lowest_level = level;
8900 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
8901 path->lowest_level = 0;
8902 if (ret < 0) {
8903 err = ret;
8904 goto out_end_trans;
8905 }
8906 WARN_ON(ret > 0);
8907
8908 /*
8909 * unlock our path, this is safe because only this
8910 * function is allowed to delete this snapshot
8911 */
8912 btrfs_unlock_up_safe(path, 0);
8913
8914 level = btrfs_header_level(root->node);
8915 while (1) {
8916 btrfs_tree_lock(path->nodes[level]);
8917 btrfs_set_lock_blocking(path->nodes[level]);
8918 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
8919
8920 ret = btrfs_lookup_extent_info(trans, root,
8921 path->nodes[level]->start,
8922 level, 1, &wc->refs[level],
8923 &wc->flags[level]);
8924 if (ret < 0) {
8925 err = ret;
8926 goto out_end_trans;
8927 }
8928 BUG_ON(wc->refs[level] == 0);
8929
8930 if (level == root_item->drop_level)
8931 break;
8932
8933 btrfs_tree_unlock(path->nodes[level]);
8934 path->locks[level] = 0;
8935 WARN_ON(wc->refs[level] != 1);
8936 level--;
8937 }
8938 }
8939
8940 wc->level = level;
8941 wc->shared_level = -1;
8942 wc->stage = DROP_REFERENCE;
8943 wc->update_ref = update_ref;
8944 wc->keep_locks = 0;
8945 wc->for_reloc = for_reloc;
8946 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
8947
8948 while (1) {
8949
8950 ret = walk_down_tree(trans, root, path, wc);
8951 if (ret < 0) {
8952 err = ret;
8953 break;
8954 }
8955
8956 ret = walk_up_tree(trans, root, path, wc, BTRFS_MAX_LEVEL);
8957 if (ret < 0) {
8958 err = ret;
8959 break;
8960 }
8961
8962 if (ret > 0) {
8963 BUG_ON(wc->stage != DROP_REFERENCE);
8964 break;
8965 }
8966
8967 if (wc->stage == DROP_REFERENCE) {
8968 level = wc->level;
8969 btrfs_node_key(path->nodes[level],
8970 &root_item->drop_progress,
8971 path->slots[level]);
8972 root_item->drop_level = level;
8973 }
8974
8975 BUG_ON(wc->level == 0);
8976 if (btrfs_should_end_transaction(trans, tree_root) ||
8977 (!for_reloc && btrfs_need_cleaner_sleep(root))) {
8978 ret = btrfs_update_root(trans, tree_root,
8979 &root->root_key,
8980 root_item);
8981 if (ret) {
8982 btrfs_abort_transaction(trans, tree_root, ret);
8983 err = ret;
8984 goto out_end_trans;
8985 }
8986
8987 btrfs_end_transaction_throttle(trans, tree_root);
8988 if (!for_reloc && btrfs_need_cleaner_sleep(root)) {
8989 pr_debug("BTRFS: drop snapshot early exit\n");
8990 err = -EAGAIN;
8991 goto out_free;
8992 }
8993
8994 trans = btrfs_start_transaction(tree_root, 0);
8995 if (IS_ERR(trans)) {
8996 err = PTR_ERR(trans);
8997 goto out_free;
8998 }
8999 if (block_rsv)
9000 trans->block_rsv = block_rsv;
9001 }
9002 }
9003 btrfs_release_path(path);
9004 if (err)
9005 goto out_end_trans;
9006
9007 ret = btrfs_del_root(trans, tree_root, &root->root_key);
9008 if (ret) {
9009 btrfs_abort_transaction(trans, tree_root, ret);
9010 goto out_end_trans;
9011 }
9012
9013 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
9014 ret = btrfs_find_root(tree_root, &root->root_key, path,
9015 NULL, NULL);
9016 if (ret < 0) {
9017 btrfs_abort_transaction(trans, tree_root, ret);
9018 err = ret;
9019 goto out_end_trans;
9020 } else if (ret > 0) {
9021 /* if we fail to delete the orphan item this time
9022 * around, it'll get picked up the next time.
9023 *
9024 * The most common failure here is just -ENOENT.
9025 */
9026 btrfs_del_orphan_item(trans, tree_root,
9027 root->root_key.objectid);
9028 }
9029 }
9030
9031 if (test_bit(BTRFS_ROOT_IN_RADIX, &root->state)) {
9032 btrfs_add_dropped_root(trans, root);
9033 } else {
9034 free_extent_buffer(root->node);
9035 free_extent_buffer(root->commit_root);
9036 btrfs_put_fs_root(root);
9037 }
9038 root_dropped = true;
9039 out_end_trans:
9040 btrfs_end_transaction_throttle(trans, tree_root);
9041 out_free:
9042 kfree(wc);
9043 btrfs_free_path(path);
9044 out:
9045 /*
9046 * So if we need to stop dropping the snapshot for whatever reason we
9047 * need to make sure to add it back to the dead root list so that we
9048 * keep trying to do the work later. This also cleans up roots if we
9049 * don't have it in the radix (like when we recover after a power fail
9050 * or unmount) so we don't leak memory.
9051 */
9052 if (!for_reloc && root_dropped == false)
9053 btrfs_add_dead_root(root);
9054 if (err && err != -EAGAIN)
9055 btrfs_std_error(root->fs_info, err, NULL);
9056 return err;
9057 }
9058
9059 /*
9060 * drop subtree rooted at tree block 'node'.
9061 *
9062 * NOTE: this function will unlock and release tree block 'node'
9063 * only used by relocation code
9064 */
9065 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
9066 struct btrfs_root *root,
9067 struct extent_buffer *node,
9068 struct extent_buffer *parent)
9069 {
9070 struct btrfs_path *path;
9071 struct walk_control *wc;
9072 int level;
9073 int parent_level;
9074 int ret = 0;
9075 int wret;
9076
9077 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
9078
9079 path = btrfs_alloc_path();
9080 if (!path)
9081 return -ENOMEM;
9082
9083 wc = kzalloc(sizeof(*wc), GFP_NOFS);
9084 if (!wc) {
9085 btrfs_free_path(path);
9086 return -ENOMEM;
9087 }
9088
9089 btrfs_assert_tree_locked(parent);
9090 parent_level = btrfs_header_level(parent);
9091 extent_buffer_get(parent);
9092 path->nodes[parent_level] = parent;
9093 path->slots[parent_level] = btrfs_header_nritems(parent);
9094
9095 btrfs_assert_tree_locked(node);
9096 level = btrfs_header_level(node);
9097 path->nodes[level] = node;
9098 path->slots[level] = 0;
9099 path->locks[level] = BTRFS_WRITE_LOCK_BLOCKING;
9100
9101 wc->refs[parent_level] = 1;
9102 wc->flags[parent_level] = BTRFS_BLOCK_FLAG_FULL_BACKREF;
9103 wc->level = level;
9104 wc->shared_level = -1;
9105 wc->stage = DROP_REFERENCE;
9106 wc->update_ref = 0;
9107 wc->keep_locks = 1;
9108 wc->for_reloc = 1;
9109 wc->reada_count = BTRFS_NODEPTRS_PER_BLOCK(root);
9110
9111 while (1) {
9112 wret = walk_down_tree(trans, root, path, wc);
9113 if (wret < 0) {
9114 ret = wret;
9115 break;
9116 }
9117
9118 wret = walk_up_tree(trans, root, path, wc, parent_level);
9119 if (wret < 0)
9120 ret = wret;
9121 if (wret != 0)
9122 break;
9123 }
9124
9125 kfree(wc);
9126 btrfs_free_path(path);
9127 return ret;
9128 }
9129
9130 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
9131 {
9132 u64 num_devices;
9133 u64 stripped;
9134
9135 /*
9136 	 * if restripe for this chunk_type is on, pick the target profile and
9137 	 * return; otherwise do the usual balance
9138 */
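	/*
	 * Worked example of the fallbacks below: on a filesystem reduced
	 * to a single rw device, RAID1/RAID10 falls back to DUP and RAID0
	 * falls back to single; with two or more devices, DUP is promoted
	 * to RAID1 and anything already striped or mirrored is returned
	 * unchanged.
	 */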
9139 stripped = get_restripe_target(root->fs_info, flags);
9140 if (stripped)
9141 return extended_to_chunk(stripped);
9142
9143 num_devices = root->fs_info->fs_devices->rw_devices;
9144
9145 stripped = BTRFS_BLOCK_GROUP_RAID0 |
9146 BTRFS_BLOCK_GROUP_RAID5 | BTRFS_BLOCK_GROUP_RAID6 |
9147 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
9148
9149 if (num_devices == 1) {
9150 stripped |= BTRFS_BLOCK_GROUP_DUP;
9151 stripped = flags & ~stripped;
9152
9153 /* turn raid0 into single device chunks */
9154 if (flags & BTRFS_BLOCK_GROUP_RAID0)
9155 return stripped;
9156
9157 /* turn mirroring into duplication */
9158 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
9159 BTRFS_BLOCK_GROUP_RAID10))
9160 return stripped | BTRFS_BLOCK_GROUP_DUP;
9161 } else {
9162 /* they already had raid on here, just return */
9163 if (flags & stripped)
9164 return flags;
9165
9166 stripped |= BTRFS_BLOCK_GROUP_DUP;
9167 stripped = flags & ~stripped;
9168
9169 /* switch duplicated blocks with raid1 */
9170 if (flags & BTRFS_BLOCK_GROUP_DUP)
9171 return stripped | BTRFS_BLOCK_GROUP_RAID1;
9172
9173 /* this is drive concat, leave it alone */
9174 }
9175
9176 return flags;
9177 }
9178
9179 static int inc_block_group_ro(struct btrfs_block_group_cache *cache, int force)
9180 {
9181 struct btrfs_space_info *sinfo = cache->space_info;
9182 u64 num_bytes;
9183 u64 min_allocable_bytes;
9184 int ret = -ENOSPC;
9185
9186 /*
9187 * We need some metadata space and system metadata space for
9188 	 * allocating chunks in some corner cases, so keep some space in
9189 	 * reserve unless we are forced to mark the block group read-only.
9190 */
9191 if ((sinfo->flags &
9192 (BTRFS_BLOCK_GROUP_SYSTEM | BTRFS_BLOCK_GROUP_METADATA)) &&
9193 !force)
9194 min_allocable_bytes = SZ_1M;
9195 else
9196 min_allocable_bytes = 0;
9197
9198 spin_lock(&sinfo->lock);
9199 spin_lock(&cache->lock);
9200
9201 if (cache->ro) {
9202 cache->ro++;
9203 ret = 0;
9204 goto out;
9205 }
9206
9207 num_bytes = cache->key.offset - cache->reserved - cache->pinned -
9208 cache->bytes_super - btrfs_block_group_used(&cache->item);
9209
9210 if (sinfo->bytes_used + sinfo->bytes_reserved + sinfo->bytes_pinned +
9211 sinfo->bytes_may_use + sinfo->bytes_readonly + num_bytes +
9212 min_allocable_bytes <= sinfo->total_bytes) {
9213 sinfo->bytes_readonly += num_bytes;
9214 cache->ro++;
9215 list_add_tail(&cache->ro_list, &sinfo->ro_bgs);
9216 ret = 0;
9217 }
9218 out:
9219 spin_unlock(&cache->lock);
9220 spin_unlock(&sinfo->lock);
9221 return ret;
9222 }
9223
9224 int btrfs_inc_block_group_ro(struct btrfs_root *root,
9225 struct btrfs_block_group_cache *cache)
9226
9227 {
9228 struct btrfs_trans_handle *trans;
9229 u64 alloc_flags;
9230 int ret;
9231
9232 again:
9233 trans = btrfs_join_transaction(root);
9234 if (IS_ERR(trans))
9235 return PTR_ERR(trans);
9236
9237 /*
9238 * we're not allowed to set block groups readonly after the dirty
9239 * block groups cache has started writing. If it already started,
9240 * back off and let this transaction commit
9241 */
9242 mutex_lock(&root->fs_info->ro_block_group_mutex);
9243 if (test_bit(BTRFS_TRANS_DIRTY_BG_RUN, &trans->transaction->flags)) {
9244 u64 transid = trans->transid;
9245
9246 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9247 btrfs_end_transaction(trans, root);
9248
9249 ret = btrfs_wait_for_commit(root, transid);
9250 if (ret)
9251 return ret;
9252 goto again;
9253 }
9254
9255 /*
9256 * if we are changing raid levels, try to allocate a corresponding
9257 * block group with the new raid level.
9258 */
9259 alloc_flags = update_block_group_flags(root, cache->flags);
9260 if (alloc_flags != cache->flags) {
9261 ret = do_chunk_alloc(trans, root, alloc_flags,
9262 CHUNK_ALLOC_FORCE);
9263 /*
9264 * ENOSPC is allowed here, we may have enough space
9265 * already allocated at the new raid level to
9266 * carry on
9267 */
9268 if (ret == -ENOSPC)
9269 ret = 0;
9270 if (ret < 0)
9271 goto out;
9272 }
9273
9274 ret = inc_block_group_ro(cache, 0);
9275 if (!ret)
9276 goto out;
9277 alloc_flags = get_alloc_profile(root, cache->space_info->flags);
9278 ret = do_chunk_alloc(trans, root, alloc_flags,
9279 CHUNK_ALLOC_FORCE);
9280 if (ret < 0)
9281 goto out;
9282 ret = inc_block_group_ro(cache, 0);
9283 out:
9284 if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
9285 alloc_flags = update_block_group_flags(root, cache->flags);
9286 lock_chunks(root->fs_info->chunk_root);
9287 check_system_chunk(trans, root, alloc_flags);
9288 unlock_chunks(root->fs_info->chunk_root);
9289 }
9290 mutex_unlock(&root->fs_info->ro_block_group_mutex);
9291
9292 btrfs_end_transaction(trans, root);
9293 return ret;
9294 }
9295
9296 int btrfs_force_chunk_alloc(struct btrfs_trans_handle *trans,
9297 struct btrfs_root *root, u64 type)
9298 {
9299 u64 alloc_flags = get_alloc_profile(root, type);
9300 return do_chunk_alloc(trans, root, alloc_flags,
9301 CHUNK_ALLOC_FORCE);
9302 }
9303
9304 /*
9305  * helper to account the unused space of all the readonly block groups in the
9306 * space_info. takes mirrors into account.
9307 */
9308 u64 btrfs_account_ro_block_groups_free_space(struct btrfs_space_info *sinfo)
9309 {
9310 struct btrfs_block_group_cache *block_group;
9311 u64 free_bytes = 0;
9312 int factor;
9313
9314 	/* It's df, we don't care if it's racy */
9315 if (list_empty(&sinfo->ro_bgs))
9316 return 0;
9317
9318 spin_lock(&sinfo->lock);
9319 list_for_each_entry(block_group, &sinfo->ro_bgs, ro_list) {
9320 spin_lock(&block_group->lock);
9321
9322 if (!block_group->ro) {
9323 spin_unlock(&block_group->lock);
9324 continue;
9325 }
9326
9327 if (block_group->flags & (BTRFS_BLOCK_GROUP_RAID1 |
9328 BTRFS_BLOCK_GROUP_RAID10 |
9329 BTRFS_BLOCK_GROUP_DUP))
9330 factor = 2;
9331 else
9332 factor = 1;
9333
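		/*
		 * Example (illustrative numbers): a RAID1 block group of
		 * key.offset 1G with 400M used contributes
		 * (1G - 400M) * 2 = 1.2G of raw free space, since every
		 * logical byte occupies two mirrored copies on disk.
		 */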
9334 free_bytes += (block_group->key.offset -
9335 btrfs_block_group_used(&block_group->item)) *
9336 factor;
9337
9338 spin_unlock(&block_group->lock);
9339 }
9340 spin_unlock(&sinfo->lock);
9341
9342 return free_bytes;
9343 }
9344
9345 void btrfs_dec_block_group_ro(struct btrfs_root *root,
9346 struct btrfs_block_group_cache *cache)
9347 {
9348 struct btrfs_space_info *sinfo = cache->space_info;
9349 u64 num_bytes;
9350
9351 BUG_ON(!cache->ro);
9352
9353 spin_lock(&sinfo->lock);
9354 spin_lock(&cache->lock);
9355 if (!--cache->ro) {
9356 num_bytes = cache->key.offset - cache->reserved -
9357 cache->pinned - cache->bytes_super -
9358 btrfs_block_group_used(&cache->item);
9359 sinfo->bytes_readonly -= num_bytes;
9360 list_del_init(&cache->ro_list);
9361 }
9362 spin_unlock(&cache->lock);
9363 spin_unlock(&sinfo->lock);
9364 }
9365
9366 /*
9367  * checks to see if it's even possible to relocate this block group.
9368  *
9369  * @return - -1 if it's not a good idea to relocate this block group, 0 if it's
9370  * ok to go ahead and try.
9371 */
9372 int btrfs_can_relocate(struct btrfs_root *root, u64 bytenr)
9373 {
9374 struct btrfs_block_group_cache *block_group;
9375 struct btrfs_space_info *space_info;
9376 struct btrfs_fs_devices *fs_devices = root->fs_info->fs_devices;
9377 struct btrfs_device *device;
9378 struct btrfs_trans_handle *trans;
9379 u64 min_free;
9380 u64 dev_min = 1;
9381 u64 dev_nr = 0;
9382 u64 target;
9383 int index;
9384 int full = 0;
9385 int ret = 0;
9386
9387 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
9388
9389 /* odd, couldn't find the block group, leave it alone */
9390 if (!block_group)
9391 return -1;
9392
9393 min_free = btrfs_block_group_used(&block_group->item);
9394
9395 /* no bytes used, we're good */
9396 if (!min_free)
9397 goto out;
9398
9399 space_info = block_group->space_info;
9400 spin_lock(&space_info->lock);
9401
9402 full = space_info->full;
9403
9404 /*
9405 * if this is the last block group we have in this space, we can't
9406 * relocate it unless we're able to allocate a new chunk below.
9407 *
9408 * Otherwise, we need to make sure we have room in the space to handle
9409 * all of the extents from this block group. If we can, we're good
9410 */
9411 if ((space_info->total_bytes != block_group->key.offset) &&
9412 (space_info->bytes_used + space_info->bytes_reserved +
9413 space_info->bytes_pinned + space_info->bytes_readonly +
9414 min_free < space_info->total_bytes)) {
9415 spin_unlock(&space_info->lock);
9416 goto out;
9417 }
9418 spin_unlock(&space_info->lock);
9419
9420 /*
9421 * ok we don't have enough space, but maybe we have free space on our
9422 * devices to allocate new chunks for relocation, so loop through our
9423 * alloc devices and guess if we have enough space. if this block
9424 * group is going to be restriped, run checks against the target
9425 * profile instead of the current one.
9426 */
9427 ret = -1;
9428
9429 	/*
9430 	 * index (enum btrfs_raid_types):
9431 	 * 0: raid10
9432 	 * 1: raid1
9433 	 * 2: dup
9434 	 * 3: raid0
9435 	 * 4: single, 5: raid5, 6: raid6
9436 	 */
9437 target = get_restripe_target(root->fs_info, block_group->flags);
9438 if (target) {
9439 index = __get_raid_index(extended_to_chunk(target));
9440 } else {
9441 /*
9442 * this is just a balance, so if we were marked as full
9443 * we know there is no space for a new chunk
9444 */
9445 if (full)
9446 goto out;
9447
9448 index = get_block_group_index(block_group);
9449 }
9450
9451 if (index == BTRFS_RAID_RAID10) {
9452 dev_min = 4;
9453 /* Divide by 2 */
9454 min_free >>= 1;
9455 } else if (index == BTRFS_RAID_RAID1) {
9456 dev_min = 2;
9457 } else if (index == BTRFS_RAID_DUP) {
9458 /* Multiply by 2 */
9459 min_free <<= 1;
9460 } else if (index == BTRFS_RAID_RAID0) {
9461 dev_min = fs_devices->rw_devices;
9462 min_free = div64_u64(min_free, dev_min);
9463 }
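	/*
	 * Illustrative example: for a RAID10 block group with 8GiB used,
	 * dev_min is 4 and min_free becomes 4GiB, i.e. we look for four
	 * writable devices that can each hold a quarter of the raw data
	 * (two copies, each striped across two devices).
	 */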
9464
9465 /* We need to do this so that we can look at pending chunks */
9466 trans = btrfs_join_transaction(root);
9467 if (IS_ERR(trans)) {
9468 ret = PTR_ERR(trans);
9469 goto out;
9470 }
9471
9472 mutex_lock(&root->fs_info->chunk_mutex);
9473 list_for_each_entry(device, &fs_devices->alloc_list, dev_alloc_list) {
9474 u64 dev_offset;
9475
9476 /*
9477 * check to make sure we can actually find a chunk with enough
9478 * space to fit our block group in.
9479 */
9480 if (device->total_bytes > device->bytes_used + min_free &&
9481 !device->is_tgtdev_for_dev_replace) {
9482 ret = find_free_dev_extent(trans, device, min_free,
9483 &dev_offset, NULL);
9484 if (!ret)
9485 dev_nr++;
9486
9487 if (dev_nr >= dev_min)
9488 break;
9489
9490 ret = -1;
9491 }
9492 }
9493 mutex_unlock(&root->fs_info->chunk_mutex);
9494 btrfs_end_transaction(trans, root);
9495 out:
9496 btrfs_put_block_group(block_group);
9497 return ret;
9498 }
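
/*
 * Hypothetical caller sketch (the real caller lives in the chunk
 * relocation path in volumes.c), showing how the return value is meant
 * to be used:
 *
 *	if (btrfs_can_relocate(extent_root, chunk_offset))
 *		return -ENOSPC;
 *	... go on to relocate the block group ...
 */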
9499
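/*
 * Position @path at the first BLOCK_GROUP_ITEM whose objectid is
 * >= key->objectid. Returns 0 when such an item is found, a positive
 * value when the tree is exhausted, and a negative errno on error.
 */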
9500 static int find_first_block_group(struct btrfs_root *root,
9501 struct btrfs_path *path, struct btrfs_key *key)
9502 {
9503 int ret = 0;
9504 struct btrfs_key found_key;
9505 struct extent_buffer *leaf;
9506 int slot;
9507
9508 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
9509 if (ret < 0)
9510 goto out;
9511
9512 while (1) {
9513 slot = path->slots[0];
9514 leaf = path->nodes[0];
9515 if (slot >= btrfs_header_nritems(leaf)) {
9516 ret = btrfs_next_leaf(root, path);
9517 if (ret == 0)
9518 continue;
9519 if (ret < 0)
9520 goto out;
9521 break;
9522 }
9523 btrfs_item_key_to_cpu(leaf, &found_key, slot);
9524
9525 if (found_key.objectid >= key->objectid &&
9526 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
9527 ret = 0;
9528 goto out;
9529 }
9530 path->slots[0]++;
9531 }
9532 out:
9533 return ret;
9534 }
9535
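/*
 * Release the free space cache inode reference (iref) held by every
 * block group; typically run while tearing the filesystem down, so the
 * final iputs happen before the rest of the fs state is freed.
 */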
9536 void btrfs_put_block_group_cache(struct btrfs_fs_info *info)
9537 {
9538 struct btrfs_block_group_cache *block_group;
9539 u64 last = 0;
9540
9541 while (1) {
9542 struct inode *inode;
9543
9544 block_group = btrfs_lookup_first_block_group(info, last);
9545 while (block_group) {
9546 spin_lock(&block_group->lock);
9547 if (block_group->iref)
9548 break;
9549 spin_unlock(&block_group->lock);
9550 block_group = next_block_group(info->tree_root,
9551 block_group);
9552 }
9553 if (!block_group) {
9554 if (last == 0)
9555 break;
9556 last = 0;
9557 continue;
9558 }
9559
9560 inode = block_group->inode;
9561 block_group->iref = 0;
9562 block_group->inode = NULL;
9563 spin_unlock(&block_group->lock);
9564 iput(inode);
9565 last = block_group->key.objectid + block_group->key.offset;
9566 btrfs_put_block_group(block_group);
9567 }
9568 }
9569
9570 int btrfs_free_block_groups(struct btrfs_fs_info *info)
9571 {
9572 struct btrfs_block_group_cache *block_group;
9573 struct btrfs_space_info *space_info;
9574 struct btrfs_caching_control *caching_ctl;
9575 struct rb_node *n;
9576
9577 down_write(&info->commit_root_sem);
9578 while (!list_empty(&info->caching_block_groups)) {
9579 caching_ctl = list_entry(info->caching_block_groups.next,
9580 struct btrfs_caching_control, list);
9581 list_del(&caching_ctl->list);
9582 put_caching_control(caching_ctl);
9583 }
9584 up_write(&info->commit_root_sem);
9585
9586 spin_lock(&info->unused_bgs_lock);
9587 while (!list_empty(&info->unused_bgs)) {
9588 block_group = list_first_entry(&info->unused_bgs,
9589 struct btrfs_block_group_cache,
9590 bg_list);
9591 list_del_init(&block_group->bg_list);
9592 btrfs_put_block_group(block_group);
9593 }
9594 spin_unlock(&info->unused_bgs_lock);
9595
9596 spin_lock(&info->block_group_cache_lock);
9597 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
9598 block_group = rb_entry(n, struct btrfs_block_group_cache,
9599 cache_node);
9600 rb_erase(&block_group->cache_node,
9601 &info->block_group_cache_tree);
9602 RB_CLEAR_NODE(&block_group->cache_node);
9603 spin_unlock(&info->block_group_cache_lock);
9604
9605 down_write(&block_group->space_info->groups_sem);
9606 list_del(&block_group->list);
9607 up_write(&block_group->space_info->groups_sem);
9608
9609 if (block_group->cached == BTRFS_CACHE_STARTED)
9610 wait_block_group_cache_done(block_group);
9611
9612 /*
9613 * We haven't cached this block group, which means we could
9614 * possibly have excluded extents on this block group.
9615 */
9616 if (block_group->cached == BTRFS_CACHE_NO ||
9617 block_group->cached == BTRFS_CACHE_ERROR)
9618 free_excluded_extents(info->extent_root, block_group);
9619
9620 btrfs_remove_free_space_cache(block_group);
9621 btrfs_put_block_group(block_group);
9622
9623 spin_lock(&info->block_group_cache_lock);
9624 }
9625 spin_unlock(&info->block_group_cache_lock);
9626
9627 /* now that all the block groups are freed, go through and
9628 * free all the space_info structs. This is only called during
9629 * the final stages of unmount, and so we know nobody is
9630 * using them. We call synchronize_rcu() once before we start,
9631 * just to be on the safe side.
9632 */
9633 synchronize_rcu();
9634
9635 release_global_block_rsv(info);
9636
9637 while (!list_empty(&info->space_info)) {
9638 int i;
9639
9640 space_info = list_entry(info->space_info.next,
9641 struct btrfs_space_info,
9642 list);
9643 if (btrfs_test_opt(info->tree_root, ENOSPC_DEBUG)) {
9644 if (WARN_ON(space_info->bytes_pinned > 0 ||
9645 space_info->bytes_reserved > 0 ||
9646 space_info->bytes_may_use > 0)) {
9647 dump_space_info(space_info, 0, 0);
9648 }
9649 }
9650 list_del(&space_info->list);
9651 for (i = 0; i < BTRFS_NR_RAID_TYPES; i++) {
9652 struct kobject *kobj;
9653 kobj = space_info->block_group_kobjs[i];
9654 space_info->block_group_kobjs[i] = NULL;
9655 if (kobj) {
9656 kobject_del(kobj);
9657 kobject_put(kobj);
9658 }
9659 }
9660 kobject_del(&space_info->kobj);
9661 kobject_put(&space_info->kobj);
9662 }
9663 return 0;
9664 }
9665
9666 static void __link_block_group(struct btrfs_space_info *space_info,
9667 struct btrfs_block_group_cache *cache)
9668 {
9669 int index = get_block_group_index(cache);
9670 bool first = false;
9671
9672 down_write(&space_info->groups_sem);
9673 if (list_empty(&space_info->block_groups[index]))
9674 first = true;
9675 list_add_tail(&cache->list, &space_info->block_groups[index]);
9676 up_write(&space_info->groups_sem);
9677
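	/*
	 * The first block group of a raid profile in this space_info also
	 * publishes a sysfs directory for the profile, illustratively
	 * /sys/fs/btrfs/<fsid>/allocation/<type>/<profile>.
	 */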
9678 if (first) {
9679 struct raid_kobject *rkobj;
9680 int ret;
9681
9682 rkobj = kzalloc(sizeof(*rkobj), GFP_NOFS);
9683 if (!rkobj)
9684 goto out_err;
9685 rkobj->raid_type = index;
9686 kobject_init(&rkobj->kobj, &btrfs_raid_ktype);
9687 ret = kobject_add(&rkobj->kobj, &space_info->kobj,
9688 "%s", get_raid_name(index));
9689 if (ret) {
9690 kobject_put(&rkobj->kobj);
9691 goto out_err;
9692 }
9693 space_info->block_group_kobjs[index] = &rkobj->kobj;
9694 }
9695
9696 return;
9697 out_err:
9698 pr_warn("BTRFS: failed to add kobject for block cache. ignoring.\n");
9699 }
9700
9701 static struct btrfs_block_group_cache *
9702 btrfs_create_block_group_cache(struct btrfs_root *root, u64 start, u64 size)
9703 {
9704 struct btrfs_block_group_cache *cache;
9705
9706 cache = kzalloc(sizeof(*cache), GFP_NOFS);
9707 if (!cache)
9708 return NULL;
9709
9710 cache->free_space_ctl = kzalloc(sizeof(*cache->free_space_ctl),
9711 GFP_NOFS);
9712 if (!cache->free_space_ctl) {
9713 kfree(cache);
9714 return NULL;
9715 }
9716
9717 cache->key.objectid = start;
9718 cache->key.offset = size;
9719 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9720
9721 cache->sectorsize = root->sectorsize;
9722 cache->fs_info = root->fs_info;
9723 cache->full_stripe_len = btrfs_full_stripe_len(root,
9724 &root->fs_info->mapping_tree,
9725 start);
9726 set_free_space_tree_thresholds(cache);
9727
9728 atomic_set(&cache->count, 1);
9729 spin_lock_init(&cache->lock);
9730 init_rwsem(&cache->data_rwsem);
9731 INIT_LIST_HEAD(&cache->list);
9732 INIT_LIST_HEAD(&cache->cluster_list);
9733 INIT_LIST_HEAD(&cache->bg_list);
9734 INIT_LIST_HEAD(&cache->ro_list);
9735 INIT_LIST_HEAD(&cache->dirty_list);
9736 INIT_LIST_HEAD(&cache->io_list);
9737 btrfs_init_free_space_ctl(cache);
9738 atomic_set(&cache->trimming, 0);
9739 mutex_init(&cache->free_space_lock);
9740
9741 return cache;
9742 }
9743
9744 int btrfs_read_block_groups(struct btrfs_root *root)
9745 {
9746 struct btrfs_path *path;
9747 int ret;
9748 struct btrfs_block_group_cache *cache;
9749 struct btrfs_fs_info *info = root->fs_info;
9750 struct btrfs_space_info *space_info;
9751 struct btrfs_key key;
9752 struct btrfs_key found_key;
9753 struct extent_buffer *leaf;
9754 int need_clear = 0;
9755 u64 cache_gen;
9756
9757 root = info->extent_root;
9758 key.objectid = 0;
9759 key.offset = 0;
9760 key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
9761 path = btrfs_alloc_path();
9762 if (!path)
9763 return -ENOMEM;
9764 path->reada = READA_FORWARD;
9765
9766 cache_gen = btrfs_super_cache_generation(root->fs_info->super_copy);
9767 if (btrfs_test_opt(root, SPACE_CACHE) &&
9768 btrfs_super_generation(root->fs_info->super_copy) != cache_gen)
9769 need_clear = 1;
9770 if (btrfs_test_opt(root, CLEAR_CACHE))
9771 need_clear = 1;
9772
9773 while (1) {
9774 ret = find_first_block_group(root, path, &key);
9775 if (ret > 0)
9776 break;
9777 if (ret != 0)
9778 goto error;
9779
9780 leaf = path->nodes[0];
9781 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
9782
9783 cache = btrfs_create_block_group_cache(root, found_key.objectid,
9784 found_key.offset);
9785 if (!cache) {
9786 ret = -ENOMEM;
9787 goto error;
9788 }
9789
9790 if (need_clear) {
9791 /*
9792 			 * When we mount with an old space cache, we need to
9793 			 * set BTRFS_DC_CLEAR and set the dirty flag.
9794 *
9795 * a) Setting 'BTRFS_DC_CLEAR' makes sure that we
9796 * truncate the old free space cache inode and
9797 * setup a new one.
9798 * b) Setting 'dirty flag' makes sure that we flush
9799 * the new space cache info onto disk.
9800 */
9801 if (btrfs_test_opt(root, SPACE_CACHE))
9802 cache->disk_cache_state = BTRFS_DC_CLEAR;
9803 }
9804
9805 read_extent_buffer(leaf, &cache->item,
9806 btrfs_item_ptr_offset(leaf, path->slots[0]),
9807 sizeof(cache->item));
9808 cache->flags = btrfs_block_group_flags(&cache->item);
9809
9810 key.objectid = found_key.objectid + found_key.offset;
9811 btrfs_release_path(path);
9812
9813 /*
9814 * We need to exclude the super stripes now so that the space
9815 * info has super bytes accounted for, otherwise we'll think
9816 * we have more space than we actually do.
9817 */
9818 ret = exclude_super_stripes(root, cache);
9819 if (ret) {
9820 /*
9821 * We may have excluded something, so call this just in
9822 * case.
9823 */
9824 free_excluded_extents(root, cache);
9825 btrfs_put_block_group(cache);
9826 goto error;
9827 }
9828
9829 /*
9830 * check for two cases, either we are full, and therefore
9831 * don't need to bother with the caching work since we won't
9832 * find any space, or we are empty, and we can just add all
9833 		 * the space in and be done with it. This saves us a lot of
9834 * time, particularly in the full case.
9835 */
9836 if (found_key.offset == btrfs_block_group_used(&cache->item)) {
9837 cache->last_byte_to_unpin = (u64)-1;
9838 cache->cached = BTRFS_CACHE_FINISHED;
9839 free_excluded_extents(root, cache);
9840 } else if (btrfs_block_group_used(&cache->item) == 0) {
9841 cache->last_byte_to_unpin = (u64)-1;
9842 cache->cached = BTRFS_CACHE_FINISHED;
9843 add_new_free_space(cache, root->fs_info,
9844 found_key.objectid,
9845 found_key.objectid +
9846 found_key.offset);
9847 free_excluded_extents(root, cache);
9848 }
9849
9850 ret = btrfs_add_block_group_cache(root->fs_info, cache);
9851 if (ret) {
9852 btrfs_remove_free_space_cache(cache);
9853 btrfs_put_block_group(cache);
9854 goto error;
9855 }
9856
9857 ret = update_space_info(info, cache->flags, found_key.offset,
9858 btrfs_block_group_used(&cache->item),
9859 &space_info);
9860 if (ret) {
9861 btrfs_remove_free_space_cache(cache);
9862 spin_lock(&info->block_group_cache_lock);
9863 rb_erase(&cache->cache_node,
9864 &info->block_group_cache_tree);
9865 RB_CLEAR_NODE(&cache->cache_node);
9866 spin_unlock(&info->block_group_cache_lock);
9867 btrfs_put_block_group(cache);
9868 goto error;
9869 }
9870
9871 cache->space_info = space_info;
9872 spin_lock(&cache->space_info->lock);
9873 cache->space_info->bytes_readonly += cache->bytes_super;
9874 spin_unlock(&cache->space_info->lock);
9875
9876 __link_block_group(space_info, cache);
9877
9878 set_avail_alloc_bits(root->fs_info, cache->flags);
9879 if (btrfs_chunk_readonly(root, cache->key.objectid)) {
9880 inc_block_group_ro(cache, 1);
9881 } else if (btrfs_block_group_used(&cache->item) == 0) {
9882 spin_lock(&info->unused_bgs_lock);
9883 /* Should always be true but just in case. */
9884 if (list_empty(&cache->bg_list)) {
9885 btrfs_get_block_group(cache);
9886 list_add_tail(&cache->bg_list,
9887 &info->unused_bgs);
9888 }
9889 spin_unlock(&info->unused_bgs_lock);
9890 }
9891 }
9892
9893 list_for_each_entry_rcu(space_info, &root->fs_info->space_info, list) {
9894 if (!(get_alloc_profile(root, space_info->flags) &
9895 (BTRFS_BLOCK_GROUP_RAID10 |
9896 BTRFS_BLOCK_GROUP_RAID1 |
9897 BTRFS_BLOCK_GROUP_RAID5 |
9898 BTRFS_BLOCK_GROUP_RAID6 |
9899 BTRFS_BLOCK_GROUP_DUP)))
9900 continue;
9901 /*
9902 	 * avoid allocating from un-mirrored block groups if there are
9903 * mirrored block groups.
9904 */
9905 list_for_each_entry(cache,
9906 &space_info->block_groups[BTRFS_RAID_RAID0],
9907 list)
9908 inc_block_group_ro(cache, 1);
9909 list_for_each_entry(cache,
9910 &space_info->block_groups[BTRFS_RAID_SINGLE],
9911 list)
9912 inc_block_group_ro(cache, 1);
9913 }
9914
9915 init_global_block_rsv(info);
9916 ret = 0;
9917 error:
9918 btrfs_free_path(path);
9919 return ret;
9920 }
9921
9922 void btrfs_create_pending_block_groups(struct btrfs_trans_handle *trans,
9923 struct btrfs_root *root)
9924 {
9925 struct btrfs_block_group_cache *block_group, *tmp;
9926 struct btrfs_root *extent_root = root->fs_info->extent_root;
9927 struct btrfs_block_group_item item;
9928 struct btrfs_key key;
9929 int ret = 0;
9930 bool can_flush_pending_bgs = trans->can_flush_pending_bgs;
9931
9932 trans->can_flush_pending_bgs = false;
9933 list_for_each_entry_safe(block_group, tmp, &trans->new_bgs, bg_list) {
9934 if (ret)
9935 goto next;
9936
9937 spin_lock(&block_group->lock);
9938 memcpy(&item, &block_group->item, sizeof(item));
9939 memcpy(&key, &block_group->key, sizeof(key));
9940 spin_unlock(&block_group->lock);
9941
9942 ret = btrfs_insert_item(trans, extent_root, &key, &item,
9943 sizeof(item));
9944 if (ret)
9945 btrfs_abort_transaction(trans, extent_root, ret);
9946 ret = btrfs_finish_chunk_alloc(trans, extent_root,
9947 key.objectid, key.offset);
9948 if (ret)
9949 btrfs_abort_transaction(trans, extent_root, ret);
9950 add_block_group_free_space(trans, root->fs_info, block_group);
9951 /* already aborted the transaction if it failed. */
9952 next:
9953 list_del_init(&block_group->bg_list);
9954 }
9955 trans->can_flush_pending_bgs = can_flush_pending_bgs;
9956 }
9957
9958 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
9959 struct btrfs_root *root, u64 bytes_used,
9960 u64 type, u64 chunk_objectid, u64 chunk_offset,
9961 u64 size)
9962 {
9963 int ret;
9964 struct btrfs_root *extent_root;
9965 struct btrfs_block_group_cache *cache;
9966
9967 extent_root = root->fs_info->extent_root;
9968
9969 btrfs_set_log_full_commit(root->fs_info, trans);
9970
9971 cache = btrfs_create_block_group_cache(root, chunk_offset, size);
9972 if (!cache)
9973 return -ENOMEM;
9974
9975 btrfs_set_block_group_used(&cache->item, bytes_used);
9976 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
9977 btrfs_set_block_group_flags(&cache->item, type);
9978
9979 cache->flags = type;
9980 cache->last_byte_to_unpin = (u64)-1;
9981 cache->cached = BTRFS_CACHE_FINISHED;
9982 cache->needs_free_space = 1;
9983 ret = exclude_super_stripes(root, cache);
9984 if (ret) {
9985 /*
9986 * We may have excluded something, so call this just in
9987 * case.
9988 */
9989 free_excluded_extents(root, cache);
9990 btrfs_put_block_group(cache);
9991 return ret;
9992 }
9993
9994 add_new_free_space(cache, root->fs_info, chunk_offset,
9995 chunk_offset + size);
9996
9997 free_excluded_extents(root, cache);
9998
9999 #ifdef CONFIG_BTRFS_DEBUG
10000 if (btrfs_should_fragment_free_space(root, cache)) {
10001 u64 new_bytes_used = size - bytes_used;
10002
10003 bytes_used += new_bytes_used >> 1;
10004 fragment_free_space(root, cache);
10005 }
10006 #endif
10007 /*
10008 * Call to ensure the corresponding space_info object is created and
10009 * assigned to our block group, but don't update its counters just yet.
10010 * We want our bg to be added to the rbtree with its ->space_info set.
10011 */
10012 ret = update_space_info(root->fs_info, cache->flags, 0, 0,
10013 &cache->space_info);
10014 if (ret) {
10015 btrfs_remove_free_space_cache(cache);
10016 btrfs_put_block_group(cache);
10017 return ret;
10018 }
10019
10020 ret = btrfs_add_block_group_cache(root->fs_info, cache);
10021 if (ret) {
10022 btrfs_remove_free_space_cache(cache);
10023 btrfs_put_block_group(cache);
10024 return ret;
10025 }
10026
10027 /*
10028 * Now that our block group has its ->space_info set and is inserted in
10029 * the rbtree, update the space info's counters.
10030 */
10031 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
10032 &cache->space_info);
10033 if (ret) {
10034 btrfs_remove_free_space_cache(cache);
10035 spin_lock(&root->fs_info->block_group_cache_lock);
10036 rb_erase(&cache->cache_node,
10037 &root->fs_info->block_group_cache_tree);
10038 RB_CLEAR_NODE(&cache->cache_node);
10039 spin_unlock(&root->fs_info->block_group_cache_lock);
10040 btrfs_put_block_group(cache);
10041 return ret;
10042 }
10043 update_global_block_rsv(root->fs_info);
10044
10045 spin_lock(&cache->space_info->lock);
10046 cache->space_info->bytes_readonly += cache->bytes_super;
10047 spin_unlock(&cache->space_info->lock);
10048
10049 __link_block_group(cache->space_info, cache);
10050
10051 list_add_tail(&cache->bg_list, &trans->new_bgs);
10052
10053 set_avail_alloc_bits(extent_root->fs_info, type);
10054
10055 return 0;
10056 }
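
/*
 * Rough call-path sketch (orientation only): the chunk allocation code
 * in volumes.c creates the chunk mapping and then calls
 * btrfs_make_block_group(); the new group sits on trans->new_bgs until
 * btrfs_create_pending_block_groups() above persists its item in the
 * extent tree.
 */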
10057
10058 static void clear_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
10059 {
10060 u64 extra_flags = chunk_to_extended(flags) &
10061 BTRFS_EXTENDED_PROFILE_MASK;
10062
10063 write_seqlock(&fs_info->profiles_lock);
10064 if (flags & BTRFS_BLOCK_GROUP_DATA)
10065 fs_info->avail_data_alloc_bits &= ~extra_flags;
10066 if (flags & BTRFS_BLOCK_GROUP_METADATA)
10067 fs_info->avail_metadata_alloc_bits &= ~extra_flags;
10068 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
10069 fs_info->avail_system_alloc_bits &= ~extra_flags;
10070 write_sequnlock(&fs_info->profiles_lock);
10071 }
10072
10073 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
10074 struct btrfs_root *root, u64 group_start,
10075 struct extent_map *em)
10076 {
10077 struct btrfs_path *path;
10078 struct btrfs_block_group_cache *block_group;
10079 struct btrfs_free_cluster *cluster;
10080 struct btrfs_root *tree_root = root->fs_info->tree_root;
10081 struct btrfs_key key;
10082 struct inode *inode;
10083 struct kobject *kobj = NULL;
10084 int ret;
10085 int index;
10086 int factor;
10087 struct btrfs_caching_control *caching_ctl = NULL;
10088 bool remove_em;
10089
10090 root = root->fs_info->extent_root;
10091
10092 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
10093 BUG_ON(!block_group);
10094 BUG_ON(!block_group->ro);
10095
10096 /*
10097 * Free the reserved super bytes from this block group before
10098 	 * removing it.
10099 */
10100 free_excluded_extents(root, block_group);
10101
10102 memcpy(&key, &block_group->key, sizeof(key));
10103 index = get_block_group_index(block_group);
10104 if (block_group->flags & (BTRFS_BLOCK_GROUP_DUP |
10105 BTRFS_BLOCK_GROUP_RAID1 |
10106 BTRFS_BLOCK_GROUP_RAID10))
10107 factor = 2;
10108 else
10109 factor = 1;
10110
10111 /* make sure this block group isn't part of an allocation cluster */
10112 cluster = &root->fs_info->data_alloc_cluster;
10113 spin_lock(&cluster->refill_lock);
10114 btrfs_return_cluster_to_free_space(block_group, cluster);
10115 spin_unlock(&cluster->refill_lock);
10116
10117 /*
10118 * make sure this block group isn't part of a metadata
10119 * allocation cluster
10120 */
10121 cluster = &root->fs_info->meta_alloc_cluster;
10122 spin_lock(&cluster->refill_lock);
10123 btrfs_return_cluster_to_free_space(block_group, cluster);
10124 spin_unlock(&cluster->refill_lock);
10125
10126 path = btrfs_alloc_path();
10127 if (!path) {
10128 ret = -ENOMEM;
10129 goto out;
10130 }
10131
10132 /*
10133 * get the inode first so any iput calls done for the io_list
10134 * aren't the final iput (no unlinks allowed now)
10135 */
10136 inode = lookup_free_space_inode(tree_root, block_group, path);
10137
10138 mutex_lock(&trans->transaction->cache_write_mutex);
10139 /*
10140 	 * make sure our free space cache IO is done before removing the
10141 	 * free space inode
10142 */
10143 spin_lock(&trans->transaction->dirty_bgs_lock);
10144 if (!list_empty(&block_group->io_list)) {
10145 list_del_init(&block_group->io_list);
10146
10147 WARN_ON(!IS_ERR(inode) && inode != block_group->io_ctl.inode);
10148
10149 spin_unlock(&trans->transaction->dirty_bgs_lock);
10150 btrfs_wait_cache_io(root, trans, block_group,
10151 &block_group->io_ctl, path,
10152 block_group->key.objectid);
10153 btrfs_put_block_group(block_group);
10154 spin_lock(&trans->transaction->dirty_bgs_lock);
10155 }
10156
10157 if (!list_empty(&block_group->dirty_list)) {
10158 list_del_init(&block_group->dirty_list);
10159 btrfs_put_block_group(block_group);
10160 }
10161 spin_unlock(&trans->transaction->dirty_bgs_lock);
10162 mutex_unlock(&trans->transaction->cache_write_mutex);
10163
10164 if (!IS_ERR(inode)) {
10165 ret = btrfs_orphan_add(trans, inode);
10166 if (ret) {
10167 btrfs_add_delayed_iput(inode);
10168 goto out;
10169 }
10170 clear_nlink(inode);
10171 		/* One for the block group's ref */
10172 spin_lock(&block_group->lock);
10173 if (block_group->iref) {
10174 block_group->iref = 0;
10175 block_group->inode = NULL;
10176 spin_unlock(&block_group->lock);
10177 iput(inode);
10178 } else {
10179 spin_unlock(&block_group->lock);
10180 }
10181 /* One for our lookup ref */
10182 btrfs_add_delayed_iput(inode);
10183 }
10184
10185 key.objectid = BTRFS_FREE_SPACE_OBJECTID;
10186 key.offset = block_group->key.objectid;
10187 key.type = 0;
10188
10189 ret = btrfs_search_slot(trans, tree_root, &key, path, -1, 1);
10190 if (ret < 0)
10191 goto out;
10192 if (ret > 0)
10193 btrfs_release_path(path);
10194 if (ret == 0) {
10195 ret = btrfs_del_item(trans, tree_root, path);
10196 if (ret)
10197 goto out;
10198 btrfs_release_path(path);
10199 }
10200
10201 spin_lock(&root->fs_info->block_group_cache_lock);
10202 rb_erase(&block_group->cache_node,
10203 &root->fs_info->block_group_cache_tree);
10204 RB_CLEAR_NODE(&block_group->cache_node);
10205
10206 if (root->fs_info->first_logical_byte == block_group->key.objectid)
10207 root->fs_info->first_logical_byte = (u64)-1;
10208 spin_unlock(&root->fs_info->block_group_cache_lock);
10209
10210 down_write(&block_group->space_info->groups_sem);
10211 /*
10212 * we must use list_del_init so people can check to see if they
10213 * are still on the list after taking the semaphore
10214 */
10215 list_del_init(&block_group->list);
10216 if (list_empty(&block_group->space_info->block_groups[index])) {
10217 kobj = block_group->space_info->block_group_kobjs[index];
10218 block_group->space_info->block_group_kobjs[index] = NULL;
10219 clear_avail_alloc_bits(root->fs_info, block_group->flags);
10220 }
10221 up_write(&block_group->space_info->groups_sem);
10222 if (kobj) {
10223 kobject_del(kobj);
10224 kobject_put(kobj);
10225 }
10226
10227 if (block_group->has_caching_ctl)
10228 caching_ctl = get_caching_control(block_group);
10229 if (block_group->cached == BTRFS_CACHE_STARTED)
10230 wait_block_group_cache_done(block_group);
10231 if (block_group->has_caching_ctl) {
10232 down_write(&root->fs_info->commit_root_sem);
10233 if (!caching_ctl) {
10234 struct btrfs_caching_control *ctl;
10235
10236 list_for_each_entry(ctl,
10237 &root->fs_info->caching_block_groups, list)
10238 if (ctl->block_group == block_group) {
10239 caching_ctl = ctl;
10240 atomic_inc(&caching_ctl->count);
10241 break;
10242 }
10243 }
10244 if (caching_ctl)
10245 list_del_init(&caching_ctl->list);
10246 up_write(&root->fs_info->commit_root_sem);
10247 if (caching_ctl) {
10248 /* Once for the caching bgs list and once for us. */
10249 put_caching_control(caching_ctl);
10250 put_caching_control(caching_ctl);
10251 }
10252 }
10253
10254 spin_lock(&trans->transaction->dirty_bgs_lock);
10255 	WARN_ON(!list_empty(&block_group->dirty_list));
10256 	WARN_ON(!list_empty(&block_group->io_list));
10261 spin_unlock(&trans->transaction->dirty_bgs_lock);
10262 btrfs_remove_free_space_cache(block_group);
10263
10264 spin_lock(&block_group->space_info->lock);
10265 list_del_init(&block_group->ro_list);
10266
10267 if (btrfs_test_opt(root, ENOSPC_DEBUG)) {
10268 WARN_ON(block_group->space_info->total_bytes
10269 < block_group->key.offset);
10270 WARN_ON(block_group->space_info->bytes_readonly
10271 < block_group->key.offset);
10272 WARN_ON(block_group->space_info->disk_total
10273 < block_group->key.offset * factor);
10274 }
10275 block_group->space_info->total_bytes -= block_group->key.offset;
10276 block_group->space_info->bytes_readonly -= block_group->key.offset;
10277 block_group->space_info->disk_total -= block_group->key.offset * factor;
10278
10279 spin_unlock(&block_group->space_info->lock);
10280
10281 memcpy(&key, &block_group->key, sizeof(key));
10282
10283 lock_chunks(root);
10284 if (!list_empty(&em->list)) {
10285 /* We're in the transaction->pending_chunks list. */
10286 free_extent_map(em);
10287 }
10288 spin_lock(&block_group->lock);
10289 block_group->removed = 1;
10290 /*
10291 * At this point trimming can't start on this block group, because we
10292 * removed the block group from the tree fs_info->block_group_cache_tree
10293 	 * so no one can find it anymore, and even if someone already got this
10294 * block group before we removed it from the rbtree, they have already
10295 * incremented block_group->trimming - if they didn't, they won't find
10296 * any free space entries because we already removed them all when we
10297 * called btrfs_remove_free_space_cache().
10298 *
10299 * And we must not remove the extent map from the fs_info->mapping_tree
10300 * to prevent the same logical address range and physical device space
10301 * ranges from being reused for a new block group. This is because our
10302 * fs trim operation (btrfs_trim_fs() / btrfs_ioctl_fitrim()) is
10303 * completely transactionless, so while it is trimming a range the
10304 * currently running transaction might finish and a new one start,
10305 * allowing for new block groups to be created that can reuse the same
10306 * physical device locations unless we take this special care.
10307 *
10308 * There may also be an implicit trim operation if the file system
10309 * is mounted with -odiscard. The same protections must remain
10310 * in place until the extents have been discarded completely when
10311 * the transaction commit has completed.
10312 */
10313 remove_em = (atomic_read(&block_group->trimming) == 0);
10314 /*
10315 * Make sure a trimmer task always sees the em in the pinned_chunks list
10316 * if it sees block_group->removed == 1 (needs to lock block_group->lock
10317 * before checking block_group->removed).
10318 */
10319 if (!remove_em) {
10320 /*
10321 * Our em might be in trans->transaction->pending_chunks which
10322 * is protected by fs_info->chunk_mutex ([lock|unlock]_chunks),
10323 * and so is the fs_info->pinned_chunks list.
10324 *
10325 * So at this point we must be holding the chunk_mutex to avoid
10326 * any races with chunk allocation (more specifically at
10327 * volumes.c:contains_pending_extent()), to ensure it always
10328 * sees the em, either in the pending_chunks list or in the
10329 * pinned_chunks list.
10330 */
10331 list_move_tail(&em->list, &root->fs_info->pinned_chunks);
10332 }
10333 spin_unlock(&block_group->lock);
10334
10335 if (remove_em) {
10336 struct extent_map_tree *em_tree;
10337
10338 em_tree = &root->fs_info->mapping_tree.map_tree;
10339 write_lock(&em_tree->lock);
10340 /*
10341 * The em might be in the pending_chunks list, so make sure the
10342 * chunk mutex is locked, since remove_extent_mapping() will
10343 * delete us from that list.
10344 */
10345 remove_extent_mapping(em_tree, em);
10346 write_unlock(&em_tree->lock);
10347 /* once for the tree */
10348 free_extent_map(em);
10349 }
10350
10351 unlock_chunks(root);
10352
10353 ret = remove_block_group_free_space(trans, root->fs_info, block_group);
10354 if (ret)
10355 goto out;
10356
10357 btrfs_put_block_group(block_group);
10358 btrfs_put_block_group(block_group);
10359
10360 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
10361 if (ret > 0)
10362 ret = -EIO;
10363 if (ret < 0)
10364 goto out;
10365
10366 ret = btrfs_del_item(trans, root, path);
10367 out:
10368 btrfs_free_path(path);
10369 return ret;
10370 }
10371
10372 struct btrfs_trans_handle *
10373 btrfs_start_trans_remove_block_group(struct btrfs_fs_info *fs_info,
10374 const u64 chunk_offset)
10375 {
10376 struct extent_map_tree *em_tree = &fs_info->mapping_tree.map_tree;
10377 struct extent_map *em;
10378 struct map_lookup *map;
10379 unsigned int num_items;
10380
10381 read_lock(&em_tree->lock);
10382 em = lookup_extent_mapping(em_tree, chunk_offset, 1);
10383 read_unlock(&em_tree->lock);
10384 ASSERT(em && em->start == chunk_offset);
10385
10386 /*
10387 * We need to reserve 3 + N units from the metadata space info in order
10388 * to remove a block group (done at btrfs_remove_chunk() and at
10389 * btrfs_remove_block_group()), which are used for:
10390 *
10391 * 1 unit for adding the free space inode's orphan (located in the tree
10392 * of tree roots).
10393 * 1 unit for deleting the block group item (located in the extent
10394 * tree).
10395 * 1 unit for deleting the free space item (located in tree of tree
10396 * roots).
10397 * N units for deleting N device extent items corresponding to each
10398 * stripe (located in the device tree).
10399 *
10400 * In order to remove a block group we also need to reserve units in the
10401 * system space info in order to update the chunk tree (update one or
10402 * more device items and remove one chunk item), but this is done at
10403 * btrfs_remove_chunk() through a call to check_system_chunk().
10404 */
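	/*
	 * Worked example: a chunk with two stripes (e.g. RAID1 across two
	 * devices) makes num_items = 3 + 2 = 5 below.
	 */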
10405 map = em->map_lookup;
10406 num_items = 3 + map->num_stripes;
10407 free_extent_map(em);
10408
10409 return btrfs_start_transaction_fallback_global_rsv(fs_info->extent_root,
10410 num_items, 1);
10411 }
10412
10413 /*
10414 * Process the unused_bgs list and remove any that don't have any allocated
10415 * space inside of them.
10416 */
10417 void btrfs_delete_unused_bgs(struct btrfs_fs_info *fs_info)
10418 {
10419 struct btrfs_block_group_cache *block_group;
10420 struct btrfs_space_info *space_info;
10421 struct btrfs_root *root = fs_info->extent_root;
10422 struct btrfs_trans_handle *trans;
10423 int ret = 0;
10424
10425 if (!fs_info->open)
10426 return;
10427
10428 spin_lock(&fs_info->unused_bgs_lock);
10429 while (!list_empty(&fs_info->unused_bgs)) {
10430 u64 start, end;
10431 int trimming;
10432
10433 block_group = list_first_entry(&fs_info->unused_bgs,
10434 struct btrfs_block_group_cache,
10435 bg_list);
10436 list_del_init(&block_group->bg_list);
10437
10438 space_info = block_group->space_info;
10439
10440 if (ret || btrfs_mixed_space_info(space_info)) {
10441 btrfs_put_block_group(block_group);
10442 continue;
10443 }
10444 spin_unlock(&fs_info->unused_bgs_lock);
10445
10446 mutex_lock(&fs_info->delete_unused_bgs_mutex);
10447
10448 /* Don't want to race with allocators so take the groups_sem */
10449 down_write(&space_info->groups_sem);
10450 spin_lock(&block_group->lock);
10451 if (block_group->reserved ||
10452 btrfs_block_group_used(&block_group->item) ||
10453 block_group->ro ||
10454 list_is_singular(&block_group->list)) {
10455 /*
10456 * We want to bail if we made new allocations or have
10457 * outstanding allocations in this block group. We do
10458 * the ro check in case balance is currently acting on
10459 * this block group.
10460 */
10461 spin_unlock(&block_group->lock);
10462 up_write(&space_info->groups_sem);
10463 goto next;
10464 }
10465 spin_unlock(&block_group->lock);
10466
10467 /* We don't want to force the issue, only flip if it's ok. */
10468 ret = inc_block_group_ro(block_group, 0);
10469 up_write(&space_info->groups_sem);
10470 if (ret < 0) {
10471 ret = 0;
10472 goto next;
10473 }
10474
10475 /*
10476 * Want to do this before we do anything else so we can recover
10477 * properly if we fail to join the transaction.
10478 */
10479 trans = btrfs_start_trans_remove_block_group(fs_info,
10480 block_group->key.objectid);
10481 if (IS_ERR(trans)) {
10482 btrfs_dec_block_group_ro(root, block_group);
10483 ret = PTR_ERR(trans);
10484 goto next;
10485 }
10486
10487 /*
10488 * We could have pending pinned extents for this block group,
10489 * just delete them, we don't care about them anymore.
10490 */
10491 start = block_group->key.objectid;
10492 end = start + block_group->key.offset - 1;
10493 /*
10494 * Hold the unused_bg_unpin_mutex lock to avoid racing with
10495 * btrfs_finish_extent_commit(). If we are at transaction N,
10496 * another task might be running finish_extent_commit() for the
10497 * previous transaction N - 1, and have seen a range belonging
10498 * to the block group in freed_extents[] before we were able to
10499 * clear the whole block group range from freed_extents[]. This
10500 * means that task can lookup for the block group after we
10501 * unpinned it from freed_extents[] and removed it, leading to
10502 * a BUG_ON() at btrfs_unpin_extent_range().
10503 */
10504 mutex_lock(&fs_info->unused_bg_unpin_mutex);
10505 ret = clear_extent_bits(&fs_info->freed_extents[0], start, end,
10506 EXTENT_DIRTY, GFP_NOFS);
10507 if (ret) {
10508 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10509 btrfs_dec_block_group_ro(root, block_group);
10510 goto end_trans;
10511 }
10512 ret = clear_extent_bits(&fs_info->freed_extents[1], start, end,
10513 EXTENT_DIRTY, GFP_NOFS);
10514 if (ret) {
10515 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10516 btrfs_dec_block_group_ro(root, block_group);
10517 goto end_trans;
10518 }
10519 mutex_unlock(&fs_info->unused_bg_unpin_mutex);
10520
10521 /* Reset pinned so btrfs_put_block_group doesn't complain */
10522 spin_lock(&space_info->lock);
10523 spin_lock(&block_group->lock);
10524
10525 space_info->bytes_pinned -= block_group->pinned;
10526 space_info->bytes_readonly += block_group->pinned;
10527 percpu_counter_add(&space_info->total_bytes_pinned,
10528 -block_group->pinned);
10529 block_group->pinned = 0;
10530
10531 spin_unlock(&block_group->lock);
10532 spin_unlock(&space_info->lock);
10533
10534 /* DISCARD can flip during remount */
10535 trimming = btrfs_test_opt(root, DISCARD);
10536
10537 /* Implicit trim during transaction commit. */
10538 if (trimming)
10539 btrfs_get_block_group_trimming(block_group);
10540
10541 /*
10542 		 * btrfs_remove_chunk() will abort the transaction if things go
10543 * horribly wrong.
10544 */
10545 ret = btrfs_remove_chunk(trans, root,
10546 block_group->key.objectid);
10547
10548 if (ret) {
10549 if (trimming)
10550 btrfs_put_block_group_trimming(block_group);
10551 goto end_trans;
10552 }
10553
10554 /*
10555 * If we're not mounted with -odiscard, we can just forget
10556 * about this block group. Otherwise we'll need to wait
10557 * until transaction commit to do the actual discard.
10558 */
10559 if (trimming) {
10560 spin_lock(&fs_info->unused_bgs_lock);
10561 /*
10562 * A concurrent scrub might have added us to the list
10563 * fs_info->unused_bgs, so use a list_move operation
10564 * to add the block group to the deleted_bgs list.
10565 */
10566 list_move(&block_group->bg_list,
10567 &trans->transaction->deleted_bgs);
10568 spin_unlock(&fs_info->unused_bgs_lock);
10569 btrfs_get_block_group(block_group);
10570 }
10571 end_trans:
10572 btrfs_end_transaction(trans, root);
10573 next:
10574 mutex_unlock(&fs_info->delete_unused_bgs_mutex);
10575 btrfs_put_block_group(block_group);
10576 spin_lock(&fs_info->unused_bgs_lock);
10577 }
10578 spin_unlock(&fs_info->unused_bgs_lock);
10579 }
10580
10581 int btrfs_init_space_info(struct btrfs_fs_info *fs_info)
10582 {
10583 struct btrfs_space_info *space_info;
10584 struct btrfs_super_block *disk_super;
10585 u64 features;
10586 u64 flags;
10587 int mixed = 0;
10588 int ret;
10589
10590 disk_super = fs_info->super_copy;
10591 if (!btrfs_super_root(disk_super))
10592 return -EINVAL;
10593
10594 features = btrfs_super_incompat_flags(disk_super);
10595 if (features & BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS)
10596 mixed = 1;
10597
10598 flags = BTRFS_BLOCK_GROUP_SYSTEM;
10599 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10600 if (ret)
10601 goto out;
10602
10603 if (mixed) {
10604 flags = BTRFS_BLOCK_GROUP_METADATA | BTRFS_BLOCK_GROUP_DATA;
10605 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10606 } else {
10607 flags = BTRFS_BLOCK_GROUP_METADATA;
10608 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10609 if (ret)
10610 goto out;
10611
10612 flags = BTRFS_BLOCK_GROUP_DATA;
10613 ret = update_space_info(fs_info, flags, 0, 0, &space_info);
10614 }
10615 out:
10616 return ret;
10617 }
10618
10619 int btrfs_error_unpin_extent_range(struct btrfs_root *root, u64 start, u64 end)
10620 {
10621 return unpin_extent_range(root, start, end, false);
10622 }
10623
10624 /*
10625 * It used to be that old block groups would be left around forever.
10626 * Iterating over them would be enough to trim unused space. Since we
10627 * now automatically remove them, we also need to iterate over unallocated
10628 * space.
10629 *
10630 * We don't want a transaction for this since the discard may take a
10631 * substantial amount of time. We don't require that a transaction be
10632 * running, but we do need to take a running transaction into account
10633 * to ensure that we're not discarding chunks that were released in
10634 * the current transaction.
10635 *
10636 * Holding the chunks lock will prevent other threads from allocating
10637 * or releasing chunks, but it won't prevent a running transaction
10638 * from committing and releasing the memory that the pending chunks
10639 * list head uses. For that, we need to take a reference to the
10640 * transaction.
10641 */
10642 static int btrfs_trim_free_extents(struct btrfs_device *device,
10643 u64 minlen, u64 *trimmed)
10644 {
10645 u64 start = 0, len = 0;
10646 int ret;
10647
10648 *trimmed = 0;
10649
10650 /* Not writeable = nothing to do. */
10651 if (!device->writeable)
10652 return 0;
10653
10654 /* No free space = nothing to do. */
10655 if (device->total_bytes <= device->bytes_used)
10656 return 0;
10657
10658 ret = 0;
10659
10660 while (1) {
10661 struct btrfs_fs_info *fs_info = device->dev_root->fs_info;
10662 struct btrfs_transaction *trans;
10663 u64 bytes;
10664
10665 ret = mutex_lock_interruptible(&fs_info->chunk_mutex);
10666 if (ret)
10667 return ret;
10668
10669 down_read(&fs_info->commit_root_sem);
10670
10671 spin_lock(&fs_info->trans_lock);
10672 trans = fs_info->running_transaction;
10673 if (trans)
10674 atomic_inc(&trans->use_count);
10675 spin_unlock(&fs_info->trans_lock);
10676
10677 ret = find_free_dev_extent_start(trans, device, minlen, start,
10678 &start, &len);
10679 if (trans)
10680 btrfs_put_transaction(trans);
10681
10682 if (ret) {
10683 up_read(&fs_info->commit_root_sem);
10684 mutex_unlock(&fs_info->chunk_mutex);
10685 if (ret == -ENOSPC)
10686 ret = 0;
10687 break;
10688 }
10689
10690 ret = btrfs_issue_discard(device->bdev, start, len, &bytes);
10691 up_read(&fs_info->commit_root_sem);
10692 mutex_unlock(&fs_info->chunk_mutex);
10693
10694 if (ret)
10695 break;
10696
10697 start += len;
10698 *trimmed += bytes;
10699
10700 if (fatal_signal_pending(current)) {
10701 ret = -ERESTARTSYS;
10702 break;
10703 }
10704
10705 cond_resched();
10706 }
10707
10708 return ret;
10709 }
10710
10711 int btrfs_trim_fs(struct btrfs_root *root, struct fstrim_range *range)
10712 {
10713 struct btrfs_fs_info *fs_info = root->fs_info;
10714 struct btrfs_block_group_cache *cache = NULL;
10715 struct btrfs_device *device;
10716 struct list_head *devices;
10717 u64 group_trimmed;
10718 u64 start;
10719 u64 end;
10720 u64 trimmed = 0;
10721 u64 total_bytes = btrfs_super_total_bytes(fs_info->super_copy);
10722 int ret = 0;
10723
10724 /*
10725 	 * Try to trim all FS space; our first block group may start at a non-zero offset.
10726 */
10727 if (range->len == total_bytes)
10728 cache = btrfs_lookup_first_block_group(fs_info, range->start);
10729 else
10730 cache = btrfs_lookup_block_group(fs_info, range->start);
10731
10732 while (cache) {
10733 if (cache->key.objectid >= (range->start + range->len)) {
10734 btrfs_put_block_group(cache);
10735 break;
10736 }
10737
10738 start = max(range->start, cache->key.objectid);
10739 end = min(range->start + range->len,
10740 cache->key.objectid + cache->key.offset);
10741
10742 if (end - start >= range->minlen) {
10743 if (!block_group_cache_done(cache)) {
10744 ret = cache_block_group(cache, 0);
10745 if (ret) {
10746 btrfs_put_block_group(cache);
10747 break;
10748 }
10749 ret = wait_block_group_cache_done(cache);
10750 if (ret) {
10751 btrfs_put_block_group(cache);
10752 break;
10753 }
10754 }
10755 ret = btrfs_trim_block_group(cache,
10756 &group_trimmed,
10757 start,
10758 end,
10759 range->minlen);
10760
10761 trimmed += group_trimmed;
10762 if (ret) {
10763 btrfs_put_block_group(cache);
10764 break;
10765 }
10766 }
10767
10768 cache = next_block_group(fs_info->tree_root, cache);
10769 }
10770
10771 mutex_lock(&root->fs_info->fs_devices->device_list_mutex);
10772 devices = &root->fs_info->fs_devices->alloc_list;
10773 list_for_each_entry(device, devices, dev_alloc_list) {
10774 ret = btrfs_trim_free_extents(device, range->minlen,
10775 &group_trimmed);
10776 if (ret)
10777 break;
10778
10779 trimmed += group_trimmed;
10780 }
10781 mutex_unlock(&root->fs_info->fs_devices->device_list_mutex);
10782
10783 range->len = trimmed;
10784 return ret;
10785 }
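
/*
 * btrfs_trim_fs() is reached from userspace through the generic FITRIM
 * ioctl; an illustrative (hypothetical) caller looks like:
 *
 *	struct fstrim_range range = { .start = 0, .len = ULLONG_MAX };
 *
 *	if (ioctl(fd, FITRIM, &range) == 0)
 *		printf("trimmed %llu bytes\n", range.len);
 */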
10786
10787 /*
10788 * btrfs_{start,end}_write_no_snapshoting() are similar to
10789  * mnt_{want,drop}_write(); they are used to prevent some tasks from writing
10790  * data into the page cache through nocow before the subvolume is snapshoted
10791  * and then flushing it to disk only after the snapshot creation, or to prevent
10792  * operations while snapshoting is ongoing that would cause the snapshot to be
10793  * inconsistent (writes followed by expanding truncates for example).
10794 */
10795 void btrfs_end_write_no_snapshoting(struct btrfs_root *root)
10796 {
10797 percpu_counter_dec(&root->subv_writers->counter);
10798 /*
10799 * Make sure counter is updated before we wake up waiters.
10800 */
10801 smp_mb();
10802 if (waitqueue_active(&root->subv_writers->wait))
10803 wake_up(&root->subv_writers->wait);
10804 }
10805
10806 int btrfs_start_write_no_snapshoting(struct btrfs_root *root)
10807 {
10808 if (atomic_read(&root->will_be_snapshoted))
10809 return 0;
10810
10811 percpu_counter_inc(&root->subv_writers->counter);
10812 /*
10813 * Make sure counter is updated before we check for snapshot creation.
10814 */
10815 smp_mb();
10816 if (atomic_read(&root->will_be_snapshoted)) {
10817 btrfs_end_write_no_snapshoting(root);
10818 return 0;
10819 }
10820 return 1;
10821 }
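
/*
 * Illustrative usage of the pair above (hypothetical sketch; the real
 * callers sit in the nocow write paths):
 *
 *	if (btrfs_start_write_no_snapshoting(root)) {
 *		... do the nocow write ...
 *		btrfs_end_write_no_snapshoting(root);
 *	}
 */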
10822
10823 static int wait_snapshoting_atomic_t(atomic_t *a)
10824 {
10825 schedule();
10826 return 0;
10827 }
10828
10829 void btrfs_wait_for_snapshot_creation(struct btrfs_root *root)
10830 {
10831 while (true) {
10832 int ret;
10833
10834 ret = btrfs_start_write_no_snapshoting(root);
10835 if (ret)
10836 break;
10837 wait_on_atomic_t(&root->will_be_snapshoted,
10838 wait_snapshoting_atomic_t,
10839 TASK_UNINTERRUPTIBLE);
10840 }
10841 }