fs/btrfs/extent-tree.c
1 /*
2 * Copyright (C) 2007 Oracle. All rights reserved.
3 *
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public
6 * License v2 as published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public
14 * License along with this program; if not, write to the
15 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
16 * Boston, MA 02111-1307, USA.
17 */
18 #include <linux/sched.h>
19 #include <linux/pagemap.h>
20 #include <linux/writeback.h>
21 #include <linux/blkdev.h>
22 #include <linux/sort.h>
23 #include "compat.h"
24 #include "hash.h"
25 #include "crc32c.h"
26 #include "ctree.h"
27 #include "disk-io.h"
28 #include "print-tree.h"
29 #include "transaction.h"
30 #include "volumes.h"
31 #include "locking.h"
32 #include "ref-cache.h"
33
34 #define PENDING_EXTENT_INSERT 0
35 #define PENDING_EXTENT_DELETE 1
36 #define PENDING_BACKREF_UPDATE 2
37
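/*
 * describes one deferred change against the extent tree: an extent item
 * insert, an extent item delete, or a backref key/generation update.
 * These are queued up while the extent tree itself is being modified and
 * are drained later; see finish_current_insert() and del_pending_extents()
 * below.
 */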
38 struct pending_extent_op {
39 int type;
40 u64 bytenr;
41 u64 num_bytes;
42 u64 parent;
43 u64 orig_parent;
44 u64 generation;
45 u64 orig_generation;
46 int level;
47 struct list_head list;
48 int del;
49 };
50
51 static int finish_current_insert(struct btrfs_trans_handle *trans,
52 struct btrfs_root *extent_root, int all);
53 static int del_pending_extents(struct btrfs_trans_handle *trans,
54 struct btrfs_root *extent_root, int all);
55 static int pin_down_bytes(struct btrfs_trans_handle *trans,
56 struct btrfs_root *root,
57 u64 bytenr, u64 num_bytes, int is_data);
58 static int update_block_group(struct btrfs_trans_handle *trans,
59 struct btrfs_root *root,
60 u64 bytenr, u64 num_bytes, int alloc,
61 int mark_free);
62
63 static int block_group_bits(struct btrfs_block_group_cache *cache, u64 bits)
64 {
65 return (cache->flags & bits) == bits;
66 }
67
68 /*
69 * this adds the block group to the fs_info rb tree for the block group
70 * cache
71 */
72 static int btrfs_add_block_group_cache(struct btrfs_fs_info *info,
73 struct btrfs_block_group_cache *block_group)
74 {
75 struct rb_node **p;
76 struct rb_node *parent = NULL;
77 struct btrfs_block_group_cache *cache;
78
79 spin_lock(&info->block_group_cache_lock);
80 p = &info->block_group_cache_tree.rb_node;
81
82 while (*p) {
83 parent = *p;
84 cache = rb_entry(parent, struct btrfs_block_group_cache,
85 cache_node);
86 if (block_group->key.objectid < cache->key.objectid) {
87 p = &(*p)->rb_left;
88 } else if (block_group->key.objectid > cache->key.objectid) {
89 p = &(*p)->rb_right;
90 } else {
91 spin_unlock(&info->block_group_cache_lock);
92 return -EEXIST;
93 }
94 }
95
96 rb_link_node(&block_group->cache_node, parent, p);
97 rb_insert_color(&block_group->cache_node,
98 &info->block_group_cache_tree);
99 spin_unlock(&info->block_group_cache_lock);
100
101 return 0;
102 }
103
104 /*
105 * This will return the block group at or after bytenr if contains is 0, else
106 * it will return the block group that contains the bytenr
107 */
108 static struct btrfs_block_group_cache *
109 block_group_cache_tree_search(struct btrfs_fs_info *info, u64 bytenr,
110 int contains)
111 {
112 struct btrfs_block_group_cache *cache, *ret = NULL;
113 struct rb_node *n;
114 u64 end, start;
115
116 spin_lock(&info->block_group_cache_lock);
117 n = info->block_group_cache_tree.rb_node;
118
119 while (n) {
120 cache = rb_entry(n, struct btrfs_block_group_cache,
121 cache_node);
122 end = cache->key.objectid + cache->key.offset - 1;
123 start = cache->key.objectid;
124
125 if (bytenr < start) {
126 if (!contains && (!ret || start < ret->key.objectid))
127 ret = cache;
128 n = n->rb_left;
129 } else if (bytenr > start) {
130 if (contains && bytenr <= end) {
131 ret = cache;
132 break;
133 }
134 n = n->rb_right;
135 } else {
136 ret = cache;
137 break;
138 }
139 }
140 if (ret)
141 atomic_inc(&ret->count);
142 spin_unlock(&info->block_group_cache_lock);
143
144 return ret;
145 }
146
147 /*
148 * this is only called by cache_block_group. Since we could have freed extents,
149 * we need to check the pinned_extents for any extents that can't be used yet,
150 * since their free space will be released as soon as the transaction commits.
151 */
152 static int add_new_free_space(struct btrfs_block_group_cache *block_group,
153 struct btrfs_fs_info *info, u64 start, u64 end)
154 {
155 u64 extent_start, extent_end, size;
156 int ret;
157
158 mutex_lock(&info->pinned_mutex);
159 while (start < end) {
160 ret = find_first_extent_bit(&info->pinned_extents, start,
161 &extent_start, &extent_end,
162 EXTENT_DIRTY);
163 if (ret)
164 break;
165
166 if (extent_start == start) {
167 start = extent_end + 1;
168 } else if (extent_start > start && extent_start < end) {
169 size = extent_start - start;
170 ret = btrfs_add_free_space(block_group, start,
171 size);
172 BUG_ON(ret);
173 start = extent_end + 1;
174 } else {
175 break;
176 }
177 }
178
179 if (start < end) {
180 size = end - start;
181 ret = btrfs_add_free_space(block_group, start, size);
182 BUG_ON(ret);
183 }
184 mutex_unlock(&info->pinned_mutex);
185
186 return 0;
187 }
188
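/*
 * the superblock mirrors live inside block groups; knock their byte ranges
 * out of the free space cache so the allocator never hands them out.
 */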
189 static int remove_sb_from_cache(struct btrfs_root *root,
190 struct btrfs_block_group_cache *cache)
191 {
192 u64 bytenr;
193 u64 *logical;
194 int stripe_len;
195 int i, nr, ret;
196
197 for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
198 bytenr = btrfs_sb_offset(i);
199 ret = btrfs_rmap_block(&root->fs_info->mapping_tree,
200 cache->key.objectid, bytenr, 0,
201 &logical, &nr, &stripe_len);
202 BUG_ON(ret);
203 while (nr--) {
204 btrfs_remove_free_space(cache, logical[nr],
205 stripe_len);
206 }
207 kfree(logical);
208 }
209 return 0;
210 }
211
212 static int cache_block_group(struct btrfs_root *root,
213 struct btrfs_block_group_cache *block_group)
214 {
215 struct btrfs_path *path;
216 int ret = 0;
217 struct btrfs_key key;
218 struct extent_buffer *leaf;
219 int slot;
220 u64 last;
221
222 if (!block_group)
223 return 0;
224
225 root = root->fs_info->extent_root;
226
227 if (block_group->cached)
228 return 0;
229
230 path = btrfs_alloc_path();
231 if (!path)
232 return -ENOMEM;
233
234 path->reada = 2;
235 /*
236 * we get into deadlocks with paths held by callers of this function.
237 * since the alloc_mutex is protecting things right now, just
238 * skip the locking here
239 */
240 path->skip_locking = 1;
241 last = max_t(u64, block_group->key.objectid, BTRFS_SUPER_INFO_OFFSET);
242 key.objectid = last;
243 key.offset = 0;
244 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
245 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
246 if (ret < 0)
247 goto err;
248
249 while (1) {
250 leaf = path->nodes[0];
251 slot = path->slots[0];
252 if (slot >= btrfs_header_nritems(leaf)) {
253 ret = btrfs_next_leaf(root, path);
254 if (ret < 0)
255 goto err;
256 if (ret == 0)
257 continue;
258 else
259 break;
260 }
261 btrfs_item_key_to_cpu(leaf, &key, slot);
262 if (key.objectid < block_group->key.objectid)
263 goto next;
264
265 if (key.objectid >= block_group->key.objectid +
266 block_group->key.offset)
267 break;
268
269 if (btrfs_key_type(&key) == BTRFS_EXTENT_ITEM_KEY) {
270 add_new_free_space(block_group, root->fs_info, last,
271 key.objectid);
272
273 last = key.objectid + key.offset;
274 }
275 next:
276 path->slots[0]++;
277 }
278
279 add_new_free_space(block_group, root->fs_info, last,
280 block_group->key.objectid +
281 block_group->key.offset);
282
283 remove_sb_from_cache(root, block_group);
284 block_group->cached = 1;
285 ret = 0;
286 err:
287 btrfs_free_path(path);
288 return ret;
289 }
290
291 /*
292 * return the block group that starts at or after bytenr
293 */
294 static struct btrfs_block_group_cache *
295 btrfs_lookup_first_block_group(struct btrfs_fs_info *info, u64 bytenr)
296 {
297 struct btrfs_block_group_cache *cache;
298
299 cache = block_group_cache_tree_search(info, bytenr, 0);
300
301 return cache;
302 }
303
304 /*
305 * return the block group that contains the given bytenr
306 */
307 struct btrfs_block_group_cache *btrfs_lookup_block_group(
308 struct btrfs_fs_info *info,
309 u64 bytenr)
310 {
311 struct btrfs_block_group_cache *cache;
312
313 cache = block_group_cache_tree_search(info, bytenr, 1);
314
315 return cache;
316 }
317
318 static inline void put_block_group(struct btrfs_block_group_cache *cache)
319 {
320 if (atomic_dec_and_test(&cache->count))
321 kfree(cache);
322 }
323
324 static struct btrfs_space_info *__find_space_info(struct btrfs_fs_info *info,
325 u64 flags)
326 {
327 struct list_head *head = &info->space_info;
328 struct btrfs_space_info *found;
329 list_for_each_entry(found, head, list) {
330 if (found->flags == flags)
331 return found;
332 }
333 return NULL;
334 }
335
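/* scale num by factor/10, e.g. factor == 9 returns 90% of num */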
336 static u64 div_factor(u64 num, int factor)
337 {
338 if (factor == 10)
339 return num;
340 num *= factor;
341 do_div(num, 10);
342 return num;
343 }
344
345 u64 btrfs_find_block_group(struct btrfs_root *root,
346 u64 search_start, u64 search_hint, int owner)
347 {
348 struct btrfs_block_group_cache *cache;
349 u64 used;
350 u64 last = max(search_hint, search_start);
351 u64 group_start = 0;
352 int full_search = 0;
353 int factor = 9;
354 int wrapped = 0;
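/*
 * scan for a metadata block group that is less than factor/10 (initially
 * 90%) full; if nothing suitable is found, widen the search to read-only
 * groups and a factor of 10 before giving up.
 */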
355 again:
356 while (1) {
357 cache = btrfs_lookup_first_block_group(root->fs_info, last);
358 if (!cache)
359 break;
360
361 spin_lock(&cache->lock);
362 last = cache->key.objectid + cache->key.offset;
363 used = btrfs_block_group_used(&cache->item);
364
365 if ((full_search || !cache->ro) &&
366 block_group_bits(cache, BTRFS_BLOCK_GROUP_METADATA)) {
367 if (used + cache->pinned + cache->reserved <
368 div_factor(cache->key.offset, factor)) {
369 group_start = cache->key.objectid;
370 spin_unlock(&cache->lock);
371 put_block_group(cache);
372 goto found;
373 }
374 }
375 spin_unlock(&cache->lock);
376 put_block_group(cache);
377 cond_resched();
378 }
379 if (!wrapped) {
380 last = search_start;
381 wrapped = 1;
382 goto again;
383 }
384 if (!full_search && factor < 10) {
385 last = search_start;
386 full_search = 1;
387 factor = 10;
388 goto again;
389 }
390 found:
391 return group_start;
392 }
393
394 /* simple helper to search for an existing extent at a given offset */
395 int btrfs_lookup_extent(struct btrfs_root *root, u64 start, u64 len)
396 {
397 int ret;
398 struct btrfs_key key;
399 struct btrfs_path *path;
400
401 path = btrfs_alloc_path();
402 BUG_ON(!path);
403 key.objectid = start;
404 key.offset = len;
405 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
406 ret = btrfs_search_slot(NULL, root->fs_info->extent_root, &key, path,
407 0, 0);
408 btrfs_free_path(path);
409 return ret;
410 }
411
412 /*
413 * Back reference rules. Back refs have three main goals:
414 *
415 * 1) differentiate between all holders of references to an extent so that
416 * when a reference is dropped we can make sure it was a valid reference
417 * before freeing the extent.
418 *
419 * 2) Provide enough information to quickly find the holders of an extent
420 * if we notice a given block is corrupted or bad.
421 *
422 * 3) Make it easy to migrate blocks for FS shrinking or storage pool
423 * maintenance. This is actually the same as #2, but with a slightly
424 * different use case.
425 *
426 * File extents can be referenced by:
427 *
428 * - multiple snapshots, subvolumes, or different generations in one subvol
429 * - different files inside a single subvolume
430 * - different offsets inside a file (bookend extents in file.c)
431 *
432 * The extent ref structure has fields for:
433 *
434 * - Objectid of the subvolume root
435 * - Generation number of the tree holding the reference
436 * - objectid of the file holding the reference
437 * - number of references held by the parent node (always 1 for tree blocks)
438 *
439 * A btree leaf may hold multiple references to a file extent. In most cases,
440 * these references are from the same file and the corresponding offsets inside
441 * the file are close together.
442 *
443 * When a file extent is allocated the fields are filled in:
444 * (root_key.objectid, trans->transid, inode objectid, 1)
445 *
446 * When a leaf is cow'd new references are added for every file extent found
447 * in the leaf. It looks similar to the create case, but trans->transid will
448 * be different when the block is cow'd.
449 *
450 * (root_key.objectid, trans->transid, inode objectid,
451 * number of references in the leaf)
452 *
453 * When a file extent is removed either during snapshot deletion or
454 * file truncation, we find the corresponding back reference and check
455 * the following fields:
456 *
457 * (btrfs_header_owner(leaf), btrfs_header_generation(leaf),
458 * inode objectid)
459 *
460 * Btree extents can be referenced by:
461 *
462 * - Different subvolumes
463 * - Different generations of the same subvolume
464 *
465 * When a tree block is created, back references are inserted:
466 *
467 * (root->root_key.objectid, trans->transid, level, 1)
468 *
469 * When a tree block is cow'd, new back references are added for all the
470 * blocks it points to. If the tree block isn't in a reference counted root,
471 * the old back references are removed. These new back references are of
472 * the form (trans->transid will have increased since creation):
473 *
474 * (root->root_key.objectid, trans->transid, level, 1)
475 *
476 * When a backref is being deleted, the following fields are checked:
477 *
478 * if backref was for a tree root:
479 * (btrfs_header_owner(itself), btrfs_header_generation(itself), level)
480 * else
481 * (btrfs_header_owner(parent), btrfs_header_generation(parent), level)
482 *
483 * Back Reference Key composition:
484 *
485 * The key objectid corresponds to the first byte in the extent, the key
486 * type is set to BTRFS_EXTENT_REF_KEY, and the key offset is the first
487 * byte of the parent extent. If an extent is a tree root, the key offset
488 * is set to the key objectid.
489 */
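/*
 * Illustrative example (hypothetical byte numbers): an extent starting at
 * bytenr 136314880 that is referenced from a tree block starting at bytenr
 * 137363456 carries a back reference keyed as
 *
 *     (objectid = 136314880, type = BTRFS_EXTENT_REF_KEY, offset = 137363456)
 *
 * and if the extent were itself a tree root, the offset would equal the
 * objectid instead.
 */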
490
491 static noinline int lookup_extent_backref(struct btrfs_trans_handle *trans,
492 struct btrfs_root *root,
493 struct btrfs_path *path,
494 u64 bytenr, u64 parent,
495 u64 ref_root, u64 ref_generation,
496 u64 owner_objectid, int del)
497 {
498 struct btrfs_key key;
499 struct btrfs_extent_ref *ref;
500 struct extent_buffer *leaf;
501 u64 ref_objectid;
502 int ret;
503
504 key.objectid = bytenr;
505 key.type = BTRFS_EXTENT_REF_KEY;
506 key.offset = parent;
507
508 ret = btrfs_search_slot(trans, root, &key, path, del ? -1 : 0, 1);
509 if (ret < 0)
510 goto out;
511 if (ret > 0) {
512 ret = -ENOENT;
513 goto out;
514 }
515
516 leaf = path->nodes[0];
517 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
518 ref_objectid = btrfs_ref_objectid(leaf, ref);
519 if (btrfs_ref_root(leaf, ref) != ref_root ||
520 btrfs_ref_generation(leaf, ref) != ref_generation ||
521 (ref_objectid != owner_objectid &&
522 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
523 ret = -EIO;
524 WARN_ON(1);
525 goto out;
526 }
527 ret = 0;
528 out:
529 return ret;
530 }
531
532 /*
533 * updates all the backrefs that are pending on update_list for the
534 * extent_root
535 */
536 static noinline int update_backrefs(struct btrfs_trans_handle *trans,
537 struct btrfs_root *extent_root,
538 struct btrfs_path *path,
539 struct list_head *update_list)
540 {
541 struct btrfs_key key;
542 struct btrfs_extent_ref *ref;
543 struct btrfs_fs_info *info = extent_root->fs_info;
544 struct pending_extent_op *op;
545 struct extent_buffer *leaf;
546 int ret = 0;
547 struct list_head *cur = update_list->next;
548 u64 ref_objectid;
549 u64 ref_root = extent_root->root_key.objectid;
550
551 op = list_entry(cur, struct pending_extent_op, list);
552
553 search:
554 key.objectid = op->bytenr;
555 key.type = BTRFS_EXTENT_REF_KEY;
556 key.offset = op->orig_parent;
557
558 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 1);
559 BUG_ON(ret);
560
561 leaf = path->nodes[0];
562
563 loop:
564 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
565
566 ref_objectid = btrfs_ref_objectid(leaf, ref);
567
568 if (btrfs_ref_root(leaf, ref) != ref_root ||
569 btrfs_ref_generation(leaf, ref) != op->orig_generation ||
570 (ref_objectid != op->level &&
571 ref_objectid != BTRFS_MULTIPLE_OBJECTIDS)) {
572 printk(KERN_ERR "btrfs couldn't find %llu, parent %llu, "
573 "root %llu, owner %u\n",
574 (unsigned long long)op->bytenr,
575 (unsigned long long)op->orig_parent,
576 (unsigned long long)ref_root, op->level);
577 btrfs_print_leaf(extent_root, leaf);
578 BUG();
579 }
580
581 key.objectid = op->bytenr;
582 key.offset = op->parent;
583 key.type = BTRFS_EXTENT_REF_KEY;
584 ret = btrfs_set_item_key_safe(trans, extent_root, path, &key);
585 BUG_ON(ret);
586 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
587 btrfs_set_ref_generation(leaf, ref, op->generation);
588
589 cur = cur->next;
590
591 list_del_init(&op->list);
592 unlock_extent(&info->extent_ins, op->bytenr,
593 op->bytenr + op->num_bytes - 1, GFP_NOFS);
594 kfree(op);
595
596 if (cur == update_list) {
597 btrfs_mark_buffer_dirty(path->nodes[0]);
598 btrfs_release_path(extent_root, path);
599 goto out;
600 }
601
602 op = list_entry(cur, struct pending_extent_op, list);
603
604 path->slots[0]++;
605 while (path->slots[0] < btrfs_header_nritems(leaf)) {
606 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
607 if (key.objectid == op->bytenr &&
608 key.type == BTRFS_EXTENT_REF_KEY)
609 goto loop;
610 path->slots[0]++;
611 }
612
613 btrfs_mark_buffer_dirty(path->nodes[0]);
614 btrfs_release_path(extent_root, path);
615 goto search;
616
617 out:
618 return 0;
619 }
620
621 static noinline int insert_extents(struct btrfs_trans_handle *trans,
622 struct btrfs_root *extent_root,
623 struct btrfs_path *path,
624 struct list_head *insert_list, int nr)
625 {
626 struct btrfs_key *keys;
627 u32 *data_size;
628 struct pending_extent_op *op;
629 struct extent_buffer *leaf;
630 struct list_head *cur = insert_list->next;
631 struct btrfs_fs_info *info = extent_root->fs_info;
632 u64 ref_root = extent_root->root_key.objectid;
633 int i = 0, last = 0, ret;
634 int total = nr * 2;
635
636 if (!nr)
637 return 0;
638
639 keys = kzalloc(total * sizeof(struct btrfs_key), GFP_NOFS);
640 if (!keys)
641 return -ENOMEM;
642
643 data_size = kzalloc(total * sizeof(u32), GFP_NOFS);
644 if (!data_size) {
645 kfree(keys);
646 return -ENOMEM;
647 }
648
649 list_for_each_entry(op, insert_list, list) {
650 keys[i].objectid = op->bytenr;
651 keys[i].offset = op->num_bytes;
652 keys[i].type = BTRFS_EXTENT_ITEM_KEY;
653 data_size[i] = sizeof(struct btrfs_extent_item);
654 i++;
655
656 keys[i].objectid = op->bytenr;
657 keys[i].offset = op->parent;
658 keys[i].type = BTRFS_EXTENT_REF_KEY;
659 data_size[i] = sizeof(struct btrfs_extent_ref);
660 i++;
661 }
662
663 op = list_entry(cur, struct pending_extent_op, list);
664 i = 0;
665 while (i < total) {
666 int c;
667 ret = btrfs_insert_some_items(trans, extent_root, path,
668 keys+i, data_size+i, total-i);
669 BUG_ON(ret < 0);
670
671 if (last && ret > 1)
672 BUG();
673
674 leaf = path->nodes[0];
675 for (c = 0; c < ret; c++) {
676 int ref_first = keys[i].type == BTRFS_EXTENT_REF_KEY;
677
678 /*
679 * if the first item we inserted was a backref, then
680 * the EXTENT_ITEM will be the odd c's, else it will
681 * be the even c's
682 */
683 if ((ref_first && (c % 2)) ||
684 (!ref_first && !(c % 2))) {
685 struct btrfs_extent_item *itm;
686
687 itm = btrfs_item_ptr(leaf, path->slots[0] + c,
688 struct btrfs_extent_item);
689 btrfs_set_extent_refs(path->nodes[0], itm, 1);
690 op->del++;
691 } else {
692 struct btrfs_extent_ref *ref;
693
694 ref = btrfs_item_ptr(leaf, path->slots[0] + c,
695 struct btrfs_extent_ref);
696 btrfs_set_ref_root(leaf, ref, ref_root);
697 btrfs_set_ref_generation(leaf, ref,
698 op->generation);
699 btrfs_set_ref_objectid(leaf, ref, op->level);
700 btrfs_set_ref_num_refs(leaf, ref, 1);
701 op->del++;
702 }
703
704 /*
705 * using del to see when it's ok to free up the
706 * pending_extent_op. In the case where we insert the
707 * last item on the list in order to help do batching
708 * we need to not free the extent op until we actually
709 * insert the extent_item
710 */
711 if (op->del == 2) {
712 unlock_extent(&info->extent_ins, op->bytenr,
713 op->bytenr + op->num_bytes - 1,
714 GFP_NOFS);
715 cur = cur->next;
716 list_del_init(&op->list);
717 kfree(op);
718 if (cur != insert_list)
719 op = list_entry(cur,
720 struct pending_extent_op,
721 list);
722 }
723 }
724 btrfs_mark_buffer_dirty(leaf);
725 btrfs_release_path(extent_root, path);
726
727 /*
728 * Ok, backrefs and items usually go right next to each other,
729 * but if we could only insert 1 item that means that we
730 * inserted on the end of a leaf, and we have no idea what may
731 * be on the next leaf so we just play it safe. In order to
732 * try and help this case we insert the last thing on our
733 * insert list so hopefully it will end up being the last
734 * thing on the leaf and everything else will be before it,
735 * which will let us insert a whole bunch of items at the same
736 * time.
737 */
738 if (ret == 1 && !last && (i + ret < total)) {
739 /*
740 * last: where we will pick up the next time around
741 * i: our current key to insert, will be total - 1
742 * cur: the current op we are screwing with
743 * op: duh
744 */
745 last = i + ret;
746 i = total - 1;
747 cur = insert_list->prev;
748 op = list_entry(cur, struct pending_extent_op, list);
749 } else if (last) {
750 /*
751 * ok we successfully inserted the last item on the
752 * list, lets reset everything
753 *
754 * i: our current key to insert, so where we left off
755 * last time
756 * last: done with this
757 * cur: the op we are messing with
758 * op: duh
759 * total: since we inserted the last key, we need to
760 * decrement total so we dont overflow
761 */
762 i = last;
763 last = 0;
764 total--;
765 if (i < total) {
766 cur = insert_list->next;
767 op = list_entry(cur, struct pending_extent_op,
768 list);
769 }
770 } else {
771 i += ret;
772 }
773
774 cond_resched();
775 }
776 ret = 0;
777 kfree(keys);
778 kfree(data_size);
779 return ret;
780 }
781
782 static noinline int insert_extent_backref(struct btrfs_trans_handle *trans,
783 struct btrfs_root *root,
784 struct btrfs_path *path,
785 u64 bytenr, u64 parent,
786 u64 ref_root, u64 ref_generation,
787 u64 owner_objectid)
788 {
789 struct btrfs_key key;
790 struct extent_buffer *leaf;
791 struct btrfs_extent_ref *ref;
792 u32 num_refs;
793 int ret;
794
795 key.objectid = bytenr;
796 key.type = BTRFS_EXTENT_REF_KEY;
797 key.offset = parent;
798
799 ret = btrfs_insert_empty_item(trans, root, path, &key, sizeof(*ref));
800 if (ret == 0) {
801 leaf = path->nodes[0];
802 ref = btrfs_item_ptr(leaf, path->slots[0],
803 struct btrfs_extent_ref);
804 btrfs_set_ref_root(leaf, ref, ref_root);
805 btrfs_set_ref_generation(leaf, ref, ref_generation);
806 btrfs_set_ref_objectid(leaf, ref, owner_objectid);
807 btrfs_set_ref_num_refs(leaf, ref, 1);
808 } else if (ret == -EEXIST) {
809 u64 existing_owner;
810 BUG_ON(owner_objectid < BTRFS_FIRST_FREE_OBJECTID);
811 leaf = path->nodes[0];
812 ref = btrfs_item_ptr(leaf, path->slots[0],
813 struct btrfs_extent_ref);
814 if (btrfs_ref_root(leaf, ref) != ref_root ||
815 btrfs_ref_generation(leaf, ref) != ref_generation) {
816 ret = -EIO;
817 WARN_ON(1);
818 goto out;
819 }
820
821 num_refs = btrfs_ref_num_refs(leaf, ref);
822 BUG_ON(num_refs == 0);
823 btrfs_set_ref_num_refs(leaf, ref, num_refs + 1);
824
825 existing_owner = btrfs_ref_objectid(leaf, ref);
826 if (existing_owner != owner_objectid &&
827 existing_owner != BTRFS_MULTIPLE_OBJECTIDS) {
828 btrfs_set_ref_objectid(leaf, ref,
829 BTRFS_MULTIPLE_OBJECTIDS);
830 }
831 ret = 0;
832 } else {
833 goto out;
834 }
835 btrfs_mark_buffer_dirty(path->nodes[0]);
836 out:
837 btrfs_release_path(root, path);
838 return ret;
839 }
840
841 static noinline int remove_extent_backref(struct btrfs_trans_handle *trans,
842 struct btrfs_root *root,
843 struct btrfs_path *path)
844 {
845 struct extent_buffer *leaf;
846 struct btrfs_extent_ref *ref;
847 u32 num_refs;
848 int ret = 0;
849
850 leaf = path->nodes[0];
851 ref = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_extent_ref);
852 num_refs = btrfs_ref_num_refs(leaf, ref);
853 BUG_ON(num_refs == 0);
854 num_refs -= 1;
855 if (num_refs == 0) {
856 ret = btrfs_del_item(trans, root, path);
857 } else {
858 btrfs_set_ref_num_refs(leaf, ref, num_refs);
859 btrfs_mark_buffer_dirty(leaf);
860 }
861 btrfs_release_path(root, path);
862 return ret;
863 }
864
865 #ifdef BIO_RW_DISCARD
866 static void btrfs_issue_discard(struct block_device *bdev,
867 u64 start, u64 len)
868 {
869 blkdev_issue_discard(bdev, start >> 9, len >> 9, GFP_KERNEL);
870 }
871 #endif
872
873 static int btrfs_discard_extent(struct btrfs_root *root, u64 bytenr,
874 u64 num_bytes)
875 {
876 #ifdef BIO_RW_DISCARD
877 int ret;
878 u64 map_length = num_bytes;
879 struct btrfs_multi_bio *multi = NULL;
880
881 /* Tell the block device(s) that the sectors can be discarded */
882 ret = btrfs_map_block(&root->fs_info->mapping_tree, READ,
883 bytenr, &map_length, &multi, 0);
884 if (!ret) {
885 struct btrfs_bio_stripe *stripe = multi->stripes;
886 int i;
887
888 if (map_length > num_bytes)
889 map_length = num_bytes;
890
891 for (i = 0; i < multi->num_stripes; i++, stripe++) {
892 btrfs_issue_discard(stripe->dev->bdev,
893 stripe->physical,
894 map_length);
895 }
896 kfree(multi);
897 }
898
899 return ret;
900 #else
901 return 0;
902 #endif
903 }
904
905 static noinline int free_extents(struct btrfs_trans_handle *trans,
906 struct btrfs_root *extent_root,
907 struct list_head *del_list)
908 {
909 struct btrfs_fs_info *info = extent_root->fs_info;
910 struct btrfs_path *path;
911 struct btrfs_key key, found_key;
912 struct extent_buffer *leaf;
913 struct list_head *cur;
914 struct pending_extent_op *op;
915 struct btrfs_extent_item *ei;
916 int ret, num_to_del, extent_slot = 0, found_extent = 0;
917 u32 refs;
918 u64 bytes_freed = 0;
919
920 path = btrfs_alloc_path();
921 if (!path)
922 return -ENOMEM;
923 path->reada = 1;
924
925 search:
926 /* search for the backref for the current ref we want to delete */
927 cur = del_list->next;
928 op = list_entry(cur, struct pending_extent_op, list);
929 ret = lookup_extent_backref(trans, extent_root, path, op->bytenr,
930 op->orig_parent,
931 extent_root->root_key.objectid,
932 op->orig_generation, op->level, 1);
933 if (ret) {
934 printk(KERN_ERR "btrfs unable to find backref byte nr %llu "
935 "root %llu gen %llu owner %u\n",
936 (unsigned long long)op->bytenr,
937 (unsigned long long)extent_root->root_key.objectid,
938 (unsigned long long)op->orig_generation, op->level);
939 btrfs_print_leaf(extent_root, path->nodes[0]);
940 WARN_ON(1);
941 goto out;
942 }
943
944 extent_slot = path->slots[0];
945 num_to_del = 1;
946 found_extent = 0;
947
948 /*
949 * if we aren't the first item on the leaf we can move back one and see
950 * if our ref is right next to our extent item
951 */
952 if (likely(extent_slot)) {
953 extent_slot--;
954 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
955 extent_slot);
956 if (found_key.objectid == op->bytenr &&
957 found_key.type == BTRFS_EXTENT_ITEM_KEY &&
958 found_key.offset == op->num_bytes) {
959 num_to_del++;
960 found_extent = 1;
961 }
962 }
963
964 /*
965 * if we didn't find the extent we need to delete the backref and then
966 * search for the extent item key so we can update its ref count
967 */
968 if (!found_extent) {
969 key.objectid = op->bytenr;
970 key.type = BTRFS_EXTENT_ITEM_KEY;
971 key.offset = op->num_bytes;
972
973 ret = remove_extent_backref(trans, extent_root, path);
974 BUG_ON(ret);
975 btrfs_release_path(extent_root, path);
976 ret = btrfs_search_slot(trans, extent_root, &key, path, -1, 1);
977 BUG_ON(ret);
978 extent_slot = path->slots[0];
979 }
980
981 /* this is where we update the ref count for the extent */
982 leaf = path->nodes[0];
983 ei = btrfs_item_ptr(leaf, extent_slot, struct btrfs_extent_item);
984 refs = btrfs_extent_refs(leaf, ei);
985 BUG_ON(refs == 0);
986 refs--;
987 btrfs_set_extent_refs(leaf, ei, refs);
988
989 btrfs_mark_buffer_dirty(leaf);
990
991 /*
992 * This extent needs deleting. The reason cur_slot is extent_slot +
993 * num_to_del is because extent_slot points to the slot where the extent
994 * is, and if the backref was not right next to the extent we will be
995 * deleting at least 1 item, and will want to start searching at the
996 * slot directly next to extent_slot. However if we did find the
997 * backref next to the extent item then we will be deleting at least 2
998 * items and will want to start searching directly after the ref slot
999 */
1000 if (!refs) {
1001 struct list_head *pos, *n, *end;
1002 int cur_slot = extent_slot+num_to_del;
1003 u64 super_used;
1004 u64 root_used;
1005
1006 path->slots[0] = extent_slot;
1007 bytes_freed = op->num_bytes;
1008
1009 mutex_lock(&info->pinned_mutex);
1010 ret = pin_down_bytes(trans, extent_root, op->bytenr,
1011 op->num_bytes, op->level >=
1012 BTRFS_FIRST_FREE_OBJECTID);
1013 mutex_unlock(&info->pinned_mutex);
1014 BUG_ON(ret < 0);
1015 op->del = ret;
1016
1017 /*
1018 * we need to see if we can delete multiple things at once, so
1019 * start looping through the list of extents we are wanting to
1020 * delete and see if their extent/backrefs are right next to
1021 * each other and the extents only have 1 ref
1022 */
1023 for (pos = cur->next; pos != del_list; pos = pos->next) {
1024 struct pending_extent_op *tmp;
1025
1026 tmp = list_entry(pos, struct pending_extent_op, list);
1027
1028 /* we only want to delete extent+ref at this stage */
1029 if (cur_slot >= btrfs_header_nritems(leaf) - 1)
1030 break;
1031
1032 btrfs_item_key_to_cpu(leaf, &found_key, cur_slot);
1033 if (found_key.objectid != tmp->bytenr ||
1034 found_key.type != BTRFS_EXTENT_ITEM_KEY ||
1035 found_key.offset != tmp->num_bytes)
1036 break;
1037
1038 /* check to make sure this extent only has one ref */
1039 ei = btrfs_item_ptr(leaf, cur_slot,
1040 struct btrfs_extent_item);
1041 if (btrfs_extent_refs(leaf, ei) != 1)
1042 break;
1043
1044 btrfs_item_key_to_cpu(leaf, &found_key, cur_slot+1);
1045 if (found_key.objectid != tmp->bytenr ||
1046 found_key.type != BTRFS_EXTENT_REF_KEY ||
1047 found_key.offset != tmp->orig_parent)
1048 break;
1049
1050 /*
1051 * the ref is right next to the extent, we can set the
1052 * ref count to 0 since we will delete them both now
1053 */
1054 btrfs_set_extent_refs(leaf, ei, 0);
1055
1056 /* pin down the bytes for this extent */
1057 mutex_lock(&info->pinned_mutex);
1058 ret = pin_down_bytes(trans, extent_root, tmp->bytenr,
1059 tmp->num_bytes, tmp->level >=
1060 BTRFS_FIRST_FREE_OBJECTID);
1061 mutex_unlock(&info->pinned_mutex);
1062 BUG_ON(ret < 0);
1063
1064 /*
1065 * use the del field to tell if we need to go ahead and
1066 * free up the extent when we delete the item or not.
1067 */
1068 tmp->del = ret;
1069 bytes_freed += tmp->num_bytes;
1070
1071 num_to_del += 2;
1072 cur_slot += 2;
1073 }
1074 end = pos;
1075
1076 /* update the free space counters */
1077 spin_lock(&info->delalloc_lock);
1078 super_used = btrfs_super_bytes_used(&info->super_copy);
1079 btrfs_set_super_bytes_used(&info->super_copy,
1080 super_used - bytes_freed);
1081
1082 root_used = btrfs_root_used(&extent_root->root_item);
1083 btrfs_set_root_used(&extent_root->root_item,
1084 root_used - bytes_freed);
1085 spin_unlock(&info->delalloc_lock);
1086
1087 /* delete the items */
1088 ret = btrfs_del_items(trans, extent_root, path,
1089 path->slots[0], num_to_del);
1090 BUG_ON(ret);
1091
1092 /*
1093 * loop through the extents we deleted and do the cleanup work
1094 * on them
1095 */
1096 for (pos = cur, n = pos->next; pos != end;
1097 pos = n, n = pos->next) {
1098 struct pending_extent_op *tmp;
1099 tmp = list_entry(pos, struct pending_extent_op, list);
1100
1101 /*
1102 * remember tmp->del tells us whether or not we pinned
1103 * down the extent
1104 */
1105 ret = update_block_group(trans, extent_root,
1106 tmp->bytenr, tmp->num_bytes, 0,
1107 tmp->del);
1108 BUG_ON(ret);
1109
1110 list_del_init(&tmp->list);
1111 unlock_extent(&info->extent_ins, tmp->bytenr,
1112 tmp->bytenr + tmp->num_bytes - 1,
1113 GFP_NOFS);
1114 kfree(tmp);
1115 }
1116 } else if (refs && found_extent) {
1117 /*
1118 * the ref and extent were right next to each other, but the
1119 * extent still has a ref, so just free the backref and keep
1120 * going
1121 */
1122 ret = remove_extent_backref(trans, extent_root, path);
1123 BUG_ON(ret);
1124
1125 list_del_init(&op->list);
1126 unlock_extent(&info->extent_ins, op->bytenr,
1127 op->bytenr + op->num_bytes - 1, GFP_NOFS);
1128 kfree(op);
1129 } else {
1130 /*
1131 * the extent has multiple refs and the backref we were looking
1132 * for was not right next to it, so just unlock and go next,
1133 * we're good to go
1134 */
1135 list_del_init(&op->list);
1136 unlock_extent(&info->extent_ins, op->bytenr,
1137 op->bytenr + op->num_bytes - 1, GFP_NOFS);
1138 kfree(op);
1139 }
1140
1141 btrfs_release_path(extent_root, path);
1142 if (!list_empty(del_list))
1143 goto search;
1144
1145 out:
1146 btrfs_free_path(path);
1147 return ret;
1148 }
1149
1150 static int __btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
1151 struct btrfs_root *root, u64 bytenr,
1152 u64 orig_parent, u64 parent,
1153 u64 orig_root, u64 ref_root,
1154 u64 orig_generation, u64 ref_generation,
1155 u64 owner_objectid)
1156 {
1157 int ret;
1158 struct btrfs_root *extent_root = root->fs_info->extent_root;
1159 struct btrfs_path *path;
1160
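/*
 * refs owned by the extent root itself are not updated here; the change is
 * recorded as a PENDING_BACKREF_UPDATE op (or folded into an already
 * pending op for the same extent) and applied later when the pending
 * extent work is processed.
 */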
1161 if (root == root->fs_info->extent_root) {
1162 struct pending_extent_op *extent_op;
1163 u64 num_bytes;
1164
1165 BUG_ON(owner_objectid >= BTRFS_MAX_LEVEL);
1166 num_bytes = btrfs_level_size(root, (int)owner_objectid);
1167 mutex_lock(&root->fs_info->extent_ins_mutex);
1168 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
1169 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
1170 u64 priv;
1171 ret = get_state_private(&root->fs_info->extent_ins,
1172 bytenr, &priv);
1173 BUG_ON(ret);
1174 extent_op = (struct pending_extent_op *)
1175 (unsigned long)priv;
1176 BUG_ON(extent_op->parent != orig_parent);
1177 BUG_ON(extent_op->generation != orig_generation);
1178
1179 extent_op->parent = parent;
1180 extent_op->generation = ref_generation;
1181 } else {
1182 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
1183 BUG_ON(!extent_op);
1184
1185 extent_op->type = PENDING_BACKREF_UPDATE;
1186 extent_op->bytenr = bytenr;
1187 extent_op->num_bytes = num_bytes;
1188 extent_op->parent = parent;
1189 extent_op->orig_parent = orig_parent;
1190 extent_op->generation = ref_generation;
1191 extent_op->orig_generation = orig_generation;
1192 extent_op->level = (int)owner_objectid;
1193 INIT_LIST_HEAD(&extent_op->list);
1194 extent_op->del = 0;
1195
1196 set_extent_bits(&root->fs_info->extent_ins,
1197 bytenr, bytenr + num_bytes - 1,
1198 EXTENT_WRITEBACK, GFP_NOFS);
1199 set_state_private(&root->fs_info->extent_ins,
1200 bytenr, (unsigned long)extent_op);
1201 }
1202 mutex_unlock(&root->fs_info->extent_ins_mutex);
1203 return 0;
1204 }
1205
1206 path = btrfs_alloc_path();
1207 if (!path)
1208 return -ENOMEM;
1209 ret = lookup_extent_backref(trans, extent_root, path,
1210 bytenr, orig_parent, orig_root,
1211 orig_generation, owner_objectid, 1);
1212 if (ret)
1213 goto out;
1214 ret = remove_extent_backref(trans, extent_root, path);
1215 if (ret)
1216 goto out;
1217 ret = insert_extent_backref(trans, extent_root, path, bytenr,
1218 parent, ref_root, ref_generation,
1219 owner_objectid);
1220 BUG_ON(ret);
1221 finish_current_insert(trans, extent_root, 0);
1222 del_pending_extents(trans, extent_root, 0);
1223 out:
1224 btrfs_free_path(path);
1225 return ret;
1226 }
1227
1228 int btrfs_update_extent_ref(struct btrfs_trans_handle *trans,
1229 struct btrfs_root *root, u64 bytenr,
1230 u64 orig_parent, u64 parent,
1231 u64 ref_root, u64 ref_generation,
1232 u64 owner_objectid)
1233 {
1234 int ret;
1235 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
1236 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
1237 return 0;
1238 ret = __btrfs_update_extent_ref(trans, root, bytenr, orig_parent,
1239 parent, ref_root, ref_root,
1240 ref_generation, ref_generation,
1241 owner_objectid);
1242 return ret;
1243 }
1244
1245 static int __btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1246 struct btrfs_root *root, u64 bytenr,
1247 u64 orig_parent, u64 parent,
1248 u64 orig_root, u64 ref_root,
1249 u64 orig_generation, u64 ref_generation,
1250 u64 owner_objectid)
1251 {
1252 struct btrfs_path *path;
1253 int ret;
1254 struct btrfs_key key;
1255 struct extent_buffer *l;
1256 struct btrfs_extent_item *item;
1257 u32 refs;
1258
1259 path = btrfs_alloc_path();
1260 if (!path)
1261 return -ENOMEM;
1262
1263 path->reada = 1;
1264 key.objectid = bytenr;
1265 key.type = BTRFS_EXTENT_ITEM_KEY;
1266 key.offset = (u64)-1;
1267
1268 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
1269 0, 1);
1270 if (ret < 0)
1271 return ret;
1272 BUG_ON(ret == 0 || path->slots[0] == 0);
1273
1274 path->slots[0]--;
1275 l = path->nodes[0];
1276
1277 btrfs_item_key_to_cpu(l, &key, path->slots[0]);
1278 if (key.objectid != bytenr) {
1279 btrfs_print_leaf(root->fs_info->extent_root, path->nodes[0]);
1280 printk(KERN_ERR "btrfs wanted %llu found %llu\n",
1281 (unsigned long long)bytenr,
1282 (unsigned long long)key.objectid);
1283 BUG();
1284 }
1285 BUG_ON(key.type != BTRFS_EXTENT_ITEM_KEY);
1286
1287 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
1288 refs = btrfs_extent_refs(l, item);
1289 btrfs_set_extent_refs(l, item, refs + 1);
1290 btrfs_mark_buffer_dirty(path->nodes[0]);
1291
1292 btrfs_release_path(root->fs_info->extent_root, path);
1293
1294 path->reada = 1;
1295 ret = insert_extent_backref(trans, root->fs_info->extent_root,
1296 path, bytenr, parent,
1297 ref_root, ref_generation,
1298 owner_objectid);
1299 BUG_ON(ret);
1300 finish_current_insert(trans, root->fs_info->extent_root, 0);
1301 del_pending_extents(trans, root->fs_info->extent_root, 0);
1302
1303 btrfs_free_path(path);
1304 return 0;
1305 }
1306
1307 int btrfs_inc_extent_ref(struct btrfs_trans_handle *trans,
1308 struct btrfs_root *root,
1309 u64 bytenr, u64 num_bytes, u64 parent,
1310 u64 ref_root, u64 ref_generation,
1311 u64 owner_objectid)
1312 {
1313 int ret;
1314 if (ref_root == BTRFS_TREE_LOG_OBJECTID &&
1315 owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
1316 return 0;
1317 ret = __btrfs_inc_extent_ref(trans, root, bytenr, 0, parent,
1318 0, ref_root, 0, ref_generation,
1319 owner_objectid);
1320 return ret;
1321 }
1322
1323 int btrfs_extent_post_op(struct btrfs_trans_handle *trans,
1324 struct btrfs_root *root)
1325 {
1326 u64 start;
1327 u64 end;
1328 int ret;
1329
1330 while (1) {
1331 finish_current_insert(trans, root->fs_info->extent_root, 1);
1332 del_pending_extents(trans, root->fs_info->extent_root, 1);
1333
1334 /* is there more work to do? */
1335 ret = find_first_extent_bit(&root->fs_info->pending_del,
1336 0, &start, &end, EXTENT_WRITEBACK);
1337 if (!ret)
1338 continue;
1339 ret = find_first_extent_bit(&root->fs_info->extent_ins,
1340 0, &start, &end, EXTENT_WRITEBACK);
1341 if (!ret)
1342 continue;
1343 break;
1344 }
1345 return 0;
1346 }
1347
1348 int btrfs_lookup_extent_ref(struct btrfs_trans_handle *trans,
1349 struct btrfs_root *root, u64 bytenr,
1350 u64 num_bytes, u32 *refs)
1351 {
1352 struct btrfs_path *path;
1353 int ret;
1354 struct btrfs_key key;
1355 struct extent_buffer *l;
1356 struct btrfs_extent_item *item;
1357
1358 WARN_ON(num_bytes < root->sectorsize);
1359 path = btrfs_alloc_path();
1360 path->reada = 1;
1361 key.objectid = bytenr;
1362 key.offset = num_bytes;
1363 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
1364 ret = btrfs_search_slot(trans, root->fs_info->extent_root, &key, path,
1365 0, 0);
1366 if (ret < 0)
1367 goto out;
1368 if (ret != 0) {
1369 btrfs_print_leaf(root, path->nodes[0]);
1370 printk(KERN_INFO "btrfs failed to find block number %llu\n",
1371 (unsigned long long)bytenr);
1372 BUG();
1373 }
1374 l = path->nodes[0];
1375 item = btrfs_item_ptr(l, path->slots[0], struct btrfs_extent_item);
1376 *refs = btrfs_extent_refs(l, item);
1377 out:
1378 btrfs_free_path(path);
1379 return 0;
1380 }
1381
1382 int btrfs_cross_ref_exist(struct btrfs_trans_handle *trans,
1383 struct btrfs_root *root, u64 objectid, u64 bytenr)
1384 {
1385 struct btrfs_root *extent_root = root->fs_info->extent_root;
1386 struct btrfs_path *path;
1387 struct extent_buffer *leaf;
1388 struct btrfs_extent_ref *ref_item;
1389 struct btrfs_key key;
1390 struct btrfs_key found_key;
1391 u64 ref_root;
1392 u64 last_snapshot;
1393 u32 nritems;
1394 int ret;
1395
1396 key.objectid = bytenr;
1397 key.offset = (u64)-1;
1398 key.type = BTRFS_EXTENT_ITEM_KEY;
1399
1400 path = btrfs_alloc_path();
1401 ret = btrfs_search_slot(NULL, extent_root, &key, path, 0, 0);
1402 if (ret < 0)
1403 goto out;
1404 BUG_ON(ret == 0);
1405
1406 ret = -ENOENT;
1407 if (path->slots[0] == 0)
1408 goto out;
1409
1410 path->slots[0]--;
1411 leaf = path->nodes[0];
1412 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1413
1414 if (found_key.objectid != bytenr ||
1415 found_key.type != BTRFS_EXTENT_ITEM_KEY)
1416 goto out;
1417
1418 last_snapshot = btrfs_root_last_snapshot(&root->root_item);
1419 while (1) {
1420 leaf = path->nodes[0];
1421 nritems = btrfs_header_nritems(leaf);
1422 if (path->slots[0] >= nritems) {
1423 ret = btrfs_next_leaf(extent_root, path);
1424 if (ret < 0)
1425 goto out;
1426 if (ret == 0)
1427 continue;
1428 break;
1429 }
1430 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
1431 if (found_key.objectid != bytenr)
1432 break;
1433
1434 if (found_key.type != BTRFS_EXTENT_REF_KEY) {
1435 path->slots[0]++;
1436 continue;
1437 }
1438
1439 ref_item = btrfs_item_ptr(leaf, path->slots[0],
1440 struct btrfs_extent_ref);
1441 ref_root = btrfs_ref_root(leaf, ref_item);
1442 if ((ref_root != root->root_key.objectid &&
1443 ref_root != BTRFS_TREE_LOG_OBJECTID) ||
1444 objectid != btrfs_ref_objectid(leaf, ref_item)) {
1445 ret = 1;
1446 goto out;
1447 }
1448 if (btrfs_ref_generation(leaf, ref_item) <= last_snapshot) {
1449 ret = 1;
1450 goto out;
1451 }
1452
1453 path->slots[0]++;
1454 }
1455 ret = 0;
1456 out:
1457 btrfs_free_path(path);
1458 return ret;
1459 }
1460
1461 int btrfs_cache_ref(struct btrfs_trans_handle *trans, struct btrfs_root *root,
1462 struct extent_buffer *buf, u32 nr_extents)
1463 {
1464 struct btrfs_key key;
1465 struct btrfs_file_extent_item *fi;
1466 u64 root_gen;
1467 u32 nritems;
1468 int i;
1469 int level;
1470 int ret = 0;
1471 int shared = 0;
1472
1473 if (!root->ref_cows)
1474 return 0;
1475
1476 if (root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID) {
1477 shared = 0;
1478 root_gen = root->root_key.offset;
1479 } else {
1480 shared = 1;
1481 root_gen = trans->transid - 1;
1482 }
1483
1484 level = btrfs_header_level(buf);
1485 nritems = btrfs_header_nritems(buf);
1486
1487 if (level == 0) {
1488 struct btrfs_leaf_ref *ref;
1489 struct btrfs_extent_info *info;
1490
1491 ref = btrfs_alloc_leaf_ref(root, nr_extents);
1492 if (!ref) {
1493 ret = -ENOMEM;
1494 goto out;
1495 }
1496
1497 ref->root_gen = root_gen;
1498 ref->bytenr = buf->start;
1499 ref->owner = btrfs_header_owner(buf);
1500 ref->generation = btrfs_header_generation(buf);
1501 ref->nritems = nr_extents;
1502 info = ref->extents;
1503
1504 for (i = 0; nr_extents > 0 && i < nritems; i++) {
1505 u64 disk_bytenr;
1506 btrfs_item_key_to_cpu(buf, &key, i);
1507 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1508 continue;
1509 fi = btrfs_item_ptr(buf, i,
1510 struct btrfs_file_extent_item);
1511 if (btrfs_file_extent_type(buf, fi) ==
1512 BTRFS_FILE_EXTENT_INLINE)
1513 continue;
1514 disk_bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1515 if (disk_bytenr == 0)
1516 continue;
1517
1518 info->bytenr = disk_bytenr;
1519 info->num_bytes =
1520 btrfs_file_extent_disk_num_bytes(buf, fi);
1521 info->objectid = key.objectid;
1522 info->offset = key.offset;
1523 info++;
1524 }
1525
1526 ret = btrfs_add_leaf_ref(root, ref, shared);
1527 if (ret == -EEXIST && shared) {
1528 struct btrfs_leaf_ref *old;
1529 old = btrfs_lookup_leaf_ref(root, ref->bytenr);
1530 BUG_ON(!old);
1531 btrfs_remove_leaf_ref(root, old);
1532 btrfs_free_leaf_ref(root, old);
1533 ret = btrfs_add_leaf_ref(root, ref, shared);
1534 }
1535 WARN_ON(ret);
1536 btrfs_free_leaf_ref(root, ref);
1537 }
1538 out:
1539 return ret;
1540 }
1541
1542 /* when a block goes through cow, we update the reference counts of
1543 * everything that block points to. The internal pointers of the block
1544 * can be in just about any order, and it is likely to have clusters of
1545 * things that are close together and clusters of things that are not.
1546 *
1547 * To help reduce the seeks that come with updating all of these reference
1548 * counts, sort them by byte number before actual updates are done.
1549 *
1550 * struct refsort is used to match byte number to slot in the btree block.
1551 * we sort based on the byte number and then use the slot to actually
1552 * find the item.
1553 *
1554 * struct refsort is smaller than struct btrfs_item and smaller than
1555 * struct btrfs_key_ptr. Since we're currently limited to the page size
1556 * for a btree block, there's no way for a kmalloc of refsorts for a
1557 * single node to be bigger than a page.
1558 */
1559 struct refsort {
1560 u64 bytenr;
1561 u32 slot;
1562 };
1563
1564 /*
1565 * for passing into sort()
1566 */
1567 static int refsort_cmp(const void *a_void, const void *b_void)
1568 {
1569 const struct refsort *a = a_void;
1570 const struct refsort *b = b_void;
1571
1572 if (a->bytenr < b->bytenr)
1573 return -1;
1574 if (a->bytenr > b->bytenr)
1575 return 1;
1576 return 0;
1577 }
1578
1579
1580 noinline int btrfs_inc_ref(struct btrfs_trans_handle *trans,
1581 struct btrfs_root *root,
1582 struct extent_buffer *orig_buf,
1583 struct extent_buffer *buf, u32 *nr_extents)
1584 {
1585 u64 bytenr;
1586 u64 ref_root;
1587 u64 orig_root;
1588 u64 ref_generation;
1589 u64 orig_generation;
1590 struct refsort *sorted;
1591 u32 nritems;
1592 u32 nr_file_extents = 0;
1593 struct btrfs_key key;
1594 struct btrfs_file_extent_item *fi;
1595 int i;
1596 int level;
1597 int ret = 0;
1598 int faili = 0;
1599 int refi = 0;
1600 int slot;
1601 int (*process_func)(struct btrfs_trans_handle *, struct btrfs_root *,
1602 u64, u64, u64, u64, u64, u64, u64, u64);
1603
1604 ref_root = btrfs_header_owner(buf);
1605 ref_generation = btrfs_header_generation(buf);
1606 orig_root = btrfs_header_owner(orig_buf);
1607 orig_generation = btrfs_header_generation(orig_buf);
1608
1609 nritems = btrfs_header_nritems(buf);
1610 level = btrfs_header_level(buf);
1611
1612 sorted = kmalloc(sizeof(struct refsort) * nritems, GFP_NOFS);
1613 BUG_ON(!sorted);
1614
1615 if (root->ref_cows) {
1616 process_func = __btrfs_inc_extent_ref;
1617 } else {
1618 if (level == 0 &&
1619 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1620 goto out;
1621 if (level != 0 &&
1622 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1623 goto out;
1624 process_func = __btrfs_update_extent_ref;
1625 }
1626
1627 /*
1628 * we make two passes through the items. In the first pass we
1629 * only record the byte number and slot. Then we sort based on
1630 * byte number and do the actual work based on the sorted results
1631 */
1632 for (i = 0; i < nritems; i++) {
1633 cond_resched();
1634 if (level == 0) {
1635 btrfs_item_key_to_cpu(buf, &key, i);
1636 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1637 continue;
1638 fi = btrfs_item_ptr(buf, i,
1639 struct btrfs_file_extent_item);
1640 if (btrfs_file_extent_type(buf, fi) ==
1641 BTRFS_FILE_EXTENT_INLINE)
1642 continue;
1643 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1644 if (bytenr == 0)
1645 continue;
1646
1647 nr_file_extents++;
1648 sorted[refi].bytenr = bytenr;
1649 sorted[refi].slot = i;
1650 refi++;
1651 } else {
1652 bytenr = btrfs_node_blockptr(buf, i);
1653 sorted[refi].bytenr = bytenr;
1654 sorted[refi].slot = i;
1655 refi++;
1656 }
1657 }
1658 /*
1659 * if refi == 0, we didn't actually put anything into the sorted
1660 * array and we're done
1661 */
1662 if (refi == 0)
1663 goto out;
1664
1665 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
1666
1667 for (i = 0; i < refi; i++) {
1668 cond_resched();
1669 slot = sorted[i].slot;
1670 bytenr = sorted[i].bytenr;
1671
1672 if (level == 0) {
1673 btrfs_item_key_to_cpu(buf, &key, slot);
1674
1675 ret = process_func(trans, root, bytenr,
1676 orig_buf->start, buf->start,
1677 orig_root, ref_root,
1678 orig_generation, ref_generation,
1679 key.objectid);
1680
1681 if (ret) {
1682 faili = slot;
1683 WARN_ON(1);
1684 goto fail;
1685 }
1686 } else {
1687 ret = process_func(trans, root, bytenr,
1688 orig_buf->start, buf->start,
1689 orig_root, ref_root,
1690 orig_generation, ref_generation,
1691 level - 1);
1692 if (ret) {
1693 faili = slot;
1694 WARN_ON(1);
1695 goto fail;
1696 }
1697 }
1698 }
1699 out:
1700 kfree(sorted);
1701 if (nr_extents) {
1702 if (level == 0)
1703 *nr_extents = nr_file_extents;
1704 else
1705 *nr_extents = nritems;
1706 }
1707 return 0;
1708 fail:
1709 kfree(sorted);
1710 WARN_ON(1);
1711 return ret;
1712 }
1713
1714 int btrfs_update_ref(struct btrfs_trans_handle *trans,
1715 struct btrfs_root *root, struct extent_buffer *orig_buf,
1716 struct extent_buffer *buf, int start_slot, int nr)
1717
1718 {
1719 u64 bytenr;
1720 u64 ref_root;
1721 u64 orig_root;
1722 u64 ref_generation;
1723 u64 orig_generation;
1724 struct btrfs_key key;
1725 struct btrfs_file_extent_item *fi;
1726 int i;
1727 int ret;
1728 int slot;
1729 int level;
1730
1731 BUG_ON(start_slot < 0);
1732 BUG_ON(start_slot + nr > btrfs_header_nritems(buf));
1733
1734 ref_root = btrfs_header_owner(buf);
1735 ref_generation = btrfs_header_generation(buf);
1736 orig_root = btrfs_header_owner(orig_buf);
1737 orig_generation = btrfs_header_generation(orig_buf);
1738 level = btrfs_header_level(buf);
1739
1740 if (!root->ref_cows) {
1741 if (level == 0 &&
1742 root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID)
1743 return 0;
1744 if (level != 0 &&
1745 root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID)
1746 return 0;
1747 }
1748
1749 for (i = 0, slot = start_slot; i < nr; i++, slot++) {
1750 cond_resched();
1751 if (level == 0) {
1752 btrfs_item_key_to_cpu(buf, &key, slot);
1753 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
1754 continue;
1755 fi = btrfs_item_ptr(buf, slot,
1756 struct btrfs_file_extent_item);
1757 if (btrfs_file_extent_type(buf, fi) ==
1758 BTRFS_FILE_EXTENT_INLINE)
1759 continue;
1760 bytenr = btrfs_file_extent_disk_bytenr(buf, fi);
1761 if (bytenr == 0)
1762 continue;
1763 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1764 orig_buf->start, buf->start,
1765 orig_root, ref_root,
1766 orig_generation, ref_generation,
1767 key.objectid);
1768 if (ret)
1769 goto fail;
1770 } else {
1771 bytenr = btrfs_node_blockptr(buf, slot);
1772 ret = __btrfs_update_extent_ref(trans, root, bytenr,
1773 orig_buf->start, buf->start,
1774 orig_root, ref_root,
1775 orig_generation, ref_generation,
1776 level - 1);
1777 if (ret)
1778 goto fail;
1779 }
1780 }
1781 return 0;
1782 fail:
1783 WARN_ON(1);
1784 return -1;
1785 }
1786
1787 static int write_one_cache_group(struct btrfs_trans_handle *trans,
1788 struct btrfs_root *root,
1789 struct btrfs_path *path,
1790 struct btrfs_block_group_cache *cache)
1791 {
1792 int ret;
1793 int pending_ret;
1794 struct btrfs_root *extent_root = root->fs_info->extent_root;
1795 unsigned long bi;
1796 struct extent_buffer *leaf;
1797
1798 ret = btrfs_search_slot(trans, extent_root, &cache->key, path, 0, 1);
1799 if (ret < 0)
1800 goto fail;
1801 BUG_ON(ret);
1802
1803 leaf = path->nodes[0];
1804 bi = btrfs_item_ptr_offset(leaf, path->slots[0]);
1805 write_extent_buffer(leaf, &cache->item, bi, sizeof(cache->item));
1806 btrfs_mark_buffer_dirty(leaf);
1807 btrfs_release_path(extent_root, path);
1808 fail:
1809 finish_current_insert(trans, extent_root, 0);
1810 pending_ret = del_pending_extents(trans, extent_root, 0);
1811 if (ret)
1812 return ret;
1813 if (pending_ret)
1814 return pending_ret;
1815 return 0;
1816
1817 }
1818
1819 int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
1820 struct btrfs_root *root)
1821 {
1822 struct btrfs_block_group_cache *cache, *entry;
1823 struct rb_node *n;
1824 int err = 0;
1825 int werr = 0;
1826 struct btrfs_path *path;
1827 u64 last = 0;
1828
1829 path = btrfs_alloc_path();
1830 if (!path)
1831 return -ENOMEM;
1832
1833 while (1) {
1834 cache = NULL;
1835 spin_lock(&root->fs_info->block_group_cache_lock);
1836 for (n = rb_first(&root->fs_info->block_group_cache_tree);
1837 n; n = rb_next(n)) {
1838 entry = rb_entry(n, struct btrfs_block_group_cache,
1839 cache_node);
1840 if (entry->dirty) {
1841 cache = entry;
1842 break;
1843 }
1844 }
1845 spin_unlock(&root->fs_info->block_group_cache_lock);
1846
1847 if (!cache)
1848 break;
1849
1850 cache->dirty = 0;
1851 last += cache->key.offset;
1852
1853 err = write_one_cache_group(trans, root,
1854 path, cache);
1855 /*
1856 * if we fail to write the cache group, we want
1857 * to keep it marked dirty in hopes that a later
1858 * write will work
1859 */
1860 if (err) {
1861 werr = err;
1862 continue;
1863 }
1864 }
1865 btrfs_free_path(path);
1866 return werr;
1867 }
1868
1869 int btrfs_extent_readonly(struct btrfs_root *root, u64 bytenr)
1870 {
1871 struct btrfs_block_group_cache *block_group;
1872 int readonly = 0;
1873
1874 block_group = btrfs_lookup_block_group(root->fs_info, bytenr);
1875 if (!block_group || block_group->ro)
1876 readonly = 1;
1877 if (block_group)
1878 put_block_group(block_group);
1879 return readonly;
1880 }
1881
1882 static int update_space_info(struct btrfs_fs_info *info, u64 flags,
1883 u64 total_bytes, u64 bytes_used,
1884 struct btrfs_space_info **space_info)
1885 {
1886 struct btrfs_space_info *found;
1887
1888 found = __find_space_info(info, flags);
1889 if (found) {
1890 spin_lock(&found->lock);
1891 found->total_bytes += total_bytes;
1892 found->bytes_used += bytes_used;
1893 found->full = 0;
1894 spin_unlock(&found->lock);
1895 *space_info = found;
1896 return 0;
1897 }
1898 found = kzalloc(sizeof(*found), GFP_NOFS);
1899 if (!found)
1900 return -ENOMEM;
1901
1902 list_add(&found->list, &info->space_info);
1903 INIT_LIST_HEAD(&found->block_groups);
1904 init_rwsem(&found->groups_sem);
1905 spin_lock_init(&found->lock);
1906 found->flags = flags;
1907 found->total_bytes = total_bytes;
1908 found->bytes_used = bytes_used;
1909 found->bytes_pinned = 0;
1910 found->bytes_reserved = 0;
1911 found->bytes_readonly = 0;
1912 found->full = 0;
1913 found->force_alloc = 0;
1914 *space_info = found;
1915 return 0;
1916 }
1917
1918 static void set_avail_alloc_bits(struct btrfs_fs_info *fs_info, u64 flags)
1919 {
1920 u64 extra_flags = flags & (BTRFS_BLOCK_GROUP_RAID0 |
1921 BTRFS_BLOCK_GROUP_RAID1 |
1922 BTRFS_BLOCK_GROUP_RAID10 |
1923 BTRFS_BLOCK_GROUP_DUP);
1924 if (extra_flags) {
1925 if (flags & BTRFS_BLOCK_GROUP_DATA)
1926 fs_info->avail_data_alloc_bits |= extra_flags;
1927 if (flags & BTRFS_BLOCK_GROUP_METADATA)
1928 fs_info->avail_metadata_alloc_bits |= extra_flags;
1929 if (flags & BTRFS_BLOCK_GROUP_SYSTEM)
1930 fs_info->avail_system_alloc_bits |= extra_flags;
1931 }
1932 }
1933
1934 static void set_block_group_readonly(struct btrfs_block_group_cache *cache)
1935 {
1936 spin_lock(&cache->space_info->lock);
1937 spin_lock(&cache->lock);
1938 if (!cache->ro) {
1939 cache->space_info->bytes_readonly += cache->key.offset -
1940 btrfs_block_group_used(&cache->item);
1941 cache->ro = 1;
1942 }
1943 spin_unlock(&cache->lock);
1944 spin_unlock(&cache->space_info->lock);
1945 }
1946
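/*
 * drop profile bits that the current set of rw devices cannot honour
 * (RAID1/RAID0 with a single device, RAID10 with fewer than four) and,
 * when several redundant profiles are set at once, keep only the most
 * capable one: RAID10 over RAID1 over DUP, and any of those over RAID0.
 */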
1947 u64 btrfs_reduce_alloc_profile(struct btrfs_root *root, u64 flags)
1948 {
1949 u64 num_devices = root->fs_info->fs_devices->rw_devices;
1950
1951 if (num_devices == 1)
1952 flags &= ~(BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID0);
1953 if (num_devices < 4)
1954 flags &= ~BTRFS_BLOCK_GROUP_RAID10;
1955
1956 if ((flags & BTRFS_BLOCK_GROUP_DUP) &&
1957 (flags & (BTRFS_BLOCK_GROUP_RAID1 |
1958 BTRFS_BLOCK_GROUP_RAID10))) {
1959 flags &= ~BTRFS_BLOCK_GROUP_DUP;
1960 }
1961
1962 if ((flags & BTRFS_BLOCK_GROUP_RAID1) &&
1963 (flags & BTRFS_BLOCK_GROUP_RAID10)) {
1964 flags &= ~BTRFS_BLOCK_GROUP_RAID1;
1965 }
1966
1967 if ((flags & BTRFS_BLOCK_GROUP_RAID0) &&
1968 ((flags & BTRFS_BLOCK_GROUP_RAID1) |
1969 (flags & BTRFS_BLOCK_GROUP_RAID10) |
1970 (flags & BTRFS_BLOCK_GROUP_DUP)))
1971 flags &= ~BTRFS_BLOCK_GROUP_RAID0;
1972 return flags;
1973 }
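
/*
 * Illustrative sketch, not part of the original file: the reduction rules
 * above can be exercised in userspace.  The BG_* values below are stand-ins
 * assumed to mirror the BTRFS_BLOCK_GROUP_* bits from ctree.h; treat them
 * and reduce_profile() as hypothetical helpers, not kernel API.
 */
#include <stdint.h>
#include <stdio.h>

#define BG_RAID0   (1ULL << 3)   /* stand-in for BTRFS_BLOCK_GROUP_RAID0 */
#define BG_RAID1   (1ULL << 4)   /* stand-in for BTRFS_BLOCK_GROUP_RAID1 */
#define BG_DUP     (1ULL << 5)   /* stand-in for BTRFS_BLOCK_GROUP_DUP */
#define BG_RAID10  (1ULL << 6)   /* stand-in for BTRFS_BLOCK_GROUP_RAID10 */

/* same reduction order as btrfs_reduce_alloc_profile() above */
static uint64_t reduce_profile(uint64_t flags, uint64_t num_devices)
{
        if (num_devices == 1)
                flags &= ~(BG_RAID1 | BG_RAID0);
        if (num_devices < 4)
                flags &= ~BG_RAID10;

        if ((flags & BG_DUP) && (flags & (BG_RAID1 | BG_RAID10)))
                flags &= ~BG_DUP;
        if ((flags & BG_RAID1) && (flags & BG_RAID10))
                flags &= ~BG_RAID1;
        if ((flags & BG_RAID0) && (flags & (BG_RAID1 | BG_RAID10 | BG_DUP)))
                flags &= ~BG_RAID0;
        return flags;
}

int main(void)
{
        /* RAID1|RAID10 requested with only two rw devices collapses to RAID1 */
        printf("%#llx\n",
               (unsigned long long)reduce_profile(BG_RAID1 | BG_RAID10, 2));
        return 0;
}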
1974
1975 static int do_chunk_alloc(struct btrfs_trans_handle *trans,
1976 struct btrfs_root *extent_root, u64 alloc_bytes,
1977 u64 flags, int force)
1978 {
1979 struct btrfs_space_info *space_info;
1980 u64 thresh;
1981 int ret = 0;
1982
1983 mutex_lock(&extent_root->fs_info->chunk_mutex);
1984
1985 flags = btrfs_reduce_alloc_profile(extent_root, flags);
1986
1987 space_info = __find_space_info(extent_root->fs_info, flags);
1988 if (!space_info) {
1989 ret = update_space_info(extent_root->fs_info, flags,
1990 0, 0, &space_info);
1991 BUG_ON(ret);
1992 }
1993 BUG_ON(!space_info);
1994
1995 spin_lock(&space_info->lock);
1996 if (space_info->force_alloc) {
1997 force = 1;
1998 space_info->force_alloc = 0;
1999 }
2000 if (space_info->full) {
2001 spin_unlock(&space_info->lock);
2002 goto out;
2003 }
2004
2005 thresh = space_info->total_bytes - space_info->bytes_readonly;
2006 thresh = div_factor(thresh, 6);
2007 if (!force &&
2008 (space_info->bytes_used + space_info->bytes_pinned +
2009 space_info->bytes_reserved + alloc_bytes) < thresh) {
2010 spin_unlock(&space_info->lock);
2011 goto out;
2012 }
2013 spin_unlock(&space_info->lock);
2014
2015 ret = btrfs_alloc_chunk(trans, extent_root, flags);
2016 if (ret)
2017 space_info->full = 1;
2018 out:
2019 mutex_unlock(&extent_root->fs_info->chunk_mutex);
2020 return ret;
2021 }
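
/*
 * Illustrative sketch, not part of the original file: the threshold check in
 * do_chunk_alloc() above, assuming div_factor(n, 6) evaluates to n * 6 / 10
 * as defined elsewhere in ctree.h.  A new chunk is attempted once used +
 * pinned + reserved plus the requested bytes reaches ~60% of the writable
 * space for this profile (or when force/force_alloc is set).
 */
#include <stdint.h>
#include <stdio.h>

static int should_alloc_chunk(uint64_t total, uint64_t readonly,
                              uint64_t used, uint64_t pinned,
                              uint64_t reserved, uint64_t alloc_bytes)
{
        uint64_t thresh = (total - readonly) * 6 / 10;

        return used + pinned + reserved + alloc_bytes >= thresh;
}

int main(void)
{
        uint64_t gib = 1024ULL * 1024 * 1024;

        /* 10 GiB total, 5 GiB used: a 2 GiB request crosses the 6 GiB mark */
        printf("%d\n", should_alloc_chunk(10 * gib, 0, 5 * gib, 0, 0, 2 * gib));
        return 0;
}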
2022
2023 static int update_block_group(struct btrfs_trans_handle *trans,
2024 struct btrfs_root *root,
2025 u64 bytenr, u64 num_bytes, int alloc,
2026 int mark_free)
2027 {
2028 struct btrfs_block_group_cache *cache;
2029 struct btrfs_fs_info *info = root->fs_info;
2030 u64 total = num_bytes;
2031 u64 old_val;
2032 u64 byte_in_group;
2033
2034 while (total) {
2035 cache = btrfs_lookup_block_group(info, bytenr);
2036 if (!cache)
2037 return -1;
2038 byte_in_group = bytenr - cache->key.objectid;
2039 WARN_ON(byte_in_group > cache->key.offset);
2040
2041 spin_lock(&cache->space_info->lock);
2042 spin_lock(&cache->lock);
2043 cache->dirty = 1;
2044 old_val = btrfs_block_group_used(&cache->item);
2045 num_bytes = min(total, cache->key.offset - byte_in_group);
2046 if (alloc) {
2047 old_val += num_bytes;
2048 cache->space_info->bytes_used += num_bytes;
2049 if (cache->ro)
2050 cache->space_info->bytes_readonly -= num_bytes;
2051 btrfs_set_block_group_used(&cache->item, old_val);
2052 spin_unlock(&cache->lock);
2053 spin_unlock(&cache->space_info->lock);
2054 } else {
2055 old_val -= num_bytes;
2056 cache->space_info->bytes_used -= num_bytes;
2057 if (cache->ro)
2058 cache->space_info->bytes_readonly += num_bytes;
2059 btrfs_set_block_group_used(&cache->item, old_val);
2060 spin_unlock(&cache->lock);
2061 spin_unlock(&cache->space_info->lock);
2062 if (mark_free) {
2063 int ret;
2064
2065 ret = btrfs_discard_extent(root, bytenr,
2066 num_bytes);
2067 WARN_ON(ret);
2068
2069 ret = btrfs_add_free_space(cache, bytenr,
2070 num_bytes);
2071 WARN_ON(ret);
2072 }
2073 }
2074 put_block_group(cache);
2075 total -= num_bytes;
2076 bytenr += num_bytes;
2077 }
2078 return 0;
2079 }
2080
2081 static u64 first_logical_byte(struct btrfs_root *root, u64 search_start)
2082 {
2083 struct btrfs_block_group_cache *cache;
2084 u64 bytenr;
2085
2086 cache = btrfs_lookup_first_block_group(root->fs_info, search_start);
2087 if (!cache)
2088 return 0;
2089
2090 bytenr = cache->key.objectid;
2091 put_block_group(cache);
2092
2093 return bytenr;
2094 }
2095
2096 int btrfs_update_pinned_extents(struct btrfs_root *root,
2097 u64 bytenr, u64 num, int pin)
2098 {
2099 u64 len;
2100 struct btrfs_block_group_cache *cache;
2101 struct btrfs_fs_info *fs_info = root->fs_info;
2102
2103 WARN_ON(!mutex_is_locked(&root->fs_info->pinned_mutex));
2104 if (pin) {
2105 set_extent_dirty(&fs_info->pinned_extents,
2106 bytenr, bytenr + num - 1, GFP_NOFS);
2107 } else {
2108 clear_extent_dirty(&fs_info->pinned_extents,
2109 bytenr, bytenr + num - 1, GFP_NOFS);
2110 }
2111 while (num > 0) {
2112 cache = btrfs_lookup_block_group(fs_info, bytenr);
2113 BUG_ON(!cache);
2114 len = min(num, cache->key.offset -
2115 (bytenr - cache->key.objectid));
2116 if (pin) {
2117 spin_lock(&cache->space_info->lock);
2118 spin_lock(&cache->lock);
2119 cache->pinned += len;
2120 cache->space_info->bytes_pinned += len;
2121 spin_unlock(&cache->lock);
2122 spin_unlock(&cache->space_info->lock);
2123 fs_info->total_pinned += len;
2124 } else {
2125 spin_lock(&cache->space_info->lock);
2126 spin_lock(&cache->lock);
2127 cache->pinned -= len;
2128 cache->space_info->bytes_pinned -= len;
2129 spin_unlock(&cache->lock);
2130 spin_unlock(&cache->space_info->lock);
2131 fs_info->total_pinned -= len;
2132 if (cache->cached)
2133 btrfs_add_free_space(cache, bytenr, len);
2134 }
2135 put_block_group(cache);
2136 bytenr += len;
2137 num -= len;
2138 }
2139 return 0;
2140 }
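
/*
 * Illustrative sketch, not part of the original file: both the pin/unpin loop
 * above and update_reserved_extents() below walk a byte range one block group
 * at a time, clamping each step to what is left of the current group:
 *     len = min(num, cache->key.offset - (bytenr - cache->key.objectid));
 * The hypothetical groups below show a 3 MiB range that starts 1 MiB before a
 * block-group boundary being split into a 1 MiB piece and a 2 MiB piece.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* two made-up block groups: [0, 1 MiB) and [1 MiB, 4 MiB) */
        uint64_t group_start[2] = { 0, 1 << 20 };
        uint64_t group_len[2]   = { 1 << 20, 3 << 20 };
        uint64_t bytenr = 0, num = 3 << 20;
        int i = 0;

        while (num > 0) {
                uint64_t len = num;
                uint64_t room = group_len[i] - (bytenr - group_start[i]);

                if (len > room)
                        len = room;
                printf("group %d: %llu bytes\n", i, (unsigned long long)len);
                bytenr += len;
                num -= len;
                i++;
        }
        return 0;
}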
2141
2142 static int update_reserved_extents(struct btrfs_root *root,
2143 u64 bytenr, u64 num, int reserve)
2144 {
2145 u64 len;
2146 struct btrfs_block_group_cache *cache;
2147 struct btrfs_fs_info *fs_info = root->fs_info;
2148
2149 while (num > 0) {
2150 cache = btrfs_lookup_block_group(fs_info, bytenr);
2151 BUG_ON(!cache);
2152 len = min(num, cache->key.offset -
2153 (bytenr - cache->key.objectid));
2154
2155 spin_lock(&cache->space_info->lock);
2156 spin_lock(&cache->lock);
2157 if (reserve) {
2158 cache->reserved += len;
2159 cache->space_info->bytes_reserved += len;
2160 } else {
2161 cache->reserved -= len;
2162 cache->space_info->bytes_reserved -= len;
2163 }
2164 spin_unlock(&cache->lock);
2165 spin_unlock(&cache->space_info->lock);
2166 put_block_group(cache);
2167 bytenr += len;
2168 num -= len;
2169 }
2170 return 0;
2171 }
2172
2173 int btrfs_copy_pinned(struct btrfs_root *root, struct extent_io_tree *copy)
2174 {
2175 u64 last = 0;
2176 u64 start;
2177 u64 end;
2178 struct extent_io_tree *pinned_extents = &root->fs_info->pinned_extents;
2179 int ret;
2180
2181 mutex_lock(&root->fs_info->pinned_mutex);
2182 while (1) {
2183 ret = find_first_extent_bit(pinned_extents, last,
2184 &start, &end, EXTENT_DIRTY);
2185 if (ret)
2186 break;
2187 set_extent_dirty(copy, start, end, GFP_NOFS);
2188 last = end + 1;
2189 }
2190 mutex_unlock(&root->fs_info->pinned_mutex);
2191 return 0;
2192 }
2193
2194 int btrfs_finish_extent_commit(struct btrfs_trans_handle *trans,
2195 struct btrfs_root *root,
2196 struct extent_io_tree *unpin)
2197 {
2198 u64 start;
2199 u64 end;
2200 int ret;
2201
2202 mutex_lock(&root->fs_info->pinned_mutex);
2203 while (1) {
2204 ret = find_first_extent_bit(unpin, 0, &start, &end,
2205 EXTENT_DIRTY);
2206 if (ret)
2207 break;
2208
2209 ret = btrfs_discard_extent(root, start, end + 1 - start);
2210
2211 btrfs_update_pinned_extents(root, start, end + 1 - start, 0);
2212 clear_extent_dirty(unpin, start, end, GFP_NOFS);
2213
2214 if (need_resched()) {
2215 mutex_unlock(&root->fs_info->pinned_mutex);
2216 cond_resched();
2217 mutex_lock(&root->fs_info->pinned_mutex);
2218 }
2219 }
2220 mutex_unlock(&root->fs_info->pinned_mutex);
2221 return ret;
2222 }
2223
2224 static int finish_current_insert(struct btrfs_trans_handle *trans,
2225 struct btrfs_root *extent_root, int all)
2226 {
2227 u64 start;
2228 u64 end;
2229 u64 priv;
2230 u64 search = 0;
2231 struct btrfs_fs_info *info = extent_root->fs_info;
2232 struct btrfs_path *path;
2233 struct pending_extent_op *extent_op, *tmp;
2234 struct list_head insert_list, update_list;
2235 int ret;
2236 int num_inserts = 0, max_inserts, restart = 0;
2237
2238 path = btrfs_alloc_path();
2239 INIT_LIST_HEAD(&insert_list);
2240 INIT_LIST_HEAD(&update_list);
2241
2242 max_inserts = extent_root->leafsize /
2243 (2 * sizeof(struct btrfs_key) + 2 * sizeof(struct btrfs_item) +
2244 sizeof(struct btrfs_extent_ref) +
2245 sizeof(struct btrfs_extent_item));
2246 again:
2247 mutex_lock(&info->extent_ins_mutex);
2248 while (1) {
2249 ret = find_first_extent_bit(&info->extent_ins, search, &start,
2250 &end, EXTENT_WRITEBACK);
2251 if (ret) {
2252 if (restart && !num_inserts &&
2253 list_empty(&update_list)) {
2254 restart = 0;
2255 search = 0;
2256 continue;
2257 }
2258 break;
2259 }
2260
2261 ret = try_lock_extent(&info->extent_ins, start, end, GFP_NOFS);
2262 if (!ret) {
2263 if (all)
2264 restart = 1;
2265 search = end + 1;
2266 if (need_resched()) {
2267 mutex_unlock(&info->extent_ins_mutex);
2268 cond_resched();
2269 mutex_lock(&info->extent_ins_mutex);
2270 }
2271 continue;
2272 }
2273
2274 ret = get_state_private(&info->extent_ins, start, &priv);
2275 BUG_ON(ret);
2276 extent_op = (struct pending_extent_op *)(unsigned long) priv;
2277
2278 if (extent_op->type == PENDING_EXTENT_INSERT) {
2279 num_inserts++;
2280 list_add_tail(&extent_op->list, &insert_list);
2281 search = end + 1;
2282 if (num_inserts == max_inserts) {
2283 restart = 1;
2284 break;
2285 }
2286 } else if (extent_op->type == PENDING_BACKREF_UPDATE) {
2287 list_add_tail(&extent_op->list, &update_list);
2288 search = end + 1;
2289 } else {
2290 BUG();
2291 }
2292 }
2293
2294 /*
2295 * process the update list, clear the writeback bit for it, and if
2296 * somebody marked this thing for deletion then just unlock it and be
2297 * done; free_extents will handle it
2298 */
2299 list_for_each_entry_safe(extent_op, tmp, &update_list, list) {
2300 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2301 extent_op->bytenr + extent_op->num_bytes - 1,
2302 EXTENT_WRITEBACK, GFP_NOFS);
2303 if (extent_op->del) {
2304 list_del_init(&extent_op->list);
2305 unlock_extent(&info->extent_ins, extent_op->bytenr,
2306 extent_op->bytenr + extent_op->num_bytes
2307 - 1, GFP_NOFS);
2308 kfree(extent_op);
2309 }
2310 }
2311 mutex_unlock(&info->extent_ins_mutex);
2312
2313 /*
2314 * still have things left on the update list, go ahead and update
2315 * everything
2316 */
2317 if (!list_empty(&update_list)) {
2318 ret = update_backrefs(trans, extent_root, path, &update_list);
2319 BUG_ON(ret);
2320
2321 /* we may have COW'ed new blocks, so lets start over */
2322 if (all)
2323 restart = 1;
2324 }
2325
2326 /*
2327 * if no inserts need to be done, but we skipped some extents and we
2328 * need to make sure everything is cleaned then reset everything and
2329 * go back to the beginning
2330 */
2331 if (!num_inserts && restart) {
2332 search = 0;
2333 restart = 0;
2334 INIT_LIST_HEAD(&update_list);
2335 INIT_LIST_HEAD(&insert_list);
2336 goto again;
2337 } else if (!num_inserts) {
2338 goto out;
2339 }
2340
2341 /*
2342 * process the insert extents list. Again if we are deleting this
2343 * extent, then just unlock it, pin down the bytes if need be, and be
2344 * done with it. Saves us from having to actually insert the extent
2345 * into the tree and then subsequently come along and delete it
2346 */
2347 mutex_lock(&info->extent_ins_mutex);
2348 list_for_each_entry_safe(extent_op, tmp, &insert_list, list) {
2349 clear_extent_bits(&info->extent_ins, extent_op->bytenr,
2350 extent_op->bytenr + extent_op->num_bytes - 1,
2351 EXTENT_WRITEBACK, GFP_NOFS);
2352 if (extent_op->del) {
2353 u64 used;
2354 list_del_init(&extent_op->list);
2355 unlock_extent(&info->extent_ins, extent_op->bytenr,
2356 extent_op->bytenr + extent_op->num_bytes
2357 - 1, GFP_NOFS);
2358
2359 mutex_lock(&extent_root->fs_info->pinned_mutex);
2360 ret = pin_down_bytes(trans, extent_root,
2361 extent_op->bytenr,
2362 extent_op->num_bytes, 0);
2363 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2364
2365 spin_lock(&info->delalloc_lock);
2366 used = btrfs_super_bytes_used(&info->super_copy);
2367 btrfs_set_super_bytes_used(&info->super_copy,
2368 used - extent_op->num_bytes);
2369 used = btrfs_root_used(&extent_root->root_item);
2370 btrfs_set_root_used(&extent_root->root_item,
2371 used - extent_op->num_bytes);
2372 spin_unlock(&info->delalloc_lock);
2373
2374 ret = update_block_group(trans, extent_root,
2375 extent_op->bytenr,
2376 extent_op->num_bytes,
2377 0, ret > 0);
2378 BUG_ON(ret);
2379 kfree(extent_op);
2380 num_inserts--;
2381 }
2382 }
2383 mutex_unlock(&info->extent_ins_mutex);
2384
2385 ret = insert_extents(trans, extent_root, path, &insert_list,
2386 num_inserts);
2387 BUG_ON(ret);
2388
2389 /*
2390 * if restart is set for whatever reason we need to go back and start
2391 * searching through the pending list again.
2392 *
2393 * We just inserted some extents, which could have resulted in new
2394 * blocks being allocated, which would result in new blocks needing
2395 * updates, so if all is set we _must_ restart to get the updated
2396 * blocks.
2397 */
2398 if (restart || all) {
2399 INIT_LIST_HEAD(&insert_list);
2400 INIT_LIST_HEAD(&update_list);
2401 search = 0;
2402 restart = 0;
2403 num_inserts = 0;
2404 goto again;
2405 }
2406 out:
2407 btrfs_free_path(path);
2408 return 0;
2409 }
2410
2411 static int pin_down_bytes(struct btrfs_trans_handle *trans,
2412 struct btrfs_root *root,
2413 u64 bytenr, u64 num_bytes, int is_data)
2414 {
2415 int err = 0;
2416 struct extent_buffer *buf;
2417
2418 if (is_data)
2419 goto pinit;
2420
2421 buf = btrfs_find_tree_block(root, bytenr, num_bytes);
2422 if (!buf)
2423 goto pinit;
2424
2425 /* we can reuse a block if it hasn't been written
2426 * and it is from this transaction. We can't
2427 * reuse anything from the tree log root because
2428 * it has tiny sub-transactions.
2429 */
2430 if (btrfs_buffer_uptodate(buf, 0) &&
2431 btrfs_try_tree_lock(buf)) {
2432 u64 header_owner = btrfs_header_owner(buf);
2433 u64 header_transid = btrfs_header_generation(buf);
2434 if (header_owner != BTRFS_TREE_LOG_OBJECTID &&
2435 header_owner != BTRFS_TREE_RELOC_OBJECTID &&
2436 header_transid == trans->transid &&
2437 !btrfs_header_flag(buf, BTRFS_HEADER_FLAG_WRITTEN)) {
2438 clean_tree_block(NULL, root, buf);
2439 btrfs_tree_unlock(buf);
2440 free_extent_buffer(buf);
2441 return 1;
2442 }
2443 btrfs_tree_unlock(buf);
2444 }
2445 free_extent_buffer(buf);
2446 pinit:
2447 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2448
2449 BUG_ON(err < 0);
2450 return 0;
2451 }
2452
2453 /*
2454 * remove an extent from the root, returns 0 on success
2455 */
2456 static int __free_extent(struct btrfs_trans_handle *trans,
2457 struct btrfs_root *root,
2458 u64 bytenr, u64 num_bytes, u64 parent,
2459 u64 root_objectid, u64 ref_generation,
2460 u64 owner_objectid, int pin, int mark_free)
2461 {
2462 struct btrfs_path *path;
2463 struct btrfs_key key;
2464 struct btrfs_fs_info *info = root->fs_info;
2465 struct btrfs_root *extent_root = info->extent_root;
2466 struct extent_buffer *leaf;
2467 int ret;
2468 int extent_slot = 0;
2469 int found_extent = 0;
2470 int num_to_del = 1;
2471 struct btrfs_extent_item *ei;
2472 u32 refs;
2473
2474 key.objectid = bytenr;
2475 btrfs_set_key_type(&key, BTRFS_EXTENT_ITEM_KEY);
2476 key.offset = num_bytes;
2477 path = btrfs_alloc_path();
2478 if (!path)
2479 return -ENOMEM;
2480
2481 path->reada = 1;
2482 ret = lookup_extent_backref(trans, extent_root, path,
2483 bytenr, parent, root_objectid,
2484 ref_generation, owner_objectid, 1);
2485 if (ret == 0) {
2486 struct btrfs_key found_key;
2487 extent_slot = path->slots[0];
2488 while (extent_slot > 0) {
2489 extent_slot--;
2490 btrfs_item_key_to_cpu(path->nodes[0], &found_key,
2491 extent_slot);
2492 if (found_key.objectid != bytenr)
2493 break;
2494 if (found_key.type == BTRFS_EXTENT_ITEM_KEY &&
2495 found_key.offset == num_bytes) {
2496 found_extent = 1;
2497 break;
2498 }
2499 if (path->slots[0] - extent_slot > 5)
2500 break;
2501 }
2502 if (!found_extent) {
2503 ret = remove_extent_backref(trans, extent_root, path);
2504 BUG_ON(ret);
2505 btrfs_release_path(extent_root, path);
2506 ret = btrfs_search_slot(trans, extent_root,
2507 &key, path, -1, 1);
2508 if (ret) {
2509 printk(KERN_ERR "umm, got %d back from search"
2510 ", was looking for %llu\n", ret,
2511 (unsigned long long)bytenr);
2512 btrfs_print_leaf(extent_root, path->nodes[0]);
2513 }
2514 BUG_ON(ret);
2515 extent_slot = path->slots[0];
2516 }
2517 } else {
2518 btrfs_print_leaf(extent_root, path->nodes[0]);
2519 WARN_ON(1);
2520 printk(KERN_ERR "btrfs unable to find ref byte nr %llu "
2521 "root %llu gen %llu owner %llu\n",
2522 (unsigned long long)bytenr,
2523 (unsigned long long)root_objectid,
2524 (unsigned long long)ref_generation,
2525 (unsigned long long)owner_objectid);
2526 }
2527
2528 leaf = path->nodes[0];
2529 ei = btrfs_item_ptr(leaf, extent_slot,
2530 struct btrfs_extent_item);
2531 refs = btrfs_extent_refs(leaf, ei);
2532 BUG_ON(refs == 0);
2533 refs -= 1;
2534 btrfs_set_extent_refs(leaf, ei, refs);
2535
2536 btrfs_mark_buffer_dirty(leaf);
2537
2538 if (refs == 0 && found_extent && path->slots[0] == extent_slot + 1) {
2539 struct btrfs_extent_ref *ref;
2540 ref = btrfs_item_ptr(leaf, path->slots[0],
2541 struct btrfs_extent_ref);
2542 BUG_ON(btrfs_ref_num_refs(leaf, ref) != 1);
2543 /* if the back ref and the extent are next to each other
2544 * they get deleted below in one shot
2545 */
2546 path->slots[0] = extent_slot;
2547 num_to_del = 2;
2548 } else if (found_extent) {
2549 /* otherwise delete the extent back ref */
2550 ret = remove_extent_backref(trans, extent_root, path);
2551 BUG_ON(ret);
2552 /* if refs are 0, we need to setup the path for deletion */
2553 if (refs == 0) {
2554 btrfs_release_path(extent_root, path);
2555 ret = btrfs_search_slot(trans, extent_root, &key, path,
2556 -1, 1);
2557 BUG_ON(ret);
2558 }
2559 }
2560
2561 if (refs == 0) {
2562 u64 super_used;
2563 u64 root_used;
2564
2565 if (pin) {
2566 mutex_lock(&root->fs_info->pinned_mutex);
2567 ret = pin_down_bytes(trans, root, bytenr, num_bytes,
2568 owner_objectid >= BTRFS_FIRST_FREE_OBJECTID);
2569 mutex_unlock(&root->fs_info->pinned_mutex);
2570 if (ret > 0)
2571 mark_free = 1;
2572 BUG_ON(ret < 0);
2573 }
2574 /* block accounting for super block */
2575 spin_lock(&info->delalloc_lock);
2576 super_used = btrfs_super_bytes_used(&info->super_copy);
2577 btrfs_set_super_bytes_used(&info->super_copy,
2578 super_used - num_bytes);
2579
2580 /* block accounting for root item */
2581 root_used = btrfs_root_used(&root->root_item);
2582 btrfs_set_root_used(&root->root_item,
2583 root_used - num_bytes);
2584 spin_unlock(&info->delalloc_lock);
2585 ret = btrfs_del_items(trans, extent_root, path, path->slots[0],
2586 num_to_del);
2587 BUG_ON(ret);
2588 btrfs_release_path(extent_root, path);
2589
2590 if (owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
2591 ret = btrfs_del_csums(trans, root, bytenr, num_bytes);
2592 BUG_ON(ret);
2593 }
2594
2595 ret = update_block_group(trans, root, bytenr, num_bytes, 0,
2596 mark_free);
2597 BUG_ON(ret);
2598 }
2599 btrfs_free_path(path);
2600 finish_current_insert(trans, extent_root, 0);
2601 return ret;
2602 }
2603
2604 /*
2605 * find all the blocks marked as pending in the radix tree and remove
2606 * them from the extent map
2607 */
2608 static int del_pending_extents(struct btrfs_trans_handle *trans,
2609 struct btrfs_root *extent_root, int all)
2610 {
2611 int ret;
2612 int err = 0;
2613 u64 start;
2614 u64 end;
2615 u64 priv;
2616 u64 search = 0;
2617 int nr = 0, skipped = 0;
2618 struct extent_io_tree *pending_del;
2619 struct extent_io_tree *extent_ins;
2620 struct pending_extent_op *extent_op;
2621 struct btrfs_fs_info *info = extent_root->fs_info;
2622 struct list_head delete_list;
2623
2624 INIT_LIST_HEAD(&delete_list);
2625 extent_ins = &extent_root->fs_info->extent_ins;
2626 pending_del = &extent_root->fs_info->pending_del;
2627
2628 again:
2629 mutex_lock(&info->extent_ins_mutex);
2630 while (1) {
2631 ret = find_first_extent_bit(pending_del, search, &start, &end,
2632 EXTENT_WRITEBACK);
2633 if (ret) {
2634 if (all && skipped && !nr) {
2635 search = 0;
2636 skipped = 0;
2637 continue;
2638 }
2639 mutex_unlock(&info->extent_ins_mutex);
2640 break;
2641 }
2642
2643 ret = try_lock_extent(extent_ins, start, end, GFP_NOFS);
2644 if (!ret) {
2645 search = end+1;
2646 skipped = 1;
2647
2648 if (need_resched()) {
2649 mutex_unlock(&info->extent_ins_mutex);
2650 cond_resched();
2651 mutex_lock(&info->extent_ins_mutex);
2652 }
2653
2654 continue;
2655 }
2656 BUG_ON(ret < 0);
2657
2658 ret = get_state_private(pending_del, start, &priv);
2659 BUG_ON(ret);
2660 extent_op = (struct pending_extent_op *)(unsigned long)priv;
2661
2662 clear_extent_bits(pending_del, start, end, EXTENT_WRITEBACK,
2663 GFP_NOFS);
2664 if (!test_range_bit(extent_ins, start, end,
2665 EXTENT_WRITEBACK, 0)) {
2666 list_add_tail(&extent_op->list, &delete_list);
2667 nr++;
2668 } else {
2669 kfree(extent_op);
2670
2671 ret = get_state_private(&info->extent_ins, start,
2672 &priv);
2673 BUG_ON(ret);
2674 extent_op = (struct pending_extent_op *)
2675 (unsigned long)priv;
2676
2677 clear_extent_bits(&info->extent_ins, start, end,
2678 EXTENT_WRITEBACK, GFP_NOFS);
2679
2680 if (extent_op->type == PENDING_BACKREF_UPDATE) {
2681 list_add_tail(&extent_op->list, &delete_list);
2682 search = end + 1;
2683 nr++;
2684 continue;
2685 }
2686
2687 mutex_lock(&extent_root->fs_info->pinned_mutex);
2688 ret = pin_down_bytes(trans, extent_root, start,
2689 end + 1 - start, 0);
2690 mutex_unlock(&extent_root->fs_info->pinned_mutex);
2691
2692 ret = update_block_group(trans, extent_root, start,
2693 end + 1 - start, 0, ret > 0);
2694
2695 unlock_extent(extent_ins, start, end, GFP_NOFS);
2696 BUG_ON(ret);
2697 kfree(extent_op);
2698 }
2699 if (ret)
2700 err = ret;
2701
2702 search = end + 1;
2703
2704 if (need_resched()) {
2705 mutex_unlock(&info->extent_ins_mutex);
2706 cond_resched();
2707 mutex_lock(&info->extent_ins_mutex);
2708 }
2709 }
2710
2711 if (nr) {
2712 ret = free_extents(trans, extent_root, &delete_list);
2713 BUG_ON(ret);
2714 }
2715
2716 if (all && skipped) {
2717 INIT_LIST_HEAD(&delete_list);
2718 search = 0;
2719 nr = 0;
2720 goto again;
2721 }
2722
2723 if (!err)
2724 finish_current_insert(trans, extent_root, 0);
2725 return err;
2726 }
2727
2728 /*
2729 * remove an extent from the root, returns 0 on success
2730 */
2731 static int __btrfs_free_extent(struct btrfs_trans_handle *trans,
2732 struct btrfs_root *root,
2733 u64 bytenr, u64 num_bytes, u64 parent,
2734 u64 root_objectid, u64 ref_generation,
2735 u64 owner_objectid, int pin)
2736 {
2737 struct btrfs_root *extent_root = root->fs_info->extent_root;
2738 int pending_ret;
2739 int ret;
2740
2741 WARN_ON(num_bytes < root->sectorsize);
2742 if (root == extent_root) {
2743 struct pending_extent_op *extent_op = NULL;
2744
2745 mutex_lock(&root->fs_info->extent_ins_mutex);
2746 if (test_range_bit(&root->fs_info->extent_ins, bytenr,
2747 bytenr + num_bytes - 1, EXTENT_WRITEBACK, 0)) {
2748 u64 priv;
2749 ret = get_state_private(&root->fs_info->extent_ins,
2750 bytenr, &priv);
2751 BUG_ON(ret);
2752 extent_op = (struct pending_extent_op *)
2753 (unsigned long)priv;
2754
2755 extent_op->del = 1;
2756 if (extent_op->type == PENDING_EXTENT_INSERT) {
2757 mutex_unlock(&root->fs_info->extent_ins_mutex);
2758 return 0;
2759 }
2760 }
2761
2762 if (extent_op) {
2763 ref_generation = extent_op->orig_generation;
2764 parent = extent_op->orig_parent;
2765 }
2766
2767 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
2768 BUG_ON(!extent_op);
2769
2770 extent_op->type = PENDING_EXTENT_DELETE;
2771 extent_op->bytenr = bytenr;
2772 extent_op->num_bytes = num_bytes;
2773 extent_op->parent = parent;
2774 extent_op->orig_parent = parent;
2775 extent_op->generation = ref_generation;
2776 extent_op->orig_generation = ref_generation;
2777 extent_op->level = (int)owner_objectid;
2778 INIT_LIST_HEAD(&extent_op->list);
2779 extent_op->del = 0;
2780
2781 set_extent_bits(&root->fs_info->pending_del,
2782 bytenr, bytenr + num_bytes - 1,
2783 EXTENT_WRITEBACK, GFP_NOFS);
2784 set_state_private(&root->fs_info->pending_del,
2785 bytenr, (unsigned long)extent_op);
2786 mutex_unlock(&root->fs_info->extent_ins_mutex);
2787 return 0;
2788 }
2789 /* if metadata, always pin */
2790 if (owner_objectid < BTRFS_FIRST_FREE_OBJECTID) {
2791 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
2792 mutex_lock(&root->fs_info->pinned_mutex);
2793 btrfs_update_pinned_extents(root, bytenr, num_bytes, 1);
2794 mutex_unlock(&root->fs_info->pinned_mutex);
2795 update_reserved_extents(root, bytenr, num_bytes, 0);
2796 return 0;
2797 }
2798 pin = 1;
2799 }
2800
2801 /* if data, pin when any transaction has committed this */
2802 if (ref_generation != trans->transid)
2803 pin = 1;
2804
2805 ret = __free_extent(trans, root, bytenr, num_bytes, parent,
2806 root_objectid, ref_generation,
2807 owner_objectid, pin, pin == 0);
2808
2809 finish_current_insert(trans, root->fs_info->extent_root, 0);
2810 pending_ret = del_pending_extents(trans, root->fs_info->extent_root, 0);
2811 return ret ? ret : pending_ret;
2812 }
2813
2814 int btrfs_free_extent(struct btrfs_trans_handle *trans,
2815 struct btrfs_root *root,
2816 u64 bytenr, u64 num_bytes, u64 parent,
2817 u64 root_objectid, u64 ref_generation,
2818 u64 owner_objectid, int pin)
2819 {
2820 int ret;
2821
2822 ret = __btrfs_free_extent(trans, root, bytenr, num_bytes, parent,
2823 root_objectid, ref_generation,
2824 owner_objectid, pin);
2825 return ret;
2826 }
2827
2828 static u64 stripe_align(struct btrfs_root *root, u64 val)
2829 {
2830 u64 mask = ((u64)root->stripesize - 1);
2831 u64 ret = (val + mask) & ~mask;
2832 return ret;
2833 }
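
/*
 * Illustrative sketch, not part of the original file: stripe_align() above
 * rounds val up to the next multiple of root->stripesize, which relies on
 * stripesize being a power of two.  A quick userspace check of the mask
 * arithmetic:
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t align_up(uint64_t val, uint64_t stripesize)
{
        uint64_t mask = stripesize - 1;   /* power-of-two assumption */

        return (val + mask) & ~mask;
}

int main(void)
{
        printf("%llu\n", (unsigned long long)align_up(5000, 4096)); /* 8192 */
        printf("%llu\n", (unsigned long long)align_up(8192, 4096)); /* 8192 */
        return 0;
}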
2834
2835 /*
2836 * walks the btree of allocated extents and finds a hole of a given size.
2837 * The key ins is changed to record the hole:
2838 * ins->objectid == block start
2839 * ins->flags = BTRFS_EXTENT_ITEM_KEY
2840 * ins->offset == number of blocks
2841 * Any available blocks before search_start are skipped.
2842 */
2843 static noinline int find_free_extent(struct btrfs_trans_handle *trans,
2844 struct btrfs_root *orig_root,
2845 u64 num_bytes, u64 empty_size,
2846 u64 search_start, u64 search_end,
2847 u64 hint_byte, struct btrfs_key *ins,
2848 u64 exclude_start, u64 exclude_nr,
2849 int data)
2850 {
2851 int ret = 0;
2852 struct btrfs_root *root = orig_root->fs_info->extent_root;
2853 u64 total_needed = num_bytes;
2854 u64 *last_ptr = NULL;
2855 u64 last_wanted = 0;
2856 struct btrfs_block_group_cache *block_group = NULL;
2857 int chunk_alloc_done = 0;
2858 int empty_cluster = 2 * 1024 * 1024;
2859 int allowed_chunk_alloc = 0;
2860 struct list_head *head = NULL, *cur = NULL;
2861 int loop = 0;
2862 int extra_loop = 0;
2863 struct btrfs_space_info *space_info;
2864
2865 WARN_ON(num_bytes < root->sectorsize);
2866 btrfs_set_key_type(ins, BTRFS_EXTENT_ITEM_KEY);
2867 ins->objectid = 0;
2868 ins->offset = 0;
2869
2870 if (orig_root->ref_cows || empty_size)
2871 allowed_chunk_alloc = 1;
2872
2873 if (data & BTRFS_BLOCK_GROUP_METADATA) {
2874 last_ptr = &root->fs_info->last_alloc;
2875 if (!btrfs_test_opt(root, SSD))
2876 empty_cluster = 64 * 1024;
2877 }
2878
2879 if ((data & BTRFS_BLOCK_GROUP_DATA) && btrfs_test_opt(root, SSD))
2880 last_ptr = &root->fs_info->last_data_alloc;
2881
2882 if (last_ptr) {
2883 if (*last_ptr) {
2884 hint_byte = *last_ptr;
2885 last_wanted = *last_ptr;
2886 } else
2887 empty_size += empty_cluster;
2888 } else {
2889 empty_cluster = 0;
2890 }
2891 search_start = max(search_start, first_logical_byte(root, 0));
2892 search_start = max(search_start, hint_byte);
2893
2894 if (last_wanted && search_start != last_wanted) {
2895 last_wanted = 0;
2896 empty_size += empty_cluster;
2897 }
2898
2899 total_needed += empty_size;
2900 block_group = btrfs_lookup_block_group(root->fs_info, search_start);
2901 if (!block_group)
2902 block_group = btrfs_lookup_first_block_group(root->fs_info,
2903 search_start);
2904 space_info = __find_space_info(root->fs_info, data);
2905
2906 down_read(&space_info->groups_sem);
2907 while (1) {
2908 struct btrfs_free_space *free_space;
2909 /*
2910 * the only way this happens is if our hint points to a block
2911 * group that's not of the proper type; while looping this
2912 * should never happen
2913 */
2914 if (empty_size)
2915 extra_loop = 1;
2916
2917 if (!block_group)
2918 goto new_group_no_lock;
2919
2920 if (unlikely(!block_group->cached)) {
2921 mutex_lock(&block_group->cache_mutex);
2922 ret = cache_block_group(root, block_group);
2923 mutex_unlock(&block_group->cache_mutex);
2924 if (ret)
2925 break;
2926 }
2927
2928 mutex_lock(&block_group->alloc_mutex);
2929 if (unlikely(!block_group_bits(block_group, data)))
2930 goto new_group;
2931
2932 if (unlikely(block_group->ro))
2933 goto new_group;
2934
2935 free_space = btrfs_find_free_space(block_group, search_start,
2936 total_needed);
2937 if (free_space) {
2938 u64 start = block_group->key.objectid;
2939 u64 end = block_group->key.objectid +
2940 block_group->key.offset;
2941
2942 search_start = stripe_align(root, free_space->offset);
2943
2944 /* move on to the next group */
2945 if (search_start + num_bytes >= search_end)
2946 goto new_group;
2947
2948 /* move on to the next group */
2949 if (search_start + num_bytes > end)
2950 goto new_group;
2951
2952 if (last_wanted && search_start != last_wanted) {
2953 total_needed += empty_cluster;
2954 empty_size += empty_cluster;
2955 last_wanted = 0;
2956 /*
2957 * if search_start is still in this block group
2958 * then we just re-search this block group
2959 */
2960 if (search_start >= start &&
2961 search_start < end) {
2962 mutex_unlock(&block_group->alloc_mutex);
2963 continue;
2964 }
2965
2966 /* else we go to the next block group */
2967 goto new_group;
2968 }
2969
2970 if (exclude_nr > 0 &&
2971 (search_start + num_bytes > exclude_start &&
2972 search_start < exclude_start + exclude_nr)) {
2973 search_start = exclude_start + exclude_nr;
2974 /*
2975 * if search_start is still in this block group
2976 * then we just re-search this block group
2977 */
2978 if (search_start >= start &&
2979 search_start < end) {
2980 mutex_unlock(&block_group->alloc_mutex);
2981 last_wanted = 0;
2982 continue;
2983 }
2984
2985 /* else we go to the next block group */
2986 goto new_group;
2987 }
2988
2989 ins->objectid = search_start;
2990 ins->offset = num_bytes;
2991
2992 btrfs_remove_free_space_lock(block_group, search_start,
2993 num_bytes);
2994 /* we are all good, lets return */
2995 mutex_unlock(&block_group->alloc_mutex);
2996 break;
2997 }
2998 new_group:
2999 mutex_unlock(&block_group->alloc_mutex);
3000 put_block_group(block_group);
3001 block_group = NULL;
3002 new_group_no_lock:
3003 /* don't try to compare new allocations against the
3004 * last allocation any more
3005 */
3006 last_wanted = 0;
3007
3008 /*
3009 * Here's how this works.
3010 * loop == 0: we were searching a block group via a hint
3011 * and didn't find anything, so we start at
3012 * the head of the block groups and keep searching
3013 * loop == 1: we're searching through all of the block groups
3014 * if we hit the head again we have searched
3015 * all of the block groups for this space and we
3016 need to try and allocate; if we can't, error out.
3017 * loop == 2: we allocated more space and are looping through
3018 * all of the block groups again.
3019 */
3020 if (loop == 0) {
3021 head = &space_info->block_groups;
3022 cur = head->next;
3023 loop++;
3024 } else if (loop == 1 && cur == head) {
3025 int keep_going;
3026
3027 /* at this point we give up on the empty_size
3028 * allocations and just try to allocate the min
3029 * space.
3030 *
3031 * The extra_loop field was set if an empty_size
3032 * allocation was attempted above, and if this
3033 is true, we need to try the loop again without
3034 * the additional empty_size.
3035 */
3036 total_needed -= empty_size;
3037 empty_size = 0;
3038 keep_going = extra_loop;
3039 loop++;
3040
3041 if (allowed_chunk_alloc && !chunk_alloc_done) {
3042 up_read(&space_info->groups_sem);
3043 ret = do_chunk_alloc(trans, root, num_bytes +
3044 2 * 1024 * 1024, data, 1);
3045 down_read(&space_info->groups_sem);
3046 if (ret < 0)
3047 goto loop_check;
3048 head = &space_info->block_groups;
3049 /*
3050 * we've allocated a new chunk, keep
3051 * trying
3052 */
3053 keep_going = 1;
3054 chunk_alloc_done = 1;
3055 } else if (!allowed_chunk_alloc) {
3056 space_info->force_alloc = 1;
3057 }
3058 loop_check:
3059 if (keep_going) {
3060 cur = head->next;
3061 extra_loop = 0;
3062 } else {
3063 break;
3064 }
3065 } else if (cur == head) {
3066 break;
3067 }
3068
3069 block_group = list_entry(cur, struct btrfs_block_group_cache,
3070 list);
3071 atomic_inc(&block_group->count);
3072
3073 search_start = block_group->key.objectid;
3074 cur = cur->next;
3075 }
3076
3077 /* we found what we needed */
3078 if (ins->objectid) {
3079 if (!(data & BTRFS_BLOCK_GROUP_DATA))
3080 trans->block_group = block_group->key.objectid;
3081
3082 if (last_ptr)
3083 *last_ptr = ins->objectid + ins->offset;
3084 ret = 0;
3085 } else if (!ret) {
3086 printk(KERN_ERR "btrfs searching for %llu bytes, "
3087 "num_bytes %llu, loop %d, allowed_alloc %d\n",
3088 (unsigned long long)total_needed,
3089 (unsigned long long)num_bytes,
3090 loop, allowed_chunk_alloc);
3091 ret = -ENOSPC;
3092 }
3093 if (block_group)
3094 put_block_group(block_group);
3095
3096 up_read(&space_info->groups_sem);
3097 return ret;
3098 }
3099
3100 static void dump_space_info(struct btrfs_space_info *info, u64 bytes)
3101 {
3102 struct btrfs_block_group_cache *cache;
3103
3104 printk(KERN_INFO "space_info has %llu free, is %sfull\n",
3105 (unsigned long long)(info->total_bytes - info->bytes_used -
3106 info->bytes_pinned - info->bytes_reserved),
3107 (info->full) ? "" : "not ");
3108
3109 down_read(&info->groups_sem);
3110 list_for_each_entry(cache, &info->block_groups, list) {
3111 spin_lock(&cache->lock);
3112 printk(KERN_INFO "block group %llu has %llu bytes, %llu used "
3113 "%llu pinned %llu reserved\n",
3114 (unsigned long long)cache->key.objectid,
3115 (unsigned long long)cache->key.offset,
3116 (unsigned long long)btrfs_block_group_used(&cache->item),
3117 (unsigned long long)cache->pinned,
3118 (unsigned long long)cache->reserved);
3119 btrfs_dump_free_space(cache, bytes);
3120 spin_unlock(&cache->lock);
3121 }
3122 up_read(&info->groups_sem);
3123 }
3124
3125 static int __btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3126 struct btrfs_root *root,
3127 u64 num_bytes, u64 min_alloc_size,
3128 u64 empty_size, u64 hint_byte,
3129 u64 search_end, struct btrfs_key *ins,
3130 u64 data)
3131 {
3132 int ret;
3133 u64 search_start = 0;
3134 u64 alloc_profile;
3135 struct btrfs_fs_info *info = root->fs_info;
3136
3137 if (data) {
3138 alloc_profile = info->avail_data_alloc_bits &
3139 info->data_alloc_profile;
3140 data = BTRFS_BLOCK_GROUP_DATA | alloc_profile;
3141 } else if (root == root->fs_info->chunk_root) {
3142 alloc_profile = info->avail_system_alloc_bits &
3143 info->system_alloc_profile;
3144 data = BTRFS_BLOCK_GROUP_SYSTEM | alloc_profile;
3145 } else {
3146 alloc_profile = info->avail_metadata_alloc_bits &
3147 info->metadata_alloc_profile;
3148 data = BTRFS_BLOCK_GROUP_METADATA | alloc_profile;
3149 }
3150 again:
3151 data = btrfs_reduce_alloc_profile(root, data);
3152 /*
3153 * the only place that sets empty_size is btrfs_realloc_node, which
3154 * is not called recursively on allocations
3155 */
3156 if (empty_size || root->ref_cows) {
3157 if (!(data & BTRFS_BLOCK_GROUP_METADATA)) {
3158 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3159 2 * 1024 * 1024,
3160 BTRFS_BLOCK_GROUP_METADATA |
3161 (info->metadata_alloc_profile &
3162 info->avail_metadata_alloc_bits), 0);
3163 }
3164 ret = do_chunk_alloc(trans, root->fs_info->extent_root,
3165 num_bytes + 2 * 1024 * 1024, data, 0);
3166 }
3167
3168 WARN_ON(num_bytes < root->sectorsize);
3169 ret = find_free_extent(trans, root, num_bytes, empty_size,
3170 search_start, search_end, hint_byte, ins,
3171 trans->alloc_exclude_start,
3172 trans->alloc_exclude_nr, data);
3173
3174 if (ret == -ENOSPC && num_bytes > min_alloc_size) {
3175 num_bytes = num_bytes >> 1;
3176 num_bytes = num_bytes & ~(root->sectorsize - 1);
3177 num_bytes = max(num_bytes, min_alloc_size);
3178 do_chunk_alloc(trans, root->fs_info->extent_root,
3179 num_bytes, data, 1);
3180 goto again;
3181 }
3182 if (ret) {
3183 struct btrfs_space_info *sinfo;
3184
3185 sinfo = __find_space_info(root->fs_info, data);
3186 printk(KERN_ERR "btrfs allocation failed flags %llu, "
3187 "wanted %llu\n", (unsigned long long)data,
3188 (unsigned long long)num_bytes);
3189 dump_space_info(sinfo, num_bytes);
3190 BUG();
3191 }
3192
3193 return ret;
3194 }
3195
3196 int btrfs_free_reserved_extent(struct btrfs_root *root, u64 start, u64 len)
3197 {
3198 struct btrfs_block_group_cache *cache;
3199 int ret = 0;
3200
3201 cache = btrfs_lookup_block_group(root->fs_info, start);
3202 if (!cache) {
3203 printk(KERN_ERR "Unable to find block group for %llu\n",
3204 (unsigned long long)start);
3205 return -ENOSPC;
3206 }
3207
3208 ret = btrfs_discard_extent(root, start, len);
3209
3210 btrfs_add_free_space(cache, start, len);
3211 put_block_group(cache);
3212 update_reserved_extents(root, start, len, 0);
3213
3214 return ret;
3215 }
3216
3217 int btrfs_reserve_extent(struct btrfs_trans_handle *trans,
3218 struct btrfs_root *root,
3219 u64 num_bytes, u64 min_alloc_size,
3220 u64 empty_size, u64 hint_byte,
3221 u64 search_end, struct btrfs_key *ins,
3222 u64 data)
3223 {
3224 int ret;
3225 ret = __btrfs_reserve_extent(trans, root, num_bytes, min_alloc_size,
3226 empty_size, hint_byte, search_end, ins,
3227 data);
3228 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3229 return ret;
3230 }
3231
3232 static int __btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3233 struct btrfs_root *root, u64 parent,
3234 u64 root_objectid, u64 ref_generation,
3235 u64 owner, struct btrfs_key *ins)
3236 {
3237 int ret;
3238 int pending_ret;
3239 u64 super_used;
3240 u64 root_used;
3241 u64 num_bytes = ins->offset;
3242 u32 sizes[2];
3243 struct btrfs_fs_info *info = root->fs_info;
3244 struct btrfs_root *extent_root = info->extent_root;
3245 struct btrfs_extent_item *extent_item;
3246 struct btrfs_extent_ref *ref;
3247 struct btrfs_path *path;
3248 struct btrfs_key keys[2];
3249
3250 if (parent == 0)
3251 parent = ins->objectid;
3252
3253 /* block accounting for super block */
3254 spin_lock(&info->delalloc_lock);
3255 super_used = btrfs_super_bytes_used(&info->super_copy);
3256 btrfs_set_super_bytes_used(&info->super_copy, super_used + num_bytes);
3257
3258 /* block accounting for root item */
3259 root_used = btrfs_root_used(&root->root_item);
3260 btrfs_set_root_used(&root->root_item, root_used + num_bytes);
3261 spin_unlock(&info->delalloc_lock);
3262
3263 if (root == extent_root) {
3264 struct pending_extent_op *extent_op;
3265
3266 extent_op = kmalloc(sizeof(*extent_op), GFP_NOFS);
3267 BUG_ON(!extent_op);
3268
3269 extent_op->type = PENDING_EXTENT_INSERT;
3270 extent_op->bytenr = ins->objectid;
3271 extent_op->num_bytes = ins->offset;
3272 extent_op->parent = parent;
3273 extent_op->orig_parent = 0;
3274 extent_op->generation = ref_generation;
3275 extent_op->orig_generation = 0;
3276 extent_op->level = (int)owner;
3277 INIT_LIST_HEAD(&extent_op->list);
3278 extent_op->del = 0;
3279
3280 mutex_lock(&root->fs_info->extent_ins_mutex);
3281 set_extent_bits(&root->fs_info->extent_ins, ins->objectid,
3282 ins->objectid + ins->offset - 1,
3283 EXTENT_WRITEBACK, GFP_NOFS);
3284 set_state_private(&root->fs_info->extent_ins,
3285 ins->objectid, (unsigned long)extent_op);
3286 mutex_unlock(&root->fs_info->extent_ins_mutex);
3287 goto update_block;
3288 }
3289
3290 memcpy(&keys[0], ins, sizeof(*ins));
3291 keys[1].objectid = ins->objectid;
3292 keys[1].type = BTRFS_EXTENT_REF_KEY;
3293 keys[1].offset = parent;
3294 sizes[0] = sizeof(*extent_item);
3295 sizes[1] = sizeof(*ref);
3296
3297 path = btrfs_alloc_path();
3298 BUG_ON(!path);
3299
3300 ret = btrfs_insert_empty_items(trans, extent_root, path, keys,
3301 sizes, 2);
3302 BUG_ON(ret);
3303
3304 extent_item = btrfs_item_ptr(path->nodes[0], path->slots[0],
3305 struct btrfs_extent_item);
3306 btrfs_set_extent_refs(path->nodes[0], extent_item, 1);
3307 ref = btrfs_item_ptr(path->nodes[0], path->slots[0] + 1,
3308 struct btrfs_extent_ref);
3309
3310 btrfs_set_ref_root(path->nodes[0], ref, root_objectid);
3311 btrfs_set_ref_generation(path->nodes[0], ref, ref_generation);
3312 btrfs_set_ref_objectid(path->nodes[0], ref, owner);
3313 btrfs_set_ref_num_refs(path->nodes[0], ref, 1);
3314
3315 btrfs_mark_buffer_dirty(path->nodes[0]);
3316
3317 trans->alloc_exclude_start = 0;
3318 trans->alloc_exclude_nr = 0;
3319 btrfs_free_path(path);
3320 finish_current_insert(trans, extent_root, 0);
3321 pending_ret = del_pending_extents(trans, extent_root, 0);
3322
3323 if (ret)
3324 goto out;
3325 if (pending_ret) {
3326 ret = pending_ret;
3327 goto out;
3328 }
3329
3330 update_block:
3331 ret = update_block_group(trans, root, ins->objectid,
3332 ins->offset, 1, 0);
3333 if (ret) {
3334 printk(KERN_ERR "btrfs update block group failed for %llu "
3335 "%llu\n", (unsigned long long)ins->objectid,
3336 (unsigned long long)ins->offset);
3337 BUG();
3338 }
3339 out:
3340 return ret;
3341 }
3342
3343 int btrfs_alloc_reserved_extent(struct btrfs_trans_handle *trans,
3344 struct btrfs_root *root, u64 parent,
3345 u64 root_objectid, u64 ref_generation,
3346 u64 owner, struct btrfs_key *ins)
3347 {
3348 int ret;
3349
3350 if (root_objectid == BTRFS_TREE_LOG_OBJECTID)
3351 return 0;
3352 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3353 ref_generation, owner, ins);
3354 update_reserved_extents(root, ins->objectid, ins->offset, 0);
3355 return ret;
3356 }
3357
3358 /*
3359 * this is used by the tree logging recovery code. It records that
3360 * an extent has been allocated and makes sure to clear the free
3361 * space cache bits as well
3362 */
3363 int btrfs_alloc_logged_extent(struct btrfs_trans_handle *trans,
3364 struct btrfs_root *root, u64 parent,
3365 u64 root_objectid, u64 ref_generation,
3366 u64 owner, struct btrfs_key *ins)
3367 {
3368 int ret;
3369 struct btrfs_block_group_cache *block_group;
3370
3371 block_group = btrfs_lookup_block_group(root->fs_info, ins->objectid);
3372 mutex_lock(&block_group->cache_mutex);
3373 cache_block_group(root, block_group);
3374 mutex_unlock(&block_group->cache_mutex);
3375
3376 ret = btrfs_remove_free_space(block_group, ins->objectid,
3377 ins->offset);
3378 BUG_ON(ret);
3379 put_block_group(block_group);
3380 ret = __btrfs_alloc_reserved_extent(trans, root, parent, root_objectid,
3381 ref_generation, owner, ins);
3382 return ret;
3383 }
3384
3385 /*
3386 * finds a free extent and does all the dirty work required for allocation.
3387 * The key for the allocated extent is returned through ins; the caller can
3388 * then read or create the tree buffer for the first block of the extent.
3389 *
3390 * returns 0 if everything worked, non-zero otherwise.
3391 */
3392 int btrfs_alloc_extent(struct btrfs_trans_handle *trans,
3393 struct btrfs_root *root,
3394 u64 num_bytes, u64 parent, u64 min_alloc_size,
3395 u64 root_objectid, u64 ref_generation,
3396 u64 owner_objectid, u64 empty_size, u64 hint_byte,
3397 u64 search_end, struct btrfs_key *ins, u64 data)
3398 {
3399 int ret;
3400
3401 ret = __btrfs_reserve_extent(trans, root, num_bytes,
3402 min_alloc_size, empty_size, hint_byte,
3403 search_end, ins, data);
3404 BUG_ON(ret);
3405 if (root_objectid != BTRFS_TREE_LOG_OBJECTID) {
3406 ret = __btrfs_alloc_reserved_extent(trans, root, parent,
3407 root_objectid, ref_generation,
3408 owner_objectid, ins);
3409 BUG_ON(ret);
3410
3411 } else {
3412 update_reserved_extents(root, ins->objectid, ins->offset, 1);
3413 }
3414 return ret;
3415 }
3416
3417 struct extent_buffer *btrfs_init_new_buffer(struct btrfs_trans_handle *trans,
3418 struct btrfs_root *root,
3419 u64 bytenr, u32 blocksize,
3420 int level)
3421 {
3422 struct extent_buffer *buf;
3423
3424 buf = btrfs_find_create_tree_block(root, bytenr, blocksize);
3425 if (!buf)
3426 return ERR_PTR(-ENOMEM);
3427 btrfs_set_header_generation(buf, trans->transid);
3428 btrfs_set_buffer_lockdep_class(buf, level);
3429 btrfs_tree_lock(buf);
3430 clean_tree_block(trans, root, buf);
3431
3432 btrfs_set_lock_blocking(buf);
3433 btrfs_set_buffer_uptodate(buf);
3434
3435 if (root->root_key.objectid == BTRFS_TREE_LOG_OBJECTID) {
3436 set_extent_dirty(&root->dirty_log_pages, buf->start,
3437 buf->start + buf->len - 1, GFP_NOFS);
3438 } else {
3439 set_extent_dirty(&trans->transaction->dirty_pages, buf->start,
3440 buf->start + buf->len - 1, GFP_NOFS);
3441 }
3442 trans->blocks_used++;
3443 /* this returns a buffer locked for blocking */
3444 return buf;
3445 }
3446
3447 /*
3448 * helper function to allocate a block for a given tree
3449 * returns the tree buffer or an error pointer on failure.
3450 */
3451 struct extent_buffer *btrfs_alloc_free_block(struct btrfs_trans_handle *trans,
3452 struct btrfs_root *root,
3453 u32 blocksize, u64 parent,
3454 u64 root_objectid,
3455 u64 ref_generation,
3456 int level,
3457 u64 hint,
3458 u64 empty_size)
3459 {
3460 struct btrfs_key ins;
3461 int ret;
3462 struct extent_buffer *buf;
3463
3464 ret = btrfs_alloc_extent(trans, root, blocksize, parent, blocksize,
3465 root_objectid, ref_generation, level,
3466 empty_size, hint, (u64)-1, &ins, 0);
3467 if (ret) {
3468 BUG_ON(ret > 0);
3469 return ERR_PTR(ret);
3470 }
3471
3472 buf = btrfs_init_new_buffer(trans, root, ins.objectid,
3473 blocksize, level);
3474 return buf;
3475 }
3476
3477 int btrfs_drop_leaf_ref(struct btrfs_trans_handle *trans,
3478 struct btrfs_root *root, struct extent_buffer *leaf)
3479 {
3480 u64 leaf_owner;
3481 u64 leaf_generation;
3482 struct refsort *sorted;
3483 struct btrfs_key key;
3484 struct btrfs_file_extent_item *fi;
3485 int i;
3486 int nritems;
3487 int ret;
3488 int refi = 0;
3489 int slot;
3490
3491 BUG_ON(!btrfs_is_leaf(leaf));
3492 nritems = btrfs_header_nritems(leaf);
3493 leaf_owner = btrfs_header_owner(leaf);
3494 leaf_generation = btrfs_header_generation(leaf);
3495
3496 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3497 /* we do this loop twice. The first time we build a list
3498 * of the extents we have a reference on, then we sort the list
3499 * by bytenr. The second time around we actually do the
3500 * extent freeing.
3501 */
3502 for (i = 0; i < nritems; i++) {
3503 u64 disk_bytenr;
3504 cond_resched();
3505
3506 btrfs_item_key_to_cpu(leaf, &key, i);
3507
3508 /* only extents have references, skip everything else */
3509 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3510 continue;
3511
3512 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
3513
3514 /* inline extents live in the btree, they don't have refs */
3515 if (btrfs_file_extent_type(leaf, fi) ==
3516 BTRFS_FILE_EXTENT_INLINE)
3517 continue;
3518
3519 disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
3520
3521 /* holes don't have refs */
3522 if (disk_bytenr == 0)
3523 continue;
3524
3525 sorted[refi].bytenr = disk_bytenr;
3526 sorted[refi].slot = i;
3527 refi++;
3528 }
3529
3530 if (refi == 0)
3531 goto out;
3532
3533 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3534
3535 for (i = 0; i < refi; i++) {
3536 u64 disk_bytenr;
3537
3538 disk_bytenr = sorted[i].bytenr;
3539 slot = sorted[i].slot;
3540
3541 cond_resched();
3542
3543 btrfs_item_key_to_cpu(leaf, &key, slot);
3544 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
3545 continue;
3546
3547 fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
3548
3549 ret = __btrfs_free_extent(trans, root, disk_bytenr,
3550 btrfs_file_extent_disk_num_bytes(leaf, fi),
3551 leaf->start, leaf_owner, leaf_generation,
3552 key.objectid, 0);
3553 BUG_ON(ret);
3554
3555 atomic_inc(&root->fs_info->throttle_gen);
3556 wake_up(&root->fs_info->transaction_throttle);
3557 cond_resched();
3558 }
3559 out:
3560 kfree(sorted);
3561 return 0;
3562 }
3563
3564 static noinline int cache_drop_leaf_ref(struct btrfs_trans_handle *trans,
3565 struct btrfs_root *root,
3566 struct btrfs_leaf_ref *ref)
3567 {
3568 int i;
3569 int ret;
3570 struct btrfs_extent_info *info;
3571 struct refsort *sorted;
3572
3573 if (ref->nritems == 0)
3574 return 0;
3575
3576 sorted = kmalloc(sizeof(*sorted) * ref->nritems, GFP_NOFS);
3577 for (i = 0; i < ref->nritems; i++) {
3578 sorted[i].bytenr = ref->extents[i].bytenr;
3579 sorted[i].slot = i;
3580 }
3581 sort(sorted, ref->nritems, sizeof(struct refsort), refsort_cmp, NULL);
3582
3583 /*
3584 * the items in the ref were sorted when the ref was inserted
3585 * into the ref cache, so this is already in order
3586 */
3587 for (i = 0; i < ref->nritems; i++) {
3588 info = ref->extents + sorted[i].slot;
3589 ret = __btrfs_free_extent(trans, root, info->bytenr,
3590 info->num_bytes, ref->bytenr,
3591 ref->owner, ref->generation,
3592 info->objectid, 0);
3593
3594 atomic_inc(&root->fs_info->throttle_gen);
3595 wake_up(&root->fs_info->transaction_throttle);
3596 cond_resched();
3597
3598 BUG_ON(ret);
3599 info++;
3600 }
3601
3602 kfree(sorted);
3603 return 0;
3604 }
3605
3606 static int drop_snap_lookup_refcount(struct btrfs_root *root, u64 start,
3607 u64 len, u32 *refs)
3608 {
3609 int ret;
3610
3611 ret = btrfs_lookup_extent_ref(NULL, root, start, len, refs);
3612 BUG_ON(ret);
3613
3614 #if 0 /* some debugging code in case we see problems here */
3615 /* if the refs count is one, it won't get increased again. But
3616 * if the ref count is > 1, someone may be decreasing it at
3617 * the same time we are.
3618 */
3619 if (*refs != 1) {
3620 struct extent_buffer *eb = NULL;
3621 eb = btrfs_find_create_tree_block(root, start, len);
3622 if (eb)
3623 btrfs_tree_lock(eb);
3624
3625 mutex_lock(&root->fs_info->alloc_mutex);
3626 ret = lookup_extent_ref(NULL, root, start, len, refs);
3627 BUG_ON(ret);
3628 mutex_unlock(&root->fs_info->alloc_mutex);
3629
3630 if (eb) {
3631 btrfs_tree_unlock(eb);
3632 free_extent_buffer(eb);
3633 }
3634 if (*refs == 1) {
3635 printk(KERN_ERR "btrfs block %llu went down to one "
3636 "during drop_snap\n", (unsigned long long)start);
3637 }
3638
3639 }
3640 #endif
3641
3642 cond_resched();
3643 return ret;
3644 }
3645
3646 /*
3647 * this is used while deleting old snapshots, and it drops the refs
3648 * on a whole subtree starting from a level 1 node.
3649 *
3650 * The idea is to sort all the leaf pointers, and then drop the
3651 * ref on all the leaves in order. Most of the time the leaves
3652 * will have ref cache entries, so no leaf IOs will be required to
3653 * find the extents they have references on.
3654 *
3655 * For each leaf, any references it has are also dropped in order
3656 *
3657 * This ends up dropping the references in something close to optimal
3658 * order for reading and modifying the extent allocation tree.
3659 */
3660 static noinline int drop_level_one_refs(struct btrfs_trans_handle *trans,
3661 struct btrfs_root *root,
3662 struct btrfs_path *path)
3663 {
3664 u64 bytenr;
3665 u64 root_owner;
3666 u64 root_gen;
3667 struct extent_buffer *eb = path->nodes[1];
3668 struct extent_buffer *leaf;
3669 struct btrfs_leaf_ref *ref;
3670 struct refsort *sorted = NULL;
3671 int nritems = btrfs_header_nritems(eb);
3672 int ret;
3673 int i;
3674 int refi = 0;
3675 int slot = path->slots[1];
3676 u32 blocksize = btrfs_level_size(root, 0);
3677 u32 refs;
3678
3679 if (nritems == 0)
3680 goto out;
3681
3682 root_owner = btrfs_header_owner(eb);
3683 root_gen = btrfs_header_generation(eb);
3684 sorted = kmalloc(sizeof(*sorted) * nritems, GFP_NOFS);
3685
3686 /*
3687 * step one, sort all the leaf pointers so we don't scribble
3688 * randomly into the extent allocation tree
3689 */
3690 for (i = slot; i < nritems; i++) {
3691 sorted[refi].bytenr = btrfs_node_blockptr(eb, i);
3692 sorted[refi].slot = i;
3693 refi++;
3694 }
3695
3696 /*
3697 * nritems won't be zero, but if we're picking up drop_snapshot
3698 * after a crash, slot might be > 0, so double check things
3699 * just in case.
3700 */
3701 if (refi == 0)
3702 goto out;
3703
3704 sort(sorted, refi, sizeof(struct refsort), refsort_cmp, NULL);
3705
3706 /*
3707 * the first loop frees everything the leaves point to
3708 */
3709 for (i = 0; i < refi; i++) {
3710 u64 ptr_gen;
3711
3712 bytenr = sorted[i].bytenr;
3713
3714 /*
3715 * check the reference count on this leaf. If it is > 1
3716 * we just decrement it below and don't update any
3717 * of the refs the leaf points to.
3718 */
3719 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3720 BUG_ON(ret);
3721 if (refs != 1)
3722 continue;
3723
3724 ptr_gen = btrfs_node_ptr_generation(eb, sorted[i].slot);
3725
3726 /*
3727 * the leaf only had one reference, which means the
3728 * only thing pointing to this leaf is the snapshot
3729 * we're deleting. It isn't possible for the reference
3730 * count to increase again later
3731 *
3732 * The reference cache is checked for the leaf,
3733 * and if found we'll be able to drop any refs held by
3734 * the leaf without needing to read it in.
3735 */
3736 ref = btrfs_lookup_leaf_ref(root, bytenr);
3737 if (ref && ref->generation != ptr_gen) {
3738 btrfs_free_leaf_ref(root, ref);
3739 ref = NULL;
3740 }
3741 if (ref) {
3742 ret = cache_drop_leaf_ref(trans, root, ref);
3743 BUG_ON(ret);
3744 btrfs_remove_leaf_ref(root, ref);
3745 btrfs_free_leaf_ref(root, ref);
3746 } else {
3747 /*
3748 * the leaf wasn't in the reference cache, so
3749 * we have to read it.
3750 */
3751 leaf = read_tree_block(root, bytenr, blocksize,
3752 ptr_gen);
3753 ret = btrfs_drop_leaf_ref(trans, root, leaf);
3754 BUG_ON(ret);
3755 free_extent_buffer(leaf);
3756 }
3757 atomic_inc(&root->fs_info->throttle_gen);
3758 wake_up(&root->fs_info->transaction_throttle);
3759 cond_resched();
3760 }
3761
3762 /*
3763 * run through the loop again to free the refs on the leaves.
3764 * This is faster than doing it in the loop above because
3765 * the leaves are likely to be clustered together. We end up
3766 * working in nice chunks on the extent allocation tree.
3767 */
3768 for (i = 0; i < refi; i++) {
3769 bytenr = sorted[i].bytenr;
3770 ret = __btrfs_free_extent(trans, root, bytenr,
3771 blocksize, eb->start,
3772 root_owner, root_gen, 0, 1);
3773 BUG_ON(ret);
3774
3775 atomic_inc(&root->fs_info->throttle_gen);
3776 wake_up(&root->fs_info->transaction_throttle);
3777 cond_resched();
3778 }
3779 out:
3780 kfree(sorted);
3781
3782 /*
3783 * update the path to show we've processed the entire level 1
3784 * node. This will get saved into the root's drop_snapshot_progress
3785 * field so these drops are not repeated again if this transaction
3786 * commits.
3787 */
3788 path->slots[1] = nritems;
3789 return 0;
3790 }
3791
3792 /*
3793 * helper function for drop_snapshot, this walks down the tree dropping ref
3794 * counts as it goes.
3795 */
3796 static noinline int walk_down_tree(struct btrfs_trans_handle *trans,
3797 struct btrfs_root *root,
3798 struct btrfs_path *path, int *level)
3799 {
3800 u64 root_owner;
3801 u64 root_gen;
3802 u64 bytenr;
3803 u64 ptr_gen;
3804 struct extent_buffer *next;
3805 struct extent_buffer *cur;
3806 struct extent_buffer *parent;
3807 u32 blocksize;
3808 int ret;
3809 u32 refs;
3810
3811 WARN_ON(*level < 0);
3812 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3813 ret = drop_snap_lookup_refcount(root, path->nodes[*level]->start,
3814 path->nodes[*level]->len, &refs);
3815 BUG_ON(ret);
3816 if (refs > 1)
3817 goto out;
3818
3819 /*
3820 * walk down to the last node level and free all the leaves
3821 */
3822 while (*level >= 0) {
3823 WARN_ON(*level < 0);
3824 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3825 cur = path->nodes[*level];
3826
3827 if (btrfs_header_level(cur) != *level)
3828 WARN_ON(1);
3829
3830 if (path->slots[*level] >=
3831 btrfs_header_nritems(cur))
3832 break;
3833
3834 /* the new code goes down to level 1 and does all the
3835 * leaves pointed to by that node in bulk. So, this check
3836 * for level 0 will always be false.
3837 *
3838 * But, the disk format allows the drop_snapshot_progress
3839 * field in the root to leave things in a state where
3840 * a leaf will need cleaning up here. If someone crashes
3841 * with the old code and then boots with the new code,
3842 * we might find a leaf here.
3843 */
3844 if (*level == 0) {
3845 ret = btrfs_drop_leaf_ref(trans, root, cur);
3846 BUG_ON(ret);
3847 break;
3848 }
3849
3850 /*
3851 * once we get to level one, process the whole node
3852 * at once, including everything below it.
3853 */
3854 if (*level == 1) {
3855 ret = drop_level_one_refs(trans, root, path);
3856 BUG_ON(ret);
3857 break;
3858 }
3859
3860 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3861 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3862 blocksize = btrfs_level_size(root, *level - 1);
3863
3864 ret = drop_snap_lookup_refcount(root, bytenr, blocksize, &refs);
3865 BUG_ON(ret);
3866
3867 /*
3868 * if there is more than one reference, we don't need
3869 * to read that node to drop any references it has. We
3870 * just drop the ref we hold on that node and move on to the
3871 * next slot in this level.
3872 */
3873 if (refs != 1) {
3874 parent = path->nodes[*level];
3875 root_owner = btrfs_header_owner(parent);
3876 root_gen = btrfs_header_generation(parent);
3877 path->slots[*level]++;
3878
3879 ret = __btrfs_free_extent(trans, root, bytenr,
3880 blocksize, parent->start,
3881 root_owner, root_gen,
3882 *level - 1, 1);
3883 BUG_ON(ret);
3884
3885 atomic_inc(&root->fs_info->throttle_gen);
3886 wake_up(&root->fs_info->transaction_throttle);
3887 cond_resched();
3888
3889 continue;
3890 }
3891
3892 /*
3893 * we need to keep freeing things in the next level down.
3894 * read the block and loop around to process it
3895 */
3896 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3897 WARN_ON(*level <= 0);
3898 if (path->nodes[*level-1])
3899 free_extent_buffer(path->nodes[*level-1]);
3900 path->nodes[*level-1] = next;
3901 *level = btrfs_header_level(next);
3902 path->slots[*level] = 0;
3903 cond_resched();
3904 }
3905 out:
3906 WARN_ON(*level < 0);
3907 WARN_ON(*level >= BTRFS_MAX_LEVEL);
3908
3909 if (path->nodes[*level] == root->node) {
3910 parent = path->nodes[*level];
3911 bytenr = path->nodes[*level]->start;
3912 } else {
3913 parent = path->nodes[*level + 1];
3914 bytenr = btrfs_node_blockptr(parent, path->slots[*level + 1]);
3915 }
3916
3917 blocksize = btrfs_level_size(root, *level);
3918 root_owner = btrfs_header_owner(parent);
3919 root_gen = btrfs_header_generation(parent);
3920
3921 /*
3922 * cleanup and free the reference on the last node
3923 * we processed
3924 */
3925 ret = __btrfs_free_extent(trans, root, bytenr, blocksize,
3926 parent->start, root_owner, root_gen,
3927 *level, 1);
3928 free_extent_buffer(path->nodes[*level]);
3929 path->nodes[*level] = NULL;
3930
3931 *level += 1;
3932 BUG_ON(ret);
3933
3934 cond_resched();
3935 return 0;
3936 }
3937
3938 /*
3939 * helper function for drop_subtree, this function is similar to
3940 * walk_down_tree. The main difference is that it checks reference
3941 * counts while tree blocks are locked.
3942 */
3943 static noinline int walk_down_subtree(struct btrfs_trans_handle *trans,
3944 struct btrfs_root *root,
3945 struct btrfs_path *path, int *level)
3946 {
3947 struct extent_buffer *next;
3948 struct extent_buffer *cur;
3949 struct extent_buffer *parent;
3950 u64 bytenr;
3951 u64 ptr_gen;
3952 u32 blocksize;
3953 u32 refs;
3954 int ret;
3955
3956 cur = path->nodes[*level];
3957 ret = btrfs_lookup_extent_ref(trans, root, cur->start, cur->len,
3958 &refs);
3959 BUG_ON(ret);
3960 if (refs > 1)
3961 goto out;
3962
3963 while (*level >= 0) {
3964 cur = path->nodes[*level];
3965 if (*level == 0) {
3966 ret = btrfs_drop_leaf_ref(trans, root, cur);
3967 BUG_ON(ret);
3968 clean_tree_block(trans, root, cur);
3969 break;
3970 }
3971 if (path->slots[*level] >= btrfs_header_nritems(cur)) {
3972 clean_tree_block(trans, root, cur);
3973 break;
3974 }
3975
3976 bytenr = btrfs_node_blockptr(cur, path->slots[*level]);
3977 blocksize = btrfs_level_size(root, *level - 1);
3978 ptr_gen = btrfs_node_ptr_generation(cur, path->slots[*level]);
3979
3980 next = read_tree_block(root, bytenr, blocksize, ptr_gen);
3981 btrfs_tree_lock(next);
3982 btrfs_set_lock_blocking(next);
3983
3984 ret = btrfs_lookup_extent_ref(trans, root, bytenr, blocksize,
3985 &refs);
3986 BUG_ON(ret);
3987 if (refs > 1) {
3988 parent = path->nodes[*level];
3989 ret = btrfs_free_extent(trans, root, bytenr,
3990 blocksize, parent->start,
3991 btrfs_header_owner(parent),
3992 btrfs_header_generation(parent),
3993 *level - 1, 1);
3994 BUG_ON(ret);
3995 path->slots[*level]++;
3996 btrfs_tree_unlock(next);
3997 free_extent_buffer(next);
3998 continue;
3999 }
4000
4001 *level = btrfs_header_level(next);
4002 path->nodes[*level] = next;
4003 path->slots[*level] = 0;
4004 path->locks[*level] = 1;
4005 cond_resched();
4006 }
4007 out:
4008 parent = path->nodes[*level + 1];
4009 bytenr = path->nodes[*level]->start;
4010 blocksize = path->nodes[*level]->len;
4011
4012 ret = btrfs_free_extent(trans, root, bytenr, blocksize,
4013 parent->start, btrfs_header_owner(parent),
4014 btrfs_header_generation(parent), *level, 1);
4015 BUG_ON(ret);
4016
4017 if (path->locks[*level]) {
4018 btrfs_tree_unlock(path->nodes[*level]);
4019 path->locks[*level] = 0;
4020 }
4021 free_extent_buffer(path->nodes[*level]);
4022 path->nodes[*level] = NULL;
4023 *level += 1;
4024 cond_resched();
4025 return 0;
4026 }
4027
4028 /*
4029 * helper for dropping snapshots. This walks back up the tree in the path
4030 * to find the first node higher up where we haven't yet gone through
4031 * all the slots
4032 */
4033 static noinline int walk_up_tree(struct btrfs_trans_handle *trans,
4034 struct btrfs_root *root,
4035 struct btrfs_path *path,
4036 int *level, int max_level)
4037 {
4038 u64 root_owner;
4039 u64 root_gen;
4040 struct btrfs_root_item *root_item = &root->root_item;
4041 int i;
4042 int slot;
4043 int ret;
4044
4045 for (i = *level; i < max_level && path->nodes[i]; i++) {
4046 slot = path->slots[i];
4047 if (slot < btrfs_header_nritems(path->nodes[i]) - 1) {
4048 struct extent_buffer *node;
4049 struct btrfs_disk_key disk_key;
4050
4051 /*
4052 * there is more work to do in this level.
4053 * Update the drop_progress marker to reflect
4054 * the work we've done so far, and then bump
4055 * the slot number
4056 */
4057 node = path->nodes[i];
4058 path->slots[i]++;
4059 *level = i;
4060 WARN_ON(*level == 0);
4061 btrfs_node_key(node, &disk_key, path->slots[i]);
4062 memcpy(&root_item->drop_progress,
4063 &disk_key, sizeof(disk_key));
4064 root_item->drop_level = i;
4065 return 0;
4066 } else {
4067 struct extent_buffer *parent;
4068
4069 /*
4070 * this whole node is done, free our reference
4071 * on it and go up one level
4072 */
4073 if (path->nodes[*level] == root->node)
4074 parent = path->nodes[*level];
4075 else
4076 parent = path->nodes[*level + 1];
4077
4078 root_owner = btrfs_header_owner(parent);
4079 root_gen = btrfs_header_generation(parent);
4080
4081 clean_tree_block(trans, root, path->nodes[*level]);
4082 ret = btrfs_free_extent(trans, root,
4083 path->nodes[*level]->start,
4084 path->nodes[*level]->len,
4085 parent->start, root_owner,
4086 root_gen, *level, 1);
4087 BUG_ON(ret);
4088 if (path->locks[*level]) {
4089 btrfs_tree_unlock(path->nodes[*level]);
4090 path->locks[*level] = 0;
4091 }
4092 free_extent_buffer(path->nodes[*level]);
4093 path->nodes[*level] = NULL;
4094 *level = i + 1;
4095 }
4096 }
4097 return 1;
4098 }
4099
4100 /*
4101 * drop the reference count on the tree rooted at 'snap'. This traverses
4102 * the tree freeing any blocks that have a ref count of zero after being
4103 * decremented.
4104 */
4105 int btrfs_drop_snapshot(struct btrfs_trans_handle *trans, struct btrfs_root
4106 *root)
4107 {
4108 int ret = 0;
4109 int wret;
4110 int level;
4111 struct btrfs_path *path;
4112 int i;
4113 int orig_level;
4114 struct btrfs_root_item *root_item = &root->root_item;
4115
4116 WARN_ON(!mutex_is_locked(&root->fs_info->drop_mutex));
4117 path = btrfs_alloc_path();
4118 BUG_ON(!path);
4119
4120 level = btrfs_header_level(root->node);
4121 orig_level = level;
4122 if (btrfs_disk_key_objectid(&root_item->drop_progress) == 0) {
4123 path->nodes[level] = root->node;
4124 extent_buffer_get(root->node);
4125 path->slots[level] = 0;
4126 } else {
4127 struct btrfs_key key;
4128 struct btrfs_disk_key found_key;
4129 struct extent_buffer *node;
4130
4131 btrfs_disk_key_to_cpu(&key, &root_item->drop_progress);
4132 level = root_item->drop_level;
4133 path->lowest_level = level;
4134 wret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
4135 if (wret < 0) {
4136 ret = wret;
4137 goto out;
4138 }
4139 node = path->nodes[level];
4140 btrfs_node_key(node, &found_key, path->slots[level]);
4141 WARN_ON(memcmp(&found_key, &root_item->drop_progress,
4142 sizeof(found_key)));
4143 /*
4144 * unlock our path, this is safe because only this
4145 * function is allowed to delete this snapshot
4146 */
4147 for (i = 0; i < BTRFS_MAX_LEVEL; i++) {
4148 if (path->nodes[i] && path->locks[i]) {
4149 path->locks[i] = 0;
4150 btrfs_tree_unlock(path->nodes[i]);
4151 }
4152 }
4153 }
4154 while (1) {
4155 wret = walk_down_tree(trans, root, path, &level);
4156 if (wret > 0)
4157 break;
4158 if (wret < 0)
4159 ret = wret;
4160
4161 wret = walk_up_tree(trans, root, path, &level,
4162 BTRFS_MAX_LEVEL);
4163 if (wret > 0)
4164 break;
4165 if (wret < 0)
4166 ret = wret;
4167 if (trans->transaction->in_commit) {
4168 ret = -EAGAIN;
4169 break;
4170 }
4171 atomic_inc(&root->fs_info->throttle_gen);
4172 wake_up(&root->fs_info->transaction_throttle);
4173 }
4174 for (i = 0; i <= orig_level; i++) {
4175 if (path->nodes[i]) {
4176 free_extent_buffer(path->nodes[i]);
4177 path->nodes[i] = NULL;
4178 }
4179 }
4180 out:
4181 btrfs_free_path(path);
4182 return ret;
4183 }
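
/*
 * A simplified sketch of how a caller is expected to drive
 * btrfs_drop_snapshot(): because it bails out with -EAGAIN once the running
 * transaction starts committing, the drop must be retried in a fresh
 * transaction until it runs to completion.  This mirrors the loop in
 * btrfs_drop_dead_reloc_roots() later in this file (which additionally keeps
 * drop_mutex held until the root item is deleted); "snap" stands for the
 * root being dropped:
 *
 *	do {
 *		trans = btrfs_join_transaction(root, 1);
 *		mutex_lock(&root->fs_info->drop_mutex);
 *		ret = btrfs_drop_snapshot(trans, snap);
 *		mutex_unlock(&root->fs_info->drop_mutex);
 *		btrfs_end_transaction(trans, root);
 *	} while (ret == -EAGAIN);
 *
 * the drop_progress key saved by walk_up_tree() is what lets each retry
 * resume where the previous pass left off.
 */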
4184
4185 int btrfs_drop_subtree(struct btrfs_trans_handle *trans,
4186 struct btrfs_root *root,
4187 struct extent_buffer *node,
4188 struct extent_buffer *parent)
4189 {
4190 struct btrfs_path *path;
4191 int level;
4192 int parent_level;
4193 int ret = 0;
4194 int wret;
4195
4196 path = btrfs_alloc_path();
4197 BUG_ON(!path);
4198
4199 BUG_ON(!btrfs_tree_locked(parent));
4200 parent_level = btrfs_header_level(parent);
4201 extent_buffer_get(parent);
4202 path->nodes[parent_level] = parent;
4203 path->slots[parent_level] = btrfs_header_nritems(parent);
4204
4205 BUG_ON(!btrfs_tree_locked(node));
4206 level = btrfs_header_level(node);
4207 extent_buffer_get(node);
4208 path->nodes[level] = node;
4209 path->slots[level] = 0;
4210
4211 while (1) {
4212 wret = walk_down_subtree(trans, root, path, &level);
4213 if (wret < 0)
4214 ret = wret;
4215 if (wret != 0)
4216 break;
4217
4218 wret = walk_up_tree(trans, root, path, &level, parent_level);
4219 if (wret < 0)
4220 ret = wret;
4221 if (wret != 0)
4222 break;
4223 }
4224
4225 btrfs_free_path(path);
4226 return ret;
4227 }
4228
4229 static unsigned long calc_ra(unsigned long start, unsigned long last,
4230 unsigned long nr)
4231 {
4232 return min(last, start + nr - 1);
4233 }
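
/*
 * calc_ra() simply clamps the readahead window so it never runs past the
 * last interesting page.  For example, with nr = 32 pages of readahead:
 * calc_ra(10, 100, 32) == 41 (the full window fits), while
 * calc_ra(90, 100, 32) == 100 (the window is truncated at the last page).
 */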
4234
4235 static noinline int relocate_inode_pages(struct inode *inode, u64 start,
4236 u64 len)
4237 {
4238 u64 page_start;
4239 u64 page_end;
4240 unsigned long first_index;
4241 unsigned long last_index;
4242 unsigned long i;
4243 struct page *page;
4244 struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
4245 struct file_ra_state *ra;
4246 struct btrfs_ordered_extent *ordered;
4247 unsigned int total_read = 0;
4248 unsigned int total_dirty = 0;
4249 int ret = 0;
4250
4251 ra = kzalloc(sizeof(*ra), GFP_NOFS);
if (!ra)
return -ENOMEM;
4252
4253 mutex_lock(&inode->i_mutex);
4254 first_index = start >> PAGE_CACHE_SHIFT;
4255 last_index = (start + len - 1) >> PAGE_CACHE_SHIFT;
4256
4257 /* make sure the dirty trick played by the caller works */
4258 ret = invalidate_inode_pages2_range(inode->i_mapping,
4259 first_index, last_index);
4260 if (ret)
4261 goto out_unlock;
4262
4263 file_ra_state_init(ra, inode->i_mapping);
4264
4265 for (i = first_index ; i <= last_index; i++) {
4266 if (total_read % ra->ra_pages == 0) {
4267 btrfs_force_ra(inode->i_mapping, ra, NULL, i,
4268 calc_ra(i, last_index, ra->ra_pages));
4269 }
4270 total_read++;
4271 again:
4272 if (((u64)i << PAGE_CACHE_SHIFT) > i_size_read(inode))
4273 BUG_ON(1);
4274 page = grab_cache_page(inode->i_mapping, i);
4275 if (!page) {
4276 ret = -ENOMEM;
4277 goto out_unlock;
4278 }
4279 if (!PageUptodate(page)) {
4280 btrfs_readpage(NULL, page);
4281 lock_page(page);
4282 if (!PageUptodate(page)) {
4283 unlock_page(page);
4284 page_cache_release(page);
4285 ret = -EIO;
4286 goto out_unlock;
4287 }
4288 }
4289 wait_on_page_writeback(page);
4290
4291 page_start = (u64)page->index << PAGE_CACHE_SHIFT;
4292 page_end = page_start + PAGE_CACHE_SIZE - 1;
4293 lock_extent(io_tree, page_start, page_end, GFP_NOFS);
4294
4295 ordered = btrfs_lookup_ordered_extent(inode, page_start);
4296 if (ordered) {
4297 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4298 unlock_page(page);
4299 page_cache_release(page);
4300 btrfs_start_ordered_extent(inode, ordered, 1);
4301 btrfs_put_ordered_extent(ordered);
4302 goto again;
4303 }
4304 set_page_extent_mapped(page);
4305
4306 if (i == first_index)
4307 set_extent_bits(io_tree, page_start, page_end,
4308 EXTENT_BOUNDARY, GFP_NOFS);
4309 btrfs_set_extent_delalloc(inode, page_start, page_end);
4310
4311 set_page_dirty(page);
4312 total_dirty++;
4313
4314 unlock_extent(io_tree, page_start, page_end, GFP_NOFS);
4315 unlock_page(page);
4316 page_cache_release(page);
4317 }
4318
4319 out_unlock:
4320 kfree(ra);
4321 mutex_unlock(&inode->i_mutex);
4322 balance_dirty_pages_ratelimited_nr(inode->i_mapping, total_dirty);
4323 return ret;
4324 }
4325
4326 static noinline int relocate_data_extent(struct inode *reloc_inode,
4327 struct btrfs_key *extent_key,
4328 u64 offset)
4329 {
4330 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4331 struct extent_map_tree *em_tree = &BTRFS_I(reloc_inode)->extent_tree;
4332 struct extent_map *em;
4333 u64 start = extent_key->objectid - offset;
4334 u64 end = start + extent_key->offset - 1;
4335
4336 em = alloc_extent_map(GFP_NOFS);
4337 BUG_ON(!em || IS_ERR(em));
4338
4339 em->start = start;
4340 em->len = extent_key->offset;
4341 em->block_len = extent_key->offset;
4342 em->block_start = extent_key->objectid;
4343 em->bdev = root->fs_info->fs_devices->latest_bdev;
4344 set_bit(EXTENT_FLAG_PINNED, &em->flags);
4345
4346 /* setup extent map to cheat btrfs_readpage */
4347 lock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4348 while (1) {
4349 int ret;
4350 spin_lock(&em_tree->lock);
4351 ret = add_extent_mapping(em_tree, em);
4352 spin_unlock(&em_tree->lock);
4353 if (ret != -EEXIST) {
4354 free_extent_map(em);
4355 break;
4356 }
4357 btrfs_drop_extent_cache(reloc_inode, start, end, 0);
4358 }
4359 unlock_extent(&BTRFS_I(reloc_inode)->io_tree, start, end, GFP_NOFS);
4360
4361 return relocate_inode_pages(reloc_inode, start, extent_key->offset);
4362 }
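
/*
 * Worked example of the mapping set up above: the caller passes the start
 * of the block group being relocated as @offset (see relocate_one_extent()
 * below), so an extent's file offset inside the reloc inode is simply its
 * bytenr minus the block group start.  If the group starts at 1G and the
 * extent sits at 1G + 8M with a length of 1M, the pinned extent map covers
 * file range [8M, 9M) with block_start pointing back at 1G + 8M: reads
 * through btrfs_readpage() fetch the old data, while the delalloc pages
 * dirtied by relocate_inode_pages() are later written out to a new location.
 */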
4363
4364 struct btrfs_ref_path {
4365 u64 extent_start;
4366 u64 nodes[BTRFS_MAX_LEVEL];
4367 u64 root_objectid;
4368 u64 root_generation;
4369 u64 owner_objectid;
4370 u32 num_refs;
4371 int lowest_level;
4372 int current_level;
4373 int shared_level;
4374
4375 struct btrfs_key node_keys[BTRFS_MAX_LEVEL];
4376 u64 new_nodes[BTRFS_MAX_LEVEL];
4377 };
4378
4379 struct disk_extent {
4380 u64 ram_bytes;
4381 u64 disk_bytenr;
4382 u64 disk_num_bytes;
4383 u64 offset;
4384 u64 num_bytes;
4385 u8 compression;
4386 u8 encryption;
4387 u16 other_encoding;
4388 };
4389
4390 static int is_cowonly_root(u64 root_objectid)
4391 {
4392 if (root_objectid == BTRFS_ROOT_TREE_OBJECTID ||
4393 root_objectid == BTRFS_EXTENT_TREE_OBJECTID ||
4394 root_objectid == BTRFS_CHUNK_TREE_OBJECTID ||
4395 root_objectid == BTRFS_DEV_TREE_OBJECTID ||
4396 root_objectid == BTRFS_TREE_LOG_OBJECTID ||
4397 root_objectid == BTRFS_CSUM_TREE_OBJECTID)
4398 return 1;
4399 return 0;
4400 }
4401
4402 static noinline int __next_ref_path(struct btrfs_trans_handle *trans,
4403 struct btrfs_root *extent_root,
4404 struct btrfs_ref_path *ref_path,
4405 int first_time)
4406 {
4407 struct extent_buffer *leaf;
4408 struct btrfs_path *path;
4409 struct btrfs_extent_ref *ref;
4410 struct btrfs_key key;
4411 struct btrfs_key found_key;
4412 u64 bytenr;
4413 u32 nritems;
4414 int level;
4415 int ret = 1;
4416
4417 path = btrfs_alloc_path();
4418 if (!path)
4419 return -ENOMEM;
4420
4421 if (first_time) {
4422 ref_path->lowest_level = -1;
4423 ref_path->current_level = -1;
4424 ref_path->shared_level = -1;
4425 goto walk_up;
4426 }
4427 walk_down:
4428 level = ref_path->current_level - 1;
4429 while (level >= -1) {
4430 u64 parent;
4431 if (level < ref_path->lowest_level)
4432 break;
4433
4434 if (level >= 0)
4435 bytenr = ref_path->nodes[level];
4436 else
4437 bytenr = ref_path->extent_start;
4438 BUG_ON(bytenr == 0);
4439
4440 parent = ref_path->nodes[level + 1];
4441 ref_path->nodes[level + 1] = 0;
4442 ref_path->current_level = level;
4443 BUG_ON(parent == 0);
4444
4445 key.objectid = bytenr;
4446 key.offset = parent + 1;
4447 key.type = BTRFS_EXTENT_REF_KEY;
4448
4449 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4450 if (ret < 0)
4451 goto out;
4452 BUG_ON(ret == 0);
4453
4454 leaf = path->nodes[0];
4455 nritems = btrfs_header_nritems(leaf);
4456 if (path->slots[0] >= nritems) {
4457 ret = btrfs_next_leaf(extent_root, path);
4458 if (ret < 0)
4459 goto out;
4460 if (ret > 0)
4461 goto next;
4462 leaf = path->nodes[0];
4463 }
4464
4465 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4466 if (found_key.objectid == bytenr &&
4467 found_key.type == BTRFS_EXTENT_REF_KEY) {
4468 if (level < ref_path->shared_level)
4469 ref_path->shared_level = level;
4470 goto found;
4471 }
4472 next:
4473 level--;
4474 btrfs_release_path(extent_root, path);
4475 cond_resched();
4476 }
4477 /* reached lowest level */
4478 ret = 1;
4479 goto out;
4480 walk_up:
4481 level = ref_path->current_level;
4482 while (level < BTRFS_MAX_LEVEL - 1) {
4483 u64 ref_objectid;
4484
4485 if (level >= 0)
4486 bytenr = ref_path->nodes[level];
4487 else
4488 bytenr = ref_path->extent_start;
4489
4490 BUG_ON(bytenr == 0);
4491
4492 key.objectid = bytenr;
4493 key.offset = 0;
4494 key.type = BTRFS_EXTENT_REF_KEY;
4495
4496 ret = btrfs_search_slot(trans, extent_root, &key, path, 0, 0);
4497 if (ret < 0)
4498 goto out;
4499
4500 leaf = path->nodes[0];
4501 nritems = btrfs_header_nritems(leaf);
4502 if (path->slots[0] >= nritems) {
4503 ret = btrfs_next_leaf(extent_root, path);
4504 if (ret < 0)
4505 goto out;
4506 if (ret > 0) {
4507 /* the extent was freed by someone */
4508 if (ref_path->lowest_level == level)
4509 goto out;
4510 btrfs_release_path(extent_root, path);
4511 goto walk_down;
4512 }
4513 leaf = path->nodes[0];
4514 }
4515
4516 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4517 if (found_key.objectid != bytenr ||
4518 found_key.type != BTRFS_EXTENT_REF_KEY) {
4519 /* the extent was freed by someone */
4520 if (ref_path->lowest_level == level) {
4521 ret = 1;
4522 goto out;
4523 }
4524 btrfs_release_path(extent_root, path);
4525 goto walk_down;
4526 }
4527 found:
4528 ref = btrfs_item_ptr(leaf, path->slots[0],
4529 struct btrfs_extent_ref);
4530 ref_objectid = btrfs_ref_objectid(leaf, ref);
4531 if (ref_objectid < BTRFS_FIRST_FREE_OBJECTID) {
4532 if (first_time) {
4533 level = (int)ref_objectid;
4534 BUG_ON(level >= BTRFS_MAX_LEVEL);
4535 ref_path->lowest_level = level;
4536 ref_path->current_level = level;
4537 ref_path->nodes[level] = bytenr;
4538 } else {
4539 WARN_ON(ref_objectid != level);
4540 }
4541 } else {
4542 WARN_ON(level != -1);
4543 }
4544 first_time = 0;
4545
4546 if (ref_path->lowest_level == level) {
4547 ref_path->owner_objectid = ref_objectid;
4548 ref_path->num_refs = btrfs_ref_num_refs(leaf, ref);
4549 }
4550
4551 /*
4552 * the block is a tree root or the block isn't in a reference
4553 * counted tree.
4554 */
4555 if (found_key.objectid == found_key.offset ||
4556 is_cowonly_root(btrfs_ref_root(leaf, ref))) {
4557 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4558 ref_path->root_generation =
4559 btrfs_ref_generation(leaf, ref);
4560 if (level < 0) {
4561 /* special reference from the tree log */
4562 ref_path->nodes[0] = found_key.offset;
4563 ref_path->current_level = 0;
4564 }
4565 ret = 0;
4566 goto out;
4567 }
4568
4569 level++;
4570 BUG_ON(ref_path->nodes[level] != 0);
4571 ref_path->nodes[level] = found_key.offset;
4572 ref_path->current_level = level;
4573
4574 /*
4575 * the reference was created in the running transaction,
4576 * no need to continue walking up.
4577 */
4578 if (btrfs_ref_generation(leaf, ref) == trans->transid) {
4579 ref_path->root_objectid = btrfs_ref_root(leaf, ref);
4580 ref_path->root_generation =
4581 btrfs_ref_generation(leaf, ref);
4582 ret = 0;
4583 goto out;
4584 }
4585
4586 btrfs_release_path(extent_root, path);
4587 cond_resched();
4588 }
4589 /* reached max tree level, but no tree root found. */
4590 BUG();
4591 out:
4592 btrfs_free_path(path);
4593 return ret;
4594 }
4595
4596 static int btrfs_first_ref_path(struct btrfs_trans_handle *trans,
4597 struct btrfs_root *extent_root,
4598 struct btrfs_ref_path *ref_path,
4599 u64 extent_start)
4600 {
4601 memset(ref_path, 0, sizeof(*ref_path));
4602 ref_path->extent_start = extent_start;
4603
4604 return __next_ref_path(trans, extent_root, ref_path, 1);
4605 }
4606
4607 static int btrfs_next_ref_path(struct btrfs_trans_handle *trans,
4608 struct btrfs_root *extent_root,
4609 struct btrfs_ref_path *ref_path)
4610 {
4611 return __next_ref_path(trans, extent_root, ref_path, 0);
4612 }
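
/*
 * The two wrappers above form a simple iterator over all reference paths
 * of an extent.  The expected calling pattern, used by relocate_one_extent()
 * below, is in sketch form:
 *
 *	ret = btrfs_first_ref_path(trans, extent_root, ref_path, bytenr);
 *	while (ret == 0) {
 *		/∗ ref_path->root_objectid / ->nodes[] describe one path ∗/
 *		ret = btrfs_next_ref_path(trans, extent_root, ref_path);
 *	}
 *
 * ret > 0 means all paths have been visited, ret < 0 is an error.
 */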
4613
4614 static noinline int get_new_locations(struct inode *reloc_inode,
4615 struct btrfs_key *extent_key,
4616 u64 offset, int no_fragment,
4617 struct disk_extent **extents,
4618 int *nr_extents)
4619 {
4620 struct btrfs_root *root = BTRFS_I(reloc_inode)->root;
4621 struct btrfs_path *path;
4622 struct btrfs_file_extent_item *fi;
4623 struct extent_buffer *leaf;
4624 struct disk_extent *exts = *extents;
4625 struct btrfs_key found_key;
4626 u64 cur_pos;
4627 u64 last_byte;
4628 u32 nritems;
4629 int nr = 0;
4630 int max = *nr_extents;
4631 int ret;
4632
4633 WARN_ON(!no_fragment && *extents);
4634 if (!exts) {
4635 max = 1;
4636 exts = kmalloc(sizeof(*exts) * max, GFP_NOFS);
4637 if (!exts)
4638 return -ENOMEM;
4639 }
4640
4641 path = btrfs_alloc_path();
4642 BUG_ON(!path);
4643
4644 cur_pos = extent_key->objectid - offset;
4645 last_byte = extent_key->objectid + extent_key->offset;
4646 ret = btrfs_lookup_file_extent(NULL, root, path, reloc_inode->i_ino,
4647 cur_pos, 0);
4648 if (ret < 0)
4649 goto out;
4650 if (ret > 0) {
4651 ret = -ENOENT;
4652 goto out;
4653 }
4654
4655 while (1) {
4656 leaf = path->nodes[0];
4657 nritems = btrfs_header_nritems(leaf);
4658 if (path->slots[0] >= nritems) {
4659 ret = btrfs_next_leaf(root, path);
4660 if (ret < 0)
4661 goto out;
4662 if (ret > 0)
4663 break;
4664 leaf = path->nodes[0];
4665 }
4666
4667 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
4668 if (found_key.offset != cur_pos ||
4669 found_key.type != BTRFS_EXTENT_DATA_KEY ||
4670 found_key.objectid != reloc_inode->i_ino)
4671 break;
4672
4673 fi = btrfs_item_ptr(leaf, path->slots[0],
4674 struct btrfs_file_extent_item);
4675 if (btrfs_file_extent_type(leaf, fi) !=
4676 BTRFS_FILE_EXTENT_REG ||
4677 btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
4678 break;
4679
4680 if (nr == max) {
4681 struct disk_extent *old = exts;
4682 max *= 2;
4683 exts = kzalloc(sizeof(*exts) * max, GFP_NOFS);
if (!exts) {
/* bail out cleanly if the bigger array can't be allocated */
if (old != *extents)
kfree(old);
ret = -ENOMEM;
goto out;
}
4684 memcpy(exts, old, sizeof(*exts) * nr);
4685 if (old != *extents)
4686 kfree(old);
4687 }
4688
4689 exts[nr].disk_bytenr =
4690 btrfs_file_extent_disk_bytenr(leaf, fi);
4691 exts[nr].disk_num_bytes =
4692 btrfs_file_extent_disk_num_bytes(leaf, fi);
4693 exts[nr].offset = btrfs_file_extent_offset(leaf, fi);
4694 exts[nr].num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4695 exts[nr].ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
4696 exts[nr].compression = btrfs_file_extent_compression(leaf, fi);
4697 exts[nr].encryption = btrfs_file_extent_encryption(leaf, fi);
4698 exts[nr].other_encoding = btrfs_file_extent_other_encoding(leaf,
4699 fi);
4700 BUG_ON(exts[nr].offset > 0);
4701 BUG_ON(exts[nr].compression || exts[nr].encryption);
4702 BUG_ON(exts[nr].num_bytes != exts[nr].disk_num_bytes);
4703
4704 cur_pos += exts[nr].num_bytes;
4705 nr++;
4706
4707 if (cur_pos + offset >= last_byte)
4708 break;
4709
4710 if (no_fragment) {
4711 ret = 1;
4712 goto out;
4713 }
4714 path->slots[0]++;
4715 }
4716
4717 BUG_ON(cur_pos + offset > last_byte);
4718 if (cur_pos + offset < last_byte) {
4719 ret = -ENOENT;
4720 goto out;
4721 }
4722 ret = 0;
4723 out:
4724 btrfs_free_path(path);
4725 if (ret) {
4726 if (exts != *extents)
4727 kfree(exts);
4728 } else {
4729 *extents = exts;
4730 *nr_extents = nr;
4731 }
4732 return ret;
4733 }
4734
4735 static noinline int replace_one_extent(struct btrfs_trans_handle *trans,
4736 struct btrfs_root *root,
4737 struct btrfs_path *path,
4738 struct btrfs_key *extent_key,
4739 struct btrfs_key *leaf_key,
4740 struct btrfs_ref_path *ref_path,
4741 struct disk_extent *new_extents,
4742 int nr_extents)
4743 {
4744 struct extent_buffer *leaf;
4745 struct btrfs_file_extent_item *fi;
4746 struct inode *inode = NULL;
4747 struct btrfs_key key;
4748 u64 lock_start = 0;
4749 u64 lock_end = 0;
4750 u64 num_bytes;
4751 u64 ext_offset;
4752 u64 search_end = (u64)-1;
4753 u32 nritems;
4754 int nr_scanned = 0;
4755 int extent_locked = 0;
4756 int extent_type;
4757 int ret;
4758
4759 memcpy(&key, leaf_key, sizeof(key));
4760 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4761 if (key.objectid < ref_path->owner_objectid ||
4762 (key.objectid == ref_path->owner_objectid &&
4763 key.type < BTRFS_EXTENT_DATA_KEY)) {
4764 key.objectid = ref_path->owner_objectid;
4765 key.type = BTRFS_EXTENT_DATA_KEY;
4766 key.offset = 0;
4767 }
4768 }
4769
4770 while (1) {
4771 ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
4772 if (ret < 0)
4773 goto out;
4774
4775 leaf = path->nodes[0];
4776 nritems = btrfs_header_nritems(leaf);
4777 next:
4778 if (extent_locked && ret > 0) {
4779 /*
4780 * the file extent item was modified by someone
4781 * before the extent got locked.
4782 */
4783 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4784 lock_end, GFP_NOFS);
4785 extent_locked = 0;
4786 }
4787
4788 if (path->slots[0] >= nritems) {
4789 if (++nr_scanned > 2)
4790 break;
4791
4792 BUG_ON(extent_locked);
4793 ret = btrfs_next_leaf(root, path);
4794 if (ret < 0)
4795 goto out;
4796 if (ret > 0)
4797 break;
4798 leaf = path->nodes[0];
4799 nritems = btrfs_header_nritems(leaf);
4800 }
4801
4802 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
4803
4804 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS) {
4805 if ((key.objectid > ref_path->owner_objectid) ||
4806 (key.objectid == ref_path->owner_objectid &&
4807 key.type > BTRFS_EXTENT_DATA_KEY) ||
4808 key.offset >= search_end)
4809 break;
4810 }
4811
4812 if (inode && key.objectid != inode->i_ino) {
4813 BUG_ON(extent_locked);
4814 btrfs_release_path(root, path);
4815 mutex_unlock(&inode->i_mutex);
4816 iput(inode);
4817 inode = NULL;
4818 continue;
4819 }
4820
4821 if (key.type != BTRFS_EXTENT_DATA_KEY) {
4822 path->slots[0]++;
4823 ret = 1;
4824 goto next;
4825 }
4826 fi = btrfs_item_ptr(leaf, path->slots[0],
4827 struct btrfs_file_extent_item);
4828 extent_type = btrfs_file_extent_type(leaf, fi);
4829 if ((extent_type != BTRFS_FILE_EXTENT_REG &&
4830 extent_type != BTRFS_FILE_EXTENT_PREALLOC) ||
4831 (btrfs_file_extent_disk_bytenr(leaf, fi) !=
4832 extent_key->objectid)) {
4833 path->slots[0]++;
4834 ret = 1;
4835 goto next;
4836 }
4837
4838 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
4839 ext_offset = btrfs_file_extent_offset(leaf, fi);
4840
4841 if (search_end == (u64)-1) {
4842 search_end = key.offset - ext_offset +
4843 btrfs_file_extent_ram_bytes(leaf, fi);
4844 }
4845
4846 if (!extent_locked) {
4847 lock_start = key.offset;
4848 lock_end = lock_start + num_bytes - 1;
4849 } else {
4850 if (lock_start > key.offset ||
4851 lock_end + 1 < key.offset + num_bytes) {
4852 unlock_extent(&BTRFS_I(inode)->io_tree,
4853 lock_start, lock_end, GFP_NOFS);
4854 extent_locked = 0;
4855 }
4856 }
4857
4858 if (!inode) {
4859 btrfs_release_path(root, path);
4860
4861 inode = btrfs_iget_locked(root->fs_info->sb,
4862 key.objectid, root);
4863 if (inode->i_state & I_NEW) {
4864 BTRFS_I(inode)->root = root;
4865 BTRFS_I(inode)->location.objectid =
4866 key.objectid;
4867 BTRFS_I(inode)->location.type =
4868 BTRFS_INODE_ITEM_KEY;
4869 BTRFS_I(inode)->location.offset = 0;
4870 btrfs_read_locked_inode(inode);
4871 unlock_new_inode(inode);
4872 }
4873 /*
4874 * some code calls btrfs_commit_transaction while
4875 * holding the i_mutex, so we can't use mutex_lock
4876 * here.
4877 */
4878 if (is_bad_inode(inode) ||
4879 !mutex_trylock(&inode->i_mutex)) {
4880 iput(inode);
4881 inode = NULL;
4882 key.offset = (u64)-1;
4883 goto skip;
4884 }
4885 }
4886
4887 if (!extent_locked) {
4888 struct btrfs_ordered_extent *ordered;
4889
4890 btrfs_release_path(root, path);
4891
4892 lock_extent(&BTRFS_I(inode)->io_tree, lock_start,
4893 lock_end, GFP_NOFS);
4894 ordered = btrfs_lookup_first_ordered_extent(inode,
4895 lock_end);
4896 if (ordered &&
4897 ordered->file_offset <= lock_end &&
4898 ordered->file_offset + ordered->len > lock_start) {
4899 unlock_extent(&BTRFS_I(inode)->io_tree,
4900 lock_start, lock_end, GFP_NOFS);
4901 btrfs_start_ordered_extent(inode, ordered, 1);
4902 btrfs_put_ordered_extent(ordered);
4903 key.offset += num_bytes;
4904 goto skip;
4905 }
4906 if (ordered)
4907 btrfs_put_ordered_extent(ordered);
4908
4909 extent_locked = 1;
4910 continue;
4911 }
4912
4913 if (nr_extents == 1) {
4914 /* update extent pointer in place */
4915 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4916 new_extents[0].disk_bytenr);
4917 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4918 new_extents[0].disk_num_bytes);
4919 btrfs_mark_buffer_dirty(leaf);
4920
4921 btrfs_drop_extent_cache(inode, key.offset,
4922 key.offset + num_bytes - 1, 0);
4923
4924 ret = btrfs_inc_extent_ref(trans, root,
4925 new_extents[0].disk_bytenr,
4926 new_extents[0].disk_num_bytes,
4927 leaf->start,
4928 root->root_key.objectid,
4929 trans->transid,
4930 key.objectid);
4931 BUG_ON(ret);
4932
4933 ret = btrfs_free_extent(trans, root,
4934 extent_key->objectid,
4935 extent_key->offset,
4936 leaf->start,
4937 btrfs_header_owner(leaf),
4938 btrfs_header_generation(leaf),
4939 key.objectid, 0);
4940 BUG_ON(ret);
4941
4942 btrfs_release_path(root, path);
4943 key.offset += num_bytes;
4944 } else {
4945 BUG_ON(1);
4946 #if 0
4947 u64 alloc_hint;
4948 u64 extent_len;
4949 int i;
4950 /*
4951 * drop the old extent pointer first, then insert the
4952 * new pointers one by one
4953 */
4954 btrfs_release_path(root, path);
4955 ret = btrfs_drop_extents(trans, root, inode, key.offset,
4956 key.offset + num_bytes,
4957 key.offset, &alloc_hint);
4958 BUG_ON(ret);
4959
4960 for (i = 0; i < nr_extents; i++) {
4961 if (ext_offset >= new_extents[i].num_bytes) {
4962 ext_offset -= new_extents[i].num_bytes;
4963 continue;
4964 }
4965 extent_len = min(new_extents[i].num_bytes -
4966 ext_offset, num_bytes);
4967
4968 ret = btrfs_insert_empty_item(trans, root,
4969 path, &key,
4970 sizeof(*fi));
4971 BUG_ON(ret);
4972
4973 leaf = path->nodes[0];
4974 fi = btrfs_item_ptr(leaf, path->slots[0],
4975 struct btrfs_file_extent_item);
4976 btrfs_set_file_extent_generation(leaf, fi,
4977 trans->transid);
4978 btrfs_set_file_extent_type(leaf, fi,
4979 BTRFS_FILE_EXTENT_REG);
4980 btrfs_set_file_extent_disk_bytenr(leaf, fi,
4981 new_extents[i].disk_bytenr);
4982 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
4983 new_extents[i].disk_num_bytes);
4984 btrfs_set_file_extent_ram_bytes(leaf, fi,
4985 new_extents[i].ram_bytes);
4986
4987 btrfs_set_file_extent_compression(leaf, fi,
4988 new_extents[i].compression);
4989 btrfs_set_file_extent_encryption(leaf, fi,
4990 new_extents[i].encryption);
4991 btrfs_set_file_extent_other_encoding(leaf, fi,
4992 new_extents[i].other_encoding);
4993
4994 btrfs_set_file_extent_num_bytes(leaf, fi,
4995 extent_len);
4996 ext_offset += new_extents[i].offset;
4997 btrfs_set_file_extent_offset(leaf, fi,
4998 ext_offset);
4999 btrfs_mark_buffer_dirty(leaf);
5000
5001 btrfs_drop_extent_cache(inode, key.offset,
5002 key.offset + extent_len - 1, 0);
5003
5004 ret = btrfs_inc_extent_ref(trans, root,
5005 new_extents[i].disk_bytenr,
5006 new_extents[i].disk_num_bytes,
5007 leaf->start,
5008 root->root_key.objectid,
5009 trans->transid, key.objectid);
5010 BUG_ON(ret);
5011 btrfs_release_path(root, path);
5012
5013 inode_add_bytes(inode, extent_len);
5014
5015 ext_offset = 0;
5016 num_bytes -= extent_len;
5017 key.offset += extent_len;
5018
5019 if (num_bytes == 0)
5020 break;
5021 }
5022 BUG_ON(i >= nr_extents);
5023 #endif
5024 }
5025
5026 if (extent_locked) {
5027 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5028 lock_end, GFP_NOFS);
5029 extent_locked = 0;
5030 }
5031 skip:
5032 if (ref_path->owner_objectid != BTRFS_MULTIPLE_OBJECTIDS &&
5033 key.offset >= search_end)
5034 break;
5035
5036 cond_resched();
5037 }
5038 ret = 0;
5039 out:
5040 btrfs_release_path(root, path);
5041 if (inode) {
5042 mutex_unlock(&inode->i_mutex);
5043 if (extent_locked) {
5044 unlock_extent(&BTRFS_I(inode)->io_tree, lock_start,
5045 lock_end, GFP_NOFS);
5046 }
5047 iput(inode);
5048 }
5049 return ret;
5050 }
5051
5052 int btrfs_reloc_tree_cache_ref(struct btrfs_trans_handle *trans,
5053 struct btrfs_root *root,
5054 struct extent_buffer *buf, u64 orig_start)
5055 {
5056 int level;
5057 int ret;
5058
5059 BUG_ON(btrfs_header_generation(buf) != trans->transid);
5060 BUG_ON(root->root_key.objectid != BTRFS_TREE_RELOC_OBJECTID);
5061
5062 level = btrfs_header_level(buf);
5063 if (level == 0) {
5064 struct btrfs_leaf_ref *ref;
5065 struct btrfs_leaf_ref *orig_ref;
5066
5067 orig_ref = btrfs_lookup_leaf_ref(root, orig_start);
5068 if (!orig_ref)
5069 return -ENOENT;
5070
5071 ref = btrfs_alloc_leaf_ref(root, orig_ref->nritems);
5072 if (!ref) {
5073 btrfs_free_leaf_ref(root, orig_ref);
5074 return -ENOMEM;
5075 }
5076
5077 ref->nritems = orig_ref->nritems;
5078 memcpy(ref->extents, orig_ref->extents,
5079 sizeof(ref->extents[0]) * ref->nritems);
5080
5081 btrfs_free_leaf_ref(root, orig_ref);
5082
5083 ref->root_gen = trans->transid;
5084 ref->bytenr = buf->start;
5085 ref->owner = btrfs_header_owner(buf);
5086 ref->generation = btrfs_header_generation(buf);
5087
5088 ret = btrfs_add_leaf_ref(root, ref, 0);
5089 WARN_ON(ret);
5090 btrfs_free_leaf_ref(root, ref);
5091 }
5092 return 0;
5093 }
5094
5095 static noinline int invalidate_extent_cache(struct btrfs_root *root,
5096 struct extent_buffer *leaf,
5097 struct btrfs_block_group_cache *group,
5098 struct btrfs_root *target_root)
5099 {
5100 struct btrfs_key key;
5101 struct inode *inode = NULL;
5102 struct btrfs_file_extent_item *fi;
5103 u64 num_bytes;
5104 u64 skip_objectid = 0;
5105 u32 nritems;
5106 u32 i;
5107
5108 nritems = btrfs_header_nritems(leaf);
5109 for (i = 0; i < nritems; i++) {
5110 btrfs_item_key_to_cpu(leaf, &key, i);
5111 if (key.objectid == skip_objectid ||
5112 key.type != BTRFS_EXTENT_DATA_KEY)
5113 continue;
5114 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
5115 if (btrfs_file_extent_type(leaf, fi) ==
5116 BTRFS_FILE_EXTENT_INLINE)
5117 continue;
5118 if (btrfs_file_extent_disk_bytenr(leaf, fi) == 0)
5119 continue;
5120 if (!inode || inode->i_ino != key.objectid) {
5121 iput(inode);
5122 inode = btrfs_ilookup(target_root->fs_info->sb,
5123 key.objectid, target_root, 1);
5124 }
5125 if (!inode) {
5126 skip_objectid = key.objectid;
5127 continue;
5128 }
5129 num_bytes = btrfs_file_extent_num_bytes(leaf, fi);
5130
5131 lock_extent(&BTRFS_I(inode)->io_tree, key.offset,
5132 key.offset + num_bytes - 1, GFP_NOFS);
5133 btrfs_drop_extent_cache(inode, key.offset,
5134 key.offset + num_bytes - 1, 1);
5135 unlock_extent(&BTRFS_I(inode)->io_tree, key.offset,
5136 key.offset + num_bytes - 1, GFP_NOFS);
5137 cond_resched();
5138 }
5139 iput(inode);
5140 return 0;
5141 }
5142
5143 static noinline int replace_extents_in_leaf(struct btrfs_trans_handle *trans,
5144 struct btrfs_root *root,
5145 struct extent_buffer *leaf,
5146 struct btrfs_block_group_cache *group,
5147 struct inode *reloc_inode)
5148 {
5149 struct btrfs_key key;
5150 struct btrfs_key extent_key;
5151 struct btrfs_file_extent_item *fi;
5152 struct btrfs_leaf_ref *ref;
5153 struct disk_extent *new_extent;
5154 u64 bytenr;
5155 u64 num_bytes;
5156 u32 nritems;
5157 u32 i;
5158 int ext_index;
5159 int nr_extent;
5160 int ret;
5161
5162 new_extent = kmalloc(sizeof(*new_extent), GFP_NOFS);
5163 BUG_ON(!new_extent);
5164
5165 ref = btrfs_lookup_leaf_ref(root, leaf->start);
5166 BUG_ON(!ref);
5167
5168 ext_index = -1;
5169 nritems = btrfs_header_nritems(leaf);
5170 for (i = 0; i < nritems; i++) {
5171 btrfs_item_key_to_cpu(leaf, &key, i);
5172 if (btrfs_key_type(&key) != BTRFS_EXTENT_DATA_KEY)
5173 continue;
5174 fi = btrfs_item_ptr(leaf, i, struct btrfs_file_extent_item);
5175 if (btrfs_file_extent_type(leaf, fi) ==
5176 BTRFS_FILE_EXTENT_INLINE)
5177 continue;
5178 bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
5179 num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
5180 if (bytenr == 0)
5181 continue;
5182
5183 ext_index++;
5184 if (bytenr >= group->key.objectid + group->key.offset ||
5185 bytenr + num_bytes <= group->key.objectid)
5186 continue;
5187
5188 extent_key.objectid = bytenr;
5189 extent_key.offset = num_bytes;
5190 extent_key.type = BTRFS_EXTENT_ITEM_KEY;
5191 nr_extent = 1;
5192 ret = get_new_locations(reloc_inode, &extent_key,
5193 group->key.objectid, 1,
5194 &new_extent, &nr_extent);
5195 if (ret > 0)
5196 continue;
5197 BUG_ON(ret < 0);
5198
5199 BUG_ON(ref->extents[ext_index].bytenr != bytenr);
5200 BUG_ON(ref->extents[ext_index].num_bytes != num_bytes);
5201 ref->extents[ext_index].bytenr = new_extent->disk_bytenr;
5202 ref->extents[ext_index].num_bytes = new_extent->disk_num_bytes;
5203
5204 btrfs_set_file_extent_disk_bytenr(leaf, fi,
5205 new_extent->disk_bytenr);
5206 btrfs_set_file_extent_disk_num_bytes(leaf, fi,
5207 new_extent->disk_num_bytes);
5208 btrfs_mark_buffer_dirty(leaf);
5209
5210 ret = btrfs_inc_extent_ref(trans, root,
5211 new_extent->disk_bytenr,
5212 new_extent->disk_num_bytes,
5213 leaf->start,
5214 root->root_key.objectid,
5215 trans->transid, key.objectid);
5216 BUG_ON(ret);
5217 ret = btrfs_free_extent(trans, root,
5218 bytenr, num_bytes, leaf->start,
5219 btrfs_header_owner(leaf),
5220 btrfs_header_generation(leaf),
5221 key.objectid, 0);
5222 BUG_ON(ret);
5223 cond_resched();
5224 }
5225 kfree(new_extent);
5226 BUG_ON(ext_index + 1 != ref->nritems);
5227 btrfs_free_leaf_ref(root, ref);
5228 return 0;
5229 }
5230
5231 int btrfs_free_reloc_root(struct btrfs_trans_handle *trans,
5232 struct btrfs_root *root)
5233 {
5234 struct btrfs_root *reloc_root;
5235 int ret;
5236
5237 if (root->reloc_root) {
5238 reloc_root = root->reloc_root;
5239 root->reloc_root = NULL;
5240 list_add(&reloc_root->dead_list,
5241 &root->fs_info->dead_reloc_roots);
5242
5243 btrfs_set_root_bytenr(&reloc_root->root_item,
5244 reloc_root->node->start);
5245 btrfs_set_root_level(&reloc_root->root_item,
5246 btrfs_header_level(reloc_root->node));
5247 memset(&reloc_root->root_item.drop_progress, 0,
5248 sizeof(struct btrfs_disk_key));
5249 reloc_root->root_item.drop_level = 0;
5250
5251 ret = btrfs_update_root(trans, root->fs_info->tree_root,
5252 &reloc_root->root_key,
5253 &reloc_root->root_item);
5254 BUG_ON(ret);
5255 }
5256 return 0;
5257 }
5258
5259 int btrfs_drop_dead_reloc_roots(struct btrfs_root *root)
5260 {
5261 struct btrfs_trans_handle *trans;
5262 struct btrfs_root *reloc_root;
5263 struct btrfs_root *prev_root = NULL;
5264 struct list_head dead_roots;
5265 int ret;
5266 unsigned long nr;
5267
5268 INIT_LIST_HEAD(&dead_roots);
5269 list_splice_init(&root->fs_info->dead_reloc_roots, &dead_roots);
5270
5271 while (!list_empty(&dead_roots)) {
5272 reloc_root = list_entry(dead_roots.prev,
5273 struct btrfs_root, dead_list);
5274 list_del_init(&reloc_root->dead_list);
5275
5276 BUG_ON(reloc_root->commit_root != NULL);
5277 while (1) {
5278 trans = btrfs_join_transaction(root, 1);
5279 BUG_ON(!trans);
5280
5281 mutex_lock(&root->fs_info->drop_mutex);
5282 ret = btrfs_drop_snapshot(trans, reloc_root);
5283 if (ret != -EAGAIN)
5284 break;
5285 mutex_unlock(&root->fs_info->drop_mutex);
5286
5287 nr = trans->blocks_used;
5288 ret = btrfs_end_transaction(trans, root);
5289 BUG_ON(ret);
5290 btrfs_btree_balance_dirty(root, nr);
5291 }
5292
5293 free_extent_buffer(reloc_root->node);
5294
5295 ret = btrfs_del_root(trans, root->fs_info->tree_root,
5296 &reloc_root->root_key);
5297 BUG_ON(ret);
5298 mutex_unlock(&root->fs_info->drop_mutex);
5299
5300 nr = trans->blocks_used;
5301 ret = btrfs_end_transaction(trans, root);
5302 BUG_ON(ret);
5303 btrfs_btree_balance_dirty(root, nr);
5304
5305 kfree(prev_root);
5306 prev_root = reloc_root;
5307 }
5308 if (prev_root) {
5309 btrfs_remove_leaf_refs(prev_root, (u64)-1, 0);
5310 kfree(prev_root);
5311 }
5312 return 0;
5313 }
5314
5315 int btrfs_add_dead_reloc_root(struct btrfs_root *root)
5316 {
5317 list_add(&root->dead_list, &root->fs_info->dead_reloc_roots);
5318 return 0;
5319 }
5320
5321 int btrfs_cleanup_reloc_trees(struct btrfs_root *root)
5322 {
5323 struct btrfs_root *reloc_root;
5324 struct btrfs_trans_handle *trans;
5325 struct btrfs_key location;
5326 int found;
5327 int ret;
5328
5329 mutex_lock(&root->fs_info->tree_reloc_mutex);
5330 ret = btrfs_find_dead_roots(root, BTRFS_TREE_RELOC_OBJECTID, NULL);
5331 BUG_ON(ret);
5332 found = !list_empty(&root->fs_info->dead_reloc_roots);
5333 mutex_unlock(&root->fs_info->tree_reloc_mutex);
5334
5335 if (found) {
5336 trans = btrfs_start_transaction(root, 1);
5337 BUG_ON(!trans);
5338 ret = btrfs_commit_transaction(trans, root);
5339 BUG_ON(ret);
5340 }
5341
5342 location.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5343 location.offset = (u64)-1;
5344 location.type = BTRFS_ROOT_ITEM_KEY;
5345
5346 reloc_root = btrfs_read_fs_root_no_name(root->fs_info, &location);
5347 BUG_ON(!reloc_root);
5348 btrfs_orphan_cleanup(reloc_root);
5349 return 0;
5350 }
5351
5352 static noinline int init_reloc_tree(struct btrfs_trans_handle *trans,
5353 struct btrfs_root *root)
5354 {
5355 struct btrfs_root *reloc_root;
5356 struct extent_buffer *eb;
5357 struct btrfs_root_item *root_item;
5358 struct btrfs_key root_key;
5359 int ret;
5360
5361 BUG_ON(!root->ref_cows);
5362 if (root->reloc_root)
5363 return 0;
5364
5365 root_item = kmalloc(sizeof(*root_item), GFP_NOFS);
5366 BUG_ON(!root_item);
5367
5368 ret = btrfs_copy_root(trans, root, root->commit_root,
5369 &eb, BTRFS_TREE_RELOC_OBJECTID);
5370 BUG_ON(ret);
5371
5372 root_key.objectid = BTRFS_TREE_RELOC_OBJECTID;
5373 root_key.offset = root->root_key.objectid;
5374 root_key.type = BTRFS_ROOT_ITEM_KEY;
5375
5376 memcpy(root_item, &root->root_item, sizeof(*root_item));
5377 btrfs_set_root_refs(root_item, 0);
5378 btrfs_set_root_bytenr(root_item, eb->start);
5379 btrfs_set_root_level(root_item, btrfs_header_level(eb));
5380 btrfs_set_root_generation(root_item, trans->transid);
5381
5382 btrfs_tree_unlock(eb);
5383 free_extent_buffer(eb);
5384
5385 ret = btrfs_insert_root(trans, root->fs_info->tree_root,
5386 &root_key, root_item);
5387 BUG_ON(ret);
5388 kfree(root_item);
5389
5390 reloc_root = btrfs_read_fs_root_no_radix(root->fs_info->tree_root,
5391 &root_key);
5392 BUG_ON(!reloc_root);
5393 reloc_root->last_trans = trans->transid;
5394 reloc_root->commit_root = NULL;
5395 reloc_root->ref_tree = &root->fs_info->reloc_ref_tree;
5396
5397 root->reloc_root = reloc_root;
5398 return 0;
5399 }
5400
5401 /*
5402 * Core function of space balance.
5403 *
5404 * The idea is to use reloc trees to relocate tree blocks in reference
5405 * counted roots. There is one reloc tree for each subvol, and all
5406 * reloc trees share the same root key objectid. Reloc trees are
5407 * snapshots of the latest committed roots of subvols (root->commit_root).
5408 *
5409 * To relocate a tree block referenced by a subvol, there are two steps:
5410 * COW the block through the subvol's reloc tree, then update the block
5411 * pointer in the subvol to point to the new block. Since all reloc trees
5412 * share the same root key objectid, special handling for tree blocks
5413 * owned by them is easy. Once a tree block has been COWed in one reloc
5414 * tree, we can reuse the resulting new block directly when the same
5415 * block needs to be COWed again through another reloc tree. In this way,
5416 * relocated tree blocks are shared between reloc trees, so they are also
5417 * shared between subvols.
5418 */
5419 static noinline int relocate_one_path(struct btrfs_trans_handle *trans,
5420 struct btrfs_root *root,
5421 struct btrfs_path *path,
5422 struct btrfs_key *first_key,
5423 struct btrfs_ref_path *ref_path,
5424 struct btrfs_block_group_cache *group,
5425 struct inode *reloc_inode)
5426 {
5427 struct btrfs_root *reloc_root;
5428 struct extent_buffer *eb = NULL;
5429 struct btrfs_key *keys;
5430 u64 *nodes;
5431 int level;
5432 int shared_level;
5433 int lowest_level = 0;
5434 int ret;
5435
5436 if (ref_path->owner_objectid < BTRFS_FIRST_FREE_OBJECTID)
5437 lowest_level = ref_path->owner_objectid;
5438
5439 if (!root->ref_cows) {
5440 path->lowest_level = lowest_level;
5441 ret = btrfs_search_slot(trans, root, first_key, path, 0, 1);
5442 BUG_ON(ret < 0);
5443 path->lowest_level = 0;
5444 btrfs_release_path(root, path);
5445 return 0;
5446 }
5447
5448 mutex_lock(&root->fs_info->tree_reloc_mutex);
5449 ret = init_reloc_tree(trans, root);
5450 BUG_ON(ret);
5451 reloc_root = root->reloc_root;
5452
5453 shared_level = ref_path->shared_level;
5454 ref_path->shared_level = BTRFS_MAX_LEVEL - 1;
5455
5456 keys = ref_path->node_keys;
5457 nodes = ref_path->new_nodes;
5458 memset(&keys[shared_level + 1], 0,
5459 sizeof(*keys) * (BTRFS_MAX_LEVEL - shared_level - 1));
5460 memset(&nodes[shared_level + 1], 0,
5461 sizeof(*nodes) * (BTRFS_MAX_LEVEL - shared_level - 1));
5462
5463 if (nodes[lowest_level] == 0) {
5464 path->lowest_level = lowest_level;
5465 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5466 0, 1);
5467 BUG_ON(ret);
5468 for (level = lowest_level; level < BTRFS_MAX_LEVEL; level++) {
5469 eb = path->nodes[level];
5470 if (!eb || eb == reloc_root->node)
5471 break;
5472 nodes[level] = eb->start;
5473 if (level == 0)
5474 btrfs_item_key_to_cpu(eb, &keys[level], 0);
5475 else
5476 btrfs_node_key_to_cpu(eb, &keys[level], 0);
5477 }
5478 if (nodes[0] &&
5479 ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5480 eb = path->nodes[0];
5481 ret = replace_extents_in_leaf(trans, reloc_root, eb,
5482 group, reloc_inode);
5483 BUG_ON(ret);
5484 }
5485 btrfs_release_path(reloc_root, path);
5486 } else {
5487 ret = btrfs_merge_path(trans, reloc_root, keys, nodes,
5488 lowest_level);
5489 BUG_ON(ret);
5490 }
5491
5492 /*
5493 * replace tree blocks in the fs tree with tree blocks in
5494 * the reloc tree.
5495 */
5496 ret = btrfs_merge_path(trans, root, keys, nodes, lowest_level);
5497 BUG_ON(ret < 0);
5498
5499 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5500 ret = btrfs_search_slot(trans, reloc_root, first_key, path,
5501 0, 0);
5502 BUG_ON(ret);
5503 extent_buffer_get(path->nodes[0]);
5504 eb = path->nodes[0];
5505 btrfs_release_path(reloc_root, path);
5506 ret = invalidate_extent_cache(reloc_root, eb, group, root);
5507 BUG_ON(ret);
5508 free_extent_buffer(eb);
5509 }
5510
5511 mutex_unlock(&root->fs_info->tree_reloc_mutex);
5512 path->lowest_level = 0;
5513 return 0;
5514 }
5515
5516 static noinline int relocate_tree_block(struct btrfs_trans_handle *trans,
5517 struct btrfs_root *root,
5518 struct btrfs_path *path,
5519 struct btrfs_key *first_key,
5520 struct btrfs_ref_path *ref_path)
5521 {
5522 int ret;
5523
5524 ret = relocate_one_path(trans, root, path, first_key,
5525 ref_path, NULL, NULL);
5526 BUG_ON(ret);
5527
5528 if (root == root->fs_info->extent_root)
5529 btrfs_extent_post_op(trans, root);
5530
5531 return 0;
5532 }
5533
5534 static noinline int del_extent_zero(struct btrfs_trans_handle *trans,
5535 struct btrfs_root *extent_root,
5536 struct btrfs_path *path,
5537 struct btrfs_key *extent_key)
5538 {
5539 int ret;
5540
5541 ret = btrfs_search_slot(trans, extent_root, extent_key, path, -1, 1);
5542 if (ret)
5543 goto out;
5544 ret = btrfs_del_item(trans, extent_root, path);
5545 out:
5546 btrfs_release_path(extent_root, path);
5547 return ret;
5548 }
5549
5550 static noinline struct btrfs_root *read_ref_root(struct btrfs_fs_info *fs_info,
5551 struct btrfs_ref_path *ref_path)
5552 {
5553 struct btrfs_key root_key;
5554
5555 root_key.objectid = ref_path->root_objectid;
5556 root_key.type = BTRFS_ROOT_ITEM_KEY;
5557 if (is_cowonly_root(ref_path->root_objectid))
5558 root_key.offset = 0;
5559 else
5560 root_key.offset = (u64)-1;
5561
5562 return btrfs_read_fs_root_no_name(fs_info, &root_key);
5563 }
5564
5565 static noinline int relocate_one_extent(struct btrfs_root *extent_root,
5566 struct btrfs_path *path,
5567 struct btrfs_key *extent_key,
5568 struct btrfs_block_group_cache *group,
5569 struct inode *reloc_inode, int pass)
5570 {
5571 struct btrfs_trans_handle *trans;
5572 struct btrfs_root *found_root;
5573 struct btrfs_ref_path *ref_path = NULL;
5574 struct disk_extent *new_extents = NULL;
5575 int nr_extents = 0;
5576 int loops;
5577 int ret;
5578 int level;
5579 struct btrfs_key first_key;
5580 u64 prev_block = 0;
5581
5582
5583 trans = btrfs_start_transaction(extent_root, 1);
5584 BUG_ON(!trans);
5585
5586 if (extent_key->objectid == 0) {
5587 ret = del_extent_zero(trans, extent_root, path, extent_key);
5588 goto out;
5589 }
5590
5591 ref_path = kmalloc(sizeof(*ref_path), GFP_NOFS);
5592 if (!ref_path) {
5593 ret = -ENOMEM;
5594 goto out;
5595 }
5596
5597 for (loops = 0; ; loops++) {
5598 if (loops == 0) {
5599 ret = btrfs_first_ref_path(trans, extent_root, ref_path,
5600 extent_key->objectid);
5601 } else {
5602 ret = btrfs_next_ref_path(trans, extent_root, ref_path);
5603 }
5604 if (ret < 0)
5605 goto out;
5606 if (ret > 0)
5607 break;
5608
5609 if (ref_path->root_objectid == BTRFS_TREE_LOG_OBJECTID ||
5610 ref_path->root_objectid == BTRFS_TREE_RELOC_OBJECTID)
5611 continue;
5612
5613 found_root = read_ref_root(extent_root->fs_info, ref_path);
5614 BUG_ON(!found_root);
5615 /*
5616 * for reference counted trees, only process reference paths
5617 * rooted at the latest committed root.
5618 */
5619 if (found_root->ref_cows &&
5620 ref_path->root_generation != found_root->root_key.offset)
5621 continue;
5622
5623 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5624 if (pass == 0) {
5625 /*
5626 * copy data extents to new locations
5627 */
5628 u64 group_start = group->key.objectid;
5629 ret = relocate_data_extent(reloc_inode,
5630 extent_key,
5631 group_start);
5632 if (ret < 0)
5633 goto out;
5634 break;
5635 }
5636 level = 0;
5637 } else {
5638 level = ref_path->owner_objectid;
5639 }
5640
5641 if (prev_block != ref_path->nodes[level]) {
5642 struct extent_buffer *eb;
5643 u64 block_start = ref_path->nodes[level];
5644 u64 block_size = btrfs_level_size(found_root, level);
5645
5646 eb = read_tree_block(found_root, block_start,
5647 block_size, 0);
5648 btrfs_tree_lock(eb);
5649 BUG_ON(level != btrfs_header_level(eb));
5650
5651 if (level == 0)
5652 btrfs_item_key_to_cpu(eb, &first_key, 0);
5653 else
5654 btrfs_node_key_to_cpu(eb, &first_key, 0);
5655
5656 btrfs_tree_unlock(eb);
5657 free_extent_buffer(eb);
5658 prev_block = block_start;
5659 }
5660
5661 mutex_lock(&extent_root->fs_info->trans_mutex);
5662 btrfs_record_root_in_trans(found_root);
5663 mutex_unlock(&extent_root->fs_info->trans_mutex);
5664 if (ref_path->owner_objectid >= BTRFS_FIRST_FREE_OBJECTID) {
5665 /*
5666 * try to update data extent references while
5667 * keeping metadata shared between snapshots.
5668 */
5669 if (pass == 1) {
5670 ret = relocate_one_path(trans, found_root,
5671 path, &first_key, ref_path,
5672 group, reloc_inode);
5673 if (ret < 0)
5674 goto out;
5675 continue;
5676 }
5677 /*
5678 * use fallback method to process the remaining
5679 * references.
5680 */
5681 if (!new_extents) {
5682 u64 group_start = group->key.objectid;
5683 new_extents = kmalloc(sizeof(*new_extents),
5684 GFP_NOFS);
5685 nr_extents = 1;
5686 ret = get_new_locations(reloc_inode,
5687 extent_key,
5688 group_start, 1,
5689 &new_extents,
5690 &nr_extents);
5691 if (ret)
5692 goto out;
5693 }
5694 ret = replace_one_extent(trans, found_root,
5695 path, extent_key,
5696 &first_key, ref_path,
5697 new_extents, nr_extents);
5698 } else {
5699 ret = relocate_tree_block(trans, found_root, path,
5700 &first_key, ref_path);
5701 }
5702 if (ret < 0)
5703 goto out;
5704 }
5705 ret = 0;
5706 out:
5707 btrfs_end_transaction(trans, extent_root);
5708 kfree(new_extents);
5709 kfree(ref_path);
5710 return ret;
5711 }
5712
5713 static u64 update_block_group_flags(struct btrfs_root *root, u64 flags)
5714 {
5715 u64 num_devices;
5716 u64 stripped = BTRFS_BLOCK_GROUP_RAID0 |
5717 BTRFS_BLOCK_GROUP_RAID1 | BTRFS_BLOCK_GROUP_RAID10;
5718
5719 num_devices = root->fs_info->fs_devices->rw_devices;
5720 if (num_devices == 1) {
5721 stripped |= BTRFS_BLOCK_GROUP_DUP;
5722 stripped = flags & ~stripped;
5723
5724 /* turn raid0 into single device chunks */
5725 if (flags & BTRFS_BLOCK_GROUP_RAID0)
5726 return stripped;
5727
5728 /* turn mirroring into duplication */
5729 if (flags & (BTRFS_BLOCK_GROUP_RAID1 |
5730 BTRFS_BLOCK_GROUP_RAID10))
5731 return stripped | BTRFS_BLOCK_GROUP_DUP;
5732 return flags;
5733 } else {
5734 /* they already had raid on here, just return */
5735 if (flags & stripped)
5736 return flags;
5737
5738 stripped |= BTRFS_BLOCK_GROUP_DUP;
5739 stripped = flags & ~stripped;
5740
5741 /* switch duplicated blocks with raid1 */
5742 if (flags & BTRFS_BLOCK_GROUP_DUP)
5743 return stripped | BTRFS_BLOCK_GROUP_RAID1;
5744
5745 /* turn single device chunks into raid0 */
5746 return stripped | BTRFS_BLOCK_GROUP_RAID0;
5747 }
5748 return flags;
5749 }
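
/*
 * A few worked examples of the conversion above (profile bits only; the
 * DATA/METADATA/SYSTEM type bits are passed through untouched):
 *
 *   one rw device left:
 *	RAID0  -> single	(striping impossible, drop the profile)
 *	RAID1  -> DUP		(mirroring becomes duplication)
 *	RAID10 -> DUP
 *   more than one rw device:
 *	RAID0/RAID1/RAID10 -> unchanged
 *	DUP    -> RAID1		(duplicated blocks become mirrored)
 *	single -> RAID0
 */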
5750
5751 static int __alloc_chunk_for_shrink(struct btrfs_root *root,
5752 struct btrfs_block_group_cache *shrink_block_group,
5753 int force)
5754 {
5755 struct btrfs_trans_handle *trans;
5756 u64 new_alloc_flags;
5757 u64 calc;
5758
5759 spin_lock(&shrink_block_group->lock);
5760 if (btrfs_block_group_used(&shrink_block_group->item) > 0) {
5761 spin_unlock(&shrink_block_group->lock);
5762
5763 trans = btrfs_start_transaction(root, 1);
5764 spin_lock(&shrink_block_group->lock);
5765
5766 new_alloc_flags = update_block_group_flags(root,
5767 shrink_block_group->flags);
5768 if (new_alloc_flags != shrink_block_group->flags) {
5769 calc =
5770 btrfs_block_group_used(&shrink_block_group->item);
5771 } else {
5772 calc = shrink_block_group->key.offset;
5773 }
5774 spin_unlock(&shrink_block_group->lock);
5775
5776 do_chunk_alloc(trans, root->fs_info->extent_root,
5777 calc + 2 * 1024 * 1024, new_alloc_flags, force);
5778
5779 btrfs_end_transaction(trans, root);
5780 } else
5781 spin_unlock(&shrink_block_group->lock);
5782 return 0;
5783 }
5784
5785 static int __insert_orphan_inode(struct btrfs_trans_handle *trans,
5786 struct btrfs_root *root,
5787 u64 objectid, u64 size)
5788 {
5789 struct btrfs_path *path;
5790 struct btrfs_inode_item *item;
5791 struct extent_buffer *leaf;
5792 int ret;
5793
5794 path = btrfs_alloc_path();
5795 if (!path)
5796 return -ENOMEM;
5797
5798 ret = btrfs_insert_empty_inode(trans, root, path, objectid);
5799 if (ret)
5800 goto out;
5801
5802 leaf = path->nodes[0];
5803 item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_inode_item);
5804 memset_extent_buffer(leaf, 0, (unsigned long)item, sizeof(*item));
5805 btrfs_set_inode_generation(leaf, item, 1);
5806 btrfs_set_inode_size(leaf, item, size);
5807 btrfs_set_inode_mode(leaf, item, S_IFREG | 0600);
5808 btrfs_set_inode_flags(leaf, item, BTRFS_INODE_NOCOMPRESS);
5809 btrfs_mark_buffer_dirty(leaf);
5810 btrfs_release_path(root, path);
5811 out:
5812 btrfs_free_path(path);
5813 return ret;
5814 }
5815
5816 static noinline struct inode *create_reloc_inode(struct btrfs_fs_info *fs_info,
5817 struct btrfs_block_group_cache *group)
5818 {
5819 struct inode *inode = NULL;
5820 struct btrfs_trans_handle *trans;
5821 struct btrfs_root *root;
5822 struct btrfs_key root_key;
5823 u64 objectid = BTRFS_FIRST_FREE_OBJECTID;
5824 int err = 0;
5825
5826 root_key.objectid = BTRFS_DATA_RELOC_TREE_OBJECTID;
5827 root_key.type = BTRFS_ROOT_ITEM_KEY;
5828 root_key.offset = (u64)-1;
5829 root = btrfs_read_fs_root_no_name(fs_info, &root_key);
5830 if (IS_ERR(root))
5831 return ERR_CAST(root);
5832
5833 trans = btrfs_start_transaction(root, 1);
5834 BUG_ON(!trans);
5835
5836 err = btrfs_find_free_objectid(trans, root, objectid, &objectid);
5837 if (err)
5838 goto out;
5839
5840 err = __insert_orphan_inode(trans, root, objectid, group->key.offset);
5841 BUG_ON(err);
5842
5843 err = btrfs_insert_file_extent(trans, root, objectid, 0, 0, 0,
5844 group->key.offset, 0, group->key.offset,
5845 0, 0, 0);
5846 BUG_ON(err);
5847
5848 inode = btrfs_iget_locked(root->fs_info->sb, objectid, root);
5849 if (inode->i_state & I_NEW) {
5850 BTRFS_I(inode)->root = root;
5851 BTRFS_I(inode)->location.objectid = objectid;
5852 BTRFS_I(inode)->location.type = BTRFS_INODE_ITEM_KEY;
5853 BTRFS_I(inode)->location.offset = 0;
5854 btrfs_read_locked_inode(inode);
5855 unlock_new_inode(inode);
5856 BUG_ON(is_bad_inode(inode));
5857 } else {
5858 BUG_ON(1);
5859 }
5860 BTRFS_I(inode)->index_cnt = group->key.objectid;
5861
5862 err = btrfs_orphan_add(trans, inode);
5863 out:
5864 btrfs_end_transaction(trans, root);
5865 if (err) {
5866 if (inode)
5867 iput(inode);
5868 inode = ERR_PTR(err);
5869 }
5870 return inode;
5871 }
5872
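/*
 * reuse the checksums of a relocated data extent: the original disk bytenr
 * is recovered from the file offset plus index_cnt, the csums found there
 * are rebased onto the new ordered extent start and attached to the ordered
 * extent so they are written out with the copied data
 */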
5873 int btrfs_reloc_clone_csums(struct inode *inode, u64 file_pos, u64 len)
5874 {
5875
5876 struct btrfs_ordered_sum *sums;
5877 struct btrfs_sector_sum *sector_sum;
5878 struct btrfs_ordered_extent *ordered;
5879 struct btrfs_root *root = BTRFS_I(inode)->root;
5880 struct list_head list;
5881 size_t offset;
5882 int ret;
5883 u64 disk_bytenr;
5884
5885 INIT_LIST_HEAD(&list);
5886
5887 ordered = btrfs_lookup_ordered_extent(inode, file_pos);
5888 BUG_ON(ordered->file_offset != file_pos || ordered->len != len);
5889
5890 disk_bytenr = file_pos + BTRFS_I(inode)->index_cnt;
5891 ret = btrfs_lookup_csums_range(root->fs_info->csum_root, disk_bytenr,
5892 disk_bytenr + len - 1, &list);
5893
5894 while (!list_empty(&list)) {
5895 sums = list_entry(list.next, struct btrfs_ordered_sum, list);
5896 list_del_init(&sums->list);
5897
5898 sector_sum = sums->sums;
5899 sums->bytenr = ordered->start;
5900
5901 offset = 0;
5902 while (offset < sums->len) {
5903 sector_sum->bytenr += ordered->start - disk_bytenr;
5904 sector_sum++;
5905 offset += root->sectorsize;
5906 }
5907
5908 btrfs_add_ordered_sum(inode, ordered, sums);
5909 }
5910 btrfs_put_ordered_extent(ordered);
5911 return 0;
5912 }
5913
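/*
 * move every extent out of the block group starting at group_start.  the
 * group is marked read-only and a replacement chunk is allocated, then the
 * extent tree is scanned and each extent in the range is copied through the
 * relocation inode.  passes repeat until no extents remain; if only skipped
 * extents are left after a few passes, a fresh relocation inode is created
 * and the scan starts over.
 */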
5914 int btrfs_relocate_block_group(struct btrfs_root *root, u64 group_start)
5915 {
5916 struct btrfs_trans_handle *trans;
5917 struct btrfs_path *path;
5918 struct btrfs_fs_info *info = root->fs_info;
5919 struct extent_buffer *leaf;
5920 struct inode *reloc_inode;
5921 struct btrfs_block_group_cache *block_group;
5922 struct btrfs_key key;
5923 u64 skipped;
5924 u64 cur_byte;
5925 u64 total_found;
5926 u32 nritems;
5927 int ret;
5928 int progress;
5929 int pass = 0;
5930
5931 root = root->fs_info->extent_root;
5932
5933 block_group = btrfs_lookup_block_group(info, group_start);
5934 BUG_ON(!block_group);
5935
5936 printk(KERN_INFO "btrfs relocating block group %llu flags %llu\n",
5937 (unsigned long long)block_group->key.objectid,
5938 (unsigned long long)block_group->flags);
5939
5940 path = btrfs_alloc_path();
5941 BUG_ON(!path);
5942
5943 reloc_inode = create_reloc_inode(info, block_group);
5944 BUG_ON(IS_ERR(reloc_inode));
5945
5946 __alloc_chunk_for_shrink(root, block_group, 1);
5947 set_block_group_readonly(block_group);
5948
5949 btrfs_start_delalloc_inodes(info->tree_root);
5950 btrfs_wait_ordered_extents(info->tree_root, 0);
5951 again:
5952 skipped = 0;
5953 total_found = 0;
5954 progress = 0;
5955 key.objectid = block_group->key.objectid;
5956 key.offset = 0;
5957 key.type = 0;
5958 cur_byte = key.objectid;
5959
5960 trans = btrfs_start_transaction(info->tree_root, 1);
5961 btrfs_commit_transaction(trans, info->tree_root);
5962
5963 mutex_lock(&root->fs_info->cleaner_mutex);
5964 btrfs_clean_old_snapshots(info->tree_root);
5965 btrfs_remove_leaf_refs(info->tree_root, (u64)-1, 1);
5966 mutex_unlock(&root->fs_info->cleaner_mutex);
5967
5968 while (1) {
5969 ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
5970 if (ret < 0)
5971 goto out;
5972 next:
5973 leaf = path->nodes[0];
5974 nritems = btrfs_header_nritems(leaf);
5975 if (path->slots[0] >= nritems) {
5976 ret = btrfs_next_leaf(root, path);
5977 if (ret < 0)
5978 goto out;
5979 if (ret == 1) {
5980 ret = 0;
5981 break;
5982 }
5983 leaf = path->nodes[0];
5984 nritems = btrfs_header_nritems(leaf);
5985 }
5986
5987 btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
5988
5989 if (key.objectid >= block_group->key.objectid +
5990 block_group->key.offset)
5991 break;
5992
5993 if (progress && need_resched()) {
5994 btrfs_release_path(root, path);
5995 cond_resched();
5996 progress = 0;
5997 continue;
5998 }
5999 progress = 1;
6000
6001 if (btrfs_key_type(&key) != BTRFS_EXTENT_ITEM_KEY ||
6002 key.objectid + key.offset <= cur_byte) {
6003 path->slots[0]++;
6004 goto next;
6005 }
6006
6007 total_found++;
6008 cur_byte = key.objectid + key.offset;
6009 btrfs_release_path(root, path);
6010
6011 __alloc_chunk_for_shrink(root, block_group, 0);
6012 ret = relocate_one_extent(root, path, &key, block_group,
6013 reloc_inode, pass);
6014 BUG_ON(ret < 0);
6015 if (ret > 0)
6016 skipped++;
6017
6018 key.objectid = cur_byte;
6019 key.type = 0;
6020 key.offset = 0;
6021 }
6022
6023 btrfs_release_path(root, path);
6024
6025 if (pass == 0) {
6026 btrfs_wait_ordered_range(reloc_inode, 0, (u64)-1);
6027 invalidate_mapping_pages(reloc_inode->i_mapping, 0, -1);
6028 }
6029
6030 if (total_found > 0) {
6031 printk(KERN_INFO "btrfs found %llu extents in pass %d\n",
6032 (unsigned long long)total_found, pass);
6033 pass++;
6034 if (total_found == skipped && pass > 2) {
6035 iput(reloc_inode);
6036 reloc_inode = create_reloc_inode(info, block_group);
6037 pass = 0;
6038 }
6039 goto again;
6040 }
6041
6042 /* drop the reloc inode; it was added as an orphan, so the final iput deletes it */
6043 iput(reloc_inode);
6044
6045 /* commit a transaction so extents pinned during relocation are unpinned */
6046 trans = btrfs_start_transaction(info->tree_root, 1);
6047 btrfs_commit_transaction(trans, info->tree_root);
6048
6049 spin_lock(&block_group->lock);
6050 WARN_ON(block_group->pinned > 0);
6051 WARN_ON(block_group->reserved > 0);
6052 WARN_ON(btrfs_block_group_used(&block_group->item) > 0);
6053 spin_unlock(&block_group->lock);
6054 put_block_group(block_group);
6055 ret = 0;
6056 out:
6057 btrfs_free_path(path);
6058 return ret;
6059 }
6060
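/*
 * find the first block group item at or after the given key, leaving the
 * path pointing at it.  returns 0 when one is found, -ENOENT when the end
 * of the extent tree is reached, or a negative error from the search.
 */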
6061 static int find_first_block_group(struct btrfs_root *root,
6062 struct btrfs_path *path, struct btrfs_key *key)
6063 {
6064 int ret = 0;
6065 struct btrfs_key found_key;
6066 struct extent_buffer *leaf;
6067 int slot;
6068
6069 ret = btrfs_search_slot(NULL, root, key, path, 0, 0);
6070 if (ret < 0)
6071 goto out;
6072
6073 while (1) {
6074 slot = path->slots[0];
6075 leaf = path->nodes[0];
6076 if (slot >= btrfs_header_nritems(leaf)) {
6077 ret = btrfs_next_leaf(root, path);
6078 if (ret == 0)
6079 continue;
6080 if (ret < 0)
6081 goto out;
6082 break;
6083 }
6084 btrfs_item_key_to_cpu(leaf, &found_key, slot);
6085
6086 if (found_key.objectid >= key->objectid &&
6087 found_key.type == BTRFS_BLOCK_GROUP_ITEM_KEY) {
6088 ret = 0;
6089 goto out;
6090 }
6091 path->slots[0]++;
6092 }
6093 ret = -ENOENT;
6094 out:
6095 return ret;
6096 }
6097
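/*
 * free all in-memory block group caches: drop each group from the rb tree
 * and from its space_info list, release its free space cache and free the
 * structure itself
 */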
6098 int btrfs_free_block_groups(struct btrfs_fs_info *info)
6099 {
6100 struct btrfs_block_group_cache *block_group;
6101 struct rb_node *n;
6102
6103 spin_lock(&info->block_group_cache_lock);
6104 while ((n = rb_last(&info->block_group_cache_tree)) != NULL) {
6105 block_group = rb_entry(n, struct btrfs_block_group_cache,
6106 cache_node);
6107 rb_erase(&block_group->cache_node,
6108 &info->block_group_cache_tree);
6109 spin_unlock(&info->block_group_cache_lock);
6110
6111 btrfs_remove_free_space_cache(block_group);
6112 down_write(&block_group->space_info->groups_sem);
6113 list_del(&block_group->list);
6114 up_write(&block_group->space_info->groups_sem);
6115
6116 WARN_ON(atomic_read(&block_group->count) != 1);
6117 kfree(block_group);
6118
6119 spin_lock(&info->block_group_cache_lock);
6120 }
6121 spin_unlock(&info->block_group_cache_lock);
6122 return 0;
6123 }
6124
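/*
 * read every block group item from the extent tree at mount time and build
 * the in-memory cache: each group is linked into its space_info and into
 * the block group rb tree, and groups on read-only chunks are marked
 * read-only
 */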
6125 int btrfs_read_block_groups(struct btrfs_root *root)
6126 {
6127 struct btrfs_path *path;
6128 int ret;
6129 struct btrfs_block_group_cache *cache;
6130 struct btrfs_fs_info *info = root->fs_info;
6131 struct btrfs_space_info *space_info;
6132 struct btrfs_key key;
6133 struct btrfs_key found_key;
6134 struct extent_buffer *leaf;
6135
6136 root = info->extent_root;
6137 key.objectid = 0;
6138 key.offset = 0;
6139 btrfs_set_key_type(&key, BTRFS_BLOCK_GROUP_ITEM_KEY);
6140 path = btrfs_alloc_path();
6141 if (!path)
6142 return -ENOMEM;
6143
6144 while (1) {
6145 ret = find_first_block_group(root, path, &key);
6146 if (ret > 0) {
6147 ret = 0;
6148 goto error;
6149 }
6150 if (ret != 0)
6151 goto error;
6152
6153 leaf = path->nodes[0];
6154 btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
6155 cache = kzalloc(sizeof(*cache), GFP_NOFS);
6156 if (!cache) {
6157 ret = -ENOMEM;
6158 break;
6159 }
6160
6161 atomic_set(&cache->count, 1);
6162 spin_lock_init(&cache->lock);
6163 mutex_init(&cache->alloc_mutex);
6164 mutex_init(&cache->cache_mutex);
6165 INIT_LIST_HEAD(&cache->list);
6166 read_extent_buffer(leaf, &cache->item,
6167 btrfs_item_ptr_offset(leaf, path->slots[0]),
6168 sizeof(cache->item));
6169 memcpy(&cache->key, &found_key, sizeof(found_key));
6170
6171 key.objectid = found_key.objectid + found_key.offset;
6172 btrfs_release_path(root, path);
6173 cache->flags = btrfs_block_group_flags(&cache->item);
6174
6175 ret = update_space_info(info, cache->flags, found_key.offset,
6176 btrfs_block_group_used(&cache->item),
6177 &space_info);
6178 BUG_ON(ret);
6179 cache->space_info = space_info;
6180 down_write(&space_info->groups_sem);
6181 list_add_tail(&cache->list, &space_info->block_groups);
6182 up_write(&space_info->groups_sem);
6183
6184 ret = btrfs_add_block_group_cache(root->fs_info, cache);
6185 BUG_ON(ret);
6186
6187 set_avail_alloc_bits(root->fs_info, cache->flags);
6188 if (btrfs_chunk_readonly(root, cache->key.objectid))
6189 set_block_group_readonly(cache);
6190 }
6191 ret = 0;
6192 error:
6193 btrfs_free_path(path);
6194 return ret;
6195 }
6196
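/*
 * create a new block group for a freshly allocated chunk: set up the
 * in-memory cache entry, account the space in its space_info and insert
 * the block group item into the extent tree
 */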
6197 int btrfs_make_block_group(struct btrfs_trans_handle *trans,
6198 struct btrfs_root *root, u64 bytes_used,
6199 u64 type, u64 chunk_objectid, u64 chunk_offset,
6200 u64 size)
6201 {
6202 int ret;
6203 struct btrfs_root *extent_root;
6204 struct btrfs_block_group_cache *cache;
6205
6206 extent_root = root->fs_info->extent_root;
6207
6208 root->fs_info->last_trans_new_blockgroup = trans->transid;
6209
6210 cache = kzalloc(sizeof(*cache), GFP_NOFS);
6211 if (!cache)
6212 return -ENOMEM;
6213
6214 cache->key.objectid = chunk_offset;
6215 cache->key.offset = size;
6216 cache->key.type = BTRFS_BLOCK_GROUP_ITEM_KEY;
6217 atomic_set(&cache->count, 1);
6218 spin_lock_init(&cache->lock);
6219 mutex_init(&cache->alloc_mutex);
6220 mutex_init(&cache->cache_mutex);
6221 INIT_LIST_HEAD(&cache->list);
6222
6223 btrfs_set_block_group_used(&cache->item, bytes_used);
6224 btrfs_set_block_group_chunk_objectid(&cache->item, chunk_objectid);
6225 cache->flags = type;
6226 btrfs_set_block_group_flags(&cache->item, type);
6227
6228 ret = update_space_info(root->fs_info, cache->flags, size, bytes_used,
6229 &cache->space_info);
6230 BUG_ON(ret);
6231 down_write(&cache->space_info->groups_sem);
6232 list_add_tail(&cache->list, &cache->space_info->block_groups);
6233 up_write(&cache->space_info->groups_sem);
6234
6235 ret = btrfs_add_block_group_cache(root->fs_info, cache);
6236 BUG_ON(ret);
6237
6238 ret = btrfs_insert_item(trans, extent_root, &cache->key, &cache->item,
6239 sizeof(cache->item));
6240 BUG_ON(ret);
6241
6242 finish_current_insert(trans, extent_root, 0);
6243 ret = del_pending_extents(trans, extent_root, 0);
6244 BUG_ON(ret);
6245 set_avail_alloc_bits(extent_root->fs_info, type);
6246
6247 return 0;
6248 }
6249
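/*
 * remove an empty, read-only block group once relocation is done: unlink it
 * from the block group cache and its space_info, adjust the space accounting
 * and delete the block group item from the extent tree
 */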
6250 int btrfs_remove_block_group(struct btrfs_trans_handle *trans,
6251 struct btrfs_root *root, u64 group_start)
6252 {
6253 struct btrfs_path *path;
6254 struct btrfs_block_group_cache *block_group;
6255 struct btrfs_key key;
6256 int ret;
6257
6258 root = root->fs_info->extent_root;
6259
6260 block_group = btrfs_lookup_block_group(root->fs_info, group_start);
6261 BUG_ON(!block_group);
6262 BUG_ON(!block_group->ro);
6263
6264 memcpy(&key, &block_group->key, sizeof(key));
6265
6266 path = btrfs_alloc_path();
6267 BUG_ON(!path);
6268
6269 spin_lock(&root->fs_info->block_group_cache_lock);
6270 rb_erase(&block_group->cache_node,
6271 &root->fs_info->block_group_cache_tree);
6272 spin_unlock(&root->fs_info->block_group_cache_lock);
6273 btrfs_remove_free_space_cache(block_group);
6274 down_write(&block_group->space_info->groups_sem);
6275 list_del(&block_group->list);
6276 up_write(&block_group->space_info->groups_sem);
6277
6278 spin_lock(&block_group->space_info->lock);
6279 block_group->space_info->total_bytes -= block_group->key.offset;
6280 block_group->space_info->bytes_readonly -= block_group->key.offset;
6281 spin_unlock(&block_group->space_info->lock);
6282 block_group->space_info->full = 0;
6283
6284 put_block_group(block_group);
6285 put_block_group(block_group);
6286
6287 ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
6288 if (ret > 0)
6289 ret = -EIO;
6290 if (ret < 0)
6291 goto out;
6292
6293 ret = btrfs_del_item(trans, root, path);
6294 out:
6295 btrfs_free_path(path);
6296 return ret;
6297 }