/*
 * Copyright (C) 2008 Red Hat.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/math64.h>
#include <linux/ratelimit.h>
#include "ctree.h"
#include "free-space-cache.h"
#include "transaction.h"
#include "disk-io.h"
#include "extent_io.h"
#include "inode-map.h"

#define BITS_PER_BITMAP		(PAGE_CACHE_SIZE * 8)
#define MAX_CACHE_BYTES_PER_GIG	(32 * 1024)
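
/*
 * Sizing note (illustrative numbers, assuming 4KiB pages): BITS_PER_BITMAP is
 * then 32768, so one bitmap page tracks 32768 * sectorsize bytes of space,
 * i.e. 128MiB with 4KiB sectors, while MAX_CACHE_BYTES_PER_GIG caps in-memory
 * tracking overhead at 32KiB per 1GiB of block group.
 */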

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info);
static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info);

static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
					       struct btrfs_path *path,
					       u64 offset)
{
	struct btrfs_key key;
	struct btrfs_key location;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct inode *inode = NULL;
	int ret;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return ERR_PTR(ret);
	if (ret > 0) {
		btrfs_release_path(path);
		return ERR_PTR(-ENOENT);
	}

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_free_space_key(leaf, header, &disk_key);
	btrfs_disk_key_to_cpu(&location, &disk_key);
	btrfs_release_path(path);

	inode = btrfs_iget(root->fs_info->sb, &location, root, NULL);
	if (!inode)
		return ERR_PTR(-ENOENT);
	if (IS_ERR(inode))
		return inode;
	if (is_bad_inode(inode)) {
		iput(inode);
		return ERR_PTR(-ENOENT);
	}

	inode->i_mapping->flags &= ~__GFP_FS;

	return inode;
}

struct inode *lookup_free_space_inode(struct btrfs_root *root,
				      struct btrfs_block_group_cache
				      *block_group, struct btrfs_path *path)
{
	struct inode *inode = NULL;
	u32 flags = BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	spin_lock(&block_group->lock);
	if (block_group->inode)
		inode = igrab(block_group->inode);
	spin_unlock(&block_group->lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path,
					  block_group->key.objectid);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&block_group->lock);
	if (!((BTRFS_I(inode)->flags & flags) == flags)) {
		printk(KERN_INFO "Old style space inode found, converting.\n");
		BTRFS_I(inode)->flags |= BTRFS_INODE_NODATASUM |
			BTRFS_INODE_NODATACOW;
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
	}

	if (!block_group->iref) {
		block_group->inode = igrab(inode);
		block_group->iref = 1;
	}
	spin_unlock(&block_group->lock);

	return inode;
}

int __create_free_space_inode(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path, u64 ino, u64 offset)
{
	struct btrfs_key key;
	struct btrfs_disk_key disk_key;
	struct btrfs_free_space_header *header;
	struct btrfs_inode_item *inode_item;
	struct extent_buffer *leaf;
	u64 flags = BTRFS_INODE_NOCOMPRESS | BTRFS_INODE_PREALLOC;
	int ret;

	ret = btrfs_insert_empty_inode(trans, root, path, ino);
	if (ret)
		return ret;

	/* We inline crc's for the free disk space cache */
	if (ino != BTRFS_FREE_INO_OBJECTID)
		flags |= BTRFS_INODE_NODATASUM | BTRFS_INODE_NODATACOW;

	leaf = path->nodes[0];
	inode_item = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_inode_item);
	btrfs_item_key(leaf, &disk_key, path->slots[0]);
	memset_extent_buffer(leaf, 0, (unsigned long)inode_item,
			     sizeof(*inode_item));
	btrfs_set_inode_generation(leaf, inode_item, trans->transid);
	btrfs_set_inode_size(leaf, inode_item, 0);
	btrfs_set_inode_nbytes(leaf, inode_item, 0);
	btrfs_set_inode_uid(leaf, inode_item, 0);
	btrfs_set_inode_gid(leaf, inode_item, 0);
	btrfs_set_inode_mode(leaf, inode_item, S_IFREG | 0600);
	btrfs_set_inode_flags(leaf, inode_item, flags);
	btrfs_set_inode_nlink(leaf, inode_item, 1);
	btrfs_set_inode_transid(leaf, inode_item, trans->transid);
	btrfs_set_inode_block_group(leaf, inode_item, offset);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_insert_empty_item(trans, root, path, &key,
				      sizeof(struct btrfs_free_space_header));
	if (ret < 0) {
		btrfs_release_path(path);
		return ret;
	}
	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	memset_extent_buffer(leaf, 0, (unsigned long)header, sizeof(*header));
	btrfs_set_free_space_key(leaf, header, &disk_key);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	return 0;
}

int create_free_space_inode(struct btrfs_root *root,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_path *path)
{
	int ret;
	u64 ino;

	ret = btrfs_find_free_objectid(root, &ino);
	if (ret < 0)
		return ret;

	return __create_free_space_inode(root, trans, path, ino,
					 block_group->key.objectid);
}

int btrfs_truncate_free_space_cache(struct btrfs_root *root,
				    struct btrfs_trans_handle *trans,
				    struct btrfs_path *path,
				    struct inode *inode)
{
	struct btrfs_block_rsv *rsv;
	u64 needed_bytes;
	loff_t oldsize;
	int ret = 0;

	rsv = trans->block_rsv;
	trans->block_rsv = &root->fs_info->global_block_rsv;

	/* 1 for slack space, 1 for updating the inode */
	needed_bytes = btrfs_calc_trunc_metadata_size(root, 1) +
		btrfs_calc_trans_metadata_size(root, 1);

	spin_lock(&trans->block_rsv->lock);
	if (trans->block_rsv->reserved < needed_bytes) {
		spin_unlock(&trans->block_rsv->lock);
		trans->block_rsv = rsv;
		return -ENOSPC;
	}
	spin_unlock(&trans->block_rsv->lock);

	oldsize = i_size_read(inode);
	btrfs_i_size_write(inode, 0);
	truncate_pagecache(inode, oldsize, 0);

	/*
	 * We don't need an orphan item because truncating the free space cache
	 * will never be split across transactions.
	 */
	ret = btrfs_truncate_inode_items(trans, root, inode,
					 0, BTRFS_EXTENT_DATA_KEY);
	if (ret) {
		trans->block_rsv = rsv;
		btrfs_abort_transaction(trans, root, ret);
		return ret;
	}

	ret = btrfs_update_inode(trans, root, inode);
	if (ret)
		btrfs_abort_transaction(trans, root, ret);
	trans->block_rsv = rsv;

	return ret;
}

static int readahead_cache(struct inode *inode)
{
	struct file_ra_state *ra;
	unsigned long last_index;

	ra = kzalloc(sizeof(*ra), GFP_NOFS);
	if (!ra)
		return -ENOMEM;

	file_ra_state_init(ra, inode->i_mapping);
	last_index = (i_size_read(inode) - 1) >> PAGE_CACHE_SHIFT;

	page_cache_sync_readahead(inode->i_mapping, ra, NULL, 0, last_index);

	kfree(ra);

	return 0;
}
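
/*
 * Cache file layout handled by the io_ctl helpers below (summary): when crcs
 * are inlined, the first page starts with one u32 crc per cache page followed
 * by a u64 generation stamp; otherwise a single u64 chunk is skipped at the
 * front of the first page before the generation. Packed free space entries
 * follow, and each bitmap occupies a whole page of its own.
 */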

struct io_ctl {
	void *cur, *orig;
	struct page *page;
	struct page **pages;
	struct btrfs_root *root;
	unsigned long size;
	int index;
	int num_pages;
	unsigned check_crcs:1;
};

static int io_ctl_init(struct io_ctl *io_ctl, struct inode *inode,
		       struct btrfs_root *root)
{
	memset(io_ctl, 0, sizeof(struct io_ctl));
	io_ctl->num_pages = (i_size_read(inode) + PAGE_CACHE_SIZE - 1) >>
		PAGE_CACHE_SHIFT;
	io_ctl->pages = kzalloc(sizeof(struct page *) * io_ctl->num_pages,
				GFP_NOFS);
	if (!io_ctl->pages)
		return -ENOMEM;
	io_ctl->root = root;
	if (btrfs_ino(inode) != BTRFS_FREE_INO_OBJECTID)
		io_ctl->check_crcs = 1;
	return 0;
}

static void io_ctl_free(struct io_ctl *io_ctl)
{
	kfree(io_ctl->pages);
}

static void io_ctl_unmap_page(struct io_ctl *io_ctl)
{
	if (io_ctl->cur) {
		kunmap(io_ctl->page);
		io_ctl->cur = NULL;
		io_ctl->orig = NULL;
	}
}

static void io_ctl_map_page(struct io_ctl *io_ctl, int clear)
{
	WARN_ON(io_ctl->cur);
	BUG_ON(io_ctl->index >= io_ctl->num_pages);
	io_ctl->page = io_ctl->pages[io_ctl->index++];
	io_ctl->cur = kmap(io_ctl->page);
	io_ctl->orig = io_ctl->cur;
	io_ctl->size = PAGE_CACHE_SIZE;
	if (clear)
		memset(io_ctl->cur, 0, PAGE_CACHE_SIZE);
}

static void io_ctl_drop_pages(struct io_ctl *io_ctl)
{
	int i;

	io_ctl_unmap_page(io_ctl);

	for (i = 0; i < io_ctl->num_pages; i++) {
		if (io_ctl->pages[i]) {
			ClearPageChecked(io_ctl->pages[i]);
			unlock_page(io_ctl->pages[i]);
			page_cache_release(io_ctl->pages[i]);
		}
	}
}

static int io_ctl_prepare_pages(struct io_ctl *io_ctl, struct inode *inode,
				int uptodate)
{
	struct page *page;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int i;

	for (i = 0; i < io_ctl->num_pages; i++) {
		page = find_or_create_page(inode->i_mapping, i, mask);
		if (!page) {
			io_ctl_drop_pages(io_ctl);
			return -ENOMEM;
		}
		io_ctl->pages[i] = page;
		if (uptodate && !PageUptodate(page)) {
			btrfs_readpage(NULL, page);
			lock_page(page);
			if (!PageUptodate(page)) {
				printk(KERN_ERR "btrfs: error reading free "
				       "space cache\n");
				io_ctl_drop_pages(io_ctl);
				return -EIO;
			}
		}
	}

	for (i = 0; i < io_ctl->num_pages; i++) {
		clear_page_dirty_for_io(io_ctl->pages[i]);
		set_page_extent_mapped(io_ctl->pages[i]);
	}

	return 0;
}

static void io_ctl_set_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *val;

	io_ctl_map_page(io_ctl, 1);

	/*
	 * Skip the csum areas. If we don't check crcs then we just have a
	 * 64bit chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += (sizeof(u32) * io_ctl->num_pages);
		io_ctl->size -= sizeof(u64) + (sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	val = io_ctl->cur;
	*val = cpu_to_le64(generation);
	io_ctl->cur += sizeof(u64);
}

static int io_ctl_check_generation(struct io_ctl *io_ctl, u64 generation)
{
	__le64 *gen;

	/*
	 * Skip the crc area. If we don't check crcs then we just have a 64bit
	 * chunk at the front of the first page.
	 */
	if (io_ctl->check_crcs) {
		io_ctl->cur += sizeof(u32) * io_ctl->num_pages;
		io_ctl->size -= sizeof(u64) +
			(sizeof(u32) * io_ctl->num_pages);
	} else {
		io_ctl->cur += sizeof(u64);
		io_ctl->size -= sizeof(u64) * 2;
	}

	gen = io_ctl->cur;
	if (le64_to_cpu(*gen) != generation) {
		printk_ratelimited(KERN_ERR "btrfs: space cache generation "
				   "(%Lu) does not match inode (%Lu)\n", *gen,
				   generation);
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}
	io_ctl->cur += sizeof(u64);
	return 0;
}

static void io_ctl_set_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_unmap_page(io_ctl);
		return;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	io_ctl_unmap_page(io_ctl);
	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	*tmp = crc;
	kunmap(io_ctl->pages[0]);
}

static int io_ctl_check_crc(struct io_ctl *io_ctl, int index)
{
	u32 *tmp, val;
	u32 crc = ~(u32)0;
	unsigned offset = 0;

	if (!io_ctl->check_crcs) {
		io_ctl_map_page(io_ctl, 0);
		return 0;
	}

	if (index == 0)
		offset = sizeof(u32) * io_ctl->num_pages;

	tmp = kmap(io_ctl->pages[0]);
	tmp += index;
	val = *tmp;
	kunmap(io_ctl->pages[0]);

	io_ctl_map_page(io_ctl, 0);
	crc = btrfs_csum_data(io_ctl->root, io_ctl->orig + offset, crc,
			      PAGE_CACHE_SIZE - offset);
	btrfs_csum_final(crc, (char *)&crc);
	if (val != crc) {
		printk_ratelimited(KERN_ERR "btrfs: csum mismatch on free "
				   "space cache\n");
		io_ctl_unmap_page(io_ctl);
		return -EIO;
	}

	return 0;
}

static int io_ctl_add_entry(struct io_ctl *io_ctl, u64 offset, u64 bytes,
			    void *bitmap)
{
	struct btrfs_free_space_entry *entry;

	if (!io_ctl->cur)
		return -ENOSPC;

	entry = io_ctl->cur;
	entry->offset = cpu_to_le64(offset);
	entry->bytes = cpu_to_le64(bytes);
	entry->type = (bitmap) ? BTRFS_FREE_SPACE_BITMAP :
		BTRFS_FREE_SPACE_EXTENT;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_set_crc(io_ctl, io_ctl->index - 1);

	/* No more pages to map */
	if (io_ctl->index >= io_ctl->num_pages)
		return 0;

	/* map the next page */
	io_ctl_map_page(io_ctl, 1);
	return 0;
}

static int io_ctl_add_bitmap(struct io_ctl *io_ctl, void *bitmap)
{
	if (!io_ctl->cur)
		return -ENOSPC;

	/*
	 * If we aren't at the start of the current page, unmap this one and
	 * map the next one if there is any left.
	 */
	if (io_ctl->cur != io_ctl->orig) {
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
		if (io_ctl->index >= io_ctl->num_pages)
			return -ENOSPC;
		io_ctl_map_page(io_ctl, 0);
	}

	memcpy(io_ctl->cur, bitmap, PAGE_CACHE_SIZE);
	io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	if (io_ctl->index < io_ctl->num_pages)
		io_ctl_map_page(io_ctl, 0);
	return 0;
}

static void io_ctl_zero_remaining_pages(struct io_ctl *io_ctl)
{
	/*
	 * If we're not on the boundary we know we've modified the page and we
	 * need to crc the page.
	 */
	if (io_ctl->cur != io_ctl->orig)
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	else
		io_ctl_unmap_page(io_ctl);

	while (io_ctl->index < io_ctl->num_pages) {
		io_ctl_map_page(io_ctl, 1);
		io_ctl_set_crc(io_ctl, io_ctl->index - 1);
	}
}

static int io_ctl_read_entry(struct io_ctl *io_ctl,
			     struct btrfs_free_space *entry, u8 *type)
{
	struct btrfs_free_space_entry *e;
	int ret;

	if (!io_ctl->cur) {
		ret = io_ctl_check_crc(io_ctl, io_ctl->index);
		if (ret)
			return ret;
	}

	e = io_ctl->cur;
	entry->offset = le64_to_cpu(e->offset);
	entry->bytes = le64_to_cpu(e->bytes);
	*type = e->type;
	io_ctl->cur += sizeof(struct btrfs_free_space_entry);
	io_ctl->size -= sizeof(struct btrfs_free_space_entry);

	if (io_ctl->size >= sizeof(struct btrfs_free_space_entry))
		return 0;

	io_ctl_unmap_page(io_ctl);

	return 0;
}

static int io_ctl_read_bitmap(struct io_ctl *io_ctl,
			      struct btrfs_free_space *entry)
{
	int ret;

	ret = io_ctl_check_crc(io_ctl, io_ctl->index);
	if (ret)
		return ret;

	memcpy(entry->bitmap, io_ctl->cur, PAGE_CACHE_SIZE);
	io_ctl_unmap_page(io_ctl);

	return 0;
}

/*
 * Since we attach pinned extents after the fact we can have contiguous
 * sections of free space that are split up in entries. This poses a problem
 * with the tree logging stuff since it could have allocated across what
 * appears to be 2 entries, since we would have merged the entries when adding
 * the pinned extents back to the free space cache. So run through the space
 * cache that we just loaded and merge contiguous entries. This will make the
 * log replay stuff not blow up and it will make for nicer allocator behavior.
 */
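/*
 * For example (illustrative): after a load, adjacent extent entries [0, 8K)
 * and [8K, 24K) are collapsed by merge_space_tree() into a single [0, 24K)
 * entry, matching what the allocator would have seen had the pinned extents
 * been present when the cache was written out.
 */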
static void merge_space_tree(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *e, *prev = NULL;
	struct rb_node *n;

again:
	spin_lock(&ctl->tree_lock);
	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		e = rb_entry(n, struct btrfs_free_space, offset_index);
		if (!prev)
			goto next;
		if (e->bitmap || prev->bitmap)
			goto next;
		if (prev->offset + prev->bytes == e->offset) {
			unlink_free_space(ctl, prev);
			unlink_free_space(ctl, e);
			prev->bytes += e->bytes;
			kmem_cache_free(btrfs_free_space_cachep, e);
			link_free_space(ctl, prev);
			prev = NULL;
			spin_unlock(&ctl->tree_lock);
			goto again;
		}
next:
		prev = e;
	}
	spin_unlock(&ctl->tree_lock);
}

int __load_free_space_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct io_ctl io_ctl;
	struct btrfs_key key;
	struct btrfs_free_space *e, *n;
	struct list_head bitmaps;
	u64 num_entries;
	u64 num_bitmaps;
	u64 generation;
	u8 type;
	int ret = 0;

	INIT_LIST_HEAD(&bitmaps);

	/* Nothing in the space cache, goodbye */
	if (!i_size_read(inode))
		return 0;

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		return 0;
	else if (ret > 0) {
		btrfs_release_path(path);
		return 0;
	}

	ret = -1;

	leaf = path->nodes[0];
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	num_entries = btrfs_free_space_entries(leaf, header);
	num_bitmaps = btrfs_free_space_bitmaps(leaf, header);
	generation = btrfs_free_space_generation(leaf, header);
	btrfs_release_path(path);

	if (BTRFS_I(inode)->generation != generation) {
		printk(KERN_ERR "btrfs: free space inode generation (%llu) did"
		       " not match free space cache generation (%llu)\n",
		       (unsigned long long)BTRFS_I(inode)->generation,
		       (unsigned long long)generation);
		return 0;
	}

	if (!num_entries)
		return 0;

	ret = io_ctl_init(&io_ctl, inode, root);
	if (ret)
		return ret;

	ret = readahead_cache(inode);
	if (ret)
		goto out;

	ret = io_ctl_prepare_pages(&io_ctl, inode, 1);
	if (ret)
		goto out;

	ret = io_ctl_check_crc(&io_ctl, 0);
	if (ret)
		goto free_cache;

	ret = io_ctl_check_generation(&io_ctl, generation);
	if (ret)
		goto free_cache;

	while (num_entries) {
		e = kmem_cache_zalloc(btrfs_free_space_cachep,
				      GFP_NOFS);
		if (!e)
			goto free_cache;

		ret = io_ctl_read_entry(&io_ctl, e, &type);
		if (ret) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (!e->bytes) {
			kmem_cache_free(btrfs_free_space_cachep, e);
			goto free_cache;
		}

		if (type == BTRFS_FREE_SPACE_EXTENT) {
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
		} else {
			BUG_ON(!num_bitmaps);
			num_bitmaps--;
			e->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
			if (!e->bitmap) {
				kmem_cache_free(
					btrfs_free_space_cachep, e);
				goto free_cache;
			}
			spin_lock(&ctl->tree_lock);
			ret = link_free_space(ctl, e);
			ctl->total_bitmaps++;
			ctl->op->recalc_thresholds(ctl);
			spin_unlock(&ctl->tree_lock);
			if (ret) {
				printk(KERN_ERR "Duplicate entries in "
				       "free space cache, dumping\n");
				kmem_cache_free(btrfs_free_space_cachep, e);
				goto free_cache;
			}
			list_add_tail(&e->list, &bitmaps);
		}

		num_entries--;
	}

	io_ctl_unmap_page(&io_ctl);

	/*
	 * We add the bitmaps at the end of the entries in order that
	 * the bitmap entries are added to the cache.
	 */
	list_for_each_entry_safe(e, n, &bitmaps, list) {
		list_del_init(&e->list);
		ret = io_ctl_read_bitmap(&io_ctl, e);
		if (ret)
			goto free_cache;
	}

	io_ctl_drop_pages(&io_ctl);
	merge_space_tree(ctl);
	ret = 1;
out:
	io_ctl_free(&io_ctl);
	return ret;
free_cache:
	io_ctl_drop_pages(&io_ctl);
	__btrfs_remove_free_space_cache(ctl);
	goto out;
}

int load_free_space_cache(struct btrfs_fs_info *fs_info,
			  struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_root *root = fs_info->tree_root;
	struct inode *inode;
	struct btrfs_path *path;
	int ret = 0;
	bool matched;
	u64 used = btrfs_block_group_used(&block_group->item);

	/*
	 * If this block group has been marked to be cleared for one reason or
	 * another then we can't trust the on disk cache, so just return.
	 */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	path = btrfs_alloc_path();
	if (!path)
		return 0;
	path->search_commit_root = 1;
	path->skip_locking = 1;

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode)) {
		btrfs_free_path(path);
		return 0;
	}

	/* We may have converted the inode and made the cache invalid. */
	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state != BTRFS_DC_WRITTEN) {
		spin_unlock(&block_group->lock);
		btrfs_free_path(path);
		goto out;
	}
	spin_unlock(&block_group->lock);

	ret = __load_free_space_cache(fs_info->tree_root, inode, ctl,
				      path, block_group->key.objectid);
	btrfs_free_path(path);
	if (ret <= 0)
		goto out;

	spin_lock(&ctl->tree_lock);
	matched = (ctl->free_space == (block_group->key.offset - used -
				       block_group->bytes_super));
	spin_unlock(&ctl->tree_lock);

	if (!matched) {
		__btrfs_remove_free_space_cache(ctl);
		printk(KERN_ERR "block group %llu has a wrong amount of free "
		       "space\n", block_group->key.objectid);
		ret = -1;
	}
out:
	if (ret < 0) {
		/* This cache is bogus, make sure it gets cleared */
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_CLEAR;
		spin_unlock(&block_group->lock);
		ret = 0;

		printk(KERN_ERR "btrfs: failed to load free space cache "
		       "for block group %llu\n", block_group->key.objectid);
	}

	iput(inode);
	return ret;
}

/**
 * __btrfs_write_out_cache - write out cached info to an inode
 * @root - the root the inode belongs to
 * @ctl - the free space cache we are going to write out
 * @block_group - the block_group for this cache if it belongs to a block_group
 * @trans - the trans handle
 * @path - the path to use
 * @offset - the offset for the key we'll insert
 *
 * This function writes out a free space cache struct to disk for quick
 * recovery on mount. This will return 0 if it was successful in writing the
 * cache out, and -1 if it was not.
 */
int __btrfs_write_out_cache(struct btrfs_root *root, struct inode *inode,
			    struct btrfs_free_space_ctl *ctl,
			    struct btrfs_block_group_cache *block_group,
			    struct btrfs_trans_handle *trans,
			    struct btrfs_path *path, u64 offset)
{
	struct btrfs_free_space_header *header;
	struct extent_buffer *leaf;
	struct rb_node *node;
	struct list_head *pos, *n;
	struct extent_state *cached_state = NULL;
	struct btrfs_free_cluster *cluster = NULL;
	struct extent_io_tree *unpin = NULL;
	struct io_ctl io_ctl;
	struct list_head bitmap_list;
	struct btrfs_key key;
	u64 start, extent_start, extent_end, len;
	int entries = 0;
	int bitmaps = 0;
	int ret;
	int err = -1;

	INIT_LIST_HEAD(&bitmap_list);

	if (!i_size_read(inode))
		return -1;

	ret = io_ctl_init(&io_ctl, inode, root);
	if (ret)
		return -1;

	/* Get the cluster for this block_group if it exists */
	if (block_group && !list_empty(&block_group->cluster_list))
		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);

	/* Lock all pages first so we can lock the extent safely. */
	io_ctl_prepare_pages(&io_ctl, inode, 0);

	lock_extent_bits(&BTRFS_I(inode)->io_tree, 0, i_size_read(inode) - 1,
			 0, &cached_state);

	node = rb_first(&ctl->free_space_offset);
	if (!node && cluster) {
		node = rb_first(&cluster->root);
		cluster = NULL;
	}

	/* Make sure we can fit our crcs into the first page */
	if (io_ctl.check_crcs &&
	    (io_ctl.num_pages * sizeof(u32)) >= PAGE_CACHE_SIZE) {
		WARN_ON(1);
		goto out_nospc;
	}

	io_ctl_set_generation(&io_ctl, trans->transid);

	/* Write out the extent entries */
	while (node) {
		struct btrfs_free_space *e;

		e = rb_entry(node, struct btrfs_free_space, offset_index);
		entries++;

		ret = io_ctl_add_entry(&io_ctl, e->offset, e->bytes,
				       e->bitmap);
		if (ret)
			goto out_nospc;

		if (e->bitmap) {
			list_add_tail(&e->list, &bitmap_list);
			bitmaps++;
		}
		node = rb_next(node);
		if (!node && cluster) {
			node = rb_first(&cluster->root);
			cluster = NULL;
		}
	}

	/*
	 * We want to add any pinned extents to our free space cache
	 * so we don't leak the space
	 */

	/*
	 * We shouldn't have switched the pinned extents yet so this is the
	 * right one
	 */
	unpin = root->fs_info->pinned_extents;

	if (block_group)
		start = block_group->key.objectid;

	while (block_group && (start < block_group->key.objectid +
			       block_group->key.offset)) {
		ret = find_first_extent_bit(unpin, start,
					    &extent_start, &extent_end,
					    EXTENT_DIRTY);
		if (ret) {
			ret = 0;
			break;
		}

		/* This pinned extent is out of our range */
		if (extent_start >= block_group->key.objectid +
		    block_group->key.offset)
			break;

		extent_start = max(extent_start, start);
		extent_end = min(block_group->key.objectid +
				 block_group->key.offset, extent_end + 1);
		len = extent_end - extent_start;

		entries++;
		ret = io_ctl_add_entry(&io_ctl, extent_start, len, NULL);
		if (ret)
			goto out_nospc;

		start = extent_end;
	}

	/* Write out the bitmaps */
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);

		ret = io_ctl_add_bitmap(&io_ctl, entry->bitmap);
		if (ret)
			goto out_nospc;
		list_del_init(&entry->list);
	}

	/* Zero out the rest of the pages just to make sure */
	io_ctl_zero_remaining_pages(&io_ctl);

	ret = btrfs_dirty_pages(root, inode, io_ctl.pages, io_ctl.num_pages,
				0, i_size_read(inode), &cached_state);
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);

	if (ret)
		goto out;

	btrfs_wait_ordered_range(inode, 0, (u64)-1);

	key.objectid = BTRFS_FREE_SPACE_OBJECTID;
	key.offset = offset;
	key.type = 0;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0) {
		clear_extent_bit(&BTRFS_I(inode)->io_tree, 0, inode->i_size - 1,
				 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0, NULL,
				 GFP_NOFS);
		goto out;
	}
	leaf = path->nodes[0];
	if (ret > 0) {
		struct btrfs_key found_key;
		BUG_ON(!path->slots[0]);
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.objectid != BTRFS_FREE_SPACE_OBJECTID ||
		    found_key.offset != offset) {
			clear_extent_bit(&BTRFS_I(inode)->io_tree, 0,
					 inode->i_size - 1,
					 EXTENT_DIRTY | EXTENT_DELALLOC, 0, 0,
					 NULL, GFP_NOFS);
			btrfs_release_path(path);
			goto out;
		}
	}

	BTRFS_I(inode)->generation = trans->transid;
	header = btrfs_item_ptr(leaf, path->slots[0],
				struct btrfs_free_space_header);
	btrfs_set_free_space_entries(leaf, header, entries);
	btrfs_set_free_space_bitmaps(leaf, header, bitmaps);
	btrfs_set_free_space_generation(leaf, header, trans->transid);
	btrfs_mark_buffer_dirty(leaf);
	btrfs_release_path(path);

	err = 0;
out:
	io_ctl_free(&io_ctl);
	if (err) {
		invalidate_inode_pages2(inode->i_mapping);
		BTRFS_I(inode)->generation = 0;
	}
	btrfs_update_inode(trans, root, inode);
	return err;

out_nospc:
	list_for_each_safe(pos, n, &bitmap_list) {
		struct btrfs_free_space *entry =
			list_entry(pos, struct btrfs_free_space, list);
		list_del_init(&entry->list);
	}
	io_ctl_drop_pages(&io_ctl);
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, 0,
			     i_size_read(inode) - 1, &cached_state, GFP_NOFS);
	goto out;
}

int btrfs_write_out_cache(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_block_group_cache *block_group,
			  struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct inode *inode;
	int ret = 0;

	root = root->fs_info->tree_root;

	spin_lock(&block_group->lock);
	if (block_group->disk_cache_state < BTRFS_DC_SETUP) {
		spin_unlock(&block_group->lock);
		return 0;
	}
	spin_unlock(&block_group->lock);

	inode = lookup_free_space_inode(root, block_group, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, block_group, trans,
				      path, block_group->key.objectid);
	if (ret) {
		spin_lock(&block_group->lock);
		block_group->disk_cache_state = BTRFS_DC_ERROR;
		spin_unlock(&block_group->lock);
		ret = 0;
#ifdef DEBUG
		printk(KERN_ERR "btrfs: failed to write free space cache "
		       "for block group %llu\n", block_group->key.objectid);
#endif
	}

	iput(inode);
	return ret;
}

static inline unsigned long offset_to_bit(u64 bitmap_start, u32 unit,
					  u64 offset)
{
	BUG_ON(offset < bitmap_start);
	offset -= bitmap_start;
	return (unsigned long)(div_u64(offset, unit));
}

static inline unsigned long bytes_to_bits(u64 bytes, u32 unit)
{
	return (unsigned long)(div_u64(bytes, unit));
}

static inline u64 offset_to_bitmap(struct btrfs_free_space_ctl *ctl,
				   u64 offset)
{
	u64 bitmap_start;
	u64 bytes_per_bitmap;

	bytes_per_bitmap = BITS_PER_BITMAP * ctl->unit;
	bitmap_start = offset - ctl->start;
	bitmap_start = div64_u64(bitmap_start, bytes_per_bitmap);
	bitmap_start *= bytes_per_bitmap;
	bitmap_start += ctl->start;

	return bitmap_start;
}
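
/*
 * Worked example (illustrative, assuming 4KiB pages and a 4KiB unit):
 * bytes_per_bitmap is 32768 * 4KiB = 128MiB, so for ctl->start = 1GiB and
 * offset = 1GiB + 200MiB, offset_to_bitmap() rounds down to 1GiB + 128MiB,
 * the start of the second 128MiB-aligned bitmap window in the block group.
 */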

static int tree_insert_offset(struct rb_root *root, u64 offset,
			      struct rb_node *node, int bitmap)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct btrfs_free_space *info;

	while (*p) {
		parent = *p;
		info = rb_entry(parent, struct btrfs_free_space, offset_index);

		if (offset < info->offset) {
			p = &(*p)->rb_left;
		} else if (offset > info->offset) {
			p = &(*p)->rb_right;
		} else {
			/*
			 * we could have a bitmap entry and an extent entry
			 * share the same offset. If this is the case, we want
			 * the extent entry to always be found first if we do a
			 * linear search through the tree, since we want to have
			 * the quickest allocation time, and allocating from an
			 * extent is faster than allocating from a bitmap. So
			 * if we're inserting a bitmap and we find an entry at
			 * this offset, we want to go right, or after this entry
			 * logically. If we are inserting an extent and we've
			 * found a bitmap, we want to go left, or before
			 * logically.
			 */
			if (bitmap) {
				if (info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_right;
			} else {
				if (!info->bitmap)
					return -EEXIST;
				p = &(*p)->rb_left;
			}
		}
	}

	rb_link_node(node, parent, p);
	rb_insert_color(node, root);

	return 0;
}
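
/*
 * E.g. an extent entry and a bitmap entry can both start at offset 128M; the
 * rule above keeps the extent logically first, so a lookup at 128M that wants
 * the cheap allocation path hits the extent before ever touching the bitmap.
 */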

/*
 * searches the tree for the given offset.
 *
 * fuzzy - If this is set, then we are trying to make an allocation, and we just
 * want a section that has at least bytes size and comes at or after the given
 * offset.
 */
static struct btrfs_free_space *
tree_search_offset(struct btrfs_free_space_ctl *ctl,
		   u64 offset, int bitmap_only, int fuzzy)
{
	struct rb_node *n = ctl->free_space_offset.rb_node;
	struct btrfs_free_space *entry, *prev = NULL;

	/* find entry that is closest to the 'offset' */
	while (1) {
		if (!n) {
			entry = NULL;
			break;
		}

		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		prev = entry;

		if (offset < entry->offset)
			n = n->rb_left;
		else if (offset > entry->offset)
			n = n->rb_right;
		else
			break;
	}

	if (bitmap_only) {
		if (!entry)
			return NULL;
		if (entry->bitmap)
			return entry;

		/*
		 * bitmap entry and extent entry may share same offset,
		 * in that case, bitmap entry comes after extent entry.
		 */
		n = rb_next(n);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
		if (entry->offset != offset)
			return NULL;

		WARN_ON(!entry->bitmap);
		return entry;
	} else if (entry) {
		if (entry->bitmap) {
			/*
			 * if previous extent entry covers the offset,
			 * we should return it instead of the bitmap entry
			 */
			n = &entry->offset_index;
			while (1) {
				n = rb_prev(n);
				if (!n)
					break;
				prev = rb_entry(n, struct btrfs_free_space,
						offset_index);
				if (!prev->bitmap) {
					if (prev->offset + prev->bytes > offset)
						entry = prev;
					break;
				}
			}
		}
		return entry;
	}

	if (!prev)
		return NULL;

	/* find last entry before the 'offset' */
	entry = prev;
	if (entry->offset > offset) {
		n = rb_prev(&entry->offset_index);
		if (n) {
			entry = rb_entry(n, struct btrfs_free_space,
					 offset_index);
			BUG_ON(entry->offset > offset);
		} else {
			if (fuzzy)
				return entry;
			else
				return NULL;
		}
	}

	if (entry->bitmap) {
		n = &entry->offset_index;
		while (1) {
			n = rb_prev(n);
			if (!n)
				break;
			prev = rb_entry(n, struct btrfs_free_space,
					offset_index);
			if (!prev->bitmap) {
				if (prev->offset + prev->bytes > offset)
					return prev;
				break;
			}
		}
		if (entry->offset + BITS_PER_BITMAP * ctl->unit > offset)
			return entry;
	} else if (entry->offset + entry->bytes > offset)
		return entry;

	if (!fuzzy)
		return NULL;

	while (1) {
		if (entry->bitmap) {
			if (entry->offset + BITS_PER_BITMAP *
			    ctl->unit > offset)
				break;
		} else {
			if (entry->offset + entry->bytes > offset)
				break;
		}

		n = rb_next(&entry->offset_index);
		if (!n)
			return NULL;
		entry = rb_entry(n, struct btrfs_free_space, offset_index);
	}
	return entry;
}

static void __unlink_free_space(struct btrfs_free_space_ctl *ctl,
				struct btrfs_free_space *info)
{
	rb_erase(&info->offset_index, &ctl->free_space_offset);
	ctl->free_extents--;
}

static void unlink_free_space(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	__unlink_free_space(ctl, info);
	ctl->free_space -= info->bytes;
}

static int link_free_space(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info)
{
	int ret = 0;

	BUG_ON(!info->bitmap && !info->bytes);
	ret = tree_insert_offset(&ctl->free_space_offset, info->offset,
				 &info->offset_index, (info->bitmap != NULL));
	if (ret)
		return ret;

	ctl->free_space += info->bytes;
	ctl->free_extents++;
	return ret;
}

static void recalculate_thresholds(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_block_group_cache *block_group = ctl->private;
	u64 max_bytes;
	u64 bitmap_bytes;
	u64 extent_bytes;
	u64 size = block_group->key.offset;
	u64 bytes_per_bg = BITS_PER_BITMAP * block_group->sectorsize;
	int max_bitmaps = div64_u64(size + bytes_per_bg - 1, bytes_per_bg);

	BUG_ON(ctl->total_bitmaps > max_bitmaps);

	/*
	 * The goal is to keep the total amount of memory used per 1gb of space
	 * at or below 32k, so we need to adjust how much memory we allow to be
	 * used by extent based free space tracking
	 */
	if (size < 1024 * 1024 * 1024)
		max_bytes = MAX_CACHE_BYTES_PER_GIG;
	else
		max_bytes = MAX_CACHE_BYTES_PER_GIG *
			div64_u64(size, 1024 * 1024 * 1024);

	/*
	 * we want to account for 1 more bitmap than what we have so we can make
	 * sure we don't go over our overall goal of MAX_CACHE_BYTES_PER_GIG as
	 * we add more bitmaps.
	 */
	bitmap_bytes = (ctl->total_bitmaps + 1) * PAGE_CACHE_SIZE;

	if (bitmap_bytes >= max_bytes) {
		ctl->extents_thresh = 0;
		return;
	}

	/*
	 * we want the extent entry threshold to always be at most 1/2 the max
	 * bytes we can have, or whatever is less than that.
	 */
	extent_bytes = max_bytes - bitmap_bytes;
	extent_bytes = min_t(u64, extent_bytes, div64_u64(max_bytes, 2));

	ctl->extents_thresh =
		div64_u64(extent_bytes, (sizeof(struct btrfs_free_space)));
}
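
/*
 * Worked example (illustrative, assuming 4KiB pages): for a 4GiB block group
 * max_bytes is 4 * 32k = 128k. With 3 bitmaps loaded, bitmap_bytes is
 * (3 + 1) * 4k = 16k, so extent_bytes = min(112k, 64k) = 64k and the extent
 * threshold is 64k / sizeof(struct btrfs_free_space) entries before new
 * frees start being folded into bitmaps.
 */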

static inline void __bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *info,
				       u64 offset, u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_clear(info->bitmap, start, count);

	info->bytes -= bytes;
}

static void bitmap_clear_bits(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info, u64 offset,
			      u64 bytes)
{
	__bitmap_clear_bits(ctl, info, offset, bytes);
	ctl->free_space -= bytes;
}

static void bitmap_set_bits(struct btrfs_free_space_ctl *ctl,
			    struct btrfs_free_space *info, u64 offset,
			    u64 bytes)
{
	unsigned long start, count;

	start = offset_to_bit(info->offset, ctl->unit, offset);
	count = bytes_to_bits(bytes, ctl->unit);
	BUG_ON(start + count > BITS_PER_BITMAP);

	bitmap_set(info->bitmap, start, count);

	info->bytes += bytes;
	ctl->free_space += bytes;
}

static int search_bitmap(struct btrfs_free_space_ctl *ctl,
			 struct btrfs_free_space *bitmap_info, u64 *offset,
			 u64 *bytes)
{
	unsigned long found_bits = 0;
	unsigned long bits, i;
	unsigned long next_zero;

	i = offset_to_bit(bitmap_info->offset, ctl->unit,
			  max_t(u64, *offset, bitmap_info->offset));
	bits = bytes_to_bits(*bytes, ctl->unit);

	for (i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(bitmap_info->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(bitmap_info->bitmap,
					       BITS_PER_BITMAP, i);
		if ((next_zero - i) >= bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (found_bits) {
		*offset = (u64)(i * ctl->unit) + bitmap_info->offset;
		*bytes = (u64)(found_bits) * ctl->unit;
		return 0;
	}

	return -1;
}

static struct btrfs_free_space *
find_free_space(struct btrfs_free_space_ctl *ctl, u64 *offset, u64 *bytes)
{
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret;

	if (!ctl->free_space_offset.rb_node)
		return NULL;

	entry = tree_search_offset(ctl, offset_to_bitmap(ctl, *offset), 0, 1);
	if (!entry)
		return NULL;

	for (node = &entry->offset_index; node; node = rb_next(node)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (entry->bytes < *bytes)
			continue;

		if (entry->bitmap) {
			ret = search_bitmap(ctl, entry, offset, bytes);
			if (!ret)
				return entry;
			continue;
		}

		*offset = entry->offset;
		*bytes = entry->bytes;
		return entry;
	}

	return NULL;
}

static void add_new_bitmap(struct btrfs_free_space_ctl *ctl,
			   struct btrfs_free_space *info, u64 offset)
{
	info->offset = offset_to_bitmap(ctl, offset);
	info->bytes = 0;
	INIT_LIST_HEAD(&info->list);
	link_free_space(ctl, info);
	ctl->total_bitmaps++;

	ctl->op->recalc_thresholds(ctl);
}

static void free_bitmap(struct btrfs_free_space_ctl *ctl,
			struct btrfs_free_space *bitmap_info)
{
	unlink_free_space(ctl, bitmap_info);
	kfree(bitmap_info->bitmap);
	kmem_cache_free(btrfs_free_space_cachep, bitmap_info);
	ctl->total_bitmaps--;
	ctl->op->recalc_thresholds(ctl);
}

static noinline int remove_from_bitmap(struct btrfs_free_space_ctl *ctl,
				       struct btrfs_free_space *bitmap_info,
				       u64 *offset, u64 *bytes)
{
	u64 end;
	u64 search_start, search_bytes;
	int ret;

again:
	end = bitmap_info->offset + (u64)(BITS_PER_BITMAP * ctl->unit) - 1;

	/*
	 * XXX - this can go away after a few releases.
	 *
	 * since the only user of btrfs_remove_free_space is the tree logging
	 * stuff, and the only way to test that is under crash conditions, we
	 * want to have this debug stuff here just in case somethings not
	 * working. Search the bitmap for the space we are trying to use to
	 * make sure its actually there. If its not there then we need to stop
	 * because something has gone wrong.
	 */
	search_start = *offset;
	search_bytes = *bytes;
	search_bytes = min(search_bytes, end - search_start + 1);
	ret = search_bitmap(ctl, bitmap_info, &search_start, &search_bytes);
	BUG_ON(ret < 0 || search_start != *offset);

	if (*offset > bitmap_info->offset && *offset + *bytes > end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, end - *offset + 1);
		*bytes -= end - *offset + 1;
		*offset = end + 1;
	} else if (*offset >= bitmap_info->offset && *offset + *bytes <= end) {
		bitmap_clear_bits(ctl, bitmap_info, *offset, *bytes);
		*bytes = 0;
	}

	if (*bytes) {
		struct rb_node *next = rb_next(&bitmap_info->offset_index);
		if (!bitmap_info->bytes)
			free_bitmap(ctl, bitmap_info);

		/*
		 * no entry after this bitmap, but we still have bytes to
		 * remove, so something has gone wrong.
		 */
		if (!next)
			return -EINVAL;

		bitmap_info = rb_entry(next, struct btrfs_free_space,
				       offset_index);

		/*
		 * if the next entry isn't a bitmap we need to return to let the
		 * extent stuff do its work.
		 */
		if (!bitmap_info->bitmap)
			return -EAGAIN;

		/*
		 * Ok the next item is a bitmap, but it may not actually hold
		 * the information for the rest of this free space stuff, so
		 * look for it, and if we don't find it return so we can try
		 * everything over again.
		 */
		search_start = *offset;
		search_bytes = *bytes;
		ret = search_bitmap(ctl, bitmap_info, &search_start,
				    &search_bytes);
		if (ret < 0 || search_start != *offset)
			return -EAGAIN;

		goto again;
	} else if (!bitmap_info->bytes)
		free_bitmap(ctl, bitmap_info);

	return 0;
}
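
/*
 * E.g. a removal starting near the end of one bitmap and extending 8K into
 * the next clears the tail bits here, then loops ("goto again") into the next
 * bitmap entry to clear the remaining 8K, bailing out with -EAGAIN if that
 * bitmap doesn't actually cover the rest of the range.
 */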

static u64 add_bytes_to_bitmap(struct btrfs_free_space_ctl *ctl,
			       struct btrfs_free_space *info, u64 offset,
			       u64 bytes)
{
	u64 bytes_to_set = 0;
	u64 end;

	end = info->offset + (u64)(BITS_PER_BITMAP * ctl->unit);

	bytes_to_set = min(end - offset, bytes);

	bitmap_set_bits(ctl, info, offset, bytes_to_set);

	return bytes_to_set;
}

static bool use_bitmap(struct btrfs_free_space_ctl *ctl,
		       struct btrfs_free_space *info)
{
	struct btrfs_block_group_cache *block_group = ctl->private;

	/*
	 * If we are below the extents threshold then we can add this as an
	 * extent, and don't have to deal with the bitmap
	 */
	if (ctl->free_extents < ctl->extents_thresh) {
		/*
		 * If this block group has some small extents we don't want to
		 * use up all of our free slots in the cache with them, we want
		 * to reserve them to larger extents, however if we have plenty
		 * of cache left then go ahead and add them, no sense in adding
		 * the overhead of a bitmap if we don't have to.
		 */
		if (info->bytes <= block_group->sectorsize * 4) {
			if (ctl->free_extents * 2 <= ctl->extents_thresh)
				return false;
		} else {
			return false;
		}
	}

	/*
	 * some block groups are so tiny they can't be enveloped by a bitmap, so
	 * don't even bother to create a bitmap for this
	 */
	if (BITS_PER_BITMAP * block_group->sectorsize >
	    block_group->key.offset)
		return false;

	return true;
}
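
/*
 * E.g. with a 4KiB sectorsize, extents of 16KiB or less stay as plain extent
 * entries while the cache is under half of its threshold; only once small
 * extents threaten to exhaust the entry budget do they get folded into
 * bitmaps.
 */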

static struct btrfs_free_space_op free_space_op = {
	.recalc_thresholds	= recalculate_thresholds,
	.use_bitmap		= use_bitmap,
};

static int insert_into_bitmap(struct btrfs_free_space_ctl *ctl,
			      struct btrfs_free_space *info)
{
	struct btrfs_free_space *bitmap_info;
	struct btrfs_block_group_cache *block_group = NULL;
	int added = 0;
	u64 bytes, offset, bytes_added;
	int ret;

	bytes = info->bytes;
	offset = info->offset;

	if (!ctl->op->use_bitmap(ctl, info))
		return 0;

	if (ctl->op == &free_space_op)
		block_group = ctl->private;
again:
	/*
	 * Since we link bitmaps right into the cluster we need to see if we
	 * have a cluster here, and if so and it has our bitmap we need to add
	 * the free space to that bitmap.
	 */
	if (block_group && !list_empty(&block_group->cluster_list)) {
		struct btrfs_free_cluster *cluster;
		struct rb_node *node;
		struct btrfs_free_space *entry;

		cluster = list_entry(block_group->cluster_list.next,
				     struct btrfs_free_cluster,
				     block_group_list);
		spin_lock(&cluster->lock);
		node = rb_first(&cluster->root);
		if (!node) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!entry->bitmap) {
			spin_unlock(&cluster->lock);
			goto no_cluster_bitmap;
		}

		if (entry->offset == offset_to_bitmap(ctl, offset)) {
			bytes_added = add_bytes_to_bitmap(ctl, entry,
							  offset, bytes);
			bytes -= bytes_added;
			offset += bytes_added;
		}
		spin_unlock(&cluster->lock);
		if (!bytes) {
			ret = 1;
			goto out;
		}
	}

no_cluster_bitmap:
	bitmap_info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					 1, 0);
	if (!bitmap_info) {
		BUG_ON(added);
		goto new_bitmap;
	}

	bytes_added = add_bytes_to_bitmap(ctl, bitmap_info, offset, bytes);
	bytes -= bytes_added;
	offset += bytes_added;
	added = 0;

	if (!bytes) {
		ret = 1;
		goto out;
	} else
		goto again;

new_bitmap:
	if (info && info->bitmap) {
		add_new_bitmap(ctl, info, offset);
		added = 1;
		info = NULL;
		goto again;
	} else {
		spin_unlock(&ctl->tree_lock);

		/* no pre-allocated info, allocate a new one */
		if (!info) {
			info = kmem_cache_zalloc(btrfs_free_space_cachep,
						 GFP_NOFS);
			if (!info) {
				spin_lock(&ctl->tree_lock);
				ret = -ENOMEM;
				goto out;
			}
		}

		/* allocate the bitmap */
		info->bitmap = kzalloc(PAGE_CACHE_SIZE, GFP_NOFS);
		spin_lock(&ctl->tree_lock);
		if (!info->bitmap) {
			ret = -ENOMEM;
			goto out;
		}
		goto again;
	}

out:
	if (info) {
		if (info->bitmap)
			kfree(info->bitmap);
		kmem_cache_free(btrfs_free_space_cachep, info);
	}

	return ret;
}

static bool try_merge_free_space(struct btrfs_free_space_ctl *ctl,
				 struct btrfs_free_space *info, bool update_stat)
{
	struct btrfs_free_space *left_info;
	struct btrfs_free_space *right_info;
	bool merged = false;
	u64 offset = info->offset;
	u64 bytes = info->bytes;

	/*
	 * first we want to see if there is free space adjacent to the range we
	 * are adding, if there is remove that struct and add a new one to
	 * cover the entire range
	 */
	right_info = tree_search_offset(ctl, offset + bytes, 0, 0);
	if (right_info && rb_prev(&right_info->offset_index))
		left_info = rb_entry(rb_prev(&right_info->offset_index),
				     struct btrfs_free_space, offset_index);
	else
		left_info = tree_search_offset(ctl, offset - 1, 0, 0);

	if (right_info && !right_info->bitmap) {
		if (update_stat)
			unlink_free_space(ctl, right_info);
		else
			__unlink_free_space(ctl, right_info);
		info->bytes += right_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, right_info);
		merged = true;
	}

	if (left_info && !left_info->bitmap &&
	    left_info->offset + left_info->bytes == offset) {
		if (update_stat)
			unlink_free_space(ctl, left_info);
		else
			__unlink_free_space(ctl, left_info);
		info->offset = left_info->offset;
		info->bytes += left_info->bytes;
		kmem_cache_free(btrfs_free_space_cachep, left_info);
		merged = true;
	}

	return merged;
}

int __btrfs_add_free_space(struct btrfs_free_space_ctl *ctl,
			   u64 offset, u64 bytes)
{
	struct btrfs_free_space *info;
	int ret = 0;

	info = kmem_cache_zalloc(btrfs_free_space_cachep, GFP_NOFS);
	if (!info)
		return -ENOMEM;

	info->offset = offset;
	info->bytes = bytes;

	spin_lock(&ctl->tree_lock);

	if (try_merge_free_space(ctl, info, true))
		goto link;

	/*
	 * There was no extent directly to the left or right of this new
	 * extent then we know we're going to have to allocate a new extent, so
	 * before we do that see if we need to drop this into a bitmap
	 */
	ret = insert_into_bitmap(ctl, info);
	if (ret < 0) {
		goto out;
	} else if (ret) {
		ret = 0;
		goto out;
	}
link:
	ret = link_free_space(ctl, info);
	if (ret)
		kmem_cache_free(btrfs_free_space_cachep, info);
out:
	spin_unlock(&ctl->tree_lock);

	if (ret) {
		printk(KERN_CRIT "btrfs: unable to add free space :%d\n", ret);
		BUG_ON(ret == -EEXIST);
	}

	return ret;
}

int btrfs_remove_free_space(struct btrfs_block_group_cache *block_group,
			    u64 offset, u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct btrfs_free_space *next_info = NULL;
	int ret = 0;

	spin_lock(&ctl->tree_lock);

again:
	info = tree_search_offset(ctl, offset, 0, 0);
	if (!info) {
		/*
		 * oops didn't find an extent that matched the space we wanted
		 * to remove, look for a bitmap instead
		 */
		info = tree_search_offset(ctl, offset_to_bitmap(ctl, offset),
					  1, 0);
		if (!info) {
			/* the tree logging code might be calling us before we
			 * have fully loaded the free space rbtree for this
			 * block group. So it is possible the entry won't
			 * be in the rbtree yet at all. The caching code
			 * will make sure not to put it in the rbtree if
			 * the logging code has pinned it.
			 */
			goto out_lock;
		}
	}

	if (info->bytes < bytes && rb_next(&info->offset_index)) {
		u64 end;
		next_info = rb_entry(rb_next(&info->offset_index),
				     struct btrfs_free_space,
				     offset_index);

		if (next_info->bitmap)
			end = next_info->offset +
			      BITS_PER_BITMAP * ctl->unit - 1;
		else
			end = next_info->offset + next_info->bytes;

		if (next_info->bytes < bytes ||
		    next_info->offset > offset || offset > end) {
			printk(KERN_CRIT "Found free space at %llu, size %llu,"
			      " trying to use %llu\n",
			      (unsigned long long)info->offset,
			      (unsigned long long)info->bytes,
			      (unsigned long long)bytes);
			WARN_ON(1);
			ret = -EINVAL;
			goto out_lock;
		}

		info = next_info;
	}

	if (info->bytes == bytes) {
		unlink_free_space(ctl, info);
		if (info->bitmap) {
			kfree(info->bitmap);
			ctl->total_bitmaps--;
		}
		kmem_cache_free(btrfs_free_space_cachep, info);
		ret = 0;
		goto out_lock;
	}

	if (!info->bitmap && info->offset == offset) {
		unlink_free_space(ctl, info);
		info->offset += bytes;
		info->bytes -= bytes;
		ret = link_free_space(ctl, info);
		WARN_ON(ret);
		goto out_lock;
	}

	if (!info->bitmap && info->offset <= offset &&
	    info->offset + info->bytes >= offset + bytes) {
		u64 old_start = info->offset;
		/*
		 * we're freeing space in the middle of the info,
		 * this can happen during tree log replay
		 *
		 * first unlink the old info and then
		 * insert it again after the hole we're creating
		 */
		unlink_free_space(ctl, info);
		if (offset + bytes < info->offset + info->bytes) {
			u64 old_end = info->offset + info->bytes;

			info->offset = offset + bytes;
			info->bytes = old_end - info->offset;
			ret = link_free_space(ctl, info);
			WARN_ON(ret);
			if (ret)
				goto out_lock;
		} else {
			/* the hole we're creating ends at the end
			 * of the info struct, just free the info
			 */
			kmem_cache_free(btrfs_free_space_cachep, info);
		}
		spin_unlock(&ctl->tree_lock);

		/* step two, insert a new info struct to cover
		 * anything before the hole
		 */
		ret = btrfs_add_free_space(block_group, old_start,
					   offset - old_start);
		WARN_ON(ret); /* -ENOMEM */
		goto out;
	}

	ret = remove_from_bitmap(ctl, info, &offset, &bytes);
	if (ret == -EAGAIN)
		goto again;
	BUG_ON(ret); /* logic error */
out_lock:
	spin_unlock(&ctl->tree_lock);
out:
	return ret;
}

void btrfs_dump_free_space(struct btrfs_block_group_cache *block_group,
			   u64 bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *info;
	struct rb_node *n;
	int count = 0;

	for (n = rb_first(&ctl->free_space_offset); n; n = rb_next(n)) {
		info = rb_entry(n, struct btrfs_free_space, offset_index);
		if (info->bytes >= bytes)
			count++;
		printk(KERN_CRIT "entry offset %llu, bytes %llu, bitmap %s\n",
		       (unsigned long long)info->offset,
		       (unsigned long long)info->bytes,
		       (info->bitmap) ? "yes" : "no");
	}
	printk(KERN_INFO "block group has cluster?: %s\n",
	       list_empty(&block_group->cluster_list) ? "no" : "yes");
	printk(KERN_INFO "%d blocks of free space at or bigger than bytes is"
	       "\n", count);
}

void btrfs_init_free_space_ctl(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;

	spin_lock_init(&ctl->tree_lock);
	ctl->unit = block_group->sectorsize;
	ctl->start = block_group->key.objectid;
	ctl->private = block_group;
	ctl->op = &free_space_op;

	/*
	 * we only want to have 32k of ram per block group for keeping
	 * track of free space, and if we pass 1/2 of that we want to
	 * start converting things over to using bitmaps
	 */
	ctl->extents_thresh = ((1024 * 32) / 2) /
				sizeof(struct btrfs_free_space);
}
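
/*
 * E.g. if sizeof(struct btrfs_free_space) were 64 bytes (illustrative only),
 * the initial threshold above would come out at 16384 / 64 = 256 extent
 * entries per block group before bitmap conversion kicks in.
 */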

/*
 * for a given cluster, put all of its extents back into the free
 * space cache. If the block group passed doesn't match the block group
 * pointed to by the cluster, someone else raced in and freed the
 * cluster already. In that case, we just return without changing anything
 */
static int
__btrfs_return_cluster_to_free_space(
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;

	spin_lock(&cluster->lock);
	if (cluster->block_group != block_group)
		goto out;

	cluster->block_group = NULL;
	cluster->window_start = 0;
	list_del_init(&cluster->block_group_list);

	node = rb_first(&cluster->root);
	while (node) {
		bool bitmap;

		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		rb_erase(&entry->offset_index, &cluster->root);

		bitmap = (entry->bitmap != NULL);
		if (!bitmap)
			try_merge_free_space(ctl, entry, false);
		tree_insert_offset(&ctl->free_space_offset,
				   entry->offset, &entry->offset_index, bitmap);
	}
	cluster->root = RB_ROOT;

out:
	spin_unlock(&cluster->lock);
	btrfs_put_block_group(block_group);
	return 0;
}

void __btrfs_remove_free_space_cache_locked(struct btrfs_free_space_ctl *ctl)
{
	struct btrfs_free_space *info;
	struct rb_node *node;

	while ((node = rb_last(&ctl->free_space_offset)) != NULL) {
		info = rb_entry(node, struct btrfs_free_space, offset_index);
		if (!info->bitmap) {
			unlink_free_space(ctl, info);
			kmem_cache_free(btrfs_free_space_cachep, info);
		} else {
			free_bitmap(ctl, info);
		}
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
}

void __btrfs_remove_free_space_cache(struct btrfs_free_space_ctl *ctl)
{
	spin_lock(&ctl->tree_lock);
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

void btrfs_remove_free_space_cache(struct btrfs_block_group_cache *block_group)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_cluster *cluster;
	struct list_head *head;

	spin_lock(&ctl->tree_lock);
	while ((head = block_group->cluster_list.next) !=
	       &block_group->cluster_list) {
		cluster = list_entry(head, struct btrfs_free_cluster,
				     block_group_list);

		WARN_ON(cluster->block_group != block_group);
		__btrfs_return_cluster_to_free_space(block_group, cluster);
		if (need_resched()) {
			spin_unlock(&ctl->tree_lock);
			cond_resched();
			spin_lock(&ctl->tree_lock);
		}
	}
	__btrfs_remove_free_space_cache_locked(ctl);
	spin_unlock(&ctl->tree_lock);
}

u64 btrfs_find_space_for_alloc(struct btrfs_block_group_cache *block_group,
			       u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 bytes_search = bytes + empty_size;
	u64 ret = 0;

	spin_lock(&ctl->tree_lock);
	entry = find_free_space(ctl, &offset, &bytes_search);
	if (!entry)
		goto out;

	ret = offset;
	if (entry->bitmap) {
		bitmap_clear_bits(ctl, entry, offset, bytes);
		if (!entry->bytes)
			free_bitmap(ctl, entry);
	} else {
		unlink_free_space(ctl, entry);
		entry->offset += bytes;
		entry->bytes -= bytes;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	}

out:
	spin_unlock(&ctl->tree_lock);

	return ret;
}

/*
 * given a cluster, put all of its extents back into the free space
 * cache. If a block group is passed, this function will only free
 * a cluster that belongs to the passed block group.
 *
 * Otherwise, it'll get a reference on the block group pointed to by the
 * cluster and remove the cluster from it.
 */
int btrfs_return_cluster_to_free_space(
			       struct btrfs_block_group_cache *block_group,
			       struct btrfs_free_cluster *cluster)
{
	struct btrfs_free_space_ctl *ctl;
	int ret;

	/* first, get a safe pointer to the block group */
	spin_lock(&cluster->lock);
	if (!block_group) {
		block_group = cluster->block_group;
		if (!block_group) {
			spin_unlock(&cluster->lock);
			return 0;
		}
	} else if (cluster->block_group != block_group) {
		/* someone else has already freed it don't redo their work */
		spin_unlock(&cluster->lock);
		return 0;
	}
	atomic_inc(&block_group->count);
	spin_unlock(&cluster->lock);

	ctl = block_group->free_space_ctl;

	/* now return any extents the cluster had on it */
	spin_lock(&ctl->tree_lock);
	ret = __btrfs_return_cluster_to_free_space(block_group, cluster);
	spin_unlock(&ctl->tree_lock);

	/* finally drop our ref */
	btrfs_put_block_group(block_group);
	return ret;
}

static u64 btrfs_alloc_from_bitmap(struct btrfs_block_group_cache *block_group,
				   struct btrfs_free_cluster *cluster,
				   struct btrfs_free_space *entry,
				   u64 bytes, u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	int err;
	u64 search_start = cluster->window_start;
	u64 search_bytes = bytes;
	u64 ret = 0;

	search_start = min_start;
	search_bytes = bytes;

	err = search_bitmap(ctl, entry, &search_start, &search_bytes);
	if (err)
		return 0;

	ret = search_start;
	__bitmap_clear_bits(ctl, entry, ret, bytes);

	return ret;
}

/*
 * given a cluster, try to allocate 'bytes' from it, returns 0
 * if it couldn't find anything suitably large, or a logical disk offset
 * if things worked out
 */
u64 btrfs_alloc_from_cluster(struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster, u64 bytes,
			     u64 min_start)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry = NULL;
	struct rb_node *node;
	u64 ret = 0;

	spin_lock(&cluster->lock);
	if (bytes > cluster->max_size)
		goto out;

	if (cluster->block_group != block_group)
		goto out;

	node = rb_first(&cluster->root);
	if (!node)
		goto out;

	entry = rb_entry(node, struct btrfs_free_space, offset_index);
	while (1) {
		if (entry->bytes < bytes ||
		    (!entry->bitmap && entry->offset < min_start)) {
			node = rb_next(&entry->offset_index);
			if (!node)
				break;
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
			continue;
		}

		if (entry->bitmap) {
			ret = btrfs_alloc_from_bitmap(block_group,
						      cluster, entry, bytes,
						      cluster->window_start);
			if (ret == 0) {
				node = rb_next(&entry->offset_index);
				if (!node)
					break;
				entry = rb_entry(node, struct btrfs_free_space,
						 offset_index);
				continue;
			}
			cluster->window_start += bytes;
		} else {
			ret = entry->offset;

			entry->offset += bytes;
			entry->bytes -= bytes;
		}

		if (entry->bytes == 0)
			rb_erase(&entry->offset_index, &cluster->root);
		break;
	}
out:
	spin_unlock(&cluster->lock);

	if (!ret)
		return 0;

	spin_lock(&ctl->tree_lock);

	ctl->free_space -= bytes;
	if (entry->bytes == 0) {
		ctl->free_extents--;
		if (entry->bitmap) {
			kfree(entry->bitmap);
			ctl->total_bitmaps--;
			ctl->op->recalc_thresholds(ctl);
		}
		kmem_cache_free(btrfs_free_space_cachep, entry);
	}

	spin_unlock(&ctl->tree_lock);

	return ret;
}

static int btrfs_bitmap_cluster(struct btrfs_block_group_cache *block_group,
				struct btrfs_free_space *entry,
				struct btrfs_free_cluster *cluster,
				u64 offset, u64 bytes,
				u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	unsigned long next_zero;
	unsigned long i;
	unsigned long want_bits;
	unsigned long min_bits;
	unsigned long found_bits;
	unsigned long start = 0;
	unsigned long total_found = 0;
	int ret;
	bool found = false;

	i = offset_to_bit(entry->offset, block_group->sectorsize,
			  max_t(u64, offset, entry->offset));
	want_bits = bytes_to_bits(bytes, block_group->sectorsize);
	min_bits = bytes_to_bits(min_bytes, block_group->sectorsize);

again:
	found_bits = 0;
	for (i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i);
	     i < BITS_PER_BITMAP;
	     i = find_next_bit(entry->bitmap, BITS_PER_BITMAP, i + 1)) {
		next_zero = find_next_zero_bit(entry->bitmap,
					       BITS_PER_BITMAP, i);
		if (next_zero - i >= min_bits) {
			found_bits = next_zero - i;
			break;
		}
		i = next_zero;
	}

	if (!found_bits)
		return -ENOSPC;

	if (!found) {
		start = i;
		cluster->max_size = 0;
		found = true;
	}

	total_found += found_bits;

	if (cluster->max_size < found_bits * block_group->sectorsize)
		cluster->max_size = found_bits * block_group->sectorsize;

	if (total_found < want_bits || cluster->max_size < cont1_bytes) {
		i = next_zero + 1;
		goto again;
	}

	cluster->window_start = start * block_group->sectorsize +
		entry->offset;
	rb_erase(&entry->offset_index, &ctl->free_space_offset);
	ret = tree_insert_offset(&cluster->root, entry->offset,
				 &entry->offset_index, 1);
	BUG_ON(ret); /* -EEXIST; Logic error */

	trace_btrfs_setup_cluster(block_group, cluster,
				  total_found * block_group->sectorsize, 1);
	return 0;
}
/*
 * This searches the block group for just extents to fill the cluster with.
 * Try to find a cluster with at least bytes total bytes, at least one
 * extent of cont1_bytes, and other clusters of at least min_bytes.
 */
static noinline int
setup_cluster_no_bitmap(struct btrfs_block_group_cache *block_group,
			struct btrfs_free_cluster *cluster,
			struct list_head *bitmaps, u64 offset, u64 bytes,
			u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *first = NULL;
	struct btrfs_free_space *entry = NULL;
	struct btrfs_free_space *last;
	struct rb_node *node;
	u64 window_start;
	u64 window_free;
	u64 max_extent;
	u64 total_size = 0;
	int ret;

	entry = tree_search_offset(ctl, offset, 0, 1);
	if (!entry)
		return -ENOSPC;

	/*
	 * We don't want bitmaps, so just move along until we find a normal
	 * extent entry.
	 */
	while (entry->bitmap || entry->bytes < min_bytes) {
		if (entry->bitmap && list_empty(&entry->list))
			list_add_tail(&entry->list, bitmaps);
		node = rb_next(&entry->offset_index);
		if (!node)
			return -ENOSPC;
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
	}

	window_start = entry->offset;
	window_free = entry->bytes;
	max_extent = entry->bytes;
	first = entry;
	last = entry;

	for (node = rb_next(&entry->offset_index); node;
	     node = rb_next(&entry->offset_index)) {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);

		if (entry->bitmap) {
			if (list_empty(&entry->list))
				list_add_tail(&entry->list, bitmaps);
			continue;
		}

		if (entry->bytes < min_bytes)
			continue;

		last = entry;
		window_free += entry->bytes;
		if (entry->bytes > max_extent)
			max_extent = entry->bytes;
	}

	if (window_free < bytes || max_extent < cont1_bytes)
		return -ENOSPC;

	cluster->window_start = first->offset;

	node = &first->offset_index;

	/*
	 * now we've found our entries, pull them out of the free space
	 * cache and put them into the cluster rbtree
	 */
	do {
		entry = rb_entry(node, struct btrfs_free_space, offset_index);
		node = rb_next(&entry->offset_index);
		if (entry->bitmap || entry->bytes < min_bytes)
			continue;

		rb_erase(&entry->offset_index, &ctl->free_space_offset);
		ret = tree_insert_offset(&cluster->root, entry->offset,
					 &entry->offset_index, 0);
		total_size += entry->bytes;
		BUG_ON(ret); /* -EEXIST; Logic error */
	} while (node && entry != last);

	cluster->max_size = max_extent;
	trace_btrfs_setup_cluster(block_group, cluster, total_size, 0);
	return 0;
}
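
/*
 * Illustrative note: window_free accumulates every usable extent between
 * first and last, so e.g. three extents of 256K, 128K and 512K give
 * window_free = 896K and max_extent = 512K; the window is accepted only if
 * window_free >= bytes and max_extent >= cont1_bytes.
 */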
/*
 * This specifically looks for bitmaps that may work in the cluster, we assume
 * that we have already failed to find extents that will work.
 */
static noinline int
setup_cluster_bitmap(struct btrfs_block_group_cache *block_group,
		     struct btrfs_free_cluster *cluster,
		     struct list_head *bitmaps, u64 offset, u64 bytes,
		     u64 cont1_bytes, u64 min_bytes)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = -ENOSPC;
	u64 bitmap_offset = offset_to_bitmap(ctl, offset);

	if (ctl->total_bitmaps == 0)
		return -ENOSPC;

	/*
	 * The bitmap that covers offset won't be in the list unless offset
	 * is just its start offset.
	 */
	entry = list_first_entry(bitmaps, struct btrfs_free_space, list);
	if (entry->offset != bitmap_offset) {
		entry = tree_search_offset(ctl, bitmap_offset, 1, 0);
		if (entry && list_empty(&entry->list))
			list_add(&entry->list, bitmaps);
	}

	list_for_each_entry(entry, bitmaps, list) {
		if (entry->bytes < bytes)
			continue;
		ret = btrfs_bitmap_cluster(block_group, entry, cluster, offset,
					   bytes, cont1_bytes, min_bytes);
		if (!ret)
			return 0;
	}

	/*
	 * The bitmaps list has all the bitmaps that record free space
	 * starting after offset, so no more search is required.
	 */
	return -ENOSPC;
}
/*
 * Here we try to find a cluster of blocks in a block group.  The goal
 * is to find at least bytes+empty_size.
 * We might not find them all in one contiguous area.
 *
 * Returns zero and sets up cluster if things worked out, otherwise
 * it returns -ENOSPC.
 */
int btrfs_find_space_cluster(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_block_group_cache *block_group,
			     struct btrfs_free_cluster *cluster,
			     u64 offset, u64 bytes, u64 empty_size)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry, *tmp;
	struct list_head bitmaps;
	u64 min_bytes;
	u64 cont1_bytes;
	int ret;

	/*
	 * Choose the minimum extent size we'll require for this
	 * cluster.  For SSD_SPREAD, don't allow any fragmentation.
	 * For metadata, allow allocates with smaller extents.  For
	 * data, keep it dense.
	 */
	if (btrfs_test_opt(root, SSD_SPREAD)) {
		cont1_bytes = min_bytes = bytes + empty_size;
	} else if (block_group->flags & BTRFS_BLOCK_GROUP_METADATA) {
		cont1_bytes = bytes;
		min_bytes = block_group->sectorsize;
	} else {
		cont1_bytes = max(bytes, (bytes + empty_size) >> 2);
		min_bytes = block_group->sectorsize;
	}

	spin_lock(&ctl->tree_lock);

	/*
	 * If we know we don't have enough space to make a cluster don't even
	 * bother doing all the work to try and find one.
	 */
	if (ctl->free_space < bytes) {
		spin_unlock(&ctl->tree_lock);
		return -ENOSPC;
	}

	spin_lock(&cluster->lock);

	/* someone already found a cluster, hooray */
	if (cluster->block_group) {
		ret = 0;
		goto out;
	}

	trace_btrfs_find_cluster(block_group, offset, bytes, empty_size,
				 min_bytes);

	INIT_LIST_HEAD(&bitmaps);
	ret = setup_cluster_no_bitmap(block_group, cluster, &bitmaps, offset,
				      bytes + empty_size,
				      cont1_bytes, min_bytes);
	if (ret)
		ret = setup_cluster_bitmap(block_group, cluster, &bitmaps,
					   offset, bytes + empty_size,
					   cont1_bytes, min_bytes);

	/* Clear our temporary list */
	list_for_each_entry_safe(entry, tmp, &bitmaps, list)
		list_del_init(&entry->list);

	if (!ret) {
		atomic_inc(&block_group->count);
		list_add_tail(&cluster->block_group_list,
			      &block_group->cluster_list);
		cluster->block_group = block_group;
	} else {
		trace_btrfs_failed_cluster_setup(block_group);
	}
out:
	spin_unlock(&cluster->lock);
	spin_unlock(&ctl->tree_lock);

	return ret;
}
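
/*
 * Worked example (illustrative): for a data block group with bytes = 1M and
 * empty_size = 3M, the default branch gives
 * cont1_bytes = max(1M, (1M + 3M) >> 2) = 1M and min_bytes = sectorsize,
 * i.e. the cluster must contain one 1M-contiguous extent but may pad the
 * rest of the 4M window with anything down to a single sector.
 */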
/*
 * simple code to zero out a cluster
 */
void btrfs_init_free_cluster(struct btrfs_free_cluster *cluster)
{
	spin_lock_init(&cluster->lock);
	spin_lock_init(&cluster->refill_lock);
	cluster->root = RB_ROOT;
	cluster->max_size = 0;
	INIT_LIST_HEAD(&cluster->block_group_list);
	cluster->block_group = NULL;
}
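
/*
 * Illustrative sketch (assuming the upstream tree's embedded clusters, e.g.
 * fs_info->meta_alloc_cluster): a cluster is zeroed once at mount time,
 *
 *	btrfs_init_free_cluster(&fs_info->meta_alloc_cluster);
 *
 * and is only pointed at a block group later by btrfs_find_space_cluster()
 * above.
 */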
static int do_trimming(struct btrfs_block_group_cache *block_group,
		       u64 *total_trimmed, u64 start, u64 bytes,
		       u64 reserved_start, u64 reserved_bytes)
{
	struct btrfs_space_info *space_info = block_group->space_info;
	struct btrfs_fs_info *fs_info = block_group->fs_info;
	int ret;
	int update = 0;
	u64 trimmed = 0;

	spin_lock(&space_info->lock);
	spin_lock(&block_group->lock);
	if (!block_group->ro) {
		block_group->reserved += reserved_bytes;
		space_info->bytes_reserved += reserved_bytes;
		update = 1;
	}
	spin_unlock(&block_group->lock);
	spin_unlock(&space_info->lock);

	ret = btrfs_error_discard_extent(fs_info->extent_root,
					 start, bytes, &trimmed);
	if (!ret)
		*total_trimmed += trimmed;

	btrfs_add_free_space(block_group, reserved_start, reserved_bytes);

	if (update) {
		spin_lock(&space_info->lock);
		spin_lock(&block_group->lock);
		if (block_group->ro)
			space_info->bytes_readonly += reserved_bytes;
		block_group->reserved -= reserved_bytes;
		space_info->bytes_reserved -= reserved_bytes;
		spin_unlock(&space_info->lock);
		spin_unlock(&block_group->lock);
	}

	return ret;
}
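
/*
 * Note on the reservation dance above (illustrative): the range is pinned
 * as "reserved" before the discard so that a concurrent allocation cannot
 * hand out the bytes while the device is trimming them, and the reserved
 * range is re-added to the free space tree afterwards via
 * btrfs_add_free_space().
 */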
static int trim_no_bitmap(struct btrfs_block_group_cache *block_group,
			  u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	struct rb_node *node;
	int ret = 0;
	u64 extent_start;
	u64 extent_bytes;
	u64 bytes;

	while (start < end) {
		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, start, 0, 1);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		/* skip bitmaps */
		while (entry->bitmap) {
			node = rb_next(&entry->offset_index);
			if (!node) {
				spin_unlock(&ctl->tree_lock);
				goto out;
			}
			entry = rb_entry(node, struct btrfs_free_space,
					 offset_index);
		}

		if (entry->offset >= end) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		extent_start = entry->offset;
		extent_bytes = entry->bytes;
		start = max(start, extent_start);
		bytes = min(extent_start + extent_bytes, end) - start;
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		unlink_free_space(ctl, entry);
		kmem_cache_free(btrfs_free_space_cachep, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  extent_start, extent_bytes);
		if (ret)
			break;
next:
		start += bytes;

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}
out:
	return ret;
}
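
/*
 * Worked example of the clamping above (illustrative): with a free extent
 * at [1M, 1M + 256K) and a trim request of [1M + 64K, 2M), start becomes
 * max(1M + 64K, 1M) = 1M + 64K and bytes becomes
 * min(1M + 256K, 2M) - (1M + 64K) = 192K, i.e. only the overlap of the
 * extent with the requested range is discarded.
 */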
static int trim_bitmaps(struct btrfs_block_group_cache *block_group,
			u64 *total_trimmed, u64 start, u64 end, u64 minlen)
{
	struct btrfs_free_space_ctl *ctl = block_group->free_space_ctl;
	struct btrfs_free_space *entry;
	int ret = 0;
	int ret2;
	u64 bytes;
	u64 offset = offset_to_bitmap(ctl, start);

	while (offset < end) {
		bool next_bitmap = false;

		spin_lock(&ctl->tree_lock);

		if (ctl->free_space < minlen) {
			spin_unlock(&ctl->tree_lock);
			break;
		}

		entry = tree_search_offset(ctl, offset, 1, 0);
		if (!entry) {
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		bytes = minlen;
		ret2 = search_bitmap(ctl, entry, &start, &bytes);
		if (ret2 || start >= end) {
			spin_unlock(&ctl->tree_lock);
			next_bitmap = true;
			goto next;
		}

		bytes = min(bytes, end - start);
		if (bytes < minlen) {
			spin_unlock(&ctl->tree_lock);
			goto next;
		}

		bitmap_clear_bits(ctl, entry, start, bytes);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);

		spin_unlock(&ctl->tree_lock);

		ret = do_trimming(block_group, total_trimmed, start, bytes,
				  start, bytes);
		if (ret)
			break;
next:
		if (next_bitmap) {
			offset += BITS_PER_BITMAP * ctl->unit;
		} else {
			start += bytes;
			if (start >= offset + BITS_PER_BITMAP * ctl->unit)
				offset += BITS_PER_BITMAP * ctl->unit;
		}

		if (fatal_signal_pending(current)) {
			ret = -ERESTARTSYS;
			break;
		}

		cond_resched();
	}

	return ret;
}
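
/*
 * Illustrative note: one bitmap covers BITS_PER_BITMAP * ctl->unit bytes of
 * the block group (128M with 4K units and 32768-bit bitmaps), so the
 * "next_bitmap" stepping above advances offset exactly one bitmap window at
 * a time.
 */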
int btrfs_trim_block_group(struct btrfs_block_group_cache *block_group,
			   u64 *trimmed, u64 start, u64 end, u64 minlen)
{
	int ret;

	*trimmed = 0;

	ret = trim_no_bitmap(block_group, trimmed, start, end, minlen);
	if (ret)
		return ret;

	ret = trim_bitmaps(block_group, trimmed, start, end, minlen);

	return ret;
}
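
/*
 * Illustrative sketch: this is the per-block-group worker behind FITRIM.  A
 * hypothetical caller walking all block groups would look roughly like:
 *
 *	u64 group_trimmed;
 *	ret = btrfs_trim_block_group(cache, &group_trimmed, start, end,
 *				     range->minlen);
 *	trimmed += group_trimmed;
 *
 * (compare btrfs_trim_fs() in extent-tree.c in the upstream tree).
 */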
/*
 * Find the left-most item in the cache tree, and then return the
 * smallest inode number in the item.
 *
 * Note: the returned inode number may not be the smallest one in
 * the tree, if the left-most item is a bitmap.
 */
u64 btrfs_find_ino_for_alloc(struct btrfs_root *fs_root)
{
	struct btrfs_free_space_ctl *ctl = fs_root->free_ino_ctl;
	struct btrfs_free_space *entry = NULL;
	u64 ino = 0;

	spin_lock(&ctl->tree_lock);

	if (RB_EMPTY_ROOT(&ctl->free_space_offset))
		goto out;

	entry = rb_entry(rb_first(&ctl->free_space_offset),
			 struct btrfs_free_space, offset_index);

	if (!entry->bitmap) {
		ino = entry->offset;

		unlink_free_space(ctl, entry);
		entry->offset++;
		entry->bytes--;
		if (!entry->bytes)
			kmem_cache_free(btrfs_free_space_cachep, entry);
		else
			link_free_space(ctl, entry);
	} else {
		u64 offset = 0;
		u64 count = 1;
		int ret;

		ret = search_bitmap(ctl, entry, &offset, &count);
		/* Logic error; Should be empty if it can't find anything */
		BUG_ON(ret);

		ino = offset;
		bitmap_clear_bits(ctl, entry, offset, 1);
		if (entry->bytes == 0)
			free_bitmap(ctl, entry);
	}
out:
	spin_unlock(&ctl->tree_lock);

	return ino;
}
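
/*
 * Illustrative note: because only one inode number is taken per call, a
 * plain extent entry is shrunk in place (offset++, bytes--) rather than
 * split, and a bitmap entry just clears a single bit; either entry is
 * freed once it reaches zero bytes.
 */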
struct inode *lookup_free_ino_inode(struct btrfs_root *root,
				    struct btrfs_path *path)
{
	struct inode *inode = NULL;

	spin_lock(&root->cache_lock);
	if (root->cache_inode)
		inode = igrab(root->cache_inode);
	spin_unlock(&root->cache_lock);
	if (inode)
		return inode;

	inode = __lookup_free_space_inode(root, path, 0);
	if (IS_ERR(inode))
		return inode;

	spin_lock(&root->cache_lock);
	if (!btrfs_fs_closing(root->fs_info))
		root->cache_inode = igrab(inode);
	spin_unlock(&root->cache_lock);

	return inode;
}
int create_free_ino_inode(struct btrfs_root *root,
			  struct btrfs_trans_handle *trans,
			  struct btrfs_path *path)
{
	return __create_free_space_inode(root, trans, path,
					 BTRFS_FREE_INO_OBJECTID, 0);
}
int load_free_ino_cache(struct btrfs_fs_info *fs_info, struct btrfs_root *root)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct btrfs_path *path;
	struct inode *inode;
	int ret = 0;
	u64 root_gen = btrfs_root_generation(&root->root_item);

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	/*
	 * If we're unmounting then just return, since this does a search on
	 * the normal root and not the commit root and we could deadlock.
	 */
	if (btrfs_fs_closing(fs_info))
		return 0;

	path = btrfs_alloc_path();
	if (!path)
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		goto out;

	if (root_gen != BTRFS_I(inode)->generation)
		goto out_put;

	ret = __load_free_space_cache(root, inode, ctl, path, 0);

	if (ret < 0)
		printk(KERN_ERR "btrfs: failed to load free ino cache for "
		       "root %llu\n", root->root_key.objectid);
out_put:
	iput(inode);
out:
	btrfs_free_path(path);
	return ret;
}
int btrfs_write_out_ino_cache(struct btrfs_root *root,
			      struct btrfs_trans_handle *trans,
			      struct btrfs_path *path)
{
	struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
	struct inode *inode;
	int ret;

	if (!btrfs_test_opt(root, INODE_MAP_CACHE))
		return 0;

	inode = lookup_free_ino_inode(root, path);
	if (IS_ERR(inode))
		return 0;

	ret = __btrfs_write_out_cache(root, inode, ctl, NULL, trans, path, 0);
	if (ret) {
		btrfs_delalloc_release_metadata(inode, inode->i_size);
		printk(KERN_ERR "btrfs: failed to write free ino cache "
		       "for root %llu\n", root->root_key.objectid);
	}