/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/falloc.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/statfs.h>
#include <linux/compat.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "ioctl.h"
#include "print-tree.h"
#include "tree-log.h"
#include "locking.h"
#include "compat.h"
#include "volumes.h"

/*
 * when auto defrag is enabled we
 * queue up these defrag structs to remember which
 * inodes need defragging passes
 */
struct inode_defrag {
	struct rb_node rb_node;
	/* objectid */
	u64 ino;
	/*
	 * transid where the defrag was added, we search for
	 * extents newer than this
	 */
	u64 transid;

	/* root objectid */
	u64 root;

	/* last offset we were able to defrag */
	u64 last_offset;

	/* if we've wrapped around back to zero once already */
	int cycled;
};

static int __compare_inode_defrag(struct inode_defrag *defrag1,
				  struct inode_defrag *defrag2)
{
	if (defrag1->root > defrag2->root)
		return 1;
	else if (defrag1->root < defrag2->root)
		return -1;
	else if (defrag1->ino > defrag2->ino)
		return 1;
	else if (defrag1->ino < defrag2->ino)
		return -1;
	else
		return 0;
}

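/*
 * The comparator above gives a total order on (root, ino) pairs: records
 * sort first by root objectid, then by inode number.  For example, a
 * record for (root 5, ino 300) sorts before (root 5, ino 400), which in
 * turn sorts before (root 7, ino 100).  btrfs_run_defrag_inodes() relies
 * on this order to sweep all queued inodes root by root.
 */
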
/* pop a record for an inode into the defrag tree.  The lock
 * must be held already
 *
 * If you're inserting a record for an older transid than an
 * existing record, the transid already in the tree is lowered
 *
 * If an existing record is found the defrag item you
 * pass in is freed
 */
static void __btrfs_add_inode_defrag(struct inode *inode,
				     struct inode_defrag *defrag)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *entry;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	int ret;

	p = &root->fs_info->defrag_inodes.rb_node;
	while (*p) {
		parent = *p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(defrag, entry);
		if (ret < 0)
			p = &parent->rb_left;
		else if (ret > 0)
			p = &parent->rb_right;
		else {
			/* if we're reinserting an entry for
			 * an old defrag run, make sure to
			 * lower the transid of our existing record
			 */
			if (defrag->transid < entry->transid)
				entry->transid = defrag->transid;
			if (defrag->last_offset > entry->last_offset)
				entry->last_offset = defrag->last_offset;
			goto exists;
		}
	}
	set_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
	rb_link_node(&defrag->rb_node, parent, p);
	rb_insert_color(&defrag->rb_node, &root->fs_info->defrag_inodes);
	return;

exists:
	kfree(defrag);
	return;
}

/*
 * insert a defrag record for this inode if auto defrag is
 * enabled
 */
int btrfs_add_inode_defrag(struct btrfs_trans_handle *trans,
			   struct inode *inode)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct inode_defrag *defrag;
	u64 transid;

	if (!btrfs_test_opt(root, AUTO_DEFRAG))
		return 0;

	if (btrfs_fs_closing(root->fs_info))
		return 0;

	if (test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		return 0;

	if (trans)
		transid = trans->transid;
	else
		transid = BTRFS_I(inode)->root->last_trans;

	defrag = kzalloc(sizeof(*defrag), GFP_NOFS);
	if (!defrag)
		return -ENOMEM;

	defrag->ino = btrfs_ino(inode);
	defrag->transid = transid;
	defrag->root = root->root_key.objectid;

	spin_lock(&root->fs_info->defrag_inodes_lock);
	if (!test_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags))
		__btrfs_add_inode_defrag(inode, defrag);
	else
		kfree(defrag);
	spin_unlock(&root->fs_info->defrag_inodes_lock);
	return 0;
}

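/*
 * Note the IN_DEFRAG bit is tested twice above: once unlocked as a cheap
 * early-out, and once more under defrag_inodes_lock before the insert.
 * If another task queued this inode between the two checks, the freshly
 * allocated record is simply freed instead of being inserted twice.
 */
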
/*
 * must be called with the defrag_inodes lock held
 */
struct inode_defrag *btrfs_find_defrag_inode(struct btrfs_fs_info *info,
					     u64 root, u64 ino,
					     struct rb_node **next)
{
	struct inode_defrag *entry = NULL;
	struct inode_defrag tmp;
	struct rb_node *p;
	struct rb_node *parent = NULL;
	int ret;

	tmp.ino = ino;
	tmp.root = root;

	p = info->defrag_inodes.rb_node;
	while (p) {
		parent = p;
		entry = rb_entry(parent, struct inode_defrag, rb_node);

		ret = __compare_inode_defrag(&tmp, entry);
		if (ret < 0)
			p = parent->rb_left;
		else if (ret > 0)
			p = parent->rb_right;
		else
			return entry;
	}

	if (next) {
		while (parent && __compare_inode_defrag(&tmp, entry) > 0) {
			parent = rb_next(parent);
			entry = rb_entry(parent, struct inode_defrag, rb_node);
		}
		*next = parent;
	}
	return NULL;
}

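/*
 * When no exact (root, ino) match exists, the caller can still make
 * progress: *next is pointed at the first record that sorts after the
 * search key, so btrfs_run_defrag_inodes() can resume its sweep from
 * there instead of restarting the whole tree walk.
 */
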
/*
 * run through the list of inodes in the FS that need
 * defragging
 */
int btrfs_run_defrag_inodes(struct btrfs_fs_info *fs_info)
{
	struct inode_defrag *defrag;
	struct btrfs_root *inode_root;
	struct inode *inode;
	struct rb_node *n;
	struct btrfs_key key;
	struct btrfs_ioctl_defrag_range_args range;
	u64 first_ino = 0;
	u64 root_objectid = 0;
	int num_defrag;
	int defrag_batch = 1024;

	memset(&range, 0, sizeof(range));
	range.len = (u64)-1;

	atomic_inc(&fs_info->defrag_running);
	spin_lock(&fs_info->defrag_inodes_lock);
	while (1) {
		n = NULL;

		/* find an inode to defrag */
		defrag = btrfs_find_defrag_inode(fs_info, root_objectid,
						 first_ino, &n);
		if (!defrag) {
			if (n) {
				defrag = rb_entry(n, struct inode_defrag,
						  rb_node);
			} else if (root_objectid || first_ino) {
				root_objectid = 0;
				first_ino = 0;
				continue;
			} else {
				break;
			}
		}

		/* remove it from the rbtree */
		first_ino = defrag->ino + 1;
		root_objectid = defrag->root;
		rb_erase(&defrag->rb_node, &fs_info->defrag_inodes);

		if (btrfs_fs_closing(fs_info))
			goto next_free;

		spin_unlock(&fs_info->defrag_inodes_lock);

		/* get the inode */
		key.objectid = defrag->root;
		btrfs_set_key_type(&key, BTRFS_ROOT_ITEM_KEY);
		key.offset = (u64)-1;
		inode_root = btrfs_read_fs_root_no_name(fs_info, &key);
		if (IS_ERR(inode_root))
			goto next;

		key.objectid = defrag->ino;
		btrfs_set_key_type(&key, BTRFS_INODE_ITEM_KEY);
		key.offset = 0;

		inode = btrfs_iget(fs_info->sb, &key, inode_root, NULL);
		if (IS_ERR(inode))
			goto next;

		/* do a chunk of defrag */
		clear_bit(BTRFS_INODE_IN_DEFRAG, &BTRFS_I(inode)->runtime_flags);
		range.start = defrag->last_offset;
		num_defrag = btrfs_defrag_file(inode, NULL, &range, defrag->transid,
					       defrag_batch);
		/*
		 * if we filled the whole defrag batch, there
		 * must be more work to do.  Queue this defrag
		 * again
		 */
		if (num_defrag == defrag_batch) {
			defrag->last_offset = range.start;
			__btrfs_add_inode_defrag(inode, defrag);
			/*
			 * we don't want to kfree defrag, we added it back to
			 * the rbtree
			 */
			defrag = NULL;
		} else if (defrag->last_offset && !defrag->cycled) {
			/*
			 * we didn't fill our defrag batch, but
			 * we didn't start at zero.  Make sure we loop
			 * around to the start of the file.
			 */
			defrag->last_offset = 0;
			defrag->cycled = 1;
			__btrfs_add_inode_defrag(inode, defrag);
			defrag = NULL;
		}

		iput(inode);
next:
		spin_lock(&fs_info->defrag_inodes_lock);
next_free:
		kfree(defrag);
	}
	spin_unlock(&fs_info->defrag_inodes_lock);

	atomic_dec(&fs_info->defrag_running);
	/*
	 * during unmount, we use the transaction_wait queue to
	 * wait for the defragger to stop
	 */
	wake_up(&fs_info->transaction_wait);
	return 0;
}

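/*
 * The batching above bounds how long one inode can monopolize the
 * defragger: btrfs_defrag_file() is given a cap of defrag_batch (1024)
 * pages per pass.  An inode that used the whole batch is re-queued with
 * last_offset recording where to pick up, and cycled marks that the
 * pass has already wrapped back to offset zero once.
 */
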
/* simple helper to fault in pages and copy.  This should go away
 * and be replaced with calls into generic code.
 */
static noinline int btrfs_copy_from_user(loff_t pos, int num_pages,
					 size_t write_bytes,
					 struct page **prepared_pages,
					 struct iov_iter *i)
{
	size_t copied = 0;
	size_t total_copied = 0;
	int pg = 0;
	int offset = pos & (PAGE_CACHE_SIZE - 1);

	while (write_bytes > 0) {
		size_t count = min_t(size_t,
				     PAGE_CACHE_SIZE - offset, write_bytes);
		struct page *page = prepared_pages[pg];
		/*
		 * Copy data from userspace to the current page
		 *
		 * Disable pagefault to avoid recursive lock since
		 * the pages are already locked
		 */
		pagefault_disable();
		copied = iov_iter_copy_from_user_atomic(page, i, offset, count);
		pagefault_enable();

		/* Flush processor's dcache for this page */
		flush_dcache_page(page);

		/*
		 * if we get a partial write, we can end up with
		 * partially up to date pages.  These add
		 * a lot of complexity, so make sure they don't
		 * happen by forcing this copy to be retried.
		 *
		 * The rest of the btrfs_file_write code will fall
		 * back to page at a time copies after we return 0.
		 */
		if (!PageUptodate(page) && copied < count)
			copied = 0;

		iov_iter_advance(i, copied);
		write_bytes -= copied;
		total_copied += copied;

		/* Return to btrfs_file_aio_write to fault page */
		if (unlikely(copied == 0))
			break;

		if (unlikely(copied < PAGE_CACHE_SIZE - offset)) {
			offset += copied;
		} else {
			pg++;
			offset = 0;
		}
	}
	return total_copied;
}

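/*
 * The copy loop above is deliberately pessimistic about short copies:
 * the atomic copy may transfer fewer bytes than asked if the source
 * user page is not resident (page faults are disabled while the
 * destination pages are locked).  A partial copy into a page that is
 * not uptodate is discarded (copied forced to 0) so the caller can
 * fault the source in and retry one page at a time.
 */
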
/*
 * unlocks pages after btrfs_file_write is done with them
 */
void btrfs_drop_pages(struct page **pages, size_t num_pages)
{
	size_t i;
	for (i = 0; i < num_pages; i++) {
		/* page checked is some magic around finding pages that
		 * have been modified without going through btrfs_set_page_dirty
		 * clear it here
		 */
		ClearPageChecked(pages[i]);
		unlock_page(pages[i]);
		mark_page_accessed(pages[i]);
		page_cache_release(pages[i]);
	}
}

/*
 * after copy_from_user, pages need to be dirtied and we need to make
 * sure holes are created between the current EOF and the start of
 * any next extents (if required).
 *
 * this also makes the decision about creating an inline extent vs
 * doing real data extents, marking pages dirty and delalloc as required.
 */
int btrfs_dirty_pages(struct btrfs_root *root, struct inode *inode,
		      struct page **pages, size_t num_pages,
		      loff_t pos, size_t write_bytes,
		      struct extent_state **cached)
{
	int err = 0;
	int i;
	u64 num_bytes;
	u64 start_pos;
	u64 end_of_last_block;
	u64 end_pos = pos + write_bytes;
	loff_t isize = i_size_read(inode);

	start_pos = pos & ~((u64)root->sectorsize - 1);
	num_bytes = (write_bytes + pos - start_pos +
		    root->sectorsize - 1) & ~((u64)root->sectorsize - 1);

	end_of_last_block = start_pos + num_bytes - 1;
	err = btrfs_set_extent_delalloc(inode, start_pos, end_of_last_block,
					cached);
	if (err)
		return err;

	for (i = 0; i < num_pages; i++) {
		struct page *p = pages[i];
		SetPageUptodate(p);
		ClearPageChecked(p);
		set_page_dirty(p);
	}

	/*
	 * we've only changed i_size in ram, and we haven't updated
	 * the disk i_size.  There is no need to log the inode
	 * at this time.
	 */
	if (end_pos > isize)
		i_size_write(inode, end_pos);
	return 0;
}

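/*
 * btrfs_dirty_pages() rounds the dirtied byte range outward to sector
 * boundaries before setting delalloc.  For example, with a 4K sectorsize,
 * a 100 byte write at offset 4000 marks the range [0, 8191]: start_pos
 * rounds down to 0 and num_bytes rounds up to 8192, covering both
 * sectors the write touches.
 */
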
/*
 * this drops all the extents in the cache that intersect the range
 * [start, end].  Existing extents are split as required.
 */
void btrfs_drop_extent_cache(struct inode *inode, u64 start, u64 end,
			     int skip_pinned)
{
	struct extent_map *em;
	struct extent_map *split = NULL;
	struct extent_map *split2 = NULL;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	u64 len = end - start + 1;
	u64 gen;
	int ret;
	int testend = 1;
	unsigned long flags;
	int compressed = 0;

	WARN_ON(end < start);
	if (end == (u64)-1) {
		len = (u64)-1;
		testend = 0;
	}
	while (1) {
		if (!split)
			split = alloc_extent_map();
		if (!split2)
			split2 = alloc_extent_map();
		if (!split || !split2)
			break;

		write_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, start, len);
		if (!em) {
			write_unlock(&em_tree->lock);
			break;
		}
		flags = em->flags;
		gen = em->generation;
		if (skip_pinned && test_bit(EXTENT_FLAG_PINNED, &em->flags)) {
			if (testend && em->start + em->len >= start + len) {
				free_extent_map(em);
				write_unlock(&em_tree->lock);
				break;
			}
			start = em->start + em->len;
			if (testend)
				len = start + len - (em->start + em->len);
			free_extent_map(em);
			write_unlock(&em_tree->lock);
			continue;
		}
		compressed = test_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
		clear_bit(EXTENT_FLAG_PINNED, &em->flags);
		remove_extent_mapping(em_tree, em);

		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    em->start < start) {
			split->start = em->start;
			split->len = start - em->start;
			split->orig_start = em->orig_start;
			split->block_start = em->block_start;

			if (compressed)
				split->block_len = em->block_len;
			else
				split->block_len = split->len;
			split->generation = gen;
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = split2;
			split2 = NULL;
		}
		if (em->block_start < EXTENT_MAP_LAST_BYTE &&
		    testend && em->start + em->len > start + len) {
			u64 diff = start + len - em->start;

			split->start = start + len;
			split->len = em->start + em->len - (start + len);
			split->bdev = em->bdev;
			split->flags = flags;
			split->compress_type = em->compress_type;
			split->generation = gen;

			if (compressed) {
				split->block_len = em->block_len;
				split->block_start = em->block_start;
				split->orig_start = em->orig_start;
			} else {
				split->block_len = split->len;
				split->block_start = em->block_start + diff;
				split->orig_start = split->start;
			}

			ret = add_extent_mapping(em_tree, split);
			BUG_ON(ret); /* Logic error */
			list_move(&split->list, &em_tree->modified_extents);
			free_extent_map(split);
			split = NULL;
		}
		write_unlock(&em_tree->lock);

		/* once for us */
		free_extent_map(em);
		/* once for the tree*/
		free_extent_map(em);
	}
	if (split)
		free_extent_map(split);
	if (split2)
		free_extent_map(split2);
}

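/*
 * Two pre-allocated maps are enough for the worst case above: a cached
 * extent that completely straddles [start, end] is trimmed into at most
 * one piece before the range (split) and one piece after it (split2).
 * For compressed extents, block_start/block_len always describe the
 * whole on-disk extent, so both leftover pieces keep the original values
 * instead of being rebased by the split offset.
 */
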
/*
 * this is very complex, but the basic idea is to drop all extents
 * in the range start - end.
 *
 * If an extent intersects the range but is not entirely inside the range
 * it is either truncated or split.  Anything entirely inside the range
 * is deleted from the tree.
 */
int __btrfs_drop_extents(struct btrfs_trans_handle *trans,
			 struct btrfs_root *root, struct inode *inode,
			 struct btrfs_path *path, u64 start, u64 end,
			 u64 *drop_end, int drop_cache)
{
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 ino = btrfs_ino(inode);
	u64 search_start = start;
	u64 disk_bytenr = 0;
	u64 num_bytes = 0;
	u64 extent_offset = 0;
	u64 extent_end = 0;
	int del_nr = 0;
	int del_slot = 0;
	int extent_type;
	int recow;
	int ret;
	int modify_tree = -1;
	int update_refs = (root->ref_cows || root == root->fs_info->tree_root);
	int found = 0;

	if (drop_cache)
		btrfs_drop_extent_cache(inode, start, end - 1, 0);

	if (start >= BTRFS_I(inode)->disk_i_size)
		modify_tree = 0;

	while (1) {
		recow = 0;
		ret = btrfs_lookup_file_extent(trans, root, path, ino,
					       search_start, modify_tree);
		if (ret < 0)
			break;
		if (ret > 0 && path->slots[0] > 0 && search_start == start) {
			leaf = path->nodes[0];
			btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
			if (key.objectid == ino &&
			    key.type == BTRFS_EXTENT_DATA_KEY)
				path->slots[0]--;
		}
		ret = 0;
next_slot:
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			BUG_ON(del_nr > 0);
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				break;
			if (ret > 0) {
				ret = 0;
				break;
			}
			leaf = path->nodes[0];
			recow = 1;
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid > ino ||
		    key.type > BTRFS_EXTENT_DATA_KEY || key.offset >= end)
			break;

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		extent_type = btrfs_file_extent_type(leaf, fi);

		if (extent_type == BTRFS_FILE_EXTENT_REG ||
		    extent_type == BTRFS_FILE_EXTENT_PREALLOC) {
			disk_bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
			num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
			extent_offset = btrfs_file_extent_offset(leaf, fi);
			extent_end = key.offset +
				btrfs_file_extent_num_bytes(leaf, fi);
		} else if (extent_type == BTRFS_FILE_EXTENT_INLINE) {
			extent_end = key.offset +
				btrfs_file_extent_inline_len(leaf, fi);
		} else {
			WARN_ON(1);
			extent_end = search_start;
		}

		if (extent_end <= search_start) {
			path->slots[0]++;
			goto next_slot;
		}

		found = 1;
		search_start = max(key.offset, start);
		if (recow || !modify_tree) {
			modify_tree = -1;
			btrfs_release_path(path);
			continue;
		}

		/*
		 *     | - range to drop - |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end < extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = start;
			ret = btrfs_duplicate_item(trans, root, path,
						   &new_key);
			if (ret == -EAGAIN) {
				btrfs_release_path(path);
				continue;
			}
			if (ret < 0)
				break;

			leaf = path->nodes[0];
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);

			extent_offset += start - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - start);
			btrfs_mark_buffer_dirty(leaf);

			if (update_refs && disk_bytenr > 0) {
				ret = btrfs_inc_extent_ref(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						new_key.objectid,
						start - extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
			}
			key.offset = start;
		}
		/*
		 *  | ---- range to drop ----- |
		 *      | -------- extent -------- |
		 */
		if (start <= key.offset && end < extent_end) {
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			memcpy(&new_key, &key, sizeof(new_key));
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			extent_offset += end - key.offset;
			btrfs_set_file_extent_offset(leaf, fi, extent_offset);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, end - key.offset);
			break;
		}

		search_start = extent_end;
		/*
		 *       | ---- range to drop ----- |
		 *  | -------- extent -------- |
		 */
		if (start > key.offset && end >= extent_end) {
			BUG_ON(del_nr > 0);
			BUG_ON(extent_type == BTRFS_FILE_EXTENT_INLINE);

			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_mark_buffer_dirty(leaf);
			if (update_refs && disk_bytenr > 0)
				inode_sub_bytes(inode, extent_end - start);
			if (end == extent_end)
				break;

			path->slots[0]++;
			goto next_slot;
		}

		/*
		 *  | ---- range to drop ----- |
		 *    | ------ extent ------ |
		 */
		if (start <= key.offset && end >= extent_end) {
			if (del_nr == 0) {
				del_slot = path->slots[0];
				del_nr = 1;
			} else {
				BUG_ON(del_slot + del_nr != path->slots[0]);
				del_nr++;
			}

			if (update_refs &&
			    extent_type == BTRFS_FILE_EXTENT_INLINE) {
				inode_sub_bytes(inode,
						extent_end - key.offset);
				extent_end = ALIGN(extent_end,
						   root->sectorsize);
			} else if (update_refs && disk_bytenr > 0) {
				ret = btrfs_free_extent(trans, root,
						disk_bytenr, num_bytes, 0,
						root->root_key.objectid,
						key.objectid, key.offset -
						extent_offset, 0);
				BUG_ON(ret); /* -ENOMEM */
				inode_sub_bytes(inode,
						extent_end - key.offset);
			}

			if (end == extent_end)
				break;

			if (path->slots[0] + 1 < btrfs_header_nritems(leaf)) {
				path->slots[0]++;
				goto next_slot;
			}

			ret = btrfs_del_items(trans, root, path, del_slot,
					      del_nr);
			if (ret) {
				btrfs_abort_transaction(trans, root, ret);
				break;
			}

			del_nr = 0;
			del_slot = 0;

			btrfs_release_path(path);
			continue;
		}

		BUG_ON(1);
	}

	if (!ret && del_nr > 0) {
		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret)
			btrfs_abort_transaction(trans, root, ret);
	}

	if (drop_end)
		*drop_end = found ? min(end, extent_end) : end;
	btrfs_release_path(path);
	return ret;
}

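/*
 * The four overlap cases above are exhaustive for any extent that
 * intersects [start, end): the extent may hang over both ends (it is
 * split in two), hang over only the front (its key is moved up to end),
 * hang over only the back (its length is trimmed down to start), or sit
 * entirely inside the range (queued in del_slot/del_nr and deleted in
 * batches).
 */
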
int btrfs_drop_extents(struct btrfs_trans_handle *trans,
		       struct btrfs_root *root, struct inode *inode, u64 start,
		       u64 end, int drop_cache)
{
	struct btrfs_path *path;
	int ret;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	ret = __btrfs_drop_extents(trans, root, inode, path, start, end, NULL,
				   drop_cache);
	btrfs_free_path(path);
	return ret;
}

static int extent_mergeable(struct extent_buffer *leaf, int slot,
			    u64 objectid, u64 bytenr, u64 orig_offset,
			    u64 *start, u64 *end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	u64 extent_end;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != objectid || key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);
	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG ||
	    btrfs_file_extent_disk_bytenr(leaf, fi) != bytenr ||
	    btrfs_file_extent_offset(leaf, fi) != key.offset - orig_offset ||
	    btrfs_file_extent_compression(leaf, fi) ||
	    btrfs_file_extent_encryption(leaf, fi) ||
	    btrfs_file_extent_other_encoding(leaf, fi))
		return 0;

	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	if ((*start && *start != key.offset) || (*end && *end != extent_end))
		return 0;

	*start = key.offset;
	*end = extent_end;
	return 1;
}

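/*
 * extent_mergeable() only says yes for plain BTRFS_FILE_EXTENT_REG items
 * that point into the same disk extent at the matching offset, with no
 * compression or special encoding.  The *start/*end outputs double as
 * inputs: a non-zero value is a constraint the candidate neighbour must
 * match exactly, which lets btrfs_mark_extent_written() probe both
 * neighbours with the one helper.
 */
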
/*
 * Mark extent in the range start - end as written.
 *
 * This changes extent type from 'pre-allocated' to 'regular'. If only
 * part of extent is marked as written, the extent will be split into
 * two or three.
 */
int btrfs_mark_extent_written(struct btrfs_trans_handle *trans,
			      struct inode *inode, u64 start, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_path *path;
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;
	struct btrfs_key new_key;
	u64 bytenr;
	u64 num_bytes;
	u64 extent_end;
	u64 orig_offset;
	u64 other_start;
	u64 other_end;
	u64 split;
	int del_nr = 0;
	int del_slot = 0;
	int recow;
	int ret;
	u64 ino = btrfs_ino(inode);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	recow = 0;
	split = start;
	key.objectid = ino;
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = split;

	ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
	if (ret < 0)
		goto out;
	if (ret > 0 && path->slots[0] > 0)
		path->slots[0]--;

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
	BUG_ON(key.objectid != ino || key.type != BTRFS_EXTENT_DATA_KEY);
	fi = btrfs_item_ptr(leaf, path->slots[0],
			    struct btrfs_file_extent_item);
	BUG_ON(btrfs_file_extent_type(leaf, fi) !=
	       BTRFS_FILE_EXTENT_PREALLOC);
	extent_end = key.offset + btrfs_file_extent_num_bytes(leaf, fi);
	BUG_ON(key.offset > start || extent_end < end);

	bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
	num_bytes = btrfs_file_extent_disk_num_bytes(leaf, fi);
	orig_offset = key.offset - btrfs_file_extent_offset(leaf, fi);
	memcpy(&new_key, &key, sizeof(new_key));

	if (start == key.offset && end < extent_end) {
		other_start = 0;
		other_end = start;
		if (extent_mergeable(leaf, path->slots[0] - 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			new_key.offset = end;
			btrfs_set_item_key_safe(trans, root, path, &new_key);
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							extent_end - end);
			btrfs_set_file_extent_offset(leaf, fi,
						     end - orig_offset);
			fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							end - other_start);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	if (start > key.offset && end == extent_end) {
		other_start = end;
		other_end = 0;
		if (extent_mergeable(leaf, path->slots[0] + 1,
				     ino, bytenr, orig_offset,
				     &other_start, &other_end)) {
			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							start - key.offset);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			path->slots[0]++;
			new_key.offset = start;
			btrfs_set_item_key_safe(trans, root, path, &new_key);

			fi = btrfs_item_ptr(leaf, path->slots[0],
					    struct btrfs_file_extent_item);
			btrfs_set_file_extent_generation(leaf, fi,
							 trans->transid);
			btrfs_set_file_extent_num_bytes(leaf, fi,
							other_end - start);
			btrfs_set_file_extent_offset(leaf, fi,
						     start - orig_offset);
			btrfs_mark_buffer_dirty(leaf);
			goto out;
		}
	}

	while (start > key.offset || end < extent_end) {
		if (key.offset == start)
			split = end;

		new_key.offset = split;
		ret = btrfs_duplicate_item(trans, root, path, &new_key);
		if (ret == -EAGAIN) {
			btrfs_release_path(path);
			goto again;
		}
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}

		leaf = path->nodes[0];
		fi = btrfs_item_ptr(leaf, path->slots[0] - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						split - key.offset);

		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);

		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_offset(leaf, fi, split - orig_offset);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - split);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_inc_extent_ref(trans, root, bytenr, num_bytes, 0,
					   root->root_key.objectid,
					   ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */

		if (split == start) {
			key.offset = start;
		} else {
			BUG_ON(start != key.offset);
			path->slots[0]--;
			extent_end = end;
		}
		recow = 1;
	}

	other_start = end;
	other_end = 0;
	if (extent_mergeable(leaf, path->slots[0] + 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		extent_end = other_end;
		del_slot = path->slots[0] + 1;
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	other_start = 0;
	other_end = start;
	if (extent_mergeable(leaf, path->slots[0] - 1,
			     ino, bytenr, orig_offset,
			     &other_start, &other_end)) {
		if (recow) {
			btrfs_release_path(path);
			goto again;
		}
		key.offset = other_start;
		del_slot = path->slots[0];
		del_nr++;
		ret = btrfs_free_extent(trans, root, bytenr, num_bytes,
					0, root->root_key.objectid,
					ino, orig_offset, 0);
		BUG_ON(ret); /* -ENOMEM */
	}
	if (del_nr == 0) {
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_mark_buffer_dirty(leaf);
	} else {
		fi = btrfs_item_ptr(leaf, del_slot - 1,
				    struct btrfs_file_extent_item);
		btrfs_set_file_extent_type(leaf, fi,
					   BTRFS_FILE_EXTENT_REG);
		btrfs_set_file_extent_generation(leaf, fi, trans->transid);
		btrfs_set_file_extent_num_bytes(leaf, fi,
						extent_end - key.offset);
		btrfs_mark_buffer_dirty(leaf);

		ret = btrfs_del_items(trans, root, path, del_slot, del_nr);
		if (ret < 0) {
			btrfs_abort_transaction(trans, root, ret);
			goto out;
		}
	}
out:
	btrfs_free_path(path);
	return 0;
}

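/*
 * After a pre-allocated extent is written, the code above first tries to
 * absorb the written piece into an already-written neighbour; failing
 * that, the prealloc item is split so that only the [start, end) piece
 * is flipped to BTRFS_FILE_EXTENT_REG.  Extra extent refs are taken for
 * the new items and refs are dropped again when neighbours merge back
 * together.
 */
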
/*
 * on error we return an unlocked page and the error value
 * on success we return a locked page and 0
 */
static int prepare_uptodate_page(struct page *page, u64 pos,
				 bool force_uptodate)
{
	int ret = 0;

	if (((pos & (PAGE_CACHE_SIZE - 1)) || force_uptodate) &&
	    !PageUptodate(page)) {
		ret = btrfs_readpage(NULL, page);
		if (ret)
			return ret;
		lock_page(page);
		if (!PageUptodate(page)) {
			unlock_page(page);
			return -EIO;
		}
	}
	return 0;
}

/*
 * this gets pages into the page cache and locks them down, it also properly
 * waits for data=ordered extents to finish before allowing the pages to be
 * modified.
 */
static noinline int prepare_pages(struct btrfs_root *root, struct file *file,
			 struct page **pages, size_t num_pages,
			 loff_t pos, unsigned long first_index,
			 size_t write_bytes, bool force_uptodate)
{
	struct extent_state *cached_state = NULL;
	int i;
	unsigned long index = pos >> PAGE_CACHE_SHIFT;
	struct inode *inode = fdentry(file)->d_inode;
	gfp_t mask = btrfs_alloc_write_mask(inode->i_mapping);
	int err = 0;
	int faili = 0;
	u64 start_pos;
	u64 last_pos;

	start_pos = pos & ~((u64)root->sectorsize - 1);
	last_pos = ((u64)index + num_pages) << PAGE_CACHE_SHIFT;

again:
	for (i = 0; i < num_pages; i++) {
		pages[i] = find_or_create_page(inode->i_mapping, index + i,
					       mask | __GFP_WRITE);
		if (!pages[i]) {
			faili = i - 1;
			err = -ENOMEM;
			goto fail;
		}

		if (i == 0)
			err = prepare_uptodate_page(pages[i], pos,
						    force_uptodate);
		if (i == num_pages - 1)
			err = prepare_uptodate_page(pages[i],
						    pos + write_bytes, false);
		if (err) {
			page_cache_release(pages[i]);
			faili = i - 1;
			goto fail;
		}
		wait_on_page_writeback(pages[i]);
	}
	err = 0;
	if (start_pos < inode->i_size) {
		struct btrfs_ordered_extent *ordered;
		lock_extent_bits(&BTRFS_I(inode)->io_tree,
				 start_pos, last_pos - 1, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    last_pos - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > start_pos &&
		    ordered->file_offset < last_pos) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     start_pos, last_pos - 1,
					     &cached_state, GFP_NOFS);
			for (i = 0; i < num_pages; i++) {
				unlock_page(pages[i]);
				page_cache_release(pages[i]);
			}
			btrfs_wait_ordered_range(inode, start_pos,
						 last_pos - start_pos);
			goto again;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);

		clear_extent_bit(&BTRFS_I(inode)->io_tree, start_pos,
				 last_pos - 1, EXTENT_DIRTY | EXTENT_DELALLOC |
				 EXTENT_DO_ACCOUNTING | EXTENT_DEFRAG,
				 0, 0, &cached_state, GFP_NOFS);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree,
				     start_pos, last_pos - 1, &cached_state,
				     GFP_NOFS);
	}
	for (i = 0; i < num_pages; i++) {
		if (clear_page_dirty_for_io(pages[i]))
			account_page_redirty(pages[i]);
		set_page_extent_mapped(pages[i]);
		WARN_ON(!PageLocked(pages[i]));
	}
	return 0;
fail:
	while (faili >= 0) {
		unlock_page(pages[faili]);
		page_cache_release(pages[faili]);
		faili--;
	}
	return err;
}

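/*
 * prepare_pages() can restart from 'again' because an ordered extent
 * (pending data=ordered writeback) overlapping the write range must be
 * allowed to finish first: all pages are unlocked and released, the
 * range is waited on, and the whole lock-and-check dance is redone.
 */
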
static noinline ssize_t __btrfs_buffered_write(struct file *file,
					       struct iov_iter *i,
					       loff_t pos)
{
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct page **pages = NULL;
	unsigned long first_index;
	size_t num_written = 0;
	int nrptrs;
	int ret = 0;
	bool force_page_uptodate = false;

	nrptrs = min((iov_iter_count(i) + PAGE_CACHE_SIZE - 1) /
		     PAGE_CACHE_SIZE, PAGE_CACHE_SIZE /
		     (sizeof(struct page *)));
	nrptrs = min(nrptrs, current->nr_dirtied_pause - current->nr_dirtied);
	nrptrs = max(nrptrs, 8);
	pages = kmalloc(nrptrs * sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	first_index = pos >> PAGE_CACHE_SHIFT;

	while (iov_iter_count(i) > 0) {
		size_t offset = pos & (PAGE_CACHE_SIZE - 1);
		size_t write_bytes = min(iov_iter_count(i),
					 nrptrs * (size_t)PAGE_CACHE_SIZE -
					 offset);
		size_t num_pages = (write_bytes + offset +
				    PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
		size_t dirty_pages;
		size_t copied;

		WARN_ON(num_pages > nrptrs);

		/*
		 * Fault pages before locking them in prepare_pages
		 * to avoid recursive lock
		 */
		if (unlikely(iov_iter_fault_in_readable(i, write_bytes))) {
			ret = -EFAULT;
			break;
		}

		ret = btrfs_delalloc_reserve_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
		if (ret)
			break;

		/*
		 * This is going to setup the pages array with the number of
		 * pages we want, so we don't really need to worry about the
		 * contents of pages from loop to loop
		 */
		ret = prepare_pages(root, file, pages, num_pages,
				    pos, first_index, write_bytes,
				    force_page_uptodate);
		if (ret) {
			btrfs_delalloc_release_space(inode,
					num_pages << PAGE_CACHE_SHIFT);
			break;
		}

		copied = btrfs_copy_from_user(pos, num_pages,
					      write_bytes, pages, i);

		/*
		 * if we have trouble faulting in the pages, fall
		 * back to one page at a time
		 */
		if (copied < write_bytes)
			nrptrs = 1;

		if (copied == 0) {
			force_page_uptodate = true;
			dirty_pages = 0;
		} else {
			force_page_uptodate = false;
			dirty_pages = (copied + offset +
				       PAGE_CACHE_SIZE - 1) >>
				       PAGE_CACHE_SHIFT;
		}

		/*
		 * If we had a short copy we need to release the excess delalloc
		 * bytes we reserved.  We need to increment outstanding_extents
		 * because btrfs_delalloc_release_space will decrement it, but
		 * we still have an outstanding extent for the chunk we actually
		 * managed to copy.
		 */
		if (num_pages > dirty_pages) {
			if (copied > 0) {
				spin_lock(&BTRFS_I(inode)->lock);
				BTRFS_I(inode)->outstanding_extents++;
				spin_unlock(&BTRFS_I(inode)->lock);
			}
			btrfs_delalloc_release_space(inode,
					(num_pages - dirty_pages) <<
					PAGE_CACHE_SHIFT);
		}

		if (copied > 0) {
			ret = btrfs_dirty_pages(root, inode, pages,
						dirty_pages, pos, copied,
						NULL);
			if (ret) {
				btrfs_delalloc_release_space(inode,
					dirty_pages << PAGE_CACHE_SHIFT);
				btrfs_drop_pages(pages, num_pages);
				break;
			}
		}

		btrfs_drop_pages(pages, num_pages);

		cond_resched();

		balance_dirty_pages_ratelimited_nr(inode->i_mapping,
						   dirty_pages);
		if (dirty_pages < (root->leafsize >> PAGE_CACHE_SHIFT) + 1)
			btrfs_btree_balance_dirty(root, 1);

		pos += copied;
		num_written += copied;
	}

	kfree(pages);

	return num_written ? num_written : ret;
}

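/*
 * The write loop sizes its page array (nrptrs) from the iov length, the
 * task's remaining dirty-page allowance, and a floor of 8 pointers, then
 * reserves delalloc space for the full chunk up front.  Everything after
 * the copy deals with the short-copy case: the excess reservation is
 * returned and only the pages actually copied into are dirtied.
 */
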
static ssize_t __btrfs_direct_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos,
				    loff_t *ppos, size_t count, size_t ocount)
{
	struct file *file = iocb->ki_filp;
	struct iov_iter i;
	ssize_t written;
	ssize_t written_buffered;
	loff_t endbyte;
	int err;

	written = generic_file_direct_write(iocb, iov, &nr_segs, pos, ppos,
					    count, ocount);

	if (written < 0 || written == count)
		return written;

	pos += written;
	count -= written;
	iov_iter_init(&i, iov, nr_segs, count, written);
	written_buffered = __btrfs_buffered_write(file, &i, pos);
	if (written_buffered < 0) {
		err = written_buffered;
		goto out;
	}
	endbyte = pos + written_buffered - 1;
	err = filemap_write_and_wait_range(file->f_mapping, pos, endbyte);
	if (err)
		goto out;
	written += written_buffered;
	*ppos = pos + written_buffered;
	invalidate_mapping_pages(file->f_mapping, pos >> PAGE_CACHE_SHIFT,
				 endbyte >> PAGE_CACHE_SHIFT);
out:
	return written ? written : err;
}

static ssize_t btrfs_file_aio_write(struct kiocb *iocb,
				    const struct iovec *iov,
				    unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct inode *inode = fdentry(file)->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	loff_t *ppos = &iocb->ki_pos;
	u64 start_pos;
	ssize_t num_written = 0;
	ssize_t err = 0;
	size_t count, ocount;

	sb_start_write(inode->i_sb);

	mutex_lock(&inode->i_mutex);

	err = generic_segment_checks(iov, &nr_segs, &ocount, VERIFY_READ);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}
	count = ocount;

	current->backing_dev_info = inode->i_mapping->backing_dev_info;
	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	if (count == 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	err = file_remove_suid(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * If BTRFS flips readonly due to some impossible error
	 * (fs_info->fs_state now has BTRFS_SUPER_FLAG_ERROR),
	 * although we have opened a file as writable, we have
	 * to stop this write operation to ensure FS consistency.
	 */
	if (root->fs_info->fs_state & BTRFS_SUPER_FLAG_ERROR) {
		mutex_unlock(&inode->i_mutex);
		err = -EROFS;
		goto out;
	}

	err = file_update_time(file);
	if (err) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	start_pos = round_down(pos, root->sectorsize);
	if (start_pos > i_size_read(inode)) {
		err = btrfs_cont_expand(inode, i_size_read(inode), start_pos);
		if (err) {
			mutex_unlock(&inode->i_mutex);
			goto out;
		}
	}

	if (unlikely(file->f_flags & O_DIRECT)) {
		num_written = __btrfs_direct_write(iocb, iov, nr_segs,
						   pos, ppos, count, ocount);
	} else {
		struct iov_iter i;

		iov_iter_init(&i, iov, nr_segs, count, num_written);

		num_written = __btrfs_buffered_write(file, &i, pos);
		if (num_written > 0)
			*ppos = pos + num_written;
	}

	mutex_unlock(&inode->i_mutex);

	/*
	 * we want to make sure fsync finds this change
	 * but we haven't joined a transaction running right now.
	 *
	 * Later on, someone is sure to update the inode and get the
	 * real transid recorded.
	 *
	 * We set last_trans now to the fs_info generation + 1,
	 * this will either be one more than the running transaction
	 * or the generation used for the next transaction if there isn't
	 * one running right now.
	 */
	BTRFS_I(inode)->last_trans = root->fs_info->generation + 1;
	if (num_written > 0 || num_written == -EIOCBQUEUED) {
		err = generic_write_sync(file, pos, num_written);
		if (err < 0 && num_written > 0)
			num_written = err;
	}
out:
	sb_end_write(inode->i_sb);
	current->backing_dev_info = NULL;
	return num_written ? num_written : err;
}

int btrfs_release_file(struct inode *inode, struct file *filp)
{
	/*
	 * ordered_data_close is set by setattr when we are about to truncate
	 * a file from a non-zero size to a zero size.  This tries to
	 * flush down new bytes that may have been written if the
	 * application were using truncate to replace a file in place.
	 */
	if (test_and_clear_bit(BTRFS_INODE_ORDERED_DATA_CLOSE,
			       &BTRFS_I(inode)->runtime_flags)) {
		btrfs_add_ordered_operation(NULL, BTRFS_I(inode)->root, inode);
		if (inode->i_size > BTRFS_ORDERED_OPERATIONS_FLUSH_LIMIT)
			filemap_flush(inode->i_mapping);
	}
	if (filp->private_data)
		btrfs_ioctl_trans_end(filp);
	return 0;
}

/*
 * fsync call for both files and directories.  This logs the inode into
 * the tree log instead of forcing full commits whenever possible.
 *
 * It needs to call filemap_fdatawait so that all ordered extent updates
 * in the metadata btree are up to date for copying to the log.
 *
 * It drops the inode mutex before doing the tree log commit.  This is an
 * important optimization for directories because holding the mutex prevents
 * new operations on the dir while we write to disk.
 */
int btrfs_sync_file(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct dentry *dentry = file->f_path.dentry;
	struct inode *inode = dentry->d_inode;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	int ret = 0;
	struct btrfs_trans_handle *trans;

	trace_btrfs_sync_file(file, datasync);

	/*
	 * We write the dirty pages in the range and wait until they complete
	 * outside of the ->i_mutex, so that multiple tasks can flush dirty
	 * pages concurrently, which improves performance.
	 */
	ret = filemap_write_and_wait_range(inode->i_mapping, start, end);
	if (ret)
		return ret;

	mutex_lock(&inode->i_mutex);

	/*
	 * We flush the dirty pages again to avoid some dirty pages in the
	 * range being left.
	 */
	atomic_inc(&root->log_batch);
	btrfs_wait_ordered_range(inode, start, end);
	atomic_inc(&root->log_batch);

	/*
	 * check the transaction that last modified this inode
	 * and see if its already been committed
	 */
	if (!BTRFS_I(inode)->last_trans) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * if the last transaction that changed this file was before
	 * the current transaction, we can bail out now without any
	 * syncing
	 */
	smp_mb();
	if (btrfs_inode_in_log(inode, root->fs_info->generation) ||
	    BTRFS_I(inode)->last_trans <=
	    root->fs_info->last_trans_committed) {
		BTRFS_I(inode)->last_trans = 0;

		/*
		 * We've had everything committed since the last time we were
		 * modified so clear this flag in case it was set for whatever
		 * reason, it's no longer relevant.
		 */
		clear_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			  &BTRFS_I(inode)->runtime_flags);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/*
	 * ok we haven't committed the transaction yet, lets do a commit
	 */
	if (file->private_data)
		btrfs_ioctl_trans_end(file);

	trans = btrfs_start_transaction(root, 0);
	if (IS_ERR(trans)) {
		ret = PTR_ERR(trans);
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	ret = btrfs_log_dentry_safe(trans, root, dentry);
	if (ret < 0) {
		mutex_unlock(&inode->i_mutex);
		goto out;
	}

	/* we've logged all the items and now have a consistent
	 * version of the file in the log.  It is possible that
	 * someone will come in and modify the file, but that's
	 * fine because the log is consistent on disk, and we
	 * have references to all of the file's extents
	 *
	 * It is possible that someone will come in and log the
	 * file again, but that will end up using the synchronization
	 * inside btrfs_sync_log to keep things safe.
	 */
	mutex_unlock(&inode->i_mutex);

	if (ret != BTRFS_NO_LOG_SYNC) {
		if (ret > 0) {
			ret = btrfs_commit_transaction(trans, root);
		} else {
			ret = btrfs_sync_log(trans, root);
			if (ret == 0)
				ret = btrfs_end_transaction(trans, root);
			else
				ret = btrfs_commit_transaction(trans, root);
		}
	} else {
		ret = btrfs_end_transaction(trans, root);
	}
out:
	return ret > 0 ? -EIO : ret;
}

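/*
 * The fast path above (last_trans already committed, or the inode already
 * in the current log) makes fsync of an unmodified file nearly free.
 * Otherwise the inode is logged under i_mutex, the mutex is dropped, and
 * either the tree log alone is synced (cheap) or, when logging was not
 * possible, a full transaction commit is forced.
 */
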
static const struct vm_operations_struct btrfs_file_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= btrfs_page_mkwrite,
	.remap_pages	= generic_file_remap_pages,
};

static int btrfs_file_mmap(struct file *filp, struct vm_area_struct *vma)
{
	struct address_space *mapping = filp->f_mapping;

	if (!mapping->a_ops->readpage)
		return -ENOEXEC;

	file_accessed(filp);
	vma->vm_ops = &btrfs_file_vm_ops;

	return 0;
}

static int hole_mergeable(struct inode *inode, struct extent_buffer *leaf,
			  int slot, u64 start, u64 end)
{
	struct btrfs_file_extent_item *fi;
	struct btrfs_key key;

	if (slot < 0 || slot >= btrfs_header_nritems(leaf))
		return 0;

	btrfs_item_key_to_cpu(leaf, &key, slot);
	if (key.objectid != btrfs_ino(inode) ||
	    key.type != BTRFS_EXTENT_DATA_KEY)
		return 0;

	fi = btrfs_item_ptr(leaf, slot, struct btrfs_file_extent_item);

	if (btrfs_file_extent_type(leaf, fi) != BTRFS_FILE_EXTENT_REG)
		return 0;

	if (btrfs_file_extent_disk_bytenr(leaf, fi))
		return 0;

	if (key.offset == end)
		return 1;
	if (key.offset + btrfs_file_extent_num_bytes(leaf, fi) == start)
		return 1;
	return 0;
}

static int fill_holes(struct btrfs_trans_handle *trans, struct inode *inode,
		      struct btrfs_path *path, u64 offset, u64 end)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf;
	struct btrfs_file_extent_item *fi;
	struct extent_map *hole_em;
	struct extent_map_tree *em_tree = &BTRFS_I(inode)->extent_tree;
	struct btrfs_key key;
	int ret;

	key.objectid = btrfs_ino(inode);
	key.type = BTRFS_EXTENT_DATA_KEY;
	key.offset = offset;

	ret = btrfs_search_slot(trans, root, &key, path, 0, 1);
	if (ret < 0)
		return ret;
	BUG_ON(!ret);

	leaf = path->nodes[0];
	if (hole_mergeable(inode, leaf, path->slots[0]-1, offset, end)) {
		u64 num_bytes;

		path->slots[0]--;
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) +
			end - offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}

	if (hole_mergeable(inode, leaf, path->slots[0]+1, offset, end)) {
		u64 num_bytes;

		path->slots[0]++;
		key.offset = offset;
		btrfs_set_item_key_safe(trans, root, path, &key);
		fi = btrfs_item_ptr(leaf, path->slots[0],
				    struct btrfs_file_extent_item);
		num_bytes = btrfs_file_extent_num_bytes(leaf, fi) + end -
			offset;
		btrfs_set_file_extent_num_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_ram_bytes(leaf, fi, num_bytes);
		btrfs_set_file_extent_offset(leaf, fi, 0);
		btrfs_mark_buffer_dirty(leaf);
		goto out;
	}
	btrfs_release_path(path);

	ret = btrfs_insert_file_extent(trans, root, btrfs_ino(inode), offset,
				       0, 0, end - offset, 0, end - offset,
				       0, 0, 0);
	if (ret)
		return ret;

out:
	btrfs_release_path(path);

	hole_em = alloc_extent_map();
	if (!hole_em) {
		btrfs_drop_extent_cache(inode, offset, end - 1, 0);
		set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
			&BTRFS_I(inode)->runtime_flags);
	} else {
		hole_em->start = offset;
		hole_em->len = end - offset;
		hole_em->orig_start = offset;

		hole_em->block_start = EXTENT_MAP_HOLE;
		hole_em->block_len = 0;
		hole_em->bdev = root->fs_info->fs_devices->latest_bdev;
		hole_em->compress_type = BTRFS_COMPRESS_NONE;
		hole_em->generation = trans->transid;

		do {
			btrfs_drop_extent_cache(inode, offset, end - 1, 0);
			write_lock(&em_tree->lock);
			ret = add_extent_mapping(em_tree, hole_em);
			if (!ret)
				list_move(&hole_em->list,
					  &em_tree->modified_extents);
			write_unlock(&em_tree->lock);
		} while (ret == -EEXIST);
		free_extent_map(hole_em);
		if (ret)
			set_bit(BTRFS_INODE_NEEDS_FULL_SYNC,
				&BTRFS_I(inode)->runtime_flags);
	}

	return 0;
}

static int btrfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_state *cached_state = NULL;
	struct btrfs_path *path;
	struct btrfs_block_rsv *rsv;
	struct btrfs_trans_handle *trans;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	u64 lockstart = (offset + mask) & ~mask;
	u64 lockend = ((offset + len) & ~mask) - 1;
	u64 cur_offset = lockstart;
	u64 min_size = btrfs_calc_trunc_metadata_size(root, 1);
	u64 drop_end;
	unsigned long nr;
	int ret = 0;
	int err = 0;
	bool same_page = (offset >> PAGE_CACHE_SHIFT) ==
		((offset + len) >> PAGE_CACHE_SHIFT);

	btrfs_wait_ordered_range(inode, offset, len);

	mutex_lock(&inode->i_mutex);
	if (offset >= inode->i_size) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	/*
	 * Only do this if we are in the same page and we aren't doing the
	 * entire page.
	 */
	if (same_page && len < PAGE_CACHE_SIZE) {
		ret = btrfs_truncate_page(inode, offset, len, 0);
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero back part of the first page */
	ret = btrfs_truncate_page(inode, offset, 0, 0);
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	/* zero the front end of the last page */
	ret = btrfs_truncate_page(inode, offset + len, 0, 1);
	if (ret) {
		mutex_unlock(&inode->i_mutex);
		return ret;
	}

	if (lockend < lockstart) {
		mutex_unlock(&inode->i_mutex);
		return 0;
	}

	while (1) {
		struct btrfs_ordered_extent *ordered;

		truncate_pagecache_range(inode, lockstart, lockend);

		lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend,
				 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode, lockend);

		/*
		 * We need to make sure we have no ordered extents in this range
		 * and nobody raced in and read a page in this range, if we did
		 * we need to try again.
		 */
		if ((!ordered ||
		    (ordered->file_offset + ordered->len < lockstart ||
		     ordered->file_offset > lockend)) &&
		     !test_range_bit(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, EXTENT_UPTODATE, 0,
				     cached_state)) {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
		if (ordered)
			btrfs_put_ordered_extent(ordered);
		unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart,
				     lockend, &cached_state, GFP_NOFS);
		btrfs_wait_ordered_range(inode, lockstart,
					 lockend - lockstart + 1);
	}

	path = btrfs_alloc_path();
	if (!path) {
		ret = -ENOMEM;
		goto out;
	}

	rsv = btrfs_alloc_block_rsv(root, BTRFS_BLOCK_RSV_TEMP);
	if (!rsv) {
		ret = -ENOMEM;
		goto out_free;
	}
	rsv->size = btrfs_calc_trunc_metadata_size(root, 1);
	rsv->failfast = 1;

	/*
	 * 1 - update the inode
	 * 1 - removing the extents in the range
	 * 1 - adding the hole extent
	 */
	trans = btrfs_start_transaction(root, 3);
	if (IS_ERR(trans)) {
		err = PTR_ERR(trans);
		goto out_free;
	}

	ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv, rsv,
				      min_size);
	BUG_ON(ret);
	trans->block_rsv = rsv;

	while (cur_offset < lockend) {
		ret = __btrfs_drop_extents(trans, root, inode, path,
					   cur_offset, lockend + 1,
					   &drop_end, 1);
		if (ret != -ENOSPC)
			break;

		trans->block_rsv = &root->fs_info->trans_block_rsv;

		ret = fill_holes(trans, inode, path, cur_offset, drop_end);
		if (ret) {
			err = ret;
			break;
		}

		cur_offset = drop_end;

		ret = btrfs_update_inode(trans, root, inode);
		if (ret) {
			err = ret;
			break;
		}

		nr = trans->blocks_used;
		btrfs_end_transaction(trans, root);
		btrfs_btree_balance_dirty(root, nr);

		trans = btrfs_start_transaction(root, 3);
		if (IS_ERR(trans)) {
			ret = PTR_ERR(trans);
			trans = NULL;
			break;
		}

		ret = btrfs_block_rsv_migrate(&root->fs_info->trans_block_rsv,
					      rsv, min_size);
		BUG_ON(ret);	/* shouldn't happen */
		trans->block_rsv = rsv;
	}

	if (ret) {
		err = ret;
		goto out_trans;
	}

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = fill_holes(trans, inode, path, cur_offset, drop_end);
	if (ret) {
		err = ret;
		goto out_trans;
	}

out_trans:
	if (!trans)
		goto out_free;

	inode_inc_iversion(inode);
	inode->i_mtime = inode->i_ctime = CURRENT_TIME;

	trans->block_rsv = &root->fs_info->trans_block_rsv;
	ret = btrfs_update_inode(trans, root, inode);
	nr = trans->blocks_used;
	btrfs_end_transaction(trans, root);
	btrfs_btree_balance_dirty(root, nr);
out_free:
	btrfs_free_path(path);
	btrfs_free_block_rsv(root, rsv);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	mutex_unlock(&inode->i_mutex);
	if (ret && !err)
		err = ret;
	return err;
}

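/*
 * Hole punching above works in sector granularity: partial pages at
 * either edge of the range are zeroed in place via btrfs_truncate_page(),
 * only the fully aligned middle ([lockstart, lockend]) actually has its
 * extents dropped, and fill_holes() re-inserts an explicit hole extent so
 * the file's extent item sequence stays contiguous.
 */
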
static long btrfs_fallocate(struct file *file, int mode,
			    loff_t offset, loff_t len)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct extent_state *cached_state = NULL;
	u64 cur_offset;
	u64 last_byte;
	u64 alloc_start;
	u64 alloc_end;
	u64 alloc_hint = 0;
	u64 locked_end;
	u64 mask = BTRFS_I(inode)->root->sectorsize - 1;
	struct extent_map *em;
	int ret;

	alloc_start = offset & ~mask;
	alloc_end = (offset + len + mask) & ~mask;

	/* Make sure we aren't being given some crap mode */
	if (mode & ~(FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE))
		return -EOPNOTSUPP;

	if (mode & FALLOC_FL_PUNCH_HOLE)
		return btrfs_punch_hole(inode, offset, len);

	/*
	 * Make sure we have enough space before we do the
	 * allocation.
	 */
	ret = btrfs_check_data_free_space(inode, alloc_end - alloc_start + 1);
	if (ret)
		return ret;

	/*
	 * wait for ordered IO before we have any locks.  We'll loop again
	 * below with the locks held.
	 */
	btrfs_wait_ordered_range(inode, alloc_start, alloc_end - alloc_start);

	mutex_lock(&inode->i_mutex);
	ret = inode_newsize_ok(inode, alloc_end);
	if (ret)
		goto out;

	if (alloc_start > inode->i_size) {
		ret = btrfs_cont_expand(inode, i_size_read(inode),
					alloc_start);
		if (ret)
			goto out;
	}

	locked_end = alloc_end - 1;
	while (1) {
		struct btrfs_ordered_extent *ordered;

		/* the extent lock is ordered inside the running
		 * transaction
		 */
		lock_extent_bits(&BTRFS_I(inode)->io_tree, alloc_start,
				 locked_end, 0, &cached_state);
		ordered = btrfs_lookup_first_ordered_extent(inode,
							    alloc_end - 1);
		if (ordered &&
		    ordered->file_offset + ordered->len > alloc_start &&
		    ordered->file_offset < alloc_end) {
			btrfs_put_ordered_extent(ordered);
			unlock_extent_cached(&BTRFS_I(inode)->io_tree,
					     alloc_start, locked_end,
					     &cached_state, GFP_NOFS);
			/*
			 * we can't wait on the range with the transaction
			 * running or with the extent lock held
			 */
			btrfs_wait_ordered_range(inode, alloc_start,
						 alloc_end - alloc_start);
		} else {
			if (ordered)
				btrfs_put_ordered_extent(ordered);
			break;
		}
	}

	cur_offset = alloc_start;
	while (1) {
		u64 actual_end;

		em = btrfs_get_extent(inode, NULL, 0, cur_offset,
				      alloc_end - cur_offset, 0);
		if (IS_ERR_OR_NULL(em)) {
			if (!em)
				ret = -ENOMEM;
			else
				ret = PTR_ERR(em);
			break;
		}
		last_byte = min(extent_map_end(em), alloc_end);
		actual_end = min_t(u64, extent_map_end(em), offset + len);
		last_byte = (last_byte + mask) & ~mask;

		if (em->block_start == EXTENT_MAP_HOLE ||
		    (cur_offset >= inode->i_size &&
		     !test_bit(EXTENT_FLAG_PREALLOC, &em->flags))) {
			ret = btrfs_prealloc_file_range(inode, mode, cur_offset,
							last_byte - cur_offset,
							1 << inode->i_blkbits,
							offset + len,
							&alloc_hint);
			if (ret < 0) {
				free_extent_map(em);
				break;
			}
		} else if (actual_end > inode->i_size &&
			   !(mode & FALLOC_FL_KEEP_SIZE)) {
			/*
			 * We didn't need to allocate any more space, but we
			 * still extended the size of the file so we need to
			 * update i_size.
			 */
			inode->i_ctime = CURRENT_TIME;
			i_size_write(inode, actual_end);
			btrfs_ordered_update_i_size(inode, actual_end, NULL);
		}
		free_extent_map(em);

		cur_offset = last_byte;
		if (cur_offset >= alloc_end) {
			ret = 0;
			break;
		}
	}
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, alloc_start, locked_end,
			     &cached_state, GFP_NOFS);
out:
	mutex_unlock(&inode->i_mutex);
	/* Let go of our reservation. */
	btrfs_free_reserved_data_space(inode, alloc_end - alloc_start + 1);
	return ret;
}

static int find_desired_extent(struct inode *inode, loff_t *offset, int origin)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_map *em;
	struct extent_state *cached_state = NULL;
	u64 lockstart = *offset;
	u64 lockend = i_size_read(inode);
	u64 start = *offset;
	u64 orig_start = *offset;
	u64 len = i_size_read(inode);
	u64 last_end = 0;
	int ret = 0;

	lockend = max_t(u64, root->sectorsize, lockend);
	if (lockend <= lockstart)
		lockend = lockstart + root->sectorsize;

	lockend--;
	len = lockend - lockstart + 1;

	len = max_t(u64, len, root->sectorsize);
	if (inode->i_size == 0)
		return -ENXIO;

	lock_extent_bits(&BTRFS_I(inode)->io_tree, lockstart, lockend, 0,
			 &cached_state);

	/*
	 * Delalloc is such a pain.  If we have a hole and we have pending
	 * delalloc for a portion of the hole we will get back a hole that
	 * exists for the entire range since it hasn't been actually written
	 * yet.  So to take care of this case we need to look for an extent just
	 * before the position we want in case there is outstanding delalloc
	 * going on here.
	 */
	if (origin == SEEK_HOLE && start != 0) {
		if (start <= root->sectorsize)
			em = btrfs_get_extent_fiemap(inode, NULL, 0, 0,
						     root->sectorsize, 0);
		else
			em = btrfs_get_extent_fiemap(inode, NULL, 0,
						     start - root->sectorsize,
						     root->sectorsize, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			goto out;
		}
		last_end = em->start + em->len;
		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);
		free_extent_map(em);
	}

	while (1) {
		em = btrfs_get_extent_fiemap(inode, NULL, 0, start, len, 0);
		if (IS_ERR(em)) {
			ret = PTR_ERR(em);
			break;
		}

		if (em->block_start == EXTENT_MAP_HOLE) {
			if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
				if (last_end <= orig_start) {
					free_extent_map(em);
					ret = -ENXIO;
					break;
				}
			}

			if (origin == SEEK_HOLE) {
				*offset = start;
				free_extent_map(em);
				break;
			}
		} else {
			if (origin == SEEK_DATA) {
				if (em->block_start == EXTENT_MAP_DELALLOC) {
					if (start >= inode->i_size) {
						free_extent_map(em);
						ret = -ENXIO;
						break;
					}
				}

				*offset = start;
				free_extent_map(em);
				break;
			}
		}

		start = em->start + em->len;
		last_end = em->start + em->len;

		if (em->block_start == EXTENT_MAP_DELALLOC)
			last_end = min_t(u64, last_end, inode->i_size);

		if (test_bit(EXTENT_FLAG_VACANCY, &em->flags)) {
			free_extent_map(em);
			ret = -ENXIO;
			break;
		}
		free_extent_map(em);
		cond_resched();
	}
	if (!ret)
		*offset = min(*offset, inode->i_size);
out:
	unlock_extent_cached(&BTRFS_I(inode)->io_tree, lockstart, lockend,
			     &cached_state, GFP_NOFS);
	return ret;
}

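/*
 * This is the guts of SEEK_HOLE/SEEK_DATA support: the loop walks extent
 * maps forward from *offset, treating EXTENT_MAP_HOLE as a hole and
 * everything else as data, with the extra wrinkle that delalloc ranges
 * (dirty data not yet written out) must count as data even though no
 * on-disk extent exists for them yet.
 */
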
static loff_t btrfs_file_llseek(struct file *file, loff_t offset, int origin)
{
	struct inode *inode = file->f_mapping->host;
	int ret;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
	case SEEK_CUR:
		offset = generic_file_llseek(file, offset, origin);
		goto out;
	case SEEK_DATA:
	case SEEK_HOLE:
		if (offset >= i_size_read(inode)) {
			mutex_unlock(&inode->i_mutex);
			return -ENXIO;
		}

		ret = find_desired_extent(inode, &offset, origin);
		if (ret) {
			mutex_unlock(&inode->i_mutex);
			return ret;
		}
	}

	if (offset < 0 && !(file->f_mode & FMODE_UNSIGNED_OFFSET)) {
		offset = -EINVAL;
		goto out;
	}
	if (offset > inode->i_sb->s_maxbytes) {
		offset = -EINVAL;
		goto out;
	}

	/* Special lock needed here? */
	if (offset != file->f_pos) {
		file->f_pos = offset;
		file->f_version = 0;
	}
out:
	mutex_unlock(&inode->i_mutex);
	return offset;
}

const struct file_operations btrfs_file_operations = {
	.llseek		= btrfs_file_llseek,
	.read		= do_sync_read,
	.write		= do_sync_write,
	.aio_read	= generic_file_aio_read,
	.splice_read	= generic_file_splice_read,
	.aio_write	= btrfs_file_aio_write,
	.mmap		= btrfs_file_mmap,
	.open		= generic_file_open,
	.release	= btrfs_release_file,
	.fsync		= btrfs_sync_file,
	.fallocate	= btrfs_fallocate,
	.unlocked_ioctl	= btrfs_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl	= btrfs_ioctl,
#endif
};