/*
 * Copyright (C) 2007 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 021110-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
				   sizeof(struct btrfs_item) * 2) / \
				  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
				       PAGE_SIZE))

#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
				   sizeof(struct btrfs_ordered_sum)) / \
				   sizeof(u32) * (r)->sectorsize)
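
/*
 * Insert a new EXTENT_DATA item for the given objectid at file offset
 * 'pos', describing a regular extent that starts at 'disk_offset' on
 * disk.  The item fields are filled in from the arguments and the leaf
 * is marked dirty before the path is freed.
 */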
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     u64 objectid, u64 pos,
			     u64 disk_offset, u64 disk_num_bytes,
			     u64 num_bytes, u64 offset, u64 ram_bytes,
			     u8 compression, u8 encryption, u16 other_encoding)
{
	int ret = 0;
	struct btrfs_file_extent_item *item;
	struct btrfs_key file_key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
	file_key.objectid = objectid;
	file_key.offset = pos;
	file_key.type = BTRFS_EXTENT_DATA_KEY;

	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      sizeof(*item));
	if (ret < 0)
		goto out;
	BUG_ON(ret); /* Can't happen */
	leaf = path->nodes[0];
	item = btrfs_item_ptr(leaf, path->slots[0],
			      struct btrfs_file_extent_item);
	btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
	btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
	btrfs_set_file_extent_offset(leaf, item, offset);
	btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
	btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
	btrfs_set_file_extent_generation(leaf, item, trans->transid);
	btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
	btrfs_set_file_extent_compression(leaf, item, compression);
	btrfs_set_file_extent_encryption(leaf, item, encryption);
	btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

	btrfs_mark_buffer_dirty(leaf);
out:
	btrfs_free_path(path);
	return ret;
}
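
/*
 * Find the checksum item covering 'bytenr' in the csum tree and return a
 * pointer to the checksum slot for that byte.  Returns ERR_PTR(-EFBIG)
 * when the item that should cover 'bytenr' stops exactly at it, and
 * ERR_PTR(-ENOENT) when no covering item exists.
 */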
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
		  struct btrfs_root *root,
		  struct btrfs_path *path,
		  u64 bytenr, int cow)
{
	int ret;
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_csum_item *item;
	struct extent_buffer *leaf;
	u64 csum_offset = 0;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int csums_in_item;

	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
	if (ret < 0)
		goto fail;
	leaf = path->nodes[0];
	if (ret > 0) {
		ret = 1;
		if (path->slots[0] == 0)
			goto fail;
		path->slots[0]--;
		btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
		if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
			goto fail;

		csum_offset = (bytenr - found_key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
		csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
		csums_in_item /= csum_size;

		if (csum_offset == csums_in_item) {
			ret = -EFBIG;
			goto fail;
		} else if (csum_offset > csums_in_item) {
			goto fail;
		}
	}
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
	return item;
fail:
	if (ret > 0)
		ret = -ENOENT;
	return ERR_PTR(ret);
}
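
/*
 * Position 'path' at the EXTENT_DATA item for (objectid, offset).  'mod'
 * selects the search mode: a negative value allows deletion
 * (ins_len = -1), and any non-zero value makes the search cow the path.
 */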
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
			     struct btrfs_root *root,
			     struct btrfs_path *path, u64 objectid,
			     u64 offset, int mod)
{
	int ret;
	struct btrfs_key file_key;
	int ins_len = mod < 0 ? -1 : 0;
	int cow = mod != 0;

	file_key.objectid = objectid;
	file_key.offset = offset;
	file_key.type = BTRFS_EXTENT_DATA_KEY;
	ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
	return ret;
}
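
/*
 * bio end_io hook used when the checksum array had to be allocated
 * separately from the btrfs_io_bio; it just frees that allocation.
 */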
static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
	kfree(bio->csum_allocated);
}
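
/*
 * Look up the data checksums for every block covered by 'bio'.  Sums are
 * taken first from the in-memory ordered sums and then from the csum
 * tree, and are stored either in 'dst' or in the btrfs_io_bio's csum
 * array (allocated here when the inline space is too small).  Blocks
 * without a checksum get a zeroed csum; for the data reloc tree they are
 * additionally marked EXTENT_NODATASUM, otherwise a message is logged.
 */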
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
				   struct inode *inode, struct bio *bio,
				   u64 logical_offset, u32 *dst, int dio)
{
	struct bio_vec *bvec = bio->bi_io_vec;
	struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
	struct btrfs_csum_item *item = NULL;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	struct btrfs_path *path;
	u8 *csum;
	u64 offset = 0;
	u64 item_start_offset = 0;
	u64 item_last_offset = 0;
	u64 disk_bytenr;
	u32 diff;
	int nblocks;
	int bio_index = 0;
	int count;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
	if (!dst) {
		if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
			btrfs_bio->csum_allocated = kmalloc_array(nblocks,
					csum_size, GFP_NOFS);
			if (!btrfs_bio->csum_allocated) {
				btrfs_free_path(path);
				return -ENOMEM;
			}
			btrfs_bio->csum = btrfs_bio->csum_allocated;
			btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
		} else {
			btrfs_bio->csum = btrfs_bio->csum_inline;
		}
		csum = btrfs_bio->csum;
	} else {
		csum = (u8 *)dst;
	}

	if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
		path->reada = READA_FORWARD;

	WARN_ON(bio->bi_vcnt <= 0);

	/*
	 * the free space stuff is only read when it hasn't been
	 * updated in the current transaction.  So, we can safely
	 * read from the commit root and sidestep a nasty deadlock
	 * between reading the free space cache and updating the csum tree.
	 */
	if (btrfs_is_free_space_inode(inode)) {
		path->search_commit_root = 1;
		path->skip_locking = 1;
	}

	disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
	if (dio)
		offset = logical_offset;
	while (bio_index < bio->bi_vcnt) {
		if (!dio)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;
		count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
					       (u32 *)csum, nblocks);
		if (count)
			goto found;

		if (!item || disk_bytenr < item_start_offset ||
		    disk_bytenr >= item_last_offset) {
			struct btrfs_key found_key;
			u32 item_size;

			if (item)
				btrfs_release_path(path);
			item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
						 path, disk_bytenr, 0);
			if (IS_ERR(item)) {
				count = 1;
				memset(csum, 0, csum_size);
				if (BTRFS_I(inode)->root->root_key.objectid ==
				    BTRFS_DATA_RELOC_TREE_OBJECTID) {
					set_extent_bits(io_tree, offset,
						offset + bvec->bv_len - 1,
						EXTENT_NODATASUM, GFP_NOFS);
				} else {
					btrfs_info(BTRFS_I(inode)->root->fs_info,
						   "no csum found for inode %llu start %llu",
						   btrfs_ino(inode), offset);
				}
				item = NULL;
				btrfs_release_path(path);
				goto found;
			}
			btrfs_item_key_to_cpu(path->nodes[0], &found_key,
					      path->slots[0]);

			item_start_offset = found_key.offset;
			item_size = btrfs_item_size_nr(path->nodes[0],
						       path->slots[0]);
			item_last_offset = item_start_offset +
				(item_size / csum_size) *
				root->sectorsize;
			item = btrfs_item_ptr(path->nodes[0], path->slots[0],
					      struct btrfs_csum_item);
		}
		/*
		 * this byte range must be able to fit inside
		 * a single leaf so it will also fit inside a u32
		 */
		diff = disk_bytenr - item_start_offset;
		diff = diff / root->sectorsize;
		diff = diff * csum_size;
		count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
					    inode->i_sb->s_blocksize_bits);
		read_extent_buffer(path->nodes[0], csum,
				   ((unsigned long)item) + diff,
				   csum_size * count);
found:
		csum += count * csum_size;
		nblocks -= count;
		bio_index += count;
		while (count--) {
			disk_bytenr += bvec->bv_len;
			offset += bvec->bv_len;
			bvec++;
		}
	}
	btrfs_free_path(path);
	return 0;
}
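
/*
 * Thin wrappers around __btrfs_lookup_bio_sums for the buffered and
 * direct I/O cases.
 */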
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
			  struct bio *bio, u32 *dst)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
			      struct bio *bio, u64 offset)
{
	return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
}
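
/*
 * Collect all checksums for the byte range [start, end] from the csum
 * tree into a list of btrfs_ordered_sum structures appended to 'list'.
 * 'start' and 'end + 1' must be sectorsize aligned.  With 'search_commit'
 * set, the commit root is searched without taking locks.
 */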
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
			     struct list_head *list, int search_commit)
{
	struct btrfs_key key;
	struct btrfs_path *path;
	struct extent_buffer *leaf;
	struct btrfs_ordered_sum *sums;
	struct btrfs_csum_item *item;
	LIST_HEAD(tmplist);
	unsigned long offset;
	int ret;
	size_t size;
	u64 csum_end;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	ASSERT(IS_ALIGNED(start, root->sectorsize) &&
	       IS_ALIGNED(end + 1, root->sectorsize));

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	if (search_commit) {
		path->skip_locking = 1;
		path->reada = READA_FORWARD;
		path->search_commit_root = 1;
	}

	key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	key.offset = start;
	key.type = BTRFS_EXTENT_CSUM_KEY;

	ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
	if (ret < 0)
		goto fail;
	if (ret > 0 && path->slots[0] > 0) {
		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
		if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
		    key.type == BTRFS_EXTENT_CSUM_KEY) {
			offset = (start - key.offset) >>
				 root->fs_info->sb->s_blocksize_bits;
			if (offset * csum_size <
			    btrfs_item_size_nr(leaf, path->slots[0] - 1))
				path->slots[0]--;
		}
	}

	while (start <= end) {
		leaf = path->nodes[0];
		if (path->slots[0] >= btrfs_header_nritems(leaf)) {
			ret = btrfs_next_leaf(root, path);
			if (ret < 0)
				goto fail;
			if (ret > 0)
				break;
			leaf = path->nodes[0];
		}

		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY ||
		    key.offset > end)
			break;

		if (key.offset > start)
			start = key.offset;

		size = btrfs_item_size_nr(leaf, path->slots[0]);
		csum_end = key.offset + (size / csum_size) * root->sectorsize;
		if (csum_end <= start) {
			path->slots[0]++;
			continue;
		}

		csum_end = min(csum_end, end + 1);
		item = btrfs_item_ptr(path->nodes[0], path->slots[0],
				      struct btrfs_csum_item);
		while (start < csum_end) {
			size = min_t(size_t, csum_end - start,
				     MAX_ORDERED_SUM_BYTES(root));
			sums = kzalloc(btrfs_ordered_sum_size(root, size),
				       GFP_NOFS);
			if (!sums) {
				ret = -ENOMEM;
				goto fail;
			}

			sums->bytenr = start;
			sums->len = (int)size;

			offset = (start - key.offset) >>
				root->fs_info->sb->s_blocksize_bits;
			offset *= csum_size;
			size >>= root->fs_info->sb->s_blocksize_bits;

			read_extent_buffer(path->nodes[0],
					   sums->sums,
					   ((unsigned long)item) + offset,
					   csum_size * size);

			start += root->sectorsize * size;
			list_add_tail(&sums->list, &tmplist);
		}
		path->slots[0]++;
	}
	ret = 0;
fail:
	while (ret < 0 && !list_empty(&tmplist)) {
		sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
		list_del(&sums->list);
		kfree(sums);
	}
	list_splice_tail(&tmplist, list);

	btrfs_free_path(path);
	return ret;
}
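
/*
 * Compute checksums for every block in 'bio' and attach them, as
 * btrfs_ordered_sum structures, to the ordered extents that cover the
 * bio.  When the bio spans more than one ordered extent the sums are
 * split accordingly.
 */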
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
		       struct bio *bio, u64 file_start, int contig)
{
	struct btrfs_ordered_sum *sums;
	struct btrfs_ordered_extent *ordered;
	char *data;
	struct bio_vec *bvec = bio->bi_io_vec;
	int bio_index = 0;
	int index;
	unsigned long total_bytes = 0;
	unsigned long this_sum_bytes = 0;
	u64 offset;

	WARN_ON(bio->bi_vcnt <= 0);
	sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
		       GFP_NOFS);
	if (!sums)
		return -ENOMEM;

	sums->len = bio->bi_iter.bi_size;
	INIT_LIST_HEAD(&sums->list);

	if (contig)
		offset = file_start;
	else
		offset = page_offset(bvec->bv_page) + bvec->bv_offset;

	ordered = btrfs_lookup_ordered_extent(inode, offset);
	BUG_ON(!ordered); /* Logic error */
	sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
	index = 0;

	while (bio_index < bio->bi_vcnt) {
		if (!contig)
			offset = page_offset(bvec->bv_page) + bvec->bv_offset;

		if (offset >= ordered->file_offset + ordered->len ||
		    offset < ordered->file_offset) {
			unsigned long bytes_left;
			sums->len = this_sum_bytes;
			this_sum_bytes = 0;
			btrfs_add_ordered_sum(inode, ordered, sums);
			btrfs_put_ordered_extent(ordered);

			bytes_left = bio->bi_iter.bi_size - total_bytes;

			sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
				       GFP_NOFS);
			BUG_ON(!sums); /* -ENOMEM */
			sums->len = bytes_left;
			ordered = btrfs_lookup_ordered_extent(inode, offset);
			BUG_ON(!ordered); /* Logic error */
			sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
				       total_bytes;
			index = 0;
		}

		data = kmap_atomic(bvec->bv_page);
		sums->sums[index] = ~(u32)0;
		sums->sums[index] = btrfs_csum_data(data + bvec->bv_offset,
						    sums->sums[index],
						    bvec->bv_len);
		kunmap_atomic(data);
		btrfs_csum_final(sums->sums[index],
				 (char *)(sums->sums + index));

		bio_index++;
		index++;
		total_bytes += bvec->bv_len;
		this_sum_bytes += bvec->bv_len;
		offset += bvec->bv_len;
		bvec++;
	}
	this_sum_bytes = 0;
	btrfs_add_ordered_sum(inode, ordered, sums);
	btrfs_put_ordered_extent(ordered);
	return 0;
}

/*
 * helper function for csum removal, this expects the
 * key to describe the csum pointed to by the path, and it expects
 * the csum to overlap the range [bytenr, len]
 *
 * The csum should not be entirely contained in the range and the
 * range should not be entirely contained in the csum.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_root *root,
				       struct btrfs_path *path,
				       struct btrfs_key *key,
				       u64 bytenr, u64 len)
{
	struct extent_buffer *leaf;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	u64 csum_end;
	u64 end_byte = bytenr + len;
	u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	leaf = path->nodes[0];
	csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
	csum_end <<= root->fs_info->sb->s_blocksize_bits;
	csum_end += key->offset;

	if (key->offset < bytenr && csum_end <= end_byte) {
		/*
		 * A simple truncate off the end of the item
		 */
		u32 new_size = (bytenr - key->offset) >> blocksize_bits;
		new_size *= csum_size;
		btrfs_truncate_item(root, path, new_size, 1);
	} else if (key->offset >= bytenr && csum_end > end_byte &&
		   end_byte > key->offset) {
		/*
		 * we need to truncate from the beginning of the csum
		 */
		u32 new_size = (csum_end - end_byte) >> blocksize_bits;
		new_size *= csum_size;

		btrfs_truncate_item(root, path, new_size, 0);

		key->offset = end_byte;
		btrfs_set_item_key_safe(root->fs_info, path, key);
	} else {
		BUG();
	}
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
		    struct btrfs_root *root, u64 bytenr, u64 len)
{
	struct btrfs_path *path;
	struct btrfs_key key;
	u64 end_byte = bytenr + len;
	u64 csum_end;
	struct extent_buffer *leaf;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
	int blocksize_bits = root->fs_info->sb->s_blocksize_bits;

	root = root->fs_info->csum_root;

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;

	while (1) {
		key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
		key.offset = end_byte - 1;
		key.type = BTRFS_EXTENT_CSUM_KEY;

		path->leave_spinning = 1;
		ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
		if (ret > 0) {
			if (path->slots[0] == 0)
				break;
			path->slots[0]--;
		} else if (ret < 0) {
			break;
		}

		leaf = path->nodes[0];
		btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

		if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    key.type != BTRFS_EXTENT_CSUM_KEY) {
			break;
		}

		if (key.offset >= end_byte)
			break;

		csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
		csum_end <<= blocksize_bits;
		csum_end += key.offset;

		/* this csum ends before we start, we're done */
		if (csum_end <= bytenr)
			break;

		/* delete the entire item, it is inside our range */
		if (key.offset >= bytenr && csum_end <= end_byte) {
			ret = btrfs_del_item(trans, root, path);
			if (ret)
				goto out;
			if (key.offset == bytenr)
				break;
		} else if (key.offset < bytenr && csum_end > end_byte) {
			unsigned long offset;
			unsigned long shift_len;
			unsigned long item_offset;
			/*
			 * Our bytes are in the middle of the csum,
			 * we need to split this item and insert a new one.
			 *
			 * But we can't drop the path because the
			 * csum could change, get removed, extended etc.
			 *
			 * The trick here is the max size of a csum item leaves
			 * enough room in the tree block for a single
			 * item header.  So, we split the item in place,
			 * adding a new header pointing to the existing
			 * bytes.  Then we loop around again and we have
			 * a nicely formed csum item that we can neatly
			 * truncate.
			 */
			offset = (bytenr - key.offset) >> blocksize_bits;
			offset *= csum_size;

			shift_len = (len >> blocksize_bits) * csum_size;

			item_offset = btrfs_item_ptr_offset(leaf,
							    path->slots[0]);

			memset_extent_buffer(leaf, 0, item_offset + offset,
					     shift_len);
			key.offset = bytenr;

			/*
			 * btrfs_split_item returns -EAGAIN when the
			 * item changed size or key
			 */
			ret = btrfs_split_item(trans, root, path, &key, offset);
			if (ret && ret != -EAGAIN) {
				btrfs_abort_transaction(trans, root, ret);
				goto out;
			}

			key.offset = end_byte - 1;
		} else {
			truncate_one_csum(root, path, &key, bytenr, len);
			if (key.offset < bytenr)
				break;
		}
		btrfs_release_path(path);
	}
	ret = 0;
out:
	btrfs_free_path(path);
	return ret;
}
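
/*
 * Insert the checksums described by 'sums' into the csum tree.  An
 * existing csum item is reused or grown whenever possible; otherwise a
 * new item is inserted, sized so that it can later be extended towards
 * the next existing csum item.
 */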
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
			   struct btrfs_root *root,
			   struct btrfs_ordered_sum *sums)
{
	struct btrfs_key file_key;
	struct btrfs_key found_key;
	struct btrfs_path *path;
	struct btrfs_csum_item *item;
	struct btrfs_csum_item *item_end;
	struct extent_buffer *leaf = NULL;
	u64 next_offset;
	u64 total_bytes = 0;
	u64 csum_offset;
	u64 bytenr;
	u32 nritems;
	u32 ins_size;
	int index = 0;
	int found_next;
	int ret;
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	path = btrfs_alloc_path();
	if (!path)
		return -ENOMEM;
again:
	next_offset = (u64)-1;
	found_next = 0;
	bytenr = sums->bytenr + total_bytes;
	file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
	file_key.offset = bytenr;
	file_key.type = BTRFS_EXTENT_CSUM_KEY;

	item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
	if (!IS_ERR(item)) {
		ret = 0;
		leaf = path->nodes[0];
		item_end = btrfs_item_ptr(leaf, path->slots[0],
					  struct btrfs_csum_item);
		item_end = (struct btrfs_csum_item *)((char *)item_end +
			   btrfs_item_size_nr(leaf, path->slots[0]));
		goto found;
	}
	ret = PTR_ERR(item);
	if (ret != -EFBIG && ret != -ENOENT)
		goto fail_unlock;

	if (ret == -EFBIG) {
		u32 item_size;
		/* we found one, but it isn't big enough yet */
		leaf = path->nodes[0];
		item_size = btrfs_item_size_nr(leaf, path->slots[0]);
		if ((item_size / csum_size) >=
		    MAX_CSUM_ITEMS(root, csum_size)) {
			/* already at max size, make a new one */
			goto insert;
		}
	} else {
		int slot = path->slots[0] + 1;
		/* we didn't find a csum item, insert one */
		nritems = btrfs_header_nritems(path->nodes[0]);
		if (!nritems || (path->slots[0] >= nritems - 1)) {
			ret = btrfs_next_leaf(root, path);
			if (ret == 1)
				found_next = 1;
			if (ret != 0)
				goto insert;
			slot = path->slots[0];
		}
		btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
		if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
		    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
			found_next = 1;
			goto insert;
		}
		next_offset = found_key.offset;
		found_next = 1;
		goto insert;
	}

	/*
	 * at this point, we know the tree has an item, but it isn't big
	 * enough yet to put our csum in.  Grow it
	 */
	btrfs_release_path(path);
	ret = btrfs_search_slot(trans, root, &file_key, path,
				csum_size, 1);
	if (ret < 0)
		goto fail_unlock;

	if (ret > 0) {
		if (path->slots[0] == 0)
			goto insert;
		path->slots[0]--;
	}

	leaf = path->nodes[0];
	btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
	csum_offset = (bytenr - found_key.offset) >>
			root->fs_info->sb->s_blocksize_bits;

	if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
	    found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
	    csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
		goto insert;
	}

	if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
	    csum_size) {
		int extend_nr;
		u64 tmp;
		u32 diff;
		u32 free_space;

		if (btrfs_leaf_free_space(root, leaf) <
				 sizeof(struct btrfs_item) + csum_size * 2)
			goto insert;

		free_space = btrfs_leaf_free_space(root, leaf) -
					 sizeof(struct btrfs_item) - csum_size;
		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		WARN_ON(tmp < 1);

		extend_nr = max_t(int, 1, (int)tmp);
		diff = (csum_offset + extend_nr) * csum_size;
		diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

		diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
		diff = min(free_space, diff);
		diff /= csum_size;
		diff *= csum_size;

		btrfs_extend_item(root, path, diff);
		ret = 0;
		goto csum;
	}

insert:
	btrfs_release_path(path);
	csum_offset = 0;
	if (found_next) {
		u64 tmp;

		tmp = sums->len - total_bytes;
		tmp >>= root->fs_info->sb->s_blocksize_bits;
		tmp = min(tmp, (next_offset - file_key.offset) >>
					 root->fs_info->sb->s_blocksize_bits);

		tmp = max((u64)1, tmp);
		tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
		ins_size = csum_size * tmp;
	} else {
		ins_size = csum_size;
	}
	path->leave_spinning = 1;
	ret = btrfs_insert_empty_item(trans, root, path, &file_key,
				      ins_size);
	path->leave_spinning = 0;
	if (ret < 0)
		goto fail_unlock;
	if (WARN_ON(ret != 0))
		goto fail_unlock;
	leaf = path->nodes[0];
csum:
	item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
	item_end = (struct btrfs_csum_item *)((unsigned char *)item +
				      btrfs_item_size_nr(leaf, path->slots[0]));
	item = (struct btrfs_csum_item *)((unsigned char *)item +
					  csum_offset * csum_size);
found:
	ins_size = (u32)(sums->len - total_bytes) >>
		   root->fs_info->sb->s_blocksize_bits;
	ins_size *= csum_size;
	ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
			      ins_size);
	write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
			    ins_size);

	ins_size /= csum_size;
	total_bytes += ins_size * root->sectorsize;
	index += ins_size;

	btrfs_mark_buffer_dirty(path->nodes[0]);
	if (total_bytes < sums->len) {
		btrfs_release_path(path);
		cond_resched();
		goto again;
	}
out:
	btrfs_free_path(path);
	return ret;

fail_unlock:
	goto out;
}
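
/*
 * Translate an on-disk file extent item into the fields of an in-memory
 * extent_map.  Handles regular, preallocated and inline extents, as well
 * as holes and compressed extents.
 */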
void btrfs_extent_item_to_extent_map(struct inode *inode,
				     const struct btrfs_path *path,
				     struct btrfs_file_extent_item *fi,
				     const bool new_inline,
				     struct extent_map *em)
{
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct extent_buffer *leaf = path->nodes[0];
	const int slot = path->slots[0];
	struct btrfs_key key;
	u64 extent_start, extent_end;
	u64 bytenr;
	u8 type = btrfs_file_extent_type(leaf, fi);
	int compress_type = btrfs_file_extent_compression(leaf, fi);

	em->bdev = root->fs_info->fs_devices->latest_bdev;
	btrfs_item_key_to_cpu(leaf, &key, slot);
	extent_start = key.offset;

	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		extent_end = extent_start +
			btrfs_file_extent_num_bytes(leaf, fi);
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		size_t size;
		size = btrfs_file_extent_inline_len(leaf, slot, fi);
		extent_end = ALIGN(extent_start + size, root->sectorsize);
	}

	em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
	if (type == BTRFS_FILE_EXTENT_REG ||
	    type == BTRFS_FILE_EXTENT_PREALLOC) {
		em->start = extent_start;
		em->len = extent_end - extent_start;
		em->orig_start = extent_start -
			btrfs_file_extent_offset(leaf, fi);
		em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
		bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
		if (bytenr == 0) {
			em->block_start = EXTENT_MAP_HOLE;
			return;
		}
		if (compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
			em->block_start = bytenr;
			em->block_len = em->orig_block_len;
		} else {
			bytenr += btrfs_file_extent_offset(leaf, fi);
			em->block_start = bytenr;
			em->block_len = em->len;
			if (type == BTRFS_FILE_EXTENT_PREALLOC)
				set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
		}
	} else if (type == BTRFS_FILE_EXTENT_INLINE) {
		em->block_start = EXTENT_MAP_INLINE;
		em->start = extent_start;
		em->len = extent_end - extent_start;
		/*
		 * Initialize orig_start and block_len with the same values
		 * as in inode.c:btrfs_get_extent().
		 */
		em->orig_start = EXTENT_MAP_HOLE;
		em->block_len = (u64)-1;
		if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
			set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
			em->compress_type = compress_type;
		}
	} else {
		btrfs_err(root->fs_info,
			  "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
			  type, btrfs_ino(inode), extent_start,
			  root->root_key.objectid);
	}
}