/*
 * Copyright (C) 2007 Oracle. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/bio.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "volumes.h"
#include "print-tree.h"

#define __MAX_CSUM_ITEMS(r, size) ((unsigned long)(((BTRFS_LEAF_DATA_SIZE(r) - \
                                   sizeof(struct btrfs_item) * 2) / \
                                  size) - 1))

#define MAX_CSUM_ITEMS(r, size) (min_t(u32, __MAX_CSUM_ITEMS(r, size), \
                                       PAGE_CACHE_SIZE))

#define MAX_ORDERED_SUM_BYTES(r) ((PAGE_SIZE - \
                                   sizeof(struct btrfs_ordered_sum)) / \
                                   sizeof(u32) * (r)->sectorsize)

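/*
 * Insert a regular (BTRFS_FILE_EXTENT_REG) file extent item into the tree of
 * @root for inode @objectid at file offset @pos, describing @num_bytes of
 * data stored in the on-disk extent [@disk_offset, @disk_offset +
 * @disk_num_bytes).
 */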
int btrfs_insert_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             u64 objectid, u64 pos,
                             u64 disk_offset, u64 disk_num_bytes,
                             u64 num_bytes, u64 offset, u64 ram_bytes,
                             u8 compression, u8 encryption, u16 other_encoding)
{
        int ret = 0;
        struct btrfs_file_extent_item *item;
        struct btrfs_key file_key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
        file_key.objectid = objectid;
        file_key.offset = pos;
        file_key.type = BTRFS_EXTENT_DATA_KEY;

        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &file_key,
                                      sizeof(*item));
        if (ret < 0)
                goto out;
        BUG_ON(ret); /* Can't happen */
        leaf = path->nodes[0];
        item = btrfs_item_ptr(leaf, path->slots[0],
                              struct btrfs_file_extent_item);
        btrfs_set_file_extent_disk_bytenr(leaf, item, disk_offset);
        btrfs_set_file_extent_disk_num_bytes(leaf, item, disk_num_bytes);
        btrfs_set_file_extent_offset(leaf, item, offset);
        btrfs_set_file_extent_num_bytes(leaf, item, num_bytes);
        btrfs_set_file_extent_ram_bytes(leaf, item, ram_bytes);
        btrfs_set_file_extent_generation(leaf, item, trans->transid);
        btrfs_set_file_extent_type(leaf, item, BTRFS_FILE_EXTENT_REG);
        btrfs_set_file_extent_compression(leaf, item, compression);
        btrfs_set_file_extent_encryption(leaf, item, encryption);
        btrfs_set_file_extent_other_encoding(leaf, item, other_encoding);

        btrfs_mark_buffer_dirty(leaf);
out:
        btrfs_free_path(path);
        return ret;
}

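/*
 * Find the checksum item covering @bytenr in the csum tree and return a
 * pointer to the checksum for that block within the item.  Returns
 * ERR_PTR(-EFBIG) if an item exists but ends exactly at @bytenr, and
 * ERR_PTR(-ENOENT) if no covering item is found.
 */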
static struct btrfs_csum_item *
btrfs_lookup_csum(struct btrfs_trans_handle *trans,
                  struct btrfs_root *root,
                  struct btrfs_path *path,
                  u64 bytenr, int cow)
{
        int ret;
        struct btrfs_key file_key;
        struct btrfs_key found_key;
        struct btrfs_csum_item *item;
        struct extent_buffer *leaf;
        u64 csum_offset = 0;
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        int csums_in_item;

        file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        file_key.offset = bytenr;
        file_key.type = BTRFS_EXTENT_CSUM_KEY;
        ret = btrfs_search_slot(trans, root, &file_key, path, 0, cow);
        if (ret < 0)
                goto fail;
        leaf = path->nodes[0];
        if (ret > 0) {
                ret = 1;
                if (path->slots[0] == 0)
                        goto fail;
                path->slots[0]--;
                btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
                if (found_key.type != BTRFS_EXTENT_CSUM_KEY)
                        goto fail;

                csum_offset = (bytenr - found_key.offset) >>
                                root->fs_info->sb->s_blocksize_bits;
                csums_in_item = btrfs_item_size_nr(leaf, path->slots[0]);
                csums_in_item /= csum_size;

                if (csum_offset == csums_in_item) {
                        ret = -EFBIG;
                        goto fail;
                } else if (csum_offset > csums_in_item) {
                        goto fail;
                }
        }
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
        item = (struct btrfs_csum_item *)((unsigned char *)item +
                                          csum_offset * csum_size);
        return item;
fail:
        if (ret > 0)
                ret = -ENOENT;
        return ERR_PTR(ret);
}

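/*
 * Search for the file extent item of @objectid at file @offset.  @mod < 0
 * prepares the path for a deletion, and any non-zero @mod makes the search
 * COW the path.
 */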
int btrfs_lookup_file_extent(struct btrfs_trans_handle *trans,
                             struct btrfs_root *root,
                             struct btrfs_path *path, u64 objectid,
                             u64 offset, int mod)
{
        int ret;
        struct btrfs_key file_key;
        int ins_len = mod < 0 ? -1 : 0;
        int cow = mod != 0;

        file_key.objectid = objectid;
        file_key.offset = offset;
        file_key.type = BTRFS_EXTENT_DATA_KEY;
        ret = btrfs_search_slot(trans, root, &file_key, path, ins_len, cow);
        return ret;
}

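/* Free the checksum buffer that was allocated for a read bio, if any. */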
static void btrfs_io_bio_endio_readpage(struct btrfs_io_bio *bio, int err)
{
        kfree(bio->csum_allocated);
}

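/*
 * Look up the data checksums for every block covered by @bio, first in the
 * in-memory ordered sums and then in the csum tree.  The results are copied
 * into @dst when it is provided, otherwise into the bio's own csum buffer.
 * Blocks without a stored checksum get a zeroed csum; for the data reloc
 * tree they are also marked EXTENT_NODATASUM in the io tree.
 */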
static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
                                   struct inode *inode, struct bio *bio,
                                   u64 logical_offset, u32 *dst, int dio)
{
        struct bio_vec *bvec = bio->bi_io_vec;
        struct btrfs_io_bio *btrfs_bio = btrfs_io_bio(bio);
        struct btrfs_csum_item *item = NULL;
        struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
        struct btrfs_path *path;
        u8 *csum;
        u64 offset = 0;
        u64 item_start_offset = 0;
        u64 item_last_offset = 0;
        u64 disk_bytenr;
        u32 diff;
        int nblocks;
        int bio_index = 0;
        int count;
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
        if (!dst) {
                if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
                        btrfs_bio->csum_allocated = kmalloc_array(nblocks,
                                        csum_size, GFP_NOFS);
                        if (!btrfs_bio->csum_allocated) {
                                btrfs_free_path(path);
                                return -ENOMEM;
                        }
                        btrfs_bio->csum = btrfs_bio->csum_allocated;
                        btrfs_bio->end_io = btrfs_io_bio_endio_readpage;
                } else {
                        btrfs_bio->csum = btrfs_bio->csum_inline;
                }
                csum = btrfs_bio->csum;
        } else {
                csum = (u8 *)dst;
        }

        if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
                path->reada = READA_FORWARD;

        WARN_ON(bio->bi_vcnt <= 0);

        /*
         * the free space stuff is only read when it hasn't been
         * updated in the current transaction.  So, we can safely
         * read from the commit root and sidestep a nasty deadlock
         * between reading the free space cache and updating the csum tree.
         */
        if (btrfs_is_free_space_inode(inode)) {
                path->search_commit_root = 1;
                path->skip_locking = 1;
        }

        disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
        if (dio)
                offset = logical_offset;
        while (bio_index < bio->bi_vcnt) {
                if (!dio)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;
                count = btrfs_find_ordered_sum(inode, offset, disk_bytenr,
                                               (u32 *)csum, nblocks);
                if (count)
                        goto found;

                if (!item || disk_bytenr < item_start_offset ||
                    disk_bytenr >= item_last_offset) {
                        struct btrfs_key found_key;
                        u32 item_size;

                        if (item)
                                btrfs_release_path(path);
                        item = btrfs_lookup_csum(NULL, root->fs_info->csum_root,
                                                 path, disk_bytenr, 0);
                        if (IS_ERR(item)) {
                                count = 1;
                                memset(csum, 0, csum_size);
                                if (BTRFS_I(inode)->root->root_key.objectid ==
                                    BTRFS_DATA_RELOC_TREE_OBJECTID) {
                                        set_extent_bits(io_tree, offset,
                                                offset + bvec->bv_len - 1,
                                                EXTENT_NODATASUM, GFP_NOFS);
                                } else {
                                        btrfs_info(BTRFS_I(inode)->root->fs_info,
                                                   "no csum found for inode %llu start %llu",
                                               btrfs_ino(inode), offset);
                                }
                                item = NULL;
                                btrfs_release_path(path);
                                goto found;
                        }
                        btrfs_item_key_to_cpu(path->nodes[0], &found_key,
                                              path->slots[0]);

                        item_start_offset = found_key.offset;
                        item_size = btrfs_item_size_nr(path->nodes[0],
                                                       path->slots[0]);
                        item_last_offset = item_start_offset +
                                (item_size / csum_size) *
                                root->sectorsize;
                        item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                              struct btrfs_csum_item);
                }
                /*
                 * this byte range must be able to fit inside
                 * a single leaf so it will also fit inside a u32
                 */
                diff = disk_bytenr - item_start_offset;
                diff = diff / root->sectorsize;
                diff = diff * csum_size;
                count = min_t(int, nblocks, (item_last_offset - disk_bytenr) >>
                                            inode->i_sb->s_blocksize_bits);
                read_extent_buffer(path->nodes[0], csum,
                                   ((unsigned long)item) + diff,
                                   csum_size * count);
found:
                csum += count * csum_size;
                nblocks -= count;
                bio_index += count;
                while (count--) {
                        disk_bytenr += bvec->bv_len;
                        offset += bvec->bv_len;
                        bvec++;
                }
        }
        btrfs_free_path(path);
        return 0;
}

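/* Look up checksums for a bio whose file offsets come from its pages. */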
int btrfs_lookup_bio_sums(struct btrfs_root *root, struct inode *inode,
                          struct bio *bio, u32 *dst)
{
        return __btrfs_lookup_bio_sums(root, inode, bio, 0, dst, 0);
}

int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
                              struct bio *bio, u64 offset)
{
        return __btrfs_lookup_bio_sums(root, inode, bio, offset, NULL, 1);
}

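/*
 * Collect all checksums stored for the byte range [start, end] of the given
 * csum tree into a list of btrfs_ordered_sum structures.  On error the
 * partially built list is freed; on success it is spliced onto @list.
 */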
int btrfs_lookup_csums_range(struct btrfs_root *root, u64 start, u64 end,
                             struct list_head *list, int search_commit)
{
        struct btrfs_key key;
        struct btrfs_path *path;
        struct extent_buffer *leaf;
        struct btrfs_ordered_sum *sums;
        struct btrfs_csum_item *item;
        LIST_HEAD(tmplist);
        unsigned long offset;
        int ret;
        size_t size;
        u64 csum_end;
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

        ASSERT(IS_ALIGNED(start, root->sectorsize) &&
               IS_ALIGNED(end + 1, root->sectorsize));

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        if (search_commit) {
                path->skip_locking = 1;
                path->reada = READA_FORWARD;
                path->search_commit_root = 1;
        }

        key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        key.offset = start;
        key.type = BTRFS_EXTENT_CSUM_KEY;

        ret = btrfs_search_slot(NULL, root, &key, path, 0, 0);
        if (ret < 0)
                goto fail;
        if (ret > 0 && path->slots[0] > 0) {
                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0] - 1);
                if (key.objectid == BTRFS_EXTENT_CSUM_OBJECTID &&
                    key.type == BTRFS_EXTENT_CSUM_KEY) {
                        offset = (start - key.offset) >>
                                 root->fs_info->sb->s_blocksize_bits;
                        if (offset * csum_size <
                            btrfs_item_size_nr(leaf, path->slots[0] - 1))
                                path->slots[0]--;
                }
        }

        while (start <= end) {
                leaf = path->nodes[0];
                if (path->slots[0] >= btrfs_header_nritems(leaf)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret < 0)
                                goto fail;
                        if (ret > 0)
                                break;
                        leaf = path->nodes[0];
                }

                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);
                if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    key.type != BTRFS_EXTENT_CSUM_KEY ||
                    key.offset > end)
                        break;

                if (key.offset > start)
                        start = key.offset;

                size = btrfs_item_size_nr(leaf, path->slots[0]);
                csum_end = key.offset + (size / csum_size) * root->sectorsize;
                if (csum_end <= start) {
                        path->slots[0]++;
                        continue;
                }

                csum_end = min(csum_end, end + 1);
                item = btrfs_item_ptr(path->nodes[0], path->slots[0],
                                      struct btrfs_csum_item);
                while (start < csum_end) {
                        size = min_t(size_t, csum_end - start,
                                     MAX_ORDERED_SUM_BYTES(root));
                        sums = kzalloc(btrfs_ordered_sum_size(root, size),
                                       GFP_NOFS);
                        if (!sums) {
                                ret = -ENOMEM;
                                goto fail;
                        }

                        sums->bytenr = start;
                        sums->len = (int)size;

                        offset = (start - key.offset) >>
                                root->fs_info->sb->s_blocksize_bits;
                        offset *= csum_size;
                        size >>= root->fs_info->sb->s_blocksize_bits;

                        read_extent_buffer(path->nodes[0],
                                           sums->sums,
                                           ((unsigned long)item) + offset,
                                           csum_size * size);

                        start += root->sectorsize * size;
                        list_add_tail(&sums->list, &tmplist);
                }
                path->slots[0]++;
        }
        ret = 0;
fail:
        while (ret < 0 && !list_empty(&tmplist)) {
                sums = list_entry(tmplist.next, struct btrfs_ordered_sum, list);
                list_del(&sums->list);
                kfree(sums);
        }
        list_splice_tail(&tmplist, list);

        btrfs_free_path(path);
        return ret;
}

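/*
 * Compute data checksums for every page segment of a write bio and attach
 * them, as btrfs_ordered_sum structures, to the ordered extents that cover
 * the bio.  @file_start and @contig describe whether the bio is contiguous
 * in the file or whether offsets must be taken from the individual pages.
 */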
int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
                       struct bio *bio, u64 file_start, int contig)
{
        struct btrfs_ordered_sum *sums;
        struct btrfs_ordered_extent *ordered;
        char *data;
        struct bio_vec *bvec = bio->bi_io_vec;
        int bio_index = 0;
        int index;
        unsigned long total_bytes = 0;
        unsigned long this_sum_bytes = 0;
        u64 offset;

        WARN_ON(bio->bi_vcnt <= 0);
        sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
                       GFP_NOFS);
        if (!sums)
                return -ENOMEM;

        sums->len = bio->bi_iter.bi_size;
        INIT_LIST_HEAD(&sums->list);

        if (contig)
                offset = file_start;
        else
                offset = page_offset(bvec->bv_page) + bvec->bv_offset;

        ordered = btrfs_lookup_ordered_extent(inode, offset);
        BUG_ON(!ordered); /* Logic error */
        sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
        index = 0;

        while (bio_index < bio->bi_vcnt) {
                if (!contig)
                        offset = page_offset(bvec->bv_page) + bvec->bv_offset;

                if (offset >= ordered->file_offset + ordered->len ||
                    offset < ordered->file_offset) {
                        unsigned long bytes_left;
                        sums->len = this_sum_bytes;
                        this_sum_bytes = 0;
                        btrfs_add_ordered_sum(inode, ordered, sums);
                        btrfs_put_ordered_extent(ordered);

                        bytes_left = bio->bi_iter.bi_size - total_bytes;

                        sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
                                       GFP_NOFS);
                        BUG_ON(!sums); /* -ENOMEM */
                        sums->len = bytes_left;
                        ordered = btrfs_lookup_ordered_extent(inode, offset);
                        BUG_ON(!ordered); /* Logic error */
                        sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
                                       total_bytes;
                        index = 0;
                }

                data = kmap_atomic(bvec->bv_page);
                sums->sums[index] = ~(u32)0;
                sums->sums[index] = btrfs_csum_data(data + bvec->bv_offset,
                                                    sums->sums[index],
                                                    bvec->bv_len);
                kunmap_atomic(data);
                btrfs_csum_final(sums->sums[index],
                                 (char *)(sums->sums + index));

                bio_index++;
                index++;
                total_bytes += bvec->bv_len;
                this_sum_bytes += bvec->bv_len;
                offset += bvec->bv_len;
                bvec++;
        }
        this_sum_bytes = 0;
        btrfs_add_ordered_sum(inode, ordered, sums);
        btrfs_put_ordered_extent(ordered);
        return 0;
}

/*
 * Helper function for csum removal.  It expects the key to describe the
 * csum item pointed to by the path, and it expects that item to overlap
 * the range [bytenr, bytenr + len).
 *
 * The csum item must not be entirely contained in the range, and the
 * range must not be entirely contained in the csum item.
 *
 * This calls btrfs_truncate_item with the correct args based on the
 * overlap, and fixes up the key as required.
 */
static noinline void truncate_one_csum(struct btrfs_root *root,
                                       struct btrfs_path *path,
                                       struct btrfs_key *key,
                                       u64 bytenr, u64 len)
{
        struct extent_buffer *leaf;
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        u64 csum_end;
        u64 end_byte = bytenr + len;
        u32 blocksize_bits = root->fs_info->sb->s_blocksize_bits;

        leaf = path->nodes[0];
        csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
        csum_end <<= root->fs_info->sb->s_blocksize_bits;
        csum_end += key->offset;

        if (key->offset < bytenr && csum_end <= end_byte) {
                /*
                 *         [ bytenr - len ]
                 *         [   ]
                 *   [csum     ]
                 *   A simple truncate off the end of the item
                 */
                u32 new_size = (bytenr - key->offset) >> blocksize_bits;
                new_size *= csum_size;
                btrfs_truncate_item(root, path, new_size, 1);
        } else if (key->offset >= bytenr && csum_end > end_byte &&
                   end_byte > key->offset) {
                /*
                 *         [ bytenr - len ]
                 *                 [ ]
                 *                 [csum     ]
                 * we need to truncate from the beginning of the csum
                 */
                u32 new_size = (csum_end - end_byte) >> blocksize_bits;
                new_size *= csum_size;

                btrfs_truncate_item(root, path, new_size, 0);

                key->offset = end_byte;
                btrfs_set_item_key_safe(root->fs_info, path, key);
        } else {
                BUG();
        }
}

/*
 * deletes the csum items from the csum tree for a given
 * range of bytes.
 */
int btrfs_del_csums(struct btrfs_trans_handle *trans,
                    struct btrfs_root *root, u64 bytenr, u64 len)
{
        struct btrfs_path *path;
        struct btrfs_key key;
        u64 end_byte = bytenr + len;
        u64 csum_end;
        struct extent_buffer *leaf;
        int ret;
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
        int blocksize_bits = root->fs_info->sb->s_blocksize_bits;

        root = root->fs_info->csum_root;

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;

        while (1) {
                key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
                key.offset = end_byte - 1;
                key.type = BTRFS_EXTENT_CSUM_KEY;

                path->leave_spinning = 1;
                ret = btrfs_search_slot(trans, root, &key, path, -1, 1);
                if (ret > 0) {
                        if (path->slots[0] == 0)
                                break;
                        path->slots[0]--;
                } else if (ret < 0) {
                        break;
                }

                leaf = path->nodes[0];
                btrfs_item_key_to_cpu(leaf, &key, path->slots[0]);

                if (key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    key.type != BTRFS_EXTENT_CSUM_KEY) {
                        break;
                }

                if (key.offset >= end_byte)
                        break;

                csum_end = btrfs_item_size_nr(leaf, path->slots[0]) / csum_size;
                csum_end <<= blocksize_bits;
                csum_end += key.offset;

                /* this csum ends before we start, we're done */
                if (csum_end <= bytenr)
                        break;

                /* delete the entire item, it is inside our range */
                if (key.offset >= bytenr && csum_end <= end_byte) {
                        ret = btrfs_del_item(trans, root, path);
                        if (ret)
                                goto out;
                        if (key.offset == bytenr)
                                break;
                } else if (key.offset < bytenr && csum_end > end_byte) {
                        unsigned long offset;
                        unsigned long shift_len;
                        unsigned long item_offset;
                        /*
                         *      [ bytenr - len ]
                         *   [csum                ]
                         *
                         * Our bytes are in the middle of the csum,
                         * we need to split this item and insert a new one.
                         *
                         * But we can't drop the path because the
                         * csum could change, get removed, extended etc.
                         *
                         * The trick here is the max size of a csum item leaves
                         * enough room in the tree block for a single
                         * item header.  So, we split the item in place,
                         * adding a new header pointing to the existing
                         * bytes.  Then we loop around again and we have
                         * a nicely formed csum item that we can neatly
                         * truncate.
                         */
                        offset = (bytenr - key.offset) >> blocksize_bits;
                        offset *= csum_size;

                        shift_len = (len >> blocksize_bits) * csum_size;

                        item_offset = btrfs_item_ptr_offset(leaf,
                                                            path->slots[0]);

                        memset_extent_buffer(leaf, 0, item_offset + offset,
                                             shift_len);
                        key.offset = bytenr;

                        /*
                         * btrfs_split_item returns -EAGAIN when the
                         * item changed size or key
                         */
                        ret = btrfs_split_item(trans, root, path, &key, offset);
                        if (ret && ret != -EAGAIN) {
                                btrfs_abort_transaction(trans, root, ret);
                                goto out;
                        }

                        key.offset = end_byte - 1;
                } else {
                        truncate_one_csum(root, path, &key, bytenr, len);
                        if (key.offset < bytenr)
                                break;
                }
                btrfs_release_path(path);
        }
        ret = 0;
out:
        btrfs_free_path(path);
        return ret;
}

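/*
 * Insert the checksums described by @sums into the csum tree of @root,
 * extending or growing an existing csum item when one already covers the
 * preceding blocks, and creating new items otherwise.
 */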
int btrfs_csum_file_blocks(struct btrfs_trans_handle *trans,
                           struct btrfs_root *root,
                           struct btrfs_ordered_sum *sums)
{
        struct btrfs_key file_key;
        struct btrfs_key found_key;
        struct btrfs_path *path;
        struct btrfs_csum_item *item;
        struct btrfs_csum_item *item_end;
        struct extent_buffer *leaf = NULL;
        u64 next_offset;
        u64 total_bytes = 0;
        u64 csum_offset;
        u64 bytenr;
        u32 nritems;
        u32 ins_size;
        int index = 0;
        int found_next;
        int ret;
        u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

        path = btrfs_alloc_path();
        if (!path)
                return -ENOMEM;
again:
        next_offset = (u64)-1;
        found_next = 0;
        bytenr = sums->bytenr + total_bytes;
        file_key.objectid = BTRFS_EXTENT_CSUM_OBJECTID;
        file_key.offset = bytenr;
        file_key.type = BTRFS_EXTENT_CSUM_KEY;

        item = btrfs_lookup_csum(trans, root, path, bytenr, 1);
        if (!IS_ERR(item)) {
                ret = 0;
                leaf = path->nodes[0];
                item_end = btrfs_item_ptr(leaf, path->slots[0],
                                          struct btrfs_csum_item);
                item_end = (struct btrfs_csum_item *)((char *)item_end +
                           btrfs_item_size_nr(leaf, path->slots[0]));
                goto found;
        }
        ret = PTR_ERR(item);
        if (ret != -EFBIG && ret != -ENOENT)
                goto fail_unlock;

        if (ret == -EFBIG) {
                u32 item_size;
                /* we found one, but it isn't big enough yet */
                leaf = path->nodes[0];
                item_size = btrfs_item_size_nr(leaf, path->slots[0]);
                if ((item_size / csum_size) >=
                    MAX_CSUM_ITEMS(root, csum_size)) {
                        /* already at max size, make a new one */
                        goto insert;
                }
        } else {
                int slot = path->slots[0] + 1;
                /* we didn't find a csum item, insert one */
                nritems = btrfs_header_nritems(path->nodes[0]);
                if (!nritems || (path->slots[0] >= nritems - 1)) {
                        ret = btrfs_next_leaf(root, path);
                        if (ret == 1)
                                found_next = 1;
                        if (ret != 0)
                                goto insert;
                        slot = path->slots[0];
                }
                btrfs_item_key_to_cpu(path->nodes[0], &found_key, slot);
                if (found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
                    found_key.type != BTRFS_EXTENT_CSUM_KEY) {
                        found_next = 1;
                        goto insert;
                }
                next_offset = found_key.offset;
                found_next = 1;
                goto insert;
        }

        /*
         * at this point, we know the tree has an item, but it isn't big
         * enough yet to put our csum in.  Grow it
         */
        btrfs_release_path(path);
        ret = btrfs_search_slot(trans, root, &file_key, path,
                                csum_size, 1);
        if (ret < 0)
                goto fail_unlock;

        if (ret > 0) {
                if (path->slots[0] == 0)
                        goto insert;
                path->slots[0]--;
        }

        leaf = path->nodes[0];
        btrfs_item_key_to_cpu(leaf, &found_key, path->slots[0]);
        csum_offset = (bytenr - found_key.offset) >>
                        root->fs_info->sb->s_blocksize_bits;

        if (found_key.type != BTRFS_EXTENT_CSUM_KEY ||
            found_key.objectid != BTRFS_EXTENT_CSUM_OBJECTID ||
            csum_offset >= MAX_CSUM_ITEMS(root, csum_size)) {
                goto insert;
        }

        if (csum_offset == btrfs_item_size_nr(leaf, path->slots[0]) /
            csum_size) {
                int extend_nr;
                u64 tmp;
                u32 diff;
                u32 free_space;

                if (btrfs_leaf_free_space(root, leaf) <
                                 sizeof(struct btrfs_item) + csum_size * 2)
                        goto insert;

                free_space = btrfs_leaf_free_space(root, leaf) -
                                         sizeof(struct btrfs_item) - csum_size;
                tmp = sums->len - total_bytes;
                tmp >>= root->fs_info->sb->s_blocksize_bits;
                WARN_ON(tmp < 1);

                extend_nr = max_t(int, 1, (int)tmp);
                diff = (csum_offset + extend_nr) * csum_size;
                diff = min(diff, MAX_CSUM_ITEMS(root, csum_size) * csum_size);

                diff = diff - btrfs_item_size_nr(leaf, path->slots[0]);
                diff = min(free_space, diff);
                diff /= csum_size;
                diff *= csum_size;

                btrfs_extend_item(root, path, diff);
                ret = 0;
                goto csum;
        }

insert:
        btrfs_release_path(path);
        csum_offset = 0;
        if (found_next) {
                u64 tmp;

                tmp = sums->len - total_bytes;
                tmp >>= root->fs_info->sb->s_blocksize_bits;
                tmp = min(tmp, (next_offset - file_key.offset) >>
                               root->fs_info->sb->s_blocksize_bits);

                tmp = max((u64)1, tmp);
                tmp = min(tmp, (u64)MAX_CSUM_ITEMS(root, csum_size));
                ins_size = csum_size * tmp;
        } else {
                ins_size = csum_size;
        }
        path->leave_spinning = 1;
        ret = btrfs_insert_empty_item(trans, root, path, &file_key,
                                      ins_size);
        path->leave_spinning = 0;
        if (ret < 0)
                goto fail_unlock;
        if (WARN_ON(ret != 0))
                goto fail_unlock;
        leaf = path->nodes[0];
csum:
        item = btrfs_item_ptr(leaf, path->slots[0], struct btrfs_csum_item);
        item_end = (struct btrfs_csum_item *)((unsigned char *)item +
                                      btrfs_item_size_nr(leaf, path->slots[0]));
        item = (struct btrfs_csum_item *)((unsigned char *)item +
                                          csum_offset * csum_size);
found:
        ins_size = (u32)(sums->len - total_bytes) >>
                   root->fs_info->sb->s_blocksize_bits;
        ins_size *= csum_size;
        ins_size = min_t(u32, (unsigned long)item_end - (unsigned long)item,
                         ins_size);
        write_extent_buffer(leaf, sums->sums + index, (unsigned long)item,
                            ins_size);

        ins_size /= csum_size;
        total_bytes += ins_size * root->sectorsize;
        index += ins_size;

        btrfs_mark_buffer_dirty(path->nodes[0]);
        if (total_bytes < sums->len) {
                btrfs_release_path(path);
                cond_resched();
                goto again;
        }
out:
        btrfs_free_path(path);
        return ret;

fail_unlock:
        goto out;
}

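/*
 * Fill in an extent_map from the file extent item that @path points at,
 * handling regular, preallocated and inline extents as well as compression.
 */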
void btrfs_extent_item_to_extent_map(struct inode *inode,
                                     const struct btrfs_path *path,
                                     struct btrfs_file_extent_item *fi,
                                     const bool new_inline,
                                     struct extent_map *em)
{
        struct btrfs_root *root = BTRFS_I(inode)->root;
        struct extent_buffer *leaf = path->nodes[0];
        const int slot = path->slots[0];
        struct btrfs_key key;
        u64 extent_start, extent_end;
        u64 bytenr;
        u8 type = btrfs_file_extent_type(leaf, fi);
        int compress_type = btrfs_file_extent_compression(leaf, fi);

        em->bdev = root->fs_info->fs_devices->latest_bdev;
        btrfs_item_key_to_cpu(leaf, &key, slot);
        extent_start = key.offset;

        if (type == BTRFS_FILE_EXTENT_REG ||
            type == BTRFS_FILE_EXTENT_PREALLOC) {
                extent_end = extent_start +
                        btrfs_file_extent_num_bytes(leaf, fi);
        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                size_t size;
                size = btrfs_file_extent_inline_len(leaf, slot, fi);
                extent_end = ALIGN(extent_start + size, root->sectorsize);
        }

        em->ram_bytes = btrfs_file_extent_ram_bytes(leaf, fi);
        if (type == BTRFS_FILE_EXTENT_REG ||
            type == BTRFS_FILE_EXTENT_PREALLOC) {
                em->start = extent_start;
                em->len = extent_end - extent_start;
                em->orig_start = extent_start -
                        btrfs_file_extent_offset(leaf, fi);
                em->orig_block_len = btrfs_file_extent_disk_num_bytes(leaf, fi);
                bytenr = btrfs_file_extent_disk_bytenr(leaf, fi);
                if (bytenr == 0) {
                        em->block_start = EXTENT_MAP_HOLE;
                        return;
                }
                if (compress_type != BTRFS_COMPRESS_NONE) {
                        set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                        em->compress_type = compress_type;
                        em->block_start = bytenr;
                        em->block_len = em->orig_block_len;
                } else {
                        bytenr += btrfs_file_extent_offset(leaf, fi);
                        em->block_start = bytenr;
                        em->block_len = em->len;
                        if (type == BTRFS_FILE_EXTENT_PREALLOC)
                                set_bit(EXTENT_FLAG_PREALLOC, &em->flags);
                }
        } else if (type == BTRFS_FILE_EXTENT_INLINE) {
                em->block_start = EXTENT_MAP_INLINE;
                em->start = extent_start;
                em->len = extent_end - extent_start;
                /*
                 * Initialize orig_start and block_len with the same values
                 * as in inode.c:btrfs_get_extent().
                 */
                em->orig_start = EXTENT_MAP_HOLE;
                em->block_len = (u64)-1;
                if (!new_inline && compress_type != BTRFS_COMPRESS_NONE) {
                        set_bit(EXTENT_FLAG_COMPRESSED, &em->flags);
                        em->compress_type = compress_type;
                }
        } else {
                btrfs_err(root->fs_info,
                          "unknown file extent item type %d, inode %llu, offset %llu, root %llu",
                          type, btrfs_ino(inode), extent_start,
                          root->root_key.objectid);
        }
}