fs/btrfs/compression.c
/*
 * Copyright (C) 2008 Oracle.  All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public
 * License v2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public
 * License along with this program; if not, write to the
 * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
 * Boston, MA 02111-1307, USA.
 */

#include <linux/kernel.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/file.h>
#include <linux/fs.h>
#include <linux/pagemap.h>
#include <linux/highmem.h>
#include <linux/time.h>
#include <linux/init.h>
#include <linux/string.h>
#include <linux/backing-dev.h>
#include <linux/mpage.h>
#include <linux/swap.h>
#include <linux/writeback.h>
#include <linux/bit_spinlock.h>
#include <linux/slab.h>
#include "ctree.h"
#include "disk-io.h"
#include "transaction.h"
#include "btrfs_inode.h"
#include "volumes.h"
#include "ordered-data.h"
#include "compression.h"
#include "extent_io.h"
#include "extent_map.h"

struct compressed_bio {
	/* number of bios pending for this compressed extent */
	atomic_t pending_bios;

	/* the pages with the compressed data on them */
	struct page **compressed_pages;

	/* inode that owns this data */
	struct inode *inode;

	/* starting offset in the inode for our pages */
	u64 start;

	/* number of bytes in the inode we're working on */
	unsigned long len;

	/* number of bytes on disk */
	unsigned long compressed_len;

	/* the compression algorithm for this bio */
	int compress_type;

	/* number of compressed pages in the array */
	unsigned long nr_pages;

	/* IO errors */
	int errors;
	int mirror_num;

	/* for reads, this is the bio we are copying the data into */
	struct bio *orig_bio;

	/*
	 * the start of a variable length array of checksums only
	 * used by reads
	 */
	u32 sums;
};
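
/*
 * Illustrative sketch, not used by the code below: the read checksums are
 * allocated inline, directly after the struct (see compressed_bio_size()),
 * so the checksum for the i-th compressed sector lives i entries past
 * &cb->sums.
 */
static inline u32 compressed_bio_sum(const struct compressed_bio *cb,
				     unsigned long i)
{
	return (&cb->sums)[i];
}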

static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen);

static inline int compressed_bio_size(struct btrfs_root *root,
				      unsigned long disk_size)
{
	u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);

	return sizeof(struct compressed_bio) +
		(DIV_ROUND_UP(disk_size, root->sectorsize)) * csum_size;
}
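
/*
 * For example, with the default 4K sectorsize and 4-byte crc32c checksums,
 * a 128K compressed extent needs sizeof(struct compressed_bio) plus
 * DIV_ROUND_UP(131072, 4096) * 4 = 32 * 4 = 128 bytes of checksum space.
 */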

static struct bio *compressed_bio_alloc(struct block_device *bdev,
					u64 first_byte, gfp_t gfp_flags)
{
	return btrfs_bio_alloc(bdev, first_byte >> 9, BIO_MAX_PAGES, gfp_flags);
}
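
/*
 * The >> 9 above converts a byte offset into the 512-byte sector units
 * that bi_sector uses: e.g. a first_byte of 1MiB becomes sector 2048.
 */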

static int check_compressed_csum(struct inode *inode,
				 struct compressed_bio *cb,
				 u64 disk_start)
{
	int ret;
	struct page *page;
	unsigned long i;
	char *kaddr;
	u32 csum;
	u32 *cb_sum = &cb->sums;

	if (BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)
		return 0;

	for (i = 0; i < cb->nr_pages; i++) {
		page = cb->compressed_pages[i];
		csum = ~(u32)0;

		kaddr = kmap_atomic(page);
		csum = btrfs_csum_data(kaddr, csum, PAGE_SIZE);
		btrfs_csum_final(csum, (char *)&csum);
		kunmap_atomic(kaddr);

		if (csum != *cb_sum) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
				   "csum failed ino %llu extent %llu csum %u wanted %u mirror %d",
				   btrfs_ino(inode), disk_start, csum, *cb_sum,
				   cb->mirror_num);
			ret = -EIO;
			goto fail;
		}
		cb_sum++;
	}
	ret = 0;
fail:
	return ret;
}

/*
 * When we finish reading compressed pages from the disk, we decompress
 * them and then run the bio end_io routines on the decompressed pages
 * (in the inode address space).
 *
 * This allows the checksumming and other IO error handling routines
 * to work normally.
 *
 * The compressed pages are freed here, and it must be run in process
 * context.
 */
static void end_compressed_bio_read(struct bio *bio)
{
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;
	int ret;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	inode = cb->inode;
	ret = check_compressed_csum(inode, cb,
				    (u64)bio->bi_iter.bi_sector << 9);
	if (ret)
		goto csum_failed;

	/* ok, we're the last bio for this extent, let's start
	 * the decompression.
	 */
	ret = btrfs_decompress_biovec(cb->compress_type,
				      cb->compressed_pages,
				      cb->start,
				      cb->orig_bio->bi_io_vec,
				      cb->orig_bio->bi_vcnt,
				      cb->compressed_len);
csum_failed:
	if (ret)
		cb->errors = 1;

	/* release the compressed pages */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* do io completion on the original bio */
	if (cb->errors) {
		bio_io_error(cb->orig_bio);
	} else {
		int i;
		struct bio_vec *bvec;

		/*
		 * we have verified the checksum already, set page
		 * checked so the end_io handlers know about it
		 */
		bio_for_each_segment_all(bvec, cb->orig_bio, i)
			SetPageChecked(bvec->bv_page);

		bio_endio(cb->orig_bio);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * Clear the writeback bits on all of the file
 * pages for a compressed write
 */
static noinline void end_compressed_writeback(struct inode *inode,
					      const struct compressed_bio *cb)
{
	unsigned long index = cb->start >> PAGE_SHIFT;
	unsigned long end_index = (cb->start + cb->len - 1) >> PAGE_SHIFT;
	struct page *pages[16];
	unsigned long nr_pages = end_index - index + 1;
	int i;
	int ret;

	if (cb->errors)
		mapping_set_error(inode->i_mapping, -EIO);

	while (nr_pages > 0) {
		ret = find_get_pages_contig(inode->i_mapping, index,
					    min_t(unsigned long,
					    nr_pages, ARRAY_SIZE(pages)), pages);
		if (ret == 0) {
			nr_pages -= 1;
			index += 1;
			continue;
		}
		for (i = 0; i < ret; i++) {
			if (cb->errors)
				SetPageError(pages[i]);
			end_page_writeback(pages[i]);
			put_page(pages[i]);
		}
		nr_pages -= ret;
		index += ret;
	}
	/* the inode may be gone now */
}

/*
 * do the cleanup once all the compressed pages hit the disk.
 * This will clear writeback on the file pages and free the compressed
 * pages.
 *
 * This also calls the writeback end hooks for the file pages so that
 * metadata and checksums can be updated in the file.
 */
static void end_compressed_bio_write(struct bio *bio)
{
	struct extent_io_tree *tree;
	struct compressed_bio *cb = bio->bi_private;
	struct inode *inode;
	struct page *page;
	unsigned long index;

	if (bio->bi_error)
		cb->errors = 1;

	/* if there are more bios still pending for this compressed
	 * extent, just exit
	 */
	if (!atomic_dec_and_test(&cb->pending_bios))
		goto out;

	/* ok, we're the last bio for this extent, step one is to
	 * call back into the FS and do all the end_io operations
	 */
	inode = cb->inode;
	tree = &BTRFS_I(inode)->io_tree;
	cb->compressed_pages[0]->mapping = cb->inode->i_mapping;
	tree->ops->writepage_end_io_hook(cb->compressed_pages[0],
					 cb->start,
					 cb->start + cb->len - 1,
					 NULL,
					 bio->bi_error ? 0 : 1);
	cb->compressed_pages[0]->mapping = NULL;

	end_compressed_writeback(inode, cb);
	/* note, our inode could be gone now */

	/*
	 * release the compressed pages, these came from alloc_page and
	 * are not attached to the inode at all
	 */
	for (index = 0; index < cb->nr_pages; index++) {
		page = cb->compressed_pages[index];
		page->mapping = NULL;
		put_page(page);
	}

	/* finally free the cb struct */
	kfree(cb->compressed_pages);
	kfree(cb);
out:
	bio_put(bio);
}

/*
 * worker function to build and submit bios for previously compressed pages.
 * The corresponding pages in the inode should be marked for writeback
 * and the compressed pages should have a reference on them for dropping
 * when the IO is complete.
 *
 * This also checksums the file bytes and gets things ready for
 * the end io hooks.
 */
int btrfs_submit_compressed_write(struct inode *inode, u64 start,
				  unsigned long len, u64 disk_start,
				  unsigned long compressed_len,
				  struct page **compressed_pages,
				  unsigned long nr_pages)
{
	struct bio *bio = NULL;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	struct compressed_bio *cb;
	unsigned long bytes_left;
	struct extent_io_tree *io_tree = &BTRFS_I(inode)->io_tree;
	int pg_index = 0;
	struct page *page;
	u64 first_byte = disk_start;
	struct block_device *bdev;
	int ret;
	int skip_sum = BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM;

	WARN_ON(start & ((u64)PAGE_SIZE - 1));
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		return -ENOMEM;
	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->start = start;
	cb->len = len;
	cb->mirror_num = 0;
	cb->compressed_pages = compressed_pages;
	cb->compressed_len = compressed_len;
	cb->orig_bio = NULL;
	cb->nr_pages = nr_pages;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
	if (!bio) {
		kfree(cb);
		return -ENOMEM;
	}
	bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
	bio->bi_private = cb;
	bio->bi_end_io = end_compressed_bio_write;
	atomic_inc(&cb->pending_bios);

	/* create and submit bios for the compressed pages */
	bytes_left = compressed_len;
	for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
		page = compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		if (bio->bi_iter.bi_size)
			ret = io_tree->ops->merge_bio_hook(page, 0,
							   PAGE_SIZE,
							   bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(bio);

			/*
			 * Increment the count before we submit the bio so
			 * the end IO handler can't run, and free the cb,
			 * before we're done setting it up.
			 */
			atomic_inc(&cb->pending_bios);
			ret = btrfs_bio_wq_end_io(root->fs_info, bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			if (!skip_sum) {
				ret = btrfs_csum_one_bio(root, inode, bio,
							 start, 1);
				BUG_ON(ret); /* -ENOMEM */
			}

			ret = btrfs_map_bio(root, bio, 0, 1);
			if (ret) {
				bio->bi_error = ret;
				bio_endio(bio);
			}

			bio_put(bio);

			bio = compressed_bio_alloc(bdev, first_byte, GFP_NOFS);
			BUG_ON(!bio);
			bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
			bio->bi_private = cb;
			bio->bi_end_io = end_compressed_bio_write;
			bio_add_page(bio, page, PAGE_SIZE, 0);
		}
		if (bytes_left < PAGE_SIZE) {
			btrfs_info(BTRFS_I(inode)->root->fs_info,
				   "bytes left %lu compress len %lu nr %lu",
				   bytes_left, cb->compressed_len, cb->nr_pages);
		}
		bytes_left -= PAGE_SIZE;
		first_byte += PAGE_SIZE;
		cond_resched();
	}
	bio_get(bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, bio, BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!skip_sum) {
		ret = btrfs_csum_one_bio(root, inode, bio, start, 1);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, bio, 0, 1);
	if (ret) {
		bio->bi_error = ret;
		bio_endio(bio);
	}

	bio_put(bio);
	return 0;
}

static noinline int add_ra_bio_pages(struct inode *inode,
				     u64 compressed_end,
				     struct compressed_bio *cb)
{
	unsigned long end_index;
	unsigned long pg_index;
	u64 last_offset;
	u64 isize = i_size_read(inode);
	int ret;
	struct page *page;
	unsigned long nr_pages = 0;
	struct extent_map *em;
	struct address_space *mapping = inode->i_mapping;
	struct extent_map_tree *em_tree;
	struct extent_io_tree *tree;
	u64 end;
	int misses = 0;

	page = cb->orig_bio->bi_io_vec[cb->orig_bio->bi_vcnt - 1].bv_page;
	last_offset = (page_offset(page) + PAGE_SIZE);
	em_tree = &BTRFS_I(inode)->extent_tree;
	tree = &BTRFS_I(inode)->io_tree;

	if (isize == 0)
		return 0;

	end_index = (i_size_read(inode) - 1) >> PAGE_SHIFT;

	while (last_offset < compressed_end) {
		pg_index = last_offset >> PAGE_SHIFT;

		if (pg_index > end_index)
			break;

		rcu_read_lock();
		page = radix_tree_lookup(&mapping->page_tree, pg_index);
		rcu_read_unlock();
		if (page && !radix_tree_exceptional_entry(page)) {
			misses++;
			if (misses > 4)
				break;
			goto next;
		}

		page = __page_cache_alloc(mapping_gfp_constraint(mapping,
								 ~__GFP_FS));
		if (!page)
			break;

		if (add_to_page_cache_lru(page, mapping, pg_index, GFP_NOFS)) {
			put_page(page);
			goto next;
		}

		end = last_offset + PAGE_SIZE - 1;
		/*
		 * at this point, we have a locked page in the page cache
		 * for these bytes in the file.  But, we have to make
		 * sure they map to this compressed extent on disk.
		 */
		set_page_extent_mapped(page);
		lock_extent(tree, last_offset, end);
		read_lock(&em_tree->lock);
		em = lookup_extent_mapping(em_tree, last_offset,
					   PAGE_SIZE);
		read_unlock(&em_tree->lock);

		if (!em || last_offset < em->start ||
		    (last_offset + PAGE_SIZE > extent_map_end(em)) ||
		    (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
			free_extent_map(em);
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
		free_extent_map(em);

		if (page->index == end_index) {
			char *userpage;
			size_t zero_offset = isize & (PAGE_SIZE - 1);

			if (zero_offset) {
				int zeros;
				zeros = PAGE_SIZE - zero_offset;
				userpage = kmap_atomic(page);
				memset(userpage + zero_offset, 0, zeros);
				flush_dcache_page(page);
				kunmap_atomic(userpage);
			}
		}

		ret = bio_add_page(cb->orig_bio, page,
				   PAGE_SIZE, 0);

		if (ret == PAGE_SIZE) {
			nr_pages++;
			put_page(page);
		} else {
			unlock_extent(tree, last_offset, end);
			unlock_page(page);
			put_page(page);
			break;
		}
next:
		last_offset += PAGE_SIZE;
	}
	return 0;
}

/*
 * for a compressed read, the bio we get passed has all the inode pages
 * in it.  We don't actually do IO on those pages but allocate new ones
 * to hold the compressed pages on disk.
 *
 * bio->bi_iter.bi_sector points to the compressed extent on disk
 * bio->bi_io_vec points to all of the inode pages
 * bio->bi_vcnt is a count of pages
 *
 * After the compressed pages are read, we copy the bytes into the
 * bio we were passed and then call the bio end_io calls
 */
int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
				 int mirror_num, unsigned long bio_flags)
{
	struct extent_io_tree *tree;
	struct extent_map_tree *em_tree;
	struct compressed_bio *cb;
	struct btrfs_root *root = BTRFS_I(inode)->root;
	unsigned long uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
	unsigned long compressed_len;
	unsigned long nr_pages;
	unsigned long pg_index;
	struct page *page;
	struct block_device *bdev;
	struct bio *comp_bio;
	u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
	u64 em_len;
	u64 em_start;
	struct extent_map *em;
	int ret = -ENOMEM;
	int faili = 0;
	u32 *sums;

	tree = &BTRFS_I(inode)->io_tree;
	em_tree = &BTRFS_I(inode)->extent_tree;

	/* we need the actual starting offset of this extent in the file */
	read_lock(&em_tree->lock);
	em = lookup_extent_mapping(em_tree,
				   page_offset(bio->bi_io_vec->bv_page),
				   PAGE_SIZE);
	read_unlock(&em_tree->lock);
	if (!em)
		return -EIO;

	compressed_len = em->block_len;
	cb = kmalloc(compressed_bio_size(root, compressed_len), GFP_NOFS);
	if (!cb)
		goto out;

	atomic_set(&cb->pending_bios, 0);
	cb->errors = 0;
	cb->inode = inode;
	cb->mirror_num = mirror_num;
	sums = &cb->sums;

	cb->start = em->orig_start;
	em_len = em->len;
	em_start = em->start;

	free_extent_map(em);
	em = NULL;

	cb->len = uncompressed_len;
	cb->compressed_len = compressed_len;
	cb->compress_type = extent_compress_type(bio_flags);
	cb->orig_bio = bio;

	nr_pages = DIV_ROUND_UP(compressed_len, PAGE_SIZE);
	cb->compressed_pages = kcalloc(nr_pages, sizeof(struct page *),
				       GFP_NOFS);
	if (!cb->compressed_pages)
		goto fail1;

	bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		cb->compressed_pages[pg_index] = alloc_page(GFP_NOFS |
							    __GFP_HIGHMEM);
		if (!cb->compressed_pages[pg_index]) {
			faili = pg_index - 1;
			ret = -ENOMEM;
			goto fail2;
		}
	}
	faili = nr_pages - 1;
	cb->nr_pages = nr_pages;

	add_ra_bio_pages(inode, em_start + em_len, cb);

	/* include any pages we added in add_ra_bio_pages */
	uncompressed_len = bio->bi_vcnt * PAGE_SIZE;
	cb->len = uncompressed_len;

	comp_bio = compressed_bio_alloc(bdev, cur_disk_byte, GFP_NOFS);
	if (!comp_bio)
		goto fail2;
	bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
	comp_bio->bi_private = cb;
	comp_bio->bi_end_io = end_compressed_bio_read;
	atomic_inc(&cb->pending_bios);

	for (pg_index = 0; pg_index < nr_pages; pg_index++) {
		page = cb->compressed_pages[pg_index];
		page->mapping = inode->i_mapping;
		page->index = em_start >> PAGE_SHIFT;

		if (comp_bio->bi_iter.bi_size)
			ret = tree->ops->merge_bio_hook(page, 0,
							PAGE_SIZE,
							comp_bio, 0);
		else
			ret = 0;

		page->mapping = NULL;
		if (ret || bio_add_page(comp_bio, page, PAGE_SIZE, 0) <
		    PAGE_SIZE) {
			bio_get(comp_bio);

			ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
						  BTRFS_WQ_ENDIO_DATA);
			BUG_ON(ret); /* -ENOMEM */

			/*
			 * Increment the count before we submit the bio so
			 * the end IO handler can't run, and free the cb,
			 * before we're done setting it up.
			 */
			atomic_inc(&cb->pending_bios);

			if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
				ret = btrfs_lookup_bio_sums(root, inode,
							    comp_bio, sums);
				BUG_ON(ret); /* -ENOMEM */
			}
			sums += DIV_ROUND_UP(comp_bio->bi_iter.bi_size,
					     root->sectorsize);

			ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
			if (ret) {
				comp_bio->bi_error = ret;
				bio_endio(comp_bio);
			}

			bio_put(comp_bio);

			comp_bio = compressed_bio_alloc(bdev, cur_disk_byte,
							GFP_NOFS);
			BUG_ON(!comp_bio);
			bio_set_op_attrs(comp_bio, REQ_OP_READ, 0);
			comp_bio->bi_private = cb;
			comp_bio->bi_end_io = end_compressed_bio_read;

			bio_add_page(comp_bio, page, PAGE_SIZE, 0);
		}
		cur_disk_byte += PAGE_SIZE;
	}
	bio_get(comp_bio);

	ret = btrfs_bio_wq_end_io(root->fs_info, comp_bio,
				  BTRFS_WQ_ENDIO_DATA);
	BUG_ON(ret); /* -ENOMEM */

	if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
		ret = btrfs_lookup_bio_sums(root, inode, comp_bio, sums);
		BUG_ON(ret); /* -ENOMEM */
	}

	ret = btrfs_map_bio(root, comp_bio, mirror_num, 0);
	if (ret) {
		comp_bio->bi_error = ret;
		bio_endio(comp_bio);
	}

	bio_put(comp_bio);
	return 0;

fail2:
	while (faili >= 0) {
		__free_page(cb->compressed_pages[faili]);
		faili--;
	}

	kfree(cb->compressed_pages);
fail1:
	kfree(cb);
out:
	free_extent_map(em);
	return ret;
}

static struct {
	struct list_head idle_ws;
	spinlock_t ws_lock;
	/* Number of free workspaces */
	int free_ws;
	/* Total number of allocated workspaces */
	atomic_t total_ws;
	/* Waiters for a free workspace */
	wait_queue_head_t ws_wait;
} btrfs_comp_ws[BTRFS_COMPRESS_TYPES];

static const struct btrfs_compress_op * const btrfs_compress_op[] = {
	&btrfs_zlib_compress,
	&btrfs_lzo_compress,
};
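
/*
 * Note on indexing: compression type 0 means "none", so these arrays are
 * indexed by type - 1 throughout this file: BTRFS_COMPRESS_ZLIB (1) maps
 * to btrfs_zlib_compress at index 0, and BTRFS_COMPRESS_LZO (2) to
 * btrfs_lzo_compress at index 1.
 */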

void __init btrfs_init_compress(void)
{
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		struct list_head *workspace;

		INIT_LIST_HEAD(&btrfs_comp_ws[i].idle_ws);
		spin_lock_init(&btrfs_comp_ws[i].ws_lock);
		atomic_set(&btrfs_comp_ws[i].total_ws, 0);
		init_waitqueue_head(&btrfs_comp_ws[i].ws_wait);

		/*
		 * Preallocate one workspace for each compression type so
		 * we can guarantee forward progress in the worst case
		 */
		workspace = btrfs_compress_op[i]->alloc_workspace();
		if (IS_ERR(workspace)) {
			printk(KERN_WARNING
	"BTRFS: cannot preallocate compression workspace, will try later");
		} else {
			atomic_set(&btrfs_comp_ws[i].total_ws, 1);
			btrfs_comp_ws[i].free_ws = 1;
			list_add(workspace, &btrfs_comp_ws[i].idle_ws);
		}
	}
}

/*
 * This finds an available workspace or allocates a new one.
 * If it's not possible to allocate a new one, it waits until one becomes
 * available.  Preallocation gives us a forward-progress guarantee, so we
 * never return an error.
 */
static struct list_head *find_workspace(int type)
{
	struct list_head *workspace;
	int cpus = num_online_cpus();
	int idx = type - 1;

	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
	int *free_ws = &btrfs_comp_ws[idx].free_ws;
again:
	spin_lock(ws_lock);
	if (!list_empty(idle_ws)) {
		workspace = idle_ws->next;
		list_del(workspace);
		(*free_ws)--;
		spin_unlock(ws_lock);
		return workspace;
	}
	if (atomic_read(total_ws) > cpus) {
		DEFINE_WAIT(wait);

		spin_unlock(ws_lock);
		prepare_to_wait(ws_wait, &wait, TASK_UNINTERRUPTIBLE);
		if (atomic_read(total_ws) > cpus && !*free_ws)
			schedule();
		finish_wait(ws_wait, &wait);
		goto again;
	}
	atomic_inc(total_ws);
	spin_unlock(ws_lock);

	workspace = btrfs_compress_op[idx]->alloc_workspace();
	if (IS_ERR(workspace)) {
		atomic_dec(total_ws);
		wake_up(ws_wait);

		/*
		 * Do not return the error but go back to waiting.  There's a
		 * workspace preallocated for each type and the compression
		 * time is bounded so we get to a workspace eventually.  This
		 * makes our caller's life easier.
		 *
		 * To prevent silent and low-probability deadlocks (when the
		 * initial preallocation fails), check if there are any
		 * workspaces at all.
		 */
		if (atomic_read(total_ws) == 0) {
			static DEFINE_RATELIMIT_STATE(_rs,
					/* once per minute */ 60 * HZ,
					/* no burst */ 1);

			if (__ratelimit(&_rs)) {
				printk(KERN_WARNING
			"no compression workspaces, low memory, retrying");
			}
		}
		goto again;
	}
	return workspace;
}
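
/*
 * Callers pair find_workspace() with free_workspace() around each
 * compress/decompress call, as btrfs_compress_pages() and friends do
 * below; a workspace must not be touched once it has been handed back.
 */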

/*
 * put a workspace struct back on the list or free it if we have enough
 * idle ones sitting around
 */
static void free_workspace(int type, struct list_head *workspace)
{
	int idx = type - 1;
	struct list_head *idle_ws = &btrfs_comp_ws[idx].idle_ws;
	spinlock_t *ws_lock = &btrfs_comp_ws[idx].ws_lock;
	atomic_t *total_ws = &btrfs_comp_ws[idx].total_ws;
	wait_queue_head_t *ws_wait = &btrfs_comp_ws[idx].ws_wait;
	int *free_ws = &btrfs_comp_ws[idx].free_ws;

	spin_lock(ws_lock);
	if (*free_ws < num_online_cpus()) {
		list_add(workspace, idle_ws);
		(*free_ws)++;
		spin_unlock(ws_lock);
		goto wake;
	}
	spin_unlock(ws_lock);

	btrfs_compress_op[idx]->free_workspace(workspace);
	atomic_dec(total_ws);
wake:
	/*
	 * Make sure counter is updated before we wake up waiters.
	 */
	smp_mb();
	if (waitqueue_active(ws_wait))
		wake_up(ws_wait);
}

/*
 * cleanup function for module exit
 */
static void free_workspaces(void)
{
	struct list_head *workspace;
	int i;

	for (i = 0; i < BTRFS_COMPRESS_TYPES; i++) {
		while (!list_empty(&btrfs_comp_ws[i].idle_ws)) {
			workspace = btrfs_comp_ws[i].idle_ws.next;
			list_del(workspace);
			btrfs_compress_op[i]->free_workspace(workspace);
			atomic_dec(&btrfs_comp_ws[i].total_ws);
		}
	}
}

/*
 * given an address space and start/len, compress the bytes.
 *
 * pages are allocated to hold the compressed result and stored
 * in 'pages'
 *
 * out_pages is used to return the number of pages allocated.  There
 * may be pages allocated even if we return an error
 *
 * total_in is used to return the number of bytes actually read.  It
 * may be smaller than len if we had to exit early because we
 * ran out of room in the pages array or because we crossed the
 * max_out threshold.
 *
 * total_out is used to return the total number of compressed bytes
 *
 * max_out tells us the max number of bytes that we're allowed to
 * stuff into pages
 */
int btrfs_compress_pages(int type, struct address_space *mapping,
			 u64 start, unsigned long len,
			 struct page **pages,
			 unsigned long nr_dest_pages,
			 unsigned long *out_pages,
			 unsigned long *total_in,
			 unsigned long *total_out,
			 unsigned long max_out)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->compress_pages(workspace, mapping,
							start, len, pages,
							nr_dest_pages, out_pages,
							total_in, total_out,
							max_out);
	free_workspace(type, workspace);
	return ret;
}
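
/*
 * A minimal caller sketch (hypothetical, not part of this file): compress
 * one 128K chunk of an inode's data with zlib.  SZ_128K comes from
 * linux/sizes.h; the caller owns the pages array and must clean up the
 * pages returned via *nr_out, even on error.
 */
static int __maybe_unused example_compress_chunk(struct address_space *mapping,
						 u64 start, struct page **pages,
						 unsigned long *nr_out)
{
	unsigned long total_in = 0;
	unsigned long total_out = 0;

	/* cap both input and output at 128K, the btrfs compression unit */
	return btrfs_compress_pages(BTRFS_COMPRESS_ZLIB, mapping, start,
				    SZ_128K, pages, SZ_128K / PAGE_SIZE,
				    nr_out, &total_in, &total_out, SZ_128K);
}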

/*
 * pages_in is an array of pages with compressed data.
 *
 * disk_start is the starting logical offset of this array in the file
 *
 * bvec is a bio_vec of pages from the file that we want to decompress into
 *
 * vcnt is the count of pages in the biovec
 *
 * srclen is the number of bytes in pages_in
 *
 * The basic idea is that we have a bio that was created by readpages.
 * The pages in the bio are for the uncompressed data, and they may not
 * be contiguous.  They all correspond to the range of bytes covered by
 * the compressed extent.
 */
static int btrfs_decompress_biovec(int type, struct page **pages_in,
				   u64 disk_start, struct bio_vec *bvec,
				   int vcnt, size_t srclen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress_biovec(workspace, pages_in,
							   disk_start,
							   bvec, vcnt, srclen);
	free_workspace(type, workspace);
	return ret;
}

/*
 * a less complex decompression routine.  Our compressed data fits in a
 * single page, and we want to read a single page out of it.
 * start_byte tells us the offset into the compressed data we're interested in
 */
int btrfs_decompress(int type, unsigned char *data_in, struct page *dest_page,
		     unsigned long start_byte, size_t srclen, size_t destlen)
{
	struct list_head *workspace;
	int ret;

	workspace = find_workspace(type);

	ret = btrfs_compress_op[type-1]->decompress(workspace, data_in,
						    dest_page, start_byte,
						    srclen, destlen);

	free_workspace(type, workspace);
	return ret;
}
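
/*
 * Hypothetical usage sketch (names here are illustrative only): pull the
 * first decompressed page out of a small zlib-compressed buffer.
 */
static int __maybe_unused example_read_first_page(unsigned char *data_in,
						  size_t srclen,
						  struct page *dest_page)
{
	/* start_byte 0: we want the first page of the decompressed data */
	return btrfs_decompress(BTRFS_COMPRESS_ZLIB, data_in, dest_page,
				0, srclen, PAGE_SIZE);
}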

void btrfs_exit_compress(void)
{
	free_workspaces();
}

/*
 * Copy uncompressed data from working buffer to pages.
 *
 * buf_start is the offset, in uncompressed bytes, of the start of the
 * working buffer within the whole decompressed stream.
 *
 * total_out is the offset of the end of the working buffer, i.e.
 * buf_start plus the number of valid bytes in buf.
 */
int btrfs_decompress_buf2page(char *buf, unsigned long buf_start,
			      unsigned long total_out, u64 disk_start,
			      struct bio_vec *bvec, int vcnt,
			      unsigned long *pg_index,
			      unsigned long *pg_offset)
{
	unsigned long buf_offset;
	unsigned long current_buf_start;
	unsigned long start_byte;
	unsigned long working_bytes = total_out - buf_start;
	unsigned long bytes;
	char *kaddr;
	struct page *page_out = bvec[*pg_index].bv_page;

	/*
	 * start byte is the first byte of the page we're currently
	 * copying into relative to the start of the compressed data.
	 */
	start_byte = page_offset(page_out) - disk_start;

	/* we haven't yet hit data corresponding to this page */
	if (total_out <= start_byte)
		return 1;

	/*
	 * the start of the data we care about is offset into
	 * the middle of our working buffer
	 */
	if (total_out > start_byte && buf_start < start_byte) {
		buf_offset = start_byte - buf_start;
		working_bytes -= buf_offset;
	} else {
		buf_offset = 0;
	}
	current_buf_start = buf_start;

	/* copy bytes from the working buffer into the pages */
	while (working_bytes > 0) {
		bytes = min(PAGE_SIZE - *pg_offset,
			    PAGE_SIZE - buf_offset);
		bytes = min(bytes, working_bytes);
		kaddr = kmap_atomic(page_out);
		memcpy(kaddr + *pg_offset, buf + buf_offset, bytes);
		kunmap_atomic(kaddr);
		flush_dcache_page(page_out);

		*pg_offset += bytes;
		buf_offset += bytes;
		working_bytes -= bytes;
		current_buf_start += bytes;

		/* check if we need to pick another page */
		if (*pg_offset == PAGE_SIZE) {
			(*pg_index)++;
			if (*pg_index >= vcnt)
				return 0;

			page_out = bvec[*pg_index].bv_page;
			*pg_offset = 0;
			start_byte = page_offset(page_out) - disk_start;

			/*
			 * make sure our new page is covered by this
			 * working buffer
			 */
			if (total_out <= start_byte)
				return 1;

			/*
			 * the next page in the biovec might not be adjacent
			 * to the last page, but it might still be found
			 * inside this working buffer.  bump our offset pointer
			 */
			if (total_out > start_byte &&
			    current_buf_start < start_byte) {
				buf_offset = start_byte - buf_start;
				working_bytes = total_out - start_byte;
				current_buf_start = buf_start + buf_offset;
			}
		}
	}

	return 1;
}
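
/*
 * Worked example for the offset math above, with 4K pages: if the
 * destination page maps decompressed offset 4096 (start_byte = 4096) and
 * the working buffer holds bytes [3072, 7168) of the stream
 * (buf_start = 3072, total_out = 7168), then buf_offset = 1024 and
 * working_bytes shrinks to 3072, so the tail 3072 bytes of the buffer
 * land in the first 3072 bytes of the page.
 */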

/*
 * When uncompressing data, we need to make sure to zero any parts of
 * the biovec that were not filled in by the decompression code.  pg_index
 * and pg_offset indicate the last page and the last offset of that page
 * that have been filled in.  This will zero everything remaining in the
 * biovec.
 */
void btrfs_clear_biovec_end(struct bio_vec *bvec, int vcnt,
			    unsigned long pg_index,
			    unsigned long pg_offset)
{
	while (pg_index < vcnt) {
		struct page *page = bvec[pg_index].bv_page;
		unsigned long off = bvec[pg_index].bv_offset;
		unsigned long len = bvec[pg_index].bv_len;

		if (pg_offset < off)
			pg_offset = off;
		if (pg_offset < off + len) {
			unsigned long bytes = off + len - pg_offset;
			char *kaddr;

			kaddr = kmap_atomic(page);
			memset(kaddr + pg_offset, 0, bytes);
			kunmap_atomic(kaddr);
		}
		pg_index++;
		pg_offset = 0;
	}
}