block/bio.c
1 /*
2 * Copyright (C) 2001 Jens Axboe <axboe@kernel.dk>
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 *
8 * This program is distributed in the hope that it will be useful,
9 * but WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
11 * GNU General Public License for more details.
12 *
13 * You should have received a copy of the GNU General Public License
14 * along with this program; if not, write to the Free Software
15 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
16 *
17 */
18 #include <linux/mm.h>
19 #include <linux/swap.h>
20 #include <linux/bio.h>
21 #include <linux/blkdev.h>
22 #include <linux/uio.h>
23 #include <linux/iocontext.h>
24 #include <linux/slab.h>
25 #include <linux/init.h>
26 #include <linux/kernel.h>
27 #include <linux/export.h>
28 #include <linux/mempool.h>
29 #include <linux/workqueue.h>
30 #include <linux/cgroup.h>
31
32 #include <trace/events/block.h>
33
34 /*
35 * Test patch to inline a certain number of bi_io_vec's inside the bio
36 * itself, to shrink a bio data allocation from two mempool calls to one
37 */
38 #define BIO_INLINE_VECS 4
39
40 /*
41 * if you change this list, also change bvec_alloc or things will
42 * break badly! cannot be bigger than what you can fit into an
43 * unsigned short
44 */
45 #define BV(x) { .nr_vecs = x, .name = "biovec-"__stringify(x) }
46 static struct biovec_slab bvec_slabs[BVEC_POOL_NR] __read_mostly = {
47 BV(1), BV(4), BV(16), BV(64), BV(128), BV(BIO_MAX_PAGES),
48 };
49 #undef BV
50
51 /*
52 * fs_bio_set is the bio_set containing bio and iovec memory pools used by
53 * IO code that does not need private memory pools.
54 */
55 struct bio_set *fs_bio_set;
56 EXPORT_SYMBOL(fs_bio_set);
57
58 /*
59 * Our slab pool management
60 */
61 struct bio_slab {
62 struct kmem_cache *slab;
63 unsigned int slab_ref;
64 unsigned int slab_size;
65 char name[8];
66 };
67 static DEFINE_MUTEX(bio_slab_lock);
68 static struct bio_slab *bio_slabs;
69 static unsigned int bio_slab_nr, bio_slab_max;
70
71 static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
72 {
73 unsigned int sz = sizeof(struct bio) + extra_size;
74 struct kmem_cache *slab = NULL;
75 struct bio_slab *bslab, *new_bio_slabs;
76 unsigned int new_bio_slab_max;
77 unsigned int i, entry = -1;
78
79 mutex_lock(&bio_slab_lock);
80
81 i = 0;
82 while (i < bio_slab_nr) {
83 bslab = &bio_slabs[i];
84
85 if (!bslab->slab && entry == -1)
86 entry = i;
87 else if (bslab->slab_size == sz) {
88 slab = bslab->slab;
89 bslab->slab_ref++;
90 break;
91 }
92 i++;
93 }
94
95 if (slab)
96 goto out_unlock;
97
98 if (bio_slab_nr == bio_slab_max && entry == -1) {
99 new_bio_slab_max = bio_slab_max << 1;
100 new_bio_slabs = krealloc(bio_slabs,
101 new_bio_slab_max * sizeof(struct bio_slab),
102 GFP_KERNEL);
103 if (!new_bio_slabs)
104 goto out_unlock;
105 bio_slab_max = new_bio_slab_max;
106 bio_slabs = new_bio_slabs;
107 }
108 if (entry == -1)
109 entry = bio_slab_nr++;
110
111 bslab = &bio_slabs[entry];
112
113 snprintf(bslab->name, sizeof(bslab->name), "bio-%d", entry);
114 slab = kmem_cache_create(bslab->name, sz, ARCH_KMALLOC_MINALIGN,
115 SLAB_HWCACHE_ALIGN, NULL);
116 if (!slab)
117 goto out_unlock;
118
119 bslab->slab = slab;
120 bslab->slab_ref = 1;
121 bslab->slab_size = sz;
122 out_unlock:
123 mutex_unlock(&bio_slab_lock);
124 return slab;
125 }
126
127 static void bio_put_slab(struct bio_set *bs)
128 {
129 struct bio_slab *bslab = NULL;
130 unsigned int i;
131
132 mutex_lock(&bio_slab_lock);
133
134 for (i = 0; i < bio_slab_nr; i++) {
135 if (bs->bio_slab == bio_slabs[i].slab) {
136 bslab = &bio_slabs[i];
137 break;
138 }
139 }
140
141 if (WARN(!bslab, KERN_ERR "bio: unable to find slab!\n"))
142 goto out;
143
144 WARN_ON(!bslab->slab_ref);
145
146 if (--bslab->slab_ref)
147 goto out;
148
149 kmem_cache_destroy(bslab->slab);
150 bslab->slab = NULL;
151
152 out:
153 mutex_unlock(&bio_slab_lock);
154 }
155
156 unsigned int bvec_nr_vecs(unsigned short idx)
157 {
158 return bvec_slabs[idx].nr_vecs;
159 }
160
161 void bvec_free(mempool_t *pool, struct bio_vec *bv, unsigned int idx)
162 {
163 if (!idx)
164 return;
165 idx--;
166
167 BIO_BUG_ON(idx >= BVEC_POOL_NR);
168
169 if (idx == BVEC_POOL_MAX) {
170 mempool_free(bv, pool);
171 } else {
172 struct biovec_slab *bvs = bvec_slabs + idx;
173
174 kmem_cache_free(bvs->slab, bv);
175 }
176 }
177
178 struct bio_vec *bvec_alloc(gfp_t gfp_mask, int nr, unsigned long *idx,
179 mempool_t *pool)
180 {
181 struct bio_vec *bvl;
182
183 /*
184 * see the comment above the bvec_slabs array definition!
185 */
186 switch (nr) {
187 case 1:
188 *idx = 0;
189 break;
190 case 2 ... 4:
191 *idx = 1;
192 break;
193 case 5 ... 16:
194 *idx = 2;
195 break;
196 case 17 ... 64:
197 *idx = 3;
198 break;
199 case 65 ... 128:
200 *idx = 4;
201 break;
202 case 129 ... BIO_MAX_PAGES:
203 *idx = 5;
204 break;
205 default:
206 return NULL;
207 }
208
209 /*
210 * idx now points to the pool we want to allocate from. only the
211 * 1-vec entry pool is mempool backed.
212 */
213 if (*idx == BVEC_POOL_MAX) {
214 fallback:
215 bvl = mempool_alloc(pool, gfp_mask);
216 } else {
217 struct biovec_slab *bvs = bvec_slabs + *idx;
218 gfp_t __gfp_mask = gfp_mask & ~(__GFP_DIRECT_RECLAIM | __GFP_IO);
219
220 /*
221 * Make this allocation restricted and don't dump info on
222 * allocation failures, since we'll fallback to the mempool
223 * in case of failure.
224 */
225 __gfp_mask |= __GFP_NOMEMALLOC | __GFP_NORETRY | __GFP_NOWARN;
226
227 /*
228 * Try a slab allocation. If this fails and __GFP_DIRECT_RECLAIM
229 * is set, retry with the 1-entry mempool
230 */
231 bvl = kmem_cache_alloc(bvs->slab, __gfp_mask);
232 if (unlikely(!bvl && (gfp_mask & __GFP_DIRECT_RECLAIM))) {
233 *idx = BVEC_POOL_MAX;
234 goto fallback;
235 }
236 }
237
238 (*idx)++;
239 return bvl;
240 }
241
242 static void __bio_free(struct bio *bio)
243 {
244 bio_disassociate_task(bio);
245
246 if (bio_integrity(bio))
247 bio_integrity_free(bio);
248 }
249
250 static void bio_free(struct bio *bio)
251 {
252 struct bio_set *bs = bio->bi_pool;
253 void *p;
254
255 __bio_free(bio);
256
257 if (bs) {
258 bvec_free(bs->bvec_pool, bio->bi_io_vec, BVEC_POOL_IDX(bio));
259
260 /*
261 * If we have front padding, adjust the bio pointer before freeing
262 */
263 p = bio;
264 p -= bs->front_pad;
265
266 mempool_free(p, bs->bio_pool);
267 } else {
268 /* Bio was allocated by bio_kmalloc() */
269 kfree(bio);
270 }
271 }
272
273 void bio_init(struct bio *bio)
274 {
275 memset(bio, 0, sizeof(*bio));
276 atomic_set(&bio->__bi_remaining, 1);
277 atomic_set(&bio->__bi_cnt, 1);
278 }
279 EXPORT_SYMBOL(bio_init);
280
281 /**
282 * bio_reset - reinitialize a bio
283 * @bio: bio to reset
284 *
285 * Description:
286 * After calling bio_reset(), @bio will be in the same state as a freshly
287 * allocated bio returned by bio_alloc_bioset() - the only fields that are
288 * preserved are the ones that are initialized by bio_alloc_bioset(). See
289 * comment in struct bio.
290 */
291 void bio_reset(struct bio *bio)
292 {
293 unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);
294
295 __bio_free(bio);
296
297 memset(bio, 0, BIO_RESET_BYTES);
298 bio->bi_flags = flags;
299 atomic_set(&bio->__bi_remaining, 1);
300 }
301 EXPORT_SYMBOL(bio_reset);
302
303 static struct bio *__bio_chain_endio(struct bio *bio)
304 {
305 struct bio *parent = bio->bi_private;
306
307 if (!parent->bi_error)
308 parent->bi_error = bio->bi_error;
309 bio_put(bio);
310 return parent;
311 }
312
313 static void bio_chain_endio(struct bio *bio)
314 {
315 bio_endio(__bio_chain_endio(bio));
316 }
317
318 /**
319 * bio_chain - chain bio completions
320 * @bio: the target bio
321 * @parent: the @bio's parent bio
322 *
323 * The caller won't have a bi_end_io called when @bio completes - instead,
324 * @parent's bi_end_io won't be called until both @parent and @bio have
325 * completed; the chained bio will also be freed when it completes.
326 *
327 * The caller must not set bi_private or bi_end_io in @bio.
328 */
329 void bio_chain(struct bio *bio, struct bio *parent)
330 {
331 BUG_ON(bio->bi_private || bio->bi_end_io);
332
333 bio->bi_private = parent;
334 bio->bi_end_io = bio_chain_endio;
335 bio_inc_remaining(parent);
336 }
337 EXPORT_SYMBOL(bio_chain);
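
/*
 * Editor's illustrative sketch (not part of the original file): the
 * "chain and keep going" pattern used when one request needs more bios
 * than a single allocation can hold (compare blkdev_issue_discard()).
 * example_next_bio() is a hypothetical name.
 */
static struct bio *example_next_bio(struct bio *prev, unsigned int nr_pages,
				    gfp_t gfp)
{
	struct bio *new = bio_alloc(gfp, nr_pages);

	if (prev) {
		/* @new will not complete until @prev has completed too */
		bio_chain(prev, new);
		submit_bio(prev);
	}
	return new;
}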
338
339 static void bio_alloc_rescue(struct work_struct *work)
340 {
341 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
342 struct bio *bio;
343
344 while (1) {
345 spin_lock(&bs->rescue_lock);
346 bio = bio_list_pop(&bs->rescue_list);
347 spin_unlock(&bs->rescue_lock);
348
349 if (!bio)
350 break;
351
352 generic_make_request(bio);
353 }
354 }
355
356 static void punt_bios_to_rescuer(struct bio_set *bs)
357 {
358 struct bio_list punt, nopunt;
359 struct bio *bio;
360
361 /*
362 * In order to guarantee forward progress we must punt only bios that
363 * were allocated from this bio_set; otherwise, if there was a bio on
364 * there for a stacking driver higher up in the stack, processing it
365 * could require allocating bios from this bio_set, and doing that from
366 * our own rescuer would be bad.
367 *
368 * Since bio lists are singly linked, pop them all instead of trying to
369 * remove from the middle of the list:
370 */
371
372 bio_list_init(&punt);
373 bio_list_init(&nopunt);
374
375 while ((bio = bio_list_pop(current->bio_list)))
376 bio_list_add(bio->bi_pool == bs ? &punt : &nopunt, bio);
377
378 *current->bio_list = nopunt;
379
380 spin_lock(&bs->rescue_lock);
381 bio_list_merge(&bs->rescue_list, &punt);
382 spin_unlock(&bs->rescue_lock);
383
384 queue_work(bs->rescue_workqueue, &bs->rescue_work);
385 }
386
387 /**
388 * bio_alloc_bioset - allocate a bio for I/O
389 * @gfp_mask: the GFP_ mask given to the slab allocator
390 * @nr_iovecs: number of iovecs to pre-allocate
391 * @bs: the bio_set to allocate from.
392 *
393 * Description:
394 * If @bs is NULL, uses kmalloc() to allocate the bio; else the allocation is
395 * backed by the @bs's mempool.
396 *
397 * When @bs is not NULL, if %__GFP_DIRECT_RECLAIM is set then bio_alloc will
398 * always be able to allocate a bio. This is due to the mempool guarantees.
399 * To make this work, callers must never allocate more than 1 bio at a time
400 * from this pool. Callers that need to allocate more than 1 bio must always
401 * submit the previously allocated bio for IO before attempting to allocate
402 * a new one. Failure to do so can cause deadlocks under memory pressure.
403 *
404 * Note that when running under generic_make_request() (i.e. any block
405 * driver), bios are not submitted until after you return - see the code in
406 * generic_make_request() that converts recursion into iteration, to prevent
407 * stack overflows.
408 *
409 * This would normally mean allocating multiple bios under
410 * generic_make_request() would be susceptible to deadlocks, but we have
411 * deadlock avoidance code that resubmits any blocked bios from a rescuer
412 * thread.
413 *
414 * However, we do not guarantee forward progress for allocations from other
415 * mempools. Doing multiple allocations from the same mempool under
416 * generic_make_request() should be avoided - instead, use bio_set's front_pad
417 * for per bio allocations.
418 *
419 * RETURNS:
420 * Pointer to new bio on success, NULL on failure.
421 */
422 struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
423 {
424 gfp_t saved_gfp = gfp_mask;
425 unsigned front_pad;
426 unsigned inline_vecs;
427 struct bio_vec *bvl = NULL;
428 struct bio *bio;
429 void *p;
430
431 if (!bs) {
432 if (nr_iovecs > UIO_MAXIOV)
433 return NULL;
434
435 p = kmalloc(sizeof(struct bio) +
436 nr_iovecs * sizeof(struct bio_vec),
437 gfp_mask);
438 front_pad = 0;
439 inline_vecs = nr_iovecs;
440 } else {
441 /* should not use nobvec bioset for nr_iovecs > 0 */
442 if (WARN_ON_ONCE(!bs->bvec_pool && nr_iovecs > 0))
443 return NULL;
444 /*
445 * generic_make_request() converts recursion to iteration; this
446 * means if we're running beneath it, any bios we allocate and
447 * submit will not be submitted (and thus freed) until after we
448 * return.
449 *
450 * This exposes us to a potential deadlock if we allocate
451 * multiple bios from the same bio_set() while running
452 * underneath generic_make_request(). If we were to allocate
453 * multiple bios (say a stacking block driver that was splitting
454 * bios), we would deadlock if we exhausted the mempool's
455 * reserve.
456 *
457 * We solve this, and guarantee forward progress, with a rescuer
458 * workqueue per bio_set. If we go to allocate and there are
459 * bios on current->bio_list, we first try the allocation
460 * without __GFP_DIRECT_RECLAIM; if that fails, we punt those
461 * bios we would be blocking to the rescuer workqueue before
462 * we retry with the original gfp_flags.
463 */
464
465 if (current->bio_list && !bio_list_empty(current->bio_list))
466 gfp_mask &= ~__GFP_DIRECT_RECLAIM;
467
468 p = mempool_alloc(bs->bio_pool, gfp_mask);
469 if (!p && gfp_mask != saved_gfp) {
470 punt_bios_to_rescuer(bs);
471 gfp_mask = saved_gfp;
472 p = mempool_alloc(bs->bio_pool, gfp_mask);
473 }
474
475 front_pad = bs->front_pad;
476 inline_vecs = BIO_INLINE_VECS;
477 }
478
479 if (unlikely(!p))
480 return NULL;
481
482 bio = p + front_pad;
483 bio_init(bio);
484
485 if (nr_iovecs > inline_vecs) {
486 unsigned long idx = 0;
487
488 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
489 if (!bvl && gfp_mask != saved_gfp) {
490 punt_bios_to_rescuer(bs);
491 gfp_mask = saved_gfp;
492 bvl = bvec_alloc(gfp_mask, nr_iovecs, &idx, bs->bvec_pool);
493 }
494
495 if (unlikely(!bvl))
496 goto err_free;
497
498 bio->bi_flags |= idx << BVEC_POOL_OFFSET;
499 } else if (nr_iovecs) {
500 bvl = bio->bi_inline_vecs;
501 }
502
503 bio->bi_pool = bs;
504 bio->bi_max_vecs = nr_iovecs;
505 bio->bi_io_vec = bvl;
506 return bio;
507
508 err_free:
509 mempool_free(p, bs->bio_pool);
510 return NULL;
511 }
512 EXPORT_SYMBOL(bio_alloc_bioset);
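
/*
 * Editor's illustrative sketch (not part of the original file): allocating
 * a bio from a private bio_set and filling it one page at a time, following
 * the rules documented above (at most one bio in flight per pool at a time).
 * The example_* names are hypothetical.
 */
static struct bio *example_alloc_and_fill(struct bio_set *bs, struct page **pages,
					  unsigned int nr_pages)
{
	struct bio *bio = bio_alloc_bioset(GFP_NOIO, nr_pages, bs);
	unsigned int i;

	if (!bio)
		return NULL;

	for (i = 0; i < nr_pages; i++)
		if (!bio_add_page(bio, pages[i], PAGE_SIZE, 0))
			break;

	return bio;
}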
513
514 void zero_fill_bio(struct bio *bio)
515 {
516 unsigned long flags;
517 struct bio_vec bv;
518 struct bvec_iter iter;
519
520 bio_for_each_segment(bv, bio, iter) {
521 char *data = bvec_kmap_irq(&bv, &flags);
522 memset(data, 0, bv.bv_len);
523 flush_dcache_page(bv.bv_page);
524 bvec_kunmap_irq(data, &flags);
525 }
526 }
527 EXPORT_SYMBOL(zero_fill_bio);
528
529 /**
530 * bio_put - release a reference to a bio
531 * @bio: bio to release reference to
532 *
533 * Description:
534 * Put a reference to a &struct bio, either one you have gotten with
535 * bio_alloc, bio_get or bio_clone. The last put of a bio will free it.
536 **/
537 void bio_put(struct bio *bio)
538 {
539 if (!bio_flagged(bio, BIO_REFFED))
540 bio_free(bio);
541 else {
542 BIO_BUG_ON(!atomic_read(&bio->__bi_cnt));
543
544 /*
545 * last put frees it
546 */
547 if (atomic_dec_and_test(&bio->__bi_cnt))
548 bio_free(bio);
549 }
550 }
551 EXPORT_SYMBOL(bio_put);
552
553 inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
554 {
555 if (unlikely(!bio_flagged(bio, BIO_SEG_VALID)))
556 blk_recount_segments(q, bio);
557
558 return bio->bi_phys_segments;
559 }
560 EXPORT_SYMBOL(bio_phys_segments);
561
562 /**
563 * __bio_clone_fast - clone a bio that shares the original bio's biovec
564 * @bio: destination bio
565 * @bio_src: bio to clone
566 *
567 * Clone a &bio. Caller will own the returned bio, but not
568 * the actual data it points to. Reference count of returned
569 * bio will be one.
570 *
571 * Caller must ensure that @bio_src is not freed before @bio.
572 */
573 void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
574 {
575 BUG_ON(bio->bi_pool && BVEC_POOL_IDX(bio));
576
577 /*
578 * most users will be overriding ->bi_bdev with a new target,
579 * so we don't set nor calculate new physical/hw segment counts here
580 */
581 bio->bi_bdev = bio_src->bi_bdev;
582 bio_set_flag(bio, BIO_CLONED);
583 bio->bi_opf = bio_src->bi_opf;
584 bio->bi_iter = bio_src->bi_iter;
585 bio->bi_io_vec = bio_src->bi_io_vec;
586
587 bio_clone_blkcg_association(bio, bio_src);
588 }
589 EXPORT_SYMBOL(__bio_clone_fast);
590
591 /**
592 * bio_clone_fast - clone a bio that shares the original bio's biovec
593 * @bio: bio to clone
594 * @gfp_mask: allocation priority
595 * @bs: bio_set to allocate from
596 *
597 * Like __bio_clone_fast, only also allocates the returned bio
598 */
599 struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
600 {
601 struct bio *b;
602
603 b = bio_alloc_bioset(gfp_mask, 0, bs);
604 if (!b)
605 return NULL;
606
607 __bio_clone_fast(b, bio);
608
609 if (bio_integrity(bio)) {
610 int ret;
611
612 ret = bio_integrity_clone(b, bio, gfp_mask);
613
614 if (ret < 0) {
615 bio_put(b);
616 return NULL;
617 }
618 }
619
620 return b;
621 }
622 EXPORT_SYMBOL(bio_clone_fast);
623
624 /**
625 * bio_clone_bioset - clone a bio
626 * @bio_src: bio to clone
627 * @gfp_mask: allocation priority
628 * @bs: bio_set to allocate from
629 *
630 * Clone bio. Caller will own the returned bio, but not the actual data it
631 * points to. Reference count of returned bio will be one.
632 */
633 struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
634 struct bio_set *bs)
635 {
636 struct bvec_iter iter;
637 struct bio_vec bv;
638 struct bio *bio;
639
640 /*
641 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
642 * bio_src->bi_io_vec to bio->bi_io_vec.
643 *
644 * We can't do that anymore, because:
645 *
646 * - The point of cloning the biovec is to produce a bio with a biovec
647 * the caller can modify: bi_idx and bi_bvec_done should be 0.
648 *
649 * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
650 * we tried to clone the whole thing bio_alloc_bioset() would fail.
651 * But the clone should succeed as long as the number of biovecs we
652 * actually need to allocate is fewer than BIO_MAX_PAGES.
653 *
654 * - Lastly, bi_vcnt should not be looked at or relied upon by code
655 * that does not own the bio - reason being drivers don't use it for
656 * iterating over the biovec anymore, so expecting it to be kept up
657 * to date (i.e. for clones that share the parent biovec) is just
658 * asking for trouble and would force extra work on
659 * __bio_clone_fast() anyways.
660 */
661
662 bio = bio_alloc_bioset(gfp_mask, bio_segments(bio_src), bs);
663 if (!bio)
664 return NULL;
665 bio->bi_bdev = bio_src->bi_bdev;
666 bio->bi_opf = bio_src->bi_opf;
667 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
668 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
669
670 if (bio_op(bio) == REQ_OP_DISCARD)
671 goto integrity_clone;
672
673 if (bio_op(bio) == REQ_OP_WRITE_SAME) {
674 bio->bi_io_vec[bio->bi_vcnt++] = bio_src->bi_io_vec[0];
675 goto integrity_clone;
676 }
677
678 bio_for_each_segment(bv, bio_src, iter)
679 bio->bi_io_vec[bio->bi_vcnt++] = bv;
680
681 integrity_clone:
682 if (bio_integrity(bio_src)) {
683 int ret;
684
685 ret = bio_integrity_clone(bio, bio_src, gfp_mask);
686 if (ret < 0) {
687 bio_put(bio);
688 return NULL;
689 }
690 }
691
692 bio_clone_blkcg_association(bio, bio_src);
693
694 return bio;
695 }
696 EXPORT_SYMBOL(bio_clone_bioset);
697
698 /**
699 * bio_add_pc_page - attempt to add page to bio
700 * @q: the target queue
701 * @bio: destination bio
702 * @page: page to add
703 * @len: vec entry length
704 * @offset: vec entry offset
705 *
706 * Attempt to add a page to the bio_vec maplist. This can fail for a
707 * number of reasons, such as the bio being full or target block device
708 * limitations. The target block device must allow bios up to PAGE_SIZE,
709 * so it is always possible to add a single page to an empty bio.
710 *
711 * This should only be used by REQ_PC bios.
712 */
713 int bio_add_pc_page(struct request_queue *q, struct bio *bio, struct page
714 *page, unsigned int len, unsigned int offset)
715 {
716 int retried_segments = 0;
717 struct bio_vec *bvec;
718
719 /*
720 * cloned bio must not modify vec list
721 */
722 if (unlikely(bio_flagged(bio, BIO_CLONED)))
723 return 0;
724
725 if (((bio->bi_iter.bi_size + len) >> 9) > queue_max_hw_sectors(q))
726 return 0;
727
728 /*
729 * For filesystems with a blocksize smaller than the pagesize
730 * we will often be called with the same page as last time and
731 * a consecutive offset. Optimize this special case.
732 */
733 if (bio->bi_vcnt > 0) {
734 struct bio_vec *prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
735
736 if (page == prev->bv_page &&
737 offset == prev->bv_offset + prev->bv_len) {
738 prev->bv_len += len;
739 bio->bi_iter.bi_size += len;
740 goto done;
741 }
742
743 /*
744 * If the queue doesn't support SG gaps and adding this
745 * offset would create a gap, disallow it.
746 */
747 if (bvec_gap_to_prev(q, prev, offset))
748 return 0;
749 }
750
751 if (bio->bi_vcnt >= bio->bi_max_vecs)
752 return 0;
753
754 /*
755 * setup the new entry, we might clear it again later if we
756 * cannot add the page
757 */
758 bvec = &bio->bi_io_vec[bio->bi_vcnt];
759 bvec->bv_page = page;
760 bvec->bv_len = len;
761 bvec->bv_offset = offset;
762 bio->bi_vcnt++;
763 bio->bi_phys_segments++;
764 bio->bi_iter.bi_size += len;
765
766 /*
767 * Perform a recount if the number of segments is greater
768 * than queue_max_segments(q).
769 */
770
771 while (bio->bi_phys_segments > queue_max_segments(q)) {
772
773 if (retried_segments)
774 goto failed;
775
776 retried_segments = 1;
777 blk_recount_segments(q, bio);
778 }
779
780 /* If we may be able to merge these biovecs, force a recount */
781 if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
782 bio_clear_flag(bio, BIO_SEG_VALID);
783
784 done:
785 return len;
786
787 failed:
788 bvec->bv_page = NULL;
789 bvec->bv_len = 0;
790 bvec->bv_offset = 0;
791 bio->bi_vcnt--;
792 bio->bi_iter.bi_size -= len;
793 blk_recount_segments(q, bio);
794 return 0;
795 }
796 EXPORT_SYMBOL(bio_add_pc_page);
797
798 /**
799 * bio_add_page - attempt to add page to bio
800 * @bio: destination bio
801 * @page: page to add
802 * @len: vec entry length
803 * @offset: vec entry offset
804 *
805 * Attempt to add a page to the bio_vec maplist. This will only fail
806 * if either bio->bi_vcnt == bio->bi_max_vecs or it's a cloned bio.
807 */
808 int bio_add_page(struct bio *bio, struct page *page,
809 unsigned int len, unsigned int offset)
810 {
811 struct bio_vec *bv;
812
813 /*
814 * cloned bio must not modify vec list
815 */
816 if (WARN_ON_ONCE(bio_flagged(bio, BIO_CLONED)))
817 return 0;
818
819 /*
820 * For filesystems with a blocksize smaller than the pagesize
821 * we will often be called with the same page as last time and
822 * a consecutive offset. Optimize this special case.
823 */
824 if (bio->bi_vcnt > 0) {
825 bv = &bio->bi_io_vec[bio->bi_vcnt - 1];
826
827 if (page == bv->bv_page &&
828 offset == bv->bv_offset + bv->bv_len) {
829 bv->bv_len += len;
830 goto done;
831 }
832 }
833
834 if (bio->bi_vcnt >= bio->bi_max_vecs)
835 return 0;
836
837 bv = &bio->bi_io_vec[bio->bi_vcnt];
838 bv->bv_page = page;
839 bv->bv_len = len;
840 bv->bv_offset = offset;
841
842 bio->bi_vcnt++;
843 done:
844 bio->bi_iter.bi_size += len;
845 return len;
846 }
847 EXPORT_SYMBOL(bio_add_page);
848
849 struct submit_bio_ret {
850 struct completion event;
851 int error;
852 };
853
854 static void submit_bio_wait_endio(struct bio *bio)
855 {
856 struct submit_bio_ret *ret = bio->bi_private;
857
858 ret->error = bio->bi_error;
859 complete(&ret->event);
860 }
861
862 /**
863 * submit_bio_wait - submit a bio, and wait until it completes
864 * @bio: The &struct bio which describes the I/O
865 *
866 * Simple wrapper around submit_bio(). Returns 0 on success, or the error from
867 * bio_endio() on failure.
868 */
869 int submit_bio_wait(struct bio *bio)
870 {
871 struct submit_bio_ret ret;
872
873 init_completion(&ret.event);
874 bio->bi_private = &ret;
875 bio->bi_end_io = submit_bio_wait_endio;
876 bio->bi_opf |= REQ_SYNC;
877 submit_bio(bio);
878 wait_for_completion_io(&ret.event);
879
880 return ret.error;
881 }
882 EXPORT_SYMBOL(submit_bio_wait);
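
/*
 * Editor's illustrative sketch (not part of the original file): a
 * synchronous single-page read built on submit_bio_wait().  The
 * example_sync_read_page() name is hypothetical.
 */
static int example_sync_read_page(struct block_device *bdev, sector_t sector,
				  struct page *page)
{
	struct bio *bio = bio_alloc(GFP_KERNEL, 1);
	int ret;

	if (!bio)
		return -ENOMEM;

	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = sector;
	bio_add_page(bio, page, PAGE_SIZE, 0);
	bio_set_op_attrs(bio, REQ_OP_READ, 0);

	ret = submit_bio_wait(bio);	/* sleeps until the bio completes */
	bio_put(bio);
	return ret;
}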
883
884 /**
885 * bio_advance - increment/complete a bio by some number of bytes
886 * @bio: bio to advance
887 * @bytes: number of bytes to complete
888 *
889 * This updates bi_sector, bi_size and bi_idx; if the number of bytes to
890 * complete doesn't align with a bvec boundary, then bv_len and bv_offset will
891 * be updated on the last bvec as well.
892 *
893 * @bio will then represent the remaining, uncompleted portion of the io.
894 */
895 void bio_advance(struct bio *bio, unsigned bytes)
896 {
897 if (bio_integrity(bio))
898 bio_integrity_advance(bio, bytes);
899
900 bio_advance_iter(bio, &bio->bi_iter, bytes);
901 }
902 EXPORT_SYMBOL(bio_advance);
903
904 /**
905 * bio_alloc_pages - allocates a single page for each bvec in a bio
906 * @bio: bio to allocate pages for
907 * @gfp_mask: flags for allocation
908 *
909 * Allocates pages up to @bio->bi_vcnt.
910 *
911 * Returns 0 on success, -ENOMEM on failure. On failure, any allocated pages are
912 * freed.
913 */
914 int bio_alloc_pages(struct bio *bio, gfp_t gfp_mask)
915 {
916 int i;
917 struct bio_vec *bv;
918
919 bio_for_each_segment_all(bv, bio, i) {
920 bv->bv_page = alloc_page(gfp_mask);
921 if (!bv->bv_page) {
922 while (--bv >= bio->bi_io_vec)
923 __free_page(bv->bv_page);
924 return -ENOMEM;
925 }
926 }
927
928 return 0;
929 }
930 EXPORT_SYMBOL(bio_alloc_pages);
931
932 /**
933 * bio_copy_data - copy contents of data buffers from one chain of bios to
934 * another
935 * @src: source bio list
936 * @dst: destination bio list
937 *
938 * If @src and @dst are single bios, bi_next must be NULL - otherwise, treats
939 * @src and @dst as linked lists of bios.
940 *
941 * Stops when it reaches the end of either @src or @dst - that is, copies
942 * min(src->bi_size, dst->bi_size) bytes (or the equivalent for lists of bios).
943 */
944 void bio_copy_data(struct bio *dst, struct bio *src)
945 {
946 struct bvec_iter src_iter, dst_iter;
947 struct bio_vec src_bv, dst_bv;
948 void *src_p, *dst_p;
949 unsigned bytes;
950
951 src_iter = src->bi_iter;
952 dst_iter = dst->bi_iter;
953
954 while (1) {
955 if (!src_iter.bi_size) {
956 src = src->bi_next;
957 if (!src)
958 break;
959
960 src_iter = src->bi_iter;
961 }
962
963 if (!dst_iter.bi_size) {
964 dst = dst->bi_next;
965 if (!dst)
966 break;
967
968 dst_iter = dst->bi_iter;
969 }
970
971 src_bv = bio_iter_iovec(src, src_iter);
972 dst_bv = bio_iter_iovec(dst, dst_iter);
973
974 bytes = min(src_bv.bv_len, dst_bv.bv_len);
975
976 src_p = kmap_atomic(src_bv.bv_page);
977 dst_p = kmap_atomic(dst_bv.bv_page);
978
979 memcpy(dst_p + dst_bv.bv_offset,
980 src_p + src_bv.bv_offset,
981 bytes);
982
983 kunmap_atomic(dst_p);
984 kunmap_atomic(src_p);
985
986 bio_advance_iter(src, &src_iter, bytes);
987 bio_advance_iter(dst, &dst_iter, bytes);
988 }
989 }
990 EXPORT_SYMBOL(bio_copy_data);
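
/*
 * Editor's illustrative sketch (not part of the original file): taking a
 * private copy of @src's data by cloning the bio, backing it with freshly
 * allocated pages and copying the payload across - roughly the bounce
 * pattern bcache uses.  example_bounce() is a hypothetical name.
 */
static struct bio *example_bounce(struct bio *src, struct bio_set *bs)
{
	struct bio *copy = bio_clone_bioset(src, GFP_NOIO, bs);

	if (!copy)
		return NULL;

	if (bio_alloc_pages(copy, GFP_NOIO)) {
		bio_put(copy);
		return NULL;
	}

	bio_copy_data(copy, src);
	return copy;
}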
991
992 struct bio_map_data {
993 int is_our_pages;
994 struct iov_iter iter;
995 struct iovec iov[];
996 };
997
998 static struct bio_map_data *bio_alloc_map_data(unsigned int iov_count,
999 gfp_t gfp_mask)
1000 {
1001 if (iov_count > UIO_MAXIOV)
1002 return NULL;
1003
1004 return kmalloc(sizeof(struct bio_map_data) +
1005 sizeof(struct iovec) * iov_count, gfp_mask);
1006 }
1007
1008 /**
1009 * bio_copy_from_iter - copy all pages from iov_iter to bio
1010 * @bio: The &struct bio which describes the I/O as destination
1011 * @iter: iov_iter as source
1012 *
1013 * Copy all pages from iov_iter to bio.
1014 * Returns 0 on success, or error on failure.
1015 */
1016 static int bio_copy_from_iter(struct bio *bio, struct iov_iter iter)
1017 {
1018 int i;
1019 struct bio_vec *bvec;
1020
1021 bio_for_each_segment_all(bvec, bio, i) {
1022 ssize_t ret;
1023
1024 ret = copy_page_from_iter(bvec->bv_page,
1025 bvec->bv_offset,
1026 bvec->bv_len,
1027 &iter);
1028
1029 if (!iov_iter_count(&iter))
1030 break;
1031
1032 if (ret < bvec->bv_len)
1033 return -EFAULT;
1034 }
1035
1036 return 0;
1037 }
1038
1039 /**
1040 * bio_copy_to_iter - copy all pages from bio to iov_iter
1041 * @bio: The &struct bio which describes the I/O as source
1042 * @iter: iov_iter as destination
1043 *
1044 * Copy all pages from bio to iov_iter.
1045 * Returns 0 on success, or error on failure.
1046 */
1047 static int bio_copy_to_iter(struct bio *bio, struct iov_iter iter)
1048 {
1049 int i;
1050 struct bio_vec *bvec;
1051
1052 bio_for_each_segment_all(bvec, bio, i) {
1053 ssize_t ret;
1054
1055 ret = copy_page_to_iter(bvec->bv_page,
1056 bvec->bv_offset,
1057 bvec->bv_len,
1058 &iter);
1059
1060 if (!iov_iter_count(&iter))
1061 break;
1062
1063 if (ret < bvec->bv_len)
1064 return -EFAULT;
1065 }
1066
1067 return 0;
1068 }
1069
1070 static void bio_free_pages(struct bio *bio)
1071 {
1072 struct bio_vec *bvec;
1073 int i;
1074
1075 bio_for_each_segment_all(bvec, bio, i)
1076 __free_page(bvec->bv_page);
1077 }
1078
1079 /**
1080 * bio_uncopy_user - finish previously mapped bio
1081 * @bio: bio being terminated
1082 *
1083 * Free pages allocated from bio_copy_user_iov() and write back data
1084 * to user space in case of a read.
1085 */
1086 int bio_uncopy_user(struct bio *bio)
1087 {
1088 struct bio_map_data *bmd = bio->bi_private;
1089 int ret = 0;
1090
1091 if (!bio_flagged(bio, BIO_NULL_MAPPED)) {
1092 /*
1093 * if we're in a workqueue, the request is orphaned, so
1094 * don't copy into a random user address space, just free
1095 * and return -EINTR so user space doesn't expect any data.
1096 */
1097 if (!current->mm)
1098 ret = -EINTR;
1099 else if (bio_data_dir(bio) == READ)
1100 ret = bio_copy_to_iter(bio, bmd->iter);
1101 if (bmd->is_our_pages)
1102 bio_free_pages(bio);
1103 }
1104 kfree(bmd);
1105 bio_put(bio);
1106 return ret;
1107 }
1108
1109 /**
1110 * bio_copy_user_iov - copy user data to bio
1111 * @q: destination block queue
1112 * @map_data: pointer to the rq_map_data holding pages (if necessary)
1113 * @iter: iovec iterator
1114 * @gfp_mask: memory allocation flags
1115 *
1116 * Prepares and returns a bio for indirect user io, bouncing data
1117 * to/from kernel pages as necessary. Must be paired with a call to
1118 * bio_uncopy_user() on io completion.
1119 */
1120 struct bio *bio_copy_user_iov(struct request_queue *q,
1121 struct rq_map_data *map_data,
1122 const struct iov_iter *iter,
1123 gfp_t gfp_mask)
1124 {
1125 struct bio_map_data *bmd;
1126 struct page *page;
1127 struct bio *bio;
1128 int i, ret;
1129 int nr_pages = 0;
1130 unsigned int len = iter->count;
1131 unsigned int offset = map_data ? offset_in_page(map_data->offset) : 0;
1132
1133 for (i = 0; i < iter->nr_segs; i++) {
1134 unsigned long uaddr;
1135 unsigned long end;
1136 unsigned long start;
1137
1138 uaddr = (unsigned long) iter->iov[i].iov_base;
1139 end = (uaddr + iter->iov[i].iov_len + PAGE_SIZE - 1)
1140 >> PAGE_SHIFT;
1141 start = uaddr >> PAGE_SHIFT;
1142
1143 /*
1144 * Overflow, abort
1145 */
1146 if (end < start)
1147 return ERR_PTR(-EINVAL);
1148
1149 nr_pages += end - start;
1150 }
1151
1152 if (offset)
1153 nr_pages++;
1154
1155 bmd = bio_alloc_map_data(iter->nr_segs, gfp_mask);
1156 if (!bmd)
1157 return ERR_PTR(-ENOMEM);
1158
1159 /*
1160 * We need to do a deep copy of the iov_iter including the iovecs.
1161 * The caller provided iov might point to an on-stack or otherwise
1162 * shortlived one.
1163 */
1164 bmd->is_our_pages = map_data ? 0 : 1;
1165 memcpy(bmd->iov, iter->iov, sizeof(struct iovec) * iter->nr_segs);
1166 iov_iter_init(&bmd->iter, iter->type, bmd->iov,
1167 iter->nr_segs, iter->count);
1168
1169 ret = -ENOMEM;
1170 bio = bio_kmalloc(gfp_mask, nr_pages);
1171 if (!bio)
1172 goto out_bmd;
1173
1174 if (iter->type & WRITE)
1175 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1176
1177 ret = 0;
1178
1179 if (map_data) {
1180 nr_pages = 1 << map_data->page_order;
1181 i = map_data->offset / PAGE_SIZE;
1182 }
1183 while (len) {
1184 unsigned int bytes = PAGE_SIZE;
1185
1186 bytes -= offset;
1187
1188 if (bytes > len)
1189 bytes = len;
1190
1191 if (map_data) {
1192 if (i == map_data->nr_entries * nr_pages) {
1193 ret = -ENOMEM;
1194 break;
1195 }
1196
1197 page = map_data->pages[i / nr_pages];
1198 page += (i % nr_pages);
1199
1200 i++;
1201 } else {
1202 page = alloc_page(q->bounce_gfp | gfp_mask);
1203 if (!page) {
1204 ret = -ENOMEM;
1205 break;
1206 }
1207 }
1208
1209 if (bio_add_pc_page(q, bio, page, bytes, offset) < bytes)
1210 break;
1211
1212 len -= bytes;
1213 offset = 0;
1214 }
1215
1216 if (ret)
1217 goto cleanup;
1218
1219 /*
1220 * success
1221 */
1222 if (((iter->type & WRITE) && (!map_data || !map_data->null_mapped)) ||
1223 (map_data && map_data->from_user)) {
1224 ret = bio_copy_from_iter(bio, *iter);
1225 if (ret)
1226 goto cleanup;
1227 }
1228
1229 bio->bi_private = bmd;
1230 return bio;
1231 cleanup:
1232 if (!map_data)
1233 bio_free_pages(bio);
1234 bio_put(bio);
1235 out_bmd:
1236 kfree(bmd);
1237 return ERR_PTR(ret);
1238 }
1239
1240 /**
1241 * bio_map_user_iov - map user iovec into bio
1242 * @q: the struct request_queue for the bio
1243 * @iter: iovec iterator
1244 * @gfp_mask: memory allocation flags
1245 *
1246 * Map the user space address into a bio suitable for io to a block
1247 * device. Returns an error pointer in case of error.
1248 */
1249 struct bio *bio_map_user_iov(struct request_queue *q,
1250 const struct iov_iter *iter,
1251 gfp_t gfp_mask)
1252 {
1253 int j;
1254 int nr_pages = 0;
1255 struct page **pages;
1256 struct bio *bio;
1257 int cur_page = 0;
1258 int ret, offset;
1259 struct iov_iter i;
1260 struct iovec iov;
1261
1262 iov_for_each(iov, i, *iter) {
1263 unsigned long uaddr = (unsigned long) iov.iov_base;
1264 unsigned long len = iov.iov_len;
1265 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1266 unsigned long start = uaddr >> PAGE_SHIFT;
1267
1268 /*
1269 * Overflow, abort
1270 */
1271 if (end < start)
1272 return ERR_PTR(-EINVAL);
1273
1274 nr_pages += end - start;
1275 /*
1276 * buffer must be aligned to at least hardsector size for now
1277 */
1278 if (uaddr & queue_dma_alignment(q))
1279 return ERR_PTR(-EINVAL);
1280 }
1281
1282 if (!nr_pages)
1283 return ERR_PTR(-EINVAL);
1284
1285 bio = bio_kmalloc(gfp_mask, nr_pages);
1286 if (!bio)
1287 return ERR_PTR(-ENOMEM);
1288
1289 ret = -ENOMEM;
1290 pages = kcalloc(nr_pages, sizeof(struct page *), gfp_mask);
1291 if (!pages)
1292 goto out;
1293
1294 iov_for_each(iov, i, *iter) {
1295 unsigned long uaddr = (unsigned long) iov.iov_base;
1296 unsigned long len = iov.iov_len;
1297 unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1298 unsigned long start = uaddr >> PAGE_SHIFT;
1299 const int local_nr_pages = end - start;
1300 const int page_limit = cur_page + local_nr_pages;
1301
1302 ret = get_user_pages_fast(uaddr, local_nr_pages,
1303 (iter->type & WRITE) != WRITE,
1304 &pages[cur_page]);
1305 if (ret < local_nr_pages) {
1306 ret = -EFAULT;
1307 goto out_unmap;
1308 }
1309
1310 offset = offset_in_page(uaddr);
1311 for (j = cur_page; j < page_limit; j++) {
1312 unsigned int bytes = PAGE_SIZE - offset;
1313
1314 if (len <= 0)
1315 break;
1316
1317 if (bytes > len)
1318 bytes = len;
1319
1320 /*
1321 * sorry...
1322 */
1323 if (bio_add_pc_page(q, bio, pages[j], bytes, offset) <
1324 bytes)
1325 break;
1326
1327 len -= bytes;
1328 offset = 0;
1329 }
1330
1331 cur_page = j;
1332 /*
1333 * release the pages we didn't map into the bio, if any
1334 */
1335 while (j < page_limit)
1336 put_page(pages[j++]);
1337 }
1338
1339 kfree(pages);
1340
1341 /*
1342 * set data direction, and check if mapped pages need bouncing
1343 */
1344 if (iter->type & WRITE)
1345 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1346
1347 bio_set_flag(bio, BIO_USER_MAPPED);
1348
1349 /*
1350 * subtle -- if __bio_map_user() ended up bouncing a bio,
1351 * it would normally disappear when its bi_end_io is run.
1352 * however, we need it for the unmap, so grab an extra
1353 * reference to it
1354 */
1355 bio_get(bio);
1356 return bio;
1357
1358 out_unmap:
1359 for (j = 0; j < nr_pages; j++) {
1360 if (!pages[j])
1361 break;
1362 put_page(pages[j]);
1363 }
1364 out:
1365 kfree(pages);
1366 bio_put(bio);
1367 return ERR_PTR(ret);
1368 }
1369
1370 static void __bio_unmap_user(struct bio *bio)
1371 {
1372 struct bio_vec *bvec;
1373 int i;
1374
1375 /*
1376 * make sure we dirty pages we wrote to
1377 */
1378 bio_for_each_segment_all(bvec, bio, i) {
1379 if (bio_data_dir(bio) == READ)
1380 set_page_dirty_lock(bvec->bv_page);
1381
1382 put_page(bvec->bv_page);
1383 }
1384
1385 bio_put(bio);
1386 }
1387
1388 /**
1389 * bio_unmap_user - unmap a bio
1390 * @bio: the bio being unmapped
1391 *
1392 * Unmap a bio previously mapped by bio_map_user(). Must be called from
1393 * process context.
1394 *
1395 * bio_unmap_user() may sleep.
1396 */
1397 void bio_unmap_user(struct bio *bio)
1398 {
1399 __bio_unmap_user(bio);
1400 bio_put(bio);
1401 }
1402
1403 static void bio_map_kern_endio(struct bio *bio)
1404 {
1405 bio_put(bio);
1406 }
1407
1408 /**
1409 * bio_map_kern - map kernel address into bio
1410 * @q: the struct request_queue for the bio
1411 * @data: pointer to buffer to map
1412 * @len: length in bytes
1413 * @gfp_mask: allocation flags for bio allocation
1414 *
1415 * Map the kernel address into a bio suitable for io to a block
1416 * device. Returns an error pointer in case of error.
1417 */
1418 struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1419 gfp_t gfp_mask)
1420 {
1421 unsigned long kaddr = (unsigned long)data;
1422 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1423 unsigned long start = kaddr >> PAGE_SHIFT;
1424 const int nr_pages = end - start;
1425 int offset, i;
1426 struct bio *bio;
1427
1428 bio = bio_kmalloc(gfp_mask, nr_pages);
1429 if (!bio)
1430 return ERR_PTR(-ENOMEM);
1431
1432 offset = offset_in_page(kaddr);
1433 for (i = 0; i < nr_pages; i++) {
1434 unsigned int bytes = PAGE_SIZE - offset;
1435
1436 if (len <= 0)
1437 break;
1438
1439 if (bytes > len)
1440 bytes = len;
1441
1442 if (bio_add_pc_page(q, bio, virt_to_page(data), bytes,
1443 offset) < bytes) {
1444 /* we don't support partial mappings */
1445 bio_put(bio);
1446 return ERR_PTR(-EINVAL);
1447 }
1448
1449 data += bytes;
1450 len -= bytes;
1451 offset = 0;
1452 }
1453
1454 bio->bi_end_io = bio_map_kern_endio;
1455 return bio;
1456 }
1457 EXPORT_SYMBOL(bio_map_kern);
1458
1459 static void bio_copy_kern_endio(struct bio *bio)
1460 {
1461 bio_free_pages(bio);
1462 bio_put(bio);
1463 }
1464
1465 static void bio_copy_kern_endio_read(struct bio *bio)
1466 {
1467 char *p = bio->bi_private;
1468 struct bio_vec *bvec;
1469 int i;
1470
1471 bio_for_each_segment_all(bvec, bio, i) {
1472 memcpy(p, page_address(bvec->bv_page), bvec->bv_len);
1473 p += bvec->bv_len;
1474 }
1475
1476 bio_copy_kern_endio(bio);
1477 }
1478
1479 /**
1480 * bio_copy_kern - copy kernel address into bio
1481 * @q: the struct request_queue for the bio
1482 * @data: pointer to buffer to copy
1483 * @len: length in bytes
1484 * @gfp_mask: allocation flags for bio and page allocation
1485 * @reading: data direction is READ
1486 *
1487 * copy the kernel address into a bio suitable for io to a block
1488 * device. Returns an error pointer in case of error.
1489 */
1490 struct bio *bio_copy_kern(struct request_queue *q, void *data, unsigned int len,
1491 gfp_t gfp_mask, int reading)
1492 {
1493 unsigned long kaddr = (unsigned long)data;
1494 unsigned long end = (kaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
1495 unsigned long start = kaddr >> PAGE_SHIFT;
1496 struct bio *bio;
1497 void *p = data;
1498 int nr_pages = 0;
1499
1500 /*
1501 * Overflow, abort
1502 */
1503 if (end < start)
1504 return ERR_PTR(-EINVAL);
1505
1506 nr_pages = end - start;
1507 bio = bio_kmalloc(gfp_mask, nr_pages);
1508 if (!bio)
1509 return ERR_PTR(-ENOMEM);
1510
1511 while (len) {
1512 struct page *page;
1513 unsigned int bytes = PAGE_SIZE;
1514
1515 if (bytes > len)
1516 bytes = len;
1517
1518 page = alloc_page(q->bounce_gfp | gfp_mask);
1519 if (!page)
1520 goto cleanup;
1521
1522 if (!reading)
1523 memcpy(page_address(page), p, bytes);
1524
1525 if (bio_add_pc_page(q, bio, page, bytes, 0) < bytes)
1526 break;
1527
1528 len -= bytes;
1529 p += bytes;
1530 }
1531
1532 if (reading) {
1533 bio->bi_end_io = bio_copy_kern_endio_read;
1534 bio->bi_private = data;
1535 } else {
1536 bio->bi_end_io = bio_copy_kern_endio;
1537 bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
1538 }
1539
1540 return bio;
1541
1542 cleanup:
1543 bio_free_pages(bio);
1544 bio_put(bio);
1545 return ERR_PTR(-ENOMEM);
1546 }
1547
1548 /*
1549 * bio_set_pages_dirty() and bio_check_pages_dirty() are support functions
1550 * for performing direct-IO in BIOs.
1551 *
1552 * The problem is that we cannot run set_page_dirty() from interrupt context
1553 * because the required locks are not interrupt-safe. So what we can do is to
1554 * mark the pages dirty _before_ performing IO. And in interrupt context,
1555 * check that the pages are still dirty. If so, fine. If not, redirty them
1556 * in process context.
1557 *
1558 * We special-case compound pages here: normally this means reads into hugetlb
1559 * pages. The logic in here doesn't really work right for compound pages
1560 * because the VM does not uniformly chase down the head page in all cases.
1561 * But dirtiness of compound pages is pretty meaningless anyway: the VM doesn't
1562 * handle them at all. So we skip compound pages here at an early stage.
1563 *
1564 * Note that this code is very hard to test under normal circumstances because
1565 * direct-io pins the pages with get_user_pages(). This makes
1566 * is_page_cache_freeable return false, and the VM will not clean the pages.
1567 * But other code (eg, flusher threads) could clean the pages if they are mapped
1568 * pagecache.
1569 *
1570 * Simply disabling the call to bio_set_pages_dirty() is a good way to test the
1571 * deferred bio dirtying paths.
1572 */
1573
1574 /*
1575 * bio_set_pages_dirty() will mark all the bio's pages as dirty.
1576 */
1577 void bio_set_pages_dirty(struct bio *bio)
1578 {
1579 struct bio_vec *bvec;
1580 int i;
1581
1582 bio_for_each_segment_all(bvec, bio, i) {
1583 struct page *page = bvec->bv_page;
1584
1585 if (page && !PageCompound(page))
1586 set_page_dirty_lock(page);
1587 }
1588 }
1589
1590 static void bio_release_pages(struct bio *bio)
1591 {
1592 struct bio_vec *bvec;
1593 int i;
1594
1595 bio_for_each_segment_all(bvec, bio, i) {
1596 struct page *page = bvec->bv_page;
1597
1598 if (page)
1599 put_page(page);
1600 }
1601 }
1602
1603 /*
1604 * bio_check_pages_dirty() will check that all the BIO's pages are still dirty.
1605 * If they are, then fine. If, however, some pages are clean then they must
1606 * have been written out during the direct-IO read. So we take another ref on
1607 * the BIO and the offending pages and re-dirty the pages in process context.
1608 *
1609 * It is expected that bio_check_pages_dirty() will wholly own the BIO from
1610 * here on. It will run one put_page() against each page and will run one
1611 * bio_put() against the BIO.
1612 */
1613
1614 static void bio_dirty_fn(struct work_struct *work);
1615
1616 static DECLARE_WORK(bio_dirty_work, bio_dirty_fn);
1617 static DEFINE_SPINLOCK(bio_dirty_lock);
1618 static struct bio *bio_dirty_list;
1619
1620 /*
1621 * This runs in process context
1622 */
1623 static void bio_dirty_fn(struct work_struct *work)
1624 {
1625 unsigned long flags;
1626 struct bio *bio;
1627
1628 spin_lock_irqsave(&bio_dirty_lock, flags);
1629 bio = bio_dirty_list;
1630 bio_dirty_list = NULL;
1631 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1632
1633 while (bio) {
1634 struct bio *next = bio->bi_private;
1635
1636 bio_set_pages_dirty(bio);
1637 bio_release_pages(bio);
1638 bio_put(bio);
1639 bio = next;
1640 }
1641 }
1642
1643 void bio_check_pages_dirty(struct bio *bio)
1644 {
1645 struct bio_vec *bvec;
1646 int nr_clean_pages = 0;
1647 int i;
1648
1649 bio_for_each_segment_all(bvec, bio, i) {
1650 struct page *page = bvec->bv_page;
1651
1652 if (PageDirty(page) || PageCompound(page)) {
1653 put_page(page);
1654 bvec->bv_page = NULL;
1655 } else {
1656 nr_clean_pages++;
1657 }
1658 }
1659
1660 if (nr_clean_pages) {
1661 unsigned long flags;
1662
1663 spin_lock_irqsave(&bio_dirty_lock, flags);
1664 bio->bi_private = bio_dirty_list;
1665 bio_dirty_list = bio;
1666 spin_unlock_irqrestore(&bio_dirty_lock, flags);
1667 schedule_work(&bio_dirty_work);
1668 } else {
1669 bio_put(bio);
1670 }
1671 }
1672
1673 void generic_start_io_acct(int rw, unsigned long sectors,
1674 struct hd_struct *part)
1675 {
1676 int cpu = part_stat_lock();
1677
1678 part_round_stats(cpu, part);
1679 part_stat_inc(cpu, part, ios[rw]);
1680 part_stat_add(cpu, part, sectors[rw], sectors);
1681 part_inc_in_flight(part, rw);
1682
1683 part_stat_unlock();
1684 }
1685 EXPORT_SYMBOL(generic_start_io_acct);
1686
1687 void generic_end_io_acct(int rw, struct hd_struct *part,
1688 unsigned long start_time)
1689 {
1690 unsigned long duration = jiffies - start_time;
1691 int cpu = part_stat_lock();
1692
1693 part_stat_add(cpu, part, ticks[rw], duration);
1694 part_round_stats(cpu, part);
1695 part_dec_in_flight(part, rw);
1696
1697 part_stat_unlock();
1698 }
1699 EXPORT_SYMBOL(generic_end_io_acct);
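
/*
 * Editor's illustrative sketch (not part of the original file): how a
 * bio-based driver might wrap an I/O with the generic accounting helpers
 * above.  example_account_bio() is a hypothetical name.
 */
static void example_account_bio(struct gendisk *disk, struct bio *bio)
{
	unsigned long start_time = jiffies;
	int rw = bio_data_dir(bio);

	generic_start_io_acct(rw, bio_sectors(bio), &disk->part0);
	/* ... carry out the transfer ... */
	generic_end_io_acct(rw, &disk->part0, start_time);
}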
1700
1701 #if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1702 void bio_flush_dcache_pages(struct bio *bi)
1703 {
1704 struct bio_vec bvec;
1705 struct bvec_iter iter;
1706
1707 bio_for_each_segment(bvec, bi, iter)
1708 flush_dcache_page(bvec.bv_page);
1709 }
1710 EXPORT_SYMBOL(bio_flush_dcache_pages);
1711 #endif
1712
1713 static inline bool bio_remaining_done(struct bio *bio)
1714 {
1715 /*
1716 * If we're not chaining, then ->__bi_remaining is always 1 and
1717 * we always end io on the first invocation.
1718 */
1719 if (!bio_flagged(bio, BIO_CHAIN))
1720 return true;
1721
1722 BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
1723
1724 if (atomic_dec_and_test(&bio->__bi_remaining)) {
1725 bio_clear_flag(bio, BIO_CHAIN);
1726 return true;
1727 }
1728
1729 return false;
1730 }
1731
1732 /**
1733 * bio_endio - end I/O on a bio
1734 * @bio: bio
1735 *
1736 * Description:
1737 * bio_endio() will end I/O on the whole bio. bio_endio() is the preferred
1738 * way to end I/O on a bio. No one should call bi_end_io() directly on a
1739 * bio unless they own it and thus know that it has an end_io function.
1740 **/
1741 void bio_endio(struct bio *bio)
1742 {
1743 again:
1744 if (!bio_remaining_done(bio))
1745 return;
1746
1747 /*
1748 * Need to have a real endio function for chained bios, otherwise
1749 * various corner cases will break (like stacking block devices that
1750 * save/restore bi_end_io) - however, we want to avoid unbounded
1751 * recursion and blowing the stack. Tail call optimization would
1752 * handle this, but compiling with frame pointers also disables
1753 * gcc's sibling call optimization.
1754 */
1755 if (bio->bi_end_io == bio_chain_endio) {
1756 bio = __bio_chain_endio(bio);
1757 goto again;
1758 }
1759
1760 if (bio->bi_end_io)
1761 bio->bi_end_io(bio);
1762 }
1763 EXPORT_SYMBOL(bio_endio);
1764
1765 /**
1766 * bio_split - split a bio
1767 * @bio: bio to split
1768 * @sectors: number of sectors to split from the front of @bio
1769 * @gfp: gfp mask
1770 * @bs: bio set to allocate from
1771 *
1772 * Allocates and returns a new bio which represents @sectors from the start of
1773 * @bio, and updates @bio to represent the remaining sectors.
1774 *
1775 * Unless this is a discard request the newly allocated bio will point
1776 * to @bio's bi_io_vec; it is the caller's responsibility to ensure that
1777 * @bio is not freed before the split.
1778 */
1779 struct bio *bio_split(struct bio *bio, int sectors,
1780 gfp_t gfp, struct bio_set *bs)
1781 {
1782 struct bio *split = NULL;
1783
1784 BUG_ON(sectors <= 0);
1785 BUG_ON(sectors >= bio_sectors(bio));
1786
1787 /*
1788 * Discards need a mutable bio_vec to accommodate the payload
1789 * required by the DSM TRIM and UNMAP commands.
1790 */
1791 if (bio_op(bio) == REQ_OP_DISCARD)
1792 split = bio_clone_bioset(bio, gfp, bs);
1793 else
1794 split = bio_clone_fast(bio, gfp, bs);
1795
1796 if (!split)
1797 return NULL;
1798
1799 split->bi_iter.bi_size = sectors << 9;
1800
1801 if (bio_integrity(split))
1802 bio_integrity_trim(split, 0, sectors);
1803
1804 bio_advance(bio, split->bi_iter.bi_size);
1805
1806 return split;
1807 }
1808 EXPORT_SYMBOL(bio_split);
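
/*
 * Editor's illustrative sketch (not part of the original file): the usual
 * split-and-chain loop for cutting an over-sized bio down to a device
 * limit.  example_split_and_submit() and max_sectors are hypothetical.
 */
static void example_split_and_submit(struct bio *bio, unsigned int max_sectors,
				     struct bio_set *bs)
{
	while (bio_sectors(bio) > max_sectors) {
		struct bio *split = bio_split(bio, max_sectors, GFP_NOIO, bs);

		/* @bio now only completes after @split does */
		bio_chain(split, bio);
		generic_make_request(split);
	}
	generic_make_request(bio);
}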
1809
1810 /**
1811 * bio_trim - trim a bio
1812 * @bio: bio to trim
1813 * @offset: number of sectors to trim from the front of @bio
1814 * @size: size we want to trim @bio to, in sectors
1815 */
1816 void bio_trim(struct bio *bio, int offset, int size)
1817 {
1818 /* 'bio' is a cloned bio which we need to trim to match
1819 * the given offset and size.
1820 */
1821
1822 size <<= 9;
1823 if (offset == 0 && size == bio->bi_iter.bi_size)
1824 return;
1825
1826 bio_clear_flag(bio, BIO_SEG_VALID);
1827
1828 bio_advance(bio, offset << 9);
1829
1830 bio->bi_iter.bi_size = size;
1831 }
1832 EXPORT_SYMBOL_GPL(bio_trim);
1833
1834 /*
1835 * create memory pools for biovec's in a bio_set.
1836 * use the global biovec slabs created for general use.
1837 */
1838 mempool_t *biovec_create_pool(int pool_entries)
1839 {
1840 struct biovec_slab *bp = bvec_slabs + BVEC_POOL_MAX;
1841
1842 return mempool_create_slab_pool(pool_entries, bp->slab);
1843 }
1844
1845 void bioset_free(struct bio_set *bs)
1846 {
1847 if (bs->rescue_workqueue)
1848 destroy_workqueue(bs->rescue_workqueue);
1849
1850 if (bs->bio_pool)
1851 mempool_destroy(bs->bio_pool);
1852
1853 if (bs->bvec_pool)
1854 mempool_destroy(bs->bvec_pool);
1855
1856 bioset_integrity_free(bs);
1857 bio_put_slab(bs);
1858
1859 kfree(bs);
1860 }
1861 EXPORT_SYMBOL(bioset_free);
1862
1863 static struct bio_set *__bioset_create(unsigned int pool_size,
1864 unsigned int front_pad,
1865 bool create_bvec_pool)
1866 {
1867 unsigned int back_pad = BIO_INLINE_VECS * sizeof(struct bio_vec);
1868 struct bio_set *bs;
1869
1870 bs = kzalloc(sizeof(*bs), GFP_KERNEL);
1871 if (!bs)
1872 return NULL;
1873
1874 bs->front_pad = front_pad;
1875
1876 spin_lock_init(&bs->rescue_lock);
1877 bio_list_init(&bs->rescue_list);
1878 INIT_WORK(&bs->rescue_work, bio_alloc_rescue);
1879
1880 bs->bio_slab = bio_find_or_create_slab(front_pad + back_pad);
1881 if (!bs->bio_slab) {
1882 kfree(bs);
1883 return NULL;
1884 }
1885
1886 bs->bio_pool = mempool_create_slab_pool(pool_size, bs->bio_slab);
1887 if (!bs->bio_pool)
1888 goto bad;
1889
1890 if (create_bvec_pool) {
1891 bs->bvec_pool = biovec_create_pool(pool_size);
1892 if (!bs->bvec_pool)
1893 goto bad;
1894 }
1895
1896 bs->rescue_workqueue = alloc_workqueue("bioset", WQ_MEM_RECLAIM, 0);
1897 if (!bs->rescue_workqueue)
1898 goto bad;
1899
1900 return bs;
1901 bad:
1902 bioset_free(bs);
1903 return NULL;
1904 }
1905
1906 /**
1907 * bioset_create - Create a bio_set
1908 * @pool_size: Number of bio and bio_vecs to cache in the mempool
1909 * @front_pad: Number of bytes to allocate in front of the returned bio
1910 *
1911 * Description:
1912 * Set up a bio_set to be used with @bio_alloc_bioset. Allows the caller
1913 * to ask for a number of bytes to be allocated in front of the bio.
1914 * Front pad allocation is useful for embedding the bio inside
1915 * another structure, to avoid allocating extra data to go with the bio.
1916 * Note that the bio must be embedded at the END of that structure always,
1917 * or things will break badly.
1918 */
1919 struct bio_set *bioset_create(unsigned int pool_size, unsigned int front_pad)
1920 {
1921 return __bioset_create(pool_size, front_pad, true);
1922 }
1923 EXPORT_SYMBOL(bioset_create);
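
/*
 * Editor's illustrative sketch (not part of the original file): using
 * front_pad to embed the bio at the end of a driver-private structure, as
 * the description above recommends.  struct example_request and
 * example_bioset_create() are hypothetical.
 */
struct example_request {
	void		*driver_data;
	struct bio	bio;		/* must stay the last member */
};

static struct bio_set *example_bioset_create(void)
{
	return bioset_create(BIO_POOL_SIZE,
			     offsetof(struct example_request, bio));
}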
1924
1925 /**
1926 * bioset_create_nobvec - Create a bio_set without bio_vec mempool
1927 * @pool_size: Number of bio to cache in the mempool
1928 * @front_pad: Number of bytes to allocate in front of the returned bio
1929 *
1930 * Description:
1931 * Same functionality as bioset_create() except that a mempool is not
1932 * created for bio_vecs, saving some memory for bio_clone_fast() users.
1933 */
1934 struct bio_set *bioset_create_nobvec(unsigned int pool_size, unsigned int front_pad)
1935 {
1936 return __bioset_create(pool_size, front_pad, false);
1937 }
1938 EXPORT_SYMBOL(bioset_create_nobvec);
1939
1940 #ifdef CONFIG_BLK_CGROUP
1941
1942 /**
1943 * bio_associate_blkcg - associate a bio with the specified blkcg
1944 * @bio: target bio
1945 * @blkcg_css: css of the blkcg to associate
1946 *
1947 * Associate @bio with the blkcg specified by @blkcg_css. Block layer will
1948 * treat @bio as if it were issued by a task which belongs to the blkcg.
1949 *
1950 * This function takes an extra reference of @blkcg_css which will be put
1951 * when @bio is released. The caller must own @bio and is responsible for
1952 * synchronizing calls to this function.
1953 */
1954 int bio_associate_blkcg(struct bio *bio, struct cgroup_subsys_state *blkcg_css)
1955 {
1956 if (unlikely(bio->bi_css))
1957 return -EBUSY;
1958 css_get(blkcg_css);
1959 bio->bi_css = blkcg_css;
1960 return 0;
1961 }
1962 EXPORT_SYMBOL_GPL(bio_associate_blkcg);
1963
1964 /**
1965 * bio_associate_current - associate a bio with %current
1966 * @bio: target bio
1967 *
1968 * Associate @bio with %current if it hasn't been associated yet. Block
1969 * layer will treat @bio as if it were issued by %current no matter which
1970 * task actually issues it.
1971 *
1972 * This function takes an extra reference of @task's io_context and blkcg
1973 * which will be put when @bio is released. The caller must own @bio,
1974 * ensure %current->io_context exists, and is responsible for synchronizing
1975 * calls to this function.
1976 */
1977 int bio_associate_current(struct bio *bio)
1978 {
1979 struct io_context *ioc;
1980
1981 if (bio->bi_css)
1982 return -EBUSY;
1983
1984 ioc = current->io_context;
1985 if (!ioc)
1986 return -ENOENT;
1987
1988 get_io_context_active(ioc);
1989 bio->bi_ioc = ioc;
1990 bio->bi_css = task_get_css(current, io_cgrp_id);
1991 return 0;
1992 }
1993 EXPORT_SYMBOL_GPL(bio_associate_current);
1994
1995 /**
1996 * bio_disassociate_task - undo bio_associate_current()
1997 * @bio: target bio
1998 */
1999 void bio_disassociate_task(struct bio *bio)
2000 {
2001 if (bio->bi_ioc) {
2002 put_io_context(bio->bi_ioc);
2003 bio->bi_ioc = NULL;
2004 }
2005 if (bio->bi_css) {
2006 css_put(bio->bi_css);
2007 bio->bi_css = NULL;
2008 }
2009 }
2010
2011 /**
2012 * bio_clone_blkcg_association - clone blkcg association from src to dst bio
2013 * @dst: destination bio
2014 * @src: source bio
2015 */
2016 void bio_clone_blkcg_association(struct bio *dst, struct bio *src)
2017 {
2018 if (src->bi_css)
2019 WARN_ON(bio_associate_blkcg(dst, src->bi_css));
2020 }
2021
2022 #endif /* CONFIG_BLK_CGROUP */
2023
2024 static void __init biovec_init_slabs(void)
2025 {
2026 int i;
2027
2028 for (i = 0; i < BVEC_POOL_NR; i++) {
2029 int size;
2030 struct biovec_slab *bvs = bvec_slabs + i;
2031
2032 if (bvs->nr_vecs <= BIO_INLINE_VECS) {
2033 bvs->slab = NULL;
2034 continue;
2035 }
2036
2037 size = bvs->nr_vecs * sizeof(struct bio_vec);
2038 bvs->slab = kmem_cache_create(bvs->name, size, 0,
2039 SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
2040 }
2041 }
2042
2043 static int __init init_bio(void)
2044 {
2045 bio_slab_max = 2;
2046 bio_slab_nr = 0;
2047 bio_slabs = kzalloc(bio_slab_max * sizeof(struct bio_slab), GFP_KERNEL);
2048 if (!bio_slabs)
2049 panic("bio: can't allocate bios\n");
2050
2051 bio_integrity_init();
2052 biovec_init_slabs();
2053
2054 fs_bio_set = bioset_create(BIO_POOL_SIZE, 0);
2055 if (!fs_bio_set)
2056 panic("bio: can't allocate bios\n");
2057
2058 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2059 panic("bio: can't create integrity pool\n");
2060
2061 return 0;
2062 }
2063 subsys_initcall(init_bio);