/*
 * Functions related to segment and merge handling
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>

#include "blk.h"

static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
					     struct bio *bio,
					     bool no_sg_merge)
{
	struct bio_vec bv, bvprv = { NULL };
	int cluster, high, highprv = 1;
	unsigned int seg_size, nr_phys_segs;
	struct bio *fbio, *bbio;
	struct bvec_iter iter;

	if (!bio)
		return 0;

	/*
	 * This should probably be returning 0, but blk_add_request_payload()
	 * may attach a payload bvec to a discard request behind our back,
	 * so a discard still has to count as one segment.
	 */
	if (bio->bi_rw & REQ_DISCARD)
		return 1;

	if (bio->bi_rw & REQ_WRITE_SAME)
		return 1;

	fbio = bio;
	cluster = blk_queue_cluster(q);
	seg_size = 0;
	nr_phys_segs = 0;
	high = 0;
	for_each_bio(bio) {
		bio_for_each_segment(bv, bio, iter) {
			/*
			 * If SG merging is disabled, each bio vector is
			 * a segment
			 */
			if (no_sg_merge)
				goto new_segment;

			/*
			 * the trick here is making sure that a high page is
			 * never considered part of another segment, since
			 * that might change with the bounce page.
			 */
			high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
			if (!high && !highprv && cluster) {
				if (seg_size + bv.bv_len
				    > queue_max_segment_size(q))
					goto new_segment;
				if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
					goto new_segment;
				if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
					goto new_segment;

				seg_size += bv.bv_len;
				bvprv = bv;
				continue;
			}
new_segment:
			if (nr_phys_segs == 1 && seg_size >
			    fbio->bi_seg_front_size)
				fbio->bi_seg_front_size = seg_size;

			nr_phys_segs++;
			bvprv = bv;
			seg_size = bv.bv_len;
			highprv = high;
		}
		bbio = bio;
	}

	if (nr_phys_segs == 1 && seg_size > fbio->bi_seg_front_size)
		fbio->bi_seg_front_size = seg_size;
	if (seg_size > bbio->bi_seg_back_size)
		bbio->bi_seg_back_size = seg_size;

	return nr_phys_segs;
}

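/*
 * Worked example (editorial illustration, not in the original file): with
 * clustering on, two adjacent bvecs on the same page, e.g.
 *
 *	bvprv = { .bv_page = p, .bv_offset = 0,   .bv_len = 512 };
 *	bv    = { .bv_page = p, .bv_offset = 512, .bv_len = 512 };
 *
 * pass BIOVEC_PHYS_MERGEABLE() and are counted above as one 1024-byte
 * physical segment rather than two, provided queue_max_segment_size(q)
 * and the queue's segment boundary mask also allow it.
 */
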
void blk_recalc_rq_segments(struct request *rq)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&rq->q->queue_flags);

	rq->nr_phys_segments = __blk_recalc_rq_segments(rq->q, rq->bio,
			no_sg_merge);
}

void blk_recount_segments(struct request_queue *q, struct bio *bio)
{
	bool no_sg_merge = !!test_bit(QUEUE_FLAG_NO_SG_MERGE,
			&q->queue_flags);

	if (no_sg_merge && !bio_flagged(bio, BIO_CLONED) &&
			bio->bi_vcnt < queue_max_segments(q))
		bio->bi_phys_segments = bio->bi_vcnt;
	else {
		struct bio *nxt = bio->bi_next;

		bio->bi_next = NULL;
		bio->bi_phys_segments = __blk_recalc_rq_segments(q, bio,
				no_sg_merge);
		bio->bi_next = nxt;
	}

	bio->bi_flags |= (1 << BIO_SEG_VALID);
}
EXPORT_SYMBOL(blk_recount_segments);

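/*
 * Caller pattern (sketch; this mirrors ll_back_merge_fn() further down):
 * the segment count is recomputed lazily, only when BIO_SEG_VALID is not
 * yet set on the bio:
 *
 *	if (!bio_flagged(bio, BIO_SEG_VALID))
 *		blk_recount_segments(q, bio);
 *	nr_segs = bio_phys_segments(q, bio);
 */
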
static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
				   struct bio *nxt)
{
	struct bio_vec end_bv = { NULL }, nxt_bv;
	struct bvec_iter iter;

	if (!blk_queue_cluster(q))
		return 0;

	if (bio->bi_seg_back_size + nxt->bi_seg_front_size >
	    queue_max_segment_size(q))
		return 0;

	if (!bio_has_data(bio))
		return 1;

	/*
	 * Find the last bvec of 'bio': it is the one whose length equals
	 * the iterator's remaining size.
	 */
	bio_for_each_segment(end_bv, bio, iter)
		if (end_bv.bv_len == iter.bi_size)
			break;

	nxt_bv = bio_iovec(nxt);

	if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
		return 0;

	/*
	 * bio and nxt are contiguous in memory; check if the queue allows
	 * these two to be merged into one
	 */
	if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
		return 1;

	return 0;
}

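/*
 * Example (illustrative numbers): if bio ends in a 4096-byte segment
 * (bi_seg_back_size) and nxt begins with a 4096-byte segment
 * (bi_seg_front_size), the combined 8192-byte segment is only permitted
 * when queue_max_segment_size(q) >= 8192 and the tail/head bvecs are
 * physically contiguous without crossing the queue's boundary mask.
 */
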
static inline void
__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
		     struct scatterlist *sglist, struct bio_vec *bvprv,
		     struct scatterlist **sg, int *nsegs, int *cluster)
{
	int nbytes = bvec->bv_len;

	if (*sg && *cluster) {
		if ((*sg)->length + nbytes > queue_max_segment_size(q))
			goto new_segment;

		if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
			goto new_segment;
		if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
			goto new_segment;

		(*sg)->length += nbytes;
	} else {
new_segment:
		if (!*sg)
			*sg = sglist;
		else {
			/*
			 * If the driver previously mapped a shorter
			 * list, we could see a termination bit
			 * prematurely unless it fully inits the sg
			 * table on each mapping. We KNOW that there
			 * must be more entries here or the driver
			 * would be buggy, so force clear the
			 * termination bit to avoid doing a full
			 * sg_init_table() in drivers for each command.
			 */
			sg_unmark_end(*sg);
			*sg = sg_next(*sg);
		}

		sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
		(*nsegs)++;
	}
	*bvprv = *bvec;
}

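/*
 * Editorial note: the three merge checks above deliberately mirror the
 * ones in __blk_recalc_rq_segments(), so the scatterlist built here
 * should never contain more entries than the precomputed
 * nr_phys_segments (see the BUG_ON() in blk_bio_map_sg() below).
 */
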
static int __blk_bios_map_sg(struct request_queue *q, struct bio *bio,
			     struct scatterlist *sglist,
			     struct scatterlist **sg)
{
	struct bio_vec bvec, bvprv = { NULL };
	struct bvec_iter iter;
	int nsegs, cluster;

	nsegs = 0;
	cluster = blk_queue_cluster(q);

	if (bio->bi_rw & REQ_DISCARD) {
		/*
		 * This is a hack - drivers should be neither modifying the
		 * biovec, nor relying on bi_vcnt - but because of
		 * blk_add_request_payload(), a discard bio may or may not have
		 * a payload we need to set up here (thank you Christoph) and
		 * bi_vcnt is really the only way of telling if we need to.
		 */

		if (bio->bi_vcnt)
			goto single_segment;

		return 0;
	}

	if (bio->bi_rw & REQ_WRITE_SAME) {
single_segment:
		*sg = sglist;
		bvec = bio_iovec(bio);
		sg_set_page(*sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
		return 1;
	}

	for_each_bio(bio)
		bio_for_each_segment(bvec, bio, iter)
			__blk_segment_map_sg(q, &bvec, sglist, &bvprv, sg,
					     &nsegs, &cluster);

	return nsegs;
}

/*
 * map a request to scatterlist, return number of sg entries setup. Caller
 * must make sure sg can hold rq->nr_phys_segments entries
 */
int blk_rq_map_sg(struct request_queue *q, struct request *rq,
		  struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs = 0;

	if (rq->bio)
		nsegs = __blk_bios_map_sg(q, rq->bio, sglist, &sg);

	if (unlikely(rq->cmd_flags & REQ_COPY_USER) &&
	    (blk_rq_bytes(rq) & q->dma_pad_mask)) {
		unsigned int pad_len =
			(q->dma_pad_mask & ~blk_rq_bytes(rq)) + 1;

		sg->length += pad_len;
		rq->extra_len += pad_len;
	}

	if (q->dma_drain_size && q->dma_drain_needed(rq)) {
		if (rq->cmd_flags & REQ_WRITE)
			memset(q->dma_drain_buffer, 0, q->dma_drain_size);

		/* clear any stale termination bit before stepping past it */
		sg->page_link &= ~0x02;
		sg = sg_next(sg);
		sg_set_page(sg, virt_to_page(q->dma_drain_buffer),
			    q->dma_drain_size,
			    ((unsigned long)q->dma_drain_buffer) &
			    (PAGE_SIZE - 1));
		nsegs++;
		rq->extra_len += q->dma_drain_size;
	}

	if (sg)
		sg_mark_end(sg);

	return nsegs;
}
EXPORT_SYMBOL(blk_rq_map_sg);

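/*
 * Usage sketch (editorial illustration, not part of the original file):
 * a typical driver maps a request into a preallocated table and then
 * DMA-maps it.  MY_MAX_SEGS, dev and program_descriptor() are
 * hypothetical; the table is assumed to be sized for queue_max_segments(q).
 *
 *	struct scatterlist sgl[MY_MAX_SEGS], *sg;
 *	int nents, i;
 *
 *	sg_init_table(sgl, MY_MAX_SEGS);
 *	nents = blk_rq_map_sg(q, rq, sgl);
 *	nents = dma_map_sg(dev, sgl, nents,
 *			   rq_data_dir(rq) ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
 *	for_each_sg(sgl, sg, nents, i)
 *		program_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 */
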
/**
 * blk_bio_map_sg - map a bio to a scatterlist
 * @q: request_queue in question
 * @bio: bio being mapped
 * @sglist: scatterlist being mapped
 *
 * Note:
 *    Caller must make sure sg can hold bio->bi_phys_segments entries
 *
 * Will return the number of sg entries setup
 */
int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
		   struct scatterlist *sglist)
{
	struct scatterlist *sg = NULL;
	int nsegs;
	struct bio *next = bio->bi_next;

	bio->bi_next = NULL;
	nsegs = __blk_bios_map_sg(q, bio, sglist, &sg);
	bio->bi_next = next;
	if (sg)
		sg_mark_end(sg);

	/*
	 * Something must have gone wrong if the computed segment count
	 * exceeds the precalculated number of physical segments.
	 */
	BUG_ON(bio->bi_phys_segments && nsegs > bio->bi_phys_segments);

	return nsegs;
}
EXPORT_SYMBOL(blk_bio_map_sg);

static inline int ll_new_hw_segment(struct request_queue *q,
				    struct request *req,
				    struct bio *bio)
{
	int nr_phys_segs = bio_phys_segments(q, bio);

	if (req->nr_phys_segments + nr_phys_segs > queue_max_segments(q))
		goto no_merge;

	if (blk_integrity_merge_bio(q, req, bio) == false)
		goto no_merge;

	/*
	 * This will form the start of a new hw segment.  Bump both
	 * counters.
	 */
	req->nr_phys_segments += nr_phys_segs;
	return 1;

no_merge:
	req->cmd_flags |= REQ_NOMERGE;
	if (req == q->last_merge)
		q->last_merge = NULL;
	return 0;
}

int ll_back_merge_fn(struct request_queue *q, struct request *req,
		     struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(req->biotail, BIO_SEG_VALID))
		blk_recount_segments(q, req->biotail);
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);

	return ll_new_hw_segment(q, req, bio);
}

int ll_front_merge_fn(struct request_queue *q, struct request *req,
		      struct bio *bio)
{
	if (blk_rq_sectors(req) + bio_sectors(bio) >
	    blk_rq_get_max_sectors(req)) {
		req->cmd_flags |= REQ_NOMERGE;
		if (req == q->last_merge)
			q->last_merge = NULL;
		return 0;
	}
	if (!bio_flagged(bio, BIO_SEG_VALID))
		blk_recount_segments(q, bio);
	if (!bio_flagged(req->bio, BIO_SEG_VALID))
		blk_recount_segments(q, req->bio);

	return ll_new_hw_segment(q, req, bio);
}

/*
 * blk-mq uses req->special to carry normal driver per-request payload, it
 * does not indicate a prepared command that we cannot merge with.
 */
static bool req_no_special_merge(struct request *req)
{
	struct request_queue *q = req->q;

	return !q->mq_ops && req->special;
}

static int ll_merge_requests_fn(struct request_queue *q, struct request *req,
				struct request *next)
{
	int total_phys_segments;
	unsigned int seg_size =
		req->biotail->bi_seg_back_size + next->bio->bi_seg_front_size;

	/*
	 * First check if either of the requests are re-queued
	 * requests.  Can't merge them if they are.
	 */
	if (req_no_special_merge(req) || req_no_special_merge(next))
		return 0;

	/*
	 * Will it become too large?
	 */
	if ((blk_rq_sectors(req) + blk_rq_sectors(next)) >
	    blk_rq_get_max_sectors(req))
		return 0;

	total_phys_segments = req->nr_phys_segments + next->nr_phys_segments;
	if (blk_phys_contig_segment(q, req->biotail, next->bio)) {
		if (req->nr_phys_segments == 1)
			req->bio->bi_seg_front_size = seg_size;
		if (next->nr_phys_segments == 1)
			next->biotail->bi_seg_back_size = seg_size;
		total_phys_segments--;
	}

	if (total_phys_segments > queue_max_segments(q))
		return 0;

	if (blk_integrity_merge_rq(q, req, next) == false)
		return 0;

	/* Merge is OK... */
	req->nr_phys_segments = total_phys_segments;
	return 1;
}

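/*
 * Worked example (illustrative): req has 3 physical segments, next has 2.
 * If req's last and next's first segment are physically contiguous,
 * blk_phys_contig_segment() succeeds above and the merged request needs
 * only 3 + 2 - 1 = 4 segments, which must still fit queue_max_segments(q).
 */
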
/**
 * blk_rq_set_mixed_merge - mark a request as mixed merge
 * @rq: request to mark as mixed merge
 *
 * Description:
 *     @rq is about to be mixed merged.  Make sure the attributes
 *     which can be mixed are set in each bio and mark @rq as mixed
 *     merged.
 */
void blk_rq_set_mixed_merge(struct request *rq)
{
	unsigned int ff = rq->cmd_flags & REQ_FAILFAST_MASK;
	struct bio *bio;

	if (rq->cmd_flags & REQ_MIXED_MERGE)
		return;

	/*
	 * @rq will no longer represent mixable attributes for all the
	 * contained bios.  It will just track those of the first one.
	 * Distribute the attributes to each bio.
	 */
	for (bio = rq->bio; bio; bio = bio->bi_next) {
		WARN_ON_ONCE((bio->bi_rw & REQ_FAILFAST_MASK) &&
			     (bio->bi_rw & REQ_FAILFAST_MASK) != ff);
		bio->bi_rw |= ff;
	}
	rq->cmd_flags |= REQ_MIXED_MERGE;
}

static void blk_account_io_merge(struct request *req)
{
	if (blk_do_io_stat(req)) {
		struct hd_struct *part;
		int cpu;

		cpu = part_stat_lock();
		part = req->part;

		part_round_stats(cpu, part);
		part_dec_in_flight(part, rq_data_dir(req));

		hd_struct_put(part);
		part_stat_unlock();
	}
}

/*
 * Has to be called with the request spinlock acquired
 */
static int attempt_merge(struct request_queue *q, struct request *req,
			 struct request *next)
{
	if (!rq_mergeable(req) || !rq_mergeable(next))
		return 0;

	if (!blk_check_merge_flags(req->cmd_flags, next->cmd_flags))
		return 0;

	/*
	 * not contiguous
	 */
	if (blk_rq_pos(req) + blk_rq_sectors(req) != blk_rq_pos(next))
		return 0;

	if (rq_data_dir(req) != rq_data_dir(next)
	    || req->rq_disk != next->rq_disk
	    || req_no_special_merge(next))
		return 0;

	if (req->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(req->bio, next->bio))
		return 0;

	/*
	 * If we are allowed to merge, then append bio list
	 * from next to rq and release next. merge_requests_fn
	 * will have updated segment counts, update sector
	 * counts here.
	 */
	if (!ll_merge_requests_fn(q, req, next))
		return 0;

	/*
	 * If failfast settings disagree or any of the two is already
	 * a mixed merge, mark both as mixed before proceeding.  This
	 * makes sure that all involved bios have mixable attributes
	 * set properly.
	 */
	if ((req->cmd_flags | next->cmd_flags) & REQ_MIXED_MERGE ||
	    (req->cmd_flags & REQ_FAILFAST_MASK) !=
	    (next->cmd_flags & REQ_FAILFAST_MASK)) {
		blk_rq_set_mixed_merge(req);
		blk_rq_set_mixed_merge(next);
	}

	/*
	 * At this point we have either done a back merge
	 * or front merge. We need the smaller start_time of
	 * the merged requests to be the current request
	 * for accounting purposes.
	 */
	if (time_after(req->start_time, next->start_time))
		req->start_time = next->start_time;

	req->biotail->bi_next = next->bio;
	req->biotail = next->biotail;

	req->__data_len += blk_rq_bytes(next);

	elv_merge_requests(q, req, next);

	/*
	 * 'next' is going away, so update stats accordingly
	 */
	blk_account_io_merge(next);

	req->ioprio = ioprio_best(req->ioprio, next->ioprio);
	if (blk_rq_cpu_valid(next))
		req->cpu = next->cpu;

	/* ownership of bio passed from next to req */
	next->bio = NULL;
	__blk_put_request(q, next);
	return 1;
}

int attempt_back_merge(struct request_queue *q, struct request *rq)
{
	struct request *next = elv_latter_request(q, rq);

	if (next)
		return attempt_merge(q, rq, next);

	return 0;
}

int attempt_front_merge(struct request_queue *q, struct request *rq)
{
	struct request *prev = elv_former_request(q, rq);

	if (prev)
		return attempt_merge(q, prev, rq);

	return 0;
}

int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
			  struct request *next)
{
	return attempt_merge(q, rq, next);
}

bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
{
	struct request_queue *q = rq->q;

	if (!rq_mergeable(rq) || !bio_mergeable(bio))
		return false;

	if (!blk_check_merge_flags(rq->cmd_flags, bio->bi_rw))
		return false;

	/* different data direction or already started, don't merge */
	if (bio_data_dir(bio) != rq_data_dir(rq))
		return false;

	/* must be same device and not a special request */
	if (rq->rq_disk != bio->bi_bdev->bd_disk || req_no_special_merge(rq))
		return false;

	/* only merge integrity protected bio into ditto rq */
	if (blk_integrity_merge_bio(rq->q, rq, bio) == false)
		return false;

	/* must be using the same buffer */
	if (rq->cmd_flags & REQ_WRITE_SAME &&
	    !blk_write_same_mergeable(rq->bio, bio))
		return false;

	if (q->queue_flags & (1 << QUEUE_FLAG_SG_GAPS)) {
		struct bio_vec *bprev;

		/*
		 * The last bvec of the request's tail bio is indexed by
		 * the tail bio's own bi_vcnt, not the incoming bio's.
		 */
		bprev = &rq->biotail->bi_io_vec[rq->biotail->bi_vcnt - 1];
		if (bvec_gap_to_prev(bprev, bio->bi_io_vec[0].bv_offset))
			return false;
	}

	return true;
}

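/*
 * Gap example (editorial): with QUEUE_FLAG_SG_GAPS set, the merge is
 * refused when the incoming bio's first bvec starts at a non-zero offset,
 * or when the request's tail bvec does not end on a page boundary; either
 * case would leave a hole the device cannot span within one SG element.
 */
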
int blk_try_merge(struct request *rq, struct bio *bio)
{
	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
		return ELEVATOR_BACK_MERGE;
	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
		return ELEVATOR_FRONT_MERGE;
	return ELEVATOR_NO_MERGE;
}

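/*
 * Example (illustrative sector math): for a request covering sectors
 * [100, 108) and an 8-sector bio:
 *
 *	bi_sector == 108: back merge  (100 + 8 == 108)
 *	bi_sector ==  92: front merge (100 - 8 ==  92)
 *	anything else:    no merge
 */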