/*
 * Functions related to mapping data to requests
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <scsi/sg.h>		/* for struct sg_iovec */

#include "blk.h"

int blk_rq_append_bio(struct request_queue *q, struct request *rq,
		      struct bio *bio)
{
	if (!rq->bio)
		blk_rq_bio_prep(q, rq, bio);
	else if (!ll_back_merge_fn(q, rq, bio))
		return -EINVAL;
	else {
		rq->biotail->bi_next = bio;
		rq->biotail = bio;

		rq->__data_len += bio->bi_size;
	}

	return 0;
}

static int __blk_rq_unmap_user(struct bio *bio)
{
	int ret = 0;

	if (bio) {
		if (bio_flagged(bio, BIO_USER_MAPPED))
			bio_unmap_user(bio);
		else
			ret = bio_uncopy_user(bio);
	}

	return ret;
}

static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
			     struct rq_map_data *map_data, void __user *ubuf,
			     unsigned int len, gfp_t gfp_mask)
{
	unsigned long uaddr;
	struct bio *bio, *orig_bio;
	int reading, ret;

	reading = rq_data_dir(rq) == READ;

	/*
	 * if alignment requirement is satisfied, map in user pages for
	 * direct dma. else, set up kernel bounce buffers
	 */
	uaddr = (unsigned long) ubuf;
	if (blk_rq_aligned(q, uaddr, len) && !map_data)
		bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask);
	else
		bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (map_data && map_data->null_mapped)
		bio->bi_flags |= (1 << BIO_NULL_MAPPED);

	orig_bio = bio;
	blk_queue_bounce(q, &bio);

	/*
	 * We link the bounce buffer in and could have to traverse it
	 * later, so we have to get a ref to prevent it from being freed
	 */
	bio_get(bio);

	ret = blk_rq_append_bio(q, rq, bio);
	if (!ret)
		return bio->bi_size;

	/* if it was bounced we must call the end io function */
	bio_endio(bio, 0);
	__blk_rq_unmap_user(orig_bio);
	bio_put(bio);
	return ret;
}

/**
 * blk_rq_map_user - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request structure to fill
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @ubuf:	the user buffer
 * @len:	length of user data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user(struct request_queue *q, struct request *rq,
		    struct rq_map_data *map_data, void __user *ubuf,
		    unsigned long len, gfp_t gfp_mask)
{
	unsigned long bytes_read = 0;
	struct bio *bio = NULL;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len)
		return -EINVAL;

	if (!ubuf && (!map_data || !map_data->null_mapped))
		return -EINVAL;

	while (bytes_read != len) {
		unsigned long map_len, end, start;

		map_len = min_t(unsigned long, len - bytes_read, BIO_MAX_SIZE);
		end = ((unsigned long)ubuf + map_len + PAGE_SIZE - 1)
								>> PAGE_SHIFT;
		start = (unsigned long)ubuf >> PAGE_SHIFT;

		/*
		 * A bad offset could cause us to require BIO_MAX_PAGES + 1
		 * pages. If this happens we just lower the requested
		 * mapping len by a page so that we can fit
		 */
		if (end - start > BIO_MAX_PAGES)
			map_len -= PAGE_SIZE;

		ret = __blk_rq_map_user(q, rq, map_data, ubuf, map_len,
					gfp_mask);
		if (ret < 0)
			goto unmap_rq;
		if (!bio)
			bio = rq->bio;
		bytes_read += ret;
		ubuf += ret;

		if (map_data)
			map_data->offset += ret;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	rq->buffer = NULL;
	return 0;
unmap_rq:
	blk_rq_unmap_user(bio);
	rq->bio = NULL;
	return ret;
}
EXPORT_SYMBOL(blk_rq_map_user);

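/*
 * Illustrative sketch, not part of this file: how a caller might use
 * blk_rq_map_user()/blk_rq_unmap_user() around a passthrough request.
 * The request setup and submission details (blk_get_request(), command
 * initialisation, the submit/wait step) are assumed context for the
 * example, which is kept under #if 0 so it is never built.
 */
#if 0
static int example_map_user_buffer(struct request_queue *q,
				   void __user *ubuf, unsigned long len)
{
	struct request *rq;
	struct bio *orig_bio;
	int ret;

	rq = blk_get_request(q, READ, GFP_KERNEL);
	if (!rq)
		return -ENOMEM;
	rq->cmd_type = REQ_TYPE_BLOCK_PC;

	/* map the user buffer; unaligned buffers fall back to a bounce copy */
	ret = blk_rq_map_user(q, rq, NULL, ubuf, len, GFP_KERNEL);
	if (ret)
		goto out;

	/* remember rq->bio now: I/O completion may change rq->bio later */
	orig_bio = rq->bio;

	/* ... fill in the command, submit and wait for completion (not shown) ... */

	/* must run in process context once the I/O has completed */
	ret = blk_rq_unmap_user(orig_bio);
out:
	blk_put_request(rq);
	return ret;
}
#endif
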
/**
 * blk_rq_map_user_iov - map user data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to map data to
 * @map_data:	pointer to the rq_map_data holding pages (if necessary)
 * @iov:	pointer to the iovec
 * @iov_count:	number of elements in the iovec
 * @len:	I/O byte count
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly for zero copy I/O, if possible. Otherwise
 *    a kernel bounce buffer is used.
 *
 *    A matching blk_rq_unmap_user() must be issued at the end of I/O, while
 *    still in process context.
 *
 *    Note: The mapped bio may need to be bounced through blk_queue_bounce()
 *    before being submitted to the device, as pages mapped may be out of
 *    reach. It's the caller's responsibility to make sure this happens. The
 *    original bio must be passed back in to blk_rq_unmap_user() for proper
 *    unmapping.
 */
int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
			struct rq_map_data *map_data, struct sg_iovec *iov,
			int iov_count, unsigned int len, gfp_t gfp_mask)
{
	struct bio *bio;
	int i, read = rq_data_dir(rq) == READ;
	int unaligned = 0;

	if (!iov || iov_count <= 0)
		return -EINVAL;

	for (i = 0; i < iov_count; i++) {
		unsigned long uaddr = (unsigned long)iov[i].iov_base;

		if (!iov[i].iov_len)
			return -EINVAL;

		/*
		 * Keep going so we check length of all segments
		 */
		if (uaddr & queue_dma_alignment(q))
			unaligned = 1;
	}

	if (unaligned || (q->dma_pad_mask & len) || map_data)
		bio = bio_copy_user_iov(q, map_data, iov, iov_count, read,
					gfp_mask);
	else
		bio = bio_map_user_iov(q, NULL, iov, iov_count, read, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (bio->bi_size != len) {
		/*
		 * Grab an extra reference to this bio, as bio_unmap_user()
		 * expects to be able to drop it twice as it happens on the
		 * normal IO completion path
		 */
		bio_get(bio);
		bio_endio(bio, 0);
		__blk_rq_unmap_user(bio);
		return -EINVAL;
	}

	if (!bio_flagged(bio, BIO_USER_MAPPED))
		rq->cmd_flags |= REQ_COPY_USER;

	blk_queue_bounce(q, &bio);
	bio_get(bio);
	blk_rq_bio_prep(q, rq, bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_user_iov);

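/*
 * Illustrative sketch, not part of this file: mapping a two-segment user
 * buffer with blk_rq_map_user_iov().  The header/data split and the helper
 * name are assumptions made up for the example; kept under #if 0.
 */
#if 0
static int example_map_user_iov(struct request_queue *q, struct request *rq,
				void __user *hdr, unsigned int hdr_len,
				void __user *data, unsigned int data_len)
{
	struct sg_iovec iov[2];

	iov[0].iov_base = hdr;
	iov[0].iov_len  = hdr_len;
	iov[1].iov_base = data;
	iov[1].iov_len  = data_len;

	/*
	 * Segments that violate the queue's DMA alignment (or a non-NULL
	 * map_data) force a copy; otherwise the user pages are mapped
	 * directly for zero copy I/O.
	 */
	return blk_rq_map_user_iov(q, rq, NULL, iov, 2,
				   hdr_len + data_len, GFP_KERNEL);
}
#endif
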
/**
 * blk_rq_unmap_user - unmap a request with user data
 * @bio:	start of bio list
 *
 * Description:
 *    Unmap a rq previously mapped by blk_rq_map_user(). The caller must
 *    supply the original rq->bio from the blk_rq_map_user() return, since
 *    the I/O completion may have changed rq->bio.
 */
int blk_rq_unmap_user(struct bio *bio)
{
	struct bio *mapped_bio;
	int ret = 0, ret2;

	while (bio) {
		mapped_bio = bio;
		if (unlikely(bio_flagged(bio, BIO_BOUNCED)))
			mapped_bio = bio->bi_private;

		ret2 = __blk_rq_unmap_user(mapped_bio);
		if (ret2 && !ret)
			ret = ret2;

		mapped_bio = bio;
		bio = bio->bi_next;
		bio_put(mapped_bio);
	}

	return ret;
}
EXPORT_SYMBOL(blk_rq_unmap_user);

/**
 * blk_rq_map_kern - map kernel data to a request, for REQ_TYPE_BLOCK_PC usage
 * @q:		request queue where request should be inserted
 * @rq:		request to fill
 * @kbuf:	the kernel buffer
 * @len:	length of kernel data
 * @gfp_mask:	memory allocation flags
 *
 * Description:
 *    Data will be mapped directly if possible. Otherwise a bounce
 *    buffer is used. Can be called multiple times to append multiple
 *    buffers.
 */
int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
		    unsigned int len, gfp_t gfp_mask)
{
	int reading = rq_data_dir(rq) == READ;
	unsigned long addr = (unsigned long) kbuf;
	int do_copy = 0;
	struct bio *bio;
	int ret;

	if (len > (queue_max_hw_sectors(q) << 9))
		return -EINVAL;
	if (!len || !kbuf)
		return -EINVAL;

	do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
	if (do_copy)
		bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
	else
		bio = bio_map_kern(q, kbuf, len, gfp_mask);

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	if (!reading)
		bio->bi_rw |= REQ_WRITE;

	if (do_copy)
		rq->cmd_flags |= REQ_COPY_USER;

	ret = blk_rq_append_bio(q, rq, bio);
	if (unlikely(ret)) {
		/* request is too big */
		bio_put(bio);
		return ret;
	}

	blk_queue_bounce(q, &rq->bio);
	rq->buffer = NULL;
	return 0;
}
EXPORT_SYMBOL(blk_rq_map_kern);

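/*
 * Illustrative sketch, not part of this file: attaching a kernel buffer to
 * a passthrough request with blk_rq_map_kern().  The kmalloc()'d buffer and
 * the helper name are assumptions made up for the example; stack buffers
 * would be detected by object_is_on_stack() above and copied instead of
 * mapped directly.  Kept under #if 0 so it is never built.
 */
#if 0
static int example_map_kern_buffer(struct request_queue *q, struct request *rq,
				   unsigned int len)
{
	void *buf;
	int ret;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* map (or bounce-copy) the kernel buffer into the request */
	ret = blk_rq_map_kern(q, rq, buf, len, GFP_KERNEL);

	/* ... submit the request; free buf after completion (not shown) ... */
	return ret;
}
#endif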