/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address. Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;
	unsigned min_ios = dm_get_reserved_bio_based_ios();

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(min_ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(min_ios, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
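/*
 * Usage sketch (illustrative, not part of this file; 'ioc' is a
 * hypothetical caller-side variable): a target typically creates one
 * client when it is constructed and destroys it when it is torn down:
 *
 *	struct dm_io_client *ioc = dm_io_client_create();
 *
 *	if (IS_ERR(ioc))
 *		return PTR_ERR(ioc);
 *	...
 *	dm_io_client_destroy(ioc);
 *
 * The per-client mempool and bioset guarantee forward progress under
 * memory pressure, which is why io is issued through a client rather
 * than through bare bio allocations.
 */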
/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
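/*
 * Worked example of the packing above (illustrative): with
 * BITS_PER_LONG == 64, 'struct io' is 64-byte aligned, so the low six
 * bits of its address are zero and can hold a region number of 0-63.
 * For an io at 0xffff880012345640 doing io for region 5, bi_private
 * becomes 0xffff880012345645.  Masking with -DM_IO_MAX_REGIONS
 * (i.e. ~(DM_IO_MAX_REGIONS - 1)) recovers the pointer, and masking
 * with DM_IO_MAX_REGIONS - 1 recovers the region.
 */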
/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			wake_up_process(io->sleeper);
		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
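/*
 * The four dpages flavours above correspond to the dm_io_mem_type
 * values handled in dp_init() below: DM_IO_PAGE_LIST -> list_*,
 * DM_IO_BVEC -> bvec_*, DM_IO_VMA -> vm_* and DM_IO_KMEM -> km_*.
 * Each supplies the same two operations - get_page() to describe the
 * current page fragment and next_page() to advance - so do_region()
 * can build bios without knowing where the memory came from.
 */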
/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}
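/*
 * Note on the stack trick above (illustrative): the io_ buffer is
 * over-allocated by __alignof__(struct io) - 1 bytes and PTR_ALIGN()
 * rounds its address up to the next __alignof__(struct io) boundary,
 * so the resulting 'io' pointer always passes the IS_ALIGNED() check
 * in store_io_and_region_in_bio().
 */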
static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
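/*
 * Example caller (an illustrative sketch; 'bdev', 'buffer' and 'ioc'
 * are hypothetical): a synchronous read of 8 sectors from a single
 * region into kernel memory could be issued as:
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,
 *		.client       = ioc,
 *	};
 *	int r = dm_io(&io_req, 1, &where, NULL);
 *
 * Leaving notify.fn zeroed selects the synchronous sync_io() path;
 * setting it makes dm_io() return immediately and the accumulated
 * error bits are passed to the callback instead.
 */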
int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}