/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG
#define MIN_IOS		16
#define MIN_BIOS	16

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

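/*
 * Note on the alignment above: DM_IO_MAX_REGIONS equals BITS_PER_LONG
 * (32 or 64), a power of two, so an aligned 'struct io' pointer always
 * has its low 5 or 6 bits clear - exactly enough spare bits to encode
 * a region number.  error_bits, an unsigned long, likewise holds one
 * error bit per possible region.
 */
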
static struct kmem_cache *_dm_io_cache;

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(void)
{
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(MIN_IOS, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(MIN_BIOS, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
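
/*
 * For example, on a 64-bit kernel DM_IO_MAX_REGIONS is 64, so an
 * aligned io at 0x...9000 dispatched for region 5 is stored as
 * bi_private == 0x...9005.  Since -64UL == ~63UL, masking with
 * -DM_IO_MAX_REGIONS recovers the pointer (0x...9000) and masking
 * with DM_IO_MAX_REGIONS - 1 recovers the region (5).
 */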

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error)
		set_bit(region, &io->error_bits);

	if (atomic_dec_and_test(&io->count)) {
		if (io->vma_invalidate_size)
			invalidate_kernel_vmap_range(io->vma_invalidate_address,
						     io->vma_invalidate_size);

		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}
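
/*
 * Reference counting: io->count starts at 1 (held by the dispatcher),
 * do_region() takes one reference per bio it submits, and every
 * completion drops one here.  dispatch_io() releases the initial
 * reference only after all regions have been issued, so the io cannot
 * be freed or reported complete while bios are still being dispatched.
 */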

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;

	void *vma_invalidate_address;
	unsigned long vma_invalidate_size;
};
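
/*
 * A dpages acts as a cursor over the caller's memory: get_page()
 * reports the current page, the usable length from 'offset' to the
 * end of that page, and the offset itself; next_page() advances the
 * cursor.  A consumer (see do_region() below) therefore loops:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	... consume up to 'len' bytes of 'page' starting at 'offset' ...
 *	dp->next_page(dp);
 */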

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
		  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
		 struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
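
/*
 * Summary of the four dpages back-ends and the dm_io_mem_type each one
 * serves in dp_init() below: list_dp_init() walks a chained struct
 * page_list (DM_IO_PAGE_LIST), bvec_dp_init() walks a bio_vec array
 * (DM_IO_BVEC), vm_dp_init() translates vmalloc'd addresses page by
 * page (DM_IO_VMA), and km_dp_init() does the same for directly
 * mapped kernel memory (DM_IO_KMEM).
 */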

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;
	struct request_queue *q = bdev_get_queue(where->bdev);
	unsigned short logical_block_size = queue_logical_block_size(q);
	sector_t num_sectors;

	/*
	 * where->count may be zero if rw holds a flush and we need to
	 * send a zero-sized flush.
	 */
	do {
		/*
		 * Allocate a suitably sized bio.
		 */
		if ((rw & REQ_DISCARD) || (rw & REQ_WRITE_SAME))
			num_bvecs = 1;
		else
			num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev),
					  dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));

		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		store_io_and_region_in_bio(bio, io, region);

		if (rw & REQ_DISCARD) {
			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;
			remaining -= num_sectors;
		} else if (rw & REQ_WRITE_SAME) {
			/*
			 * WRITE SAME only uses a single page.
			 */
			dp->get_page(dp, &page, &len, &offset);
			bio_add_page(bio, page, logical_block_size, offset);
			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
			bio->bi_size = num_sectors << SECTOR_SHIFT;

			offset = 0;
			remaining -= num_sectors;
			dp->next_page(dp);
		} else while (remaining) {
			/*
			 * Try and add as many pages as possible.
			 */
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}
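
/*
 * Note that a single region may be split across several bios: if
 * bio_add_page() refuses a page, or num_bvecs was capped by
 * bio_get_nr_vecs(), the inner loop breaks with 'remaining' non-zero
 * and the outer do/while starts a fresh bio at the next unissued
 * sector.  Discard and write-same bios are instead sized by the
 * queue's max_discard_sectors / max_write_same_sectors limits.
 */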

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_FLUSH))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	io->vma_invalidate_address = dp->vma_invalidate_address;
	io->vma_invalidate_size = dp->vma_invalidate_size;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
		   unsigned long size)
{
	/* Set up dpages based on memory type */

	dp->vma_invalidate_address = NULL;
	dp->vma_invalidate_size = 0;

	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		flush_kernel_vmap_range(io_req->mem.ptr.vma, size);
		if ((io_req->bi_rw & RW_MASK) == READ) {
			dp->vma_invalidate_address = io_req->mem.ptr.vma;
			dp->vma_invalidate_size = size;
		}
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}
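
/*
 * The DM_IO_VMA case pairs cache maintenance around the transfer:
 * dp_init() above calls flush_kernel_vmap_range() before the I/O is
 * issued, and for reads dec_count() calls
 * invalidate_kernel_vmap_range() once the last bio completes, so CPUs
 * with aliasing (virtually indexed) caches see coherent data through
 * the vmalloc mapping.
 */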

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp, (unsigned long)where->count << SECTOR_SHIFT);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
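
/*
 * A minimal sketch of synchronous use (the 'client', 'bdev' and
 * 'buffer' names here are hypothetical):
 *
 *	struct dm_io_region where = {
 *		.bdev   = bdev,			// target block device
 *		.sector = 0,
 *		.count  = 8,			// 8 sectors = 4KiB
 *	};
 *	struct dm_io_request req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,		// kernel memory
 *		.notify.fn    = NULL,		// NULL => sync_io() path
 *		.client       = client,		// from dm_io_client_create()
 *	};
 *	unsigned long error_bits;
 *
 *	int r = dm_io(&req, 1, &where, &error_bits);
 *
 * Supplying a notify.fn instead makes dm_io() return immediately and
 * invoke the callback with the per-region error bitset once all
 * regions have completed.
 */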

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}