/*
 * Copyright (C) 2003 Sistina Software
 * Copyright (C) 2006 Red Hat GmbH
 *
 * This file is released under the GPL.
 */

#include "dm.h"

#include <linux/device-mapper.h>

#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dm-io.h>

#define DM_MSG_PREFIX "io"

#define DM_IO_MAX_REGIONS	BITS_PER_LONG

struct dm_io_client {
	mempool_t *pool;
	struct bio_set *bios;
};

/*
 * Aligning 'struct io' reduces the number of bits required to store
 * its address.  Refer to store_io_and_region_in_bio() below.
 */
struct io {
	unsigned long error_bits;
	unsigned long eopnotsupp_bits;
	atomic_t count;
	struct task_struct *sleeper;
	struct dm_io_client *client;
	io_notify_fn callback;
	void *context;
} __attribute__((aligned(DM_IO_MAX_REGIONS)));

static struct kmem_cache *_dm_io_cache;

/*
 * io contexts are only dynamically allocated for asynchronous
 * io.  Since async io is likely to be the majority of io we'll
 * have the same number of io contexts as bios! (FIXME: must reduce this).
 */

static unsigned int pages_to_ios(unsigned int pages)
{
	return 4 * pages;	/* too many ? */
}

/*
 * Create a client with mempool and bioset.
 */
struct dm_io_client *dm_io_client_create(unsigned num_pages)
{
	unsigned ios = pages_to_ios(num_pages);
	struct dm_io_client *client;

	client = kmalloc(sizeof(*client), GFP_KERNEL);
	if (!client)
		return ERR_PTR(-ENOMEM);

	client->pool = mempool_create_slab_pool(ios, _dm_io_cache);
	if (!client->pool)
		goto bad;

	client->bios = bioset_create(16, 0);
	if (!client->bios)
		goto bad;

	return client;

bad:
	if (client->pool)
		mempool_destroy(client->pool);
	kfree(client);
	return ERR_PTR(-ENOMEM);
}
EXPORT_SYMBOL(dm_io_client_create);

int dm_io_client_resize(unsigned num_pages, struct dm_io_client *client)
{
	return mempool_resize(client->pool, pages_to_ios(num_pages),
			      GFP_KERNEL);
}
EXPORT_SYMBOL(dm_io_client_resize);

void dm_io_client_destroy(struct dm_io_client *client)
{
	mempool_destroy(client->pool);
	bioset_free(client->bios);
	kfree(client);
}
EXPORT_SYMBOL(dm_io_client_destroy);
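
/*
 * Usage sketch (added for clarity; not part of the original file, error
 * handling elided): a caller sizes the client by the number of pages it
 * expects to have in flight and destroys it when done:
 *
 *	struct dm_io_client *client = dm_io_client_create(16);
 *
 *	if (IS_ERR(client))
 *		return PTR_ERR(client);
 *	...
 *	dm_io_client_destroy(client);
 */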

/*-----------------------------------------------------------------
 * We need to keep track of which region a bio is doing io for.
 * To avoid a memory allocation to store just 5 or 6 bits, we
 * ensure the 'struct io' pointer is aligned so enough low bits are
 * always zero and then combine it with the region number directly in
 * bi_private.
 *---------------------------------------------------------------*/
static void store_io_and_region_in_bio(struct bio *bio, struct io *io,
				       unsigned region)
{
	if (unlikely(!IS_ALIGNED((unsigned long)io, DM_IO_MAX_REGIONS))) {
		DMCRIT("Unaligned struct io pointer %p", io);
		BUG();
	}

	bio->bi_private = (void *)((unsigned long)io | region);
}

static void retrieve_io_and_region_from_bio(struct bio *bio, struct io **io,
					    unsigned *region)
{
	unsigned long val = (unsigned long)bio->bi_private;

	*io = (void *)(val & -(unsigned long)DM_IO_MAX_REGIONS);
	*region = val & (DM_IO_MAX_REGIONS - 1);
}
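
/*
 * Worked example (added for clarity; the addresses are made up): on a
 * 64-bit machine DM_IO_MAX_REGIONS is 64, so an aligned 'struct io'
 * pointer has its six low bits clear and a region number 0..63 fits in
 * them exactly:
 *
 *	io         = 0xffff880012345fc0	(low 6 bits zero)
 *	region     = 5
 *	bi_private = 0xffff880012345fc5	(io | region)
 *
 * retrieve_io_and_region_from_bio() masks with -64 (i.e. ~63UL) to
 * recover the pointer and with 63 to recover the region.
 */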

/*-----------------------------------------------------------------
 * We need an io object to keep track of the number of bios that
 * have been dispatched for a particular io.
 *---------------------------------------------------------------*/
static void dec_count(struct io *io, unsigned int region, int error)
{
	if (error) {
		set_bit(region, &io->error_bits);
		if (error == -EOPNOTSUPP)
			set_bit(region, &io->eopnotsupp_bits);
	}

	if (atomic_dec_and_test(&io->count)) {
		if (io->sleeper)
			wake_up_process(io->sleeper);

		else {
			unsigned long r = io->error_bits;
			io_notify_fn fn = io->callback;
			void *context = io->context;

			mempool_free(io, io->client->pool);
			fn(r, context);
		}
	}
}

static void endio(struct bio *bio, int error)
{
	struct io *io;
	unsigned region;

	if (error && bio_data_dir(bio) == READ)
		zero_fill_bio(bio);

	/*
	 * The bio destructor in bio_put() may use the io object.
	 */
	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_put(bio);

	dec_count(io, region, error);
}

/*-----------------------------------------------------------------
 * These little objects provide an abstraction for getting a new
 * destination page for io.
 *---------------------------------------------------------------*/
struct dpages {
	void (*get_page)(struct dpages *dp,
			 struct page **p, unsigned long *len, unsigned *offset);
	void (*next_page)(struct dpages *dp);

	unsigned context_u;
	void *context_ptr;
};
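
/*
 * Clarifying note (added): get_page() reports the current page, the
 * usable length from 'offset' to the end of that page, and the offset
 * itself; next_page() advances to the following page.  A consumer loops
 * roughly as do_region() does below:
 *
 *	dp->get_page(dp, &page, &len, &offset);
 *	... use up to 'len' bytes of 'page' starting at 'offset' ...
 *	dp->next_page(dp);
 */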

/*
 * Functions for getting the pages from a list.
 */
static void list_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	unsigned o = dp->context_u;
	struct page_list *pl = (struct page_list *) dp->context_ptr;

	*p = pl->page;
	*len = PAGE_SIZE - o;
	*offset = o;
}

static void list_next_page(struct dpages *dp)
{
	struct page_list *pl = (struct page_list *) dp->context_ptr;
	dp->context_ptr = pl->next;
	dp->context_u = 0;
}

static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offset)
{
	dp->get_page = list_get_page;
	dp->next_page = list_next_page;
	dp->context_u = offset;
	dp->context_ptr = pl;
}

/*
 * Functions for getting the pages from a bvec.
 */
static void bvec_get_page(struct dpages *dp,
			  struct page **p, unsigned long *len, unsigned *offset)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	*p = bvec->bv_page;
	*len = bvec->bv_len;
	*offset = bvec->bv_offset;
}

static void bvec_next_page(struct dpages *dp)
{
	struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr;
	dp->context_ptr = bvec + 1;
}

static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec)
{
	dp->get_page = bvec_get_page;
	dp->next_page = bvec_next_page;
	dp->context_ptr = bvec;
}

/*
 * Functions for getting the pages from a VMA.
 */
static void vm_get_page(struct dpages *dp,
			struct page **p, unsigned long *len, unsigned *offset)
{
	*p = vmalloc_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void vm_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void vm_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = vm_get_page;
	dp->next_page = vm_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}
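
/*
 * Worked example (added; the offset is made up): for a vmalloc'ed buffer
 * starting 0x123 bytes into a page, vm_dp_init() sets context_u = 0x123,
 * so the first vm_get_page() returns offset = 0x123 and
 * len = PAGE_SIZE - 0x123.  vm_next_page() then advances context_ptr to
 * the next page boundary, and every later chunk is a whole page.
 */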

static void dm_bio_destructor(struct bio *bio)
{
	unsigned region;
	struct io *io;

	retrieve_io_and_region_from_bio(bio, &io, &region);

	bio_free(bio, io->client->bios);
}

/*
 * Functions for getting the pages from kernel memory.
 */
static void km_get_page(struct dpages *dp, struct page **p, unsigned long *len,
			unsigned *offset)
{
	*p = virt_to_page(dp->context_ptr);
	*offset = dp->context_u;
	*len = PAGE_SIZE - dp->context_u;
}

static void km_next_page(struct dpages *dp)
{
	dp->context_ptr += PAGE_SIZE - dp->context_u;
	dp->context_u = 0;
}

static void km_dp_init(struct dpages *dp, void *data)
{
	dp->get_page = km_get_page;
	dp->next_page = km_next_page;
	dp->context_u = ((unsigned long) data) & (PAGE_SIZE - 1);
	dp->context_ptr = data;
}

/*-----------------------------------------------------------------
 * IO routines that accept a list of pages.
 *---------------------------------------------------------------*/
static void do_region(int rw, unsigned region, struct dm_io_region *where,
		      struct dpages *dp, struct io *io)
{
	struct bio *bio;
	struct page *page;
	unsigned long len;
	unsigned offset;
	unsigned num_bvecs;
	sector_t remaining = where->count;

	/*
	 * where->count may be zero if rw holds a write barrier and we
	 * need to send a zero-sized barrier.
	 */
	do {
		/*
		 * Allocate a suitably-sized bio.
		 */
		num_bvecs = dm_sector_div_up(remaining,
					     (PAGE_SIZE >> SECTOR_SHIFT));
		num_bvecs = min_t(int, bio_get_nr_vecs(where->bdev), num_bvecs);
		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
		bio->bi_sector = where->sector + (where->count - remaining);
		bio->bi_bdev = where->bdev;
		bio->bi_end_io = endio;
		bio->bi_destructor = dm_bio_destructor;
		store_io_and_region_in_bio(bio, io, region);

		/*
		 * Try and add as many pages as possible.
		 */
		while (remaining) {
			dp->get_page(dp, &page, &len, &offset);
			len = min(len, to_bytes(remaining));
			if (!bio_add_page(bio, page, len, offset))
				break;

			offset = 0;
			remaining -= to_sector(len);
			dp->next_page(dp);
		}

		atomic_inc(&io->count);
		submit_bio(rw, bio);
	} while (remaining);
}

static void dispatch_io(int rw, unsigned int num_regions,
			struct dm_io_region *where, struct dpages *dp,
			struct io *io, int sync)
{
	int i;
	struct dpages old_pages = *dp;

	BUG_ON(num_regions > DM_IO_MAX_REGIONS);

	if (sync)
		rw |= REQ_SYNC | REQ_UNPLUG;

	/*
	 * For multiple regions we need to be careful to rewind
	 * the dp object for each call to do_region.
	 */
	for (i = 0; i < num_regions; i++) {
		*dp = old_pages;
		if (where[i].count || (rw & REQ_HARDBARRIER))
			do_region(rw, i, where + i, dp, io);
	}

	/*
	 * Drop the extra reference that we were holding to avoid
	 * the io being completed too early.
	 */
	dec_count(io, 0, 0);
}
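
/*
 * Clarifying walkthrough (added): callers initialise io->count to 1
 * before calling dispatch_io().  With, say, two regions each dispatched
 * as a single bio, the count goes 1 -> 2 -> 3 during submission; the
 * final dec_count() above drops the guard reference (3 -> 2) and each
 * endio() then takes it 2 -> 1 -> 0, at which point the sleeper is woken
 * or the callback fires.  The guard reference prevents the io from
 * completing before all its bios have been submitted.
 */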

static int sync_io(struct dm_io_client *client, unsigned int num_regions,
		   struct dm_io_region *where, int rw, struct dpages *dp,
		   unsigned long *error_bits)
{
	/*
	 * gcc <= 4.3 can't do the alignment for stack variables, so we must
	 * align it on our own.
	 * volatile prevents the optimizer from removing or reusing
	 * "io_" field from the stack frame (allowed in ANSI C).
	 */
	volatile char io_[sizeof(struct io) + __alignof__(struct io) - 1];
	struct io *io = (struct io *)PTR_ALIGN(&io_, __alignof__(struct io));

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		return -EIO;
	}

retry:
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = current;
	io->client = client;

	dispatch_io(rw, num_regions, where, dp, io, 1);

	while (1) {
		set_current_state(TASK_UNINTERRUPTIBLE);

		if (!atomic_read(&io->count))
			break;

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	if (io->eopnotsupp_bits && (rw & REQ_HARDBARRIER)) {
		rw &= ~REQ_HARDBARRIER;
		goto retry;
	}

	if (error_bits)
		*error_bits = io->error_bits;

	return io->error_bits ? -EIO : 0;
}

static int async_io(struct dm_io_client *client, unsigned int num_regions,
		    struct dm_io_region *where, int rw, struct dpages *dp,
		    io_notify_fn fn, void *context)
{
	struct io *io;

	if (num_regions > 1 && (rw & RW_MASK) != WRITE) {
		WARN_ON(1);
		fn(1, context);
		return -EIO;
	}

	io = mempool_alloc(client->pool, GFP_NOIO);
	io->error_bits = 0;
	io->eopnotsupp_bits = 0;
	atomic_set(&io->count, 1); /* see dispatch_io() */
	io->sleeper = NULL;
	io->client = client;
	io->callback = fn;
	io->context = context;

	dispatch_io(rw, num_regions, where, dp, io, 0);
	return 0;
}

static int dp_init(struct dm_io_request *io_req, struct dpages *dp)
{
	/* Set up dpages based on memory type */
	switch (io_req->mem.type) {
	case DM_IO_PAGE_LIST:
		list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
		break;

	case DM_IO_BVEC:
		bvec_dp_init(dp, io_req->mem.ptr.bvec);
		break;

	case DM_IO_VMA:
		vm_dp_init(dp, io_req->mem.ptr.vma);
		break;

	case DM_IO_KMEM:
		km_dp_init(dp, io_req->mem.ptr.addr);
		break;

	default:
		return -EINVAL;
	}

	return 0;
}

/*
 * New collapsed (a)synchronous interface.
 *
 * If the IO is asynchronous (i.e. it has notify.fn), you must either unplug
 * the queue with blk_unplug() some time later or set REQ_SYNC in
 * io_req->bi_rw. If you fail to do one of these, the IO will be submitted to
 * the disk after q->unplug_delay, which defaults to 3ms in blk-settings.c.
 */
int dm_io(struct dm_io_request *io_req, unsigned num_regions,
	  struct dm_io_region *where, unsigned long *sync_error_bits)
{
	int r;
	struct dpages dp;

	r = dp_init(io_req, &dp);
	if (r)
		return r;

	if (!io_req->notify.fn)
		return sync_io(io_req->client, num_regions, where,
			       io_req->bi_rw, &dp, sync_error_bits);

	return async_io(io_req->client, num_regions, where, io_req->bi_rw,
			&dp, io_req->notify.fn, io_req->notify.context);
}
EXPORT_SYMBOL(dm_io);
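
/*
 * Usage sketch (added for clarity; not part of the original file, error
 * handling elided): a synchronous read of 8 sectors from 'bdev' into a
 * kernel-memory buffer.  Leaving notify.fn NULL selects the synchronous
 * path:
 *
 *	struct dm_io_region region = {
 *		.bdev   = bdev,
 *		.sector = 0,
 *		.count  = 8,
 *	};
 *	struct dm_io_request io_req = {
 *		.bi_rw        = READ,
 *		.mem.type     = DM_IO_KMEM,
 *		.mem.ptr.addr = buffer,
 *		.notify.fn    = NULL,
 *		.client       = client,
 *	};
 *
 *	r = dm_io(&io_req, 1, &region, NULL);
 */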

int __init dm_io_init(void)
{
	_dm_io_cache = KMEM_CACHE(io, 0);
	if (!_dm_io_cache)
		return -ENOMEM;

	return 0;
}

void dm_io_exit(void)
{
	kmem_cache_destroy(_dm_io_cache);
	_dm_io_cache = NULL;
}