dm: use common __issue_target_request for flush and discard support
[deliverable/linux.git] / drivers/md/dm.c

/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

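/*
 * Illustrative sketch (not part of this file): a request-based target can
 * recover its per-request context from a clone request through
 * dm_get_rq_mapinfo().  The target name and context type below are
 * hypothetical; ->ptr is the slot a target's map_rq() typically uses to
 * stash its own state for the lifetime of the request.
 *
 *	static void my_inspect_clone(struct request *clone)
 *	{
 *		union map_info *info = dm_get_rq_mapinfo(clone);
 *		struct my_ctx *ctx = info ? info->ptr : NULL;
 *
 *		if (ctx)
 *			my_ctx_update(ctx);
 *	}
 */
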
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Protect barrier_error from concurrent endio processing
	 * in request-based dm.
	 */
	spinlock_t barrier_error_lock;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;
	struct work_struct barrier_work;

	/* A pointer to the currently processing pre/post flush request */
	struct request *flush_request;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* For saving the address of __make_request for request based dm */
	make_request_fn *saved_make_request_fn;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	lock_kernel();
	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);
	unlock_kernel();

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	lock_kernel();
	atomic_dec(&md->open_count);
	dm_put(md);
	unlock_kernel();

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
	mempool_free(info, info->tio->md->io_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;
	int rw = bio_data_dir(io->bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight[rw] = pending =
		atomic_dec_return(&md->pending[rw]);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}

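/*
 * Usage pattern (sketch, mirroring the callers elsewhere in this file):
 * every consumer brackets access to the live table with a get/put pair
 * so the table cannot be freed out from under it.
 *
 *	struct dm_table *map = dm_get_live_table(md);
 *
 *	if (map) {
 *		struct dm_target *ti = dm_table_find_target(map, sector);
 *		... use ti ...
 *		dm_table_put(map);
 *	}
 */
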
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

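/*
 * Worked example for the capacity check above: a geometry of 1024
 * cylinders, 255 heads and 63 sectors/track gives
 * sz = 1024 * 255 * 63 = 16450560 sectors (~7.8 GiB with 512-byte
 * sectors), so a requested start sector of, say, 20000000 would be
 * rejected with -EINVAL.
 */
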
/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio->bi_rw & REQ_HARDBARRIER) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 *
			 * We ignore -EOPNOTSUPP for empty flush reported by
			 * underlying devices. We assume that if the device
			 * doesn't support empty barriers, it doesn't need
			 * cache flushing commands.
			 */
			if (!md->barrier_error &&
			    !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP))
				md->barrier_error = io_error;
			end_io_acct(io);
			free_io(md, io);
		} else {
			end_io_acct(io);
			free_io(md, io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}
	}
}

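/*
 * Informal summary of the DM_ENDIO_REQUEUE path above: while a noflush
 * suspend is in progress, an I/O that a target asked to push back is
 * re-queued at the head of md->deferred (barriers excepted) and is
 * resubmitted by the deferred-I/O worker after resume; if the noflush
 * suspend was interrupted, the error degrades to -EIO instead.
 */
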
static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notify the upper layer of the error yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notify the upper layer of the data completion.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static void store_barrier_error(struct mapped_device *md, int error)
{
	unsigned long flags;

	spin_lock_irqsave(&md->barrier_error_lock, flags);
	/*
	 * Basically, the first error is taken, but:
	 * -EOPNOTSUPP supersedes any I/O error.
	 * Requeue request supersedes any I/O error but -EOPNOTSUPP.
	 */
	if (!md->barrier_error || error == -EOPNOTSUPP ||
	    (md->barrier_error != -EOPNOTSUPP &&
	     error == DM_ENDIO_REQUEUE))
		md->barrier_error = error;
	spin_unlock_irqrestore(&md->barrier_error_lock, flags);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	int run_queue = 1;
	bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);

	if (unlikely(is_barrier)) {
		if (unlikely(error))
			store_barrier_error(md, error);
		run_queue = 0;
	} else
		blk_end_request_all(rq, error);

	rq_completed(md, rw, run_queue);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		dm_end_request(clone, DM_ENDIO_REQUEUE);
		return;
	}

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

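/*
 * Illustrative sketch (hypothetical target): an rq_end_io hook steers
 * the dispatch in dm_done() above through its return value.  Returning
 * DM_ENDIO_REQUEUE asks dm to requeue the original request; a value
 * <= 0 completes it; DM_ENDIO_INCOMPLETE leaves completion to the
 * target.
 *
 *	static int my_rq_end_io(struct dm_target *ti, struct request *clone,
 *				int error, union map_info *map_context)
 *	{
 *		if (error == -EBUSY)
 *			return DM_ENDIO_REQUEUE;
 *
 *		return error;
 *	}
 */
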
/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request. So can't use
		 * softirq_done with the original.
		 * Pass the clone to dm_done() directly in this special case.
		 * It is safe (even if clone->q->queue_lock is held here)
		 * because there is no I/O dispatching during the completion
		 * of barrier clone.
		 */
		dm_done(clone, error, true);
		return;
	}

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		BUG_ON(error > 0);
		dm_end_request(clone, error);
		return;
	}

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* actually freed here, because it is allocated
	 * from dm's own mempool and REQ_ALLOCED isn't set in
	 * clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock. Otherwise, deadlock could occur because:
	 * - another request may be submitted by the upper level driver
	 *   of the stacking during the completion
	 * - the submission which requires queue lock may be done
	 *   against this queue
	 */
	dm_complete_request(clone, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

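/*
 * Worked example for the boundary arithmetic above (ti->split_io must
 * be a power of two for the mask to be valid): with split_io = 8 and
 * offset = 13, (13 + 8) & ~7 = 16, so boundary = 16 - 13 = 3 sectors,
 * i.e. the I/O is clipped at the next 8-sector chunk boundary.
 */
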
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~REQ_HARDBARRIER;
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
{
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	return tio;
}

static void __issue_target_request(struct clone_info *ci, struct dm_target *ti,
				   unsigned request_nr)
{
	struct dm_target_io *tio = alloc_tio(ci, ti);
	struct bio *clone;

	tio->info.target_request_nr = request_nr;

	/*
	 * Discard requests require the bio's inline iovecs be initialized.
	 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
	 * and discard, so no need for concern about wasted bvec allocations.
	 */
	clone = bio_alloc_bioset(GFP_NOIO, ci->bio->bi_max_vecs, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;

	__map_bio(ti, clone, tio);
}

static void __issue_target_requests(struct clone_info *ci, struct dm_target *ti,
				    unsigned num_requests)
{
	unsigned request_nr;

	for (request_nr = 0; request_nr < num_requests; request_nr++)
		__issue_target_request(ci, ti, request_nr);
}

static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__issue_target_requests(ci, ti, ti->num_flush_requests);

	ci->sector_count = 0;

	return 0;
}

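/*
 * Note: __issue_target_request() above is the common helper this change
 * introduces for both flush (empty barrier) and discard support.  Every
 * clone it issues carries its ordinal in tio->info.target_request_nr,
 * so a target advertising e.g. num_flush_requests = 2 can tell the
 * copies apart in its map function.  Hypothetical sketch (target type
 * and context are illustrative only):
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio,
 *			  union map_info *map_context)
 *	{
 *		struct my_ctx *c = ti->private;
 *		unsigned nr = map_context->target_request_nr;
 *
 *		if (bio_empty_barrier(bio)) {
 *			bio->bi_bdev = c->dev[nr]->bdev;
 *			return DM_MAPIO_REMAPPED;
 *		}
 *		...
 *	}
 */
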
/*
 * Perform all io with a single clone.
 */
static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target_io *tio;

	tio = alloc_tio(ci, ti);
	clone = clone_bio(bio, ci->sector, ci->idx,
			  bio->bi_vcnt - ci->idx, ci->sector_count,
			  ci->md->bs);
	__map_bio(ti, clone, tio);
	ci->sector_count = 0;
}

static int __clone_and_map_discard(struct clone_info *ci)
{
	struct dm_target *ti;
	sector_t max;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	/*
	 * Even though the device advertised discard support,
	 * reconfiguration might have changed that since the
	 * check was performed.
	 */

	if (!ti->num_discard_requests)
		return -EOPNOTSUPP;

	max = max_io_len(ci->md, ci->sector, ti);

	if (ci->sector_count > max)
		/*
		 * FIXME: Handle a discard that spans two or more targets.
		 */
		return -EOPNOTSUPP;

	__issue_target_requests(ci, ti, ti->num_discard_requests);

	ci->sector_count = 0;

	return 0;
}

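/*
 * For reference: a target opts in to the two paths above from its
 * constructor by setting non-zero request counts, e.g.
 *
 *	ti->num_flush_requests = 1;
 *	ti->num_discard_requests = 1;
 *
 * A target that leaves num_discard_requests at zero makes discards fail
 * here with -EOPNOTSUPP.
 */
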
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio_empty_barrier(bio)))
		return __clone_and_map_empty_barrier(ci);

	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __clone_and_map_discard(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		__clone_and_map_simple(ci, ti);

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		tio = alloc_tio(ci, ti);
		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);
			}

			len = min(remaining, max);

			tio = alloc_tio(ci, ti);
			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_live_table(md);
	if (unlikely(!ci.map)) {
		if (!(bio->bi_rw & REQ_HARDBARRIER))
			bio_io_error(bio);
		else
			if (!md->barrier_error)
				md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	if (unlikely(bio_empty_barrier(bio)))
		ci.sector_count = 1;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries. So always set max_size to 0, and the code below allows
	 * just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int _dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static int dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	return md->saved_make_request_fn(q, bio); /* call __make_request() */
}

static int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

static int dm_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		return dm_make_request(q, bio);

	return _dm_request(q, bio);
}

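/*
 * Informal summary of the submission path: dm_request() is the queue's
 * make_request function.  For bio-based devices it runs _dm_request(),
 * which either defers the bio (suspend or barrier processing in
 * progress) or splits and maps it via __split_and_process_bio().  For
 * request-based devices the bio is handed to the block layer's
 * __make_request() and mapped later, per request, in dm_request_fn().
 */
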
static bool dm_rq_is_flush_request(struct request *rq)
{
	if (rq->cmd_flags & REQ_FLUSH)
		return true;
	else
		return false;
}

void dm_dispatch_request(struct request *rq)
{
	int r;

	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
	if (r)
		dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);

static void dm_rq_bio_destructor(struct bio *bio)
{
	struct dm_rq_clone_bio_info *info = bio->bi_private;
	struct mapped_device *md = info->tio->md;

	free_bio_info(info);
	bio_free(bio, md->bs);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct mapped_device *md = tio->md;
	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);

	if (!info)
		return -ENOMEM;

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;
	bio->bi_private = info;
	bio->bi_destructor = dm_rq_bio_destructor;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
{
	int r;

	if (dm_rq_is_flush_request(rq)) {
		blk_rq_init(NULL, clone);
		clone->cmd_type = REQ_TYPE_FS;
		clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
	} else {
		r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
				      dm_rq_bio_constructor, tio);
		if (r)
			return r;

		clone->cmd = rq->cmd;
		clone->cmd_len = rq->cmd_len;
		clone->sense = rq->sense;
		clone->buffer = rq->buffer;
	}

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	return 0;
}

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				gfp_t gfp_mask)
{
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	tio->md = md;
	tio->ti = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {
		/* -ENOMEM */
		free_rq_tio(tio);
		return NULL;
	}

	return clone;
}

/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(dm_rq_is_flush_request(rq)))
		return BLKPREP_OK;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	clone = clone_rq(rq, md, GFP_ATOMIC);
	if (!clone)
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

/*
 * Returns:
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
 */
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
{
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);

	tio->ti = ti;
	r = ti->type->map_rq(ti, clone, &tio->info);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		requeued = 1;
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
		break;
	}

	return requeued;
}

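/*
 * Illustrative sketch (hypothetical target, modelled on request-based
 * multipath): a map_rq hook picks an underlying queue and lets dm
 * dispatch the already-prepared clone.
 *
 *	static int my_map_rq(struct dm_target *ti, struct request *clone,
 *			     union map_info *map_context)
 *	{
 *		struct my_ctx *c = ti->private;
 *
 *		if (!c->path)
 *			return DM_MAPIO_REQUEUE;
 *
 *		clone->q = bdev_get_queue(c->path->bdev);
 *		clone->rq_disk = c->path->bdev->bd_disk;
 *		return DM_MAPIO_REMAPPED;
 *	}
 */
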
1711/*
1712 * q->request_fn for request-based dm.
1713 * Called with the queue lock held.
1714 */
1715static void dm_request_fn(struct request_queue *q)
1716{
1717 struct mapped_device *md = q->queuedata;
7c666411 1718 struct dm_table *map = dm_get_live_table(md);
cec47e3d 1719 struct dm_target *ti;
b4324fee 1720 struct request *rq, *clone;
cec47e3d
KU
1721
1722 /*
b4324fee
KU
1723 * For suspend, check blk_queue_stopped() and increment
1724 * ->pending within a single queue_lock not to increment the
1725 * number of in-flight I/Os after the queue is stopped in
1726 * dm_suspend().
cec47e3d
KU
1727 */
1728 while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
1729 rq = blk_peek_request(q);
1730 if (!rq)
1731 goto plug_and_out;
1732
d0bcb878
KU
1733 if (unlikely(dm_rq_is_flush_request(rq))) {
1734 BUG_ON(md->flush_request);
1735 md->flush_request = rq;
1736 blk_start_request(rq);
1737 queue_work(md->wq, &md->barrier_work);
1738 goto out;
1739 }
1740
cec47e3d
KU
1741 ti = dm_table_find_target(map, blk_rq_pos(rq));
1742 if (ti->type->busy && ti->type->busy(ti))
1743 goto plug_and_out;
1744
1745 blk_start_request(rq);
b4324fee
KU
1746 clone = rq->special;
1747 atomic_inc(&md->pending[rq_data_dir(clone)]);
1748
cec47e3d 1749 spin_unlock(q->queue_lock);
9eef87da
KU
1750 if (map_request(ti, clone, md))
1751 goto requeued;
1752
cec47e3d
KU
1753 spin_lock_irq(q->queue_lock);
1754 }
1755
1756 goto out;
1757
9eef87da
KU
1758requeued:
1759 spin_lock_irq(q->queue_lock);
1760
cec47e3d
KU
1761plug_and_out:
1762 if (!elv_queue_empty(q))
1763 /* Some requests still remain, retry later */
1764 blk_plug_device(q);
1765
1766out:
1767 dm_table_put(map);
1768
1769 return;
1770}
1771
1772int dm_underlying_device_busy(struct request_queue *q)
1773{
1774 return blk_lld_busy(q);
1775}
1776EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1777
1778static int dm_lld_busy(struct request_queue *q)
1779{
1780 int r;
1781 struct mapped_device *md = q->queuedata;
7c666411 1782 struct dm_table *map = dm_get_live_table(md);
cec47e3d
KU
1783
1784 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1785 r = 1;
1786 else
1787 r = dm_table_any_busy_target(map);
1788
1789 dm_table_put(map);
1790
1791 return r;
1792}
1793
165125e1 1794static void dm_unplug_all(struct request_queue *q)
1da177e4
LT
1795{
1796 struct mapped_device *md = q->queuedata;
7c666411 1797 struct dm_table *map = dm_get_live_table(md);
1da177e4
LT
1798
1799 if (map) {
cec47e3d
KU
1800 if (dm_request_based(md))
1801 generic_unplug_device(q);
1802
1da177e4
LT
1803 dm_table_unplug_all(map);
1804 dm_table_put(map);
1805 }
1806}
1807
1808static int dm_any_congested(void *congested_data, int bdi_bits)
1809{
8a57dfc6
CS
1810 int r = bdi_bits;
1811 struct mapped_device *md = congested_data;
1812 struct dm_table *map;
1da177e4 1813
1eb787ec 1814 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
7c666411 1815 map = dm_get_live_table(md);
8a57dfc6 1816 if (map) {
cec47e3d
KU
1817 /*
1818 * Request-based dm cares about only own queue for
1819 * the query about congestion status of request_queue
1820 */
1821 if (dm_request_based(md))
1822 r = md->queue->backing_dev_info.state &
1823 bdi_bits;
1824 else
1825 r = dm_table_any_congested(map, bdi_bits);
1826
8a57dfc6
CS
1827 dm_table_put(map);
1828 }
1829 }
1830
1da177e4
LT
1831 return r;
1832}
1833
1834/*-----------------------------------------------------------------
1835 * An IDR is used to keep track of allocated minor numbers.
1836 *---------------------------------------------------------------*/
1da177e4
LT
1837static DEFINE_IDR(_minor_idr);
1838
2b06cfff 1839static void free_minor(int minor)
1da177e4 1840{
f32c10b0 1841 spin_lock(&_minor_lock);
1da177e4 1842 idr_remove(&_minor_idr, minor);
f32c10b0 1843 spin_unlock(&_minor_lock);
1da177e4
LT
1844}
1845
1846/*
1847 * See if the device with a specific minor # is free.
1848 */
cf13ab8e 1849static int specific_minor(int minor)
1da177e4
LT
1850{
1851 int r, m;
1852
1853 if (minor >= (1 << MINORBITS))
1854 return -EINVAL;
1855
62f75c2f
JM
1856 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
1857 if (!r)
1858 return -ENOMEM;
1859
f32c10b0 1860 spin_lock(&_minor_lock);
1da177e4
LT
1861
1862 if (idr_find(&_minor_idr, minor)) {
1863 r = -EBUSY;
1864 goto out;
1865 }
1866
ba61fdd1 1867 r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
62f75c2f 1868 if (r)
1da177e4 1869 goto out;
1da177e4
LT
1870
1871 if (m != minor) {
1872 idr_remove(&_minor_idr, m);
1873 r = -EBUSY;
1874 goto out;
1875 }
1876
1877out:
f32c10b0 1878 spin_unlock(&_minor_lock);
1da177e4
LT
1879 return r;
1880}
1881
cf13ab8e 1882static int next_free_minor(int *minor)
1da177e4 1883{
2b06cfff 1884 int r, m;
1da177e4 1885
1da177e4 1886 r = idr_pre_get(&_minor_idr, GFP_KERNEL);
62f75c2f
JM
1887 if (!r)
1888 return -ENOMEM;
1889
f32c10b0 1890 spin_lock(&_minor_lock);
1da177e4 1891
ba61fdd1 1892 r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
cf13ab8e 1893 if (r)
1da177e4 1894 goto out;
1da177e4
LT
1895
1896 if (m >= (1 << MINORBITS)) {
1897 idr_remove(&_minor_idr, m);
1898 r = -ENOSPC;
1899 goto out;
1900 }
1901
1902 *minor = m;
1903
1904out:
f32c10b0 1905 spin_unlock(&_minor_lock);
1da177e4
LT
1906 return r;
1907}
1908
83d5cde4 1909static const struct block_device_operations dm_blk_dops;
1da177e4 1910
53d5914f 1911static void dm_wq_work(struct work_struct *work);
d0bcb878 1912static void dm_rq_barrier_work(struct work_struct *work);
53d5914f 1913
4a0b4ddf
MS
1914static void dm_init_md_queue(struct mapped_device *md)
1915{
1916 /*
1917 * Request-based dm devices cannot be stacked on top of bio-based dm
1918 * devices. The type of this dm device has not been decided yet.
1919 * The type is decided at the first table loading time.
1920 * To prevent problematic device stacking, clear the queue flag
1921 * for request stacking support until then.
1922 *
1923 * This queue is new, so no concurrency on the queue_flags.
1924 */
1925 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1926
1927 md->queue->queuedata = md;
1928 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1929 md->queue->backing_dev_info.congested_data = md;
1930 blk_queue_make_request(md->queue, dm_request);
1931 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
1932 md->queue->unplug_fn = dm_unplug_all;
1933 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1934}
1935
1da177e4
LT
1936/*
1937 * Allocate and initialise a blank device with a given minor.
1938 */
2b06cfff 1939static struct mapped_device *alloc_dev(int minor)
1da177e4
LT
1940{
1941 int r;
cf13ab8e 1942 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
ba61fdd1 1943 void *old_md;
1da177e4
LT
1944
1945 if (!md) {
1946 DMWARN("unable to allocate device, out of memory.");
1947 return NULL;
1948 }
1949
10da4f79 1950 if (!try_module_get(THIS_MODULE))
6ed7ade8 1951 goto bad_module_get;
10da4f79 1952
1da177e4 1953 /* get a minor number for the dev */
2b06cfff 1954 if (minor == DM_ANY_MINOR)
cf13ab8e 1955 r = next_free_minor(&minor);
2b06cfff 1956 else
cf13ab8e 1957 r = specific_minor(minor);
1da177e4 1958 if (r < 0)
6ed7ade8 1959 goto bad_minor;
1da177e4 1960
a5664dad 1961 md->type = DM_TYPE_NONE;
2ca3310e 1962 init_rwsem(&md->io_lock);
e61290a4 1963 mutex_init(&md->suspend_lock);
a5664dad 1964 mutex_init(&md->type_lock);
022c2611 1965 spin_lock_init(&md->deferred_lock);
d0bcb878 1966 spin_lock_init(&md->barrier_error_lock);
1da177e4
LT
1967 rwlock_init(&md->map_lock);
1968 atomic_set(&md->holders, 1);
5c6bd75d 1969 atomic_set(&md->open_count, 0);
1da177e4 1970 atomic_set(&md->event_nr, 0);
7a8c3d3b
MA
1971 atomic_set(&md->uevent_seq, 0);
1972 INIT_LIST_HEAD(&md->uevent_list);
1973 spin_lock_init(&md->uevent_lock);
1da177e4 1974
4a0b4ddf 1975 md->queue = blk_alloc_queue(GFP_KERNEL);
1da177e4 1976 if (!md->queue)
6ed7ade8 1977 goto bad_queue;
1da177e4 1978
4a0b4ddf 1979 dm_init_md_queue(md);
9faf400f 1980
1da177e4
LT
1981 md->disk = alloc_disk(1);
1982 if (!md->disk)
6ed7ade8 1983 goto bad_disk;
1da177e4 1984
316d315b
NK
1985 atomic_set(&md->pending[0], 0);
1986 atomic_set(&md->pending[1], 0);
f0b04115 1987 init_waitqueue_head(&md->wait);
53d5914f 1988 INIT_WORK(&md->work, dm_wq_work);
d0bcb878 1989 INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
f0b04115
JM
1990 init_waitqueue_head(&md->eventq);
1991
1da177e4
LT
1992 md->disk->major = _major;
1993 md->disk->first_minor = minor;
1994 md->disk->fops = &dm_blk_dops;
1995 md->disk->queue = md->queue;
1996 md->disk->private_data = md;
1997 sprintf(md->disk->disk_name, "dm-%d", minor);
1998 add_disk(md->disk);
7e51f257 1999 format_dev_t(md->name, MKDEV(_major, minor));
1da177e4 2000
304f3f6a
MB
2001 md->wq = create_singlethread_workqueue("kdmflush");
2002 if (!md->wq)
2003 goto bad_thread;
2004
32a926da
MP
2005 md->bdev = bdget_disk(md->disk, 0);
2006 if (!md->bdev)
2007 goto bad_bdev;
2008
ba61fdd1 2009 /* Populate the mapping, nobody knows we exist yet */
f32c10b0 2010 spin_lock(&_minor_lock);
ba61fdd1 2011 old_md = idr_replace(&_minor_idr, md, minor);
f32c10b0 2012 spin_unlock(&_minor_lock);
ba61fdd1
JM
2013
2014 BUG_ON(old_md != MINOR_ALLOCED);
2015
1da177e4
LT
2016 return md;
2017
32a926da
MP
2018bad_bdev:
2019 destroy_workqueue(md->wq);
304f3f6a 2020bad_thread:
03022c54 2021 del_gendisk(md->disk);
304f3f6a 2022 put_disk(md->disk);
6ed7ade8 2023bad_disk:
1312f40e 2024 blk_cleanup_queue(md->queue);
6ed7ade8 2025bad_queue:
1da177e4 2026 free_minor(minor);
6ed7ade8 2027bad_minor:
10da4f79 2028 module_put(THIS_MODULE);
6ed7ade8 2029bad_module_get:
1da177e4
LT
2030 kfree(md);
2031 return NULL;
2032}
2033
ae9da83f
JN
2034static void unlock_fs(struct mapped_device *md);
2035
1da177e4
LT
2036static void free_dev(struct mapped_device *md)
2037{
f331c029 2038 int minor = MINOR(disk_devt(md->disk));
63d94e48 2039
32a926da
MP
2040 unlock_fs(md);
2041 bdput(md->bdev);
304f3f6a 2042 destroy_workqueue(md->wq);
e6ee8c0b
KU
2043 if (md->tio_pool)
2044 mempool_destroy(md->tio_pool);
2045 if (md->io_pool)
2046 mempool_destroy(md->io_pool);
2047 if (md->bs)
2048 bioset_free(md->bs);
9c47008d 2049 blk_integrity_unregister(md->disk);
1da177e4 2050 del_gendisk(md->disk);
63d94e48 2051 free_minor(minor);
fba9f90e
JM
2052
2053 spin_lock(&_minor_lock);
2054 md->disk->private_data = NULL;
2055 spin_unlock(&_minor_lock);
2056
1da177e4 2057 put_disk(md->disk);
1312f40e 2058 blk_cleanup_queue(md->queue);
10da4f79 2059 module_put(THIS_MODULE);
1da177e4
LT
2060 kfree(md);
2061}
2062
e6ee8c0b
KU
2063static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2064{
2065 struct dm_md_mempools *p;
2066
2067 if (md->io_pool && md->tio_pool && md->bs)
2068 /* the md already has necessary mempools */
2069 goto out;
2070
2071 p = dm_table_get_md_mempools(t);
2072 BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);
2073
2074 md->io_pool = p->io_pool;
2075 p->io_pool = NULL;
2076 md->tio_pool = p->tio_pool;
2077 p->tio_pool = NULL;
2078 md->bs = p->bs;
2079 p->bs = NULL;
2080
2081out:
2082 /* mempool binding completed; the table no longer needs its mempools */
2083 dm_table_free_md_mempools(t);
2084}
2085
1da177e4
LT
2086/*
2087 * Bind a table to the device.
2088 */
2089static void event_callback(void *context)
2090{
7a8c3d3b
MA
2091 unsigned long flags;
2092 LIST_HEAD(uevents);
1da177e4
LT
2093 struct mapped_device *md = (struct mapped_device *) context;
2094
7a8c3d3b
MA
2095 spin_lock_irqsave(&md->uevent_lock, flags);
2096 list_splice_init(&md->uevent_list, &uevents);
2097 spin_unlock_irqrestore(&md->uevent_lock, flags);
2098
ed9e1982 2099 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
7a8c3d3b 2100
1da177e4
LT
2101 atomic_inc(&md->event_nr);
2102 wake_up(&md->eventq);
2103}
2104
4e90188b 2105static void __set_size(struct mapped_device *md, sector_t size)
1da177e4 2106{
4e90188b 2107 set_capacity(md->disk, size);
1da177e4 2108
db8fef4f
MP
2109 mutex_lock(&md->bdev->bd_inode->i_mutex);
2110 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
2111 mutex_unlock(&md->bdev->bd_inode->i_mutex);
1da177e4
LT
2112}
2113
042d2a9b
AK
2114/*
2115 * Returns old map, which caller must destroy.
2116 */
2117static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2118 struct queue_limits *limits)
1da177e4 2119{
042d2a9b 2120 struct dm_table *old_map;
165125e1 2121 struct request_queue *q = md->queue;
1da177e4 2122 sector_t size;
523d9297 2123 unsigned long flags;
1da177e4
LT
2124
2125 size = dm_table_get_size(t);
3ac51e74
DW
2126
2127 /*
2128 * Wipe any geometry if the size of the table changed.
2129 */
2130 if (size != get_capacity(md->disk))
2131 memset(&md->geometry, 0, sizeof(md->geometry));
2132
32a926da 2133 __set_size(md, size);
d5816876 2134
2ca3310e
AK
2135 dm_table_event_callback(t, event_callback, md);
2136
e6ee8c0b
KU
2137 /*
2138 * If the old table type wasn't request-based, the queue was not
2139 * stopped during suspension, so stop it now to prevent I/O from
2140 * being mapped before the resume.
2141 * This must be done before setting the queue restrictions,
2142 * because request-based dm may start running right after they are set.
2143 */
2144 if (dm_table_request_based(t) && !blk_queue_stopped(q))
2145 stop_queue(q);
2146
2147 __bind_mempools(md, t);
2148
523d9297 2149 write_lock_irqsave(&md->map_lock, flags);
042d2a9b 2150 old_map = md->map;
1da177e4 2151 md->map = t;
754c5fc7 2152 dm_table_set_restrictions(t, q, limits);
523d9297 2153 write_unlock_irqrestore(&md->map_lock, flags);
1da177e4 2154
042d2a9b 2155 return old_map;
1da177e4
LT
2156}
2157
a7940155
AK
2158/*
2159 * Returns unbound table for the caller to free.
2160 */
2161static struct dm_table *__unbind(struct mapped_device *md)
1da177e4
LT
2162{
2163 struct dm_table *map = md->map;
523d9297 2164 unsigned long flags;
1da177e4
LT
2165
2166 if (!map)
a7940155 2167 return NULL;
1da177e4
LT
2168
2169 dm_table_event_callback(map, NULL, NULL);
523d9297 2170 write_lock_irqsave(&md->map_lock, flags);
1da177e4 2171 md->map = NULL;
523d9297 2172 write_unlock_irqrestore(&md->map_lock, flags);
a7940155
AK
2173
2174 return map;
1da177e4
LT
2175}
2176
2177/*
2178 * Constructor for a new device.
2179 */
2b06cfff 2180int dm_create(int minor, struct mapped_device **result)
1da177e4
LT
2181{
2182 struct mapped_device *md;
2183
2b06cfff 2184 md = alloc_dev(minor);
1da177e4
LT
2185 if (!md)
2186 return -ENXIO;
2187
784aae73
MB
2188 dm_sysfs_init(md);
2189
1da177e4
LT
2190 *result = md;
2191 return 0;
2192}
2193
a5664dad
MS
2194/*
2195 * Functions to manage md->type.
2196 * All are required to hold md->type_lock.
2197 */
2198void dm_lock_md_type(struct mapped_device *md)
2199{
2200 mutex_lock(&md->type_lock);
2201}
2202
2203void dm_unlock_md_type(struct mapped_device *md)
2204{
2205 mutex_unlock(&md->type_lock);
2206}
2207
2208void dm_set_md_type(struct mapped_device *md, unsigned type)
2209{
2210 md->type = type;
2211}
2212
2213unsigned dm_get_md_type(struct mapped_device *md)
2214{
2215 return md->type;
2216}
2217
4a0b4ddf
MS
2218/*
2219 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2220 */
2221static int dm_init_request_based_queue(struct mapped_device *md)
2222{
2223 struct request_queue *q = NULL;
2224
2225 if (md->queue->elevator)
2226 return 1;
2227
2228 /* Fully initialize the queue */
2229 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2230 if (!q)
2231 return 0;
2232
2233 md->queue = q;
2234 md->saved_make_request_fn = md->queue->make_request_fn;
2235 dm_init_md_queue(md);
2236 blk_queue_softirq_done(md->queue, dm_softirq_done);
2237 blk_queue_prep_rq(md->queue, dm_prep_fn);
2238 blk_queue_lld_busy(md->queue, dm_lld_busy);
2239 blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);
2240
2241 elv_register_queue(md->queue);
2242
2243 return 1;
2244}
2245
2246/*
2247 * Set up the DM device's queue based on md's type.
2248 */
2249int dm_setup_md_queue(struct mapped_device *md)
2250{
2251 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2252 !dm_init_request_based_queue(md)) {
2253 DMWARN("Cannot initialize queue for request-based mapped device");
2254 return -EINVAL;
2255 }
2256
2257 return 0;
2258}
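/*
 * Illustrative sketch (assumption, modeled on dm-ioctl's table_load):
 * how the md->type helpers above fit together.  The type is fixed
 * under type_lock the first time a table is loaded, and the queue is
 * then fully initialised for that type by dm_setup_md_queue().
 */
static int example_bind_type(struct mapped_device *md, struct dm_table *t)
{
	int r = 0;

	dm_lock_md_type(md);

	if (dm_get_md_type(md) == DM_TYPE_NONE) {
		/* First table load: commit the type and set up the queue. */
		dm_set_md_type(md, dm_table_get_type(t));
		r = dm_setup_md_queue(md);
	} else if (dm_get_md_type(md) != dm_table_get_type(t)) {
		/* The type cannot change once it has been decided. */
		r = -EINVAL;
	}

	dm_unlock_md_type(md);

	return r;
}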
2259
637842cf 2260static struct mapped_device *dm_find_md(dev_t dev)
1da177e4
LT
2261{
2262 struct mapped_device *md;
1da177e4
LT
2263 unsigned minor = MINOR(dev);
2264
2265 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2266 return NULL;
2267
f32c10b0 2268 spin_lock(&_minor_lock);
1da177e4
LT
2269
2270 md = idr_find(&_minor_idr, minor);
fba9f90e 2271 if (md && (md == MINOR_ALLOCED ||
f331c029 2272 (MINOR(disk_devt(dm_disk(md))) != minor) ||
abdc568b 2273 dm_deleting_md(md) ||
17b2f66f 2274 test_bit(DMF_FREEING, &md->flags))) {
637842cf 2275 md = NULL;
fba9f90e
JM
2276 goto out;
2277 }
1da177e4 2278
fba9f90e 2279out:
f32c10b0 2280 spin_unlock(&_minor_lock);
1da177e4 2281
637842cf
DT
2282 return md;
2283}
2284
d229a958
DT
2285struct mapped_device *dm_get_md(dev_t dev)
2286{
2287 struct mapped_device *md = dm_find_md(dev);
2288
2289 if (md)
2290 dm_get(md);
2291
2292 return md;
2293}
2294
9ade92a9 2295void *dm_get_mdptr(struct mapped_device *md)
637842cf 2296{
9ade92a9 2297 return md->interface_ptr;
1da177e4
LT
2298}
2299
2300void dm_set_mdptr(struct mapped_device *md, void *ptr)
2301{
2302 md->interface_ptr = ptr;
2303}
2304
2305void dm_get(struct mapped_device *md)
2306{
2307 atomic_inc(&md->holders);
3f77316d 2308 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1da177e4
LT
2309}
2310
72d94861
AK
2311const char *dm_device_name(struct mapped_device *md)
2312{
2313 return md->name;
2314}
2315EXPORT_SYMBOL_GPL(dm_device_name);
2316
3f77316d 2317static void __dm_destroy(struct mapped_device *md, bool wait)
1da177e4 2318{
1134e5ae 2319 struct dm_table *map;
1da177e4 2320
3f77316d 2321 might_sleep();
fba9f90e 2322
3f77316d
KU
2323 spin_lock(&_minor_lock);
2324 map = dm_get_live_table(md);
2325 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2326 set_bit(DMF_FREEING, &md->flags);
2327 spin_unlock(&_minor_lock);
2328
2329 if (!dm_suspended_md(md)) {
2330 dm_table_presuspend_targets(map);
2331 dm_table_postsuspend_targets(map);
1da177e4 2332 }
3f77316d
KU
2333
2334 /*
2335 * Rarely, there may still be I/O requests in flight that are
2336 * completing. Wait for all references to disappear.
2337 * No one may increment the reference count of the mapped_device
2338 * once it has entered the DMF_FREEING state.
2339 */
2340 if (wait)
2341 while (atomic_read(&md->holders))
2342 msleep(1);
2343 else if (atomic_read(&md->holders))
2344 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2345 dm_device_name(md), atomic_read(&md->holders));
2346
2347 dm_sysfs_exit(md);
2348 dm_table_put(map);
2349 dm_table_destroy(__unbind(md));
2350 free_dev(md);
2351}
2352
2353void dm_destroy(struct mapped_device *md)
2354{
2355 __dm_destroy(md, true);
2356}
2357
2358void dm_destroy_immediate(struct mapped_device *md)
2359{
2360 __dm_destroy(md, false);
2361}
2362
2363void dm_put(struct mapped_device *md)
2364{
2365 atomic_dec(&md->holders);
1da177e4 2366}
79eb885c 2367EXPORT_SYMBOL_GPL(dm_put);
1da177e4 2368
401600df 2369static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
46125c1c
MB
2370{
2371 int r = 0;
b44ebeb0
MP
2372 DECLARE_WAITQUEUE(wait, current);
2373
2374 dm_unplug_all(md->queue);
2375
2376 add_wait_queue(&md->wait, &wait);
46125c1c
MB
2377
2378 while (1) {
401600df 2379 set_current_state(interruptible);
46125c1c
MB
2380
2381 smp_mb();
b4324fee 2382 if (!md_in_flight(md))
46125c1c
MB
2383 break;
2384
401600df
MP
2385 if (interruptible == TASK_INTERRUPTIBLE &&
2386 signal_pending(current)) {
46125c1c
MB
2387 r = -EINTR;
2388 break;
2389 }
2390
2391 io_schedule();
2392 }
2393 set_current_state(TASK_RUNNING);
2394
b44ebeb0
MP
2395 remove_wait_queue(&md->wait, &wait);
2396
46125c1c
MB
2397 return r;
2398}
2399
531fe963 2400static void dm_flush(struct mapped_device *md)
af7e466a
MP
2401{
2402 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
52b1fd5a
MP
2403
2404 bio_init(&md->barrier_bio);
2405 md->barrier_bio.bi_bdev = md->bdev;
2406 md->barrier_bio.bi_rw = WRITE_BARRIER;
2407 __split_and_process_bio(md, &md->barrier_bio);
2408
2409 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
af7e466a
MP
2410}
2411
2412static void process_barrier(struct mapped_device *md, struct bio *bio)
2413{
5aa2781d
MP
2414 md->barrier_error = 0;
2415
531fe963 2416 dm_flush(md);
af7e466a 2417
5aa2781d
MP
2418 if (!bio_empty_barrier(bio)) {
2419 __split_and_process_bio(md, bio);
708e9295
MP
2420 /*
2421 * If the request isn't supported, don't waste time with
2422 * the second flush.
2423 */
2424 if (md->barrier_error != -EOPNOTSUPP)
2425 dm_flush(md);
af7e466a
MP
2426 }
2427
af7e466a 2428 if (md->barrier_error != DM_ENDIO_REQUEUE)
531fe963 2429 bio_endio(bio, md->barrier_error);
2761e95f
MP
2430 else {
2431 spin_lock_irq(&md->deferred_lock);
2432 bio_list_add_head(&md->deferred, bio);
2433 spin_unlock_irq(&md->deferred_lock);
2434 }
af7e466a
MP
2435}
2436
1da177e4
LT
2437/*
2438 * Process the deferred bios
2439 */
ef208587 2440static void dm_wq_work(struct work_struct *work)
1da177e4 2441{
ef208587
MP
2442 struct mapped_device *md = container_of(work, struct mapped_device,
2443 work);
6d6f10df 2444 struct bio *c;
1da177e4 2445
ef208587
MP
2446 down_write(&md->io_lock);
2447
3b00b203 2448 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
df12ee99
AK
2449 spin_lock_irq(&md->deferred_lock);
2450 c = bio_list_pop(&md->deferred);
2451 spin_unlock_irq(&md->deferred_lock);
2452
2453 if (!c) {
1eb787ec 2454 clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
df12ee99
AK
2455 break;
2456 }
022c2611 2457
3b00b203
MP
2458 up_write(&md->io_lock);
2459
e6ee8c0b
KU
2460 if (dm_request_based(md))
2461 generic_make_request(c);
2462 else {
7b6d91da 2463 if (c->bi_rw & REQ_HARDBARRIER)
e6ee8c0b
KU
2464 process_barrier(md, c);
2465 else
2466 __split_and_process_bio(md, c);
2467 }
3b00b203
MP
2468
2469 down_write(&md->io_lock);
022c2611 2470 }
73d410c0 2471
ef208587 2472 up_write(&md->io_lock);
1da177e4
LT
2473}
2474
9a1fb464 2475static void dm_queue_flush(struct mapped_device *md)
304f3f6a 2476{
3b00b203
MP
2477 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2478 smp_mb__after_clear_bit();
53d5914f 2479 queue_work(md->wq, &md->work);
304f3f6a
MB
2480}
2481
57cba5d3 2482static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
d0bcb878
KU
2483{
2484 struct dm_rq_target_io *tio = clone->end_io_data;
2485
57cba5d3 2486 tio->info.target_request_nr = request_nr;
d0bcb878
KU
2487}
2488
2489/* Issue barrier requests to targets and wait for their completion. */
2490static int dm_rq_barrier(struct mapped_device *md)
2491{
2492 int i, j;
7c666411 2493 struct dm_table *map = dm_get_live_table(md);
d0bcb878
KU
2494 unsigned num_targets = dm_table_get_num_targets(map);
2495 struct dm_target *ti;
2496 struct request *clone;
2497
2498 md->barrier_error = 0;
2499
2500 for (i = 0; i < num_targets; i++) {
2501 ti = dm_table_get_target(map, i);
2502 for (j = 0; j < ti->num_flush_requests; j++) {
2503 clone = clone_rq(md->flush_request, md, GFP_NOIO);
57cba5d3 2504 dm_rq_set_target_request_nr(clone, j);
d0bcb878
KU
2505 atomic_inc(&md->pending[rq_data_dir(clone)]);
2506 map_request(ti, clone, md);
2507 }
2508 }
2509
2510 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2511 dm_table_put(map);
2512
2513 return md->barrier_error;
2514}
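/*
 * Illustrative sketch (assumption, modeled on a bio-based target such
 * as dm-stripe of this era; example_stripes and stripe_dev are
 * hypothetical): the target-side counterpart of the cloning loop
 * above.  A target declares how many flush clones it wants via
 * ti->num_flush_requests in its constructor, and its map function
 * reads map_info->target_request_nr to route each clone to one of its
 * underlying devices.
 */
struct example_stripes {			/* hypothetical */
	unsigned nr_stripes;
	struct dm_dev *stripe_dev[8];
};

static int example_map_flush(struct dm_target *ti, struct bio *bio,
			     union map_info *map_context)
{
	struct example_stripes *sc = ti->private;
	unsigned request_nr = map_context->target_request_nr;

	/* The ctr is assumed to have set ti->num_flush_requests = nr_stripes. */
	bio->bi_bdev = sc->stripe_dev[request_nr]->bdev;

	return DM_MAPIO_REMAPPED;
}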
2515
2516static void dm_rq_barrier_work(struct work_struct *work)
2517{
2518 int error;
2519 struct mapped_device *md = container_of(work, struct mapped_device,
2520 barrier_work);
2521 struct request_queue *q = md->queue;
2522 struct request *rq;
2523 unsigned long flags;
2524
2525 /*
2526 * Hold a reference to the md here and release it only at the end,
2527 * so that the md cannot be deleted by a device opener while the
2528 * barrier request is completing.
2529 */
2530 dm_get(md);
2531
2532 error = dm_rq_barrier(md);
2533
2534 rq = md->flush_request;
2535 md->flush_request = NULL;
2536
2537 if (error == DM_ENDIO_REQUEUE) {
2538 spin_lock_irqsave(q->queue_lock, flags);
2539 blk_requeue_request(q, rq);
2540 spin_unlock_irqrestore(q->queue_lock, flags);
2541 } else
2542 blk_end_request_all(rq, error);
2543
2544 blk_run_queue(q);
2545
2546 dm_put(md);
2547}
2548
1da177e4 2549/*
042d2a9b 2550 * Swap in a new table, returning the old one for the caller to destroy.
1da177e4 2551 */
042d2a9b 2552struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
1da177e4 2553{
042d2a9b 2554 struct dm_table *map = ERR_PTR(-EINVAL);
754c5fc7 2555 struct queue_limits limits;
042d2a9b 2556 int r;
1da177e4 2557
e61290a4 2558 mutex_lock(&md->suspend_lock);
1da177e4
LT
2559
2560 /* device must be suspended */
4f186f8b 2561 if (!dm_suspended_md(md))
93c534ae 2562 goto out;
1da177e4 2563
754c5fc7 2564 r = dm_calculate_queue_limits(table, &limits);
042d2a9b
AK
2565 if (r) {
2566 map = ERR_PTR(r);
754c5fc7 2567 goto out;
042d2a9b 2568 }
754c5fc7 2569
042d2a9b 2570 map = __bind(md, table, &limits);
1da177e4 2571
93c534ae 2572out:
e61290a4 2573 mutex_unlock(&md->suspend_lock);
042d2a9b 2574 return map;
1da177e4
LT
2575}
2576
2577/*
2578 * Functions to lock and unlock any filesystem running on the
2579 * device.
2580 */
2ca3310e 2581static int lock_fs(struct mapped_device *md)
1da177e4 2582{
e39e2e95 2583 int r;
1da177e4
LT
2584
2585 WARN_ON(md->frozen_sb);
dfbe03f6 2586
db8fef4f 2587 md->frozen_sb = freeze_bdev(md->bdev);
dfbe03f6 2588 if (IS_ERR(md->frozen_sb)) {
cf222b37 2589 r = PTR_ERR(md->frozen_sb);
e39e2e95
AK
2590 md->frozen_sb = NULL;
2591 return r;
dfbe03f6
AK
2592 }
2593
aa8d7c2f
AK
2594 set_bit(DMF_FROZEN, &md->flags);
2595
1da177e4
LT
2596 return 0;
2597}
2598
2ca3310e 2599static void unlock_fs(struct mapped_device *md)
1da177e4 2600{
aa8d7c2f
AK
2601 if (!test_bit(DMF_FROZEN, &md->flags))
2602 return;
2603
db8fef4f 2604 thaw_bdev(md->bdev, md->frozen_sb);
1da177e4 2605 md->frozen_sb = NULL;
aa8d7c2f 2606 clear_bit(DMF_FROZEN, &md->flags);
1da177e4
LT
2607}
2608
2609/*
2610 * We need to be able to change a mapping table under a mounted
2611 * filesystem. For example we might want to move some data in
2612 * the background. Before the table can be swapped with
2613 * dm_swap_table, dm_suspend must be called to flush any in-flight
2614 * bios and ensure that any further I/O gets deferred.
2615 */
cec47e3d
KU
2616/*
2617 * Suspend mechanism in request-based dm.
2618 *
9f518b27
KU
2619 * 1. Flush all I/Os by lock_fs() if needed.
2620 * 2. Stop dispatching any I/O by stopping the request_queue.
2621 * 3. Wait for all in-flight I/Os to be completed or requeued.
cec47e3d 2622 *
9f518b27 2623 * To abort suspend, start the request_queue.
cec47e3d 2624 */
a3d77d35 2625int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1da177e4 2626{
2ca3310e 2627 struct dm_table *map = NULL;
46125c1c 2628 int r = 0;
a3d77d35 2629 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2e93ccc1 2630 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
1da177e4 2631
e61290a4 2632 mutex_lock(&md->suspend_lock);
2ca3310e 2633
4f186f8b 2634 if (dm_suspended_md(md)) {
73d410c0 2635 r = -EINVAL;
d287483d 2636 goto out_unlock;
73d410c0 2637 }
1da177e4 2638
7c666411 2639 map = dm_get_live_table(md);
1da177e4 2640
2e93ccc1
KU
2641 /*
2642 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2643 * This flag is cleared before dm_suspend returns.
2644 */
2645 if (noflush)
2646 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2647
cf222b37
AK
2648 /* This does not get reverted if there's an error later. */
2649 dm_table_presuspend_targets(map);
2650
32a926da 2651 /*
9f518b27
KU
2652 * Flush I/O to the device.
2653 * Any I/O submitted after lock_fs() may not be flushed.
2654 * noflush takes precedence over do_lockfs.
2655 * (lock_fs() flushes I/Os and waits for them to complete.)
32a926da
MP
2656 */
2657 if (!noflush && do_lockfs) {
2658 r = lock_fs(md);
2659 if (r)
f431d966 2660 goto out;
aa8d7c2f 2661 }
1da177e4
LT
2662
2663 /*
3b00b203
MP
2664 * Here we must make sure that no processes are submitting requests
2665 * to target drivers i.e. no one may be executing
2666 * __split_and_process_bio. This is called from dm_request and
2667 * dm_wq_work.
2668 *
2669 * To get all processes out of __split_and_process_bio in dm_request,
2670 * we take the write lock. To prevent any process from reentering
2671 * __split_and_process_bio from dm_request, we set
2672 * DMF_QUEUE_IO_TO_THREAD.
2673 *
2674 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
2675 * and call flush_workqueue(md->wq). flush_workqueue will wait until
2676 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
2677 * further calls to __split_and_process_bio from dm_wq_work.
1da177e4 2678 */
2ca3310e 2679 down_write(&md->io_lock);
1eb787ec
AK
2680 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2681 set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
2ca3310e 2682 up_write(&md->io_lock);
1da177e4 2683
d0bcb878
KU
2684 /*
2685 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
2686 * can be kicked until md->queue is stopped. So stop md->queue before
2687 * flushing md->wq.
2688 */
cec47e3d 2689 if (dm_request_based(md))
9f518b27 2690 stop_queue(md->queue);
cec47e3d 2691
d0bcb878
KU
2692 flush_workqueue(md->wq);
2693
1da177e4 2694 /*
3b00b203
MP
2695 * At this point no more requests are entering target request routines.
2696 * We call dm_wait_for_completion to wait for all existing requests
2697 * to finish.
1da177e4 2698 */
401600df 2699 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
1da177e4 2700
2ca3310e 2701 down_write(&md->io_lock);
6d6f10df 2702 if (noflush)
022c2611 2703 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
94d6351e 2704 up_write(&md->io_lock);
2e93ccc1 2705
1da177e4 2706 /* were we interrupted ? */
46125c1c 2707 if (r < 0) {
9a1fb464 2708 dm_queue_flush(md);
73d410c0 2709
cec47e3d 2710 if (dm_request_based(md))
9f518b27 2711 start_queue(md->queue);
cec47e3d 2712
2ca3310e 2713 unlock_fs(md);
2e93ccc1 2714 goto out; /* pushback list is already flushed, so skip flush */
2ca3310e 2715 }
1da177e4 2716
3b00b203
MP
2717 /*
2718 * If dm_wait_for_completion returned 0, the device is completely
2719 * quiescent now. There is no request-processing activity. All new
2720 * requests are being added to md->deferred list.
2721 */
2722
2ca3310e 2723 set_bit(DMF_SUSPENDED, &md->flags);
b84b0287 2724
4d4471cb
KU
2725 dm_table_postsuspend_targets(map);
2726
2ca3310e
AK
2727out:
2728 dm_table_put(map);
d287483d
AK
2729
2730out_unlock:
e61290a4 2731 mutex_unlock(&md->suspend_lock);
cf222b37 2732 return r;
1da177e4
LT
2733}
2734
2735int dm_resume(struct mapped_device *md)
2736{
cf222b37 2737 int r = -EINVAL;
cf222b37 2738 struct dm_table *map = NULL;
1da177e4 2739
e61290a4 2740 mutex_lock(&md->suspend_lock);
4f186f8b 2741 if (!dm_suspended_md(md))
cf222b37 2742 goto out;
cf222b37 2743
7c666411 2744 map = dm_get_live_table(md);
2ca3310e 2745 if (!map || !dm_table_get_size(map))
cf222b37 2746 goto out;
1da177e4 2747
8757b776
MB
2748 r = dm_table_resume_targets(map);
2749 if (r)
2750 goto out;
2ca3310e 2751
9a1fb464 2752 dm_queue_flush(md);
2ca3310e 2753
cec47e3d
KU
2754 /*
2755 * Flushing deferred I/Os must be done after targets are resumed
2756 * so that the targets can map them correctly.
2757 * Request-based dm queues the deferred I/Os in its request_queue.
2758 */
2759 if (dm_request_based(md))
2760 start_queue(md->queue);
2761
2ca3310e
AK
2762 unlock_fs(md);
2763
2764 clear_bit(DMF_SUSPENDED, &md->flags);
2765
1da177e4 2766 dm_table_unplug_all(map);
cf222b37
AK
2767 r = 0;
2768out:
2769 dm_table_put(map);
e61290a4 2770 mutex_unlock(&md->suspend_lock);
2ca3310e 2771
cf222b37 2772 return r;
1da177e4
LT
2773}
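/*
 * Illustrative sketch (assumption, modeled on dm-ioctl's do_resume):
 * the canonical sequence for replacing a live table, tying together
 * dm_suspend(), dm_swap_table() and dm_resume() above.
 */
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	struct dm_table *old_map;
	int r;

	if (!dm_suspended_md(md)) {
		r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
		if (r)
			return r;
	}

	old_map = dm_swap_table(md, new_table);
	if (IS_ERR(old_map)) {
		dm_resume(md);
		return PTR_ERR(old_map);
	}

	if (old_map)
		dm_table_destroy(old_map);

	return dm_resume(md);
}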
2774
2775/*-----------------------------------------------------------------
2776 * Event notification.
2777 *---------------------------------------------------------------*/
3abf85b5 2778int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
60935eb2 2779 unsigned cookie)
69267a30 2780{
60935eb2
MB
2781 char udev_cookie[DM_COOKIE_LENGTH];
2782 char *envp[] = { udev_cookie, NULL };
2783
2784 if (!cookie)
3abf85b5 2785 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
60935eb2
MB
2786 else {
2787 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2788 DM_COOKIE_ENV_VAR_NAME, cookie);
3abf85b5
PR
2789 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2790 action, envp);
60935eb2 2791 }
69267a30
AK
2792}
2793
7a8c3d3b
MA
2794uint32_t dm_next_uevent_seq(struct mapped_device *md)
2795{
2796 return atomic_add_return(1, &md->uevent_seq);
2797}
2798
1da177e4
LT
2799uint32_t dm_get_event_nr(struct mapped_device *md)
2800{
2801 return atomic_read(&md->event_nr);
2802}
2803
2804int dm_wait_event(struct mapped_device *md, int event_nr)
2805{
2806 return wait_event_interruptible(md->eventq,
2807 (event_nr != atomic_read(&md->event_nr)));
2808}
2809
7a8c3d3b
MA
2810void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2811{
2812 unsigned long flags;
2813
2814 spin_lock_irqsave(&md->uevent_lock, flags);
2815 list_add(elist, &md->uevent_list);
2816 spin_unlock_irqrestore(&md->uevent_lock, flags);
2817}
2818
1da177e4
LT
2819/*
2820 * The gendisk is only valid as long as you hold a reference
2821 * on 'md'.
2822 */
2823struct gendisk *dm_disk(struct mapped_device *md)
2824{
2825 return md->disk;
2826}
2827
784aae73
MB
2828struct kobject *dm_kobject(struct mapped_device *md)
2829{
2830 return &md->kobj;
2831}
2832
2833/*
2834 * struct mapped_device should not be exported outside of dm.c
2835 * so use this check to verify that kobj is part of md structure
2836 */
2837struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2838{
2839 struct mapped_device *md;
2840
2841 md = container_of(kobj, struct mapped_device, kobj);
2842 if (&md->kobj != kobj)
2843 return NULL;
2844
4d89b7b4 2845 if (test_bit(DMF_FREEING, &md->flags) ||
432a212c 2846 dm_deleting_md(md))
4d89b7b4
MB
2847 return NULL;
2848
784aae73
MB
2849 dm_get(md);
2850 return md;
2851}
2852
4f186f8b 2853int dm_suspended_md(struct mapped_device *md)
1da177e4
LT
2854{
2855 return test_bit(DMF_SUSPENDED, &md->flags);
2856}
2857
64dbce58
KU
2858int dm_suspended(struct dm_target *ti)
2859{
ecdb2e25 2860 return dm_suspended_md(dm_table_get_md(ti->table));
64dbce58
KU
2861}
2862EXPORT_SYMBOL_GPL(dm_suspended);
2863
2e93ccc1
KU
2864int dm_noflush_suspending(struct dm_target *ti)
2865{
ecdb2e25 2866 return __noflush_suspending(dm_table_get_md(ti->table));
2e93ccc1
KU
2867}
2868EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2869
e6ee8c0b
KU
2870struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
2871{
2872 struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);
2873
2874 if (!pools)
2875 return NULL;
2876
2877 pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
2878 mempool_create_slab_pool(MIN_IOS, _io_cache) :
2879 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
2880 if (!pools->io_pool)
2881 goto free_pools_and_out;
2882
2883 pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
2884 mempool_create_slab_pool(MIN_IOS, _tio_cache) :
2885 mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
2886 if (!pools->tio_pool)
2887 goto free_io_pool_and_out;
2888
2889 pools->bs = (type == DM_TYPE_BIO_BASED) ?
2890 bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
2891 if (!pools->bs)
2892 goto free_tio_pool_and_out;
2893
2894 return pools;
2895
2896free_tio_pool_and_out:
2897 mempool_destroy(pools->tio_pool);
2898
2899free_io_pool_and_out:
2900 mempool_destroy(pools->io_pool);
2901
2902free_pools_and_out:
2903 kfree(pools);
2904
2905 return NULL;
2906}
2907
2908void dm_free_md_mempools(struct dm_md_mempools *pools)
2909{
2910 if (!pools)
2911 return;
2912
2913 if (pools->io_pool)
2914 mempool_destroy(pools->io_pool);
2915
2916 if (pools->tio_pool)
2917 mempool_destroy(pools->tio_pool);
2918
2919 if (pools->bs)
2920 bioset_free(pools->bs);
2921
2922 kfree(pools);
2923}
2924
83d5cde4 2925static const struct block_device_operations dm_blk_dops = {
1da177e4
LT
2926 .open = dm_blk_open,
2927 .release = dm_blk_close,
aa129a22 2928 .ioctl = dm_blk_ioctl,
3ac51e74 2929 .getgeo = dm_blk_getgeo,
1da177e4
LT
2930 .owner = THIS_MODULE
2931};
2932
2933EXPORT_SYMBOL(dm_get_mapinfo);
2934
2935/*
2936 * module hooks
2937 */
2938module_init(dm_init);
2939module_exit(dm_exit);
2940
2941module_param(major, uint, 0);
2942MODULE_PARM_DESC(major, "The major number of the device mapper");
2943MODULE_DESCRIPTION(DM_NAME " driver");
2944MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2945MODULE_LICENSE("GPL");