dm: linear support discard
[deliverable/linux.git] / drivers / md / dm.c
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
	spinlock_t endio_lock;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

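/*
 * Illustrative sketch (not part of this file): a request-based target
 * that stashed per-request state in the map_info it was handed in
 * map_rq() can recover it later from the clone request. The names
 * my_rq_end_io and my_rq_state are hypothetical.
 *
 *	static int my_rq_end_io(struct dm_target *ti, struct request *clone,
 *				int error, union map_info *info)
 *	{
 *		struct my_rq_state *state = dm_get_rq_mapinfo(clone)->ptr;
 *		...
 *		return 0;	(0 or negative errno completes the I/O)
 *	}
 */
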
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	unsigned type;
	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending[2];
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Protect barrier_error from concurrent endio processing
	 * in request-based dm.
	 */
	spinlock_t barrier_error_lock;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;
	struct work_struct barrier_work;

	/* A pointer to the currently processing pre/post flush request */
	struct request *flush_request;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* For saving the address of __make_request for request based dm */
	make_request_fn *saved_make_request_fn;

	/* sysfs handle */
	struct kobject kobj;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	mempool_t *tio_pool;
	struct bio_set *bs;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_io_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_io_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

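/*
 * Note: the rollback in dm_init() (`while (i--) _exits[i]()`) relies on
 * _exits[] mirroring _inits[] entry for entry, so the two arrays above
 * must be kept in the same order.
 */
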
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	lock_kernel();
	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);
	unlock_kernel();

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	lock_kernel();
	atomic_dec(&md->open_count);
	dm_put(md);
	unlock_kernel();

	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

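/*
 * Only single-target tables get ioctls forwarded above; a target such as
 * dm-multipath supplies the ioctl method to pass commands on to its
 * underlying device (mentioned here for illustration only).
 */
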
static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
	mempool_free(info, info->tio->md->io_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

485
3eaf840e
JNN
486static void start_io_acct(struct dm_io *io)
487{
488 struct mapped_device *md = io->md;
c9959059 489 int cpu;
316d315b 490 int rw = bio_data_dir(io->bio);
3eaf840e
JNN
491
492 io->start_time = jiffies;
493
074a7aca
TH
494 cpu = part_stat_lock();
495 part_round_stats(cpu, &dm_disk(md)->part0);
496 part_stat_unlock();
316d315b 497 dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
3eaf840e
JNN
498}
499
d221d2e7 500static void end_io_acct(struct dm_io *io)
3eaf840e
JNN
501{
502 struct mapped_device *md = io->md;
503 struct bio *bio = io->bio;
504 unsigned long duration = jiffies - io->start_time;
c9959059 505 int pending, cpu;
3eaf840e
JNN
506 int rw = bio_data_dir(bio);
507
074a7aca
TH
508 cpu = part_stat_lock();
509 part_round_stats(cpu, &dm_disk(md)->part0);
510 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
511 part_stat_unlock();
3eaf840e 512
af7e466a
MP
513 /*
514 * After this is decremented the bio must not be touched if it is
515 * a barrier.
516 */
316d315b
NK
517 dm_disk(md)->part0.in_flight[rw] = pending =
518 atomic_dec_return(&md->pending[rw]);
519 pending += atomic_read(&md->pending[rw^0x1]);
3eaf840e 520
d221d2e7
MP
521 /* nudge anyone waiting on suspend queue */
522 if (!pending)
523 wake_up(&md->wait);
3eaf840e
JNN
524}
525
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

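/*
 * Bios deferred by queue_io() above are drained by dm_wq_work(), the
 * md->work handler registered in alloc_dev() below, once the
 * suspend/barrier condition that forced the deferral has cleared.
 */
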
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
	struct dm_table *t;
	unsigned long flags;

	read_lock_irqsave(&md->map_lock, flags);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock_irqrestore(&md->map_lock, flags);

	return t;
}

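/*
 * The usage pattern, as seen throughout this file (e.g. dm_blk_ioctl()
 * and dm_lld_busy()):
 *
 *	struct dm_table *map = dm_get_live_table(md);
 *	if (map) {
 *		... use map ...
 *		dm_table_put(map);
 *	}
 */
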
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

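/*
 * Worked example for the check above: with geo->cylinders = 1024,
 * geo->heads = 255 and geo->sectors = 63, sz = 1024 * 255 * 63 =
 * 16450560 sectors (about 7.8 GiB with 512-byte sectors), so any
 * geo->start beyond that is rejected with -EINVAL.
 */
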
/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
					bio_list_add_head(&md->deferred,
							  io->bio);
			} else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio->bi_rw & REQ_HARDBARRIER) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 *
			 * We ignore -EOPNOTSUPP for empty flush reported by
			 * underlying devices. We assume that if the device
			 * doesn't support empty barriers, it doesn't need
			 * cache flushing commands.
			 */
			if (!md->barrier_error &&
			    !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP))
				md->barrier_error = io_error;
			end_io_acct(io);
			free_io(md, io);
		} else {
			end_io_acct(io);
			free_io(md, io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

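/*
 * Illustrative sketch (not from this file): a bio-based target's end_io
 * method returns one of the codes handled above -- DM_ENDIO_REQUEUE
 * pushes the I/O back, DM_ENDIO_INCOMPLETE means the target keeps
 * ownership, and 0 or a negative errno completes the bio. The name
 * my_end_io is hypothetical.
 *
 *	static int my_end_io(struct dm_target *ti, struct bio *bio,
 *			     int error, union map_info *map_context)
 *	{
 *		if (error == -EIO)
 *			return DM_ENDIO_REQUEUE;   (push back the I/O)
 *		return error;                      (complete with this status)
 *	}
 */
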
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	bio_put(clone);

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once an error has occurred, just let clone->end_io()
		 * handle the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't report the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Report the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

static void store_barrier_error(struct mapped_device *md, int error)
{
	unsigned long flags;

	spin_lock_irqsave(&md->barrier_error_lock, flags);
	/*
	 * Basically, the first error is taken, but:
	 *   -EOPNOTSUPP supersedes any I/O error.
	 *   Requeue request supersedes any I/O error but -EOPNOTSUPP.
	 */
	if (!md->barrier_error || error == -EOPNOTSUPP ||
	    (md->barrier_error != -EOPNOTSUPP &&
	     error == DM_ENDIO_REQUEUE))
		md->barrier_error = error;
	spin_unlock_irqrestore(&md->barrier_error_lock, flags);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	if (run_queue)
		blk_run_queue(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	int run_queue = 1;
	bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);

	if (unlikely(is_barrier)) {
		if (unlikely(error))
			store_barrier_error(md, error);
		run_queue = 0;
	} else
		blk_end_request_all(rq, error);

	rq_completed(md, rw, run_queue);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->special = NULL;
	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		dm_end_request(clone, DM_ENDIO_REQUEUE);
		return;
	}

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
		blk_plug_device(q);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request. So can't use
		 * softirq_done with the original.
		 * Pass the clone to dm_done() directly in this special case.
		 * It is safe (even if clone->q->queue_lock is held here)
		 * because there is no I/O dispatching during the completion
		 * of barrier clone.
		 */
		dm_done(clone, error, true);
		return;
	}

	tio->error = error;
	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 * case.
		 */
		BUG_ON(error > 0);
		dm_end_request(clone, error);
		return;
	}

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * This is just for cleaning up the queue's bookkeeping for the
	 * clone. The clone is *not* actually freed here, because it was
	 * allocated from dm's own mempool and REQ_ALLOCED isn't set in
	 * clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock. Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this queue
	 */
	dm_complete_request(clone, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

1052
1053static void __map_bio(struct dm_target *ti, struct bio *clone,
028867ac 1054 struct dm_target_io *tio)
1da177e4
LT
1055{
1056 int r;
2056a782 1057 sector_t sector;
9faf400f 1058 struct mapped_device *md;
1da177e4 1059
1da177e4
LT
1060 clone->bi_end_io = clone_endio;
1061 clone->bi_private = tio;
1062
1063 /*
1064 * Map the clone. If r == 0 we don't need to do
1065 * anything, the target has assumed ownership of
1066 * this io.
1067 */
1068 atomic_inc(&tio->io->io_count);
2056a782 1069 sector = clone->bi_sector;
1da177e4 1070 r = ti->type->map(ti, clone, &tio->info);
45cbcd79 1071 if (r == DM_MAPIO_REMAPPED) {
1da177e4 1072 /* the bio has been remapped so dispatch it */
2056a782 1073
5f3ea37c 1074 trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
22a7c31a 1075 tio->io->bio->bi_bdev->bd_dev, sector);
2056a782 1076
1da177e4 1077 generic_make_request(clone);
2e93ccc1
KU
1078 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1079 /* error the io and bail out, or requeue it if needed */
9faf400f
SB
1080 md = tio->io->md;
1081 dec_pending(tio->io, r);
1082 /*
1083 * Store bio_set for cleanup.
1084 */
1085 clone->bi_private = md->bs;
1da177e4 1086 bio_put(clone);
9faf400f 1087 free_tio(md, tio);
45cbcd79
KU
1088 } else if (r) {
1089 DMWARN("unimplemented target map return value: %d", r);
1090 BUG();
1da177e4
LT
1091 }
1092}
1093
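/*
 * Illustrative sketch (not from this file) of a map method whose return
 * value drives the branches above; dm-linear's map does essentially
 * this. The names my_map and my_target are hypothetical.
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio,
 *			  union map_info *map_context)
 *	{
 *		struct my_target *mt = ti->private;
 *
 *		bio->bi_bdev = mt->dev->bdev;
 *		bio->bi_sector = mt->start + (bio->bi_sector - ti->begin);
 *		return DM_MAPIO_REMAPPED;	(dispatched by __map_bio)
 *	}
 */
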
struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

1142
1143/*
1144 * Creates a bio that consists of range of complete bvecs.
1145 */
1146static struct bio *clone_bio(struct bio *bio, sector_t sector,
1147 unsigned short idx, unsigned short bv_count,
9faf400f 1148 unsigned int len, struct bio_set *bs)
1da177e4
LT
1149{
1150 struct bio *clone;
1151
9faf400f
SB
1152 clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
1153 __bio_clone(clone, bio);
7b6d91da 1154 clone->bi_rw &= ~REQ_HARDBARRIER;
9faf400f 1155 clone->bi_destructor = dm_bio_destructor;
1da177e4
LT
1156 clone->bi_sector = sector;
1157 clone->bi_idx = idx;
1158 clone->bi_vcnt = idx + bv_count;
1159 clone->bi_size = to_bytes(len);
1160 clone->bi_flags &= ~(1 << BIO_SEG_VALID);
1161
9c47008d 1162 if (bio_integrity(bio)) {
7878cba9 1163 bio_integrity_clone(clone, bio, GFP_NOIO, bs);
9c47008d
MP
1164
1165 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
1166 bio_integrity_trim(clone,
1167 bio_sector_offset(bio, idx, 0), len);
1168 }
1169
1da177e4
LT
1170 return clone;
1171}
1172
static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
{
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	return tio;
}

static void __flush_target(struct clone_info *ci, struct dm_target *ti,
			   unsigned request_nr)
{
	struct dm_target_io *tio = alloc_tio(ci, ti);
	struct bio *clone;

	tio->info.target_request_nr = request_nr;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;

	__map_bio(ti, clone, tio);
}

static int __clone_and_map_empty_barrier(struct clone_info *ci)
{
	unsigned target_nr = 0, request_nr;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		for (request_nr = 0; request_nr < ti->num_flush_requests;
		     request_nr++)
			__flush_target(ci, ti, request_nr);

	ci->sector_count = 0;

	return 0;
}

/*
 * Perform all io with a single clone.
 */
static void __clone_and_map_simple(struct clone_info *ci, struct dm_target *ti)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target_io *tio;

	tio = alloc_tio(ci, ti);
	clone = clone_bio(bio, ci->sector, ci->idx,
			  bio->bi_vcnt - ci->idx, ci->sector_count,
			  ci->md->bs);
	__map_bio(ti, clone, tio);
	ci->sector_count = 0;
}

static int __clone_and_map_discard(struct clone_info *ci)
{
	struct dm_target *ti;
	sector_t max;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	/*
	 * Even though the device advertised discard support,
	 * reconfiguration might have changed that since the
	 * check was performed.
	 */

	if (!ti->num_discard_requests)
		return -EOPNOTSUPP;

	max = max_io_len(ci->md, ci->sector, ti);

	if (ci->sector_count > max)
		/*
		 * FIXME: Handle a discard that spans two or more targets.
		 */
		return -EOPNOTSUPP;

	__clone_and_map_simple(ci, ti);

	return 0;
}

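/*
 * A target opts in to the discard path above by setting
 * ti->num_discard_requests in its constructor; the "dm: linear support
 * discard" change this view highlights does exactly that for dm-linear.
 */
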
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio_empty_barrier(bio)))
		return __clone_and_map_empty_barrier(ci);

	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __clone_and_map_discard(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		__clone_and_map_simple(ci, ti);

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		tio = alloc_tio(ci, ti);
		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);
			}

			len = min(remaining, max);

			tio = alloc_tio(ci, ti);
			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_live_table(md);
	if (unlikely(!ci.map)) {
		if (!(bio->bi_rw & REQ_HARDBARRIER))
			bio_io_error(bio);
		else if (!md->barrier_error)
			md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	if (unlikely(bio_empty_barrier(bio)))
		ci.sector_count = 1;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries. So always set max_size to 0, and the code below allows
	 * just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int _dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static int dm_make_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	return md->saved_make_request_fn(q, bio); /* call __make_request() */
}

static int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

static int dm_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		return dm_make_request(q, bio);

	return _dm_request(q, bio);
}

static bool dm_rq_is_flush_request(struct request *rq)
{
	if (rq->cmd_flags & REQ_FLUSH)
		return true;
	else
		return false;
}

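/*
 * The helper above is just a readable form of
 * `return rq->cmd_flags & REQ_FLUSH;`.
 */
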
void dm_dispatch_request(struct request *rq)
{
	int r;

	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
	if (r)
		dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);

static void dm_rq_bio_destructor(struct bio *bio)
{
	struct dm_rq_clone_bio_info *info = bio->bi_private;
	struct mapped_device *md = info->tio->md;

	free_bio_info(info);
	bio_free(bio, md->bs);
}

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct mapped_device *md = tio->md;
	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);

	if (!info)
		return -ENOMEM;

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;
	bio->bi_private = info;
	bio->bi_destructor = dm_rq_bio_destructor;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
{
	int r;

	if (dm_rq_is_flush_request(rq)) {
		blk_rq_init(NULL, clone);
		clone->cmd_type = REQ_TYPE_FS;
		clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);
	} else {
		r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
				      dm_rq_bio_constructor, tio);
		if (r)
			return r;

		clone->cmd = rq->cmd;
		clone->cmd_len = rq->cmd_len;
		clone->sense = rq->sense;
		clone->buffer = rq->buffer;
	}

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	return 0;
}

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				gfp_t gfp_mask)
{
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	tio->md = md;
	tio->ti = NULL;
	tio->orig = rq;
	tio->error = 0;
	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {
		/* -ENOMEM */
		free_rq_tio(tio);
		return NULL;
	}

	return clone;
}

/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(dm_rq_is_flush_request(rq)))
		return BLKPREP_OK;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	clone = clone_rq(rq, md, GFP_ATOMIC);
	if (!clone)
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

/*
 * Returns:
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
 */
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
{
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);

	tio->ti = ti;
	r = ti->type->map_rq(ti, clone, &tio->info);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		requeued = 1;
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
		break;
	}

	return requeued;
}

/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	struct request *rq, *clone;

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 * dm_suspend().
	 */
	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
		rq = blk_peek_request(q);
		if (!rq)
			goto plug_and_out;

		if (unlikely(dm_rq_is_flush_request(rq))) {
			BUG_ON(md->flush_request);
			md->flush_request = rq;
			blk_start_request(rq);
			queue_work(md->wq, &md->barrier_work);
			goto out;
		}

		ti = dm_table_find_target(map, blk_rq_pos(rq));
		if (ti->type->busy && ti->type->busy(ti))
			goto plug_and_out;

		blk_start_request(rq);
		clone = rq->special;
		atomic_inc(&md->pending[rq_data_dir(clone)]);

		spin_unlock(q->queue_lock);
		if (map_request(ti, clone, md))
			goto requeued;

		spin_lock_irq(q->queue_lock);
	}

	goto out;

requeued:
	spin_lock_irq(q->queue_lock);

plug_and_out:
	if (!elv_queue_empty(q))
		/* Some requests still remain, retry later */
		blk_plug_device(q);

out:
	dm_table_put(map);

	return;
}

int dm_underlying_device_busy(struct request_queue *q)
{
	return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
{
	int r;
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
		r = 1;
	else
		r = dm_table_any_busy_target(map);

	dm_table_put(map);

	return r;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (map) {
		if (dm_request_based(md))
			generic_unplug_device(q);

		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

1793
1794static int dm_any_congested(void *congested_data, int bdi_bits)
1795{
8a57dfc6
CS
1796 int r = bdi_bits;
1797 struct mapped_device *md = congested_data;
1798 struct dm_table *map;
1da177e4 1799
1eb787ec 1800 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
7c666411 1801 map = dm_get_live_table(md);
8a57dfc6 1802 if (map) {
cec47e3d
KU
1803 /*
1804 * Request-based dm cares about only own queue for
1805 * the query about congestion status of request_queue
1806 */
1807 if (dm_request_based(md))
1808 r = md->queue->backing_dev_info.state &
1809 bdi_bits;
1810 else
1811 r = dm_table_any_congested(map, bdi_bits);
1812
8a57dfc6
CS
1813 dm_table_put(map);
1814 }
1815 }
1816
1da177e4
LT
1817 return r;
1818}
1819
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

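/*
 * Note: the idr_pre_get()/idr_get_new_above() pair above is the two-step
 * idr allocation API of this kernel era: idr_pre_get() preloads free
 * nodes (it returns 0 on allocation failure, hence the inverted test),
 * and the actual insertion under _minor_lock stores the MINOR_ALLOCED
 * sentinel until alloc_dev() replaces it with the real md via
 * idr_replace().
 */
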
static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);
static void dm_rq_barrier_work(struct work_struct *work);

static void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices. The type of this dm device has not been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
}

1da177e4
LT
1922/*
1923 * Allocate and initialise a blank device with a given minor.
1924 */
2b06cfff 1925static struct mapped_device *alloc_dev(int minor)
1da177e4
LT
1926{
1927 int r;
cf13ab8e 1928 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
ba61fdd1 1929 void *old_md;
1da177e4
LT
1930
1931 if (!md) {
1932 DMWARN("unable to allocate device, out of memory.");
1933 return NULL;
1934 }
1935
10da4f79 1936 if (!try_module_get(THIS_MODULE))
6ed7ade8 1937 goto bad_module_get;
10da4f79 1938
1da177e4 1939 /* get a minor number for the dev */
2b06cfff 1940 if (minor == DM_ANY_MINOR)
cf13ab8e 1941 r = next_free_minor(&minor);
2b06cfff 1942 else
cf13ab8e 1943 r = specific_minor(minor);
1da177e4 1944 if (r < 0)
6ed7ade8 1945 goto bad_minor;
1da177e4 1946
a5664dad 1947 md->type = DM_TYPE_NONE;
2ca3310e 1948 init_rwsem(&md->io_lock);
e61290a4 1949 mutex_init(&md->suspend_lock);
a5664dad 1950 mutex_init(&md->type_lock);
022c2611 1951 spin_lock_init(&md->deferred_lock);
d0bcb878 1952 spin_lock_init(&md->barrier_error_lock);
1da177e4
LT
1953 rwlock_init(&md->map_lock);
1954 atomic_set(&md->holders, 1);
5c6bd75d 1955 atomic_set(&md->open_count, 0);
1da177e4 1956 atomic_set(&md->event_nr, 0);
7a8c3d3b
MA
1957 atomic_set(&md->uevent_seq, 0);
1958 INIT_LIST_HEAD(&md->uevent_list);
1959 spin_lock_init(&md->uevent_lock);
1da177e4 1960
4a0b4ddf 1961 md->queue = blk_alloc_queue(GFP_KERNEL);
1da177e4 1962 if (!md->queue)
6ed7ade8 1963 goto bad_queue;
1da177e4 1964
4a0b4ddf 1965 dm_init_md_queue(md);
9faf400f 1966
1da177e4
LT
1967 md->disk = alloc_disk(1);
1968 if (!md->disk)
6ed7ade8 1969 goto bad_disk;
1da177e4 1970
316d315b
NK
1971 atomic_set(&md->pending[0], 0);
1972 atomic_set(&md->pending[1], 0);
f0b04115 1973 init_waitqueue_head(&md->wait);
53d5914f 1974 INIT_WORK(&md->work, dm_wq_work);
d0bcb878 1975 INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
f0b04115
JM
1976 init_waitqueue_head(&md->eventq);
1977
1da177e4
LT
1978 md->disk->major = _major;
1979 md->disk->first_minor = minor;
1980 md->disk->fops = &dm_blk_dops;
1981 md->disk->queue = md->queue;
1982 md->disk->private_data = md;
1983 sprintf(md->disk->disk_name, "dm-%d", minor);
1984 add_disk(md->disk);
7e51f257 1985 format_dev_t(md->name, MKDEV(_major, minor));
1da177e4 1986
304f3f6a
MB
1987 md->wq = create_singlethread_workqueue("kdmflush");
1988 if (!md->wq)
1989 goto bad_thread;
1990
32a926da
MP
1991 md->bdev = bdget_disk(md->disk, 0);
1992 if (!md->bdev)
1993 goto bad_bdev;
1994
ba61fdd1 1995 /* Populate the mapping, nobody knows we exist yet */
f32c10b0 1996 spin_lock(&_minor_lock);
ba61fdd1 1997 old_md = idr_replace(&_minor_idr, md, minor);
f32c10b0 1998 spin_unlock(&_minor_lock);
ba61fdd1
JM
1999
2000 BUG_ON(old_md != MINOR_ALLOCED);
2001
1da177e4
LT
2002 return md;
2003
32a926da
MP
2004bad_bdev:
2005 destroy_workqueue(md->wq);
304f3f6a 2006bad_thread:
03022c54 2007 del_gendisk(md->disk);
304f3f6a 2008 put_disk(md->disk);
6ed7ade8 2009bad_disk:
1312f40e 2010 blk_cleanup_queue(md->queue);
6ed7ade8 2011bad_queue:
1da177e4 2012 free_minor(minor);
6ed7ade8 2013bad_minor:
10da4f79 2014 module_put(THIS_MODULE);
6ed7ade8 2015bad_module_get:
1da177e4
LT
2016 kfree(md);
2017 return NULL;
2018}

static void unlock_fs(struct mapped_device *md);

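/*
 * Tear down, in reverse order, everything alloc_dev() set up.  Called
 * from __dm_destroy() once the device is no longer in use.
 */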
static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	unlock_fs(md);
	bdput(md->bdev);
	destroy_workqueue(md->wq);
	if (md->tio_pool)
		mempool_destroy(md->tio_pool);
	if (md->io_pool)
		mempool_destroy(md->io_pool);
	if (md->bs)
		bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

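/*
 * Adopt the mempools that were allocated for the table, unless the md
 * already has a usable set, then release whatever remains in the table.
 */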
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p;

	if (md->io_pool && md->tio_pool && md->bs)
		/* The md already has the necessary mempools. */
		goto out;

	p = dm_table_get_md_mempools(t);
	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);

	md->io_pool = p->io_pool;
	p->io_pool = NULL;
	md->tio_pool = p->tio_pool;
	p->tio_pool = NULL;
	md->bs = p->bs;
	p->bs = NULL;

out:
	/* The mempool bind is complete; the table no longer needs any mempools. */
	dm_table_free_md_mempools(t);
}

/*
 * Deliver any uevents queued for the device and wake up anyone waiting
 * in dm_wait_event().  (Registered on the table as its event callback.)
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

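/*
 * Set the device's new capacity on both the gendisk and the cached inode
 * size of the underlying block device.
 */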
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->bdev->bd_inode->i_mutex);
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->bdev->bd_inode->i_mutex);
}

/*
 * Bind a table to the device.
 *
 * Returns the old map, which the caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	unsigned long flags;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * If the old table wasn't request-based, the queue was not stopped
	 * during suspension, so stop it now to prevent I/O from being mapped
	 * before resume.
	 * This must be done before setting the queue restrictions, because
	 * request-based dm may start running just after they are set.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	write_lock_irqsave(&md->map_lock, flags);
	old_map = md->map;
	md->map = t;
	dm_table_set_restrictions(t, q, limits);
	write_unlock_irqrestore(&md->map_lock, flags);

	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;
	unsigned long flags;

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	write_lock_irqsave(&md->map_lock, flags);
	md->map = NULL;
	write_unlock_irqrestore(&md->map_lock, flags);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	return md->type;
}

/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

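	/* An elevator is already present, so the queue was initialized earlier. */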
	if (md->queue->elevator)
		return 1;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return 0;

	md->queue = q;
	md->saved_make_request_fn = md->queue->make_request_fn;
	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);

	elv_register_queue(md->queue);

	return 1;
}

/*
 * Set up the DM device's request_queue based on md's type.
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");
		return -EINVAL;
	}

	return 0;
}

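/*
 * Look up a mapped_device by dev_t.  Returns NULL if no fully-constructed
 * device with that number exists or if it is being freed or deleted.
 */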
static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;

	might_sleep();

	spin_lock(&_minor_lock);
	map = dm_get_live_table(md);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}

	/*
	 * Rare, but there may still be I/O requests in flight that have yet
	 * to complete.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device
	 * after its state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_sysfs_exit(md);
	dm_table_put(map);
	dm_table_destroy(__unbind(md));
	free_dev(md);
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

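/*
 * Wait until no I/O is in flight on the device.  With TASK_INTERRUPTIBLE
 * the wait can be broken by a signal, in which case -EINTR is returned.
 */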
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

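/*
 * Wait for pending I/O to finish, then send an empty barrier bio through
 * the targets and wait for that to complete as well.
 */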
static void dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);

	bio_init(&md->barrier_bio);
	md->barrier_bio.bi_bdev = md->bdev;
	md->barrier_bio.bi_rw = WRITE_BARRIER;
	__split_and_process_bio(md, &md->barrier_bio);

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}

static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	md->barrier_error = 0;

	dm_flush(md);

	if (!bio_empty_barrier(bio)) {
		__split_and_process_bio(md, bio);
		/*
		 * If the request isn't supported, don't waste time with
		 * the second flush.
		 */
		if (md->barrier_error != -EOPNOTSUPP)
			dm_flush(md);
	}

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, md->barrier_error);
	else {
		spin_lock_irq(&md->deferred_lock);
		bio_list_add_head(&md->deferred, bio);
		spin_unlock_irq(&md->deferred_lock);
	}
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

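		/*
		 * Drop io_lock across the submission: processing the bio may
		 * re-enter this device's request function, which takes it.
		 */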
		up_write(&md->io_lock);

		if (dm_request_based(md))
			generic_make_request(c);
		else {
			if (c->bi_rw & REQ_HARDBARRIER)
				process_barrier(md, c);
			else
				__split_and_process_bio(md, c);
		}

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}

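/*
 * Allow I/O processing again and kick the workqueue so that deferred
 * bios are dispatched to the targets.
 */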
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}

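/*
 * Record which of a target's flush clones this request is, so the target
 * can distinguish them in its map function.
 */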
static void dm_rq_set_target_request_nr(struct request *clone, unsigned request_nr)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	tio->info.target_request_nr = request_nr;
}

/* Issue barrier requests to targets and wait for their completion. */
static int dm_rq_barrier(struct mapped_device *md)
{
	int i, j;
	struct dm_table *map = dm_get_live_table(md);
	unsigned num_targets = dm_table_get_num_targets(map);
	struct dm_target *ti;
	struct request *clone;

	md->barrier_error = 0;

	for (i = 0; i < num_targets; i++) {
		ti = dm_table_get_target(map, i);
		for (j = 0; j < ti->num_flush_requests; j++) {
			clone = clone_rq(md->flush_request, md, GFP_NOIO);
			dm_rq_set_target_request_nr(clone, j);
			atomic_inc(&md->pending[rq_data_dir(clone)]);
			map_request(ti, clone, md);
		}
	}

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
	dm_table_put(map);

	return md->barrier_error;
}

static void dm_rq_barrier_work(struct work_struct *work)
{
	int error;
	struct mapped_device *md = container_of(work, struct mapped_device,
						barrier_work);
	struct request_queue *q = md->queue;
	struct request *rq;
	unsigned long flags;

	/*
	 * Take a reference on the md here and release it last, so that the
	 * md can't be deleted by a device opener while the barrier request
	 * is completing.
	 */
	dm_get(md);

	error = dm_rq_barrier(md);

	rq = md->flush_request;
	md->flush_request = NULL;

	if (error == DM_ENDIO_REQUEUE) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_requeue_request(q, rq);
		spin_unlock_irqrestore(q->queue_lock, flags);
	} else
		blk_end_request_all(rq, error);

	blk_run_queue(q);

	dm_put(md);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	r = dm_calculate_queue_limits(table, &limits);
	if (r) {
		map = ERR_PTR(r);
		goto out;
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_live_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers, i.e. no one may be executing
	 * __split_and_process_bio, which is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock.  To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq).  flush_workqueue will wait until
	 * dm_wq_work exits, and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	/*
	 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
	 * can be kicked until md->queue is stopped.  So stop md->queue before
	 * flushing md->wq.
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now.  There is no request-processing activity.  All new
	 * requests are being added to md->deferred list.
	 */

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out:
	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

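/*
 * Resume a suspended device: resume the targets, dispatch any deferred
 * I/O and unfreeze the filesystem.
 */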
int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))
		goto out;

	map = dm_get_live_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);
	r = 0;
out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
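/*
 * Send a uevent for the device.  A non-zero cookie is passed to userspace
 * in the DM_COOKIE environment variable so that the event can be matched
 * with the operation that triggered it.
 */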
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c,
 * so use this check to verify that kobj is part of an md structure.
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

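/*
 * Allocate the mempools and bioset a mapped device of the given type
 * needs: per-bio state for bio-based devices, per-request state for
 * request-based ones.
 */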
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
{
	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);

	if (!pools)
		return NULL;

	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
	if (!pools->io_pool)
		goto free_pools_and_out;

	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
	if (!pools->tio_pool)
		goto free_io_pool_and_out;

	pools->bs = (type == DM_TYPE_BIO_BASED) ?
		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
	if (!pools->bs)
		goto free_tio_pool_and_out;

	return pools;

free_tio_pool_and_out:
	mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
	mempool_destroy(pools->io_pool);

free_pools_and_out:
	kfree(pools);

	return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");