Linux 3.16-rc3
[deliverable/linux.git] / drivers / md / dm.c
1da177e4
LT
1/*
2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
784aae73 3 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
1da177e4
LT
4 *
5 * This file is released under the GPL.
6 */
7
8#include "dm.h"
51e5b2bd 9#include "dm-uevent.h"
1da177e4
LT
10
11#include <linux/init.h>
12#include <linux/module.h>
48c9c27b 13#include <linux/mutex.h>
1da177e4
LT
14#include <linux/moduleparam.h>
15#include <linux/blkpg.h>
16#include <linux/bio.h>
1da177e4
LT
17#include <linux/mempool.h>
18#include <linux/slab.h>
19#include <linux/idr.h>
3ac51e74 20#include <linux/hdreg.h>
3f77316d 21#include <linux/delay.h>
55782138
LZ
22
23#include <trace/events/block.h>
1da177e4 24
72d94861
AK
25#define DM_MSG_PREFIX "core"
26
71a16736
NK
27#ifdef CONFIG_PRINTK
28/*
29 * ratelimit state to be used in DMXXX_LIMIT().
30 */
31DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
32 DEFAULT_RATELIMIT_INTERVAL,
33 DEFAULT_RATELIMIT_BURST);
34EXPORT_SYMBOL(dm_ratelimit_state);
35#endif
36
60935eb2
MB
37/*
38 * Cookies are numeric values sent with CHANGE and REMOVE
39 * uevents while resuming, removing or renaming the device.
40 */
41#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
42#define DM_COOKIE_LENGTH 24
43
1da177e4
LT
44static const char *_name = DM_NAME;
45
46static unsigned int major = 0;
47static unsigned int _major = 0;
48
d15b774c
AK
49static DEFINE_IDR(_minor_idr);
50
f32c10b0 51static DEFINE_SPINLOCK(_minor_lock);
2c140a24
MP
52
53static void do_deferred_remove(struct work_struct *w);
54
55static DECLARE_WORK(deferred_remove_work, do_deferred_remove);
56
1da177e4 57/*
8fbf26ad 58 * For bio-based dm.
1da177e4
LT
59 * One of these is allocated per bio.
60 */
61struct dm_io {
62 struct mapped_device *md;
63 int error;
1da177e4 64 atomic_t io_count;
6ae2fa67 65 struct bio *bio;
3eaf840e 66 unsigned long start_time;
f88fb981 67 spinlock_t endio_lock;
fd2ed4d2 68 struct dm_stats_aux stats_aux;
1da177e4
LT
69};
70
8fbf26ad
KU
71/*
72 * For request-based dm.
73 * One of these is allocated per request.
74 */
75struct dm_rq_target_io {
76 struct mapped_device *md;
77 struct dm_target *ti;
78 struct request *orig, clone;
79 int error;
80 union map_info info;
81};
82
83/*
94818742
KO
84 * For request-based dm - the bio clones we allocate are embedded in these
85 * structs.
86 *
87 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
88 * the bioset is created - this means the bio has to come at the end of the
89 * struct.
8fbf26ad
KU
90 */
91struct dm_rq_clone_bio_info {
92 struct bio *orig;
cec47e3d 93 struct dm_rq_target_io *tio;
94818742 94 struct bio clone;
8fbf26ad
KU
95};
96
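/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * because 'clone' is the last member above, a bioset whose front_pad
 * reserves room for the wrapper in front of each bio hands out bios that
 * sit at the tail of a private dm_rq_clone_bio_info, so container_of()
 * can recover the wrapper from a bare bio pointer.  Assuming a bioset
 * created elsewhere along these lines:
 *
 *	bs = bioset_create(pool_size,
 *			   offsetof(struct dm_rq_clone_bio_info, clone));
 *
 * the lookup used by end_clone_bio() further below is simply:
 *
 *	bio = bio_alloc_bioset(GFP_NOIO, nr_iovecs, bs);
 *	info = container_of(bio, struct dm_rq_clone_bio_info, clone);
 */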
cec47e3d
KU
97union map_info *dm_get_rq_mapinfo(struct request *rq)
98{
99 if (rq && rq->end_io_data)
100 return &((struct dm_rq_target_io *)rq->end_io_data)->info;
101 return NULL;
102}
103EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
104
ba61fdd1
JM
105#define MINOR_ALLOCED ((void *)-1)
106
1da177e4
LT
107/*
108 * Bits for the md->flags field.
109 */
1eb787ec 110#define DMF_BLOCK_IO_FOR_SUSPEND 0
1da177e4 111#define DMF_SUSPENDED 1
aa8d7c2f 112#define DMF_FROZEN 2
fba9f90e 113#define DMF_FREEING 3
5c6bd75d 114#define DMF_DELETING 4
2e93ccc1 115#define DMF_NOFLUSH_SUSPENDING 5
d5b9dd04 116#define DMF_MERGE_IS_OPTIONAL 6
2c140a24 117#define DMF_DEFERRED_REMOVE 7
1da177e4 118
83d5e5b0
MP
119/*
120 * A dummy definition to make RCU happy.
121 * struct dm_table should never be dereferenced in this file.
122 */
123struct dm_table {
124 int undefined__;
125};
126
304f3f6a
MB
127/*
128 * Work processed by per-device workqueue.
129 */
1da177e4 130struct mapped_device {
83d5e5b0 131 struct srcu_struct io_barrier;
e61290a4 132 struct mutex suspend_lock;
1da177e4 133 atomic_t holders;
5c6bd75d 134 atomic_t open_count;
1da177e4 135
2a7faeb1
MP
136 /*
137 * The current mapping.
138 * Use dm_get_live_table{_fast} or take suspend_lock for
139 * dereference.
140 */
141 struct dm_table *map;
142
1da177e4
LT
143 unsigned long flags;
144
165125e1 145 struct request_queue *queue;
a5664dad 146 unsigned type;
4a0b4ddf 147 /* Protect queue and type against concurrent access. */
a5664dad
MS
148 struct mutex type_lock;
149
36a0456f
AK
150 struct target_type *immutable_target_type;
151
1da177e4 152 struct gendisk *disk;
7e51f257 153 char name[16];
1da177e4
LT
154
155 void *interface_ptr;
156
157 /*
158 * A list of ios that arrived while we were suspended.
159 */
316d315b 160 atomic_t pending[2];
1da177e4 161 wait_queue_head_t wait;
53d5914f 162 struct work_struct work;
74859364 163 struct bio_list deferred;
022c2611 164 spinlock_t deferred_lock;
1da177e4 165
af7e466a 166 /*
29e4013d 167 * Processing queue (flush)
304f3f6a
MB
168 */
169 struct workqueue_struct *wq;
170
1da177e4
LT
171 /*
172 * io objects are allocated from here.
173 */
174 mempool_t *io_pool;
1da177e4 175
9faf400f
SB
176 struct bio_set *bs;
177
1da177e4
LT
178 /*
179 * Event handling.
180 */
181 atomic_t event_nr;
182 wait_queue_head_t eventq;
7a8c3d3b
MA
183 atomic_t uevent_seq;
184 struct list_head uevent_list;
185 spinlock_t uevent_lock; /* Protect access to uevent_list */
1da177e4
LT
186
187 /*
188 * freeze/thaw support require holding onto a super block
189 */
190 struct super_block *frozen_sb;
db8fef4f 191 struct block_device *bdev;
3ac51e74
DW
192
193 /* forced geometry settings */
194 struct hd_geometry geometry;
784aae73 195
2995fa78
MP
196 /* kobject and completion */
197 struct dm_kobject_holder kobj_holder;
be35f486 198
d87f4c14
TH
199 /* zero-length flush that will be cloned and submitted to targets */
200 struct bio flush_bio;
fd2ed4d2
MP
201
202 struct dm_stats stats;
1da177e4
LT
203};
204
e6ee8c0b
KU
205/*
206 * For mempools pre-allocation at the table loading time.
207 */
208struct dm_md_mempools {
209 mempool_t *io_pool;
e6ee8c0b
KU
210 struct bio_set *bs;
211};
212
6cfa5857
MS
213#define RESERVED_BIO_BASED_IOS 16
214#define RESERVED_REQUEST_BASED_IOS 256
f4790826 215#define RESERVED_MAX_IOS 1024
e18b890b 216static struct kmem_cache *_io_cache;
8fbf26ad 217static struct kmem_cache *_rq_tio_cache;
94818742 218
e8603136
MS
219/*
220 * Bio-based DM's mempools' reserved IOs set by the user.
221 */
222static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;
223
f4790826
MS
224/*
225 * Request-based DM's mempools' reserved IOs set by the user.
226 */
227static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;
228
229static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
230 unsigned def, unsigned max)
231{
232 unsigned ios = ACCESS_ONCE(*reserved_ios);
233 unsigned modified_ios = 0;
234
235 if (!ios)
236 modified_ios = def;
237 else if (ios > max)
238 modified_ios = max;
239
240 if (modified_ios) {
241 (void)cmpxchg(reserved_ios, ios, modified_ios);
242 ios = modified_ios;
243 }
244
245 return ios;
246}
247
e8603136
MS
248unsigned dm_get_reserved_bio_based_ios(void)
249{
250 return __dm_get_reserved_ios(&reserved_bio_based_ios,
251 RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
252}
253EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);
254
f4790826
MS
255unsigned dm_get_reserved_rq_based_ios(void)
256{
257 return __dm_get_reserved_ios(&reserved_rq_based_ios,
258 RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
259}
260EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);
261
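/*
 * Editor's note -- worked example, not part of the original source:
 * the two getters above clamp whatever value the user last stored in
 * reserved_bio_based_ios / reserved_rq_based_ios (set by the user via
 * module parameters outside this excerpt).  For the bio-based getter:
 *
 *	reserved_bio_based_ios == 0    ->  returns 16   (RESERVED_BIO_BASED_IOS)
 *	reserved_bio_based_ios == 64   ->  returns 64   (left unchanged)
 *	reserved_bio_based_ios == 4096 ->  returns 1024 (RESERVED_MAX_IOS)
 *
 * In the first and last cases the corrected value is also written back
 * with cmpxchg(), so subsequent readers observe the clamped setting.
 */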
1da177e4
LT
262static int __init local_init(void)
263{
51157b4a 264 int r = -ENOMEM;
1da177e4 265
1da177e4 266 /* allocate a slab for the dm_ios */
028867ac 267 _io_cache = KMEM_CACHE(dm_io, 0);
1da177e4 268 if (!_io_cache)
51157b4a 269 return r;
1da177e4 270
8fbf26ad
KU
271 _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
272 if (!_rq_tio_cache)
dba14160 273 goto out_free_io_cache;
8fbf26ad 274
51e5b2bd 275 r = dm_uevent_init();
51157b4a 276 if (r)
23e5083b 277 goto out_free_rq_tio_cache;
51e5b2bd 278
1da177e4
LT
279 _major = major;
280 r = register_blkdev(_major, _name);
51157b4a
KU
281 if (r < 0)
282 goto out_uevent_exit;
1da177e4
LT
283
284 if (!_major)
285 _major = r;
286
287 return 0;
51157b4a
KU
288
289out_uevent_exit:
290 dm_uevent_exit();
8fbf26ad
KU
291out_free_rq_tio_cache:
292 kmem_cache_destroy(_rq_tio_cache);
51157b4a
KU
293out_free_io_cache:
294 kmem_cache_destroy(_io_cache);
295
296 return r;
1da177e4
LT
297}
298
299static void local_exit(void)
300{
2c140a24
MP
301 flush_scheduled_work();
302
8fbf26ad 303 kmem_cache_destroy(_rq_tio_cache);
1da177e4 304 kmem_cache_destroy(_io_cache);
00d59405 305 unregister_blkdev(_major, _name);
51e5b2bd 306 dm_uevent_exit();
1da177e4
LT
307
308 _major = 0;
309
310 DMINFO("cleaned up");
311}
312
b9249e55 313static int (*_inits[])(void) __initdata = {
1da177e4
LT
314 local_init,
315 dm_target_init,
316 dm_linear_init,
317 dm_stripe_init,
952b3557 318 dm_io_init,
945fa4d2 319 dm_kcopyd_init,
1da177e4 320 dm_interface_init,
fd2ed4d2 321 dm_statistics_init,
1da177e4
LT
322};
323
b9249e55 324static void (*_exits[])(void) = {
1da177e4
LT
325 local_exit,
326 dm_target_exit,
327 dm_linear_exit,
328 dm_stripe_exit,
952b3557 329 dm_io_exit,
945fa4d2 330 dm_kcopyd_exit,
1da177e4 331 dm_interface_exit,
fd2ed4d2 332 dm_statistics_exit,
1da177e4
LT
333};
334
335static int __init dm_init(void)
336{
337 const int count = ARRAY_SIZE(_inits);
338
339 int r, i;
340
341 for (i = 0; i < count; i++) {
342 r = _inits[i]();
343 if (r)
344 goto bad;
345 }
346
347 return 0;
348
349 bad:
350 while (i--)
351 _exits[i]();
352
353 return r;
354}
355
356static void __exit dm_exit(void)
357{
358 int i = ARRAY_SIZE(_exits);
359
360 while (i--)
361 _exits[i]();
d15b774c
AK
362
363 /*
364 * Should be empty by this point.
365 */
d15b774c 366 idr_destroy(&_minor_idr);
1da177e4
LT
367}
368
369/*
370 * Block device functions
371 */
432a212c
MA
372int dm_deleting_md(struct mapped_device *md)
373{
374 return test_bit(DMF_DELETING, &md->flags);
375}
376
fe5f9f2c 377static int dm_blk_open(struct block_device *bdev, fmode_t mode)
1da177e4
LT
378{
379 struct mapped_device *md;
380
fba9f90e
JM
381 spin_lock(&_minor_lock);
382
fe5f9f2c 383 md = bdev->bd_disk->private_data;
fba9f90e
JM
384 if (!md)
385 goto out;
386
5c6bd75d 387 if (test_bit(DMF_FREEING, &md->flags) ||
432a212c 388 dm_deleting_md(md)) {
fba9f90e
JM
389 md = NULL;
390 goto out;
391 }
392
1da177e4 393 dm_get(md);
5c6bd75d 394 atomic_inc(&md->open_count);
fba9f90e
JM
395
396out:
397 spin_unlock(&_minor_lock);
398
399 return md ? 0 : -ENXIO;
1da177e4
LT
400}
401
db2a144b 402static void dm_blk_close(struct gendisk *disk, fmode_t mode)
1da177e4 403{
fe5f9f2c 404 struct mapped_device *md = disk->private_data;
6e9624b8 405
4a1aeb98
MB
406 spin_lock(&_minor_lock);
407
2c140a24
MP
408 if (atomic_dec_and_test(&md->open_count) &&
409 (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
410 schedule_work(&deferred_remove_work);
411
1da177e4 412 dm_put(md);
4a1aeb98
MB
413
414 spin_unlock(&_minor_lock);
1da177e4
LT
415}
416
5c6bd75d
AK
417int dm_open_count(struct mapped_device *md)
418{
419 return atomic_read(&md->open_count);
420}
421
422/*
423 * Guarantees nothing is using the device before it's deleted.
424 */
2c140a24 425int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
5c6bd75d
AK
426{
427 int r = 0;
428
429 spin_lock(&_minor_lock);
430
2c140a24 431 if (dm_open_count(md)) {
5c6bd75d 432 r = -EBUSY;
2c140a24
MP
433 if (mark_deferred)
434 set_bit(DMF_DEFERRED_REMOVE, &md->flags);
435 } else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
436 r = -EEXIST;
5c6bd75d
AK
437 else
438 set_bit(DMF_DELETING, &md->flags);
439
440 spin_unlock(&_minor_lock);
441
442 return r;
443}
444
2c140a24
MP
445int dm_cancel_deferred_remove(struct mapped_device *md)
446{
447 int r = 0;
448
449 spin_lock(&_minor_lock);
450
451 if (test_bit(DMF_DELETING, &md->flags))
452 r = -EBUSY;
453 else
454 clear_bit(DMF_DEFERRED_REMOVE, &md->flags);
455
456 spin_unlock(&_minor_lock);
457
458 return r;
459}
460
461static void do_deferred_remove(struct work_struct *w)
462{
463 dm_deferred_remove();
464}
465
fd2ed4d2
MP
466sector_t dm_get_size(struct mapped_device *md)
467{
468 return get_capacity(md->disk);
469}
470
9974fa2c
MS
471struct request_queue *dm_get_md_queue(struct mapped_device *md)
472{
473 return md->queue;
474}
475
fd2ed4d2
MP
476struct dm_stats *dm_get_stats(struct mapped_device *md)
477{
478 return &md->stats;
479}
480
3ac51e74
DW
481static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
482{
483 struct mapped_device *md = bdev->bd_disk->private_data;
484
485 return dm_get_geometry(md, geo);
486}
487
fe5f9f2c 488static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
aa129a22
MB
489 unsigned int cmd, unsigned long arg)
490{
fe5f9f2c 491 struct mapped_device *md = bdev->bd_disk->private_data;
83d5e5b0 492 int srcu_idx;
6c182cd8 493 struct dm_table *map;
aa129a22
MB
494 struct dm_target *tgt;
495 int r = -ENOTTY;
496
6c182cd8 497retry:
83d5e5b0
MP
498 map = dm_get_live_table(md, &srcu_idx);
499
aa129a22
MB
500 if (!map || !dm_table_get_size(map))
501 goto out;
502
503 /* We only support devices that have a single target */
504 if (dm_table_get_num_targets(map) != 1)
505 goto out;
506
507 tgt = dm_table_get_target(map, 0);
508
4f186f8b 509 if (dm_suspended_md(md)) {
aa129a22
MB
510 r = -EAGAIN;
511 goto out;
512 }
513
514 if (tgt->type->ioctl)
647b3d00 515 r = tgt->type->ioctl(tgt, cmd, arg);
aa129a22
MB
516
517out:
83d5e5b0 518 dm_put_live_table(md, srcu_idx);
aa129a22 519
6c182cd8
HR
520 if (r == -ENOTCONN) {
521 msleep(10);
522 goto retry;
523 }
524
aa129a22
MB
525 return r;
526}
527
028867ac 528static struct dm_io *alloc_io(struct mapped_device *md)
1da177e4
LT
529{
530 return mempool_alloc(md->io_pool, GFP_NOIO);
531}
532
028867ac 533static void free_io(struct mapped_device *md, struct dm_io *io)
1da177e4
LT
534{
535 mempool_free(io, md->io_pool);
536}
537
028867ac 538static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
1da177e4 539{
dba14160 540 bio_put(&tio->clone);
1da177e4
LT
541}
542
08885643
KU
543static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
544 gfp_t gfp_mask)
cec47e3d 545{
5f015204 546 return mempool_alloc(md->io_pool, gfp_mask);
cec47e3d
KU
547}
548
549static void free_rq_tio(struct dm_rq_target_io *tio)
550{
5f015204 551 mempool_free(tio, tio->md->io_pool);
cec47e3d
KU
552}
553
90abb8c4
KU
554static int md_in_flight(struct mapped_device *md)
555{
556 return atomic_read(&md->pending[READ]) +
557 atomic_read(&md->pending[WRITE]);
558}
559
3eaf840e
JNN
560static void start_io_acct(struct dm_io *io)
561{
562 struct mapped_device *md = io->md;
fd2ed4d2 563 struct bio *bio = io->bio;
c9959059 564 int cpu;
fd2ed4d2 565 int rw = bio_data_dir(bio);
3eaf840e
JNN
566
567 io->start_time = jiffies;
568
074a7aca
TH
569 cpu = part_stat_lock();
570 part_round_stats(cpu, &dm_disk(md)->part0);
571 part_stat_unlock();
1e9bb880
SL
572 atomic_set(&dm_disk(md)->part0.in_flight[rw],
573 atomic_inc_return(&md->pending[rw]));
fd2ed4d2
MP
574
575 if (unlikely(dm_stats_used(&md->stats)))
4f024f37 576 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
fd2ed4d2 577 bio_sectors(bio), false, 0, &io->stats_aux);
3eaf840e
JNN
578}
579
d221d2e7 580static void end_io_acct(struct dm_io *io)
3eaf840e
JNN
581{
582 struct mapped_device *md = io->md;
583 struct bio *bio = io->bio;
584 unsigned long duration = jiffies - io->start_time;
c9959059 585 int pending, cpu;
3eaf840e
JNN
586 int rw = bio_data_dir(bio);
587
074a7aca
TH
588 cpu = part_stat_lock();
589 part_round_stats(cpu, &dm_disk(md)->part0);
590 part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
591 part_stat_unlock();
3eaf840e 592
fd2ed4d2 593 if (unlikely(dm_stats_used(&md->stats)))
4f024f37 594 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
fd2ed4d2
MP
595 bio_sectors(bio), true, duration, &io->stats_aux);
596
af7e466a
MP
597 /*
598 * After this is decremented the bio must not be touched if it is
d87f4c14 599 * a flush.
af7e466a 600 */
1e9bb880
SL
601 pending = atomic_dec_return(&md->pending[rw]);
602 atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
316d315b 603 pending += atomic_read(&md->pending[rw^0x1]);
3eaf840e 604
d221d2e7
MP
605 /* nudge anyone waiting on suspend queue */
606 if (!pending)
607 wake_up(&md->wait);
3eaf840e
JNN
608}
609
1da177e4
LT
610/*
611 * Add the bio to the list of deferred io.
612 */
92c63902 613static void queue_io(struct mapped_device *md, struct bio *bio)
1da177e4 614{
05447420 615 unsigned long flags;
1da177e4 616
05447420 617 spin_lock_irqsave(&md->deferred_lock, flags);
1da177e4 618 bio_list_add(&md->deferred, bio);
05447420 619 spin_unlock_irqrestore(&md->deferred_lock, flags);
6a8736d1 620 queue_work(md->wq, &md->work);
1da177e4
LT
621}
622
623/*
624 * Everyone (including functions in this file) should use this
625 * function to access the md->map field, and make sure they call
83d5e5b0 626 * dm_put_live_table() when finished.
1da177e4 627 */
83d5e5b0 628struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
1da177e4 629{
83d5e5b0
MP
630 *srcu_idx = srcu_read_lock(&md->io_barrier);
631
632 return srcu_dereference(md->map, &md->io_barrier);
633}
1da177e4 634
83d5e5b0
MP
635void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
636{
637 srcu_read_unlock(&md->io_barrier, srcu_idx);
638}
639
640void dm_sync_table(struct mapped_device *md)
641{
642 synchronize_srcu(&md->io_barrier);
643 synchronize_rcu_expedited();
644}
645
646/*
647 * A fast alternative to dm_get_live_table/dm_put_live_table.
648 * The caller must not block between these two functions.
649 */
650static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
651{
652 rcu_read_lock();
653 return rcu_dereference(md->map);
654}
1da177e4 655
83d5e5b0
MP
656static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
657{
658 rcu_read_unlock();
1da177e4
LT
659}
660
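/*
 * Editor's note -- illustrative sketch, not part of the original source:
 * the canonical pattern for any code that may sleep while looking at
 * the live table is get/use/put with the SRCU index threaded through:
 *
 *	static sector_t example_live_table_size(struct mapped_device *md)
 *	{
 *		int srcu_idx;
 *		sector_t size = 0;
 *		struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *		if (map)
 *			size = dm_table_get_size(map);
 *		dm_put_live_table(md, srcu_idx);
 *
 *		return size;
 *	}
 *
 * The _fast variants above trade SRCU for plain RCU and are only safe
 * when the caller does not block between the get and the put.
 */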
3ac51e74
DW
661/*
662 * Get the geometry associated with a dm device
663 */
664int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
665{
666 *geo = md->geometry;
667
668 return 0;
669}
670
671/*
672 * Set the geometry of a device.
673 */
674int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
675{
676 sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;
677
678 if (geo->start > sz) {
679 DMWARN("Start sector is beyond the geometry limits.");
680 return -EINVAL;
681 }
682
683 md->geometry = *geo;
684
685 return 0;
686}
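/*
 * Editor's note -- worked example, not part of the original source:
 * with a typical fake geometry of 1024 cylinders, 255 heads and
 * 63 sectors per track,
 *
 *	sz = 1024 * 255 * 63 = 16450560 sectors  (about 7.8 GiB)
 *
 * so dm_set_geometry() accepts any geo->start up to that value and
 * rejects a start sector beyond the capacity the geometry describes.
 */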
687
1da177e4
LT
688/*-----------------------------------------------------------------
689 * CRUD START:
690 * A more elegant solution is in the works that uses the queue
691 * merge fn; unfortunately there are a couple of changes to
692 * the block layer that I want to make for this. So in the
693 * interests of getting something for people to use I give
694 * you this clearly demarcated crap.
695 *---------------------------------------------------------------*/
696
2e93ccc1
KU
697static int __noflush_suspending(struct mapped_device *md)
698{
699 return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
700}
701
1da177e4
LT
702/*
703 * Decrements the number of outstanding ios that a bio has been
704 * cloned into, completing the original io if necessary.
705 */
858119e1 706static void dec_pending(struct dm_io *io, int error)
1da177e4 707{
2e93ccc1 708 unsigned long flags;
b35f8caa
MB
709 int io_error;
710 struct bio *bio;
711 struct mapped_device *md = io->md;
2e93ccc1
KU
712
713 /* Push-back supersedes any I/O errors */
f88fb981
KU
714 if (unlikely(error)) {
715 spin_lock_irqsave(&io->endio_lock, flags);
716 if (!(io->error > 0 && __noflush_suspending(md)))
717 io->error = error;
718 spin_unlock_irqrestore(&io->endio_lock, flags);
719 }
1da177e4
LT
720
721 if (atomic_dec_and_test(&io->io_count)) {
2e93ccc1
KU
722 if (io->error == DM_ENDIO_REQUEUE) {
723 /*
724 * Target requested pushing back the I/O.
2e93ccc1 725 */
022c2611 726 spin_lock_irqsave(&md->deferred_lock, flags);
6a8736d1
TH
727 if (__noflush_suspending(md))
728 bio_list_add_head(&md->deferred, io->bio);
729 else
2e93ccc1
KU
730 /* noflush suspend was interrupted. */
731 io->error = -EIO;
022c2611 732 spin_unlock_irqrestore(&md->deferred_lock, flags);
2e93ccc1
KU
733 }
734
b35f8caa
MB
735 io_error = io->error;
736 bio = io->bio;
6a8736d1
TH
737 end_io_acct(io);
738 free_io(md, io);
739
740 if (io_error == DM_ENDIO_REQUEUE)
741 return;
2e93ccc1 742
4f024f37 743 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
af7e466a 744 /*
6a8736d1
TH
745 * Preflush done for flush with data, reissue
746 * without REQ_FLUSH.
af7e466a 747 */
6a8736d1
TH
748 bio->bi_rw &= ~REQ_FLUSH;
749 queue_io(md, bio);
af7e466a 750 } else {
b372d360 751 /* done with normal IO or empty flush */
0a82a8d1 752 trace_block_bio_complete(md->queue, bio, io_error);
b372d360 753 bio_endio(bio, io_error);
b35f8caa 754 }
1da177e4
LT
755 }
756}
757
7eee4ae2
MS
758static void disable_write_same(struct mapped_device *md)
759{
760 struct queue_limits *limits = dm_get_queue_limits(md);
761
762 /* device doesn't really support WRITE SAME, disable it */
763 limits->max_write_same_sectors = 0;
764}
765
6712ecf8 766static void clone_endio(struct bio *bio, int error)
1da177e4
LT
767{
768 int r = 0;
bfc6d41c 769 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
b35f8caa 770 struct dm_io *io = tio->io;
9faf400f 771 struct mapped_device *md = tio->io->md;
1da177e4
LT
772 dm_endio_fn endio = tio->ti->type->end_io;
773
1da177e4
LT
774 if (!bio_flagged(bio, BIO_UPTODATE) && !error)
775 error = -EIO;
776
777 if (endio) {
7de3ee57 778 r = endio(tio->ti, bio, error);
2e93ccc1
KU
779 if (r < 0 || r == DM_ENDIO_REQUEUE)
780 /*
781 * error and requeue request are handled
782 * in dec_pending().
783 */
1da177e4 784 error = r;
45cbcd79
KU
785 else if (r == DM_ENDIO_INCOMPLETE)
786 /* The target will handle the io */
6712ecf8 787 return;
45cbcd79
KU
788 else if (r) {
789 DMWARN("unimplemented target endio return value: %d", r);
790 BUG();
791 }
1da177e4
LT
792 }
793
7eee4ae2
MS
794 if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
795 !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
796 disable_write_same(md);
797
9faf400f 798 free_tio(md, tio);
b35f8caa 799 dec_pending(io, error);
1da177e4
LT
800}
801
cec47e3d
KU
802/*
803 * Partial completion handling for request-based dm
804 */
805static void end_clone_bio(struct bio *clone, int error)
806{
bfc6d41c
MP
807 struct dm_rq_clone_bio_info *info =
808 container_of(clone, struct dm_rq_clone_bio_info, clone);
cec47e3d
KU
809 struct dm_rq_target_io *tio = info->tio;
810 struct bio *bio = info->orig;
4f024f37 811 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
cec47e3d
KU
812
813 bio_put(clone);
814
815 if (tio->error)
816 /*
817 * An error has already been detected on the request.
818 * Once an error has occurred, just let clone->end_io() handle
819 * the remainder.
820 */
821 return;
822 else if (error) {
823 /*
824 * Don't report the error to the upper layer yet.
825 * The error handling decision is made by the target driver
826 * when the request is completed.
827 */
828 tio->error = error;
829 return;
830 }
831
832 /*
833 * I/O for the bio successfully completed.
834 * Report the data completion to the upper layer.
835 */
836
837 /*
838 * bios are processed from the head of the list.
839 * So the completing bio should always be rq->bio.
840 * If it's not, something is wrong.
841 */
842 if (tio->orig->bio != bio)
843 DMERR("bio completion is going in the middle of the request");
844
845 /*
846 * Update the original request.
847 * Do not use blk_end_request() here, because it may complete
848 * the original request before the clone, and break the ordering.
849 */
850 blk_update_request(tio->orig, 0, nr_bytes);
851}
852
853/*
854 * Don't touch any member of the md after calling this function because
855 * the md may be freed in dm_put() at the end of this function.
856 * Or do dm_get() before calling this function and dm_put() later.
857 */
b4324fee 858static void rq_completed(struct mapped_device *md, int rw, int run_queue)
cec47e3d 859{
b4324fee 860 atomic_dec(&md->pending[rw]);
cec47e3d
KU
861
862 /* nudge anyone waiting on suspend queue */
b4324fee 863 if (!md_in_flight(md))
cec47e3d
KU
864 wake_up(&md->wait);
865
a8c32a5c
JA
866 /*
867 * Run this off this callpath, as drivers could invoke end_io while
868 * inside their request_fn (and holding the queue lock). Calling
869 * back into ->request_fn() could deadlock attempting to grab the
870 * queue lock again.
871 */
cec47e3d 872 if (run_queue)
a8c32a5c 873 blk_run_queue_async(md->queue);
cec47e3d
KU
874
875 /*
876 * dm_put() must be at the end of this function. See the comment above
877 */
878 dm_put(md);
879}
880
a77e28c7
KU
881static void free_rq_clone(struct request *clone)
882{
883 struct dm_rq_target_io *tio = clone->end_io_data;
884
885 blk_rq_unprep_clone(clone);
886 free_rq_tio(tio);
887}
888
980691e5
KU
889/*
890 * Complete the clone and the original request.
891 * Must be called without queue lock.
892 */
893static void dm_end_request(struct request *clone, int error)
894{
895 int rw = rq_data_dir(clone);
896 struct dm_rq_target_io *tio = clone->end_io_data;
897 struct mapped_device *md = tio->md;
898 struct request *rq = tio->orig;
899
29e4013d 900 if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
980691e5
KU
901 rq->errors = clone->errors;
902 rq->resid_len = clone->resid_len;
903
904 if (rq->sense)
905 /*
906 * We are using the sense buffer of the original
907 * request.
908 * So setting the length of the sense data is enough.
909 */
910 rq->sense_len = clone->sense_len;
911 }
912
913 free_rq_clone(clone);
29e4013d
TH
914 blk_end_request_all(rq, error);
915 rq_completed(md, rw, true);
980691e5
KU
916}
917
cec47e3d
KU
918static void dm_unprep_request(struct request *rq)
919{
920 struct request *clone = rq->special;
cec47e3d
KU
921
922 rq->special = NULL;
923 rq->cmd_flags &= ~REQ_DONTPREP;
924
a77e28c7 925 free_rq_clone(clone);
cec47e3d
KU
926}
927
928/*
929 * Requeue the original request of a clone.
930 */
931void dm_requeue_unmapped_request(struct request *clone)
932{
b4324fee 933 int rw = rq_data_dir(clone);
cec47e3d
KU
934 struct dm_rq_target_io *tio = clone->end_io_data;
935 struct mapped_device *md = tio->md;
936 struct request *rq = tio->orig;
937 struct request_queue *q = rq->q;
938 unsigned long flags;
939
940 dm_unprep_request(rq);
941
942 spin_lock_irqsave(q->queue_lock, flags);
cec47e3d
KU
943 blk_requeue_request(q, rq);
944 spin_unlock_irqrestore(q->queue_lock, flags);
945
b4324fee 946 rq_completed(md, rw, 0);
cec47e3d
KU
947}
948EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
949
950static void __stop_queue(struct request_queue *q)
951{
952 blk_stop_queue(q);
953}
954
955static void stop_queue(struct request_queue *q)
956{
957 unsigned long flags;
958
959 spin_lock_irqsave(q->queue_lock, flags);
960 __stop_queue(q);
961 spin_unlock_irqrestore(q->queue_lock, flags);
962}
963
964static void __start_queue(struct request_queue *q)
965{
966 if (blk_queue_stopped(q))
967 blk_start_queue(q);
968}
969
970static void start_queue(struct request_queue *q)
971{
972 unsigned long flags;
973
974 spin_lock_irqsave(q->queue_lock, flags);
975 __start_queue(q);
976 spin_unlock_irqrestore(q->queue_lock, flags);
977}
978
11a68244 979static void dm_done(struct request *clone, int error, bool mapped)
cec47e3d 980{
11a68244 981 int r = error;
cec47e3d 982 struct dm_rq_target_io *tio = clone->end_io_data;
ba1cbad9 983 dm_request_endio_fn rq_end_io = NULL;
cec47e3d 984
ba1cbad9
MS
985 if (tio->ti) {
986 rq_end_io = tio->ti->type->rq_end_io;
987
988 if (mapped && rq_end_io)
989 r = rq_end_io(tio->ti, clone, error, &tio->info);
990 }
cec47e3d 991
7eee4ae2
MS
992 if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
993 !clone->q->limits.max_write_same_sectors))
994 disable_write_same(tio->md);
995
11a68244 996 if (r <= 0)
cec47e3d 997 /* The target wants to complete the I/O */
11a68244
KU
998 dm_end_request(clone, r);
999 else if (r == DM_ENDIO_INCOMPLETE)
cec47e3d
KU
1000 /* The target will handle the I/O */
1001 return;
11a68244 1002 else if (r == DM_ENDIO_REQUEUE)
cec47e3d
KU
1003 /* The target wants to requeue the I/O */
1004 dm_requeue_unmapped_request(clone);
1005 else {
11a68244 1006 DMWARN("unimplemented target endio return value: %d", r);
cec47e3d
KU
1007 BUG();
1008 }
1009}
1010
11a68244
KU
1011/*
1012 * Request completion handler for request-based dm
1013 */
1014static void dm_softirq_done(struct request *rq)
1015{
1016 bool mapped = true;
1017 struct request *clone = rq->completion_data;
1018 struct dm_rq_target_io *tio = clone->end_io_data;
1019
1020 if (rq->cmd_flags & REQ_FAILED)
1021 mapped = false;
1022
1023 dm_done(clone, tio->error, mapped);
1024}
1025
cec47e3d
KU
1026/*
1027 * Complete the clone and the original request with the error status
1028 * through softirq context.
1029 */
1030static void dm_complete_request(struct request *clone, int error)
1031{
1032 struct dm_rq_target_io *tio = clone->end_io_data;
1033 struct request *rq = tio->orig;
1034
1035 tio->error = error;
1036 rq->completion_data = clone;
1037 blk_complete_request(rq);
1038}
1039
1040/*
1041 * Complete the not-mapped clone and the original request with the error status
1042 * through softirq context.
1043 * Target's rq_end_io() function isn't called.
1044 * This may be used when the target's map_rq() function fails.
1045 */
1046void dm_kill_unmapped_request(struct request *clone, int error)
1047{
1048 struct dm_rq_target_io *tio = clone->end_io_data;
1049 struct request *rq = tio->orig;
1050
1051 rq->cmd_flags |= REQ_FAILED;
1052 dm_complete_request(clone, error);
1053}
1054EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
1055
1056/*
1057 * Called with the queue lock held
1058 */
1059static void end_clone_request(struct request *clone, int error)
1060{
1061 /*
1062 * Just clean up the information of the queue in which
1063 * the clone was dispatched.
1064 * The clone is *NOT* actually freed here because it is allocated from
1065 * dm's own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
1066 */
1067 __blk_put_request(clone->q, clone);
1068
1069 /*
1070 * Actual request completion is done in a softirq context which doesn't
1071 * hold the queue lock. Otherwise, deadlock could occur because:
1072 * - another request may be submitted by the upper level driver
1073 * of the stacking during the completion
1074 * - the submission which requires queue lock may be done
1075 * against this queue
1076 */
1077 dm_complete_request(clone, error);
1078}
1079
56a67df7
MS
1080/*
1081 * Return maximum size of I/O possible at the supplied sector up to the current
1082 * target boundary.
1083 */
1084static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
1085{
1086 sector_t target_offset = dm_target_offset(ti, sector);
1087
1088 return ti->len - target_offset;
1089}
1090
1091static sector_t max_io_len(sector_t sector, struct dm_target *ti)
1da177e4 1092{
56a67df7 1093 sector_t len = max_io_len_target_boundary(sector, ti);
542f9038 1094 sector_t offset, max_len;
1da177e4
LT
1095
1096 /*
542f9038 1097 * Does the target need to split even further?
1da177e4 1098 */
542f9038
MS
1099 if (ti->max_io_len) {
1100 offset = dm_target_offset(ti, sector);
1101 if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
1102 max_len = sector_div(offset, ti->max_io_len);
1103 else
1104 max_len = offset & (ti->max_io_len - 1);
1105 max_len = ti->max_io_len - max_len;
1106
1107 if (len > max_len)
1108 len = max_len;
1da177e4
LT
1109 }
1110
1111 return len;
1112}
1113
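/*
 * Editor's note -- worked example, not part of the original source:
 * with ti->max_io_len = 8 (a power of two) and an I/O that starts 13
 * sectors into the target, offset & (8 - 1) = 5 and max_len becomes
 * 8 - 5 = 3, so at most 3 sectors are allowed before the next 8-sector
 * boundary at offset 16; len is trimmed to 3 if it was larger.  For a
 * non-power-of-two max_io_len the same remainder is obtained with
 * sector_div() instead of the mask.
 */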
542f9038
MS
1114int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
1115{
1116 if (len > UINT_MAX) {
1117 DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
1118 (unsigned long long)len, UINT_MAX);
1119 ti->error = "Maximum size of target IO is too large";
1120 return -EINVAL;
1121 }
1122
1123 ti->max_io_len = (uint32_t) len;
1124
1125 return 0;
1126}
1127EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);
1128
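/*
 * Editor's note -- illustrative sketch, not part of the original source
 * (example_ctr is a hypothetical target constructor): a target that must
 * never see an I/O crossing a 128-sector internal boundary would call
 * this helper from its constructor:
 *
 *	static int example_ctr(struct dm_target *ti, unsigned argc, char **argv)
 *	{
 *		int r = dm_set_target_max_io_len(ti, 128);
 *
 *		if (r)
 *			return r;
 *		...
 *		return 0;
 *	}
 *
 * On failure ti->error has already been filled in by the helper, so the
 * constructor can simply propagate the error code.  max_io_len() above
 * then trims every bio handed to this target at each 128-sector boundary.
 */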
1dd40c3e
MP
1129/*
1130 * A target may call dm_accept_partial_bio only from the map routine. It is
1131 * allowed for all bio types except REQ_FLUSH.
1132 *
1133 * dm_accept_partial_bio informs the dm that the target only wants to process
1134 * additional n_sectors sectors of the bio and the rest of the data should be
1135 * sent in the next bio.
1136 *
1137 * A diagram that explains the arithmetic:
1138 * +--------------------+---------------+-------+
1139 * | 1 | 2 | 3 |
1140 * +--------------------+---------------+-------+
1141 *
1142 * <-------------- *tio->len_ptr --------------->
1143 * <------- bi_size ------->
1144 * <-- n_sectors -->
1145 *
1146 * Region 1 was already iterated over with bio_advance or similar function.
1147 * (it may be empty if the target doesn't use bio_advance)
1148 * Region 2 is the remaining bio size that the target wants to process.
1149 * (it may be empty if region 1 is non-empty, although there is no reason
1150 * to make it empty)
1151 * The target requires that region 3 is to be sent in the next bio.
1152 *
1153 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
1154 * the partially processed part (the sum of regions 1+2) must be the same for all
1155 * copies of the bio.
1156 */
1157void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
1158{
1159 struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
1160 unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
1161 BUG_ON(bio->bi_rw & REQ_FLUSH);
1162 BUG_ON(bi_size > *tio->len_ptr);
1163 BUG_ON(n_sectors > bi_size);
1164 *tio->len_ptr -= bi_size - n_sectors;
1165 bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
1166}
1167EXPORT_SYMBOL_GPL(dm_accept_partial_bio);
1168
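/*
 * Editor's note -- illustrative sketch, not part of the original source
 * (example_map, example_target and et->dev are hypothetical names): a
 * target whose map method can only handle the first 8 sectors of any
 * bio would accept a partial bio like this:
 *
 *	static int example_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		struct example_target *et = ti->private;
 *
 *		if (!(bio->bi_rw & REQ_FLUSH) && bio_sectors(bio) > 8)
 *			dm_accept_partial_bio(bio, 8);
 *
 *		bio->bi_bdev = et->dev->bdev;
 *		return DM_MAPIO_REMAPPED;
 *	}
 *
 * The rest of the data (region 3 in the diagram above) is then sent to
 * the target in a following bio, as described in the comment above.
 */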
bd2a49b8 1169static void __map_bio(struct dm_target_io *tio)
1da177e4
LT
1170{
1171 int r;
2056a782 1172 sector_t sector;
9faf400f 1173 struct mapped_device *md;
dba14160 1174 struct bio *clone = &tio->clone;
bd2a49b8 1175 struct dm_target *ti = tio->ti;
1da177e4 1176
1da177e4 1177 clone->bi_end_io = clone_endio;
1da177e4
LT
1178
1179 /*
1180 * Map the clone. If r == 0 we don't need to do
1181 * anything, the target has assumed ownership of
1182 * this io.
1183 */
1184 atomic_inc(&tio->io->io_count);
4f024f37 1185 sector = clone->bi_iter.bi_sector;
7de3ee57 1186 r = ti->type->map(ti, clone);
45cbcd79 1187 if (r == DM_MAPIO_REMAPPED) {
1da177e4 1188 /* the bio has been remapped so dispatch it */
2056a782 1189
d07335e5
MS
1190 trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
1191 tio->io->bio->bi_bdev->bd_dev, sector);
2056a782 1192
1da177e4 1193 generic_make_request(clone);
2e93ccc1
KU
1194 } else if (r < 0 || r == DM_MAPIO_REQUEUE) {
1195 /* error the io and bail out, or requeue it if needed */
9faf400f
SB
1196 md = tio->io->md;
1197 dec_pending(tio->io, r);
9faf400f 1198 free_tio(md, tio);
45cbcd79
KU
1199 } else if (r) {
1200 DMWARN("unimplemented target map return value: %d", r);
1201 BUG();
1da177e4
LT
1202 }
1203}
1204
1205struct clone_info {
1206 struct mapped_device *md;
1207 struct dm_table *map;
1208 struct bio *bio;
1209 struct dm_io *io;
1210 sector_t sector;
e0d6609a 1211 unsigned sector_count;
1da177e4
LT
1212};
1213
e0d6609a 1214static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
bd2a49b8 1215{
4f024f37
KO
1216 bio->bi_iter.bi_sector = sector;
1217 bio->bi_iter.bi_size = to_bytes(len);
1da177e4
LT
1218}
1219
1220/*
1221 * Creates a bio that consists of range of complete bvecs.
1222 */
dba14160 1223static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1c3b13e6 1224 sector_t sector, unsigned len)
1da177e4 1225{
dba14160 1226 struct bio *clone = &tio->clone;
1da177e4 1227
1c3b13e6
KO
1228 __bio_clone_fast(clone, bio);
1229
1230 if (bio_integrity(bio))
1231 bio_integrity_clone(clone, bio, GFP_NOIO);
bd2a49b8 1232
1c3b13e6
KO
1233 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1234 clone->bi_iter.bi_size = to_bytes(len);
1235
1236 if (bio_integrity(bio))
1237 bio_integrity_trim(clone, 0, len);
1da177e4
LT
1238}
1239
9015df24 1240static struct dm_target_io *alloc_tio(struct clone_info *ci,
bd2a49b8 1241 struct dm_target *ti, int nr_iovecs,
55a62eef 1242 unsigned target_bio_nr)
f9ab94ce 1243{
dba14160
MP
1244 struct dm_target_io *tio;
1245 struct bio *clone;
1246
1247 clone = bio_alloc_bioset(GFP_NOIO, nr_iovecs, ci->md->bs);
1248 tio = container_of(clone, struct dm_target_io, clone);
f9ab94ce
MP
1249
1250 tio->io = ci->io;
1251 tio->ti = ti;
55a62eef 1252 tio->target_bio_nr = target_bio_nr;
9015df24
AK
1253
1254 return tio;
1255}
1256
14fe594d
AK
1257static void __clone_and_map_simple_bio(struct clone_info *ci,
1258 struct dm_target *ti,
1dd40c3e 1259 unsigned target_bio_nr, unsigned *len)
9015df24 1260{
55a62eef 1261 struct dm_target_io *tio = alloc_tio(ci, ti, ci->bio->bi_max_vecs, target_bio_nr);
dba14160 1262 struct bio *clone = &tio->clone;
9015df24 1263
1dd40c3e
MP
1264 tio->len_ptr = len;
1265
06a426ce
MS
1266 /*
1267 * Discard requests require the bio's inline iovecs to be initialized.
1268 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1269 * and discard, so no need for concern about wasted bvec allocations.
1270 */
1c3b13e6 1271 __bio_clone_fast(clone, ci->bio);
bd2a49b8 1272 if (len)
1dd40c3e 1273 bio_setup_sector(clone, ci->sector, *len);
f9ab94ce 1274
bd2a49b8 1275 __map_bio(tio);
f9ab94ce
MP
1276}
1277
14fe594d 1278static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
1dd40c3e 1279 unsigned num_bios, unsigned *len)
06a426ce 1280{
55a62eef 1281 unsigned target_bio_nr;
06a426ce 1282
55a62eef 1283 for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
14fe594d 1284 __clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
06a426ce
MS
1285}
1286
14fe594d 1287static int __send_empty_flush(struct clone_info *ci)
f9ab94ce 1288{
06a426ce 1289 unsigned target_nr = 0;
f9ab94ce
MP
1290 struct dm_target *ti;
1291
b372d360 1292 BUG_ON(bio_has_data(ci->bio));
f9ab94ce 1293 while ((ti = dm_table_get_target(ci->map, target_nr++)))
1dd40c3e 1294 __send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);
f9ab94ce 1295
f9ab94ce
MP
1296 return 0;
1297}
1298
e4c93811 1299static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1dd40c3e 1300 sector_t sector, unsigned *len)
5ae89a87 1301{
dba14160 1302 struct bio *bio = ci->bio;
5ae89a87 1303 struct dm_target_io *tio;
b0d8ed4d
AK
1304 unsigned target_bio_nr;
1305 unsigned num_target_bios = 1;
5ae89a87 1306
b0d8ed4d
AK
1307 /*
1308 * Does the target want to receive duplicate copies of the bio?
1309 */
1310 if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
1311 num_target_bios = ti->num_write_bios(ti, bio);
e4c93811 1312
b0d8ed4d 1313 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1c3b13e6 1314 tio = alloc_tio(ci, ti, 0, target_bio_nr);
1dd40c3e
MP
1315 tio->len_ptr = len;
1316 clone_bio(tio, bio, sector, *len);
b0d8ed4d
AK
1317 __map_bio(tio);
1318 }
5ae89a87
MS
1319}
1320
55a62eef 1321typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);
23508a96 1322
55a62eef 1323static unsigned get_num_discard_bios(struct dm_target *ti)
23508a96 1324{
55a62eef 1325 return ti->num_discard_bios;
23508a96
MS
1326}
1327
55a62eef 1328static unsigned get_num_write_same_bios(struct dm_target *ti)
23508a96 1329{
55a62eef 1330 return ti->num_write_same_bios;
23508a96
MS
1331}
1332
1333typedef bool (*is_split_required_fn)(struct dm_target *ti);
1334
1335static bool is_split_required_for_discard(struct dm_target *ti)
1336{
55a62eef 1337 return ti->split_discard_bios;
23508a96
MS
1338}
1339
14fe594d
AK
1340static int __send_changing_extent_only(struct clone_info *ci,
1341 get_num_bios_fn get_num_bios,
1342 is_split_required_fn is_split_required)
5ae89a87
MS
1343{
1344 struct dm_target *ti;
e0d6609a 1345 unsigned len;
55a62eef 1346 unsigned num_bios;
5ae89a87 1347
a79245b3
MS
1348 do {
1349 ti = dm_table_find_target(ci->map, ci->sector);
1350 if (!dm_target_is_valid(ti))
1351 return -EIO;
5ae89a87 1352
5ae89a87 1353 /*
23508a96
MS
1354 * Even though the device advertised support for this type of
1355 * request, that does not mean every target supports it, and
936688d7 1356 * reconfiguration might also have changed that since the
a79245b3 1357 * check was performed.
5ae89a87 1358 */
55a62eef
AK
1359 num_bios = get_num_bios ? get_num_bios(ti) : 0;
1360 if (!num_bios)
a79245b3 1361 return -EOPNOTSUPP;
5ae89a87 1362
23508a96 1363 if (is_split_required && !is_split_required(ti))
e0d6609a 1364 len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
7acf0277 1365 else
e0d6609a 1366 len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));
06a426ce 1367
1dd40c3e 1368 __send_duplicate_bios(ci, ti, num_bios, &len);
a79245b3
MS
1369
1370 ci->sector += len;
1371 } while (ci->sector_count -= len);
5ae89a87
MS
1372
1373 return 0;
1374}
1375
14fe594d 1376static int __send_discard(struct clone_info *ci)
23508a96 1377{
14fe594d
AK
1378 return __send_changing_extent_only(ci, get_num_discard_bios,
1379 is_split_required_for_discard);
23508a96
MS
1380}
1381
14fe594d 1382static int __send_write_same(struct clone_info *ci)
23508a96 1383{
14fe594d 1384 return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
23508a96
MS
1385}
1386
e4c93811
AK
1387/*
1388 * Select the correct strategy for processing a non-flush bio.
1389 */
14fe594d 1390static int __split_and_process_non_flush(struct clone_info *ci)
1da177e4 1391{
dba14160 1392 struct bio *bio = ci->bio;
512875bd 1393 struct dm_target *ti;
1c3b13e6 1394 unsigned len;
1da177e4 1395
5ae89a87 1396 if (unlikely(bio->bi_rw & REQ_DISCARD))
14fe594d 1397 return __send_discard(ci);
23508a96 1398 else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
14fe594d 1399 return __send_write_same(ci);
5ae89a87 1400
512875bd
JN
1401 ti = dm_table_find_target(ci->map, ci->sector);
1402 if (!dm_target_is_valid(ti))
1403 return -EIO;
1404
1c3b13e6 1405 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1da177e4 1406
1dd40c3e 1407 __clone_and_map_data_bio(ci, ti, ci->sector, &len);
1da177e4 1408
1c3b13e6
KO
1409 ci->sector += len;
1410 ci->sector_count -= len;
1da177e4 1411
1c3b13e6 1412 return 0;
1da177e4
LT
1413}
1414
1415/*
14fe594d 1416 * Entry point to split a bio into clones and submit them to the targets.
1da177e4 1417 */
83d5e5b0
MP
1418static void __split_and_process_bio(struct mapped_device *md,
1419 struct dm_table *map, struct bio *bio)
1da177e4
LT
1420{
1421 struct clone_info ci;
512875bd 1422 int error = 0;
1da177e4 1423
83d5e5b0 1424 if (unlikely(!map)) {
6a8736d1 1425 bio_io_error(bio);
f0b9a450
MP
1426 return;
1427 }
692d0eb9 1428
83d5e5b0 1429 ci.map = map;
1da177e4 1430 ci.md = md;
1da177e4
LT
1431 ci.io = alloc_io(md);
1432 ci.io->error = 0;
1433 atomic_set(&ci.io->io_count, 1);
1434 ci.io->bio = bio;
1435 ci.io->md = md;
f88fb981 1436 spin_lock_init(&ci.io->endio_lock);
4f024f37 1437 ci.sector = bio->bi_iter.bi_sector;
1da177e4 1438
3eaf840e 1439 start_io_acct(ci.io);
bd2a49b8 1440
b372d360
MS
1441 if (bio->bi_rw & REQ_FLUSH) {
1442 ci.bio = &ci.md->flush_bio;
1443 ci.sector_count = 0;
14fe594d 1444 error = __send_empty_flush(&ci);
b372d360
MS
1445 /* dec_pending submits any data associated with flush */
1446 } else {
6a8736d1 1447 ci.bio = bio;
d87f4c14 1448 ci.sector_count = bio_sectors(bio);
b372d360 1449 while (ci.sector_count && !error)
14fe594d 1450 error = __split_and_process_non_flush(&ci);
d87f4c14 1451 }
1da177e4
LT
1452
1453 /* drop the extra reference count */
512875bd 1454 dec_pending(ci.io, error);
1da177e4
LT
1455}
1456/*-----------------------------------------------------------------
1457 * CRUD END
1458 *---------------------------------------------------------------*/
1459
f6fccb12
MB
1460static int dm_merge_bvec(struct request_queue *q,
1461 struct bvec_merge_data *bvm,
1462 struct bio_vec *biovec)
1463{
1464 struct mapped_device *md = q->queuedata;
83d5e5b0 1465 struct dm_table *map = dm_get_live_table_fast(md);
f6fccb12
MB
1466 struct dm_target *ti;
1467 sector_t max_sectors;
5037108a 1468 int max_size = 0;
f6fccb12
MB
1469
1470 if (unlikely(!map))
5037108a 1471 goto out;
f6fccb12
MB
1472
1473 ti = dm_table_find_target(map, bvm->bi_sector);
b01cd5ac 1474 if (!dm_target_is_valid(ti))
83d5e5b0 1475 goto out;
f6fccb12
MB
1476
1477 /*
1478 * Find maximum amount of I/O that won't need splitting
1479 */
56a67df7 1480 max_sectors = min(max_io_len(bvm->bi_sector, ti),
f6fccb12
MB
1481 (sector_t) BIO_MAX_SECTORS);
1482 max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
1483 if (max_size < 0)
1484 max_size = 0;
1485
1486 /*
1487 * merge_bvec_fn() returns number of bytes
1488 * it can accept at this offset
1489 * max is precomputed maximal io size
1490 */
1491 if (max_size && ti->type->merge)
1492 max_size = ti->type->merge(ti, bvm, biovec, max_size);
8cbeb67a
MP
1493 /*
1494 * If the target doesn't support the merge method and some of the devices
1495 * provided their merge_bvec method (we know this by looking at
1496 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1497 * entries. So always set max_size to 0, and the code below allows
1498 * just one page.
1499 */
1500 else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
8cbeb67a 1501 max_size = 0;
f6fccb12 1502
5037108a 1503out:
83d5e5b0 1504 dm_put_live_table_fast(md);
f6fccb12
MB
1505 /*
1506 * Always allow an entire first page
1507 */
1508 if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
1509 max_size = biovec->bv_len;
1510
f6fccb12
MB
1511 return max_size;
1512}
1513
1da177e4
LT
1514/*
1515 * The request function that just remaps the bio built up by
1516 * dm_merge_bvec.
1517 */
5a7bbad2 1518static void _dm_request(struct request_queue *q, struct bio *bio)
1da177e4 1519{
12f03a49 1520 int rw = bio_data_dir(bio);
1da177e4 1521 struct mapped_device *md = q->queuedata;
c9959059 1522 int cpu;
83d5e5b0
MP
1523 int srcu_idx;
1524 struct dm_table *map;
1da177e4 1525
83d5e5b0 1526 map = dm_get_live_table(md, &srcu_idx);
1da177e4 1527
074a7aca
TH
1528 cpu = part_stat_lock();
1529 part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
1530 part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
1531 part_stat_unlock();
12f03a49 1532
6a8736d1
TH
1533 /* if we're suspended, we have to queue this io for later */
1534 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
83d5e5b0 1535 dm_put_live_table(md, srcu_idx);
1da177e4 1536
6a8736d1
TH
1537 if (bio_rw(bio) != READA)
1538 queue_io(md, bio);
1539 else
54d9a1b4 1540 bio_io_error(bio);
5a7bbad2 1541 return;
1da177e4
LT
1542 }
1543
83d5e5b0
MP
1544 __split_and_process_bio(md, map, bio);
1545 dm_put_live_table(md, srcu_idx);
5a7bbad2 1546 return;
cec47e3d
KU
1547}
1548
fd2ed4d2 1549int dm_request_based(struct mapped_device *md)
cec47e3d
KU
1550{
1551 return blk_queue_stackable(md->queue);
1552}
1553
5a7bbad2 1554static void dm_request(struct request_queue *q, struct bio *bio)
cec47e3d
KU
1555{
1556 struct mapped_device *md = q->queuedata;
1557
1558 if (dm_request_based(md))
5a7bbad2
CH
1559 blk_queue_bio(q, bio);
1560 else
1561 _dm_request(q, bio);
cec47e3d
KU
1562}
1563
1564void dm_dispatch_request(struct request *rq)
1565{
1566 int r;
1567
1568 if (blk_queue_io_stat(rq->q))
1569 rq->cmd_flags |= REQ_IO_STAT;
1570
1571 rq->start_time = jiffies;
1572 r = blk_insert_cloned_request(rq->q, rq);
1573 if (r)
1574 dm_complete_request(rq, r);
1575}
1576EXPORT_SYMBOL_GPL(dm_dispatch_request);
1577
cec47e3d
KU
1578static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
1579 void *data)
1580{
1581 struct dm_rq_target_io *tio = data;
94818742
KO
1582 struct dm_rq_clone_bio_info *info =
1583 container_of(bio, struct dm_rq_clone_bio_info, clone);
cec47e3d
KU
1584
1585 info->orig = bio_orig;
1586 info->tio = tio;
1587 bio->bi_end_io = end_clone_bio;
cec47e3d
KU
1588
1589 return 0;
1590}
1591
1592static int setup_clone(struct request *clone, struct request *rq,
1593 struct dm_rq_target_io *tio)
1594{
d0bcb878 1595 int r;
cec47e3d 1596
29e4013d
TH
1597 r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
1598 dm_rq_bio_constructor, tio);
1599 if (r)
1600 return r;
cec47e3d 1601
29e4013d
TH
1602 clone->cmd = rq->cmd;
1603 clone->cmd_len = rq->cmd_len;
1604 clone->sense = rq->sense;
cec47e3d
KU
1605 clone->end_io = end_clone_request;
1606 clone->end_io_data = tio;
1607
1608 return 0;
1609}
1610
6facdaff
KU
1611static struct request *clone_rq(struct request *rq, struct mapped_device *md,
1612 gfp_t gfp_mask)
1613{
1614 struct request *clone;
1615 struct dm_rq_target_io *tio;
1616
1617 tio = alloc_rq_tio(md, gfp_mask);
1618 if (!tio)
1619 return NULL;
1620
1621 tio->md = md;
1622 tio->ti = NULL;
1623 tio->orig = rq;
1624 tio->error = 0;
1625 memset(&tio->info, 0, sizeof(tio->info));
1626
1627 clone = &tio->clone;
1628 if (setup_clone(clone, rq, tio)) {
1629 /* -ENOMEM */
1630 free_rq_tio(tio);
1631 return NULL;
1632 }
1633
1634 return clone;
1635}
1636
cec47e3d
KU
1637/*
1638 * Called with the queue lock held.
1639 */
1640static int dm_prep_fn(struct request_queue *q, struct request *rq)
1641{
1642 struct mapped_device *md = q->queuedata;
cec47e3d
KU
1643 struct request *clone;
1644
cec47e3d
KU
1645 if (unlikely(rq->special)) {
1646 DMWARN("Already has something in rq->special.");
1647 return BLKPREP_KILL;
1648 }
1649
6facdaff
KU
1650 clone = clone_rq(rq, md, GFP_ATOMIC);
1651 if (!clone)
cec47e3d 1652 return BLKPREP_DEFER;
cec47e3d
KU
1653
1654 rq->special = clone;
1655 rq->cmd_flags |= REQ_DONTPREP;
1656
1657 return BLKPREP_OK;
1658}
1659
9eef87da
KU
1660/*
1661 * Returns:
1662 * 0 : the request has been processed (not requeued)
1663 * !0 : the request has been requeued
1664 */
1665static int map_request(struct dm_target *ti, struct request *clone,
1666 struct mapped_device *md)
cec47e3d 1667{
9eef87da 1668 int r, requeued = 0;
cec47e3d
KU
1669 struct dm_rq_target_io *tio = clone->end_io_data;
1670
cec47e3d
KU
1671 tio->ti = ti;
1672 r = ti->type->map_rq(ti, clone, &tio->info);
1673 switch (r) {
1674 case DM_MAPIO_SUBMITTED:
1675 /* The target has taken the I/O to submit by itself later */
1676 break;
1677 case DM_MAPIO_REMAPPED:
1678 /* The target has remapped the I/O so dispatch it */
6db4ccd6
JN
1679 trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
1680 blk_rq_pos(tio->orig));
cec47e3d
KU
1681 dm_dispatch_request(clone);
1682 break;
1683 case DM_MAPIO_REQUEUE:
1684 /* The target wants to requeue the I/O */
1685 dm_requeue_unmapped_request(clone);
9eef87da 1686 requeued = 1;
cec47e3d
KU
1687 break;
1688 default:
1689 if (r > 0) {
1690 DMWARN("unimplemented target map return value: %d", r);
1691 BUG();
1692 }
1693
1694 /* The target wants to complete the I/O */
1695 dm_kill_unmapped_request(clone, r);
1696 break;
1697 }
9eef87da
KU
1698
1699 return requeued;
cec47e3d
KU
1700}
1701
ba1cbad9
MS
1702static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
1703{
1704 struct request *clone;
1705
1706 blk_start_request(orig);
1707 clone = orig->special;
1708 atomic_inc(&md->pending[rq_data_dir(clone)]);
1709
1710 /*
1711 * Hold the md reference here for the in-flight I/O.
1712 * We can't rely on the reference count held by the device opener,
1713 * because the device may be closed during the request completion
1714 * when all bios are completed.
1715 * See the comment in rq_completed() too.
1716 */
1717 dm_get(md);
1718
1719 return clone;
1720}
1721
cec47e3d
KU
1722/*
1723 * q->request_fn for request-based dm.
1724 * Called with the queue lock held.
1725 */
1726static void dm_request_fn(struct request_queue *q)
1727{
1728 struct mapped_device *md = q->queuedata;
83d5e5b0
MP
1729 int srcu_idx;
1730 struct dm_table *map = dm_get_live_table(md, &srcu_idx);
cec47e3d 1731 struct dm_target *ti;
b4324fee 1732 struct request *rq, *clone;
29e4013d 1733 sector_t pos;
cec47e3d
KU
1734
1735 /*
b4324fee
KU
1736 * For suspend, check blk_queue_stopped() and increment
1737 * ->pending within a single queue_lock not to increment the
1738 * number of in-flight I/Os after the queue is stopped in
1739 * dm_suspend().
cec47e3d 1740 */
7eaceacc 1741 while (!blk_queue_stopped(q)) {
cec47e3d
KU
1742 rq = blk_peek_request(q);
1743 if (!rq)
7eaceacc 1744 goto delay_and_out;
cec47e3d 1745
29e4013d
TH
1746 /* always use block 0 to find the target for flushes for now */
1747 pos = 0;
1748 if (!(rq->cmd_flags & REQ_FLUSH))
1749 pos = blk_rq_pos(rq);
1750
1751 ti = dm_table_find_target(map, pos);
ba1cbad9
MS
1752 if (!dm_target_is_valid(ti)) {
1753 /*
1754 * Must perform the setup that dm_done() requires
1755 * before calling dm_kill_unmapped_request()
1756 */
1757 DMERR_LIMIT("request attempted access beyond the end of device");
1758 clone = dm_start_request(md, rq);
1759 dm_kill_unmapped_request(clone, -EIO);
1760 continue;
1761 }
d0bcb878 1762
cec47e3d 1763 if (ti->type->busy && ti->type->busy(ti))
7eaceacc 1764 goto delay_and_out;
cec47e3d 1765
ba1cbad9 1766 clone = dm_start_request(md, rq);
b4324fee 1767
cec47e3d 1768 spin_unlock(q->queue_lock);
9eef87da
KU
1769 if (map_request(ti, clone, md))
1770 goto requeued;
1771
052189a2
KU
1772 BUG_ON(!irqs_disabled());
1773 spin_lock(q->queue_lock);
cec47e3d
KU
1774 }
1775
1776 goto out;
1777
9eef87da 1778requeued:
052189a2
KU
1779 BUG_ON(!irqs_disabled());
1780 spin_lock(q->queue_lock);
9eef87da 1781
7eaceacc
JA
1782delay_and_out:
1783 blk_delay_queue(q, HZ / 10);
cec47e3d 1784out:
83d5e5b0 1785 dm_put_live_table(md, srcu_idx);
cec47e3d
KU
1786}
1787
1788int dm_underlying_device_busy(struct request_queue *q)
1789{
1790 return blk_lld_busy(q);
1791}
1792EXPORT_SYMBOL_GPL(dm_underlying_device_busy);
1793
1794static int dm_lld_busy(struct request_queue *q)
1795{
1796 int r;
1797 struct mapped_device *md = q->queuedata;
83d5e5b0 1798 struct dm_table *map = dm_get_live_table_fast(md);
cec47e3d
KU
1799
1800 if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
1801 r = 1;
1802 else
1803 r = dm_table_any_busy_target(map);
1804
83d5e5b0 1805 dm_put_live_table_fast(md);
cec47e3d
KU
1806
1807 return r;
1808}
1809
1da177e4
LT
1810static int dm_any_congested(void *congested_data, int bdi_bits)
1811{
8a57dfc6
CS
1812 int r = bdi_bits;
1813 struct mapped_device *md = congested_data;
1814 struct dm_table *map;
1da177e4 1815
1eb787ec 1816 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
83d5e5b0 1817 map = dm_get_live_table_fast(md);
8a57dfc6 1818 if (map) {
cec47e3d
KU
1819 /*
1820 * Request-based dm cares only about its own queue for
1821 * the query about congestion status of the request_queue
1822 */
1823 if (dm_request_based(md))
1824 r = md->queue->backing_dev_info.state &
1825 bdi_bits;
1826 else
1827 r = dm_table_any_congested(map, bdi_bits);
8a57dfc6 1828 }
83d5e5b0 1829 dm_put_live_table_fast(md);
8a57dfc6
CS
1830 }
1831
1da177e4
LT
1832 return r;
1833}
1834
1835/*-----------------------------------------------------------------
1836 * An IDR is used to keep track of allocated minor numbers.
1837 *---------------------------------------------------------------*/
2b06cfff 1838static void free_minor(int minor)
1da177e4 1839{
f32c10b0 1840 spin_lock(&_minor_lock);
1da177e4 1841 idr_remove(&_minor_idr, minor);
f32c10b0 1842 spin_unlock(&_minor_lock);
1da177e4
LT
1843}
1844
1845/*
1846 * See if the device with a specific minor # is free.
1847 */
cf13ab8e 1848static int specific_minor(int minor)
1da177e4 1849{
c9d76be6 1850 int r;
1da177e4
LT
1851
1852 if (minor >= (1 << MINORBITS))
1853 return -EINVAL;
1854
c9d76be6 1855 idr_preload(GFP_KERNEL);
f32c10b0 1856 spin_lock(&_minor_lock);
1da177e4 1857
c9d76be6 1858 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);
1da177e4 1859
f32c10b0 1860 spin_unlock(&_minor_lock);
c9d76be6
TH
1861 idr_preload_end();
1862 if (r < 0)
1863 return r == -ENOSPC ? -EBUSY : r;
1864 return 0;
1da177e4
LT
1865}
1866
cf13ab8e 1867static int next_free_minor(int *minor)
1da177e4 1868{
c9d76be6 1869 int r;
62f75c2f 1870
c9d76be6 1871 idr_preload(GFP_KERNEL);
f32c10b0 1872 spin_lock(&_minor_lock);
1da177e4 1873
c9d76be6 1874 r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);
1da177e4 1875
f32c10b0 1876 spin_unlock(&_minor_lock);
c9d76be6
TH
1877 idr_preload_end();
1878 if (r < 0)
1879 return r;
1880 *minor = r;
1881 return 0;
1da177e4
LT
1882}
1883
83d5cde4 1884static const struct block_device_operations dm_blk_dops;
1da177e4 1885
53d5914f
MP
1886static void dm_wq_work(struct work_struct *work);
1887
4a0b4ddf
MS
1888static void dm_init_md_queue(struct mapped_device *md)
1889{
1890 /*
1891 * Request-based dm devices cannot be stacked on top of bio-based dm
1892 * devices. The type of this dm device has not been decided yet.
1893 * The type is decided at the first table loading time.
1894 * To prevent problematic device stacking, clear the queue flag
1895 * for request stacking support until then.
1896 *
1897 * This queue is new, so no concurrency on the queue_flags.
1898 */
1899 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
1900
1901 md->queue->queuedata = md;
1902 md->queue->backing_dev_info.congested_fn = dm_any_congested;
1903 md->queue->backing_dev_info.congested_data = md;
1904 blk_queue_make_request(md->queue, dm_request);
1905 blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
4a0b4ddf
MS
1906 blk_queue_merge_bvec(md->queue, dm_merge_bvec);
1907}
1908
1da177e4
LT
1909/*
1910 * Allocate and initialise a blank device with a given minor.
1911 */
2b06cfff 1912static struct mapped_device *alloc_dev(int minor)
1da177e4
LT
1913{
1914 int r;
cf13ab8e 1915 struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
ba61fdd1 1916 void *old_md;
1da177e4
LT
1917
1918 if (!md) {
1919 DMWARN("unable to allocate device, out of memory.");
1920 return NULL;
1921 }
1922
10da4f79 1923 if (!try_module_get(THIS_MODULE))
6ed7ade8 1924 goto bad_module_get;
10da4f79 1925
1da177e4 1926 /* get a minor number for the dev */
2b06cfff 1927 if (minor == DM_ANY_MINOR)
cf13ab8e 1928 r = next_free_minor(&minor);
2b06cfff 1929 else
cf13ab8e 1930 r = specific_minor(minor);
1da177e4 1931 if (r < 0)
6ed7ade8 1932 goto bad_minor;
1da177e4 1933
83d5e5b0
MP
1934 r = init_srcu_struct(&md->io_barrier);
1935 if (r < 0)
1936 goto bad_io_barrier;
1937
a5664dad 1938 md->type = DM_TYPE_NONE;
e61290a4 1939 mutex_init(&md->suspend_lock);
a5664dad 1940 mutex_init(&md->type_lock);
022c2611 1941 spin_lock_init(&md->deferred_lock);
1da177e4 1942 atomic_set(&md->holders, 1);
5c6bd75d 1943 atomic_set(&md->open_count, 0);
1da177e4 1944 atomic_set(&md->event_nr, 0);
7a8c3d3b
MA
1945 atomic_set(&md->uevent_seq, 0);
1946 INIT_LIST_HEAD(&md->uevent_list);
1947 spin_lock_init(&md->uevent_lock);
1da177e4 1948
4a0b4ddf 1949 md->queue = blk_alloc_queue(GFP_KERNEL);
1da177e4 1950 if (!md->queue)
6ed7ade8 1951 goto bad_queue;
1da177e4 1952
4a0b4ddf 1953 dm_init_md_queue(md);
9faf400f 1954
1da177e4
LT
1955 md->disk = alloc_disk(1);
1956 if (!md->disk)
6ed7ade8 1957 goto bad_disk;
1da177e4 1958
316d315b
NK
1959 atomic_set(&md->pending[0], 0);
1960 atomic_set(&md->pending[1], 0);
f0b04115 1961 init_waitqueue_head(&md->wait);
53d5914f 1962 INIT_WORK(&md->work, dm_wq_work);
f0b04115 1963 init_waitqueue_head(&md->eventq);
2995fa78 1964 init_completion(&md->kobj_holder.completion);
f0b04115 1965
1da177e4
LT
1966 md->disk->major = _major;
1967 md->disk->first_minor = minor;
1968 md->disk->fops = &dm_blk_dops;
1969 md->disk->queue = md->queue;
1970 md->disk->private_data = md;
1971 sprintf(md->disk->disk_name, "dm-%d", minor);
1972 add_disk(md->disk);
7e51f257 1973 format_dev_t(md->name, MKDEV(_major, minor));
1da177e4 1974
670368a8 1975 md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
304f3f6a
MB
1976 if (!md->wq)
1977 goto bad_thread;
1978
32a926da
MP
1979 md->bdev = bdget_disk(md->disk, 0);
1980 if (!md->bdev)
1981 goto bad_bdev;
1982
6a8736d1
TH
1983 bio_init(&md->flush_bio);
1984 md->flush_bio.bi_bdev = md->bdev;
1985 md->flush_bio.bi_rw = WRITE_FLUSH;
1986
fd2ed4d2
MP
1987 dm_stats_init(&md->stats);
1988
ba61fdd1 1989 /* Populate the mapping, nobody knows we exist yet */
f32c10b0 1990 spin_lock(&_minor_lock);
ba61fdd1 1991 old_md = idr_replace(&_minor_idr, md, minor);
f32c10b0 1992 spin_unlock(&_minor_lock);
ba61fdd1
JM
1993
1994 BUG_ON(old_md != MINOR_ALLOCED);
1995
1da177e4
LT
1996 return md;
1997
32a926da
MP
1998bad_bdev:
1999 destroy_workqueue(md->wq);
304f3f6a 2000bad_thread:
03022c54 2001 del_gendisk(md->disk);
304f3f6a 2002 put_disk(md->disk);
6ed7ade8 2003bad_disk:
1312f40e 2004 blk_cleanup_queue(md->queue);
6ed7ade8 2005bad_queue:
83d5e5b0
MP
2006 cleanup_srcu_struct(&md->io_barrier);
2007bad_io_barrier:
1da177e4 2008 free_minor(minor);
6ed7ade8 2009bad_minor:
10da4f79 2010 module_put(THIS_MODULE);
6ed7ade8 2011bad_module_get:
1da177e4
LT
2012 kfree(md);
2013 return NULL;
2014}
2015
ae9da83f
JN
2016static void unlock_fs(struct mapped_device *md);
2017
1da177e4
LT
2018static void free_dev(struct mapped_device *md)
2019{
f331c029 2020 int minor = MINOR(disk_devt(md->disk));
63d94e48 2021
32a926da
MP
2022 unlock_fs(md);
2023 bdput(md->bdev);
304f3f6a 2024 destroy_workqueue(md->wq);
e6ee8c0b
KU
2025 if (md->io_pool)
2026 mempool_destroy(md->io_pool);
2027 if (md->bs)
2028 bioset_free(md->bs);
9c47008d 2029 blk_integrity_unregister(md->disk);
1da177e4 2030 del_gendisk(md->disk);
83d5e5b0 2031 cleanup_srcu_struct(&md->io_barrier);
63d94e48 2032 free_minor(minor);
fba9f90e
JM
2033
2034 spin_lock(&_minor_lock);
2035 md->disk->private_data = NULL;
2036 spin_unlock(&_minor_lock);
2037
1da177e4 2038 put_disk(md->disk);
1312f40e 2039 blk_cleanup_queue(md->queue);
fd2ed4d2 2040 dm_stats_cleanup(&md->stats);
10da4f79 2041 module_put(THIS_MODULE);
1da177e4
LT
2042 kfree(md);
2043}
2044
e6ee8c0b
KU
2045static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
2046{
c0820cf5 2047 struct dm_md_mempools *p = dm_table_get_md_mempools(t);
e6ee8c0b 2048
5f015204 2049 if (md->io_pool && md->bs) {
16245bdc
JN
2050 /* The md already has necessary mempools. */
2051 if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
2052 /*
2053 * Reload the bioset because front_pad may have changed
2054 * when a different table was loaded.
2055 */
2056 bioset_free(md->bs);
2057 md->bs = p->bs;
2058 p->bs = NULL;
2059 } else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
16245bdc
JN
2060 /*
2061 * There's no need to reload with request-based dm
2062 * because the size of front_pad doesn't change.
2063 * Note for future: If you are to reload bioset,
2064 * prepped requests in the queue may refer
2065 * to bios from the old bioset, so you must walk
2066 * through the queue to unprep them.
2067 */
2068 }
e6ee8c0b 2069 goto out;
c0820cf5 2070 }
e6ee8c0b 2071
5f015204 2072 BUG_ON(!p || md->io_pool || md->bs);
e6ee8c0b
KU
2073
2074 md->io_pool = p->io_pool;
2075 p->io_pool = NULL;
e6ee8c0b
KU
2076 md->bs = p->bs;
2077 p->bs = NULL;
2078
2079out:
2080 /* mempool bind completed; the table no longer needs its mempools */
2081 dm_table_free_md_mempools(t);
2082}
2083
1da177e4
LT
2084/*
2085 * Bind a table to the device.
2086 */
2087static void event_callback(void *context)
2088{
7a8c3d3b
MA
2089 unsigned long flags;
2090 LIST_HEAD(uevents);
1da177e4
LT
2091 struct mapped_device *md = (struct mapped_device *) context;
2092
7a8c3d3b
MA
2093 spin_lock_irqsave(&md->uevent_lock, flags);
2094 list_splice_init(&md->uevent_list, &uevents);
2095 spin_unlock_irqrestore(&md->uevent_lock, flags);
2096
ed9e1982 2097 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
7a8c3d3b 2098
1da177e4
LT
2099 atomic_inc(&md->event_nr);
2100 wake_up(&md->eventq);
2101}
2102
c217649b
MS
2103/*
2104 * Protected by md->suspend_lock obtained by dm_swap_table().
2105 */
4e90188b 2106static void __set_size(struct mapped_device *md, sector_t size)
1da177e4 2107{
4e90188b 2108 set_capacity(md->disk, size);
1da177e4 2109
db8fef4f 2110 i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
1da177e4
LT
2111}
2112
d5b9dd04
MP
2113/*
2114 * Return 1 if the queue has a compulsory merge_bvec_fn function.
2115 *
2116 * If this function returns 0, then the device is either a non-dm
2117 * device without a merge_bvec_fn, or it is a dm device that is
2118 * able to split any bios it receives that are too big.
2119 */
2120int dm_queue_merge_is_compulsory(struct request_queue *q)
2121{
2122 struct mapped_device *dev_md;
2123
2124 if (!q->merge_bvec_fn)
2125 return 0;
2126
2127 if (q->make_request_fn == dm_request) {
2128 dev_md = q->queuedata;
2129 if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
2130 return 0;
2131 }
2132
2133 return 1;
2134}
2135
2136static int dm_device_merge_is_compulsory(struct dm_target *ti,
2137 struct dm_dev *dev, sector_t start,
2138 sector_t len, void *data)
2139{
2140 struct block_device *bdev = dev->bdev;
2141 struct request_queue *q = bdev_get_queue(bdev);
2142
2143 return dm_queue_merge_is_compulsory(q);
2144}
2145
2146/*
2147 * Return 1 if it is acceptable to ignore merge_bvec_fn based
2148 * on the properties of the underlying devices.
2149 */
2150static int dm_table_merge_is_optional(struct dm_table *table)
2151{
2152 unsigned i = 0;
2153 struct dm_target *ti;
2154
2155 while (i < dm_table_get_num_targets(table)) {
2156 ti = dm_table_get_target(table, i++);
2157
2158 if (ti->type->iterate_devices &&
2159 ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
2160 return 0;
2161 }
2162
2163 return 1;
2164}
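/*
 * For reference, ti->type->iterate_devices is provided by the target and
 * invokes the callout once per underlying device.  A single-device target's
 * implementation looks roughly like the sketch below (hypothetical struct
 * name, modelled on a dm-linear-style target; not part of dm.c itself).
 */
#if 0	/* illustrative only */
struct example_single_dev_target {
	struct dm_dev *dev;
	sector_t start;
};

static int example_iterate_devices(struct dm_target *ti,
				   iterate_devices_callout_fn fn, void *data)
{
	struct example_single_dev_target *c = ti->private;

	/* Hand the one underlying device to the callout, e.g.
	 * dm_device_merge_is_compulsory() above. */
	return fn(ti, c->dev, c->start, ti->len, data);
}
#endif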
2165
042d2a9b
AK
2166/*
2167 * Returns old map, which caller must destroy.
2168 */
2169static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
2170 struct queue_limits *limits)
1da177e4 2171{
042d2a9b 2172 struct dm_table *old_map;
165125e1 2173 struct request_queue *q = md->queue;
1da177e4 2174 sector_t size;
d5b9dd04 2175 int merge_is_optional;
1da177e4
LT
2176
2177 size = dm_table_get_size(t);
3ac51e74
DW
2178
2179 /*
2180 * Wipe any geometry if the size of the table changed.
2181 */
fd2ed4d2 2182 if (size != dm_get_size(md))
3ac51e74
DW
2183 memset(&md->geometry, 0, sizeof(md->geometry));
2184
32a926da 2185 __set_size(md, size);
d5816876 2186
2ca3310e
AK
2187 dm_table_event_callback(t, event_callback, md);
2188
e6ee8c0b
KU
2189 /*
2190 * The queue hasn't been stopped yet if the old table type wasn't
2191 * request-based during suspension, so stop it now to prevent
2192 * I/O from being mapped before resume.
2193 * This must be done before setting the queue restrictions,
2194 * because request-based dm may start running as soon as they are set.
2195 */
2196 if (dm_table_request_based(t) && !blk_queue_stopped(q))
2197 stop_queue(q);
2198
2199 __bind_mempools(md, t);
2200
d5b9dd04
MP
2201 merge_is_optional = dm_table_merge_is_optional(t);
2202
042d2a9b 2203 old_map = md->map;
83d5e5b0 2204 rcu_assign_pointer(md->map, t);
36a0456f
AK
2205 md->immutable_target_type = dm_table_get_immutable_target_type(t);
2206
754c5fc7 2207 dm_table_set_restrictions(t, q, limits);
d5b9dd04
MP
2208 if (merge_is_optional)
2209 set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
2210 else
2211 clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
83d5e5b0 2212 dm_sync_table(md);
1da177e4 2213
042d2a9b 2214 return old_map;
1da177e4
LT
2215}
2216
a7940155
AK
2217/*
2218 * Returns unbound table for the caller to free.
2219 */
2220static struct dm_table *__unbind(struct mapped_device *md)
1da177e4
LT
2221{
2222 struct dm_table *map = md->map;
2223
2224 if (!map)
a7940155 2225 return NULL;
1da177e4
LT
2226
2227 dm_table_event_callback(map, NULL, NULL);
9cdb8520 2228 RCU_INIT_POINTER(md->map, NULL);
83d5e5b0 2229 dm_sync_table(md);
a7940155
AK
2230
2231 return map;
1da177e4
LT
2232}
2233
2234/*
2235 * Constructor for a new device.
2236 */
2b06cfff 2237int dm_create(int minor, struct mapped_device **result)
1da177e4
LT
2238{
2239 struct mapped_device *md;
2240
2b06cfff 2241 md = alloc_dev(minor);
1da177e4
LT
2242 if (!md)
2243 return -ENXIO;
2244
784aae73
MB
2245 dm_sysfs_init(md);
2246
1da177e4
LT
2247 *result = md;
2248 return 0;
2249}
2250
a5664dad
MS
2251/*
2252 * Functions to manage md->type.
2253 * All are required to hold md->type_lock.
2254 */
2255void dm_lock_md_type(struct mapped_device *md)
2256{
2257 mutex_lock(&md->type_lock);
2258}
2259
2260void dm_unlock_md_type(struct mapped_device *md)
2261{
2262 mutex_unlock(&md->type_lock);
2263}
2264
2265void dm_set_md_type(struct mapped_device *md, unsigned type)
2266{
00c4fc3b 2267 BUG_ON(!mutex_is_locked(&md->type_lock));
a5664dad
MS
2268 md->type = type;
2269}
2270
2271unsigned dm_get_md_type(struct mapped_device *md)
2272{
00c4fc3b 2273 BUG_ON(!mutex_is_locked(&md->type_lock));
a5664dad
MS
2274 return md->type;
2275}
2276
36a0456f
AK
2277struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
2278{
2279 return md->immutable_target_type;
2280}
2281
f84cb8a4
MS
2282/*
2283 * The queue_limits are only valid as long as you have a reference
2284 * count on 'md'.
2285 */
2286struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
2287{
2288 BUG_ON(!atomic_read(&md->holders));
2289 return &md->queue->limits;
2290}
2291EXPORT_SYMBOL_GPL(dm_get_queue_limits);
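/*
 * A short sketch of a caller that already holds a reference on the md and
 * peeks at the top-level limits; hypothetical helper, not part of dm.c.
 */
#if 0	/* illustrative only */
static bool example_supports_discards(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	return limits->max_discard_sectors > 0;
}
#endif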
2292
4a0b4ddf
MS
2293/*
2294 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2295 */
2296static int dm_init_request_based_queue(struct mapped_device *md)
2297{
2298 struct request_queue *q = NULL;
2299
2300 if (md->queue->elevator)
2301 return 1;
2302
2303 /* Fully initialize the queue */
2304 q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
2305 if (!q)
2306 return 0;
2307
2308 md->queue = q;
4a0b4ddf
MS
2309 dm_init_md_queue(md);
2310 blk_queue_softirq_done(md->queue, dm_softirq_done);
2311 blk_queue_prep_rq(md->queue, dm_prep_fn);
2312 blk_queue_lld_busy(md->queue, dm_lld_busy);
4a0b4ddf
MS
2313
2314 elv_register_queue(md->queue);
2315
2316 return 1;
2317}
2318
2319/*
2320 * Setup the DM device's queue based on md's type
2321 */
2322int dm_setup_md_queue(struct mapped_device *md)
2323{
2324 if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
2325 !dm_init_request_based_queue(md)) {
2326 DMWARN("Cannot initialize queue for request-based mapped device");
2327 return -EINVAL;
2328 }
2329
2330 return 0;
2331}
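/*
 * The type lock and dm_setup_md_queue() above are meant to be used together
 * when the first table is loaded.  A rough, hypothetical sketch of such a
 * caller (the real sequence lives in the ioctl layer, not in dm.c):
 */
#if 0	/* illustrative only */
static int example_bind_first_table(struct mapped_device *md,
				    struct dm_table *t)
{
	int r;

	dm_lock_md_type(md);
	if (dm_get_md_type(md) == DM_TYPE_NONE)
		dm_set_md_type(md, dm_table_get_type(t));
	r = dm_setup_md_queue(md);
	dm_unlock_md_type(md);

	return r;
}
#endif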
2332
637842cf 2333static struct mapped_device *dm_find_md(dev_t dev)
1da177e4
LT
2334{
2335 struct mapped_device *md;
1da177e4
LT
2336 unsigned minor = MINOR(dev);
2337
2338 if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
2339 return NULL;
2340
f32c10b0 2341 spin_lock(&_minor_lock);
1da177e4
LT
2342
2343 md = idr_find(&_minor_idr, minor);
fba9f90e 2344 if (md && (md == MINOR_ALLOCED ||
f331c029 2345 (MINOR(disk_devt(dm_disk(md))) != minor) ||
abdc568b 2346 dm_deleting_md(md) ||
17b2f66f 2347 test_bit(DMF_FREEING, &md->flags))) {
637842cf 2348 md = NULL;
fba9f90e
JM
2349 goto out;
2350 }
1da177e4 2351
fba9f90e 2352out:
f32c10b0 2353 spin_unlock(&_minor_lock);
1da177e4 2354
637842cf
DT
2355 return md;
2356}
2357
d229a958
DT
2358struct mapped_device *dm_get_md(dev_t dev)
2359{
2360 struct mapped_device *md = dm_find_md(dev);
2361
2362 if (md)
2363 dm_get(md);
2364
2365 return md;
2366}
3cf2e4ba 2367EXPORT_SYMBOL_GPL(dm_get_md);
d229a958 2368
9ade92a9 2369void *dm_get_mdptr(struct mapped_device *md)
637842cf 2370{
9ade92a9 2371 return md->interface_ptr;
1da177e4
LT
2372}
2373
2374void dm_set_mdptr(struct mapped_device *md, void *ptr)
2375{
2376 md->interface_ptr = ptr;
2377}
2378
2379void dm_get(struct mapped_device *md)
2380{
2381 atomic_inc(&md->holders);
3f77316d 2382 BUG_ON(test_bit(DMF_FREEING, &md->flags));
1da177e4
LT
2383}
2384
72d94861
AK
2385const char *dm_device_name(struct mapped_device *md)
2386{
2387 return md->name;
2388}
2389EXPORT_SYMBOL_GPL(dm_device_name);
2390
3f77316d 2391static void __dm_destroy(struct mapped_device *md, bool wait)
1da177e4 2392{
1134e5ae 2393 struct dm_table *map;
83d5e5b0 2394 int srcu_idx;
1da177e4 2395
3f77316d 2396 might_sleep();
fba9f90e 2397
3f77316d 2398 spin_lock(&_minor_lock);
83d5e5b0 2399 map = dm_get_live_table(md, &srcu_idx);
3f77316d
KU
2400 idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
2401 set_bit(DMF_FREEING, &md->flags);
2402 spin_unlock(&_minor_lock);
2403
2404 if (!dm_suspended_md(md)) {
2405 dm_table_presuspend_targets(map);
2406 dm_table_postsuspend_targets(map);
1da177e4 2407 }
3f77316d 2408
83d5e5b0
MP
2409 /* dm_put_live_table must be before msleep, otherwise deadlock is possible */
2410 dm_put_live_table(md, srcu_idx);
2411
3f77316d
KU
2412 /*
2413 * Rare, but there may still be, for example, I/O requests that have yet
2414 * to complete.  Wait for all references to disappear.
2415 * No one should increment the reference count of the mapped_device
2416 * after the mapped_device state becomes DMF_FREEING.
2417 */
2418 if (wait)
2419 while (atomic_read(&md->holders))
2420 msleep(1);
2421 else if (atomic_read(&md->holders))
2422 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2423 dm_device_name(md), atomic_read(&md->holders));
2424
2425 dm_sysfs_exit(md);
3f77316d
KU
2426 dm_table_destroy(__unbind(md));
2427 free_dev(md);
2428}
2429
2430void dm_destroy(struct mapped_device *md)
2431{
2432 __dm_destroy(md, true);
2433}
2434
2435void dm_destroy_immediate(struct mapped_device *md)
2436{
2437 __dm_destroy(md, false);
2438}
2439
2440void dm_put(struct mapped_device *md)
2441{
2442 atomic_dec(&md->holders);
1da177e4 2443}
79eb885c 2444EXPORT_SYMBOL_GPL(dm_put);
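/*
 * dm_get_md() returns the device with an extra holder reference that the
 * caller must drop with dm_put().  A minimal sketch of that pattern
 * (hypothetical helper; not part of dm.c):
 */
#if 0	/* illustrative only */
static void example_use_md(dev_t dev)
{
	struct mapped_device *md = dm_get_md(dev);

	if (!md)
		return;

	pr_info("found %s\n", dm_device_name(md));

	dm_put(md);
}
#endif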
1da177e4 2445
401600df 2446static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
46125c1c
MB
2447{
2448 int r = 0;
b44ebeb0
MP
2449 DECLARE_WAITQUEUE(wait, current);
2450
b44ebeb0 2451 add_wait_queue(&md->wait, &wait);
46125c1c
MB
2452
2453 while (1) {
401600df 2454 set_current_state(interruptible);
46125c1c 2455
b4324fee 2456 if (!md_in_flight(md))
46125c1c
MB
2457 break;
2458
401600df
MP
2459 if (interruptible == TASK_INTERRUPTIBLE &&
2460 signal_pending(current)) {
46125c1c
MB
2461 r = -EINTR;
2462 break;
2463 }
2464
2465 io_schedule();
2466 }
2467 set_current_state(TASK_RUNNING);
2468
b44ebeb0
MP
2469 remove_wait_queue(&md->wait, &wait);
2470
46125c1c
MB
2471 return r;
2472}
2473
1da177e4
LT
2474/*
2475 * Process the deferred bios
2476 */
ef208587 2477static void dm_wq_work(struct work_struct *work)
1da177e4 2478{
ef208587
MP
2479 struct mapped_device *md = container_of(work, struct mapped_device,
2480 work);
6d6f10df 2481 struct bio *c;
83d5e5b0
MP
2482 int srcu_idx;
2483 struct dm_table *map;
1da177e4 2484
83d5e5b0 2485 map = dm_get_live_table(md, &srcu_idx);
ef208587 2486
3b00b203 2487 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
df12ee99
AK
2488 spin_lock_irq(&md->deferred_lock);
2489 c = bio_list_pop(&md->deferred);
2490 spin_unlock_irq(&md->deferred_lock);
2491
6a8736d1 2492 if (!c)
df12ee99 2493 break;
022c2611 2494
e6ee8c0b
KU
2495 if (dm_request_based(md))
2496 generic_make_request(c);
6a8736d1 2497 else
83d5e5b0 2498 __split_and_process_bio(md, map, c);
022c2611 2499 }
73d410c0 2500
83d5e5b0 2501 dm_put_live_table(md, srcu_idx);
1da177e4
LT
2502}
2503
9a1fb464 2504static void dm_queue_flush(struct mapped_device *md)
304f3f6a 2505{
3b00b203 2506 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
4e857c58 2507 smp_mb__after_atomic();
53d5914f 2508 queue_work(md->wq, &md->work);
304f3f6a
MB
2509}
2510
1da177e4 2511/*
042d2a9b 2512 * Swap in a new table, returning the old one for the caller to destroy.
1da177e4 2513 */
042d2a9b 2514struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
1da177e4 2515{
87eb5b21 2516 struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
754c5fc7 2517 struct queue_limits limits;
042d2a9b 2518 int r;
1da177e4 2519
e61290a4 2520 mutex_lock(&md->suspend_lock);
1da177e4
LT
2521
2522 /* device must be suspended */
4f186f8b 2523 if (!dm_suspended_md(md))
93c534ae 2524 goto out;
1da177e4 2525
3ae70656
MS
2526 /*
2527 * If the new table has no data devices, retain the existing limits.
2528 * This helps multipath with queue_if_no_path if all paths disappear:
2529 * new I/O is then queued based on these limits until some paths
2530 * reappear.
2531 */
2532 if (dm_table_has_no_data_devices(table)) {
83d5e5b0 2533 live_map = dm_get_live_table_fast(md);
3ae70656
MS
2534 if (live_map)
2535 limits = md->queue->limits;
83d5e5b0 2536 dm_put_live_table_fast(md);
3ae70656
MS
2537 }
2538
87eb5b21
MC
2539 if (!live_map) {
2540 r = dm_calculate_queue_limits(table, &limits);
2541 if (r) {
2542 map = ERR_PTR(r);
2543 goto out;
2544 }
042d2a9b 2545 }
754c5fc7 2546
042d2a9b 2547 map = __bind(md, table, &limits);
1da177e4 2548
93c534ae 2549out:
e61290a4 2550 mutex_unlock(&md->suspend_lock);
042d2a9b 2551 return map;
1da177e4
LT
2552}
2553
2554/*
2555 * Functions to lock and unlock any filesystem running on the
2556 * device.
2557 */
2ca3310e 2558static int lock_fs(struct mapped_device *md)
1da177e4 2559{
e39e2e95 2560 int r;
1da177e4
LT
2561
2562 WARN_ON(md->frozen_sb);
dfbe03f6 2563
db8fef4f 2564 md->frozen_sb = freeze_bdev(md->bdev);
dfbe03f6 2565 if (IS_ERR(md->frozen_sb)) {
cf222b37 2566 r = PTR_ERR(md->frozen_sb);
e39e2e95
AK
2567 md->frozen_sb = NULL;
2568 return r;
dfbe03f6
AK
2569 }
2570
aa8d7c2f
AK
2571 set_bit(DMF_FROZEN, &md->flags);
2572
1da177e4
LT
2573 return 0;
2574}
2575
2ca3310e 2576static void unlock_fs(struct mapped_device *md)
1da177e4 2577{
aa8d7c2f
AK
2578 if (!test_bit(DMF_FROZEN, &md->flags))
2579 return;
2580
db8fef4f 2581 thaw_bdev(md->bdev, md->frozen_sb);
1da177e4 2582 md->frozen_sb = NULL;
aa8d7c2f 2583 clear_bit(DMF_FROZEN, &md->flags);
1da177e4
LT
2584}
2585
2586/*
2587 * We need to be able to change a mapping table under a mounted
2588 * filesystem. For example we might want to move some data in
2589 * the background. Before the table can be swapped with
2590 * dm_bind_table, dm_suspend must be called to flush any in
2591 * flight bios and ensure that any further io gets deferred.
2592 */
cec47e3d
KU
2593/*
2594 * Suspend mechanism in request-based dm.
2595 *
9f518b27
KU
2596 * 1. Flush all I/Os by lock_fs() if needed.
2597 * 2. Stop dispatching any I/O by stopping the request_queue.
2598 * 3. Wait for all in-flight I/Os to be completed or requeued.
cec47e3d 2599 *
9f518b27 2600 * To abort suspend, start the request_queue.
cec47e3d 2601 */
a3d77d35 2602int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
1da177e4 2603{
2ca3310e 2604 struct dm_table *map = NULL;
46125c1c 2605 int r = 0;
a3d77d35 2606 int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
2e93ccc1 2607 int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;
1da177e4 2608
e61290a4 2609 mutex_lock(&md->suspend_lock);
2ca3310e 2610
4f186f8b 2611 if (dm_suspended_md(md)) {
73d410c0 2612 r = -EINVAL;
d287483d 2613 goto out_unlock;
73d410c0 2614 }
1da177e4 2615
83d5e5b0 2616 map = md->map;
1da177e4 2617
2e93ccc1
KU
2618 /*
2619 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2620 * This flag is cleared before dm_suspend returns.
2621 */
2622 if (noflush)
2623 set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
2624
cf222b37
AK
2625 /* This does not get reverted if there's an error later. */
2626 dm_table_presuspend_targets(map);
2627
32a926da 2628 /*
9f518b27
KU
2629 * Flush I/O to the device.
2630 * Any I/O submitted after lock_fs() may not be flushed.
2631 * noflush takes precedence over do_lockfs.
2632 * (lock_fs() flushes I/Os and waits for them to complete.)
32a926da
MP
2633 */
2634 if (!noflush && do_lockfs) {
2635 r = lock_fs(md);
2636 if (r)
83d5e5b0 2637 goto out_unlock;
aa8d7c2f 2638 }
1da177e4
LT
2639
2640 /*
3b00b203
MP
2641 * Here we must make sure that no processes are submitting requests
2642 * to target drivers, i.e. no one may be executing
2643 * __split_and_process_bio, which is called from dm_request and
2644 * dm_wq_work.
2645 *
2646 * To get all processes out of __split_and_process_bio in dm_request,
2647 * we take the write lock. To prevent any process from reentering
6a8736d1
TH
2648 * __split_and_process_bio from dm_request and quiesce the thread
2649 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
2650 * flush_workqueue(md->wq).
1da177e4 2651 */
1eb787ec 2652 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
83d5e5b0 2653 synchronize_srcu(&md->io_barrier);
1da177e4 2654
d0bcb878 2655 /*
29e4013d
TH
2656 * Stop md->queue before flushing md->wq in case request-based
2657 * dm defers requests to md->wq from md->queue.
d0bcb878 2658 */
cec47e3d 2659 if (dm_request_based(md))
9f518b27 2660 stop_queue(md->queue);
cec47e3d 2661
d0bcb878
KU
2662 flush_workqueue(md->wq);
2663
1da177e4 2664 /*
3b00b203
MP
2665 * At this point no more requests are entering target request routines.
2666 * We call dm_wait_for_completion to wait for all existing requests
2667 * to finish.
1da177e4 2668 */
401600df 2669 r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);
1da177e4 2670
6d6f10df 2671 if (noflush)
022c2611 2672 clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
83d5e5b0 2673 synchronize_srcu(&md->io_barrier);
2e93ccc1 2674
1da177e4 2675 /* were we interrupted ? */
46125c1c 2676 if (r < 0) {
9a1fb464 2677 dm_queue_flush(md);
73d410c0 2678
cec47e3d 2679 if (dm_request_based(md))
9f518b27 2680 start_queue(md->queue);
cec47e3d 2681
2ca3310e 2682 unlock_fs(md);
83d5e5b0 2683 goto out_unlock; /* pushback list is already flushed, so skip flush */
2ca3310e 2684 }
1da177e4 2685
3b00b203
MP
2686 /*
2687 * If dm_wait_for_completion returned 0, the device is completely
2688 * quiescent now. There is no request-processing activity. All new
2689 * requests are being added to md->deferred list.
2690 */
2691
2ca3310e 2692 set_bit(DMF_SUSPENDED, &md->flags);
b84b0287 2693
4d4471cb
KU
2694 dm_table_postsuspend_targets(map);
2695
d287483d 2696out_unlock:
e61290a4 2697 mutex_unlock(&md->suspend_lock);
cf222b37 2698 return r;
1da177e4
LT
2699}
2700
2701int dm_resume(struct mapped_device *md)
2702{
cf222b37 2703 int r = -EINVAL;
cf222b37 2704 struct dm_table *map = NULL;
1da177e4 2705
e61290a4 2706 mutex_lock(&md->suspend_lock);
4f186f8b 2707 if (!dm_suspended_md(md))
cf222b37 2708 goto out;
cf222b37 2709
83d5e5b0 2710 map = md->map;
2ca3310e 2711 if (!map || !dm_table_get_size(map))
cf222b37 2712 goto out;
1da177e4 2713
8757b776
MB
2714 r = dm_table_resume_targets(map);
2715 if (r)
2716 goto out;
2ca3310e 2717
9a1fb464 2718 dm_queue_flush(md);
2ca3310e 2719
cec47e3d
KU
2720 /*
2721 * Flushing deferred I/Os must be done after targets are resumed
2722 * so that mapping of targets can work correctly.
2724 * Request-based dm queues the deferred I/Os in its request_queue.
2724 */
2725 if (dm_request_based(md))
2726 start_queue(md->queue);
2727
2ca3310e
AK
2728 unlock_fs(md);
2729
2730 clear_bit(DMF_SUSPENDED, &md->flags);
2731
cf222b37
AK
2732 r = 0;
2733out:
e61290a4 2734 mutex_unlock(&md->suspend_lock);
2ca3310e 2735
cf222b37 2736 return r;
1da177e4
LT
2737}
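/*
 * Putting dm_suspend(), dm_swap_table() and dm_resume() together, a table
 * swap seen from a caller looks roughly like the sketch below (hypothetical
 * helper with error handling trimmed; the real sequence lives in the ioctl
 * layer, not in dm.c).
 */
#if 0	/* illustrative only */
static int example_swap_table(struct mapped_device *md, struct dm_table *new_map)
{
	struct dm_table *old_map;
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	old_map = dm_swap_table(md, new_map);	/* device must be suspended */
	if (IS_ERR(old_map)) {
		dm_resume(md);
		return PTR_ERR(old_map);
	}

	if (old_map)
		dm_table_destroy(old_map);	/* caller owns, and must destroy, the old map */

	return dm_resume(md);
}
#endif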
2738
fd2ed4d2
MP
2739/*
2740 * Internal suspend/resume works like userspace-driven suspend. It waits
2741 * until all bios finish and prevents issuing new bios to the target drivers.
2742 * It may be used only from the kernel.
2743 *
2744 * Internal suspend holds md->suspend_lock, which prevents interaction with
2745 * userspace-driven suspend.
2746 */
2747
2748void dm_internal_suspend(struct mapped_device *md)
2749{
2750 mutex_lock(&md->suspend_lock);
2751 if (dm_suspended_md(md))
2752 return;
2753
2754 set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
2755 synchronize_srcu(&md->io_barrier);
2756 flush_workqueue(md->wq);
2757 dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
2758}
2759
2760void dm_internal_resume(struct mapped_device *md)
2761{
2762 if (dm_suspended_md(md))
2763 goto done;
2764
2765 dm_queue_flush(md);
2766
2767done:
2768 mutex_unlock(&md->suspend_lock);
2769}
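/*
 * dm_internal_suspend() takes md->suspend_lock and dm_internal_resume()
 * releases it, so the two must be strictly paired by a kernel-internal
 * caller, roughly as sketched here (hypothetical helper; not part of dm.c):
 */
#if 0	/* illustrative only */
static void example_quiesce_briefly(struct mapped_device *md)
{
	dm_internal_suspend(md);	/* blocks new bios, waits for in-flight I/O */

	/* ... operate on the quiesced device ... */

	dm_internal_resume(md);		/* restarts deferred I/O, drops suspend_lock */
}
#endif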
2770
1da177e4
LT
2771/*-----------------------------------------------------------------
2772 * Event notification.
2773 *---------------------------------------------------------------*/
3abf85b5 2774int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
60935eb2 2775 unsigned cookie)
69267a30 2776{
60935eb2
MB
2777 char udev_cookie[DM_COOKIE_LENGTH];
2778 char *envp[] = { udev_cookie, NULL };
2779
2780 if (!cookie)
3abf85b5 2781 return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
60935eb2
MB
2782 else {
2783 snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
2784 DM_COOKIE_ENV_VAR_NAME, cookie);
3abf85b5
PR
2785 return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
2786 action, envp);
60935eb2 2787 }
69267a30
AK
2788}
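/*
 * Callers pass 0 for an ordinary uevent, or a non-zero cookie so that udev
 * can match the event via the DM_COOKIE_ENV_VAR_NAME variable.  Hypothetical
 * call site, not from dm.c:
 */
#if 0	/* illustrative only */
static void example_notify_change(struct mapped_device *md, unsigned cookie)
{
	/* cookie == 0 sends a plain CHANGE event; non-zero exports it to udev */
	dm_kobject_uevent(md, KOBJ_CHANGE, cookie);
}
#endif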
2789
7a8c3d3b
MA
2790uint32_t dm_next_uevent_seq(struct mapped_device *md)
2791{
2792 return atomic_add_return(1, &md->uevent_seq);
2793}
2794
1da177e4
LT
2795uint32_t dm_get_event_nr(struct mapped_device *md)
2796{
2797 return atomic_read(&md->event_nr);
2798}
2799
2800int dm_wait_event(struct mapped_device *md, int event_nr)
2801{
2802 return wait_event_interruptible(md->eventq,
2803 (event_nr != atomic_read(&md->event_nr)));
2804}
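/*
 * dm_get_event_nr() and dm_wait_event() form a sample-then-wait pair: the
 * caller records the current event number and then sleeps until a table
 * event bumps it.  Hypothetical helper; not part of dm.c:
 */
#if 0	/* illustrative only */
static int example_wait_for_next_event(struct mapped_device *md)
{
	int ev = dm_get_event_nr(md);

	/* Returns 0 once a new event has arrived, or -ERESTARTSYS on a signal. */
	return dm_wait_event(md, ev);
}
#endif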
2805
7a8c3d3b
MA
2806void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
2807{
2808 unsigned long flags;
2809
2810 spin_lock_irqsave(&md->uevent_lock, flags);
2811 list_add(elist, &md->uevent_list);
2812 spin_unlock_irqrestore(&md->uevent_lock, flags);
2813}
2814
1da177e4
LT
2815/*
2816 * The gendisk is only valid as long as you have a reference
2817 * count on 'md'.
2818 */
2819struct gendisk *dm_disk(struct mapped_device *md)
2820{
2821 return md->disk;
2822}
2823
784aae73
MB
2824struct kobject *dm_kobject(struct mapped_device *md)
2825{
2995fa78 2826 return &md->kobj_holder.kobj;
784aae73
MB
2827}
2828
784aae73
MB
2829struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
2830{
2831 struct mapped_device *md;
2832
2995fa78 2833 md = container_of(kobj, struct mapped_device, kobj_holder.kobj);
784aae73 2834
4d89b7b4 2835 if (test_bit(DMF_FREEING, &md->flags) ||
432a212c 2836 dm_deleting_md(md))
4d89b7b4
MB
2837 return NULL;
2838
784aae73
MB
2839 dm_get(md);
2840 return md;
2841}
2842
4f186f8b 2843int dm_suspended_md(struct mapped_device *md)
1da177e4
LT
2844{
2845 return test_bit(DMF_SUSPENDED, &md->flags);
2846}
2847
2c140a24
MP
2848int dm_test_deferred_remove_flag(struct mapped_device *md)
2849{
2850 return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
2851}
2852
64dbce58
KU
2853int dm_suspended(struct dm_target *ti)
2854{
ecdb2e25 2855 return dm_suspended_md(dm_table_get_md(ti->table));
64dbce58
KU
2856}
2857EXPORT_SYMBOL_GPL(dm_suspended);
2858
2e93ccc1
KU
2859int dm_noflush_suspending(struct dm_target *ti)
2860{
ecdb2e25 2861 return __noflush_suspending(dm_table_get_md(ti->table));
2e93ccc1
KU
2862}
2863EXPORT_SYMBOL_GPL(dm_noflush_suspending);
2864
c0820cf5 2865struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
e6ee8c0b 2866{
5f015204
JN
2867 struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
2868 struct kmem_cache *cachep;
2869 unsigned int pool_size;
2870 unsigned int front_pad;
e6ee8c0b
KU
2871
2872 if (!pools)
2873 return NULL;
2874
23e5083b 2875 if (type == DM_TYPE_BIO_BASED) {
5f015204 2876 cachep = _io_cache;
e8603136 2877 pool_size = dm_get_reserved_bio_based_ios();
5f015204
JN
2878 front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
2879 } else if (type == DM_TYPE_REQUEST_BASED) {
2880 cachep = _rq_tio_cache;
f4790826 2881 pool_size = dm_get_reserved_rq_based_ios();
5f015204
JN
2882 front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
2883 /* per_bio_data_size is not used. See __bind_mempools(). */
2884 WARN_ON(per_bio_data_size != 0);
2885 } else
2886 goto out;
e6ee8c0b 2887
6cfa5857 2888 pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
5f015204
JN
2889 if (!pools->io_pool)
2890 goto out;
e6ee8c0b 2891
5f015204 2892 pools->bs = bioset_create(pool_size, front_pad);
e6ee8c0b 2893 if (!pools->bs)
5f015204 2894 goto out;
e6ee8c0b 2895
a91a2785 2896 if (integrity && bioset_integrity_create(pools->bs, pool_size))
5f015204 2897 goto out;
a91a2785 2898
e6ee8c0b
KU
2899 return pools;
2900
5f015204
JN
2901out:
2902 dm_free_md_mempools(pools);
e6ee8c0b
KU
2903
2904 return NULL;
2905}
2906
2907void dm_free_md_mempools(struct dm_md_mempools *pools)
2908{
2909 if (!pools)
2910 return;
2911
2912 if (pools->io_pool)
2913 mempool_destroy(pools->io_pool);
2914
e6ee8c0b
KU
2915 if (pools->bs)
2916 bioset_free(pools->bs);
2917
2918 kfree(pools);
2919}
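/*
 * A rough sketch of a table-side caller creating pools for a bio-based
 * device (hypothetical helper; the real call sites live in dm-table.c, not
 * in dm.c).  Pools that never get bound to an md must be released with
 * dm_free_md_mempools().
 */
#if 0	/* illustrative only */
static struct dm_md_mempools *example_bio_based_pools(unsigned per_bio_data_size)
{
	/* Second argument: no integrity profile in this example. */
	return dm_alloc_md_mempools(DM_TYPE_BIO_BASED, 0, per_bio_data_size);
}
#endif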
2920
83d5cde4 2921static const struct block_device_operations dm_blk_dops = {
1da177e4
LT
2922 .open = dm_blk_open,
2923 .release = dm_blk_close,
aa129a22 2924 .ioctl = dm_blk_ioctl,
3ac51e74 2925 .getgeo = dm_blk_getgeo,
1da177e4
LT
2926 .owner = THIS_MODULE
2927};
2928
1da177e4
LT
2929/*
2930 * module hooks
2931 */
2932module_init(dm_init);
2933module_exit(dm_exit);
2934
2935module_param(major, uint, 0);
2936MODULE_PARM_DESC(major, "The major number of the device mapper");
f4790826 2937
e8603136
MS
2938module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
2939MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");
2940
f4790826
MS
2941module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
2942MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");
2943
1da177e4
LT
2944MODULE_DESCRIPTION(DM_NAME " driver");
2945MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2946MODULE_LICENSE("GPL");