dm mpath: validate hw_handler argument count
[deliverable/linux.git] / drivers/md/dm.c
/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct request *rq;
};

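/*
 * __map_bio() stores a dm_target_io in a clone's bi_private; targets
 * can use this helper to get back at the per-target map_info.
 */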
union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

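/*
 * Sentinel stored in the minor IDR while a mapped_device is still
 * being created; alloc_dev() swaps in the real md pointer once the
 * device is fully initialised.
 */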
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */
	int barrier_error;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* sysfs handle */
	struct kobject kobj;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

      bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

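/*
 * In-flight accounting: md->pending counts bios handed to targets.
 * end_io_acct() wakes md->wait when it drops to zero, which is what
 * dm_wait_for_completion() sleeps on during suspend.
 */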
static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a barrier.
	 */
	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant solution is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (bio_barrier(bio)) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			md->barrier_error = io_error;
			end_io_acct(io);
		} else {
			end_io_acct(io);

			if (io_error != DM_ENDIO_REQUEUE) {
				trace_block_bio_complete(md->queue, bio);

				bio_endio(bio, io_error);
			}
		}

		free_io(md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	free_tio(md, tio);
	bio_put(bio);
	dec_pending(io, error);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}

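/*
 * Hand a clone to the target's map function.  DM_MAPIO_REMAPPED means
 * we dispatch the bio ourselves; r == 0 means the target has taken
 * ownership; a negative value or DM_MAPIO_REQUEUE is passed on to
 * dec_pending().
 */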
static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~(1 << BIO_RW_BARRIER);
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
	}

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~(1 << BIO_RW_BARRIER);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
	}

	return clone;
}

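/*
 * Map the bio described by ci onto targets, covering three cases: the
 * remainder fits a single target and is cloned whole, several complete
 * bvecs fit and are cloned together, or one bvec straddles a target
 * boundary and has to be carved up with split_bvec().
 */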
static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map)) {
		if (!bio_barrier(bio))
			bio_io_error(bio);
		else
			md->barrier_error = -EIO;
		return;
	}

	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio_barrier(bio))) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {
			bio_io_error(bio);
			return 0;
		}

		queue_io(md, bio);

		return 0;
	}

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);
	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

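/*
 * backing_dev_info congestion callback: ask the active table unless
 * we're suspended, in which case the queried bits are reported back
 * unchanged, i.e. as congested.
 */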
static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}

static struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN, NULL);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);

	if (!size) {
		dm_table_destroy(t);
		return 0;
	}

	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_destroy(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	dm_sysfs_init(md);

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

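/*
 * Dropping the last reference tears the device down.  The minor is
 * re-marked MINOR_ALLOCED and DMF_FREEING is set under _minor_lock,
 * so opens racing with removal see a dying device and fail.
 */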
void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_sysfs_exit(md);
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

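/*
 * Wait until md->pending drops to zero.  In TASK_INTERRUPTIBLE mode a
 * pending signal aborts the wait and -EINTR is returned.
 */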
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

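/*
 * Barrier handling: dm_flush() drains all in-flight I/O, then the
 * barrier bio itself is processed and the device drained again.  An
 * error recorded in md->barrier_error completes the bio, except for
 * DM_ENDIO_REQUEUE, which means the bio was pushed back for
 * resubmission and must not be completed here.
 */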
static int dm_flush(struct mapped_device *md)
{
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
	return 0;
}

static void process_barrier(struct mapped_device *md, struct bio *bio)
{
	int error = dm_flush(md);

	if (unlikely(error)) {
		bio_endio(bio, error);
		return;
	}
	if (bio_empty_barrier(bio)) {
		bio_endio(bio, 0);
		return;
	}

	__split_and_process_bio(md, bio);

	error = dm_flush(md);

	if (!error && md->barrier_error)
		error = md->barrier_error;

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, error);
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c) {
			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
			break;
		}

		up_write(&md->io_lock);

		if (bio_barrier(c))
			process_barrier(md, c);
		else
			__split_and_process_bio(md, c);

		down_write(&md->io_lock);
	}

	up_write(&md->io_lock);
}

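/*
 * Re-enable bio processing: clear DMF_BLOCK_IO_FOR_SUSPEND and kick
 * the per-device workqueue so deferred bios get resubmitted.
 */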
static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj;
}

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");