dm: support barriers on simple devices
deliverable/linux.git: drivers/md/dm.c

/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-list.h"
#include "dm-uevent.h"

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/blktrace_api.h>
#include <trace/block.h>

#define DM_MSG_PREFIX "core"

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * For bio-based dm.
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	int error;
	atomic_t io_count;
	struct bio *bio;
	unsigned long start_time;
};

/*
 * For bio-based dm.
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
struct dm_target_io {
	struct dm_io *io;
	struct dm_target *ti;
	union map_info info;
};

DEFINE_TRACE(block_bio_complete);

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct dm_target *ti;
	struct request *orig, clone;
	int error;
	union map_info info;
};

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct request *rq;
};

union map_info *dm_get_mapinfo(struct bio *bio)
{
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;
	return NULL;
}

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5

/*
 * Work processed by per-device workqueue.
 */
struct dm_wq_req {
	enum {
		DM_WQ_FLUSH_DEFERRED,
	} type;
	struct work_struct work;
	struct mapped_device *md;
	void *context;
};

struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;
	spinlock_t pushback_lock;
	rwlock_t map_lock;
	atomic_t holders;
	atomic_t open_count;

	unsigned long flags;

	struct request_queue *queue;
	struct gendisk *disk;
	char name[16];

	void *interface_ptr;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	atomic_t pending;
	wait_queue_head_t wait;
	struct bio_list deferred;
	struct bio_list pushback;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */
	mempool_t *io_pool;
	mempool_t *tio_pool;

	struct bio_set *bs;

	/*
	 * Event handling.
	 */
	atomic_t event_nr;
	wait_queue_head_t eventq;
	atomic_t uevent_seq;
	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support requires holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *suspended_bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;
};

#define MIN_IOS 256
static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;

static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
	if (!_tio_cache)
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_bio_info_cache;

	_major = major;
	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_uevent_exit;

	if (!_major)
		_major = r;

	return 0;

out_uevent_exit:
	dm_uevent_exit();
out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
	kmem_cache_destroy(_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);
	dm_uevent_exit();

	_major = 0;

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	local_init,
	dm_target_init,
	dm_linear_init,
	dm_stripe_init,
	dm_kcopyd_init,
	dm_interface_init,
};

static void (*_exits[])(void) = {
	local_exit,
	dm_target_exit,
	dm_linear_exit,
	dm_stripe_exit,
	dm_kcopyd_exit,
	dm_interface_exit,
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);

	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;

bad:
	while (i--)
		_exits[i]();

	return r;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();
}

/*
 * Block device functions
 */
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    test_bit(DMF_DELETING, &md->flags)) {
		md = NULL;
		goto out;
	}

	dm_get(md);
	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;
	atomic_dec(&md->open_count);
	dm_put(md);
	return 0;
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md))
		r = -EBUSY;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *tgt;
	int r = -ENOTTY;

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_table_put(map);

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static struct dm_target_io *alloc_tio(struct mapped_device *md)
{
	return mempool_alloc(md->tio_pool, GFP_NOIO);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	mempool_free(tio, md->tio_pool);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	int cpu;

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_unlock();
	dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
	part_stat_unlock();

	dm_disk(md)->part0.in_flight = pending =
		atomic_dec_return(&md->pending);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static int queue_io(struct mapped_device *md, struct bio *bio)
{
	down_write(&md->io_lock);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_write(&md->io_lock);
		return 1;
	}

	bio_list_add(&md->deferred, bio);

	up_write(&md->io_lock);
	return 0; /* deferred successfully */
}

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_table(struct mapped_device *md)
{
	struct dm_table *t;

	read_lock(&md->map_lock);
	t = md->map;
	if (t)
		dm_table_get(t);
	read_unlock(&md->map_lock);

	return t;
}
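
/*
 * Illustrative sketch (not part of the original file): the reference
 * discipline the comment above asks for. The helper name is invented;
 * dm_get_table(), dm_table_get_size() and dm_table_put() are the real
 * interfaces used throughout this file.
 */
#if 0
static sector_t example_mapped_size(struct mapped_device *md)
{
	struct dm_table *map = dm_get_table(md);	/* takes a reference */
	sector_t size = 0;

	if (map) {
		size = dm_table_get_size(map);
		dm_table_put(map);	/* drop the reference when done */
	}

	return size;
}
#endif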

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}
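
/*
 * Worked example (illustrative): for geo = { .cylinders = 1024,
 * .heads = 255, .sectors = 63 }, sz = 1024 * 255 * 63 = 16450560
 * sectors (~7.8 GiB with 512-byte sectors), so any geo->start past
 * 16450560 is rejected with -EINVAL above.
 */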

/*-----------------------------------------------------------------
 * CRUD START:
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necessary.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;

	/* Push-back supersedes any I/O errors */
	if (error && !(io->error > 0 && __noflush_suspending(io->md)))
		io->error = error;

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 * This must be handled before the sleeper on
			 * suspend queue merges the pushback list.
			 */
			spin_lock_irqsave(&io->md->pushback_lock, flags);
			if (__noflush_suspending(io->md))
				bio_list_add(&io->md->pushback, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&io->md->pushback_lock, flags);
		}

		end_io_acct(io);

		if (io->error != DM_ENDIO_REQUEUE) {
			trace_block_bio_complete(io->md->queue, io->bio);

			bio_endio(io->bio, io->error);
		}

		free_io(io->md, io);
	}
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = bio->bi_private;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	dec_pending(tio->io, error);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	bio_put(bio);
	free_tio(md, tio);
}

static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
{
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->split_io) {
		sector_t boundary;
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
			   - offset;
		if (len > boundary)
			len = boundary;
	}

	return len;
}
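
/*
 * Worked example (illustrative; the mask arithmetic assumes
 * ti->split_io is a power of two): with split_io = 8 and offset = 5,
 * boundary = ((5 + 8) & ~7) - 5 = 8 - 5 = 3, so at most 3 sectors may
 * be issued before the io would straddle the next 8-sector chunk.
 */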

static void __map_bio(struct dm_target *ti, struct bio *clone,
		      struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;

	/*
	 * Sanity checks.
	 */
	BUG_ON(!clone->bi_size);

	clone->bi_end_io = clone_endio;
	clone->bi_private = tio;

	/*
	 * Map the clone. If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_sector;
	r = ti->type->map(ti, clone, &tio->info);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_remap(bdev_get_queue(clone->bi_bdev), clone,
				  tio->io->bio->bi_bdev->bd_dev,
				  clone->bi_sector, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		/*
		 * Store bio_set for cleanup.
		 */
		clone->bi_private = md->bs;
		bio_put(clone);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	sector_t sector_count;
	unsigned short idx;
};

static void dm_bio_destructor(struct bio *bio)
{
	struct bio_set *bs = bio->bi_private;

	bio_free(bio, bs);
}

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
{
	struct bio *clone;
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw;
	clone->bi_vcnt = 1;
	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	return clone;
}

/*
 * Creates a bio that consists of a range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
{
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	return clone;
}

static int __clone_and_map(struct clone_info *ci)
{
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci->md);
	tio->io = ci->io;
	tio->ti = ti;
	memset(&tio->info, 0, sizeof(tio->info));

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
				  ci->md->bs);
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		int i;
		sector_t remaining = max;
		sector_t bv_len;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)
				break;

			remaining -= bv_len;
			len += bv_len;
		}

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
				  ci->md->bs);
		__map_bio(ti, clone, tio);

		ci->sector += len;
		ci->sector_count -= len;
		ci->idx = i;

	} else {
		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

		do {
			if (offset) {
				ti = dm_table_find_target(ci->map, ci->sector);
				if (!dm_target_is_valid(ti))
					return -EIO;

				max = max_io_len(ci->md, ci->sector, ti);

				tio = alloc_tio(ci->md);
				tio->io = ci->io;
				tio->ti = ti;
				memset(&tio->info, 0, sizeof(tio->info));
			}

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,
					   ci->md->bs);

			__map_bio(ti, clone, tio);

			ci->sector += len;
			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);

		ci->idx++;
	}

	return 0;
}

/*
 * Split the bio into several clones.
 */
static int __split_bio(struct mapped_device *md, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	ci.map = dm_get_table(md);
	if (unlikely(!ci.map))
		return -EIO;
	if (unlikely(bio_barrier(bio) && !dm_table_barrier_ok(ci.map))) {
		dm_table_put(ci.map);
		bio_endio(bio, -EOPNOTSUPP);
		return 0;
	}
	ci.md = md;
	ci.bio = bio;
	ci.io = alloc_io(md);
	ci.io->error = 0;
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);

	return 0;
}
/*-----------------------------------------------------------------
 * CRUD END
 *---------------------------------------------------------------*/
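
/*
 * Note on the barrier check in __split_bio() above (the subject of this
 * commit): a barrier bio is failed with -EOPNOTSUPP unless
 * dm_table_barrier_ok() approves the table. A minimal sketch of such a
 * predicate follows (illustrative only; the real one lives in
 * dm-table.c, and the supports_barriers flag here is hypothetical):
 */
#if 0
static int example_table_barrier_ok(struct dm_table *t)
{
	unsigned i;

	for (i = 0; i < dm_table_get_num_targets(t); i++) {
		struct dm_target *ti = dm_table_get_target(t, i);

		if (!ti->type->supports_barriers)	/* hypothetical flag */
			return 0;
	}

	return 1;
}
#endif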

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out_table;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
	if (max_size < 0)
		max_size = 0;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);

out_table:
	dm_table_put(map);

out:
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}
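
/*
 * Worked example (illustrative): if max_io_len() permits 128 sectors at
 * bvm->bi_sector and the bio already holds bvm->bi_size = 61440 bytes,
 * then max_size = (128 << SECTOR_SHIFT) - 61440 = 65536 - 61440 = 4096,
 * i.e. one more 4 KiB page may be added before the bio needs splitting.
 */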

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static int dm_request(struct request_queue *q, struct bio *bio)
{
	int r = -EIO;
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));
	part_stat_unlock();

	/*
	 * If we're suspended we have to queue
	 * this io for later.
	 */
	while (test_bit(DMF_BLOCK_IO, &md->flags)) {
		up_read(&md->io_lock);

		if (bio_rw(bio) != READA)
			r = queue_io(md, bio);

		if (r <= 0)
			goto out_req;

		/*
		 * We're in a while loop, because someone could suspend
		 * before we get to the following read lock.
		 */
		down_read(&md->io_lock);
	}

	r = __split_bio(md, bio);
	up_read(&md->io_lock);

out_req:
	if (r < 0)
		bio_io_error(bio);

	return 0;
}

static void dm_unplug_all(struct request_queue *q)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_table(md);

	if (map) {
		dm_table_unplug_all(map);
		dm_table_put(map);
	}
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	atomic_inc(&md->pending);

	if (!test_bit(DMF_BLOCK_IO, &md->flags)) {
		map = dm_get_table(md);
		if (map) {
			r = dm_table_any_congested(map, bdi_bits);
			dm_table_put(map);
		}
	}

	if (!atomic_dec_return(&md->pending))
		/* nudge anyone waiting on suspend queue */
		wake_up(&md->wait);

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r, m;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {
		r = -EBUSY;
		goto out;
	}

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);
	if (r)
		goto out;

	if (m != minor) {
		idr_remove(&_minor_idr, m);
		r = -EBUSY;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);
	return r;
}

static int next_free_minor(int *minor)
{
	int r, m;

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);
	if (!r)
		return -ENOMEM;

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);
	if (r)
		goto out;

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);
		r = -ENOSPC;
		goto out;
	}

	*minor = m;

out:
	spin_unlock(&_minor_lock);
	return r;
}
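
/*
 * Both allocators follow the old two-step IDR protocol: idr_pre_get()
 * preallocates outside the spinlock, then idr_get_new*() inserts the
 * MINOR_ALLOCED placeholder under _minor_lock. alloc_dev() later swaps
 * the placeholder for the real mapped_device via idr_replace(), so a
 * concurrent dm_find_md() never returns a half-constructed device.
 */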

static struct block_device_operations dm_blk_dops;

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->pushback_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);

	md->io_pool = mempool_create_slab_pool(MIN_IOS, _io_cache);
	if (!md->io_pool)
		goto bad_io_pool;

	md->tio_pool = mempool_create_slab_pool(MIN_IOS, _tio_cache);
	if (!md->tio_pool)
		goto bad_tio_pool;

	md->bs = bioset_create(16, 0);
	if (!md->bs)
		goto bad_no_bioset;

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending, 0);
	init_waitqueue_head(&md->wait);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	add_disk(md->disk);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");
	if (!md->wq)
		goto bad_thread;

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_thread:
	put_disk(md->disk);
bad_disk:
	bioset_free(md->bs);
bad_no_bioset:
	mempool_destroy(md->tio_pool);
bad_tio_pool:
	mempool_destroy(md->io_pool);
bad_io_pool:
	blk_cleanup_queue(md->queue);
bad_queue:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	if (md->suspended_bdev) {
		unlock_fs(md);
		bdput(md->suspended_bdev);
	}
	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	del_gendisk(md->disk);
	free_minor(minor);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	put_disk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
	kfree(md);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	mutex_lock(&md->suspended_bdev->bd_inode->i_mutex);
	i_size_write(md->suspended_bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->suspended_bdev->bd_inode->i_mutex);
}

static int __bind(struct mapped_device *md, struct dm_table *t)
{
	struct request_queue *q = md->queue;
	sector_t size;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	if (md->suspended_bdev)
		__set_size(md, size);
	if (size == 0)
		return 0;

	dm_table_get(t);
	dm_table_event_callback(t, event_callback, md);

	write_lock(&md->map_lock);
	md->map = t;
	dm_table_set_restrictions(t, q);
	write_unlock(&md->map_lock);

	return 0;
}

static void __unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return;

	dm_table_event_callback(map, NULL, NULL);
	write_lock(&md->map_lock);
	md->map = NULL;
	write_unlock(&md->map_lock);
	dm_table_put(map);
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
{
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		}
		dm_table_put(map);
		__unbind(md);
		free_dev(md);
	}
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md)
{
	int r = 0;

	while (1) {
		set_current_state(TASK_INTERRUPTIBLE);

		smp_mb();
		if (!atomic_read(&md->pending))
			break;

		if (signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	return r;
}
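
/*
 * The loop above is the classic open-coded wait: the task state is set
 * before md->pending is re-read, with the smp_mb() providing the usual
 * pairing against the wake_up(&md->wait) issued when the pending count
 * drops to zero, so a completion racing with the check is not missed.
 * dm_suspend() puts itself on md->wait before calling in here.
 */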

/*
 * Process the deferred bios
 */
static void __flush_deferred_io(struct mapped_device *md)
{
	struct bio *c;

	while ((c = bio_list_pop(&md->deferred))) {
		if (__split_bio(md, c))
			bio_io_error(c);
	}

	clear_bit(DMF_BLOCK_IO, &md->flags);
}

static void __merge_pushback_list(struct mapped_device *md)
{
	unsigned long flags;

	spin_lock_irqsave(&md->pushback_lock, flags);
	clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	bio_list_merge_head(&md->deferred, &md->pushback);
	bio_list_init(&md->pushback);
	spin_unlock_irqrestore(&md->pushback_lock, flags);
}

static void dm_wq_work(struct work_struct *work)
{
	struct dm_wq_req *req = container_of(work, struct dm_wq_req, work);
	struct mapped_device *md = req->md;

	down_write(&md->io_lock);
	switch (req->type) {
	case DM_WQ_FLUSH_DEFERRED:
		__flush_deferred_io(md);
		break;
	default:
		DMERR("dm_wq_work: unrecognised work type %d", req->type);
		BUG();
	}
	up_write(&md->io_lock);
}

static void dm_wq_queue(struct mapped_device *md, int type, void *context,
			struct dm_wq_req *req)
{
	req->type = type;
	req->md = md;
	req->context = context;
	INIT_WORK(&req->work, dm_wq_work);
	queue_work(md->wq, &req->work);
}

static void dm_queue_flush(struct mapped_device *md, int type, void *context)
{
	struct dm_wq_req req;

	dm_wq_queue(md, type, context, &req);
	flush_workqueue(md->wq);
}

/*
 * Swap in a new table (destroying old one).
 */
int dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	int r = -EINVAL;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended(md))
		goto out;

	/* without bdev, the device size cannot be changed */
	if (!md->suspended_bdev)
		if (get_capacity(md->disk) != dm_table_get_size(table))
			goto out;

	__unbind(md);
	r = __bind(md, table);

out:
	mutex_unlock(&md->suspend_lock);
	return r;
}

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->suspended_bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	/* don't bdput right now, we don't want the bdev
	 * to go away while it is locked.
	 */
	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->suspended_bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem. For example we might want to move some data in
 * the background. Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	DECLARE_WAITQUEUE(wait, current);
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = dm_get_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/* bdget() can stall if the pending I/Os are not flushed */
	if (!noflush) {
		md->suspended_bdev = bdget_disk(md->disk, 0);
		if (!md->suspended_bdev) {
			DMWARN("bdget failed in dm_suspend");
			r = -ENOMEM;
			goto out;
		}

		/*
		 * Flush I/O to the device. noflush supersedes do_lockfs,
		 * because lock_fs() needs to flush I/Os.
		 */
		if (do_lockfs) {
			r = lock_fs(md);
			if (r)
				goto out;
		}
	}

	/*
	 * First we set the BLOCK_IO flag so no more ios will be mapped.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO, &md->flags);

	add_wait_queue(&md->wait, &wait);
	up_write(&md->io_lock);

	/* unplug */
	if (map)
		dm_table_unplug_all(map);

	/*
	 * Wait for the already-mapped ios to complete.
	 */
	r = dm_wait_for_completion(md);

	down_write(&md->io_lock);
	remove_wait_queue(&md->wait, &wait);

	if (noflush)
		__merge_pushback_list(md);
	up_write(&md->io_lock);

	/* were we interrupted? */
	if (r < 0) {
		dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

		unlock_fs(md);
		goto out; /* pushback list is already flushed, so skip flush */
	}

	dm_table_postsuspend_targets(map);

	set_bit(DMF_SUSPENDED, &md->flags);

out:
	if (r && md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	dm_table_put(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended(md))
		goto out;

	map = dm_get_table(md);
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md, DM_WQ_FLUSH_DEFERRED, NULL);

	unlock_fs(md);

	if (md->suspended_bdev) {
		bdput(md->suspended_bdev);
		md->suspended_bdev = NULL;
	}

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	dm_kobject_uevent(md);

	r = 0;

out:
	dm_table_put(map);
	mutex_unlock(&md->suspend_lock);

	return r;
}
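
/*
 * Typical caller sequence (illustrative sketch; in-tree it is the ioctl
 * layer that drives this): a table may only be swapped while the device
 * is suspended. The helper name below is invented.
 */
#if 0
static int example_replace_table(struct mapped_device *md,
				 struct dm_table *new_table)
{
	int r;

	r = dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
	if (r)
		return r;

	r = dm_swap_table(md, new_table);
	if (r) {
		dm_resume(md);
		return r;
	}

	return dm_resume(md);
}
#endif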

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
void dm_kobject_uevent(struct mapped_device *md)
{
	kobject_uevent(&disk_to_dev(md->disk)->kobj, KOBJ_CHANGE);
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

int dm_suspended(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_noflush_suspending(struct dm_target *ti)
{
	struct mapped_device *md = dm_table_get_md(ti->table);
	int r = __noflush_suspending(md);

	dm_put(md);

	return r;
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

static struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

/*
 * module hooks
 */
module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");