/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * ratelimit state to be used in DMXXX_LIMIT().
 */
DEFINE_RATELIMIT_STATE(dm_ratelimit_state,
		       DEFAULT_RATELIMIT_INTERVAL,
		       DEFAULT_RATELIMIT_BURST);
EXPORT_SYMBOL(dm_ratelimit_state);

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_IDR(_minor_idr);

static DEFINE_SPINLOCK(_minor_lock);

static void do_deferred_remove(struct work_struct *w);

static DECLARE_WORK(deferred_remove_work, do_deferred_remove);

static struct workqueue_struct *deferred_remove_workqueue;

/*
 * One of these is allocated per bio.
 */
struct dm_io {
	struct mapped_device *md;
	unsigned long start_time;
	spinlock_t endio_lock;
	struct dm_stats_aux stats_aux;
};

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct request *orig, clone;
};

/*
 * For request-based dm - the bio clones we allocate are embedded in these
 * structs.
 *
 * We allocate these with bio_alloc_bioset, using the front_pad parameter when
 * the bioset is created - this means the bio has to come at the end of the
 * struct.
 */
struct dm_rq_clone_bio_info {
	struct bio *orig;
	struct dm_rq_target_io *tio;
	struct bio clone;
};

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
	return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_MERGE_IS_OPTIONAL 6
#define DMF_DEFERRED_REMOVE 7

/*
 * A dummy definition to make RCU happy.
 * struct dm_table should never be dereferenced in this file.
 */

/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct srcu_struct io_barrier;
	struct mutex suspend_lock;

	/*
	 * The current mapping.
	 * Use dm_get_live_table{_fast} or take suspend_lock for
	 * dereference.
	 */
	struct dm_table *map;

	struct list_head table_devices;
	struct mutex table_devices_lock;

	struct request_queue *queue;

	/* Protect queue and type against concurrent access. */
	struct mutex type_lock;

	struct target_type *immutable_target_type;

	struct gendisk *disk;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * Processing queue (flush)
	 */
	struct workqueue_struct *wq;

	/*
	 * io objects are allocated from here.
	 */

	wait_queue_head_t eventq;

	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* kobject and completion */
	struct dm_kobject_holder kobj_holder;

	/* zero-length flush that will be cloned and submitted to targets */
	struct bio flush_bio;

	struct dm_stats stats;
};

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
	mempool_t *io_pool;
	struct bio_set *bs;
};

struct table_device {
	struct list_head list;
	atomic_t count;
	struct dm_dev dm_dev;
};

#define RESERVED_BIO_BASED_IOS		16
#define RESERVED_REQUEST_BASED_IOS	256
#define RESERVED_MAX_IOS		1024
static struct kmem_cache *_io_cache;
static struct kmem_cache *_rq_tio_cache;

/*
 * Bio-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_bio_based_ios = RESERVED_BIO_BASED_IOS;

/*
 * Request-based DM's mempools' reserved IOs set by the user.
 */
static unsigned reserved_rq_based_ios = RESERVED_REQUEST_BASED_IOS;

static unsigned __dm_get_reserved_ios(unsigned *reserved_ios,
				      unsigned def, unsigned max)
{
	unsigned ios = ACCESS_ONCE(*reserved_ios);
	unsigned modified_ios = 0;

	if (!ios)
		modified_ios = def;
	else if (ios > max)
		modified_ios = max;

	if (modified_ios) {
		(void)cmpxchg(reserved_ios, ios, modified_ios);
		ios = modified_ios;
	}

	return ios;
}

unsigned dm_get_reserved_bio_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_bio_based_ios,
				     RESERVED_BIO_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_bio_based_ios);

unsigned dm_get_reserved_rq_based_ios(void)
{
	return __dm_get_reserved_ios(&reserved_rq_based_ios,
				     RESERVED_REQUEST_BASED_IOS, RESERVED_MAX_IOS);
}
EXPORT_SYMBOL_GPL(dm_get_reserved_rq_based_ios);

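/*
 * Illustrative example (not part of the original file): with the defaults
 * above, a user-supplied value of 0 falls back to the compile-time default
 * and anything above RESERVED_MAX_IOS is clamped, e.g.:
 *
 *	reserved_bio_based_ios = 0;
 *	dm_get_reserved_bio_based_ios();	// returns 16
 *	reserved_bio_based_ios = 4096;
 *	dm_get_reserved_bio_based_ios();	// returns 1024
 */
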
static int __init local_init(void)
{
	int r = -ENOMEM;

	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);
	if (!_io_cache)
		return r;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
	if (!_rq_tio_cache)
		goto out_free_io_cache;

	r = dm_uevent_init();
	if (r)
		goto out_free_rq_tio_cache;

	deferred_remove_workqueue = alloc_workqueue("kdmremove", WQ_UNBOUND, 1);
	if (!deferred_remove_workqueue) {
		r = -ENOMEM;
		goto out_uevent_exit;
	}

	r = register_blkdev(_major, _name);
	if (r < 0)
		goto out_free_workqueue;

	return 0;

out_free_workqueue:
	destroy_workqueue(deferred_remove_workqueue);
out_uevent_exit:
	dm_uevent_exit();
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
out_free_io_cache:
	kmem_cache_destroy(_io_cache);

	return r;
}

static void local_exit(void)
{
	flush_scheduled_work();
	destroy_workqueue(deferred_remove_workqueue);

	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	DMINFO("cleaned up");
}

static int (*_inits[])(void) __initdata = {
	/* ... */
};

static void (*_exits[])(void) = {
	/* ... */
};

static int __init dm_init(void)
{
	const int count = ARRAY_SIZE(_inits);
	int r, i;

	for (i = 0; i < count; i++) {
		r = _inits[i]();
		if (r)
			goto bad;
	}

	return 0;
}

static void __exit dm_exit(void)
{
	int i = ARRAY_SIZE(_exits);

	while (i--)
		_exits[i]();

	/*
	 * Should be empty by this point.
	 */
	idr_destroy(&_minor_idr);
}

/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
	return test_bit(DMF_DELETING, &md->flags);
}

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;
	if (!md)
		goto out;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {
		md = NULL;
		goto out;
	}

	atomic_inc(&md->open_count);

out:
	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;
}

static void dm_blk_close(struct gendisk *disk, fmode_t mode)
{
	struct mapped_device *md = disk->private_data;

	spin_lock(&_minor_lock);

	if (atomic_dec_and_test(&md->open_count) &&
	    (test_bit(DMF_DEFERRED_REMOVE, &md->flags)))
		queue_work(deferred_remove_workqueue, &deferred_remove_work);

	spin_unlock(&_minor_lock);
}

int dm_open_count(struct mapped_device *md)
{
	return atomic_read(&md->open_count);
}

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md, bool mark_deferred, bool only_deferred)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (dm_open_count(md)) {
		r = -EBUSY;
		if (mark_deferred)
			set_bit(DMF_DEFERRED_REMOVE, &md->flags);
	} else if (only_deferred && !test_bit(DMF_DEFERRED_REMOVE, &md->flags))
		r = -EEXIST;
	else
		set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

int dm_cancel_deferred_remove(struct mapped_device *md)
{
	int r = 0;

	spin_lock(&_minor_lock);

	if (test_bit(DMF_DELETING, &md->flags))
		r = -EBUSY;
	else
		clear_bit(DMF_DEFERRED_REMOVE, &md->flags);

	spin_unlock(&_minor_lock);

	return r;
}

static void do_deferred_remove(struct work_struct *w)
{
	dm_deferred_remove();
}

sector_t dm_get_size(struct mapped_device *md)
{
	return get_capacity(md->disk);
}

struct request_queue *dm_get_md_queue(struct mapped_device *md)
{
	return md->queue;
}

struct dm_stats *dm_get_stats(struct mapped_device *md)
{
	return &md->stats;
}

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
}

static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct mapped_device *md = bdev->bd_disk->private_data;
	int srcu_idx;
	struct dm_table *map;
	struct dm_target *tgt;
	int r = -ENOTTY;

retry:
	map = dm_get_live_table(md, &srcu_idx);

	if (!map || !dm_table_get_size(map))
		goto out;

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)
		goto out;

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {
		r = -EAGAIN;
		goto out;
	}

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);

out:
	dm_put_live_table(md, srcu_idx);

	if (r == -ENOTCONN) {
		msleep(10);
		goto retry;
	}

	return r;
}

static struct dm_io *alloc_io(struct mapped_device *md)
{
	return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
	mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
	bio_put(&tio->clone);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
					    gfp_t gfp_mask)
{
	return mempool_alloc(md->io_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
	mempool_free(tio, tio->md->io_pool);
}

static int md_in_flight(struct mapped_device *md)
{
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
}

static void start_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	int cpu;
	int rw = bio_data_dir(bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	atomic_set(&dm_disk(md)->part0.in_flight[rw],
		   atomic_inc_return(&md->pending[rw]));

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), false, 0, &io->stats_aux);
}

static void end_io_acct(struct dm_io *io)
{
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int pending, cpu;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);

	if (unlikely(dm_stats_used(&md->stats)))
		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
				    bio_sectors(bio), true, duration, &io->stats_aux);

	/*
	 * After this is decremented the bio must not be touched if it is
	 * a flush.
	 */
	pending = atomic_dec_return(&md->pending[rw]);
	atomic_set(&dm_disk(md)->part0.in_flight[rw], pending);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
	if (!pending)
		wake_up(&md->wait);
}

/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&md->deferred_lock, flags);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irqrestore(&md->deferred_lock, flags);
	queue_work(md->wq, &md->work);
}

/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_put_live_table() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md, int *srcu_idx) __acquires(md->io_barrier)
{
	*srcu_idx = srcu_read_lock(&md->io_barrier);

	return srcu_dereference(md->map, &md->io_barrier);
}

void dm_put_live_table(struct mapped_device *md, int srcu_idx) __releases(md->io_barrier)
{
	srcu_read_unlock(&md->io_barrier, srcu_idx);
}

void dm_sync_table(struct mapped_device *md)
{
	synchronize_srcu(&md->io_barrier);
	synchronize_rcu_expedited();
}

/*
 * A fast alternative to dm_get_live_table/dm_put_live_table.
 * The caller must not block between these two functions.
 */
static struct dm_table *dm_get_live_table_fast(struct mapped_device *md) __acquires(RCU)
{
	rcu_read_lock();
	return rcu_dereference(md->map);
}

static void dm_put_live_table_fast(struct mapped_device *md) __releases(RCU)
{
	rcu_read_unlock();
}

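/*
 * Illustrative usage sketch (not part of the original file): a reader that
 * needs the live table takes the SRCU read lock via dm_get_live_table() and
 * drops it with dm_put_live_table() when done, e.g.:
 *
 *	int srcu_idx;
 *	struct dm_table *map = dm_get_live_table(md, &srcu_idx);
 *
 *	if (map)
 *		... use map; may block ...
 *	dm_put_live_table(md, srcu_idx);
 *
 * The _fast variants use plain RCU instead and must not block in between.
 */
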
/*
 * Open a table device so we can use it as a map destination.
 */
static int open_table_device(struct table_device *td, dev_t dev,
			     struct mapped_device *md)
{
	static char *_claim_ptr = "I belong to device-mapper";
	struct block_device *bdev;
	int r;

	BUG_ON(td->dm_dev.bdev);

	bdev = blkdev_get_by_dev(dev, td->dm_dev.mode | FMODE_EXCL, _claim_ptr);
	if (IS_ERR(bdev))
		return PTR_ERR(bdev);

	r = bd_link_disk_holder(bdev, dm_disk(md));
	if (r) {
		blkdev_put(bdev, td->dm_dev.mode | FMODE_EXCL);
		return r;
	}

	td->dm_dev.bdev = bdev;
	return 0;
}

/*
 * Close a table device that we've been using.
 */
static void close_table_device(struct table_device *td, struct mapped_device *md)
{
	if (!td->dm_dev.bdev)
		return;

	bd_unlink_disk_holder(td->dm_dev.bdev, dm_disk(md));
	blkdev_put(td->dm_dev.bdev, td->dm_dev.mode | FMODE_EXCL);
	td->dm_dev.bdev = NULL;
}

static struct table_device *find_table_device(struct list_head *l, dev_t dev,
					      fmode_t mode) {
	struct table_device *td;

	list_for_each_entry(td, l, list)
		if (td->dm_dev.bdev->bd_dev == dev && td->dm_dev.mode == mode)
			return td;

	return NULL;
}

int dm_get_table_device(struct mapped_device *md, dev_t dev, fmode_t mode,
			struct dm_dev **result) {
	int r;
	struct table_device *td;

	mutex_lock(&md->table_devices_lock);
	td = find_table_device(&md->table_devices, dev, mode);
	if (!td) {
		td = kmalloc(sizeof(*td), GFP_KERNEL);
		if (!td) {
			mutex_unlock(&md->table_devices_lock);
			return -ENOMEM;
		}

		td->dm_dev.mode = mode;
		td->dm_dev.bdev = NULL;

		if ((r = open_table_device(td, dev, md))) {
			mutex_unlock(&md->table_devices_lock);
			kfree(td);
			return r;
		}

		format_dev_t(td->dm_dev.name, dev);

		atomic_set(&td->count, 0);
		list_add(&td->list, &md->table_devices);
	}
	atomic_inc(&td->count);
	mutex_unlock(&md->table_devices_lock);

	*result = &td->dm_dev;
	return 0;
}
EXPORT_SYMBOL_GPL(dm_get_table_device);

void dm_put_table_device(struct mapped_device *md, struct dm_dev *d)
{
	struct table_device *td = container_of(d, struct table_device, dm_dev);

	mutex_lock(&md->table_devices_lock);
	if (atomic_dec_and_test(&td->count)) {
		close_table_device(td, md);
	}
	mutex_unlock(&md->table_devices_lock);
}
EXPORT_SYMBOL(dm_put_table_device);

static void free_table_devices(struct list_head *devices)
{
	struct list_head *tmp, *next;

	list_for_each_safe(tmp, next, devices) {
		struct table_device *td = list_entry(tmp, struct table_device, list);

		DMWARN("dm_destroy: %s still exists with %d references",
		       td->dm_dev.name, atomic_read(&td->count));
	}
}

/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	*geo = md->geometry;

	return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");
		return -EINVAL;
	}

	md->geometry = *geo;

	return 0;
}

/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this.  So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
	unsigned long flags;
	int io_error;
	struct bio *bio;
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
			io->error = error;
		spin_unlock_irqrestore(&io->endio_lock, flags);
	}

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md))
				bio_list_add_head(&md->deferred, io->bio);
			else
				/* noflush suspend was interrupted. */
				io->error = -EIO;
			spin_unlock_irqrestore(&md->deferred_lock, flags);
		}

		io_error = io->error;
		bio = io->bio;

		if (io_error == DM_ENDIO_REQUEUE)
			return;

		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
			/*
			 * Preflush done for flush with data, reissue
			 * without REQ_FLUSH.
			 */
			bio->bi_rw &= ~REQ_FLUSH;
			queue_io(md, bio);
		} else {
			/* done with normal IO or empty flush */
			trace_block_bio_complete(md->queue, bio, io_error);
			bio_endio(bio, io_error);
		}
	}
}

static void disable_write_same(struct mapped_device *md)
{
	struct queue_limits *limits = dm_get_queue_limits(md);

	/* device doesn't really support WRITE SAME, disable it */
	limits->max_write_same_sectors = 0;
}

static void clone_endio(struct bio *bio, int error)
{
	int r = 0;
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)
		error = -EIO;

	if (endio) {
		r = endio(tio->ti, bio, error);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 * in dec_pending().
			 */
			error = r;
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			return;
		else if (r) {
			DMWARN("unimplemented target endio return value: %d", r);
			BUG();
		}
	}

	if (unlikely(r == -EREMOTEIO && (bio->bi_rw & REQ_WRITE_SAME) &&
		     !bdev_get_queue(bio->bi_bdev)->limits.max_write_same_sectors))
		disable_write_same(md);

	dec_pending(io, error);
}

/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
{
	struct dm_rq_clone_bio_info *info =
		container_of(clone, struct dm_rq_clone_bio_info, clone);
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_iter.bi_size;

	if (tio->error)
		/*
		 * An error has already been detected on the request.
		 * Once error occurred, just let clone->end_io() handle
		 * the remainder.
		 */
		return;
	else if (error) {
		/*
		 * Don't notice the error to the upper layer yet.
		 * The error handling decision is made by the target driver,
		 * when the request is completed.
		 */
		tio->error = error;
		return;
	}

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
}

/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
{
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))
		wake_up(&md->wait);

	/*
	 * Run this off this callpath, as drivers could invoke end_io while
	 * inside their request_fn (and holding the queue lock). Calling
	 * back into ->request_fn() could deadlock attempting to grab the
	 * queue lock again.
	 */
	if (run_queue)
		blk_run_queue_async(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above.
	 */
	dm_put(md);
}

static void free_rq_clone(struct request *clone)
{
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
	free_rq_tio(tio);
}

/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		if (rq->sense)
			/*
			 * We are using the sense buffer of the original
			 * request.
			 * So setting the length of the sense data is enough.
			 */
			rq->sense_len = clone->sense_len;
	}

	free_rq_clone(clone);
	blk_end_request_all(rq, error);
	rq_completed(md, rw, true);
}

static void dm_unprep_request(struct request *rq)
{
	struct request *clone = rq->special;

	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);
}

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
{
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;
	unsigned long flags;

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
}
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);

static void __stop_queue(struct request_queue *q)
{
	blk_stop_queue(q);
}

static void stop_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__stop_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void __start_queue(struct request_queue *q)
{
	if (blk_queue_stopped(q))
		blk_start_queue(q);
}

static void start_queue(struct request_queue *q)
{
	unsigned long flags;

	spin_lock_irqsave(q->queue_lock, flags);
	__start_queue(q);
	spin_unlock_irqrestore(q->queue_lock, flags);
}

static void dm_done(struct request *clone, int error, bool mapped)
{
	int r = error;
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = NULL;

	if (tio->ti) {
		rq_end_io = tio->ti->type->rq_end_io;

		if (mapped && rq_end_io)
			r = rq_end_io(tio->ti, clone, error, &tio->info);
	}

	if (unlikely(r == -EREMOTEIO && (clone->cmd_flags & REQ_WRITE_SAME) &&
		     !clone->q->limits.max_write_same_sectors))
		disable_write_same(tio->md);

	if (r <= 0)
		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
		return;
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
	else {
		DMWARN("unimplemented target endio return value: %d", r);
		BUG();
	}
}

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
{
	bool mapped = true;
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)
		mapped = false;

	dm_done(clone, tio->error, mapped);
}

/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->completion_data = clone;
	blk_complete_request(rq);
}

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
{
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
}
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);

/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
{
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* freed actually here because it is alloced from
	 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock.  Otherwise, deadlock could occur because:
	 *   - another request may be submitted by the upper level driver
	 *     of the stacking during the completion
	 *   - the submission which requires queue lock may be done
	 *     against this queue
	 */
	dm_complete_request(clone, error);
}

/*
 * Return maximum size of I/O possible at the supplied sector up to the current
 * target boundary.
 */
static sector_t max_io_len_target_boundary(sector_t sector, struct dm_target *ti)
{
	sector_t target_offset = dm_target_offset(ti, sector);

	return ti->len - target_offset;
}

static sector_t max_io_len(sector_t sector, struct dm_target *ti)
{
	sector_t len = max_io_len_target_boundary(sector, ti);
	sector_t offset, max_len;

	/*
	 * Does the target need to split even further?
	 */
	if (ti->max_io_len) {
		offset = dm_target_offset(ti, sector);
		if (unlikely(ti->max_io_len & (ti->max_io_len - 1)))
			max_len = sector_div(offset, ti->max_io_len);
		else
			max_len = offset & (ti->max_io_len - 1);
		max_len = ti->max_io_len - max_len;

		if (len > max_len)
			len = max_len;
	}

	return len;
}

int dm_set_target_max_io_len(struct dm_target *ti, sector_t len)
{
	if (len > UINT_MAX) {
		DMERR("Specified maximum size of target IO (%llu) exceeds limit (%u)",
		      (unsigned long long)len, UINT_MAX);
		ti->error = "Maximum size of target IO is too large";
		return -EINVAL;
	}

	ti->max_io_len = (uint32_t) len;

	return 0;
}
EXPORT_SYMBOL_GPL(dm_set_target_max_io_len);

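/*
 * Illustrative usage sketch (hypothetical target code, not part of this
 * file): a target whose constructor must split I/O on, say, chunk_size
 * boundaries would typically call this from its ->ctr:
 *
 *	r = dm_set_target_max_io_len(ti, chunk_size);
 *	if (r)
 *		return r;
 */
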
/*
 * A target may call dm_accept_partial_bio only from the map routine.  It is
 * allowed for all bio types except REQ_FLUSH.
 *
 * dm_accept_partial_bio informs the dm that the target only wants to process
 * additional n_sectors sectors of the bio and the rest of the data should be
 * sent in a next bio.
 *
 * A diagram that explains the arithmetics:
 * +--------------------+---------------+-------+
 * |         1          |       2       |   3   |
 * +--------------------+---------------+-------+
 *
 * <-------------- *tio->len_ptr --------------->
 *                      <------- bi_size ------->
 *
 * Region 1 was already iterated over with bio_advance or similar function.
 *	(it may be empty if the target doesn't use bio_advance)
 * Region 2 is the remaining bio size that the target wants to process.
 *	(it may be empty if region 1 is non-empty, although there is no reason
 *	 to make it empty)
 * The target requires that region 3 is to be sent in the next bio.
 *
 * If the target wants to receive multiple copies of the bio (via num_*bios, etc),
 * the partially processed part (the sum of regions 1+2) must be the same for all
 * copies of the bio.
 */
void dm_accept_partial_bio(struct bio *bio, unsigned n_sectors)
{
	struct dm_target_io *tio = container_of(bio, struct dm_target_io, clone);
	unsigned bi_size = bio->bi_iter.bi_size >> SECTOR_SHIFT;
	BUG_ON(bio->bi_rw & REQ_FLUSH);
	BUG_ON(bi_size > *tio->len_ptr);
	BUG_ON(n_sectors > bi_size);
	*tio->len_ptr -= bi_size - n_sectors;
	bio->bi_iter.bi_size = n_sectors << SECTOR_SHIFT;
}
EXPORT_SYMBOL_GPL(dm_accept_partial_bio);

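/*
 * Illustrative usage sketch (hypothetical target code, not part of this
 * file): a ->map routine that can only handle part of the bio right now
 * might do:
 *
 *	static int my_map(struct dm_target *ti, struct bio *bio)
 *	{
 *		unsigned n_sectors = ...;	// what this target can take now
 *
 *		dm_accept_partial_bio(bio, n_sectors);
 *		bio->bi_bdev = ...;		// remap as usual
 *		return DM_MAPIO_REMAPPED;	// core sends the rest in a new bio
 *	}
 */
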
static void __map_bio(struct dm_target_io *tio)
{
	int r;
	sector_t sector;
	struct mapped_device *md;
	struct bio *clone = &tio->clone;
	struct dm_target *ti = tio->ti;

	clone->bi_end_io = clone_endio;

	/*
	 * Map the clone.  If r == 0 we don't need to do
	 * anything, the target has assumed ownership of
	 * this io.
	 */
	atomic_inc(&tio->io->io_count);
	sector = clone->bi_iter.bi_sector;
	r = ti->type->map(ti, clone);
	if (r == DM_MAPIO_REMAPPED) {
		/* the bio has been remapped so dispatch it */

		trace_block_bio_remap(bdev_get_queue(clone->bi_bdev), clone,
				      tio->io->bio->bi_bdev->bd_dev, sector);

		generic_make_request(clone);
	} else if (r < 0 || r == DM_MAPIO_REQUEUE) {
		/* error the io and bail out, or requeue it if needed */
		md = tio->io->md;
		dec_pending(tio->io, r);
		free_tio(md, tio);
	} else if (r) {
		DMWARN("unimplemented target map return value: %d", r);
		BUG();
	}
}

struct clone_info {
	struct mapped_device *md;
	struct dm_table *map;
	struct bio *bio;
	struct dm_io *io;
	sector_t sector;
	unsigned sector_count;
};

static void bio_setup_sector(struct bio *bio, sector_t sector, unsigned len)
{
	bio->bi_iter.bi_sector = sector;
	bio->bi_iter.bi_size = to_bytes(len);
}

/*
 * Creates a bio that consists of range of complete bvecs.
 */
static void clone_bio(struct dm_target_io *tio, struct bio *bio,
		      sector_t sector, unsigned len)
{
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, bio);

	if (bio_integrity(bio))
		bio_integrity_clone(clone, bio, GFP_NOIO);

	bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
	clone->bi_iter.bi_size = to_bytes(len);

	if (bio_integrity(bio))
		bio_integrity_trim(clone, 0, len);
}

static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti,
				      unsigned target_bio_nr)
{
	struct dm_target_io *tio;
	struct bio *clone;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	tio = container_of(clone, struct dm_target_io, clone);

	tio->target_bio_nr = target_bio_nr;

	return tio;
}

static void __clone_and_map_simple_bio(struct clone_info *ci,
				       struct dm_target *ti,
				       unsigned target_bio_nr, unsigned *len)
{
	struct dm_target_io *tio = alloc_tio(ci, ti, target_bio_nr);
	struct bio *clone = &tio->clone;

	__bio_clone_fast(clone, ci->bio);
	if (len)
		bio_setup_sector(clone, ci->sector, *len);

	__map_bio(tio);
}

static void __send_duplicate_bios(struct clone_info *ci, struct dm_target *ti,
				  unsigned num_bios, unsigned *len)
{
	unsigned target_bio_nr;

	for (target_bio_nr = 0; target_bio_nr < num_bios; target_bio_nr++)
		__clone_and_map_simple_bio(ci, ti, target_bio_nr, len);
}

static int __send_empty_flush(struct clone_info *ci)
{
	unsigned target_nr = 0;
	struct dm_target *ti;

	BUG_ON(bio_has_data(ci->bio));
	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		__send_duplicate_bios(ci, ti, ti->num_flush_bios, NULL);

	return 0;
}

static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
				     sector_t sector, unsigned *len)
{
	struct bio *bio = ci->bio;
	struct dm_target_io *tio;
	unsigned target_bio_nr;
	unsigned num_target_bios = 1;

	/*
	 * Does the target want to receive duplicate copies of the bio?
	 */
	if (bio_data_dir(bio) == WRITE && ti->num_write_bios)
		num_target_bios = ti->num_write_bios(ti, bio);

	for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
		tio = alloc_tio(ci, ti, target_bio_nr);
		clone_bio(tio, bio, sector, *len);
		__map_bio(tio);
	}
}

typedef unsigned (*get_num_bios_fn)(struct dm_target *ti);

static unsigned get_num_discard_bios(struct dm_target *ti)
{
	return ti->num_discard_bios;
}

static unsigned get_num_write_same_bios(struct dm_target *ti)
{
	return ti->num_write_same_bios;
}

typedef bool (*is_split_required_fn)(struct dm_target *ti);

static bool is_split_required_for_discard(struct dm_target *ti)
{
	return ti->split_discard_bios;
}

static int __send_changing_extent_only(struct clone_info *ci,
				       get_num_bios_fn get_num_bios,
				       is_split_required_fn is_split_required)
{
	struct dm_target *ti;
	unsigned len;
	unsigned num_bios;

	do {
		ti = dm_table_find_target(ci->map, ci->sector);
		if (!dm_target_is_valid(ti))
			return -EIO;

		/*
		 * Even though the device advertised support for this type of
		 * request, that does not mean every target supports it, and
		 * reconfiguration might also have changed that since the
		 * check was performed.
		 */
		num_bios = get_num_bios ? get_num_bios(ti) : 0;
		if (!num_bios)
			return -EOPNOTSUPP;

		if (is_split_required && !is_split_required(ti))
			len = min((sector_t)ci->sector_count, max_io_len_target_boundary(ci->sector, ti));
		else
			len = min((sector_t)ci->sector_count, max_io_len(ci->sector, ti));

		__send_duplicate_bios(ci, ti, num_bios, &len);

		ci->sector += len;
	} while (ci->sector_count -= len);

	return 0;
}

static int __send_discard(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_discard_bios,
					   is_split_required_for_discard);
}

static int __send_write_same(struct clone_info *ci)
{
	return __send_changing_extent_only(ci, get_num_write_same_bios, NULL);
}

/*
 * Select the correct strategy for processing a non-flush bio.
 */
static int __split_and_process_non_flush(struct clone_info *ci)
{
	struct bio *bio = ci->bio;
	struct dm_target *ti;
	unsigned len;

	if (unlikely(bio->bi_rw & REQ_DISCARD))
		return __send_discard(ci);
	else if (unlikely(bio->bi_rw & REQ_WRITE_SAME))
		return __send_write_same(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))
		return -EIO;

	len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);

	__clone_and_map_data_bio(ci, ti, ci->sector, &len);

	ci->sector += len;
	ci->sector_count -= len;

	return 0;
}

/*
 * Entry point to split a bio into clones and submit them to the targets.
 */
static void __split_and_process_bio(struct mapped_device *md,
				    struct dm_table *map, struct bio *bio)
{
	struct clone_info ci;
	int error = 0;

	if (unlikely(!map)) {
		bio_io_error(bio);
		return;
	}

	ci.map = map;
	ci.md = md;
	ci.io = alloc_io(md);
	atomic_set(&ci.io->io_count, 1);
	ci.io->bio = bio;
	ci.io->md = md;
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_iter.bi_sector;

	start_io_acct(ci.io);

	if (bio->bi_rw & REQ_FLUSH) {
		ci.bio = &ci.md->flush_bio;
		ci.sector_count = 0;
		error = __send_empty_flush(&ci);
		/* dec_pending submits any data associated with flush */
	} else {
		ci.bio = bio;
		ci.sector_count = bio_sectors(bio);
		while (ci.sector_count && !error)
			error = __split_and_process_non_flush(&ci);
	}

	/* drop the extra reference count */
	dec_pending(ci.io, error);
}

/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/

static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
{
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table_fast(md);
	struct dm_target *ti;
	sector_t max_sectors;
	int max_size = 0;

	if (unlikely(!map))
		goto out;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))
		goto out;

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries.  So always set max_size to 0, and the code below allows
	 * just one page.
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)
		max_size = 0;

out:
	dm_put_live_table_fast(md);
	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;

	return max_size;
}

/*
 * The request function that just remaps the bio built up by
 * dm_merge_bvec.
 */
static void _dm_request(struct request_queue *q, struct bio *bio)
{
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;
	int cpu;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));

	/* if we're suspended, we have to queue this io for later */
	if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))) {
		dm_put_live_table(md, srcu_idx);

		if (bio_rw(bio) != READA)
			queue_io(md, bio);
		else
			bio_io_error(bio);
		return;
	}

	__split_and_process_bio(md, map, bio);
	dm_put_live_table(md, srcu_idx);
}

int dm_request_based(struct mapped_device *md)
{
	return blk_queue_stackable(md->queue);
}

static void dm_request(struct request_queue *q, struct bio *bio)
{
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		blk_queue_bio(q, bio);
	else
		_dm_request(q, bio);
}

void dm_dispatch_request(struct request *rq)
{
	int r;

	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
	if (r)
		dm_complete_request(rq, r);
}
EXPORT_SYMBOL_GPL(dm_dispatch_request);

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
				 void *data)
{
	struct dm_rq_target_io *tio = data;
	struct dm_rq_clone_bio_info *info =
		container_of(bio, struct dm_rq_clone_bio_info, clone);

	info->orig = bio_orig;
	info->tio = tio;
	bio->bi_end_io = end_clone_bio;

	return 0;
}

static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
{
	int r;

	r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
			      dm_rq_bio_constructor, tio);
	if (r)
		return r;

	clone->cmd = rq->cmd;
	clone->cmd_len = rq->cmd_len;
	clone->sense = rq->sense;
	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

	return 0;
}

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
				gfp_t gfp_mask)
{
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);
	if (!tio)
		return NULL;

	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {
		/* -ENOMEM */
		free_rq_tio(tio);
		return NULL;
	}

	return clone;
}

/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
{
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;
	}

	clone = clone_rq(rq, md, GFP_ATOMIC);
	if (!clone)
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;

	return BLKPREP_OK;
}

/*
 * Returns:
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
 */
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
{
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	r = ti->type->map_rq(ti, clone, &tio->info);
	switch (r) {
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
		break;
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
		break;
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		requeued = 1;
		break;
	default:
		if (r > 0) {
			DMWARN("unimplemented target map return value: %d", r);
			BUG();
		}

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
		break;
	}

	return requeued;
}

static struct request *dm_start_request(struct mapped_device *md, struct request *orig)
{
	struct request *clone;

	blk_start_request(orig);
	clone = orig->special;
	atomic_inc(&md->pending[rq_data_dir(clone)]);

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */
	dm_get(md);

	return clone;
}

1852 * q->request_fn for request-based dm.
1853 * Called with the queue lock held.
1855 static void dm_request_fn(struct request_queue
*q
)
1857 struct mapped_device
*md
= q
->queuedata
;
1859 struct dm_table
*map
= dm_get_live_table(md
, &srcu_idx
);
1860 struct dm_target
*ti
;
1861 struct request
*rq
, *clone
;
1865 * For suspend, check blk_queue_stopped() and increment
1866 * ->pending within a single queue_lock not to increment the
1867 * number of in-flight I/Os after the queue is stopped in
1870 while (!blk_queue_stopped(q
)) {
1871 rq
= blk_peek_request(q
);
1875 /* always use block 0 to find the target for flushes for now */
1877 if (!(rq
->cmd_flags
& REQ_FLUSH
))
1878 pos
= blk_rq_pos(rq
);
1880 ti
= dm_table_find_target(map
, pos
);
1881 if (!dm_target_is_valid(ti
)) {
1883 * Must perform setup, that dm_done() requires,
1884 * before calling dm_kill_unmapped_request
1886 DMERR_LIMIT("request attempted access beyond the end of device");
1887 clone
= dm_start_request(md
, rq
);
1888 dm_kill_unmapped_request(clone
, -EIO
);
1892 if (ti
->type
->busy
&& ti
->type
->busy(ti
))
1895 clone
= dm_start_request(md
, rq
);
1897 spin_unlock(q
->queue_lock
);
1898 if (map_request(ti
, clone
, md
))
1901 BUG_ON(!irqs_disabled());
1902 spin_lock(q
->queue_lock
);
1908 BUG_ON(!irqs_disabled());
1909 spin_lock(q
->queue_lock
);
1912 blk_delay_queue(q
, HZ
/ 10);
1914 dm_put_live_table(md
, srcu_idx
);
int dm_underlying_device_busy(struct request_queue *q)
{
	return blk_lld_busy(q);
}
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
{
	int r;
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table_fast(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))
		r = 1;
	else
		r = dm_table_any_busy_target(map);

	dm_put_live_table_fast(md);

	return r;
}

static int dm_any_congested(void *congested_data, int bdi_bits)
{
	int r = bdi_bits;
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table_fast(md);
		if (map) {
			/*
			 * Request-based dm cares about only own queue for
			 * the query about congestion status of request_queue
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				    bdi_bits;
			else
				r = dm_table_any_congested(map, bdi_bits);
		}
		dm_put_live_table_fast(md);
	}

	return r;
}

/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static void free_minor(int minor)
{
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);
}

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
{
	int r;

	if (minor >= (1 << MINORBITS))
		return -EINVAL;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, minor, minor + 1, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();

	if (r < 0)
		return r == -ENOSPC ? -EBUSY : r;
	return 0;
}

static int next_free_minor(int *minor)
{
	int r;

	idr_preload(GFP_KERNEL);
	spin_lock(&_minor_lock);

	r = idr_alloc(&_minor_idr, MINOR_ALLOCED, 0, 1 << MINORBITS, GFP_NOWAIT);

	spin_unlock(&_minor_lock);
	idr_preload_end();

	if (r < 0)
		return r;

	*minor = r;
	return 0;
}

static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);

static void dm_init_md_queue(struct mapped_device *md)
{
	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device has not been decided yet.
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);

	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
}

/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
{
	int r;
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);
	void *old_md;

	if (!md) {
		DMWARN("unable to allocate device, out of memory.");
		return NULL;
	}

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
	else
		r = specific_minor(minor);
	if (r < 0)
		goto bad_minor;

	r = init_srcu_struct(&md->io_barrier);
	if (r < 0)
		goto bad_io_barrier;

	md->type = DM_TYPE_NONE;
	mutex_init(&md->suspend_lock);
	mutex_init(&md->type_lock);
	mutex_init(&md->table_devices_lock);
	spin_lock_init(&md->deferred_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	INIT_LIST_HEAD(&md->table_devices);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_alloc_queue(GFP_KERNEL);
	if (!md->queue)
		goto bad_queue;

	dm_init_md_queue(md);

	md->disk = alloc_disk(1);
	if (!md->disk)
		goto bad_disk;

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	init_waitqueue_head(&md->eventq);
	init_completion(&md->kobj_holder.completion);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);
	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = alloc_workqueue("kdmflush", WQ_MEM_RECLAIM, 0);
	if (!md->wq)
		goto bad_thread;

	md->bdev = bdget_disk(md->disk, 0);
	if (!md->bdev)
		goto bad_bdev;

	bio_init(&md->flush_bio);
	md->flush_bio.bi_bdev = md->bdev;
	md->flush_bio.bi_rw = WRITE_FLUSH;

	dm_stats_init(&md->stats);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	return md;

bad_bdev:
	destroy_workqueue(md->wq);
bad_thread:
	del_gendisk(md->disk);
bad_disk:
	blk_cleanup_queue(md->queue);
bad_queue:
	cleanup_srcu_struct(&md->io_barrier);
bad_io_barrier:
	free_minor(minor);
bad_minor:
	module_put(THIS_MODULE);
bad_module_get:
	kfree(md);
	return NULL;
}

static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
{
	int minor = MINOR(disk_devt(md->disk));

	destroy_workqueue(md->wq);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);
	cleanup_srcu_struct(&md->io_barrier);
	free_table_devices(&md->table_devices);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	blk_cleanup_queue(md->queue);
	dm_stats_cleanup(&md->stats);
	module_put(THIS_MODULE);
}

static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
{
	struct dm_md_mempools *p = dm_table_get_md_mempools(t);

	if (md->io_pool && md->bs) {
		/* The md already has necessary mempools. */
		if (dm_table_get_type(t) == DM_TYPE_BIO_BASED) {
			/*
			 * Reload bioset because front_pad may have changed
			 * because a different table was loaded.
			 */
			bioset_free(md->bs);
		} else if (dm_table_get_type(t) == DM_TYPE_REQUEST_BASED) {
			/*
			 * There's no need to reload with request-based dm
			 * because the size of front_pad doesn't change.
			 * Note for future: If you are to reload bioset,
			 * prep-ed requests in the queue may refer
			 * to bio from the old bioset, so you must walk
			 * through the queue to unprep.
			 */
		}
		goto out;
	}

	BUG_ON(!p || md->io_pool || md->bs);

	md->io_pool = p->io_pool;

out:
	/* mempool bind completed, now no need any mempools in the table */
	dm_table_free_md_mempools(t);
}

/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
{
	unsigned long flags;
	LIST_HEAD(uevents);
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);
}

/*
 * Protected by md->suspend_lock obtained by dm_swap_table().
 */
static void __set_size(struct mapped_device *md, sector_t size)
{
	set_capacity(md->disk, size);

	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
}

/*
 * Return 1 if the queue has a compulsory merge_bvec_fn function.
 *
 * If this function returns 0, then the device is either a non-dm
 * device without a merge_bvec_fn, or it is a dm device that is
 * able to split any bios it receives that are too big.
 */
int dm_queue_merge_is_compulsory(struct request_queue *q)
{
	struct mapped_device *dev_md;

	if (!q->merge_bvec_fn)
		return 0;

	if (q->make_request_fn == dm_request) {
		dev_md = q->queuedata;
		if (test_bit(DMF_MERGE_IS_OPTIONAL, &dev_md->flags))
			return 0;
	}

	return 1;
}

static int dm_device_merge_is_compulsory(struct dm_target *ti,
					 struct dm_dev *dev, sector_t start,
					 sector_t len, void *data)
{
	struct block_device *bdev = dev->bdev;
	struct request_queue *q = bdev_get_queue(bdev);

	return dm_queue_merge_is_compulsory(q);
}

/*
 * Return 1 if it is acceptable to ignore merge_bvec_fn based
 * on the properties of the underlying devices.
 */
static int dm_table_merge_is_optional(struct dm_table *table)
{
	unsigned i = 0;
	struct dm_target *ti;

	while (i < dm_table_get_num_targets(table)) {
		ti = dm_table_get_target(table, i++);

		if (ti->type->iterate_devices &&
		    ti->type->iterate_devices(ti, dm_device_merge_is_compulsory, NULL))
			return 0;
	}

	return 1;
}

/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
{
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	sector_t size;
	int merge_is_optional;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != dm_get_size(md))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))
		stop_queue(q);

	__bind_mempools(md, t);

	merge_is_optional = dm_table_merge_is_optional(t);

	old_map = md->map;
	rcu_assign_pointer(md->map, t);
	md->immutable_target_type = dm_table_get_immutable_target_type(t);

	dm_table_set_restrictions(t, q, limits);
	if (merge_is_optional)
		set_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);
	else
		clear_bit(DMF_MERGE_IS_OPTIONAL, &md->flags);

	return old_map;
}

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
{
	struct dm_table *map = md->map;

	if (!map)
		return NULL;

	dm_table_event_callback(map, NULL, NULL);
	RCU_INIT_POINTER(md->map, NULL);

	return map;
}

/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
{
	struct mapped_device *md;

	md = alloc_dev(minor);
	if (!md)
		return -ENXIO;

	*result = md;
	return 0;
}

/*
 * Functions to manage md->type.
 * All are required to hold md->type_lock.
 */
void dm_lock_md_type(struct mapped_device *md)
{
	mutex_lock(&md->type_lock);
}

void dm_unlock_md_type(struct mapped_device *md)
{
	mutex_unlock(&md->type_lock);
}

void dm_set_md_type(struct mapped_device *md, unsigned type)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	md->type = type;
}

unsigned dm_get_md_type(struct mapped_device *md)
{
	BUG_ON(!mutex_is_locked(&md->type_lock));
	return md->type;
}

struct target_type *dm_get_immutable_target_type(struct mapped_device *md)
{
	return md->immutable_target_type;
}

/*
 * The queue_limits are only valid as long as you have a reference
 * count on 'md'.
 */
struct queue_limits *dm_get_queue_limits(struct mapped_device *md)
{
	BUG_ON(!atomic_read(&md->holders));
	return &md->queue->limits;
}
EXPORT_SYMBOL_GPL(dm_get_queue_limits);

/*
 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
 */
static int dm_init_request_based_queue(struct mapped_device *md)
{
	struct request_queue *q = NULL;

	if (md->queue->elevator)
		return 1;

	/* Fully initialize the queue */
	q = blk_init_allocated_queue(md->queue, dm_request_fn, NULL);
	if (!q)
		return 0;

	md->queue = q;
	dm_init_md_queue(md);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);

	elv_register_queue(md->queue);

	return 1;
}

/*
 * Setup the DM device's queue based on md's type
 */
int dm_setup_md_queue(struct mapped_device *md)
{
	if ((dm_get_md_type(md) == DM_TYPE_REQUEST_BASED) &&
	    !dm_init_request_based_queue(md)) {
		DMWARN("Cannot initialize queue for request-based mapped device");
		return -EINVAL;
	}

	return 0;
}

static struct mapped_device *dm_find_md(dev_t dev)
{
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))
		return NULL;

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   dm_deleting_md(md) ||
		   test_bit(DMF_FREEING, &md->flags))) {
		md = NULL;
		goto out;
	}

out:
	spin_unlock(&_minor_lock);

	return md;
}

struct mapped_device *dm_get_md(dev_t dev)
{
	struct mapped_device *md = dm_find_md(dev);

	if (md)
		dm_get(md);

	return md;
}
EXPORT_SYMBOL_GPL(dm_get_md);

void *dm_get_mdptr(struct mapped_device *md)
{
	return md->interface_ptr;
}

void dm_set_mdptr(struct mapped_device *md, void *ptr)
{
	md->interface_ptr = ptr;
}

void dm_get(struct mapped_device *md)
{
	atomic_inc(&md->holders);
	BUG_ON(test_bit(DMF_FREEING, &md->flags));
}

const char *dm_device_name(struct mapped_device *md)
{
	return md->name;
}
EXPORT_SYMBOL_GPL(dm_device_name);

static void __dm_destroy(struct mapped_device *md, bool wait)
{
	struct dm_table *map;
	int srcu_idx;

	spin_lock(&_minor_lock);
	map = dm_get_live_table(md, &srcu_idx);
	idr_replace(&_minor_idr, MINOR_ALLOCED, MINOR(disk_devt(dm_disk(md))));
	set_bit(DMF_FREEING, &md->flags);
	spin_unlock(&_minor_lock);

	if (!dm_suspended_md(md)) {
		dm_table_presuspend_targets(map);
		dm_table_postsuspend_targets(map);
	}

	/* dm_put_live_table must be before msleep, otherwise deadlock is possible */
	dm_put_live_table(md, srcu_idx);

	/*
	 * Rare, but there may be I/O requests still going to complete,
	 * for example.  Wait for all references to disappear.
	 * No one should increment the reference count of the mapped_device,
	 * after the mapped_device state becomes DMF_FREEING.
	 */
	if (wait)
		while (atomic_read(&md->holders))
			msleep(1);
	else if (atomic_read(&md->holders))
		DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
		       dm_device_name(md), atomic_read(&md->holders));

	dm_table_destroy(__unbind(md));
}

void dm_destroy(struct mapped_device *md)
{
	__dm_destroy(md, true);
}

void dm_destroy_immediate(struct mapped_device *md)
{
	__dm_destroy(md, false);
}

void dm_put(struct mapped_device *md)
{
	atomic_dec(&md->holders);
}
EXPORT_SYMBOL_GPL(dm_put);

static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
{
	int r = 0;
	DECLARE_WAITQUEUE(wait, current);

	add_wait_queue(&md->wait, &wait);

	while (1) {
		set_current_state(interruptible);

		if (!md_in_flight(md))
			break;

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {
			r = -EINTR;
			break;
		}

		io_schedule();
	}
	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

	return r;
}

/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
{
	struct mapped_device *md = container_of(work, struct mapped_device,
						work);
	struct bio *c;
	int srcu_idx;
	struct dm_table *map;

	map = dm_get_live_table(md, &srcu_idx);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

		if (!c)
			break;

		if (dm_request_based(md))
			generic_make_request(c);
		else
			__split_and_process_bio(md, map, c);
	}

	dm_put_live_table(md, srcu_idx);
}


static void dm_queue_flush(struct mapped_device *md)
{
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_atomic();
	queue_work(md->wq, &md->work);
}

/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
{
	struct dm_table *live_map = NULL, *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;
	int r;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))
		goto out;

	/*
	 * If the new table has no data devices, retain the existing limits.
	 * This helps multipath with queue_if_no_path if all paths disappear,
	 * then new I/O is queued based on these limits, and then some paths
	 * reappear.
	 */
	if (dm_table_has_no_data_devices(table)) {
		live_map = dm_get_live_table_fast(md);
		if (live_map)
			limits = md->queue->limits;
		dm_put_live_table_fast(md);
	}

	if (!live_map) {
		r = dm_calculate_queue_limits(table, &limits);
		if (r) {
			map = ERR_PTR(r);
			goto out;
		}
	}

	map = __bind(md, table, &limits);

out:
	mutex_unlock(&md->suspend_lock);
	return map;
}
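
/*
 * Illustrative sketch (not part of the original file): the previous live
 * table (possibly NULL) is returned to the caller, which destroys it once it
 * is no longer needed; on failure an ERR_PTR() is returned and the new table
 * is not bound.  "new_map" is a hypothetical, already-constructed table.
 *
 *	struct dm_table *old_map = dm_swap_table(md, new_map);
 *
 *	if (IS_ERR(old_map))
 *		return PTR_ERR(old_map);
 *	if (old_map)
 *		dm_table_destroy(old_map);
 */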

/*
 * Functions to lock and unlock any filesystem running on the
 * device.
 */
static int lock_fs(struct mapped_device *md)
{
	int r;

	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;
		return r;
	}

	set_bit(DMF_FROZEN, &md->flags);

	return 0;
}

static void unlock_fs(struct mapped_device *md)
{
	if (!test_bit(DMF_FROZEN, &md->flags))
		return;

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);
}

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 */
/*
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
{
	struct dm_table *map = NULL;
	int r = 0;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {
		r = -EINVAL;
		goto out_unlock;
	}

	map = md->map;

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
	if (noflush)
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {
		r = lock_fs(md);
		if (r)
			goto out_unlock;
	}

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 * dm_wq_work.
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request and quiesce the thread
	 * (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND and call
	 * flush_workqueue(md->wq).
	 */
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);

	/*
	 * Stop md->queue before flushing md->wq in case request-based
	 * dm defers requests to md->wq from md->queue.
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 * to finish.
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	if (noflush)
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	synchronize_srcu(&md->io_barrier);

	/* were we interrupted ? */
	if (r < 0) {
		dm_queue_flush(md);

		if (dm_request_based(md))
			start_queue(md->queue);

		unlock_fs(md);
		goto out_unlock; /* pushback list is already flushed, so skip flush */
	}

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */

	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

out_unlock:
	mutex_unlock(&md->suspend_lock);
	return r;
}

int dm_resume(struct mapped_device *md)
{
	int r = -EINVAL;
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))
		goto out;

	map = md->map;
	if (!map || !dm_table_get_size(map))
		goto out;

	r = dm_table_resume_targets(map);
	if (r)
		goto out;

	dm_queue_flush(md);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	unlock_fs(md);

	clear_bit(DMF_SUSPENDED, &md->flags);

	r = 0;
out:
	mutex_unlock(&md->suspend_lock);

	return r;
}
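
/*
 * Note: dm_resume() returns -EINVAL unless the device is currently suspended
 * and has a non-empty table bound (the !map || !dm_table_get_size(map)
 * check above).  On success the bios deferred while suspended are released
 * again via dm_queue_flush(), or requeued to md->queue for request-based dm.
 */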

/*
 * Internal suspend/resume works like userspace-driven suspend. It waits
 * until all bios finish and prevents issuing new bios to the target drivers.
 * It may be used only from the kernel.
 *
 * Internal suspend holds md->suspend_lock, which prevents interaction with
 * userspace-driven suspend.
 */

void dm_internal_suspend(struct mapped_device *md)
{
	mutex_lock(&md->suspend_lock);
	if (dm_suspended_md(md))
		return;

	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	synchronize_srcu(&md->io_barrier);
	flush_workqueue(md->wq);
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
}

void dm_internal_resume(struct mapped_device *md)
{
	if (dm_suspended_md(md))
		goto done;

	dm_queue_flush(md);

done:
	mutex_unlock(&md->suspend_lock);
}
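
/*
 * Note: dm_internal_suspend() returns with md->suspend_lock held in all
 * cases (including the early return when the device is already suspended),
 * and dm_internal_resume() is what drops it, so the two calls must always be
 * paired by the same kernel caller.
 */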

/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
		      unsigned cookie)
{
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

	if (!cookie)
		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);
	else {
		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,
					  action, envp);
	}
}

uint32_t dm_next_uevent_seq(struct mapped_device *md)
{
	return atomic_add_return(1, &md->uevent_seq);
}

uint32_t dm_get_event_nr(struct mapped_device *md)
{
	return atomic_read(&md->event_nr);
}

int dm_wait_event(struct mapped_device *md, int event_nr)
{
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));
}

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
{
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
}
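
/*
 * Illustrative sketch (not part of the original file): a caller that wants to
 * block until the device's next event snapshots the current event number and
 * then sleeps on it; dm-ioctl's DM_DEV_WAIT path follows this pattern.
 *
 *	uint32_t ev = dm_get_event_nr(md);
 *
 *	// ... later ...
 *	if (dm_wait_event(md, ev))
 *		return -ERESTARTSYS;	// interrupted by a signal
 */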

/*
 * The gendisk is only valid as long as you have a reference
 * count on 'md'.
 */
struct gendisk *dm_disk(struct mapped_device *md)
{
	return md->disk;
}

struct kobject *dm_kobject(struct mapped_device *md)
{
	return &md->kobj_holder.kobj;
}

struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
{
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj_holder.kobj);

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md))
		return NULL;

	dm_get(md);
	return md;
}

int dm_suspended_md(struct mapped_device *md)
{
	return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_test_deferred_remove_flag(struct mapped_device *md)
{
	return test_bit(DMF_DEFERRED_REMOVE, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
	return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
	return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);

struct dm_md_mempools *dm_alloc_md_mempools(unsigned type, unsigned integrity, unsigned per_bio_data_size)
{
	struct dm_md_mempools *pools = kzalloc(sizeof(*pools), GFP_KERNEL);
	struct kmem_cache *cachep;
	unsigned int pool_size;
	unsigned int front_pad;

	if (!pools)
		return NULL;

	if (type == DM_TYPE_BIO_BASED) {
		cachep = _io_cache;
		pool_size = dm_get_reserved_bio_based_ios();
		front_pad = roundup(per_bio_data_size, __alignof__(struct dm_target_io)) + offsetof(struct dm_target_io, clone);
	} else if (type == DM_TYPE_REQUEST_BASED) {
		cachep = _rq_tio_cache;
		pool_size = dm_get_reserved_rq_based_ios();
		front_pad = offsetof(struct dm_rq_clone_bio_info, clone);
		/* per_bio_data_size is not used. See __bind_mempools(). */
		WARN_ON(per_bio_data_size != 0);
	} else
		goto out;

	pools->io_pool = mempool_create_slab_pool(pool_size, cachep);
	if (!pools->io_pool)
		goto out;

	pools->bs = bioset_create_nobvec(pool_size, front_pad);
	if (!pools->bs)
		goto out;

	if (integrity && bioset_integrity_create(pools->bs, pool_size))
		goto out;

	return pools;

out:
	dm_free_md_mempools(pools);

	return NULL;
}
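
/*
 * Illustrative sketch (not part of the original file): for bio-based devices,
 * per_bio_data_size is what a target declares via ti->per_bio_data_size.  The
 * front_pad computed above reserves that space immediately in front of the
 * embedded struct dm_target_io, which is what lets a target recover its
 * per-bio context from a mapped bio with dm_per_bio_data():
 *
 *	struct my_ctx *ctx = dm_per_bio_data(bio, sizeof(struct my_ctx));
 *
 * "struct my_ctx" is a hypothetical per-bio structure.
 */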

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
	if (!pools)
		return;

	if (pools->io_pool)
		mempool_destroy(pools->io_pool);

	if (pools->bs)
		bioset_free(pools->bs);

	kfree(pools);
}

static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

module_param(reserved_bio_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_bio_based_ios, "Reserved IOs in bio-based mempools");

module_param(reserved_rq_based_ios, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(reserved_rq_based_ios, "Reserved IOs in request-based mempools");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");