/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24
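
/*
 * Illustrative sizing check (not part of the original source): the cookie is
 * emitted into the uevent environment as "DM_COOKIE=<decimal u32>", so the
 * longest possible string is "DM_COOKIE=4294967295" -- 20 characters plus the
 * terminating NUL, which fits within DM_COOKIE_LENGTH (24).
 */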
static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
	struct mapped_device *md;
	unsigned long start_time;
	spinlock_t endio_lock;

/*
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */

/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
	struct mapped_device *md;
	struct request *orig, clone;

/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
	struct dm_rq_target_io *tio;
union map_info *dm_get_mapinfo(struct bio *bio)
	if (bio && bio->bi_private)
		return &((struct dm_target_io *)bio->bi_private)->info;

union map_info *dm_get_rq_mapinfo(struct request *rq)
	if (rq && rq->end_io_data)
		return &((struct dm_rq_target_io *)rq->end_io_data)->info;
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);

#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
	struct rw_semaphore io_lock;
	struct mutex suspend_lock;

	struct request_queue *queue;
	struct gendisk *disk;

	/*
	 * A list of ios that arrived while we were suspended.
	 */
	wait_queue_head_t wait;
	struct work_struct work;
	struct bio_list deferred;
	spinlock_t deferred_lock;

	/*
	 * An error from the barrier request currently being processed.
	 */

	/*
	 * Protect barrier_error from concurrent endio processing
	 * in request-based dm.
	 */
	spinlock_t barrier_error_lock;

	/*
	 * Processing queue (flush/barriers)
	 */
	struct workqueue_struct *wq;
	struct work_struct barrier_work;

	/* A pointer to the currently processing pre/post flush request */
	struct request *flush_request;

	/*
	 * The current mapping.
	 */
	struct dm_table *map;

	/*
	 * io objects are allocated from here.
	 */

	wait_queue_head_t eventq;

	struct list_head uevent_list;
	spinlock_t uevent_lock; /* Protect access to uevent_list */

	/*
	 * freeze/thaw support require holding onto a super block
	 */
	struct super_block *frozen_sb;
	struct block_device *bdev;

	/* forced geometry settings */
	struct hd_geometry geometry;

	/* For saving the address of __make_request for request based dm */
	make_request_fn *saved_make_request_fn;

	/* zero-length barrier that will be cloned and submitted to targets */
	struct bio barrier_bio;

/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {

static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;
static int __init local_init(void)
	/* allocate a slab for the dm_ios */
	_io_cache = KMEM_CACHE(dm_io, 0);

	/* allocate a slab for the target ios */
	_tio_cache = KMEM_CACHE(dm_target_io, 0);
		goto out_free_io_cache;

	_rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
		goto out_free_tio_cache;

	_rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
	if (!_rq_bio_info_cache)
		goto out_free_rq_tio_cache;

	r = dm_uevent_init();
		goto out_free_rq_bio_info_cache;

	r = register_blkdev(_major, _name);
		goto out_uevent_exit;

out_free_rq_bio_info_cache:
	kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);

static void local_exit(void)
	kmem_cache_destroy(_rq_bio_info_cache);
	kmem_cache_destroy(_rq_tio_cache);
	kmem_cache_destroy(_tio_cache);
	kmem_cache_destroy(_io_cache);
	unregister_blkdev(_major, _name);

	DMINFO("cleaned up");

static int (*_inits[])(void) __initdata = {

static void (*_exits[])(void) = {

static int __init dm_init(void)
	const int count = ARRAY_SIZE(_inits);

	for (i = 0; i < count; i++) {

static void __exit dm_exit(void)
	int i = ARRAY_SIZE(_exits);
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
	return test_bit(DMF_DELETING, &md->flags);

static int dm_blk_open(struct block_device *bdev, fmode_t mode)
	struct mapped_device *md;

	spin_lock(&_minor_lock);

	md = bdev->bd_disk->private_data;

	if (test_bit(DMF_FREEING, &md->flags) ||
	    dm_deleting_md(md)) {

	atomic_inc(&md->open_count);

	spin_unlock(&_minor_lock);

	return md ? 0 : -ENXIO;

static int dm_blk_close(struct gendisk *disk, fmode_t mode)
	struct mapped_device *md = disk->private_data;

	atomic_dec(&md->open_count);

int dm_open_count(struct mapped_device *md)
	return atomic_read(&md->open_count);

/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
	spin_lock(&_minor_lock);

	if (dm_open_count(md))

	set_bit(DMF_DELETING, &md->flags);

	spin_unlock(&_minor_lock);

static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
	struct mapped_device *md = bdev->bd_disk->private_data;

	return dm_get_geometry(md, geo);
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
	struct mapped_device *md = bdev->bd_disk->private_data;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *tgt;

	if (!map || !dm_table_get_size(map))

	/* We only support devices that have a single target */
	if (dm_table_get_num_targets(map) != 1)

	tgt = dm_table_get_target(map, 0);

	if (dm_suspended_md(md)) {

	if (tgt->type->ioctl)
		r = tgt->type->ioctl(tgt, cmd, arg);
static struct dm_io *alloc_io(struct mapped_device *md)
	return mempool_alloc(md->io_pool, GFP_NOIO);

static void free_io(struct mapped_device *md, struct dm_io *io)
	mempool_free(io, md->io_pool);

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
	mempool_free(tio, md->tio_pool);

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
	return mempool_alloc(md->tio_pool, gfp_mask);

static void free_rq_tio(struct dm_rq_target_io *tio)
	mempool_free(tio, tio->md->tio_pool);

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
	return mempool_alloc(md->io_pool, GFP_ATOMIC);

static void free_bio_info(struct dm_rq_clone_bio_info *info)
	mempool_free(info, info->tio->md->io_pool);

static int md_in_flight(struct mapped_device *md)
	return atomic_read(&md->pending[READ]) +
	       atomic_read(&md->pending[WRITE]);
static void start_io_acct(struct dm_io *io)
	struct mapped_device *md = io->md;
	int rw = bio_data_dir(io->bio);

	io->start_time = jiffies;

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);

static void end_io_acct(struct dm_io *io)
	struct mapped_device *md = io->md;
	struct bio *bio = io->bio;
	unsigned long duration = jiffies - io->start_time;
	int rw = bio_data_dir(bio);

	cpu = part_stat_lock();
	part_round_stats(cpu, &dm_disk(md)->part0);
	part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);

	/*
	 * After this is decremented the bio must not be touched if it is
	 */
	dm_disk(md)->part0.in_flight[rw] = pending =
		atomic_dec_return(&md->pending[rw]);
	pending += atomic_read(&md->pending[rw^0x1]);

	/* nudge anyone waiting on suspend queue */
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
	down_write(&md->io_lock);

	spin_lock_irq(&md->deferred_lock);
	bio_list_add(&md->deferred, bio);
	spin_unlock_irq(&md->deferred_lock);

	if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
		queue_work(md->wq, &md->work);

	up_write(&md->io_lock);

/*
 * Everyone (including functions in this file) should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
	read_lock_irqsave(&md->map_lock, flags);
	read_unlock_irqrestore(&md->map_lock, flags);
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
	sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

	if (geo->start > sz) {
		DMWARN("Start sector is beyond the geometry limits.");

/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/
static int __noflush_suspending(struct mapped_device *md)
	return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
	struct mapped_device *md = io->md;

	/* Push-back supersedes any I/O errors */
	if (unlikely(error)) {
		spin_lock_irqsave(&io->endio_lock, flags);
		if (!(io->error > 0 && __noflush_suspending(md)))
		spin_unlock_irqrestore(&io->endio_lock, flags);

	if (atomic_dec_and_test(&io->io_count)) {
		if (io->error == DM_ENDIO_REQUEUE) {
			/*
			 * Target requested pushing back the I/O.
			 */
			spin_lock_irqsave(&md->deferred_lock, flags);
			if (__noflush_suspending(md)) {
				if (!(io->bio->bi_rw & REQ_HARDBARRIER))
					bio_list_add_head(&md->deferred,
				/* noflush suspend was interrupted. */
			spin_unlock_irqrestore(&md->deferred_lock, flags);

		io_error = io->error;

		if (bio->bi_rw & REQ_HARDBARRIER) {
			/*
			 * There can be just one barrier request so we use
			 * a per-device variable for error reporting.
			 * Note that you can't touch the bio after end_io_acct
			 */
			if (!md->barrier_error && io_error != -EOPNOTSUPP)
				md->barrier_error = io_error;

		if (io_error != DM_ENDIO_REQUEUE) {
			trace_block_bio_complete(md->queue, bio);

			bio_endio(bio, io_error);
static void clone_endio(struct bio *bio, int error)
	struct dm_target_io *tio = bio->bi_private;
	struct dm_io *io = tio->io;
	struct mapped_device *md = tio->io->md;
	dm_endio_fn endio = tio->ti->type->end_io;

	if (!bio_flagged(bio, BIO_UPTODATE) && !error)

		r = endio(tio->ti, bio, error, &tio->info);
		if (r < 0 || r == DM_ENDIO_REQUEUE)
			/*
			 * error and requeue request are handled
			 */
		else if (r == DM_ENDIO_INCOMPLETE)
			/* The target will handle the io */
			DMWARN("unimplemented target endio return value: %d", r);

	/*
	 * Store md for cleanup instead of tio which is about to get freed.
	 */
	bio->bi_private = md->bs;

	dec_pending(io, error);
/*
 * Partial completion handling for request-based dm
 */
static void end_clone_bio(struct bio *clone, int error)
	struct dm_rq_clone_bio_info *info = clone->bi_private;
	struct dm_rq_target_io *tio = info->tio;
	struct bio *bio = info->orig;
	unsigned int nr_bytes = info->orig->bi_size;

	/*
	 * An error has already been detected on the request.
	 * Once error occurred, just let clone->end_io() handle
	 */

	/*
	 * Don't notice the error to the upper layer yet.
	 * The error handling decision is made by the target driver,
	 * when the request is completed.
	 */

	/*
	 * I/O for the bio successfully completed.
	 * Notice the data completion to the upper layer.
	 */

	/*
	 * bios are processed from the head of the list.
	 * So the completing bio should always be rq->bio.
	 * If it's not, something wrong is happening.
	 */
	if (tio->orig->bio != bio)
		DMERR("bio completion is going in the middle of the request");

	/*
	 * Update the original request.
	 * Do not use blk_end_request() here, because it may complete
	 * the original request before the clone, and break the ordering.
	 */
	blk_update_request(tio->orig, 0, nr_bytes);
static void store_barrier_error(struct mapped_device *md, int error)
	spin_lock_irqsave(&md->barrier_error_lock, flags);
	/*
	 * Basically, the first error is taken, but:
	 * -EOPNOTSUPP supersedes any I/O error.
	 * Requeue request supersedes any I/O error but -EOPNOTSUPP.
	 */
	if (!md->barrier_error || error == -EOPNOTSUPP ||
	    (md->barrier_error != -EOPNOTSUPP &&
	     error == DM_ENDIO_REQUEUE))
		md->barrier_error = error;
	spin_unlock_irqrestore(&md->barrier_error_lock, flags);
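
/*
 * Illustrative precedence walk-through (not part of the original source):
 * if barrier_error already holds -EIO, a later -EOPNOTSUPP or
 * DM_ENDIO_REQUEUE completion overwrites it, whereas once -EOPNOTSUPP has
 * been stored, neither a plain I/O error nor a requeue replaces it.
 */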
/*
 * Don't touch any member of the md after calling this function because
 * the md may be freed in dm_put() at the end of this function.
 * Or do dm_get() before calling this function and dm_put() later.
 */
static void rq_completed(struct mapped_device *md, int rw, int run_queue)
	atomic_dec(&md->pending[rw]);

	/* nudge anyone waiting on suspend queue */
	if (!md_in_flight(md))

		blk_run_queue(md->queue);

	/*
	 * dm_put() must be at the end of this function. See the comment above
	 */

static void free_rq_clone(struct request *clone)
	struct dm_rq_target_io *tio = clone->end_io_data;

	blk_rq_unprep_clone(clone);
/*
 * Complete the clone and the original request.
 * Must be called without queue lock.
 */
static void dm_end_request(struct request *clone, int error)
	int rw = rq_data_dir(clone);
	bool is_barrier = clone->cmd_flags & REQ_HARDBARRIER;
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;

	if (rq->cmd_type == REQ_TYPE_BLOCK_PC && !is_barrier) {
		rq->errors = clone->errors;
		rq->resid_len = clone->resid_len;

		/*
		 * We are using the sense buffer of the original
		 * So setting the length of the sense data is enough.
		 */
		rq->sense_len = clone->sense_len;

	free_rq_clone(clone);

	if (unlikely(is_barrier)) {
		store_barrier_error(md, error);

	blk_end_request_all(rq, error);

	rq_completed(md, rw, run_queue);
static void dm_unprep_request(struct request *rq)
	struct request *clone = rq->special;

	rq->cmd_flags &= ~REQ_DONTPREP;

	free_rq_clone(clone);

/*
 * Requeue the original request of a clone.
 */
void dm_requeue_unmapped_request(struct request *clone)
	int rw = rq_data_dir(clone);
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct mapped_device *md = tio->md;
	struct request *rq = tio->orig;
	struct request_queue *q = rq->q;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 */
		dm_end_request(clone, DM_ENDIO_REQUEUE);

	dm_unprep_request(rq);

	spin_lock_irqsave(q->queue_lock, flags);
	if (elv_queue_empty(q))
	blk_requeue_request(q, rq);
	spin_unlock_irqrestore(q->queue_lock, flags);

	rq_completed(md, rw, 0);
EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request);
static void __stop_queue(struct request_queue *q)

static void stop_queue(struct request_queue *q)
	spin_lock_irqsave(q->queue_lock, flags);
	spin_unlock_irqrestore(q->queue_lock, flags);

static void __start_queue(struct request_queue *q)
	if (blk_queue_stopped(q))

static void start_queue(struct request_queue *q)
	spin_lock_irqsave(q->queue_lock, flags);
	spin_unlock_irqrestore(q->queue_lock, flags);
static void dm_done(struct request *clone, int error, bool mapped)
	struct dm_rq_target_io *tio = clone->end_io_data;
	dm_request_endio_fn rq_end_io = tio->ti->type->rq_end_io;

	if (mapped && rq_end_io)
		r = rq_end_io(tio->ti, clone, error, &tio->info);

		/* The target wants to complete the I/O */
		dm_end_request(clone, r);
	else if (r == DM_ENDIO_INCOMPLETE)
		/* The target will handle the I/O */
	else if (r == DM_ENDIO_REQUEUE)
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
		DMWARN("unimplemented target endio return value: %d", r);

/*
 * Request completion handler for request-based dm
 */
static void dm_softirq_done(struct request *rq)
	struct request *clone = rq->completion_data;
	struct dm_rq_target_io *tio = clone->end_io_data;

	if (rq->cmd_flags & REQ_FAILED)

	dm_done(clone, tio->error, mapped);
/*
 * Complete the clone and the original request with the error status
 * through softirq context.
 */
static void dm_complete_request(struct request *clone, int error)
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request. So can't use
		 * softirq_done with the original.
		 * Pass the clone to dm_done() directly in this special case.
		 * It is safe (even if clone->q->queue_lock is held here)
		 * because there is no I/O dispatching during the completion
		 */
		dm_done(clone, error, true);

	rq->completion_data = clone;
	blk_complete_request(rq);

/*
 * Complete the not-mapped clone and the original request with the error status
 * through softirq context.
 * Target's rq_end_io() function isn't called.
 * This may be used when the target's map_rq() function fails.
 */
void dm_kill_unmapped_request(struct request *clone, int error)
	struct dm_rq_target_io *tio = clone->end_io_data;
	struct request *rq = tio->orig;

	if (unlikely(clone->cmd_flags & REQ_HARDBARRIER)) {
		/*
		 * Barrier clones share an original request.
		 * Leave it to dm_end_request(), which handles this special
		 */
		dm_end_request(clone, error);

	rq->cmd_flags |= REQ_FAILED;
	dm_complete_request(clone, error);
EXPORT_SYMBOL_GPL(dm_kill_unmapped_request);
/*
 * Called with the queue lock held
 */
static void end_clone_request(struct request *clone, int error)
	/*
	 * For just cleaning up the information of the queue in which
	 * the clone was dispatched.
	 * The clone is *NOT* freed actually here because it is alloced from
	 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
	 */
	__blk_put_request(clone->q, clone);

	/*
	 * Actual request completion is done in a softirq context which doesn't
	 * hold the queue lock. Otherwise, deadlock could occur because:
	 *     - another request may be submitted by the upper level driver
	 *       of the stacking during the completion
	 *     - the submission which requires queue lock may be done
	 *       against this queue
	 */
	dm_complete_request(clone, error);
static sector_t max_io_len(struct mapped_device *md,
			   sector_t sector, struct dm_target *ti)
	sector_t offset = sector - ti->begin;
	sector_t len = ti->len - offset;

	/*
	 * Does the target need to split even further ?
	 */
		boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
*ti
, struct bio
*clone
,
1043 struct dm_target_io
*tio
)
1047 struct mapped_device
*md
;
1049 clone
->bi_end_io
= clone_endio
;
1050 clone
->bi_private
= tio
;
1053 * Map the clone. If r == 0 we don't need to do
1054 * anything, the target has assumed ownership of
1057 atomic_inc(&tio
->io
->io_count
);
1058 sector
= clone
->bi_sector
;
1059 r
= ti
->type
->map(ti
, clone
, &tio
->info
);
1060 if (r
== DM_MAPIO_REMAPPED
) {
1061 /* the bio has been remapped so dispatch it */
1063 trace_block_remap(bdev_get_queue(clone
->bi_bdev
), clone
,
1064 tio
->io
->bio
->bi_bdev
->bd_dev
, sector
);
1066 generic_make_request(clone
);
1067 } else if (r
< 0 || r
== DM_MAPIO_REQUEUE
) {
1068 /* error the io and bail out, or requeue it if needed */
1070 dec_pending(tio
->io
, r
);
1072 * Store bio_set for cleanup.
1074 clone
->bi_private
= md
->bs
;
1078 DMWARN("unimplemented target map return value: %d", r
);
	struct mapped_device *md;
	struct dm_table *map;
	sector_t sector_count;

static void dm_bio_destructor(struct bio *bio)
	struct bio_set *bs = bio->bi_private;

/*
 * Creates a little bio that just does part of a bvec.
 */
static struct bio *split_bvec(struct bio *bio, sector_t sector,
			      unsigned short idx, unsigned int offset,
			      unsigned int len, struct bio_set *bs)
	struct bio_vec *bv = bio->bi_io_vec + idx;

	clone = bio_alloc_bioset(GFP_NOIO, 1, bs);
	clone->bi_destructor = dm_bio_destructor;
	*clone->bi_io_vec = *bv;

	clone->bi_sector = sector;
	clone->bi_bdev = bio->bi_bdev;
	clone->bi_rw = bio->bi_rw & ~REQ_HARDBARRIER;

	clone->bi_size = to_bytes(len);
	clone->bi_io_vec->bv_offset = offset;
	clone->bi_io_vec->bv_len = clone->bi_size;
	clone->bi_flags |= 1 << BIO_CLONED;

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);
		bio_integrity_trim(clone,
				   bio_sector_offset(bio, idx, offset), len);
/*
 * Creates a bio that consists of range of complete bvecs.
 */
static struct bio *clone_bio(struct bio *bio, sector_t sector,
			     unsigned short idx, unsigned short bv_count,
			     unsigned int len, struct bio_set *bs)
	clone = bio_alloc_bioset(GFP_NOIO, bio->bi_max_vecs, bs);
	__bio_clone(clone, bio);
	clone->bi_rw &= ~REQ_HARDBARRIER;
	clone->bi_destructor = dm_bio_destructor;
	clone->bi_sector = sector;
	clone->bi_idx = idx;
	clone->bi_vcnt = idx + bv_count;
	clone->bi_size = to_bytes(len);
	clone->bi_flags &= ~(1 << BIO_SEG_VALID);

	if (bio_integrity(bio)) {
		bio_integrity_clone(clone, bio, GFP_NOIO, bs);

		if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
			bio_integrity_trim(clone,
					   bio_sector_offset(bio, idx, 0), len);
static struct dm_target_io *alloc_tio(struct clone_info *ci,
				      struct dm_target *ti)
	struct dm_target_io *tio = mempool_alloc(ci->md->tio_pool, GFP_NOIO);

	memset(&tio->info, 0, sizeof(tio->info));

static void __flush_target(struct clone_info *ci, struct dm_target *ti,
	struct dm_target_io *tio = alloc_tio(ci, ti);

	tio->info.flush_request = flush_nr;

	clone = bio_alloc_bioset(GFP_NOIO, 0, ci->md->bs);
	__bio_clone(clone, ci->bio);
	clone->bi_destructor = dm_bio_destructor;

	__map_bio(ti, clone, tio);

static int __clone_and_map_empty_barrier(struct clone_info *ci)
	unsigned target_nr = 0, flush_nr;
	struct dm_target *ti;

	while ((ti = dm_table_get_target(ci->map, target_nr++)))
		for (flush_nr = 0; flush_nr < ti->num_flush_requests;
			__flush_target(ci, ti, flush_nr);

	ci->sector_count = 0;
static int __clone_and_map(struct clone_info *ci)
	struct bio *clone, *bio = ci->bio;
	struct dm_target *ti;
	sector_t len = 0, max;
	struct dm_target_io *tio;

	if (unlikely(bio_empty_barrier(bio)))
		return __clone_and_map_empty_barrier(ci);

	ti = dm_table_find_target(ci->map, ci->sector);
	if (!dm_target_is_valid(ti))

	max = max_io_len(ci->md, ci->sector, ti);

	/*
	 * Allocate a target io object.
	 */
	tio = alloc_tio(ci, ti);

	if (ci->sector_count <= max) {
		/*
		 * Optimise for the simple case where we can do all of
		 * the remaining io with a single clone.
		 */
		clone = clone_bio(bio, ci->sector, ci->idx,
				  bio->bi_vcnt - ci->idx, ci->sector_count,
		__map_bio(ti, clone, tio);
		ci->sector_count = 0;

	} else if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
		/*
		 * There are some bvecs that don't span targets.
		 * Do as many of these as possible.
		 */
		sector_t remaining = max;

		for (i = ci->idx; remaining && (i < bio->bi_vcnt); i++) {
			bv_len = to_sector(bio->bi_io_vec[i].bv_len);

			if (bv_len > remaining)

			remaining -= bv_len;

		clone = clone_bio(bio, ci->sector, ci->idx, i - ci->idx, len,
		__map_bio(ti, clone, tio);

		ci->sector_count -= len;

		/*
		 * Handle a bvec that must be split between two or more targets.
		 */
		struct bio_vec *bv = bio->bi_io_vec + ci->idx;
		sector_t remaining = to_sector(bv->bv_len);
		unsigned int offset = 0;

			ti = dm_table_find_target(ci->map, ci->sector);
			if (!dm_target_is_valid(ti))

			max = max_io_len(ci->md, ci->sector, ti);

			tio = alloc_tio(ci, ti);

			len = min(remaining, max);

			clone = split_bvec(bio, ci->sector, ci->idx,
					   bv->bv_offset + offset, len,

			__map_bio(ti, clone, tio);

			ci->sector_count -= len;
			offset += to_bytes(len);
		} while (remaining -= len);
/*
 * Split the bio into several clones and submit it to targets.
 */
static void __split_and_process_bio(struct mapped_device *md, struct bio *bio)
	struct clone_info ci;

	ci.map = dm_get_live_table(md);
	if (unlikely(!ci.map)) {
		if (!(bio->bi_rw & REQ_HARDBARRIER))
			if (!md->barrier_error)
				md->barrier_error = -EIO;

	ci.io = alloc_io(md);
	atomic_set(&ci.io->io_count, 1);
	spin_lock_init(&ci.io->endio_lock);
	ci.sector = bio->bi_sector;
	ci.sector_count = bio_sectors(bio);
	if (unlikely(bio_empty_barrier(bio)))
		ci.sector_count = 1;
	ci.idx = bio->bi_idx;

	start_io_acct(ci.io);
	while (ci.sector_count && !error)
		error = __clone_and_map(&ci);

	/* drop the extra reference count */
	dec_pending(ci.io, error);
	dm_table_put(ci.map);
/*-----------------------------------------------------------------
 *---------------------------------------------------------------*/
static int dm_merge_bvec(struct request_queue *q,
			 struct bvec_merge_data *bvm,
			 struct bio_vec *biovec)
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	sector_t max_sectors;

	ti = dm_table_find_target(map, bvm->bi_sector);
	if (!dm_target_is_valid(ti))

	/*
	 * Find maximum amount of I/O that won't need splitting
	 */
	max_sectors = min(max_io_len(md, bvm->bi_sector, ti),
			  (sector_t) BIO_MAX_SECTORS);
	max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;

	/*
	 * merge_bvec_fn() returns number of bytes
	 * it can accept at this offset
	 * max is precomputed maximal io size
	 */
	if (max_size && ti->type->merge)
		max_size = ti->type->merge(ti, bvm, biovec, max_size);
	/*
	 * If the target doesn't support merge method and some of the devices
	 * provided their merge_bvec method (we know this by looking at
	 * queue_max_hw_sectors), then we can't allow bios with multiple vector
	 * entries. So always set max_size to 0, and the code below allows
	 */
	else if (queue_max_hw_sectors(q) <= PAGE_SIZE >> 9)

	/*
	 * Always allow an entire first page
	 */
	if (max_size <= biovec->bv_len && !(bvm->bi_size >> SECTOR_SHIFT))
		max_size = biovec->bv_len;
/*
 * The request function that just remaps the bio built up by
 */
static int _dm_request(struct request_queue *q, struct bio *bio)
	int rw = bio_data_dir(bio);
	struct mapped_device *md = q->queuedata;

	down_read(&md->io_lock);

	cpu = part_stat_lock();
	part_stat_inc(cpu, &dm_disk(md)->part0, ios[rw]);
	part_stat_add(cpu, &dm_disk(md)->part0, sectors[rw], bio_sectors(bio));

	/*
	 * If we're suspended or the thread is processing barriers
	 * we have to queue this io for later.
	 */
	if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags)) ||
	    unlikely(bio->bi_rw & REQ_HARDBARRIER)) {
		up_read(&md->io_lock);

		if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) &&
		    bio_rw(bio) == READA) {

	__split_and_process_bio(md, bio);
	up_read(&md->io_lock);

static int dm_make_request(struct request_queue *q, struct bio *bio)
	struct mapped_device *md = q->queuedata;

	return md->saved_make_request_fn(q, bio); /* call __make_request() */

static int dm_request_based(struct mapped_device *md)
	return blk_queue_stackable(md->queue);

static int dm_request(struct request_queue *q, struct bio *bio)
	struct mapped_device *md = q->queuedata;

	if (dm_request_based(md))
		return dm_make_request(q, bio);

	return _dm_request(q, bio);
static bool dm_rq_is_flush_request(struct request *rq)
	if (rq->cmd_flags & REQ_FLUSH)

void dm_dispatch_request(struct request *rq)
	if (blk_queue_io_stat(rq->q))
		rq->cmd_flags |= REQ_IO_STAT;

	rq->start_time = jiffies;
	r = blk_insert_cloned_request(rq->q, rq);
		dm_complete_request(rq, r);
EXPORT_SYMBOL_GPL(dm_dispatch_request);
static void dm_rq_bio_destructor(struct bio *bio)
	struct dm_rq_clone_bio_info *info = bio->bi_private;
	struct mapped_device *md = info->tio->md;

	free_bio_info(info);
	bio_free(bio, md->bs);

static int dm_rq_bio_constructor(struct bio *bio, struct bio *bio_orig,
	struct dm_rq_target_io *tio = data;
	struct mapped_device *md = tio->md;
	struct dm_rq_clone_bio_info *info = alloc_bio_info(md);

	info->orig = bio_orig;

	bio->bi_end_io = end_clone_bio;
	bio->bi_private = info;
	bio->bi_destructor = dm_rq_bio_destructor;
static int setup_clone(struct request *clone, struct request *rq,
		       struct dm_rq_target_io *tio)
	if (dm_rq_is_flush_request(rq)) {
		blk_rq_init(NULL, clone);
		clone->cmd_type = REQ_TYPE_FS;
		clone->cmd_flags |= (REQ_HARDBARRIER | WRITE);

		r = blk_rq_prep_clone(clone, rq, tio->md->bs, GFP_ATOMIC,
				      dm_rq_bio_constructor, tio);

		clone->cmd = rq->cmd;
		clone->cmd_len = rq->cmd_len;
		clone->sense = rq->sense;
		clone->buffer = rq->buffer;

	clone->end_io = end_clone_request;
	clone->end_io_data = tio;

static struct request *clone_rq(struct request *rq, struct mapped_device *md,
	struct request *clone;
	struct dm_rq_target_io *tio;

	tio = alloc_rq_tio(md, gfp_mask);

	memset(&tio->info, 0, sizeof(tio->info));

	clone = &tio->clone;
	if (setup_clone(clone, rq, tio)) {
/*
 * Called with the queue lock held.
 */
static int dm_prep_fn(struct request_queue *q, struct request *rq)
	struct mapped_device *md = q->queuedata;
	struct request *clone;

	if (unlikely(dm_rq_is_flush_request(rq)))

	if (unlikely(rq->special)) {
		DMWARN("Already has something in rq->special.");
		return BLKPREP_KILL;

	clone = clone_rq(rq, md, GFP_ATOMIC);
		return BLKPREP_DEFER;

	rq->special = clone;
	rq->cmd_flags |= REQ_DONTPREP;
/*
 * 0  : the request has been processed (not requeued)
 * !0 : the request has been requeued
 */
static int map_request(struct dm_target *ti, struct request *clone,
		       struct mapped_device *md)
	int r, requeued = 0;
	struct dm_rq_target_io *tio = clone->end_io_data;

	/*
	 * Hold the md reference here for the in-flight I/O.
	 * We can't rely on the reference count by device opener,
	 * because the device may be closed during the request completion
	 * when all bios are completed.
	 * See the comment in rq_completed() too.
	 */

	r = ti->type->map_rq(ti, clone, &tio->info);
	case DM_MAPIO_SUBMITTED:
		/* The target has taken the I/O to submit by itself later */
	case DM_MAPIO_REMAPPED:
		/* The target has remapped the I/O so dispatch it */
		trace_block_rq_remap(clone->q, clone, disk_devt(dm_disk(md)),
				     blk_rq_pos(tio->orig));
		dm_dispatch_request(clone);
	case DM_MAPIO_REQUEUE:
		/* The target wants to requeue the I/O */
		dm_requeue_unmapped_request(clone);
			DMWARN("unimplemented target map return value: %d", r);

		/* The target wants to complete the I/O */
		dm_kill_unmapped_request(clone, r);
/*
 * q->request_fn for request-based dm.
 * Called with the queue lock held.
 */
static void dm_request_fn(struct request_queue *q)
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);
	struct dm_target *ti;
	struct request *rq, *clone;

	/*
	 * For suspend, check blk_queue_stopped() and increment
	 * ->pending within a single queue_lock not to increment the
	 * number of in-flight I/Os after the queue is stopped in
	 */
	while (!blk_queue_plugged(q) && !blk_queue_stopped(q)) {
		rq = blk_peek_request(q);

		if (unlikely(dm_rq_is_flush_request(rq))) {
			BUG_ON(md->flush_request);
			md->flush_request = rq;
			blk_start_request(rq);
			queue_work(md->wq, &md->barrier_work);

		ti = dm_table_find_target(map, blk_rq_pos(rq));
		if (ti->type->busy && ti->type->busy(ti))

		blk_start_request(rq);
		clone = rq->special;
		atomic_inc(&md->pending[rq_data_dir(clone)]);

		spin_unlock(q->queue_lock);
		if (map_request(ti, clone, md))

		spin_lock_irq(q->queue_lock);

	spin_lock_irq(q->queue_lock);

	if (!elv_queue_empty(q))
		/* Some requests still remain, retry later */
int dm_underlying_device_busy(struct request_queue *q)
	return blk_lld_busy(q);
EXPORT_SYMBOL_GPL(dm_underlying_device_busy);

static int dm_lld_busy(struct request_queue *q)
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

	if (!map || test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags))

		r = dm_table_any_busy_target(map);

static void dm_unplug_all(struct request_queue *q)
	struct mapped_device *md = q->queuedata;
	struct dm_table *map = dm_get_live_table(md);

		if (dm_request_based(md))
			generic_unplug_device(q);

		dm_table_unplug_all(map);

static int dm_any_congested(void *congested_data, int bdi_bits)
	struct mapped_device *md = congested_data;
	struct dm_table *map;

	if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		map = dm_get_live_table(md);
			/*
			 * Request-based dm cares about only own queue for
			 * the query about congestion status of request_queue
			 */
			if (dm_request_based(md))
				r = md->queue->backing_dev_info.state &
				r = dm_table_any_congested(map, bdi_bits);
/*-----------------------------------------------------------------
 * An IDR is used to keep track of allocated minor numbers.
 *---------------------------------------------------------------*/
static DEFINE_IDR(_minor_idr);

static void free_minor(int minor)
	spin_lock(&_minor_lock);
	idr_remove(&_minor_idr, minor);
	spin_unlock(&_minor_lock);

/*
 * See if the device with a specific minor # is free.
 */
static int specific_minor(int minor)
	if (minor >= (1 << MINORBITS))

	r = idr_pre_get(&_minor_idr, GFP_KERNEL);

	spin_lock(&_minor_lock);

	if (idr_find(&_minor_idr, minor)) {

	r = idr_get_new_above(&_minor_idr, MINOR_ALLOCED, minor, &m);

		idr_remove(&_minor_idr, m);

	spin_unlock(&_minor_lock);

static int next_free_minor(int *minor)
	r = idr_pre_get(&_minor_idr, GFP_KERNEL);

	spin_lock(&_minor_lock);

	r = idr_get_new(&_minor_idr, MINOR_ALLOCED, &m);

	if (m >= (1 << MINORBITS)) {
		idr_remove(&_minor_idr, m);

	spin_unlock(&_minor_lock);
static const struct block_device_operations dm_blk_dops;

static void dm_wq_work(struct work_struct *work);
static void dm_rq_barrier_work(struct work_struct *work);
/*
 * Allocate and initialise a blank device with a given minor.
 */
static struct mapped_device *alloc_dev(int minor)
	struct mapped_device *md = kzalloc(sizeof(*md), GFP_KERNEL);

		DMWARN("unable to allocate device, out of memory.");

	if (!try_module_get(THIS_MODULE))
		goto bad_module_get;

	/* get a minor number for the dev */
	if (minor == DM_ANY_MINOR)
		r = next_free_minor(&minor);
		r = specific_minor(minor);

	init_rwsem(&md->io_lock);
	mutex_init(&md->suspend_lock);
	spin_lock_init(&md->deferred_lock);
	spin_lock_init(&md->barrier_error_lock);
	rwlock_init(&md->map_lock);
	atomic_set(&md->holders, 1);
	atomic_set(&md->open_count, 0);
	atomic_set(&md->event_nr, 0);
	atomic_set(&md->uevent_seq, 0);
	INIT_LIST_HEAD(&md->uevent_list);
	spin_lock_init(&md->uevent_lock);

	md->queue = blk_init_queue(dm_request_fn, NULL);

	/*
	 * Request-based dm devices cannot be stacked on top of bio-based dm
	 * devices.  The type of this dm device has not been decided yet,
	 * although we initialized the queue using blk_init_queue().
	 * The type is decided at the first table loading time.
	 * To prevent problematic device stacking, clear the queue flag
	 * for request stacking support until then.
	 *
	 * This queue is new, so no concurrency on the queue_flags.
	 */
	queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE, md->queue);
	md->saved_make_request_fn = md->queue->make_request_fn;
	md->queue->queuedata = md;
	md->queue->backing_dev_info.congested_fn = dm_any_congested;
	md->queue->backing_dev_info.congested_data = md;
	blk_queue_make_request(md->queue, dm_request);
	blk_queue_bounce_limit(md->queue, BLK_BOUNCE_ANY);
	md->queue->unplug_fn = dm_unplug_all;
	blk_queue_merge_bvec(md->queue, dm_merge_bvec);
	blk_queue_softirq_done(md->queue, dm_softirq_done);
	blk_queue_prep_rq(md->queue, dm_prep_fn);
	blk_queue_lld_busy(md->queue, dm_lld_busy);
	blk_queue_ordered(md->queue, QUEUE_ORDERED_DRAIN_FLUSH);

	md->disk = alloc_disk(1);

	atomic_set(&md->pending[0], 0);
	atomic_set(&md->pending[1], 0);
	init_waitqueue_head(&md->wait);
	INIT_WORK(&md->work, dm_wq_work);
	INIT_WORK(&md->barrier_work, dm_rq_barrier_work);
	init_waitqueue_head(&md->eventq);

	md->disk->major = _major;
	md->disk->first_minor = minor;
	md->disk->fops = &dm_blk_dops;
	md->disk->queue = md->queue;
	md->disk->private_data = md;
	sprintf(md->disk->disk_name, "dm-%d", minor);

	format_dev_t(md->name, MKDEV(_major, minor));

	md->wq = create_singlethread_workqueue("kdmflush");

	md->bdev = bdget_disk(md->disk, 0);

	/* Populate the mapping, nobody knows we exist yet */
	spin_lock(&_minor_lock);
	old_md = idr_replace(&_minor_idr, md, minor);
	spin_unlock(&_minor_lock);

	BUG_ON(old_md != MINOR_ALLOCED);

	destroy_workqueue(md->wq);
	del_gendisk(md->disk);
	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
static void unlock_fs(struct mapped_device *md);

static void free_dev(struct mapped_device *md)
	int minor = MINOR(disk_devt(md->disk));

	destroy_workqueue(md->wq);
	mempool_destroy(md->tio_pool);
	mempool_destroy(md->io_pool);
	bioset_free(md->bs);
	blk_integrity_unregister(md->disk);
	del_gendisk(md->disk);

	spin_lock(&_minor_lock);
	md->disk->private_data = NULL;
	spin_unlock(&_minor_lock);

	blk_cleanup_queue(md->queue);
	module_put(THIS_MODULE);
static void __bind_mempools(struct mapped_device *md, struct dm_table *t)
	struct dm_md_mempools *p;

	if (md->io_pool && md->tio_pool && md->bs)
		/* the md already has necessary mempools */

	p = dm_table_get_md_mempools(t);
	BUG_ON(!p || md->io_pool || md->tio_pool || md->bs);

	md->io_pool = p->io_pool;
	md->tio_pool = p->tio_pool;

	/* mempool bind completed, now no need any mempools in the table */
	dm_table_free_md_mempools(t);
/*
 * Bind a table to the device.
 */
static void event_callback(void *context)
	unsigned long flags;
	struct mapped_device *md = (struct mapped_device *) context;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_splice_init(&md->uevent_list, &uevents);
	spin_unlock_irqrestore(&md->uevent_lock, flags);

	dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);

	atomic_inc(&md->event_nr);
	wake_up(&md->eventq);

static void __set_size(struct mapped_device *md, sector_t size)
	set_capacity(md->disk, size);

	mutex_lock(&md->bdev->bd_inode->i_mutex);
	i_size_write(md->bdev->bd_inode, (loff_t)size << SECTOR_SHIFT);
	mutex_unlock(&md->bdev->bd_inode->i_mutex);
/*
 * Returns old map, which caller must destroy.
 */
static struct dm_table *__bind(struct mapped_device *md, struct dm_table *t,
			       struct queue_limits *limits)
	struct dm_table *old_map;
	struct request_queue *q = md->queue;
	unsigned long flags;

	size = dm_table_get_size(t);

	/*
	 * Wipe any geometry if the size of the table changed.
	 */
	if (size != get_capacity(md->disk))
		memset(&md->geometry, 0, sizeof(md->geometry));

	__set_size(md, size);

	dm_table_event_callback(t, event_callback, md);

	/*
	 * The queue hasn't been stopped yet, if the old table type wasn't
	 * for request-based during suspension.  So stop it to prevent
	 * I/O mapping before resume.
	 * This must be done before setting the queue restrictions,
	 * because request-based dm may be run just after the setting.
	 */
	if (dm_table_request_based(t) && !blk_queue_stopped(q))

	__bind_mempools(md, t);

	write_lock_irqsave(&md->map_lock, flags);
	dm_table_set_restrictions(t, q, limits);
	write_unlock_irqrestore(&md->map_lock, flags);

/*
 * Returns unbound table for the caller to free.
 */
static struct dm_table *__unbind(struct mapped_device *md)
	struct dm_table *map = md->map;
	unsigned long flags;

	dm_table_event_callback(map, NULL, NULL);
	write_lock_irqsave(&md->map_lock, flags);
	write_unlock_irqrestore(&md->map_lock, flags);
/*
 * Constructor for a new device.
 */
int dm_create(int minor, struct mapped_device **result)
	struct mapped_device *md;

	md = alloc_dev(minor);

static struct mapped_device *dm_find_md(dev_t dev)
	struct mapped_device *md;
	unsigned minor = MINOR(dev);

	if (MAJOR(dev) != _major || minor >= (1 << MINORBITS))

	spin_lock(&_minor_lock);

	md = idr_find(&_minor_idr, minor);
	if (md && (md == MINOR_ALLOCED ||
		   (MINOR(disk_devt(dm_disk(md))) != minor) ||
		   test_bit(DMF_FREEING, &md->flags))) {

	spin_unlock(&_minor_lock);

struct mapped_device *dm_get_md(dev_t dev)
	struct mapped_device *md = dm_find_md(dev);
void *dm_get_mdptr(struct mapped_device *md)
	return md->interface_ptr;

void dm_set_mdptr(struct mapped_device *md, void *ptr)
	md->interface_ptr = ptr;

void dm_get(struct mapped_device *md)
	atomic_inc(&md->holders);

const char *dm_device_name(struct mapped_device *md)
EXPORT_SYMBOL_GPL(dm_device_name);

void dm_put(struct mapped_device *md)
	struct dm_table *map;

	BUG_ON(test_bit(DMF_FREEING, &md->flags));

	if (atomic_dec_and_lock(&md->holders, &_minor_lock)) {
		map = dm_get_live_table(md);
		idr_replace(&_minor_idr, MINOR_ALLOCED,
			    MINOR(disk_devt(dm_disk(md))));
		set_bit(DMF_FREEING, &md->flags);
		spin_unlock(&_minor_lock);
		if (!dm_suspended_md(md)) {
			dm_table_presuspend_targets(map);
			dm_table_postsuspend_targets(map);
		dm_table_destroy(__unbind(md));
EXPORT_SYMBOL_GPL(dm_put);
static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
	DECLARE_WAITQUEUE(wait, current);

	dm_unplug_all(md->queue);

	add_wait_queue(&md->wait, &wait);

		set_current_state(interruptible);

		if (!md_in_flight(md))

		if (interruptible == TASK_INTERRUPTIBLE &&
		    signal_pending(current)) {

	set_current_state(TASK_RUNNING);

	remove_wait_queue(&md->wait, &wait);

static void dm_flush(struct mapped_device *md)
	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);

	bio_init(&md->barrier_bio);
	md->barrier_bio.bi_bdev = md->bdev;
	md->barrier_bio.bi_rw = WRITE_BARRIER;
	__split_and_process_bio(md, &md->barrier_bio);

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);
static void process_barrier(struct mapped_device *md, struct bio *bio)
	md->barrier_error = 0;

	if (!bio_empty_barrier(bio)) {
		__split_and_process_bio(md, bio);

	if (md->barrier_error != DM_ENDIO_REQUEUE)
		bio_endio(bio, md->barrier_error);

		spin_lock_irq(&md->deferred_lock);
		bio_list_add_head(&md->deferred, bio);
		spin_unlock_irq(&md->deferred_lock);
/*
 * Process the deferred bios
 */
static void dm_wq_work(struct work_struct *work)
	struct mapped_device *md = container_of(work, struct mapped_device,

	down_write(&md->io_lock);

	while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags)) {
		spin_lock_irq(&md->deferred_lock);
		c = bio_list_pop(&md->deferred);
		spin_unlock_irq(&md->deferred_lock);

			clear_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);

		up_write(&md->io_lock);

		if (dm_request_based(md))
			generic_make_request(c);
			if (c->bi_rw & REQ_HARDBARRIER)
				process_barrier(md, c);
				__split_and_process_bio(md, c);

		down_write(&md->io_lock);

	up_write(&md->io_lock);
static void dm_queue_flush(struct mapped_device *md)
	clear_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	smp_mb__after_clear_bit();
	queue_work(md->wq, &md->work);

static void dm_rq_set_flush_nr(struct request *clone, unsigned flush_nr)
	struct dm_rq_target_io *tio = clone->end_io_data;

	tio->info.flush_request = flush_nr;

/* Issue barrier requests to targets and wait for their completion. */
static int dm_rq_barrier(struct mapped_device *md)
	struct dm_table *map = dm_get_live_table(md);
	unsigned num_targets = dm_table_get_num_targets(map);
	struct dm_target *ti;
	struct request *clone;

	md->barrier_error = 0;

	for (i = 0; i < num_targets; i++) {
		ti = dm_table_get_target(map, i);
		for (j = 0; j < ti->num_flush_requests; j++) {
			clone = clone_rq(md->flush_request, md, GFP_NOIO);
			dm_rq_set_flush_nr(clone, j);
			atomic_inc(&md->pending[rq_data_dir(clone)]);
			map_request(ti, clone, md);

	dm_wait_for_completion(md, TASK_UNINTERRUPTIBLE);

	return md->barrier_error;
static void dm_rq_barrier_work(struct work_struct *work)
	struct mapped_device *md = container_of(work, struct mapped_device,
	struct request_queue *q = md->queue;
	unsigned long flags;

	/*
	 * Hold the md reference here and leave it at the last part so that
	 * the md can't be deleted by device opener when the barrier request
	 */

	error = dm_rq_barrier(md);

	rq = md->flush_request;
	md->flush_request = NULL;

	if (error == DM_ENDIO_REQUEUE) {
		spin_lock_irqsave(q->queue_lock, flags);
		blk_requeue_request(q, rq);
		spin_unlock_irqrestore(q->queue_lock, flags);
		blk_end_request_all(rq, error);
/*
 * Swap in a new table, returning the old one for the caller to destroy.
 */
struct dm_table *dm_swap_table(struct mapped_device *md, struct dm_table *table)
	struct dm_table *map = ERR_PTR(-EINVAL);
	struct queue_limits limits;

	mutex_lock(&md->suspend_lock);

	/* device must be suspended */
	if (!dm_suspended_md(md))

	r = dm_calculate_queue_limits(table, &limits);

	/* cannot change the device type, once a table is bound */
	    (dm_table_get_type(md->map) != dm_table_get_type(table))) {
		DMWARN("can't change the device type after a table is bound");

	map = __bind(md, table, &limits);

	mutex_unlock(&md->suspend_lock);
/*
 * Functions to lock and unlock any filesystem running on the
 */
static int lock_fs(struct mapped_device *md)
	WARN_ON(md->frozen_sb);

	md->frozen_sb = freeze_bdev(md->bdev);
	if (IS_ERR(md->frozen_sb)) {
		r = PTR_ERR(md->frozen_sb);
		md->frozen_sb = NULL;

	set_bit(DMF_FROZEN, &md->flags);

static void unlock_fs(struct mapped_device *md)
	if (!test_bit(DMF_FROZEN, &md->flags))

	thaw_bdev(md->bdev, md->frozen_sb);
	md->frozen_sb = NULL;
	clear_bit(DMF_FROZEN, &md->flags);

/*
 * We need to be able to change a mapping table under a mounted
 * filesystem.  For example we might want to move some data in
 * the background.  Before the table can be swapped with
 * dm_bind_table, dm_suspend must be called to flush any in
 * flight bios and ensure that any further io gets deferred.
 *
 * Suspend mechanism in request-based dm.
 *
 * 1. Flush all I/Os by lock_fs() if needed.
 * 2. Stop dispatching any I/O by stopping the request_queue.
 * 3. Wait for all in-flight I/Os to be completed or requeued.
 *
 * To abort suspend, start the request_queue.
 */
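/*
 * Illustrative caller sequence (a sketch based on functions in this file,
 * not part of the original source): a table swap under a live filesystem
 * would typically be driven as
 *
 *	dm_suspend(md, DM_SUSPEND_LOCKFS_FLAG);
 *	old_map = dm_swap_table(md, new_table);
 *	dm_table_destroy(old_map);
 *	dm_resume(md);
 *
 * where new_table is assumed to have been built and validated elsewhere.
 */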
int dm_suspend(struct mapped_device *md, unsigned suspend_flags)
	struct dm_table *map = NULL;
	int do_lockfs = suspend_flags & DM_SUSPEND_LOCKFS_FLAG ? 1 : 0;
	int noflush = suspend_flags & DM_SUSPEND_NOFLUSH_FLAG ? 1 : 0;

	mutex_lock(&md->suspend_lock);

	if (dm_suspended_md(md)) {

	map = dm_get_live_table(md);

	/*
	 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
	 * This flag is cleared before dm_suspend returns.
	 */
		set_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);

	/* This does not get reverted if there's an error later. */
	dm_table_presuspend_targets(map);

	/*
	 * Flush I/O to the device.
	 * Any I/O submitted after lock_fs() may not be flushed.
	 * noflush takes precedence over do_lockfs.
	 * (lock_fs() flushes I/Os and waits for them to complete.)
	 */
	if (!noflush && do_lockfs) {

	/*
	 * Here we must make sure that no processes are submitting requests
	 * to target drivers i.e. no one may be executing
	 * __split_and_process_bio. This is called from dm_request and
	 *
	 * To get all processes out of __split_and_process_bio in dm_request,
	 * we take the write lock. To prevent any process from reentering
	 * __split_and_process_bio from dm_request, we set
	 * DMF_QUEUE_IO_TO_THREAD.
	 *
	 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
	 * and call flush_workqueue(md->wq). flush_workqueue will wait until
	 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
	 * further calls to __split_and_process_bio from dm_wq_work.
	 */
	down_write(&md->io_lock);
	set_bit(DMF_BLOCK_IO_FOR_SUSPEND, &md->flags);
	set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags);
	up_write(&md->io_lock);

	/*
	 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
	 * can be kicked until md->queue is stopped.  So stop md->queue before
	 */
	if (dm_request_based(md))
		stop_queue(md->queue);

	flush_workqueue(md->wq);

	/*
	 * At this point no more requests are entering target request routines.
	 * We call dm_wait_for_completion to wait for all existing requests
	 */
	r = dm_wait_for_completion(md, TASK_INTERRUPTIBLE);

	down_write(&md->io_lock);
		clear_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
	up_write(&md->io_lock);

	/* were we interrupted ? */

		if (dm_request_based(md))
			start_queue(md->queue);

		goto out; /* pushback list is already flushed, so skip flush */

	/*
	 * If dm_wait_for_completion returned 0, the device is completely
	 * quiescent now. There is no request-processing activity. All new
	 * requests are being added to md->deferred list.
	 */
	set_bit(DMF_SUSPENDED, &md->flags);

	dm_table_postsuspend_targets(map);

	mutex_unlock(&md->suspend_lock);
int dm_resume(struct mapped_device *md)
	struct dm_table *map = NULL;

	mutex_lock(&md->suspend_lock);
	if (!dm_suspended_md(md))

	map = dm_get_live_table(md);
	if (!map || !dm_table_get_size(map))

	r = dm_table_resume_targets(map);

	/*
	 * Flushing deferred I/Os must be done after targets are resumed
	 * so that mapping of targets can work correctly.
	 * Request-based dm is queueing the deferred I/Os in its request_queue.
	 */
	if (dm_request_based(md))
		start_queue(md->queue);

	clear_bit(DMF_SUSPENDED, &md->flags);

	dm_table_unplug_all(map);

	mutex_unlock(&md->suspend_lock);
/*-----------------------------------------------------------------
 * Event notification.
 *---------------------------------------------------------------*/
int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
	char udev_cookie[DM_COOKIE_LENGTH];
	char *envp[] = { udev_cookie, NULL };

		return kobject_uevent(&disk_to_dev(md->disk)->kobj, action);

		snprintf(udev_cookie, DM_COOKIE_LENGTH, "%s=%u",
			 DM_COOKIE_ENV_VAR_NAME, cookie);
		return kobject_uevent_env(&disk_to_dev(md->disk)->kobj,

uint32_t dm_next_uevent_seq(struct mapped_device *md)
	return atomic_add_return(1, &md->uevent_seq);

uint32_t dm_get_event_nr(struct mapped_device *md)
	return atomic_read(&md->event_nr);

int dm_wait_event(struct mapped_device *md, int event_nr)
	return wait_event_interruptible(md->eventq,
			(event_nr != atomic_read(&md->event_nr)));

void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
	unsigned long flags;

	spin_lock_irqsave(&md->uevent_lock, flags);
	list_add(elist, &md->uevent_list);
	spin_unlock_irqrestore(&md->uevent_lock, flags);
/*
 * The gendisk is only valid as long as you have a reference
 */
struct gendisk *dm_disk(struct mapped_device *md)

struct kobject *dm_kobject(struct mapped_device *md)

/*
 * struct mapped_device should not be exported outside of dm.c
 * so use this check to verify that kobj is part of md structure
 */
struct mapped_device *dm_get_from_kobject(struct kobject *kobj)
	struct mapped_device *md;

	md = container_of(kobj, struct mapped_device, kobj);
	if (&md->kobj != kobj)

	if (test_bit(DMF_FREEING, &md->flags) ||

int dm_suspended_md(struct mapped_device *md)
	return test_bit(DMF_SUSPENDED, &md->flags);

int dm_suspended(struct dm_target *ti)
	return dm_suspended_md(dm_table_get_md(ti->table));
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
	return __noflush_suspending(dm_table_get_md(ti->table));
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
	struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);

	pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
			 mempool_create_slab_pool(MIN_IOS, _io_cache) :
			 mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
	if (!pools->io_pool)
		goto free_pools_and_out;

	pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
			  mempool_create_slab_pool(MIN_IOS, _tio_cache) :
			  mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
	if (!pools->tio_pool)
		goto free_io_pool_and_out;

	pools->bs = (type == DM_TYPE_BIO_BASED) ?
		    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
		goto free_tio_pool_and_out;

free_tio_pool_and_out:
	mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
	mempool_destroy(pools->io_pool);

void dm_free_md_mempools(struct dm_md_mempools *pools)
		mempool_destroy(pools->io_pool);

	if (pools->tio_pool)
		mempool_destroy(pools->tio_pool);

		bioset_free(pools->bs);
static const struct block_device_operations dm_blk_dops = {
	.open = dm_blk_open,
	.release = dm_blk_close,
	.ioctl = dm_blk_ioctl,
	.getgeo = dm_blk_getgeo,
	.owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");
MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");