/*
 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
 * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/moduleparam.h>
#include <linux/blkpg.h>
#include <linux/bio.h>
#include <linux/buffer_head.h>
#include <linux/smp_lock.h>
#include <linux/mempool.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/hdreg.h>
#include <linux/delay.h>

#include <trace/events/block.h>

#define DM_MSG_PREFIX "core"

/*
 * Cookies are numeric values sent with CHANGE and REMOVE
 * uevents while resuming, removing or renaming the device.
 */
#define DM_COOKIE_ENV_VAR_NAME "DM_COOKIE"
#define DM_COOKIE_LENGTH 24

static const char *_name = DM_NAME;

static unsigned int major = 0;
static unsigned int _major = 0;

static DEFINE_SPINLOCK(_minor_lock);
/*
 * One of these is allocated per bio.
 */
struct dm_io {
        struct mapped_device *md;
        int error;
        atomic_t io_count;
        struct bio *bio;
        unsigned long start_time;
        spinlock_t endio_lock;
};
/*
 * One of these is allocated per target within a bio. Hopefully
 * this will be simplified out one day.
 */
/*
 * For request-based dm.
 * One of these is allocated per request.
 */
struct dm_rq_target_io {
        struct mapped_device *md;
        struct dm_target *ti;
        struct request *orig, clone;
        int error;
        union map_info info;
};
/*
 * For request-based dm.
 * One of these is allocated per bio.
 */
struct dm_rq_clone_bio_info {
        struct bio *orig;
        struct dm_rq_target_io *tio;
};
union map_info *dm_get_mapinfo(struct bio *bio)
{
        if (bio && bio->bi_private)
                return &((struct dm_target_io *)bio->bi_private)->info;
        return NULL;
}

union map_info *dm_get_rq_mapinfo(struct request *rq)
{
        if (rq && rq->end_io_data)
                return &((struct dm_rq_target_io *)rq->end_io_data)->info;
        return NULL;
}
EXPORT_SYMBOL_GPL(dm_get_rq_mapinfo);
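/*
 * Illustrative sketch (not part of the driver): a target's hooks can use
 * the map_info returned by these helpers to carry per-I/O context from
 * its map function to its end_io function.  "my_ctx" is a hypothetical
 * target-private pointer, shown only to make the intent concrete:
 *
 *	union map_info *info = dm_get_mapinfo(bio);
 *	if (info)
 *		info->ptr = my_ctx;
 */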
#define MINOR_ALLOCED ((void *)-1)

/*
 * Bits for the md->flags field.
 */
#define DMF_BLOCK_IO_FOR_SUSPEND 0
#define DMF_SUSPENDED 1
#define DMF_FROZEN 2
#define DMF_FREEING 3
#define DMF_DELETING 4
#define DMF_NOFLUSH_SUSPENDING 5
#define DMF_QUEUE_IO_TO_THREAD 6
/*
 * Work processed by per-device workqueue.
 */
struct mapped_device {
        struct rw_semaphore io_lock;
        struct mutex suspend_lock;

        struct request_queue *queue;
        /* Protect queue and type against concurrent access. */
        struct mutex type_lock;

        struct gendisk *disk;

        /*
         * A list of ios that arrived while we were suspended.
         */
        wait_queue_head_t wait;
        struct work_struct work;
        struct bio_list deferred;
        spinlock_t deferred_lock;

        /*
         * An error from the barrier request currently being processed.
         */

        /*
         * Protect barrier_error from concurrent endio processing
         * in request-based dm.
         */
        spinlock_t barrier_error_lock;

        /*
         * Processing queue (flush/barriers)
         */
        struct workqueue_struct *wq;
        struct work_struct barrier_work;

        /* A pointer to the currently processing pre/post flush request */
        struct request *flush_request;

        /*
         * The current mapping.
         */
        struct dm_table *map;

        /*
         * io objects are allocated from here.
         */

        wait_queue_head_t eventq;

        struct list_head uevent_list;
        spinlock_t uevent_lock; /* Protect access to uevent_list */

        /*
         * freeze/thaw support require holding onto a super block
         */
        struct super_block *frozen_sb;
        struct block_device *bdev;

        /* forced geometry settings */
        struct hd_geometry geometry;

        /* For saving the address of __make_request for request based dm */
        make_request_fn *saved_make_request_fn;

        /* zero-length barrier that will be cloned and submitted to targets */
        struct bio barrier_bio;
/*
 * For mempools pre-allocation at the table loading time.
 */
struct dm_md_mempools {
        mempool_t *io_pool;
        mempool_t *tio_pool;
        struct bio_set *bs;
};

static struct kmem_cache *_io_cache;
static struct kmem_cache *_tio_cache;
static struct kmem_cache *_rq_tio_cache;
static struct kmem_cache *_rq_bio_info_cache;
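/*
 * These slab caches back the per-device mempools created in
 * dm_alloc_md_mempools() further down: _io_cache and _tio_cache for
 * bio-based devices, _rq_tio_cache and _rq_bio_info_cache for
 * request-based ones.
 */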
static int __init local_init(void)
{
        int r = -ENOMEM;

        /* allocate a slab for the dm_ios */
        _io_cache = KMEM_CACHE(dm_io, 0);
        if (!_io_cache)
                return r;

        /* allocate a slab for the target ios */
        _tio_cache = KMEM_CACHE(dm_target_io, 0);
        if (!_tio_cache)
                goto out_free_io_cache;

        _rq_tio_cache = KMEM_CACHE(dm_rq_target_io, 0);
        if (!_rq_tio_cache)
                goto out_free_tio_cache;

        _rq_bio_info_cache = KMEM_CACHE(dm_rq_clone_bio_info, 0);
        if (!_rq_bio_info_cache)
                goto out_free_rq_tio_cache;

        r = dm_uevent_init();
        if (r)
                goto out_free_rq_bio_info_cache;

        r = register_blkdev(_major, _name);
        if (r < 0)
                goto out_uevent_exit;

        return 0;

out_uevent_exit:
        dm_uevent_exit();
out_free_rq_bio_info_cache:
        kmem_cache_destroy(_rq_bio_info_cache);
out_free_rq_tio_cache:
        kmem_cache_destroy(_rq_tio_cache);
out_free_tio_cache:
        kmem_cache_destroy(_tio_cache);
out_free_io_cache:
        kmem_cache_destroy(_io_cache);

        return r;
}
static void local_exit(void)
{
        kmem_cache_destroy(_rq_bio_info_cache);
        kmem_cache_destroy(_rq_tio_cache);
        kmem_cache_destroy(_tio_cache);
        kmem_cache_destroy(_io_cache);
        unregister_blkdev(_major, _name);

        DMINFO("cleaned up");
}
static int (*_inits[])(void) __initdata = {
        local_init,
        /* ... */
};

static void (*_exits[])(void) = {
        local_exit,
        /* ... */
};

static int __init dm_init(void)
{
        const int count = ARRAY_SIZE(_inits);
        int r, i;

        for (i = 0; i < count; i++) {
                r = _inits[i]();
                if (r)
                        goto bad;
        }

        return 0;

bad:
        while (i--)
                _exits[i]();

        return r;
}

static void __exit dm_exit(void)
{
        int i = ARRAY_SIZE(_exits);

        while (i--)
                _exits[i]();
}
/*
 * Block device functions
 */
int dm_deleting_md(struct mapped_device *md)
{
        return test_bit(DMF_DELETING, &md->flags);
}
static int dm_blk_open(struct block_device *bdev, fmode_t mode)
{
        struct mapped_device *md;

        spin_lock(&_minor_lock);

        md = bdev->bd_disk->private_data;
        if (!md)
                goto out;

        if (test_bit(DMF_FREEING, &md->flags) ||
            dm_deleting_md(md)) {
                md = NULL;
                goto out;
        }

        dm_get(md);
        atomic_inc(&md->open_count);

out:
        spin_unlock(&_minor_lock);

        return md ? 0 : -ENXIO;
}
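/*
 * The open count taken in dm_blk_open() is what dm_lock_for_deletion()
 * checks further down: an open device cannot be marked DMF_DELETING,
 * and dm_blk_open() in turn refuses devices that are already being
 * freed or deleted.
 */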
static int dm_blk_close(struct gendisk *disk, fmode_t mode)
{
        struct mapped_device *md = disk->private_data;

        atomic_dec(&md->open_count);
        dm_put(md);

        return 0;
}
int dm_open_count(struct mapped_device *md)
{
        return atomic_read(&md->open_count);
}
/*
 * Guarantees nothing is using the device before it's deleted.
 */
int dm_lock_for_deletion(struct mapped_device *md)
{
        int r = 0;

        spin_lock(&_minor_lock);

        if (dm_open_count(md))
                r = -EBUSY;
        else
                set_bit(DMF_DELETING, &md->flags);

        spin_unlock(&_minor_lock);

        return r;
}
static int dm_blk_getgeo(struct block_device *bdev, struct hd_geometry *geo)
{
        struct mapped_device *md = bdev->bd_disk->private_data;

        return dm_get_geometry(md, geo);
}
static int dm_blk_ioctl(struct block_device *bdev, fmode_t mode,
                        unsigned int cmd, unsigned long arg)
{
        struct mapped_device *md = bdev->bd_disk->private_data;
        struct dm_table *map = dm_get_live_table(md);
        struct dm_target *tgt;
        int r = -ENOTTY;

        if (!map || !dm_table_get_size(map))
                goto out;

        /* We only support devices that have a single target */
        if (dm_table_get_num_targets(map) != 1)
                goto out;

        tgt = dm_table_get_target(map, 0);

        if (dm_suspended_md(md)) {
                r = -EAGAIN;
                goto out;
        }

        if (tgt->type->ioctl)
                r = tgt->type->ioctl(tgt, cmd, arg);

out:
        dm_table_put(map);

        return r;
}
static struct dm_io *alloc_io(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_NOIO);
}

static void free_io(struct mapped_device *md, struct dm_io *io)
{
        mempool_free(io, md->io_pool);
}

static void free_tio(struct mapped_device *md, struct dm_target_io *tio)
{
        mempool_free(tio, md->tio_pool);
}

static struct dm_rq_target_io *alloc_rq_tio(struct mapped_device *md,
                                            gfp_t gfp_mask)
{
        return mempool_alloc(md->tio_pool, gfp_mask);
}

static void free_rq_tio(struct dm_rq_target_io *tio)
{
        mempool_free(tio, tio->md->tio_pool);
}

static struct dm_rq_clone_bio_info *alloc_bio_info(struct mapped_device *md)
{
        return mempool_alloc(md->io_pool, GFP_ATOMIC);
}

static void free_bio_info(struct dm_rq_clone_bio_info *info)
{
        mempool_free(info, info->tio->md->io_pool);
}
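/*
 * Note the allocation contexts used above: bio-based per-io structures
 * come out of md->io_pool with GFP_NOIO (the caller may sleep but must
 * not re-enter the block layer), while the request-based clone-bio info
 * uses GFP_ATOMIC because it is allocated from the bio constructor
 * while a request clone is being prepared.
 */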
static int md_in_flight(struct mapped_device *md)
{
        return atomic_read(&md->pending[READ]) +
               atomic_read(&md->pending[WRITE]);
}
static void start_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        int cpu;
        int rw = bio_data_dir(io->bio);

        io->start_time = jiffies;

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_unlock();
        dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]);
}
static void end_io_acct(struct dm_io *io)
{
        struct mapped_device *md = io->md;
        struct bio *bio = io->bio;
        unsigned long duration = jiffies - io->start_time;
        int pending, cpu;
        int rw = bio_data_dir(bio);

        cpu = part_stat_lock();
        part_round_stats(cpu, &dm_disk(md)->part0);
        part_stat_add(cpu, &dm_disk(md)->part0, ticks[rw], duration);
        part_stat_unlock();

        /*
         * After this is decremented the bio must not be touched if it is
         * a barrier.
         */
        dm_disk(md)->part0.in_flight[rw] = pending =
                atomic_dec_return(&md->pending[rw]);
        pending += atomic_read(&md->pending[rw^0x1]);

        /* nudge anyone waiting on suspend queue */
        if (!pending)
                wake_up(&md->wait);
}
/*
 * Add the bio to the list of deferred io.
 */
static void queue_io(struct mapped_device *md, struct bio *bio)
{
        down_write(&md->io_lock);

        spin_lock_irq(&md->deferred_lock);
        bio_list_add(&md->deferred, bio);
        spin_unlock_irq(&md->deferred_lock);

        if (!test_and_set_bit(DMF_QUEUE_IO_TO_THREAD, &md->flags))
                queue_work(md->wq, &md->work);

        up_write(&md->io_lock);
}
/*
 * Everyone (including functions in this file), should use this
 * function to access the md->map field, and make sure they call
 * dm_table_put() when finished.
 */
struct dm_table *dm_get_live_table(struct mapped_device *md)
{
        struct dm_table *t;
        unsigned long flags;

        read_lock_irqsave(&md->map_lock, flags);
        t = md->map;
        if (t)
                dm_table_get(t);
        read_unlock_irqrestore(&md->map_lock, flags);

        return t;
}
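/*
 * Minimal usage sketch (illustrative): every reader pairs the get with
 * dm_table_put(), exactly as dm_blk_ioctl() above does:
 *
 *	struct dm_table *map = dm_get_live_table(md);
 *	if (map) {
 *		... use the table ...
 *		dm_table_put(map);
 *	}
 */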
/*
 * Get the geometry associated with a dm device
 */
int dm_get_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        *geo = md->geometry;

        return 0;
}

/*
 * Set the geometry of a device.
 */
int dm_set_geometry(struct mapped_device *md, struct hd_geometry *geo)
{
        sector_t sz = (sector_t)geo->cylinders * geo->heads * geo->sectors;

        if (geo->start > sz) {
                DMWARN("Start sector is beyond the geometry limits.");
                return -EINVAL;
        }

        md->geometry = *geo;

        return 0;
}
/*-----------------------------------------------------------------
 * A more elegant soln is in the works that uses the queue
 * merge fn, unfortunately there are a couple of changes to
 * the block layer that I want to make for this. So in the
 * interests of getting something for people to use I give
 * you this clearly demarcated crap.
 *---------------------------------------------------------------*/

static int __noflush_suspending(struct mapped_device *md)
{
        return test_bit(DMF_NOFLUSH_SUSPENDING, &md->flags);
}
/*
 * Decrements the number of outstanding ios that a bio has been
 * cloned into, completing the original io if necc.
 */
static void dec_pending(struct dm_io *io, int error)
{
        unsigned long flags;
        int io_error;
        struct bio *bio;
        struct mapped_device *md = io->md;

        /* Push-back supersedes any I/O errors */
        if (unlikely(error)) {
                spin_lock_irqsave(&io->endio_lock, flags);
                if (!(io->error > 0 && __noflush_suspending(md)))
                        io->error = error;
                spin_unlock_irqrestore(&io->endio_lock, flags);
        }

        if (atomic_dec_and_test(&io->io_count)) {
                if (io->error == DM_ENDIO_REQUEUE) {
                        /*
                         * Target requested pushing back the I/O.
                         */
                        spin_lock_irqsave(&md->deferred_lock, flags);
                        if (__noflush_suspending(md)) {
                                if (!(io->bio->bi_rw & REQ_HARDBARRIER))
                                        bio_list_add_head(&md->deferred,
                                                          io->bio);
                        } else
                                /* noflush suspend was interrupted. */
                                io->error = -EIO;
                        spin_unlock_irqrestore(&md->deferred_lock, flags);
                }

                io_error = io->error;
                bio = io->bio;

                if (bio->bi_rw & REQ_HARDBARRIER) {
                        /*
                         * There can be just one barrier request so we use
                         * a per-device variable for error reporting.
                         * Note that you can't touch the bio after end_io_acct
                         *
                         * We ignore -EOPNOTSUPP for empty flush reported by
                         * underlying devices. We assume that if the device
                         * doesn't support empty barriers, it doesn't need
                         * cache flushing commands.
                         */
                        if (!md->barrier_error &&
                            !(bio_empty_barrier(bio) && io_error == -EOPNOTSUPP))
                                md->barrier_error = io_error;
                        end_io_acct(io);
                        free_io(md, io);
                } else {
                        end_io_acct(io);
                        free_io(md, io);

                        if (io_error != DM_ENDIO_REQUEUE) {
                                trace_block_bio_complete(md->queue, bio);

                                bio_endio(bio, io_error);
                        }
                }
        }
}
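/*
 * In short: a DM_ENDIO_REQUEUE result pushes the original bio back onto
 * md->deferred while a noflush suspend is in progress, a failed barrier
 * records its error in md->barrier_error instead of completing here,
 * and an ordinary bio only reaches bio_endio() once its last clone has
 * finished and it was not requeued.
 */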
670 static void clone_endio(struct bio
*bio
, int error
)
673 struct dm_target_io
*tio
= bio
->bi_private
;
674 struct dm_io
*io
= tio
->io
;
675 struct mapped_device
*md
= tio
->io
->md
;
676 dm_endio_fn endio
= tio
->ti
->type
->end_io
;
678 if (!bio_flagged(bio
, BIO_UPTODATE
) && !error
)
682 r
= endio(tio
->ti
, bio
, error
, &tio
->info
);
683 if (r
< 0 || r
== DM_ENDIO_REQUEUE
)
685 * error and requeue request are handled
689 else if (r
== DM_ENDIO_INCOMPLETE
)
690 /* The target will handle the io */
693 DMWARN("unimplemented target endio return value: %d", r
);
699 * Store md for cleanup instead of tio which is about to get freed.
701 bio
->bi_private
= md
->bs
;
705 dec_pending(io
, error
);
709 * Partial completion handling for request-based dm
711 static void end_clone_bio(struct bio
*clone
, int error
)
713 struct dm_rq_clone_bio_info
*info
= clone
->bi_private
;
714 struct dm_rq_target_io
*tio
= info
->tio
;
715 struct bio
*bio
= info
->orig
;
716 unsigned int nr_bytes
= info
->orig
->bi_size
;
722 * An error has already been detected on the request.
723 * Once error occurred, just let clone->end_io() handle
729 * Don't notice the error to the upper layer yet.
730 * The error handling decision is made by the target driver,
731 * when the request is completed.
738 * I/O for the bio successfully completed.
739 * Notice the data completion to the upper layer.
743 * bios are processed from the head of the list.
744 * So the completing bio should always be rq->bio.
745 * If it's not, something wrong is happening.
747 if (tio
->orig
->bio
!= bio
)
748 DMERR("bio completion is going in the middle of the request");
751 * Update the original request.
752 * Do not use blk_end_request() here, because it may complete
753 * the original request before the clone, and break the ordering.
755 blk_update_request(tio
->orig
, 0, nr_bytes
);
758 static void store_barrier_error(struct mapped_device
*md
, int error
)
762 spin_lock_irqsave(&md
->barrier_error_lock
, flags
);
764 * Basically, the first error is taken, but:
765 * -EOPNOTSUPP supersedes any I/O error.
766 * Requeue request supersedes any I/O error but -EOPNOTSUPP.
768 if (!md
->barrier_error
|| error
== -EOPNOTSUPP
||
769 (md
->barrier_error
!= -EOPNOTSUPP
&&
770 error
== DM_ENDIO_REQUEUE
))
771 md
->barrier_error
= error
;
772 spin_unlock_irqrestore(&md
->barrier_error_lock
, flags
);
776 * Don't touch any member of the md after calling this function because
777 * the md may be freed in dm_put() at the end of this function.
778 * Or do dm_get() before calling this function and dm_put() later.
780 static void rq_completed(struct mapped_device
*md
, int rw
, int run_queue
)
782 atomic_dec(&md
->pending
[rw
]);
784 /* nudge anyone waiting on suspend queue */
785 if (!md_in_flight(md
))
789 blk_run_queue(md
->queue
);
792 * dm_put() must be at the end of this function. See the comment above
797 static void free_rq_clone(struct request
*clone
)
799 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
801 blk_rq_unprep_clone(clone
);
806 * Complete the clone and the original request.
807 * Must be called without queue lock.
809 static void dm_end_request(struct request
*clone
, int error
)
811 int rw
= rq_data_dir(clone
);
813 bool is_barrier
= clone
->cmd_flags
& REQ_HARDBARRIER
;
814 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
815 struct mapped_device
*md
= tio
->md
;
816 struct request
*rq
= tio
->orig
;
818 if (rq
->cmd_type
== REQ_TYPE_BLOCK_PC
&& !is_barrier
) {
819 rq
->errors
= clone
->errors
;
820 rq
->resid_len
= clone
->resid_len
;
824 * We are using the sense buffer of the original
826 * So setting the length of the sense data is enough.
828 rq
->sense_len
= clone
->sense_len
;
831 free_rq_clone(clone
);
833 if (unlikely(is_barrier
)) {
835 store_barrier_error(md
, error
);
838 blk_end_request_all(rq
, error
);
840 rq_completed(md
, rw
, run_queue
);
843 static void dm_unprep_request(struct request
*rq
)
845 struct request
*clone
= rq
->special
;
848 rq
->cmd_flags
&= ~REQ_DONTPREP
;
850 free_rq_clone(clone
);
854 * Requeue the original request of a clone.
856 void dm_requeue_unmapped_request(struct request
*clone
)
858 int rw
= rq_data_dir(clone
);
859 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
860 struct mapped_device
*md
= tio
->md
;
861 struct request
*rq
= tio
->orig
;
862 struct request_queue
*q
= rq
->q
;
865 if (unlikely(clone
->cmd_flags
& REQ_HARDBARRIER
)) {
867 * Barrier clones share an original request.
868 * Leave it to dm_end_request(), which handles this special
871 dm_end_request(clone
, DM_ENDIO_REQUEUE
);
875 dm_unprep_request(rq
);
877 spin_lock_irqsave(q
->queue_lock
, flags
);
878 if (elv_queue_empty(q
))
880 blk_requeue_request(q
, rq
);
881 spin_unlock_irqrestore(q
->queue_lock
, flags
);
883 rq_completed(md
, rw
, 0);
885 EXPORT_SYMBOL_GPL(dm_requeue_unmapped_request
);
887 static void __stop_queue(struct request_queue
*q
)
892 static void stop_queue(struct request_queue
*q
)
896 spin_lock_irqsave(q
->queue_lock
, flags
);
898 spin_unlock_irqrestore(q
->queue_lock
, flags
);
901 static void __start_queue(struct request_queue
*q
)
903 if (blk_queue_stopped(q
))
907 static void start_queue(struct request_queue
*q
)
911 spin_lock_irqsave(q
->queue_lock
, flags
);
913 spin_unlock_irqrestore(q
->queue_lock
, flags
);
916 static void dm_done(struct request
*clone
, int error
, bool mapped
)
919 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
920 dm_request_endio_fn rq_end_io
= tio
->ti
->type
->rq_end_io
;
922 if (mapped
&& rq_end_io
)
923 r
= rq_end_io(tio
->ti
, clone
, error
, &tio
->info
);
926 /* The target wants to complete the I/O */
927 dm_end_request(clone
, r
);
928 else if (r
== DM_ENDIO_INCOMPLETE
)
929 /* The target will handle the I/O */
931 else if (r
== DM_ENDIO_REQUEUE
)
932 /* The target wants to requeue the I/O */
933 dm_requeue_unmapped_request(clone
);
935 DMWARN("unimplemented target endio return value: %d", r
);
941 * Request completion handler for request-based dm
943 static void dm_softirq_done(struct request
*rq
)
946 struct request
*clone
= rq
->completion_data
;
947 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
949 if (rq
->cmd_flags
& REQ_FAILED
)
952 dm_done(clone
, tio
->error
, mapped
);
956 * Complete the clone and the original request with the error status
957 * through softirq context.
959 static void dm_complete_request(struct request
*clone
, int error
)
961 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
962 struct request
*rq
= tio
->orig
;
964 if (unlikely(clone
->cmd_flags
& REQ_HARDBARRIER
)) {
966 * Barrier clones share an original request. So can't use
967 * softirq_done with the original.
968 * Pass the clone to dm_done() directly in this special case.
969 * It is safe (even if clone->q->queue_lock is held here)
970 * because there is no I/O dispatching during the completion
973 dm_done(clone
, error
, true);
978 rq
->completion_data
= clone
;
979 blk_complete_request(rq
);
983 * Complete the not-mapped clone and the original request with the error status
984 * through softirq context.
985 * Target's rq_end_io() function isn't called.
986 * This may be used when the target's map_rq() function fails.
988 void dm_kill_unmapped_request(struct request
*clone
, int error
)
990 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
991 struct request
*rq
= tio
->orig
;
993 if (unlikely(clone
->cmd_flags
& REQ_HARDBARRIER
)) {
995 * Barrier clones share an original request.
996 * Leave it to dm_end_request(), which handles this special
1000 dm_end_request(clone
, error
);
1004 rq
->cmd_flags
|= REQ_FAILED
;
1005 dm_complete_request(clone
, error
);
1007 EXPORT_SYMBOL_GPL(dm_kill_unmapped_request
);
1010 * Called with the queue lock held
1012 static void end_clone_request(struct request
*clone
, int error
)
1015 * For just cleaning up the information of the queue in which
1016 * the clone was dispatched.
1017 * The clone is *NOT* freed actually here because it is alloced from
1018 * dm own mempool and REQ_ALLOCED isn't set in clone->cmd_flags.
1020 __blk_put_request(clone
->q
, clone
);
1023 * Actual request completion is done in a softirq context which doesn't
1024 * hold the queue lock. Otherwise, deadlock could occur because:
1025 * - another request may be submitted by the upper level driver
1026 * of the stacking during the completion
1027 * - the submission which requires queue lock may be done
1028 * against this queue
1030 dm_complete_request(clone
, error
);
static sector_t max_io_len(struct mapped_device *md,
                           sector_t sector, struct dm_target *ti)
{
        sector_t offset = sector - ti->begin;
        sector_t len = ti->len - offset;

        /*
         * Does the target need to split even further ?
         */
        if (ti->split_io) {
                sector_t boundary;
                boundary = ((offset + ti->split_io) & ~(ti->split_io - 1))
                           - offset;
                if (len > boundary)
                        len = boundary;
        }

        return len;
}
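/*
 * Worked example (illustrative): with ti->split_io = 8 sectors and
 * offset = 13, the next chunk boundary is ((13 + 8) & ~7) = 16, so at
 * most 16 - 13 = 3 sectors may be issued before splitting; len is
 * clamped to that value when it is larger.
 */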
1053 static void __map_bio(struct dm_target
*ti
, struct bio
*clone
,
1054 struct dm_target_io
*tio
)
1058 struct mapped_device
*md
;
1060 clone
->bi_end_io
= clone_endio
;
1061 clone
->bi_private
= tio
;
1064 * Map the clone. If r == 0 we don't need to do
1065 * anything, the target has assumed ownership of
1068 atomic_inc(&tio
->io
->io_count
);
1069 sector
= clone
->bi_sector
;
1070 r
= ti
->type
->map(ti
, clone
, &tio
->info
);
1071 if (r
== DM_MAPIO_REMAPPED
) {
1072 /* the bio has been remapped so dispatch it */
1074 trace_block_remap(bdev_get_queue(clone
->bi_bdev
), clone
,
1075 tio
->io
->bio
->bi_bdev
->bd_dev
, sector
);
1077 generic_make_request(clone
);
1078 } else if (r
< 0 || r
== DM_MAPIO_REQUEUE
) {
1079 /* error the io and bail out, or requeue it if needed */
1081 dec_pending(tio
->io
, r
);
1083 * Store bio_set for cleanup.
1085 clone
->bi_private
= md
->bs
;
1089 DMWARN("unimplemented target map return value: %d", r
);
1095 struct mapped_device
*md
;
1096 struct dm_table
*map
;
1100 sector_t sector_count
;
1104 static void dm_bio_destructor(struct bio
*bio
)
1106 struct bio_set
*bs
= bio
->bi_private
;
1112 * Creates a little bio that is just does part of a bvec.
1114 static struct bio
*split_bvec(struct bio
*bio
, sector_t sector
,
1115 unsigned short idx
, unsigned int offset
,
1116 unsigned int len
, struct bio_set
*bs
)
1119 struct bio_vec
*bv
= bio
->bi_io_vec
+ idx
;
1121 clone
= bio_alloc_bioset(GFP_NOIO
, 1, bs
);
1122 clone
->bi_destructor
= dm_bio_destructor
;
1123 *clone
->bi_io_vec
= *bv
;
1125 clone
->bi_sector
= sector
;
1126 clone
->bi_bdev
= bio
->bi_bdev
;
1127 clone
->bi_rw
= bio
->bi_rw
& ~REQ_HARDBARRIER
;
1129 clone
->bi_size
= to_bytes(len
);
1130 clone
->bi_io_vec
->bv_offset
= offset
;
1131 clone
->bi_io_vec
->bv_len
= clone
->bi_size
;
1132 clone
->bi_flags
|= 1 << BIO_CLONED
;
1134 if (bio_integrity(bio
)) {
1135 bio_integrity_clone(clone
, bio
, GFP_NOIO
, bs
);
1136 bio_integrity_trim(clone
,
1137 bio_sector_offset(bio
, idx
, offset
), len
);
1144 * Creates a bio that consists of range of complete bvecs.
1146 static struct bio
*clone_bio(struct bio
*bio
, sector_t sector
,
1147 unsigned short idx
, unsigned short bv_count
,
1148 unsigned int len
, struct bio_set
*bs
)
1152 clone
= bio_alloc_bioset(GFP_NOIO
, bio
->bi_max_vecs
, bs
);
1153 __bio_clone(clone
, bio
);
1154 clone
->bi_rw
&= ~REQ_HARDBARRIER
;
1155 clone
->bi_destructor
= dm_bio_destructor
;
1156 clone
->bi_sector
= sector
;
1157 clone
->bi_idx
= idx
;
1158 clone
->bi_vcnt
= idx
+ bv_count
;
1159 clone
->bi_size
= to_bytes(len
);
1160 clone
->bi_flags
&= ~(1 << BIO_SEG_VALID
);
1162 if (bio_integrity(bio
)) {
1163 bio_integrity_clone(clone
, bio
, GFP_NOIO
, bs
);
1165 if (idx
!= bio
->bi_idx
|| clone
->bi_size
< bio
->bi_size
)
1166 bio_integrity_trim(clone
,
1167 bio_sector_offset(bio
, idx
, 0), len
);
1173 static struct dm_target_io
*alloc_tio(struct clone_info
*ci
,
1174 struct dm_target
*ti
)
1176 struct dm_target_io
*tio
= mempool_alloc(ci
->md
->tio_pool
, GFP_NOIO
);
1180 memset(&tio
->info
, 0, sizeof(tio
->info
));
1185 static void __flush_target(struct clone_info
*ci
, struct dm_target
*ti
,
1186 unsigned request_nr
)
1188 struct dm_target_io
*tio
= alloc_tio(ci
, ti
);
1191 tio
->info
.target_request_nr
= request_nr
;
1193 clone
= bio_alloc_bioset(GFP_NOIO
, 0, ci
->md
->bs
);
1194 __bio_clone(clone
, ci
->bio
);
1195 clone
->bi_destructor
= dm_bio_destructor
;
1197 __map_bio(ti
, clone
, tio
);
1200 static int __clone_and_map_empty_barrier(struct clone_info
*ci
)
1202 unsigned target_nr
= 0, request_nr
;
1203 struct dm_target
*ti
;
1205 while ((ti
= dm_table_get_target(ci
->map
, target_nr
++)))
1206 for (request_nr
= 0; request_nr
< ti
->num_flush_requests
;
1208 __flush_target(ci
, ti
, request_nr
);
1210 ci
->sector_count
= 0;
1216 * Perform all io with a single clone.
1218 static void __clone_and_map_simple(struct clone_info
*ci
, struct dm_target
*ti
)
1220 struct bio
*clone
, *bio
= ci
->bio
;
1221 struct dm_target_io
*tio
;
1223 tio
= alloc_tio(ci
, ti
);
1224 clone
= clone_bio(bio
, ci
->sector
, ci
->idx
,
1225 bio
->bi_vcnt
- ci
->idx
, ci
->sector_count
,
1227 __map_bio(ti
, clone
, tio
);
1228 ci
->sector_count
= 0;
1231 static int __clone_and_map_discard(struct clone_info
*ci
)
1233 struct dm_target
*ti
;
1236 ti
= dm_table_find_target(ci
->map
, ci
->sector
);
1237 if (!dm_target_is_valid(ti
))
1241 * Even though the device advertised discard support,
1242 * reconfiguration might have changed that since the
1243 * check was performed.
1246 if (!ti
->num_discard_requests
)
1249 max
= max_io_len(ci
->md
, ci
->sector
, ti
);
1251 if (ci
->sector_count
> max
)
1253 * FIXME: Handle a discard that spans two or more targets.
1257 __clone_and_map_simple(ci
, ti
);
1262 static int __clone_and_map(struct clone_info
*ci
)
1264 struct bio
*clone
, *bio
= ci
->bio
;
1265 struct dm_target
*ti
;
1266 sector_t len
= 0, max
;
1267 struct dm_target_io
*tio
;
1269 if (unlikely(bio_empty_barrier(bio
)))
1270 return __clone_and_map_empty_barrier(ci
);
1272 if (unlikely(bio
->bi_rw
& REQ_DISCARD
))
1273 return __clone_and_map_discard(ci
);
1275 ti
= dm_table_find_target(ci
->map
, ci
->sector
);
1276 if (!dm_target_is_valid(ti
))
1279 max
= max_io_len(ci
->md
, ci
->sector
, ti
);
1281 if (ci
->sector_count
<= max
) {
1283 * Optimise for the simple case where we can do all of
1284 * the remaining io with a single clone.
1286 __clone_and_map_simple(ci
, ti
);
1288 } else if (to_sector(bio
->bi_io_vec
[ci
->idx
].bv_len
) <= max
) {
1290 * There are some bvecs that don't span targets.
1291 * Do as many of these as possible.
1294 sector_t remaining
= max
;
1297 for (i
= ci
->idx
; remaining
&& (i
< bio
->bi_vcnt
); i
++) {
1298 bv_len
= to_sector(bio
->bi_io_vec
[i
].bv_len
);
1300 if (bv_len
> remaining
)
1303 remaining
-= bv_len
;
1307 tio
= alloc_tio(ci
, ti
);
1308 clone
= clone_bio(bio
, ci
->sector
, ci
->idx
, i
- ci
->idx
, len
,
1310 __map_bio(ti
, clone
, tio
);
1313 ci
->sector_count
-= len
;
1318 * Handle a bvec that must be split between two or more targets.
1320 struct bio_vec
*bv
= bio
->bi_io_vec
+ ci
->idx
;
1321 sector_t remaining
= to_sector(bv
->bv_len
);
1322 unsigned int offset
= 0;
1326 ti
= dm_table_find_target(ci
->map
, ci
->sector
);
1327 if (!dm_target_is_valid(ti
))
1330 max
= max_io_len(ci
->md
, ci
->sector
, ti
);
1333 len
= min(remaining
, max
);
1335 tio
= alloc_tio(ci
, ti
);
1336 clone
= split_bvec(bio
, ci
->sector
, ci
->idx
,
1337 bv
->bv_offset
+ offset
, len
,
1340 __map_bio(ti
, clone
, tio
);
1343 ci
->sector_count
-= len
;
1344 offset
+= to_bytes(len
);
1345 } while (remaining
-= len
);
1354 * Split the bio into several clones and submit it to targets.
1356 static void __split_and_process_bio(struct mapped_device
*md
, struct bio
*bio
)
1358 struct clone_info ci
;
1361 ci
.map
= dm_get_live_table(md
);
1362 if (unlikely(!ci
.map
)) {
1363 if (!(bio
->bi_rw
& REQ_HARDBARRIER
))
1366 if (!md
->barrier_error
)
1367 md
->barrier_error
= -EIO
;
1373 ci
.io
= alloc_io(md
);
1375 atomic_set(&ci
.io
->io_count
, 1);
1378 spin_lock_init(&ci
.io
->endio_lock
);
1379 ci
.sector
= bio
->bi_sector
;
1380 ci
.sector_count
= bio_sectors(bio
);
1381 if (unlikely(bio_empty_barrier(bio
)))
1382 ci
.sector_count
= 1;
1383 ci
.idx
= bio
->bi_idx
;
1385 start_io_acct(ci
.io
);
1386 while (ci
.sector_count
&& !error
)
1387 error
= __clone_and_map(&ci
);
1389 /* drop the extra reference count */
1390 dec_pending(ci
.io
, error
);
1391 dm_table_put(ci
.map
);
1393 /*-----------------------------------------------------------------
1395 *---------------------------------------------------------------*/
1397 static int dm_merge_bvec(struct request_queue
*q
,
1398 struct bvec_merge_data
*bvm
,
1399 struct bio_vec
*biovec
)
1401 struct mapped_device
*md
= q
->queuedata
;
1402 struct dm_table
*map
= dm_get_live_table(md
);
1403 struct dm_target
*ti
;
1404 sector_t max_sectors
;
1410 ti
= dm_table_find_target(map
, bvm
->bi_sector
);
1411 if (!dm_target_is_valid(ti
))
1415 * Find maximum amount of I/O that won't need splitting
1417 max_sectors
= min(max_io_len(md
, bvm
->bi_sector
, ti
),
1418 (sector_t
) BIO_MAX_SECTORS
);
1419 max_size
= (max_sectors
<< SECTOR_SHIFT
) - bvm
->bi_size
;
1424 * merge_bvec_fn() returns number of bytes
1425 * it can accept at this offset
1426 * max is precomputed maximal io size
1428 if (max_size
&& ti
->type
->merge
)
1429 max_size
= ti
->type
->merge(ti
, bvm
, biovec
, max_size
);
1431 * If the target doesn't support merge method and some of the devices
1432 * provided their merge_bvec method (we know this by looking at
1433 * queue_max_hw_sectors), then we can't allow bios with multiple vector
1434 * entries. So always set max_size to 0, and the code below allows
1437 else if (queue_max_hw_sectors(q
) <= PAGE_SIZE
>> 9)
1446 * Always allow an entire first page
1448 if (max_size
<= biovec
->bv_len
&& !(bvm
->bi_size
>> SECTOR_SHIFT
))
1449 max_size
= biovec
->bv_len
;
1455 * The request function that just remaps the bio built up by
1458 static int _dm_request(struct request_queue
*q
, struct bio
*bio
)
1460 int rw
= bio_data_dir(bio
);
1461 struct mapped_device
*md
= q
->queuedata
;
1464 down_read(&md
->io_lock
);
1466 cpu
= part_stat_lock();
1467 part_stat_inc(cpu
, &dm_disk(md
)->part0
, ios
[rw
]);
1468 part_stat_add(cpu
, &dm_disk(md
)->part0
, sectors
[rw
], bio_sectors(bio
));
1472 * If we're suspended or the thread is processing barriers
1473 * we have to queue this io for later.
1475 if (unlikely(test_bit(DMF_QUEUE_IO_TO_THREAD
, &md
->flags
)) ||
1476 unlikely(bio
->bi_rw
& REQ_HARDBARRIER
)) {
1477 up_read(&md
->io_lock
);
1479 if (unlikely(test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
)) &&
1480 bio_rw(bio
) == READA
) {
1490 __split_and_process_bio(md
, bio
);
1491 up_read(&md
->io_lock
);
static int dm_make_request(struct request_queue *q, struct bio *bio)
{
        struct mapped_device *md = q->queuedata;

        return md->saved_make_request_fn(q, bio); /* call __make_request() */
}

static int dm_request_based(struct mapped_device *md)
{
        return blk_queue_stackable(md->queue);
}

static int dm_request(struct request_queue *q, struct bio *bio)
{
        struct mapped_device *md = q->queuedata;

        if (dm_request_based(md))
                return dm_make_request(q, bio);

        return _dm_request(q, bio);
}

static bool dm_rq_is_flush_request(struct request *rq)
{
        if (rq->cmd_flags & REQ_FLUSH)
                return true;
        else
                return false;
}
1525 void dm_dispatch_request(struct request
*rq
)
1529 if (blk_queue_io_stat(rq
->q
))
1530 rq
->cmd_flags
|= REQ_IO_STAT
;
1532 rq
->start_time
= jiffies
;
1533 r
= blk_insert_cloned_request(rq
->q
, rq
);
1535 dm_complete_request(rq
, r
);
1537 EXPORT_SYMBOL_GPL(dm_dispatch_request
);
1539 static void dm_rq_bio_destructor(struct bio
*bio
)
1541 struct dm_rq_clone_bio_info
*info
= bio
->bi_private
;
1542 struct mapped_device
*md
= info
->tio
->md
;
1544 free_bio_info(info
);
1545 bio_free(bio
, md
->bs
);
1548 static int dm_rq_bio_constructor(struct bio
*bio
, struct bio
*bio_orig
,
1551 struct dm_rq_target_io
*tio
= data
;
1552 struct mapped_device
*md
= tio
->md
;
1553 struct dm_rq_clone_bio_info
*info
= alloc_bio_info(md
);
1558 info
->orig
= bio_orig
;
1560 bio
->bi_end_io
= end_clone_bio
;
1561 bio
->bi_private
= info
;
1562 bio
->bi_destructor
= dm_rq_bio_destructor
;
1567 static int setup_clone(struct request
*clone
, struct request
*rq
,
1568 struct dm_rq_target_io
*tio
)
1572 if (dm_rq_is_flush_request(rq
)) {
1573 blk_rq_init(NULL
, clone
);
1574 clone
->cmd_type
= REQ_TYPE_FS
;
1575 clone
->cmd_flags
|= (REQ_HARDBARRIER
| WRITE
);
1577 r
= blk_rq_prep_clone(clone
, rq
, tio
->md
->bs
, GFP_ATOMIC
,
1578 dm_rq_bio_constructor
, tio
);
1582 clone
->cmd
= rq
->cmd
;
1583 clone
->cmd_len
= rq
->cmd_len
;
1584 clone
->sense
= rq
->sense
;
1585 clone
->buffer
= rq
->buffer
;
1588 clone
->end_io
= end_clone_request
;
1589 clone
->end_io_data
= tio
;
1594 static struct request
*clone_rq(struct request
*rq
, struct mapped_device
*md
,
1597 struct request
*clone
;
1598 struct dm_rq_target_io
*tio
;
1600 tio
= alloc_rq_tio(md
, gfp_mask
);
1608 memset(&tio
->info
, 0, sizeof(tio
->info
));
1610 clone
= &tio
->clone
;
1611 if (setup_clone(clone
, rq
, tio
)) {
1621 * Called with the queue lock held.
1623 static int dm_prep_fn(struct request_queue
*q
, struct request
*rq
)
1625 struct mapped_device
*md
= q
->queuedata
;
1626 struct request
*clone
;
1628 if (unlikely(dm_rq_is_flush_request(rq
)))
1631 if (unlikely(rq
->special
)) {
1632 DMWARN("Already has something in rq->special.");
1633 return BLKPREP_KILL
;
1636 clone
= clone_rq(rq
, md
, GFP_ATOMIC
);
1638 return BLKPREP_DEFER
;
1640 rq
->special
= clone
;
1641 rq
->cmd_flags
|= REQ_DONTPREP
;
1648 * 0 : the request has been processed (not requeued)
1649 * !0 : the request has been requeued
1651 static int map_request(struct dm_target
*ti
, struct request
*clone
,
1652 struct mapped_device
*md
)
1654 int r
, requeued
= 0;
1655 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
1658 * Hold the md reference here for the in-flight I/O.
1659 * We can't rely on the reference count by device opener,
1660 * because the device may be closed during the request completion
1661 * when all bios are completed.
1662 * See the comment in rq_completed() too.
1667 r
= ti
->type
->map_rq(ti
, clone
, &tio
->info
);
1669 case DM_MAPIO_SUBMITTED
:
1670 /* The target has taken the I/O to submit by itself later */
1672 case DM_MAPIO_REMAPPED
:
1673 /* The target has remapped the I/O so dispatch it */
1674 trace_block_rq_remap(clone
->q
, clone
, disk_devt(dm_disk(md
)),
1675 blk_rq_pos(tio
->orig
));
1676 dm_dispatch_request(clone
);
1678 case DM_MAPIO_REQUEUE
:
1679 /* The target wants to requeue the I/O */
1680 dm_requeue_unmapped_request(clone
);
1685 DMWARN("unimplemented target map return value: %d", r
);
1689 /* The target wants to complete the I/O */
1690 dm_kill_unmapped_request(clone
, r
);
1698 * q->request_fn for request-based dm.
1699 * Called with the queue lock held.
1701 static void dm_request_fn(struct request_queue
*q
)
1703 struct mapped_device
*md
= q
->queuedata
;
1704 struct dm_table
*map
= dm_get_live_table(md
);
1705 struct dm_target
*ti
;
1706 struct request
*rq
, *clone
;
1709 * For suspend, check blk_queue_stopped() and increment
1710 * ->pending within a single queue_lock not to increment the
1711 * number of in-flight I/Os after the queue is stopped in
1714 while (!blk_queue_plugged(q
) && !blk_queue_stopped(q
)) {
1715 rq
= blk_peek_request(q
);
1719 if (unlikely(dm_rq_is_flush_request(rq
))) {
1720 BUG_ON(md
->flush_request
);
1721 md
->flush_request
= rq
;
1722 blk_start_request(rq
);
1723 queue_work(md
->wq
, &md
->barrier_work
);
1727 ti
= dm_table_find_target(map
, blk_rq_pos(rq
));
1728 if (ti
->type
->busy
&& ti
->type
->busy(ti
))
1731 blk_start_request(rq
);
1732 clone
= rq
->special
;
1733 atomic_inc(&md
->pending
[rq_data_dir(clone
)]);
1735 spin_unlock(q
->queue_lock
);
1736 if (map_request(ti
, clone
, md
))
1739 spin_lock_irq(q
->queue_lock
);
1745 spin_lock_irq(q
->queue_lock
);
1748 if (!elv_queue_empty(q
))
1749 /* Some requests still remain, retry later */
1758 int dm_underlying_device_busy(struct request_queue
*q
)
1760 return blk_lld_busy(q
);
1762 EXPORT_SYMBOL_GPL(dm_underlying_device_busy
);
1764 static int dm_lld_busy(struct request_queue
*q
)
1767 struct mapped_device
*md
= q
->queuedata
;
1768 struct dm_table
*map
= dm_get_live_table(md
);
1770 if (!map
|| test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
))
1773 r
= dm_table_any_busy_target(map
);
1780 static void dm_unplug_all(struct request_queue
*q
)
1782 struct mapped_device
*md
= q
->queuedata
;
1783 struct dm_table
*map
= dm_get_live_table(md
);
1786 if (dm_request_based(md
))
1787 generic_unplug_device(q
);
1789 dm_table_unplug_all(map
);
1794 static int dm_any_congested(void *congested_data
, int bdi_bits
)
1797 struct mapped_device
*md
= congested_data
;
1798 struct dm_table
*map
;
1800 if (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
)) {
1801 map
= dm_get_live_table(md
);
1804 * Request-based dm cares about only own queue for
1805 * the query about congestion status of request_queue
1807 if (dm_request_based(md
))
1808 r
= md
->queue
->backing_dev_info
.state
&
1811 r
= dm_table_any_congested(map
, bdi_bits
);
1820 /*-----------------------------------------------------------------
1821 * An IDR is used to keep track of allocated minor numbers.
1822 *---------------------------------------------------------------*/
1823 static DEFINE_IDR(_minor_idr
);
1825 static void free_minor(int minor
)
1827 spin_lock(&_minor_lock
);
1828 idr_remove(&_minor_idr
, minor
);
1829 spin_unlock(&_minor_lock
);
1833 * See if the device with a specific minor # is free.
1835 static int specific_minor(int minor
)
1839 if (minor
>= (1 << MINORBITS
))
1842 r
= idr_pre_get(&_minor_idr
, GFP_KERNEL
);
1846 spin_lock(&_minor_lock
);
1848 if (idr_find(&_minor_idr
, minor
)) {
1853 r
= idr_get_new_above(&_minor_idr
, MINOR_ALLOCED
, minor
, &m
);
1858 idr_remove(&_minor_idr
, m
);
1864 spin_unlock(&_minor_lock
);
1868 static int next_free_minor(int *minor
)
1872 r
= idr_pre_get(&_minor_idr
, GFP_KERNEL
);
1876 spin_lock(&_minor_lock
);
1878 r
= idr_get_new(&_minor_idr
, MINOR_ALLOCED
, &m
);
1882 if (m
>= (1 << MINORBITS
)) {
1883 idr_remove(&_minor_idr
, m
);
1891 spin_unlock(&_minor_lock
);
1895 static const struct block_device_operations dm_blk_dops
;
1897 static void dm_wq_work(struct work_struct
*work
);
1898 static void dm_rq_barrier_work(struct work_struct
*work
);
1900 static void dm_init_md_queue(struct mapped_device
*md
)
1903 * Request-based dm devices cannot be stacked on top of bio-based dm
1904 * devices. The type of this dm device has not been decided yet.
1905 * The type is decided at the first table loading time.
1906 * To prevent problematic device stacking, clear the queue flag
1907 * for request stacking support until then.
1909 * This queue is new, so no concurrency on the queue_flags.
1911 queue_flag_clear_unlocked(QUEUE_FLAG_STACKABLE
, md
->queue
);
1913 md
->queue
->queuedata
= md
;
1914 md
->queue
->backing_dev_info
.congested_fn
= dm_any_congested
;
1915 md
->queue
->backing_dev_info
.congested_data
= md
;
1916 blk_queue_make_request(md
->queue
, dm_request
);
1917 blk_queue_bounce_limit(md
->queue
, BLK_BOUNCE_ANY
);
1918 md
->queue
->unplug_fn
= dm_unplug_all
;
1919 blk_queue_merge_bvec(md
->queue
, dm_merge_bvec
);
1923 * Allocate and initialise a blank device with a given minor.
1925 static struct mapped_device
*alloc_dev(int minor
)
1928 struct mapped_device
*md
= kzalloc(sizeof(*md
), GFP_KERNEL
);
1932 DMWARN("unable to allocate device, out of memory.");
1936 if (!try_module_get(THIS_MODULE
))
1937 goto bad_module_get
;
1939 /* get a minor number for the dev */
1940 if (minor
== DM_ANY_MINOR
)
1941 r
= next_free_minor(&minor
);
1943 r
= specific_minor(minor
);
1947 md
->type
= DM_TYPE_NONE
;
1948 init_rwsem(&md
->io_lock
);
1949 mutex_init(&md
->suspend_lock
);
1950 mutex_init(&md
->type_lock
);
1951 spin_lock_init(&md
->deferred_lock
);
1952 spin_lock_init(&md
->barrier_error_lock
);
1953 rwlock_init(&md
->map_lock
);
1954 atomic_set(&md
->holders
, 1);
1955 atomic_set(&md
->open_count
, 0);
1956 atomic_set(&md
->event_nr
, 0);
1957 atomic_set(&md
->uevent_seq
, 0);
1958 INIT_LIST_HEAD(&md
->uevent_list
);
1959 spin_lock_init(&md
->uevent_lock
);
1961 md
->queue
= blk_alloc_queue(GFP_KERNEL
);
1965 dm_init_md_queue(md
);
1967 md
->disk
= alloc_disk(1);
1971 atomic_set(&md
->pending
[0], 0);
1972 atomic_set(&md
->pending
[1], 0);
1973 init_waitqueue_head(&md
->wait
);
1974 INIT_WORK(&md
->work
, dm_wq_work
);
1975 INIT_WORK(&md
->barrier_work
, dm_rq_barrier_work
);
1976 init_waitqueue_head(&md
->eventq
);
1978 md
->disk
->major
= _major
;
1979 md
->disk
->first_minor
= minor
;
1980 md
->disk
->fops
= &dm_blk_dops
;
1981 md
->disk
->queue
= md
->queue
;
1982 md
->disk
->private_data
= md
;
1983 sprintf(md
->disk
->disk_name
, "dm-%d", minor
);
1985 format_dev_t(md
->name
, MKDEV(_major
, minor
));
1987 md
->wq
= create_singlethread_workqueue("kdmflush");
1991 md
->bdev
= bdget_disk(md
->disk
, 0);
1995 /* Populate the mapping, nobody knows we exist yet */
1996 spin_lock(&_minor_lock
);
1997 old_md
= idr_replace(&_minor_idr
, md
, minor
);
1998 spin_unlock(&_minor_lock
);
2000 BUG_ON(old_md
!= MINOR_ALLOCED
);
2005 destroy_workqueue(md
->wq
);
2007 del_gendisk(md
->disk
);
2010 blk_cleanup_queue(md
->queue
);
2014 module_put(THIS_MODULE
);
2020 static void unlock_fs(struct mapped_device
*md
);
2022 static void free_dev(struct mapped_device
*md
)
2024 int minor
= MINOR(disk_devt(md
->disk
));
2028 destroy_workqueue(md
->wq
);
2030 mempool_destroy(md
->tio_pool
);
2032 mempool_destroy(md
->io_pool
);
2034 bioset_free(md
->bs
);
2035 blk_integrity_unregister(md
->disk
);
2036 del_gendisk(md
->disk
);
2039 spin_lock(&_minor_lock
);
2040 md
->disk
->private_data
= NULL
;
2041 spin_unlock(&_minor_lock
);
2044 blk_cleanup_queue(md
->queue
);
2045 module_put(THIS_MODULE
);
2049 static void __bind_mempools(struct mapped_device
*md
, struct dm_table
*t
)
2051 struct dm_md_mempools
*p
;
2053 if (md
->io_pool
&& md
->tio_pool
&& md
->bs
)
2054 /* the md already has necessary mempools */
2057 p
= dm_table_get_md_mempools(t
);
2058 BUG_ON(!p
|| md
->io_pool
|| md
->tio_pool
|| md
->bs
);
2060 md
->io_pool
= p
->io_pool
;
2062 md
->tio_pool
= p
->tio_pool
;
2068 /* mempool bind completed, now no need any mempools in the table */
2069 dm_table_free_md_mempools(t
);
2073 * Bind a table to the device.
2075 static void event_callback(void *context
)
2077 unsigned long flags
;
2079 struct mapped_device
*md
= (struct mapped_device
*) context
;
2081 spin_lock_irqsave(&md
->uevent_lock
, flags
);
2082 list_splice_init(&md
->uevent_list
, &uevents
);
2083 spin_unlock_irqrestore(&md
->uevent_lock
, flags
);
2085 dm_send_uevents(&uevents
, &disk_to_dev(md
->disk
)->kobj
);
2087 atomic_inc(&md
->event_nr
);
2088 wake_up(&md
->eventq
);
2091 static void __set_size(struct mapped_device
*md
, sector_t size
)
2093 set_capacity(md
->disk
, size
);
2095 mutex_lock(&md
->bdev
->bd_inode
->i_mutex
);
2096 i_size_write(md
->bdev
->bd_inode
, (loff_t
)size
<< SECTOR_SHIFT
);
2097 mutex_unlock(&md
->bdev
->bd_inode
->i_mutex
);
2101 * Returns old map, which caller must destroy.
2103 static struct dm_table
*__bind(struct mapped_device
*md
, struct dm_table
*t
,
2104 struct queue_limits
*limits
)
2106 struct dm_table
*old_map
;
2107 struct request_queue
*q
= md
->queue
;
2109 unsigned long flags
;
2111 size
= dm_table_get_size(t
);
2114 * Wipe any geometry if the size of the table changed.
2116 if (size
!= get_capacity(md
->disk
))
2117 memset(&md
->geometry
, 0, sizeof(md
->geometry
));
2119 __set_size(md
, size
);
2121 dm_table_event_callback(t
, event_callback
, md
);
2124 * The queue hasn't been stopped yet, if the old table type wasn't
2125 * for request-based during suspension. So stop it to prevent
2126 * I/O mapping before resume.
2127 * This must be done before setting the queue restrictions,
2128 * because request-based dm may be run just after the setting.
2130 if (dm_table_request_based(t
) && !blk_queue_stopped(q
))
2133 __bind_mempools(md
, t
);
2135 write_lock_irqsave(&md
->map_lock
, flags
);
2138 dm_table_set_restrictions(t
, q
, limits
);
2139 write_unlock_irqrestore(&md
->map_lock
, flags
);
2145 * Returns unbound table for the caller to free.
2147 static struct dm_table
*__unbind(struct mapped_device
*md
)
2149 struct dm_table
*map
= md
->map
;
2150 unsigned long flags
;
2155 dm_table_event_callback(map
, NULL
, NULL
);
2156 write_lock_irqsave(&md
->map_lock
, flags
);
2158 write_unlock_irqrestore(&md
->map_lock
, flags
);
2164 * Constructor for a new device.
2166 int dm_create(int minor
, struct mapped_device
**result
)
2168 struct mapped_device
*md
;
2170 md
= alloc_dev(minor
);
2181 * Functions to manage md->type.
2182 * All are required to hold md->type_lock.
2184 void dm_lock_md_type(struct mapped_device
*md
)
2186 mutex_lock(&md
->type_lock
);
2189 void dm_unlock_md_type(struct mapped_device
*md
)
2191 mutex_unlock(&md
->type_lock
);
2194 void dm_set_md_type(struct mapped_device
*md
, unsigned type
)
2199 unsigned dm_get_md_type(struct mapped_device
*md
)
2205 * Fully initialize a request-based queue (->elevator, ->request_fn, etc).
2207 static int dm_init_request_based_queue(struct mapped_device
*md
)
2209 struct request_queue
*q
= NULL
;
2211 if (md
->queue
->elevator
)
2214 /* Fully initialize the queue */
2215 q
= blk_init_allocated_queue(md
->queue
, dm_request_fn
, NULL
);
2220 md
->saved_make_request_fn
= md
->queue
->make_request_fn
;
2221 dm_init_md_queue(md
);
2222 blk_queue_softirq_done(md
->queue
, dm_softirq_done
);
2223 blk_queue_prep_rq(md
->queue
, dm_prep_fn
);
2224 blk_queue_lld_busy(md
->queue
, dm_lld_busy
);
2225 blk_queue_ordered(md
->queue
, QUEUE_ORDERED_DRAIN_FLUSH
);
2227 elv_register_queue(md
->queue
);
2233 * Setup the DM device's queue based on md's type
2235 int dm_setup_md_queue(struct mapped_device
*md
)
2237 if ((dm_get_md_type(md
) == DM_TYPE_REQUEST_BASED
) &&
2238 !dm_init_request_based_queue(md
)) {
2239 DMWARN("Cannot initialize queue for request-based mapped device");
2246 static struct mapped_device
*dm_find_md(dev_t dev
)
2248 struct mapped_device
*md
;
2249 unsigned minor
= MINOR(dev
);
2251 if (MAJOR(dev
) != _major
|| minor
>= (1 << MINORBITS
))
2254 spin_lock(&_minor_lock
);
2256 md
= idr_find(&_minor_idr
, minor
);
2257 if (md
&& (md
== MINOR_ALLOCED
||
2258 (MINOR(disk_devt(dm_disk(md
))) != minor
) ||
2259 dm_deleting_md(md
) ||
2260 test_bit(DMF_FREEING
, &md
->flags
))) {
2266 spin_unlock(&_minor_lock
);
2271 struct mapped_device
*dm_get_md(dev_t dev
)
2273 struct mapped_device
*md
= dm_find_md(dev
);
2281 void *dm_get_mdptr(struct mapped_device
*md
)
2283 return md
->interface_ptr
;
2286 void dm_set_mdptr(struct mapped_device
*md
, void *ptr
)
2288 md
->interface_ptr
= ptr
;
2291 void dm_get(struct mapped_device
*md
)
2293 atomic_inc(&md
->holders
);
2294 BUG_ON(test_bit(DMF_FREEING
, &md
->flags
));
2297 const char *dm_device_name(struct mapped_device
*md
)
2301 EXPORT_SYMBOL_GPL(dm_device_name
);
2303 static void __dm_destroy(struct mapped_device
*md
, bool wait
)
2305 struct dm_table
*map
;
2309 spin_lock(&_minor_lock
);
2310 map
= dm_get_live_table(md
);
2311 idr_replace(&_minor_idr
, MINOR_ALLOCED
, MINOR(disk_devt(dm_disk(md
))));
2312 set_bit(DMF_FREEING
, &md
->flags
);
2313 spin_unlock(&_minor_lock
);
2315 if (!dm_suspended_md(md
)) {
2316 dm_table_presuspend_targets(map
);
2317 dm_table_postsuspend_targets(map
);
2321 * Rare, but there may be I/O requests still going to complete,
2322 * for example. Wait for all references to disappear.
2323 * No one should increment the reference count of the mapped_device,
2324 * after the mapped_device state becomes DMF_FREEING.
2327 while (atomic_read(&md
->holders
))
2329 else if (atomic_read(&md
->holders
))
2330 DMWARN("%s: Forcibly removing mapped_device still in use! (%d users)",
2331 dm_device_name(md
), atomic_read(&md
->holders
));
2335 dm_table_destroy(__unbind(md
));
2339 void dm_destroy(struct mapped_device
*md
)
2341 __dm_destroy(md
, true);
2344 void dm_destroy_immediate(struct mapped_device
*md
)
2346 __dm_destroy(md
, false);
2349 void dm_put(struct mapped_device
*md
)
2351 atomic_dec(&md
->holders
);
2353 EXPORT_SYMBOL_GPL(dm_put
);
2355 static int dm_wait_for_completion(struct mapped_device
*md
, int interruptible
)
2358 DECLARE_WAITQUEUE(wait
, current
);
2360 dm_unplug_all(md
->queue
);
2362 add_wait_queue(&md
->wait
, &wait
);
2365 set_current_state(interruptible
);
2368 if (!md_in_flight(md
))
2371 if (interruptible
== TASK_INTERRUPTIBLE
&&
2372 signal_pending(current
)) {
2379 set_current_state(TASK_RUNNING
);
2381 remove_wait_queue(&md
->wait
, &wait
);
2386 static void dm_flush(struct mapped_device
*md
)
2388 dm_wait_for_completion(md
, TASK_UNINTERRUPTIBLE
);
2390 bio_init(&md
->barrier_bio
);
2391 md
->barrier_bio
.bi_bdev
= md
->bdev
;
2392 md
->barrier_bio
.bi_rw
= WRITE_BARRIER
;
2393 __split_and_process_bio(md
, &md
->barrier_bio
);
2395 dm_wait_for_completion(md
, TASK_UNINTERRUPTIBLE
);
2398 static void process_barrier(struct mapped_device
*md
, struct bio
*bio
)
2400 md
->barrier_error
= 0;
2404 if (!bio_empty_barrier(bio
)) {
2405 __split_and_process_bio(md
, bio
);
2407 * If the request isn't supported, don't waste time with
2410 if (md
->barrier_error
!= -EOPNOTSUPP
)
2414 if (md
->barrier_error
!= DM_ENDIO_REQUEUE
)
2415 bio_endio(bio
, md
->barrier_error
);
2417 spin_lock_irq(&md
->deferred_lock
);
2418 bio_list_add_head(&md
->deferred
, bio
);
2419 spin_unlock_irq(&md
->deferred_lock
);
2424 * Process the deferred bios
2426 static void dm_wq_work(struct work_struct
*work
)
2428 struct mapped_device
*md
= container_of(work
, struct mapped_device
,
2432 down_write(&md
->io_lock
);
2434 while (!test_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
)) {
2435 spin_lock_irq(&md
->deferred_lock
);
2436 c
= bio_list_pop(&md
->deferred
);
2437 spin_unlock_irq(&md
->deferred_lock
);
2440 clear_bit(DMF_QUEUE_IO_TO_THREAD
, &md
->flags
);
2444 up_write(&md
->io_lock
);
2446 if (dm_request_based(md
))
2447 generic_make_request(c
);
2449 if (c
->bi_rw
& REQ_HARDBARRIER
)
2450 process_barrier(md
, c
);
2452 __split_and_process_bio(md
, c
);
2455 down_write(&md
->io_lock
);
2458 up_write(&md
->io_lock
);
2461 static void dm_queue_flush(struct mapped_device
*md
)
2463 clear_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
);
2464 smp_mb__after_clear_bit();
2465 queue_work(md
->wq
, &md
->work
);
2468 static void dm_rq_set_target_request_nr(struct request
*clone
, unsigned request_nr
)
2470 struct dm_rq_target_io
*tio
= clone
->end_io_data
;
2472 tio
->info
.target_request_nr
= request_nr
;
2475 /* Issue barrier requests to targets and wait for their completion. */
2476 static int dm_rq_barrier(struct mapped_device
*md
)
2479 struct dm_table
*map
= dm_get_live_table(md
);
2480 unsigned num_targets
= dm_table_get_num_targets(map
);
2481 struct dm_target
*ti
;
2482 struct request
*clone
;
2484 md
->barrier_error
= 0;
2486 for (i
= 0; i
< num_targets
; i
++) {
2487 ti
= dm_table_get_target(map
, i
);
2488 for (j
= 0; j
< ti
->num_flush_requests
; j
++) {
2489 clone
= clone_rq(md
->flush_request
, md
, GFP_NOIO
);
2490 dm_rq_set_target_request_nr(clone
, j
);
2491 atomic_inc(&md
->pending
[rq_data_dir(clone
)]);
2492 map_request(ti
, clone
, md
);
2496 dm_wait_for_completion(md
, TASK_UNINTERRUPTIBLE
);
2499 return md
->barrier_error
;
2502 static void dm_rq_barrier_work(struct work_struct
*work
)
2505 struct mapped_device
*md
= container_of(work
, struct mapped_device
,
2507 struct request_queue
*q
= md
->queue
;
2509 unsigned long flags
;
2512 * Hold the md reference here and leave it at the last part so that
2513 * the md can't be deleted by device opener when the barrier request
2518 error
= dm_rq_barrier(md
);
2520 rq
= md
->flush_request
;
2521 md
->flush_request
= NULL
;
2523 if (error
== DM_ENDIO_REQUEUE
) {
2524 spin_lock_irqsave(q
->queue_lock
, flags
);
2525 blk_requeue_request(q
, rq
);
2526 spin_unlock_irqrestore(q
->queue_lock
, flags
);
2528 blk_end_request_all(rq
, error
);
2536 * Swap in a new table, returning the old one for the caller to destroy.
2538 struct dm_table
*dm_swap_table(struct mapped_device
*md
, struct dm_table
*table
)
2540 struct dm_table
*map
= ERR_PTR(-EINVAL
);
2541 struct queue_limits limits
;
2544 mutex_lock(&md
->suspend_lock
);
2546 /* device must be suspended */
2547 if (!dm_suspended_md(md
))
2550 r
= dm_calculate_queue_limits(table
, &limits
);
2556 map
= __bind(md
, table
, &limits
);
2559 mutex_unlock(&md
->suspend_lock
);
2564 * Functions to lock and unlock any filesystem running on the
2567 static int lock_fs(struct mapped_device
*md
)
2571 WARN_ON(md
->frozen_sb
);
2573 md
->frozen_sb
= freeze_bdev(md
->bdev
);
2574 if (IS_ERR(md
->frozen_sb
)) {
2575 r
= PTR_ERR(md
->frozen_sb
);
2576 md
->frozen_sb
= NULL
;
2580 set_bit(DMF_FROZEN
, &md
->flags
);
2585 static void unlock_fs(struct mapped_device
*md
)
2587 if (!test_bit(DMF_FROZEN
, &md
->flags
))
2590 thaw_bdev(md
->bdev
, md
->frozen_sb
);
2591 md
->frozen_sb
= NULL
;
2592 clear_bit(DMF_FROZEN
, &md
->flags
);
2596 * We need to be able to change a mapping table under a mounted
2597 * filesystem. For example we might want to move some data in
2598 * the background. Before the table can be swapped with
2599 * dm_bind_table, dm_suspend must be called to flush any in
2600 * flight bios and ensure that any further io gets deferred.
2603 * Suspend mechanism in request-based dm.
2605 * 1. Flush all I/Os by lock_fs() if needed.
2606 * 2. Stop dispatching any I/O by stopping the request_queue.
2607 * 3. Wait for all in-flight I/Os to be completed or requeued.
2609 * To abort suspend, start the request_queue.
2611 int dm_suspend(struct mapped_device
*md
, unsigned suspend_flags
)
2613 struct dm_table
*map
= NULL
;
2615 int do_lockfs
= suspend_flags
& DM_SUSPEND_LOCKFS_FLAG
? 1 : 0;
2616 int noflush
= suspend_flags
& DM_SUSPEND_NOFLUSH_FLAG
? 1 : 0;
2618 mutex_lock(&md
->suspend_lock
);
2620 if (dm_suspended_md(md
)) {
2625 map
= dm_get_live_table(md
);
2628 * DMF_NOFLUSH_SUSPENDING must be set before presuspend.
2629 * This flag is cleared before dm_suspend returns.
2632 set_bit(DMF_NOFLUSH_SUSPENDING
, &md
->flags
);
2634 /* This does not get reverted if there's an error later. */
2635 dm_table_presuspend_targets(map
);
2638 * Flush I/O to the device.
2639 * Any I/O submitted after lock_fs() may not be flushed.
2640 * noflush takes precedence over do_lockfs.
2641 * (lock_fs() flushes I/Os and waits for them to complete.)
2643 if (!noflush
&& do_lockfs
) {
2650 * Here we must make sure that no processes are submitting requests
2651 * to target drivers i.e. no one may be executing
2652 * __split_and_process_bio. This is called from dm_request and
2655 * To get all processes out of __split_and_process_bio in dm_request,
2656 * we take the write lock. To prevent any process from reentering
2657 * __split_and_process_bio from dm_request, we set
2658 * DMF_QUEUE_IO_TO_THREAD.
2660 * To quiesce the thread (dm_wq_work), we set DMF_BLOCK_IO_FOR_SUSPEND
2661 * and call flush_workqueue(md->wq). flush_workqueue will wait until
2662 * dm_wq_work exits and DMF_BLOCK_IO_FOR_SUSPEND will prevent any
2663 * further calls to __split_and_process_bio from dm_wq_work.
2665 down_write(&md
->io_lock
);
2666 set_bit(DMF_BLOCK_IO_FOR_SUSPEND
, &md
->flags
);
2667 set_bit(DMF_QUEUE_IO_TO_THREAD
, &md
->flags
);
2668 up_write(&md
->io_lock
);
2671 * Request-based dm uses md->wq for barrier (dm_rq_barrier_work) which
2672 * can be kicked until md->queue is stopped. So stop md->queue before
2675 if (dm_request_based(md
))
2676 stop_queue(md
->queue
);
2678 flush_workqueue(md
->wq
);
2681 * At this point no more requests are entering target request routines.
2682 * We call dm_wait_for_completion to wait for all existing requests
2685 r
= dm_wait_for_completion(md
, TASK_INTERRUPTIBLE
);
2687 down_write(&md
->io_lock
);
2689 clear_bit(DMF_NOFLUSH_SUSPENDING
, &md
->flags
);
2690 up_write(&md
->io_lock
);
2692 /* were we interrupted ? */
2696 if (dm_request_based(md
))
2697 start_queue(md
->queue
);
2700 goto out
; /* pushback list is already flushed, so skip flush */
2704 * If dm_wait_for_completion returned 0, the device is completely
2705 * quiescent now. There is no request-processing activity. All new
2706 * requests are being added to md->deferred list.
2709 set_bit(DMF_SUSPENDED
, &md
->flags
);
2711 dm_table_postsuspend_targets(map
);
2717 mutex_unlock(&md
->suspend_lock
);
2721 int dm_resume(struct mapped_device
*md
)
2724 struct dm_table
*map
= NULL
;
2726 mutex_lock(&md
->suspend_lock
);
2727 if (!dm_suspended_md(md
))
2730 map
= dm_get_live_table(md
);
2731 if (!map
|| !dm_table_get_size(map
))
2734 r
= dm_table_resume_targets(map
);
2741 * Flushing deferred I/Os must be done after targets are resumed
2742 * so that mapping of targets can work correctly.
2743 * Request-based dm is queueing the deferred I/Os in its request_queue.
2745 if (dm_request_based(md
))
2746 start_queue(md
->queue
);
2750 clear_bit(DMF_SUSPENDED
, &md
->flags
);
2752 dm_table_unplug_all(map
);
2756 mutex_unlock(&md
->suspend_lock
);
2761 /*-----------------------------------------------------------------
2762 * Event notification.
2763 *---------------------------------------------------------------*/
2764 int dm_kobject_uevent(struct mapped_device
*md
, enum kobject_action action
,
2767 char udev_cookie
[DM_COOKIE_LENGTH
];
2768 char *envp
[] = { udev_cookie
, NULL
};
2771 return kobject_uevent(&disk_to_dev(md
->disk
)->kobj
, action
);
2773 snprintf(udev_cookie
, DM_COOKIE_LENGTH
, "%s=%u",
2774 DM_COOKIE_ENV_VAR_NAME
, cookie
);
2775 return kobject_uevent_env(&disk_to_dev(md
->disk
)->kobj
,
2780 uint32_t dm_next_uevent_seq(struct mapped_device
*md
)
2782 return atomic_add_return(1, &md
->uevent_seq
);
2785 uint32_t dm_get_event_nr(struct mapped_device
*md
)
2787 return atomic_read(&md
->event_nr
);
2790 int dm_wait_event(struct mapped_device
*md
, int event_nr
)
2792 return wait_event_interruptible(md
->eventq
,
2793 (event_nr
!= atomic_read(&md
->event_nr
)));
2796 void dm_uevent_add(struct mapped_device
*md
, struct list_head
*elist
)
2798 unsigned long flags
;
2800 spin_lock_irqsave(&md
->uevent_lock
, flags
);
2801 list_add(elist
, &md
->uevent_list
);
2802 spin_unlock_irqrestore(&md
->uevent_lock
, flags
);
2806 * The gendisk is only valid as long as you have a reference
2809 struct gendisk
*dm_disk(struct mapped_device
*md
)
2814 struct kobject
*dm_kobject(struct mapped_device
*md
)
2820 * struct mapped_device should not be exported outside of dm.c
2821 * so use this check to verify that kobj is part of md structure
2823 struct mapped_device
*dm_get_from_kobject(struct kobject
*kobj
)
2825 struct mapped_device
*md
;
2827 md
= container_of(kobj
, struct mapped_device
, kobj
);
2828 if (&md
->kobj
!= kobj
)
2831 if (test_bit(DMF_FREEING
, &md
->flags
) ||
int dm_suspended_md(struct mapped_device *md)
{
        return test_bit(DMF_SUSPENDED, &md->flags);
}

int dm_suspended(struct dm_target *ti)
{
        return dm_suspended_md(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_suspended);

int dm_noflush_suspending(struct dm_target *ti)
{
        return __noflush_suspending(dm_table_get_md(ti->table));
}
EXPORT_SYMBOL_GPL(dm_noflush_suspending);
struct dm_md_mempools *dm_alloc_md_mempools(unsigned type)
{
        struct dm_md_mempools *pools = kmalloc(sizeof(*pools), GFP_KERNEL);

        if (!pools)
                return NULL;

        pools->io_pool = (type == DM_TYPE_BIO_BASED) ?
                         mempool_create_slab_pool(MIN_IOS, _io_cache) :
                         mempool_create_slab_pool(MIN_IOS, _rq_bio_info_cache);
        if (!pools->io_pool)
                goto free_pools_and_out;

        pools->tio_pool = (type == DM_TYPE_BIO_BASED) ?
                          mempool_create_slab_pool(MIN_IOS, _tio_cache) :
                          mempool_create_slab_pool(MIN_IOS, _rq_tio_cache);
        if (!pools->tio_pool)
                goto free_io_pool_and_out;

        pools->bs = (type == DM_TYPE_BIO_BASED) ?
                    bioset_create(16, 0) : bioset_create(MIN_IOS, 0);
        if (!pools->bs)
                goto free_tio_pool_and_out;

        return pools;

free_tio_pool_and_out:
        mempool_destroy(pools->tio_pool);

free_io_pool_and_out:
        mempool_destroy(pools->io_pool);

free_pools_and_out:
        kfree(pools);

        return NULL;
}

void dm_free_md_mempools(struct dm_md_mempools *pools)
{
        if (!pools)
                return;

        if (pools->io_pool)
                mempool_destroy(pools->io_pool);

        if (pools->tio_pool)
                mempool_destroy(pools->tio_pool);

        if (pools->bs)
                bioset_free(pools->bs);

        kfree(pools);
}
static const struct block_device_operations dm_blk_dops = {
        .open = dm_blk_open,
        .release = dm_blk_close,
        .ioctl = dm_blk_ioctl,
        .getgeo = dm_blk_getgeo,
        .owner = THIS_MODULE
};

EXPORT_SYMBOL(dm_get_mapinfo);

module_init(dm_init);
module_exit(dm_exit);

module_param(major, uint, 0);
MODULE_PARM_DESC(major, "The major number of the device mapper");

MODULE_DESCRIPTION(DM_NAME " driver");
MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");