/*
 * Copyright (C) 2001-2002 Sistina Software (UK) Limited.
 *
 * This file is released under the GPL.
 */
#include <linux/blkdev.h>
#include <linux/device-mapper.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/log2.h>
#include <linux/dm-kcopyd.h>
#include <linux/workqueue.h>

#include "dm-exception-store.h"
#define DM_MSG_PREFIX "snapshots"

/*
 * The percentage increment we will wake up users at
 */
#define WAKE_UP_PERCENT 5

/*
 * kcopyd priority of snapshot operations
 */
#define SNAPSHOT_COPY_PRIORITY 2

/*
 * Reserve 1MB for each snapshot initially (with minimum of 1 page).
 */
#define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1)

/*
 * The size of the mempool used to track chunks in use.
 */
#define MIN_IOS 256

#define DM_TRACKED_CHUNK_HASH_SIZE	16
#define DM_TRACKED_CHUNK_HASH(x)	((unsigned long)(x) & \
					 (DM_TRACKED_CHUNK_HASH_SIZE - 1))
struct dm_exception_table {
	uint32_t hash_mask;
	unsigned hash_shift;
	struct list_head *table;
};

struct dm_snapshot {
	struct rw_semaphore lock;

	struct dm_dev *origin;
	struct dm_dev *cow;

	struct dm_target *ti;

	/* List of snapshots per Origin */
	struct list_head list;

	/* You can't use a snapshot if this is 0 (e.g. if full) */
	int valid;

	/* Origin writes don't trigger exceptions until this is set */
	int active;

	/* Whether or not owning mapped_device is suspended */
	int suspended;

	mempool_t *pending_pool;

	atomic_t pending_exceptions_count;

	struct dm_exception_table pending;
	struct dm_exception_table complete;

	/*
	 * pe_lock protects all pending_exception operations and access
	 * as well as the snapshot_bios list.
	 */
	spinlock_t pe_lock;

	/* The on disk metadata handler */
	struct dm_exception_store *store;

	struct dm_kcopyd_client *kcopyd_client;

	/* Queue of snapshot writes for ksnapd to flush */
	struct bio_list queued_bios;
	struct work_struct queued_bios_work;

	/* Chunks with outstanding reads */
	mempool_t *tracked_chunk_pool;
	spinlock_t tracked_chunk_lock;
	struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE];
};
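/*
 * A quick note on locking, based on how the fields above are used below:
 * the rw_semaphore "lock" guards both exception tables and the
 * valid/active/suspended flags, pe_lock guards the queued_bios list that
 * ksnapd drains, and tracked_chunk_lock guards the hash of chunks that
 * currently have reads in flight.
 */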
struct dm_dev *dm_snap_cow(struct dm_snapshot *s)
{
	return s->cow;
}
EXPORT_SYMBOL(dm_snap_cow);

static struct workqueue_struct *ksnapd;
static void flush_queued_bios(struct work_struct *work);
static sector_t chunk_to_sector(struct dm_exception_store *store,
				chunk_t chunk)
{
	return chunk << store->chunk_shift;
}
static int bdev_equal(struct block_device *lhs, struct block_device *rhs)
{
	/*
	 * There is only ever one instance of a particular block
	 * device so we can compare pointers safely.
	 */
	return lhs == rhs;
}
struct dm_snap_pending_exception {
	struct dm_exception e;

	/*
	 * Origin buffers waiting for this to complete are held
	 * in a bio list.
	 */
	struct bio_list origin_bios;
	struct bio_list snapshot_bios;

	/*
	 * Short-term queue of pending exceptions prior to submission.
	 */
	struct list_head list;

	/*
	 * The primary pending_exception is the one that holds
	 * the ref_count and the list of origin_bios for a
	 * group of pending_exceptions.  It is always last to get freed.
	 * These fields get set up when writing to the origin.
	 */
	struct dm_snap_pending_exception *primary_pe;

	/*
	 * Number of pending_exceptions processing this chunk.
	 * When this drops to zero we must complete the origin bios.
	 * If incrementing or decrementing this, hold pe->snap->lock for
	 * the sibling concerned and not pe->primary_pe->snap->lock
	 * unless they are the same.
	 */
	atomic_t ref_count;

	/* Pointer back to snapshot context */
	struct dm_snapshot *snap;

	/*
	 * 1 indicates the exception has already been sent to
	 * kcopyd.
	 */
	int started;
};
/*
 * Hash table mapping origin volumes to lists of snapshots and
 * a lock to protect it
 */
static struct kmem_cache *exception_cache;
static struct kmem_cache *pending_cache;
struct dm_snap_tracked_chunk {
	struct hlist_node node;
	chunk_t chunk;
};

static struct kmem_cache *tracked_chunk_cache;
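/*
 * Reads remapped to the origin while their chunk is still being copied are
 * recorded here (track_chunk() from snapshot_map(), removed again in
 * snapshot_end_io()).  pending_complete() uses __check_for_conflicting_io()
 * to wait for such reads to drain before publishing the completed exception.
 */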
static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s,
						 chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool,
							GFP_NOIO);
	unsigned long flags;

	c->chunk = chunk;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_add_head(&c->node,
		       &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	return c;
}
static void stop_tracking_chunk(struct dm_snapshot *s,
				struct dm_snap_tracked_chunk *c)
{
	unsigned long flags;

	spin_lock_irqsave(&s->tracked_chunk_lock, flags);
	hlist_del(&c->node);
	spin_unlock_irqrestore(&s->tracked_chunk_lock, flags);

	mempool_free(c, s->tracked_chunk_pool);
}
static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_snap_tracked_chunk *c;
	struct hlist_node *hn;
	int found = 0;

	spin_lock_irq(&s->tracked_chunk_lock);

	hlist_for_each_entry(c, hn,
	    &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) {
		if (c->chunk == chunk) {
			found = 1;
			break;
		}
	}

	spin_unlock_irq(&s->tracked_chunk_lock);

	return found;
}
/*
 * This conflicting I/O is extremely improbable in the caller,
 * so msleep(1) is sufficient and there is no need for a wait queue.
 */
static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk)
{
	while (__chunk_is_tracked(s, chunk))
		msleep(1);
}
/*
 * One of these per registered origin, held in the snapshot_origins hash
 */
struct origin {
	/* The origin device */
	struct block_device *bdev;

	struct list_head hash_list;

	/* List of snapshots for this origin */
	struct list_head snapshots;
};
/*
 * Size of the hash table for origin volumes. If we make this
 * the size of the minors list then it should be nearly perfect
 */
#define ORIGIN_HASH_SIZE 256
#define ORIGIN_MASK      0xFF
static struct list_head *_origins;
static struct rw_semaphore _origins_lock;
static int init_origin_hash(void)
{
	int i;

	_origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head),
			   GFP_KERNEL);
	if (!_origins) {
		DMERR("unable to allocate memory");
		return -ENOMEM;
	}

	for (i = 0; i < ORIGIN_HASH_SIZE; i++)
		INIT_LIST_HEAD(_origins + i);

	init_rwsem(&_origins_lock);

	return 0;
}
static void exit_origin_hash(void)
{
	kfree(_origins);
}
static unsigned origin_hash(struct block_device *bdev)
{
	return bdev->bd_dev & ORIGIN_MASK;
}
static struct origin *__lookup_origin(struct block_device *origin)
{
	struct list_head *ol;
	struct origin *o;

	ol = &_origins[origin_hash(origin)];
	list_for_each_entry (o, ol, hash_list)
		if (bdev_equal(o->bdev, origin))
			return o;

	return NULL;
}
static void __insert_origin(struct origin *o)
{
	struct list_head *sl = &_origins[origin_hash(o->bdev)];
	list_add_tail(&o->hash_list, sl);
}
/*
 * _origins_lock must be held when calling this function.
 * Returns number of snapshots registered using the supplied cow device, plus:
 * snap_src - a snapshot suitable for use as a source of exception handover
 * snap_dest - a snapshot capable of receiving exception handover.
 *
 * Possible return values and states:
 *   0: NULL, NULL  - first new snapshot
 *   1: snap_src, NULL - normal snapshot
 *   2: snap_src, snap_dest  - waiting for handover
 *   2: snap_src, NULL - handed over, waiting for old to be deleted
 *   1: NULL, snap_dest - source got destroyed without handover
 */
static int __find_snapshots_sharing_cow(struct dm_snapshot *snap,
					struct dm_snapshot **snap_src,
					struct dm_snapshot **snap_dest)
{
	struct dm_snapshot *s;
	struct origin *o;
	int count = 0;
	int active;

	o = __lookup_origin(snap->origin->bdev);
	if (!o)
		goto out;

	list_for_each_entry(s, &o->snapshots, list) {
		if (!bdev_equal(s->cow->bdev, snap->cow->bdev))
			continue;

		down_read(&s->lock);
		active = s->active;
		up_read(&s->lock);

		if (active) {
			if (snap_src)
				*snap_src = s;
		} else if (snap_dest)
			*snap_dest = s;

		count++;
	}

out:
	return count;
}
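/*
 * Exception handover in brief: loading a second snapshot target that uses
 * the same COW device makes it the handover destination (detected via
 * __validate_exception_handover() below).  Its constructor skips reading
 * the metadata, and once the old snapshot is suspended, snapshot_resume()
 * on the new one swaps the exception store and completed table across in
 * __handover_exceptions().
 */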
/*
 * On success, returns 1 if this snapshot is a handover destination,
 * otherwise returns 0.
 */
static int __validate_exception_handover(struct dm_snapshot *snap)
{
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	/* Does snapshot need exceptions handed over to it? */
	if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest) == 2) ||
	    snap_dest) {
		snap->ti->error = "Snapshot cow pairing for exception "
				  "table handover failed";
		return -EINVAL;
	}

	/*
	 * If no snap_src was found, snap cannot become a handover
	 * destination.
	 */
	if (!snap_src)
		return 0;

	return 1;
}
static void __insert_snapshot(struct origin *o, struct dm_snapshot *s)
{
	struct dm_snapshot *l;

	/* Sort the list according to chunk size, largest-first smallest-last */
	list_for_each_entry(l, &o->snapshots, list)
		if (l->store->chunk_size < s->store->chunk_size)
			break;
	list_add_tail(&s->list, &l->list);
}
/*
 * Make a note of the snapshot and its origin so we can look it
 * up when the origin has a write on it.
 *
 * Also validate snapshot exception store handovers.
 * On success, returns 1 if this registration is a handover destination,
 * otherwise returns 0.
 */
static int register_snapshot(struct dm_snapshot *snap)
{
	struct origin *o, *new_o = NULL;
	struct block_device *bdev = snap->origin->bdev;
	int r = 0;

	new_o = kmalloc(sizeof(*new_o), GFP_KERNEL);
	if (!new_o)
		return -ENOMEM;

	down_write(&_origins_lock);

	r = __validate_exception_handover(snap);
	if (r < 0) {
		kfree(new_o);
		goto out;
	}

	o = __lookup_origin(bdev);
	if (o)
		kfree(new_o);
	else {
		/* New origin */
		o = new_o;

		/* Initialise the struct */
		INIT_LIST_HEAD(&o->snapshots);
		o->bdev = bdev;

		__insert_origin(o);
	}

	__insert_snapshot(o, snap);

out:
	up_write(&_origins_lock);

	return r;
}
/*
 * Move snapshot to correct place in list according to chunk size.
 */
static void reregister_snapshot(struct dm_snapshot *s)
{
	struct block_device *bdev = s->origin->bdev;

	down_write(&_origins_lock);

	list_del(&s->list);
	__insert_snapshot(__lookup_origin(bdev), s);

	up_write(&_origins_lock);
}
static void unregister_snapshot(struct dm_snapshot *s)
{
	struct origin *o;

	down_write(&_origins_lock);
	o = __lookup_origin(s->origin->bdev);

	list_del(&s->list);
	if (o && list_empty(&o->snapshots)) {
		list_del(&o->hash_list);
		kfree(o);
	}

	up_write(&_origins_lock);
}
/*
 * Implementation of the exception hash tables.
 * The lowest hash_shift bits of the chunk number are ignored, allowing
 * some consecutive chunks to be grouped together.
 */
static int dm_exception_table_init(struct dm_exception_table *et,
				   uint32_t size, unsigned hash_shift)
{
	unsigned int i;

	et->hash_shift = hash_shift;
	et->hash_mask = size - 1;
	et->table = dm_vcalloc(size, sizeof(struct list_head));
	if (!et->table)
		return -ENOMEM;

	for (i = 0; i < size; i++)
		INIT_LIST_HEAD(et->table + i);

	return 0;
}
static void dm_exception_table_exit(struct dm_exception_table *et,
				    struct kmem_cache *mem)
{
	struct list_head *slot;
	struct dm_exception *ex, *next;
	int i, size;

	size = et->hash_mask + 1;
	for (i = 0; i < size; i++) {
		slot = et->table + i;

		list_for_each_entry_safe (ex, next, slot, hash_list)
			kmem_cache_free(mem, ex);
	}

	vfree(et->table);
}
static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk)
{
	return (chunk >> et->hash_shift) & et->hash_mask;
}
static void dm_remove_exception(struct dm_exception *e)
{
	list_del(&e->hash_list);
}
/*
 * Return the exception data for a sector, or NULL if not
 * remapped.
 */
static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et,
						chunk_t chunk)
{
	struct list_head *slot;
	struct dm_exception *e;

	slot = &et->table[exception_hash(et, chunk)];
	list_for_each_entry (e, slot, hash_list)
		if (chunk >= e->old_chunk &&
		    chunk <= e->old_chunk + dm_consecutive_chunk_count(e))
			return e;

	return NULL;
}
static struct dm_exception *alloc_completed_exception(void)
{
	struct dm_exception *e;

	e = kmem_cache_alloc(exception_cache, GFP_NOIO);
	if (!e)
		e = kmem_cache_alloc(exception_cache, GFP_ATOMIC);

	return e;
}
static void free_completed_exception(struct dm_exception *e)
{
	kmem_cache_free(exception_cache, e);
}
static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s)
{
	struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool,
							     GFP_NOIO);

	atomic_inc(&s->pending_exceptions_count);
	pe->snap = s;

	return pe;
}
static void free_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;

	mempool_free(pe, s->pending_pool);
	smp_mb__before_atomic_dec();
	atomic_dec(&s->pending_exceptions_count);
}
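/*
 * pending_exceptions_count is raised in alloc_pending_exception() and
 * dropped here; snapshot_dtr() polls it so the pending mempool is only
 * destroyed after every outstanding pending exception has been freed.
 */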
static void dm_insert_exception(struct dm_exception_table *eh,
				struct dm_exception *new_e)
{
	struct list_head *l;
	struct dm_exception *e = NULL;

	l = &eh->table[exception_hash(eh, new_e->old_chunk)];

	/* Add immediately if this table doesn't support consecutive chunks */
	if (!eh->hash_shift)
		goto out;

	/* List is ordered by old_chunk */
	list_for_each_entry_reverse(e, l, hash_list) {
		/* Insert after an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk +
					 dm_consecutive_chunk_count(e) + 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) +
					 dm_consecutive_chunk_count(e) + 1)) {
			dm_consecutive_chunk_count_inc(e);
			free_completed_exception(new_e);
			return;
		}

		/* Insert before an existing chunk? */
		if (new_e->old_chunk == (e->old_chunk - 1) &&
		    new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) {
			dm_consecutive_chunk_count_inc(e);
			e->old_chunk--;
			e->new_chunk--;
			free_completed_exception(new_e);
			return;
		}

		if (new_e->old_chunk > e->old_chunk)
			break;
	}

out:
	list_add(&new_e->hash_list, e ? &e->hash_list : l);
}
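/*
 * Runs of consecutive chunks are folded into a single exception: the run
 * length is kept in the top bits of e->new_chunk and read back with
 * dm_consecutive_chunk_count()/dm_chunk_number() from dm-exception-store.h.
 * That is why the completed table is created with DM_CHUNK_CONSECUTIVE_BITS
 * while the pending table uses a hash_shift of 0 and always takes the
 * "add immediately" path above.
 */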
/*
 * Callback used by the exception stores to load exceptions when
 * initialising.
 */
static int dm_add_exception(void *context, chunk_t old, chunk_t new)
{
	struct dm_snapshot *s = context;
	struct dm_exception *e;

	e = alloc_completed_exception();
	if (!e)
		return -ENOMEM;

	e->old_chunk = old;

	/* Consecutive_count is implicitly initialised to zero */
	e->new_chunk = new;

	dm_insert_exception(&s->complete, e);

	return 0;
}
#define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r)))

/*
 * Return a minimum chunk size of all snapshots that have the specified origin.
 * Return zero if the origin has no snapshots.
 */
static sector_t __minimum_chunk_size(struct origin *o)
{
	struct dm_snapshot *snap;
	unsigned chunk_size = 0;

	if (o)
		list_for_each_entry(snap, &o->snapshots, list)
			chunk_size = min_not_zero(chunk_size,
						  snap->store->chunk_size);

	return chunk_size;
}
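/*
 * __minimum_chunk_size() is what origin_resume() feeds into ti->split_io,
 * so bios submitted to a snapshot-origin target never straddle a chunk
 * boundary of any snapshot attached to that origin.
 */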
static int calc_max_buckets(void)
{
	/* use a fixed size of 2MB */
	unsigned long mem = 2 * 1024 * 1024;
	mem /= sizeof(struct list_head);

	return mem;
}
/*
 * Allocate room for a suitable hash table.
 */
static int init_hash_tables(struct dm_snapshot *s)
{
	sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets;

	/*
	 * Calculate based on the size of the original volume or
	 * the COW volume...
	 */
	cow_dev_size = get_dev_size(s->cow->bdev);
	origin_dev_size = get_dev_size(s->origin->bdev);
	max_buckets = calc_max_buckets();

	hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift;
	hash_size = min(hash_size, max_buckets);

	if (hash_size < 64)
		hash_size = 64;
	hash_size = rounddown_pow_of_two(hash_size);
	if (dm_exception_table_init(&s->complete, hash_size,
				    DM_CHUNK_CONSECUTIVE_BITS))
		return -ENOMEM;

	/*
	 * Allocate hash table for in-flight exceptions
	 * Make this smaller than the real hash table
	 */
	hash_size >>= 3;
	if (hash_size < 64)
		hash_size = 64;

	if (dm_exception_table_init(&s->pending, hash_size, 0)) {
		dm_exception_table_exit(&s->complete, exception_cache);
		return -ENOMEM;
	}

	return 0;
}
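/*
 * For illustration only (device names and sizes are made up), a snapshot
 * table line handed to the constructor below typically looks like:
 *
 *     echo "0 2097152 snapshot /dev/vg0/base /dev/vg0/base-cow P 16" | \
 *         dmsetup create base-snap
 *
 * i.e. <origin_dev> <COW-dev> <persistent? P|N> <chunk-size in 512b sectors>.
 */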
/*
 * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size>
 */
static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	struct dm_snapshot *s;
	int i;
	int r = -EINVAL;
	char *origin_path, *cow_path;
	unsigned args_used;

	if (argc != 4) {
		ti->error = "requires exactly 4 arguments";
		r = -EINVAL;
		goto bad;
	}

	origin_path = argv[0];
	argv++;
	argc--;

	s = kmalloc(sizeof(*s), GFP_KERNEL);
	if (!s) {
		ti->error = "Cannot allocate snapshot context private "
			    "structure";
		r = -ENOMEM;
		goto bad;
	}

	cow_path = argv[0];
	argv++;
	argc--;

	r = dm_get_device(ti, cow_path, 0, 0,
			  FMODE_READ | FMODE_WRITE, &s->cow);
	if (r) {
		ti->error = "Cannot get COW device";
		goto bad_cow;
	}

	r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store);
	if (r) {
		ti->error = "Couldn't create exception store";
		r = -EINVAL;
		goto bad_store;
	}

	argv += args_used;
	argc -= args_used;

	r = dm_get_device(ti, origin_path, 0, ti->len, FMODE_READ, &s->origin);
	if (r) {
		ti->error = "Cannot get origin device";
		goto bad_origin;
	}

	s->ti = ti;
	s->valid = 1;
	s->active = 0;
	s->suspended = 0;
	atomic_set(&s->pending_exceptions_count, 0);
	init_rwsem(&s->lock);
	INIT_LIST_HEAD(&s->list);
	spin_lock_init(&s->pe_lock);

	/* Allocate hash table for COW data */
	if (init_hash_tables(s)) {
		ti->error = "Unable to allocate hash table space";
		r = -ENOMEM;
		goto bad_hash_tables;
	}

	r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client);
	if (r) {
		ti->error = "Could not create kcopyd client";
		goto bad_kcopyd;
	}

	s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache);
	if (!s->pending_pool) {
		ti->error = "Could not allocate mempool for pending exceptions";
		goto bad_pending_pool;
	}

	s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS,
							 tracked_chunk_cache);
	if (!s->tracked_chunk_pool) {
		ti->error = "Could not allocate tracked_chunk mempool for "
			    "tracking reads";
		goto bad_tracked_chunk_pool;
	}

	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]);

	spin_lock_init(&s->tracked_chunk_lock);

	bio_list_init(&s->queued_bios);
	INIT_WORK(&s->queued_bios_work, flush_queued_bios);

	ti->private = s;
	ti->num_flush_requests = 1;

	/* Add snapshot to the list of snapshots for this origin */
	/* Exceptions aren't triggered till snapshot_resume() is called */
	r = register_snapshot(s);
	if (r == -ENOMEM) {
		ti->error = "Snapshot origin struct allocation failed";
		goto bad_load_and_register;
	} else if (r < 0) {
		/* invalid handover, register_snapshot has set ti->error */
		goto bad_load_and_register;
	}

	/*
	 * Metadata must only be loaded into one table at once, so skip this
	 * if metadata will be handed over during resume.
	 * Chunk size will be set during the handover - set it to zero to
	 * ensure it's ignored.
	 */
	if (r > 0) {
		s->store->chunk_size = 0;
		return 0;
	}

	r = s->store->type->read_metadata(s->store, dm_add_exception,
					  (void *)s);
	if (r < 0) {
		ti->error = "Failed to read snapshot metadata";
		goto bad_read_metadata;
	} else if (r > 0) {
		s->valid = 0;
		DMWARN("Snapshot is marked invalid.");
	}

	if (!s->store->chunk_size) {
		ti->error = "Chunk size not set";
		goto bad_read_metadata;
	}
	ti->split_io = s->store->chunk_size;

	return 0;

bad_read_metadata:
	unregister_snapshot(s);

bad_load_and_register:
	mempool_destroy(s->tracked_chunk_pool);

bad_tracked_chunk_pool:
	mempool_destroy(s->pending_pool);

bad_pending_pool:
	dm_kcopyd_client_destroy(s->kcopyd_client);

bad_kcopyd:
	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);

bad_hash_tables:
	dm_put_device(ti, s->origin);

bad_origin:
	dm_exception_store_destroy(s->store);

bad_store:
	dm_put_device(ti, s->cow);

bad_cow:
	kfree(s);

bad:
	return r;
}
static void __free_exceptions(struct dm_snapshot *s)
{
	dm_kcopyd_client_destroy(s->kcopyd_client);
	s->kcopyd_client = NULL;

	dm_exception_table_exit(&s->pending, pending_cache);
	dm_exception_table_exit(&s->complete, exception_cache);
}
static void __handover_exceptions(struct dm_snapshot *snap_src,
				  struct dm_snapshot *snap_dest)
{
	union {
		struct dm_exception_table table_swap;
		struct dm_exception_store *store_swap;
	} u;

	/*
	 * Swap all snapshot context information between the two instances.
	 */
	u.table_swap = snap_dest->complete;
	snap_dest->complete = snap_src->complete;
	snap_src->complete = u.table_swap;

	u.store_swap = snap_dest->store;
	snap_dest->store = snap_src->store;
	snap_src->store = u.store_swap;

	snap_dest->store->snap = snap_dest;
	snap_src->store->snap = snap_src;

	snap_dest->ti->split_io = snap_dest->store->chunk_size;
	snap_dest->valid = snap_src->valid;

	/*
	 * Set source invalid to ensure it receives no further I/O.
	 */
	snap_src->valid = 0;
}
static void snapshot_dtr(struct dm_target *ti)
{
#ifdef CONFIG_DM_DEBUG
	int i;
#endif
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	flush_workqueue(ksnapd);

	down_read(&_origins_lock);
	/* Check whether exception handover must be cancelled */
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
	if (snap_src && snap_dest && (s == snap_src)) {
		down_write(&snap_dest->lock);
		snap_dest->valid = 0;
		up_write(&snap_dest->lock);
		DMERR("Cancelling snapshot handover.");
	}
	up_read(&_origins_lock);

	/* Prevent further origin writes from using this snapshot. */
	/* After this returns there can be no new kcopyd jobs. */
	unregister_snapshot(s);

	while (atomic_read(&s->pending_exceptions_count))
		msleep(1);
	/*
	 * Ensure instructions in mempool_destroy aren't reordered
	 * before atomic_read.
	 */
	smp_mb();

#ifdef CONFIG_DM_DEBUG
	for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++)
		BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i]));
#endif

	mempool_destroy(s->tracked_chunk_pool);

	__free_exceptions(s);

	mempool_destroy(s->pending_pool);

	dm_put_device(ti, s->origin);

	dm_exception_store_destroy(s->store);

	dm_put_device(ti, s->cow);

	kfree(s);
}
/*
 * Flush a list of buffers.
 */
static void flush_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		generic_make_request(bio);
		bio = n;
	}
}
static void flush_queued_bios(struct work_struct *work)
{
	struct dm_snapshot *s =
		container_of(work, struct dm_snapshot, queued_bios_work);
	struct bio *queued_bios;
	unsigned long flags;

	spin_lock_irqsave(&s->pe_lock, flags);
	queued_bios = bio_list_get(&s->queued_bios);
	spin_unlock_irqrestore(&s->pe_lock, flags);

	flush_bios(queued_bios);
}
/*
 * Error a list of buffers.
 */
static void error_bios(struct bio *bio)
{
	struct bio *n;

	while (bio) {
		n = bio->bi_next;
		bio->bi_next = NULL;
		bio_io_error(bio);
		bio = n;
	}
}
static void __invalidate_snapshot(struct dm_snapshot *s, int err)
{
	if (!s->valid)
		return;

	if (err == -EIO)
		DMERR("Invalidating snapshot: Error reading/writing.");
	else if (err == -ENOMEM)
		DMERR("Invalidating snapshot: Unable to allocate exception.");

	if (s->store->type->drop_snapshot)
		s->store->type->drop_snapshot(s->store);

	s->valid = 0;

	dm_table_event(s->ti->table);
}
static void get_pending_exception(struct dm_snap_pending_exception *pe)
{
	atomic_inc(&pe->ref_count);
}
static struct bio *put_pending_exception(struct dm_snap_pending_exception *pe)
{
	struct dm_snap_pending_exception *primary_pe;
	struct bio *origin_bios = NULL;

	primary_pe = pe->primary_pe;

	/*
	 * If this pe is involved in a write to the origin and
	 * it is the last sibling to complete then release
	 * the bios for the original write to the origin.
	 */
	if (primary_pe &&
	    atomic_dec_and_test(&primary_pe->ref_count)) {
		origin_bios = bio_list_get(&primary_pe->origin_bios);
		free_pending_exception(primary_pe);
	}

	/*
	 * Free the pe if it's not linked to an origin write or if
	 * it's not itself a primary pe.
	 */
	if (!primary_pe || primary_pe != pe)
		free_pending_exception(pe);

	return origin_bios;
}
static void pending_complete(struct dm_snap_pending_exception *pe, int success)
{
	struct dm_exception *e;
	struct dm_snapshot *s = pe->snap;
	struct bio *origin_bios = NULL;
	struct bio *snapshot_bios = NULL;
	int error = 0;

	if (!success) {
		/* Read/write error - snapshot is unusable */
		down_write(&s->lock);
		__invalidate_snapshot(s, -EIO);
		error = 1;
		goto out;
	}

	e = alloc_completed_exception();
	if (!e) {
		down_write(&s->lock);
		__invalidate_snapshot(s, -ENOMEM);
		error = 1;
		goto out;
	}
	*e = pe->e;

	down_write(&s->lock);
	if (!s->valid) {
		free_completed_exception(e);
		error = 1;
		goto out;
	}

	/* Check for conflicting reads */
	__check_for_conflicting_io(s, pe->e.old_chunk);

	/*
	 * Add a proper exception, and remove the
	 * in-flight exception from the list.
	 */
	dm_insert_exception(&s->complete, e);

out:
	dm_remove_exception(&pe->e);
	snapshot_bios = bio_list_get(&pe->snapshot_bios);
	origin_bios = put_pending_exception(pe);

	up_write(&s->lock);

	/* Submit any pending write bios */
	if (error)
		error_bios(snapshot_bios);
	else
		flush_bios(snapshot_bios);

	flush_bios(origin_bios);
}
static void commit_callback(void *context, int success)
{
	struct dm_snap_pending_exception *pe = context;

	pending_complete(pe, success);
}
/*
 * Called when the copy I/O has finished.  kcopyd actually runs
 * this code so don't block.
 */
static void copy_callback(int read_err, unsigned long write_err, void *context)
{
	struct dm_snap_pending_exception *pe = context;
	struct dm_snapshot *s = pe->snap;

	if (read_err || write_err)
		pending_complete(pe, 0);
	else
		/* Update the metadata if we are persistent */
		s->store->type->commit_exception(s->store, &pe->e,
						 commit_callback, pe);
}
/*
 * Dispatches the copy operation to kcopyd.
 */
static void start_copy(struct dm_snap_pending_exception *pe)
{
	struct dm_snapshot *s = pe->snap;
	struct dm_io_region src, dest;
	struct block_device *bdev = s->origin->bdev;
	sector_t dev_size;

	dev_size = get_dev_size(bdev);

	src.bdev = bdev;
	src.sector = chunk_to_sector(s->store, pe->e.old_chunk);
	src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector);

	dest.bdev = s->cow->bdev;
	dest.sector = chunk_to_sector(s->store, pe->e.new_chunk);
	dest.count = src.count;

	/* Hand over to kcopyd */
	dm_kcopyd_copy(s->kcopyd_client,
		       &src, 1, &dest, 0, copy_callback, pe);
}
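/*
 * Copy-out life cycle, tying the helpers above together: snapshot_map() or
 * __origin_write() allocates a pending exception and calls start_copy();
 * kcopyd copies the chunk and calls copy_callback(); on success the
 * exception store commits the metadata and commit_callback() runs
 * pending_complete(), which installs the completed exception and releases
 * the bios queued on the pending exception.
 */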
static struct dm_snap_pending_exception *
__lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk)
{
	struct dm_exception *e = dm_lookup_exception(&s->pending, chunk);

	if (!e)
		return NULL;

	return container_of(e, struct dm_snap_pending_exception, e);
}
/*
 * Looks to see if this snapshot already has a pending exception
 * for this chunk, otherwise it allocates a new one and inserts
 * it into the pending table.
 *
 * NOTE: a write lock must be held on snap->lock before calling
 * this.
 */
static struct dm_snap_pending_exception *
__find_pending_exception(struct dm_snapshot *s,
			 struct dm_snap_pending_exception *pe, chunk_t chunk)
{
	struct dm_snap_pending_exception *pe2;

	pe2 = __lookup_pending_exception(s, chunk);
	if (pe2) {
		free_pending_exception(pe);
		return pe2;
	}

	pe->e.old_chunk = chunk;
	bio_list_init(&pe->origin_bios);
	bio_list_init(&pe->snapshot_bios);
	pe->primary_pe = NULL;
	atomic_set(&pe->ref_count, 0);
	pe->started = 0;

	if (s->store->type->prepare_exception(s->store, &pe->e)) {
		free_pending_exception(pe);
		return NULL;
	}

	get_pending_exception(pe);
	dm_insert_exception(&s->pending, &pe->e);

	return pe;
}
static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
			    struct bio *bio, chunk_t chunk)
{
	bio->bi_bdev = s->cow->bdev;
	bio->bi_sector = chunk_to_sector(s->store,
					 dm_chunk_number(e->new_chunk) +
					 (chunk - e->old_chunk)) +
			 (bio->bi_sector &
			  s->store->chunk_mask);
}
static int snapshot_map(struct dm_target *ti, struct bio *bio,
			union map_info *map_context)
{
	struct dm_exception *e;
	struct dm_snapshot *s = ti->private;
	int r = DM_MAPIO_REMAPPED;
	chunk_t chunk;
	struct dm_snap_pending_exception *pe = NULL;

	if (unlikely(bio_empty_barrier(bio))) {
		bio->bi_bdev = s->cow->bdev;
		return DM_MAPIO_REMAPPED;
	}

	chunk = sector_to_chunk(s->store, bio->bi_sector);

	/* Full snapshots are not usable */
	/* To get here the table must be live so s->active is always set. */
	if (!s->valid)
		return -EIO;

	/* FIXME: should only take write lock if we need
	 * to copy an exception */
	down_write(&s->lock);

	if (!s->valid) {
		r = -EIO;
		goto out_unlock;
	}

	/* If the block is already remapped - use that, else remap it */
	e = dm_lookup_exception(&s->complete, chunk);
	if (e) {
		remap_exception(s, e, bio, chunk);
		goto out_unlock;
	}

	/*
	 * Write to snapshot - higher level takes care of RW/RO
	 * flags so we should only get this if we are
	 * writeable.
	 */
	if (bio_rw(bio) == WRITE) {
		pe = __lookup_pending_exception(s, chunk);
		if (!pe) {
			up_write(&s->lock);
			pe = alloc_pending_exception(s);
			down_write(&s->lock);

			if (!s->valid) {
				free_pending_exception(pe);
				r = -EIO;
				goto out_unlock;
			}

			e = dm_lookup_exception(&s->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				remap_exception(s, e, bio, chunk);
				goto out_unlock;
			}

			pe = __find_pending_exception(s, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(s, -ENOMEM);
				r = -EIO;
				goto out_unlock;
			}
		}

		remap_exception(s, &pe->e, bio, chunk);
		bio_list_add(&pe->snapshot_bios, bio);

		r = DM_MAPIO_SUBMITTED;

		if (!pe->started) {
			/* this is protected by snap->lock */
			pe->started = 1;
			up_write(&s->lock);
			start_copy(pe);
			goto out;
		}
	} else {
		bio->bi_bdev = s->origin->bdev;
		map_context->ptr = track_chunk(s, chunk);
	}

out_unlock:
	up_write(&s->lock);
out:
	return r;
}
static int snapshot_end_io(struct dm_target *ti, struct bio *bio,
			   int error, union map_info *map_context)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snap_tracked_chunk *c = map_context->ptr;

	if (c)
		stop_tracking_chunk(s, c);

	return 0;
}
*ti
)
1357 struct dm_snapshot
*s
= ti
->private;
1359 down_write(&s
->lock
);
1364 static int snapshot_preresume(struct dm_target
*ti
)
1367 struct dm_snapshot
*s
= ti
->private;
1368 struct dm_snapshot
*snap_src
= NULL
, *snap_dest
= NULL
;
1370 down_read(&_origins_lock
);
1371 (void) __find_snapshots_sharing_cow(s
, &snap_src
, &snap_dest
);
1372 if (snap_src
&& snap_dest
) {
1373 down_read(&snap_src
->lock
);
1374 if (s
== snap_src
) {
1375 DMERR("Unable to resume snapshot source until "
1376 "handover completes.");
1378 } else if (!snap_src
->suspended
) {
1379 DMERR("Unable to perform snapshot handover until "
1380 "source is suspended.");
1383 up_read(&snap_src
->lock
);
1385 up_read(&_origins_lock
);
static void snapshot_resume(struct dm_target *ti)
{
	struct dm_snapshot *s = ti->private;
	struct dm_snapshot *snap_src = NULL, *snap_dest = NULL;

	down_read(&_origins_lock);
	(void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest);
	if (snap_src && snap_dest) {
		down_write(&snap_src->lock);
		down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING);
		__handover_exceptions(snap_src, snap_dest);
		up_write(&snap_dest->lock);
		up_write(&snap_src->lock);
	}
	up_read(&_origins_lock);

	/* Now we have correct chunk size, reregister */
	reregister_snapshot(s);

	down_write(&s->lock);
	s->active = 1;
	s->suspended = 0;
	up_write(&s->lock);
}
static int snapshot_status(struct dm_target *ti, status_type_t type,
			   char *result, unsigned int maxlen)
{
	unsigned sz = 0;
	struct dm_snapshot *snap = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:

		down_write(&snap->lock);

		if (!snap->valid)
			DMEMIT("Invalid");
		else {
			if (snap->store->type->usage) {
				sector_t total_sectors, sectors_allocated,
					 metadata_sectors;
				snap->store->type->usage(snap->store,
							 &total_sectors,
							 &sectors_allocated,
							 &metadata_sectors);
				DMEMIT("%llu/%llu %llu",
				       (unsigned long long)sectors_allocated,
				       (unsigned long long)total_sectors,
				       (unsigned long long)metadata_sectors);
			} else
				DMEMIT("Unknown");
		}

		up_write(&snap->lock);

		break;

	case STATUSTYPE_TABLE:
		/*
		 * kdevname returns a static pointer so we need
		 * to make private copies if the output is to
		 * make sense.
		 */
		DMEMIT("%s %s", snap->origin->name, snap->cow->name);
		snap->store->type->status(snap->store, type, result + sz,
					  maxlen - sz);
		break;
	}

	return 0;
}
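/*
 * The STATUSTYPE_INFO line emitted above is
 * "<sectors_allocated>/<total_sectors> <metadata_sectors>", so
 * "dmsetup status" might show e.g. "16384/204800 24" (figures are
 * purely illustrative).
 */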
static int snapshot_iterate_devices(struct dm_target *ti,
				    iterate_devices_callout_fn fn, void *data)
{
	struct dm_snapshot *snap = ti->private;

	return fn(ti, snap->origin, 0, ti->len, data);
}
/*-----------------------------------------------------------------
 * Origin methods
 *---------------------------------------------------------------*/

/*
 * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any
 * supplied bio was ignored.  The caller may submit it immediately.
 * (No remapping actually occurs as the origin is always a direct linear
 * map.)
 *
 * If further exceptions are required, DM_MAPIO_SUBMITTED is returned
 * and any supplied bio is added to a list to be submitted once all
 * the necessary exceptions exist.
 */
static int __origin_write(struct list_head *snapshots, sector_t sector,
			  struct bio *bio)
{
	int r = DM_MAPIO_REMAPPED, first = 0;
	struct dm_snapshot *snap;
	struct dm_exception *e;
	struct dm_snap_pending_exception *pe, *next_pe, *primary_pe = NULL;
	chunk_t chunk;
	LIST_HEAD(pe_queue);

	/* Do all the snapshots on this origin */
	list_for_each_entry (snap, snapshots, list) {

		down_write(&snap->lock);

		/* Only deal with valid and active snapshots */
		if (!snap->valid || !snap->active)
			goto next_snapshot;

		/* Nothing to do if writing beyond end of snapshot */
		if (sector >= dm_table_get_size(snap->ti->table))
			goto next_snapshot;

		/*
		 * Remember, different snapshots can have
		 * different chunk sizes.
		 */
		chunk = sector_to_chunk(snap->store, sector);

		/*
		 * Check exception table to see if block
		 * is already remapped in this snapshot
		 * and trigger an exception if not.
		 *
		 * ref_count is initialised to 1 so pending_complete()
		 * won't destroy the primary_pe while we're inside this loop.
		 */
		e = dm_lookup_exception(&snap->complete, chunk);
		if (e)
			goto next_snapshot;

		pe = __lookup_pending_exception(snap, chunk);
		if (!pe) {
			up_write(&snap->lock);
			pe = alloc_pending_exception(snap);
			down_write(&snap->lock);

			if (!snap->valid) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			e = dm_lookup_exception(&snap->complete, chunk);
			if (e) {
				free_pending_exception(pe);
				goto next_snapshot;
			}

			pe = __find_pending_exception(snap, pe, chunk);
			if (!pe) {
				__invalidate_snapshot(snap, -ENOMEM);
				goto next_snapshot;
			}
		}

		if (!primary_pe) {
			/*
			 * Either every pe here has same
			 * primary_pe or none has one yet.
			 */
			if (pe->primary_pe)
				primary_pe = pe->primary_pe;
			else {
				primary_pe = pe;
				first = 1;
			}

			bio_list_add(&primary_pe->origin_bios, bio);

			r = DM_MAPIO_SUBMITTED;
		}

		if (!pe->primary_pe) {
			pe->primary_pe = primary_pe;
			get_pending_exception(primary_pe);
		}

		if (!pe->started) {
			pe->started = 1;
			list_add_tail(&pe->list, &pe_queue);
		}

next_snapshot:
		up_write(&snap->lock);
	}

	if (!primary_pe)
		return r;

	/*
	 * If this is the first time we're processing this chunk and
	 * ref_count is now 1 it means all the pending exceptions
	 * got completed while we were in the loop above, so it falls to
	 * us here to remove the primary_pe and submit any origin_bios.
	 */
	if (first && atomic_dec_and_test(&primary_pe->ref_count)) {
		flush_bios(bio_list_get(&primary_pe->origin_bios));
		free_pending_exception(primary_pe);
		/* If we got here, pe_queue is necessarily empty. */
		return r;
	}

	/*
	 * Now that we have a complete pe list we can start the copying.
	 */
	list_for_each_entry_safe(pe, next_pe, &pe_queue, list)
		start_copy(pe);

	return r;
}
/*
 * Called on a write from the origin driver.
 */
static int do_origin(struct dm_dev *origin, struct bio *bio)
{
	struct origin *o;
	int r = DM_MAPIO_REMAPPED;

	down_read(&_origins_lock);
	o = __lookup_origin(origin->bdev);
	if (o)
		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
	up_read(&_origins_lock);

	return r;
}
/*-----------------------------------------------------------------
 * Origin: maps a linear range of a device, with hooks for snapshotting.
 *---------------------------------------------------------------*/

/*
 * Construct an origin mapping: <dev_path>
 * The context for an origin is merely a 'struct dm_dev *'
 * pointing to the real device.
 */
static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv)
{
	int r;
	struct dm_dev *dev;

	if (argc != 1) {
		ti->error = "origin: incorrect number of arguments";
		return -EINVAL;
	}

	r = dm_get_device(ti, argv[0], 0, ti->len,
			  dm_table_get_mode(ti->table), &dev);
	if (r) {
		ti->error = "Cannot get target device";
		return r;
	}

	ti->private = dev;
	ti->num_flush_requests = 1;

	return 0;
}
*ti
)
1661 struct dm_dev
*dev
= ti
->private;
1662 dm_put_device(ti
, dev
);
static int origin_map(struct dm_target *ti, struct bio *bio,
		      union map_info *map_context)
{
	struct dm_dev *dev = ti->private;
	bio->bi_bdev = dev->bdev;

	if (unlikely(bio_empty_barrier(bio)))
		return DM_MAPIO_REMAPPED;

	/* Only tell snapshots if this is a write */
	return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED;
}
/*
 * Set the target "split_io" field to the minimum of all the snapshots'
 * chunk sizes.
 */
static void origin_resume(struct dm_target *ti)
{
	struct dm_dev *dev = ti->private;

	down_read(&_origins_lock);

	ti->split_io = __minimum_chunk_size(__lookup_origin(dev->bdev));

	up_read(&_origins_lock);
}
static int origin_status(struct dm_target *ti, status_type_t type, char *result,
			 unsigned int maxlen)
{
	struct dm_dev *dev = ti->private;

	switch (type) {
	case STATUSTYPE_INFO:
		result[0] = '\0';
		break;

	case STATUSTYPE_TABLE:
		snprintf(result, maxlen, "%s", dev->name);
		break;
	}

	return 0;
}
static int origin_iterate_devices(struct dm_target *ti,
				  iterate_devices_callout_fn fn, void *data)
{
	struct dm_dev *dev = ti->private;

	return fn(ti, dev, 0, ti->len, data);
}
static struct target_type origin_target = {
	.name    = "snapshot-origin",
	.version = {1, 7, 0},
	.module  = THIS_MODULE,
	.ctr     = origin_ctr,
	.dtr     = origin_dtr,
	.map     = origin_map,
	.resume  = origin_resume,
	.status  = origin_status,
	.iterate_devices = origin_iterate_devices,
};
static struct target_type snapshot_target = {
	.name    = "snapshot",
	.version = {1, 9, 0},
	.module  = THIS_MODULE,
	.ctr     = snapshot_ctr,
	.dtr     = snapshot_dtr,
	.map     = snapshot_map,
	.end_io  = snapshot_end_io,
	.postsuspend = snapshot_postsuspend,
	.preresume  = snapshot_preresume,
	.resume  = snapshot_resume,
	.status  = snapshot_status,
	.iterate_devices = snapshot_iterate_devices,
};
static int __init dm_snapshot_init(void)
{
	int r;

	r = dm_exception_store_init();
	if (r) {
		DMERR("Failed to initialize exception stores");
		return r;
	}

	r = dm_register_target(&snapshot_target);
	if (r) {
		DMERR("snapshot target register failed %d", r);
		goto bad_register_snapshot_target;
	}

	r = dm_register_target(&origin_target);
	if (r < 0) {
		DMERR("Origin target register failed %d", r);
		goto bad1;
	}

	r = init_origin_hash();
	if (r) {
		DMERR("init_origin_hash failed.");
		goto bad2;
	}

	exception_cache = KMEM_CACHE(dm_exception, 0);
	if (!exception_cache) {
		DMERR("Couldn't create exception cache.");
		r = -ENOMEM;
		goto bad3;
	}

	pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0);
	if (!pending_cache) {
		DMERR("Couldn't create pending cache.");
		r = -ENOMEM;
		goto bad4;
	}

	tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0);
	if (!tracked_chunk_cache) {
		DMERR("Couldn't create cache to track chunks in use.");
		r = -ENOMEM;
		goto bad5;
	}

	ksnapd = create_singlethread_workqueue("ksnapd");
	if (!ksnapd) {
		DMERR("Failed to create ksnapd workqueue.");
		r = -ENOMEM;
		goto bad_pending_pool;
	}

	return 0;

bad_pending_pool:
	kmem_cache_destroy(tracked_chunk_cache);
bad5:
	kmem_cache_destroy(pending_cache);
bad4:
	kmem_cache_destroy(exception_cache);
bad3:
	exit_origin_hash();
bad2:
	dm_unregister_target(&origin_target);
bad1:
	dm_unregister_target(&snapshot_target);

bad_register_snapshot_target:
	dm_exception_store_exit();
	return r;
}
static void __exit dm_snapshot_exit(void)
{
	destroy_workqueue(ksnapd);

	dm_unregister_target(&snapshot_target);
	dm_unregister_target(&origin_target);

	exit_origin_hash();
	kmem_cache_destroy(pending_cache);
	kmem_cache_destroy(exception_cache);
	kmem_cache_destroy(tracked_chunk_cache);

	dm_exception_store_exit();
}
/* Module hooks */
module_init(dm_snapshot_init);
module_exit(dm_snapshot_exit);

MODULE_DESCRIPTION(DM_NAME " snapshot target");
MODULE_AUTHOR("Joe Thornber");
MODULE_LICENSE("GPL");