Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * dm-snapshot.c | |
3 | * | |
4 | * Copyright (C) 2001-2002 Sistina Software (UK) Limited. | |
5 | * | |
6 | * This file is released under the GPL. | |
7 | */ | |
8 | ||
9 | #include <linux/blkdev.h> | |
1da177e4 | 10 | #include <linux/device-mapper.h> |
90fa1527 | 11 | #include <linux/delay.h> |
1da177e4 LT |
12 | #include <linux/fs.h> |
13 | #include <linux/init.h> | |
14 | #include <linux/kdev_t.h> | |
15 | #include <linux/list.h> | |
16 | #include <linux/mempool.h> | |
17 | #include <linux/module.h> | |
18 | #include <linux/slab.h> | |
19 | #include <linux/vmalloc.h> | |
6f3c3f0a | 20 | #include <linux/log2.h> |
a765e20e | 21 | #include <linux/dm-kcopyd.h> |
ccc45ea8 | 22 | #include <linux/workqueue.h> |
1da177e4 | 23 | |
aea53d92 | 24 | #include "dm-exception-store.h" |
1da177e4 | 25 | |
72d94861 AK |
26 | #define DM_MSG_PREFIX "snapshots" |
27 | ||
d698aa45 MP |
28 | static const char dm_snapshot_merge_target_name[] = "snapshot-merge"; |
29 | ||
30 | #define dm_target_is_snapshot_merge(ti) \ | |
31 | ((ti)->type->name == dm_snapshot_merge_target_name) | |
32 | ||
1da177e4 LT |
33 | /* |
34 | * The percentage increment at which we wake up users
35 | */ | |
36 | #define WAKE_UP_PERCENT 5 | |
37 | ||
38 | /* | |
39 | * kcopyd priority of snapshot operations | |
40 | */ | |
41 | #define SNAPSHOT_COPY_PRIORITY 2 | |
42 | ||
43 | /* | |
8ee2767a | 44 | * Reserve 1MB for each snapshot initially (with a minimum of 1 page).
1da177e4 | 45 | */ |
8ee2767a | 46 | #define SNAPSHOT_PAGES (((1UL << 20) >> PAGE_SHIFT) ? : 1) |
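/*
 * Example (illustrative): with 4KB pages (PAGE_SHIFT = 12) this is
 * (1UL << 20) >> 12 = 256 pages per snapshot; the GNU "?:" fallback
 * guarantees at least one page if the shift ever yields zero.
 */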
1da177e4 | 47 | |
cd45daff MP |
48 | /* |
49 | * The size of the mempool used to track chunks in use. | |
50 | */ | |
51 | #define MIN_IOS 256 | |
52 | ||
ccc45ea8 JB |
53 | #define DM_TRACKED_CHUNK_HASH_SIZE 16 |
54 | #define DM_TRACKED_CHUNK_HASH(x) ((unsigned long)(x) & \ | |
55 | (DM_TRACKED_CHUNK_HASH_SIZE - 1)) | |
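/*
 * Example (illustrative): with DM_TRACKED_CHUNK_HASH_SIZE = 16 the
 * bucket index is just the low four bits of the chunk number, so
 * DM_TRACKED_CHUNK_HASH(0x12) == DM_TRACKED_CHUNK_HASH(0x32) == 2.
 */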
56 | ||
191437a5 | 57 | struct dm_exception_table { |
ccc45ea8 JB |
58 | uint32_t hash_mask; |
59 | unsigned hash_shift; | |
60 | struct list_head *table; | |
61 | }; | |
62 | ||
63 | struct dm_snapshot { | |
64 | struct rw_semaphore lock; | |
65 | ||
66 | struct dm_dev *origin; | |
fc56f6fb MS |
67 | struct dm_dev *cow; |
68 | ||
69 | struct dm_target *ti; | |
ccc45ea8 JB |
70 | |
71 | /* List of snapshots per Origin */ | |
72 | struct list_head list; | |
73 | ||
d8ddb1cf MS |
74 | /* |
75 | * You can't use a snapshot if this is 0 (e.g. if full). | |
76 | * A snapshot-merge target never clears this. | |
77 | */ | |
ccc45ea8 JB |
78 | int valid; |
79 | ||
80 | /* Origin writes don't trigger exceptions until this is set */ | |
81 | int active; | |
82 | ||
c26655ca MS |
83 | /* Whether or not the owning mapped_device is suspended */
84 | int suspended; | |
85 | ||
ccc45ea8 JB |
86 | atomic_t pending_exceptions_count; |
87 | ||
924e600d MS |
88 | mempool_t *pending_pool; |
89 | ||
191437a5 JB |
90 | struct dm_exception_table pending; |
91 | struct dm_exception_table complete; | |
ccc45ea8 JB |
92 | |
93 | /* | |
94 | * pe_lock protects all pending_exception operations and accesses,
95 | * as well as the snapshot_bios list.
96 | */ | |
97 | spinlock_t pe_lock; | |
98 | ||
924e600d MS |
99 | /* Chunks with outstanding reads */ |
100 | spinlock_t tracked_chunk_lock; | |
101 | mempool_t *tracked_chunk_pool; | |
102 | struct hlist_head tracked_chunk_hash[DM_TRACKED_CHUNK_HASH_SIZE]; | |
103 | ||
ccc45ea8 JB |
104 | /* The on disk metadata handler */ |
105 | struct dm_exception_store *store; | |
106 | ||
107 | struct dm_kcopyd_client *kcopyd_client; | |
108 | ||
109 | /* Queue of snapshot writes for ksnapd to flush */ | |
110 | struct bio_list queued_bios; | |
111 | struct work_struct queued_bios_work; | |
112 | ||
924e600d MS |
113 | /* Wait for events based on state_bits */ |
114 | unsigned long state_bits; | |
115 | ||
116 | /* Range of chunks currently being merged. */ | |
117 | chunk_t first_merging_chunk; | |
118 | int num_merging_chunks; | |
1e03f97e | 119 | |
d8ddb1cf MS |
120 | /* |
121 | * The merge operation failed if this flag is set. | |
122 | * Failure modes are handled as follows: | |
123 | * - I/O error reading the header | |
124 | * => don't load the target; abort. | |
125 | * - Header does not have "valid" flag set | |
126 | * => use the origin; forget about the snapshot. | |
127 | * - I/O error when reading exceptions | |
128 | * => don't load the target; abort. | |
129 | * (We can't use the intermediate origin state.) | |
130 | * - I/O error while merging | |
131 | * => stop merging; set merge_failed; process I/O normally. | |
132 | */ | |
133 | int merge_failed; | |
134 | ||
9fe86254 MP |
135 | /* |
136 | * Incoming bios that overlap with chunks being merged must wait | |
137 | * for them to be committed. | |
138 | */ | |
139 | struct bio_list bios_queued_during_merge; | |
ccc45ea8 JB |
140 | }; |
141 | ||
1e03f97e MP |
142 | /* |
143 | * state_bits: | |
144 | * RUNNING_MERGE - Merge operation is in progress. | |
145 | * SHUTDOWN_MERGE - Set to signal that merge needs to be stopped; | |
146 | * cleared afterwards. | |
147 | */ | |
148 | #define RUNNING_MERGE 0 | |
149 | #define SHUTDOWN_MERGE 1 | |
150 | ||
c2411045 MP |
151 | struct dm_dev *dm_snap_origin(struct dm_snapshot *s) |
152 | { | |
153 | return s->origin; | |
154 | } | |
155 | EXPORT_SYMBOL(dm_snap_origin); | |
156 | ||
fc56f6fb MS |
157 | struct dm_dev *dm_snap_cow(struct dm_snapshot *s) |
158 | { | |
159 | return s->cow; | |
160 | } | |
161 | EXPORT_SYMBOL(dm_snap_cow); | |
162 | ||
c642f9e0 | 163 | static struct workqueue_struct *ksnapd; |
c4028958 | 164 | static void flush_queued_bios(struct work_struct *work); |
ca3a931f | 165 | |
ccc45ea8 JB |
166 | static sector_t chunk_to_sector(struct dm_exception_store *store, |
167 | chunk_t chunk) | |
168 | { | |
169 | return chunk << store->chunk_shift; | |
170 | } | |
171 | ||
172 | static int bdev_equal(struct block_device *lhs, struct block_device *rhs) | |
173 | { | |
174 | /* | |
175 | * There is only ever one instance of a particular block | |
176 | * device so we can compare pointers safely. | |
177 | */ | |
178 | return lhs == rhs; | |
179 | } | |
180 | ||
028867ac | 181 | struct dm_snap_pending_exception { |
1d4989c8 | 182 | struct dm_exception e; |
1da177e4 LT |
183 | |
184 | /* | |
185 | * Origin buffers waiting for this to complete are held | |
186 | * in a bio list | |
187 | */ | |
188 | struct bio_list origin_bios; | |
189 | struct bio_list snapshot_bios; | |
190 | ||
1da177e4 LT |
191 | /* Pointer back to snapshot context */ |
192 | struct dm_snapshot *snap; | |
193 | ||
194 | /* | |
195 | * 1 indicates the exception has already been sent to | |
196 | * kcopyd. | |
197 | */ | |
198 | int started; | |
199 | }; | |
200 | ||
201 | /* | |
202 | * Hash table mapping origin volumes to lists of snapshots and | |
203 | * a lock to protect it | |
204 | */ | |
e18b890b CL |
205 | static struct kmem_cache *exception_cache; |
206 | static struct kmem_cache *pending_cache; | |
1da177e4 | 207 | |
cd45daff MP |
208 | struct dm_snap_tracked_chunk { |
209 | struct hlist_node node; | |
210 | chunk_t chunk; | |
211 | }; | |
212 | ||
213 | static struct kmem_cache *tracked_chunk_cache; | |
214 | ||
215 | static struct dm_snap_tracked_chunk *track_chunk(struct dm_snapshot *s, | |
216 | chunk_t chunk) | |
217 | { | |
218 | struct dm_snap_tracked_chunk *c = mempool_alloc(s->tracked_chunk_pool, | |
219 | GFP_NOIO); | |
220 | unsigned long flags; | |
221 | ||
222 | c->chunk = chunk; | |
223 | ||
224 | spin_lock_irqsave(&s->tracked_chunk_lock, flags); | |
225 | hlist_add_head(&c->node, | |
226 | &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)]); | |
227 | spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); | |
228 | ||
229 | return c; | |
230 | } | |
231 | ||
232 | static void stop_tracking_chunk(struct dm_snapshot *s, | |
233 | struct dm_snap_tracked_chunk *c) | |
234 | { | |
235 | unsigned long flags; | |
236 | ||
237 | spin_lock_irqsave(&s->tracked_chunk_lock, flags); | |
238 | hlist_del(&c->node); | |
239 | spin_unlock_irqrestore(&s->tracked_chunk_lock, flags); | |
240 | ||
241 | mempool_free(c, s->tracked_chunk_pool); | |
242 | } | |
243 | ||
a8d41b59 MP |
244 | static int __chunk_is_tracked(struct dm_snapshot *s, chunk_t chunk) |
245 | { | |
246 | struct dm_snap_tracked_chunk *c; | |
247 | struct hlist_node *hn; | |
248 | int found = 0; | |
249 | ||
250 | spin_lock_irq(&s->tracked_chunk_lock); | |
251 | ||
252 | hlist_for_each_entry(c, hn, | |
253 | &s->tracked_chunk_hash[DM_TRACKED_CHUNK_HASH(chunk)], node) { | |
254 | if (c->chunk == chunk) { | |
255 | found = 1; | |
256 | break; | |
257 | } | |
258 | } | |
259 | ||
260 | spin_unlock_irq(&s->tracked_chunk_lock); | |
261 | ||
262 | return found; | |
263 | } | |
264 | ||
615d1eb9 MS |
265 | /* |
266 | * This conflicting I/O is extremely improbable in the caller, | |
267 | * so msleep(1) is sufficient and there is no need for a wait queue. | |
268 | */ | |
269 | static void __check_for_conflicting_io(struct dm_snapshot *s, chunk_t chunk) | |
270 | { | |
271 | while (__chunk_is_tracked(s, chunk)) | |
272 | msleep(1); | |
273 | } | |
274 | ||
1da177e4 LT |
275 | /* |
276 | * One of these per registered origin, held in the snapshot_origins hash | |
277 | */ | |
278 | struct origin { | |
279 | /* The origin device */ | |
280 | struct block_device *bdev; | |
281 | ||
282 | struct list_head hash_list; | |
283 | ||
284 | /* List of snapshots for this origin */ | |
285 | struct list_head snapshots; | |
286 | }; | |
287 | ||
288 | /* | |
289 | * Size of the hash table for origin volumes. If we make this | |
290 | * the size of the minors list, then it should be nearly perfect
291 | */ | |
292 | #define ORIGIN_HASH_SIZE 256 | |
293 | #define ORIGIN_MASK 0xFF | |
294 | static struct list_head *_origins; | |
295 | static struct rw_semaphore _origins_lock; | |
296 | ||
73dfd078 MP |
297 | static DECLARE_WAIT_QUEUE_HEAD(_pending_exceptions_done); |
298 | static DEFINE_SPINLOCK(_pending_exceptions_done_spinlock); | |
299 | static uint64_t _pending_exceptions_done_count; | |
300 | ||
1da177e4 LT |
301 | static int init_origin_hash(void) |
302 | { | |
303 | int i; | |
304 | ||
305 | _origins = kmalloc(ORIGIN_HASH_SIZE * sizeof(struct list_head), | |
306 | GFP_KERNEL); | |
307 | if (!_origins) { | |
72d94861 | 308 | DMERR("unable to allocate memory"); |
1da177e4 LT |
309 | return -ENOMEM; |
310 | } | |
311 | ||
312 | for (i = 0; i < ORIGIN_HASH_SIZE; i++) | |
313 | INIT_LIST_HEAD(_origins + i); | |
314 | init_rwsem(&_origins_lock); | |
315 | ||
316 | return 0; | |
317 | } | |
318 | ||
319 | static void exit_origin_hash(void) | |
320 | { | |
321 | kfree(_origins); | |
322 | } | |
323 | ||
028867ac | 324 | static unsigned origin_hash(struct block_device *bdev) |
1da177e4 LT |
325 | { |
326 | return bdev->bd_dev & ORIGIN_MASK; | |
327 | } | |
328 | ||
329 | static struct origin *__lookup_origin(struct block_device *origin) | |
330 | { | |
331 | struct list_head *ol; | |
332 | struct origin *o; | |
333 | ||
334 | ol = &_origins[origin_hash(origin)]; | |
335 | list_for_each_entry (o, ol, hash_list) | |
336 | if (bdev_equal(o->bdev, origin)) | |
337 | return o; | |
338 | ||
339 | return NULL; | |
340 | } | |
341 | ||
342 | static void __insert_origin(struct origin *o) | |
343 | { | |
344 | struct list_head *sl = &_origins[origin_hash(o->bdev)]; | |
345 | list_add_tail(&o->hash_list, sl); | |
346 | } | |
347 | ||
c1f0c183 MS |
348 | /* |
349 | * _origins_lock must be held when calling this function. | |
350 | * Returns number of snapshots registered using the supplied cow device, plus: | |
351 | * snap_src - a snapshot suitable for use as a source of exception handover | |
352 | * snap_dest - a snapshot capable of receiving exception handover. | |
9d3b15c4 MP |
353 | * snap_merge - an existing snapshot-merge target linked to the same origin. |
354 | * There can be at most one snapshot-merge target. The parameter is optional. | |
c1f0c183 | 355 | * |
9d3b15c4 | 356 | * Possible return values and states of snap_src and snap_dest. |
c1f0c183 MS |
357 | * 0: NULL, NULL - first new snapshot |
358 | * 1: snap_src, NULL - normal snapshot | |
359 | * 2: snap_src, snap_dest - waiting for handover | |
360 | * 2: snap_src, NULL - handed over, waiting for old to be deleted | |
361 | * 1: NULL, snap_dest - source got destroyed without handover | |
362 | */ | |
363 | static int __find_snapshots_sharing_cow(struct dm_snapshot *snap, | |
364 | struct dm_snapshot **snap_src, | |
9d3b15c4 MP |
365 | struct dm_snapshot **snap_dest, |
366 | struct dm_snapshot **snap_merge) | |
c1f0c183 MS |
367 | { |
368 | struct dm_snapshot *s; | |
369 | struct origin *o; | |
370 | int count = 0; | |
371 | int active; | |
372 | ||
373 | o = __lookup_origin(snap->origin->bdev); | |
374 | if (!o) | |
375 | goto out; | |
376 | ||
377 | list_for_each_entry(s, &o->snapshots, list) { | |
9d3b15c4 MP |
378 | if (dm_target_is_snapshot_merge(s->ti) && snap_merge) |
379 | *snap_merge = s; | |
c1f0c183 MS |
380 | if (!bdev_equal(s->cow->bdev, snap->cow->bdev)) |
381 | continue; | |
382 | ||
383 | down_read(&s->lock); | |
384 | active = s->active; | |
385 | up_read(&s->lock); | |
386 | ||
387 | if (active) { | |
388 | if (snap_src) | |
389 | *snap_src = s; | |
390 | } else if (snap_dest) | |
391 | *snap_dest = s; | |
392 | ||
393 | count++; | |
394 | } | |
395 | ||
396 | out: | |
397 | return count; | |
398 | } | |
399 | ||
400 | /* | |
401 | * On success, returns 1 if this snapshot is a handover destination, | |
402 | * otherwise returns 0. | |
403 | */ | |
404 | static int __validate_exception_handover(struct dm_snapshot *snap) | |
405 | { | |
406 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | |
9d3b15c4 | 407 | struct dm_snapshot *snap_merge = NULL; |
c1f0c183 MS |
408 | |
409 | /* Does snapshot need exceptions handed over to it? */ | |
9d3b15c4 MP |
410 | if ((__find_snapshots_sharing_cow(snap, &snap_src, &snap_dest, |
411 | &snap_merge) == 2) || | |
c1f0c183 MS |
412 | snap_dest) { |
413 | snap->ti->error = "Snapshot cow pairing for exception " | |
414 | "table handover failed"; | |
415 | return -EINVAL; | |
416 | } | |
417 | ||
418 | /* | |
419 | * If no snap_src was found, snap cannot become a handover | |
420 | * destination. | |
421 | */ | |
422 | if (!snap_src) | |
423 | return 0; | |
424 | ||
9d3b15c4 MP |
425 | /* |
426 | * Non-snapshot-merge handover? | |
427 | */ | |
428 | if (!dm_target_is_snapshot_merge(snap->ti)) | |
429 | return 1; | |
430 | ||
431 | /* | |
432 | * Do not allow more than one merging snapshot. | |
433 | */ | |
434 | if (snap_merge) { | |
435 | snap->ti->error = "A snapshot is already merging."; | |
436 | return -EINVAL; | |
437 | } | |
438 | ||
1e03f97e MP |
439 | if (!snap_src->store->type->prepare_merge || |
440 | !snap_src->store->type->commit_merge) { | |
441 | snap->ti->error = "Snapshot exception store does not " | |
442 | "support snapshot-merge."; | |
443 | return -EINVAL; | |
444 | } | |
445 | ||
c1f0c183 MS |
446 | return 1; |
447 | } | |
448 | ||
449 | static void __insert_snapshot(struct origin *o, struct dm_snapshot *s) | |
450 | { | |
451 | struct dm_snapshot *l; | |
452 | ||
453 | /* Sort the list according to chunk size, largest first, smallest last */
454 | list_for_each_entry(l, &o->snapshots, list) | |
455 | if (l->store->chunk_size < s->store->chunk_size) | |
456 | break; | |
457 | list_add_tail(&s->list, &l->list); | |
458 | } | |
459 | ||
1da177e4 LT |
460 | /* |
461 | * Make a note of the snapshot and its origin so we can look it | |
462 | * up when the origin has a write on it. | |
c1f0c183 MS |
463 | * |
464 | * Also validate snapshot exception store handovers. | |
465 | * On success, returns 1 if this registration is a handover destination, | |
466 | * otherwise returns 0. | |
1da177e4 LT |
467 | */ |
468 | static int register_snapshot(struct dm_snapshot *snap) | |
469 | { | |
c1f0c183 | 470 | struct origin *o, *new_o = NULL; |
1da177e4 | 471 | struct block_device *bdev = snap->origin->bdev; |
c1f0c183 | 472 | int r = 0; |
1da177e4 | 473 | |
60c856c8 MP |
474 | new_o = kmalloc(sizeof(*new_o), GFP_KERNEL); |
475 | if (!new_o) | |
476 | return -ENOMEM; | |
477 | ||
1da177e4 | 478 | down_write(&_origins_lock); |
1da177e4 | 479 | |
c1f0c183 MS |
480 | r = __validate_exception_handover(snap); |
481 | if (r < 0) { | |
482 | kfree(new_o); | |
483 | goto out; | |
484 | } | |
485 | ||
486 | o = __lookup_origin(bdev); | |
60c856c8 MP |
487 | if (o) |
488 | kfree(new_o); | |
489 | else { | |
1da177e4 | 490 | /* New origin */ |
60c856c8 | 491 | o = new_o; |
1da177e4 LT |
492 | |
493 | /* Initialise the struct */ | |
494 | INIT_LIST_HEAD(&o->snapshots); | |
495 | o->bdev = bdev; | |
496 | ||
497 | __insert_origin(o); | |
498 | } | |
499 | ||
c1f0c183 MS |
500 | __insert_snapshot(o, snap); |
501 | ||
502 | out: | |
503 | up_write(&_origins_lock); | |
504 | ||
505 | return r; | |
506 | } | |
507 | ||
508 | /* | |
509 | * Move snapshot to correct place in list according to chunk size. | |
510 | */ | |
511 | static void reregister_snapshot(struct dm_snapshot *s) | |
512 | { | |
513 | struct block_device *bdev = s->origin->bdev; | |
514 | ||
515 | down_write(&_origins_lock); | |
516 | ||
517 | list_del(&s->list); | |
518 | __insert_snapshot(__lookup_origin(bdev), s); | |
1da177e4 LT |
519 | |
520 | up_write(&_origins_lock); | |
1da177e4 LT |
521 | } |
522 | ||
523 | static void unregister_snapshot(struct dm_snapshot *s) | |
524 | { | |
525 | struct origin *o; | |
526 | ||
527 | down_write(&_origins_lock); | |
528 | o = __lookup_origin(s->origin->bdev); | |
529 | ||
530 | list_del(&s->list); | |
c1f0c183 | 531 | if (o && list_empty(&o->snapshots)) { |
1da177e4 LT |
532 | list_del(&o->hash_list); |
533 | kfree(o); | |
534 | } | |
535 | ||
536 | up_write(&_origins_lock); | |
537 | } | |
538 | ||
539 | /* | |
540 | * Implementation of the exception hash tables. | |
d74f81f8 MB |
541 | * The lowest hash_shift bits of the chunk number are ignored, allowing |
542 | * some consecutive chunks to be grouped together. | |
1da177e4 | 543 | */ |
3510cb94 JB |
544 | static int dm_exception_table_init(struct dm_exception_table *et, |
545 | uint32_t size, unsigned hash_shift) | |
1da177e4 LT |
546 | { |
547 | unsigned int i; | |
548 | ||
d74f81f8 | 549 | et->hash_shift = hash_shift; |
1da177e4 LT |
550 | et->hash_mask = size - 1; |
551 | et->table = dm_vcalloc(size, sizeof(struct list_head)); | |
552 | if (!et->table) | |
553 | return -ENOMEM; | |
554 | ||
555 | for (i = 0; i < size; i++) | |
556 | INIT_LIST_HEAD(et->table + i); | |
557 | ||
558 | return 0; | |
559 | } | |
560 | ||
3510cb94 JB |
561 | static void dm_exception_table_exit(struct dm_exception_table *et, |
562 | struct kmem_cache *mem) | |
1da177e4 LT |
563 | { |
564 | struct list_head *slot; | |
1d4989c8 | 565 | struct dm_exception *ex, *next; |
1da177e4 LT |
566 | int i, size; |
567 | ||
568 | size = et->hash_mask + 1; | |
569 | for (i = 0; i < size; i++) { | |
570 | slot = et->table + i; | |
571 | ||
572 | list_for_each_entry_safe (ex, next, slot, hash_list) | |
573 | kmem_cache_free(mem, ex); | |
574 | } | |
575 | ||
576 | vfree(et->table); | |
577 | } | |
578 | ||
191437a5 | 579 | static uint32_t exception_hash(struct dm_exception_table *et, chunk_t chunk) |
1da177e4 | 580 | { |
d74f81f8 | 581 | return (chunk >> et->hash_shift) & et->hash_mask; |
1da177e4 LT |
582 | } |
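The grouping effect is easiest to see numerically. Below is a minimal userspace sketch of the same computation (illustrative only, not part of dm-snap.c; it assumes hash_shift = 4 and a 64-bucket table, since the actual DM_CHUNK_CONSECUTIVE_BITS value lives in dm-exception-store.h):

```c
#include <stdint.h>
#include <stdio.h>

/* Mirrors exception_hash(): drop the grouping bits, then mask. */
static uint32_t example_exception_hash(uint64_t chunk,
				       unsigned hash_shift,
				       uint32_t hash_mask)
{
	return (uint32_t)(chunk >> hash_shift) & hash_mask;
}

int main(void)
{
	uint64_t chunk;

	/* With hash_shift = 4, chunks 0x30..0x3f all share bucket 3. */
	for (chunk = 0x30; chunk <= 0x3f; chunk++)
		printf("chunk %#llx -> bucket %u\n",
		       (unsigned long long)chunk,
		       example_exception_hash(chunk, 4, 63));
	return 0;
}
```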
583 | ||
3510cb94 | 584 | static void dm_remove_exception(struct dm_exception *e) |
1da177e4 LT |
585 | { |
586 | list_del(&e->hash_list); | |
587 | } | |
588 | ||
589 | /* | |
590 | * Return the exception data for a sector, or NULL if not | |
591 | * remapped. | |
592 | */ | |
3510cb94 JB |
593 | static struct dm_exception *dm_lookup_exception(struct dm_exception_table *et, |
594 | chunk_t chunk) | |
1da177e4 LT |
595 | { |
596 | struct list_head *slot; | |
1d4989c8 | 597 | struct dm_exception *e; |
1da177e4 LT |
598 | |
599 | slot = &et->table[exception_hash(et, chunk)]; | |
600 | list_for_each_entry (e, slot, hash_list) | |
d74f81f8 MB |
601 | if (chunk >= e->old_chunk && |
602 | chunk <= e->old_chunk + dm_consecutive_chunk_count(e)) | |
1da177e4 LT |
603 | return e; |
604 | ||
605 | return NULL; | |
606 | } | |
607 | ||
3510cb94 | 608 | static struct dm_exception *alloc_completed_exception(void) |
1da177e4 | 609 | { |
1d4989c8 | 610 | struct dm_exception *e; |
1da177e4 LT |
611 | |
612 | e = kmem_cache_alloc(exception_cache, GFP_NOIO); | |
613 | if (!e) | |
614 | e = kmem_cache_alloc(exception_cache, GFP_ATOMIC); | |
615 | ||
616 | return e; | |
617 | } | |
618 | ||
3510cb94 | 619 | static void free_completed_exception(struct dm_exception *e) |
1da177e4 LT |
620 | { |
621 | kmem_cache_free(exception_cache, e); | |
622 | } | |
623 | ||
92e86812 | 624 | static struct dm_snap_pending_exception *alloc_pending_exception(struct dm_snapshot *s) |
1da177e4 | 625 | { |
92e86812 MP |
626 | struct dm_snap_pending_exception *pe = mempool_alloc(s->pending_pool, |
627 | GFP_NOIO); | |
628 | ||
879129d2 | 629 | atomic_inc(&s->pending_exceptions_count); |
92e86812 MP |
630 | pe->snap = s; |
631 | ||
632 | return pe; | |
1da177e4 LT |
633 | } |
634 | ||
028867ac | 635 | static void free_pending_exception(struct dm_snap_pending_exception *pe) |
1da177e4 | 636 | { |
879129d2 MP |
637 | struct dm_snapshot *s = pe->snap; |
638 | ||
639 | mempool_free(pe, s->pending_pool); | |
640 | smp_mb__before_atomic_dec(); | |
641 | atomic_dec(&s->pending_exceptions_count); | |
1da177e4 LT |
642 | } |
643 | ||
3510cb94 JB |
644 | static void dm_insert_exception(struct dm_exception_table *eh, |
645 | struct dm_exception *new_e) | |
d74f81f8 | 646 | { |
d74f81f8 | 647 | struct list_head *l; |
1d4989c8 | 648 | struct dm_exception *e = NULL; |
d74f81f8 MB |
649 | |
650 | l = &eh->table[exception_hash(eh, new_e->old_chunk)]; | |
651 | ||
652 | /* Add immediately if this table doesn't support consecutive chunks */ | |
653 | if (!eh->hash_shift) | |
654 | goto out; | |
655 | ||
656 | /* List is ordered by old_chunk */ | |
657 | list_for_each_entry_reverse(e, l, hash_list) { | |
658 | /* Insert after an existing chunk? */ | |
659 | if (new_e->old_chunk == (e->old_chunk + | |
660 | dm_consecutive_chunk_count(e) + 1) && | |
661 | new_e->new_chunk == (dm_chunk_number(e->new_chunk) + | |
662 | dm_consecutive_chunk_count(e) + 1)) { | |
663 | dm_consecutive_chunk_count_inc(e); | |
3510cb94 | 664 | free_completed_exception(new_e); |
d74f81f8 MB |
665 | return; |
666 | } | |
667 | ||
668 | /* Insert before an existing chunk? */ | |
669 | if (new_e->old_chunk == (e->old_chunk - 1) && | |
670 | new_e->new_chunk == (dm_chunk_number(e->new_chunk) - 1)) { | |
671 | dm_consecutive_chunk_count_inc(e); | |
672 | e->old_chunk--; | |
673 | e->new_chunk--; | |
3510cb94 | 674 | free_completed_exception(new_e); |
d74f81f8 MB |
675 | return; |
676 | } | |
677 | ||
678 | if (new_e->old_chunk > e->old_chunk) | |
679 | break; | |
680 | } | |
681 | ||
682 | out: | |
683 | list_add(&new_e->hash_list, e ? &e->hash_list : l); | |
684 | } | |
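/*
 * Worked example (illustrative): suppose the table holds an exception
 * e with old_chunk = 10, new_chunk = 20 and a consecutive count of 2,
 * i.e. old chunks 10..12 map to new chunks 20..22. Inserting
 * old 13 -> new 23 matches the "insert after" test
 * (13 == 10 + 2 + 1 and 23 == 20 + 2 + 1) and just increments the
 * count; inserting old 9 -> new 19 matches the "insert before" test
 * and slides the range down to 9..12 -> 19..22.
 */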
685 | ||
a159c1ac JB |
686 | /* |
687 | * Callback used by the exception stores to load exceptions when | |
688 | * initialising. | |
689 | */ | |
690 | static int dm_add_exception(void *context, chunk_t old, chunk_t new) | |
1da177e4 | 691 | { |
a159c1ac | 692 | struct dm_snapshot *s = context; |
1d4989c8 | 693 | struct dm_exception *e; |
1da177e4 | 694 | |
3510cb94 | 695 | e = alloc_completed_exception(); |
1da177e4 LT |
696 | if (!e) |
697 | return -ENOMEM; | |
698 | ||
699 | e->old_chunk = old; | |
d74f81f8 MB |
700 | |
701 | /* Consecutive_count is implicitly initialised to zero */ | |
1da177e4 | 702 | e->new_chunk = new; |
d74f81f8 | 703 | |
3510cb94 | 704 | dm_insert_exception(&s->complete, e); |
d74f81f8 | 705 | |
1da177e4 LT |
706 | return 0; |
707 | } | |
708 | ||
7e201b35 MP |
709 | #define min_not_zero(l, r) (((l) == 0) ? (r) : (((r) == 0) ? (l) : min(l, r))) |
710 | ||
711 | /* | |
712 | * Return a minimum chunk size of all snapshots that have the specified origin. | |
713 | * Return zero if the origin has no snapshots. | |
714 | */ | |
715 | static sector_t __minimum_chunk_size(struct origin *o) | |
716 | { | |
717 | struct dm_snapshot *snap; | |
718 | unsigned chunk_size = 0; | |
719 | ||
720 | if (o) | |
721 | list_for_each_entry(snap, &o->snapshots, list) | |
722 | chunk_size = min_not_zero(chunk_size, | |
723 | snap->store->chunk_size); | |
724 | ||
725 | return chunk_size; | |
726 | } | |
727 | ||
1da177e4 LT |
728 | /* |
729 | * Hard coded magic. | |
730 | */ | |
731 | static int calc_max_buckets(void) | |
732 | { | |
733 | /* use a fixed size of 2MB */ | |
734 | unsigned long mem = 2 * 1024 * 1024; | |
735 | mem /= sizeof(struct list_head); | |
736 | ||
737 | return mem; | |
738 | } | |
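/*
 * e.g. on 64-bit, sizeof(struct list_head) is 16 bytes (two pointers),
 * so this allows up to 2MB / 16 = 131072 buckets.
 */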
739 | ||
1da177e4 LT |
740 | /* |
741 | * Allocate room for a suitable hash table. | |
742 | */ | |
fee1998e | 743 | static int init_hash_tables(struct dm_snapshot *s) |
1da177e4 LT |
744 | { |
745 | sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; | |
746 | ||
747 | /* | |
748 | * Calculate based on the size of the original volume or | |
749 | * the COW volume... | |
750 | */ | |
fc56f6fb | 751 | cow_dev_size = get_dev_size(s->cow->bdev); |
1da177e4 LT |
752 | origin_dev_size = get_dev_size(s->origin->bdev); |
753 | max_buckets = calc_max_buckets(); | |
754 | ||
fee1998e | 755 | hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; |
1da177e4 LT |
756 | hash_size = min(hash_size, max_buckets); |
757 | ||
8e87b9b8 MP |
758 | if (hash_size < 64) |
759 | hash_size = 64; | |
8defd830 | 760 | hash_size = rounddown_pow_of_two(hash_size); |
3510cb94 JB |
761 | if (dm_exception_table_init(&s->complete, hash_size, |
762 | DM_CHUNK_CONSECUTIVE_BITS)) | |
1da177e4 LT |
763 | return -ENOMEM; |
764 | ||
765 | /* | |
766 | * Allocate the hash table for in-flight exceptions.
767 | * Make this smaller than the completed-exception table above.
768 | */ | |
769 | hash_size >>= 3; | |
770 | if (hash_size < 64) | |
771 | hash_size = 64; | |
772 | ||
3510cb94 JB |
773 | if (dm_exception_table_init(&s->pending, hash_size, 0)) { |
774 | dm_exception_table_exit(&s->complete, exception_cache); | |
1da177e4 LT |
775 | return -ENOMEM; |
776 | } | |
777 | ||
778 | return 0; | |
779 | } | |
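/*
 * Sizing example (illustrative, 64-bit): a 16GB origin with 4KB
 * chunks spans 2^22 chunks, so hash_size starts at 4194304, is
 * clamped to max_buckets (131072) and stays there after rounding;
 * the pending table then gets 131072 >> 3 = 16384 buckets.
 */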
780 | ||
1e03f97e MP |
781 | static void merge_shutdown(struct dm_snapshot *s) |
782 | { | |
783 | clear_bit_unlock(RUNNING_MERGE, &s->state_bits); | |
784 | smp_mb__after_clear_bit(); | |
785 | wake_up_bit(&s->state_bits, RUNNING_MERGE); | |
786 | } | |
787 | ||
9fe86254 MP |
788 | static struct bio *__release_queued_bios_after_merge(struct dm_snapshot *s) |
789 | { | |
790 | s->first_merging_chunk = 0; | |
791 | s->num_merging_chunks = 0; | |
792 | ||
793 | return bio_list_get(&s->bios_queued_during_merge); | |
794 | } | |
795 | ||
1e03f97e MP |
796 | /* |
797 | * Remove one chunk from the index of completed exceptions. | |
798 | */ | |
799 | static int __remove_single_exception_chunk(struct dm_snapshot *s, | |
800 | chunk_t old_chunk) | |
801 | { | |
802 | struct dm_exception *e; | |
803 | ||
1e03f97e MP |
804 | e = dm_lookup_exception(&s->complete, old_chunk); |
805 | if (!e) { | |
806 | DMERR("Corruption detected: exception for block %llu is " | |
807 | "on disk but not in memory", | |
808 | (unsigned long long)old_chunk); | |
809 | return -EINVAL; | |
810 | } | |
811 | ||
812 | /* | |
813 | * If this is the only chunk using this exception, remove exception. | |
814 | */ | |
815 | if (!dm_consecutive_chunk_count(e)) { | |
816 | dm_remove_exception(e); | |
817 | free_completed_exception(e); | |
818 | return 0; | |
819 | } | |
820 | ||
821 | /* | |
822 | * The chunk may be either at the beginning or the end of a | |
823 | * group of consecutive chunks - never in the middle. We are | |
824 | * removing chunks in the opposite order to that in which they | |
825 | * were added, so this should always be true. | |
826 | * Decrement the consecutive chunk counter and adjust the | |
827 | * starting point if necessary. | |
828 | */ | |
829 | if (old_chunk == e->old_chunk) { | |
830 | e->old_chunk++; | |
831 | e->new_chunk++; | |
832 | } else if (old_chunk != e->old_chunk + | |
833 | dm_consecutive_chunk_count(e)) { | |
834 | DMERR("Attempt to merge block %llu from the " | |
835 | "middle of a chunk range [%llu - %llu]", | |
836 | (unsigned long long)old_chunk, | |
837 | (unsigned long long)e->old_chunk, | |
838 | (unsigned long long) | |
839 | e->old_chunk + dm_consecutive_chunk_count(e)); | |
840 | return -EINVAL; | |
841 | } | |
842 | ||
843 | dm_consecutive_chunk_count_dec(e); | |
844 | ||
845 | return 0; | |
846 | } | |
847 | ||
9fe86254 MP |
848 | static void flush_bios(struct bio *bio); |
849 | ||
850 | static int remove_single_exception_chunk(struct dm_snapshot *s) | |
1e03f97e | 851 | { |
9fe86254 MP |
852 | struct bio *b = NULL; |
853 | int r; | |
854 | chunk_t old_chunk = s->first_merging_chunk + s->num_merging_chunks - 1; | |
1e03f97e MP |
855 | |
856 | down_write(&s->lock); | |
9fe86254 MP |
857 | |
858 | /* | |
859 | * Process chunks (and associated exceptions) in reverse order | |
860 | * so that dm_consecutive_chunk_count_dec() accounting works. | |
861 | */ | |
862 | do { | |
863 | r = __remove_single_exception_chunk(s, old_chunk); | |
864 | if (r) | |
865 | goto out; | |
866 | } while (old_chunk-- > s->first_merging_chunk); | |
867 | ||
868 | b = __release_queued_bios_after_merge(s); | |
869 | ||
870 | out: | |
1e03f97e | 871 | up_write(&s->lock); |
9fe86254 MP |
872 | if (b) |
873 | flush_bios(b); | |
1e03f97e MP |
874 | |
875 | return r; | |
876 | } | |
877 | ||
73dfd078 MP |
878 | static int origin_write_extent(struct dm_snapshot *merging_snap, |
879 | sector_t sector, unsigned chunk_size); | |
880 | ||
1e03f97e MP |
881 | static void merge_callback(int read_err, unsigned long write_err, |
882 | void *context); | |
883 | ||
73dfd078 MP |
884 | static uint64_t read_pending_exceptions_done_count(void) |
885 | { | |
886 | uint64_t pending_exceptions_done; | |
887 | ||
888 | spin_lock(&_pending_exceptions_done_spinlock); | |
889 | pending_exceptions_done = _pending_exceptions_done_count; | |
890 | spin_unlock(&_pending_exceptions_done_spinlock); | |
891 | ||
892 | return pending_exceptions_done; | |
893 | } | |
894 | ||
895 | static void increment_pending_exceptions_done_count(void) | |
896 | { | |
897 | spin_lock(&_pending_exceptions_done_spinlock); | |
898 | _pending_exceptions_done_count++; | |
899 | spin_unlock(&_pending_exceptions_done_spinlock); | |
900 | ||
901 | wake_up_all(&_pending_exceptions_done); | |
902 | } | |
903 | ||
1e03f97e MP |
904 | static void snapshot_merge_next_chunks(struct dm_snapshot *s) |
905 | { | |
8a2d5286 | 906 | int i, linear_chunks; |
1e03f97e MP |
907 | chunk_t old_chunk, new_chunk; |
908 | struct dm_io_region src, dest; | |
8a2d5286 | 909 | sector_t io_size; |
73dfd078 | 910 | uint64_t previous_count; |
1e03f97e MP |
911 | |
912 | BUG_ON(!test_bit(RUNNING_MERGE, &s->state_bits)); | |
913 | if (unlikely(test_bit(SHUTDOWN_MERGE, &s->state_bits))) | |
914 | goto shut; | |
915 | ||
916 | /* | |
917 | * The valid flag never changes during merge, so no lock is required.
918 | */ | |
919 | if (!s->valid) { | |
920 | DMERR("Snapshot is invalid: can't merge"); | |
921 | goto shut; | |
922 | } | |
923 | ||
8a2d5286 MS |
924 | linear_chunks = s->store->type->prepare_merge(s->store, &old_chunk, |
925 | &new_chunk); | |
926 | if (linear_chunks <= 0) { | |
d8ddb1cf | 927 | if (linear_chunks < 0) { |
1e03f97e MP |
928 | DMERR("Read error in exception store: " |
929 | "shutting down merge"); | |
d8ddb1cf MS |
930 | down_write(&s->lock); |
931 | s->merge_failed = 1; | |
932 | up_write(&s->lock); | |
933 | } | |
1e03f97e MP |
934 | goto shut; |
935 | } | |
936 | ||
8a2d5286 MS |
937 | /* Adjust old_chunk and new_chunk to reflect start of linear region */ |
938 | old_chunk = old_chunk + 1 - linear_chunks; | |
939 | new_chunk = new_chunk + 1 - linear_chunks; | |
940 | ||
941 | /* | |
942 | * Use one (potentially large) I/O to copy all 'linear_chunks' | |
943 | * from the exception store to the origin | |
944 | */ | |
945 | io_size = linear_chunks * s->store->chunk_size; | |
1e03f97e | 946 | |
1e03f97e MP |
947 | dest.bdev = s->origin->bdev; |
948 | dest.sector = chunk_to_sector(s->store, old_chunk); | |
8a2d5286 | 949 | dest.count = min(io_size, get_dev_size(dest.bdev) - dest.sector); |
1e03f97e MP |
950 | |
951 | src.bdev = s->cow->bdev; | |
952 | src.sector = chunk_to_sector(s->store, new_chunk); | |
953 | src.count = dest.count; | |
954 | ||
73dfd078 MP |
955 | /* |
956 | * Reallocate any exceptions needed in other snapshots then | |
957 | * wait for the pending exceptions to complete. | |
958 | * Each time any pending exception (globally on the system) | |
959 | * completes we are woken and repeat the process to find out | |
960 | * if we can proceed. While this may not seem a particularly | |
961 | * efficient algorithm, it is not expected to have any | |
962 | * significant impact on performance. | |
963 | */ | |
964 | previous_count = read_pending_exceptions_done_count(); | |
8a2d5286 | 965 | while (origin_write_extent(s, dest.sector, io_size)) { |
73dfd078 MP |
966 | wait_event(_pending_exceptions_done, |
967 | (read_pending_exceptions_done_count() != | |
968 | previous_count)); | |
969 | /* Retry after the wait, until all exceptions are done. */ | |
970 | previous_count = read_pending_exceptions_done_count(); | |
971 | } | |
972 | ||
9fe86254 MP |
973 | down_write(&s->lock); |
974 | s->first_merging_chunk = old_chunk; | |
8a2d5286 | 975 | s->num_merging_chunks = linear_chunks; |
9fe86254 MP |
976 | up_write(&s->lock); |
977 | ||
8a2d5286 MS |
978 | /* Wait until writes to all 'linear_chunks' drain */ |
979 | for (i = 0; i < linear_chunks; i++) | |
980 | __check_for_conflicting_io(s, old_chunk + i); | |
9fe86254 | 981 | |
1e03f97e MP |
982 | dm_kcopyd_copy(s->kcopyd_client, &src, 1, &dest, 0, merge_callback, s); |
983 | return; | |
984 | ||
985 | shut: | |
986 | merge_shutdown(s); | |
987 | } | |
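/*
 * Example (illustrative): if prepare_merge() reports linear_chunks = 4
 * ending at old_chunk 103 / new_chunk 55, snapshot_merge_next_chunks()
 * rewinds to old_chunk 100 / new_chunk 52 and issues one kcopyd I/O
 * copying new chunks 52..55 back over old chunks 100..103.
 */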
988 | ||
9fe86254 MP |
989 | static void error_bios(struct bio *bio); |
990 | ||
1e03f97e MP |
991 | static void merge_callback(int read_err, unsigned long write_err, void *context) |
992 | { | |
993 | struct dm_snapshot *s = context; | |
9fe86254 | 994 | struct bio *b = NULL; |
1e03f97e MP |
995 | |
996 | if (read_err || write_err) { | |
997 | if (read_err) | |
998 | DMERR("Read error: shutting down merge."); | |
999 | else | |
1000 | DMERR("Write error: shutting down merge."); | |
1001 | goto shut; | |
1002 | } | |
1003 | ||
9fe86254 MP |
1004 | if (s->store->type->commit_merge(s->store, |
1005 | s->num_merging_chunks) < 0) { | |
1e03f97e MP |
1006 | DMERR("Write error in exception store: shutting down merge"); |
1007 | goto shut; | |
1008 | } | |
1009 | ||
9fe86254 MP |
1010 | if (remove_single_exception_chunk(s) < 0) |
1011 | goto shut; | |
1012 | ||
1e03f97e MP |
1013 | snapshot_merge_next_chunks(s); |
1014 | ||
1015 | return; | |
1016 | ||
1017 | shut: | |
9fe86254 | 1018 | down_write(&s->lock); |
d8ddb1cf | 1019 | s->merge_failed = 1; |
9fe86254 MP |
1020 | b = __release_queued_bios_after_merge(s); |
1021 | up_write(&s->lock); | |
1022 | error_bios(b); | |
1023 | ||
1e03f97e MP |
1024 | merge_shutdown(s); |
1025 | } | |
1026 | ||
1027 | static void start_merge(struct dm_snapshot *s) | |
1028 | { | |
1029 | if (!test_and_set_bit(RUNNING_MERGE, &s->state_bits)) | |
1030 | snapshot_merge_next_chunks(s); | |
1031 | } | |
1032 | ||
1033 | static int wait_schedule(void *ptr) | |
1034 | { | |
1035 | schedule(); | |
1036 | ||
1037 | return 0; | |
1038 | } | |
1039 | ||
1040 | /* | |
1041 | * Stop the merging process and wait until it finishes. | |
1042 | */ | |
1043 | static void stop_merge(struct dm_snapshot *s) | |
1044 | { | |
1045 | set_bit(SHUTDOWN_MERGE, &s->state_bits); | |
1046 | wait_on_bit(&s->state_bits, RUNNING_MERGE, wait_schedule, | |
1047 | TASK_UNINTERRUPTIBLE); | |
1048 | clear_bit(SHUTDOWN_MERGE, &s->state_bits); | |
1049 | } | |
1050 | ||
1da177e4 LT |
1051 | /* |
1052 | * Construct a snapshot mapping: <origin_dev> <COW-dev> <p/n> <chunk-size> | |
1053 | */ | |
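/*
 * For example (hypothetical volume names; 'P' = persistent store,
 * 8 x 512-byte sectors = 4KB chunks):
 *
 *   echo "0 $(blockdev --getsz /dev/vg/base) snapshot \
 *       /dev/vg/base /dev/vg/base-cow P 8" | dmsetup create snap
 */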
1054 | static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
1055 | { | |
1056 | struct dm_snapshot *s; | |
cd45daff | 1057 | int i; |
1da177e4 | 1058 | int r = -EINVAL; |
fc56f6fb | 1059 | char *origin_path, *cow_path; |
10b8106a MS |
1060 | unsigned args_used, num_flush_requests = 1; |
1061 | fmode_t origin_mode = FMODE_READ; | |
1da177e4 | 1062 | |
4c7e3bf4 | 1063 | if (argc != 4) { |
72d94861 | 1064 | ti->error = "requires exactly 4 arguments"; |
1da177e4 | 1065 | r = -EINVAL; |
fc56f6fb | 1066 | goto bad; |
1da177e4 LT |
1067 | } |
1068 | ||
10b8106a MS |
1069 | if (dm_target_is_snapshot_merge(ti)) { |
1070 | num_flush_requests = 2; | |
1071 | origin_mode = FMODE_WRITE; | |
1072 | } | |
1073 | ||
fc56f6fb MS |
1074 | s = kmalloc(sizeof(*s), GFP_KERNEL); |
1075 | if (!s) { | |
1076 | ti->error = "Cannot allocate snapshot context private " | |
1077 | "structure"; | |
1078 | r = -ENOMEM; | |
1079 | goto bad; | |
1080 | } | |
1081 | ||
c2411045 MP |
1082 | origin_path = argv[0]; |
1083 | argv++; | |
1084 | argc--; | |
1085 | ||
1086 | r = dm_get_device(ti, origin_path, origin_mode, &s->origin); | |
1087 | if (r) { | |
1088 | ti->error = "Cannot get origin device"; | |
1089 | goto bad_origin; | |
1090 | } | |
1091 | ||
fc56f6fb MS |
1092 | cow_path = argv[0]; |
1093 | argv++; | |
1094 | argc--; | |
1095 | ||
8215d6ec | 1096 | r = dm_get_device(ti, cow_path, FMODE_READ | FMODE_WRITE, &s->cow); |
fc56f6fb MS |
1097 | if (r) { |
1098 | ti->error = "Cannot get COW device"; | |
1099 | goto bad_cow; | |
1100 | } | |
1101 | ||
1102 | r = dm_exception_store_create(ti, argc, argv, s, &args_used, &s->store); | |
fee1998e JB |
1103 | if (r) { |
1104 | ti->error = "Couldn't create exception store"; | |
1da177e4 | 1105 | r = -EINVAL; |
fc56f6fb | 1106 | goto bad_store; |
1da177e4 LT |
1107 | } |
1108 | ||
fee1998e JB |
1109 | argv += args_used; |
1110 | argc -= args_used; | |
1111 | ||
fc56f6fb | 1112 | s->ti = ti; |
1da177e4 | 1113 | s->valid = 1; |
aa14edeb | 1114 | s->active = 0; |
c26655ca | 1115 | s->suspended = 0; |
879129d2 | 1116 | atomic_set(&s->pending_exceptions_count, 0); |
1da177e4 | 1117 | init_rwsem(&s->lock); |
c1f0c183 | 1118 | INIT_LIST_HEAD(&s->list); |
ca3a931f | 1119 | spin_lock_init(&s->pe_lock); |
1e03f97e | 1120 | s->state_bits = 0; |
d8ddb1cf | 1121 | s->merge_failed = 0; |
9fe86254 MP |
1122 | s->first_merging_chunk = 0; |
1123 | s->num_merging_chunks = 0; | |
1124 | bio_list_init(&s->bios_queued_during_merge); | |
1da177e4 LT |
1125 | |
1126 | /* Allocate hash table for COW data */ | |
fee1998e | 1127 | if (init_hash_tables(s)) { |
1da177e4 LT |
1128 | ti->error = "Unable to allocate hash table space"; |
1129 | r = -ENOMEM; | |
fee1998e | 1130 | goto bad_hash_tables; |
1da177e4 LT |
1131 | } |
1132 | ||
eb69aca5 | 1133 | r = dm_kcopyd_client_create(SNAPSHOT_PAGES, &s->kcopyd_client); |
1da177e4 LT |
1134 | if (r) { |
1135 | ti->error = "Could not create kcopyd client"; | |
fee1998e | 1136 | goto bad_kcopyd; |
1da177e4 LT |
1137 | } |
1138 | ||
92e86812 MP |
1139 | s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); |
1140 | if (!s->pending_pool) { | |
1141 | ti->error = "Could not allocate mempool for pending exceptions"; | |
fee1998e | 1142 | goto bad_pending_pool; |
92e86812 MP |
1143 | } |
1144 | ||
cd45daff MP |
1145 | s->tracked_chunk_pool = mempool_create_slab_pool(MIN_IOS, |
1146 | tracked_chunk_cache); | |
1147 | if (!s->tracked_chunk_pool) { | |
1148 | ti->error = "Could not allocate tracked_chunk mempool for " | |
1149 | "tracking reads"; | |
92e86812 | 1150 | goto bad_tracked_chunk_pool; |
cd45daff MP |
1151 | } |
1152 | ||
1153 | for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) | |
1154 | INIT_HLIST_HEAD(&s->tracked_chunk_hash[i]); | |
1155 | ||
1156 | spin_lock_init(&s->tracked_chunk_lock); | |
1157 | ||
c1f0c183 MS |
1158 | bio_list_init(&s->queued_bios); |
1159 | INIT_WORK(&s->queued_bios_work, flush_queued_bios); | |
1160 | ||
1161 | ti->private = s; | |
10b8106a | 1162 | ti->num_flush_requests = num_flush_requests; |
c1f0c183 MS |
1163 | |
1164 | /* Add snapshot to the list of snapshots for this origin */ | |
1165 | /* Exceptions aren't triggered till snapshot_resume() is called */ | |
1166 | r = register_snapshot(s); | |
1167 | if (r == -ENOMEM) { | |
1168 | ti->error = "Snapshot origin struct allocation failed"; | |
1169 | goto bad_load_and_register; | |
1170 | } else if (r < 0) { | |
1171 | /* invalid handover, register_snapshot has set ti->error */ | |
1172 | goto bad_load_and_register; | |
1173 | } | |
1174 | ||
1175 | /* | |
1176 | * Metadata must only be loaded into one table at once, so skip this | |
1177 | * if metadata will be handed over during resume. | |
1178 | * Chunk size will be set during the handover - set it to zero to | |
1179 | * ensure it's ignored. | |
1180 | */ | |
1181 | if (r > 0) { | |
1182 | s->store->chunk_size = 0; | |
1183 | return 0; | |
1184 | } | |
1185 | ||
493df71c JB |
1186 | r = s->store->type->read_metadata(s->store, dm_add_exception, |
1187 | (void *)s); | |
0764147b | 1188 | if (r < 0) { |
f9cea4f7 | 1189 | ti->error = "Failed to read snapshot metadata"; |
c1f0c183 | 1190 | goto bad_read_metadata; |
0764147b MB |
1191 | } else if (r > 0) { |
1192 | s->valid = 0; | |
1193 | DMWARN("Snapshot is marked invalid."); | |
f9cea4f7 | 1194 | } |
aa14edeb | 1195 | |
3f2412dc MP |
1196 | if (!s->store->chunk_size) { |
1197 | ti->error = "Chunk size not set"; | |
c1f0c183 | 1198 | goto bad_read_metadata; |
1da177e4 | 1199 | } |
d0216849 | 1200 | ti->split_io = s->store->chunk_size; |
1da177e4 LT |
1201 | |
1202 | return 0; | |
1203 | ||
c1f0c183 MS |
1204 | bad_read_metadata: |
1205 | unregister_snapshot(s); | |
1206 | ||
fee1998e | 1207 | bad_load_and_register: |
cd45daff MP |
1208 | mempool_destroy(s->tracked_chunk_pool); |
1209 | ||
fee1998e | 1210 | bad_tracked_chunk_pool: |
92e86812 MP |
1211 | mempool_destroy(s->pending_pool); |
1212 | ||
fee1998e | 1213 | bad_pending_pool: |
eb69aca5 | 1214 | dm_kcopyd_client_destroy(s->kcopyd_client); |
1da177e4 | 1215 | |
fee1998e | 1216 | bad_kcopyd: |
3510cb94 JB |
1217 | dm_exception_table_exit(&s->pending, pending_cache); |
1218 | dm_exception_table_exit(&s->complete, exception_cache); | |
1da177e4 | 1219 | |
fee1998e | 1220 | bad_hash_tables: |
fc56f6fb | 1221 | dm_exception_store_destroy(s->store); |
1da177e4 | 1222 | |
fc56f6fb MS |
1223 | bad_store: |
1224 | dm_put_device(ti, s->cow); | |
fee1998e | 1225 | |
fc56f6fb | 1226 | bad_cow: |
c2411045 MP |
1227 | dm_put_device(ti, s->origin); |
1228 | ||
1229 | bad_origin: | |
fc56f6fb MS |
1230 | kfree(s); |
1231 | ||
1232 | bad: | |
1da177e4 LT |
1233 | return r; |
1234 | } | |
1235 | ||
31c93a0c MB |
1236 | static void __free_exceptions(struct dm_snapshot *s) |
1237 | { | |
eb69aca5 | 1238 | dm_kcopyd_client_destroy(s->kcopyd_client); |
31c93a0c MB |
1239 | s->kcopyd_client = NULL; |
1240 | ||
3510cb94 JB |
1241 | dm_exception_table_exit(&s->pending, pending_cache); |
1242 | dm_exception_table_exit(&s->complete, exception_cache); | |
31c93a0c MB |
1243 | } |
1244 | ||
c1f0c183 MS |
1245 | static void __handover_exceptions(struct dm_snapshot *snap_src, |
1246 | struct dm_snapshot *snap_dest) | |
1247 | { | |
1248 | union { | |
1249 | struct dm_exception_table table_swap; | |
1250 | struct dm_exception_store *store_swap; | |
1251 | } u; | |
1252 | ||
1253 | /* | |
1254 | * Swap all snapshot context information between the two instances. | |
1255 | */ | |
1256 | u.table_swap = snap_dest->complete; | |
1257 | snap_dest->complete = snap_src->complete; | |
1258 | snap_src->complete = u.table_swap; | |
1259 | ||
1260 | u.store_swap = snap_dest->store; | |
1261 | snap_dest->store = snap_src->store; | |
1262 | snap_src->store = u.store_swap; | |
1263 | ||
1264 | snap_dest->store->snap = snap_dest; | |
1265 | snap_src->store->snap = snap_src; | |
1266 | ||
1267 | snap_dest->ti->split_io = snap_dest->store->chunk_size; | |
1268 | snap_dest->valid = snap_src->valid; | |
1269 | ||
1270 | /* | |
1271 | * Set source invalid to ensure it receives no further I/O. | |
1272 | */ | |
1273 | snap_src->valid = 0; | |
1274 | } | |
1275 | ||
1da177e4 LT |
1276 | static void snapshot_dtr(struct dm_target *ti) |
1277 | { | |
cd45daff MP |
1278 | #ifdef CONFIG_DM_DEBUG |
1279 | int i; | |
1280 | #endif | |
028867ac | 1281 | struct dm_snapshot *s = ti->private; |
c1f0c183 | 1282 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; |
1da177e4 | 1283 | |
ca3a931f AK |
1284 | flush_workqueue(ksnapd); |
1285 | ||
c1f0c183 MS |
1286 | down_read(&_origins_lock); |
1287 | /* Check whether exception handover must be cancelled */ | |
9d3b15c4 | 1288 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
c1f0c183 MS |
1289 | if (snap_src && snap_dest && (s == snap_src)) { |
1290 | down_write(&snap_dest->lock); | |
1291 | snap_dest->valid = 0; | |
1292 | up_write(&snap_dest->lock); | |
1293 | DMERR("Cancelling snapshot handover."); | |
1294 | } | |
1295 | up_read(&_origins_lock); | |
1296 | ||
1e03f97e MP |
1297 | if (dm_target_is_snapshot_merge(ti)) |
1298 | stop_merge(s); | |
1299 | ||
138728dc AK |
1300 | /* Prevent further origin writes from using this snapshot. */ |
1301 | /* After this returns there can be no new kcopyd jobs. */ | |
1da177e4 LT |
1302 | unregister_snapshot(s); |
1303 | ||
879129d2 | 1304 | while (atomic_read(&s->pending_exceptions_count)) |
90fa1527 | 1305 | msleep(1); |
879129d2 MP |
1306 | /* |
1307 | * Ensure instructions in mempool_destroy aren't reordered | |
1308 | * before atomic_read. | |
1309 | */ | |
1310 | smp_mb(); | |
1311 | ||
cd45daff MP |
1312 | #ifdef CONFIG_DM_DEBUG |
1313 | for (i = 0; i < DM_TRACKED_CHUNK_HASH_SIZE; i++) | |
1314 | BUG_ON(!hlist_empty(&s->tracked_chunk_hash[i])); | |
1315 | #endif | |
1316 | ||
1317 | mempool_destroy(s->tracked_chunk_pool); | |
1318 | ||
31c93a0c | 1319 | __free_exceptions(s); |
1da177e4 | 1320 | |
92e86812 MP |
1321 | mempool_destroy(s->pending_pool); |
1322 | ||
fee1998e | 1323 | dm_exception_store_destroy(s->store); |
138728dc | 1324 | |
fc56f6fb MS |
1325 | dm_put_device(ti, s->cow); |
1326 | ||
c2411045 MP |
1327 | dm_put_device(ti, s->origin); |
1328 | ||
1da177e4 LT |
1329 | kfree(s); |
1330 | } | |
1331 | ||
1332 | /* | |
1333 | * Flush a list of buffers. | |
1334 | */ | |
1335 | static void flush_bios(struct bio *bio) | |
1336 | { | |
1337 | struct bio *n; | |
1338 | ||
1339 | while (bio) { | |
1340 | n = bio->bi_next; | |
1341 | bio->bi_next = NULL; | |
1342 | generic_make_request(bio); | |
1343 | bio = n; | |
1344 | } | |
1345 | } | |
1346 | ||
c4028958 | 1347 | static void flush_queued_bios(struct work_struct *work) |
ca3a931f | 1348 | { |
c4028958 DH |
1349 | struct dm_snapshot *s = |
1350 | container_of(work, struct dm_snapshot, queued_bios_work); | |
ca3a931f AK |
1351 | struct bio *queued_bios; |
1352 | unsigned long flags; | |
1353 | ||
1354 | spin_lock_irqsave(&s->pe_lock, flags); | |
1355 | queued_bios = bio_list_get(&s->queued_bios); | |
1356 | spin_unlock_irqrestore(&s->pe_lock, flags); | |
1357 | ||
1358 | flush_bios(queued_bios); | |
1359 | } | |
1360 | ||
515ad66c MP |
1361 | static int do_origin(struct dm_dev *origin, struct bio *bio); |
1362 | ||
1363 | /* | |
1364 | * Retry a list of origin bios: remap each via do_origin() and resubmit.
1365 | */ | |
1366 | static void retry_origin_bios(struct dm_snapshot *s, struct bio *bio) | |
1367 | { | |
1368 | struct bio *n; | |
1369 | int r; | |
1370 | ||
1371 | while (bio) { | |
1372 | n = bio->bi_next; | |
1373 | bio->bi_next = NULL; | |
1374 | r = do_origin(s->origin, bio); | |
1375 | if (r == DM_MAPIO_REMAPPED) | |
1376 | generic_make_request(bio); | |
1377 | bio = n; | |
1378 | } | |
1379 | } | |
1380 | ||
1da177e4 LT |
1381 | /* |
1382 | * Error a list of buffers. | |
1383 | */ | |
1384 | static void error_bios(struct bio *bio) | |
1385 | { | |
1386 | struct bio *n; | |
1387 | ||
1388 | while (bio) { | |
1389 | n = bio->bi_next; | |
1390 | bio->bi_next = NULL; | |
6712ecf8 | 1391 | bio_io_error(bio); |
1da177e4 LT |
1392 | bio = n; |
1393 | } | |
1394 | } | |
1395 | ||
695368ac | 1396 | static void __invalidate_snapshot(struct dm_snapshot *s, int err) |
76df1c65 AK |
1397 | { |
1398 | if (!s->valid) | |
1399 | return; | |
1400 | ||
1401 | if (err == -EIO) | |
1402 | DMERR("Invalidating snapshot: Error reading/writing."); | |
1403 | else if (err == -ENOMEM) | |
1404 | DMERR("Invalidating snapshot: Unable to allocate exception."); | |
1405 | ||
493df71c JB |
1406 | if (s->store->type->drop_snapshot) |
1407 | s->store->type->drop_snapshot(s->store); | |
76df1c65 AK |
1408 | |
1409 | s->valid = 0; | |
1410 | ||
fc56f6fb | 1411 | dm_table_event(s->ti->table); |
76df1c65 AK |
1412 | } |
1413 | ||
028867ac | 1414 | static void pending_complete(struct dm_snap_pending_exception *pe, int success) |
1da177e4 | 1415 | { |
1d4989c8 | 1416 | struct dm_exception *e; |
1da177e4 | 1417 | struct dm_snapshot *s = pe->snap; |
9d493fa8 AK |
1418 | struct bio *origin_bios = NULL; |
1419 | struct bio *snapshot_bios = NULL; | |
1420 | int error = 0; | |
1da177e4 | 1421 | |
76df1c65 AK |
1422 | if (!success) { |
1423 | /* Read/write error - snapshot is unusable */ | |
1da177e4 | 1424 | down_write(&s->lock); |
695368ac | 1425 | __invalidate_snapshot(s, -EIO); |
9d493fa8 | 1426 | error = 1; |
76df1c65 AK |
1427 | goto out; |
1428 | } | |
1429 | ||
3510cb94 | 1430 | e = alloc_completed_exception(); |
76df1c65 | 1431 | if (!e) { |
1da177e4 | 1432 | down_write(&s->lock); |
695368ac | 1433 | __invalidate_snapshot(s, -ENOMEM); |
9d493fa8 | 1434 | error = 1; |
76df1c65 AK |
1435 | goto out; |
1436 | } | |
1437 | *e = pe->e; | |
1da177e4 | 1438 | |
76df1c65 AK |
1439 | down_write(&s->lock); |
1440 | if (!s->valid) { | |
3510cb94 | 1441 | free_completed_exception(e); |
9d493fa8 | 1442 | error = 1; |
76df1c65 | 1443 | goto out; |
1da177e4 LT |
1444 | } |
1445 | ||
615d1eb9 MS |
1446 | /* Check for conflicting reads */ |
1447 | __check_for_conflicting_io(s, pe->e.old_chunk); | |
a8d41b59 | 1448 | |
9d493fa8 AK |
1449 | /* |
1450 | * Add a proper exception, and remove the | |
1451 | * in-flight exception from the list. | |
1452 | */ | |
3510cb94 | 1453 | dm_insert_exception(&s->complete, e); |
76df1c65 | 1454 | |
1da177e4 | 1455 | out: |
3510cb94 | 1456 | dm_remove_exception(&pe->e); |
9d493fa8 | 1457 | snapshot_bios = bio_list_get(&pe->snapshot_bios); |
515ad66c MP |
1458 | origin_bios = bio_list_get(&pe->origin_bios); |
1459 | free_pending_exception(pe); | |
1da177e4 | 1460 | |
73dfd078 MP |
1461 | increment_pending_exceptions_done_count(); |
1462 | ||
9d493fa8 AK |
1463 | up_write(&s->lock); |
1464 | ||
1465 | /* Submit any pending write bios */ | |
1466 | if (error) | |
1467 | error_bios(snapshot_bios); | |
1468 | else | |
1469 | flush_bios(snapshot_bios); | |
1470 | ||
515ad66c | 1471 | retry_origin_bios(s, origin_bios); |
1da177e4 LT |
1472 | } |
1473 | ||
1474 | static void commit_callback(void *context, int success) | |
1475 | { | |
028867ac AK |
1476 | struct dm_snap_pending_exception *pe = context; |
1477 | ||
1da177e4 LT |
1478 | pending_complete(pe, success); |
1479 | } | |
1480 | ||
1481 | /* | |
1482 | * Called when the copy I/O has finished. kcopyd actually runs | |
1483 | * this code so don't block. | |
1484 | */ | |
4cdc1d1f | 1485 | static void copy_callback(int read_err, unsigned long write_err, void *context) |
1da177e4 | 1486 | { |
028867ac | 1487 | struct dm_snap_pending_exception *pe = context; |
1da177e4 LT |
1488 | struct dm_snapshot *s = pe->snap; |
1489 | ||
1490 | if (read_err || write_err) | |
1491 | pending_complete(pe, 0); | |
1492 | ||
1493 | else | |
1494 | /* Update the metadata if we are persistent */ | |
493df71c JB |
1495 | s->store->type->commit_exception(s->store, &pe->e, |
1496 | commit_callback, pe); | |
1da177e4 LT |
1497 | } |
1498 | ||
1499 | /* | |
1500 | * Dispatches the copy operation to kcopyd. | |
1501 | */ | |
028867ac | 1502 | static void start_copy(struct dm_snap_pending_exception *pe) |
1da177e4 LT |
1503 | { |
1504 | struct dm_snapshot *s = pe->snap; | |
22a1ceb1 | 1505 | struct dm_io_region src, dest; |
1da177e4 LT |
1506 | struct block_device *bdev = s->origin->bdev; |
1507 | sector_t dev_size; | |
1508 | ||
1509 | dev_size = get_dev_size(bdev); | |
1510 | ||
1511 | src.bdev = bdev; | |
71fab00a | 1512 | src.sector = chunk_to_sector(s->store, pe->e.old_chunk); |
df96eee6 | 1513 | src.count = min((sector_t)s->store->chunk_size, dev_size - src.sector); |
1da177e4 | 1514 | |
fc56f6fb | 1515 | dest.bdev = s->cow->bdev; |
71fab00a | 1516 | dest.sector = chunk_to_sector(s->store, pe->e.new_chunk); |
1da177e4 LT |
1517 | dest.count = src.count; |
1518 | ||
1519 | /* Hand over to kcopyd */ | |
eb69aca5 | 1520 | dm_kcopyd_copy(s->kcopyd_client, |
1da177e4 LT |
1521 | &src, 1, &dest, 0, copy_callback, pe); |
1522 | } | |
1523 | ||
2913808e MP |
1524 | static struct dm_snap_pending_exception * |
1525 | __lookup_pending_exception(struct dm_snapshot *s, chunk_t chunk) | |
1526 | { | |
3510cb94 | 1527 | struct dm_exception *e = dm_lookup_exception(&s->pending, chunk); |
2913808e MP |
1528 | |
1529 | if (!e) | |
1530 | return NULL; | |
1531 | ||
1532 | return container_of(e, struct dm_snap_pending_exception, e); | |
1533 | } | |
1534 | ||
1da177e4 LT |
1535 | /* |
1536 | * Looks to see if this snapshot already has a pending exception | |
1537 | * for this chunk, otherwise it allocates a new one and inserts | |
1538 | * it into the pending table. | |
1539 | * | |
1540 | * NOTE: a write lock must be held on snap->lock before calling | |
1541 | * this. | |
1542 | */ | |
028867ac | 1543 | static struct dm_snap_pending_exception * |
c6621392 MP |
1544 | __find_pending_exception(struct dm_snapshot *s, |
1545 | struct dm_snap_pending_exception *pe, chunk_t chunk) | |
1da177e4 | 1546 | { |
c6621392 | 1547 | struct dm_snap_pending_exception *pe2; |
1da177e4 | 1548 | |
2913808e MP |
1549 | pe2 = __lookup_pending_exception(s, chunk); |
1550 | if (pe2) { | |
76df1c65 | 1551 | free_pending_exception(pe); |
2913808e | 1552 | return pe2; |
1da177e4 LT |
1553 | } |
1554 | ||
76df1c65 AK |
1555 | pe->e.old_chunk = chunk; |
1556 | bio_list_init(&pe->origin_bios); | |
1557 | bio_list_init(&pe->snapshot_bios); | |
76df1c65 AK |
1558 | pe->started = 0; |
1559 | ||
493df71c | 1560 | if (s->store->type->prepare_exception(s->store, &pe->e)) { |
76df1c65 AK |
1561 | free_pending_exception(pe); |
1562 | return NULL; | |
1563 | } | |
1564 | ||
3510cb94 | 1565 | dm_insert_exception(&s->pending, &pe->e); |
76df1c65 | 1566 | |
1da177e4 LT |
1567 | return pe; |
1568 | } | |
1569 | ||
1d4989c8 | 1570 | static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, |
d74f81f8 | 1571 | struct bio *bio, chunk_t chunk) |
1da177e4 | 1572 | { |
fc56f6fb | 1573 | bio->bi_bdev = s->cow->bdev; |
71fab00a JB |
1574 | bio->bi_sector = chunk_to_sector(s->store, |
1575 | dm_chunk_number(e->new_chunk) + | |
1576 | (chunk - e->old_chunk)) + | |
1577 | (bio->bi_sector & | |
1578 | s->store->chunk_mask); | |
1da177e4 LT |
1579 | } |
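/*
 * Remap arithmetic (illustrative): with 8-sector chunks, a bio at
 * sector 83 lies in chunk 10 at offset 83 & 7 = 3. If old chunk 10
 * maps to new chunk 4, remap_exception() redirects the bio to the
 * COW device at chunk_to_sector(store, 4) + 3 = sector 35.
 */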
1580 | ||
1581 | static int snapshot_map(struct dm_target *ti, struct bio *bio, | |
1582 | union map_info *map_context) | |
1583 | { | |
1d4989c8 | 1584 | struct dm_exception *e; |
028867ac | 1585 | struct dm_snapshot *s = ti->private; |
d2a7ad29 | 1586 | int r = DM_MAPIO_REMAPPED; |
1da177e4 | 1587 | chunk_t chunk; |
028867ac | 1588 | struct dm_snap_pending_exception *pe = NULL; |
1da177e4 | 1589 | |
494b3ee7 | 1590 | if (unlikely(bio_empty_barrier(bio))) { |
fc56f6fb | 1591 | bio->bi_bdev = s->cow->bdev; |
494b3ee7 MP |
1592 | return DM_MAPIO_REMAPPED; |
1593 | } | |
1594 | ||
71fab00a | 1595 | chunk = sector_to_chunk(s->store, bio->bi_sector); |
1da177e4 LT |
1596 | |
1597 | /* Full snapshots are not usable */ | |
76df1c65 | 1598 | /* To get here the table must be live so s->active is always set. */ |
1da177e4 | 1599 | if (!s->valid) |
f6a80ea8 | 1600 | return -EIO; |
1da177e4 | 1601 | |
ba40a2aa AK |
1602 | /* FIXME: should only take write lock if we need |
1603 | * to copy an exception */ | |
1604 | down_write(&s->lock); | |
1605 | ||
1606 | if (!s->valid) { | |
1607 | r = -EIO; | |
1608 | goto out_unlock; | |
1609 | } | |
1610 | ||
1611 | /* If the block is already remapped - use that, else remap it */ | |
3510cb94 | 1612 | e = dm_lookup_exception(&s->complete, chunk); |
ba40a2aa | 1613 | if (e) { |
d74f81f8 | 1614 | remap_exception(s, e, bio, chunk); |
ba40a2aa AK |
1615 | goto out_unlock; |
1616 | } | |
1617 | ||
1da177e4 LT |
1618 | /* |
1619 | * Write to snapshot - higher level takes care of RW/RO | |
1620 | * flags so we should only get this if we are | |
1621 | * writeable. | |
1622 | */ | |
1623 | if (bio_rw(bio) == WRITE) { | |
2913808e | 1624 | pe = __lookup_pending_exception(s, chunk); |
76df1c65 | 1625 | if (!pe) { |
c6621392 MP |
1626 | up_write(&s->lock); |
1627 | pe = alloc_pending_exception(s); | |
1628 | down_write(&s->lock); | |
1629 | ||
1630 | if (!s->valid) { | |
1631 | free_pending_exception(pe); | |
1632 | r = -EIO; | |
1633 | goto out_unlock; | |
1634 | } | |
1635 | ||
3510cb94 | 1636 | e = dm_lookup_exception(&s->complete, chunk); |
35bf659b MP |
1637 | if (e) { |
1638 | free_pending_exception(pe); | |
1639 | remap_exception(s, e, bio, chunk); | |
1640 | goto out_unlock; | |
1641 | } | |
1642 | ||
c6621392 | 1643 | pe = __find_pending_exception(s, pe, chunk); |
2913808e MP |
1644 | if (!pe) { |
1645 | __invalidate_snapshot(s, -ENOMEM); | |
1646 | r = -EIO; | |
1647 | goto out_unlock; | |
1648 | } | |
1da177e4 LT |
1649 | } |
1650 | ||
d74f81f8 | 1651 | remap_exception(s, &pe->e, bio, chunk); |
76df1c65 AK |
1652 | bio_list_add(&pe->snapshot_bios, bio); |
1653 | ||
d2a7ad29 | 1654 | r = DM_MAPIO_SUBMITTED; |
ba40a2aa | 1655 | |
76df1c65 AK |
1656 | if (!pe->started) { |
1657 | /* this is protected by snap->lock */ | |
1658 | pe->started = 1; | |
ba40a2aa | 1659 | up_write(&s->lock); |
76df1c65 | 1660 | start_copy(pe); |
ba40a2aa AK |
1661 | goto out; |
1662 | } | |
cd45daff | 1663 | } else { |
ba40a2aa | 1664 | bio->bi_bdev = s->origin->bdev; |
cd45daff MP |
1665 | map_context->ptr = track_chunk(s, chunk); |
1666 | } | |
1da177e4 | 1667 | |
ba40a2aa AK |
1668 | out_unlock: |
1669 | up_write(&s->lock); | |
1670 | out: | |
1da177e4 LT |
1671 | return r; |
1672 | } | |
1673 | ||
3452c2a1 MP |
1674 | /* |
1675 | * A snapshot-merge target behaves like a combination of a snapshot | |
1676 | * target and a snapshot-origin target. It only generates new | |
1677 | * exceptions in other snapshots and not in the one that is being | |
1678 | * merged. | |
1679 | * | |
1680 | * For each chunk, if there is an existing exception, it is used to | |
1681 | * redirect I/O to the cow device. Otherwise I/O is sent to the origin, | |
1682 | * which in turn might generate exceptions in other snapshots. | |
9fe86254 MP |
1683 | * If merging is currently taking place on the chunk in question, the |
1684 | * I/O is deferred by adding it to s->bios_queued_during_merge. | |
3452c2a1 MP |
1685 | */ |
1686 | static int snapshot_merge_map(struct dm_target *ti, struct bio *bio, | |
1687 | union map_info *map_context) | |
1688 | { | |
1689 | struct dm_exception *e; | |
1690 | struct dm_snapshot *s = ti->private; | |
1691 | int r = DM_MAPIO_REMAPPED; | |
1692 | chunk_t chunk; | |
1693 | ||
10b8106a | 1694 | if (unlikely(bio_empty_barrier(bio))) { |
57cba5d3 | 1695 | if (!map_context->target_request_nr) |
10b8106a MS |
1696 | bio->bi_bdev = s->origin->bdev; |
1697 | else | |
1698 | bio->bi_bdev = s->cow->bdev; | |
1699 | map_context->ptr = NULL; | |
1700 | return DM_MAPIO_REMAPPED; | |
1701 | } | |
1702 | ||
3452c2a1 MP |
1703 | chunk = sector_to_chunk(s->store, bio->bi_sector); |
1704 | ||
9fe86254 | 1705 | down_write(&s->lock); |
3452c2a1 | 1706 | |
d2fdb776 MP |
1707 | /* Full merging snapshots are redirected to the origin */ |
1708 | if (!s->valid) | |
1709 | goto redirect_to_origin; | |
3452c2a1 MP |
1710 | |
1711 | /* If the block is already remapped - use that */ | |
1712 | e = dm_lookup_exception(&s->complete, chunk); | |
1713 | if (e) { | |
9fe86254 MP |
1714 | /* Queue writes overlapping with chunks being merged */ |
1715 | if (bio_rw(bio) == WRITE && | |
1716 | chunk >= s->first_merging_chunk && | |
1717 | chunk < (s->first_merging_chunk + | |
1718 | s->num_merging_chunks)) { | |
1719 | bio->bi_bdev = s->origin->bdev; | |
1720 | bio_list_add(&s->bios_queued_during_merge, bio); | |
1721 | r = DM_MAPIO_SUBMITTED; | |
1722 | goto out_unlock; | |
1723 | } | |
17aa0332 | 1724 | |
3452c2a1 | 1725 | remap_exception(s, e, bio, chunk); |
17aa0332 MP |
1726 | |
1727 | if (bio_rw(bio) == WRITE) | |
1728 | map_context->ptr = track_chunk(s, chunk); | |
3452c2a1 MP |
1729 | goto out_unlock; |
1730 | } | |
1731 | ||
d2fdb776 | 1732 | redirect_to_origin: |
3452c2a1 MP |
1733 | bio->bi_bdev = s->origin->bdev; |
1734 | ||
1735 | if (bio_rw(bio) == WRITE) { | |
9fe86254 | 1736 | up_write(&s->lock); |
3452c2a1 MP |
1737 | return do_origin(s->origin, bio); |
1738 | } | |
1739 | ||
1740 | out_unlock: | |
9fe86254 | 1741 | up_write(&s->lock); |
3452c2a1 MP |
1742 | |
1743 | return r; | |
1744 | } | |
1745 | ||
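/*
 * Editor's sketch of the deferral in snapshot_merge_map() above:
 * writes to chunks inside [first_merging_chunk, first_merging_chunk +
 * num_merging_chunks) are parked on a list while kcopyd rewrites that
 * area, and re-issued once the merge of those chunks completes.  The
 * types and the submit callback below are hypothetical stand-ins for
 * the real bio_list handling.
 */
#include <stdio.h>

struct bio {
	unsigned long long chunk;
	struct bio *next;
};

struct merge_state {
	unsigned long long first_merging_chunk;
	unsigned long long num_merging_chunks;
	struct bio *bios_queued_during_merge;
};

/* Returns 1 if the write overlapped the merge window and was queued. */
static int maybe_defer(struct merge_state *s, struct bio *bio)
{
	if (bio->chunk >= s->first_merging_chunk &&
	    bio->chunk < s->first_merging_chunk + s->num_merging_chunks) {
		bio->next = s->bios_queued_during_merge;
		s->bios_queued_during_merge = bio;
		return 1;
	}
	return 0;
}

/* Called once the merge of the current window has completed. */
static void flush_queued_bios(struct merge_state *s,
			      void (*submit)(struct bio *))
{
	struct bio *bio = s->bios_queued_during_merge, *next;

	s->bios_queued_during_merge = NULL;
	for (; bio; bio = next) {
		next = bio->next;
		submit(bio);
	}
}

static void resubmit(struct bio *bio)
{
	printf("re-issuing write to chunk %llu\n", bio->chunk);
}

int main(void)
{
	struct merge_state s = { 8, 4, NULL };
	struct bio b = { 10, NULL };

	if (maybe_defer(&s, &b))
		flush_queued_bios(&s, resubmit);
	return 0;
}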
cd45daff MP |
1746 | static int snapshot_end_io(struct dm_target *ti, struct bio *bio, |
1747 | int error, union map_info *map_context) | |
1748 | { | |
1749 | struct dm_snapshot *s = ti->private; | |
1750 | struct dm_snap_tracked_chunk *c = map_context->ptr; | |
1751 | ||
1752 | if (c) | |
1753 | stop_tracking_chunk(s, c); | |
1754 | ||
1755 | return 0; | |
1756 | } | |
1757 | ||
1e03f97e MP |
1758 | static void snapshot_merge_presuspend(struct dm_target *ti) |
1759 | { | |
1760 | struct dm_snapshot *s = ti->private; | |
1761 | ||
1762 | stop_merge(s); | |
1763 | } | |
1764 | ||
c26655ca MS |
1765 | static void snapshot_postsuspend(struct dm_target *ti) |
1766 | { | |
1767 | struct dm_snapshot *s = ti->private; | |
1768 | ||
1769 | down_write(&s->lock); | |
1770 | s->suspended = 1; | |
1771 | up_write(&s->lock); | |
1772 | } | |
1773 | ||
c1f0c183 MS |
1774 | static int snapshot_preresume(struct dm_target *ti) |
1775 | { | |
1776 | int r = 0; | |
1777 | struct dm_snapshot *s = ti->private; | |
1778 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; | |
1779 | ||
1780 | down_read(&_origins_lock); | |
9d3b15c4 | 1781 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
c1f0c183 MS |
1782 | if (snap_src && snap_dest) { |
1783 | down_read(&snap_src->lock); | |
1784 | if (s == snap_src) { | |
1785 | DMERR("Unable to resume snapshot source until " | |
1786 | "handover completes."); | |
1787 | r = -EINVAL; | |
1788 | } else if (!snap_src->suspended) { | |
1789 | DMERR("Unable to perform snapshot handover until " | |
1790 | "source is suspended."); | |
1791 | r = -EINVAL; | |
1792 | } | |
1793 | up_read(&snap_src->lock); | |
1794 | } | |
1795 | up_read(&_origins_lock); | |
1796 | ||
1797 | return r; | |
1798 | } | |
1799 | ||
1da177e4 LT |
1800 | static void snapshot_resume(struct dm_target *ti) |
1801 | { | |
028867ac | 1802 | struct dm_snapshot *s = ti->private; |
c1f0c183 MS |
1803 | struct dm_snapshot *snap_src = NULL, *snap_dest = NULL; |
1804 | ||
1805 | down_read(&_origins_lock); | |
9d3b15c4 | 1806 | (void) __find_snapshots_sharing_cow(s, &snap_src, &snap_dest, NULL); |
c1f0c183 MS |
1807 | if (snap_src && snap_dest) { |
1808 | down_write(&snap_src->lock); | |
1809 | down_write_nested(&snap_dest->lock, SINGLE_DEPTH_NESTING); | |
1810 | __handover_exceptions(snap_src, snap_dest); | |
1811 | up_write(&snap_dest->lock); | |
1812 | up_write(&snap_src->lock); | |
1813 | } | |
1814 | up_read(&_origins_lock); | |
1815 | ||
1816 | /* Now that we have the correct chunk size, re-register */ | |
1817 | reregister_snapshot(s); | |
1da177e4 | 1818 | |
aa14edeb AK |
1819 | down_write(&s->lock); |
1820 | s->active = 1; | |
c26655ca | 1821 | s->suspended = 0; |
aa14edeb | 1822 | up_write(&s->lock); |
1da177e4 LT |
1823 | } |
1824 | ||
1e03f97e MP |
1825 | static sector_t get_origin_minimum_chunksize(struct block_device *bdev) |
1826 | { | |
1827 | sector_t min_chunksize; | |
1828 | ||
1829 | down_read(&_origins_lock); | |
1830 | min_chunksize = __minimum_chunk_size(__lookup_origin(bdev)); | |
1831 | up_read(&_origins_lock); | |
1832 | ||
1833 | return min_chunksize; | |
1834 | } | |
1835 | ||
1836 | static void snapshot_merge_resume(struct dm_target *ti) | |
1837 | { | |
1838 | struct dm_snapshot *s = ti->private; | |
1839 | ||
1840 | /* | |
1841 | * Handover exceptions from existing snapshot. | |
1842 | */ | |
1843 | snapshot_resume(ti); | |
1844 | ||
1845 | /* | |
1846 | * snapshot-merge acts as an origin, so set ti->split_io | |
1847 | */ | |
1848 | ti->split_io = get_origin_minimum_chunksize(s->origin->bdev); | |
1849 | ||
1850 | start_merge(s); | |
1851 | } | |
1852 | ||
1da177e4 LT |
1853 | static int snapshot_status(struct dm_target *ti, status_type_t type, |
1854 | char *result, unsigned int maxlen) | |
1855 | { | |
2e4a31df | 1856 | unsigned sz = 0; |
028867ac | 1857 | struct dm_snapshot *snap = ti->private; |
1da177e4 LT |
1858 | |
1859 | switch (type) { | |
1860 | case STATUSTYPE_INFO: | |
94e76572 MP |
1861 | |
1862 | down_write(&snap->lock); | |
1863 | ||
1da177e4 | 1864 | if (!snap->valid) |
2e4a31df | 1865 | DMEMIT("Invalid"); |
d8ddb1cf MS |
1866 | else if (snap->merge_failed) |
1867 | DMEMIT("Merge failed"); | |
1da177e4 | 1868 | else { |
985903bb MS |
1869 | if (snap->store->type->usage) { |
1870 | sector_t total_sectors, sectors_allocated, | |
1871 | metadata_sectors; | |
1872 | snap->store->type->usage(snap->store, | |
1873 | &total_sectors, | |
1874 | &sectors_allocated, | |
1875 | &metadata_sectors); | |
1876 | DMEMIT("%llu/%llu %llu", | |
1877 | (unsigned long long)sectors_allocated, | |
1878 | (unsigned long long)total_sectors, | |
1879 | (unsigned long long)metadata_sectors); | |
1da177e4 LT |
1880 | } |
1881 | else | |
2e4a31df | 1882 | DMEMIT("Unknown"); |
1da177e4 | 1883 | } |
94e76572 MP |
1884 | |
1885 | up_write(&snap->lock); | |
1886 | ||
1da177e4 LT |
1887 | break; |
1888 | ||
1889 | case STATUSTYPE_TABLE: | |
1890 | /* | |
1891 | * kdevname returns a static pointer so we need | |
1892 | * to make private copies if the output is to | |
1893 | * make sense. | |
1894 | */ | |
fc56f6fb | 1895 | DMEMIT("%s %s", snap->origin->name, snap->cow->name); |
1e302a92 JB |
1896 | snap->store->type->status(snap->store, type, result + sz, |
1897 | maxlen - sz); | |
1da177e4 LT |
1898 | break; |
1899 | } | |
1900 | ||
1901 | return 0; | |
1902 | } | |
1903 | ||
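/*
 * Editor's hypothetical userspace sketch parsing the STATUSTYPE_INFO
 * line emitted by snapshot_status() above, e.g. from "dmsetup status":
 * either "<allocated>/<total> <metadata>" in sectors, or one of the
 * literal strings "Invalid", "Merge failed" or "Unknown".
 */
#include <stdio.h>
#include <string.h>

static int parse_snapshot_status(const char *s,
				 unsigned long long *allocated,
				 unsigned long long *total,
				 unsigned long long *metadata)
{
	if (!strcmp(s, "Invalid") || !strcmp(s, "Merge failed") ||
	    !strcmp(s, "Unknown"))
		return -1;

	return sscanf(s, "%llu/%llu %llu", allocated, total, metadata) == 3
		? 0 : -1;
}

int main(void)
{
	unsigned long long a, t, m;

	if (!parse_snapshot_status("8192/204800 32", &a, &t, &m))
		printf("%.1f%% full, %llu metadata sectors\n",
		       100.0 * a / t, m);
	return 0;
}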
8811f46c MS |
1904 | static int snapshot_iterate_devices(struct dm_target *ti, |
1905 | iterate_devices_callout_fn fn, void *data) | |
1906 | { | |
1907 | struct dm_snapshot *snap = ti->private; | |
1e5554c8 MP |
1908 | int r; |
1909 | ||
1910 | r = fn(ti, snap->origin, 0, ti->len, data); | |
8811f46c | 1911 | |
1e5554c8 MP |
1912 | if (!r) |
1913 | r = fn(ti, snap->cow, 0, get_dev_size(snap->cow->bdev), data); | |
1914 | ||
1915 | return r; | |
8811f46c MS |
1916 | } |
1917 | ||
1918 | ||
1da177e4 LT |
1919 | /*----------------------------------------------------------------- |
1920 | * Origin methods | |
1921 | *---------------------------------------------------------------*/ | |
9eaae8ff MP |
1922 | |
1923 | /* | |
1924 | * If no exceptions need creating, DM_MAPIO_REMAPPED is returned and any | |
1925 | * supplied bio is ignored; the caller may submit it immediately. | |
1926 | * (No remapping actually occurs as the origin is always a direct linear | |
1927 | * map.) | |
1928 | * | |
1929 | * If further exceptions are required, DM_MAPIO_SUBMITTED is returned | |
1930 | * and any supplied bio is added to a list to be submitted once all | |
1931 | * the necessary exceptions exist. | |
1932 | */ | |
1933 | static int __origin_write(struct list_head *snapshots, sector_t sector, | |
1934 | struct bio *bio) | |
1da177e4 | 1935 | { |
515ad66c | 1936 | int r = DM_MAPIO_REMAPPED; |
1da177e4 | 1937 | struct dm_snapshot *snap; |
1d4989c8 | 1938 | struct dm_exception *e; |
515ad66c MP |
1939 | struct dm_snap_pending_exception *pe; |
1940 | struct dm_snap_pending_exception *pe_to_start_now = NULL; | |
1941 | struct dm_snap_pending_exception *pe_to_start_last = NULL; | |
1da177e4 LT |
1942 | chunk_t chunk; |
1943 | ||
1944 | /* Do all the snapshots on this origin */ | |
1945 | list_for_each_entry (snap, snapshots, list) { | |
3452c2a1 MP |
1946 | /* |
1947 | * Don't make new exceptions in a merging snapshot | |
1948 | * because it has effectively been deleted | |
1949 | */ | |
1950 | if (dm_target_is_snapshot_merge(snap->ti)) | |
1951 | continue; | |
1952 | ||
76df1c65 AK |
1953 | down_write(&snap->lock); |
1954 | ||
aa14edeb AK |
1955 | /* Only deal with valid and active snapshots */ |
1956 | if (!snap->valid || !snap->active) | |
76df1c65 | 1957 | goto next_snapshot; |
1da177e4 | 1958 | |
d5e404c1 | 1959 | /* Nothing to do if writing beyond end of snapshot */ |
9eaae8ff | 1960 | if (sector >= dm_table_get_size(snap->ti->table)) |
76df1c65 | 1961 | goto next_snapshot; |
1da177e4 LT |
1962 | |
1963 | /* | |
1964 | * Remember, different snapshots can have | |
1965 | * different chunk sizes. | |
1966 | */ | |
9eaae8ff | 1967 | chunk = sector_to_chunk(snap->store, sector); |
1da177e4 LT |
1968 | |
1969 | /* | |
1970 | * Check exception table to see if block | |
1971 | * is already remapped in this snapshot | |
1972 | * and trigger an exception if not. | |
1973 | */ | |
3510cb94 | 1974 | e = dm_lookup_exception(&snap->complete, chunk); |
76df1c65 AK |
1975 | if (e) |
1976 | goto next_snapshot; | |
1977 | ||
2913808e | 1978 | pe = __lookup_pending_exception(snap, chunk); |
76df1c65 | 1979 | if (!pe) { |
c6621392 MP |
1980 | up_write(&snap->lock); |
1981 | pe = alloc_pending_exception(snap); | |
1982 | down_write(&snap->lock); | |
1983 | ||
1984 | if (!snap->valid) { | |
1985 | free_pending_exception(pe); | |
1986 | goto next_snapshot; | |
1987 | } | |
1988 | ||
3510cb94 | 1989 | e = dm_lookup_exception(&snap->complete, chunk); |
35bf659b MP |
1990 | if (e) { |
1991 | free_pending_exception(pe); | |
1992 | goto next_snapshot; | |
1993 | } | |
1994 | ||
c6621392 | 1995 | pe = __find_pending_exception(snap, pe, chunk); |
2913808e MP |
1996 | if (!pe) { |
1997 | __invalidate_snapshot(snap, -ENOMEM); | |
1998 | goto next_snapshot; | |
1999 | } | |
76df1c65 AK |
2000 | } |
2001 | ||
515ad66c | 2002 | r = DM_MAPIO_SUBMITTED; |
76df1c65 | 2003 | |
515ad66c MP |
2004 | /* |
2005 | * If an origin bio was supplied, queue it to wait for the | |
2006 | * completion of this exception, and start this one last, | |
2007 | * at the end of the function. | |
2008 | */ | |
2009 | if (bio) { | |
2010 | bio_list_add(&pe->origin_bios, bio); | |
2011 | bio = NULL; | |
76df1c65 | 2012 | |
515ad66c MP |
2013 | if (!pe->started) { |
2014 | pe->started = 1; | |
2015 | pe_to_start_last = pe; | |
2016 | } | |
76df1c65 AK |
2017 | } |
2018 | ||
2019 | if (!pe->started) { | |
2020 | pe->started = 1; | |
515ad66c | 2021 | pe_to_start_now = pe; |
1da177e4 LT |
2022 | } |
2023 | ||
76df1c65 | 2024 | next_snapshot: |
1da177e4 | 2025 | up_write(&snap->lock); |
1da177e4 | 2026 | |
515ad66c MP |
2027 | if (pe_to_start_now) { |
2028 | start_copy(pe_to_start_now); | |
2029 | pe_to_start_now = NULL; | |
2030 | } | |
b4b610f6 AK |
2031 | } |
2032 | ||
1da177e4 | 2033 | /* |
515ad66c MP |
2034 | * The exception against which the bio is queued is started last, | |
2035 | * to give the other exceptions a head start. | |
1da177e4 | 2036 | */ |
515ad66c MP |
2037 | if (pe_to_start_last) |
2038 | start_copy(pe_to_start_last); | |
1da177e4 LT |
2039 | |
2040 | return r; | |
2041 | } | |
2042 | ||
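/*
 * Editor's sketch of the caller contract documented above
 * __origin_write(): DM_MAPIO_REMAPPED means the bio was ignored and
 * may be issued at once; DM_MAPIO_SUBMITTED means it now sits on a
 * pending exception's origin_bios list and will be released by the
 * copy-completion path.  The stand-in function below is hypothetical;
 * compare do_origin() just after this sketch.
 */
#include <stdio.h>

#define DM_MAPIO_SUBMITTED 0	/* bio queued behind a pending exception */
#define DM_MAPIO_REMAPPED  1	/* no exception needed, bio untouched */

struct bio { int id; };

/* Stand-in for __origin_write(): pretend a copy had to be started. */
static int fake_origin_write(struct bio *bio)
{
	(void)bio;
	return DM_MAPIO_SUBMITTED;
}

static void handle_write(struct bio *bio)
{
	if (fake_origin_write(bio) == DM_MAPIO_REMAPPED)
		printf("bio %d issued immediately\n", bio->id);
	else
		printf("bio %d held until its exception completes\n", bio->id);
}

int main(void)
{
	struct bio b = { 1 };

	handle_write(&b);
	return 0;
}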
2043 | /* | |
2044 | * Called on a write from the origin driver. | |
2045 | */ | |
2046 | static int do_origin(struct dm_dev *origin, struct bio *bio) | |
2047 | { | |
2048 | struct origin *o; | |
d2a7ad29 | 2049 | int r = DM_MAPIO_REMAPPED; |
1da177e4 LT |
2050 | |
2051 | down_read(&_origins_lock); | |
2052 | o = __lookup_origin(origin->bdev); | |
2053 | if (o) | |
9eaae8ff | 2054 | r = __origin_write(&o->snapshots, bio->bi_sector, bio); |
1da177e4 LT |
2055 | up_read(&_origins_lock); |
2056 | ||
2057 | return r; | |
2058 | } | |
2059 | ||
73dfd078 MP |
2060 | /* |
2061 | * Trigger exceptions in all non-merging snapshots. | |
2062 | * | |
2063 | * The chunk size of the merging snapshot may be larger than the chunk | |
2064 | * size of some other snapshot so we may need to reallocate multiple | |
2065 | * chunks in other snapshots. | |
2066 | * | |
2067 | * We scan all the overlapping exceptions in the other snapshots. | |
2068 | * Returns 1 if anything was reallocated and must be waited for, | |
2069 | * otherwise returns 0. | |
2070 | * | |
2071 | * size must be a multiple of merging_snap's chunk_size. | |
2072 | */ | |
2073 | static int origin_write_extent(struct dm_snapshot *merging_snap, | |
2074 | sector_t sector, unsigned size) | |
2075 | { | |
2076 | int must_wait = 0; | |
2077 | sector_t n; | |
2078 | struct origin *o; | |
2079 | ||
2080 | /* | |
2081 | * The origin's __minimum_chunk_size() got stored in split_io | |
2082 | * by snapshot_merge_resume(). | |
2083 | */ | |
2084 | down_read(&_origins_lock); | |
2085 | o = __lookup_origin(merging_snap->origin->bdev); | |
2086 | for (n = 0; n < size; n += merging_snap->ti->split_io) | |
2087 | if (__origin_write(&o->snapshots, sector + n, NULL) == | |
2088 | DM_MAPIO_SUBMITTED) | |
2089 | must_wait = 1; | |
2090 | up_read(&_origins_lock); | |
2091 | ||
2092 | return must_wait; | |
2093 | } | |
2094 | ||
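/*
 * Editor's worked example of the extent walk in origin_write_extent()
 * above, with assumed sizes: a 64-sector merge probed in split_io
 * (= minimum snapshot chunk size) steps of 16 sectors yields four
 * __origin_write() calls, one per potentially affected chunk.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long sector = 1024;  /* start of area being merged */
	unsigned size = 64;                /* merging chunk size, sectors */
	unsigned split_io = 16;            /* smallest chunk of any snapshot */
	unsigned long long n;

	for (n = 0; n < size; n += split_io)
		printf("probe snapshots at sector %llu\n", sector + n);
	return 0;
}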
1da177e4 LT |
2095 | /* |
2096 | * Origin: maps a linear range of a device, with hooks for snapshotting. | |
2097 | */ | |
2098 | ||
2099 | /* | |
2100 | * Construct an origin mapping: <dev_path> | |
2101 | * The context for an origin is merely a 'struct dm_dev *' | |
2102 | * pointing to the real device. | |
2103 | */ | |
2104 | static int origin_ctr(struct dm_target *ti, unsigned int argc, char **argv) | |
2105 | { | |
2106 | int r; | |
2107 | struct dm_dev *dev; | |
2108 | ||
2109 | if (argc != 1) { | |
72d94861 | 2110 | ti->error = "origin: incorrect number of arguments"; |
1da177e4 LT |
2111 | return -EINVAL; |
2112 | } | |
2113 | ||
8215d6ec | 2114 | r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &dev); |
1da177e4 LT |
2115 | if (r) { |
2116 | ti->error = "Cannot get target device"; | |
2117 | return r; | |
2118 | } | |
2119 | ||
2120 | ti->private = dev; | |
494b3ee7 MP |
2121 | ti->num_flush_requests = 1; |
2122 | ||
1da177e4 LT |
2123 | return 0; |
2124 | } | |
2125 | ||
2126 | static void origin_dtr(struct dm_target *ti) | |
2127 | { | |
028867ac | 2128 | struct dm_dev *dev = ti->private; |
1da177e4 LT |
2129 | dm_put_device(ti, dev); |
2130 | } | |
2131 | ||
2132 | static int origin_map(struct dm_target *ti, struct bio *bio, | |
2133 | union map_info *map_context) | |
2134 | { | |
028867ac | 2135 | struct dm_dev *dev = ti->private; |
1da177e4 LT |
2136 | bio->bi_bdev = dev->bdev; |
2137 | ||
494b3ee7 MP |
2138 | if (unlikely(bio_empty_barrier(bio))) |
2139 | return DM_MAPIO_REMAPPED; | |
2140 | ||
1da177e4 | 2141 | /* Only tell snapshots if this is a write */ |
d2a7ad29 | 2142 | return (bio_rw(bio) == WRITE) ? do_origin(dev, bio) : DM_MAPIO_REMAPPED; |
1da177e4 LT |
2143 | } |
2144 | ||
1da177e4 LT |
2145 | /* |
2146 | * Set the target "split_io" field to the minimum of all the snapshots' | |
2147 | * chunk sizes. | |
2148 | */ | |
2149 | static void origin_resume(struct dm_target *ti) | |
2150 | { | |
028867ac | 2151 | struct dm_dev *dev = ti->private; |
1da177e4 | 2152 | |
1e03f97e | 2153 | ti->split_io = get_origin_minimum_chunksize(dev->bdev); |
1da177e4 LT |
2154 | } |
2155 | ||
2156 | static int origin_status(struct dm_target *ti, status_type_t type, char *result, | |
2157 | unsigned int maxlen) | |
2158 | { | |
028867ac | 2159 | struct dm_dev *dev = ti->private; |
1da177e4 LT |
2160 | |
2161 | switch (type) { | |
2162 | case STATUSTYPE_INFO: | |
2163 | result[0] = '\0'; | |
2164 | break; | |
2165 | ||
2166 | case STATUSTYPE_TABLE: | |
2167 | snprintf(result, maxlen, "%s", dev->name); | |
2168 | break; | |
2169 | } | |
2170 | ||
2171 | return 0; | |
2172 | } | |
2173 | ||
b1d55528 MP |
2174 | static int origin_merge(struct dm_target *ti, struct bvec_merge_data *bvm, |
2175 | struct bio_vec *biovec, int max_size) | |
2176 | { | |
2177 | struct dm_dev *dev = ti->private; | |
2178 | struct request_queue *q = bdev_get_queue(dev->bdev); | |
2179 | ||
2180 | if (!q->merge_bvec_fn) | |
2181 | return max_size; | |
2182 | ||
2183 | bvm->bi_bdev = dev->bdev; | |
2184 | bvm->bi_sector = bvm->bi_sector; /* origin maps sectors 1:1 */ | |
2185 | ||
2186 | return min(max_size, q->merge_bvec_fn(q, bvm, biovec)); | |
2187 | } | |
2188 | ||
8811f46c MS |
2189 | static int origin_iterate_devices(struct dm_target *ti, |
2190 | iterate_devices_callout_fn fn, void *data) | |
2191 | { | |
2192 | struct dm_dev *dev = ti->private; | |
2193 | ||
2194 | return fn(ti, dev, 0, ti->len, data); | |
2195 | } | |
2196 | ||
1da177e4 LT |
2197 | static struct target_type origin_target = { |
2198 | .name = "snapshot-origin", | |
8811f46c | 2199 | .version = {1, 7, 0}, |
1da177e4 LT |
2200 | .module = THIS_MODULE, |
2201 | .ctr = origin_ctr, | |
2202 | .dtr = origin_dtr, | |
2203 | .map = origin_map, | |
2204 | .resume = origin_resume, | |
2205 | .status = origin_status, | |
b1d55528 | 2206 | .merge = origin_merge, |
8811f46c | 2207 | .iterate_devices = origin_iterate_devices, |
1da177e4 LT |
2208 | }; |
2209 | ||
2210 | static struct target_type snapshot_target = { | |
2211 | .name = "snapshot", | |
c26655ca | 2212 | .version = {1, 9, 0}, |
1da177e4 LT |
2213 | .module = THIS_MODULE, |
2214 | .ctr = snapshot_ctr, | |
2215 | .dtr = snapshot_dtr, | |
2216 | .map = snapshot_map, | |
cd45daff | 2217 | .end_io = snapshot_end_io, |
c26655ca | 2218 | .postsuspend = snapshot_postsuspend, |
c1f0c183 | 2219 | .preresume = snapshot_preresume, |
1da177e4 LT |
2220 | .resume = snapshot_resume, |
2221 | .status = snapshot_status, | |
8811f46c | 2222 | .iterate_devices = snapshot_iterate_devices, |
1da177e4 LT |
2223 | }; |
2224 | ||
d698aa45 MP |
2225 | static struct target_type merge_target = { |
2226 | .name = dm_snapshot_merge_target_name, | |
2227 | .version = {1, 0, 0}, | |
2228 | .module = THIS_MODULE, | |
2229 | .ctr = snapshot_ctr, | |
2230 | .dtr = snapshot_dtr, | |
3452c2a1 | 2231 | .map = snapshot_merge_map, |
d698aa45 | 2232 | .end_io = snapshot_end_io, |
1e03f97e | 2233 | .presuspend = snapshot_merge_presuspend, |
d698aa45 MP |
2234 | .postsuspend = snapshot_postsuspend, |
2235 | .preresume = snapshot_preresume, | |
1e03f97e | 2236 | .resume = snapshot_merge_resume, |
d698aa45 MP |
2237 | .status = snapshot_status, |
2238 | .iterate_devices = snapshot_iterate_devices, | |
2239 | }; | |
2240 | ||
1da177e4 LT |
2241 | static int __init dm_snapshot_init(void) |
2242 | { | |
2243 | int r; | |
2244 | ||
4db6bfe0 AK |
2245 | r = dm_exception_store_init(); |
2246 | if (r) { | |
2247 | DMERR("Failed to initialize exception stores"); | |
2248 | return r; | |
2249 | } | |
2250 | ||
1da177e4 | 2251 | r = dm_register_target(&snapshot_target); |
d698aa45 | 2252 | if (r < 0) { |
1da177e4 | 2253 | DMERR("snapshot target register failed %d", r); |
034a186d | 2254 | goto bad_register_snapshot_target; |
1da177e4 LT |
2255 | } |
2256 | ||
2257 | r = dm_register_target(&origin_target); | |
2258 | if (r < 0) { | |
72d94861 | 2259 | DMERR("Origin target register failed %d", r); |
d698aa45 MP |
2260 | goto bad_register_origin_target; |
2261 | } | |
2262 | ||
2263 | r = dm_register_target(&merge_target); | |
2264 | if (r < 0) { | |
2265 | DMERR("Merge target register failed %d", r); | |
2266 | goto bad_register_merge_target; | |
1da177e4 LT |
2267 | } |
2268 | ||
2269 | r = init_origin_hash(); | |
2270 | if (r) { | |
2271 | DMERR("init_origin_hash failed."); | |
d698aa45 | 2272 | goto bad_origin_hash; |
1da177e4 LT |
2273 | } |
2274 | ||
1d4989c8 | 2275 | exception_cache = KMEM_CACHE(dm_exception, 0); |
1da177e4 LT |
2276 | if (!exception_cache) { |
2277 | DMERR("Couldn't create exception cache."); | |
2278 | r = -ENOMEM; | |
d698aa45 | 2279 | goto bad_exception_cache; |
1da177e4 LT |
2280 | } |
2281 | ||
028867ac | 2282 | pending_cache = KMEM_CACHE(dm_snap_pending_exception, 0); |
1da177e4 LT |
2283 | if (!pending_cache) { |
2284 | DMERR("Couldn't create pending cache."); | |
2285 | r = -ENOMEM; | |
d698aa45 | 2286 | goto bad_pending_cache; |
1da177e4 LT |
2287 | } |
2288 | ||
cd45daff MP |
2289 | tracked_chunk_cache = KMEM_CACHE(dm_snap_tracked_chunk, 0); |
2290 | if (!tracked_chunk_cache) { | |
2291 | DMERR("Couldn't create cache to track chunks in use."); | |
2292 | r = -ENOMEM; | |
d698aa45 | 2293 | goto bad_tracked_chunk_cache; |
cd45daff MP |
2294 | } |
2295 | ||
ca3a931f AK |
2296 | ksnapd = create_singlethread_workqueue("ksnapd"); |
2297 | if (!ksnapd) { | |
2298 | DMERR("Failed to create ksnapd workqueue."); | |
2299 | r = -ENOMEM; | |
92e86812 | 2300 | goto bad_pending_pool; |
ca3a931f AK |
2301 | } |
2302 | ||
1da177e4 LT |
2303 | return 0; |
2304 | ||
4db6bfe0 | 2305 | bad_pending_pool: |
cd45daff | 2306 | kmem_cache_destroy(tracked_chunk_cache); |
d698aa45 | 2307 | bad_tracked_chunk_cache: |
1da177e4 | 2308 | kmem_cache_destroy(pending_cache); |
d698aa45 | 2309 | bad_pending_cache: |
1da177e4 | 2310 | kmem_cache_destroy(exception_cache); |
d698aa45 | 2311 | bad_exception_cache: |
1da177e4 | 2312 | exit_origin_hash(); |
d698aa45 MP |
2313 | bad_origin_hash: |
2314 | dm_unregister_target(&merge_target); | |
2315 | bad_register_merge_target: | |
1da177e4 | 2316 | dm_unregister_target(&origin_target); |
d698aa45 | 2317 | bad_register_origin_target: |
1da177e4 | 2318 | dm_unregister_target(&snapshot_target); |
034a186d JB |
2319 | bad_register_snapshot_target: |
2320 | dm_exception_store_exit(); | |
d698aa45 | 2321 | |
1da177e4 LT |
2322 | return r; |
2323 | } | |
2324 | ||
2325 | static void __exit dm_snapshot_exit(void) | |
2326 | { | |
ca3a931f AK |
2327 | destroy_workqueue(ksnapd); |
2328 | ||
10d3bd09 MP |
2329 | dm_unregister_target(&snapshot_target); |
2330 | dm_unregister_target(&origin_target); | |
d698aa45 | 2331 | dm_unregister_target(&merge_target); |
1da177e4 LT |
2332 | |
2333 | exit_origin_hash(); | |
1da177e4 LT |
2334 | kmem_cache_destroy(pending_cache); |
2335 | kmem_cache_destroy(exception_cache); | |
cd45daff | 2336 | kmem_cache_destroy(tracked_chunk_cache); |
4db6bfe0 AK |
2337 | |
2338 | dm_exception_store_exit(); | |
1da177e4 LT |
2339 | } |
2340 | ||
2341 | /* Module hooks */ | |
2342 | module_init(dm_snapshot_init); | |
2343 | module_exit(dm_snapshot_exit); | |
2344 | ||
2345 | MODULE_DESCRIPTION(DM_NAME " snapshot target"); | |
2346 | MODULE_AUTHOR("Joe Thornber"); | |
2347 | MODULE_LICENSE("GPL"); |