dm: rename request variables to bios
drivers/md/dm-thin.c
/*
 * Copyright (C) 2011-2012 Red Hat UK.
 *
 * This file is released under the GPL.
 */

#include "dm-thin-metadata.h"
#include "dm-bio-prison.h"
#include "dm.h"

#include <linux/device-mapper.h>
#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/module.h>
#include <linux/slab.h>

#define DM_MSG_PREFIX "thin"

/*
 * Tunable constants
 */
#define ENDIO_HOOK_POOL_SIZE 1024
#define MAPPING_POOL_SIZE 1024
#define PRISON_CELLS 1024
#define COMMIT_PERIOD HZ

/*
 * The block size of the device holding pool data must be
 * between 64KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (64 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * Device id is restricted to 24 bits.
 */
#define MAX_DEV_ID ((1 << 24) - 1)

/*
 * How do we handle breaking sharing of data blocks?
 * =================================================
 *
 * We use a standard copy-on-write btree to store the mappings for the
 * devices (note I'm talking about copy-on-write of the metadata here, not
 * the data). When you take an internal snapshot you clone the root node
 * of the origin btree. After this there is no concept of an origin or a
 * snapshot. They are just two device trees that happen to point to the
 * same data blocks.
 *
 * When we get a write in we decide if it's to a shared data block using
 * some timestamp magic. If it is, we have to break sharing.
 *
 * Let's say we write to a shared block in what was the origin. The
 * steps are:
 *
 * i) plug io further to this physical block. (see bio_prison code).
 *
 * ii) quiesce any read io to that shared data block. Obviously
 * including all devices that share this block. (see dm_deferred_set code)
 *
 * iii) copy the data block to a newly allocated block. This step can be
 * skipped if the io covers the block. (schedule_copy).
 *
 * iv) insert the new mapping into the origin's btree
 * (process_prepared_mapping). This act of inserting breaks some
 * sharing of btree nodes between the two devices. Breaking sharing only
 * affects the btree of that specific device. Btrees for the other
 * devices that share the block never change. The btree for the origin
 * device as it was after the last commit is untouched, i.e. we're using
 * persistent data structures in the functional programming sense.
 *
 * v) unplug io to this physical block, including the io that triggered
 * the breaking of sharing.
 *
 * Steps (ii) and (iii) occur in parallel.
 *
 * The metadata _doesn't_ need to be committed before the io continues. We
 * get away with this because the io is always written to a _new_ block.
 * If there's a crash, then:
 *
 * - The origin mapping will point to the old origin block (the shared
 * one). This will contain the data as it was before the io that triggered
 * the breaking of sharing came in.
 *
 * - The snap mapping still points to the old block. As it would after
 * the commit.
 *
 * The downside of this scheme is the timestamp magic isn't perfect, and
 * will continue to think the data block in the snapshot device is shared
 * even after the write to the origin has broken sharing. I suspect data
 * blocks will typically be shared by many different devices, so we're
 * breaking sharing n + 1 times, rather than n, where n is the number of
 * devices that reference this data block. At the moment I think the
 * benefits far, far outweigh the disadvantages.
 */

/*----------------------------------------------------------------*/

/*
 * Key building.
 */
static void build_data_key(struct dm_thin_device *td,
			   dm_block_t b, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

static void build_virtual_key(struct dm_thin_device *td, dm_block_t b,
			      struct dm_cell_key *key)
{
	key->virtual = 1;
	key->dev = dm_thin_dev_id(td);
	key->block = b;
}

/*----------------------------------------------------------------*/

/*
 * A pool device ties together a metadata device and a data device. It
 * also provides the interface for creating and destroying internal
 * devices.
 */
struct dm_thin_new_mapping;

/*
 * The pool runs in 3 modes. Ordered in degraded order for comparisons.
 */
enum pool_mode {
	PM_WRITE,		/* metadata may be changed */
	PM_READ_ONLY,		/* metadata may not be changed */
	PM_FAIL,		/* all I/O fails */
};

struct pool_features {
	enum pool_mode mode;

	bool zero_new_blocks:1;
	bool discard_enabled:1;
	bool discard_passdown:1;
};

struct thin_c;
typedef void (*process_bio_fn)(struct thin_c *tc, struct bio *bio);
typedef void (*process_mapping_fn)(struct dm_thin_new_mapping *m);

struct pool {
	struct list_head list;
	struct dm_target *ti;	/* Only set if a pool target is bound */

	struct mapped_device *pool_md;
	struct block_device *md_dev;
	struct dm_pool_metadata *pmd;

	dm_block_t low_water_blocks;
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	struct pool_features pf;
	unsigned low_water_triggered:1;	/* A dm event has been sent */
	unsigned no_free_space:1;	/* A -ENOSPC warning has been issued */

	struct dm_bio_prison *prison;
	struct dm_kcopyd_client *copier;

	struct workqueue_struct *wq;
	struct work_struct worker;
	struct delayed_work waker;

	unsigned long last_commit_jiffies;
	unsigned ref_count;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct list_head prepared_mappings;
	struct list_head prepared_discards;

	struct bio_list retry_on_resume_list;

	struct dm_deferred_set *shared_read_ds;
	struct dm_deferred_set *all_io_ds;

	struct dm_thin_new_mapping *next_mapping;
	mempool_t *mapping_pool;

	process_bio_fn process_bio;
	process_bio_fn process_discard;

	process_mapping_fn process_prepared_mapping;
	process_mapping_fn process_prepared_discard;
};

static enum pool_mode get_pool_mode(struct pool *pool);
static void set_pool_mode(struct pool *pool, enum pool_mode mode);

/*
 * Target context for a pool.
 */
struct pool_c {
	struct dm_target *ti;
	struct pool *pool;
	struct dm_dev *data_dev;
	struct dm_dev *metadata_dev;
	struct dm_target_callbacks callbacks;

	dm_block_t low_water_blocks;
	struct pool_features requested_pf; /* Features requested during table load */
	struct pool_features adjusted_pf;  /* Features used after adjusting for constituent devices */
};

/*
 * Target context for a thin.
 */
struct thin_c {
	struct dm_dev *pool_dev;
	struct dm_dev *origin_dev;
	dm_thin_id dev_id;

	struct pool *pool;
	struct dm_thin_device *td;
};

/*----------------------------------------------------------------*/

/*
 * A global list of pools that uses a struct mapped_device as a key.
 */
static struct dm_thin_pool_table {
	struct mutex mutex;
	struct list_head pools;
} dm_thin_pool_table;

static void pool_table_init(void)
{
	mutex_init(&dm_thin_pool_table.mutex);
	INIT_LIST_HEAD(&dm_thin_pool_table.pools);
}

static void __pool_table_insert(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_add(&pool->list, &dm_thin_pool_table.pools);
}

static void __pool_table_remove(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	list_del(&pool->list);
}

static struct pool *__pool_table_lookup(struct mapped_device *md)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->pool_md == md) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

static struct pool *__pool_table_lookup_metadata_dev(struct block_device *md_dev)
{
	struct pool *pool = NULL, *tmp;

	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));

	list_for_each_entry(tmp, &dm_thin_pool_table.pools, list) {
		if (tmp->md_dev == md_dev) {
			pool = tmp;
			break;
		}
	}

	return pool;
}

/*----------------------------------------------------------------*/

struct dm_thin_endio_hook {
	struct thin_c *tc;
	struct dm_deferred_entry *shared_read_entry;
	struct dm_deferred_entry *all_io_entry;
	struct dm_thin_new_mapping *overwrite_mapping;
};

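/*
 * End any bios on @master that belong to this thin device with
 * DM_ENDIO_REQUEUE so DM core resubmits them; bios for other devices
 * are kept on the master list.
 */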
static void __requeue_bio_list(struct thin_c *tc, struct bio_list *master)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, master);
	bio_list_init(master);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		if (h->tc == tc)
			bio_endio(bio, DM_ENDIO_REQUEUE);
		else
			bio_list_add(master, bio);
	}
}

static void requeue_io(struct thin_c *tc)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	__requeue_bio_list(tc, &pool->deferred_bios);
	__requeue_bio_list(tc, &pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*
 * This section of code contains the logic for processing a thin device's IO.
 * Much of the code depends on pool object resources (lists, workqueues, etc)
 * but most is exclusively called from the thin target rather than the thin-pool
 * target.
 */

static bool block_size_is_power_of_two(struct pool *pool)
{
	return pool->sectors_per_block_shift >= 0;
}

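/*
 * Returns the block number that contains this bio's start sector.
 */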
static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	sector_t block_nr = bio->bi_sector;

	if (block_size_is_power_of_two(pool))
		block_nr >>= pool->sectors_per_block_shift;
	else
		(void) sector_div(block_nr, pool->sectors_per_block);

	return block_nr;
}

static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
{
	struct pool *pool = tc->pool;
	sector_t bi_sector = bio->bi_sector;

	bio->bi_bdev = tc->pool_dev->bdev;
	if (block_size_is_power_of_two(pool))
		bio->bi_sector = (block << pool->sectors_per_block_shift) |
				(bi_sector & (pool->sectors_per_block - 1));
	else
		bio->bi_sector = (block * pool->sectors_per_block) +
				 sector_div(bi_sector, pool->sectors_per_block);
}

static void remap_to_origin(struct thin_c *tc, struct bio *bio)
{
	bio->bi_bdev = tc->origin_dev->bdev;
}

static int bio_triggers_commit(struct thin_c *tc, struct bio *bio)
{
	return (bio->bi_rw & (REQ_FLUSH | REQ_FUA)) &&
		dm_thin_changed_this_transaction(tc->td);
}

static void inc_all_io_entry(struct pool *pool, struct bio *bio)
{
	struct dm_thin_endio_hook *h;

	if (bio->bi_rw & REQ_DISCARD)
		return;

	h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	h->all_io_entry = dm_deferred_entry_inc(pool->all_io_ds);
}

static void issue(struct thin_c *tc, struct bio *bio)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	if (!bio_triggers_commit(tc, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Complete bio with an error if earlier I/O caused changes to
	 * the metadata that can't be committed, e.g. due to I/O errors
	 * on the metadata device.
	 */
	if (dm_thin_aborted_changes(tc->td)) {
		bio_io_error(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in process_deferred_bios().
	 */
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void remap_to_origin_and_issue(struct thin_c *tc, struct bio *bio)
{
	remap_to_origin(tc, bio);
	issue(tc, bio);
}

static void remap_and_issue(struct thin_c *tc, struct bio *bio,
			    dm_block_t block)
{
	remap(tc, bio, block);
	issue(tc, bio);
}

/*
 * wake_worker() is used when new work is queued and when pool_resume is
 * ready to continue deferred IO processing.
 */
static void wake_worker(struct pool *pool)
{
	queue_work(pool->wq, &pool->worker);
}

/*----------------------------------------------------------------*/

/*
 * Bio endio functions.
 */
struct dm_thin_new_mapping {
	struct list_head list;

	unsigned quiesced:1;
	unsigned prepared:1;
	unsigned pass_discard:1;

	struct thin_c *tc;
	dm_block_t virt_block;
	dm_block_t data_block;
	struct dm_bio_prison_cell *cell, *cell2;
	int err;

	/*
	 * If the bio covers the whole area of a block then we can avoid
	 * zeroing or copying. Instead this bio is hooked. The bio will
	 * still be in the cell, so care has to be taken to avoid issuing
	 * the bio twice.
	 */
	struct bio *bio;
	bio_end_io_t *saved_bi_end_io;
};

static void __maybe_add_mapping(struct dm_thin_new_mapping *m)
{
	struct pool *pool = m->tc->pool;

	if (m->quiesced && m->prepared) {
		list_add(&m->list, &pool->prepared_mappings);
		wake_worker(pool);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_thin_new_mapping *m = context;
	struct pool *pool = m->tc->pool;

	m->err = read_err || write_err ? -EIO : 0;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void overwrite_endio(struct bio *bio, int err)
{
	unsigned long flags;
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct dm_thin_new_mapping *m = h->overwrite_mapping;
	struct pool *pool = m->tc->pool;

	m->err = err;

	spin_lock_irqsave(&pool->lock, flags);
	m->prepared = 1;
	__maybe_add_mapping(m);
	spin_unlock_irqrestore(&pool->lock, flags);
}

/*----------------------------------------------------------------*/

/*
 * Workqueue.
 */

/*
 * Prepared mapping jobs.
 */

/*
 * This sends the bios in the cell back to the deferred_bios list.
 */
static void cell_defer(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release(cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

/*
 * Same as cell_defer except it omits the original holder of the cell.
 */
static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *cell)
{
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	dm_cell_release_no_holder(cell, &pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
{
	if (m->bio)
		m->bio->bi_end_io = m->saved_bi_end_io;
	dm_cell_error(m->cell);
	list_del(&m->list);
	mempool_free(m, m->tc->pool->mapping_pool);
}

static void process_prepared_mapping(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;
	struct bio *bio;
	int r;

	bio = m->bio;
	if (bio)
		bio->bi_end_io = m->saved_bi_end_io;

	if (m->err) {
		dm_cell_error(m->cell);
		goto out;
	}

	/*
	 * Commit the prepared block into the mapping btree.
	 * Any I/O for this block arriving after this point will get
	 * remapped to it directly.
	 */
	r = dm_thin_insert_block(tc->td, m->virt_block, m->data_block);
	if (r) {
		DMERR_LIMIT("dm_thin_insert_block() failed");
		dm_cell_error(m->cell);
		goto out;
	}

	/*
	 * Release any bios held while the block was being provisioned.
	 * If we are processing a write bio that completely covers the block,
	 * we already processed it so can ignore it now when processing
	 * the bios in the cell.
	 */
	if (bio) {
		cell_defer_no_holder(tc, m->cell);
		bio_endio(bio, 0);
	} else
		cell_defer(tc, m->cell);

out:
	list_del(&m->list);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_fail(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	bio_io_error(m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);
	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard_passdown(struct dm_thin_new_mapping *m)
{
	struct thin_c *tc = m->tc;

	inc_all_io_entry(tc->pool, m->bio);
	cell_defer_no_holder(tc, m->cell);
	cell_defer_no_holder(tc, m->cell2);

	if (m->pass_discard)
		remap_and_issue(tc, m->bio, m->data_block);
	else
		bio_endio(m->bio, 0);

	mempool_free(m, tc->pool->mapping_pool);
}

static void process_prepared_discard(struct dm_thin_new_mapping *m)
{
	int r;
	struct thin_c *tc = m->tc;

	r = dm_thin_remove_block(tc->td, m->virt_block);
	if (r)
		DMERR_LIMIT("dm_thin_remove_block() failed");

	process_prepared_discard_passdown(m);
}

static void process_prepared(struct pool *pool, struct list_head *head,
			     process_mapping_fn *fn)
{
	unsigned long flags;
	struct list_head maps;
	struct dm_thin_new_mapping *m, *tmp;

	INIT_LIST_HEAD(&maps);
	spin_lock_irqsave(&pool->lock, flags);
	list_splice_init(head, &maps);
	spin_unlock_irqrestore(&pool->lock, flags);

	list_for_each_entry_safe(m, tmp, &maps, list)
		(*fn)(m);
}

/*
 * Deferred bio jobs.
 */
static int io_overlaps_block(struct pool *pool, struct bio *bio)
{
	return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
}

static int io_overwrites_block(struct pool *pool, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		io_overlaps_block(pool, bio);
}

static void save_and_set_endio(struct bio *bio, bio_end_io_t **save,
			       bio_end_io_t *fn)
{
	*save = bio->bi_end_io;
	bio->bi_end_io = fn;
}

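/*
 * Ensure pool->next_mapping is populated. The allocation must not
 * block (GFP_ATOMIC) because this runs from the worker; returns
 * -ENOMEM if the mempool is exhausted.
 */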
static int ensure_next_mapping(struct pool *pool)
{
	if (pool->next_mapping)
		return 0;

	pool->next_mapping = mempool_alloc(pool->mapping_pool, GFP_ATOMIC);

	return pool->next_mapping ? 0 : -ENOMEM;
}

static struct dm_thin_new_mapping *get_next_mapping(struct pool *pool)
{
	struct dm_thin_new_mapping *r = pool->next_mapping;

	BUG_ON(!pool->next_mapping);

	pool->next_mapping = NULL;

	return r;
}

static void schedule_copy(struct thin_c *tc, dm_block_t virt_block,
			  struct dm_dev *origin, dm_block_t data_origin,
			  dm_block_t data_dest,
			  struct dm_bio_prison_cell *cell, struct bio *bio)
{
	int r;
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 0;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_dest;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	if (!dm_deferred_set_add_work(pool->shared_read_ds, &m->list))
		m->quiesced = 1;

	/*
	 * IO to pool_dev remaps to the pool target's data_dev.
	 *
	 * If the whole block of data is being overwritten, we can issue the
	 * bio immediately. Otherwise we use kcopyd to clone the data first.
	 */
	if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_dest);
	} else {
		struct dm_io_region from, to;

		from.bdev = origin->bdev;
		from.sector = data_origin * pool->sectors_per_block;
		from.count = pool->sectors_per_block;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_dest * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_copy(pool->copier, &from, 1, &to,
				   0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_copy() failed");
			dm_cell_error(cell);
		}
	}
}

static void schedule_internal_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_origin, dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->pool_dev,
		      data_origin, data_dest, cell, bio);
}

static void schedule_external_copy(struct thin_c *tc, dm_block_t virt_block,
				   dm_block_t data_dest,
				   struct dm_bio_prison_cell *cell, struct bio *bio)
{
	schedule_copy(tc, virt_block, tc->origin_dev,
		      virt_block, data_dest, cell, bio);
}

static void schedule_zero(struct thin_c *tc, dm_block_t virt_block,
			  dm_block_t data_block, struct dm_bio_prison_cell *cell,
			  struct bio *bio)
{
	struct pool *pool = tc->pool;
	struct dm_thin_new_mapping *m = get_next_mapping(pool);

	INIT_LIST_HEAD(&m->list);
	m->quiesced = 1;
	m->prepared = 0;
	m->tc = tc;
	m->virt_block = virt_block;
	m->data_block = data_block;
	m->cell = cell;
	m->err = 0;
	m->bio = NULL;

	/*
	 * If the whole block of data is being overwritten or we are not
	 * zeroing pre-existing data, we can issue the bio immediately.
	 * Otherwise we use kcopyd to zero the data first.
	 */
	if (!pool->pf.zero_new_blocks)
		process_prepared_mapping(m);

	else if (io_overwrites_block(pool, bio)) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->overwrite_mapping = m;
		m->bio = bio;
		save_and_set_endio(bio, &m->saved_bi_end_io, overwrite_endio);
		inc_all_io_entry(pool, bio);
		remap_and_issue(tc, bio, data_block);
	} else {
		int r;
		struct dm_io_region to;

		to.bdev = tc->pool_dev->bdev;
		to.sector = data_block * pool->sectors_per_block;
		to.count = pool->sectors_per_block;

		r = dm_kcopyd_zero(pool->copier, 1, &to, 0, copy_complete, m);
		if (r < 0) {
			mempool_free(m, pool->mapping_pool);
			DMERR_LIMIT("dm_kcopyd_zero() failed");
			dm_cell_error(cell);
		}
	}
}

static int commit(struct pool *pool)
{
	int r;

	r = dm_pool_commit_metadata(pool->pmd);
	if (r)
		DMERR_LIMIT("commit failed: error = %d", r);

	return r;
}

/*
 * A non-zero return indicates read_only or fail_io mode.
 * Many callers don't care about the return value.
 */
static int commit_or_fallback(struct pool *pool)
{
	int r;

	if (get_pool_mode(pool) != PM_WRITE)
		return -EINVAL;

	r = commit(pool);
	if (r)
		set_pool_mode(pool, PM_READ_ONLY);

	return r;
}

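/*
 * Allocate a new data block. Sends a dm event the first time the low
 * water mark is reached, and retries once after a commit before
 * concluding the pool is out of space.
 */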
static int alloc_data_block(struct thin_c *tc, dm_block_t *result)
{
	int r;
	dm_block_t free_blocks;
	unsigned long flags;
	struct pool *pool = tc->pool;

	r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
	if (r)
		return r;

	if (free_blocks <= pool->low_water_blocks && !pool->low_water_triggered) {
		DMWARN("%s: reached low water mark, sending event.",
		       dm_device_name(pool->pool_md));
		spin_lock_irqsave(&pool->lock, flags);
		pool->low_water_triggered = 1;
		spin_unlock_irqrestore(&pool->lock, flags);
		dm_table_event(pool->ti->table);
	}

	if (!free_blocks) {
		if (pool->no_free_space)
			return -ENOSPC;
		else {
			/*
			 * Try to commit to see if that will free up some
			 * more space.
			 */
			(void) commit_or_fallback(pool);

			r = dm_pool_get_free_block_count(pool->pmd, &free_blocks);
			if (r)
				return r;

			/*
			 * If we still have no space we set a flag to avoid
			 * doing all this checking and return -ENOSPC.
			 */
			if (!free_blocks) {
				DMWARN("%s: no free space available.",
				       dm_device_name(pool->pool_md));
				spin_lock_irqsave(&pool->lock, flags);
				pool->no_free_space = 1;
				spin_unlock_irqrestore(&pool->lock, flags);
				return -ENOSPC;
			}
		}
	}

	r = dm_pool_alloc_data_block(pool->pmd, result);
	if (r)
		return r;

	return 0;
}

/*
 * If we have run out of space, queue bios until the device is
 * resumed, presumably after having been reloaded with more space.
 */
static void retry_on_resume(struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
	struct thin_c *tc = h->tc;
	struct pool *pool = tc->pool;
	unsigned long flags;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->retry_on_resume_list, bio);
	spin_unlock_irqrestore(&pool->lock, flags);
}

static void no_space(struct dm_bio_prison_cell *cell)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	dm_cell_release(cell, &bios);

	while ((bio = bio_list_pop(&bios)))
		retry_on_resume(bio);
}

static void process_discard(struct thin_c *tc, struct bio *bio)
{
	int r;
	unsigned long flags;
	struct pool *pool = tc->pool;
	struct dm_bio_prison_cell *cell, *cell2;
	struct dm_cell_key key, key2;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;
	struct dm_thin_new_mapping *m;

	build_virtual_key(tc->td, block, &key);
	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		/*
		 * Check nobody is fiddling with this pool block. This can
		 * happen if someone's in the process of breaking sharing
		 * on this block.
		 */
		build_data_key(tc->td, lookup_result.block, &key2);
		if (dm_bio_detain(tc->pool->prison, &key2, bio, &cell2)) {
			cell_defer_no_holder(tc, cell);
			break;
		}

		if (io_overlaps_block(pool, bio)) {
			/*
			 * IO may still be going to the destination block. We must
			 * quiesce before we can do the removal.
			 */
			m = get_next_mapping(pool);
			m->tc = tc;
			m->pass_discard = (!lookup_result.shared) && pool->pf.discard_passdown;
			m->virt_block = block;
			m->data_block = lookup_result.block;
			m->cell = cell;
			m->cell2 = cell2;
			m->err = 0;
			m->bio = bio;

			if (!dm_deferred_set_add_work(pool->all_io_ds, &m->list)) {
				spin_lock_irqsave(&pool->lock, flags);
				list_add(&m->list, &pool->prepared_discards);
				spin_unlock_irqrestore(&pool->lock, flags);
				wake_worker(pool);
			}
		} else {
			inc_all_io_entry(pool, bio);
			cell_defer_no_holder(tc, cell);
			cell_defer_no_holder(tc, cell2);

			/*
			 * The DM core makes sure that the discard doesn't span
			 * a block boundary. So we submit the discard of a
			 * partial block appropriately.
			 */
			if ((!lookup_result.shared) && pool->pf.discard_passdown)
				remap_and_issue(tc, bio, lookup_result.block);
			else
				bio_endio(bio, 0);
		}
		break;

	case -ENODATA:
		/*
		 * It isn't provisioned, just forget it.
		 */
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void break_sharing(struct thin_c *tc, struct bio *bio, dm_block_t block,
			  struct dm_cell_key *key,
			  struct dm_thin_lookup_result *lookup_result,
			  struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		schedule_internal_copy(tc, block, lookup_result->block,
				       data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		dm_cell_error(cell);
		break;
	}
}

static void process_shared_bio(struct thin_c *tc, struct bio *bio,
			       dm_block_t block,
			       struct dm_thin_lookup_result *lookup_result)
{
	struct dm_bio_prison_cell *cell;
	struct pool *pool = tc->pool;
	struct dm_cell_key key;

	/*
	 * If cell is already occupied, then sharing is already in the process
	 * of being broken so we have nothing further to do here.
	 */
	build_data_key(tc->td, lookup_result->block, &key);
	if (dm_bio_detain(pool->prison, &key, bio, &cell))
		return;

	if (bio_data_dir(bio) == WRITE && bio->bi_size)
		break_sharing(tc, bio, block, &key, lookup_result, cell);
	else {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

		h->shared_read_entry = dm_deferred_entry_inc(pool->shared_read_ds);
		inc_all_io_entry(pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, lookup_result->block);
	}
}

static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block,
			    struct dm_bio_prison_cell *cell)
{
	int r;
	dm_block_t data_block;

	/*
	 * Remap empty bios (flushes) immediately, without provisioning.
	 */
	if (!bio->bi_size) {
		inc_all_io_entry(tc->pool, bio);
		cell_defer_no_holder(tc, cell);

		remap_and_issue(tc, bio, 0);
		return;
	}

	/*
	 * Fill read bios with zeroes and complete them immediately.
	 */
	if (bio_data_dir(bio) == READ) {
		zero_fill_bio(bio);
		cell_defer_no_holder(tc, cell);
		bio_endio(bio, 0);
		return;
	}

	r = alloc_data_block(tc, &data_block);
	switch (r) {
	case 0:
		if (tc->origin_dev)
			schedule_external_copy(tc, block, data_block, cell, bio);
		else
			schedule_zero(tc, block, data_block, cell, bio);
		break;

	case -ENOSPC:
		no_space(cell);
		break;

	default:
		DMERR_LIMIT("%s: alloc_data_block() failed: error = %d",
			    __func__, r);
		set_pool_mode(tc->pool, PM_READ_ONLY);
		dm_cell_error(cell);
		break;
	}
}

static void process_bio(struct thin_c *tc, struct bio *bio)
{
	int r;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_bio_prison_cell *cell;
	struct dm_cell_key key;
	struct dm_thin_lookup_result lookup_result;

	/*
	 * If cell is already occupied, then the block is already
	 * being provisioned so we have nothing further to do here.
	 */
	build_virtual_key(tc->td, block, &key);
	if (dm_bio_detain(tc->pool->prison, &key, bio, &cell))
		return;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared) {
			process_shared_bio(tc, bio, block, &lookup_result);
			cell_defer_no_holder(tc, cell);
		} else {
			inc_all_io_entry(tc->pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_and_issue(tc, bio, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (bio_data_dir(bio) == READ && tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			cell_defer_no_holder(tc, cell);

			remap_to_origin_and_issue(tc, bio);
		} else
			provision_block(tc, bio, block, cell);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		cell_defer_no_holder(tc, cell);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
{
	int r;
	int rw = bio_data_dir(bio);
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_lookup_result lookup_result;

	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
	switch (r) {
	case 0:
		if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
			bio_io_error(bio);
		else {
			inc_all_io_entry(tc->pool, bio);
			remap_and_issue(tc, bio, lookup_result.block);
		}
		break;

	case -ENODATA:
		if (rw != READ) {
			bio_io_error(bio);
			break;
		}

		if (tc->origin_dev) {
			inc_all_io_entry(tc->pool, bio);
			remap_to_origin_and_issue(tc, bio);
			break;
		}

		zero_fill_bio(bio);
		bio_endio(bio, 0);
		break;

	default:
		DMERR_LIMIT("%s: dm_thin_find_block() failed: error = %d",
			    __func__, r);
		bio_io_error(bio);
		break;
	}
}

static void process_bio_fail(struct thin_c *tc, struct bio *bio)
{
	bio_io_error(bio);
}

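/*
 * Non-zero if jiffies has wrapped or more than COMMIT_PERIOD has
 * elapsed since the last commit.
 */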
static int need_commit_due_to_time(struct pool *pool)
{
	return jiffies < pool->last_commit_jiffies ||
	       jiffies > pool->last_commit_jiffies + COMMIT_PERIOD;
}

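/*
 * Called from the worker: drains pool->deferred_bios through the
 * per-mode process functions, then commits the metadata (if needed)
 * before issuing any bios that were waiting on the commit.
 */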
static void process_deferred_bios(struct pool *pool)
{
	unsigned long flags;
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_bios);
	bio_list_init(&pool->deferred_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	while ((bio = bio_list_pop(&bios))) {
		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
		struct thin_c *tc = h->tc;

		/*
		 * If we've got no free new_mapping structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (ensure_next_mapping(pool)) {
			spin_lock_irqsave(&pool->lock, flags);
			bio_list_merge(&pool->deferred_bios, &bios);
			spin_unlock_irqrestore(&pool->lock, flags);

			break;
		}

		if (bio->bi_rw & REQ_DISCARD)
			pool->process_discard(tc, bio);
		else
			pool->process_bio(tc, bio);
	}

	/*
	 * If there are any deferred flush bios, we must commit
	 * the metadata before issuing them.
	 */
	bio_list_init(&bios);
	spin_lock_irqsave(&pool->lock, flags);
	bio_list_merge(&bios, &pool->deferred_flush_bios);
	bio_list_init(&pool->deferred_flush_bios);
	spin_unlock_irqrestore(&pool->lock, flags);

	if (bio_list_empty(&bios) && !need_commit_due_to_time(pool))
		return;

	if (commit_or_fallback(pool)) {
		while ((bio = bio_list_pop(&bios)))
			bio_io_error(bio);
		return;
	}
	pool->last_commit_jiffies = jiffies;

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void do_worker(struct work_struct *ws)
{
	struct pool *pool = container_of(ws, struct pool, worker);

	process_prepared(pool, &pool->prepared_mappings, &pool->process_prepared_mapping);
	process_prepared(pool, &pool->prepared_discards, &pool->process_prepared_discard);
	process_deferred_bios(pool);
}

/*
 * We want to commit periodically so that not too much
 * unwritten data builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct pool *pool = container_of(to_delayed_work(ws), struct pool, waker);
	wake_worker(pool);
	queue_delayed_work(pool->wq, &pool->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static enum pool_mode get_pool_mode(struct pool *pool)
{
	return pool->pf.mode;
}

static void set_pool_mode(struct pool *pool, enum pool_mode mode)
{
	int r;

	pool->pf.mode = mode;

	switch (mode) {
	case PM_FAIL:
		DMERR("switching pool to failure mode");
		pool->process_bio = process_bio_fail;
		pool->process_discard = process_bio_fail;
		pool->process_prepared_mapping = process_prepared_mapping_fail;
		pool->process_prepared_discard = process_prepared_discard_fail;
		break;

	case PM_READ_ONLY:
		DMERR("switching pool to read-only mode");
		r = dm_pool_abort_metadata(pool->pmd);
		if (r) {
			DMERR("aborting transaction failed");
			set_pool_mode(pool, PM_FAIL);
		} else {
			dm_pool_metadata_read_only(pool->pmd);
			pool->process_bio = process_bio_read_only;
			pool->process_discard = process_discard;
			pool->process_prepared_mapping = process_prepared_mapping_fail;
			pool->process_prepared_discard = process_prepared_discard_passdown;
		}
		break;

	case PM_WRITE:
		pool->process_bio = process_bio;
		pool->process_discard = process_discard;
		pool->process_prepared_mapping = process_prepared_mapping;
		pool->process_prepared_discard = process_prepared_discard;
		break;
	}
}

/*----------------------------------------------------------------*/

/*
 * Mapping functions.
 */

/*
 * Called only while mapping a thin bio to hand it over to the workqueue.
 */
static void thin_defer_bio(struct thin_c *tc, struct bio *bio)
{
	unsigned long flags;
	struct pool *pool = tc->pool;

	spin_lock_irqsave(&pool->lock, flags);
	bio_list_add(&pool->deferred_bios, bio);
	spin_unlock_irqrestore(&pool->lock, flags);

	wake_worker(pool);
}

static void thin_hook_bio(struct thin_c *tc, struct bio *bio)
{
	struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));

	h->tc = tc;
	h->shared_read_entry = NULL;
	h->all_io_entry = NULL;
	h->overwrite_mapping = NULL;
}

/*
 * Non-blocking function called from the thin target's map function.
 */
static int thin_bio_map(struct dm_target *ti, struct bio *bio)
{
	int r;
	struct thin_c *tc = ti->private;
	dm_block_t block = get_bio_block(tc, bio);
	struct dm_thin_device *td = tc->td;
	struct dm_thin_lookup_result result;
	struct dm_bio_prison_cell *cell1, *cell2;
	struct dm_cell_key key;

	thin_hook_bio(tc, bio);

	if (get_pool_mode(tc->pool) == PM_FAIL) {
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}

	if (bio->bi_rw & (REQ_DISCARD | REQ_FLUSH | REQ_FUA)) {
		thin_defer_bio(tc, bio);
		return DM_MAPIO_SUBMITTED;
	}

	r = dm_thin_find_block(td, block, 0, &result);

	/*
	 * Note that we defer readahead too.
	 */
	switch (r) {
	case 0:
		if (unlikely(result.shared)) {
			/*
			 * We have a race condition here between the
			 * result.shared value returned by the lookup and
			 * snapshot creation, which may cause new
			 * sharing.
			 *
			 * To avoid this always quiesce the origin before
			 * taking the snap. You want to do this anyway to
			 * ensure a consistent application view
			 * (i.e. lockfs).
			 *
			 * More distant ancestors are irrelevant. The
			 * shared flag will be set in their case.
			 */
			thin_defer_bio(tc, bio);
			return DM_MAPIO_SUBMITTED;
		}

		build_virtual_key(tc->td, block, &key);
		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell1))
			return DM_MAPIO_SUBMITTED;

		build_data_key(tc->td, result.block, &key);
		if (dm_bio_detain(tc->pool->prison, &key, bio, &cell2)) {
			cell_defer_no_holder(tc, cell1);
			return DM_MAPIO_SUBMITTED;
		}

		inc_all_io_entry(tc->pool, bio);
		cell_defer_no_holder(tc, cell2);
		cell_defer_no_holder(tc, cell1);

		remap(tc, bio, result.block);
		return DM_MAPIO_REMAPPED;

	case -ENODATA:
		if (get_pool_mode(tc->pool) == PM_READ_ONLY) {
			/*
			 * This block isn't provisioned, and we have no way
			 * of doing so. Just error it.
			 */
			bio_io_error(bio);
			return DM_MAPIO_SUBMITTED;
		}
		/* fall through */

	case -EWOULDBLOCK:
		/*
		 * In future, the failed dm_thin_find_block above could
		 * provide the hint to load the metadata into cache.
		 */
		thin_defer_bio(tc, bio);
		return DM_MAPIO_SUBMITTED;

	default:
		/*
		 * Must always call bio_io_error on failure.
		 * dm_thin_find_block can fail with -EINVAL if the
		 * pool is switched to fail-io mode.
		 */
		bio_io_error(bio);
		return DM_MAPIO_SUBMITTED;
	}
}

static int pool_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	int r;
	unsigned long flags;
	struct pool_c *pt = container_of(cb, struct pool_c, callbacks);

	spin_lock_irqsave(&pt->pool->lock, flags);
	r = !bio_list_empty(&pt->pool->retry_on_resume_list);
	spin_unlock_irqrestore(&pt->pool->lock, flags);

	if (!r) {
		struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
		r = bdi_congested(&q->backing_dev_info, bdi_bits);
	}

	return r;
}

static void __requeue_bios(struct pool *pool)
{
	bio_list_merge(&pool->deferred_bios, &pool->retry_on_resume_list);
	bio_list_init(&pool->retry_on_resume_list);
}

/*----------------------------------------------------------------
 * Binding of control targets to a pool object
 *--------------------------------------------------------------*/
static bool data_dev_supports_discard(struct pool_c *pt)
{
	struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);

	return q && blk_queue_discard(q);
}

/*
 * If discard_passdown was enabled verify that the data device
 * supports discards. Disable discard_passdown if not.
 */
static void disable_passdown_if_not_supported(struct pool_c *pt)
{
	struct pool *pool = pt->pool;
	struct block_device *data_bdev = pt->data_dev->bdev;
	struct queue_limits *data_limits = &bdev_get_queue(data_bdev)->limits;
	sector_t block_size = pool->sectors_per_block << SECTOR_SHIFT;
	const char *reason = NULL;
	char buf[BDEVNAME_SIZE];

	if (!pt->adjusted_pf.discard_passdown)
		return;

	if (!data_dev_supports_discard(pt))
		reason = "discard unsupported";

	else if (data_limits->max_discard_sectors < pool->sectors_per_block)
		reason = "max discard sectors smaller than a block";

	else if (data_limits->discard_granularity > block_size)
		reason = "discard granularity larger than a block";

	else if (block_size & (data_limits->discard_granularity - 1))
		reason = "discard granularity not a factor of block size";

	if (reason) {
		DMWARN("Data device (%s) %s: Disabling discard passdown.", bdevname(data_bdev, buf), reason);
		pt->adjusted_pf.discard_passdown = false;
	}
}

static int bind_control_target(struct pool *pool, struct dm_target *ti)
{
	struct pool_c *pt = ti->private;

	/*
	 * We want to make sure that degraded pools are never upgraded.
	 */
	enum pool_mode old_mode = pool->pf.mode;
	enum pool_mode new_mode = pt->adjusted_pf.mode;

	if (old_mode > new_mode)
		new_mode = old_mode;

	pool->ti = ti;
	pool->low_water_blocks = pt->low_water_blocks;
	pool->pf = pt->adjusted_pf;

	set_pool_mode(pool, new_mode);

	return 0;
}

static void unbind_control_target(struct pool *pool, struct dm_target *ti)
{
	if (pool->ti == ti)
		pool->ti = NULL;
}

/*----------------------------------------------------------------
 * Pool creation
 *--------------------------------------------------------------*/
/* Initialize pool features. */
static void pool_features_init(struct pool_features *pf)
{
	pf->mode = PM_WRITE;
	pf->zero_new_blocks = true;
	pf->discard_enabled = true;
	pf->discard_passdown = true;
}

static void __pool_destroy(struct pool *pool)
{
	__pool_table_remove(pool);

	if (dm_pool_metadata_close(pool->pmd) < 0)
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);

	dm_bio_prison_destroy(pool->prison);
	dm_kcopyd_client_destroy(pool->copier);

	if (pool->wq)
		destroy_workqueue(pool->wq);

	if (pool->next_mapping)
		mempool_free(pool->next_mapping, pool->mapping_pool);
	mempool_destroy(pool->mapping_pool);
	dm_deferred_set_destroy(pool->shared_read_ds);
	dm_deferred_set_destroy(pool->all_io_ds);
	kfree(pool);
}

static struct kmem_cache *_new_mapping_cache;

static struct pool *pool_create(struct mapped_device *pool_md,
				struct block_device *metadata_dev,
				unsigned long block_size,
				int read_only, char **error)
{
	int r;
	void *err_p;
	struct pool *pool;
	struct dm_pool_metadata *pmd;
	bool format_device = read_only ? false : true;

	pmd = dm_pool_metadata_open(metadata_dev, block_size, format_device);
	if (IS_ERR(pmd)) {
		*error = "Error creating metadata object";
		return (struct pool *)pmd;
	}

	pool = kmalloc(sizeof(*pool), GFP_KERNEL);
	if (!pool) {
		*error = "Error allocating memory for pool";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_pool;
	}

	pool->pmd = pmd;
	pool->sectors_per_block = block_size;
	if (block_size & (block_size - 1))
		pool->sectors_per_block_shift = -1;
	else
		pool->sectors_per_block_shift = __ffs(block_size);
	pool->low_water_blocks = 0;
	pool_features_init(&pool->pf);
	pool->prison = dm_bio_prison_create(PRISON_CELLS);
	if (!pool->prison) {
		*error = "Error creating pool's bio prison";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_prison;
	}

	pool->copier = dm_kcopyd_client_create();
	if (IS_ERR(pool->copier)) {
		r = PTR_ERR(pool->copier);
		*error = "Error creating pool's kcopyd client";
		err_p = ERR_PTR(r);
		goto bad_kcopyd_client;
	}

	/*
	 * Create singlethreaded workqueue that will service all devices
	 * that use this metadata.
	 */
	pool->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
	if (!pool->wq) {
		*error = "Error creating pool's workqueue";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_wq;
	}

	INIT_WORK(&pool->worker, do_worker);
	INIT_DELAYED_WORK(&pool->waker, do_waker);
	spin_lock_init(&pool->lock);
	bio_list_init(&pool->deferred_bios);
	bio_list_init(&pool->deferred_flush_bios);
	INIT_LIST_HEAD(&pool->prepared_mappings);
	INIT_LIST_HEAD(&pool->prepared_discards);
	pool->low_water_triggered = 0;
	pool->no_free_space = 0;
	bio_list_init(&pool->retry_on_resume_list);

	pool->shared_read_ds = dm_deferred_set_create();
	if (!pool->shared_read_ds) {
		*error = "Error creating pool's shared read deferred set";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_shared_read_ds;
	}

	pool->all_io_ds = dm_deferred_set_create();
	if (!pool->all_io_ds) {
		*error = "Error creating pool's all io deferred set";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_all_io_ds;
	}

	pool->next_mapping = NULL;
	pool->mapping_pool = mempool_create_slab_pool(MAPPING_POOL_SIZE,
						      _new_mapping_cache);
	if (!pool->mapping_pool) {
		*error = "Error creating pool's mapping mempool";
		err_p = ERR_PTR(-ENOMEM);
		goto bad_mapping_pool;
	}

	pool->ref_count = 1;
	pool->last_commit_jiffies = jiffies;
	pool->pool_md = pool_md;
	pool->md_dev = metadata_dev;
	__pool_table_insert(pool);

	return pool;

bad_mapping_pool:
	dm_deferred_set_destroy(pool->all_io_ds);
bad_all_io_ds:
	dm_deferred_set_destroy(pool->shared_read_ds);
bad_shared_read_ds:
	destroy_workqueue(pool->wq);
bad_wq:
	dm_kcopyd_client_destroy(pool->copier);
bad_kcopyd_client:
	dm_bio_prison_destroy(pool->prison);
bad_prison:
	kfree(pool);
bad_pool:
	if (dm_pool_metadata_close(pmd))
		DMWARN("%s: dm_pool_metadata_close() failed.", __func__);

	return err_p;
}

static void __pool_inc(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	pool->ref_count++;
}

static void __pool_dec(struct pool *pool)
{
	BUG_ON(!mutex_is_locked(&dm_thin_pool_table.mutex));
	BUG_ON(!pool->ref_count);
	if (!--pool->ref_count)
		__pool_destroy(pool);
}

static struct pool *__pool_find(struct mapped_device *pool_md,
				struct block_device *metadata_dev,
				unsigned long block_size, int read_only,
				char **error, int *created)
{
	struct pool *pool = __pool_table_lookup_metadata_dev(metadata_dev);

	if (pool) {
		if (pool->pool_md != pool_md) {
			*error = "metadata device already in use by a pool";
			return ERR_PTR(-EBUSY);
		}
		__pool_inc(pool);

	} else {
		pool = __pool_table_lookup(pool_md);
		if (pool) {
			if (pool->md_dev != metadata_dev) {
				*error = "different pool cannot replace a pool";
				return ERR_PTR(-EINVAL);
			}
			__pool_inc(pool);

		} else {
			pool = pool_create(pool_md, metadata_dev, block_size, read_only, error);
			*created = 1;
		}
	}

	return pool;
}

/*----------------------------------------------------------------
 * Pool target methods
 *--------------------------------------------------------------*/
static void pool_dtr(struct dm_target *ti)
{
	struct pool_c *pt = ti->private;

	mutex_lock(&dm_thin_pool_table.mutex);

	unbind_control_target(pt->pool, ti);
	__pool_dec(pt->pool);
	dm_put_device(ti, pt->metadata_dev);
	dm_put_device(ti, pt->data_dev);
	kfree(pt);

	mutex_unlock(&dm_thin_pool_table.mutex);
}

static int parse_pool_features(struct dm_arg_set *as, struct pool_features *pf,
			       struct dm_target *ti)
{
	int r;
	unsigned argc;
	const char *arg_name;

	static struct dm_arg _args[] = {
		{0, 4, "Invalid number of pool feature arguments"},
	};

	/*
	 * No feature arguments supplied.
	 */
	if (!as->argc)
		return 0;

	r = dm_read_arg_group(_args, as, &argc, &ti->error);
	if (r)
		return -EINVAL;

	while (argc && !r) {
		arg_name = dm_shift_arg(as);
		argc--;

		if (!strcasecmp(arg_name, "skip_block_zeroing"))
			pf->zero_new_blocks = false;

		else if (!strcasecmp(arg_name, "ignore_discard"))
			pf->discard_enabled = false;

		else if (!strcasecmp(arg_name, "no_discard_passdown"))
			pf->discard_passdown = false;

		else if (!strcasecmp(arg_name, "read_only"))
			pf->mode = PM_READ_ONLY;

		else {
			ti->error = "Unrecognised pool feature requested";
			r = -EINVAL;
			break;
		}
	}

	return r;
}

/*
 * thin-pool <metadata dev> <data dev>
 *	     <data block size (sectors)>
 *	     <low water mark (blocks)>
 *	     [<#feature args> [<arg>]*]
 *
 * Optional feature arguments are:
 *	     skip_block_zeroing: skips the zeroing of newly-provisioned blocks.
 *	     ignore_discard: disable discard
 *	     no_discard_passdown: don't pass discards down to the data device
 *	     read_only: don't allow any changes to be made to the pool metadata
 */
1847 static int pool_ctr(struct dm_target *ti, unsigned argc, char **argv)
1848 {
1849 int r, pool_created = 0;
1850 struct pool_c *pt;
1851 struct pool *pool;
1852 struct pool_features pf;
1853 struct dm_arg_set as;
1854 struct dm_dev *data_dev;
1855 unsigned long block_size;
1856 dm_block_t low_water_blocks;
1857 struct dm_dev *metadata_dev;
1858 sector_t metadata_dev_size;
1859 char b[BDEVNAME_SIZE];
1860
1861 /*
1862 * FIXME Remove validation from scope of lock.
1863 */
1864 mutex_lock(&dm_thin_pool_table.mutex);
1865
1866 if (argc < 4) {
1867 ti->error = "Invalid argument count";
1868 r = -EINVAL;
1869 goto out_unlock;
1870 }
1871 as.argc = argc;
1872 as.argv = argv;
1873
1874 r = dm_get_device(ti, argv[0], FMODE_READ | FMODE_WRITE, &metadata_dev);
1875 if (r) {
1876 ti->error = "Error opening metadata block device";
1877 goto out_unlock;
1878 }
1879
1880 metadata_dev_size = i_size_read(metadata_dev->bdev->bd_inode) >> SECTOR_SHIFT;
1881 if (metadata_dev_size > THIN_METADATA_MAX_SECTORS_WARNING)
1882 DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
1883 bdevname(metadata_dev->bdev, b), THIN_METADATA_MAX_SECTORS);
1884
1885 r = dm_get_device(ti, argv[1], FMODE_READ | FMODE_WRITE, &data_dev);
1886 if (r) {
1887 ti->error = "Error getting data device";
1888 goto out_metadata;
1889 }
1890
1891 if (kstrtoul(argv[2], 10, &block_size) || !block_size ||
1892 block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
1893 block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
1894 block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
1895 ti->error = "Invalid block size";
1896 r = -EINVAL;
1897 goto out;
1898 }
1899
1900 if (kstrtoull(argv[3], 10, (unsigned long long *)&low_water_blocks)) {
1901 ti->error = "Invalid low water mark";
1902 r = -EINVAL;
1903 goto out;
1904 }
1905
1906 /*
1907 * Set default pool features.
1908 */
1909 pool_features_init(&pf);
1910
1911 dm_consume_args(&as, 4);
1912 r = parse_pool_features(&as, &pf, ti);
1913 if (r)
1914 goto out;
1915
1916 pt = kzalloc(sizeof(*pt), GFP_KERNEL);
1917 if (!pt) {
1918 r = -ENOMEM;
1919 goto out;
1920 }
1921
1922 pool = __pool_find(dm_table_get_md(ti->table), metadata_dev->bdev,
1923 block_size, pf.mode == PM_READ_ONLY, &ti->error, &pool_created);
1924 if (IS_ERR(pool)) {
1925 r = PTR_ERR(pool);
1926 goto out_free_pt;
1927 }
1928
1929 /*
1930 * 'pool_created' reflects whether this is the first table load.
1931 * Top level discard support is not allowed to be changed after
1932 * initial load. This would require a pool reload to trigger thin
1933 * device changes.
1934 */
1935 if (!pool_created && pf.discard_enabled != pool->pf.discard_enabled) {
1936 ti->error = "Discard support cannot be disabled once enabled";
1937 r = -EINVAL;
1938 goto out_flags_changed;
1939 }
1940
1941 pt->pool = pool;
1942 pt->ti = ti;
1943 pt->metadata_dev = metadata_dev;
1944 pt->data_dev = data_dev;
1945 pt->low_water_blocks = low_water_blocks;
1946 pt->adjusted_pf = pt->requested_pf = pf;
1947 ti->num_flush_bios = 1;
1948
1949 /*
1950 * Only need to enable discards if the pool should pass
1951 * them down to the data device. The thin device's discard
1952 * processing will cause mappings to be removed from the btree.
1953 */
1954 if (pf.discard_enabled && pf.discard_passdown) {
1955 ti->num_discard_bios = 1;
1956
1957 /*
1958 * Setting 'discards_supported' circumvents the normal
1959 * stacking of discard limits (this keeps the pool and
1960 * thin devices' discard limits consistent).
1961 */
1962 ti->discards_supported = true;
1963 ti->discard_zeroes_data_unsupported = true;
1964 }
1965 ti->private = pt;
1966
1967 pt->callbacks.congested_fn = pool_is_congested;
1968 dm_table_add_target_callbacks(ti->table, &pt->callbacks);
1969
1970 mutex_unlock(&dm_thin_pool_table.mutex);
1971
1972 return 0;
1973
1974 out_flags_changed:
1975 __pool_dec(pool);
1976 out_free_pt:
1977 kfree(pt);
1978 out:
1979 dm_put_device(ti, data_dev);
1980 out_metadata:
1981 dm_put_device(ti, metadata_dev);
1982 out_unlock:
1983 mutex_unlock(&dm_thin_pool_table.mutex);
1984
1985 return r;
1986 }
1987
1988 static int pool_map(struct dm_target *ti, struct bio *bio)
1989 {
1990 int r;
1991 struct pool_c *pt = ti->private;
1992 struct pool *pool = pt->pool;
1993 unsigned long flags;
1994
1995 /*
1996 * As this is a singleton target, ti->begin is always zero.
1997 */
1998 spin_lock_irqsave(&pool->lock, flags);
1999 bio->bi_bdev = pt->data_dev->bdev;
2000 r = DM_MAPIO_REMAPPED;
2001 spin_unlock_irqrestore(&pool->lock, flags);
2002
2003 return r;
2004 }
2005
2006 /*
2007  * Retrieves the number of data-device blocks recorded in the
2008  * superblock and compares it with the device's actual size,
2009  * resizing the data device if it has grown.
2010  *
2011  * This copes both with the ctr opening a preallocated data device
2012  * that is then resumed, and with a resume issued on its own after
2013  * userspace has grown the data device in reaction to a table event.
2014  */
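/*
 * Worked example (illustrative numbers): with 128-sector (64KiB)
 * blocks, a 16777216-sector (8GiB) pool table yields data_size =
 * 16777216 / 128 = 131072 blocks.  If the superblock records 65536
 * blocks, the data device is resized up to 131072; if it records more
 * than 131072, the preresume fails with -EINVAL ("pool target too
 * small"), since a pool must never shrink below its committed size.
 */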
2017 static int pool_preresume(struct dm_target *ti)
2018 {
2019 int r;
2020 struct pool_c *pt = ti->private;
2021 struct pool *pool = pt->pool;
2022 sector_t data_size = ti->len;
2023 dm_block_t sb_data_size;
2024
2025 /*
2026 * Take control of the pool object.
2027 */
2028 r = bind_control_target(pool, ti);
2029 if (r)
2030 return r;
2031
2032 (void) sector_div(data_size, pool->sectors_per_block);
2033
2034 r = dm_pool_get_data_dev_size(pool->pmd, &sb_data_size);
2035 if (r) {
2036 DMERR("failed to retrieve data device size");
2037 return r;
2038 }
2039
2040 if (data_size < sb_data_size) {
2041 DMERR("pool target too small, is %llu blocks (expected %llu)",
2042 (unsigned long long)data_size, sb_data_size);
2043 return -EINVAL;
2044
2045 } else if (data_size > sb_data_size) {
2046 r = dm_pool_resize_data_dev(pool->pmd, data_size);
2047 if (r) {
2048 DMERR("failed to resize data device");
2049 /* FIXME Stricter than necessary: Rollback transaction instead here */
2050 set_pool_mode(pool, PM_READ_ONLY);
2051 return r;
2052 }
2053
2054 (void) commit_or_fallback(pool);
2055 }
2056
2057 return 0;
2058 }
2059
2060 static void pool_resume(struct dm_target *ti)
2061 {
2062 struct pool_c *pt = ti->private;
2063 struct pool *pool = pt->pool;
2064 unsigned long flags;
2065
2066 spin_lock_irqsave(&pool->lock, flags);
2067 pool->low_water_triggered = 0;
2068 pool->no_free_space = 0;
2069 __requeue_bios(pool);
2070 spin_unlock_irqrestore(&pool->lock, flags);
2071
2072 do_waker(&pool->waker.work);
2073 }
2074
2075 static void pool_postsuspend(struct dm_target *ti)
2076 {
2077 struct pool_c *pt = ti->private;
2078 struct pool *pool = pt->pool;
2079
2080 cancel_delayed_work(&pool->waker);
2081 flush_workqueue(pool->wq);
2082 (void) commit_or_fallback(pool);
2083 }
2084
2085 static int check_arg_count(unsigned argc, unsigned args_required)
2086 {
2087 if (argc != args_required) {
2088 DMWARN("Message received with %u arguments instead of %u.",
2089 argc, args_required);
2090 return -EINVAL;
2091 }
2092
2093 return 0;
2094 }
2095
2096 static int read_dev_id(char *arg, dm_thin_id *dev_id, int warning)
2097 {
2098 if (!kstrtoull(arg, 10, (unsigned long long *)dev_id) &&
2099 *dev_id <= MAX_DEV_ID)
2100 return 0;
2101
2102 if (warning)
2103 DMWARN("Message received with invalid device id: %s", arg);
2104
2105 return -EINVAL;
2106 }
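/*
 * E.g. read_dev_id("16777215", ...) succeeds (MAX_DEV_ID is 2^24 - 1),
 * while "16777216" or a non-numeric string is rejected with -EINVAL.
 */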
2107
2108 static int process_create_thin_mesg(unsigned argc, char **argv, struct pool *pool)
2109 {
2110 dm_thin_id dev_id;
2111 int r;
2112
2113 r = check_arg_count(argc, 2);
2114 if (r)
2115 return r;
2116
2117 r = read_dev_id(argv[1], &dev_id, 1);
2118 if (r)
2119 return r;
2120
2121 r = dm_pool_create_thin(pool->pmd, dev_id);
2122 if (r) {
2123 DMWARN("Creation of new thinly-provisioned device with id %s failed.",
2124 argv[1]);
2125 return r;
2126 }
2127
2128 return 0;
2129 }
2130
2131 static int process_create_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2132 {
2133 dm_thin_id dev_id;
2134 dm_thin_id origin_dev_id;
2135 int r;
2136
2137 r = check_arg_count(argc, 3);
2138 if (r)
2139 return r;
2140
2141 r = read_dev_id(argv[1], &dev_id, 1);
2142 if (r)
2143 return r;
2144
2145 r = read_dev_id(argv[2], &origin_dev_id, 1);
2146 if (r)
2147 return r;
2148
2149 r = dm_pool_create_snap(pool->pmd, dev_id, origin_dev_id);
2150 if (r) {
2151 DMWARN("Creation of new snapshot %s of device %s failed.",
2152 argv[1], argv[2]);
2153 return r;
2154 }
2155
2156 return 0;
2157 }
2158
2159 static int process_delete_mesg(unsigned argc, char **argv, struct pool *pool)
2160 {
2161 dm_thin_id dev_id;
2162 int r;
2163
2164 r = check_arg_count(argc, 2);
2165 if (r)
2166 return r;
2167
2168 r = read_dev_id(argv[1], &dev_id, 1);
2169 if (r)
2170 return r;
2171
2172 r = dm_pool_delete_thin_device(pool->pmd, dev_id);
2173 if (r)
2174 DMWARN("Deletion of thin device %s failed.", argv[1]);
2175
2176 return r;
2177 }
2178
2179 static int process_set_transaction_id_mesg(unsigned argc, char **argv, struct pool *pool)
2180 {
2181 dm_thin_id old_id, new_id;
2182 int r;
2183
2184 r = check_arg_count(argc, 3);
2185 if (r)
2186 return r;
2187
2188 if (kstrtoull(argv[1], 10, (unsigned long long *)&old_id)) {
2189 DMWARN("set_transaction_id message: Unrecognised id %s.", argv[1]);
2190 return -EINVAL;
2191 }
2192
2193 if (kstrtoull(argv[2], 10, (unsigned long long *)&new_id)) {
2194 DMWARN("set_transaction_id message: Unrecognised new id %s.", argv[2]);
2195 return -EINVAL;
2196 }
2197
2198 r = dm_pool_set_metadata_transaction_id(pool->pmd, old_id, new_id);
2199 if (r) {
2200 DMWARN("Failed to change transaction id from %s to %s.",
2201 argv[1], argv[2]);
2202 return r;
2203 }
2204
2205 return 0;
2206 }
2207
2208 static int process_reserve_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2209 {
2210 int r;
2211
2212 r = check_arg_count(argc, 1);
2213 if (r)
2214 return r;
2215
2216 (void) commit_or_fallback(pool);
2217
2218 r = dm_pool_reserve_metadata_snap(pool->pmd);
2219 if (r)
2220 DMWARN("reserve_metadata_snap message failed.");
2221
2222 return r;
2223 }
2224
2225 static int process_release_metadata_snap_mesg(unsigned argc, char **argv, struct pool *pool)
2226 {
2227 int r;
2228
2229 r = check_arg_count(argc, 1);
2230 if (r)
2231 return r;
2232
2233 r = dm_pool_release_metadata_snap(pool->pmd);
2234 if (r)
2235 DMWARN("release_metadata_snap message failed.");
2236
2237 return r;
2238 }
2239
2240 /*
2241 * Messages supported:
2242 * create_thin <dev_id>
2243 * create_snap <dev_id> <origin_id>
2244 * delete <dev_id>
2246 * set_transaction_id <current_trans_id> <new_trans_id>
2247 * reserve_metadata_snap
2248 * release_metadata_snap
2249 */
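/*
 * These messages are typically sent from userspace with dmsetup; the
 * pool device name below is illustrative:
 *
 *   dmsetup message /dev/mapper/pool 0 create_thin 0
 *   dmsetup message /dev/mapper/pool 0 create_snap 1 0
 *   dmsetup message /dev/mapper/pool 0 delete 1
 */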
2250 static int pool_message(struct dm_target *ti, unsigned argc, char **argv)
2251 {
2252 int r = -EINVAL;
2253 struct pool_c *pt = ti->private;
2254 struct pool *pool = pt->pool;
2255
2256 if (!strcasecmp(argv[0], "create_thin"))
2257 r = process_create_thin_mesg(argc, argv, pool);
2258
2259 else if (!strcasecmp(argv[0], "create_snap"))
2260 r = process_create_snap_mesg(argc, argv, pool);
2261
2262 else if (!strcasecmp(argv[0], "delete"))
2263 r = process_delete_mesg(argc, argv, pool);
2264
2265 else if (!strcasecmp(argv[0], "set_transaction_id"))
2266 r = process_set_transaction_id_mesg(argc, argv, pool);
2267
2268 else if (!strcasecmp(argv[0], "reserve_metadata_snap"))
2269 r = process_reserve_metadata_snap_mesg(argc, argv, pool);
2270
2271 else if (!strcasecmp(argv[0], "release_metadata_snap"))
2272 r = process_release_metadata_snap_mesg(argc, argv, pool);
2273
2274 else
2275 DMWARN("Unrecognised thin pool target message received: %s", argv[0]);
2276
2277 if (!r)
2278 (void) commit_or_fallback(pool);
2279
2280 return r;
2281 }
2282
2283 static void emit_flags(struct pool_features *pf, char *result,
2284 unsigned sz, unsigned maxlen)
2285 {
2286 unsigned count = !pf->zero_new_blocks + !pf->discard_enabled +
2287 !pf->discard_passdown + (pf->mode == PM_READ_ONLY);
2288 DMEMIT("%u ", count);
2289
2290 if (!pf->zero_new_blocks)
2291 DMEMIT("skip_block_zeroing ");
2292
2293 if (!pf->discard_enabled)
2294 DMEMIT("ignore_discard ");
2295
2296 if (!pf->discard_passdown)
2297 DMEMIT("no_discard_passdown ");
2298
2299 if (pf->mode == PM_READ_ONLY)
2300 DMEMIT("read_only ");
2301 }
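/*
 * E.g. assuming the defaults set in pool_features_init() (zeroing,
 * discard and passdown enabled, read-write mode), an unmodified pool
 * emits "0 " here, and one created with skip_block_zeroing emits
 * "1 skip_block_zeroing ".
 */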
2302
2303 /*
2304  * Status line is:
2305  *    <transaction id> <used metadata blocks>/<total metadata blocks>
2306  *    <used data blocks>/<total data blocks> <held metadata root>
      *    ro|rw ignore_discard|discard_passdown|no_discard_passdown
2307  */
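/*
 * Example INFO line (illustrative values):
 *   "0 77/4096 257/131072 - rw discard_passdown"
 */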
2308 static void pool_status(struct dm_target *ti, status_type_t type,
2309 unsigned status_flags, char *result, unsigned maxlen)
2310 {
2311 int r;
2312 unsigned sz = 0;
2313 uint64_t transaction_id;
2314 dm_block_t nr_free_blocks_data;
2315 dm_block_t nr_free_blocks_metadata;
2316 dm_block_t nr_blocks_data;
2317 dm_block_t nr_blocks_metadata;
2318 dm_block_t held_root;
2319 char buf[BDEVNAME_SIZE];
2320 char buf2[BDEVNAME_SIZE];
2321 struct pool_c *pt = ti->private;
2322 struct pool *pool = pt->pool;
2323
2324 switch (type) {
2325 case STATUSTYPE_INFO:
2326 if (get_pool_mode(pool) == PM_FAIL) {
2327 DMEMIT("Fail");
2328 break;
2329 }
2330
2331 /* Commit to ensure statistics aren't out-of-date */
2332 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti))
2333 (void) commit_or_fallback(pool);
2334
2335 r = dm_pool_get_metadata_transaction_id(pool->pmd, &transaction_id);
2336 if (r) {
2337 DMERR("dm_pool_get_metadata_transaction_id returned %d", r);
2338 goto err;
2339 }
2340
2341 r = dm_pool_get_free_metadata_block_count(pool->pmd, &nr_free_blocks_metadata);
2342 if (r) {
2343 DMERR("dm_pool_get_free_metadata_block_count returned %d", r);
2344 goto err;
2345 }
2346
2347 r = dm_pool_get_metadata_dev_size(pool->pmd, &nr_blocks_metadata);
2348 if (r) {
2349 DMERR("dm_pool_get_metadata_dev_size returned %d", r);
2350 goto err;
2351 }
2352
2353 r = dm_pool_get_free_block_count(pool->pmd, &nr_free_blocks_data);
2354 if (r) {
2355 DMERR("dm_pool_get_free_block_count returned %d", r);
2356 goto err;
2357 }
2358
2359 r = dm_pool_get_data_dev_size(pool->pmd, &nr_blocks_data);
2360 if (r) {
2361 DMERR("dm_pool_get_data_dev_size returned %d", r);
2362 goto err;
2363 }
2364
2365 r = dm_pool_get_metadata_snap(pool->pmd, &held_root);
2366 if (r) {
2367 DMERR("dm_pool_get_metadata_snap returned %d", r);
2368 goto err;
2369 }
2370
2371 DMEMIT("%llu %llu/%llu %llu/%llu ",
2372 (unsigned long long)transaction_id,
2373 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2374 (unsigned long long)nr_blocks_metadata,
2375 (unsigned long long)(nr_blocks_data - nr_free_blocks_data),
2376 (unsigned long long)nr_blocks_data);
2377
2378 if (held_root)
2379 DMEMIT("%llu ", held_root);
2380 else
2381 DMEMIT("- ");
2382
2383 if (pool->pf.mode == PM_READ_ONLY)
2384 DMEMIT("ro ");
2385 else
2386 DMEMIT("rw ");
2387
2388 if (!pool->pf.discard_enabled)
2389 DMEMIT("ignore_discard");
2390 else if (pool->pf.discard_passdown)
2391 DMEMIT("discard_passdown");
2392 else
2393 DMEMIT("no_discard_passdown");
2394
2395 break;
2396
2397 case STATUSTYPE_TABLE:
2398 DMEMIT("%s %s %lu %llu ",
2399 format_dev_t(buf, pt->metadata_dev->bdev->bd_dev),
2400 format_dev_t(buf2, pt->data_dev->bdev->bd_dev),
2401 (unsigned long)pool->sectors_per_block,
2402 (unsigned long long)pt->low_water_blocks);
2403 emit_flags(&pt->requested_pf, result, sz, maxlen);
2404 break;
2405 }
2406 return;
2407
2408 err:
2409 DMEMIT("Error");
2410 }
2411
2412 static int pool_iterate_devices(struct dm_target *ti,
2413 iterate_devices_callout_fn fn, void *data)
2414 {
2415 struct pool_c *pt = ti->private;
2416
2417 return fn(ti, pt->data_dev, 0, ti->len, data);
2418 }
2419
2420 static int pool_merge(struct dm_target *ti, struct bvec_merge_data *bvm,
2421 struct bio_vec *biovec, int max_size)
2422 {
2423 struct pool_c *pt = ti->private;
2424 struct request_queue *q = bdev_get_queue(pt->data_dev->bdev);
2425
2426 if (!q->merge_bvec_fn)
2427 return max_size;
2428
2429 bvm->bi_bdev = pt->data_dev->bdev;
2430
2431 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2432 }
2433
2434 static void set_discard_limits(struct pool_c *pt, struct queue_limits *limits)
2435 {
2436 struct pool *pool = pt->pool;
2437 struct queue_limits *data_limits;
2438
2439 limits->max_discard_sectors = pool->sectors_per_block;
2440
2441 /*
2442 * discard_granularity is just a hint, and not enforced.
2443 */
2444 if (pt->adjusted_pf.discard_passdown) {
2445 data_limits = &bdev_get_queue(pt->data_dev->bdev)->limits;
2446 limits->discard_granularity = data_limits->discard_granularity;
2447 } else
2448 limits->discard_granularity = pool->sectors_per_block << SECTOR_SHIFT;
2449 }
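/*
 * E.g. with 128-sector (64KiB) blocks and passdown disabled,
 * max_discard_sectors is 128 and discard_granularity is
 * 128 << SECTOR_SHIFT = 65536 bytes, so discards line up with whole
 * pool blocks.
 */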
2450
2451 static void pool_io_hints(struct dm_target *ti, struct queue_limits *limits)
2452 {
2453 struct pool_c *pt = ti->private;
2454 struct pool *pool = pt->pool;
2455
2456 blk_limits_io_min(limits, 0);
2457 blk_limits_io_opt(limits, pool->sectors_per_block << SECTOR_SHIFT);
2458
2459 /*
2460 * pt->adjusted_pf is a staging area for the actual features to use.
2461 * They get transferred to the live pool in bind_control_target()
2462 * called from pool_preresume().
2463 */
2464 if (!pt->adjusted_pf.discard_enabled)
2465 return;
2466
2467 disable_passdown_if_not_supported(pt);
2468
2469 set_discard_limits(pt, limits);
2470 }
2471
2472 static struct target_type pool_target = {
2473 .name = "thin-pool",
2474 .features = DM_TARGET_SINGLETON | DM_TARGET_ALWAYS_WRITEABLE |
2475 DM_TARGET_IMMUTABLE,
2476 .version = {1, 6, 1},
2477 .module = THIS_MODULE,
2478 .ctr = pool_ctr,
2479 .dtr = pool_dtr,
2480 .map = pool_map,
2481 .postsuspend = pool_postsuspend,
2482 .preresume = pool_preresume,
2483 .resume = pool_resume,
2484 .message = pool_message,
2485 .status = pool_status,
2486 .merge = pool_merge,
2487 .iterate_devices = pool_iterate_devices,
2488 .io_hints = pool_io_hints,
2489 };
2490
2491 /*----------------------------------------------------------------
2492 * Thin target methods
2493 *--------------------------------------------------------------*/
2494 static void thin_dtr(struct dm_target *ti)
2495 {
2496 struct thin_c *tc = ti->private;
2497
2498 mutex_lock(&dm_thin_pool_table.mutex);
2499
2500 __pool_dec(tc->pool);
2501 dm_pool_close_thin_device(tc->td);
2502 dm_put_device(ti, tc->pool_dev);
2503 if (tc->origin_dev)
2504 dm_put_device(ti, tc->origin_dev);
2505 kfree(tc);
2506
2507 mutex_unlock(&dm_thin_pool_table.mutex);
2508 }
2509
2510 /*
2511 * Thin target parameters:
2512 *
2513 * <pool_dev> <dev_id> [origin_dev]
2514 *
2515  * pool_dev: the path to the pool (e.g. /dev/mapper/my_pool)
2516 * dev_id: the internal device identifier
2517 * origin_dev: a device external to the pool that should act as the origin
2518 *
2519 * If the pool device has discards disabled, they get disabled for the thin
2520 * device as well.
2521 */
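/*
 * Example table lines (device names and sizes illustrative):
 *
 *   0 2097152 thin /dev/mapper/pool 0
 *   0 2097152 thin /dev/mapper/pool 1 /dev/image
 *
 * The second form opens thin device 1 with /dev/image as an external
 * read-only origin; 2097152 sectors gives a 1GiB virtual device.
 */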
2522 static int thin_ctr(struct dm_target *ti, unsigned argc, char **argv)
2523 {
2524 int r;
2525 struct thin_c *tc;
2526 struct dm_dev *pool_dev, *origin_dev;
2527 struct mapped_device *pool_md;
2528
2529 mutex_lock(&dm_thin_pool_table.mutex);
2530
2531 if (argc != 2 && argc != 3) {
2532 ti->error = "Invalid argument count";
2533 r = -EINVAL;
2534 goto out_unlock;
2535 }
2536
2537 tc = ti->private = kzalloc(sizeof(*tc), GFP_KERNEL);
2538 if (!tc) {
2539 ti->error = "Out of memory";
2540 r = -ENOMEM;
2541 goto out_unlock;
2542 }
2543
2544 if (argc == 3) {
2545 r = dm_get_device(ti, argv[2], FMODE_READ, &origin_dev);
2546 if (r) {
2547 ti->error = "Error opening origin device";
2548 goto bad_origin_dev;
2549 }
2550 tc->origin_dev = origin_dev;
2551 }
2552
2553 r = dm_get_device(ti, argv[0], dm_table_get_mode(ti->table), &pool_dev);
2554 if (r) {
2555 ti->error = "Error opening pool device";
2556 goto bad_pool_dev;
2557 }
2558 tc->pool_dev = pool_dev;
2559
2560 if (read_dev_id(argv[1], (unsigned long long *)&tc->dev_id, 0)) {
2561 ti->error = "Invalid device id";
2562 r = -EINVAL;
2563 goto bad_common;
2564 }
2565
2566 pool_md = dm_get_md(tc->pool_dev->bdev->bd_dev);
2567 if (!pool_md) {
2568 ti->error = "Couldn't get pool mapped device";
2569 r = -EINVAL;
2570 goto bad_common;
2571 }
2572
2573 tc->pool = __pool_table_lookup(pool_md);
2574 if (!tc->pool) {
2575 ti->error = "Couldn't find pool object";
2576 r = -EINVAL;
2577 goto bad_pool_lookup;
2578 }
2579 __pool_inc(tc->pool);
2580
2581 if (get_pool_mode(tc->pool) == PM_FAIL) {
2582 ti->error = "Couldn't open thin device, pool is in fail mode";
     r = -EINVAL; /* without this the ctr would return 0 on this path */
2583 goto bad_thin_open;
2584 }
2585
2586 r = dm_pool_open_thin_device(tc->pool->pmd, tc->dev_id, &tc->td);
2587 if (r) {
2588 ti->error = "Couldn't open thin internal device";
2589 goto bad_thin_open;
2590 }
2591
2592 r = dm_set_target_max_io_len(ti, tc->pool->sectors_per_block);
2593 if (r)
2594 goto bad_thin_open;
2595
2596 ti->num_flush_bios = 1;
2597 ti->flush_supported = true;
2598 ti->per_bio_data_size = sizeof(struct dm_thin_endio_hook);
2599
2600 /* If the pool supports discards, pass them on. */
2601 if (tc->pool->pf.discard_enabled) {
2602 ti->discards_supported = true;
2603 ti->num_discard_bios = 1;
2604 ti->discard_zeroes_data_unsupported = true;
2605 /* Discard bios must be split on a block boundary */
2606 ti->split_discard_bios = true;
2607 }
2608
2609 dm_put(pool_md);
2610
2611 mutex_unlock(&dm_thin_pool_table.mutex);
2612
2613 return 0;
2614
2615 bad_thin_open:
2616 __pool_dec(tc->pool);
2617 bad_pool_lookup:
2618 dm_put(pool_md);
2619 bad_common:
2620 dm_put_device(ti, tc->pool_dev);
2621 bad_pool_dev:
2622 if (tc->origin_dev)
2623 dm_put_device(ti, tc->origin_dev);
2624 bad_origin_dev:
2625 kfree(tc);
2626 out_unlock:
2627 mutex_unlock(&dm_thin_pool_table.mutex);
2628
2629 return r;
2630 }
2631
2632 static int thin_map(struct dm_target *ti, struct bio *bio)
2633 {
2634 bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
2635
2636 return thin_bio_map(ti, bio);
2637 }
2638
2639 static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
2640 {
2641 unsigned long flags;
2642 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
2643 struct list_head work;
2644 struct dm_thin_new_mapping *m, *tmp;
2645 struct pool *pool = h->tc->pool;
2646
2647 if (h->shared_read_entry) {
2648 INIT_LIST_HEAD(&work);
2649 dm_deferred_entry_dec(h->shared_read_entry, &work);
2650
2651 spin_lock_irqsave(&pool->lock, flags);
2652 list_for_each_entry_safe(m, tmp, &work, list) {
2653 list_del(&m->list);
2654 m->quiesced = 1;
2655 __maybe_add_mapping(m);
2656 }
2657 spin_unlock_irqrestore(&pool->lock, flags);
2658 }
2659
2660 if (h->all_io_entry) {
2661 INIT_LIST_HEAD(&work);
2662 dm_deferred_entry_dec(h->all_io_entry, &work);
2663 if (!list_empty(&work)) {
2664 spin_lock_irqsave(&pool->lock, flags);
2665 list_for_each_entry_safe(m, tmp, &work, list)
2666 list_add(&m->list, &pool->prepared_discards);
2667 spin_unlock_irqrestore(&pool->lock, flags);
2668 wake_worker(pool);
2669 }
2670 }
2671
2672 return 0;
2673 }
2674
2675 static void thin_postsuspend(struct dm_target *ti)
2676 {
2677 if (dm_noflush_suspending(ti))
2678 requeue_io((struct thin_c *)ti->private);
2679 }
2680
2681 /*
2682 * <nr mapped sectors> <highest mapped sector>
2683 */
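/*
 * E.g. with 128-sector blocks, 16 mapped blocks and a highest mapped
 * block of 31, INFO reports "2048 4095" (16 * 128 mapped sectors,
 * (31 + 1) * 128 - 1 for the highest mapped sector).
 */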
2684 static void thin_status(struct dm_target *ti, status_type_t type,
2685 unsigned status_flags, char *result, unsigned maxlen)
2686 {
2687 int r;
2688 ssize_t sz = 0;
2689 dm_block_t mapped, highest;
2690 char buf[BDEVNAME_SIZE];
2691 struct thin_c *tc = ti->private;
2692
2693 if (get_pool_mode(tc->pool) == PM_FAIL) {
2694 DMEMIT("Fail");
2695 return;
2696 }
2697
2698 if (!tc->td)
2699 DMEMIT("-");
2700 else {
2701 switch (type) {
2702 case STATUSTYPE_INFO:
2703 r = dm_thin_get_mapped_count(tc->td, &mapped);
2704 if (r) {
2705 DMERR("dm_thin_get_mapped_count returned %d", r);
2706 goto err;
2707 }
2708
2709 r = dm_thin_get_highest_mapped_block(tc->td, &highest);
2710 if (r < 0) {
2711 DMERR("dm_thin_get_highest_mapped_block returned %d", r);
2712 goto err;
2713 }
2714
2715 DMEMIT("%llu ", mapped * tc->pool->sectors_per_block);
2716 if (r)
2717 DMEMIT("%llu", ((highest + 1) *
2718 tc->pool->sectors_per_block) - 1);
2719 else
2720 DMEMIT("-");
2721 break;
2722
2723 case STATUSTYPE_TABLE:
2724 DMEMIT("%s %lu",
2725 format_dev_t(buf, tc->pool_dev->bdev->bd_dev),
2726 (unsigned long) tc->dev_id);
2727 if (tc->origin_dev)
2728 DMEMIT(" %s", format_dev_t(buf, tc->origin_dev->bdev->bd_dev));
2729 break;
2730 }
2731 }
2732
2733 return;
2734
2735 err:
2736 DMEMIT("Error");
2737 }
2738
2739 static int thin_iterate_devices(struct dm_target *ti,
2740 iterate_devices_callout_fn fn, void *data)
2741 {
2742 sector_t blocks;
2743 struct thin_c *tc = ti->private;
2744 struct pool *pool = tc->pool;
2745
2746 /*
2747 * We can't call dm_pool_get_data_dev_size() since that blocks. So
2748 * we follow a more convoluted path through to the pool's target.
2749 */
2750 if (!pool->ti)
2751 return 0; /* nothing is bound */
2752
2753 blocks = pool->ti->len;
2754 (void) sector_div(blocks, pool->sectors_per_block);
2755 if (blocks)
2756 return fn(ti, tc->pool_dev, 0, pool->sectors_per_block * blocks, data);
2757
2758 return 0;
2759 }
2760
2761 static struct target_type thin_target = {
2762 .name = "thin",
2763 .version = {1, 7, 1},
2764 .module = THIS_MODULE,
2765 .ctr = thin_ctr,
2766 .dtr = thin_dtr,
2767 .map = thin_map,
2768 .end_io = thin_endio,
2769 .postsuspend = thin_postsuspend,
2770 .status = thin_status,
2771 .iterate_devices = thin_iterate_devices,
2772 };
2773
2774 /*----------------------------------------------------------------*/
2775
2776 static int __init dm_thin_init(void)
2777 {
2778 int r;
2779
2780 pool_table_init();
2781
2782 r = dm_register_target(&thin_target);
2783 if (r)
2784 return r;
2785
2786 r = dm_register_target(&pool_target);
2787 if (r)
2788 goto bad_pool_target;
2789
2790 r = -ENOMEM;
2791
2792 _new_mapping_cache = KMEM_CACHE(dm_thin_new_mapping, 0);
2793 if (!_new_mapping_cache)
2794 goto bad_new_mapping_cache;
2795
2796 return 0;
2797
2798 bad_new_mapping_cache:
2799 dm_unregister_target(&pool_target);
2800 bad_pool_target:
2801 dm_unregister_target(&thin_target);
2802
2803 return r;
2804 }
2805
2806 static void dm_thin_exit(void)
2807 {
2808 dm_unregister_target(&thin_target);
2809 dm_unregister_target(&pool_target);
2810
2811 kmem_cache_destroy(_new_mapping_cache);
2812 }
2813
2814 module_init(dm_thin_init);
2815 module_exit(dm_thin_exit);
2816
2817 MODULE_DESCRIPTION(DM_NAME " thin provisioning target");
2818 MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
2819 MODULE_LICENSE("GPL");