dm cache: cache shrinking support

drivers/md/dm-cache-target.c
/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm.h"
#include "dm-bio-prison.h"
#include "dm-bio-record.h"
#include "dm-cache-metadata.h"

#include <linux/dm-io.h>
#include <linux/dm-kcopyd.h>
#include <linux/init.h>
#include <linux/mempool.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache"

DECLARE_DM_KCOPYD_THROTTLE_WITH_MODULE_PARM(cache_copy_throttle,
	"A percentage of time allocated for copying to and/or from cache");

/*----------------------------------------------------------------*/

/*
 * Glossary:
 *
 * oblock: index of an origin block
 * cblock: index of a cache block
 * promotion: movement of a block from origin to cache
 * demotion: movement of a block from cache to origin
 * migration: movement of a block between the origin and cache device,
 *	      either direction
 */

/*----------------------------------------------------------------*/

static size_t bitset_size_in_bytes(unsigned nr_entries)
{
	return sizeof(unsigned long) * dm_div_up(nr_entries, BITS_PER_LONG);
}

static unsigned long *alloc_bitset(unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	return vzalloc(s);
}

static void clear_bitset(void *bitset, unsigned nr_entries)
{
	size_t s = bitset_size_in_bytes(nr_entries);
	memset(bitset, 0, s);
}

static void free_bitset(unsigned long *bits)
{
	vfree(bits);
}

/*----------------------------------------------------------------*/

/*
 * There are a couple of places where we let a bio run, but want to do some
 * work before calling its endio function.  We do this by temporarily
 * changing the endio fn.
 */
struct dm_hook_info {
	bio_end_io_t *bi_end_io;
	void *bi_private;
};

static void dm_hook_bio(struct dm_hook_info *h, struct bio *bio,
			bio_end_io_t *bi_end_io, void *bi_private)
{
	h->bi_end_io = bio->bi_end_io;
	h->bi_private = bio->bi_private;

	bio->bi_end_io = bi_end_io;
	bio->bi_private = bi_private;
}

static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
{
	bio->bi_end_io = h->bi_end_io;
	bio->bi_private = h->bi_private;
}

/*----------------------------------------------------------------*/

#define PRISON_CELLS 1024
#define MIGRATION_POOL_SIZE 128
#define COMMIT_PERIOD HZ
#define MIGRATION_COUNT_WINDOW 10

/*
 * The block size of the device holding cache data must be
 * between 32KB and 1GB.
 */
#define DATA_DEV_BLOCK_SIZE_MIN_SECTORS (32 * 1024 >> SECTOR_SHIFT)
#define DATA_DEV_BLOCK_SIZE_MAX_SECTORS (1024 * 1024 * 1024 >> SECTOR_SHIFT)

/*
 * FIXME: the cache is read/write for the time being.
 */
enum cache_mode {
	CM_WRITE,		/* metadata may be changed */
	CM_READ_ONLY,		/* metadata may not be changed */
};

struct cache_features {
	enum cache_mode mode;
	bool write_through:1;
};

struct cache_stats {
	atomic_t read_hit;
	atomic_t read_miss;
	atomic_t write_hit;
	atomic_t write_miss;
	atomic_t demotion;
	atomic_t promotion;
	atomic_t copies_avoided;
	atomic_t cache_cell_clash;
	atomic_t commit_count;
	atomic_t discard_count;
};

struct cache {
	struct dm_target *ti;
	struct dm_target_callbacks callbacks;

	struct dm_cache_metadata *cmd;

	/*
	 * Metadata is written to this device.
	 */
	struct dm_dev *metadata_dev;

	/*
	 * The slower of the two data devices.  Typically a spindle.
	 */
	struct dm_dev *origin_dev;

	/*
	 * The faster of the two data devices.  Typically an SSD.
	 */
	struct dm_dev *cache_dev;

	/*
	 * Size of the origin device in _complete_ blocks and native sectors.
	 */
	dm_oblock_t origin_blocks;
	sector_t origin_sectors;

	/*
	 * Size of the cache device in blocks.
	 */
	dm_cblock_t cache_size;

	/*
	 * Fields for converting from sectors to blocks.
	 */
	uint32_t sectors_per_block;
	int sectors_per_block_shift;

	spinlock_t lock;
	struct bio_list deferred_bios;
	struct bio_list deferred_flush_bios;
	struct bio_list deferred_writethrough_bios;
	struct list_head quiesced_migrations;
	struct list_head completed_migrations;
	struct list_head need_commit_migrations;
	sector_t migration_threshold;
	wait_queue_head_t migration_wait;
	atomic_t nr_migrations;

	wait_queue_head_t quiescing_wait;
	atomic_t quiescing;
	atomic_t quiescing_ack;

	/*
	 * cache_size entries, dirty if set
	 */
	dm_cblock_t nr_dirty;
	unsigned long *dirty_bitset;

	/*
	 * origin_blocks entries, discarded if set.
	 */
	dm_dblock_t discard_nr_blocks;
	unsigned long *discard_bitset;
	uint32_t discard_block_size; /* a power of 2 times sectors per block */

	/*
	 * Rather than reconstructing the table line for the status we just
	 * save it and regurgitate.
	 */
	unsigned nr_ctr_args;
	const char **ctr_args;

	struct dm_kcopyd_client *copier;
	struct workqueue_struct *wq;
	struct work_struct worker;

	struct delayed_work waker;
	unsigned long last_commit_jiffies;

	struct dm_bio_prison *prison;
	struct dm_deferred_set *all_io_ds;

	mempool_t *migration_pool;
	struct dm_cache_migration *next_migration;

	struct dm_cache_policy *policy;
	unsigned policy_nr_args;

	bool need_tick_bio:1;
	bool sized:1;
	bool commit_requested:1;
	bool loaded_mappings:1;
	bool loaded_discards:1;

	/*
	 * Cache features such as write-through.
	 */
	struct cache_features features;

	struct cache_stats stats;
};

struct per_bio_data {
	bool tick:1;
	unsigned req_nr:2;
	struct dm_deferred_entry *all_io_entry;

	/*
	 * writethrough fields.  These MUST remain at the end of this
	 * structure and the 'cache' member must be the first as it
	 * is used to determine the offset of the writethrough fields.
	 */
	struct cache *cache;
	dm_cblock_t cblock;
	struct dm_hook_info hook_info;
	struct dm_bio_details bio_details;
};

struct dm_cache_migration {
	struct list_head list;
	struct cache *cache;

	unsigned long start_jiffies;
	dm_oblock_t old_oblock;
	dm_oblock_t new_oblock;
	dm_cblock_t cblock;

	bool err:1;
	bool writeback:1;
	bool demote:1;
	bool promote:1;
	bool requeue_holder:1;

	struct dm_bio_prison_cell *old_ocell;
	struct dm_bio_prison_cell *new_ocell;
};

/*
 * Processing a bio in the worker thread may require these memory
 * allocations.  We prealloc to avoid deadlocks (the same worker thread
 * frees them back to the mempool).
 */
struct prealloc {
	struct dm_cache_migration *mg;
	struct dm_bio_prison_cell *cell1;
	struct dm_bio_prison_cell *cell2;
};

static void wake_worker(struct cache *cache)
{
	queue_work(cache->wq, &cache->worker);
}

/*----------------------------------------------------------------*/

static struct dm_bio_prison_cell *alloc_prison_cell(struct cache *cache)
{
	/* FIXME: change to use a local slab. */
	return dm_bio_prison_alloc_cell(cache->prison, GFP_NOWAIT);
}

static void free_prison_cell(struct cache *cache, struct dm_bio_prison_cell *cell)
{
	dm_bio_prison_free_cell(cache->prison, cell);
}

static int prealloc_data_structs(struct cache *cache, struct prealloc *p)
{
	if (!p->mg) {
		p->mg = mempool_alloc(cache->migration_pool, GFP_NOWAIT);
		if (!p->mg)
			return -ENOMEM;
	}

	if (!p->cell1) {
		p->cell1 = alloc_prison_cell(cache);
		if (!p->cell1)
			return -ENOMEM;
	}

	if (!p->cell2) {
		p->cell2 = alloc_prison_cell(cache);
		if (!p->cell2)
			return -ENOMEM;
	}

	return 0;
}

static void prealloc_free_structs(struct cache *cache, struct prealloc *p)
{
	if (p->cell2)
		free_prison_cell(cache, p->cell2);

	if (p->cell1)
		free_prison_cell(cache, p->cell1);

	if (p->mg)
		mempool_free(p->mg, cache->migration_pool);
}

static struct dm_cache_migration *prealloc_get_migration(struct prealloc *p)
{
	struct dm_cache_migration *mg = p->mg;

	BUG_ON(!mg);
	p->mg = NULL;

	return mg;
}

/*
 * You must have a cell within the prealloc struct to return.  If not this
 * function will BUG() rather than returning NULL.
 */
static struct dm_bio_prison_cell *prealloc_get_cell(struct prealloc *p)
{
	struct dm_bio_prison_cell *r = NULL;

	if (p->cell1) {
		r = p->cell1;
		p->cell1 = NULL;

	} else if (p->cell2) {
		r = p->cell2;
		p->cell2 = NULL;
	} else
		BUG();

	return r;
}

/*
 * You can't have more than two cells in a prealloc struct.  BUG() will be
 * called if you try and overfill.
 */
static void prealloc_put_cell(struct prealloc *p, struct dm_bio_prison_cell *cell)
{
	if (!p->cell2)
		p->cell2 = cell;

	else if (!p->cell1)
		p->cell1 = cell;

	else
		BUG();
}

/*----------------------------------------------------------------*/

static void build_key(dm_oblock_t oblock, struct dm_cell_key *key)
{
	key->virtual = 0;
	key->dev = 0;
	key->block = from_oblock(oblock);
}

/*
 * The caller hands in a preallocated cell, and a free function for it.
 * The cell will be freed if there's an error, or if it wasn't used because
 * a cell with that key already exists.
 */
typedef void (*cell_free_fn)(void *context, struct dm_bio_prison_cell *cell);

static int bio_detain(struct cache *cache, dm_oblock_t oblock,
		      struct bio *bio, struct dm_bio_prison_cell *cell_prealloc,
		      cell_free_fn free_fn, void *free_context,
		      struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;

	build_key(oblock, &key);
	r = dm_bio_detain(cache->prison, &key, bio, cell_prealloc, cell_result);
	if (r)
		free_fn(free_context, cell_prealloc);

	return r;
}

static int get_cell(struct cache *cache,
		    dm_oblock_t oblock,
		    struct prealloc *structs,
		    struct dm_bio_prison_cell **cell_result)
{
	int r;
	struct dm_cell_key key;
	struct dm_bio_prison_cell *cell_prealloc;

	cell_prealloc = prealloc_get_cell(structs);

	build_key(oblock, &key);
	r = dm_get_cell(cache->prison, &key, cell_prealloc, cell_result);
	if (r)
		prealloc_put_cell(structs, cell_prealloc);

	return r;
}

/*----------------------------------------------------------------*/

static bool is_dirty(struct cache *cache, dm_cblock_t b)
{
	return test_bit(from_cblock(b), cache->dirty_bitset);
}

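/*
 * The dirty bitset records which cache blocks may differ from their
 * origin counterpart.  nr_dirty shadows the bitset's population count,
 * and a table event is raised when the last dirty block is cleaned so
 * userspace can see that the cache is fully clean.  The policy is kept
 * informed so it can prioritise blocks for writeback.
 */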
static void set_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (!test_and_set_bit(from_cblock(cblock), cache->dirty_bitset)) {
		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) + 1);
		policy_set_dirty(cache->policy, oblock);
	}
}

static void clear_dirty(struct cache *cache, dm_oblock_t oblock, dm_cblock_t cblock)
{
	if (test_and_clear_bit(from_cblock(cblock), cache->dirty_bitset)) {
		policy_clear_dirty(cache->policy, oblock);
		cache->nr_dirty = to_cblock(from_cblock(cache->nr_dirty) - 1);
		if (!from_cblock(cache->nr_dirty))
			dm_table_event(cache->ti->table);
	}
}

/*----------------------------------------------------------------*/

static bool block_size_is_power_of_two(struct cache *cache)
{
	return cache->sectors_per_block_shift >= 0;
}

/* gcc on ARM generates spurious references to __udivdi3 and __umoddi3 */
#if defined(CONFIG_ARM) && __GNUC__ == 4 && __GNUC_MINOR__ <= 6
__always_inline
#endif
static dm_block_t block_div(dm_block_t b, uint32_t n)
{
	do_div(b, n);

	return b;
}

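/*
 * Discard state is tracked at a coarser granularity than cache blocks:
 * discard_block_size is a multiple of the cache block size.  To map an
 * origin block to its discard block we first work out how many origin
 * blocks fit in one discard block, then divide by that.
 */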
static dm_dblock_t oblock_to_dblock(struct cache *cache, dm_oblock_t oblock)
{
	uint32_t discard_blocks = cache->discard_block_size;
	dm_block_t b = from_oblock(oblock);

	if (!block_size_is_power_of_two(cache))
		discard_blocks = discard_blocks / cache->sectors_per_block;
	else
		discard_blocks >>= cache->sectors_per_block_shift;

	b = block_div(b, discard_blocks);

	return to_dblock(b);
}

static void set_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	atomic_inc(&cache->stats.discard_count);

	spin_lock_irqsave(&cache->lock, flags);
	set_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void clear_discard(struct cache *cache, dm_dblock_t b)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	clear_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static bool is_discarded(struct cache *cache, dm_dblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(b), cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

static bool is_discarded_oblock(struct cache *cache, dm_oblock_t b)
{
	int r;
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	r = test_bit(from_dblock(oblock_to_dblock(cache, b)),
		     cache->discard_bitset);
	spin_unlock_irqrestore(&cache->lock, flags);

	return r;
}

/*----------------------------------------------------------------*/

static void load_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	dm_cache_metadata_get_stats(cache->cmd, &stats);
	atomic_set(&cache->stats.read_hit, stats.read_hits);
	atomic_set(&cache->stats.read_miss, stats.read_misses);
	atomic_set(&cache->stats.write_hit, stats.write_hits);
	atomic_set(&cache->stats.write_miss, stats.write_misses);
}

static void save_stats(struct cache *cache)
{
	struct dm_cache_statistics stats;

	stats.read_hits = atomic_read(&cache->stats.read_hit);
	stats.read_misses = atomic_read(&cache->stats.read_miss);
	stats.write_hits = atomic_read(&cache->stats.write_hit);
	stats.write_misses = atomic_read(&cache->stats.write_miss);

	dm_cache_metadata_set_stats(cache->cmd, &stats);
}

/*----------------------------------------------------------------
 * Per bio data
 *--------------------------------------------------------------*/

/*
 * If using writeback, leave out struct per_bio_data's writethrough fields.
 */
#define PB_DATA_SIZE_WB (offsetof(struct per_bio_data, cache))
#define PB_DATA_SIZE_WT (sizeof(struct per_bio_data))

static size_t get_per_bio_data_size(struct cache *cache)
{
	return cache->features.write_through ? PB_DATA_SIZE_WT : PB_DATA_SIZE_WB;
}

static struct per_bio_data *get_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = dm_per_bio_data(bio, data_size);
	BUG_ON(!pb);
	return pb;
}

static struct per_bio_data *init_per_bio_data(struct bio *bio, size_t data_size)
{
	struct per_bio_data *pb = get_per_bio_data(bio, data_size);

	pb->tick = false;
	pb->req_nr = dm_bio_get_target_bio_nr(bio);
	pb->all_io_entry = NULL;

	return pb;
}

/*----------------------------------------------------------------
 * Remapping
 *--------------------------------------------------------------*/
static void remap_to_origin(struct cache *cache, struct bio *bio)
{
	bio->bi_bdev = cache->origin_dev->bdev;
}

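/*
 * Redirect a bio to the cache device.  The offset within the block is
 * preserved: for power-of-two block sizes this is a shift and mask,
 * otherwise we fall back to a full division via sector_div().
 */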
static void remap_to_cache(struct cache *cache, struct bio *bio,
			   dm_cblock_t cblock)
{
	sector_t bi_sector = bio->bi_sector;

	bio->bi_bdev = cache->cache_dev->bdev;
	if (!block_size_is_power_of_two(cache))
		bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
				sector_div(bi_sector, cache->sectors_per_block);
	else
		bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
				(bi_sector & (cache->sectors_per_block - 1));
}

static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
{
	unsigned long flags;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	spin_lock_irqsave(&cache->lock, flags);
	if (cache->need_tick_bio &&
	    !(bio->bi_rw & (REQ_FUA | REQ_FLUSH | REQ_DISCARD))) {
		pb->tick = true;
		cache->need_tick_bio = false;
	}
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void remap_to_origin_clear_discard(struct cache *cache, struct bio *bio,
					  dm_oblock_t oblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_origin(cache, bio);
	if (bio_data_dir(bio) == WRITE)
		clear_discard(cache, oblock_to_dblock(cache, oblock));
}

static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
				 dm_oblock_t oblock, dm_cblock_t cblock)
{
	check_if_tick_bio_needed(cache, bio);
	remap_to_cache(cache, bio, cblock);
	if (bio_data_dir(bio) == WRITE) {
		set_dirty(cache, oblock, cblock);
		clear_discard(cache, oblock_to_dblock(cache, oblock));
	}
}

static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
{
	sector_t block_nr = bio->bi_sector;

	if (!block_size_is_power_of_two(cache))
		(void) sector_div(block_nr, cache->sectors_per_block);
	else
		block_nr >>= cache->sectors_per_block_shift;

	return to_oblock(block_nr);
}

static int bio_triggers_commit(struct cache *cache, struct bio *bio)
{
	return bio->bi_rw & (REQ_FLUSH | REQ_FUA);
}

static void issue(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	if (!bio_triggers_commit(cache, bio)) {
		generic_make_request(bio);
		return;
	}

	/*
	 * Batch together any bios that trigger commits and then issue a
	 * single commit for them in do_worker().
	 */
	spin_lock_irqsave(&cache->lock, flags);
	cache->commit_requested = true;
	bio_list_add(&cache->deferred_flush_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void defer_writethrough_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_writethrough_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void writethrough_endio(struct bio *bio, int err)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	dm_unhook_bio(&pb->hook_info, bio);

	if (err) {
		bio_endio(bio, err);
		return;
	}

	dm_bio_restore(&pb->bio_details, bio);
	remap_to_cache(pb->cache, bio, pb->cblock);

	/*
	 * We can't issue this bio directly, since we're in interrupt
	 * context.  So it gets put on a bio list for processing by the
	 * worker thread.
	 */
	defer_writethrough_bio(pb->cache, bio);
}

/*
 * When running in writethrough mode we need to send writes to clean blocks
 * to both the cache and origin devices.  In future we'd like to clone the
 * bio and send them in parallel, but for now we're doing them in
 * series as this is easier.
 */
static void remap_to_origin_then_cache(struct cache *cache, struct bio *bio,
				       dm_oblock_t oblock, dm_cblock_t cblock)
{
	struct per_bio_data *pb = get_per_bio_data(bio, PB_DATA_SIZE_WT);

	pb->cache = cache;
	pb->cblock = cblock;
	dm_hook_bio(&pb->hook_info, bio, writethrough_endio, NULL);
	dm_bio_record(&pb->bio_details, bio);

	remap_to_origin_clear_discard(pb->cache, bio, oblock);
}

/*----------------------------------------------------------------
 * Migration processing
 *
 * Migration covers moving data from the origin device to the cache, or
 * vice versa.
 *--------------------------------------------------------------*/
static void free_migration(struct dm_cache_migration *mg)
{
	mempool_free(mg, mg->cache->migration_pool);
}

static void inc_nr_migrations(struct cache *cache)
{
	atomic_inc(&cache->nr_migrations);
}

static void dec_nr_migrations(struct cache *cache)
{
	atomic_dec(&cache->nr_migrations);

	/*
	 * Wake the worker in case we're suspending the target.
	 */
	wake_up(&cache->migration_wait);
}

static void __cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
			 bool holder)
{
	(holder ? dm_cell_release : dm_cell_release_no_holder)
		(cache->prison, cell, &cache->deferred_bios);
	free_prison_cell(cache, cell);
}

static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
		       bool holder)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	__cell_defer(cache, cell, holder);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void cleanup_migration(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;
	free_migration(mg);
	dec_nr_migrations(cache);
}

static void migration_failure(struct dm_cache_migration *mg)
{
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN_LIMIT("writeback failed; couldn't copy block");
		set_dirty(cache, mg->old_oblock, mg->cblock);
		cell_defer(cache, mg->old_ocell, false);

	} else if (mg->demote) {
		DMWARN_LIMIT("demotion failed; couldn't copy block");
		policy_force_mapping(cache->policy, mg->new_oblock, mg->old_oblock);

		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);
		if (mg->promote)
			cell_defer(cache, mg->new_ocell, true);
	} else {
		DMWARN_LIMIT("promotion failed; couldn't copy block");
		policy_remove_mapping(cache->policy, mg->new_oblock);
		cell_defer(cache, mg->new_ocell, true);
	}

	cleanup_migration(mg);
}

static void migration_success_pre_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		cell_defer(cache, mg->old_ocell, false);
		clear_dirty(cache, mg->old_oblock, mg->cblock);
		cleanup_migration(mg);
		return;

	} else if (mg->demote) {
		if (dm_cache_remove_mapping(cache->cmd, mg->cblock)) {
			DMWARN_LIMIT("demotion failed; couldn't update on disk metadata");
			policy_force_mapping(cache->policy, mg->new_oblock,
					     mg->old_oblock);
			if (mg->promote)
				cell_defer(cache, mg->new_ocell, true);
			cleanup_migration(mg);
			return;
		}
	} else {
		if (dm_cache_insert_mapping(cache->cmd, mg->cblock, mg->new_oblock)) {
			DMWARN_LIMIT("promotion failed; couldn't update on disk metadata");
			policy_remove_mapping(cache->policy, mg->new_oblock);
			cleanup_migration(mg);
			return;
		}
	}

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->need_commit_migrations);
	cache->commit_requested = true;
	spin_unlock_irqrestore(&cache->lock, flags);
}

static void migration_success_post_commit(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	if (mg->writeback) {
		DMWARN("writeback unexpectedly triggered commit");
		return;

	} else if (mg->demote) {
		cell_defer(cache, mg->old_ocell, mg->promote ? false : true);

		if (mg->promote) {
			mg->demote = false;

			spin_lock_irqsave(&cache->lock, flags);
			list_add_tail(&mg->list, &cache->quiesced_migrations);
			spin_unlock_irqrestore(&cache->lock, flags);

		} else
			cleanup_migration(mg);

	} else {
		if (mg->requeue_holder)
			cell_defer(cache, mg->new_ocell, true);
		else {
			bio_endio(mg->new_ocell->holder, 0);
			cell_defer(cache, mg->new_ocell, false);
		}
		clear_dirty(cache, mg->new_oblock, mg->cblock);
		cleanup_migration(mg);
	}
}

static void copy_complete(int read_err, unsigned long write_err, void *context)
{
	unsigned long flags;
	struct dm_cache_migration *mg = (struct dm_cache_migration *) context;
	struct cache *cache = mg->cache;

	if (read_err || write_err)
		mg->err = true;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_copy_real(struct dm_cache_migration *mg)
{
	int r;
	struct dm_io_region o_region, c_region;
	struct cache *cache = mg->cache;

	o_region.bdev = cache->origin_dev->bdev;
	o_region.count = cache->sectors_per_block;

	c_region.bdev = cache->cache_dev->bdev;
	c_region.sector = from_cblock(mg->cblock) * cache->sectors_per_block;
	c_region.count = cache->sectors_per_block;

	if (mg->writeback || mg->demote) {
		/* demote */
		o_region.sector = from_oblock(mg->old_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &c_region, 1, &o_region, 0, copy_complete, mg);
	} else {
		/* promote */
		o_region.sector = from_oblock(mg->new_oblock) * cache->sectors_per_block;
		r = dm_kcopyd_copy(cache->copier, &o_region, 1, &c_region, 0, copy_complete, mg);
	}

	if (r < 0) {
		DMERR_LIMIT("issuing migration failed");
		migration_failure(mg);
	}
}

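/*
 * Overwrite optimisation: if a promotion was triggered by a write that
 * covers the whole cache block, there is no need to copy the old data
 * from the origin first.  We hook the bio's endio, write it directly to
 * the cache block, and complete the migration when it finishes.
 */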
static void overwrite_endio(struct bio *bio, int err)
{
	struct dm_cache_migration *mg = bio->bi_private;
	struct cache *cache = mg->cache;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	unsigned long flags;

	if (err)
		mg->err = true;

	spin_lock_irqsave(&cache->lock, flags);
	list_add_tail(&mg->list, &cache->completed_migrations);
	dm_unhook_bio(&pb->hook_info, bio);
	mg->requeue_holder = false;
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(mg->cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	dm_hook_bio(&pb->hook_info, bio, overwrite_endio, mg);
	remap_to_cache_dirty(mg->cache, bio, mg->new_oblock, mg->cblock);
	generic_make_request(bio);
}

static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
{
	return (bio_data_dir(bio) == WRITE) &&
		(bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
}

static void avoid_copy(struct dm_cache_migration *mg)
{
	atomic_inc(&mg->cache->stats.copies_avoided);
	migration_success_pre_commit(mg);
}

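/*
 * Decide whether the copy can be skipped altogether: a writeback or
 * demotion of a clean or discarded block has nothing worth saving, and
 * a promotion of a discarded block has nothing to load.  A promotion
 * triggered by a full-block write becomes a direct overwrite instead.
 */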
static void issue_copy(struct dm_cache_migration *mg)
{
	bool avoid;
	struct cache *cache = mg->cache;

	if (mg->writeback || mg->demote)
		avoid = !is_dirty(cache, mg->cblock) ||
			is_discarded_oblock(cache, mg->old_oblock);
	else {
		struct bio *bio = mg->new_ocell->holder;

		avoid = is_discarded_oblock(cache, mg->new_oblock);

		if (!avoid && bio_writes_complete_block(cache, bio)) {
			issue_overwrite(mg, bio);
			return;
		}
	}

	avoid ? avoid_copy(mg) : issue_copy_real(mg);
}

static void complete_migration(struct dm_cache_migration *mg)
{
	if (mg->err)
		migration_failure(mg);
	else
		migration_success_pre_commit(mg);
}

static void process_migrations(struct cache *cache, struct list_head *head,
			       void (*fn)(struct dm_cache_migration *))
{
	unsigned long flags;
	struct list_head list;
	struct dm_cache_migration *mg, *tmp;

	INIT_LIST_HEAD(&list);
	spin_lock_irqsave(&cache->lock, flags);
	list_splice_init(head, &list);
	spin_unlock_irqrestore(&cache->lock, flags);

	list_for_each_entry_safe(mg, tmp, &list, list)
		fn(mg);
}

static void __queue_quiesced_migration(struct dm_cache_migration *mg)
{
	list_add_tail(&mg->list, &mg->cache->quiesced_migrations);
}

static void queue_quiesced_migration(struct dm_cache_migration *mg)
{
	unsigned long flags;
	struct cache *cache = mg->cache;

	spin_lock_irqsave(&cache->lock, flags);
	__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void queue_quiesced_migrations(struct cache *cache, struct list_head *work)
{
	unsigned long flags;
	struct dm_cache_migration *mg, *tmp;

	spin_lock_irqsave(&cache->lock, flags);
	list_for_each_entry_safe(mg, tmp, work, list)
		__queue_quiesced_migration(mg);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void check_for_quiesced_migrations(struct cache *cache,
					  struct per_bio_data *pb)
{
	struct list_head work;

	if (!pb->all_io_entry)
		return;

	INIT_LIST_HEAD(&work);
	if (pb->all_io_entry)
		dm_deferred_entry_dec(pb->all_io_entry, &work);

	if (!list_empty(&work))
		queue_quiesced_migrations(cache, &work);
}

static void quiesce_migration(struct dm_cache_migration *mg)
{
	if (!dm_deferred_set_add_work(mg->cache->all_io_ds, &mg->list))
		queue_quiesced_migration(mg);
}

static void promote(struct cache *cache, struct prealloc *structs,
		    dm_oblock_t oblock, dm_cblock_t cblock,
		    struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = false;
	mg->promote = true;
	mg->requeue_holder = true;
	mg->cache = cache;
	mg->new_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = NULL;
	mg->new_ocell = cell;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

static void writeback(struct cache *cache, struct prealloc *structs,
		      dm_oblock_t oblock, dm_cblock_t cblock,
		      struct dm_bio_prison_cell *cell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = true;
	mg->demote = false;
	mg->promote = false;
	mg->requeue_holder = true;
	mg->cache = cache;
	mg->old_oblock = oblock;
	mg->cblock = cblock;
	mg->old_ocell = cell;
	mg->new_ocell = NULL;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

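/*
 * Used when the policy wants to replace an existing mapping (the cache
 * block is stolen from old_oblock for new_oblock).  The migration runs
 * in two phases: the demotion completes and commits first, then the
 * same migration structure is requeued to perform the promotion.
 */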
static void demote_then_promote(struct cache *cache, struct prealloc *structs,
				dm_oblock_t old_oblock, dm_oblock_t new_oblock,
				dm_cblock_t cblock,
				struct dm_bio_prison_cell *old_ocell,
				struct dm_bio_prison_cell *new_ocell)
{
	struct dm_cache_migration *mg = prealloc_get_migration(structs);

	mg->err = false;
	mg->writeback = false;
	mg->demote = true;
	mg->promote = true;
	mg->requeue_holder = true;
	mg->cache = cache;
	mg->old_oblock = old_oblock;
	mg->new_oblock = new_oblock;
	mg->cblock = cblock;
	mg->old_ocell = old_ocell;
	mg->new_ocell = new_ocell;
	mg->start_jiffies = jiffies;

	inc_nr_migrations(cache);
	quiesce_migration(mg);
}

/*----------------------------------------------------------------
 * bio processing
 *--------------------------------------------------------------*/
static void defer_bio(struct cache *cache, struct bio *bio)
{
	unsigned long flags;

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_add(&cache->deferred_bios, bio);
	spin_unlock_irqrestore(&cache->lock, flags);

	wake_worker(cache);
}

static void process_flush_bio(struct cache *cache, struct bio *bio)
{
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);

	BUG_ON(bio->bi_size);
	if (!pb->req_nr)
		remap_to_origin(cache, bio);
	else
		remap_to_cache(cache, bio, 0);

	issue(cache, bio);
}

/*
 * People generally discard large parts of a device, e.g. the whole device
 * when formatting.  Splitting these large discards up into cache block
 * sized ios and then quiescing (always necessary for discard) takes too
 * long.
 *
 * We keep it simple, and allow any size of discard to come in, and just
 * mark off blocks on the discard bitset.  No passdown occurs!
 *
 * To implement passdown we need to change the bio_prison such that a cell
 * can have a key that spans many blocks.
 */
static void process_discard_bio(struct cache *cache, struct bio *bio)
{
	dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
						  cache->discard_block_size);
	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
	dm_block_t b;

	end_block = block_div(end_block, cache->discard_block_size);

	for (b = start_block; b < end_block; b++)
		set_discard(cache, to_dblock(b));

	bio_endio(bio, 0);
}

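/*
 * Throttle the number of in-flight migrations: a new migration is only
 * allowed while the volume of data currently being copied (in sectors)
 * stays below the user-configurable migration_threshold.
 */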
static bool spare_migration_bandwidth(struct cache *cache)
{
	sector_t current_volume = (atomic_read(&cache->nr_migrations) + 1) *
		cache->sectors_per_block;
	return current_volume < cache->migration_threshold;
}

static bool is_writethrough_io(struct cache *cache, struct bio *bio,
			       dm_cblock_t cblock)
{
	return bio_data_dir(bio) == WRITE &&
		cache->features.write_through && !is_dirty(cache, cblock);
}

static void inc_hit_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_hit : &cache->stats.write_hit);
}

static void inc_miss_counter(struct cache *cache, struct bio *bio)
{
	atomic_inc(bio_data_dir(bio) == READ ?
		   &cache->stats.read_miss : &cache->stats.write_miss);
}

static void process_bio(struct cache *cache, struct prealloc *structs,
			struct bio *bio)
{
	int r;
	bool release_cell = true;
	dm_oblock_t block = get_bio_block(cache, bio);
	struct dm_bio_prison_cell *cell_prealloc, *old_ocell, *new_ocell;
	struct policy_result lookup_result;
	size_t pb_data_size = get_per_bio_data_size(cache);
	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
	bool discarded_block = is_discarded_oblock(cache, block);
	bool can_migrate = discarded_block || spare_migration_bandwidth(cache);

	/*
	 * Check to see if that block is currently migrating.
	 */
	cell_prealloc = prealloc_get_cell(structs);
	r = bio_detain(cache, block, bio, cell_prealloc,
		       (cell_free_fn) prealloc_put_cell,
		       structs, &new_ocell);
	if (r > 0)
		return;

	r = policy_map(cache->policy, block, true, can_migrate, discarded_block,
		       bio, &lookup_result);

	if (r == -EWOULDBLOCK)
		/* migration has been denied */
		lookup_result.op = POLICY_MISS;

	switch (lookup_result.op) {
	case POLICY_HIT:
		inc_hit_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);

		if (is_writethrough_io(cache, bio, lookup_result.cblock))
			remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
		else
			remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);

		issue(cache, bio);
		break;

	case POLICY_MISS:
		inc_miss_counter(cache, bio);
		pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
		remap_to_origin_clear_discard(cache, bio, block);
		issue(cache, bio);
		break;

	case POLICY_NEW:
		atomic_inc(&cache->stats.promotion);
		promote(cache, structs, block, lookup_result.cblock, new_ocell);
		release_cell = false;
		break;

	case POLICY_REPLACE:
		cell_prealloc = prealloc_get_cell(structs);
		r = bio_detain(cache, lookup_result.old_oblock, bio, cell_prealloc,
			       (cell_free_fn) prealloc_put_cell,
			       structs, &old_ocell);
		if (r > 0) {
			/*
			 * We have to be careful to avoid lock inversion of
			 * the cells.  So we back off, and wait for the
			 * old_ocell to become free.
			 */
			policy_force_mapping(cache->policy, block,
					     lookup_result.old_oblock);
			atomic_inc(&cache->stats.cache_cell_clash);
			break;
		}
		atomic_inc(&cache->stats.demotion);
		atomic_inc(&cache->stats.promotion);

		demote_then_promote(cache, structs, lookup_result.old_oblock,
				    block, lookup_result.cblock,
				    old_ocell, new_ocell);
		release_cell = false;
		break;

	default:
		DMERR_LIMIT("%s: erroring bio, unknown policy op: %u", __func__,
			    (unsigned) lookup_result.op);
		bio_io_error(bio);
	}

	if (release_cell)
		cell_defer(cache, new_ocell, false);
}

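/*
 * Commit if COMMIT_PERIOD has elapsed since the last commit.  The first
 * clause handles jiffies wrapping past last_commit_jiffies.
 */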
static int need_commit_due_to_time(struct cache *cache)
{
	return jiffies < cache->last_commit_jiffies ||
	       jiffies > cache->last_commit_jiffies + COMMIT_PERIOD;
}

static int commit_if_needed(struct cache *cache)
{
	int r = 0;

	if ((cache->commit_requested || need_commit_due_to_time(cache)) &&
	    dm_cache_changed_this_transaction(cache->cmd)) {
		atomic_inc(&cache->stats.commit_count);
		cache->commit_requested = false;
		r = dm_cache_commit(cache->cmd, false);
		cache->last_commit_jiffies = jiffies;
	}

	return r;
}

static void process_deferred_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;
	struct prealloc structs;

	memset(&structs, 0, sizeof(structs));
	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while (!bio_list_empty(&bios)) {
		/*
		 * If we've got no free migration structs, and processing
		 * this bio might require one, we pause until there are some
		 * prepared mappings to process.
		 */
		if (prealloc_data_structs(cache, &structs)) {
			spin_lock_irqsave(&cache->lock, flags);
			bio_list_merge(&cache->deferred_bios, &bios);
			spin_unlock_irqrestore(&cache->lock, flags);
			break;
		}

		bio = bio_list_pop(&bios);

		if (bio->bi_rw & REQ_FLUSH)
			process_flush_bio(cache, bio);
		else if (bio->bi_rw & REQ_DISCARD)
			process_discard_bio(cache, bio);
		else
			process_bio(cache, &structs, bio);
	}

	prealloc_free_structs(cache, &structs);
}

static void process_deferred_flush_bios(struct cache *cache, bool submit_bios)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_flush_bios);
	bio_list_init(&cache->deferred_flush_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		submit_bios ? generic_make_request(bio) : bio_io_error(bio);
}

static void process_deferred_writethrough_bios(struct cache *cache)
{
	unsigned long flags;
	struct bio_list bios;
	struct bio *bio;

	bio_list_init(&bios);

	spin_lock_irqsave(&cache->lock, flags);
	bio_list_merge(&bios, &cache->deferred_writethrough_bios);
	bio_list_init(&cache->deferred_writethrough_bios);
	spin_unlock_irqrestore(&cache->lock, flags);

	while ((bio = bio_list_pop(&bios)))
		generic_make_request(bio);
}

static void writeback_some_dirty_blocks(struct cache *cache)
{
	int r = 0;
	dm_oblock_t oblock;
	dm_cblock_t cblock;
	struct prealloc structs;
	struct dm_bio_prison_cell *old_ocell;

	memset(&structs, 0, sizeof(structs));

	while (spare_migration_bandwidth(cache)) {
		if (prealloc_data_structs(cache, &structs))
			break;

		r = policy_writeback_work(cache->policy, &oblock, &cblock);
		if (r)
			break;

		r = get_cell(cache, oblock, &structs, &old_ocell);
		if (r) {
			policy_set_dirty(cache->policy, oblock);
			break;
		}

		writeback(cache, &structs, oblock, cblock, old_ocell);
	}

	prealloc_free_structs(cache, &structs);
}

/*----------------------------------------------------------------
 * Main worker loop
 *--------------------------------------------------------------*/
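/*
 * Quiescing is a handshake between the suspend path and the worker:
 * start_quiescing() raises the flag and then waits for the worker to
 * acknowledge it, guaranteeing the worker has observed the flag and
 * stopped taking on new bios before the suspend proceeds.
 */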
static bool is_quiescing(struct cache *cache)
{
	return atomic_read(&cache->quiescing);
}

static void ack_quiescing(struct cache *cache)
{
	if (is_quiescing(cache)) {
		atomic_inc(&cache->quiescing_ack);
		wake_up(&cache->quiescing_wait);
	}
}

static void wait_for_quiescing_ack(struct cache *cache)
{
	wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
}

static void start_quiescing(struct cache *cache)
{
	atomic_inc(&cache->quiescing);
	wait_for_quiescing_ack(cache);
}

static void stop_quiescing(struct cache *cache)
{
	atomic_set(&cache->quiescing, 0);
	atomic_set(&cache->quiescing_ack, 0);
}

static void wait_for_migrations(struct cache *cache)
{
	wait_event(cache->migration_wait, !atomic_read(&cache->nr_migrations));
}

static void stop_worker(struct cache *cache)
{
	cancel_delayed_work(&cache->waker);
	flush_workqueue(cache->wq);
}

static void requeue_deferred_io(struct cache *cache)
{
	struct bio *bio;
	struct bio_list bios;

	bio_list_init(&bios);
	bio_list_merge(&bios, &cache->deferred_bios);
	bio_list_init(&cache->deferred_bios);

	while ((bio = bio_list_pop(&bios)))
		bio_endio(bio, DM_ENDIO_REQUEUE);
}

static int more_work(struct cache *cache)
{
	if (is_quiescing(cache))
		return !list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
	else
		return !bio_list_empty(&cache->deferred_bios) ||
			!bio_list_empty(&cache->deferred_flush_bios) ||
			!bio_list_empty(&cache->deferred_writethrough_bios) ||
			!list_empty(&cache->quiesced_migrations) ||
			!list_empty(&cache->completed_migrations) ||
			!list_empty(&cache->need_commit_migrations);
}

static void do_worker(struct work_struct *ws)
{
	struct cache *cache = container_of(ws, struct cache, worker);

	do {
		if (!is_quiescing(cache)) {
			writeback_some_dirty_blocks(cache);
			process_deferred_writethrough_bios(cache);
			process_deferred_bios(cache);
		}

		process_migrations(cache, &cache->quiesced_migrations, issue_copy);
		process_migrations(cache, &cache->completed_migrations, complete_migration);

		if (commit_if_needed(cache)) {
			process_deferred_flush_bios(cache, false);

			/*
			 * FIXME: rollback metadata or just go into a
			 * failure mode and error everything
			 */
		} else {
			process_deferred_flush_bios(cache, true);
			process_migrations(cache, &cache->need_commit_migrations,
					   migration_success_post_commit);
		}

		ack_quiescing(cache);

	} while (more_work(cache));
}

/*
 * We want to commit periodically so that not too much
 * unwritten metadata builds up.
 */
static void do_waker(struct work_struct *ws)
{
	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
	policy_tick(cache->policy);
	wake_worker(cache);
	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
}

/*----------------------------------------------------------------*/

static int is_congested(struct dm_dev *dev, int bdi_bits)
{
	struct request_queue *q = bdev_get_queue(dev->bdev);
	return bdi_congested(&q->backing_dev_info, bdi_bits);
}

static int cache_is_congested(struct dm_target_callbacks *cb, int bdi_bits)
{
	struct cache *cache = container_of(cb, struct cache, callbacks);

	return is_congested(cache->origin_dev, bdi_bits) ||
	       is_congested(cache->cache_dev, bdi_bits);
}

/*----------------------------------------------------------------
 * Target methods
 *--------------------------------------------------------------*/

/*
 * This function gets called on the error paths of the constructor, so we
 * have to cope with a partially initialised struct.
 */
static void destroy(struct cache *cache)
{
	unsigned i;

	if (cache->next_migration)
		mempool_free(cache->next_migration, cache->migration_pool);

	if (cache->migration_pool)
		mempool_destroy(cache->migration_pool);

	if (cache->all_io_ds)
		dm_deferred_set_destroy(cache->all_io_ds);

	if (cache->prison)
		dm_bio_prison_destroy(cache->prison);

	if (cache->wq)
		destroy_workqueue(cache->wq);

	if (cache->dirty_bitset)
		free_bitset(cache->dirty_bitset);

	if (cache->discard_bitset)
		free_bitset(cache->discard_bitset);

	if (cache->copier)
		dm_kcopyd_client_destroy(cache->copier);

	if (cache->cmd)
		dm_cache_metadata_close(cache->cmd);

	if (cache->metadata_dev)
		dm_put_device(cache->ti, cache->metadata_dev);

	if (cache->origin_dev)
		dm_put_device(cache->ti, cache->origin_dev);

	if (cache->cache_dev)
		dm_put_device(cache->ti, cache->cache_dev);

	if (cache->policy)
		dm_cache_policy_destroy(cache->policy);

	for (i = 0; i < cache->nr_ctr_args; i++)
		kfree(cache->ctr_args[i]);
	kfree(cache->ctr_args);

	kfree(cache);
}

static void cache_dtr(struct dm_target *ti)
{
	struct cache *cache = ti->private;

	destroy(cache);
}

static sector_t get_dev_size(struct dm_dev *dev)
{
	return i_size_read(dev->bdev->bd_inode) >> SECTOR_SHIFT;
}

/*----------------------------------------------------------------*/

/*
 * Construct a cache device mapping.
 *
 * cache <metadata dev> <cache dev> <origin dev> <block size>
 *       <#feature args> [<feature arg>]*
 *       <policy> <#policy args> [<policy arg>]*
 *
 * metadata dev    : fast device holding the persistent metadata
 * cache dev       : fast device holding cached data blocks
 * origin dev      : slow device holding original data blocks
 * block size      : cache unit size in sectors
 *
 * #feature args   : number of feature arguments passed
 * feature args    : writethrough.  (The default is writeback.)
 *
 * policy          : the replacement policy to use
 * #policy args    : an even number of policy arguments corresponding
 *                   to key/value pairs passed to the policy
 * policy args     : key/value pairs passed to the policy
 *                   E.g. 'sequential_threshold 1024'
 *                   See cache-policies.txt for details.
 *
 * Optional feature arguments are:
 *   writethrough  : write through caching that prohibits cache block
 *                   content from being different from origin block content.
 *                   Without this argument, the default behaviour is to write
 *                   back cache block contents later for performance reasons,
 *                   so they may differ from the corresponding origin blocks.
 */
struct cache_args {
	struct dm_target *ti;

	struct dm_dev *metadata_dev;

	struct dm_dev *cache_dev;
	sector_t cache_sectors;

	struct dm_dev *origin_dev;
	sector_t origin_sectors;

	uint32_t block_size;

	const char *policy_name;
	int policy_argc;
	const char **policy_argv;

	struct cache_features features;
};

static void destroy_cache_args(struct cache_args *ca)
{
	if (ca->metadata_dev)
		dm_put_device(ca->ti, ca->metadata_dev);

	if (ca->cache_dev)
		dm_put_device(ca->ti, ca->cache_dev);

	if (ca->origin_dev)
		dm_put_device(ca->ti, ca->origin_dev);

	kfree(ca);
}

static bool at_least_one_arg(struct dm_arg_set *as, char **error)
{
	if (!as->argc) {
		*error = "Insufficient args";
		return false;
	}

	return true;
}

static int parse_metadata_dev(struct cache_args *ca, struct dm_arg_set *as,
			      char **error)
{
	int r;
	sector_t metadata_dev_size;
	char b[BDEVNAME_SIZE];

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->metadata_dev);
	if (r) {
		*error = "Error opening metadata device";
		return r;
	}

	metadata_dev_size = get_dev_size(ca->metadata_dev);
	if (metadata_dev_size > DM_CACHE_METADATA_MAX_SECTORS_WARNING)
		DMWARN("Metadata device %s is larger than %u sectors: excess space will not be used.",
		       bdevname(ca->metadata_dev->bdev, b), DM_CACHE_METADATA_MAX_SECTORS);

	return 0;
}

static int parse_cache_dev(struct cache_args *ca, struct dm_arg_set *as,
			   char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->cache_dev);
	if (r) {
		*error = "Error opening cache device";
		return r;
	}
	ca->cache_sectors = get_dev_size(ca->cache_dev);

	return 0;
}

static int parse_origin_dev(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	r = dm_get_device(ca->ti, dm_shift_arg(as), FMODE_READ | FMODE_WRITE,
			  &ca->origin_dev);
	if (r) {
		*error = "Error opening origin device";
		return r;
	}

	ca->origin_sectors = get_dev_size(ca->origin_dev);
	if (ca->ti->len > ca->origin_sectors) {
		*error = "Device size larger than cached device";
		return -EINVAL;
	}

	return 0;
}

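/*
 * The block size must lie between 32KB and 1GB and be a multiple of
 * 32KB (the bitwise test below checks the low-order bits), and it
 * cannot exceed the size of the cache device itself.
 */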
static int parse_block_size(struct cache_args *ca, struct dm_arg_set *as,
			    char **error)
{
	unsigned long block_size;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	if (kstrtoul(dm_shift_arg(as), 10, &block_size) || !block_size ||
	    block_size < DATA_DEV_BLOCK_SIZE_MIN_SECTORS ||
	    block_size > DATA_DEV_BLOCK_SIZE_MAX_SECTORS ||
	    block_size & (DATA_DEV_BLOCK_SIZE_MIN_SECTORS - 1)) {
		*error = "Invalid data block size";
		return -EINVAL;
	}

	if (block_size > ca->cache_sectors) {
		*error = "Data block size is larger than the cache device";
		return -EINVAL;
	}

	ca->block_size = block_size;

	return 0;
}

static void init_features(struct cache_features *cf)
{
	cf->mode = CM_WRITE;
	cf->write_through = false;
}

static int parse_features(struct cache_args *ca, struct dm_arg_set *as,
			  char **error)
{
	static struct dm_arg _args[] = {
		{0, 1, "Invalid number of cache feature arguments"},
	};

	int r;
	unsigned argc;
	const char *arg;
	struct cache_features *cf = &ca->features;

	init_features(cf);

	r = dm_read_arg_group(_args, as, &argc, error);
	if (r)
		return -EINVAL;

	while (argc--) {
		arg = dm_shift_arg(as);

		if (!strcasecmp(arg, "writeback"))
			cf->write_through = false;

		else if (!strcasecmp(arg, "writethrough"))
			cf->write_through = true;

		else {
			*error = "Unrecognised cache feature requested";
			return -EINVAL;
		}
	}

	return 0;
}

static int parse_policy(struct cache_args *ca, struct dm_arg_set *as,
			char **error)
{
	static struct dm_arg _args[] = {
		{0, 1024, "Invalid number of policy arguments"},
	};

	int r;

	if (!at_least_one_arg(as, error))
		return -EINVAL;

	ca->policy_name = dm_shift_arg(as);

	r = dm_read_arg_group(_args, as, &ca->policy_argc, error);
	if (r)
		return -EINVAL;

	ca->policy_argv = (const char **)as->argv;
	dm_consume_args(as, ca->policy_argc);

	return 0;
}

static int parse_cache_args(struct cache_args *ca, int argc, char **argv,
			    char **error)
{
	int r;
	struct dm_arg_set as;

	as.argc = argc;
	as.argv = argv;

	r = parse_metadata_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_cache_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_origin_dev(ca, &as, error);
	if (r)
		return r;

	r = parse_block_size(ca, &as, error);
	if (r)
		return r;

	r = parse_features(ca, &as, error);
	if (r)
		return r;

	r = parse_policy(ca, &as, error);
	if (r)
		return r;

	return 0;
}

/*----------------------------------------------------------------*/

static struct kmem_cache *migration_cache;

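/*
 * Configuration keys are first offered to the core target; anything it
 * doesn't recognise (NOT_CORE_OPTION) is passed through to the policy's
 * own set_config_value hook.
 */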
1913#define NOT_CORE_OPTION 1
1914
2f14f4b5 1915static int process_config_option(struct cache *cache, const char *key, const char *value)
2c73c471
AK
1916{
1917 unsigned long tmp;
1918
2f14f4b5
JT
1919 if (!strcasecmp(key, "migration_threshold")) {
1920 if (kstrtoul(value, 10, &tmp))
2c73c471
AK
1921 return -EINVAL;
1922
1923 cache->migration_threshold = tmp;
1924 return 0;
1925 }
1926
1927 return NOT_CORE_OPTION;
1928}
1929
2f14f4b5
JT
1930static int set_config_value(struct cache *cache, const char *key, const char *value)
1931{
1932 int r = process_config_option(cache, key, value);
1933
1934 if (r == NOT_CORE_OPTION)
1935 r = policy_set_config_value(cache->policy, key, value);
1936
1937 if (r)
1938 DMWARN("bad config value for %s: %s", key, value);
1939
1940 return r;
1941}
1942
1943static int set_config_values(struct cache *cache, int argc, const char **argv)
c6b4fcba
JT
1944{
1945 int r = 0;
1946
1947 if (argc & 1) {
1948 DMWARN("Odd number of policy arguments given but they should be <key> <value> pairs.");
1949 return -EINVAL;
1950 }
1951
1952 while (argc) {
2f14f4b5
JT
1953 r = set_config_value(cache, argv[0], argv[1]);
1954 if (r)
1955 break;
c6b4fcba
JT
1956
1957 argc -= 2;
1958 argv += 2;
1959 }
1960
1961 return r;
1962}
1963
1964static int create_cache_policy(struct cache *cache, struct cache_args *ca,
1965 char **error)
1966{
4cb3e1db
MP
1967 struct dm_cache_policy *p = dm_cache_policy_create(ca->policy_name,
1968 cache->cache_size,
1969 cache->origin_sectors,
1970 cache->sectors_per_block);
1971 if (IS_ERR(p)) {
c6b4fcba 1972 *error = "Error creating cache's policy";
4cb3e1db 1973 return PTR_ERR(p);
c6b4fcba 1974 }
4cb3e1db 1975 cache->policy = p;
c6b4fcba 1976
2f14f4b5 1977 return 0;
c6b4fcba
JT
1978}
1979
1980/*
1981 * We want the discard block size to be a power of two, at least the size
1982 * of the cache block size, and have no more than 2^14 discard blocks
1983 * across the origin.
1984 */
1985#define MAX_DISCARD_BLOCKS (1 << 14)
1986
1987static bool too_many_discard_blocks(sector_t discard_block_size,
1988 sector_t origin_size)
1989{
1990 (void) sector_div(origin_size, discard_block_size);
1991
1992 return origin_size > MAX_DISCARD_BLOCKS;
1993}
1994
1995static sector_t calculate_discard_block_size(sector_t cache_block_size,
1996 sector_t origin_size)
1997{
1998 sector_t discard_block_size;
1999
2000 discard_block_size = roundup_pow_of_two(cache_block_size);
2001
2002 if (origin_size)
2003 while (too_many_discard_blocks(discard_block_size, origin_size))
2004 discard_block_size *= 2;
2005
2006 return discard_block_size;
2007}
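/*
 * Worked example (illustrative, numbers made up): a 96KB cache block is
 * 192 sectors, so roundup_pow_of_two() gives 256 sectors (128KB).  A 1TB
 * origin is 2147483648 sectors, i.e. 8388608 such discard blocks, so the
 * size doubles nine times to 131072 sectors (64MB), leaving exactly
 * 2^14 = 16384 discard blocks across the origin.
 */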
2008
f8350daf 2009#define DEFAULT_MIGRATION_THRESHOLD 2048
c6b4fcba 2010
c6b4fcba
JT
2011static int cache_create(struct cache_args *ca, struct cache **result)
2012{
2013 int r = 0;
2014 char **error = &ca->ti->error;
2015 struct cache *cache;
2016 struct dm_target *ti = ca->ti;
2017 dm_block_t origin_blocks;
2018 struct dm_cache_metadata *cmd;
2019 bool may_format = ca->features.mode == CM_WRITE;
2020
2021 cache = kzalloc(sizeof(*cache), GFP_KERNEL);
2022 if (!cache)
2023 return -ENOMEM;
2024
2025 cache->ti = ca->ti;
2026 ti->private = cache;
c6b4fcba
JT
2027 ti->num_flush_bios = 2;
2028 ti->flush_supported = true;
2029
2030 ti->num_discard_bios = 1;
2031 ti->discards_supported = true;
2032 ti->discard_zeroes_data_unsupported = true;
2033
8c5008fa 2034 cache->features = ca->features;
19b0092e 2035 ti->per_bio_data_size = get_per_bio_data_size(cache);
c6b4fcba 2036
c6b4fcba
JT
2037 cache->callbacks.congested_fn = cache_is_congested;
2038 dm_table_add_target_callbacks(ti->table, &cache->callbacks);
2039
2040 cache->metadata_dev = ca->metadata_dev;
2041 cache->origin_dev = ca->origin_dev;
2042 cache->cache_dev = ca->cache_dev;
2043
2044 ca->metadata_dev = ca->origin_dev = ca->cache_dev = NULL;
2045
2046 /* FIXME: factor out this whole section */
2047 origin_blocks = cache->origin_sectors = ca->origin_sectors;
414dd67d 2048 origin_blocks = block_div(origin_blocks, ca->block_size);
c6b4fcba
JT
2049 cache->origin_blocks = to_oblock(origin_blocks);
2050
2051 cache->sectors_per_block = ca->block_size;
2052 if (dm_set_target_max_io_len(ti, cache->sectors_per_block)) {
2053 r = -EINVAL;
2054 goto bad;
2055 }
2056
2057 if (ca->block_size & (ca->block_size - 1)) {
2058 dm_block_t cache_size = ca->cache_sectors;
2059
2060 cache->sectors_per_block_shift = -1;
414dd67d 2061 cache_size = block_div(cache_size, ca->block_size);
c6b4fcba
JT
2062 cache->cache_size = to_cblock(cache_size);
2063 } else {
2064 cache->sectors_per_block_shift = __ffs(ca->block_size);
2065 cache->cache_size = to_cblock(ca->cache_sectors >> cache->sectors_per_block_shift);
2066 }
2067
2068 r = create_cache_policy(cache, ca, error);
2069 if (r)
2070 goto bad;
2f14f4b5 2071
c6b4fcba 2072 cache->policy_nr_args = ca->policy_argc;
2f14f4b5
JT
2073 cache->migration_threshold = DEFAULT_MIGRATION_THRESHOLD;
2074
2075 r = set_config_values(cache, ca->policy_argc, ca->policy_argv);
2076 if (r) {
2077 *error = "Error setting cache policy's config values";
2078 goto bad;
2079 }
c6b4fcba
JT
2080
2081 cmd = dm_cache_metadata_open(cache->metadata_dev->bdev,
2082 ca->block_size, may_format,
2083 dm_cache_policy_get_hint_size(cache->policy));
2084 if (IS_ERR(cmd)) {
2085 *error = "Error creating metadata object";
2086 r = PTR_ERR(cmd);
2087 goto bad;
2088 }
2089 cache->cmd = cmd;
2090
2091 spin_lock_init(&cache->lock);
2092 bio_list_init(&cache->deferred_bios);
2093 bio_list_init(&cache->deferred_flush_bios);
e2e74d61 2094 bio_list_init(&cache->deferred_writethrough_bios);
c6b4fcba
JT
2095 INIT_LIST_HEAD(&cache->quiesced_migrations);
2096 INIT_LIST_HEAD(&cache->completed_migrations);
2097 INIT_LIST_HEAD(&cache->need_commit_migrations);
c6b4fcba
JT
2098 atomic_set(&cache->nr_migrations, 0);
2099 init_waitqueue_head(&cache->migration_wait);
2100
66cb1910 2101 init_waitqueue_head(&cache->quiescing_wait);
238f8363 2102 atomic_set(&cache->quiescing, 0);
66cb1910
JT
2103 atomic_set(&cache->quiescing_ack, 0);
2104
fa4d683a 2105 r = -ENOMEM;
c6b4fcba
JT
2106 cache->nr_dirty = 0;
2107 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));
2108 if (!cache->dirty_bitset) {
2109 *error = "could not allocate dirty bitset";
2110 goto bad;
2111 }
2112 clear_bitset(cache->dirty_bitset, from_cblock(cache->cache_size));
2113
2114 cache->discard_block_size =
2115 calculate_discard_block_size(cache->sectors_per_block,
2116 cache->origin_sectors);
2117 cache->discard_nr_blocks = oblock_to_dblock(cache, cache->origin_blocks);
2118 cache->discard_bitset = alloc_bitset(from_dblock(cache->discard_nr_blocks));
2119 if (!cache->discard_bitset) {
2120 *error = "could not allocate discard bitset";
2121 goto bad;
2122 }
2123 clear_bitset(cache->discard_bitset, from_dblock(cache->discard_nr_blocks));
2124
2125 cache->copier = dm_kcopyd_client_create(&dm_kcopyd_throttle);
2126 if (IS_ERR(cache->copier)) {
2127 *error = "could not create kcopyd client";
2128 r = PTR_ERR(cache->copier);
2129 goto bad;
2130 }
2131
2132 cache->wq = alloc_ordered_workqueue("dm-" DM_MSG_PREFIX, WQ_MEM_RECLAIM);
2133 if (!cache->wq) {
2134 *error = "could not create workqueue for metadata object";
2135 goto bad;
2136 }
2137 INIT_WORK(&cache->worker, do_worker);
2138 INIT_DELAYED_WORK(&cache->waker, do_waker);
2139 cache->last_commit_jiffies = jiffies;
2140
2141 cache->prison = dm_bio_prison_create(PRISON_CELLS);
2142 if (!cache->prison) {
2143 *error = "could not create bio prison";
2144 goto bad;
2145 }
2146
2147 cache->all_io_ds = dm_deferred_set_create();
2148 if (!cache->all_io_ds) {
2149 *error = "could not create all_io deferred set";
2150 goto bad;
2151 }
2152
2153 cache->migration_pool = mempool_create_slab_pool(MIGRATION_POOL_SIZE,
2154 migration_cache);
2155 if (!cache->migration_pool) {
2156 *error = "Error creating cache's migration mempool";
2157 goto bad;
2158 }
2159
2160 cache->next_migration = NULL;
2161
2162 cache->need_tick_bio = true;
2163 cache->sized = false;
c6b4fcba
JT
2164 cache->commit_requested = false;
2165 cache->loaded_mappings = false;
2166 cache->loaded_discards = false;
2167
2168 load_stats(cache);
2169
2170 atomic_set(&cache->stats.demotion, 0);
2171 atomic_set(&cache->stats.promotion, 0);
2172 atomic_set(&cache->stats.copies_avoided, 0);
2173 atomic_set(&cache->stats.cache_cell_clash, 0);
2174 atomic_set(&cache->stats.commit_count, 0);
2175 atomic_set(&cache->stats.discard_count, 0);
2176
2177 *result = cache;
2178 return 0;
2179
2180bad:
2181 destroy(cache);
2182 return r;
2183}
2184
2185static int copy_ctr_args(struct cache *cache, int argc, const char **argv)
2186{
2187 unsigned i;
2188 const char **copy;
2189
2190 copy = kcalloc(argc, sizeof(*copy), GFP_KERNEL);
2191 if (!copy)
2192 return -ENOMEM;
2193 for (i = 0; i < argc; i++) {
2194 copy[i] = kstrdup(argv[i], GFP_KERNEL);
2195 if (!copy[i]) {
2196 while (i--)
2197 kfree(copy[i]);
2198 kfree(copy);
2199 return -ENOMEM;
2200 }
2201 }
2202
2203 cache->nr_ctr_args = argc;
2204 cache->ctr_args = copy;
2205
2206 return 0;
2207}
2208
2209static int cache_ctr(struct dm_target *ti, unsigned argc, char **argv)
2210{
2211 int r = -EINVAL;
2212 struct cache_args *ca;
2213 struct cache *cache = NULL;
2214
2215 ca = kzalloc(sizeof(*ca), GFP_KERNEL);
2216 if (!ca) {
2217 ti->error = "Error allocating memory for cache";
2218 return -ENOMEM;
2219 }
2220 ca->ti = ti;
2221
2222 r = parse_cache_args(ca, argc, argv, &ti->error);
2223 if (r)
2224 goto out;
2225
2226 r = cache_create(ca, &cache);
617a0b89
HM
2227 if (r)
2228 goto out;
c6b4fcba
JT
2229
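	/*
	 * The first three ctr arguments are the device paths; table status
	 * re-emits those from the live dm_dev handles, so only the
	 * remaining arguments need to be copied here.
	 */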
2230 r = copy_ctr_args(cache, argc - 3, (const char **)argv + 3);
2231 if (r) {
2232 destroy(cache);
2233 goto out;
2234 }
2235
2236 ti->private = cache;
2237
2238out:
2239 destroy_cache_args(ca);
2240 return r;
2241}
2242
c6b4fcba
JT
2243static int cache_map(struct dm_target *ti, struct bio *bio)
2244{
2245 struct cache *cache = ti->private;
2246
2247 int r;
2248 dm_oblock_t block = get_bio_block(cache, bio);
19b0092e 2249 size_t pb_data_size = get_per_bio_data_size(cache);
c6b4fcba
JT
2250 bool can_migrate = false;
2251 bool discarded_block;
2252 struct dm_bio_prison_cell *cell;
2253 struct policy_result lookup_result;
2254 struct per_bio_data *pb;
2255
2256	if (from_oblock(block) >= from_oblock(cache->origin_blocks)) {
2257 /*
2258 * This can only occur if the io goes to a partial block at
2259 * the end of the origin device. We don't cache these.
2260 * Just remap to the origin and carry on.
2261 */
2262 remap_to_origin_clear_discard(cache, bio, block);
2263 return DM_MAPIO_REMAPPED;
2264 }
2265
19b0092e 2266 pb = init_per_bio_data(bio, pb_data_size);
c6b4fcba
JT
2267
2268 if (bio->bi_rw & (REQ_FLUSH | REQ_FUA | REQ_DISCARD)) {
2269 defer_bio(cache, bio);
2270 return DM_MAPIO_SUBMITTED;
2271 }
2272
2273 /*
2274 * Check to see if that block is currently migrating.
2275 */
2276 cell = alloc_prison_cell(cache);
2277 if (!cell) {
2278 defer_bio(cache, bio);
2279 return DM_MAPIO_SUBMITTED;
2280 }
2281
2282 r = bio_detain(cache, block, bio, cell,
2283 (cell_free_fn) free_prison_cell,
2284 cache, &cell);
2285 if (r) {
2286 if (r < 0)
2287 defer_bio(cache, bio);
2288
2289 return DM_MAPIO_SUBMITTED;
2290 }
2291
2292 discarded_block = is_discarded_oblock(cache, block);
2293
2294 r = policy_map(cache->policy, block, false, can_migrate, discarded_block,
2295 bio, &lookup_result);
2296 if (r == -EWOULDBLOCK) {
2297 cell_defer(cache, cell, true);
2298 return DM_MAPIO_SUBMITTED;
2299
2300 } else if (r) {
2301 DMERR_LIMIT("Unexpected return from cache replacement policy: %d", r);
2302 bio_io_error(bio);
2303 return DM_MAPIO_SUBMITTED;
2304 }
2305
2306 switch (lookup_result.op) {
2307 case POLICY_HIT:
2308 inc_hit_counter(cache, bio);
2309 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2310
e2e74d61
JT
2311 if (is_writethrough_io(cache, bio, lookup_result.cblock))
2312 remap_to_origin_then_cache(cache, bio, block, lookup_result.cblock);
2313 else
c6b4fcba 2314 remap_to_cache_dirty(cache, bio, block, lookup_result.cblock);
e2e74d61
JT
2315
2316 cell_defer(cache, cell, false);
c6b4fcba
JT
2317 break;
2318
2319 case POLICY_MISS:
2320 inc_miss_counter(cache, bio);
2321 pb->all_io_entry = dm_deferred_entry_inc(cache->all_io_ds);
2322
2323 if (pb->req_nr != 0) {
2324 /*
2325 * This is a duplicate writethrough io that is no
2326 * longer needed because the block has been demoted.
2327 */
2328 bio_endio(bio, 0);
2329 cell_defer(cache, cell, false);
2330 return DM_MAPIO_SUBMITTED;
2331 } else {
2332 remap_to_origin_clear_discard(cache, bio, block);
2333 cell_defer(cache, cell, false);
2334 }
2335 break;
2336
2337 default:
2338 DMERR_LIMIT("%s: erroring bio: unknown policy op: %u", __func__,
2339 (unsigned) lookup_result.op);
2340 bio_io_error(bio);
2341 return DM_MAPIO_SUBMITTED;
2342 }
2343
2344 return DM_MAPIO_REMAPPED;
2345}
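/*
 * Note (summary, not from the original source): on a writethrough hit the
 * bio is remapped to the origin and hooked so that its completion also
 * updates the cache block, so the cache never holds dirty data in that
 * mode; writeback hits go straight to the cache via remap_to_cache_dirty().
 */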
2346
2347static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
2348{
2349 struct cache *cache = ti->private;
2350 unsigned long flags;
19b0092e
MS
2351 size_t pb_data_size = get_per_bio_data_size(cache);
2352 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
c6b4fcba
JT
2353
2354 if (pb->tick) {
2355 policy_tick(cache->policy);
2356
2357 spin_lock_irqsave(&cache->lock, flags);
2358 cache->need_tick_bio = true;
2359 spin_unlock_irqrestore(&cache->lock, flags);
2360 }
2361
2362 check_for_quiesced_migrations(cache, pb);
2363
2364 return 0;
2365}
2366
2367static int write_dirty_bitset(struct cache *cache)
2368{
2369 unsigned i, r;
2370
2371 for (i = 0; i < from_cblock(cache->cache_size); i++) {
2372 r = dm_cache_set_dirty(cache->cmd, to_cblock(i),
2373 is_dirty(cache, to_cblock(i)));
2374 if (r)
2375 return r;
2376 }
2377
2378 return 0;
2379}
2380
2381static int write_discard_bitset(struct cache *cache)
2382{
2383 unsigned i, r;
2384
2385 r = dm_cache_discard_bitset_resize(cache->cmd, cache->discard_block_size,
2386 cache->discard_nr_blocks);
2387 if (r) {
2388 DMERR("could not resize on-disk discard bitset");
2389 return r;
2390 }
2391
2392 for (i = 0; i < from_dblock(cache->discard_nr_blocks); i++) {
2393 r = dm_cache_set_discard(cache->cmd, to_dblock(i),
2394 is_discarded(cache, to_dblock(i)));
2395 if (r)
2396 return r;
2397 }
2398
2399 return 0;
2400}
2401
2402static int save_hint(void *context, dm_cblock_t cblock, dm_oblock_t oblock,
2403 uint32_t hint)
2404{
2405 struct cache *cache = context;
2406 return dm_cache_save_hint(cache->cmd, cblock, hint);
2407}
2408
2409static int write_hints(struct cache *cache)
2410{
2411 int r;
2412
2413 r = dm_cache_begin_hints(cache->cmd, cache->policy);
2414 if (r) {
2415 DMERR("dm_cache_begin_hints failed");
2416 return r;
2417 }
2418
2419 r = policy_walk_mappings(cache->policy, save_hint, cache);
2420 if (r)
2421 DMERR("policy_walk_mappings failed");
2422
2423 return r;
2424}
2425
2426/*
2427 * returns true on success
2428 */
2429static bool sync_metadata(struct cache *cache)
2430{
2431 int r1, r2, r3, r4;
2432
2433 r1 = write_dirty_bitset(cache);
2434 if (r1)
2435 DMERR("could not write dirty bitset");
2436
2437 r2 = write_discard_bitset(cache);
2438 if (r2)
2439 DMERR("could not write discard bitset");
2440
2441 save_stats(cache);
2442
2443 r3 = write_hints(cache);
2444 if (r3)
2445 DMERR("could not write hints");
2446
2447 /*
2448 * If writing the above metadata failed, we still commit, but don't
2449 * set the clean shutdown flag. This will effectively force every
2450 * dirty bit to be set on reload.
2451 */
2452 r4 = dm_cache_commit(cache->cmd, !r1 && !r2 && !r3);
2453 if (r4)
2454 DMERR("could not write cache metadata. Data loss may occur.");
2455
2456 return !r1 && !r2 && !r3 && !r4;
2457}
2458
2459static void cache_postsuspend(struct dm_target *ti)
2460{
2461 struct cache *cache = ti->private;
2462
2463 start_quiescing(cache);
2464 wait_for_migrations(cache);
2465 stop_worker(cache);
2466 requeue_deferred_io(cache);
2467 stop_quiescing(cache);
2468
2469 (void) sync_metadata(cache);
2470}
2471
2472static int load_mapping(void *context, dm_oblock_t oblock, dm_cblock_t cblock,
2473 bool dirty, uint32_t hint, bool hint_valid)
2474{
2475 int r;
2476 struct cache *cache = context;
2477
2478 r = policy_load_mapping(cache->policy, oblock, cblock, hint, hint_valid);
2479 if (r)
2480 return r;
2481
2482 if (dirty)
2483 set_dirty(cache, oblock, cblock);
2484 else
2485 clear_dirty(cache, oblock, cblock);
2486
2487 return 0;
2488}
2489
2490static int load_discard(void *context, sector_t discard_block_size,
2491 dm_dblock_t dblock, bool discard)
2492{
2493 struct cache *cache = context;
2494
2495 /* FIXME: handle mis-matched block size */
2496
2497 if (discard)
2498 set_discard(cache, dblock);
2499 else
2500 clear_discard(cache, dblock);
2501
2502 return 0;
2503}
2504
f494a9c6
JT
2505static dm_cblock_t get_cache_dev_size(struct cache *cache)
2506{
2507 sector_t size = get_dev_size(cache->cache_dev);
2508 (void) sector_div(size, cache->sectors_per_block);
2509 return to_cblock(size);
2510}
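/*
 * Example (illustrative): a 10GB cache device is 20971520 sectors; with
 * 512-sector (256KB) cache blocks this yields 40960 cache blocks.
 * sector_div() rounds down, so a trailing partial block is ignored.
 */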
2511
2512static bool can_resize(struct cache *cache, dm_cblock_t new_size)
2513{
2514 if (from_cblock(new_size) > from_cblock(cache->cache_size))
2515 return true;
2516
2517 /*
2518 * We can't drop a dirty block when shrinking the cache.
2519 */
2520	while (from_cblock(new_size) < from_cblock(cache->cache_size)) {
2521		if (is_dirty(cache, new_size)) {
2522			DMERR("unable to shrink cache; cache block %llu is dirty",
2523			      (unsigned long long) from_cblock(new_size));
2524			return false;
2525		}
2526		new_size = to_cblock(from_cblock(new_size) + 1);
2527	}
2528
2529 return true;
2530}
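/*
 * Example (illustrative): shrinking from 40960 to 32768 cache blocks only
 * succeeds if blocks 32768..40959 are all clean; dirty data in the
 * truncated tail would otherwise be lost, hence the refusal above.
 */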
2531
2532static int resize_cache_dev(struct cache *cache, dm_cblock_t new_size)
2533{
2534 int r;
2535
2536	r = dm_cache_resize(cache->cmd, new_size);
2537 if (r) {
2538 DMERR("could not resize cache metadata");
2539 return r;
2540 }
2541
2542 cache->cache_size = new_size;
2543
2544 return 0;
2545}
2546
c6b4fcba
JT
2547static int cache_preresume(struct dm_target *ti)
2548{
2549 int r = 0;
2550 struct cache *cache = ti->private;
f494a9c6 2551 dm_cblock_t csize = get_cache_dev_size(cache);
c6b4fcba
JT
2552
2553 /*
2554	 * Check to see if the cache device has been resized.
2555 */
f494a9c6
JT
2556 if (!cache->sized) {
2557 r = resize_cache_dev(cache, csize);
2558 if (r)
c6b4fcba 2559 return r;
c6b4fcba
JT
2560
2561 cache->sized = true;
f494a9c6
JT
2562
2563 } else if (csize != cache->cache_size) {
2564 if (!can_resize(cache, csize))
2565 return -EINVAL;
2566
2567 r = resize_cache_dev(cache, csize);
2568 if (r)
2569 return r;
c6b4fcba
JT
2570 }
2571
2572 if (!cache->loaded_mappings) {
ea2dd8c1 2573 r = dm_cache_load_mappings(cache->cmd, cache->policy,
c6b4fcba
JT
2574 load_mapping, cache);
2575 if (r) {
2576 DMERR("could not load cache mappings");
2577 return r;
2578 }
2579
2580 cache->loaded_mappings = true;
2581 }
2582
2583 if (!cache->loaded_discards) {
2584 r = dm_cache_load_discards(cache->cmd, load_discard, cache);
2585 if (r) {
2586 DMERR("could not load origin discards");
2587 return r;
2588 }
2589
2590 cache->loaded_discards = true;
2591 }
2592
2593 return r;
2594}
2595
2596static void cache_resume(struct dm_target *ti)
2597{
2598 struct cache *cache = ti->private;
2599
2600 cache->need_tick_bio = true;
2601 do_waker(&cache->waker.work);
2602}
2603
2604/*
2605 * Status format:
2606 *
2607 * <#used metadata blocks>/<#total metadata blocks>
2608 * <#read hits> <#read misses> <#write hits> <#write misses>
2609 * <#demotions> <#promotions> <#blocks in cache> <#dirty>
2610 * <#features> <features>*
2611 * <#core args> <core args>
2612 * <#policy args> <policy args>*
2613 */
2614static void cache_status(struct dm_target *ti, status_type_t type,
2615 unsigned status_flags, char *result, unsigned maxlen)
2616{
2617 int r = 0;
2618 unsigned i;
2619 ssize_t sz = 0;
2620 dm_block_t nr_free_blocks_metadata = 0;
2621 dm_block_t nr_blocks_metadata = 0;
2622 char buf[BDEVNAME_SIZE];
2623 struct cache *cache = ti->private;
2624 dm_cblock_t residency;
2625
2626 switch (type) {
2627 case STATUSTYPE_INFO:
2628 /* Commit to ensure statistics aren't out-of-date */
2629 if (!(status_flags & DM_STATUS_NOFLUSH_FLAG) && !dm_suspended(ti)) {
2630 r = dm_cache_commit(cache->cmd, false);
2631 if (r)
2632 DMERR("could not commit metadata for accurate status");
2633 }
2634
2635 r = dm_cache_get_free_metadata_block_count(cache->cmd,
2636 &nr_free_blocks_metadata);
2637 if (r) {
2638 DMERR("could not get metadata free block count");
2639 goto err;
2640 }
2641
2642 r = dm_cache_get_metadata_dev_size(cache->cmd, &nr_blocks_metadata);
2643 if (r) {
2644 DMERR("could not get metadata device size");
2645 goto err;
2646 }
2647
2648 residency = policy_residency(cache->policy);
2649
2650 DMEMIT("%llu/%llu %u %u %u %u %u %u %llu %u ",
2651 (unsigned long long)(nr_blocks_metadata - nr_free_blocks_metadata),
2652 (unsigned long long)nr_blocks_metadata,
2653 (unsigned) atomic_read(&cache->stats.read_hit),
2654 (unsigned) atomic_read(&cache->stats.read_miss),
2655 (unsigned) atomic_read(&cache->stats.write_hit),
2656 (unsigned) atomic_read(&cache->stats.write_miss),
2657 (unsigned) atomic_read(&cache->stats.demotion),
2658 (unsigned) atomic_read(&cache->stats.promotion),
2659 (unsigned long long) from_cblock(residency),
2660 cache->nr_dirty);
2661
2662 if (cache->features.write_through)
2663 DMEMIT("1 writethrough ");
2664 else
2665 DMEMIT("0 ");
2666
2667 DMEMIT("2 migration_threshold %llu ", (unsigned long long) cache->migration_threshold);
2668 if (sz < maxlen) {
2669 r = policy_emit_config_values(cache->policy, result + sz, maxlen - sz);
2670 if (r)
2671 DMERR("policy_emit_config_values returned %d", r);
2672 }
2673
2674 break;
2675
2676 case STATUSTYPE_TABLE:
2677 format_dev_t(buf, cache->metadata_dev->bdev->bd_dev);
2678 DMEMIT("%s ", buf);
2679 format_dev_t(buf, cache->cache_dev->bdev->bd_dev);
2680 DMEMIT("%s ", buf);
2681 format_dev_t(buf, cache->origin_dev->bdev->bd_dev);
2682 DMEMIT("%s", buf);
2683
2684 for (i = 0; i < cache->nr_ctr_args - 1; i++)
2685 DMEMIT(" %s", cache->ctr_args[i]);
2686 if (cache->nr_ctr_args)
2687 DMEMIT(" %s", cache->ctr_args[cache->nr_ctr_args - 1]);
2688 }
2689
2690 return;
2691
2692err:
2693 DMEMIT("Error");
2694}
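/*
 * Example STATUSTYPE_INFO output (all values illustrative):
 *
 *   17/1024 2841 93 3204 87 4 12 40960 2 1 writethrough \
 *       2 migration_threshold 2048 4 sequential_threshold 1024 \
 *       random_threshold 8
 *
 * i.e. 17 of 1024 metadata blocks used; read/write hit and miss counts;
 * demotions and promotions; 40960 blocks resident; 2 dirty; one feature
 * arg; the core <key> <value> pair; then the policy-emitted values (the
 * trailing four assume an mq-like policy).
 */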
2695
c6b4fcba
JT
2696/*
2697 * Supports <key> <value>.
2698 *
2699 * The key migration_threshold is supported by the cache target core.
2700 */
2701static int cache_message(struct dm_target *ti, unsigned argc, char **argv)
2702{
c6b4fcba
JT
2703 struct cache *cache = ti->private;
2704
2705 if (argc != 2)
2706 return -EINVAL;
2707
2f14f4b5 2708 return set_config_value(cache, argv[0], argv[1]);
c6b4fcba
JT
2709}
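/*
 * Example (illustrative, "my_cache" is an assumed device name):
 *
 *   dmsetup message my_cache 0 migration_threshold 4096
 *
 * Any key other than migration_threshold falls through to the policy.
 */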
2710
2711static int cache_iterate_devices(struct dm_target *ti,
2712 iterate_devices_callout_fn fn, void *data)
2713{
2714 int r = 0;
2715 struct cache *cache = ti->private;
2716
2717 r = fn(ti, cache->cache_dev, 0, get_dev_size(cache->cache_dev), data);
2718 if (!r)
2719 r = fn(ti, cache->origin_dev, 0, ti->len, data);
2720
2721 return r;
2722}
2723
2724/*
2725 * We assume I/O is going to the origin (which is the volume
2726 * more likely to have restrictions e.g. by being striped).
2727 * (Looking up the exact location of the data would be expensive
2728 * and could always be out of date by the time the bio is submitted.)
2729 */
2730static int cache_bvec_merge(struct dm_target *ti,
2731 struct bvec_merge_data *bvm,
2732 struct bio_vec *biovec, int max_size)
2733{
2734 struct cache *cache = ti->private;
2735 struct request_queue *q = bdev_get_queue(cache->origin_dev->bdev);
2736
2737 if (!q->merge_bvec_fn)
2738 return max_size;
2739
2740 bvm->bi_bdev = cache->origin_dev->bdev;
2741 return min(max_size, q->merge_bvec_fn(q, bvm, biovec));
2742}
2743
2744static void set_discard_limits(struct cache *cache, struct queue_limits *limits)
2745{
2746 /*
2747 * FIXME: these limits may be incompatible with the cache device
2748 */
2749 limits->max_discard_sectors = cache->discard_block_size * 1024;
2750 limits->discard_granularity = cache->discard_block_size << SECTOR_SHIFT;
2751}
2752
2753static void cache_io_hints(struct dm_target *ti, struct queue_limits *limits)
2754{
2755 struct cache *cache = ti->private;
f6109372 2756 uint64_t io_opt_sectors = limits->io_opt >> SECTOR_SHIFT;
c6b4fcba 2757
f6109372
MS
2758 /*
2759 * If the system-determined stacked limits are compatible with the
2760 * cache's blocksize (io_opt is a factor) do not override them.
2761 */
2762 if (io_opt_sectors < cache->sectors_per_block ||
2763 do_div(io_opt_sectors, cache->sectors_per_block)) {
2764 blk_limits_io_min(limits, 0);
2765 blk_limits_io_opt(limits, cache->sectors_per_block << SECTOR_SHIFT);
2766 }
c6b4fcba
JT
2767 set_discard_limits(cache, limits);
2768}
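/*
 * Worked example (illustrative): with 512-sector cache blocks, a stacked
 * io_opt of 1MB (2048 sectors) is left alone since 2048 is a multiple of
 * 512, whereas 384KB (768 sectors) is overridden because do_div(768, 512)
 * leaves a remainder.
 */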
2769
2770/*----------------------------------------------------------------*/
2771
2772static struct target_type cache_target = {
2773 .name = "cache",
2f14f4b5 2774 .version = {1, 1, 1},
c6b4fcba
JT
2775 .module = THIS_MODULE,
2776 .ctr = cache_ctr,
2777 .dtr = cache_dtr,
2778 .map = cache_map,
2779 .end_io = cache_end_io,
2780 .postsuspend = cache_postsuspend,
2781 .preresume = cache_preresume,
2782 .resume = cache_resume,
2783 .status = cache_status,
2784 .message = cache_message,
2785 .iterate_devices = cache_iterate_devices,
2786 .merge = cache_bvec_merge,
2787 .io_hints = cache_io_hints,
2788};
2789
2790static int __init dm_cache_init(void)
2791{
2792 int r;
2793
2794 r = dm_register_target(&cache_target);
2795 if (r) {
2796 DMERR("cache target registration failed: %d", r);
2797 return r;
2798 }
2799
2800 migration_cache = KMEM_CACHE(dm_cache_migration, 0);
2801 if (!migration_cache) {
2802 dm_unregister_target(&cache_target);
2803 return -ENOMEM;
2804 }
2805
2806 return 0;
2807}
2808
2809static void __exit dm_cache_exit(void)
2810{
2811 dm_unregister_target(&cache_target);
2812 kmem_cache_destroy(migration_cache);
2813}
2814
2815module_init(dm_cache_init);
2816module_exit(dm_cache_exit);
2817
2818MODULE_DESCRIPTION(DM_NAME " cache target");
2819MODULE_AUTHOR("Joe Thornber <ejt@redhat.com>");
2820MODULE_LICENSE("GPL");