Commit | Line | Data |
---|---|---|
f2836352 JT |
1 | /* |
2 | * Copyright (C) 2012 Red Hat. All rights reserved. | |
3 | * | |
4 | * This file is released under the GPL. | |
5 | */ | |
6 | ||
7 | #include "dm-cache-policy.h" | |
8 | #include "dm.h" | |
9 | ||
10 | #include <linux/hash.h> | |
11 | #include <linux/module.h> | |
12 | #include <linux/mutex.h> | |
13 | #include <linux/slab.h> | |
14 | #include <linux/vmalloc.h> | |
15 | ||
16 | #define DM_MSG_PREFIX "cache-policy-mq" | |
f2836352 JT |
17 | |
18 | static struct kmem_cache *mq_entry_cache; | |
19 | ||
20 | /*----------------------------------------------------------------*/ | |
21 | ||
22 | static unsigned next_power(unsigned n, unsigned min) | |
23 | { | |
24 | return roundup_pow_of_two(max(n, min)); | |
25 | } | |
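/*
 * e.g. next_power(1000, 16) returns 1024 and next_power(3, 16) returns 16.
 */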
26 | ||
27 | /*----------------------------------------------------------------*/ | |
28 | ||
f2836352 JT |
29 | /* |
30 | * Large, sequential ios are probably better left on the origin device since | |
31 | * spindles tend to have good bandwidth. | |
32 | * | |
33 | * The io_tracker tries to spot when the io is in one of these sequential | |
34 | * modes. | |
35 | * | |
36 | * Two thresholds used to switch between random and sequential io mode default |
37 | * as follows and can be adjusted via the constructor and message interfaces. |
38 | */ | |
39 | #define RANDOM_THRESHOLD_DEFAULT 4 | |
40 | #define SEQUENTIAL_THRESHOLD_DEFAULT 512 | |
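/*
 * For example, assuming the cache target forwards two-argument messages to
 * the policy's set_config_value() (see mq_set_config_value() below), the
 * thresholds could be tuned at runtime with something like:
 *
 *	dmsetup message <cache-device> 0 sequential_threshold 1024
 *	dmsetup message <cache-device> 0 random_threshold 8
 */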
41 | ||
42 | enum io_pattern { | |
43 | PATTERN_SEQUENTIAL, | |
44 | PATTERN_RANDOM | |
45 | }; | |
46 | ||
47 | struct io_tracker { | |
48 | enum io_pattern pattern; | |
49 | ||
50 | unsigned nr_seq_samples; | |
51 | unsigned nr_rand_samples; | |
52 | unsigned thresholds[2]; | |
53 | ||
54 | dm_oblock_t last_end_oblock; | |
55 | }; | |
56 | ||
57 | static void iot_init(struct io_tracker *t, | |
58 | int sequential_threshold, int random_threshold) | |
59 | { | |
60 | t->pattern = PATTERN_RANDOM; | |
61 | t->nr_seq_samples = 0; | |
62 | t->nr_rand_samples = 0; | |
63 | t->last_end_oblock = 0; | |
64 | t->thresholds[PATTERN_RANDOM] = random_threshold; | |
65 | t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold; | |
66 | } | |
67 | ||
68 | static enum io_pattern iot_pattern(struct io_tracker *t) | |
69 | { | |
70 | return t->pattern; | |
71 | } | |
72 | ||
73 | static void iot_update_stats(struct io_tracker *t, struct bio *bio) | |
74 | { | |
75 | if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) | |
76 | t->nr_seq_samples++; | |
77 | else { | |
78 | /* | |
79 | * Just one non-sequential IO is enough to reset the | |
80 | * counters. | |
81 | */ | |
82 | if (t->nr_seq_samples) { | |
83 | t->nr_seq_samples = 0; | |
84 | t->nr_rand_samples = 0; | |
85 | } | |
86 | ||
87 | t->nr_rand_samples++; | |
88 | } | |
89 | ||
90 | t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); | |
91 | } | |
92 | ||
93 | static void iot_check_for_pattern_switch(struct io_tracker *t) | |
94 | { | |
95 | switch (t->pattern) { | |
96 | case PATTERN_SEQUENTIAL: | |
97 | if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) { | |
98 | t->pattern = PATTERN_RANDOM; | |
99 | t->nr_seq_samples = t->nr_rand_samples = 0; | |
100 | } | |
101 | break; | |
102 | ||
103 | case PATTERN_RANDOM: | |
104 | if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) { | |
105 | t->pattern = PATTERN_SEQUENTIAL; | |
106 | t->nr_seq_samples = t->nr_rand_samples = 0; | |
107 | } | |
108 | break; | |
109 | } | |
110 | } | |
111 | ||
112 | static void iot_examine_bio(struct io_tracker *t, struct bio *bio) | |
113 | { | |
114 | iot_update_stats(t, bio); | |
115 | iot_check_for_pattern_switch(t); | |
116 | } | |
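/*
 * In other words, with the default thresholds roughly 512 contiguous bios in
 * a row are needed before the tracker flips to sequential mode, while a run
 * of just 4 non-contiguous bios drops it back to random mode.
 */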
117 | ||
118 | /*----------------------------------------------------------------*/ | |
119 | ||
120 | ||
121 | /* | |
122 | * This queue is divided up into different levels, allowing us to push |
123 | * entries to the back of any of the levels. Think of it as a partially | |
124 | * sorted queue. | |
125 | */ | |
126 | #define NR_QUEUE_LEVELS 16u | |
127 | ||
128 | struct queue { | |
129 | struct list_head qs[NR_QUEUE_LEVELS]; | |
130 | }; | |
131 | ||
132 | static void queue_init(struct queue *q) | |
133 | { | |
134 | unsigned i; | |
135 | ||
136 | for (i = 0; i < NR_QUEUE_LEVELS; i++) | |
137 | INIT_LIST_HEAD(q->qs + i); | |
138 | } | |
139 | ||
c86c3070 JT |
140 | /* |
141 | * Checks to see if the queue is empty. | |
142 | * FIXME: reduce cpu usage. | |
143 | */ | |
144 | static bool queue_empty(struct queue *q) | |
145 | { | |
146 | unsigned i; | |
147 | ||
148 | for (i = 0; i < NR_QUEUE_LEVELS; i++) | |
149 | if (!list_empty(q->qs + i)) | |
150 | return false; | |
151 | ||
152 | return true; | |
153 | } | |
154 | ||
f2836352 JT |
155 | /* |
156 | * Insert an entry to the back of the given level. | |
157 | */ | |
158 | static void queue_push(struct queue *q, unsigned level, struct list_head *elt) | |
159 | { | |
160 | list_add_tail(elt, q->qs + level); | |
161 | } | |
162 | ||
163 | static void queue_remove(struct list_head *elt) | |
164 | { | |
165 | list_del(elt); | |
166 | } | |
167 | ||
168 | /* | |
169 | * Shifts all entries down one level. This has no effect on the order of |
170 | * the queue. | |
171 | */ | |
172 | static void queue_shift_down(struct queue *q) | |
173 | { | |
174 | unsigned level; | |
175 | ||
176 | for (level = 1; level < NR_QUEUE_LEVELS; level++) | |
177 | list_splice_init(q->qs + level, q->qs + level - 1); | |
178 | } | |
179 | ||
180 | /* | |
181 | * Gives us the oldest entry of the lowest populated level. If the first |
182 | * level is emptied then we shift down one level. | |
183 | */ | |
184 | static struct list_head *queue_pop(struct queue *q) | |
185 | { | |
186 | unsigned level; | |
187 | struct list_head *r; | |
188 | ||
189 | for (level = 0; level < NR_QUEUE_LEVELS; level++) | |
190 | if (!list_empty(q->qs + level)) { | |
191 | r = q->qs[level].next; | |
192 | list_del(r); | |
193 | ||
194 | /* have we just emptied the bottom level? */ | |
195 | if (level == 0 && list_empty(q->qs)) | |
196 | queue_shift_down(q); | |
197 | ||
198 | return r; | |
199 | } | |
200 | ||
201 | return NULL; | |
202 | } | |
203 | ||
204 | static struct list_head *list_pop(struct list_head *lh) | |
205 | { | |
206 | struct list_head *r = lh->next; | |
207 | ||
208 | BUG_ON(!r); | |
209 | list_del_init(r); | |
210 | ||
211 | return r; | |
212 | } | |
213 | ||
214 | /*----------------------------------------------------------------*/ | |
215 | ||
216 | /* | |
217 | * Describes a cache entry. Used in both the cache and the pre_cache. | |
218 | */ | |
219 | struct entry { | |
220 | struct hlist_node hlist; | |
221 | struct list_head list; | |
222 | dm_oblock_t oblock; | |
f2836352 JT |
223 | |
224 | /* | |
225 | * FIXME: pack these better | |
226 | */ | |
01911c19 | 227 | bool dirty:1; |
f2836352 JT |
228 | unsigned hit_count; |
229 | unsigned generation; | |
230 | unsigned tick; | |
231 | }; | |
232 | ||
633618e3 JT |
233 | /* |
234 | * Rather than storing the cblock in an entry, we allocate all entries in | |
235 | * an array, and infer the cblock from the entry position. | |
236 | * | |
237 | * Free entries are linked together into a list. | |
238 | */ | |
239 | struct entry_pool { | |
240 | struct entry *entries, *entries_end; | |
241 | struct list_head free; | |
242 | unsigned nr_allocated; | |
243 | }; | |
244 | ||
245 | static int epool_init(struct entry_pool *ep, unsigned nr_entries) | |
246 | { | |
247 | unsigned i; | |
248 | ||
249 | ep->entries = vzalloc(sizeof(struct entry) * nr_entries); | |
250 | if (!ep->entries) | |
251 | return -ENOMEM; | |
252 | ||
253 | ep->entries_end = ep->entries + nr_entries; | |
254 | ||
255 | INIT_LIST_HEAD(&ep->free); | |
256 | for (i = 0; i < nr_entries; i++) | |
257 | list_add(&ep->entries[i].list, &ep->free); | |
258 | ||
259 | ep->nr_allocated = 0; | |
260 | ||
261 | return 0; | |
262 | } | |
263 | ||
264 | static void epool_exit(struct entry_pool *ep) | |
265 | { | |
266 | vfree(ep->entries); | |
267 | } | |
268 | ||
269 | static struct entry *alloc_entry(struct entry_pool *ep) | |
270 | { | |
271 | struct entry *e; | |
272 | ||
273 | if (list_empty(&ep->free)) | |
274 | return NULL; | |
275 | ||
276 | e = list_entry(list_pop(&ep->free), struct entry, list); | |
277 | INIT_LIST_HEAD(&e->list); | |
278 | INIT_HLIST_NODE(&e->hlist); | |
279 | ep->nr_allocated++; | |
280 | ||
281 | return e; | |
282 | } | |
283 | ||
284 | /* | |
285 | * This assumes the cblock hasn't already been allocated. | |
286 | */ | |
287 | static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock) | |
288 | { | |
289 | struct entry *e = ep->entries + from_cblock(cblock); | |
633618e3 | 290 | |
b8158051 | 291 | list_del_init(&e->list); |
633618e3 JT |
292 | INIT_HLIST_NODE(&e->hlist); |
293 | ep->nr_allocated++; | |
294 | ||
295 | return e; | |
296 | } | |
297 | ||
298 | static void free_entry(struct entry_pool *ep, struct entry *e) | |
299 | { | |
300 | BUG_ON(!ep->nr_allocated); | |
301 | ep->nr_allocated--; | |
302 | INIT_HLIST_NODE(&e->hlist); | |
303 | list_add(&e->list, &ep->free); | |
304 | } | |
305 | ||
532906aa JT |
306 | /* |
307 | * Returns NULL if the entry is free. | |
308 | */ | |
309 | static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock) | |
310 | { | |
311 | struct entry *e = ep->entries + from_cblock(cblock); | |
7b6b2bc9 | 312 | return !hlist_unhashed(&e->hlist) ? e : NULL; |
532906aa JT |
313 | } |
314 | ||
633618e3 JT |
315 | static bool epool_empty(struct entry_pool *ep) |
316 | { | |
317 | return list_empty(&ep->free); | |
318 | } | |
319 | ||
320 | static bool in_pool(struct entry_pool *ep, struct entry *e) | |
321 | { | |
322 | return e >= ep->entries && e < ep->entries_end; | |
323 | } | |
324 | ||
325 | static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e) | |
326 | { | |
327 | return to_cblock(e - ep->entries); | |
328 | } | |
329 | ||
330 | /*----------------------------------------------------------------*/ | |
331 | ||
f2836352 JT |
332 | struct mq_policy { |
333 | struct dm_cache_policy policy; | |
334 | ||
335 | /* protects everything */ | |
336 | struct mutex lock; | |
337 | dm_cblock_t cache_size; | |
338 | struct io_tracker tracker; | |
339 | ||
633618e3 JT |
340 | /* |
341 | * Entries come from two pools, one of pre-cache entries, and one | |
342 | * for the cache proper. | |
343 | */ | |
344 | struct entry_pool pre_cache_pool; | |
345 | struct entry_pool cache_pool; | |
346 | ||
f2836352 | 347 | /* |
01911c19 JT |
348 | * We maintain three queues of entries. The cache proper, |
349 | * consisting of a clean and dirty queue, contains the currently | |
350 | * active mappings, whereas the pre_cache tracks blocks that |
351 | * are being hit frequently and are potential candidates for promotion |
352 | * to the cache. | |
f2836352 JT |
353 | */ |
354 | struct queue pre_cache; | |
01911c19 JT |
355 | struct queue cache_clean; |
356 | struct queue cache_dirty; | |
f2836352 JT |
357 | |
358 | /* | |
359 | * Keeps track of time, incremented by the core. We use this to | |
360 | * avoid attributing multiple hits within the same tick. | |
361 | * | |
362 | * Access to tick_protected should be done with the spin lock held. | |
363 | * It's copied to tick at the start of the map function (within the | |
364 | * mutex). | |
365 | */ | |
366 | spinlock_t tick_lock; | |
367 | unsigned tick_protected; | |
368 | unsigned tick; | |
369 | ||
370 | /* | |
371 | * A count of the number of times the map function has been called | |
372 | * and found an entry in the pre_cache or cache. Currently used to | |
373 | * calculate the generation. | |
374 | */ | |
375 | unsigned hit_count; | |
376 | ||
377 | /* | |
378 | * A generation is a longish period that is used to trigger some | |
379 | * bookkeeping effects, e.g. decrementing hit counts on entries. |
380 | * This is needed to allow the cache to evolve as io patterns | |
381 | * change. | |
382 | */ | |
383 | unsigned generation; | |
384 | unsigned generation_period; /* in lookups (will probably change) */ | |
385 | ||
386 | /* | |
387 | * Entries in the pre_cache whose hit count passes the promotion | |
388 | * threshold move to the cache proper. Working out the correct | |
389 | * value for the promotion_threshold is crucial to this policy. | |
390 | */ | |
391 | unsigned promote_threshold; | |
392 | ||
f2836352 JT |
393 | /* |
394 | * The hash table allows us to quickly find an entry by origin | |
395 | * block. Both pre_cache and cache entries are in here. | |
396 | */ | |
397 | unsigned nr_buckets; | |
398 | dm_block_t hash_bits; | |
399 | struct hlist_head *table; | |
400 | }; | |
401 | ||
f2836352 JT |
402 | /*----------------------------------------------------------------*/ |
403 | ||
404 | /* | |
405 | * Simple hash table implementation. Should replace with the standard hash | |
406 | * table that's making its way upstream. | |
407 | */ | |
408 | static void hash_insert(struct mq_policy *mq, struct entry *e) | |
409 | { | |
410 | unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits); | |
411 | ||
412 | hlist_add_head(&e->hlist, mq->table + h); | |
413 | } | |
414 | ||
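/*
 * Looks up an entry by origin block. On a hit the entry is also moved to
 * the front of its bucket so that frequently used blocks are found quickly.
 */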
415 | static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock) | |
416 | { | |
417 | unsigned h = hash_64(from_oblock(oblock), mq->hash_bits); | |
418 | struct hlist_head *bucket = mq->table + h; | |
419 | struct entry *e; | |
420 | ||
421 | hlist_for_each_entry(e, bucket, hlist) | |
422 | if (e->oblock == oblock) { | |
423 | hlist_del(&e->hlist); | |
424 | hlist_add_head(&e->hlist, bucket); | |
425 | return e; | |
426 | } | |
427 | ||
428 | return NULL; | |
429 | } | |
430 | ||
431 | static void hash_remove(struct entry *e) | |
432 | { | |
433 | hlist_del(&e->hlist); | |
434 | } | |
435 | ||
436 | /*----------------------------------------------------------------*/ | |
437 | ||
f2836352 JT |
438 | static bool any_free_cblocks(struct mq_policy *mq) |
439 | { | |
633618e3 | 440 | return !epool_empty(&mq->cache_pool); |
f2836352 JT |
441 | } |
442 | ||
c86c3070 JT |
443 | static bool any_clean_cblocks(struct mq_policy *mq) |
444 | { | |
445 | return !queue_empty(&mq->cache_clean); | |
446 | } | |
447 | ||
f2836352 JT |
448 | /*----------------------------------------------------------------*/ |
449 | ||
450 | /* | |
451 | * Now we get to the meat of the policy. This section deals with deciding | |
452 | * when to add entries to the pre_cache and cache, and move between |
453 | * them. | |
454 | */ | |
455 | ||
456 | /* | |
457 | * The queue level is based on the log2 of the hit count. | |
458 | */ | |
459 | static unsigned queue_level(struct entry *e) | |
460 | { | |
461 | return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u); | |
462 | } | |
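/*
 * e.g. hit counts 1, 2-3, 4-7, ... map to levels 0, 1, 2, ..., and anything
 * with 2^15 or more hits lands in the top level (NR_QUEUE_LEVELS - 1).
 */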
463 | ||
633618e3 JT |
464 | static bool in_cache(struct mq_policy *mq, struct entry *e) |
465 | { | |
466 | return in_pool(&mq->cache_pool, e); | |
467 | } | |
468 | ||
f2836352 JT |
469 | /* |
470 | * Inserts the entry into the pre_cache or the cache. Ensures the cache | |
633618e3 JT |
471 | * block is marked as allocated if necessary. Inserts into the hash table. |
472 | * Sets the tick which records when the entry was last moved about. | |
f2836352 JT |
473 | */ |
474 | static void push(struct mq_policy *mq, struct entry *e) | |
475 | { | |
476 | e->tick = mq->tick; | |
477 | hash_insert(mq, e); | |
478 | ||
633618e3 | 479 | if (in_cache(mq, e)) |
01911c19 JT |
480 | queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean, |
481 | queue_level(e), &e->list); | |
633618e3 | 482 | else |
f2836352 JT |
483 | queue_push(&mq->pre_cache, queue_level(e), &e->list); |
484 | } | |
485 | ||
486 | /* | |
487 | * Removes an entry from pre_cache or cache. Removes from the hash table. | |
f2836352 JT |
488 | */ |
489 | static void del(struct mq_policy *mq, struct entry *e) | |
490 | { | |
491 | queue_remove(&e->list); | |
492 | hash_remove(e); | |
f2836352 JT |
493 | } |
494 | ||
495 | /* | |
496 | * Like del, except it removes the first entry in the queue (ie. the least | |
497 | * recently used). | |
498 | */ | |
499 | static struct entry *pop(struct mq_policy *mq, struct queue *q) | |
500 | { | |
0184b44e JT |
501 | struct entry *e; |
502 | struct list_head *h = queue_pop(q); | |
f2836352 | 503 | |
0184b44e JT |
504 | if (!h) |
505 | return NULL; | |
f2836352 | 506 | |
0184b44e JT |
507 | e = container_of(h, struct entry, list); |
508 | hash_remove(e); | |
f2836352 JT |
509 | |
510 | return e; | |
511 | } | |
512 | ||
513 | /* | |
514 | * Has this entry already been updated? | |
515 | */ | |
516 | static bool updated_this_tick(struct mq_policy *mq, struct entry *e) | |
517 | { | |
518 | return mq->tick == e->tick; | |
519 | } | |
520 | ||
521 | /* | |
522 | * The promotion threshold is adjusted every generation. As are the counts | |
523 | * of the entries. | |
524 | * | |
525 | * At the moment the threshold is taken by averaging the hit counts of some | |
01911c19 JT |
526 | * of the entries in the cache (the first 20 entries across all levels in |
527 | * ascending order, giving preference to the clean entries at each level). | |
f2836352 JT |
528 | * |
529 | * We can be much cleverer than this though. For example, each promotion | |
530 | * could bump up the threshold helping to prevent churn. Much more to do | |
531 | * here. | |
532 | */ | |
533 | ||
534 | #define MAX_TO_AVERAGE 20 | |
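/*
 * A worked example: if the 20 sampled entries have 45 hits between them,
 * check_generation() below sets promote_threshold to 45 / 20 rounded up,
 * i.e. 3.
 */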
535 | ||
536 | static void check_generation(struct mq_policy *mq) | |
537 | { | |
538 | unsigned total = 0, nr = 0, count = 0, level; | |
539 | struct list_head *head; | |
540 | struct entry *e; | |
541 | ||
633618e3 | 542 | if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) { |
f2836352 JT |
543 | mq->hit_count = 0; |
544 | mq->generation++; | |
545 | ||
546 | for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) { | |
01911c19 JT |
547 | head = mq->cache_clean.qs + level; |
548 | list_for_each_entry(e, head, list) { | |
549 | nr++; | |
550 | total += e->hit_count; | |
551 | ||
552 | if (++count >= MAX_TO_AVERAGE) | |
553 | break; | |
554 | } | |
555 | ||
556 | head = mq->cache_dirty.qs + level; | |
f2836352 JT |
557 | list_for_each_entry(e, head, list) { |
558 | nr++; | |
559 | total += e->hit_count; | |
560 | ||
561 | if (++count >= MAX_TO_AVERAGE) | |
562 | break; | |
563 | } | |
564 | } | |
565 | ||
566 | mq->promote_threshold = nr ? total / nr : 1; | |
567 | if (mq->promote_threshold * nr < total) | |
568 | mq->promote_threshold++; | |
569 | } | |
570 | } | |
571 | ||
572 | /* | |
573 | * Whenever we use an entry we bump up its hit counter, and push it to |
574 | * the back of its current level. |
575 | */ | |
576 | static void requeue_and_update_tick(struct mq_policy *mq, struct entry *e) | |
577 | { | |
578 | if (updated_this_tick(mq, e)) | |
579 | return; | |
580 | ||
581 | e->hit_count++; | |
582 | mq->hit_count++; | |
583 | check_generation(mq); | |
584 | ||
585 | /* generation adjustment, to stop the counts increasing forever. */ | |
586 | /* FIXME: divide? */ | |
587 | /* e->hit_count -= min(e->hit_count - 1, mq->generation - e->generation); */ | |
588 | e->generation = mq->generation; | |
589 | ||
590 | del(mq, e); | |
591 | push(mq, e); | |
592 | } | |
593 | ||
594 | /* | |
595 | * Demote the least recently used entry from the cache to the pre_cache. | |
596 | * Returns the new cache entry to use, and the old origin block it was | |
597 | * mapped to. | |
598 | * | |
599 | * We drop the hit count on the demoted entry back to 1 to stop it bouncing | |
600 | * straight back into the cache if it's subsequently hit. There are | |
601 | * various options here, and more experimentation would be good: | |
602 | * | |
603 | * - just forget about the demoted entry completely (ie. don't insert it | |
604 | *   into the pre_cache). |
605 | * - divide the hit count rather than setting it to some hard coded value. |
606 | * - set the hit count to a hard coded value other than 1, eg, is it better | |
607 | * if it goes in at level 2? | |
608 | */ | |
633618e3 | 609 | static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock) |
f2836352 | 610 | { |
01911c19 | 611 | struct entry *demoted = pop(mq, &mq->cache_clean); |
f2836352 | 612 | |
01911c19 JT |
613 | if (!demoted) |
614 | /* | |
615 | * We could get a block from mq->cache_dirty, but that | |
616 | * would add extra latency to the triggering bio as it | |
617 | * waits for the writeback. Better to not promote this | |
618 | * time and hope there's a clean block next time this block | |
619 | * is hit. | |
620 | */ | |
621 | return -ENOSPC; | |
622 | ||
f2836352 | 623 | *oblock = demoted->oblock; |
633618e3 JT |
624 | free_entry(&mq->cache_pool, demoted); |
625 | ||
626 | /* | |
627 | * We used to put the demoted block into the pre-cache, but I think | |
628 | * it's simpler to just let it work its way up from zero again. |
629 | * Stops blocks flickering in and out of the cache. | |
630 | */ | |
f2836352 | 631 | |
01911c19 | 632 | return 0; |
f2836352 JT |
633 | } |
634 | ||
635 | /* | |
636 | * We modify the basic promotion_threshold depending on the specific io. | |
637 | * | |
638 | * If the origin block has been discarded then there's no cost to copy it | |
639 | * to the cache. | |
640 | * | |
641 | * We bias towards reads, since they can be demoted at no cost if they | |
642 | * haven't been dirtied. | |
643 | */ | |
644 | #define DISCARDED_PROMOTE_THRESHOLD 1 | |
645 | #define READ_PROMOTE_THRESHOLD 4 | |
646 | #define WRITE_PROMOTE_THRESHOLD 8 | |
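/*
 * So, for example, a read of a block that hasn't been discarded must clock
 * up promote_threshold + 4 hits in the pre_cache before it is promoted,
 * and a write promote_threshold + 8.
 */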
647 | ||
648 | static unsigned adjusted_promote_threshold(struct mq_policy *mq, | |
649 | bool discarded_oblock, int data_dir) | |
650 | { | |
c86c3070 JT |
651 | if (data_dir == READ) |
652 | return mq->promote_threshold + READ_PROMOTE_THRESHOLD; | |
653 | ||
654 | if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) { | |
f2836352 JT |
655 | /* |
656 | * We don't need to do any copying at all, so give this a | |
c86c3070 | 657 | * very low threshold. |
f2836352 JT |
658 | */ |
659 | return DISCARDED_PROMOTE_THRESHOLD; | |
c86c3070 | 660 | } |
f2836352 | 661 | |
c86c3070 | 662 | return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD; |
f2836352 JT |
663 | } |
664 | ||
665 | static bool should_promote(struct mq_policy *mq, struct entry *e, | |
666 | bool discarded_oblock, int data_dir) | |
667 | { | |
668 | return e->hit_count >= | |
669 | adjusted_promote_threshold(mq, discarded_oblock, data_dir); | |
670 | } | |
671 | ||
672 | static int cache_entry_found(struct mq_policy *mq, | |
673 | struct entry *e, | |
674 | struct policy_result *result) | |
675 | { | |
676 | requeue_and_update_tick(mq, e); | |
677 | ||
633618e3 | 678 | if (in_cache(mq, e)) { |
f2836352 | 679 | result->op = POLICY_HIT; |
633618e3 | 680 | result->cblock = infer_cblock(&mq->cache_pool, e); |
f2836352 JT |
681 | } |
682 | ||
683 | return 0; | |
684 | } | |
685 | ||
686 | /* | |
0184b44e | 687 | * Moves an entry from the pre_cache to the cache. The main work is |
f2836352 JT |
688 | * finding which cache block to use. |
689 | */ | |
690 | static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e, | |
691 | struct policy_result *result) | |
692 | { | |
01911c19 | 693 | int r; |
633618e3 | 694 | struct entry *new_e; |
f2836352 | 695 | |
633618e3 JT |
696 | /* Ensure there's a free cblock in the cache */ |
697 | if (epool_empty(&mq->cache_pool)) { | |
f2836352 | 698 | result->op = POLICY_REPLACE; |
633618e3 | 699 | r = demote_cblock(mq, &result->old_oblock); |
01911c19 JT |
700 | if (r) { |
701 | result->op = POLICY_MISS; | |
702 | return 0; | |
703 | } | |
f2836352 JT |
704 | } else |
705 | result->op = POLICY_NEW; | |
706 | ||
633618e3 JT |
707 | new_e = alloc_entry(&mq->cache_pool); |
708 | BUG_ON(!new_e); | |
709 | ||
710 | new_e->oblock = e->oblock; | |
711 | new_e->dirty = false; | |
712 | new_e->hit_count = e->hit_count; | |
713 | new_e->generation = e->generation; | |
714 | new_e->tick = e->tick; | |
f2836352 JT |
715 | |
716 | del(mq, e); | |
633618e3 JT |
717 | free_entry(&mq->pre_cache_pool, e); |
718 | push(mq, new_e); | |
719 | ||
720 | result->cblock = infer_cblock(&mq->cache_pool, new_e); | |
f2836352 JT |
721 | |
722 | return 0; | |
723 | } | |
724 | ||
725 | static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e, | |
726 | bool can_migrate, bool discarded_oblock, | |
727 | int data_dir, struct policy_result *result) | |
728 | { | |
729 | int r = 0; | |
730 | bool updated = updated_this_tick(mq, e); | |
731 | ||
f2836352 | 732 | if ((!discarded_oblock && updated) || |
af95e7a6 JT |
733 | !should_promote(mq, e, discarded_oblock, data_dir)) { |
734 | requeue_and_update_tick(mq, e); | |
f2836352 | 735 | result->op = POLICY_MISS; |
af95e7a6 JT |
736 | |
737 | } else if (!can_migrate) | |
f2836352 | 738 | r = -EWOULDBLOCK; |
af95e7a6 JT |
739 | |
740 | else { | |
741 | requeue_and_update_tick(mq, e); | |
f2836352 | 742 | r = pre_cache_to_cache(mq, e, result); |
af95e7a6 | 743 | } |
f2836352 JT |
744 | |
745 | return r; | |
746 | } | |
747 | ||
748 | static void insert_in_pre_cache(struct mq_policy *mq, | |
749 | dm_oblock_t oblock) | |
750 | { | |
633618e3 | 751 | struct entry *e = alloc_entry(&mq->pre_cache_pool); |
f2836352 JT |
752 | |
753 | if (!e) | |
754 | /* | |
755 | * There's no spare entry structure, so we grab the least | |
756 | * used one from the pre_cache. | |
757 | */ | |
758 | e = pop(mq, &mq->pre_cache); | |
759 | ||
760 | if (unlikely(!e)) { | |
761 | DMWARN("couldn't pop from pre cache"); | |
762 | return; | |
763 | } | |
764 | ||
633618e3 JT |
765 | e->dirty = false; |
766 | e->oblock = oblock; | |
767 | e->hit_count = 1; | |
768 | e->generation = mq->generation; | |
769 | push(mq, e); | |
f2836352 JT |
770 | } |
771 | ||
772 | static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock, | |
773 | struct policy_result *result) | |
774 | { | |
c86c3070 | 775 | int r; |
f2836352 | 776 | struct entry *e; |
f2836352 | 777 | |
633618e3 JT |
778 | if (epool_empty(&mq->cache_pool)) { |
779 | result->op = POLICY_REPLACE; | |
780 | r = demote_cblock(mq, &result->old_oblock); | |
c86c3070 JT |
781 | if (unlikely(r)) { |
782 | result->op = POLICY_MISS; | |
783 | insert_in_pre_cache(mq, oblock); | |
784 | return; | |
785 | } | |
f2836352 | 786 | |
c86c3070 JT |
787 | /* |
788 | * This will always succeed, since we've just demoted. | |
789 | */ | |
633618e3 JT |
790 | e = alloc_entry(&mq->cache_pool); |
791 | BUG_ON(!e); | |
c86c3070 JT |
792 | |
793 | } else { | |
633618e3 | 794 | e = alloc_entry(&mq->cache_pool); |
c86c3070 | 795 | result->op = POLICY_NEW; |
f2836352 JT |
796 | } |
797 | ||
798 | e->oblock = oblock; | |
01911c19 | 799 | e->dirty = false; |
f2836352 JT |
800 | e->hit_count = 1; |
801 | e->generation = mq->generation; | |
802 | push(mq, e); | |
803 | ||
633618e3 | 804 | result->cblock = infer_cblock(&mq->cache_pool, e); |
f2836352 JT |
805 | } |
806 | ||
807 | static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock, | |
808 | bool can_migrate, bool discarded_oblock, | |
809 | int data_dir, struct policy_result *result) | |
810 | { | |
811 | if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) == 1) { | |
812 | if (can_migrate) | |
813 | insert_in_cache(mq, oblock, result); | |
814 | else | |
815 | return -EWOULDBLOCK; | |
816 | } else { | |
817 | insert_in_pre_cache(mq, oblock); | |
818 | result->op = POLICY_MISS; | |
819 | } | |
820 | ||
821 | return 0; | |
822 | } | |
823 | ||
824 | /* | |
825 | * Looks the oblock up in the hash table, then decides whether to put it |
826 | * in the pre_cache or the cache, etc. |
827 | */ | |
828 | static int map(struct mq_policy *mq, dm_oblock_t oblock, | |
829 | bool can_migrate, bool discarded_oblock, | |
830 | int data_dir, struct policy_result *result) | |
831 | { | |
832 | int r = 0; | |
833 | struct entry *e = hash_lookup(mq, oblock); | |
834 | ||
633618e3 | 835 | if (e && in_cache(mq, e)) |
f2836352 | 836 | r = cache_entry_found(mq, e, result); |
633618e3 | 837 | |
f2836352 JT |
838 | else if (iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL) |
839 | result->op = POLICY_MISS; | |
633618e3 | 840 | |
f2836352 JT |
841 | else if (e) |
842 | r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock, | |
843 | data_dir, result); | |
633618e3 | 844 | |
f2836352 JT |
845 | else |
846 | r = no_entry_found(mq, oblock, can_migrate, discarded_oblock, | |
847 | data_dir, result); | |
848 | ||
849 | if (r == -EWOULDBLOCK) | |
850 | result->op = POLICY_MISS; | |
851 | ||
852 | return r; | |
853 | } | |
854 | ||
855 | /*----------------------------------------------------------------*/ | |
856 | ||
857 | /* | |
858 | * Public interface, via the policy struct. See dm-cache-policy.h for a | |
859 | * description of these. | |
860 | */ | |
861 | ||
862 | static struct mq_policy *to_mq_policy(struct dm_cache_policy *p) | |
863 | { | |
864 | return container_of(p, struct mq_policy, policy); | |
865 | } | |
866 | ||
867 | static void mq_destroy(struct dm_cache_policy *p) | |
868 | { | |
869 | struct mq_policy *mq = to_mq_policy(p); | |
870 | ||
f2836352 | 871 | kfree(mq->table); |
633618e3 JT |
872 | epool_exit(&mq->cache_pool); |
873 | epool_exit(&mq->pre_cache_pool); | |
f2836352 JT |
874 | kfree(mq); |
875 | } | |
876 | ||
877 | static void copy_tick(struct mq_policy *mq) | |
878 | { | |
879 | unsigned long flags; | |
880 | ||
881 | spin_lock_irqsave(&mq->tick_lock, flags); | |
882 | mq->tick = mq->tick_protected; | |
883 | spin_unlock_irqrestore(&mq->tick_lock, flags); | |
884 | } | |
885 | ||
886 | static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock, | |
887 | bool can_block, bool can_migrate, bool discarded_oblock, | |
888 | struct bio *bio, struct policy_result *result) | |
889 | { | |
890 | int r; | |
891 | struct mq_policy *mq = to_mq_policy(p); | |
892 | ||
893 | result->op = POLICY_MISS; | |
894 | ||
895 | if (can_block) | |
896 | mutex_lock(&mq->lock); | |
897 | else if (!mutex_trylock(&mq->lock)) | |
898 | return -EWOULDBLOCK; | |
899 | ||
900 | copy_tick(mq); | |
901 | ||
902 | iot_examine_bio(&mq->tracker, bio); | |
903 | r = map(mq, oblock, can_migrate, discarded_oblock, | |
904 | bio_data_dir(bio), result); | |
905 | ||
906 | mutex_unlock(&mq->lock); | |
907 | ||
908 | return r; | |
909 | } | |
910 | ||
911 | static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock) | |
912 | { | |
913 | int r; | |
914 | struct mq_policy *mq = to_mq_policy(p); | |
915 | struct entry *e; | |
916 | ||
917 | if (!mutex_trylock(&mq->lock)) | |
918 | return -EWOULDBLOCK; | |
919 | ||
920 | e = hash_lookup(mq, oblock); | |
633618e3 JT |
921 | if (e && in_cache(mq, e)) { |
922 | *cblock = infer_cblock(&mq->cache_pool, e); | |
f2836352 JT |
923 | r = 0; |
924 | } else | |
925 | r = -ENOENT; | |
926 | ||
927 | mutex_unlock(&mq->lock); | |
928 | ||
929 | return r; | |
930 | } | |
931 | ||
633618e3 | 932 | static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set) |
01911c19 | 933 | { |
01911c19 JT |
934 | struct entry *e; |
935 | ||
01911c19 | 936 | e = hash_lookup(mq, oblock); |
633618e3 | 937 | BUG_ON(!e || !in_cache(mq, e)); |
01911c19 | 938 | |
633618e3 JT |
939 | del(mq, e); |
940 | e->dirty = set; | |
941 | push(mq, e); | |
01911c19 JT |
942 | } |
943 | ||
944 | static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock) | |
945 | { | |
633618e3 JT |
946 | struct mq_policy *mq = to_mq_policy(p); |
947 | ||
948 | mutex_lock(&mq->lock); | |
949 | __mq_set_clear_dirty(mq, oblock, true); | |
950 | mutex_unlock(&mq->lock); | |
01911c19 JT |
951 | } |
952 | ||
953 | static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock) | |
954 | { | |
633618e3 JT |
955 | struct mq_policy *mq = to_mq_policy(p); |
956 | ||
957 | mutex_lock(&mq->lock); | |
958 | __mq_set_clear_dirty(mq, oblock, false); | |
959 | mutex_unlock(&mq->lock); | |
01911c19 JT |
960 | } |
961 | ||
f2836352 JT |
962 | static int mq_load_mapping(struct dm_cache_policy *p, |
963 | dm_oblock_t oblock, dm_cblock_t cblock, | |
964 | uint32_t hint, bool hint_valid) | |
965 | { | |
966 | struct mq_policy *mq = to_mq_policy(p); | |
967 | struct entry *e; | |
968 | ||
633618e3 | 969 | e = alloc_particular_entry(&mq->cache_pool, cblock); |
f2836352 | 970 | e->oblock = oblock; |
01911c19 | 971 | e->dirty = false; /* this gets corrected in a minute */ |
f2836352 JT |
972 | e->hit_count = hint_valid ? hint : 1; |
973 | e->generation = mq->generation; | |
974 | push(mq, e); | |
975 | ||
976 | return 0; | |
977 | } | |
978 | ||
633618e3 JT |
979 | static int mq_save_hints(struct mq_policy *mq, struct queue *q, |
980 | policy_walk_fn fn, void *context) | |
981 | { | |
982 | int r; | |
983 | unsigned level; | |
984 | struct entry *e; | |
985 | ||
986 | for (level = 0; level < NR_QUEUE_LEVELS; level++) | |
987 | list_for_each_entry(e, q->qs + level, list) { | |
988 | r = fn(context, infer_cblock(&mq->cache_pool, e), | |
989 | e->oblock, e->hit_count); | |
990 | if (r) | |
991 | return r; | |
992 | } | |
993 | ||
994 | return 0; | |
995 | } | |
996 | ||
f2836352 JT |
997 | static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn, |
998 | void *context) | |
999 | { | |
1000 | struct mq_policy *mq = to_mq_policy(p); | |
1001 | int r = 0; | |
f2836352 JT |
1002 | |
1003 | mutex_lock(&mq->lock); | |
1004 | ||
633618e3 JT |
1005 | r = mq_save_hints(mq, &mq->cache_clean, fn, context); |
1006 | if (!r) | |
1007 | r = mq_save_hints(mq, &mq->cache_dirty, fn, context); | |
f2836352 | 1008 | |
f2836352 JT |
1009 | mutex_unlock(&mq->lock); |
1010 | ||
1011 | return r; | |
1012 | } | |
1013 | ||
633618e3 | 1014 | static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock) |
f2836352 | 1015 | { |
b936bf8b GU |
1016 | struct entry *e; |
1017 | ||
b936bf8b | 1018 | e = hash_lookup(mq, oblock); |
633618e3 | 1019 | BUG_ON(!e || !in_cache(mq, e)); |
f2836352 JT |
1020 | |
1021 | del(mq, e); | |
633618e3 JT |
1022 | free_entry(&mq->cache_pool, e); |
1023 | } | |
1024 | ||
1025 | static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock) | |
1026 | { | |
1027 | struct mq_policy *mq = to_mq_policy(p); | |
f2836352 | 1028 | |
633618e3 JT |
1029 | mutex_lock(&mq->lock); |
1030 | __remove_mapping(mq, oblock); | |
f2836352 JT |
1031 | mutex_unlock(&mq->lock); |
1032 | } | |
1033 | ||
532906aa JT |
1034 | static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock) |
1035 | { | |
1036 | struct entry *e = epool_find(&mq->cache_pool, cblock); | |
1037 | ||
1038 | if (!e) | |
1039 | return -ENODATA; | |
1040 | ||
1041 | del(mq, e); | |
1042 | free_entry(&mq->cache_pool, e); | |
1043 | ||
1044 | return 0; | |
1045 | } | |
1046 | ||
1047 | static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock) | |
1048 | { | |
1049 | int r; | |
1050 | struct mq_policy *mq = to_mq_policy(p); | |
1051 | ||
1052 | mutex_lock(&mq->lock); | |
1053 | r = __remove_cblock(mq, cblock); | |
1054 | mutex_unlock(&mq->lock); | |
1055 | ||
1056 | return r; | |
1057 | } | |
1058 | ||
01911c19 JT |
1059 | static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock, |
1060 | dm_cblock_t *cblock) | |
1061 | { | |
1062 | struct entry *e = pop(mq, &mq->cache_dirty); | |
1063 | ||
1064 | if (!e) | |
1065 | return -ENODATA; | |
1066 | ||
1067 | *oblock = e->oblock; | |
633618e3 | 1068 | *cblock = infer_cblock(&mq->cache_pool, e); |
01911c19 JT |
1069 | e->dirty = false; |
1070 | push(mq, e); | |
1071 | ||
1072 | return 0; | |
1073 | } | |
1074 | ||
1075 | static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock, | |
1076 | dm_cblock_t *cblock) | |
1077 | { | |
1078 | int r; | |
1079 | struct mq_policy *mq = to_mq_policy(p); | |
1080 | ||
1081 | mutex_lock(&mq->lock); | |
1082 | r = __mq_writeback_work(mq, oblock, cblock); | |
1083 | mutex_unlock(&mq->lock); | |
1084 | ||
1085 | return r; | |
1086 | } | |
1087 | ||
633618e3 JT |
1088 | static void __force_mapping(struct mq_policy *mq, |
1089 | dm_oblock_t current_oblock, dm_oblock_t new_oblock) | |
f2836352 JT |
1090 | { |
1091 | struct entry *e = hash_lookup(mq, current_oblock); | |
1092 | ||
633618e3 JT |
1093 | if (e && in_cache(mq, e)) { |
1094 | del(mq, e); | |
1095 | e->oblock = new_oblock; | |
1096 | e->dirty = true; | |
1097 | push(mq, e); | |
1098 | } | |
f2836352 JT |
1099 | } |
1100 | ||
1101 | static void mq_force_mapping(struct dm_cache_policy *p, | |
1102 | dm_oblock_t current_oblock, dm_oblock_t new_oblock) | |
1103 | { | |
1104 | struct mq_policy *mq = to_mq_policy(p); | |
1105 | ||
1106 | mutex_lock(&mq->lock); | |
633618e3 | 1107 | __force_mapping(mq, current_oblock, new_oblock); |
f2836352 JT |
1108 | mutex_unlock(&mq->lock); |
1109 | } | |
1110 | ||
1111 | static dm_cblock_t mq_residency(struct dm_cache_policy *p) | |
1112 | { | |
99ba2ae4 | 1113 | dm_cblock_t r; |
f2836352 JT |
1114 | struct mq_policy *mq = to_mq_policy(p); |
1115 | ||
99ba2ae4 | 1116 | mutex_lock(&mq->lock); |
633618e3 | 1117 | r = to_cblock(mq->cache_pool.nr_allocated); |
99ba2ae4 JT |
1118 | mutex_unlock(&mq->lock); |
1119 | ||
1120 | return r; | |
f2836352 JT |
1121 | } |
1122 | ||
1123 | static void mq_tick(struct dm_cache_policy *p) | |
1124 | { | |
1125 | struct mq_policy *mq = to_mq_policy(p); | |
1126 | unsigned long flags; | |
1127 | ||
1128 | spin_lock_irqsave(&mq->tick_lock, flags); | |
1129 | mq->tick_protected++; | |
1130 | spin_unlock_irqrestore(&mq->tick_lock, flags); | |
1131 | } | |
1132 | ||
1133 | static int mq_set_config_value(struct dm_cache_policy *p, | |
1134 | const char *key, const char *value) | |
1135 | { | |
1136 | struct mq_policy *mq = to_mq_policy(p); | |
1137 | enum io_pattern pattern; | |
1138 | unsigned long tmp; | |
1139 | ||
1140 | if (!strcasecmp(key, "random_threshold")) | |
1141 | pattern = PATTERN_RANDOM; | |
1142 | else if (!strcasecmp(key, "sequential_threshold")) | |
1143 | pattern = PATTERN_SEQUENTIAL; | |
1144 | else | |
1145 | return -EINVAL; | |
1146 | ||
1147 | if (kstrtoul(value, 10, &tmp)) | |
1148 | return -EINVAL; | |
1149 | ||
1150 | mq->tracker.thresholds[pattern] = tmp; | |
1151 | ||
1152 | return 0; | |
1153 | } | |
1154 | ||
1155 | static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen) | |
1156 | { | |
1157 | ssize_t sz = 0; | |
1158 | struct mq_policy *mq = to_mq_policy(p); | |
1159 | ||
1160 | DMEMIT("4 random_threshold %u sequential_threshold %u", | |
1161 | mq->tracker.thresholds[PATTERN_RANDOM], | |
1162 | mq->tracker.thresholds[PATTERN_SEQUENTIAL]); | |
1163 | ||
1164 | return 0; | |
1165 | } | |
1166 | ||
1167 | /* Init the policy plugin interface function pointers. */ | |
1168 | static void init_policy_functions(struct mq_policy *mq) | |
1169 | { | |
1170 | mq->policy.destroy = mq_destroy; | |
1171 | mq->policy.map = mq_map; | |
1172 | mq->policy.lookup = mq_lookup; | |
01911c19 JT |
1173 | mq->policy.set_dirty = mq_set_dirty; |
1174 | mq->policy.clear_dirty = mq_clear_dirty; | |
f2836352 JT |
1175 | mq->policy.load_mapping = mq_load_mapping; |
1176 | mq->policy.walk_mappings = mq_walk_mappings; | |
1177 | mq->policy.remove_mapping = mq_remove_mapping; | |
532906aa | 1178 | mq->policy.remove_cblock = mq_remove_cblock; |
01911c19 | 1179 | mq->policy.writeback_work = mq_writeback_work; |
f2836352 JT |
1180 | mq->policy.force_mapping = mq_force_mapping; |
1181 | mq->policy.residency = mq_residency; | |
1182 | mq->policy.tick = mq_tick; | |
1183 | mq->policy.emit_config_values = mq_emit_config_values; | |
1184 | mq->policy.set_config_value = mq_set_config_value; | |
1185 | } | |
1186 | ||
1187 | static struct dm_cache_policy *mq_create(dm_cblock_t cache_size, | |
1188 | sector_t origin_size, | |
1189 | sector_t cache_block_size) | |
1190 | { | |
f2836352 JT |
1191 | struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL); |
1192 | ||
1193 | if (!mq) | |
1194 | return NULL; | |
1195 | ||
1196 | init_policy_functions(mq); | |
1197 | iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT); | |
f2836352 | 1198 | mq->cache_size = cache_size; |
633618e3 JT |
1199 | |
1200 | if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) { | |
1201 | DMERR("couldn't initialize pool of pre-cache entries"); | |
1202 | goto bad_pre_cache_init; | |
1203 | } | |
1204 | ||
1205 | if (epool_init(&mq->cache_pool, from_cblock(cache_size))) { | |
1206 | DMERR("couldn't initialize pool of cache entries"); | |
1207 | goto bad_cache_init; | |
1208 | } | |
1209 | ||
f2836352 JT |
1210 | mq->tick_protected = 0; |
1211 | mq->tick = 0; | |
1212 | mq->hit_count = 0; | |
1213 | mq->generation = 0; | |
1214 | mq->promote_threshold = 0; | |
1215 | mutex_init(&mq->lock); | |
1216 | spin_lock_init(&mq->tick_lock); | |
f2836352 JT |
1217 | |
1218 | queue_init(&mq->pre_cache); | |
01911c19 JT |
1219 | queue_init(&mq->cache_clean); |
1220 | queue_init(&mq->cache_dirty); | |
1221 | ||
f2836352 JT |
1222 | mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U); |
1223 | ||
f2836352 JT |
1224 | mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16); |
1225 | mq->hash_bits = ffs(mq->nr_buckets) - 1; | |
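	/* e.g. a cache with 10000 blocks gets 8192 buckets and 13 hash bits */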
1226 | mq->table = kzalloc(sizeof(*mq->table) * mq->nr_buckets, GFP_KERNEL); | |
1227 | if (!mq->table) | |
1228 | goto bad_alloc_table; | |
1229 | ||
f2836352 JT |
1230 | return &mq->policy; |
1231 | ||
f2836352 | 1232 | bad_alloc_table: |
633618e3 JT |
1233 | epool_exit(&mq->cache_pool); |
1234 | bad_cache_init: | |
1235 | epool_exit(&mq->pre_cache_pool); | |
1236 | bad_pre_cache_init: | |
f2836352 JT |
1237 | kfree(mq); |
1238 | ||
1239 | return NULL; | |
1240 | } | |
1241 | ||
1242 | /*----------------------------------------------------------------*/ | |
1243 | ||
1244 | static struct dm_cache_policy_type mq_policy_type = { | |
1245 | .name = "mq", | |
633618e3 | 1246 | .version = {1, 1, 0}, |
f2836352 JT |
1247 | .hint_size = 4, |
1248 | .owner = THIS_MODULE, | |
1249 | .create = mq_create | |
1250 | }; | |
1251 | ||
1252 | static struct dm_cache_policy_type default_policy_type = { | |
1253 | .name = "default", | |
633618e3 | 1254 | .version = {1, 1, 0}, |
f2836352 JT |
1255 | .hint_size = 4, |
1256 | .owner = THIS_MODULE, | |
1257 | .create = mq_create | |
1258 | }; | |
1259 | ||
1260 | static int __init mq_init(void) | |
1261 | { | |
1262 | int r; | |
1263 | ||
1264 | mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry", | |
1265 | sizeof(struct entry), | |
1266 | __alignof__(struct entry), | |
1267 | 0, NULL); | |
1268 | if (!mq_entry_cache) | |
1269 | goto bad; | |
1270 | ||
1271 | r = dm_cache_policy_register(&mq_policy_type); | |
1272 | if (r) { | |
1273 | DMERR("register failed %d", r); | |
1274 | goto bad_register_mq; | |
1275 | } | |
1276 | ||
1277 | r = dm_cache_policy_register(&default_policy_type); | |
1278 | if (!r) { | |
4e7f506f MS |
1279 | DMINFO("version %u.%u.%u loaded", |
1280 | mq_policy_type.version[0], | |
1281 | mq_policy_type.version[1], | |
1282 | mq_policy_type.version[2]); | |
f2836352 JT |
1283 | return 0; |
1284 | } | |
1285 | ||
1286 | DMERR("register failed (as default) %d", r); | |
1287 | ||
1288 | dm_cache_policy_unregister(&mq_policy_type); | |
1289 | bad_register_mq: | |
1290 | kmem_cache_destroy(mq_entry_cache); | |
1291 | bad: | |
1292 | return -ENOMEM; | |
1293 | } | |
1294 | ||
1295 | static void __exit mq_exit(void) | |
1296 | { | |
1297 | dm_cache_policy_unregister(&mq_policy_type); | |
1298 | dm_cache_policy_unregister(&default_policy_type); | |
1299 | ||
1300 | kmem_cache_destroy(mq_entry_cache); | |
1301 | } | |
1302 | ||
1303 | module_init(mq_init); | |
1304 | module_exit(mq_exit); | |
1305 | ||
1306 | MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>"); | |
1307 | MODULE_LICENSE("GPL"); | |
1308 | MODULE_DESCRIPTION("mq cache policy"); | |
1309 | ||
1310 | MODULE_ALIAS("dm-cache-default"); |