/*
 * Copyright (C) 2012 Red Hat. All rights reserved.
 *
 * This file is released under the GPL.
 */

#include "dm-cache-policy.h"
#include "dm.h"

#include <linux/hash.h>
#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#define DM_MSG_PREFIX "cache-policy-mq"
static struct kmem_cache *mq_entry_cache;

/*----------------------------------------------------------------*/

static unsigned next_power(unsigned n, unsigned min)
{
        return roundup_pow_of_two(max(n, min));
}

/*----------------------------------------------------------------*/
/*
 * Large, sequential ios are probably better left on the origin device since
 * spindles tend to have good bandwidth.
 *
 * The io_tracker tries to spot when the io is in one of these sequential
 * modes.
 *
 * The two thresholds for switching between random and sequential io mode
 * default as follows, and can be adjusted via the constructor and message
 * interfaces.
 */
#define RANDOM_THRESHOLD_DEFAULT 4
#define SEQUENTIAL_THRESHOLD_DEFAULT 512
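/*
 * For example, the thresholds can be tuned at runtime via the message
 * interface with something like (device name illustrative):
 *
 *     dmsetup message my-cache 0 sequential_threshold 1024
 *     dmsetup message my-cache 0 random_threshold 8
 *
 * See mq_set_config_value() below for the accepted keys.
 */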
enum io_pattern {
        PATTERN_SEQUENTIAL,
        PATTERN_RANDOM
};

struct io_tracker {
        enum io_pattern pattern;

        unsigned nr_seq_samples;
        unsigned nr_rand_samples;
        unsigned thresholds[2];

        dm_oblock_t last_end_oblock;
};

static void iot_init(struct io_tracker *t,
                     int sequential_threshold, int random_threshold)
{
        t->pattern = PATTERN_RANDOM;
        t->nr_seq_samples = 0;
        t->nr_rand_samples = 0;
        t->last_end_oblock = 0;
        t->thresholds[PATTERN_RANDOM] = random_threshold;
        t->thresholds[PATTERN_SEQUENTIAL] = sequential_threshold;
}
static enum io_pattern iot_pattern(struct io_tracker *t)
{
        return t->pattern;
}

static void iot_update_stats(struct io_tracker *t, struct bio *bio)
{
        if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
                t->nr_seq_samples++;
        else {
                /*
                 * Just one non-sequential IO is enough to reset the
                 * counters.
                 */
                if (t->nr_seq_samples) {
                        t->nr_seq_samples = 0;
                        t->nr_rand_samples = 0;
                }

                t->nr_rand_samples++;
        }

        t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
}

static void iot_check_for_pattern_switch(struct io_tracker *t)
{
        switch (t->pattern) {
        case PATTERN_SEQUENTIAL:
                if (t->nr_rand_samples >= t->thresholds[PATTERN_RANDOM]) {
                        t->pattern = PATTERN_RANDOM;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }
                break;

        case PATTERN_RANDOM:
                if (t->nr_seq_samples >= t->thresholds[PATTERN_SEQUENTIAL]) {
                        t->pattern = PATTERN_SEQUENTIAL;
                        t->nr_seq_samples = t->nr_rand_samples = 0;
                }
                break;
        }
}

static void iot_examine_bio(struct io_tracker *t, struct bio *bio)
{
        iot_update_stats(t, bio);
        iot_check_for_pattern_switch(t);
}

/*----------------------------------------------------------------*/
/*
 * This queue is divided up into different levels, allowing us to push
 * entries to the back of any of the levels.  Think of it as a partially
 * sorted queue.
 */
#define NR_QUEUE_LEVELS 16u
#define NR_SENTINELS NR_QUEUE_LEVELS * 3

#define WRITEBACK_PERIOD HZ

struct queue {
        unsigned nr_elts;
        bool current_writeback_sentinels;
        unsigned long next_writeback;
        struct list_head qs[NR_QUEUE_LEVELS];
        struct list_head sentinels[NR_SENTINELS];
};
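/*
 * There are three sets of NR_QUEUE_LEVELS sentinels: the first set is
 * used by queue_tick() to delimit tick periods, and the remaining two
 * are used alternately by the writeback code (see writeback_sentinel()
 * and queue_update_writeback_sentinels() below), which flips between
 * them every WRITEBACK_PERIOD.
 */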
static void queue_init(struct queue *q)
{
        unsigned i;

        q->nr_elts = 0;
        q->current_writeback_sentinels = false;
        q->next_writeback = 0;
        for (i = 0; i < NR_QUEUE_LEVELS; i++) {
                INIT_LIST_HEAD(q->qs + i);
                INIT_LIST_HEAD(q->sentinels + i);
                INIT_LIST_HEAD(q->sentinels + NR_QUEUE_LEVELS + i);
                INIT_LIST_HEAD(q->sentinels + (2 * NR_QUEUE_LEVELS) + i);
        }
}

static unsigned queue_size(struct queue *q)
{
        return q->nr_elts;
}

static bool queue_empty(struct queue *q)
{
        return q->nr_elts == 0;
}

/*
 * Insert an entry to the back of the given level.
 */
static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
{
        q->nr_elts++;
        list_add_tail(elt, q->qs + level);
}

static void queue_remove(struct queue *q, struct list_head *elt)
{
        q->nr_elts--;
        list_del(elt);
}

static bool is_sentinel(struct queue *q, struct list_head *h)
{
        return (h >= q->sentinels) && (h < (q->sentinels + NR_SENTINELS));
}
/*
 * Gives us the oldest entry of the lowest populated level.  If the first
 * level is emptied then we shift down one level.
 */
static struct list_head *queue_peek(struct queue *q)
{
        unsigned level;
        struct list_head *h;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_for_each(h, q->qs + level)
                        if (!is_sentinel(q, h))
                                return h;

        return NULL;
}

static struct list_head *queue_pop(struct queue *q)
{
        struct list_head *r = queue_peek(q);

        if (r) {
                q->nr_elts--;
                list_del(r);
        }

        return r;
}

/*
 * Pops an entry from a level that is not past a sentinel.
 */
static struct list_head *queue_pop_old(struct queue *q)
{
        unsigned level;
        struct list_head *h;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_for_each(h, q->qs + level) {
                        if (is_sentinel(q, h))
                                break;

                        q->nr_elts--;
                        list_del(h);
                        return h;
                }

        return NULL;
}

static struct list_head *list_pop(struct list_head *lh)
{
        struct list_head *r = lh->next;

        BUG_ON(!r);
        list_del_init(r);

        return r;
}
static struct list_head *writeback_sentinel(struct queue *q, unsigned level)
{
        if (q->current_writeback_sentinels)
                return q->sentinels + NR_QUEUE_LEVELS + level;

        return q->sentinels + 2 * NR_QUEUE_LEVELS + level;
}

static void queue_update_writeback_sentinels(struct queue *q)
{
        unsigned i;
        struct list_head *h;

        if (time_after(jiffies, q->next_writeback)) {
                for (i = 0; i < NR_QUEUE_LEVELS; i++) {
                        h = writeback_sentinel(q, i);
                        list_del(h);
                        list_add_tail(h, q->qs + i);
                }

                q->next_writeback = jiffies + WRITEBACK_PERIOD;
                q->current_writeback_sentinels = !q->current_writeback_sentinels;
        }
}
/*
 * Sometimes we want to iterate through entries that have been pushed since
 * a certain event.  We use sentinel entries on the queues to delimit these
 * 'tick' events.
 */
static void queue_tick(struct queue *q)
{
        unsigned i;

        for (i = 0; i < NR_QUEUE_LEVELS; i++) {
                list_del(q->sentinels + i);
                list_add_tail(q->sentinels + i, q->qs + i);
        }
}

typedef void (*iter_fn)(struct list_head *, void *);
static void queue_iterate_tick(struct queue *q, iter_fn fn, void *context)
{
        unsigned i;
        struct list_head *h;

        for (i = 0; i < NR_QUEUE_LEVELS; i++) {
                list_for_each_prev(h, q->qs + i) {
                        if (is_sentinel(q, h))
                                break;

                        fn(h, context);
                }
        }
}
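/*
 * Note that queue_iterate_tick() walks each level backwards from the
 * tail, stopping at the first sentinel it meets, so the callback only
 * visits entries pushed since that sentinel was requeued, newest first.
 */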
/*----------------------------------------------------------------*/
/*
 * Describes a cache entry.  Used in both the cache and the pre_cache.
 */
struct entry {
        struct hlist_node hlist;
        struct list_head list;
        dm_oblock_t oblock;

        /*
         * FIXME: pack these better
         */
        bool dirty:1;
        unsigned hit_count;
};

/*
 * Rather than storing the cblock in an entry, we allocate all entries in
 * an array, and infer the cblock from the entry position.
 *
 * Free entries are linked together into a list.
 */
struct entry_pool {
        struct entry *entries, *entries_end;
        struct list_head free;
        unsigned nr_allocated;
};
static int epool_init(struct entry_pool *ep, unsigned nr_entries)
{
        unsigned i;

        ep->entries = vzalloc(sizeof(struct entry) * nr_entries);
        if (!ep->entries)
                return -ENOMEM;

        ep->entries_end = ep->entries + nr_entries;

        INIT_LIST_HEAD(&ep->free);
        for (i = 0; i < nr_entries; i++)
                list_add(&ep->entries[i].list, &ep->free);

        ep->nr_allocated = 0;

        return 0;
}

static void epool_exit(struct entry_pool *ep)
{
        vfree(ep->entries);
}

static struct entry *alloc_entry(struct entry_pool *ep)
{
        struct entry *e;

        if (list_empty(&ep->free))
                return NULL;

        e = list_entry(list_pop(&ep->free), struct entry, list);
        INIT_LIST_HEAD(&e->list);
        INIT_HLIST_NODE(&e->hlist);
        ep->nr_allocated++;

        return e;
}

/*
 * This assumes the cblock hasn't already been allocated.
 */
static struct entry *alloc_particular_entry(struct entry_pool *ep, dm_cblock_t cblock)
{
        struct entry *e = ep->entries + from_cblock(cblock);

        list_del_init(&e->list);
        INIT_HLIST_NODE(&e->hlist);
        ep->nr_allocated++;

        return e;
}

static void free_entry(struct entry_pool *ep, struct entry *e)
{
        BUG_ON(!ep->nr_allocated);
        ep->nr_allocated--;
        INIT_HLIST_NODE(&e->hlist);
        list_add(&e->list, &ep->free);
}
/*
 * Returns NULL if the entry is free.
 */
static struct entry *epool_find(struct entry_pool *ep, dm_cblock_t cblock)
{
        struct entry *e = ep->entries + from_cblock(cblock);

        return !hlist_unhashed(&e->hlist) ? e : NULL;
}

static bool epool_empty(struct entry_pool *ep)
{
        return list_empty(&ep->free);
}

static bool in_pool(struct entry_pool *ep, struct entry *e)
{
        return e >= ep->entries && e < ep->entries_end;
}
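/*
 * Because all entries live in a single array, the cblock is simply the
 * entry's index: the entry at ep->entries + 5 corresponds to cblock 5.
 */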
static dm_cblock_t infer_cblock(struct entry_pool *ep, struct entry *e)
{
        return to_cblock(e - ep->entries);
}

/*----------------------------------------------------------------*/
struct mq_policy {
        struct dm_cache_policy policy;

        /* protects everything */
        struct mutex lock;
        dm_cblock_t cache_size;
        struct io_tracker tracker;

        /*
         * Entries come from two pools, one of pre-cache entries, and one
         * for the cache proper.
         */
        struct entry_pool pre_cache_pool;
        struct entry_pool cache_pool;

        /*
         * We maintain three queues of entries.  The cache proper,
         * consisting of a clean and dirty queue, contains the currently
         * active mappings.  Whereas the pre_cache tracks blocks that
         * are being hit frequently and are potential candidates for
         * promotion to the cache.
         */
        struct queue pre_cache;
        struct queue cache_clean;
        struct queue cache_dirty;

        /*
         * Keeps track of time, incremented by the core.  We use this to
         * avoid attributing multiple hits within the same tick.
         *
         * Access to tick_protected should be done with the spin lock held.
         * It's copied to tick at the start of the map function (within the
         * mutex).
         */
        spinlock_t tick_lock;
        unsigned tick_protected;
        unsigned tick;

        /*
         * A count of the number of times the map function has been called
         * and found an entry in the pre_cache or cache.  Currently used to
         * calculate the generation.
         */
        unsigned hit_count;

        /*
         * A generation is a longish period that is used to trigger some
         * bookkeeping effects, eg. decrementing hit counts on entries.
         * This is needed to allow the cache to evolve as io patterns
         * change.
         */
        unsigned generation;
        unsigned generation_period; /* in lookups (will probably change) */

        unsigned discard_promote_adjustment;
        unsigned read_promote_adjustment;
        unsigned write_promote_adjustment;

        /*
         * The hash table allows us to quickly find an entry by origin
         * block.  Both pre_cache and cache entries are in here.
         */
        unsigned nr_buckets;
        dm_block_t hash_bits;
        struct hlist_head *table;
};

#define DEFAULT_DISCARD_PROMOTE_ADJUSTMENT 1
#define DEFAULT_READ_PROMOTE_ADJUSTMENT 4
#define DEFAULT_WRITE_PROMOTE_ADJUSTMENT 8
#define DISCOURAGE_DEMOTING_DIRTY_THRESHOLD 128
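/*
 * A worked example of the adjustments (see adjusted_promote_threshold()
 * below): if the cache is full and the least recently used clean entry
 * has a hit count of 7, a pre_cache entry is promoted by a read once its
 * own hit count reaches 7 + 4 = 11, or by a write at 7 + 8 = 15.  A
 * write to a discarded origin block needs only the discard adjustment
 * (default 1), since there is no data to copy.
 */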
/*----------------------------------------------------------------*/

/*
 * Simple hash table implementation.  Should replace with the standard hash
 * table that's making its way upstream.
 */
static void hash_insert(struct mq_policy *mq, struct entry *e)
{
        unsigned h = hash_64(from_oblock(e->oblock), mq->hash_bits);

        hlist_add_head(&e->hlist, mq->table + h);
}
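/*
 * A successful lookup moves the entry to the front of its bucket, so
 * frequently accessed entries stay cheap to find.
 */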
static struct entry *hash_lookup(struct mq_policy *mq, dm_oblock_t oblock)
{
        unsigned h = hash_64(from_oblock(oblock), mq->hash_bits);
        struct hlist_head *bucket = mq->table + h;
        struct entry *e;

        hlist_for_each_entry(e, bucket, hlist)
                if (e->oblock == oblock) {
                        hlist_del(&e->hlist);
                        hlist_add_head(&e->hlist, bucket);
                        return e;
                }

        return NULL;
}

static void hash_remove(struct entry *e)
{
        hlist_del(&e->hlist);
}

/*----------------------------------------------------------------*/
static bool any_free_cblocks(struct mq_policy *mq)
{
        return !epool_empty(&mq->cache_pool);
}

static bool any_clean_cblocks(struct mq_policy *mq)
{
        return !queue_empty(&mq->cache_clean);
}

/*----------------------------------------------------------------*/

/*
 * Now we get to the meat of the policy.  This section deals with deciding
 * when to add entries to the pre_cache and cache, and when to move between
 * them.
 */

/*
 * The queue level is based on the log2 of the hit count.
 */
static unsigned queue_level(struct entry *e)
{
        return min((unsigned) ilog2(e->hit_count), NR_QUEUE_LEVELS - 1u);
}
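/*
 * For example, a hit count of 1 maps to level 0 and a hit count of 8 to
 * level 3; anything at or above 2^15 is clamped to the top level (15).
 */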
static bool in_cache(struct mq_policy *mq, struct entry *e)
{
        return in_pool(&mq->cache_pool, e);
}

/*
 * Inserts the entry into the pre_cache or the cache.  Ensures the cache
 * block is marked as allocated if necessary.  Inserts into the hash table.
 * Sets the tick which records when the entry was last moved about.
 */
static void push(struct mq_policy *mq, struct entry *e)
{
        hash_insert(mq, e);

        if (in_cache(mq, e))
                queue_push(e->dirty ? &mq->cache_dirty : &mq->cache_clean,
                           queue_level(e), &e->list);
        else
                queue_push(&mq->pre_cache, queue_level(e), &e->list);
}

/*
 * Removes an entry from pre_cache or cache.  Removes from the hash table.
 */
static void del(struct mq_policy *mq, struct entry *e)
{
        if (in_cache(mq, e))
                queue_remove(e->dirty ? &mq->cache_dirty : &mq->cache_clean, &e->list);
        else
                queue_remove(&mq->pre_cache, &e->list);

        hash_remove(e);
}
/*
 * Like del, except it removes the first entry in the queue (ie. the least
 * recently used).
 */
static struct entry *pop(struct mq_policy *mq, struct queue *q)
{
        struct entry *e;
        struct list_head *h = queue_pop(q);

        if (!h)
                return NULL;

        e = container_of(h, struct entry, list);
        hash_remove(e);

        return e;
}

static struct entry *pop_old(struct mq_policy *mq, struct queue *q)
{
        struct entry *e;
        struct list_head *h = queue_pop_old(q);

        if (!h)
                return NULL;

        e = container_of(h, struct entry, list);
        hash_remove(e);

        return e;
}

static struct entry *peek(struct queue *q)
{
        struct list_head *h = queue_peek(q);

        return h ? container_of(h, struct entry, list) : NULL;
}
/*
 * The promotion threshold is adjusted every generation, as are the counts
 * of the entries.
 *
 * At the moment the threshold is taken by averaging the hit counts of some
 * of the entries in the cache (the first 20 entries across all levels in
 * ascending order, giving preference to the clean entries at each level).
 *
 * We can be much cleverer than this though.  For example, each promotion
 * could bump up the threshold, helping to prevent churn.  Much more to do
 * here.
 */

#define MAX_TO_AVERAGE 20

static void check_generation(struct mq_policy *mq)
{
        unsigned total = 0, nr = 0, count = 0, level;
        struct list_head *head;
        struct entry *e;

        if ((mq->hit_count >= mq->generation_period) && (epool_empty(&mq->cache_pool))) {
                mq->hit_count = 0;
                mq->generation++;

                for (level = 0; level < NR_QUEUE_LEVELS && count < MAX_TO_AVERAGE; level++) {
                        head = mq->cache_clean.qs + level;
                        list_for_each_entry(e, head, list) {
                                nr++;
                                total += e->hit_count;

                                if (++count >= MAX_TO_AVERAGE)
                                        break;
                        }

                        head = mq->cache_dirty.qs + level;
                        list_for_each_entry(e, head, list) {
                                nr++;
                                total += e->hit_count;

                                if (++count >= MAX_TO_AVERAGE)
                                        break;
                        }
                }
        }
}
/*
 * Whenever we use an entry we bump up its hit counter, and push it to the
 * back of its current level.
 */
static void requeue(struct mq_policy *mq, struct entry *e)
{
        check_generation(mq);
        del(mq, e);
        push(mq, e);
}

/*
 * Demote the least recently used entry from the cache to the pre_cache.
 * Returns the new cache entry to use, and the old origin block it was
 * mapped to.
 *
 * We drop the hit count on the demoted entry back to 1 to stop it bouncing
 * straight back into the cache if it's subsequently hit.  There are
 * various options here, and more experimentation would be good:
 *
 * - just forget about the demoted entry completely (ie. don't insert it
 *   into the pre_cache).
 * - divide the hit count rather than setting it to some hard coded value.
 * - set the hit count to a hard coded value other than 1, eg, is it better
 *   if it goes in at level 2?
 */
static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock)
{
        struct entry *demoted = pop(mq, &mq->cache_clean);

        if (!demoted)
                /*
                 * We could get a block from mq->cache_dirty, but that
                 * would add extra latency to the triggering bio as it
                 * waits for the writeback.  Better to not promote this
                 * time and hope there's a clean block next time this block
                 * is hit.
                 */
                return -ENOSPC;

        *oblock = demoted->oblock;
        free_entry(&mq->cache_pool, demoted);

        /*
         * We used to put the demoted block into the pre-cache, but I think
         * it's simpler to just let it work its way up from zero again.
         * Stops blocks flickering in and out of the cache.
         */

        return 0;
}
/*
 * Entries in the pre_cache whose hit count passes the promotion
 * threshold move to the cache proper.  Working out the correct
 * value for the promotion_threshold is crucial to this policy.
 */
static unsigned promote_threshold(struct mq_policy *mq)
{
        struct entry *e;

        if (any_free_cblocks(mq))
                return 0;

        e = peek(&mq->cache_clean);
        if (e)
                return e->hit_count;

        e = peek(&mq->cache_dirty);
        if (e)
                return e->hit_count + DISCOURAGE_DEMOTING_DIRTY_THRESHOLD;

        /* This should never happen */
        return 0;
}

/*
 * We modify the basic promotion_threshold depending on the specific io.
 *
 * If the origin block has been discarded then there's no cost to copy it
 * to the cache.
 *
 * We bias towards reads, since they can be demoted at no cost if they
 * haven't been dirtied.
 */
static unsigned adjusted_promote_threshold(struct mq_policy *mq,
                                           bool discarded_oblock, int data_dir)
{
        if (data_dir == READ)
                return promote_threshold(mq) + mq->read_promote_adjustment;

        if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
                /*
                 * We don't need to do any copying at all, so give this a
                 * very low threshold.
                 */
                return mq->discard_promote_adjustment;
        }

        return promote_threshold(mq) + mq->write_promote_adjustment;
}
static bool should_promote(struct mq_policy *mq, struct entry *e,
                           bool discarded_oblock, int data_dir)
{
        return e->hit_count >=
                adjusted_promote_threshold(mq, discarded_oblock, data_dir);
}

static int cache_entry_found(struct mq_policy *mq,
                             struct entry *e,
                             struct policy_result *result)
{
        requeue(mq, e);

        if (in_cache(mq, e)) {
                result->op = POLICY_HIT;
                result->cblock = infer_cblock(&mq->cache_pool, e);
        }

        return 0;
}
/*
 * Moves an entry from the pre_cache to the cache.  The main work is
 * finding which cache block to use.
 */
static int pre_cache_to_cache(struct mq_policy *mq, struct entry *e,
                              struct policy_result *result)
{
        int r;
        struct entry *new_e;

        /* Ensure there's a free cblock in the cache */
        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
                r = demote_cblock(mq, &result->old_oblock);
                if (r) {
                        result->op = POLICY_MISS;
                        return 0;
                }

        } else
                result->op = POLICY_NEW;

        new_e = alloc_entry(&mq->cache_pool);
        BUG_ON(!new_e);

        new_e->oblock = e->oblock;
        new_e->dirty = false;
        new_e->hit_count = e->hit_count;

        del(mq, e);
        free_entry(&mq->pre_cache_pool, e);
        push(mq, new_e);

        result->cblock = infer_cblock(&mq->cache_pool, new_e);

        return 0;
}

static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
                                 bool can_migrate, bool discarded_oblock,
                                 int data_dir, struct policy_result *result)
{
        int r = 0;

        if (!should_promote(mq, e, discarded_oblock, data_dir)) {
                requeue(mq, e);
                result->op = POLICY_MISS;

        } else if (!can_migrate)
                r = -EWOULDBLOCK;

        else {
                requeue(mq, e);
                r = pre_cache_to_cache(mq, e, result);
        }

        return r;
}
static void insert_in_pre_cache(struct mq_policy *mq,
                                dm_oblock_t oblock)
{
        struct entry *e = alloc_entry(&mq->pre_cache_pool);

        if (!e)
                /*
                 * There's no spare entry structure, so we grab the least
                 * used one from the pre_cache.
                 */
                e = pop(mq, &mq->pre_cache);

        if (unlikely(!e)) {
                DMWARN("couldn't pop from pre cache");
                return;
        }

        e->dirty = false;
        e->oblock = oblock;
        e->hit_count = 1;
        push(mq, e);
}

static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
                            struct policy_result *result)
{
        int r;
        struct entry *e;

        if (epool_empty(&mq->cache_pool)) {
                result->op = POLICY_REPLACE;
                r = demote_cblock(mq, &result->old_oblock);
                if (unlikely(r)) {
                        result->op = POLICY_MISS;
                        insert_in_pre_cache(mq, oblock);
                        return;
                }

                /*
                 * This will always succeed, since we've just demoted.
                 */
                e = alloc_entry(&mq->cache_pool);
                BUG_ON(!e);

        } else {
                e = alloc_entry(&mq->cache_pool);
                result->op = POLICY_NEW;
        }

        e->oblock = oblock;
        e->dirty = false;
        e->hit_count = 1;

        push(mq, e);

        result->cblock = infer_cblock(&mq->cache_pool, e);
}

static int no_entry_found(struct mq_policy *mq, dm_oblock_t oblock,
                          bool can_migrate, bool discarded_oblock,
                          int data_dir, struct policy_result *result)
{
        if (adjusted_promote_threshold(mq, discarded_oblock, data_dir) <= 1) {
                if (can_migrate)
                        insert_in_cache(mq, oblock, result);
                else
                        return -EWOULDBLOCK;
        } else {
                insert_in_pre_cache(mq, oblock);
                result->op = POLICY_MISS;
        }

        return 0;
}
/*
 * Looks the oblock up in the hash table, then decides whether to put it
 * in the pre_cache, or cache etc.
 */
static int map(struct mq_policy *mq, dm_oblock_t oblock,
               bool can_migrate, bool discarded_oblock,
               int data_dir, struct policy_result *result)
{
        int r = 0;
        struct entry *e = hash_lookup(mq, oblock);

        if (e && in_cache(mq, e))
                r = cache_entry_found(mq, e, result);

        else if (mq->tracker.thresholds[PATTERN_SEQUENTIAL] &&
                 iot_pattern(&mq->tracker) == PATTERN_SEQUENTIAL)
                result->op = POLICY_MISS;

        else if (e)
                r = pre_cache_entry_found(mq, e, can_migrate, discarded_oblock,
                                          data_dir, result);

        else
                r = no_entry_found(mq, oblock, can_migrate, discarded_oblock,
                                   data_dir, result);

        if (r == -EWOULDBLOCK)
                result->op = POLICY_MISS;

        return r;
}
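/*
 * To summarise map(): a block already in the cache is a straight hit;
 * sequential io bypasses the cache entirely; a block already in the
 * pre_cache is promoted once its hit count warrants it; and an unknown
 * block goes into the pre_cache (or straight into the cache when the
 * adjusted threshold is trivially low).
 */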
/*----------------------------------------------------------------*/

/*
 * Public interface, via the policy struct.  See dm-cache-policy.h for a
 * description of these.
 */

static struct mq_policy *to_mq_policy(struct dm_cache_policy *p)
{
        return container_of(p, struct mq_policy, policy);
}

static void mq_destroy(struct dm_cache_policy *p)
{
        struct mq_policy *mq = to_mq_policy(p);

        vfree(mq->table);
        epool_exit(&mq->cache_pool);
        epool_exit(&mq->pre_cache_pool);
        kfree(mq);
}

static void update_pre_cache_hits(struct list_head *h, void *context)
{
        struct entry *e = container_of(h, struct entry, list);

        e->hit_count++;
}

static void update_cache_hits(struct list_head *h, void *context)
{
        struct mq_policy *mq = context;
        struct entry *e = container_of(h, struct entry, list);

        e->hit_count++;
        mq->hit_count++;
}
static void copy_tick(struct mq_policy *mq)
{
        unsigned long flags, tick;

        spin_lock_irqsave(&mq->tick_lock, flags);
        tick = mq->tick_protected;
        if (tick != mq->tick) {
                queue_iterate_tick(&mq->pre_cache, update_pre_cache_hits, mq);
                queue_iterate_tick(&mq->cache_dirty, update_cache_hits, mq);
                queue_iterate_tick(&mq->cache_clean, update_cache_hits, mq);
                mq->tick = tick;
        }

        queue_tick(&mq->pre_cache);
        queue_tick(&mq->cache_dirty);
        queue_tick(&mq->cache_clean);
        queue_update_writeback_sentinels(&mq->cache_dirty);
        spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_map(struct dm_cache_policy *p, dm_oblock_t oblock,
                  bool can_block, bool can_migrate, bool discarded_oblock,
                  struct bio *bio, struct policy_result *result)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        result->op = POLICY_MISS;

        if (can_block)
                mutex_lock(&mq->lock);
        else if (!mutex_trylock(&mq->lock))
                return -EWOULDBLOCK;

        copy_tick(mq);

        iot_examine_bio(&mq->tracker, bio);
        r = map(mq, oblock, can_migrate, discarded_oblock,
                bio_data_dir(bio), result);

        mutex_unlock(&mq->lock);

        return r;
}

static int mq_lookup(struct dm_cache_policy *p, dm_oblock_t oblock, dm_cblock_t *cblock)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);
        struct entry *e;

        if (!mutex_trylock(&mq->lock))
                return -EWOULDBLOCK;

        e = hash_lookup(mq, oblock);
        if (e && in_cache(mq, e)) {
                *cblock = infer_cblock(&mq->cache_pool, e);
                r = 0;
        } else
                r = -ENOENT;

        mutex_unlock(&mq->lock);

        return r;
}
static void __mq_set_clear_dirty(struct mq_policy *mq, dm_oblock_t oblock, bool set)
{
        struct entry *e;

        e = hash_lookup(mq, oblock);
        BUG_ON(!e || !in_cache(mq, e));

        del(mq, e);
        e->dirty = set;
        push(mq, e);
}

static void mq_set_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __mq_set_clear_dirty(mq, oblock, true);
        mutex_unlock(&mq->lock);
}

static void mq_clear_dirty(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __mq_set_clear_dirty(mq, oblock, false);
        mutex_unlock(&mq->lock);
}
static int mq_load_mapping(struct dm_cache_policy *p,
                           dm_oblock_t oblock, dm_cblock_t cblock,
                           uint32_t hint, bool hint_valid)
{
        struct mq_policy *mq = to_mq_policy(p);
        struct entry *e;

        e = alloc_particular_entry(&mq->cache_pool, cblock);
        e->oblock = oblock;
        e->dirty = false;       /* this gets corrected in a minute */
        e->hit_count = hint_valid ? hint : 1;
        push(mq, e);

        return 0;
}

static int mq_save_hints(struct mq_policy *mq, struct queue *q,
                         policy_walk_fn fn, void *context)
{
        int r;
        unsigned level;
        struct list_head *h;
        struct entry *e;

        for (level = 0; level < NR_QUEUE_LEVELS; level++)
                list_for_each(h, q->qs + level) {
                        if (is_sentinel(q, h))
                                continue;

                        e = container_of(h, struct entry, list);
                        r = fn(context, infer_cblock(&mq->cache_pool, e),
                               e->oblock, e->hit_count);
                        if (r)
                                return r;
                }

        return 0;
}
static int mq_walk_mappings(struct dm_cache_policy *p, policy_walk_fn fn,
                            void *context)
{
        struct mq_policy *mq = to_mq_policy(p);
        int r = 0;

        mutex_lock(&mq->lock);

        r = mq_save_hints(mq, &mq->cache_clean, fn, context);
        if (!r)
                r = mq_save_hints(mq, &mq->cache_dirty, fn, context);

        mutex_unlock(&mq->lock);

        return r;
}

static void __remove_mapping(struct mq_policy *mq, dm_oblock_t oblock)
{
        struct entry *e;

        e = hash_lookup(mq, oblock);
        BUG_ON(!e || !in_cache(mq, e));

        del(mq, e);
        free_entry(&mq->cache_pool, e);
}

static void mq_remove_mapping(struct dm_cache_policy *p, dm_oblock_t oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __remove_mapping(mq, oblock);
        mutex_unlock(&mq->lock);
}

static int __remove_cblock(struct mq_policy *mq, dm_cblock_t cblock)
{
        struct entry *e = epool_find(&mq->cache_pool, cblock);

        if (!e)
                return -ENODATA;

        del(mq, e);
        free_entry(&mq->cache_pool, e);

        return 0;
}

static int mq_remove_cblock(struct dm_cache_policy *p, dm_cblock_t cblock)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = __remove_cblock(mq, cblock);
        mutex_unlock(&mq->lock);

        return r;
}
#define CLEAN_TARGET_PERCENTAGE 25

static bool clean_target_met(struct mq_policy *mq)
{
        /*
         * Cache entries may not be populated, so we cannot rely on the
         * size of the clean queue.
         */
        unsigned nr_clean = from_cblock(mq->cache_size) - queue_size(&mq->cache_dirty);
        unsigned target = from_cblock(mq->cache_size) * CLEAN_TARGET_PERCENTAGE / 100;

        return nr_clean >= target;
}
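/*
 * For example, with a cache of 1000 blocks and 800 entries on the dirty
 * queue: nr_clean = 200 and target = 250, so the target is not met and
 * writeback (below) will dig into the dirty queue beyond the sentinels.
 */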
static int __mq_writeback_work(struct mq_policy *mq, dm_oblock_t *oblock,
                               dm_cblock_t *cblock)
{
        struct entry *e = pop_old(mq, &mq->cache_dirty);

        if (!e && !clean_target_met(mq))
                e = pop(mq, &mq->cache_dirty);

        if (!e)
                return -ENODATA;

        *oblock = e->oblock;
        *cblock = infer_cblock(&mq->cache_pool, e);
        e->dirty = false;
        push(mq, e);

        return 0;
}

static int mq_writeback_work(struct dm_cache_policy *p, dm_oblock_t *oblock,
                             dm_cblock_t *cblock)
{
        int r;
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = __mq_writeback_work(mq, oblock, cblock);
        mutex_unlock(&mq->lock);

        return r;
}
static void __force_mapping(struct mq_policy *mq,
                            dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
        struct entry *e = hash_lookup(mq, current_oblock);

        if (e && in_cache(mq, e)) {
                del(mq, e);
                e->oblock = new_oblock;
                e->dirty = true;
                push(mq, e);
        }
}

static void mq_force_mapping(struct dm_cache_policy *p,
                             dm_oblock_t current_oblock, dm_oblock_t new_oblock)
{
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        __force_mapping(mq, current_oblock, new_oblock);
        mutex_unlock(&mq->lock);
}

static dm_cblock_t mq_residency(struct dm_cache_policy *p)
{
        dm_cblock_t r;
        struct mq_policy *mq = to_mq_policy(p);

        mutex_lock(&mq->lock);
        r = to_cblock(mq->cache_pool.nr_allocated);
        mutex_unlock(&mq->lock);

        return r;
}

static void mq_tick(struct dm_cache_policy *p)
{
        struct mq_policy *mq = to_mq_policy(p);
        unsigned long flags;

        spin_lock_irqsave(&mq->tick_lock, flags);
        mq->tick_protected++;
        spin_unlock_irqrestore(&mq->tick_lock, flags);
}
static int mq_set_config_value(struct dm_cache_policy *p,
                               const char *key, const char *value)
{
        struct mq_policy *mq = to_mq_policy(p);
        unsigned long tmp;

        if (kstrtoul(value, 10, &tmp))
                return -EINVAL;

        if (!strcasecmp(key, "random_threshold")) {
                mq->tracker.thresholds[PATTERN_RANDOM] = tmp;

        } else if (!strcasecmp(key, "sequential_threshold")) {
                mq->tracker.thresholds[PATTERN_SEQUENTIAL] = tmp;

        } else if (!strcasecmp(key, "discard_promote_adjustment"))
                mq->discard_promote_adjustment = tmp;

        else if (!strcasecmp(key, "read_promote_adjustment"))
                mq->read_promote_adjustment = tmp;

        else if (!strcasecmp(key, "write_promote_adjustment"))
                mq->write_promote_adjustment = tmp;

        else
                return -EINVAL;

        return 0;
}
static int mq_emit_config_values(struct dm_cache_policy *p, char *result, unsigned maxlen)
{
        ssize_t sz = 0;
        struct mq_policy *mq = to_mq_policy(p);

        DMEMIT("10 random_threshold %u "
               "sequential_threshold %u "
               "discard_promote_adjustment %u "
               "read_promote_adjustment %u "
               "write_promote_adjustment %u",
               mq->tracker.thresholds[PATTERN_RANDOM],
               mq->tracker.thresholds[PATTERN_SEQUENTIAL],
               mq->discard_promote_adjustment,
               mq->read_promote_adjustment,
               mq->write_promote_adjustment);

        return 0;
}
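/*
 * The leading "10" above is the count of words that follow: five
 * key/value pairs.  This string is surfaced as the policy section of
 * the cache target's status line.
 */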
/* Init the policy plugin interface function pointers. */
static void init_policy_functions(struct mq_policy *mq)
{
        mq->policy.destroy = mq_destroy;
        mq->policy.map = mq_map;
        mq->policy.lookup = mq_lookup;
        mq->policy.set_dirty = mq_set_dirty;
        mq->policy.clear_dirty = mq_clear_dirty;
        mq->policy.load_mapping = mq_load_mapping;
        mq->policy.walk_mappings = mq_walk_mappings;
        mq->policy.remove_mapping = mq_remove_mapping;
        mq->policy.remove_cblock = mq_remove_cblock;
        mq->policy.writeback_work = mq_writeback_work;
        mq->policy.force_mapping = mq_force_mapping;
        mq->policy.residency = mq_residency;
        mq->policy.tick = mq_tick;
        mq->policy.emit_config_values = mq_emit_config_values;
        mq->policy.set_config_value = mq_set_config_value;
}
static struct dm_cache_policy *mq_create(dm_cblock_t cache_size,
                                         sector_t origin_size,
                                         sector_t cache_block_size)
{
        struct mq_policy *mq = kzalloc(sizeof(*mq), GFP_KERNEL);

        if (!mq)
                return NULL;

        init_policy_functions(mq);
        iot_init(&mq->tracker, SEQUENTIAL_THRESHOLD_DEFAULT, RANDOM_THRESHOLD_DEFAULT);
        mq->cache_size = cache_size;

        if (epool_init(&mq->pre_cache_pool, from_cblock(cache_size))) {
                DMERR("couldn't initialize pool of pre-cache entries");
                goto bad_pre_cache_init;
        }

        if (epool_init(&mq->cache_pool, from_cblock(cache_size))) {
                DMERR("couldn't initialize pool of cache entries");
                goto bad_cache_init;
        }

        mq->tick_protected = 0;
        mq->tick = 0;
        mq->hit_count = 0;
        mq->generation = 0;
        mq->discard_promote_adjustment = DEFAULT_DISCARD_PROMOTE_ADJUSTMENT;
        mq->read_promote_adjustment = DEFAULT_READ_PROMOTE_ADJUSTMENT;
        mq->write_promote_adjustment = DEFAULT_WRITE_PROMOTE_ADJUSTMENT;
        mutex_init(&mq->lock);
        spin_lock_init(&mq->tick_lock);

        queue_init(&mq->pre_cache);
        queue_init(&mq->cache_clean);
        queue_init(&mq->cache_dirty);

        mq->generation_period = max((unsigned) from_cblock(cache_size), 1024U);

        mq->nr_buckets = next_power(from_cblock(cache_size) / 2, 16);
        mq->hash_bits = ffs(mq->nr_buckets) - 1;
        mq->table = vzalloc(sizeof(*mq->table) * mq->nr_buckets);
        if (!mq->table)
                goto bad_alloc_table;

        return &mq->policy;

bad_alloc_table:
        epool_exit(&mq->cache_pool);
bad_cache_init:
        epool_exit(&mq->pre_cache_pool);
bad_pre_cache_init:
        kfree(mq);

        return NULL;
}
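/*
 * Hash table sizing in mq_create() above: with a cache of 16384 blocks,
 * nr_buckets = next_power(8192, 16) = 8192 and hash_bits = 13, giving
 * about four entries per bucket when both pools are fully allocated.
 */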
/*----------------------------------------------------------------*/

static struct dm_cache_policy_type mq_policy_type = {
        .name = "mq",
        .version = {1, 3, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = mq_create
};

static struct dm_cache_policy_type default_policy_type = {
        .name = "default",
        .version = {1, 3, 0},
        .hint_size = 4,
        .owner = THIS_MODULE,
        .create = mq_create,
        .real = &mq_policy_type
};
static int __init mq_init(void)
{
        int r;

        mq_entry_cache = kmem_cache_create("dm_mq_policy_cache_entry",
                                           sizeof(struct entry),
                                           __alignof__(struct entry),
                                           0, NULL);
        if (!mq_entry_cache)
                goto bad;

        r = dm_cache_policy_register(&mq_policy_type);
        if (r) {
                DMERR("register failed %d", r);
                goto bad_register_mq;
        }

        r = dm_cache_policy_register(&default_policy_type);
        if (!r) {
                DMINFO("version %u.%u.%u loaded",
                       mq_policy_type.version[0],
                       mq_policy_type.version[1],
                       mq_policy_type.version[2]);
                return 0;
        }

        DMERR("register failed (as default) %d", r);

        dm_cache_policy_unregister(&mq_policy_type);

bad_register_mq:
        kmem_cache_destroy(mq_entry_cache);

bad:
        return -ENOMEM;
}
static void __exit mq_exit(void)
{
        dm_cache_policy_unregister(&mq_policy_type);
        dm_cache_policy_unregister(&default_policy_type);

        kmem_cache_destroy(mq_entry_cache);
}

module_init(mq_init);
module_exit(mq_exit);

MODULE_AUTHOR("Joe Thornber <dm-devel@redhat.com>");
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("mq cache policy");

MODULE_ALIAS("dm-cache-default");