author    Joe Thornber <ejt@redhat.com>  2013-10-24 14:10:28 -0400
committer Mike Snitzer <snitzer@redhat.com>  2013-11-09 18:20:25 -0500
commit    c86c30706caa02ffe303e6b87d53ef6a077d4cca
tree      adafaec4532de7ebbecf8b413ba380a7d2aa9bc7
parent    01911c19bea63b1a958b9d9024504c2e9079f155
dm cache: be much more aggressive about promoting writes to discarded blocks
Previously these promotions only got priority if there were unused cache
blocks.  Now we give them priority if there are any clean blocks in the
cache.

The fio_soak_test in the device-mapper-test-suite now gives uniform
performance across subvolumes (~16 seconds).

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
-rw-r--r--  drivers/md/dm-cache-policy-mq.c  84
1 file changed, 63 insertions(+), 21 deletions(-)
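The heart of the change is in adjusted_promote_threshold(): a write to a
discarded block no longer needs a free cblock to earn the cheap
DISCARDED_PROMOTE_THRESHOLD; a clean (evictable) block is now enough.
The standalone C sketch below models just that decision.  It is for
illustration only: the struct, the boolean stand-ins for
any_free_cblocks()/any_clean_cblocks(), and the threshold values are
assumptions, not the kernel's actual code or tunables.

	#include <stdbool.h>
	#include <stdio.h>

	#define READ  0
	#define WRITE 1

	/* Assumed values -- the real constants live in dm-cache-policy-mq.c. */
	#define DISCARDED_PROMOTE_THRESHOLD 1
	#define READ_PROMOTE_THRESHOLD      4
	#define WRITE_PROMOTE_THRESHOLD     8

	struct mq_model {
		unsigned promote_threshold;
		bool free_cblocks;	/* stand-in for any_free_cblocks(mq)  */
		bool clean_cblocks;	/* stand-in for any_clean_cblocks(mq) */
	};

	static unsigned adjusted_promote_threshold(const struct mq_model *mq,
						   bool discarded_oblock,
						   int data_dir)
	{
		if (data_dir == READ)
			return mq->promote_threshold + READ_PROMOTE_THRESHOLD;

		/*
		 * A write to a discarded block needs no copy, so promote it
		 * cheaply whenever a free *or* clean cblock could receive it.
		 * (Before this patch, only free cblocks counted.)
		 */
		if (discarded_oblock && (mq->free_cblocks || mq->clean_cblocks))
			return DISCARDED_PROMOTE_THRESHOLD;

		return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
	}

	int main(void)
	{
		/* Full cache, but with clean blocks available for eviction. */
		struct mq_model mq = { 10, false, true };

		printf("write to discarded block: %u hits needed\n",
		       adjusted_promote_threshold(&mq, true, WRITE));	/* 1  */
		printf("ordinary write:           %u hits needed\n",
		       adjusted_promote_threshold(&mq, false, WRITE));	/* 18 */
		return 0;
	}

With a promote_threshold of 10 the discarded-block write is promoted after
a single hit while an ordinary write needs 18, which is why the soak test
evens out across subvolumes.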
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 6710e038c730..444f0bf10b21 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -151,6 +151,21 @@ static void queue_init(struct queue *q)
 }
 
 /*
+ * Checks to see if the queue is empty.
+ * FIXME: reduce cpu usage.
+ */
+static bool queue_empty(struct queue *q)
+{
+	unsigned i;
+
+	for (i = 0; i < NR_QUEUE_LEVELS; i++)
+		if (!list_empty(q->qs + i))
+			return false;
+
+	return true;
+}
+
+/*
  * Insert an entry to the back of the given level.
  */
 static void queue_push(struct queue *q, unsigned level, struct list_head *elt)
@@ -442,6 +457,11 @@ static bool any_free_cblocks(struct mq_policy *mq)
 	return mq->nr_cblocks_allocated < from_cblock(mq->cache_size);
 }
 
+static bool any_clean_cblocks(struct mq_policy *mq)
+{
+	return !queue_empty(&mq->cache_clean);
+}
+
 /*
  * Fills result out with a cache block that isn't in use, or return
  * -ENOSPC.  This does _not_ mark the cblock as allocated, the caller is
@@ -688,17 +708,18 @@ static int demote_cblock(struct mq_policy *mq, dm_oblock_t *oblock, dm_cblock_t
 static unsigned adjusted_promote_threshold(struct mq_policy *mq,
 					   bool discarded_oblock, int data_dir)
 {
-	if (discarded_oblock && any_free_cblocks(mq) && data_dir == WRITE)
+	if (data_dir == READ)
+		return mq->promote_threshold + READ_PROMOTE_THRESHOLD;
+
+	if (discarded_oblock && (any_free_cblocks(mq) || any_clean_cblocks(mq))) {
 		/*
 		 * We don't need to do any copying at all, so give this a
-		 * very low threshold.  In practice this only triggers
-		 * during initial population after a format.
+		 * very low threshold.
 		 */
 		return DISCARDED_PROMOTE_THRESHOLD;
+	}
 
-	return data_dir == READ ?
-		(mq->promote_threshold + READ_PROMOTE_THRESHOLD) :
-		(mq->promote_threshold + WRITE_PROMOTE_THRESHOLD);
+	return mq->promote_threshold + WRITE_PROMOTE_THRESHOLD;
 }
 
 static bool should_promote(struct mq_policy *mq, struct entry *e,
@@ -772,6 +793,17 @@ static int pre_cache_entry_found(struct mq_policy *mq, struct entry *e,
 	return r;
 }
 
+static void insert_entry_in_pre_cache(struct mq_policy *mq,
+				      struct entry *e, dm_oblock_t oblock)
+{
+	e->in_cache = false;
+	e->dirty = false;
+	e->oblock = oblock;
+	e->hit_count = 1;
+	e->generation = mq->generation;
+	push(mq, e);
+}
+
 static void insert_in_pre_cache(struct mq_policy *mq,
 				dm_oblock_t oblock)
 {
@@ -789,30 +821,41 @@ static void insert_in_pre_cache(struct mq_policy *mq,
 		return;
 	}
 
-	e->in_cache = false;
-	e->dirty = false;
-	e->oblock = oblock;
-	e->hit_count = 1;
-	e->generation = mq->generation;
-	push(mq, e);
+	insert_entry_in_pre_cache(mq, e, oblock);
 }
 
 static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
 			    struct policy_result *result)
 {
+	int r;
 	struct entry *e;
 	dm_cblock_t cblock;
 
 	if (find_free_cblock(mq, &cblock) == -ENOSPC) {
-		result->op = POLICY_MISS;
-		insert_in_pre_cache(mq, oblock);
-		return;
-	}
+		r = demote_cblock(mq, &result->old_oblock, &cblock);
+		if (unlikely(r)) {
+			result->op = POLICY_MISS;
+			insert_in_pre_cache(mq, oblock);
+			return;
+		}
 
-	e = alloc_entry(mq);
-	if (unlikely(!e)) {
-		result->op = POLICY_MISS;
-		return;
+		/*
+		 * This will always succeed, since we've just demoted.
+		 */
+		e = pop(mq, &mq->pre_cache);
+		result->op = POLICY_REPLACE;
+
+	} else {
+		e = alloc_entry(mq);
+		if (unlikely(!e))
+			e = pop(mq, &mq->pre_cache);
+
+		if (unlikely(!e)) {
+			result->op = POLICY_MISS;
+			return;
+		}
+
+		result->op = POLICY_NEW;
 	}
 
 	e->oblock = oblock;
@@ -823,7 +866,6 @@ static void insert_in_cache(struct mq_policy *mq, dm_oblock_t oblock,
 	e->generation = mq->generation;
 	push(mq, e);
 
-	result->op = POLICY_NEW;
 	result->cblock = e->cblock;
 }
 
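For illustration, here is a minimal model (not the kernel code) of the
reworked insert_in_cache() decision above: a full cache now tries to
demote a clean victim and report POLICY_REPLACE, falling back to the
pre-cache with POLICY_MISS only if demotion fails.  The struct fields and
their semantics are assumed stand-ins for find_free_cblock(),
demote_cblock(), alloc_entry() and pop(); only the op selection is
modelled, the entry bookkeeping is elided.

	#include <stdbool.h>
	#include <stdio.h>

	enum policy_op { POLICY_MISS, POLICY_NEW, POLICY_REPLACE };

	struct cache_model {
		bool has_free_cblock;	/* find_free_cblock() succeeded       */
		bool demote_ok;		/* demote_cblock() returned 0         */
		bool entry_available;	/* alloc_entry() or pre-cache pop     */
	};

	static enum policy_op insert_in_cache(const struct cache_model *c)
	{
		if (!c->has_free_cblock) {
			/* Full cache: evict a clean victim, reuse its slot. */
			if (!c->demote_ok)
				return POLICY_MISS;	/* go via pre-cache */
			return POLICY_REPLACE;
		}

		if (!c->entry_available)
			return POLICY_MISS;

		return POLICY_NEW;
	}

	int main(void)
	{
		struct cache_model full  = { false, true, true };
		struct cache_model roomy = { true, false, true };

		/* Before this patch, a full cache always produced POLICY_MISS. */
		printf("full cache + clean victim -> op %d (POLICY_REPLACE)\n",
		       insert_in_cache(&full));
		printf("free cblock available     -> op %d (POLICY_NEW)\n",
		       insert_in_cache(&roomy));
		return 0;
	}

This split is also why the final hunk removes the unconditional
result->op = POLICY_NEW: the op now depends on which path supplied the
entry, so each branch sets it explicitly.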