author    Joe Thornber <ejt@redhat.com>    2017-05-11 07:48:18 -0400
committer Mike Snitzer <snitzer@redhat.com>    2017-05-14 21:54:33 -0400
commit    6cf4cc8f8b3b7bc9e3c04a7eab44b985d50029fc (patch)
tree      f9efea7081881af53eabb2f36d7aeafbfd44f71d
parent    4d44ec5ab751be63c5d348f13294304d87baa8c3 (diff)
dm cache policy smq: stop preemptively demoting blocks
It causes a lot of churn if the working set's size is close to the
fast device's size.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
 drivers/md/dm-cache-policy-smq.c | 17 +++++------------
 1 file changed, 5 insertions(+), 12 deletions(-)
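For readers outside the dm-cache code, the "churn" in the commit message
falls out of the free-target arithmetic that this patch leaves in place.
Below is a standalone model, not kernel source: the flat unsigned
parameters and the FREE_TARGET value of 25 percent are assumptions for
illustration (the kernel works on struct smq_policy).

#include <stdbool.h>
#include <stdio.h>

/* Standalone model of the policy's free-target check; the parameter
 * shapes and FREE_TARGET = 25 are assumptions for this sketch. */
static unsigned percent_to_target(unsigned cache_size, unsigned percent)
{
	return cache_size * percent / 100u;
}

static bool free_target_met(unsigned cache_size, unsigned nr_allocated,
			    unsigned nr_demotions_queued)
{
	unsigned nr_free = cache_size - nr_allocated;

	return (nr_free + nr_demotions_queued) >=
		percent_to_target(cache_size, 25u);
}

int main(void)
{
	/*
	 * A working set that fills a 1000-block cache leaves nr_free at
	 * zero, so the 250-block target is never met; before this patch
	 * the idle background path kept queueing demotions whose victims
	 * were promptly re-promoted, which is the churn being fixed.
	 */
	printf("free target met: %d\n", free_target_met(1000, 1000, 0));
	return 0;
}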
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 54421a846a0c..758480a1893d 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1134,13 +1134,10 @@ static bool clean_target_met(struct smq_policy *mq, bool idle)
 		percent_to_target(mq, CLEAN_TARGET);
 }
 
-static bool free_target_met(struct smq_policy *mq, bool idle)
+static bool free_target_met(struct smq_policy *mq)
 {
 	unsigned nr_free;
 
-	if (!idle)
-		return true;
-
 	nr_free = from_cblock(mq->cache_size) - mq->cache_alloc.nr_allocated;
 	return (nr_free + btracker_nr_demotions_queued(mq->bg_work)) >=
 		percent_to_target(mq, FREE_TARGET);
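The dropped guard inverted under load: a busy (!idle) cache reported its
free target as met, so preemptive demotion only ever ran while idle. A
minimal before/after sketch, with invented flat parameters standing in
for the real struct:

#include <stdbool.h>

/* Before: busy callers were told the target was met, suppressing
 * demotion; only the idle path demoted preemptively. */
static bool free_target_met_old(bool idle, unsigned nr_free, unsigned target)
{
	if (!idle)
		return true;
	return nr_free >= target;
}

/* After: a pure predicate; the one remaining caller decides when it
 * applies. */
static bool free_target_met_new(unsigned nr_free, unsigned target)
{
	return nr_free >= target;
}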
@@ -1220,7 +1217,7 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 		 * We always claim to be 'idle' to ensure some demotions happen
 		 * with continuous loads.
 		 */
-		if (!free_target_met(mq, true))
+		if (!free_target_met(mq))
 			queue_demotion(mq);
 		return;
 	}
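With the background path removed (see the final hunk), this is now the
only site that queues demotions: promotion pressure itself, once the
allocator has no free blocks. A self-contained sketch of that on-demand
shape, with stand-in types and helpers rather than the kernel's (the
real code additionally gates the demotion on free_target_met()):

#include <stdbool.h>

/* Stand-in cache state; the kernel tracks this in struct smq_policy. */
struct cache_model {
	unsigned size;
	unsigned allocated;
	unsigned demotions_queued;
};

static bool allocator_empty(const struct cache_model *c)
{
	return c->allocated == c->size;
}

static void queue_demotion(struct cache_model *c)
{
	c->demotions_queued++;	/* the real code selects a victim entry */
}

/* Demote only when a promotion actually needs a free block. */
static void queue_promotion(struct cache_model *c)
{
	if (allocator_empty(c)) {
		queue_demotion(c);
		return;
	}
	c->allocated++;	/* claim a free block for the promoted oblock */
}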
@@ -1421,14 +1418,10 @@ static int smq_get_background_work(struct dm_cache_policy *p, bool idle,
 	spin_lock_irqsave(&mq->lock, flags);
 	r = btracker_issue(mq->bg_work, result);
 	if (r == -ENODATA) {
-		/* find some writeback work to do */
-		if (mq->migrations_allowed && !free_target_met(mq, idle))
-			queue_demotion(mq);
-
-		else if (!clean_target_met(mq, idle))
+		if (!clean_target_met(mq, idle)) {
 			queue_writeback(mq);
-
-		r = btracker_issue(mq->bg_work, result);
+			r = btracker_issue(mq->bg_work, result);
+		}
 	}
 	spin_unlock_irqrestore(&mq->lock, flags);
 
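The deleted branch here was the preemptive demotion path named in the
subject line. What remains is a queue-then-retry shape: if the tracker
has nothing to hand out and the clean target is unmet, generate
writeback work and re-issue once, all under the same lock. A
self-contained sketch with a toy counter standing in for btracker:

#include <errno.h>
#include <stdbool.h>

/* Toy tracker: a bare counter of queued jobs stands in for btracker. */
struct tracker {
	unsigned queued;
};

static int tracker_issue(struct tracker *t, int *result)
{
	if (!t->queued)
		return -ENODATA;	/* nothing to hand out */
	t->queued--;
	*result = 1;
	return 0;
}

static void queue_writeback(struct tracker *t)
{
	t->queued++;	/* the real code turns a dirty entry into work */
}

/* Shape of the new -ENODATA branch: top up with writeback work if the
 * clean target is unmet, then retry the issue exactly once. */
static int get_background_work(struct tracker *t, bool clean_target_met,
			       int *result)
{
	int r = tracker_issue(t, result);

	if (r == -ENODATA && !clean_target_met) {
		queue_writeback(t);
		r = tracker_issue(t, result);
	}
	return r;
}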