author	Joe Thornber <ejt@redhat.com>	2015-05-29 05:20:56 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2015-06-11 17:13:01 -0400
commit	fba10109a45d864bab98ae90dd63bcc2789352b3 (patch)
tree	9cfe1ba112421a7b18515d7854836a1b76aacb0b
parent	b61d9509628fea995196a96b4c1713fa67dade88 (diff)
dm cache: age and write back cache entries even without active IO
The policy tick() method is normally called from interrupt context.
Both the mq and smq policies do some bottom half work for the tick
method in their map functions.

However if no IO is going through the cache, then that bottom half
work doesn't occur.  With these policies this means recently hit
entries do not age and do not get written back as early as we'd like.

Fix this by introducing a new 'can_block' parameter to the tick()
method.  When this is set the bottom half work occurs immediately.
'can_block' is set when the tick method is called every second by the
core target (not in interrupt context).

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
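To make the locking pattern concrete, the sketch below models the new
tick()/can_block contract in userspace C: a caller in interrupt context
only bumps a counter under a spinlock, while a caller that can block
also takes the policy mutex and folds the count in on the spot. This is
a minimal sketch only; pthread locks stand in for the kernel spinlock
and mutex, and the names toy_policy, toy_tick and fold_tick are
hypothetical, not dm-cache identifiers.

	/*
	 * Minimal userspace sketch of the tick()/can_block contract.
	 * pthread locks stand in for the kernel spinlock and mutex; the
	 * names toy_policy, toy_tick and fold_tick are hypothetical,
	 * not dm-cache identifiers.
	 */
	#include <pthread.h>
	#include <stdbool.h>
	#include <stdio.h>

	struct toy_policy {
		pthread_spinlock_t tick_lock;	/* protects tick_protected */
		pthread_mutex_t lock;		/* protects the policy state */
		unsigned tick_protected;	/* ticks seen, not yet folded in */
		unsigned tick;			/* ticks visible to the policy */
	};

	/* Bottom-half work: fold the protected count into the policy
	 * state.  The caller must hold ->lock. */
	static void fold_tick(struct toy_policy *p)
	{
		pthread_spin_lock(&p->tick_lock);
		p->tick += p->tick_protected;
		p->tick_protected = 0;
		pthread_spin_unlock(&p->tick_lock);
	}

	/* A non-blocking caller (interrupt context in the kernel) only
	 * bumps the counter; a caller that can block also takes the
	 * mutex and does the bottom-half work immediately, so entries
	 * keep aging even when no IO is flowing. */
	static void toy_tick(struct toy_policy *p, bool can_block)
	{
		pthread_spin_lock(&p->tick_lock);
		p->tick_protected++;
		pthread_spin_unlock(&p->tick_lock);

		if (can_block) {
			pthread_mutex_lock(&p->lock);
			fold_tick(p);
			pthread_mutex_unlock(&p->lock);
		}
	}

	int main(void)
	{
		struct toy_policy p = { .tick_protected = 0, .tick = 0 };

		pthread_spin_init(&p.tick_lock, PTHREAD_PROCESS_PRIVATE);
		pthread_mutex_init(&p.lock, NULL);

		toy_tick(&p, false);	/* end_io path: defer the work */
		toy_tick(&p, true);	/* periodic waker: fold in now */
		printf("tick=%u tick_protected=%u\n", p.tick, p.tick_protected);
		return 0;
	}

Built with cc -pthread, the two calls leave tick=2 and
tick_protected=0: the tick deferred by the non-blocking end_io-style
call is folded in by the blocking call, mirroring how the once-a-second
waker now ages entries even when no IO passes through the map path.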
-rw-r--r--	drivers/md/dm-cache-policy-internal.h	4
-rw-r--r--	drivers/md/dm-cache-policy-mq.c	8
-rw-r--r--	drivers/md/dm-cache-policy-smq.c	8
-rw-r--r--	drivers/md/dm-cache-policy.h	4
-rw-r--r--	drivers/md/dm-cache-target.c	4
5 files changed, 20 insertions, 8 deletions
diff --git a/drivers/md/dm-cache-policy-internal.h b/drivers/md/dm-cache-policy-internal.h
index ccbe852d5362..2816018faa7f 100644
--- a/drivers/md/dm-cache-policy-internal.h
+++ b/drivers/md/dm-cache-policy-internal.h
@@ -83,10 +83,10 @@ static inline dm_cblock_t policy_residency(struct dm_cache_policy *p)
 	return p->residency(p);
 }
 
-static inline void policy_tick(struct dm_cache_policy *p)
+static inline void policy_tick(struct dm_cache_policy *p, bool can_block)
 {
 	if (p->tick)
-		return p->tick(p);
+		return p->tick(p, can_block);
 }
 
 static inline int policy_emit_config_values(struct dm_cache_policy *p, char *result,
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 084eec653321..838665bb495a 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -1283,7 +1283,7 @@ static dm_cblock_t mq_residency(struct dm_cache_policy *p)
 	return r;
 }
 
-static void mq_tick(struct dm_cache_policy *p)
+static void mq_tick(struct dm_cache_policy *p, bool can_block)
 {
 	struct mq_policy *mq = to_mq_policy(p);
 	unsigned long flags;
@@ -1291,6 +1291,12 @@ static void mq_tick(struct dm_cache_policy *p)
 	spin_lock_irqsave(&mq->tick_lock, flags);
 	mq->tick_protected++;
 	spin_unlock_irqrestore(&mq->tick_lock, flags);
+
+	if (can_block) {
+		mutex_lock(&mq->lock);
+		copy_tick(mq);
+		mutex_unlock(&mq->lock);
+	}
 }
 
 static int mq_set_config_value(struct dm_cache_policy *p,
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index 55a657f78f00..66feb307e697 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1581,7 +1581,7 @@ static dm_cblock_t smq_residency(struct dm_cache_policy *p)
 	return r;
 }
 
-static void smq_tick(struct dm_cache_policy *p)
+static void smq_tick(struct dm_cache_policy *p, bool can_block)
 {
 	struct smq_policy *mq = to_smq_policy(p);
 	unsigned long flags;
@@ -1589,6 +1589,12 @@ static void smq_tick(struct dm_cache_policy *p)
 	spin_lock_irqsave(&mq->tick_lock, flags);
 	mq->tick_protected++;
 	spin_unlock_irqrestore(&mq->tick_lock, flags);
+
+	if (can_block) {
+		mutex_lock(&mq->lock);
+		copy_tick(mq);
+		mutex_unlock(&mq->lock);
+	}
 }
 
 /* Init the policy plugin interface function pointers. */
diff --git a/drivers/md/dm-cache-policy.h b/drivers/md/dm-cache-policy.h
index 74709129d856..05db56eedb6a 100644
--- a/drivers/md/dm-cache-policy.h
+++ b/drivers/md/dm-cache-policy.h
@@ -200,10 +200,10 @@ struct dm_cache_policy {
 	 * Because of where we sit in the block layer, we can be asked to
 	 * map a lot of little bios that are all in the same block (no
 	 * queue merging has occurred). To stop the policy being fooled by
-	 * these the core target sends regular tick() calls to the policy.
+	 * these, the core target sends regular tick() calls to the policy.
 	 * The policy should only count an entry as hit once per tick.
 	 */
-	void (*tick)(struct dm_cache_policy *p);
+	void (*tick)(struct dm_cache_policy *p, bool can_block);
 
 	/*
 	 * Configuration.
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 5aad875b822c..1b4e1756b169 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -2271,7 +2271,7 @@ static void do_worker(struct work_struct *ws)
 static void do_waker(struct work_struct *ws)
 {
 	struct cache *cache = container_of(to_delayed_work(ws), struct cache, waker);
-	policy_tick(cache->policy);
+	policy_tick(cache->policy, true);
 	wake_worker(cache);
 	queue_delayed_work(cache->wq, &cache->waker, COMMIT_PERIOD);
 }
@@ -3148,7 +3148,7 @@ static int cache_end_io(struct dm_target *ti, struct bio *bio, int error)
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
 	if (pb->tick) {
-		policy_tick(cache->policy);
+		policy_tick(cache->policy, false);
 
 		spin_lock_irqsave(&cache->lock, flags);
 		cache->need_tick_bio = true;