author    Joe Thornber <ejt@redhat.com>      2017-11-08 05:56:11 -0500
committer Mike Snitzer <snitzer@redhat.com>  2017-11-10 15:45:01 -0500
commit    1e72a8e809f030bd4e318a49c497ee38e47e82c1 (patch)
tree      8cac372c0bc3793760bda1f6f54e612974042217
parent    233978449074ca7e45d9c959f9ec612d1b852893 (diff)
dm cache policy smq: handle races with queuing background_work
The background_tracker holds the set of promotions/demotions that the
cache policy wishes the core target to implement. When adding a new
operation to the tracker it is possible that an operation on the same
block is already present (though in practice this does not appear to
be happening). Catch these situations and do the appropriate cleanup.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
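For background, the failure that the patch now handles is easiest to
picture as btracker_queue() finding an operation for the same origin
block already in its tree of pending work. The sketch below is
illustrative only; the background_tracker fields (b->pending,
b->work_cache) and the __find_pending() helper are assumptions, not
the actual dm-cache-background-tracker implementation:

/*
 * Hypothetical sketch: a tracker rejecting a second operation for an
 * oblock that already has one pending. Returning an error lets the
 * policy roll back the speculative change it made before queuing.
 */
struct bt_work {
	struct rb_node node;
	struct policy_work work;
};

static struct bt_work *__find_pending(struct background_tracker *b,
				      dm_oblock_t oblock)
{
	struct rb_node *n = b->pending.rb_node;

	while (n) {
		struct bt_work *w = container_of(n, struct bt_work, node);

		if (from_oblock(oblock) < from_oblock(w->work.oblock))
			n = n->rb_left;
		else if (from_oblock(oblock) > from_oblock(w->work.oblock))
			n = n->rb_right;
		else
			return w;
	}

	return NULL;
}

int btracker_queue(struct background_tracker *b, struct policy_work *work,
		   struct policy_work **pwork)
{
	struct bt_work *w;

	if (__find_pending(b, work->oblock))
		return -EINVAL;	/* an op for this block is already queued */

	w = kmem_cache_alloc(b->work_cache, GFP_NOWAIT);
	if (!w)
		return -ENOMEM;

	w->work = *work;
	/* ... link w into b->pending and the list of queued work ... */
	if (pwork)
		*pwork = &w->work;

	return 0;
}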
 drivers/md/dm-cache-policy-smq.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-cache-policy-smq.c b/drivers/md/dm-cache-policy-smq.c
index e5eb9c9b4bc8..42e5c4b59889 100644
--- a/drivers/md/dm-cache-policy-smq.c
+++ b/drivers/md/dm-cache-policy-smq.c
@@ -1174,12 +1174,16 @@ static void queue_writeback(struct smq_policy *mq)
 		work.cblock = infer_cblock(mq, e);
 
 		r = btracker_queue(mq->bg_work, &work, NULL);
-		WARN_ON_ONCE(r); // FIXME: finish, I think we have to get rid of this race.
+		if (r) {
+			clear_pending(mq, e);
+			q_push_front(&mq->dirty, e);
+		}
 	}
 }
 
 static void queue_demotion(struct smq_policy *mq)
 {
+	int r;
 	struct policy_work work;
 	struct entry *e;
 
@@ -1199,12 +1203,17 @@ static void queue_demotion(struct smq_policy *mq)
 	work.op = POLICY_DEMOTE;
 	work.oblock = e->oblock;
 	work.cblock = infer_cblock(mq, e);
-	btracker_queue(mq->bg_work, &work, NULL);
+	r = btracker_queue(mq->bg_work, &work, NULL);
+	if (r) {
+		clear_pending(mq, e);
+		q_push_front(&mq->clean, e);
+	}
 }
 
 static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 			    struct policy_work **workp)
 {
+	int r;
 	struct entry *e;
 	struct policy_work work;
 
@@ -1234,7 +1243,9 @@ static void queue_promotion(struct smq_policy *mq, dm_oblock_t oblock,
 	work.op = POLICY_PROMOTE;
 	work.oblock = oblock;
 	work.cblock = infer_cblock(mq, e);
-	btracker_queue(mq->bg_work, &work, workp);
+	r = btracker_queue(mq->bg_work, &work, workp);
+	if (r)
+		free_entry(&mq->cache_alloc, e);
 }
 
/*----------------------------------------------------------------*/
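All three call sites now follow the same shape: make a speculative
policy-state change (mark the entry pending and dequeue it, or allocate
a cache entry), attempt to queue the background work, and undo the
change if queuing fails. Purely as illustration, that shared shape
could be factored like this; queue_or_rollback() and its callback are
hypothetical names, not part of the patch:

/* Illustrative helper only: undo the speculative change on failure. */
static int queue_or_rollback(struct smq_policy *mq, struct policy_work *work,
			     struct policy_work **pwork,
			     void (*rollback)(struct smq_policy *, struct entry *),
			     struct entry *e)
{
	int r = btracker_queue(mq->bg_work, work, pwork);

	if (r)
		rollback(mq, e);	/* e.g. clear_pending() + q_push_front() */

	return r;
}

The patch keeps the three open-coded paths instead, which is the
simpler choice here since each rollback touches a different queue
(dirty, clean) or the cache-entry allocator.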