author    Joe Thornber <ejt@redhat.com>      2014-03-03 10:52:28 -0500
committer Mike Snitzer <snitzer@redhat.com>  2014-03-05 15:26:59 -0500
commit    738211f70a1d0c7ff7dc965395f7b8a436365ebd (patch)
tree      c18a2183962a4498ef247c01dac98c54f5c95108
parent    18adc57779866ba451237dccca2ebf019be2fa20 (diff)
dm thin: fix noflush suspend IO queueing
This fixes two problems with noflush suspend of thin devices:

i) By the time DM core calls the postsuspend hook, the dm_noflush flag
has already been cleared, so the old thin_postsuspend did nothing.  We
need to use the presuspend hook instead.

ii) There was a race between bios leaving DM core and arriving on the
deferred queue.

thin_presuspend now sets a 'requeue' flag, causing all bios destined
for that thin device to be requeued back to DM core.  It then requeues
all held IO, and all IO on the deferred queue (destined for that
thin).  Finally, postsuspend clears the 'requeue' flag.

Signed-off-by: Joe Thornber <ejt@redhat.com>
Signed-off-by: Mike Snitzer <snitzer@redhat.com>
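The heart of the fix is a gate on the new requeue_mode flag, checked both
where bios enter the target (thin_bio_map) and where already-deferred bios
are drained (process_deferred_bios).  The following is a distilled sketch,
not verbatim dm-thin.c: struct thin_c_sketch and requeue_gate are
illustrative names, and bio_endio() is the two-argument form used by
kernels of this era.

#include <linux/bio.h>
#include <linux/device-mapper.h>

/* Simplified stand-in for the real struct thin_c in dm-thin.c. */
struct thin_c_sketch {
	bool requeue_mode:1;	/* set by presuspend, cleared by postsuspend */
};

/*
 * While requeue_mode is set, every bio aimed at the thin device is
 * handed straight back to DM core with DM_ENDIO_REQUEUE instead of
 * being mapped or deferred.
 */
static int requeue_gate(struct thin_c_sketch *tc, struct bio *bio)
{
	if (tc->requeue_mode) {
		bio_endio(bio, DM_ENDIO_REQUEUE);	/* DM core requeues the bio */
		return DM_MAPIO_SUBMITTED;		/* nothing left for DM to map */
	}
	return DM_MAPIO_REMAPPED;	/* placeholder: fall through to normal mapping */
}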
 drivers/md/dm-thin.c | 74 ++++++++++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 72 insertions(+), 2 deletions(-)
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index f39876dd307d..be70d38745f7 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -226,6 +226,7 @@ struct thin_c {
 
 	struct pool *pool;
 	struct dm_thin_device *td;
+	bool requeue_mode:1;
 };
 
 /*----------------------------------------------------------------*/
@@ -1379,6 +1380,11 @@ static void process_deferred_bios(struct pool *pool)
 		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
 		struct thin_c *tc = h->tc;
 
+		if (tc->requeue_mode) {
+			bio_endio(bio, DM_ENDIO_REQUEUE);
+			continue;
+		}
+
 		/*
 		 * If we've got no free new_mapping structs, and processing
 		 * this bio might require one, we pause until there are some
@@ -1445,6 +1451,51 @@ static void do_waker(struct work_struct *ws)
 
 /*----------------------------------------------------------------*/
 
+struct noflush_work {
+	struct work_struct worker;
+	struct thin_c *tc;
+
+	atomic_t complete;
+	wait_queue_head_t wait;
+};
+
+static void complete_noflush_work(struct noflush_work *w)
+{
+	atomic_set(&w->complete, 1);
+	wake_up(&w->wait);
+}
+
+static void do_noflush_start(struct work_struct *ws)
+{
+	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	w->tc->requeue_mode = true;
+	requeue_io(w->tc);
+	complete_noflush_work(w);
+}
+
+static void do_noflush_stop(struct work_struct *ws)
+{
+	struct noflush_work *w = container_of(ws, struct noflush_work, worker);
+	w->tc->requeue_mode = false;
+	complete_noflush_work(w);
+}
+
+static void noflush_work(struct thin_c *tc, void (*fn)(struct work_struct *))
+{
+	struct noflush_work w;
+
+	INIT_WORK(&w.worker, fn);
+	w.tc = tc;
+	atomic_set(&w.complete, 0);
+	init_waitqueue_head(&w.wait);
+
+	queue_work(tc->pool->wq, &w.worker);
+
+	wait_event(w.wait, atomic_read(&w.complete));
+}
+
+/*----------------------------------------------------------------*/
+
 static enum pool_mode get_pool_mode(struct pool *pool)
 {
 	return pool->pf.mode;
@@ -1616,6 +1667,11 @@ static int thin_bio_map(struct dm_target *ti, struct bio *bio)
 
 	thin_hook_bio(tc, bio);
 
+	if (tc->requeue_mode) {
+		bio_endio(bio, DM_ENDIO_REQUEUE);
+		return DM_MAPIO_SUBMITTED;
+	}
+
 	if (get_pool_mode(tc->pool) == PM_FAIL) {
 		bio_io_error(bio);
 		return DM_MAPIO_SUBMITTED;
@@ -3093,10 +3149,23 @@ static int thin_endio(struct dm_target *ti, struct bio *bio, int err)
 	return 0;
 }
 
-static void thin_postsuspend(struct dm_target *ti)
+static void thin_presuspend(struct dm_target *ti)
 {
+	struct thin_c *tc = ti->private;
+
 	if (dm_noflush_suspending(ti))
-		requeue_io((struct thin_c *)ti->private);
+		noflush_work(tc, do_noflush_start);
+}
+
+static void thin_postsuspend(struct dm_target *ti)
+{
+	struct thin_c *tc = ti->private;
+
+	/*
+	 * The dm_noflush_suspending flag has been cleared by now, so
+	 * unfortunately we must always run this.
+	 */
+	noflush_work(tc, do_noflush_stop);
 }
 
 /*
@@ -3187,6 +3256,7 @@ static struct target_type thin_target = {
 	.dtr = thin_dtr,
 	.map = thin_map,
 	.end_io = thin_endio,
+	.presuspend = thin_presuspend,
 	.postsuspend = thin_postsuspend,
 	.status = thin_status,
 	.iterate_devices = thin_iterate_devices,
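
A note on the noflush_work() helper this patch introduces: it is an
instance of a common kernel idiom in which the caller builds a work item
on its own stack, queues it on the subsystem's workqueue, and sleeps until
the worker signals completion.  Running do_noflush_start/do_noflush_stop
this way means requeue_mode is only ever written from the pool's worker
thread, so the flag itself needs no extra locking.  A self-contained
sketch of the idiom follows; my_sync_work, my_sync_work_fn and
run_on_worker are illustrative names, not identifiers from dm-thin.c.

#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/atomic.h>

struct my_sync_work {
	struct work_struct worker;
	atomic_t done;
	wait_queue_head_t wait;
};

static void my_sync_work_fn(struct work_struct *ws)
{
	struct my_sync_work *w = container_of(ws, struct my_sync_work, worker);

	/* ... perform the state change in worker-thread context ... */

	atomic_set(&w->done, 1);	/* publish completion ... */
	wake_up(&w->wait);		/* ... and wake the sleeping caller */
}

/* Queue my_sync_work_fn on wq and block the caller until it has run. */
static void run_on_worker(struct workqueue_struct *wq)
{
	struct my_sync_work w;

	INIT_WORK(&w.worker, my_sync_work_fn);
	atomic_set(&w.done, 0);
	init_waitqueue_head(&w.wait);

	queue_work(wq, &w.worker);
	wait_event(w.wait, atomic_read(&w.done));
}

Because the caller blocks in wait_event() until the worker sets the flag,
the on-stack work item cannot go out of scope while the worker still
references it, which is what makes the stack allocation safe.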