about summary refs log tree commit diff stats
path: root/drivers/md
diff options
context:
space:
mode:
author Joe Thornber <ejt@redhat.com> 2013-10-30 13:11:58 -0400
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org> 2013-12-04 13:56:43 -0500
commit8fafee9829f539e17ea8678a6f56d2b897ffe3cc (patch)
treef0b37c8a1aac5669952fba34b1c2020db3642365 /drivers/md
parent0c5fd99e89b5f288ffe5c2ed301d2ffaac091891 (diff)
dm cache: fix a race condition between queuing new migrations and quiescing for a shutdown
commit 66cb1910df17b38334153462ec8166e48058035f upstream. The code that was trying to do this was inadequate. The postsuspend method (in ioctl context), needs to wait for the worker thread to acknowledge the request to quiesce. Otherwise the migration count may drop to zero temporarily before the worker thread realises we're quiescing. In this case the target will be taken down, but the worker thread may have issued a new migration, which will cause an oops when it completes. Signed-off-by: Joe Thornber <ejt@redhat.com> Signed-off-by: Mike Snitzer <snitzer@redhat.com> Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'drivers/md')
-rw-r--r--drivers/md/dm-cache-target.c54
1 file changed, 40 insertions(+), 14 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index df44b60e66f2..516f9c922bb2 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -151,6 +151,9 @@ struct cache {
151 atomic_t nr_migrations; 151 atomic_t nr_migrations;
152 wait_queue_head_t migration_wait; 152 wait_queue_head_t migration_wait;
153 153
154 wait_queue_head_t quiescing_wait;
155 atomic_t quiescing_ack;
156
154 /* 157 /*
155 * cache_size entries, dirty if set 158 * cache_size entries, dirty if set
156 */ 159 */
@@ -742,8 +745,9 @@ static void cell_defer(struct cache *cache, struct dm_bio_prison_cell *cell,
742 745
743static void cleanup_migration(struct dm_cache_migration *mg) 746static void cleanup_migration(struct dm_cache_migration *mg)
744{ 747{
745 dec_nr_migrations(mg->cache); 748 struct cache *cache = mg->cache;
746 free_migration(mg); 749 free_migration(mg);
750 dec_nr_migrations(cache);
747} 751}
748 752
749static void migration_failure(struct dm_cache_migration *mg) 753static void migration_failure(struct dm_cache_migration *mg)
@@ -1340,34 +1344,51 @@ static void writeback_some_dirty_blocks(struct cache *cache)
1340/*---------------------------------------------------------------- 1344/*----------------------------------------------------------------
1341 * Main worker loop 1345 * Main worker loop
1342 *--------------------------------------------------------------*/ 1346 *--------------------------------------------------------------*/
1343static void start_quiescing(struct cache *cache) 1347static bool is_quiescing(struct cache *cache)
1344{ 1348{
1349 int r;
1345 unsigned long flags; 1350 unsigned long flags;
1346 1351
1347 spin_lock_irqsave(&cache->lock, flags); 1352 spin_lock_irqsave(&cache->lock, flags);
1348 cache->quiescing = 1; 1353 r = cache->quiescing;
1349 spin_unlock_irqrestore(&cache->lock, flags); 1354 spin_unlock_irqrestore(&cache->lock, flags);
1355
1356 return r;
1350} 1357}
1351 1358
1352static void stop_quiescing(struct cache *cache) 1359static void ack_quiescing(struct cache *cache)
1360{
1361 if (is_quiescing(cache)) {
1362 atomic_inc(&cache->quiescing_ack);
1363 wake_up(&cache->quiescing_wait);
1364 }
1365}
1366
1367static void wait_for_quiescing_ack(struct cache *cache)
1368{
1369 wait_event(cache->quiescing_wait, atomic_read(&cache->quiescing_ack));
1370}
1371
1372static void start_quiescing(struct cache *cache)
1353{ 1373{
1354 unsigned long flags; 1374 unsigned long flags;
1355 1375
1356 spin_lock_irqsave(&cache->lock, flags); 1376 spin_lock_irqsave(&cache->lock, flags);
1357 cache->quiescing = 0; 1377 cache->quiescing = true;
1358 spin_unlock_irqrestore(&cache->lock, flags); 1378 spin_unlock_irqrestore(&cache->lock, flags);
1379
1380 wait_for_quiescing_ack(cache);
1359} 1381}
1360 1382
1361static bool is_quiescing(struct cache *cache) 1383static void stop_quiescing(struct cache *cache)
1362{ 1384{
1363 int r;
1364 unsigned long flags; 1385 unsigned long flags;
1365 1386
1366 spin_lock_irqsave(&cache->lock, flags); 1387 spin_lock_irqsave(&cache->lock, flags);
1367 r = cache->quiescing; 1388 cache->quiescing = false;
1368 spin_unlock_irqrestore(&cache->lock, flags); 1389 spin_unlock_irqrestore(&cache->lock, flags);
1369 1390
1370 return r; 1391 atomic_set(&cache->quiescing_ack, 0);
1371} 1392}
1372 1393
1373static void wait_for_migrations(struct cache *cache) 1394static void wait_for_migrations(struct cache *cache)
@@ -1414,16 +1435,15 @@ static void do_worker(struct work_struct *ws)
1414 struct cache *cache = container_of(ws, struct cache, worker); 1435 struct cache *cache = container_of(ws, struct cache, worker);
1415 1436
1416 do { 1437 do {
1417 if (!is_quiescing(cache)) 1438 if (!is_quiescing(cache)) {
1439 writeback_some_dirty_blocks(cache);
1440 process_deferred_writethrough_bios(cache);
1418 process_deferred_bios(cache); 1441 process_deferred_bios(cache);
1442 }
1419 1443
1420 process_migrations(cache, &cache->quiesced_migrations, issue_copy); 1444 process_migrations(cache, &cache->quiesced_migrations, issue_copy);
1421 process_migrations(cache, &cache->completed_migrations, complete_migration); 1445 process_migrations(cache, &cache->completed_migrations, complete_migration);
1422 1446
1423 writeback_some_dirty_blocks(cache);
1424
1425 process_deferred_writethrough_bios(cache);
1426
1427 if (commit_if_needed(cache)) { 1447 if (commit_if_needed(cache)) {
1428 process_deferred_flush_bios(cache, false); 1448 process_deferred_flush_bios(cache, false);
1429 1449
@@ -1436,6 +1456,9 @@ static void do_worker(struct work_struct *ws)
1436 process_migrations(cache, &cache->need_commit_migrations, 1456 process_migrations(cache, &cache->need_commit_migrations,
1437 migration_success_post_commit); 1457 migration_success_post_commit);
1438 } 1458 }
1459
1460 ack_quiescing(cache);
1461
1439 } while (more_work(cache)); 1462 } while (more_work(cache));
1440} 1463}
1441 1464
@@ -1998,6 +2021,9 @@ static int cache_create(struct cache_args *ca, struct cache **result)
1998 atomic_set(&cache->nr_migrations, 0); 2021 atomic_set(&cache->nr_migrations, 0);
1999 init_waitqueue_head(&cache->migration_wait); 2022 init_waitqueue_head(&cache->migration_wait);
2000 2023
2024 init_waitqueue_head(&cache->quiescing_wait);
2025 atomic_set(&cache->quiescing_ack, 0);
2026
2001 r = -ENOMEM; 2027 r = -ENOMEM;
2002 cache->nr_dirty = 0; 2028 cache->nr_dirty = 0;
2003 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size)); 2029 cache->dirty_bitset = alloc_bitset(from_cblock(cache->cache_size));