author	Mike Snitzer <snitzer@redhat.com>	2015-07-29 13:48:23 -0400
committer	Mike Snitzer <snitzer@redhat.com>	2015-07-29 14:32:09 -0400
commit	795e633a2dc6cbbeac68bc7f6006082150d38bb7 (patch)
tree	1ebe7ece3ff3fd162494731e7e303f6207d893c0 /drivers/md/dm-cache-target.c
parent	3508e6590d4729ac07f01f7ae2256c2f9dc738b8 (diff)
dm cache: fix device destroy hang due to improper prealloc_used accounting
Commit 665022d72f9 ("dm cache: avoid calls to prealloc_free_structs() if
possible") introduced a regression that caused the removal of a DM cache
device to hang in cache_postsuspend()'s call to wait_for_migrations()
with the following stack trace:

 [<ffffffff81651457>] schedule+0x37/0x80
 [<ffffffffa041e21b>] cache_postsuspend+0xbb/0x470 [dm_cache]
 [<ffffffff810ba970>] ? prepare_to_wait_event+0xf0/0xf0
 [<ffffffffa0006f77>] dm_table_postsuspend_targets+0x47/0x60 [dm_mod]
 [<ffffffffa0001eb5>] __dm_destroy+0x215/0x250 [dm_mod]
 [<ffffffffa0004113>] dm_destroy+0x13/0x20 [dm_mod]
 [<ffffffffa00098cd>] dev_remove+0x10d/0x170 [dm_mod]
 [<ffffffffa00097c0>] ? dev_suspend+0x240/0x240 [dm_mod]
 [<ffffffffa0009f85>] ctl_ioctl+0x255/0x4d0 [dm_mod]
 [<ffffffff8127ac00>] ? SYSC_semtimedop+0x280/0xe10
 [<ffffffffa000a213>] dm_ctl_ioctl+0x13/0x20 [dm_mod]
 [<ffffffff811fd432>] do_vfs_ioctl+0x2d2/0x4b0
 [<ffffffff81117d5f>] ? __audit_syscall_entry+0xaf/0x100
 [<ffffffff81022636>] ? do_audit_syscall_entry+0x66/0x70
 [<ffffffff811fd689>] SyS_ioctl+0x79/0x90
 [<ffffffff81023e58>] ? syscall_trace_leave+0xb8/0x110
 [<ffffffff81654f6e>] entry_SYSCALL_64_fastpath+0x12/0x71

Fix this by accounting for the call to prealloc_data_structs()
immediately _before_ the call as opposed to after.  This is needed
because it is possible to break out of the control loop after the call
to prealloc_data_structs() but before prealloc_used was set to true.

Signed-off-by: Mike Snitzer <snitzer@redhat.com>
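For illustration, a minimal C sketch of the loop shape shared by
process_deferred_bios(), process_deferred_cells() and
writeback_some_dirty_blocks() follows.  The helpers prealloc_fails() and
followup_fails() are hypothetical stand-ins for prealloc_data_structs()
and get_cell(), not the real dm-cache functions; in the driver,
prealloc_free_structs() releases the preallocated migration structures
that wait_for_migrations() blocks on.

#include <stdbool.h>
#include <stdio.h>

static int iteration;

/* Stand-in: pretend preallocation always succeeds. */
static bool prealloc_fails(void)
{
	return false;
}

/* Stand-in: pretend a follow-up step (e.g. get_cell()) fails once. */
static bool followup_fails(void)
{
	return iteration == 2;
}

/* Stand-in for the real cleanup of preallocated structs. */
static void prealloc_free_structs(void)
{
	printf("releasing preallocated structs\n");
}

static void process_loop(void)
{
	bool prealloc_used = false;

	for (iteration = 0; iteration < 5; iteration++) {
		/*
		 * The fix: record the call before making it.  With the old
		 * ordering (prealloc_used = true only after the work at the
		 * bottom of the loop body), the break below left the loop
		 * with prealloc_used still false, so the preallocated
		 * structs were never freed and wait_for_migrations() hung.
		 */
		prealloc_used = true;
		if (prealloc_fails() || followup_fails())
			break;

		/* process_bio()/process_cell()/writeback() happen here */
	}

	if (prealloc_used)
		prealloc_free_structs();
}

int main(void)
{
	process_loop();
	return 0;
}

Setting the flag unconditionally before the call is safe even when the
call ends up allocating nothing, because prealloc_free_structs() only
frees structures that were actually allocated; the worst case is one
redundant cleanup pass, while every early exit from the loop is now
guaranteed to reach the accounting.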
Diffstat (limited to 'drivers/md/dm-cache-target.c')
-rw-r--r--	drivers/md/dm-cache-target.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 64e96a2bed58..1fe93cfea7d3 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -1967,6 +1967,7 @@ static void process_deferred_bios(struct cache *cache)
 		 * this bio might require one, we pause until there are some
 		 * prepared mappings to process.
 		 */
+		prealloc_used = true;
 		if (prealloc_data_structs(cache, &structs)) {
 			spin_lock_irqsave(&cache->lock, flags);
 			bio_list_merge(&cache->deferred_bios, &bios);
@@ -1982,7 +1983,6 @@ static void process_deferred_bios(struct cache *cache)
 			process_discard_bio(cache, &structs, bio);
 		else
 			process_bio(cache, &structs, bio);
-		prealloc_used = true;
 	}
 
 	if (prealloc_used)
@@ -2011,6 +2011,7 @@ static void process_deferred_cells(struct cache *cache)
 		 * this bio might require one, we pause until there are some
 		 * prepared mappings to process.
 		 */
+		prealloc_used = true;
 		if (prealloc_data_structs(cache, &structs)) {
 			spin_lock_irqsave(&cache->lock, flags);
 			list_splice(&cells, &cache->deferred_cells);
@@ -2019,7 +2020,6 @@ static void process_deferred_cells(struct cache *cache)
 		}
 
 		process_cell(cache, &structs, cell);
-		prealloc_used = true;
 	}
 
 	if (prealloc_used)
@@ -2081,6 +2081,7 @@ static void writeback_some_dirty_blocks(struct cache *cache)
 		if (policy_writeback_work(cache->policy, &oblock, &cblock, busy))
 			break; /* no work to do */
 
+		prealloc_used = true;
 		if (prealloc_data_structs(cache, &structs) ||
 		    get_cell(cache, oblock, &structs, &old_ocell)) {
 			policy_set_dirty(cache->policy, oblock);
@@ -2088,7 +2089,6 @@ static void writeback_some_dirty_blocks(struct cache *cache)
 		}
 
 		writeback(cache, &structs, oblock, cblock, old_ocell);
-		prealloc_used = true;
 	}
 
 	if (prealloc_used)