about summary refs log tree commit diff stats
path: root/drivers/md/raid1.c
diff options
context:
space:
mode:
authorNate Dailey <nate.dailey@stratus.com>2017-10-17 08:17:03 -0400
committerShaohua Li <shli@fb.com>2017-11-02 00:32:21 -0400
commitf6eca2d43ed694ab8124dd24c88277f7eca93b7d (patch)
tree1ad48d4f54a13f586f9efcc7f1404d089300265c /drivers/md/raid1.c
parentae89fd3de4793c0dc2ec7e9f26b58a357d74a6c7 (diff)
raid1: prevent freeze_array/wait_all_barriers deadlock
If freeze_array is attempted in the middle of close_sync/ wait_all_barriers, deadlock can occur. freeze_array will wait for nr_pending and nr_queued to line up. wait_all_barriers increments nr_pending for each barrier bucket, one at a time, but doesn't actually issue IO that could be counted in nr_queued. So freeze_array is blocked until wait_all_barriers completes and allow_all_barriers runs. At the same time, when _wait_barrier sees array_frozen == 1, it stops and waits for freeze_array to complete. Prevent the deadlock by making close_sync call _wait_barrier and _allow_barrier for one bucket at a time, instead of deferring the _allow_barrier calls until after all _wait_barriers are complete. Signed-off-by: Nate Dailey <nate.dailey@stratus.com> Fix: fd76863e37fe(RAID1: a new I/O barrier implementation to remove resync window) Reviewed-by: Coly Li <colyli@suse.de> Cc: stable@vger.kernel.org (v4.11) Signed-off-by: Shaohua Li <shli@fb.com>
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r-- drivers/md/raid1.c | 24
1 file changed, 6 insertions(+), 18 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1f36473c79dc..038f5eb299ce 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -989,14 +989,6 @@ static void wait_barrier(struct r1conf *conf, sector_t sector_nr)
989 _wait_barrier(conf, idx); 989 _wait_barrier(conf, idx);
990} 990}
991 991
992static void wait_all_barriers(struct r1conf *conf)
993{
994 int idx;
995
996 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
997 _wait_barrier(conf, idx);
998}
999
1000static void _allow_barrier(struct r1conf *conf, int idx) 992static void _allow_barrier(struct r1conf *conf, int idx)
1001{ 993{
1002 atomic_dec(&conf->nr_pending[idx]); 994 atomic_dec(&conf->nr_pending[idx]);
@@ -1010,14 +1002,6 @@ static void allow_barrier(struct r1conf *conf, sector_t sector_nr)
1010 _allow_barrier(conf, idx); 1002 _allow_barrier(conf, idx);
1011} 1003}
1012 1004
1013static void allow_all_barriers(struct r1conf *conf)
1014{
1015 int idx;
1016
1017 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++)
1018 _allow_barrier(conf, idx);
1019}
1020
1021/* conf->resync_lock should be held */ 1005/* conf->resync_lock should be held */
1022static int get_unqueued_pending(struct r1conf *conf) 1006static int get_unqueued_pending(struct r1conf *conf)
1023{ 1007{
@@ -1645,8 +1629,12 @@ static void print_conf(struct r1conf *conf)
1645 1629
1646static void close_sync(struct r1conf *conf) 1630static void close_sync(struct r1conf *conf)
1647{ 1631{
1648 wait_all_barriers(conf); 1632 int idx;
1649 allow_all_barriers(conf); 1633
1634 for (idx = 0; idx < BARRIER_BUCKETS_NR; idx++) {
1635 _wait_barrier(conf, idx);
1636 _allow_barrier(conf, idx);
1637 }
1650 1638
1651 mempool_destroy(conf->r1buf_pool); 1639 mempool_destroy(conf->r1buf_pool);
1652 conf->r1buf_pool = NULL; 1640 conf->r1buf_pool = NULL;