diff options
author | NeilBrown <neilb@suse.com> | 2017-06-05 02:49:39 -0400 |
---|---|---|
committer | Shaohua Li <shli@fb.com> | 2017-06-13 13:18:01 -0400 |
commit | cc27b0c78c79680d128dbac79de0d40556d041bb (patch) | |
tree | 1f32d4acd29d825eecc1aef90d423cdacea8121c /drivers/md/raid0.c | |
parent | 63f700aab4c11d46626de3cd051dae56cf7e9056 (diff) |
md: fix deadlock between mddev_suspend() and md_write_start()
If mddev_suspend() races with md_write_start() we can deadlock
with mddev_suspend() waiting for the request that is currently
in md_write_start() to complete the ->make_request() call,
and md_write_start() waiting for the metadata to be updated
to mark the array as 'dirty'.
As metadata updates done by md_check_recovery() only happen when
the mddev_lock() can be claimed, and as mddev_suspend() is often
called with the lock held, these threads wait indefinitely for each
other.
We fix this by having md_write_start() abort if mddev_suspend()
is happening, and ->make_request() aborts if md_write_start()
aborted.
md_make_request() can detect this abort, decrease the ->active_io
count, and wait for mddev_suspend().
Reported-by: Nix <nix@esperi.org.uk>
Fixes: 68866e425be2 ("MD: no sync IO while suspended")
Cc: stable@vger.kernel.org
Signed-off-by: NeilBrown <neilb@suse.com>
Signed-off-by: Shaohua Li <shli@fb.com>
Diffstat (limited to 'drivers/md/raid0.c')
-rw-r--r-- | drivers/md/raid0.c | 7 |
1 files changed, 4 insertions, 3 deletions
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index d6c0bc76e837..94d9ae9b0fd0 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -548,7 +548,7 @@ static void raid0_handle_discard(struct mddev *mddev, struct bio *bio) | |||
548 | bio_endio(bio); | 548 | bio_endio(bio); |
549 | } | 549 | } |
550 | 550 | ||
551 | static void raid0_make_request(struct mddev *mddev, struct bio *bio) | 551 | static bool raid0_make_request(struct mddev *mddev, struct bio *bio) |
552 | { | 552 | { |
553 | struct strip_zone *zone; | 553 | struct strip_zone *zone; |
554 | struct md_rdev *tmp_dev; | 554 | struct md_rdev *tmp_dev; |
@@ -559,12 +559,12 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
559 | 559 | ||
560 | if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { | 560 | if (unlikely(bio->bi_opf & REQ_PREFLUSH)) { |
561 | md_flush_request(mddev, bio); | 561 | md_flush_request(mddev, bio); |
562 | return; | 562 | return true; |
563 | } | 563 | } |
564 | 564 | ||
565 | if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) { | 565 | if (unlikely((bio_op(bio) == REQ_OP_DISCARD))) { |
566 | raid0_handle_discard(mddev, bio); | 566 | raid0_handle_discard(mddev, bio); |
567 | return; | 567 | return true; |
568 | } | 568 | } |
569 | 569 | ||
570 | bio_sector = bio->bi_iter.bi_sector; | 570 | bio_sector = bio->bi_iter.bi_sector; |
@@ -599,6 +599,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio) | |||
599 | mddev_check_writesame(mddev, bio); | 599 | mddev_check_writesame(mddev, bio); |
600 | mddev_check_write_zeroes(mddev, bio); | 600 | mddev_check_write_zeroes(mddev, bio); |
601 | generic_make_request(bio); | 601 | generic_make_request(bio); |
602 | return true; | ||
602 | } | 603 | } |
603 | 604 | ||
604 | static void raid0_status(struct seq_file *seq, struct mddev *mddev) | 605 | static void raid0_status(struct seq_file *seq, struct mddev *mddev) |