author		Linus Torvalds <torvalds@linux-foundation.org>	2017-07-08 15:50:18 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-07-08 15:50:18 -0400
commit		026d15f6b9878794fae1f794cae881ccd65052e5 (patch)
tree		d772991739c19d74d6ccdd1c9ae8e1ad72c5e061 /drivers/md/raid10.c
parent		43d012099f5479eb057145f273280ff097f0e73d (diff)
parent		7184ef8bab0cb865c3cea9dd1a675771145df0af (diff)
Merge branch 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md
Pull MD update from Shaohua Li:

 - fixed deadlock in MD suspend and a potential bug in bio allocation
   (Neil Brown)
 - fixed signal issue (Mikulas Patocka)
 - fixed typo in FailFast test (Guoqing Jiang)
 - other trivial fixes

* 'for-next' of git://git.kernel.org/pub/scm/linux/kernel/git/shli/md:
  MD: fix sleep in atomic
  MD: fix a null dereference
  md: use a separate bio_set for synchronous IO.
  md: change the initialization value for a spare device spot to MD_DISK_ROLE_SPARE
  md/raid1: remove unused bio in sync_request_write
  md/raid10: fix FailFast test for wrong device
  md: don't use flush_signals in userspace processes
  md: fix deadlock between mddev_suspend() and md_write_start()
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--	drivers/md/raid10.c	16
1 file changed, 9 insertions(+), 7 deletions(-)
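
Of the commits above, two touch raid10.c: the mddev_suspend()/md_write_start() deadlock fix and the FailFast test fix. The former changes ->make_request() to return bool so the md core can retry a bio that could not be started. A minimal caller-side sketch of that convention (illustrative only, not the verbatim md.c code; mddev->sb_wait and mddev->suspended are the existing fields this series builds on):

	/* Illustrative sketch, not verbatim md.c: retry a bio that the
	 * personality refused because the array was being suspended. */
	for (;;) {
		if (mddev->pers->make_request(mddev, bio))
			break;			/* bio started or completed */
		/* make_request() returned false: md_write_start() saw a
		 * suspended array; wait for resume and try again. */
		wait_event(mddev->sb_wait, !mddev->suspended);
	}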
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 57a250fdbbcc..5026e7ad51d3 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1303,8 +1303,6 @@ static void raid10_write_request(struct mddev *mddev, struct bio *bio,
 	sector_t sectors;
 	int max_sectors;
 
-	md_write_start(mddev, bio);
-
 	/*
 	 * Register the new request and wait if the reconstruction
 	 * thread has put up a bar for new requests.
@@ -1525,7 +1523,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio, int sectors)
 		raid10_write_request(mddev, bio, r10_bio);
 }
 
-static void raid10_make_request(struct mddev *mddev, struct bio *bio)
+static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
 {
 	struct r10conf *conf = mddev->private;
 	sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
@@ -1534,9 +1532,12 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 
 	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
 		md_flush_request(mddev, bio);
-		return;
+		return true;
 	}
 
+	if (!md_write_start(mddev, bio))
+		return false;
+
 	/*
 	 * If this request crosses a chunk boundary, we need to split
 	 * it.
@@ -1553,6 +1554,7 @@ static void raid10_make_request(struct mddev *mddev, struct bio *bio)
 
 	/* In case raid10d snuck in to freeze_array */
 	wake_up(&conf->wait_barrier);
+	return true;
 }
 
 static void raid10_status(struct seq_file *seq, struct mddev *mddev)
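
Condensed from the three hunks above, the updated raid10_make_request() now has this shape (locals and the chunk-split logic elided):

static bool raid10_make_request(struct mddev *mddev, struct bio *bio)
{
	struct r10conf *conf = mddev->private;

	if (unlikely(bio->bi_opf & REQ_PREFLUSH)) {
		md_flush_request(mddev, bio);
		return true;		/* flush queued; nothing to retry */
	}

	/* Moved up from raid10_write_request() so a suspended array is
	 * detected before any barrier accounting is done. */
	if (!md_write_start(mddev, bio))
		return false;		/* caller must retry after resume */

	/* ... split on chunk boundaries and issue via __make_request() ... */

	wake_up(&conf->wait_barrier);	/* in case raid10d snuck in to freeze_array */
	return true;
}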
@@ -3293,7 +3295,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		biolist = bio;
 		bio->bi_end_io = end_sync_read;
 		bio_set_op_attrs(bio, REQ_OP_READ, 0);
-		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+		if (test_bit(FailFast, &rdev->flags))
 			bio->bi_opf |= MD_FAILFAST;
 		bio->bi_iter.bi_sector = sector + rdev->data_offset;
 		bio->bi_bdev = rdev->bdev;
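
The FailFast fix in this hunk (and the matching one below) tests the flag on the local rdev, the device the bio was just routed to, instead of re-dereferencing conf->mirrors[d].rdev, which in the replacement-write case is a different device (the slot's primary, not the replacement) and may even be NULL. The intended pattern, extracted from the hunk:

	/* Test flags on the rdev the bio actually targets. */
	if (test_bit(FailFast, &rdev->flags))	/* not conf->mirrors[d].rdev */
		bio->bi_opf |= MD_FAILFAST;
	bio->bi_iter.bi_sector = sector + rdev->data_offset;
	bio->bi_bdev = rdev->bdev;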
@@ -3305,7 +3307,6 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			continue;
 		}
 		atomic_inc(&rdev->nr_pending);
-		rcu_read_unlock();
 
 		/* Need to set up for writing to the replacement */
 		bio = r10_bio->devs[i].repl_bio;
@@ -3316,11 +3317,12 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		biolist = bio;
 		bio->bi_end_io = end_sync_write;
 		bio_set_op_attrs(bio, REQ_OP_WRITE, 0);
-		if (test_bit(FailFast, &conf->mirrors[d].rdev->flags))
+		if (test_bit(FailFast, &rdev->flags))
 			bio->bi_opf |= MD_FAILFAST;
 		bio->bi_iter.bi_sector = sector + rdev->data_offset;
 		bio->bi_bdev = rdev->bdev;
 		count++;
+		rcu_read_unlock();
 	}
 
 	if (count < 2) {
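
The last two hunks move rcu_read_unlock() from just after the nr_pending increment to the end of the replacement setup, so the pointer obtained under RCU is no longer dereferenced (flags, data_offset, bdev) outside the read-side critical section. A hedged reconstruction of the surrounding loop body (names follow the diff; the full context is not shown in the hunks):

	rcu_read_lock();
	rdev = rcu_dereference(conf->mirrors[d].replacement);
	if (rdev == NULL || test_bit(Faulty, &rdev->flags)) {
		rcu_read_unlock();
		continue;
	}
	atomic_inc(&rdev->nr_pending);	/* pin the device for the I/O */
	/* ... set up repl_bio from rdev->flags, ->data_offset, ->bdev ... */
	rcu_read_unlock();		/* only after the last rdev use */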