summaryrefslogtreecommitdiffstats
path: root/drivers/md/raid1.c
diff options
context:
space:
mode:
authorNeilBrown <neilb@suse.com>2016-11-04 01:46:03 -0400
committerShaohua Li <shli@fb.com>2016-11-07 18:08:23 -0500
commit5e2c7a3611977b69ae0531e8fbdeab5dad17925a (patch)
treed7618a423b48af572c9f0af614bb9d4d764394dc /drivers/md/raid1.c
parent060b0689f5df7e87641c820a605c779149da33ef (diff)
md/raid1: abort delayed writes when device fails.
When writing to an array with a bitmap enabled, the writes are grouped in batches which are preceded by an update to the bitmap. It is quite likely, if a drive develops a problem which is not media related, that the bitmap write will be the first to report an error and cause the device to be marked faulty (as the bitmap write is at the start of a batch). In this case, there is no point submitting the subsequent writes to the failed device - that just wastes time. So re-check the Faulty state of a device before submitting a delayed write. This requires that we keep the 'rdev', rather than the 'bdev' in the bio, then swap in the bdev just before final submission. Signed-off-by: NeilBrown <neilb@suse.com> Signed-off-by: Shaohua Li <shli@fb.com>
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--drivers/md/raid1.c20
1 files changed, 15 insertions, 5 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 15f0b552bf48..aac2a05cf8d1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -742,9 +742,14 @@ static void flush_pending_writes(struct r1conf *conf)
742 742
743 while (bio) { /* submit pending writes */ 743 while (bio) { /* submit pending writes */
744 struct bio *next = bio->bi_next; 744 struct bio *next = bio->bi_next;
745 struct md_rdev *rdev = (void*)bio->bi_bdev;
745 bio->bi_next = NULL; 746 bio->bi_next = NULL;
746 if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 747 bio->bi_bdev = rdev->bdev;
747 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 748 if (test_bit(Faulty, &rdev->flags)) {
749 bio->bi_error = -EIO;
750 bio_endio(bio);
751 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
752 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
748 /* Just ignore it */ 753 /* Just ignore it */
749 bio_endio(bio); 754 bio_endio(bio);
750 else 755 else
@@ -1016,9 +1021,14 @@ static void raid1_unplug(struct blk_plug_cb *cb, bool from_schedule)
1016 1021
1017 while (bio) { /* submit pending writes */ 1022 while (bio) { /* submit pending writes */
1018 struct bio *next = bio->bi_next; 1023 struct bio *next = bio->bi_next;
1024 struct md_rdev *rdev = (void*)bio->bi_bdev;
1019 bio->bi_next = NULL; 1025 bio->bi_next = NULL;
1020 if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && 1026 bio->bi_bdev = rdev->bdev;
1021 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) 1027 if (test_bit(Faulty, &rdev->flags)) {
1028 bio->bi_error = -EIO;
1029 bio_endio(bio);
1030 } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
1031 !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
1022 /* Just ignore it */ 1032 /* Just ignore it */
1023 bio_endio(bio); 1033 bio_endio(bio);
1024 else 1034 else
@@ -1357,7 +1367,7 @@ read_again:
1357 1367
1358 mbio->bi_iter.bi_sector = (r1_bio->sector + 1368 mbio->bi_iter.bi_sector = (r1_bio->sector +
1359 conf->mirrors[i].rdev->data_offset); 1369 conf->mirrors[i].rdev->data_offset);
1360 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1370 mbio->bi_bdev = (void*)conf->mirrors[i].rdev;
1361 mbio->bi_end_io = raid1_end_write_request; 1371 mbio->bi_end_io = raid1_end_write_request;
1362 bio_set_op_attrs(mbio, op, do_flush_fua | do_sync); 1372 bio_set_op_attrs(mbio, op, do_flush_fua | do_sync);
1363 mbio->bi_private = r1_bio; 1373 mbio->bi_private = r1_bio;