author     Shaohua Li <shli@fb.com>    2017-07-17 17:33:48 -0400
committer  Shaohua Li <shli@fb.com>    2017-07-21 15:47:20 -0400
commit     16d56e2fcc1fc15b981369653c3b41d7ff0b443d (patch)
tree       ead4908160acc2c9192c78898f1f5ec8062d231d /drivers/md/raid1.c
parent     be453e7761d0e72d8a1b2fcfde6d1a7e53881190 (diff)
md/raid1: fix writebehind bio clone
After a bio is submitted, we must not clone it, because its bi_iter may have been invalidated by the driver. This is the case for behind_master_bio: in certain situations we dispatch behind_master_bio immediately for the first disk and then clone it for the other disks.

https://bugzilla.kernel.org/show_bug.cgi?id=196383

Reported-and-tested-by: Markus <m4rkusxxl@web.de>
Reviewed-by: Ming Lei <ming.lei@redhat.com>
Fixes: 841c1316c7da ("md: raid1: improve write behind")
Cc: stable@vger.kernel.org (4.12+)
Signed-off-by: Shaohua Li <shli@fb.com>
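As a rough illustration of the ordering problem described in the commit message, here is a minimal userspace sketch (not kernel code; struct fake_bio, submit_fake() and clone_fake() are made-up stand-ins for struct bio, the driver submission path and bio_clone_fast()): once a submitted bio's iterator has been consumed by the "driver", a clone taken afterwards inherits the consumed state, while a clone taken before submission keeps the full range.

/* Minimal sketch only: models a bio whose iterator the driver consumes. */
#include <stdio.h>

struct fake_bio {
	unsigned int iter_size;		/* stands in for bio->bi_iter.bi_size */
};

/* Stand-in for submitting to a driver that advances the iterator to 0. */
static void submit_fake(struct fake_bio *b)
{
	b->iter_size = 0;
}

/* Stand-in for bio_clone_fast(): copies the current iterator state. */
static struct fake_bio clone_fake(const struct fake_bio *b)
{
	return *b;
}

int main(void)
{
	/* Buggy order: submit the master bio for the first disk, then clone
	 * it for the remaining disks; the clone sees a consumed iterator. */
	struct fake_bio master = { .iter_size = 4096 };
	submit_fake(&master);
	struct fake_bio late_clone = clone_fake(&master);

	/* Fixed order: take every clone before the master is ever submitted. */
	struct fake_bio master2 = { .iter_size = 4096 };
	struct fake_bio early_clone = clone_fake(&master2);
	submit_fake(&master2);

	printf("clone after submit: %u bytes, clone before submit: %u bytes\n",
	       late_clone.iter_size, early_clone.iter_size);
	return 0;
}

This mirrors what the patch below does: behind_master_bio itself is no longer returned by alloc_behind_master_bio() and submitted for the first disk; instead raid1_write_request() clones it for every disk, so its bi_iter is never invalidated before the clones are taken.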
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--  drivers/md/raid1.c  34
1 file changed, 13 insertions(+), 21 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 8387eb1540cd..1d235cc8b402 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -484,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio)
 	}
 
 	if (behind) {
-		/* we release behind master bio when all write are done */
-		if (r1_bio->behind_master_bio == bio)
-			to_put = NULL;
-
 		if (test_bit(WriteMostly, &rdev->flags))
 			atomic_dec(&r1_bio->behind_remaining);
 
@@ -1080,7 +1076,7 @@ static void unfreeze_array(struct r1conf *conf)
 	wake_up(&conf->wait_barrier);
 }
 
-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
+static void alloc_behind_master_bio(struct r1bio *r1_bio,
 					   struct bio *bio)
 {
 	int size = bio->bi_iter.bi_size;
@@ -1090,11 +1086,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
 
 	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
 	if (!behind_bio)
-		goto fail;
+		return;
 
 	/* discard op, we don't support writezero/writesame yet */
-	if (!bio_has_data(bio))
+	if (!bio_has_data(bio)) {
+		behind_bio->bi_iter.bi_size = size;
 		goto skip_copy;
+	}
 
 	while (i < vcnt && size) {
 		struct page *page;
@@ -1115,14 +1113,13 @@ skip_copy:
 	r1_bio->behind_master_bio = behind_bio;;
 	set_bit(R1BIO_BehindIO, &r1_bio->state);
 
-	return behind_bio;
+	return;
 
 free_pages:
 	pr_debug("%dB behind alloc failed, doing sync I/O\n",
 		 bio->bi_iter.bi_size);
 	bio_free_pages(behind_bio);
-fail:
-	return behind_bio;
+	bio_put(behind_bio);
 }
 
 struct raid1_plug_cb {
@@ -1475,7 +1472,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			    (atomic_read(&bitmap->behind_writes)
 			     < mddev->bitmap_info.max_write_behind) &&
 			    !waitqueue_active(&bitmap->behind_wait)) {
-				mbio = alloc_behind_master_bio(r1_bio, bio);
+				alloc_behind_master_bio(r1_bio, bio);
 			}
 
 			bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1485,14 +1482,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			first_clone = 0;
 		}
 
-		if (!mbio) {
-			if (r1_bio->behind_master_bio)
-				mbio = bio_clone_fast(r1_bio->behind_master_bio,
-						      GFP_NOIO,
-						      mddev->bio_set);
-			else
-				mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
-		}
+		if (r1_bio->behind_master_bio)
+			mbio = bio_clone_fast(r1_bio->behind_master_bio,
+					      GFP_NOIO, mddev->bio_set);
+		else
+			mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
 
 		if (r1_bio->behind_master_bio) {
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -2346,8 +2340,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 		wbio = bio_clone_fast(r1_bio->behind_master_bio,
 				      GFP_NOIO,
 				      mddev->bio_set);
-		/* We really need a _all clone */
-		wbio->bi_iter = (struct bvec_iter){ 0 };
 	} else {
 		wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
 				      mddev->bio_set);