about | summary | refs | log | tree | commit | diff | stats
path: root/drivers/md/raid10.c
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--  drivers/md/raid10.c | 18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index cb882aae9e20..6703751d87d7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1684,13 +1684,12 @@ static void error(struct mddev *mddev, struct md_rdev *rdev)
1684 spin_unlock_irqrestore(&conf->device_lock, flags); 1684 spin_unlock_irqrestore(&conf->device_lock, flags);
1685 return; 1685 return;
1686 } 1686 }
1687 if (test_and_clear_bit(In_sync, &rdev->flags)) { 1687 if (test_and_clear_bit(In_sync, &rdev->flags))
1688 mddev->degraded++; 1688 mddev->degraded++;
1689 /* 1689 /*
1690 * if recovery is running, make sure it aborts. 1690 * If recovery is running, make sure it aborts.
1691 */ 1691 */
1692 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 1692 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
1693 }
1694 set_bit(Blocked, &rdev->flags); 1693 set_bit(Blocked, &rdev->flags);
1695 set_bit(Faulty, &rdev->flags); 1694 set_bit(Faulty, &rdev->flags);
1696 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1695 set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -2954,6 +2953,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
2954 */ 2953 */
2955 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) { 2954 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery)) {
2956 end_reshape(conf); 2955 end_reshape(conf);
2956 close_sync(conf);
2957 return 0; 2957 return 0;
2958 } 2958 }
2959 2959
@@ -3082,6 +3082,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3082 } 3082 }
3083 3083
3084 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3084 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3085 r10_bio->state = 0;
3085 raise_barrier(conf, rb2 != NULL); 3086 raise_barrier(conf, rb2 != NULL);
3086 atomic_set(&r10_bio->remaining, 0); 3087 atomic_set(&r10_bio->remaining, 0);
3087 3088
@@ -3270,6 +3271,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3270 if (sync_blocks < max_sync) 3271 if (sync_blocks < max_sync)
3271 max_sync = sync_blocks; 3272 max_sync = sync_blocks;
3272 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 3273 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
3274 r10_bio->state = 0;
3273 3275
3274 r10_bio->mddev = mddev; 3276 r10_bio->mddev = mddev;
3275 atomic_set(&r10_bio->remaining, 0); 3277 atomic_set(&r10_bio->remaining, 0);
@@ -4385,6 +4387,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
4385 read_more:                                             4387 read_more:
4386 /* Now schedule reads for blocks from sector_nr to last */ 4388 /* Now schedule reads for blocks from sector_nr to last */
4387 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO); 4389 r10_bio = mempool_alloc(conf->r10buf_pool, GFP_NOIO);
4390 r10_bio->state = 0;
4388 raise_barrier(conf, sectors_done != 0); 4391 raise_barrier(conf, sectors_done != 0);
4389 atomic_set(&r10_bio->remaining, 0); 4392 atomic_set(&r10_bio->remaining, 0);
4390 r10_bio->mddev = mddev; 4393 r10_bio->mddev = mddev;
@@ -4399,6 +4402,7 @@ read_more:
4399 * on all the target devices. 4402 * on all the target devices.
4400 */ 4403 */
4401 // FIXME 4404 // FIXME
4405 mempool_free(r10_bio, conf->r10buf_pool);
4402 set_bit(MD_RECOVERY_INTR, &mddev->recovery); 4406 set_bit(MD_RECOVERY_INTR, &mddev->recovery);
4403 return sectors_done; 4407 return sectors_done;
4404 } 4408 }
@@ -4411,7 +4415,7 @@ read_more:
4411 read_bio->bi_private = r10_bio; 4415 read_bio->bi_private = r10_bio;
4412 read_bio->bi_end_io = end_sync_read; 4416 read_bio->bi_end_io = end_sync_read;
4413 read_bio->bi_rw = READ; 4417 read_bio->bi_rw = READ;
4414 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); 4418 read_bio->bi_flags &= (~0UL << BIO_RESET_BITS);
4415 read_bio->bi_flags |= 1 << BIO_UPTODATE; 4419 read_bio->bi_flags |= 1 << BIO_UPTODATE;
4416 read_bio->bi_vcnt = 0; 4420 read_bio->bi_vcnt = 0;
4417 read_bio->bi_iter.bi_size = 0; 4421 read_bio->bi_iter.bi_size = 0;