Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--  drivers/md/raid10.c  19
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 6736d6dff981..7301631abe04 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1236,6 +1236,7 @@ static void end_sync_read(struct bio *bio, int error)
 	/* for reconstruct, we always reschedule after a read.
 	 * for resync, only after all reads
 	 */
+	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 	if (test_bit(R10BIO_IsRecover, &r10_bio->state) ||
 	    atomic_dec_and_test(&r10_bio->remaining)) {
 		/* we have read all the blocks,
@@ -1243,7 +1244,6 @@ static void end_sync_read(struct bio *bio, int error)
 		 */
 		reschedule_retry(r10_bio);
 	}
-	rdev_dec_pending(conf->mirrors[d].rdev, conf->mddev);
 }
 
 static void end_sync_write(struct bio *bio, int error)
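In the two hunks above, the rdev_dec_pending() call moves from after the reschedule_retry() handoff to before it, so the per-device pending count is dropped while the completion path still owns the r10_bio; a plausible reading is that once the r10_bio has been handed to raid10d it may be freed or recycled, so all bookkeeping is finished first. Below is a minimal user-space sketch of that ordering only; device_ref, sync_unit and hand_off are hypothetical stand-ins, not md symbols, and the real code also short-circuits on R10BIO_IsRecover.

/*
 * User-space sketch (not kernel code) of the ordering established in
 * end_sync_read(): finish per-device bookkeeping first, then hand the
 * unit to the retry path and never touch it again.
 */
#include <stdatomic.h>
#include <stdio.h>

struct device_ref { atomic_int nr_pending; };

struct sync_unit {
        atomic_int remaining;           /* reads still outstanding */
        struct device_ref *dev;         /* device this read used   */
};

static void hand_off(struct sync_unit *u)
{
        /* models reschedule_retry(): another thread may now reuse 'u' */
        printf("unit %p queued for the worker thread\n", (void *)u);
}

static void end_read(struct sync_unit *u)
{
        /* drop the device reference while 'u' is still ours... */
        atomic_fetch_sub(&u->dev->nr_pending, 1);

        /* ...then hand ownership on once the last read completes */
        if (atomic_fetch_sub(&u->remaining, 1) == 1)
                hand_off(u);
        /* no access to 'u' past this point */
}

int main(void)
{
        struct device_ref d = { 1 };
        struct sync_unit u = { 1, &d };

        end_read(&u);
        return 0;
}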
@@ -1264,11 +1264,13 @@ static void end_sync_write(struct bio *bio, int error)
 
 	update_head_pos(i, r10_bio);
 
+	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 	while (atomic_dec_and_test(&r10_bio->remaining)) {
 		if (r10_bio->master_bio == NULL) {
 			/* the primary of several recovery bios */
-			md_done_sync(mddev, r10_bio->sectors, 1);
+			sector_t s = r10_bio->sectors;
 			put_buf(r10_bio);
+			md_done_sync(mddev, s, 1);
 			break;
 		} else {
 			r10bio_t *r10_bio2 = (r10bio_t *)r10_bio->master_bio;
@@ -1276,7 +1278,6 @@ static void end_sync_write(struct bio *bio, int error)
 			r10_bio = r10_bio2;
 		}
 	}
-	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
 }
 
 /*
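The end_sync_write() hunks make two analogous moves: the rdev_dec_pending() drop again happens up front, and the sector count is copied out of the r10_bio before put_buf() returns it, so the later md_done_sync() call uses the saved value instead of dereferencing a buffer that may already have been recycled. A small stand-alone sketch of that save-before-release pattern; buf_put() and report_done() are hypothetical stand-ins for put_buf() and md_done_sync(), and only the ordering matters.

/*
 * Save everything still needed out of the buffer, release it, and only
 * then make the call that publishes completion.
 */
#include <stdio.h>
#include <stdlib.h>

struct sync_buf { long sectors; };

static void buf_put(struct sync_buf *b)  { free(b); }   /* 'b' may be recycled */
static void report_done(long sectors)    { printf("synced %ld sectors\n", sectors); }

static void complete_write(struct sync_buf *b)
{
        long s = b->sectors;    /* copy out while the buffer is still valid */

        buf_put(b);             /* 'b' must not be used after this call     */
        report_done(s);         /* uses only the saved copy                 */
}

int main(void)
{
        struct sync_buf *b = malloc(sizeof(*b));

        if (!b)
                return 1;
        b->sectors = 128;
        complete_write(b);
        return 0;
}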
@@ -1749,8 +1750,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 	if (!go_faster && conf->nr_waiting)
 		msleep_interruptible(1000);
 
-	bitmap_cond_end_sync(mddev->bitmap, sector_nr);
-
 	/* Again, very different code for resync and recovery.
 	 * Both must result in an r10bio with a list of bios that
 	 * have bi_end_io, bi_sector, bi_bdev set,
@@ -1886,6 +1885,8 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 		/* resync. Schedule a read for every block at this virt offset */
 		int count = 0;
 
+		bitmap_cond_end_sync(mddev->bitmap, sector_nr);
+
 		if (!bitmap_start_sync(mddev->bitmap, sector_nr,
 				       &sync_blocks, mddev->degraded) &&
 		    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
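These two sync_request() hunks move the bitmap_cond_end_sync() call from the common top of the function into the resync branch, so it no longer runs on recovery passes. A toy sketch of that control-flow change only; cond_end_sync() and one_pass() are hypothetical names, not the md/bitmap API.

/* The "close out the previous in-sync range" step now runs only on the
 * resync branch, not on every pass through the function. */
#include <stdbool.h>
#include <stdio.h>

static void cond_end_sync(long sector)
{
        printf("end-of-range check at sector %ld\n", sector);
}

static void one_pass(bool recovery, long sector)
{
        if (recovery) {
                printf("recovery pass at %ld (no end-of-range check)\n", sector);
        } else {
                cond_end_sync(sector);          /* done here only */
                printf("resync pass at %ld\n", sector);
        }
}

int main(void)
{
        one_pass(true, 0);
        one_pass(false, 0);
        return 0;
}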
@@ -2010,13 +2011,13 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 	/* There is nowhere to write, so all non-sync
	 * drives must be failed, so try the next chunk...
	 */
-	{
-	sector_t sec = max_sector - sector_nr;
-	sectors_skipped += sec;
+	if (sector_nr + max_sync < max_sector)
+		max_sector = sector_nr + max_sync;
+
+	sectors_skipped += (max_sector - sector_nr);
 	chunks_skipped ++;
 	sector_nr = max_sector;
 	goto skipped;
-	}
 }
 
 static int run(mddev_t *mddev)
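The final hunk replaces the block that skipped straight to max_sector with a clamp: when there is nowhere to write, at most max_sync sectors are skipped per call before jumping to the skipped accounting, rather than jumping to the end of the range in one step. A small arithmetic sketch of that clamp; the values below are made up for illustration.

/* Advance by at most max_sync sectors per call. */
#include <stdio.h>

typedef unsigned long long sector_t;

int main(void)
{
        sector_t sector_nr = 1000, max_sector = 100000, max_sync = 4096;
        sector_t sectors_skipped = 0;

        if (sector_nr + max_sync < max_sector)
                max_sector = sector_nr + max_sync;      /* clamp this pass */

        sectors_skipped += max_sector - sector_nr;      /* 4096, not 99000 */
        sector_nr = max_sector;

        printf("skipped %llu sectors, resuming at %llu\n",
               sectors_skipped, sector_nr);
        return 0;
}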