Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r--	drivers/md/raid10.c	78
1 files changed, 21 insertions, 57 deletions
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 018741ba9310..59d4daa5f4c7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1174,14 +1174,13 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	/* If this request crosses a chunk boundary, we need to
 	 * split it. This will only happen for 1 PAGE (or less) requests.
 	 */
-	if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
 		     > chunk_sects
 		     && (conf->geo.near_copies < conf->geo.raid_disks
 			 || conf->prev.near_copies < conf->prev.raid_disks))) {
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
-		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
-		    bio->bi_idx != 0)
+		if (bio_segments(bio) > 1)
 			goto bad_map;
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
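
For context on the conversion above: bio_sectors() and bio_segments() are small accessor macros from include/linux/bio.h in this kernel generation (before the immutable-biovec rework). A sketch of their likely definitions, not quoted from any particular release:

	/* include/linux/bio.h, approximate form circa v3.9 */
	#define bio_sectors(bio)	((bio)->bi_size >> 9)            /* bytes -> 512B sectors */
	#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx) /* bvecs not yet consumed */

Given those definitions, bio_segments(bio) > 1 folds the old two-part bi_vcnt/bi_idx test into one expression, and counts only the segments still pending rather than all of bi_vcnt.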
@@ -1214,7 +1213,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 bad_map:
 	printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
@@ -1229,7 +1228,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	 */
 	wait_barrier(conf);
 
-	sectors = bio->bi_size >> 9;
+	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	       bio->bi_sector < conf->reshape_progress &&
 	       bio->bi_sector + sectors > conf->reshape_progress) {
@@ -1331,8 +1330,7 @@ read_again:
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 		r10_bio->master_bio = bio;
-		r10_bio->sectors = ((bio->bi_size >> 9)
-				    - sectors_handled);
+		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r10_bio->state = 0;
 		r10_bio->mddev = mddev;
 		r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -1574,7 +1572,7 @@ retry_write:
 	 * after checking if we need to go around again.
 	 */
 
-	if (sectors_handled < (bio->bi_size >> 9)) {
+	if (sectors_handled < bio_sectors(bio)) {
 		one_write_done(r10_bio);
 		/* We need another r10_bio. It has already been counted
 		 * in bio->bi_phys_segments.
@@ -1582,7 +1580,7 @@ retry_write:
 	r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 	r10_bio->master_bio = bio;
-	r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+	r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
 	r10_bio->mddev = mddev;
 	r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -2084,13 +2082,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		 * First we need to fixup bv_offset, bv_len and
 		 * bi_vecs, as the read request might have corrupted these
 		 */
+		bio_reset(tbio);
+
 		tbio->bi_vcnt = vcnt;
 		tbio->bi_size = r10_bio->sectors << 9;
-		tbio->bi_idx = 0;
-		tbio->bi_phys_segments = 0;
-		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
-		tbio->bi_flags |= 1 << BIO_UPTODATE;
-		tbio->bi_next = NULL;
 		tbio->bi_rw = WRITE;
 		tbio->bi_private = r10_bio;
 		tbio->bi_sector = r10_bio->devs[i].addr;
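
bio_reset() now performs the field scrubbing removed above. A minimal sketch of the block-core implementation being relied on (added to fs/bio.c around v3.7); the exact body may differ:

	void bio_reset(struct bio *bio)
	{
		/* preserve only the flag bits above BIO_RESET_BITS (allocation state) */
		unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

		__bio_free(bio);	/* drop integrity payload, if any */

		/* BIO_RESET_BYTES is offsetof(struct bio, bi_max_vecs):
		 * everything before it, including bi_next, bi_idx, bi_rw,
		 * bi_phys_segments and bi_size, is zeroed.
		 */
		memset(bio, 0, BIO_RESET_BYTES);
		bio->bi_flags = flags | (1 << BIO_UPTODATE);
	}

Since the memset also clears bi_vcnt, bi_size and bi_rw, the hunk above re-initializes just the fields it needs after the reset.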
@@ -2108,7 +2103,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		atomic_inc(&r10_bio->remaining);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -2133,7 +2128,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     tbio->bi_size >> 9);
+			     bio_sectors(tbio));
 		generic_make_request(tbio);
 	}
 
@@ -2259,13 +2254,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	wbio2 = r10_bio->devs[1].repl_bio;
 	if (wbio->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
 		generic_make_request(wbio);
 	}
 	if (wbio2 && wbio2->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     wbio2->bi_size >> 9);
+			     bio_sectors(wbio2));
 		generic_make_request(wbio2);
 	}
 }
@@ -2536,25 +2531,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 	}
 }
 
-static void bi_complete(struct bio *bio, int error)
-{
-	complete((struct completion *)bio->bi_private);
-}
-
-static int submit_bio_wait(int rw, struct bio *bio)
-{
-	struct completion event;
-	rw |= REQ_SYNC;
-
-	init_completion(&event);
-	bio->bi_private = &event;
-	bio->bi_end_io = bi_complete;
-	submit_bio(rw, bio);
-	wait_for_completion(&event);
-
-	return test_bit(BIO_UPTODATE, &bio->bi_flags);
-}
-
 static int narrow_write_error(struct r10bio *r10_bio, int i)
 {
 	struct bio *bio = r10_bio->master_bio;
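
The bi_complete()/submit_bio_wait() pair removed above was a raid10-private copy of synchronous bio submission; the block core gains an equivalent exported submit_bio_wait() that md can call instead. A sketch of that shared helper, assuming it matches the version merged into fs/bio.c around v3.10:

	struct submit_bio_ret {
		struct completion event;
		int error;
	};

	static void submit_bio_wait_endio(struct bio *bio, int error)
	{
		struct submit_bio_ret *ret = bio->bi_private;

		ret->error = error;
		complete(&ret->event);
	}

	/* Submit a bio and wait for it to complete; returns its error status. */
	int submit_bio_wait(int rw, struct bio *bio)
	{
		struct submit_bio_ret ret;

		rw |= REQ_SYNC;
		init_completion(&ret.event);
		bio->bi_private = &ret;
		bio->bi_end_io = submit_bio_wait_endio;
		submit_bio(rw, bio);
		wait_for_completion(&ret.event);

		return ret.error;
	}

Note the shared helper reports the error code handed to the completion callback, where the removed local copy returned the BIO_UPTODATE flag.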
@@ -2695,8 +2671,7 @@ read_more:
 			r10_bio = mempool_alloc(conf->r10bio_pool,
 						GFP_NOIO);
 			r10_bio->master_bio = mbio;
-			r10_bio->sectors = (mbio->bi_size >> 9)
-				- sectors_handled;
+			r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
 			r10_bio->state = 0;
 			set_bit(R10BIO_ReadError,
 				&r10_bio->state);
@@ -3133,6 +3108,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 				}
 			}
 			bio = r10_bio->devs[0].bio;
+			bio_reset(bio);
 			bio->bi_next = biolist;
 			biolist = bio;
 			bio->bi_private = r10_bio;
@@ -3157,6 +3133,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 				rdev = mirror->rdev;
 			if (!test_bit(In_sync, &rdev->flags)) {
 				bio = r10_bio->devs[1].bio;
+				bio_reset(bio);
 				bio->bi_next = biolist;
 				biolist = bio;
 				bio->bi_private = r10_bio;
@@ -3185,6 +3162,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			if (rdev == NULL || bio == NULL ||
 			    test_bit(Faulty, &rdev->flags))
 				break;
+			bio_reset(bio);
 			bio->bi_next = biolist;
 			biolist = bio;
 			bio->bi_private = r10_bio;
@@ -3283,7 +3261,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
 
 			bio = r10_bio->devs[i].bio;
-			bio->bi_end_io = NULL;
+			bio_reset(bio);
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 			if (conf->mirrors[d].rdev == NULL ||
 			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
@@ -3320,6 +3298,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 
 			/* Need to set up for writing to the replacement */
 			bio = r10_bio->devs[i].repl_bio;
+			bio_reset(bio);
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 
 			sector = r10_bio->devs[i].addr;
@@ -3353,17 +3332,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 	}
 
-	for (bio = biolist; bio ; bio=bio->bi_next) {
-
-		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
-		if (bio->bi_end_io)
-			bio->bi_flags |= 1 << BIO_UPTODATE;
-		bio->bi_vcnt = 0;
-		bio->bi_idx = 0;
-		bio->bi_phys_segments = 0;
-		bio->bi_size = 0;
-	}
-
 	nr_sectors = 0;
 	if (sector_nr + max_sync < max_sector)
 		max_sector = sector_nr + max_sync;
@@ -4411,7 +4379,6 @@ read_more:
 	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 	read_bio->bi_flags |= 1 << BIO_UPTODATE;
 	read_bio->bi_vcnt = 0;
-	read_bio->bi_idx = 0;
 	read_bio->bi_size = 0;
 	r10_bio->master_bio = read_bio;
 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4435,17 +4402,14 @@ read_more:
 		}
 		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
 			continue;
+
+		bio_reset(b);
 		b->bi_bdev = rdev2->bdev;
 		b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
 		b->bi_private = r10_bio;
 		b->bi_end_io = end_reshape_write;
 		b->bi_rw = WRITE;
-		b->bi_flags &= ~(BIO_POOL_MASK - 1);
-		b->bi_flags |= 1 << BIO_UPTODATE;
 		b->bi_next = blist;
-		b->bi_vcnt = 0;
-		b->bi_idx = 0;
-		b->bi_size = 0;
 		blist = b;
 	}
 