diff options
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r-- | drivers/md/raid1.c | 75 |
1 file changed, 39 insertions, 36 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 1e5a540995e9..db3b9d7314f1 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio) | |||
229 | int done; | 229 | int done; |
230 | struct r1conf *conf = r1_bio->mddev->private; | 230 | struct r1conf *conf = r1_bio->mddev->private; |
231 | sector_t start_next_window = r1_bio->start_next_window; | 231 | sector_t start_next_window = r1_bio->start_next_window; |
232 | sector_t bi_sector = bio->bi_sector; | 232 | sector_t bi_sector = bio->bi_iter.bi_sector; |
233 | 233 | ||
234 | if (bio->bi_phys_segments) { | 234 | if (bio->bi_phys_segments) { |
235 | unsigned long flags; | 235 | unsigned long flags; |
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio) | |||
265 | if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { | 265 | if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { |
266 | pr_debug("raid1: sync end %s on sectors %llu-%llu\n", | 266 | pr_debug("raid1: sync end %s on sectors %llu-%llu\n", |
267 | (bio_data_dir(bio) == WRITE) ? "write" : "read", | 267 | (bio_data_dir(bio) == WRITE) ? "write" : "read", |
268 | (unsigned long long) bio->bi_sector, | 268 | (unsigned long long) bio->bi_iter.bi_sector, |
269 | (unsigned long long) bio->bi_sector + | 269 | (unsigned long long) bio_end_sector(bio) - 1); |
270 | bio_sectors(bio) - 1); | ||
271 | 270 | ||
272 | call_bio_endio(r1_bio); | 271 | call_bio_endio(r1_bio); |
273 | } | 272 | } |
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error) | |||
466 | struct bio *mbio = r1_bio->master_bio; | 465 | struct bio *mbio = r1_bio->master_bio; |
467 | pr_debug("raid1: behind end write sectors" | 466 | pr_debug("raid1: behind end write sectors" |
468 | " %llu-%llu\n", | 467 | " %llu-%llu\n", |
469 | (unsigned long long) mbio->bi_sector, | 468 | (unsigned long long) mbio->bi_iter.bi_sector, |
470 | (unsigned long long) mbio->bi_sector + | 469 | (unsigned long long) bio_end_sector(mbio) - 1); |
471 | bio_sectors(mbio) - 1); | ||
472 | call_bio_endio(r1_bio); | 470 | call_bio_endio(r1_bio); |
473 | } | 471 | } |
474 | } | 472 | } |
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) | |||
875 | else if ((conf->next_resync - RESYNC_WINDOW_SECTORS | 873 | else if ((conf->next_resync - RESYNC_WINDOW_SECTORS |
876 | >= bio_end_sector(bio)) || | 874 | >= bio_end_sector(bio)) || |
877 | (conf->next_resync + NEXT_NORMALIO_DISTANCE | 875 | (conf->next_resync + NEXT_NORMALIO_DISTANCE |
878 | <= bio->bi_sector)) | 876 | <= bio->bi_iter.bi_sector)) |
879 | wait = false; | 877 | wait = false; |
880 | else | 878 | else |
881 | wait = true; | 879 | wait = true; |
@@ -913,19 +911,19 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) | |||
913 | 911 | ||
914 | if (bio && bio_data_dir(bio) == WRITE) { | 912 | if (bio && bio_data_dir(bio) == WRITE) { |
915 | if (conf->next_resync + NEXT_NORMALIO_DISTANCE | 913 | if (conf->next_resync + NEXT_NORMALIO_DISTANCE |
916 | <= bio->bi_sector) { | 914 | <= bio->bi_iter.bi_sector) { |
917 | if (conf->start_next_window == MaxSector) | 915 | if (conf->start_next_window == MaxSector) |
918 | conf->start_next_window = | 916 | conf->start_next_window = |
919 | conf->next_resync + | 917 | conf->next_resync + |
920 | NEXT_NORMALIO_DISTANCE; | 918 | NEXT_NORMALIO_DISTANCE; |
921 | 919 | ||
922 | if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) | 920 | if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) |
923 | <= bio->bi_sector) | 921 | <= bio->bi_iter.bi_sector) |
924 | conf->next_window_requests++; | 922 | conf->next_window_requests++; |
925 | else | 923 | else |
926 | conf->current_window_requests++; | 924 | conf->current_window_requests++; |
927 | } | 925 | } |
928 | if (bio->bi_sector >= conf->start_next_window) | 926 | if (bio->bi_iter.bi_sector >= conf->start_next_window) |
929 | sector = conf->start_next_window; | 927 | sector = conf->start_next_window; |
930 | } | 928 | } |
931 | 929 | ||
@@ -1028,7 +1026,8 @@ do_sync_io: | |||
1028 | if (bvecs[i].bv_page) | 1026 | if (bvecs[i].bv_page) |
1029 | put_page(bvecs[i].bv_page); | 1027 | put_page(bvecs[i].bv_page); |
1030 | kfree(bvecs); | 1028 | kfree(bvecs); |
1031 | pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); | 1029 | pr_debug("%dB behind alloc failed, doing sync I/O\n", |
1030 | bio->bi_iter.bi_size); | ||
1032 | } | 1031 | } |
1033 | 1032 | ||
1034 | struct raid1_plug_cb { | 1033 | struct raid1_plug_cb { |
@@ -1108,7 +1107,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1108 | 1107 | ||
1109 | if (bio_data_dir(bio) == WRITE && | 1108 | if (bio_data_dir(bio) == WRITE && |
1110 | bio_end_sector(bio) > mddev->suspend_lo && | 1109 | bio_end_sector(bio) > mddev->suspend_lo && |
1111 | bio->bi_sector < mddev->suspend_hi) { | 1110 | bio->bi_iter.bi_sector < mddev->suspend_hi) { |
1112 | /* As the suspend_* range is controlled by | 1111 | /* As the suspend_* range is controlled by |
1113 | * userspace, we want an interruptible | 1112 | * userspace, we want an interruptible |
1114 | * wait. | 1113 | * wait. |
@@ -1119,7 +1118,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1119 | prepare_to_wait(&conf->wait_barrier, | 1118 | prepare_to_wait(&conf->wait_barrier, |
1120 | &w, TASK_INTERRUPTIBLE); | 1119 | &w, TASK_INTERRUPTIBLE); |
1121 | if (bio_end_sector(bio) <= mddev->suspend_lo || | 1120 | if (bio_end_sector(bio) <= mddev->suspend_lo || |
1122 | bio->bi_sector >= mddev->suspend_hi) | 1121 | bio->bi_iter.bi_sector >= mddev->suspend_hi) |
1123 | break; | 1122 | break; |
1124 | schedule(); | 1123 | schedule(); |
1125 | } | 1124 | } |
@@ -1141,7 +1140,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1141 | r1_bio->sectors = bio_sectors(bio); | 1140 | r1_bio->sectors = bio_sectors(bio); |
1142 | r1_bio->state = 0; | 1141 | r1_bio->state = 0; |
1143 | r1_bio->mddev = mddev; | 1142 | r1_bio->mddev = mddev; |
1144 | r1_bio->sector = bio->bi_sector; | 1143 | r1_bio->sector = bio->bi_iter.bi_sector; |
1145 | 1144 | ||
1146 | /* We might need to issue multiple reads to different | 1145 | /* We might need to issue multiple reads to different |
1147 | * devices if there are bad blocks around, so we keep | 1146 | * devices if there are bad blocks around, so we keep |
@@ -1181,12 +1180,13 @@ read_again: | |||
1181 | r1_bio->read_disk = rdisk; | 1180 | r1_bio->read_disk = rdisk; |
1182 | 1181 | ||
1183 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1182 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
1184 | bio_trim(read_bio, r1_bio->sector - bio->bi_sector, | 1183 | bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, |
1185 | max_sectors); | 1184 | max_sectors); |
1186 | 1185 | ||
1187 | r1_bio->bios[rdisk] = read_bio; | 1186 | r1_bio->bios[rdisk] = read_bio; |
1188 | 1187 | ||
1189 | read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; | 1188 | read_bio->bi_iter.bi_sector = r1_bio->sector + |
1189 | mirror->rdev->data_offset; | ||
1190 | read_bio->bi_bdev = mirror->rdev->bdev; | 1190 | read_bio->bi_bdev = mirror->rdev->bdev; |
1191 | read_bio->bi_end_io = raid1_end_read_request; | 1191 | read_bio->bi_end_io = raid1_end_read_request; |
1192 | read_bio->bi_rw = READ | do_sync; | 1192 | read_bio->bi_rw = READ | do_sync; |
@@ -1198,7 +1198,7 @@ read_again: | |||
1198 | */ | 1198 | */ |
1199 | 1199 | ||
1200 | sectors_handled = (r1_bio->sector + max_sectors | 1200 | sectors_handled = (r1_bio->sector + max_sectors |
1201 | - bio->bi_sector); | 1201 | - bio->bi_iter.bi_sector); |
1202 | r1_bio->sectors = max_sectors; | 1202 | r1_bio->sectors = max_sectors; |
1203 | spin_lock_irq(&conf->device_lock); | 1203 | spin_lock_irq(&conf->device_lock); |
1204 | if (bio->bi_phys_segments == 0) | 1204 | if (bio->bi_phys_segments == 0) |
@@ -1219,7 +1219,8 @@ read_again: | |||
1219 | r1_bio->sectors = bio_sectors(bio) - sectors_handled; | 1219 | r1_bio->sectors = bio_sectors(bio) - sectors_handled; |
1220 | r1_bio->state = 0; | 1220 | r1_bio->state = 0; |
1221 | r1_bio->mddev = mddev; | 1221 | r1_bio->mddev = mddev; |
1222 | r1_bio->sector = bio->bi_sector + sectors_handled; | 1222 | r1_bio->sector = bio->bi_iter.bi_sector + |
1223 | sectors_handled; | ||
1223 | goto read_again; | 1224 | goto read_again; |
1224 | } else | 1225 | } else |
1225 | generic_make_request(read_bio); | 1226 | generic_make_request(read_bio); |
@@ -1322,7 +1323,7 @@ read_again: | |||
1322 | if (r1_bio->bios[j]) | 1323 | if (r1_bio->bios[j]) |
1323 | rdev_dec_pending(conf->mirrors[j].rdev, mddev); | 1324 | rdev_dec_pending(conf->mirrors[j].rdev, mddev); |
1324 | r1_bio->state = 0; | 1325 | r1_bio->state = 0; |
1325 | allow_barrier(conf, start_next_window, bio->bi_sector); | 1326 | allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); |
1326 | md_wait_for_blocked_rdev(blocked_rdev, mddev); | 1327 | md_wait_for_blocked_rdev(blocked_rdev, mddev); |
1327 | start_next_window = wait_barrier(conf, bio); | 1328 | start_next_window = wait_barrier(conf, bio); |
1328 | /* | 1329 | /* |
@@ -1349,7 +1350,7 @@ read_again: | |||
1349 | bio->bi_phys_segments++; | 1350 | bio->bi_phys_segments++; |
1350 | spin_unlock_irq(&conf->device_lock); | 1351 | spin_unlock_irq(&conf->device_lock); |
1351 | } | 1352 | } |
1352 | sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; | 1353 | sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector; |
1353 | 1354 | ||
1354 | atomic_set(&r1_bio->remaining, 1); | 1355 | atomic_set(&r1_bio->remaining, 1); |
1355 | atomic_set(&r1_bio->behind_remaining, 0); | 1356 | atomic_set(&r1_bio->behind_remaining, 0); |
@@ -1361,7 +1362,7 @@ read_again: | |||
1361 | continue; | 1362 | continue; |
1362 | 1363 | ||
1363 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1364 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
1364 | bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors); | 1365 | bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); |
1365 | 1366 | ||
1366 | if (first_clone) { | 1367 | if (first_clone) { |
1367 | /* do behind I/O ? | 1368 | /* do behind I/O ? |
@@ -1395,7 +1396,7 @@ read_again: | |||
1395 | 1396 | ||
1396 | r1_bio->bios[i] = mbio; | 1397 | r1_bio->bios[i] = mbio; |
1397 | 1398 | ||
1398 | mbio->bi_sector = (r1_bio->sector + | 1399 | mbio->bi_iter.bi_sector = (r1_bio->sector + |
1399 | conf->mirrors[i].rdev->data_offset); | 1400 | conf->mirrors[i].rdev->data_offset); |
1400 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; | 1401 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; |
1401 | mbio->bi_end_io = raid1_end_write_request; | 1402 | mbio->bi_end_io = raid1_end_write_request; |
@@ -1435,7 +1436,7 @@ read_again: | |||
1435 | r1_bio->sectors = bio_sectors(bio) - sectors_handled; | 1436 | r1_bio->sectors = bio_sectors(bio) - sectors_handled; |
1436 | r1_bio->state = 0; | 1437 | r1_bio->state = 0; |
1437 | r1_bio->mddev = mddev; | 1438 | r1_bio->mddev = mddev; |
1438 | r1_bio->sector = bio->bi_sector + sectors_handled; | 1439 | r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; |
1439 | goto retry_write; | 1440 | goto retry_write; |
1440 | } | 1441 | } |
1441 | 1442 | ||
@@ -1959,14 +1960,14 @@ static int process_checks(struct r1bio *r1_bio) | |||
1959 | /* fixup the bio for reuse */ | 1960 | /* fixup the bio for reuse */ |
1960 | bio_reset(b); | 1961 | bio_reset(b); |
1961 | b->bi_vcnt = vcnt; | 1962 | b->bi_vcnt = vcnt; |
1962 | b->bi_size = r1_bio->sectors << 9; | 1963 | b->bi_iter.bi_size = r1_bio->sectors << 9; |
1963 | b->bi_sector = r1_bio->sector + | 1964 | b->bi_iter.bi_sector = r1_bio->sector + |
1964 | conf->mirrors[i].rdev->data_offset; | 1965 | conf->mirrors[i].rdev->data_offset; |
1965 | b->bi_bdev = conf->mirrors[i].rdev->bdev; | 1966 | b->bi_bdev = conf->mirrors[i].rdev->bdev; |
1966 | b->bi_end_io = end_sync_read; | 1967 | b->bi_end_io = end_sync_read; |
1967 | b->bi_private = r1_bio; | 1968 | b->bi_private = r1_bio; |
1968 | 1969 | ||
1969 | size = b->bi_size; | 1970 | size = b->bi_iter.bi_size; |
1970 | for (j = 0; j < vcnt ; j++) { | 1971 | for (j = 0; j < vcnt ; j++) { |
1971 | struct bio_vec *bi; | 1972 | struct bio_vec *bi; |
1972 | bi = &b->bi_io_vec[j]; | 1973 | bi = &b->bi_io_vec[j]; |
@@ -2221,11 +2222,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) | |||
2221 | } | 2222 | } |
2222 | 2223 | ||
2223 | wbio->bi_rw = WRITE; | 2224 | wbio->bi_rw = WRITE; |
2224 | wbio->bi_sector = r1_bio->sector; | 2225 | wbio->bi_iter.bi_sector = r1_bio->sector; |
2225 | wbio->bi_size = r1_bio->sectors << 9; | 2226 | wbio->bi_iter.bi_size = r1_bio->sectors << 9; |
2226 | 2227 | ||
2227 | bio_trim(wbio, sector - r1_bio->sector, sectors); | 2228 | bio_trim(wbio, sector - r1_bio->sector, sectors); |
2228 | wbio->bi_sector += rdev->data_offset; | 2229 | wbio->bi_iter.bi_sector += rdev->data_offset; |
2229 | wbio->bi_bdev = rdev->bdev; | 2230 | wbio->bi_bdev = rdev->bdev; |
2230 | if (submit_bio_wait(WRITE, wbio) == 0) | 2231 | if (submit_bio_wait(WRITE, wbio) == 0) |
2231 | /* failure! */ | 2232 | /* failure! */ |
@@ -2339,7 +2340,8 @@ read_more: | |||
2339 | } | 2340 | } |
2340 | r1_bio->read_disk = disk; | 2341 | r1_bio->read_disk = disk; |
2341 | bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); | 2342 | bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); |
2342 | bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors); | 2343 | bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, |
2344 | max_sectors); | ||
2343 | r1_bio->bios[r1_bio->read_disk] = bio; | 2345 | r1_bio->bios[r1_bio->read_disk] = bio; |
2344 | rdev = conf->mirrors[disk].rdev; | 2346 | rdev = conf->mirrors[disk].rdev; |
2345 | printk_ratelimited(KERN_ERR | 2347 | printk_ratelimited(KERN_ERR |
@@ -2348,7 +2350,7 @@ read_more: | |||
2348 | mdname(mddev), | 2350 | mdname(mddev), |
2349 | (unsigned long long)r1_bio->sector, | 2351 | (unsigned long long)r1_bio->sector, |
2350 | bdevname(rdev->bdev, b)); | 2352 | bdevname(rdev->bdev, b)); |
2351 | bio->bi_sector = r1_bio->sector + rdev->data_offset; | 2353 | bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; |
2352 | bio->bi_bdev = rdev->bdev; | 2354 | bio->bi_bdev = rdev->bdev; |
2353 | bio->bi_end_io = raid1_end_read_request; | 2355 | bio->bi_end_io = raid1_end_read_request; |
2354 | bio->bi_rw = READ | do_sync; | 2356 | bio->bi_rw = READ | do_sync; |
@@ -2357,7 +2359,7 @@ read_more: | |||
2357 | /* Drat - have to split this up more */ | 2359 | /* Drat - have to split this up more */ |
2358 | struct bio *mbio = r1_bio->master_bio; | 2360 | struct bio *mbio = r1_bio->master_bio; |
2359 | int sectors_handled = (r1_bio->sector + max_sectors | 2361 | int sectors_handled = (r1_bio->sector + max_sectors |
2360 | - mbio->bi_sector); | 2362 | - mbio->bi_iter.bi_sector); |
2361 | r1_bio->sectors = max_sectors; | 2363 | r1_bio->sectors = max_sectors; |
2362 | spin_lock_irq(&conf->device_lock); | 2364 | spin_lock_irq(&conf->device_lock); |
2363 | if (mbio->bi_phys_segments == 0) | 2365 | if (mbio->bi_phys_segments == 0) |
@@ -2375,7 +2377,8 @@ read_more: | |||
2375 | r1_bio->state = 0; | 2377 | r1_bio->state = 0; |
2376 | set_bit(R1BIO_ReadError, &r1_bio->state); | 2378 | set_bit(R1BIO_ReadError, &r1_bio->state); |
2377 | r1_bio->mddev = mddev; | 2379 | r1_bio->mddev = mddev; |
2378 | r1_bio->sector = mbio->bi_sector + sectors_handled; | 2380 | r1_bio->sector = mbio->bi_iter.bi_sector + |
2381 | sectors_handled; | ||
2379 | 2382 | ||
2380 | goto read_more; | 2383 | goto read_more; |
2381 | } else | 2384 | } else |
@@ -2599,7 +2602,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp | |||
2599 | } | 2602 | } |
2600 | if (bio->bi_end_io) { | 2603 | if (bio->bi_end_io) { |
2601 | atomic_inc(&rdev->nr_pending); | 2604 | atomic_inc(&rdev->nr_pending); |
2602 | bio->bi_sector = sector_nr + rdev->data_offset; | 2605 | bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; |
2603 | bio->bi_bdev = rdev->bdev; | 2606 | bio->bi_bdev = rdev->bdev; |
2604 | bio->bi_private = r1_bio; | 2607 | bio->bi_private = r1_bio; |
2605 | } | 2608 | } |
@@ -2699,7 +2702,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp | |||
2699 | continue; | 2702 | continue; |
2700 | /* remove last page from this bio */ | 2703 | /* remove last page from this bio */ |
2701 | bio->bi_vcnt--; | 2704 | bio->bi_vcnt--; |
2702 | bio->bi_size -= len; | 2705 | bio->bi_iter.bi_size -= len; |
2703 | bio->bi_flags &= ~(1<< BIO_SEG_VALID); | 2706 | bio->bi_flags &= ~(1<< BIO_SEG_VALID); |
2704 | } | 2707 | } |
2705 | goto bio_full; | 2708 | goto bio_full; |