author		Linus Torvalds <torvalds@linux-foundation.org>	2014-01-30 14:19:05 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-30 14:19:05 -0500
commit		f568849edac8611d603e00bd6cbbcfea09395ae6
tree		b9472d640fe5d87426d38c9d81d946cf197ad3fb	/drivers/md/raid1.c
parent		d9894c228b11273e720bb63ba120d1d326fe9d94
parent		675675ada486dde5bf9aa51665e90706bff11a35
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
 "The major piece in here is the immutable bio_vecs series from Kent,
  the rest is fairly minor. It was supposed to go in last round, but
  various issues pushed it to this release instead. The pull request
  contains:

   - Various smaller blk-mq fixes from different folks. Nothing major
     here, just minor fixes and cleanups.

   - Fix for a memory leak in the error path in the block ioctl code
     from Christian Engelmayer.

   - Header export fix from CaiZhiyong.

   - Finally the immutable biovec changes from Kent Overstreet. This
     enables some nice future work on making arbitrarily sized bios
     possible, and splitting more efficient. Related fixes to immutable
     bio_vecs:
      - dm-cache immutable fixup from Mike Snitzer.
      - btrfs immutable fixup from Muthu Kumar.
      - bio-integrity fix from Nic Bellinger, which is also going to
        stable"

* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
  xtensa: fixup simdisk driver to work with immutable bio_vecs
  block/blk-mq-cpu.c: use hotcpu_notifier()
  blk-mq: for_each_* macro correctness
  block: Fix memory leak in rw_copy_check_uvector() handling
  bio-integrity: Fix bio_integrity_verify segment start bug
  block: remove unrelated header files and export symbol
  blk-mq: uses page->list incorrectly
  blk-mq: use __smp_call_function_single directly
  btrfs: fix missing increment of bi_remaining
  Revert "block: Warn and free bio if bi_end_io is not set"
  block: Warn and free bio if bi_end_io is not set
  blk-mq: fix initializing request's start time
  block: blk-mq: don't export blk_mq_free_queue()
  block: blk-mq: make blk_sync_queue support mq
  block: blk-mq: support draining mq queue
  dm cache: increment bi_remaining when bi_end_io is restored
  block: fixup for generic bio chaining
  block: Really silence spurious compiler warnings
  block: Silence spurious compiler warnings
  block: Kill bio_pair_split()
  ...
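The recurring edit in the raid1.c diff below is mechanical: the iterator fields that used to live directly on struct bio (bi_sector, bi_size, bi_idx) move into an embedded struct bvec_iter, so that clones can walk a shared, immutable bi_io_vec. As an abridged C sketch of that layout change (field names match the upstream struct bvec_iter; comments are paraphrased, not the full upstream definition):

#include <linux/types.h>	/* sector_t */

/*
 * Abridged sketch of the 3.14 layout change, not the complete upstream
 * definition: iterator state moves out of struct bio into an embedded
 * struct bvec_iter, leaving the bi_io_vec array itself immutable.
 */
struct bvec_iter {
	sector_t	bi_sector;	/* device address, in 512-byte sectors */
	unsigned int	bi_size;	/* residual I/O count, in bytes */
	unsigned int	bi_idx;		/* current index into bi_io_vec */
	unsigned int	bi_bvec_done;	/* bytes completed in the current bvec */
};

/*
 * Hence the rename that dominates the hunks below:
 *   bio->bi_sector  ->  bio->bi_iter.bi_sector
 *   bio->bi_size    ->  bio->bi_iter.bi_size
 */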
Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c	73
1 file changed, 38 insertions(+), 35 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a49cfcc7a343..fd3a2a14b587 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
 	int done;
 	struct r1conf *conf = r1_bio->mddev->private;
 	sector_t start_next_window = r1_bio->start_next_window;
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	if (bio->bi_phys_segments) {
 		unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
 	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
-			 (unsigned long long) bio->bi_sector,
-			 (unsigned long long) bio->bi_sector +
-			 bio_sectors(bio) - 1);
+			 (unsigned long long) bio->bi_iter.bi_sector,
+			 (unsigned long long) bio_end_sector(bio) - 1);
 
 		call_bio_endio(r1_bio);
 	}
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
 			struct bio *mbio = r1_bio->master_bio;
 			pr_debug("raid1: behind end write sectors"
 				 " %llu-%llu\n",
-				 (unsigned long long) mbio->bi_sector,
-				 (unsigned long long) mbio->bi_sector +
-				 bio_sectors(mbio) - 1);
+				 (unsigned long long) mbio->bi_iter.bi_sector,
+				 (unsigned long long) bio_end_sector(mbio) - 1);
 			call_bio_endio(r1_bio);
 		}
 	}
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
 	else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
 		  >= bio_end_sector(bio)) ||
 		 (conf->next_resync + NEXT_NORMALIO_DISTANCE
-		  <= bio->bi_sector))
+		  <= bio->bi_iter.bi_sector))
 		wait = false;
 	else
 		wait = true;
@@ -913,14 +911,14 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 
 	if (bio && bio_data_dir(bio) == WRITE) {
 		if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-		    <= bio->bi_sector) {
+		    <= bio->bi_iter.bi_sector) {
 			if (conf->start_next_window == MaxSector)
 				conf->start_next_window =
 					conf->next_resync +
 					NEXT_NORMALIO_DISTANCE;
 
 			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
-			    <= bio->bi_sector)
+			    <= bio->bi_iter.bi_sector)
 				conf->next_window_requests++;
 			else
 				conf->current_window_requests++;
@@ -1027,7 +1025,8 @@ do_sync_io:
 		if (bvecs[i].bv_page)
 			put_page(bvecs[i].bv_page);
 	kfree(bvecs);
-	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+	pr_debug("%dB behind alloc failed, doing sync I/O\n",
+		 bio->bi_iter.bi_size);
 }
 
 struct raid1_plug_cb {
@@ -1107,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 
 	if (bio_data_dir(bio) == WRITE &&
 	    bio_end_sector(bio) > mddev->suspend_lo &&
-	    bio->bi_sector < mddev->suspend_hi) {
+	    bio->bi_iter.bi_sector < mddev->suspend_hi) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
 		 * wait.
@@ -1118,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_INTERRUPTIBLE);
 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
-			    bio->bi_sector >= mddev->suspend_hi)
+			    bio->bi_iter.bi_sector >= mddev->suspend_hi)
 				break;
 			schedule();
 		}
@@ -1140,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	r1_bio->sectors = bio_sectors(bio);
 	r1_bio->state = 0;
 	r1_bio->mddev = mddev;
-	r1_bio->sector = bio->bi_sector;
+	r1_bio->sector = bio->bi_iter.bi_sector;
 
 	/* We might need to issue multiple reads to different
 	 * devices if there are bad blocks around, so we keep
@@ -1180,12 +1179,13 @@ read_again:
 		r1_bio->read_disk = rdisk;
 
 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
 			 max_sectors);
 
 		r1_bio->bios[rdisk] = read_bio;
 
-		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+		read_bio->bi_iter.bi_sector = r1_bio->sector +
+			mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
 		read_bio->bi_rw = READ | do_sync;
@@ -1197,7 +1197,7 @@ read_again:
 			 */
 
 			sectors_handled = (r1_bio->sector + max_sectors
-					   - bio->bi_sector);
+					   - bio->bi_iter.bi_sector);
 			r1_bio->sectors = max_sectors;
 			spin_lock_irq(&conf->device_lock);
 			if (bio->bi_phys_segments == 0)
@@ -1218,7 +1218,8 @@ read_again:
 			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r1_bio->state = 0;
 			r1_bio->mddev = mddev;
-			r1_bio->sector = bio->bi_sector + sectors_handled;
+			r1_bio->sector = bio->bi_iter.bi_sector +
+				sectors_handled;
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
@@ -1321,7 +1322,7 @@ read_again:
 			if (r1_bio->bios[j])
 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
 		r1_bio->state = 0;
-		allow_barrier(conf, start_next_window, bio->bi_sector);
+		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
 		start_next_window = wait_barrier(conf, bio);
 		/*
@@ -1348,7 +1349,7 @@ read_again:
 			bio->bi_phys_segments++;
 		spin_unlock_irq(&conf->device_lock);
 	}
-	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
 
 	atomic_set(&r1_bio->remaining, 1);
 	atomic_set(&r1_bio->behind_remaining, 0);
@@ -1360,7 +1361,7 @@ read_again:
 			continue;
 
 		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
 
 		if (first_clone) {
 			/* do behind I/O ?
@@ -1394,7 +1395,7 @@ read_again:
 
 		r1_bio->bios[i] = mbio;
 
-		mbio->bi_sector	= (r1_bio->sector +
+		mbio->bi_iter.bi_sector	= (r1_bio->sector +
 				   conf->mirrors[i].rdev->data_offset);
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
@@ -1434,7 +1435,7 @@ read_again:
 		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r1_bio->state = 0;
 		r1_bio->mddev = mddev;
-		r1_bio->sector = bio->bi_sector + sectors_handled;
+		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
 		goto retry_write;
 	}
 
@@ -1958,14 +1959,14 @@ static int process_checks(struct r1bio *r1_bio)
 		/* fixup the bio for reuse */
 		bio_reset(b);
 		b->bi_vcnt = vcnt;
-		b->bi_size = r1_bio->sectors << 9;
-		b->bi_sector = r1_bio->sector +
+		b->bi_iter.bi_size = r1_bio->sectors << 9;
+		b->bi_iter.bi_sector = r1_bio->sector +
 			conf->mirrors[i].rdev->data_offset;
 		b->bi_bdev = conf->mirrors[i].rdev->bdev;
 		b->bi_end_io = end_sync_read;
 		b->bi_private = r1_bio;
 
-		size = b->bi_size;
+		size = b->bi_iter.bi_size;
 		for (j = 0; j < vcnt ; j++) {
 			struct bio_vec *bi;
 			bi = &b->bi_io_vec[j];
@@ -2220,11 +2221,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 	}
 
 	wbio->bi_rw = WRITE;
-	wbio->bi_sector = r1_bio->sector;
-	wbio->bi_size = r1_bio->sectors << 9;
+	wbio->bi_iter.bi_sector = r1_bio->sector;
+	wbio->bi_iter.bi_size = r1_bio->sectors << 9;
 
 	bio_trim(wbio, sector - r1_bio->sector, sectors);
-	wbio->bi_sector += rdev->data_offset;
+	wbio->bi_iter.bi_sector += rdev->data_offset;
 	wbio->bi_bdev = rdev->bdev;
 	if (submit_bio_wait(WRITE, wbio) == 0)
 		/* failure! */
@@ -2338,7 +2339,8 @@ read_more:
 	}
 	r1_bio->read_disk = disk;
 	bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
-	bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+	bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+		 max_sectors);
 	r1_bio->bios[r1_bio->read_disk] = bio;
 	rdev = conf->mirrors[disk].rdev;
 	printk_ratelimited(KERN_ERR
@@ -2347,7 +2349,7 @@ read_more:
 			   mdname(mddev),
 			   (unsigned long long)r1_bio->sector,
 			   bdevname(rdev->bdev, b));
-	bio->bi_sector = r1_bio->sector + rdev->data_offset;
+	bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_end_io = raid1_end_read_request;
 	bio->bi_rw = READ | do_sync;
@@ -2356,7 +2358,7 @@ read_more:
 		/* Drat - have to split this up more */
 		struct bio *mbio = r1_bio->master_bio;
 		int sectors_handled = (r1_bio->sector + max_sectors
-				       - mbio->bi_sector);
+				       - mbio->bi_iter.bi_sector);
 		r1_bio->sectors = max_sectors;
 		spin_lock_irq(&conf->device_lock);
 		if (mbio->bi_phys_segments == 0)
@@ -2374,7 +2376,8 @@ read_more:
 		r1_bio->state = 0;
 		set_bit(R1BIO_ReadError, &r1_bio->state);
 		r1_bio->mddev = mddev;
-		r1_bio->sector = mbio->bi_sector + sectors_handled;
+		r1_bio->sector = mbio->bi_iter.bi_sector +
+			sectors_handled;
 
 		goto read_more;
 	} else
@@ -2598,7 +2601,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 		}
 		if (bio->bi_end_io) {
 			atomic_inc(&rdev->nr_pending);
-			bio->bi_sector = sector_nr + rdev->data_offset;
+			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_private = r1_bio;
 		}
@@ -2698,7 +2701,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 				continue;
 			/* remove last page from this bio */
 			bio->bi_vcnt--;
-			bio->bi_size -= len;
+			bio->bi_iter.bi_size -= len;
 			bio->bi_flags &= ~(1<< BIO_SEG_VALID);
 		}
 		goto bio_full;
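
As a usage note on the pr_debug hunks above: the patch also drops the open-coded "bi_sector + bio_sectors(bio) - 1" in favour of the upstream bio_end_sector() helper, which expands to bio->bi_iter.bi_sector + bio_sectors(bio). A minimal sketch of the resulting accessor pattern; r1_log_bio is a hypothetical helper for illustration, not part of the patch:

#include <linux/bio.h>
#include <linux/printk.h>

/*
 * Hypothetical helper (not from the patch): log the sector range a bio
 * covers using the post-3.14 accessors. bio_end_sector() returns the
 * first sector past the end, hence the "- 1" for an inclusive range.
 */
static void r1_log_bio(struct bio *bio)
{
	pr_debug("bio covers sectors %llu-%llu (%u bytes)\n",
		 (unsigned long long)bio->bi_iter.bi_sector,	/* was bio->bi_sector */
		 (unsigned long long)bio_end_sector(bio) - 1,
		 bio->bi_iter.bi_size);				/* was bio->bi_size */
}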