Diffstat (limited to 'drivers/md/raid1.c')
-rw-r--r--	drivers/md/raid1.c	216
1 file changed, 134 insertions, 82 deletions
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index e16f473bcf46..2da9d3ba902d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -301,7 +301,7 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 {
 	int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
 	r1bio_t * r1_bio = (r1bio_t *)(bio->bi_private);
-	int mirror, behind;
+	int mirror, behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
 	conf_t *conf = mddev_to_conf(r1_bio->mddev);
 
 	if (bio->bi_size)
@@ -311,47 +311,54 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 		if (r1_bio->bios[mirror] == bio)
 			break;
 
-	/*
-	 * this branch is our 'one mirror IO has finished' event handler:
-	 */
-	if (!uptodate) {
-		md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
-		/* an I/O failed, we can't clear the bitmap */
-		set_bit(R1BIO_Degraded, &r1_bio->state);
-	} else
-		/*
-		 * Set R1BIO_Uptodate in our master bio, so that
-		 * we will return a good error code for to the higher
-		 * levels even if IO on some other mirrored buffer fails.
-		 *
-		 * The 'master' represents the composite IO operation to
-		 * user-side. So if something waits for IO, then it will
-		 * wait for the 'master' bio.
-		 */
-		set_bit(R1BIO_Uptodate, &r1_bio->state);
-
-	update_head_pos(mirror, r1_bio);
-
-	behind = test_bit(R1BIO_BehindIO, &r1_bio->state);
-	if (behind) {
-		if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
-			atomic_dec(&r1_bio->behind_remaining);
-
-		/* In behind mode, we ACK the master bio once the I/O has safely
-		 * reached all non-writemostly disks. Setting the Returned bit
-		 * ensures that this gets done only once -- we don't ever want to
-		 * return -EIO here, instead we'll wait */
-
-		if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
-		    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
-			/* Maybe we can return now */
-			if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
-				struct bio *mbio = r1_bio->master_bio;
-				PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
-				       (unsigned long long) mbio->bi_sector,
-				       (unsigned long long) mbio->bi_sector +
-				       (mbio->bi_size >> 9) - 1);
-				bio_endio(mbio, mbio->bi_size, 0);
+	if (error == -ENOTSUPP && test_bit(R1BIO_Barrier, &r1_bio->state)) {
+		set_bit(BarriersNotsupp, &conf->mirrors[mirror].rdev->flags);
+		set_bit(R1BIO_BarrierRetry, &r1_bio->state);
+		r1_bio->mddev->barriers_work = 0;
+	} else {
+		/*
+		 * this branch is our 'one mirror IO has finished' event handler:
+		 */
+		r1_bio->bios[mirror] = NULL;
+		bio_put(bio);
+		if (!uptodate) {
+			md_error(r1_bio->mddev, conf->mirrors[mirror].rdev);
+			/* an I/O failed, we can't clear the bitmap */
+			set_bit(R1BIO_Degraded, &r1_bio->state);
+		} else
+			/*
+			 * Set R1BIO_Uptodate in our master bio, so that
+			 * we will return a good error code for to the higher
+			 * levels even if IO on some other mirrored buffer fails.
+			 *
+			 * The 'master' represents the composite IO operation to
+			 * user-side. So if something waits for IO, then it will
+			 * wait for the 'master' bio.
+			 */
+			set_bit(R1BIO_Uptodate, &r1_bio->state);
+
+		update_head_pos(mirror, r1_bio);
+
+		if (behind) {
+			if (test_bit(WriteMostly, &conf->mirrors[mirror].rdev->flags))
+				atomic_dec(&r1_bio->behind_remaining);
+
+			/* In behind mode, we ACK the master bio once the I/O has safely
+			 * reached all non-writemostly disks. Setting the Returned bit
+			 * ensures that this gets done only once -- we don't ever want to
+			 * return -EIO here, instead we'll wait */
+
+			if (atomic_read(&r1_bio->behind_remaining) >= (atomic_read(&r1_bio->remaining)-1) &&
+			    test_bit(R1BIO_Uptodate, &r1_bio->state)) {
+				/* Maybe we can return now */
+				if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
+					struct bio *mbio = r1_bio->master_bio;
+					PRINTK(KERN_DEBUG "raid1: behind end write sectors %llu-%llu\n",
+					       (unsigned long long) mbio->bi_sector,
+					       (unsigned long long) mbio->bi_sector +
+					       (mbio->bi_size >> 9) - 1);
+					bio_endio(mbio, mbio->bi_size, 0);
+				}
 			}
 		}
 	}
@@ -361,8 +368,16 @@ static int raid1_end_write_request(struct bio *bio, unsigned int bytes_done, int
 	 * already.
 	 */
 	if (atomic_dec_and_test(&r1_bio->remaining)) {
+		if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
+			reschedule_retry(r1_bio);
+			/* Don't dec_pending yet, we want to hold
+			 * the reference over the retry
+			 */
+			return 0;
+		}
 		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
 			/* free extra copy of the data pages */
+/* FIXME bio has been freed!!! */
 			int i = bio->bi_vcnt;
 			while (i--)
 				__free_page(bio->bi_io_vec[i].bv_page);
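
The accounting in raid1_end_write_request() is subtle: the last write to complete must either finish the master bio or hand the whole r1_bio to raid1d for a barrier retry, and the reference must not be dropped in between. Below is a minimal userspace sketch of that "last completer decides" pattern, using C11 atomics in place of the kernel's atomic_t; the struct, function names and printf stand-ins are illustrative, not taken from raid1.c.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct master_io {
	atomic_int remaining;        /* sub-requests still in flight */
	atomic_bool barrier_retry;   /* a barrier write was rejected */
};

/* Called once per completed sub-request; barrier_failed models
 * error == -ENOTSUPP on a request marked R1BIO_Barrier. */
static void sub_request_done(struct master_io *m, bool barrier_failed)
{
	if (barrier_failed)
		atomic_store(&m->barrier_retry, true);

	if (atomic_fetch_sub(&m->remaining, 1) == 1) {
		/* we are the last completer, so we decide */
		if (atomic_load(&m->barrier_retry))
			printf("hand off for retry, keep the reference\n");
		else
			printf("complete the master request\n");
	}
}

int main(void)
{
	struct master_io m = { .remaining = 2, .barrier_retry = false };

	sub_request_done(&m, true);    /* barrier write rejected */
	sub_request_done(&m, false);   /* second mirror completes */
	return 0;
}

Here sub_request_done() plays the role of the end_io handler: atomic_fetch_sub() returning 1 identifies the final completer, matching the atomic_dec_and_test() in the hunk above.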
@@ -416,12 +431,12 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 		/* Choose the first operation device, for consistancy */
 		new_disk = 0;
 
-		for (rdev = conf->mirrors[new_disk].rdev;
-		     !rdev || !rdev->in_sync
+		for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+		     !rdev || !test_bit(In_sync, &rdev->flags)
 			     || test_bit(WriteMostly, &rdev->flags);
-		     rdev = conf->mirrors[++new_disk].rdev) {
+		     rdev = rcu_dereference(conf->mirrors[++new_disk].rdev)) {
 
-			if (rdev && rdev->in_sync)
+			if (rdev && test_bit(In_sync, &rdev->flags))
 				wonly_disk = new_disk;
 
 			if (new_disk == conf->raid_disks - 1) {
@@ -434,12 +449,12 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 
 
 	/* make sure the disk is operational */
-	for (rdev = conf->mirrors[new_disk].rdev;
-	     !rdev || !rdev->in_sync ||
+	for (rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
+	     !rdev || !test_bit(In_sync, &rdev->flags) ||
 	     test_bit(WriteMostly, &rdev->flags);
-	     rdev = conf->mirrors[new_disk].rdev) {
+	     rdev = rcu_dereference(conf->mirrors[new_disk].rdev)) {
 
-		if (rdev && rdev->in_sync)
+		if (rdev && test_bit(In_sync, &rdev->flags))
 			wonly_disk = new_disk;
 
 		if (new_disk <= 0)
@@ -474,10 +489,10 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 			disk = conf->raid_disks;
 		disk--;
 
-		rdev = conf->mirrors[disk].rdev;
+		rdev = rcu_dereference(conf->mirrors[disk].rdev);
 
 		if (!rdev ||
-		    !rdev->in_sync ||
+		    !test_bit(In_sync, &rdev->flags) ||
 		    test_bit(WriteMostly, &rdev->flags))
 			continue;
 
@@ -496,11 +511,11 @@ static int read_balance(conf_t *conf, r1bio_t *r1_bio)
 
 
 	if (new_disk >= 0) {
-		rdev = conf->mirrors[new_disk].rdev;
+		rdev = rcu_dereference(conf->mirrors[new_disk].rdev);
 		if (!rdev)
 			goto retry;
 		atomic_inc(&rdev->nr_pending);
-		if (!rdev->in_sync) {
+		if (!test_bit(In_sync, &rdev->flags)) {
 			/* cannot risk returning a device that failed
 			 * before we inc'ed nr_pending
 			 */
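
Every rcu_dereference() added to read_balance() above follows one discipline: fetch the rdev pointer inside the RCU read side, pin it by bumping nr_pending, then re-check In_sync because the device may have failed before the reference was taken. A compilable userspace sketch of that dereference/pin/re-check ordering, with a C11 atomic pointer standing in for the RCU-protected slot (the types and names are illustrative, not kernel code):

#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct rdev {
	atomic_int nr_pending;   /* ~ rdev->nr_pending */
	atomic_bool in_sync;     /* ~ test_bit(In_sync, &rdev->flags) */
};

/* Atomic pointer standing in for the RCU-protected conf->mirrors[i].rdev. */
static _Atomic(struct rdev *) mirror_rdev;

/* Same order as the fixed read_balance(): dereference, pin, re-check. */
static struct rdev *pick_rdev(void)
{
	struct rdev *r = atomic_load(&mirror_rdev);  /* ~ rcu_dereference() */

	if (!r || !atomic_load(&r->in_sync))
		return NULL;
	atomic_fetch_add(&r->nr_pending, 1);         /* pin before use */
	if (!atomic_load(&r->in_sync)) {
		/* cannot risk returning a device that failed
		 * before we took the reference */
		atomic_fetch_sub(&r->nr_pending, 1);
		return NULL;
	}
	return r;
}

int main(void)
{
	static struct rdev r0 = { .nr_pending = 0, .in_sync = true };

	atomic_store(&mirror_rdev, &r0);             /* ~ rcu_assign_pointer() */
	printf("picked: %s\n", pick_rdev() ? "yes" : "no");
	return 0;
}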
@@ -522,8 +537,8 @@ static void unplug_slaves(mddev_t *mddev)
 
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
-		if (rdev && !rdev->faulty && atomic_read(&rdev->nr_pending)) {
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+		if (rdev && !test_bit(Faulty, &rdev->flags) && atomic_read(&rdev->nr_pending)) {
 			request_queue_t *r_queue = bdev_get_queue(rdev->bdev);
 
 			atomic_inc(&rdev->nr_pending);
@@ -556,8 +571,8 @@ static int raid1_issue_flush(request_queue_t *q, struct gendisk *disk,
 
 	rcu_read_lock();
 	for (i=0; i<mddev->raid_disks && ret == 0; i++) {
-		mdk_rdev_t *rdev = conf->mirrors[i].rdev;
-		if (rdev && !rdev->faulty) {
+		mdk_rdev_t *rdev = rcu_dereference(conf->mirrors[i].rdev);
+		if (rdev && !test_bit(Faulty, &rdev->flags)) {
 			struct block_device *bdev = rdev->bdev;
 			request_queue_t *r_queue = bdev_get_queue(bdev);
 
@@ -648,8 +663,9 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	struct bio_list bl;
 	struct page **behind_pages = NULL;
 	const int rw = bio_data_dir(bio);
+	int do_barriers;
 
-	if (unlikely(bio_barrier(bio))) {
+	if (unlikely(!mddev->barriers_work && bio_barrier(bio))) {
 		bio_endio(bio, bio->bi_size, -EOPNOTSUPP);
 		return 0;
 	}
@@ -728,10 +744,10 @@ static int make_request(request_queue_t *q, struct bio * bio)
 #endif
 	rcu_read_lock();
 	for (i = 0; i < disks; i++) {
-		if ((rdev=conf->mirrors[i].rdev) != NULL &&
-		    !rdev->faulty) {
+		if ((rdev=rcu_dereference(conf->mirrors[i].rdev)) != NULL &&
+		    !test_bit(Faulty, &rdev->flags)) {
 			atomic_inc(&rdev->nr_pending);
-			if (rdev->faulty) {
+			if (test_bit(Faulty, &rdev->flags)) {
 				atomic_dec(&rdev->nr_pending);
 				r1_bio->bios[i] = NULL;
 			} else
@@ -759,6 +775,10 @@ static int make_request(request_queue_t *q, struct bio * bio)
 	atomic_set(&r1_bio->remaining, 0);
 	atomic_set(&r1_bio->behind_remaining, 0);
 
+	do_barriers = bio->bi_rw & BIO_RW_BARRIER;
+	if (do_barriers)
+		set_bit(R1BIO_Barrier, &r1_bio->state);
+
 	bio_list_init(&bl);
 	for (i = 0; i < disks; i++) {
 		struct bio *mbio;
@@ -771,7 +791,7 @@ static int make_request(request_queue_t *q, struct bio * bio)
 		mbio->bi_sector	= r1_bio->sector + conf->mirrors[i].rdev->data_offset;
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
-		mbio->bi_rw = WRITE;
+		mbio->bi_rw = WRITE | do_barriers;
 		mbio->bi_private = r1_bio;
 
 		if (behind_pages) {
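
Two of the make_request() hunks above cooperate: one records the barrier request in do_barriers and R1BIO_Barrier, the other copies the bit into every per-mirror clone via "mbio->bi_rw = WRITE | do_barriers". A small self-contained sketch of that flag propagation; the SKETCH_* constants are illustrative masks, not the kernel's actual WRITE/BIO_RW_BARRIER values:

#include <assert.h>

#define SKETCH_WRITE       0x1u
#define SKETCH_RW_BARRIER  0x10u   /* illustrative stand-in, not the real bit */

/* Each cloned mirror request inherits the barrier bit of the master
 * request, the shape of "mbio->bi_rw = WRITE | do_barriers" above. */
static unsigned int mirror_rw_flags(unsigned int master_rw)
{
	unsigned int do_barriers = master_rw & SKETCH_RW_BARRIER;

	return SKETCH_WRITE | do_barriers;
}

int main(void)
{
	assert(mirror_rw_flags(SKETCH_WRITE) == SKETCH_WRITE);
	assert(mirror_rw_flags(SKETCH_WRITE | SKETCH_RW_BARRIER) ==
	       (SKETCH_WRITE | SKETCH_RW_BARRIER));
	return 0;
}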
@@ -824,7 +844,7 @@ static void status(struct seq_file *seq, mddev_t *mddev)
 	for (i = 0; i < conf->raid_disks; i++)
 		seq_printf(seq, "%s",
 			   conf->mirrors[i].rdev &&
-			   conf->mirrors[i].rdev->in_sync ? "U" : "_");
+			   test_bit(In_sync, &conf->mirrors[i].rdev->flags) ? "U" : "_");
 	seq_printf(seq, "]");
 }
 
@@ -840,14 +860,14 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 	 * next level up know.
 	 * else mark the drive as failed
 	 */
-	if (rdev->in_sync
+	if (test_bit(In_sync, &rdev->flags)
 	    && conf->working_disks == 1)
 		/*
 		 * Don't fail the drive, act as though we were just a
 		 * normal single drive
 		 */
 		return;
-	if (rdev->in_sync) {
+	if (test_bit(In_sync, &rdev->flags)) {
 		mddev->degraded++;
 		conf->working_disks--;
 		/*
@@ -855,8 +875,8 @@ static void error(mddev_t *mddev, mdk_rdev_t *rdev)
 		 */
 		set_bit(MD_RECOVERY_ERR, &mddev->recovery);
 	}
-	rdev->in_sync = 0;
-	rdev->faulty = 1;
+	clear_bit(In_sync, &rdev->flags);
+	set_bit(Faulty, &rdev->flags);
 	mddev->sb_dirty = 1;
 	printk(KERN_ALERT "raid1: Disk failure on %s, disabling device. \n"
 		"	Operation continuing on %d devices\n",
@@ -881,7 +901,7 @@ static void print_conf(conf_t *conf)
 		tmp = conf->mirrors + i;
 		if (tmp->rdev)
 			printk(" disk %d, wo:%d, o:%d, dev:%s\n",
-				i, !tmp->rdev->in_sync, !tmp->rdev->faulty,
+				i, !test_bit(In_sync, &tmp->rdev->flags), !test_bit(Faulty, &tmp->rdev->flags),
 				bdevname(tmp->rdev->bdev,b));
 	}
 }
@@ -913,11 +933,11 @@ static int raid1_spare_active(mddev_t *mddev)
 	for (i = 0; i < conf->raid_disks; i++) {
 		tmp = conf->mirrors + i;
 		if (tmp->rdev
-		    && !tmp->rdev->faulty
-		    && !tmp->rdev->in_sync) {
+		    && !test_bit(Faulty, &tmp->rdev->flags)
+		    && !test_bit(In_sync, &tmp->rdev->flags)) {
 			conf->working_disks++;
 			mddev->degraded--;
-			tmp->rdev->in_sync = 1;
+			set_bit(In_sync, &tmp->rdev->flags);
 		}
 	}
 
@@ -954,7 +974,7 @@ static int raid1_add_disk(mddev_t *mddev, mdk_rdev_t *rdev)
 			found = 1;
 			if (rdev->saved_raid_disk != mirror)
 				conf->fullsync = 1;
-			p->rdev = rdev;
+			rcu_assign_pointer(p->rdev, rdev);
 			break;
 		}
 
@@ -972,7 +992,7 @@ static int raid1_remove_disk(mddev_t *mddev, int number)
 	print_conf(conf);
 	rdev = p->rdev;
 	if (rdev) {
-		if (rdev->in_sync ||
+		if (test_bit(In_sync, &rdev->flags) ||
 		    atomic_read(&rdev->nr_pending)) {
 			err = -EBUSY;
 			goto abort;
@@ -1153,6 +1173,36 @@ static void raid1d(mddev_t *mddev)
 		if (test_bit(R1BIO_IsSync, &r1_bio->state)) {
 			sync_request_write(mddev, r1_bio);
 			unplug = 1;
+		} else if (test_bit(R1BIO_BarrierRetry, &r1_bio->state)) {
+			/* some requests in the r1bio were BIO_RW_BARRIER
+			 * requests which failed with -ENOTSUPP. Hohumm..
+			 * Better resubmit without the barrier.
+			 * We know which devices to resubmit for, because
+			 * all others have had their bios[] entry cleared.
+			 */
+			int i;
+			clear_bit(R1BIO_BarrierRetry, &r1_bio->state);
+			clear_bit(R1BIO_Barrier, &r1_bio->state);
+			for (i=0; i < conf->raid_disks; i++)
+				if (r1_bio->bios[i]) {
+					struct bio_vec *bvec;
+					int j;
+
+					bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
+					/* copy pages from the failed bio, as
+					 * this might be a write-behind device */
+					__bio_for_each_segment(bvec, bio, j, 0)
+						bvec->bv_page = bio_iovec_idx(r1_bio->bios[i], j)->bv_page;
+					bio_put(r1_bio->bios[i]);
+					bio->bi_sector = r1_bio->sector +
+						conf->mirrors[i].rdev->data_offset;
+					bio->bi_bdev = conf->mirrors[i].rdev->bdev;
+					bio->bi_end_io = raid1_end_write_request;
+					bio->bi_rw = WRITE;
+					bio->bi_private = r1_bio;
+					r1_bio->bios[i] = bio;
+					generic_make_request(bio);
+				}
 		} else {
 			int disk;
 			bio = r1_bio->bios[r1_bio->read_disk];
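
The branch above resubmits only the mirrors whose bios[] entries survived raid1_end_write_request(), this time without the barrier bit. A compact userspace model of that loop, reusing the illustrative SKETCH_* masks from the earlier sketch (structure and names are assumptions, not kernel code):

#include <stdbool.h>
#include <stdio.h>

#define SKETCH_WRITE       0x1u
#define SKETCH_RW_BARRIER  0x10u   /* illustrative, as in the earlier sketch */

struct subreq {
	bool needs_retry;   /* ~ r1_bio->bios[i] != NULL */
	unsigned int rw;
};

/* Resubmit every surviving sub-request as a plain WRITE, the shape of
 * the R1BIO_BarrierRetry branch in raid1d() above. */
static void retry_without_barrier(struct subreq *sub, int nmirrors)
{
	for (int i = 0; i < nmirrors; i++) {
		if (!sub[i].needs_retry)
			continue;
		sub[i].rw = SKETCH_WRITE;   /* ~ bio->bi_rw = WRITE */
		printf("resubmit mirror %d without barrier\n", i);
	}
}

int main(void)
{
	struct subreq sub[2] = {
		{ .needs_retry = true,  .rw = SKETCH_WRITE | SKETCH_RW_BARRIER },
		{ .needs_retry = false, .rw = 0 },
	};

	retry_without_barrier(sub, 2);
	return 0;
}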
@@ -1260,7 +1310,7 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	 * This call the bitmap_start_sync doesn't actually record anything
 	 */
 	if (!bitmap_start_sync(mddev->bitmap, sector_nr, &sync_blocks, 1) &&
-	    !conf->fullsync) {
+	    !conf->fullsync && !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
 		/* We can skip this block, and probably several more */
 		*skipped = 1;
 		return sync_blocks;
@@ -1282,11 +1332,11 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 	/* make sure disk is operational */
 	wonly = disk;
 	while (conf->mirrors[disk].rdev == NULL ||
-	       !conf->mirrors[disk].rdev->in_sync ||
+	       !test_bit(In_sync, &conf->mirrors[disk].rdev->flags) ||
 	       test_bit(WriteMostly, &conf->mirrors[disk].rdev->flags)
 		) {
 		if (conf->mirrors[disk].rdev &&
-		    conf->mirrors[disk].rdev->in_sync)
+		    test_bit(In_sync, &conf->mirrors[disk].rdev->flags))
 			wonly = disk;
 		if (disk <= 0)
 			disk = conf->raid_disks;
@@ -1333,11 +1383,12 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 			bio->bi_rw = READ;
 			bio->bi_end_io = end_sync_read;
 		} else if (conf->mirrors[i].rdev == NULL ||
-			   conf->mirrors[i].rdev->faulty) {
+			   test_bit(Faulty, &conf->mirrors[i].rdev->flags)) {
 			still_degraded = 1;
 			continue;
-		} else if (!conf->mirrors[i].rdev->in_sync ||
-			   sector_nr + RESYNC_SECTORS > mddev->recovery_cp) {
+		} else if (!test_bit(In_sync, &conf->mirrors[i].rdev->flags) ||
+			   sector_nr + RESYNC_SECTORS > mddev->recovery_cp ||
+			   test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery)) {
 			bio->bi_rw = WRITE;
 			bio->bi_end_io = end_sync_write;
 			write_targets ++;
@@ -1371,8 +1422,9 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, i
 				break;
 			if (sync_blocks == 0) {
 				if (!bitmap_start_sync(mddev->bitmap, sector_nr,
 						&sync_blocks, still_degraded) &&
-				    !conf->fullsync)
+				    !conf->fullsync &&
+				    !test_bit(MD_RECOVERY_REQUESTED, &mddev->recovery))
 					break;
 				if (sync_blocks < (PAGE_SIZE>>9))
 					BUG();
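
The MD_RECOVERY_REQUESTED tests added to sync_request() above all serve one goal: a user-requested check or repair pass must not be short-circuited by bitmap optimizations. A one-function sketch of the skip predicate the two bitmap_start_sync() call sites converge on, with the kernel's flag tests reduced to plain booleans (names illustrative):

#include <assert.h>
#include <stdbool.h>

/* Skip a resync block only if the bitmap reports it clean, no full
 * sync is pending, and the user did not explicitly request the pass. */
static bool may_skip_block(bool bitmap_clean, bool fullsync,
			   bool recovery_requested)
{
	return bitmap_clean && !fullsync && !recovery_requested;
}

int main(void)
{
	assert(may_skip_block(true, false, false));   /* normal resync: skip */
	assert(!may_skip_block(true, false, true));   /* requested check: never skip */
	return 0;
}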
@@ -1478,7 +1530,7 @@ static int run(mddev_t *mddev)
 		blk_queue_max_sectors(mddev->queue, PAGE_SIZE>>9);
 
 		disk->head_position = 0;
-		if (!rdev->faulty && rdev->in_sync)
+		if (!test_bit(Faulty, &rdev->flags) && test_bit(In_sync, &rdev->flags))
 			conf->working_disks++;
 	}
 	conf->raid_disks = mddev->raid_disks;
@@ -1518,7 +1570,7 @@ static int run(mddev_t *mddev)
 	 */
 	for (j = 0; j < conf->raid_disks &&
 		     (!conf->mirrors[j].rdev ||
-		      !conf->mirrors[j].rdev->in_sync) ; j++)
+		      !test_bit(In_sync, &conf->mirrors[j].rdev->flags)) ; j++)
 		/* nothing */;
 	conf->last_used = j;
 