author     Grant Likely <grant.likely@secretlab.ca>   2012-05-08 13:35:37 -0400
committer  Grant Likely <grant.likely@secretlab.ca>   2012-05-08 13:35:37 -0400
commit     7b96c686223a5c902d6a59c7d178f3904f0ab757 (patch)
tree       fe328ed56ad3719de3cfebad72ef74e34f1ed92b /drivers/md
parent     f141ed65f256ec036c7fba604da6b7c448096ef9 (diff)
parent     d48b97b403d23f6df0b990cee652bdf9a52337a3 (diff)
Merge tag 'v3.4-rc6' into gpio/next
Linux 3.4-rc6
Diffstat (limited to 'drivers/md')
 drivers/md/bitmap.c  |  8
 drivers/md/bitmap.h  |  3
 drivers/md/dm-raid.c |  4
 drivers/md/linear.c  |  9
 drivers/md/md.c      |  7
 drivers/md/raid0.c   | 27
 drivers/md/raid1.c   | 16
 drivers/md/raid10.c  |  6
 drivers/md/raid5.c   | 59
 9 files changed, 88 insertions, 51 deletions
diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
index 3d0dfa7a89a2..17e2b472e16d 100644
--- a/drivers/md/bitmap.c
+++ b/drivers/md/bitmap.c
@@ -539,9 +539,6 @@ static int bitmap_new_disk_sb(struct bitmap *bitmap)
 	bitmap->events_cleared = bitmap->mddev->events;
 	sb->events_cleared = cpu_to_le64(bitmap->mddev->events);
 
-	bitmap->flags |= BITMAP_HOSTENDIAN;
-	sb->version = cpu_to_le32(BITMAP_MAJOR_HOSTENDIAN);
-
 	kunmap_atomic(sb);
 
 	return 0;
@@ -1730,8 +1727,7 @@ int bitmap_create(struct mddev *mddev)
 	bitmap->chunkshift = (ffz(~mddev->bitmap_info.chunksize)
 			      - BITMAP_BLOCK_SHIFT);
 
-	/* now that chunksize and chunkshift are set, we can use these macros */
-	chunks = (blocks + bitmap->chunkshift - 1) >>
+	chunks = (blocks + (1 << bitmap->chunkshift) - 1) >>
 			bitmap->chunkshift;
 	pages = (chunks + PAGE_COUNTER_RATIO - 1) / PAGE_COUNTER_RATIO;
 
@@ -1788,7 +1784,9 @@ int bitmap_load(struct mddev *mddev)
 		 * re-add of a missing device */
 		start = mddev->recovery_cp;
 
+	mutex_lock(&mddev->bitmap_info.mutex);
 	err = bitmap_init_from_disk(bitmap, start);
+	mutex_unlock(&mddev->bitmap_info.mutex);
 
 	if (err)
 		goto out;
diff --git a/drivers/md/bitmap.h b/drivers/md/bitmap.h
index 55ca5aec84e4..b44b0aba2d47 100644
--- a/drivers/md/bitmap.h
+++ b/drivers/md/bitmap.h
@@ -101,9 +101,6 @@ typedef __u16 bitmap_counter_t;
 
 #define BITMAP_BLOCK_SHIFT 9
 
-/* how many blocks per chunk? (this is variable) */
-#define CHUNK_BLOCK_RATIO(bitmap) ((bitmap)->mddev->bitmap_info.chunksize >> BITMAP_BLOCK_SHIFT)
-
 #endif
 
 /*
diff --git a/drivers/md/dm-raid.c b/drivers/md/dm-raid.c
index b0ba52459ed7..68965e663248 100644
--- a/drivers/md/dm-raid.c
+++ b/drivers/md/dm-raid.c
@@ -859,7 +859,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	int ret;
 	unsigned redundancy = 0;
 	struct raid_dev *dev;
-	struct md_rdev *rdev, *freshest;
+	struct md_rdev *rdev, *tmp, *freshest;
 	struct mddev *mddev = &rs->md;
 
 	switch (rs->raid_type->level) {
@@ -877,7 +877,7 @@ static int analyse_superblocks(struct dm_target *ti, struct raid_set *rs)
 	}
 
 	freshest = NULL;
-	rdev_for_each(rdev, mddev) {
+	rdev_for_each_safe(rdev, tmp, mddev) {
 		if (!rdev->meta_bdev)
 			continue;
 
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index b0fcc7d02adb..fa211d80fc0a 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -198,6 +198,7 @@ out:
 static int linear_run (struct mddev *mddev)
 {
 	struct linear_conf *conf;
+	int ret;
 
 	if (md_check_no_bitmap(mddev))
 		return -EINVAL;
@@ -211,7 +212,13 @@ static int linear_run (struct mddev *mddev)
 	blk_queue_merge_bvec(mddev->queue, linear_mergeable_bvec);
 	mddev->queue->backing_dev_info.congested_fn = linear_congested;
 	mddev->queue->backing_dev_info.congested_data = mddev;
-	return md_integrity_register(mddev);
+
+	ret = md_integrity_register(mddev);
+	if (ret) {
+		kfree(conf);
+		mddev->private = NULL;
+	}
+	return ret;
 }
 
 static int linear_add(struct mddev *mddev, struct md_rdev *rdev)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index b572e1e386ce..477eb2e180c0 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -7560,14 +7560,14 @@ void md_check_recovery(struct mddev *mddev)
 		 * any transients in the value of "sync_action".
 		 */
 		set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
-		clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
 		/* Clear some bits that don't mean anything, but
 		 * might be left set
 		 */
 		clear_bit(MD_RECOVERY_INTR, &mddev->recovery);
 		clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
 
-		if (test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
+		if (!test_and_clear_bit(MD_RECOVERY_NEEDED, &mddev->recovery) ||
+		    test_bit(MD_RECOVERY_FROZEN, &mddev->recovery))
 			goto unlock;
 		/* no recovery is running.
 		 * remove any failed drives, then
@@ -8140,7 +8140,8 @@ static int md_notify_reboot(struct notifier_block *this,
 
 	for_each_mddev(mddev, tmp) {
 		if (mddev_trylock(mddev)) {
-			__md_stop_writes(mddev);
+			if (mddev->pers)
+				__md_stop_writes(mddev);
 			mddev->safemode = 2;
 			mddev_unlock(mddev);
 		}
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 6f31f5596e01..de63a1fc3737 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -407,6 +407,8 @@ static sector_t raid0_size(struct mddev *mddev, sector_t sectors, int raid_disks
 	return array_sectors;
 }
 
+static int raid0_stop(struct mddev *mddev);
+
 static int raid0_run(struct mddev *mddev)
 {
 	struct r0conf *conf;
@@ -454,7 +456,12 @@ static int raid0_run(struct mddev *mddev)
 
 	blk_queue_merge_bvec(mddev->queue, raid0_mergeable_bvec);
 	dump_zones(mddev);
-	return md_integrity_register(mddev);
+
+	ret = md_integrity_register(mddev);
+	if (ret)
+		raid0_stop(mddev);
+
+	return ret;
 }
 
 static int raid0_stop(struct mddev *mddev)
@@ -625,6 +632,7 @@ static void *raid0_takeover_raid10(struct mddev *mddev)
 static void *raid0_takeover_raid1(struct mddev *mddev)
 {
 	struct r0conf *priv_conf;
+	int chunksect;
 
 	/* Check layout:
 	 * - (N - 1) mirror drives must be already faulty
@@ -635,10 +643,25 @@ static void *raid0_takeover_raid1(struct mddev *mddev)
 		return ERR_PTR(-EINVAL);
 	}
 
+	/*
+	 * a raid1 doesn't have the notion of chunk size, so
+	 * figure out the largest suitable size we can use.
+	 */
+	chunksect = 64 * 2; /* 64K by default */
+
+	/* The array must be an exact multiple of chunksize */
+	while (chunksect && (mddev->array_sectors & (chunksect - 1)))
+		chunksect >>= 1;
+
+	if ((chunksect << 9) < PAGE_SIZE)
+		/* array size does not allow a suitable chunk size */
+		return ERR_PTR(-EINVAL);
+
 	/* Set new parameters */
 	mddev->new_level = 0;
 	mddev->new_layout = 0;
-	mddev->new_chunk_sectors = 128; /* by default set chunk size to 64k */
+	mddev->new_chunk_sectors = chunksect;
+	mddev->chunk_sectors = chunksect;
 	mddev->delta_disks = 1 - mddev->raid_disks;
 	mddev->raid_disks = 1;
 	/* make sure it will be not marked as dirty */
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 4a40a200d769..15dd59b84e94 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1712,6 +1712,7 @@ static int process_checks(struct r1bio *r1_bio)
 	struct r1conf *conf = mddev->private;
 	int primary;
 	int i;
+	int vcnt;
 
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -1721,9 +1722,9 @@ static int process_checks(struct r1bio *r1_bio)
 			break;
 		}
 	r1_bio->read_disk = primary;
+	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		int j;
-		int vcnt = r1_bio->sectors >> (PAGE_SHIFT- 9);
 		struct bio *pbio = r1_bio->bios[primary];
 		struct bio *sbio = r1_bio->bios[i];
 		int size;
@@ -1738,7 +1739,7 @@ static int process_checks(struct r1bio *r1_bio)
 				s = sbio->bi_io_vec[j].bv_page;
 				if (memcmp(page_address(p),
 					   page_address(s),
-					   PAGE_SIZE))
+					   sbio->bi_io_vec[j].bv_len))
 					break;
 			}
 		} else
@@ -2386,8 +2387,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 				int ok = 1;
 				for (i = 0 ; i < conf->raid_disks * 2 ; i++)
 					if (r1_bio->bios[i]->bi_end_io == end_sync_write) {
-						struct md_rdev *rdev =
-							rcu_dereference(conf->mirrors[i].rdev);
+						struct md_rdev *rdev = conf->mirrors[i].rdev;
 						ok = rdev_set_badblocks(rdev, sector_nr,
 								min_bad, 0
 							) && ok;
@@ -2636,11 +2636,13 @@ static struct r1conf *setup_conf(struct mddev *mddev)
 		return ERR_PTR(err);
 }
 
+static int stop(struct mddev *mddev);
 static int run(struct mddev *mddev)
 {
 	struct r1conf *conf;
 	int i;
 	struct md_rdev *rdev;
+	int ret;
 
 	if (mddev->level != 1) {
 		printk(KERN_ERR "md/raid1:%s: raid level not set to mirroring (%d)\n",
@@ -2705,7 +2707,11 @@ static int run(struct mddev *mddev)
 		mddev->queue->backing_dev_info.congested_data = mddev;
 		blk_queue_merge_bvec(mddev->queue, raid1_mergeable_bvec);
 	}
-	return md_integrity_register(mddev);
+
+	ret = md_integrity_register(mddev);
+	if (ret)
+		stop(mddev);
+	return ret;
 }
 
 static int stop(struct mddev *mddev)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 3540316886f2..c8dbb84d5357 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1788,6 +1788,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	struct r10conf *conf = mddev->private;
 	int i, first;
 	struct bio *tbio, *fbio;
+	int vcnt;
 
 	atomic_set(&r10_bio->remaining, 1);
 
@@ -1802,10 +1803,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	first = i;
 	fbio = r10_bio->devs[i].bio;
 
+	vcnt = (r10_bio->sectors + (PAGE_SIZE >> 9) - 1) >> (PAGE_SHIFT - 9);
 	/* now find blocks with errors */
 	for (i=0 ; i < conf->copies ; i++) {
 		int j, d;
-		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
 		tbio = r10_bio->devs[i].bio;
 
@@ -1821,7 +1822,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		for (j = 0; j < vcnt; j++)
 			if (memcmp(page_address(fbio->bi_io_vec[j].bv_page),
 				   page_address(tbio->bi_io_vec[j].bv_page),
-				   PAGE_SIZE))
+				   fbio->bi_io_vec[j].bv_len))
 				break;
 		if (j == vcnt)
 			continue;
@@ -1871,7 +1872,6 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	 */
 	for (i = 0; i < conf->copies; i++) {
 		int j, d;
-		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
 
 		tbio = r10_bio->devs[i].repl_bio;
 		if (!tbio || !tbio->bi_end_io)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 23ac880bba9a..f351422938e0 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -2471,39 +2471,41 @@ handle_failed_sync(struct r5conf *conf, struct stripe_head *sh,
 	int abort = 0;
 	int i;
 
-	md_done_sync(conf->mddev, STRIPE_SECTORS, 0);
 	clear_bit(STRIPE_SYNCING, &sh->state);
 	s->syncing = 0;
 	s->replacing = 0;
 	/* There is nothing more to do for sync/check/repair.
+	 * Don't even need to abort as that is handled elsewhere
+	 * if needed, and not always wanted e.g. if there is a known
+	 * bad block here.
 	 * For recover/replace we need to record a bad block on all
 	 * non-sync devices, or abort the recovery
 	 */
-	if (!test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery))
-		return;
-	/* During recovery devices cannot be removed, so locking and
-	 * refcounting of rdevs is not needed
-	 */
-	for (i = 0; i < conf->raid_disks; i++) {
-		struct md_rdev *rdev = conf->disks[i].rdev;
-		if (rdev
-		    && !test_bit(Faulty, &rdev->flags)
-		    && !test_bit(In_sync, &rdev->flags)
-		    && !rdev_set_badblocks(rdev, sh->sector,
-					   STRIPE_SECTORS, 0))
-			abort = 1;
-		rdev = conf->disks[i].replacement;
-		if (rdev
-		    && !test_bit(Faulty, &rdev->flags)
-		    && !test_bit(In_sync, &rdev->flags)
-		    && !rdev_set_badblocks(rdev, sh->sector,
-					   STRIPE_SECTORS, 0))
-			abort = 1;
-	}
-	if (abort) {
-		conf->recovery_disabled = conf->mddev->recovery_disabled;
-		set_bit(MD_RECOVERY_INTR, &conf->mddev->recovery);
+	if (test_bit(MD_RECOVERY_RECOVER, &conf->mddev->recovery)) {
+		/* During recovery devices cannot be removed, so
+		 * locking and refcounting of rdevs is not needed
+		 */
+		for (i = 0; i < conf->raid_disks; i++) {
+			struct md_rdev *rdev = conf->disks[i].rdev;
+			if (rdev
+			    && !test_bit(Faulty, &rdev->flags)
+			    && !test_bit(In_sync, &rdev->flags)
+			    && !rdev_set_badblocks(rdev, sh->sector,
+						   STRIPE_SECTORS, 0))
+				abort = 1;
+			rdev = conf->disks[i].replacement;
+			if (rdev
+			    && !test_bit(Faulty, &rdev->flags)
+			    && !test_bit(In_sync, &rdev->flags)
+			    && !rdev_set_badblocks(rdev, sh->sector,
+						   STRIPE_SECTORS, 0))
+				abort = 1;
+		}
+		if (abort)
+			conf->recovery_disabled =
+				conf->mddev->recovery_disabled;
 	}
+	md_done_sync(conf->mddev, STRIPE_SECTORS, !abort);
 }
 
 static int want_replace(struct stripe_head *sh, int disk_idx)
@@ -3203,7 +3205,8 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 			/* Not in-sync */;
 		else if (is_bad) {
 			/* also not in-sync */
-			if (!test_bit(WriteErrorSeen, &rdev->flags)) {
+			if (!test_bit(WriteErrorSeen, &rdev->flags) &&
+			    test_bit(R5_UPTODATE, &dev->flags)) {
 				/* treat as in-sync, but with a read error
 				 * which we can now try to correct
 				 */
@@ -3276,12 +3279,14 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 		/* If there is a failed device being replaced,
 		 * we must be recovering.
 		 * else if we are after recovery_cp, we must be syncing
+		 * else if MD_RECOVERY_REQUESTED is set, we also are syncing.
 		 * else we can only be replacing
 		 * sync and recovery both need to read all devices, and so
 		 * use the same flag.
 		 */
 		if (do_recovery ||
-		    sh->sector >= conf->mddev->recovery_cp)
+		    sh->sector >= conf->mddev->recovery_cp ||
+		    test_bit(MD_RECOVERY_REQUESTED, &(conf->mddev->recovery)))
 			s->syncing = 1;
 		else
 			s->replacing = 1;