Diffstat (limited to 'drivers')
-rw-r--r--	drivers/md/raid10.c	105
1 files changed, 98 insertions, 7 deletions

diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 813f52464f8a..8a6862be9f58 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1594,19 +1594,29 @@ static void end_sync_write(struct bio *bio, int error)
 	sector_t first_bad;
 	int bad_sectors;
 	int slot;
+	int repl;
+	struct md_rdev *rdev;
 
-	d = find_bio_disk(conf, r10_bio, bio, &slot, NULL);
+	d = find_bio_disk(conf, r10_bio, bio, &slot, &repl);
+	if (repl)
+		rdev = conf->mirrors[d].replacement;
+	else
+		rdev = conf->mirrors[d].rdev;
 
 	if (!uptodate) {
-		set_bit(WriteErrorSeen, &conf->mirrors[d].rdev->flags);
-		set_bit(R10BIO_WriteError, &r10_bio->state);
-	} else if (is_badblock(conf->mirrors[d].rdev,
+		if (repl)
+			md_error(mddev, rdev);
+		else {
+			set_bit(WriteErrorSeen, &rdev->flags);
+			set_bit(R10BIO_WriteError, &r10_bio->state);
+		}
+	} else if (is_badblock(rdev,
 			       r10_bio->devs[slot].addr,
 			       r10_bio->sectors,
 			       &first_bad, &bad_sectors))
 		set_bit(R10BIO_MadeGood, &r10_bio->state);
 
-	rdev_dec_pending(conf->mirrors[d].rdev, mddev);
+	rdev_dec_pending(rdev, mddev);
 
 	end_sync_request(r10_bio);
 }
@@ -1710,6 +1720,29 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		generic_make_request(tbio);
 	}
 
+	/* Now write out to any replacement devices
+	 * that are active
+	 */
+	for (i = 0; i < conf->copies; i++) {
+		int j, d;
+		int vcnt = r10_bio->sectors >> (PAGE_SHIFT-9);
+
+		tbio = r10_bio->devs[i].repl_bio;
+		if (!tbio || !tbio->bi_end_io)
+			continue;
+		if (r10_bio->devs[i].bio->bi_end_io != end_sync_write
+		    && r10_bio->devs[i].bio != fbio)
+			for (j = 0; j < vcnt; j++)
+				memcpy(page_address(tbio->bi_io_vec[j].bv_page),
+				       page_address(fbio->bi_io_vec[j].bv_page),
+				       PAGE_SIZE);
+		d = r10_bio->devs[i].devnum;
+		atomic_inc(&r10_bio->remaining);
+		md_sync_acct(conf->mirrors[d].replacement->bdev,
+			     tbio->bi_size >> 9);
+		generic_make_request(tbio);
+	}
+
 done:
 	if (atomic_dec_and_test(&r10_bio->remaining)) {
 		md_done_sync(mddev, r10_bio->sectors, 1);
@@ -2287,6 +2320,22 @@ static void handle_write_completed(struct r10conf *conf, struct r10bio *r10_bio)
 					    r10_bio->sectors, 0))
 					md_error(conf->mddev, rdev);
 			}
+			rdev = conf->mirrors[dev].replacement;
+			if (r10_bio->devs[m].repl_bio == NULL)
+				continue;
+			if (test_bit(BIO_UPTODATE,
+				     &r10_bio->devs[m].repl_bio->bi_flags)) {
+				rdev_clear_badblocks(
+					rdev,
+					r10_bio->devs[m].addr,
+					r10_bio->sectors);
+			} else {
+				if (!rdev_set_badblocks(
+					    rdev,
+					    r10_bio->devs[m].addr,
+					    r10_bio->sectors, 0))
+					md_error(conf->mddev, rdev);
+			}
 		}
 		put_buf(r10_bio);
 	} else {
@@ -2469,9 +2518,22 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 				bitmap_end_sync(mddev->bitmap, sect,
 						&sync_blocks, 1);
 			}
-		} else /* completed sync */
+		} else {
+			/* completed sync */
+			if ((!mddev->bitmap || conf->fullsync)
+			    && conf->have_replacement
+			    && test_bit(MD_RECOVERY_SYNC, &mddev->recovery)) {
+				/* Completed a full sync so the replacements
+				 * are now fully recovered.
+				 */
+				for (i = 0; i < conf->raid_disks; i++)
+					if (conf->mirrors[i].replacement)
+						conf->mirrors[i].replacement
+							->recovery_offset
+							= MaxSector;
+			}
 			conf->fullsync = 0;
-
+		}
 		bitmap_close_sync(mddev->bitmap);
 		close_sync(conf);
 		*skipped = 1;
@@ -2719,6 +2781,9 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			sector_t first_bad, sector;
 			int bad_sectors;
 
+			if (r10_bio->devs[i].repl_bio)
+				r10_bio->devs[i].repl_bio->bi_end_io = NULL;
+
 			bio = r10_bio->devs[i].bio;
 			bio->bi_end_io = NULL;
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
@@ -2749,6 +2814,27 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 				conf->mirrors[d].rdev->data_offset;
 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
 			count++;
+
+			if (conf->mirrors[d].replacement == NULL ||
+			    test_bit(Faulty,
+				     &conf->mirrors[d].replacement->flags))
+				continue;
+
+			/* Need to set up for writing to the replacement */
+			bio = r10_bio->devs[i].repl_bio;
+			clear_bit(BIO_UPTODATE, &bio->bi_flags);
+
+			sector = r10_bio->devs[i].addr;
+			atomic_inc(&conf->mirrors[d].rdev->nr_pending);
+			bio->bi_next = biolist;
+			biolist = bio;
+			bio->bi_private = r10_bio;
+			bio->bi_end_io = end_sync_write;
+			bio->bi_rw = WRITE;
+			bio->bi_sector = sector +
+				conf->mirrors[d].replacement->data_offset;
+			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
+			count++;
 		}
 
 		if (count < 2) {
@@ -2757,6 +2843,11 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			if (r10_bio->devs[i].bio->bi_end_io)
 				rdev_dec_pending(conf->mirrors[d].rdev,
 						 mddev);
+			if (r10_bio->devs[i].repl_bio &&
+			    r10_bio->devs[i].repl_bio->bi_end_io)
+				rdev_dec_pending(
+					conf->mirrors[d].replacement,
+					mddev);
 		}
 		put_buf(r10_bio);
 		biolist = NULL;