author		NeilBrown <neilb@suse.de>	2011-07-27 21:39:23 -0400
committer	NeilBrown <neilb@suse.de>	2011-07-27 21:39:23 -0400
commit		7399c31bc92a26bb8388a73f8e14acadcc512fe5 (patch)
tree		0f1d9904fde1d5258c84643a26947288251b9776 /drivers/md
parent		856e08e23762dfb92ffc68fd0a8d228f9e152160 (diff)
md/raid10: avoid reading from known bad blocks - part 2
When redirecting a read error to a different device, we must
again avoid bad blocks and possibly split the request.
Spin_lock typo fixed thanks to Dan Carpenter <error27@gmail.com>
Signed-off-by: NeilBrown <neilb@suse.de>
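
The change below makes the redirect path honour known bad blocks on the newly chosen mirror: when read_balance() can only serve part of the range before a bad block, that part is submitted and the handler loops back (read_more) for the remainder. As a rough, hypothetical userspace sketch of that split-and-retry shape (pick_mirror(), issue_read() and struct request are illustrative stand-ins, not the raid10 API used in the diff):

/*
 * Hedged userspace sketch only: all names here are invented stand-ins.
 * The real code uses read_balance(), bio_clone_mddev()/md_trim_bio(),
 * and a fresh r10_bio per retried piece.
 */
#include <stdio.h>

struct request {
	unsigned long long sector;	/* first sector still to be read      */
	int sectors;			/* sectors remaining in the request   */
};

/*
 * Stand-in for read_balance(): pick a mirror and report how many sectors
 * it can serve before a known bad block; return -1 if none can serve the
 * start of the range at all.
 */
static int pick_mirror(const struct request *r, int *max_sectors)
{
	/* Pretend every mirror has a bad block every 8 sectors. */
	int until_bad = 8 - (int)(r->sector % 8);

	*max_sectors = until_bad < r->sectors ? until_bad : r->sectors;
	return 0;	/* mirror number (always 0 in this toy) */
}

static void issue_read(int mirror, unsigned long long sector, int sectors)
{
	printf("read %d sectors at %llu from mirror %d\n",
	       sectors, sector, mirror);
}

static void redirect_read(struct request *r)
{
	while (r->sectors) {
		int max_sectors;
		int mirror = pick_mirror(r, &max_sectors);

		if (mirror < 0) {
			fprintf(stderr, "unrecoverable read error at %llu\n",
				r->sector);
			return;
		}
		/*
		 * Only part of the request may be servable: issue that part
		 * and loop for the remainder, as the patch does with
		 * "goto read_more".
		 */
		issue_read(mirror, r->sector, max_sectors);
		r->sector += max_sectors;
		r->sectors -= max_sectors;
	}
}

int main(void)
{
	struct request r = { .sector = 100, .sectors = 20 };

	redirect_read(&r);
	return 0;
}

In the actual patch each retried piece gets its own r10_bio from conf->r10bio_pool, and the number of pieces outstanding against the master bio is tracked in bi_phys_segments under conf->device_lock.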
Diffstat (limited to 'drivers/md')
-rw-r--r--	drivers/md/raid10.c	45
1 file changed, 40 insertions, 5 deletions
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 872bf948f33a..37801d68a4cd 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1746,14 +1746,15 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
 	rdev_dec_pending(conf->mirrors[mirror].rdev, mddev);
 
 	bio = r10_bio->devs[slot].bio;
+	bdevname(bio->bi_bdev, b);
 	r10_bio->devs[slot].bio =
 		mddev->ro ? IO_BLOCKED : NULL;
+read_more:
 	mirror = read_balance(conf, r10_bio, &max_sectors);
-	if (mirror == -1 || max_sectors < r10_bio->sectors) {
+	if (mirror == -1) {
 		printk(KERN_ALERT "md/raid10:%s: %s: unrecoverable I/O"
 		       " read error for block %llu\n",
-		       mdname(mddev),
-		       bdevname(bio->bi_bdev, b),
+		       mdname(mddev), b,
 		       (unsigned long long)r10_bio->sector);
 		raid_end_bio_io(r10_bio);
 		bio_put(bio);
@@ -1761,7 +1762,8 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
 	}
 
 	do_sync = (r10_bio->master_bio->bi_rw & REQ_SYNC);
-	bio_put(bio);
+	if (bio)
+		bio_put(bio);
 	slot = r10_bio->read_slot;
 	rdev = conf->mirrors[mirror].rdev;
 	printk_ratelimited(
@@ -1773,6 +1775,9 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
 	       (unsigned long long)r10_bio->sector);
 	bio = bio_clone_mddev(r10_bio->master_bio,
 			      GFP_NOIO, mddev);
+	md_trim_bio(bio,
+		    r10_bio->sector - bio->bi_sector,
+		    max_sectors);
 	r10_bio->devs[slot].bio = bio;
 	bio->bi_sector = r10_bio->devs[slot].addr
 		+ rdev->data_offset;
@@ -1780,7 +1785,37 @@ static void handle_read_error(mddev_t *mddev, r10bio_t *r10_bio)
 	bio->bi_rw = READ | do_sync;
 	bio->bi_private = r10_bio;
 	bio->bi_end_io = raid10_end_read_request;
-	generic_make_request(bio);
+	if (max_sectors < r10_bio->sectors) {
+		/* Drat - have to split this up more */
+		struct bio *mbio = r10_bio->master_bio;
+		int sectors_handled =
+			r10_bio->sector + max_sectors
+			- mbio->bi_sector;
+		r10_bio->sectors = max_sectors;
+		spin_lock_irq(&conf->device_lock);
+		if (mbio->bi_phys_segments == 0)
+			mbio->bi_phys_segments = 2;
+		else
+			mbio->bi_phys_segments++;
+		spin_unlock_irq(&conf->device_lock);
+		generic_make_request(bio);
+		bio = NULL;
+
+		r10_bio = mempool_alloc(conf->r10bio_pool,
+					GFP_NOIO);
+		r10_bio->master_bio = mbio;
+		r10_bio->sectors = (mbio->bi_size >> 9)
+			- sectors_handled;
+		r10_bio->state = 0;
+		set_bit(R10BIO_ReadError,
+			&r10_bio->state);
+		r10_bio->mddev = mddev;
+		r10_bio->sector = mbio->bi_sector
+			+ sectors_handled;
+
+		goto read_more;
+	} else
+		generic_make_request(bio);
 }
 
 static void raid10d(mddev_t *mddev)