 drivers/md/raid1.c  |  1 -
 drivers/md/raid10.c |  1 -
 drivers/md/raid5.c  | 66 ++++++++++++++++++++++++++++++++++++++-----------
 3 files changed, 51 insertions(+), 17 deletions(-)
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 28a3869dcfd2..0b82030c265d 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -1787,7 +1787,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 		bio->bi_vcnt = 0;
 		bio->bi_idx = 0;
 		bio->bi_phys_segments = 0;
-		bio->bi_hw_segments = 0;
 		bio->bi_size = 0;
 		bio->bi_end_io = NULL;
 		bio->bi_private = NULL;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 0f40688503e7..d3b9aa096285 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1944,7 +1944,6 @@ static sector_t sync_request(mddev_t *mddev, sector_t sector_nr, int *skipped, int go_faster)
 		bio->bi_vcnt = 0;
 		bio->bi_idx = 0;
 		bio->bi_phys_segments = 0;
-		bio->bi_hw_segments = 0;
 		bio->bi_size = 0;
 	}
 
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 224de022e7c5..05b22925cce4 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -101,6 +101,40 @@
 const char raid6_empty_zero_page[PAGE_SIZE] __attribute__((aligned(256)));
 #endif
 
+/*
+ * We maintain a biased count of active stripes in the bottom 8 bits of
+ * bi_phys_segments, and a count of processed stripes in the upper 8 bits
+ */
+static inline int raid5_bi_phys_segments(struct bio *bio)
+{
+	return bio->bi_phys_segments & 0xff;
+}
+
+static inline int raid5_bi_hw_segments(struct bio *bio)
+{
+	return (bio->bi_phys_segments >> 8) & 0xff;
+}
+
+static inline int raid5_dec_bi_phys_segments(struct bio *bio)
+{
+	--bio->bi_phys_segments;
+	return raid5_bi_phys_segments(bio);
+}
+
+static inline int raid5_dec_bi_hw_segments(struct bio *bio)
+{
+	unsigned short val = raid5_bi_hw_segments(bio);
+
+	--val;
+	bio->bi_phys_segments = (val << 8) | raid5_bi_phys_segments(bio);
+	return val;
+}
+
+static inline void raid5_set_bi_hw_segments(struct bio *bio, unsigned int cnt)
+{
+	bio->bi_phys_segments = raid5_bi_phys_segments(bio) | (cnt << 8);
+}
+
 static inline int raid6_next_disk(int disk, int raid_disks)
 {
 	disk++;
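
[Editorial aside, not part of the patch: the packing the new helpers implement is easy to exercise in a stand-alone sketch. The struct below is a stub that mimics only the one field involved, not the kernel's struct bio, and the two accessors mirror raid5_bi_phys_segments()/raid5_bi_hw_segments() above. Note the set helper must combine the two bytes with bitwise |; a logical || would collapse the stored value to 0 or 1.

#include <assert.h>

/* Stub for illustration only; the real struct bio lives in the kernel. */
struct bio { unsigned int bi_phys_segments; };

static int bi_phys(struct bio *b) { return b->bi_phys_segments & 0xff; }        /* active stripes */
static int bi_hw(struct bio *b) { return (b->bi_phys_segments >> 8) & 0xff; }   /* processed stripes */

int main(void)
{
	struct bio b = { .bi_phys_segments = 1 };	/* biased: one active, none processed */

	b.bi_phys_segments = bi_phys(&b) | (3 << 8);	/* record three processed stripes */
	assert(bi_phys(&b) == 1 && bi_hw(&b) == 3);

	--b.bi_phys_segments;				/* drop one active stripe */
	assert(bi_phys(&b) == 0 && bi_hw(&b) == 3);	/* upper byte untouched */
	return 0;
}

Decrementing the whole word is only safe while the low byte is non-zero; a decrement at zero would borrow into the processed count, which is why callers keep the count biased until final completion.]
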
@@ -507,7 +541,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 			while (rbi && rbi->bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
-				if (--rbi->bi_phys_segments == 0) {
+				if (!raid5_dec_bi_phys_segments(rbi)) {
 					rbi->bi_next = return_bi;
 					return_bi = rbi;
 				}
@@ -1725,7 +1759,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, int forwrite)
 	if (*bip)
 		bi->bi_next = *bip;
 	*bip = bi;
-	bi->bi_phys_segments ++;
+	bi->bi_phys_segments++;
 	spin_unlock_irq(&conf->device_lock);
 	spin_unlock(&sh->lock);
 
@@ -1819,7 +1853,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
 				sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
-				if (--bi->bi_phys_segments == 0) {
+				if (!raid5_dec_bi_phys_segments(bi)) {
 					md_write_end(conf->mddev);
 					bi->bi_next = *return_bi;
 					*return_bi = bi;
@@ -1834,7 +1868,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
 				sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
-				if (--bi->bi_phys_segments == 0) {
+				if (!raid5_dec_bi_phys_segments(bi)) {
 					md_write_end(conf->mddev);
 					bi->bi_next = *return_bi;
 					*return_bi = bi;
@@ -1858,7 +1892,7 @@ handle_failed_stripe(raid5_conf_t *conf, struct stripe_head *sh,
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
 				clear_bit(BIO_UPTODATE, &bi->bi_flags);
-				if (--bi->bi_phys_segments == 0) {
+				if (!raid5_dec_bi_phys_segments(bi)) {
 					bi->bi_next = *return_bi;
 					*return_bi = bi;
 				}
@@ -2033,7 +2067,7 @@ static void handle_stripe_clean_event(raid5_conf_t *conf,
 				while (wbi && wbi->bi_sector <
 					dev->sector + STRIPE_SECTORS) {
 					wbi2 = r5_next_bio(wbi, dev->sector);
-					if (--wbi->bi_phys_segments == 0) {
+					if (!raid5_dec_bi_phys_segments(wbi)) {
 						md_write_end(conf->mddev);
 						wbi->bi_next = *return_bi;
 						*return_bi = wbi;
@@ -2814,7 +2848,7 @@ static bool handle_stripe6(struct stripe_head *sh, struct page *tmp_page)
 					copy_data(0, rbi, dev->page, dev->sector);
 					rbi2 = r5_next_bio(rbi, dev->sector);
 					spin_lock_irq(&conf->device_lock);
-					if (--rbi->bi_phys_segments == 0) {
+					if (!raid5_dec_bi_phys_segments(rbi)) {
 						rbi->bi_next = return_bi;
 						return_bi = rbi;
 					}
@@ -3155,8 +3189,11 @@ static struct bio *remove_bio_from_retry(raid5_conf_t *conf)
 	if(bi) {
 		conf->retry_read_aligned_list = bi->bi_next;
 		bi->bi_next = NULL;
+		/*
+		 * this sets the active stripe count to 1 and the processed
+		 * stripe count to zero (upper 8 bits)
+		 */
 		bi->bi_phys_segments = 1; /* biased count of active stripes */
-		bi->bi_hw_segments = 0; /* count of processed stripes */
 	}
 
 	return bi;
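
[Also an aside: the initialization to 1 is the usual biased-reference idiom, so the bio cannot be ended while stripes are still being attached to it. A hypothetical stand-alone sketch of the pattern follows; the names are illustrative, not from the patch.

#include <stdio.h>

static unsigned int active = 1;	/* bias: reference held by the submitter */

static void stripe_done(void)
{
	if (--active == 0)
		printf("last reference gone: bio_endio() would run here\n");
}

int main(void)
{
	for (int i = 0; i < 3; i++)
		active++;	/* one reference per attached stripe */
	for (int i = 0; i < 3; i++)
		stripe_done();	/* stripe completions: count stays above zero */
	stripe_done();		/* submitter drops its bias and completion fires */
	return 0;
}
]
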
@@ -3206,8 +3243,7 @@ static int bio_fits_rdev(struct bio *bi)
 	if ((bi->bi_size>>9) > q->max_sectors)
 		return 0;
 	blk_recount_segments(q, bi);
-	if (bi->bi_phys_segments > q->max_phys_segments ||
-	    bi->bi_hw_segments > q->max_hw_segments)
+	if (bi->bi_phys_segments > q->max_phys_segments)
 		return 0;
 
 	if (q->merge_bvec_fn)
@@ -3468,7 +3504,7 @@ static int make_request(struct request_queue *q, struct bio * bi)
 
 	}
 	spin_lock_irq(&conf->device_lock);
-	remaining = --bi->bi_phys_segments;
+	remaining = raid5_dec_bi_phys_segments(bi);
 	spin_unlock_irq(&conf->device_lock);
 	if (remaining == 0) {
 
@@ -3752,7 +3788,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 		     sector += STRIPE_SECTORS,
 		     scnt++) {
 
-		if (scnt < raid_bio->bi_hw_segments)
+		if (scnt < raid5_bi_hw_segments(raid_bio))
 			/* already done this stripe */
 			continue;
 
@@ -3760,7 +3796,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 
 		if (!sh) {
 			/* failed to get a stripe - must wait */
-			raid_bio->bi_hw_segments = scnt;
+			raid5_set_bi_hw_segments(raid_bio, scnt);
 			conf->retry_read_aligned = raid_bio;
 			return handled;
 		}
@@ -3768,7 +3804,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 		set_bit(R5_ReadError, &sh->dev[dd_idx].flags);
 		if (!add_stripe_bio(sh, raid_bio, dd_idx, 0)) {
 			release_stripe(sh);
-			raid_bio->bi_hw_segments = scnt;
+			raid5_set_bi_hw_segments(raid_bio, scnt);
 			conf->retry_read_aligned = raid_bio;
 			return handled;
 		}
@@ -3778,7 +3814,7 @@ static int retry_aligned_read(raid5_conf_t *conf, struct bio *raid_bio)
 		handled++;
 	}
 	spin_lock_irq(&conf->device_lock);
-	remaining = --raid_bio->bi_phys_segments;
+	remaining = raid5_dec_bi_phys_segments(raid_bio);
 	spin_unlock_irq(&conf->device_lock);
 	if (remaining == 0)
 		bio_endio(raid_bio, 0);