author	Ming Lei <ming.lei@redhat.com>	2017-07-14 04:14:42 -0400
committer	Shaohua Li <shli@fb.com>	2017-07-21 15:47:20 -0400
commit	022e510fcbda79183fd2cdc01abb01b4be80d03f (patch)
tree	c1ce4e89348f207fb4e1c572bad3bfd2db4c28b6
parent	4ec9f7a18b9fcef6e8f7c13279b48e3bb5d4d704 (diff)
md: remove 'idx' from 'struct resync_pages'
bio_add_page() won't fail for a resync bio, and the page index is the
same for each bio, so remove it. More importantly, the 'idx' of
'struct resync_pages' is initialized in the mempool allocator
function; that is wrong, since a mempool is only responsible for
allocation and can't be relied on for per-use initialization.

Suggested-by: NeilBrown <neilb@suse.com>
Reported-by: NeilBrown <neilb@suse.com>
Reported-and-tested-by: Patrick <dto@gmx.net>
Fixes: f0250618361d ("md: raid10: don't use bio's vec table to manage resync pages")
Fixes: 98d30c5812c3 ("md: raid1: don't use bio's vec table to manage resync pages")
Cc: stable@vger.kernel.org (4.12+)
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Shaohua Li <shli@fb.com>
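To see why the old code was broken: mempool_alloc() can satisfy a request
from the pool's reserved elements without running the allocator callback
again, so per-use state set in that callback (like 'idx' here) can come
back stale on reuse. Below is a minimal userspace sketch of that hazard,
not the kernel code itself: pool_alloc()/pool_free() and the freelist are
hypothetical stand-ins for the mempool API, and simplified in that the
real mempool tries the underlying allocator first and only falls back to
its reserve under memory pressure, which is what made the bug intermittent.

	#include <stdio.h>
	#include <stdlib.h>

	struct resync_pages {
		unsigned idx;             /* per-use cursor: the buggy field      */
		struct resync_pages *next;
	};

	static struct resync_pages *freelist;

	static struct resync_pages *pool_alloc(void)
	{
		if (freelist) {           /* recycled element: ctor is skipped    */
			struct resync_pages *rp = freelist;
			freelist = rp->next;
			return rp;
		}
		struct resync_pages *rp = malloc(sizeof(*rp));
		rp->idx = 0;              /* runs only on first construction      */
		return rp;
	}

	static void pool_free(struct resync_pages *rp)
	{
		rp->next = freelist;      /* element returns to the pool as-is    */
		freelist = rp;
	}

	int main(void)
	{
		struct resync_pages *rp = pool_alloc();
		rp->idx = 7;              /* caller advances the cursor in use    */
		pool_free(rp);

		rp = pool_alloc();        /* same element, ctor not rerun         */
		printf("idx after reuse: %u (expected 0)\n", rp->idx); /* 7 */

		free(rp);
		return 0;
	}

The patch sidesteps the problem entirely: instead of storing the cursor in
the pooled object, each sync_request loop tracks it in a local 'page_idx'
that is reinitialized to 0 on every pass.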
-rw-r--r--	drivers/md/md.h	1
-rw-r--r--	drivers/md/raid1.c	6
-rw-r--r--	drivers/md/raid10.c	6
3 files changed, 6 insertions, 7 deletions
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b50eb4ac1b82..991769cc3615 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -738,7 +738,6 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio
 
 /* for managing resync I/O pages */
 struct resync_pages {
-	unsigned idx;	/* for get/put page from the pool */
 	void *raid_bio;
 	struct page *pages[RESYNC_PAGES];
 };
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3febfc8391fb..0896c772a560 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -170,7 +170,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 		resync_get_all_pages(rp);
 	}
 
-	rp->idx = 0;
 	rp->raid_bio = r1_bio;
 	bio->bi_private = rp;
 }
@@ -2619,6 +2618,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	int good_sectors = RESYNC_SECTORS;
 	int min_bad = 0; /* number of sectors that are bad in all devices */
 	int idx = sector_to_idx(sector_nr);
+	int page_idx = 0;
 
 	if (!conf->r1buf_pool)
 		if (init_resync(conf))
@@ -2846,7 +2846,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio = r1_bio->bios[i];
 			rp = get_resync_pages(bio);
 			if (bio->bi_end_io) {
-				page = resync_fetch_page(rp, rp->idx++);
+				page = resync_fetch_page(rp, page_idx);
 
 				/*
 				 * won't fail because the vec table is big
@@ -2858,7 +2858,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
 		sync_blocks -= (len>>9);
-	} while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+	} while (++page_idx < RESYNC_PAGES);
 
 	r1_bio->sectors = nr_sectors;
 
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5026e7ad51d3..fa8bcf04e791 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -221,7 +221,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 		resync_get_all_pages(rp);
 	}
 
-	rp->idx = 0;
 	rp->raid_bio = r10_bio;
 	bio->bi_private = rp;
 	if (rbio) {
@@ -2853,6 +2852,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 	sector_t sectors_skipped = 0;
 	int chunks_skipped = 0;
 	sector_t chunk_mask = conf->geo.chunk_mask;
+	int page_idx = 0;
 
 	if (!conf->r10buf_pool)
 		if (init_resync(conf))
@@ -3355,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			break;
 		for (bio= biolist ; bio ; bio=bio->bi_next) {
 			struct resync_pages *rp = get_resync_pages(bio);
-			page = resync_fetch_page(rp, rp->idx++);
+			page = resync_fetch_page(rp, page_idx);
 			/*
 			 * won't fail because the vec table is big enough
 			 * to hold all these pages
@@ -3364,7 +3364,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			}
 			nr_sectors += len>>9;
 			sector_nr += len>>9;
-		} while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
+		} while (++page_idx < RESYNC_PAGES);
 		r10_bio->sectors = nr_sectors;
 
 		while (biolist) {