diff options
Diffstat (limited to 'drivers/md/raid10.c')
-rw-r--r-- | drivers/md/raid10.c | 25 |
1 file changed, 8 insertions(+), 17 deletions(-)
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5026e7ad51d3..f55d4cc085f6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf); | |||
110 | #define raid10_log(md, fmt, args...) \ | 110 | #define raid10_log(md, fmt, args...) \ |
111 | do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) | 111 | do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0) |
112 | 112 | ||
113 | /* | 113 | #include "raid1-10.c" |
114 | * 'strct resync_pages' stores actual pages used for doing the resync | ||
115 | * IO, and it is per-bio, so make .bi_private points to it. | ||
116 | */ | ||
117 | static inline struct resync_pages *get_resync_pages(struct bio *bio) | ||
118 | { | ||
119 | return bio->bi_private; | ||
120 | } | ||
121 | 114 | ||
122 | /* | 115 | /* |
123 | * for resync bio, r10bio pointer can be retrieved from the per-bio | 116 | * for resync bio, r10bio pointer can be retrieved from the per-bio |
@@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data) | |||
221 | resync_get_all_pages(rp); | 214 | resync_get_all_pages(rp); |
222 | } | 215 | } |
223 | 216 | ||
224 | rp->idx = 0; | ||
225 | rp->raid_bio = r10_bio; | 217 | rp->raid_bio = r10_bio; |
226 | bio->bi_private = rp; | 218 | bio->bi_private = rp; |
227 | if (rbio) { | 219 | if (rbio) { |
@@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf) | |||
913 | bio->bi_next = NULL; | 905 | bio->bi_next = NULL; |
914 | bio->bi_bdev = rdev->bdev; | 906 | bio->bi_bdev = rdev->bdev; |
915 | if (test_bit(Faulty, &rdev->flags)) { | 907 | if (test_bit(Faulty, &rdev->flags)) { |
916 | bio->bi_status = BLK_STS_IOERR; | 908 | bio_io_error(bio); |
917 | bio_endio(bio); | ||
918 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 909 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
919 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 910 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
920 | /* Just ignore it */ | 911 | /* Just ignore it */ |
@@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) | |||
1098 | bio->bi_next = NULL; | 1089 | bio->bi_next = NULL; |
1099 | bio->bi_bdev = rdev->bdev; | 1090 | bio->bi_bdev = rdev->bdev; |
1100 | if (test_bit(Faulty, &rdev->flags)) { | 1091 | if (test_bit(Faulty, &rdev->flags)) { |
1101 | bio->bi_status = BLK_STS_IOERR; | 1092 | bio_io_error(bio); |
1102 | bio_endio(bio); | ||
1103 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && | 1093 | } else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) && |
1104 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) | 1094 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) |
1105 | /* Just ignore it */ | 1095 | /* Just ignore it */ |
@@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
2087 | rp = get_resync_pages(tbio); | 2077 | rp = get_resync_pages(tbio); |
2088 | bio_reset(tbio); | 2078 | bio_reset(tbio); |
2089 | 2079 | ||
2090 | tbio->bi_vcnt = vcnt; | 2080 | md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size); |
2091 | tbio->bi_iter.bi_size = fbio->bi_iter.bi_size; | 2081 | |
2092 | rp->raid_bio = r10_bio; | 2082 | rp->raid_bio = r10_bio; |
2093 | tbio->bi_private = rp; | 2083 | tbio->bi_private = rp; |
2094 | tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; | 2084 | tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; |
@@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
2853 | sector_t sectors_skipped = 0; | 2843 | sector_t sectors_skipped = 0; |
2854 | int chunks_skipped = 0; | 2844 | int chunks_skipped = 0; |
2855 | sector_t chunk_mask = conf->geo.chunk_mask; | 2845 | sector_t chunk_mask = conf->geo.chunk_mask; |
2846 | int page_idx = 0; | ||
2856 | 2847 | ||
2857 | if (!conf->r10buf_pool) | 2848 | if (!conf->r10buf_pool) |
2858 | if (init_resync(conf)) | 2849 | if (init_resync(conf)) |
@@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3355 | break; | 3346 | break; |
3356 | for (bio= biolist ; bio ; bio=bio->bi_next) { | 3347 | for (bio= biolist ; bio ; bio=bio->bi_next) { |
3357 | struct resync_pages *rp = get_resync_pages(bio); | 3348 | struct resync_pages *rp = get_resync_pages(bio); |
3358 | page = resync_fetch_page(rp, rp->idx++); | 3349 | page = resync_fetch_page(rp, page_idx); |
3359 | /* | 3350 | /* |
3360 | * won't fail because the vec table is big enough | 3351 | * won't fail because the vec table is big enough |
3361 | * to hold all these pages | 3352 | * to hold all these pages |
@@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3364 | } | 3355 | } |
3365 | nr_sectors += len>>9; | 3356 | nr_sectors += len>>9; |
3366 | sector_nr += len>>9; | 3357 | sector_nr += len>>9; |
3367 | } while (get_resync_pages(biolist)->idx < RESYNC_PAGES); | 3358 | } while (++page_idx < RESYNC_PAGES); |
3368 | r10_bio->sectors = nr_sectors; | 3359 | r10_bio->sectors = nr_sectors; |
3369 | 3360 | ||
3370 | while (biolist) { | 3361 | while (biolist) { |