-rw-r--r--  drivers/md/md.c        2
-rw-r--r--  drivers/md/md.h       54
-rw-r--r--  drivers/md/raid1-10.c 81
-rw-r--r--  drivers/md/raid1.c    68
-rw-r--r--  drivers/md/raid10.c   25
-rw-r--r--  drivers/md/raid5.c    11
6 files changed, 115 insertions, 126 deletions
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 8cdca0296749..c99634612fc4 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -2287,7 +2287,7 @@ static void export_array(struct mddev *mddev)
 
 static bool set_in_sync(struct mddev *mddev)
 {
-	WARN_ON_ONCE(!spin_is_locked(&mddev->lock));
+	WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
 	if (!mddev->in_sync) {
 		mddev->sync_checkers++;
 		spin_unlock(&mddev->lock);
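
The new NR_CPUS != 1 guard exists because on uniprocessor (!CONFIG_SMP) builds spinlocks compile to no-ops and spin_is_locked() constantly returns 0, so the bare assertion warned even when the caller did hold the lock. A minimal sketch of the two behaviours on a UP kernel (illustration only, not patch text):

	spin_lock(&mddev->lock);
	/* UP: spin_is_locked() is always 0, so this warns despite the lock being held */
	WARN_ON_ONCE(!spin_is_locked(&mddev->lock));
	/* UP: NR_CPUS == 1, so the check short-circuits and stays quiet */
	WARN_ON_ONCE(NR_CPUS != 1 && !spin_is_locked(&mddev->lock));
	spin_unlock(&mddev->lock);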
diff --git a/drivers/md/md.h b/drivers/md/md.h
index b50eb4ac1b82..09db03455801 100644
--- a/drivers/md/md.h
+++ b/drivers/md/md.h
@@ -731,58 +731,4 @@ static inline void mddev_check_write_zeroes(struct mddev *mddev, struct bio *bio)
 	    !bdev_get_queue(bio->bi_bdev)->limits.max_write_zeroes_sectors)
 		mddev->queue->limits.max_write_zeroes_sectors = 0;
 }
-
-/* Maximum size of each resync request */
-#define RESYNC_BLOCK_SIZE (64*1024)
-#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
-
-/* for managing resync I/O pages */
-struct resync_pages {
-	unsigned	idx;	/* for get/put page from the pool */
-	void		*raid_bio;
-	struct page	*pages[RESYNC_PAGES];
-};
-
-static inline int resync_alloc_pages(struct resync_pages *rp,
-				     gfp_t gfp_flags)
-{
-	int i;
-
-	for (i = 0; i < RESYNC_PAGES; i++) {
-		rp->pages[i] = alloc_page(gfp_flags);
-		if (!rp->pages[i])
-			goto out_free;
-	}
-
-	return 0;
-
-out_free:
-	while (--i >= 0)
-		put_page(rp->pages[i]);
-	return -ENOMEM;
-}
-
-static inline void resync_free_pages(struct resync_pages *rp)
-{
-	int i;
-
-	for (i = 0; i < RESYNC_PAGES; i++)
-		put_page(rp->pages[i]);
-}
-
-static inline void resync_get_all_pages(struct resync_pages *rp)
-{
-	int i;
-
-	for (i = 0; i < RESYNC_PAGES; i++)
-		get_page(rp->pages[i]);
-}
-
-static inline struct page *resync_fetch_page(struct resync_pages *rp,
-					     unsigned idx)
-{
-	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
-		return NULL;
-	return rp->pages[idx];
-}
 #endif /* _MD_MD_H */
diff --git a/drivers/md/raid1-10.c b/drivers/md/raid1-10.c
new file mode 100644
index 000000000000..9f2670b45f31
--- /dev/null
+++ b/drivers/md/raid1-10.c
@@ -0,0 +1,81 @@
+/* Maximum size of each resync request */
+#define RESYNC_BLOCK_SIZE (64*1024)
+#define RESYNC_PAGES ((RESYNC_BLOCK_SIZE + PAGE_SIZE-1) / PAGE_SIZE)
+
+/* for managing resync I/O pages */
+struct resync_pages {
+	void		*raid_bio;
+	struct page	*pages[RESYNC_PAGES];
+};
+
+static inline int resync_alloc_pages(struct resync_pages *rp,
+				     gfp_t gfp_flags)
+{
+	int i;
+
+	for (i = 0; i < RESYNC_PAGES; i++) {
+		rp->pages[i] = alloc_page(gfp_flags);
+		if (!rp->pages[i])
+			goto out_free;
+	}
+
+	return 0;
+
+out_free:
+	while (--i >= 0)
+		put_page(rp->pages[i]);
+	return -ENOMEM;
+}
+
+static inline void resync_free_pages(struct resync_pages *rp)
+{
+	int i;
+
+	for (i = 0; i < RESYNC_PAGES; i++)
+		put_page(rp->pages[i]);
+}
+
+static inline void resync_get_all_pages(struct resync_pages *rp)
+{
+	int i;
+
+	for (i = 0; i < RESYNC_PAGES; i++)
+		get_page(rp->pages[i]);
+}
+
+static inline struct page *resync_fetch_page(struct resync_pages *rp,
+					     unsigned idx)
+{
+	if (WARN_ON_ONCE(idx >= RESYNC_PAGES))
+		return NULL;
+	return rp->pages[idx];
+}
+
+/*
+ * 'struct resync_pages' stores the actual pages used for doing resync
+ * I/O; it is per-bio, so make .bi_private point to it.
+ */
+static inline struct resync_pages *get_resync_pages(struct bio *bio)
+{
+	return bio->bi_private;
+}
+
+/* generally called after bio_reset() for resetting bvec */
+static void md_bio_reset_resync_pages(struct bio *bio, struct resync_pages *rp,
+				      int size)
+{
+	int idx = 0;
+
+	/* initialize bvec table again */
+	do {
+		struct page *page = resync_fetch_page(rp, idx);
+		int len = min_t(int, size, PAGE_SIZE);
+
+		/*
+		 * won't fail because the vec table is big
+		 * enough to hold all these pages
+		 */
+		bio_add_page(bio, page, len, 0);
+		size -= len;
+	} while (idx++ < RESYNC_PAGES && size > 0);
+}
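
Taken together these helpers give each resync bio a private, index-addressed pool of RESYNC_PAGES pages. A rough lifecycle sketch, where example_fill_resync_bio() is a hypothetical caller rather than anything in this patch:

	/* Hypothetical caller showing the intended helper lifecycle. */
	static int example_fill_resync_bio(struct bio *bio, struct resync_pages *rp)
	{
		if (resync_alloc_pages(rp, GFP_NOIO))
			return -ENOMEM;		/* pool could not be filled */

		bio->bi_private = rp;		/* get_resync_pages(bio) now resolves */
		md_bio_reset_resync_pages(bio, rp, RESYNC_BLOCK_SIZE);

		/* ... submit the bio and wait for completion ... */

		resync_free_pages(rp);		/* drop the page references again */
		return 0;
	}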
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 3febfc8391fb..f50958ded9f0 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -81,14 +81,7 @@ static void lower_barrier(struct r1conf *conf, sector_t sector_nr);
 #define raid1_log(md, fmt, args...) \
 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid1 " fmt, ##args); } while (0)
 
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
-	return bio->bi_private;
-}
+#include "raid1-10.c"
 
 /*
  * for resync bio, r1bio pointer can be retrieved from the per-bio
@@ -170,7 +163,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 			resync_get_all_pages(rp);
 		}
 
-		rp->idx = 0;
 		rp->raid_bio = r1_bio;
 		bio->bi_private = rp;
 	}
@@ -492,10 +484,6 @@ static void raid1_end_write_request(struct bio *bio)
 	}
 
 	if (behind) {
-		/* we release behind master bio when all write are done */
-		if (r1_bio->behind_master_bio == bio)
-			to_put = NULL;
-
 		if (test_bit(WriteMostly, &rdev->flags))
 			atomic_dec(&r1_bio->behind_remaining);
 
@@ -802,8 +790,7 @@ static void flush_bio_list(struct r1conf *conf, struct bio *bio)
 		bio->bi_next = NULL;
 		bio->bi_bdev = rdev->bdev;
 		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_status = BLK_STS_IOERR;
-			bio_endio(bio);
+			bio_io_error(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
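
This hunk, and the matching ones in raid10.c and raid5.c below, fold the open-coded failure path into bio_io_error(). The helper amounts to the two lines it replaces; as best I can tell from this era's include/linux/bio.h it is simply:

	static inline void bio_io_error(struct bio *bio)
	{
		bio->bi_status = BLK_STS_IOERR;	/* mark the bio as failed */
		bio_endio(bio);			/* and complete it */
	}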
@@ -1088,7 +1075,7 @@ static void unfreeze_array(struct r1conf *conf)
 	wake_up(&conf->wait_barrier);
 }
 
-static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
-					   struct bio *bio)
+static void alloc_behind_master_bio(struct r1bio *r1_bio,
+				    struct bio *bio)
 {
 	int size = bio->bi_iter.bi_size;
@@ -1098,11 +1085,13 @@ static struct bio *alloc_behind_master_bio(struct r1bio *r1_bio,
 
 	behind_bio = bio_alloc_mddev(GFP_NOIO, vcnt, r1_bio->mddev);
 	if (!behind_bio)
-		goto fail;
+		return;
 
 	/* discard op, we don't support writezero/writesame yet */
-	if (!bio_has_data(bio))
+	if (!bio_has_data(bio)) {
+		behind_bio->bi_iter.bi_size = size;
 		goto skip_copy;
+	}
 
 	while (i < vcnt && size) {
 		struct page *page;
@@ -1123,14 +1112,13 @@ skip_copy:
 	r1_bio->behind_master_bio = behind_bio;
 	set_bit(R1BIO_BehindIO, &r1_bio->state);
 
-	return behind_bio;
+	return;
 
 free_pages:
 	pr_debug("%dB behind alloc failed, doing sync I/O\n",
 		 bio->bi_iter.bi_size);
 	bio_free_pages(behind_bio);
-fail:
-	return behind_bio;
+	bio_put(behind_bio);
 }
 
 struct raid1_plug_cb {
@@ -1483,7 +1471,7 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			    (atomic_read(&bitmap->behind_writes)
 			     < mddev->bitmap_info.max_write_behind) &&
 			    !waitqueue_active(&bitmap->behind_wait)) {
-				mbio = alloc_behind_master_bio(r1_bio, bio);
+				alloc_behind_master_bio(r1_bio, bio);
 			}
 
 			bitmap_startwrite(bitmap, r1_bio->sector,
@@ -1493,14 +1481,11 @@ static void raid1_write_request(struct mddev *mddev, struct bio *bio,
 			first_clone = 0;
 		}
 
-		if (!mbio) {
-			if (r1_bio->behind_master_bio)
-				mbio = bio_clone_fast(r1_bio->behind_master_bio,
-						      GFP_NOIO,
-						      mddev->bio_set);
-			else
-				mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
-		}
+		if (r1_bio->behind_master_bio)
+			mbio = bio_clone_fast(r1_bio->behind_master_bio,
+					      GFP_NOIO, mddev->bio_set);
+		else
+			mbio = bio_clone_fast(bio, GFP_NOIO, mddev->bio_set);
 
 		if (r1_bio->behind_master_bio) {
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
@@ -2086,10 +2071,7 @@ static void process_checks(struct r1bio *r1_bio)
 	/* Fix variable parts of all bios */
 	vcnt = (r1_bio->sectors + PAGE_SIZE / 512 - 1) >> (PAGE_SHIFT - 9);
 	for (i = 0; i < conf->raid_disks * 2; i++) {
-		int j;
-		int size;
 		blk_status_t status;
-		struct bio_vec *bi;
 		struct bio *b = r1_bio->bios[i];
 		struct resync_pages *rp = get_resync_pages(b);
 		if (b->bi_end_io != end_sync_read)
@@ -2098,8 +2080,6 @@ static void process_checks(struct r1bio *r1_bio)
 		status = b->bi_status;
 		bio_reset(b);
 		b->bi_status = status;
-		b->bi_vcnt = vcnt;
-		b->bi_iter.bi_size = r1_bio->sectors << 9;
 		b->bi_iter.bi_sector = r1_bio->sector +
 			conf->mirrors[i].rdev->data_offset;
 		b->bi_bdev = conf->mirrors[i].rdev->bdev;
@@ -2107,15 +2087,8 @@ static void process_checks(struct r1bio *r1_bio)
 		rp->raid_bio = r1_bio;
 		b->bi_private = rp;
 
-		size = b->bi_iter.bi_size;
-		bio_for_each_segment_all(bi, b, j) {
-			bi->bv_offset = 0;
-			if (size > PAGE_SIZE)
-				bi->bv_len = PAGE_SIZE;
-			else
-				bi->bv_len = size;
-			size -= PAGE_SIZE;
-		}
+		/* initialize bvec table again */
+		md_bio_reset_resync_pages(b, rp, r1_bio->sectors << 9);
 	}
 	for (primary = 0; primary < conf->raid_disks * 2; primary++)
 		if (r1_bio->bios[primary]->bi_end_io == end_sync_read &&
@@ -2366,8 +2339,6 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 			wbio = bio_clone_fast(r1_bio->behind_master_bio,
 					      GFP_NOIO,
 					      mddev->bio_set);
-			/* We really need a _all clone */
-			wbio->bi_iter = (struct bvec_iter){ 0 };
 		} else {
 			wbio = bio_clone_fast(r1_bio->master_bio, GFP_NOIO,
 					      mddev->bio_set);
@@ -2619,6 +2590,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 	int good_sectors = RESYNC_SECTORS;
 	int min_bad = 0; /* number of sectors that are bad in all devices */
 	int idx = sector_to_idx(sector_nr);
+	int page_idx = 0;
 
 	if (!conf->r1buf_pool)
 		if (init_resync(conf))
@@ -2846,7 +2818,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		bio = r1_bio->bios[i];
 		rp = get_resync_pages(bio);
 		if (bio->bi_end_io) {
-			page = resync_fetch_page(rp, rp->idx++);
+			page = resync_fetch_page(rp, page_idx);
 
 			/*
 			 * won't fail because the vec table is big
@@ -2858,7 +2830,7 @@ static sector_t raid1_sync_request(struct mddev *mddev, sector_t sector_nr,
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
 		sync_blocks -= (len>>9);
-	} while (get_resync_pages(r1_bio->bios[disk]->bi_private)->idx < RESYNC_PAGES);
+	} while (++page_idx < RESYNC_PAGES);
 
 	r1_bio->sectors = nr_sectors;
 
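
The rp->idx to page_idx conversion moves the resync iteration state out of the mempool-backed struct resync_pages and onto the caller's stack, which is why r1buf_pool_alloc() above no longer needs to reset rp->idx. An illustrative contrast (sketch, not literal patch text):

	/* Before: shared state in *rp had to be re-zeroed on every pool alloc. */
	do {
		page = resync_fetch_page(rp, rp->idx++);
		/* ... add page to bio ... */
	} while (rp->idx < RESYNC_PAGES);

	/* After: a local index, so recycled resync_pages carry no stale state. */
	int page_idx = 0;
	do {
		page = resync_fetch_page(rp, page_idx);
		/* ... add page to bio ... */
	} while (++page_idx < RESYNC_PAGES);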
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 5026e7ad51d3..f55d4cc085f6 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -110,14 +110,7 @@ static void end_reshape(struct r10conf *conf);
 #define raid10_log(md, fmt, args...) \
 	do { if ((md)->queue) blk_add_trace_msg((md)->queue, "raid10 " fmt, ##args); } while (0)
 
-/*
- * 'strct resync_pages' stores actual pages used for doing the resync
- * IO, and it is per-bio, so make .bi_private points to it.
- */
-static inline struct resync_pages *get_resync_pages(struct bio *bio)
-{
-	return bio->bi_private;
-}
+#include "raid1-10.c"
 
 /*
  * for resync bio, r10bio pointer can be retrieved from the per-bio
@@ -221,7 +214,6 @@ static void * r10buf_pool_alloc(gfp_t gfp_flags, void *data)
 			resync_get_all_pages(rp);
 		}
 
-		rp->idx = 0;
 		rp->raid_bio = r10_bio;
 		bio->bi_private = rp;
 		if (rbio) {
@@ -913,8 +905,7 @@ static void flush_pending_writes(struct r10conf *conf)
 		bio->bi_next = NULL;
 		bio->bi_bdev = rdev->bdev;
 		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_status = BLK_STS_IOERR;
-			bio_endio(bio);
+			bio_io_error(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
@@ -1098,8 +1089,7 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
 		bio->bi_next = NULL;
 		bio->bi_bdev = rdev->bdev;
 		if (test_bit(Faulty, &rdev->flags)) {
-			bio->bi_status = BLK_STS_IOERR;
-			bio_endio(bio);
+			bio_io_error(bio);
 		} else if (unlikely((bio_op(bio) == REQ_OP_DISCARD) &&
 				    !blk_queue_discard(bdev_get_queue(bio->bi_bdev))))
 			/* Just ignore it */
@@ -2087,8 +2077,8 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		rp = get_resync_pages(tbio);
 		bio_reset(tbio);
 
-		tbio->bi_vcnt = vcnt;
-		tbio->bi_iter.bi_size = fbio->bi_iter.bi_size;
+		md_bio_reset_resync_pages(tbio, rp, fbio->bi_iter.bi_size);
+
 		rp->raid_bio = r10_bio;
 		tbio->bi_private = rp;
 		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
@@ -2853,6 +2843,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 	sector_t sectors_skipped = 0;
 	int chunks_skipped = 0;
 	sector_t chunk_mask = conf->geo.chunk_mask;
+	int page_idx = 0;
 
 	if (!conf->r10buf_pool)
 		if (init_resync(conf))
@@ -3355,7 +3346,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 			break;
 		for (bio= biolist ; bio ; bio=bio->bi_next) {
 			struct resync_pages *rp = get_resync_pages(bio);
-			page = resync_fetch_page(rp, rp->idx++);
+			page = resync_fetch_page(rp, page_idx);
 			/*
 			 * won't fail because the vec table is big enough
 			 * to hold all these pages
@@ -3364,7 +3355,7 @@ static sector_t raid10_sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 		nr_sectors += len>>9;
 		sector_nr += len>>9;
-	} while (get_resync_pages(biolist)->idx < RESYNC_PAGES);
+	} while (++page_idx < RESYNC_PAGES);
 	r10_bio->sectors = nr_sectors;
 
 	while (biolist) {
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index aeeb8d6854e2..0fc2748aaf95 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -3381,9 +3381,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 
-			bi->bi_status = BLK_STS_IOERR;
 			md_write_end(conf->mddev);
-			bio_endio(bi);
+			bio_io_error(bi);
 			bi = nextbi;
 		}
 		if (bitmap_end)
@@ -3403,9 +3402,8 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			       sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 
-				bi->bi_status = BLK_STS_IOERR;
 				md_write_end(conf->mddev);
-				bio_endio(bi);
+				bio_io_error(bi);
 				bi = bi2;
 			}
 
@@ -3429,8 +3427,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
 
-				bi->bi_status = BLK_STS_IOERR;
-				bio_endio(bi);
+				bio_io_error(bi);
 				bi = nextbi;
 			}
 		}
@@ -6237,6 +6234,8 @@ static void raid5_do_work(struct work_struct *work)
 	pr_debug("%d stripes handled\n", handled);
 
 	spin_unlock_irq(&conf->device_lock);
+
+	async_tx_issue_pending_all();
 	blk_finish_plug(&plug);
 
 	pr_debug("--- raid5worker inactive\n");
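
The added async_tx_issue_pending_all() brings raid5_do_work() in line with raid5d(), which already issues pending async_tx descriptors after handling a batch of stripes; without it, copy/xor work queued on DMA channels from this worker could sit unissued until something else kicked the engines. The assumed async_tx contract, in sketch form (dest, srcs, src_cnt, len and submit are placeholder variables, not from the patch):

	/* async_* calls may only queue a descriptor on a DMA channel ... */
	tx = async_xor(dest, srcs, 0, src_cnt, len, &submit);
	/* ... hardware is not guaranteed to run it until the batch is issued. */
	async_tx_issue_pending_all();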