Diffstat (limited to 'drivers/md')
 drivers/md/dm-crypt.c  |   3
 drivers/md/dm-raid1.c  |   2
 drivers/md/dm-stripe.c |   2
 drivers/md/dm-verity.c |   4
 drivers/md/faulty.c    |   6
 drivers/md/linear.c    |   3
 drivers/md/md.c        |  17
 drivers/md/raid0.c     |   9
 drivers/md/raid1.c     | 133
 drivers/md/raid10.c    |  78
 drivers/md/raid5.c     |  49
 11 files changed, 104 insertions(+), 202 deletions(-)
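Every hunk below makes the same few substitutions: open-coded arithmetic on struct bio fields becomes bio_sectors(), bio_end_sector() and bio_segments(); hand-rolled loops over bi_io_vec become bio_for_each_segment_all(), bio_alloc_pages() and bio_copy_data(); field-by-field re-initialisation of recycled bios becomes bio_reset(); and the private bi_complete()/submit_bio_wait() copies in raid1.c and raid10.c are deleted. As a rough sketch (not copied from this tree, and assuming the pre-immutable-biovec struct bio that still carries bi_sector, bi_size and bi_idx), the accessors used most often reduce to:

	/* hedged reconstruction of the era's include/linux/bio.h helpers */
	#define bio_sectors(bio)	((bio)->bi_size >> 9)
	#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors(bio))
	#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)

bio_for_each_segment_all() differs from bio_for_each_segment() in that it walks every allocated bvec from index 0 and yields a struct bio_vec pointer instead of starting at bi_idx, which is why it is only used here on bios the caller owns outright (the dm-crypt free loop and raid1's alloc_behind_pages()).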
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 13c15480d940..6d2d41ae9e32 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -858,8 +858,7 @@ static void crypt_free_buffer_pages(struct crypt_config *cc, struct bio *clone)
 	unsigned int i;
 	struct bio_vec *bv;
 
-	for (i = 0; i < clone->bi_vcnt; i++) {
-		bv = bio_iovec_idx(clone, i);
+	bio_for_each_segment_all(bv, clone, i) {
 		BUG_ON(!bv->bv_page);
 		mempool_free(bv->bv_page, cc->page_pool);
 		bv->bv_page = NULL;
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index d053098c6a91..699b5be68d31 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -458,7 +458,7 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
 {
 	io->bdev = m->dev->bdev;
 	io->sector = map_sector(m, bio);
-	io->count = bio->bi_size >> 9;
+	io->count = bio_sectors(bio);
 }
 
 static void hold_bio(struct mirror_set *ms, struct bio *bio)
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index d8837d313f54..ea5e878a30b9 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -258,7 +258,7 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
 	sector_t begin, end;
 
 	stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
-	stripe_map_range_sector(sc, bio->bi_sector + bio_sectors(bio),
+	stripe_map_range_sector(sc, bio_end_sector(bio),
 				target_stripe, &end);
 	if (begin < end) {
 		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index a746f1d21c66..b948fd864d45 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -501,7 +501,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 		return -EIO;
 	}
 
-	if ((bio->bi_sector + bio_sectors(bio)) >>
+	if (bio_end_sector(bio) >>
 	    (v->data_dev_block_bits - SECTOR_SHIFT) > v->data_blocks) {
 		DMERR_LIMIT("io out of range");
 		return -EIO;
@@ -519,7 +519,7 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 
 	bio->bi_end_io = verity_end_io;
 	bio->bi_private = io;
-	io->io_vec_size = bio->bi_vcnt - bio->bi_idx;
+	io->io_vec_size = bio_segments(bio);
 	if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
 		io->io_vec = io->io_vec_inline;
 	else
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 5e7dc772f5de..3193aefe982b 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -185,8 +185,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 			return;
 		}
 
-		if (check_sector(conf, bio->bi_sector, bio->bi_sector+(bio->bi_size>>9),
-			WRITE))
+		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
 			failit = 1;
 		if (check_mode(conf, WritePersistent)) {
 			add_sector(conf, bio->bi_sector, WritePersistent);
@@ -196,8 +195,7 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 			failit = 1;
 	} else {
 		/* read request */
-		if (check_sector(conf, bio->bi_sector, bio->bi_sector + (bio->bi_size>>9),
-			READ))
+		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
 			failit = 1;
 		if (check_mode(conf, ReadTransient))
 			failit = 1;
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index 21014836bdbf..f03fabd2b37b 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -317,8 +317,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 		bio_io_error(bio);
 		return;
 	}
-	if (unlikely(bio->bi_sector + (bio->bi_size >> 9) >
-		     tmp_dev->end_sector)) {
+	if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
 		/* This bio crosses a device boundary, so we have to
 		 * split it.
 		 */
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 6330c727396c..681d1099a2d5 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -197,21 +197,12 @@ void md_trim_bio(struct bio *bio, int offset, int size)
 	if (offset == 0 && size == bio->bi_size)
 		return;
 
-	bio->bi_sector += offset;
-	bio->bi_size = size;
-	offset <<= 9;
 	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
 
-	while (bio->bi_idx < bio->bi_vcnt &&
-	       bio->bi_io_vec[bio->bi_idx].bv_len <= offset) {
-		/* remove this whole bio_vec */
-		offset -= bio->bi_io_vec[bio->bi_idx].bv_len;
-		bio->bi_idx++;
-	}
-	if (bio->bi_idx < bio->bi_vcnt) {
-		bio->bi_io_vec[bio->bi_idx].bv_offset += offset;
-		bio->bi_io_vec[bio->bi_idx].bv_len -= offset;
-	}
+	bio_advance(bio, offset << 9);
+
+	bio->bi_size = size;
+
 	/* avoid any complications with bi_idx being non-zero*/
 	if (bio->bi_idx) {
 		memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
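The md_trim_bio() hunk above folds the "drop whole leading bvecs, then trim the first remaining one" loop into bio_advance(). A minimal sketch of what that call does, again assuming the pre-immutable-biovec bio layout and ignoring integrity metadata (so this is an illustration, not the block layer's implementation):

	void bio_advance(struct bio *bio, unsigned int bytes)
	{
		bio->bi_sector += bytes >> 9;	/* the data now starts 'bytes' later */
		bio->bi_size -= bytes;

		while (bytes) {
			struct bio_vec *bv;

			if (bio->bi_idx >= bio->bi_vcnt)
				break;		/* ran past the end; nothing left to trim */

			bv = &bio->bi_io_vec[bio->bi_idx];
			if (bytes >= bv->bv_len) {
				/* consume a whole leading segment */
				bytes -= bv->bv_len;
				bio->bi_idx++;
			} else {
				/* partially consume the first remaining segment */
				bv->bv_offset += bytes;
				bv->bv_len -= bytes;
				bytes = 0;
			}
		}
	}

Note the hunk still assigns bio->bi_size = size afterwards: trimming the tail of the bio is not bio_advance()'s job.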
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index 0505452de8d6..fcf65e512cf5 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -502,11 +502,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 {
 	if (likely(is_power_of_2(chunk_sects))) {
 		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
-					+ (bio->bi_size >> 9));
+					+ bio_sectors(bio));
 	} else{
 		sector_t sector = bio->bi_sector;
 		return chunk_sects >= (sector_div(sector, chunk_sects)
-					+ (bio->bi_size >> 9));
+					+ bio_sectors(bio));
 	}
 }
 
@@ -527,8 +527,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 		sector_t sector = bio->bi_sector;
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
-		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
-		    bio->bi_idx != 0)
+		if (bio_segments(bio) > 1)
 			goto bad_map;
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
@@ -567,7 +566,7 @@ bad_map:
 	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n",
 	       mdname(mddev), chunk_sects / 2,
-	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 851023e2ba5d..55951182af73 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -92,7 +92,6 @@ static void r1bio_pool_free(void *r1_bio, void *data)
 static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 {
 	struct pool_info *pi = data;
-	struct page *page;
 	struct r1bio *r1_bio;
 	struct bio *bio;
 	int i, j;
@@ -122,14 +121,10 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 		j = 1;
 	while(j--) {
 		bio = r1_bio->bios[j];
-		for (i = 0; i < RESYNC_PAGES; i++) {
-			page = alloc_page(gfp_flags);
-			if (unlikely(!page))
-				goto out_free_pages;
+		bio->bi_vcnt = RESYNC_PAGES;
 
-			bio->bi_io_vec[i].bv_page = page;
-			bio->bi_vcnt = i+1;
-		}
+		if (bio_alloc_pages(bio, gfp_flags))
+			goto out_free_bio;
 	}
 	/* If not user-requests, copy the page pointers to all bios */
 	if (!test_bit(MD_RECOVERY_REQUESTED, &pi->mddev->recovery)) {
@@ -143,11 +138,6 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data)
 
 	return r1_bio;
 
-out_free_pages:
-	for (j=0 ; j < pi->raid_disks; j++)
-		for (i=0; i < r1_bio->bios[j]->bi_vcnt ; i++)
-			put_page(r1_bio->bios[j]->bi_io_vec[i].bv_page);
-	j = -1;
 out_free_bio:
 	while (++j < pi->raid_disks)
 		bio_put(r1_bio->bios[j]);
@@ -267,7 +257,7 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
 			 (unsigned long long) bio->bi_sector,
 			 (unsigned long long) bio->bi_sector +
-			 (bio->bi_size >> 9) - 1);
+			 bio_sectors(bio) - 1);
 
 		call_bio_endio(r1_bio);
 	}
@@ -458,7 +448,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
 				 " %llu-%llu\n",
 				 (unsigned long long) mbio->bi_sector,
 				 (unsigned long long) mbio->bi_sector +
-				 (mbio->bi_size >> 9) - 1);
+				 bio_sectors(mbio) - 1);
 			call_bio_endio(r1_bio);
 		}
 	}
@@ -925,7 +915,7 @@ static void alloc_behind_pages(struct bio *bio, struct r1bio *r1_bio)
 	if (unlikely(!bvecs))
 		return;
 
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment_all(bvec, bio, i) {
 		bvecs[i] = *bvec;
 		bvecs[i].bv_page = alloc_page(GFP_NOIO);
 		if (unlikely(!bvecs[i].bv_page))
@@ -1023,7 +1013,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	md_write_start(mddev, bio); /* wait on superblock update early */
 
 	if (bio_data_dir(bio) == WRITE &&
-	    bio->bi_sector + bio->bi_size/512 > mddev->suspend_lo &&
+	    bio_end_sector(bio) > mddev->suspend_lo &&
 	    bio->bi_sector < mddev->suspend_hi) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
@@ -1034,7 +1024,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			flush_signals(current);
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_INTERRUPTIBLE);
-			if (bio->bi_sector + bio->bi_size/512 <= mddev->suspend_lo ||
+			if (bio_end_sector(bio) <= mddev->suspend_lo ||
 			    bio->bi_sector >= mddev->suspend_hi)
 				break;
 			schedule();
@@ -1054,7 +1044,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 	r1_bio->master_bio = bio;
-	r1_bio->sectors = bio->bi_size >> 9;
+	r1_bio->sectors = bio_sectors(bio);
 	r1_bio->state = 0;
 	r1_bio->mddev = mddev;
 	r1_bio->sector = bio->bi_sector;
@@ -1132,7 +1122,7 @@ read_again:
 		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 		r1_bio->master_bio = bio;
-		r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r1_bio->state = 0;
 		r1_bio->mddev = mddev;
 		r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1289,14 +1279,10 @@ read_again:
 			struct bio_vec *bvec;
 			int j;
 
-			/* Yes, I really want the '__' version so that
-			 * we clear any unused pointer in the io_vec, rather
-			 * than leave them unchanged. This is important
-			 * because when we come to free the pages, we won't
-			 * know the original bi_idx, so we just free
-			 * them all
+			/*
+			 * We trimmed the bio, so _all is legit
 			 */
-			__bio_for_each_segment(bvec, mbio, j, 0)
+			bio_for_each_segment_all(bvec, mbio, j)
 				bvec->bv_page = r1_bio->behind_bvecs[j].bv_page;
 			if (test_bit(WriteMostly, &conf->mirrors[i].rdev->flags))
 				atomic_inc(&r1_bio->behind_remaining);
@@ -1334,14 +1320,14 @@ read_again:
 		/* Mustn't call r1_bio_write_done before this next test,
 		 * as it could result in the bio being freed.
 		 */
-		if (sectors_handled < (bio->bi_size >> 9)) {
+		if (sectors_handled < bio_sectors(bio)) {
 			r1_bio_write_done(r1_bio);
 			/* We need another r1_bio. It has already been counted
 			 * in bio->bi_phys_segments
 			 */
 			r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 			r1_bio->master_bio = bio;
-			r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r1_bio->state = 0;
 			r1_bio->mddev = mddev;
 			r1_bio->sector = bio->bi_sector + sectors_handled;
@@ -1867,7 +1853,7 @@ static int process_checks(struct r1bio *r1_bio)
 		struct bio *sbio = r1_bio->bios[i];
 		int size;
 
-		if (r1_bio->bios[i]->bi_end_io != end_sync_read)
+		if (sbio->bi_end_io != end_sync_read)
 			continue;
 
 		if (test_bit(BIO_UPTODATE, &sbio->bi_flags)) {
@@ -1892,16 +1878,15 @@ static int process_checks(struct r1bio *r1_bio)
 			continue;
 		}
 		/* fixup the bio for reuse */
+		bio_reset(sbio);
 		sbio->bi_vcnt = vcnt;
 		sbio->bi_size = r1_bio->sectors << 9;
-		sbio->bi_idx = 0;
-		sbio->bi_phys_segments = 0;
-		sbio->bi_flags &= ~(BIO_POOL_MASK - 1);
-		sbio->bi_flags |= 1 << BIO_UPTODATE;
-		sbio->bi_next = NULL;
 		sbio->bi_sector = r1_bio->sector +
 			conf->mirrors[i].rdev->data_offset;
 		sbio->bi_bdev = conf->mirrors[i].rdev->bdev;
+		sbio->bi_end_io = end_sync_read;
+		sbio->bi_private = r1_bio;
+
 		size = sbio->bi_size;
 		for (j = 0; j < vcnt ; j++) {
 			struct bio_vec *bi;
@@ -1912,10 +1897,9 @@ static int process_checks(struct r1bio *r1_bio)
 			else
 				bi->bv_len = size;
 			size -= PAGE_SIZE;
-			memcpy(page_address(bi->bv_page),
-			       page_address(pbio->bi_io_vec[j].bv_page),
-			       PAGE_SIZE);
 		}
+
+		bio_copy_data(sbio, pbio);
 	}
 	return 0;
 }
@@ -1952,7 +1936,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
 		wbio->bi_rw = WRITE;
 		wbio->bi_end_io = end_sync_write;
 		atomic_inc(&r1_bio->remaining);
-		md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
 
 		generic_make_request(wbio);
 	}
@@ -2064,32 +2048,11 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
 	}
 }
 
-static void bi_complete(struct bio *bio, int error)
-{
-	complete((struct completion *)bio->bi_private);
-}
-
-static int submit_bio_wait(int rw, struct bio *bio)
-{
-	struct completion event;
-	rw |= REQ_SYNC;
-
-	init_completion(&event);
-	bio->bi_private = &event;
-	bio->bi_end_io = bi_complete;
-	submit_bio(rw, bio);
-	wait_for_completion(&event);
-
-	return test_bit(BIO_UPTODATE, &bio->bi_flags);
-}
-
 static int narrow_write_error(struct r1bio *r1_bio, int i)
 {
 	struct mddev *mddev = r1_bio->mddev;
 	struct r1conf *conf = mddev->private;
 	struct md_rdev *rdev = conf->mirrors[i].rdev;
-	int vcnt, idx;
-	struct bio_vec *vec;
 
 	/* bio has the data to be written to device 'i' where
 	 * we just recently had a write error.
@@ -2117,30 +2080,32 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 		      & ~(sector_t)(block_sectors - 1))
 		- sector;
 
-	if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
-		vcnt = r1_bio->behind_page_count;
-		vec = r1_bio->behind_bvecs;
-		idx = 0;
-		while (vec[idx].bv_page == NULL)
-			idx++;
-	} else {
-		vcnt = r1_bio->master_bio->bi_vcnt;
-		vec = r1_bio->master_bio->bi_io_vec;
-		idx = r1_bio->master_bio->bi_idx;
-	}
 	while (sect_to_write) {
 		struct bio *wbio;
 		if (sectors > sect_to_write)
 			sectors = sect_to_write;
 		/* Write at 'sector' for 'sectors'*/
 
-		wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
-		memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
-		wbio->bi_sector = r1_bio->sector;
+		if (test_bit(R1BIO_BehindIO, &r1_bio->state)) {
+			unsigned vcnt = r1_bio->behind_page_count;
+			struct bio_vec *vec = r1_bio->behind_bvecs;
+
+			while (!vec->bv_page) {
+				vec++;
+				vcnt--;
+			}
+
+			wbio = bio_alloc_mddev(GFP_NOIO, vcnt, mddev);
+			memcpy(wbio->bi_io_vec, vec, vcnt * sizeof(struct bio_vec));
+
+			wbio->bi_vcnt = vcnt;
+		} else {
+			wbio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
+		}
+
 		wbio->bi_rw = WRITE;
-		wbio->bi_vcnt = vcnt;
+		wbio->bi_sector = r1_bio->sector;
 		wbio->bi_size = r1_bio->sectors << 9;
-		wbio->bi_idx = idx;
 
 		md_trim_bio(wbio, sector - r1_bio->sector, sectors);
 		wbio->bi_sector += rdev->data_offset;
@@ -2289,8 +2254,7 @@ read_more:
 		r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
 
 		r1_bio->master_bio = mbio;
-		r1_bio->sectors = (mbio->bi_size >> 9)
-			- sectors_handled;
+		r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
 		r1_bio->state = 0;
 		set_bit(R1BIO_ReadError, &r1_bio->state);
 		r1_bio->mddev = mddev;
@@ -2464,18 +2428,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 	for (i = 0; i < conf->raid_disks * 2; i++) {
 		struct md_rdev *rdev;
 		bio = r1_bio->bios[i];
-
-		/* take from bio_init */
-		bio->bi_next = NULL;
-		bio->bi_flags &= ~(BIO_POOL_MASK-1);
-		bio->bi_flags |= 1 << BIO_UPTODATE;
-		bio->bi_rw = READ;
-		bio->bi_vcnt = 0;
-		bio->bi_idx = 0;
-		bio->bi_phys_segments = 0;
-		bio->bi_size = 0;
-		bio->bi_end_io = NULL;
-		bio->bi_private = NULL;
+		bio_reset(bio);
 
 		rdev = rcu_dereference(conf->mirrors[i].rdev);
 		if (rdev == NULL ||
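Besides the accessor substitutions, the raid1.c hunks lean on two loop-replacing helpers: bio_copy_data(sbio, pbio) copies the data payload of pbio into sbio (what the removed per-page memcpy() in process_checks() did by hand), and bio_alloc_pages() populates every bvec of a bio whose bi_vcnt is already set, unwinding on allocation failure, which is why r1buf_pool_alloc() can drop its out_free_pages: label. A hedged sketch of the latter, written against the same era's bio layout rather than copied from fs/bio.c:

	int bio_alloc_pages(struct bio *bio, gfp_t gfp)
	{
		int i;
		struct bio_vec *bv;

		bio_for_each_segment_all(bv, bio, i) {
			bv->bv_page = alloc_page(gfp);
			if (!bv->bv_page) {
				/* free the pages allocated so far and report failure */
				while (--bv >= bio->bi_io_vec)
					__free_page(bv->bv_page);
				return -ENOMEM;
			}
		}

		return 0;
	}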
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 018741ba9310..59d4daa5f4c7 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1174,14 +1174,13 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	/* If this request crosses a chunk boundary, we need to
 	 * split it. This will only happen for 1 PAGE (or less) requests.
 	 */
-	if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
 		     > chunk_sects
 		     && (conf->geo.near_copies < conf->geo.raid_disks
 			 || conf->prev.near_copies < conf->prev.raid_disks))) {
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
-		if ((bio->bi_vcnt != 1 && bio->bi_vcnt != 0) ||
-		    bio->bi_idx != 0)
+		if (bio_segments(bio) > 1)
 			goto bad_map;
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
@@ -1214,7 +1213,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 bad_map:
 	printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-	       (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
@@ -1229,7 +1228,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	 */
 	wait_barrier(conf);
 
-	sectors = bio->bi_size >> 9;
+	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    bio->bi_sector < conf->reshape_progress &&
 	    bio->bi_sector + sectors > conf->reshape_progress) {
@@ -1331,8 +1330,7 @@ read_again:
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 		r10_bio->master_bio = bio;
-		r10_bio->sectors = ((bio->bi_size >> 9)
-				    - sectors_handled);
+		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r10_bio->state = 0;
 		r10_bio->mddev = mddev;
 		r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -1574,7 +1572,7 @@ retry_write:
 	 * after checking if we need to go around again.
 	 */
 
-	if (sectors_handled < (bio->bi_size >> 9)) {
+	if (sectors_handled < bio_sectors(bio)) {
 		one_write_done(r10_bio);
 		/* We need another r10_bio. It has already been counted
 		 * in bio->bi_phys_segments.
@@ -1582,7 +1580,7 @@ retry_write:
 		r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);
 
 		r10_bio->master_bio = bio;
-		r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
 		r10_bio->mddev = mddev;
 		r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -2084,13 +2082,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		 * First we need to fixup bv_offset, bv_len and
 		 * bi_vecs, as the read request might have corrupted these
 		 */
+		bio_reset(tbio);
+
 		tbio->bi_vcnt = vcnt;
 		tbio->bi_size = r10_bio->sectors << 9;
-		tbio->bi_idx = 0;
-		tbio->bi_phys_segments = 0;
-		tbio->bi_flags &= ~(BIO_POOL_MASK - 1);
-		tbio->bi_flags |= 1 << BIO_UPTODATE;
-		tbio->bi_next = NULL;
 		tbio->bi_rw = WRITE;
 		tbio->bi_private = r10_bio;
 		tbio->bi_sector = r10_bio->devs[i].addr;
@@ -2108,7 +2103,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
 		atomic_inc(&r10_bio->remaining);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
 		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
@@ -2133,7 +2128,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		d = r10_bio->devs[i].devnum;
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     tbio->bi_size >> 9);
+			     bio_sectors(tbio));
 		generic_make_request(tbio);
 	}
 
@@ -2259,13 +2254,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 	wbio2 = r10_bio->devs[1].repl_bio;
 	if (wbio->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-		md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
 		generic_make_request(wbio);
 	}
 	if (wbio2 && wbio2->bi_end_io) {
 		atomic_inc(&conf->mirrors[d].replacement->nr_pending);
 		md_sync_acct(conf->mirrors[d].replacement->bdev,
-			     wbio2->bi_size >> 9);
+			     bio_sectors(wbio2));
 		generic_make_request(wbio2);
 	}
 }
@@ -2536,25 +2531,6 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
 	}
 }
 
-static void bi_complete(struct bio *bio, int error)
-{
-	complete((struct completion *)bio->bi_private);
-}
-
-static int submit_bio_wait(int rw, struct bio *bio)
-{
-	struct completion event;
-	rw |= REQ_SYNC;
-
-	init_completion(&event);
-	bio->bi_private = &event;
-	bio->bi_end_io = bi_complete;
-	submit_bio(rw, bio);
-	wait_for_completion(&event);
-
-	return test_bit(BIO_UPTODATE, &bio->bi_flags);
-}
-
 static int narrow_write_error(struct r10bio *r10_bio, int i)
 {
 	struct bio *bio = r10_bio->master_bio;
@@ -2695,8 +2671,7 @@ read_more:
 			r10_bio = mempool_alloc(conf->r10bio_pool,
 						GFP_NOIO);
 			r10_bio->master_bio = mbio;
-			r10_bio->sectors = (mbio->bi_size >> 9)
-				- sectors_handled;
+			r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
 			r10_bio->state = 0;
 			set_bit(R10BIO_ReadError,
 				&r10_bio->state);
@@ -3133,6 +3108,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			}
 		}
 		bio = r10_bio->devs[0].bio;
+		bio_reset(bio);
 		bio->bi_next = biolist;
 		biolist = bio;
 		bio->bi_private = r10_bio;
@@ -3157,6 +3133,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		rdev = mirror->rdev;
 		if (!test_bit(In_sync, &rdev->flags)) {
 			bio = r10_bio->devs[1].bio;
+			bio_reset(bio);
 			bio->bi_next = biolist;
 			biolist = bio;
 			bio->bi_private = r10_bio;
@@ -3185,6 +3162,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		if (rdev == NULL || bio == NULL ||
 		    test_bit(Faulty, &rdev->flags))
 			break;
+		bio_reset(bio);
 		bio->bi_next = biolist;
 		biolist = bio;
 		bio->bi_private = r10_bio;
@@ -3283,7 +3261,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			r10_bio->devs[i].repl_bio->bi_end_io = NULL;
 
 			bio = r10_bio->devs[i].bio;
-			bio->bi_end_io = NULL;
+			bio_reset(bio);
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 			if (conf->mirrors[d].rdev == NULL ||
 			    test_bit(Faulty, &conf->mirrors[d].rdev->flags))
@@ -3320,6 +3298,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 
 			/* Need to set up for writing to the replacement */
 			bio = r10_bio->devs[i].repl_bio;
+			bio_reset(bio);
 			clear_bit(BIO_UPTODATE, &bio->bi_flags);
 
 			sector = r10_bio->devs[i].addr;
@@ -3353,17 +3332,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 		}
 	}
 
-	for (bio = biolist; bio ; bio=bio->bi_next) {
-
-		bio->bi_flags &= ~(BIO_POOL_MASK - 1);
-		if (bio->bi_end_io)
-			bio->bi_flags |= 1 << BIO_UPTODATE;
-		bio->bi_vcnt = 0;
-		bio->bi_idx = 0;
-		bio->bi_phys_segments = 0;
-		bio->bi_size = 0;
-	}
-
 	nr_sectors = 0;
 	if (sector_nr + max_sync < max_sector)
 		max_sector = sector_nr + max_sync;
@@ -4411,7 +4379,6 @@ read_more:
 	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 	read_bio->bi_flags |= 1 << BIO_UPTODATE;
 	read_bio->bi_vcnt = 0;
-	read_bio->bi_idx = 0;
 	read_bio->bi_size = 0;
 	r10_bio->master_bio = read_bio;
 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
@@ -4435,17 +4402,14 @@ read_more:
 		}
 		if (!rdev2 || test_bit(Faulty, &rdev2->flags))
 			continue;
+
+		bio_reset(b);
 		b->bi_bdev = rdev2->bdev;
 		b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
 		b->bi_private = r10_bio;
 		b->bi_end_io = end_reshape_write;
 		b->bi_rw = WRITE;
-		b->bi_flags &= ~(BIO_POOL_MASK - 1);
-		b->bi_flags |= 1 << BIO_UPTODATE;
 		b->bi_next = blist;
-		b->bi_vcnt = 0;
-		b->bi_idx = 0;
-		b->bi_size = 0;
 		blist = b;
 	}
 
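raid1.c and raid10.c both replace long runs of field-by-field re-initialisation of recycled bios (clearing bi_flags, bi_idx, bi_phys_segments, bi_size and so on) with one bio_reset() call, and both drop their private bi_complete()/submit_bio_wait() copies, which the same series moves into generic block code. Conceptually bio_reset() returns a bio to its just-allocated state while keeping the biovec it already owns; a rough sketch under that assumption (the real helper also releases integrity data, and the exact set of preserved fields is an assumption here):

	void bio_reset(struct bio *bio)
	{
		/* keep only the flag bits defined to survive a reset */
		unsigned long flags = bio->bi_flags & (~0UL << BIO_RESET_BITS);

		/* BIO_RESET_BYTES is assumed to cover every field in front of
		 * bi_max_vecs/bi_io_vec/bi_pool, so the vector itself is kept */
		memset(bio, 0, BIO_RESET_BYTES);
		bio->bi_flags = flags | (1 << BIO_UPTODATE);
	}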
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 4a7be455d6d8..9359828ffe26 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -90,7 +90,7 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
  */
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
-	int sectors = bio->bi_size >> 9;
+	int sectors = bio_sectors(bio);
 	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
@@ -569,14 +569,6 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 		bi = &sh->dev[i].req;
 		rbi = &sh->dev[i].rreq; /* For writing to replacement */
 
-		bi->bi_rw = rw;
-		rbi->bi_rw = rw;
-		if (rw & WRITE) {
-			bi->bi_end_io = raid5_end_write_request;
-			rbi->bi_end_io = raid5_end_write_request;
-		} else
-			bi->bi_end_io = raid5_end_read_request;
-
 		rcu_read_lock();
 		rrdev = rcu_dereference(conf->disks[i].replacement);
 		smp_mb(); /* Ensure that if rrdev is NULL, rdev won't be */
@@ -651,7 +643,14 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 
 			set_bit(STRIPE_IO_STARTED, &sh->state);
 
+			bio_reset(bi);
 			bi->bi_bdev = rdev->bdev;
+			bi->bi_rw = rw;
+			bi->bi_end_io = (rw & WRITE)
+				? raid5_end_write_request
+				: raid5_end_read_request;
+			bi->bi_private = sh;
+
 			pr_debug("%s: for %llu schedule op %ld on disc %d\n",
 				__func__, (unsigned long long)sh->sector,
 				bi->bi_rw, i);
@@ -665,12 +664,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_FLUSH;
 
-			bi->bi_flags = 1 << BIO_UPTODATE;
-			bi->bi_idx = 0;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
 			bi->bi_size = STRIPE_SIZE;
-			bi->bi_next = NULL;
 			if (rrdev)
 				set_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags);
 
@@ -687,7 +683,13 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 
 			set_bit(STRIPE_IO_STARTED, &sh->state);
 
+			bio_reset(rbi);
 			rbi->bi_bdev = rrdev->bdev;
+			rbi->bi_rw = rw;
+			BUG_ON(!(rw & WRITE));
+			rbi->bi_end_io = raid5_end_write_request;
+			rbi->bi_private = sh;
+
 			pr_debug("%s: for %llu schedule op %ld on "
 				"replacement disc %d\n",
 				__func__, (unsigned long long)sh->sector,
@@ -699,12 +701,9 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			else
 				rbi->bi_sector = (sh->sector
 						  + rrdev->data_offset);
-			rbi->bi_flags = 1 << BIO_UPTODATE;
-			rbi->bi_idx = 0;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
 			rbi->bi_size = STRIPE_SIZE;
-			rbi->bi_next = NULL;
 			if (conf->mddev->gendisk)
 				trace_block_bio_remap(bdev_get_queue(rbi->bi_bdev),
 						      rbi, disk_devt(conf->mddev->gendisk),
@@ -2402,11 +2401,11 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	} else
 		bip = &sh->dev[dd_idx].toread;
 	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-		if ((*bip)->bi_sector + ((*bip)->bi_size >> 9) > bi->bi_sector)
+		if (bio_end_sector(*bip) > bi->bi_sector)
 			goto overlap;
 		bip = & (*bip)->bi_next;
 	}
-	if (*bip && (*bip)->bi_sector < bi->bi_sector + ((bi->bi_size)>>9))
+	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
 		goto overlap;
 
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2422,8 +2421,8 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 			     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
 			     bi && bi->bi_sector <= sector;
 			     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
-			if (bi->bi_sector + (bi->bi_size>>9) >= sector)
-				sector = bi->bi_sector + (bi->bi_size>>9);
+			if (bio_end_sector(bi) >= sector)
+				sector = bio_end_sector(bi);
 		}
 		if (sector >= sh->dev[dd_idx].sector + STRIPE_SECTORS)
 			set_bit(R5_OVERWRITE, &sh->dev[dd_idx].flags);
@@ -3849,7 +3848,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
 	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
-	unsigned int bio_sectors = bio->bi_size >> 9;
+	unsigned int bio_sectors = bio_sectors(bio);
 
 	if (mddev->new_chunk_sectors < mddev->chunk_sectors)
 		chunk_sectors = mddev->new_chunk_sectors;
@@ -3941,7 +3940,7 @@ static int bio_fits_rdev(struct bio *bi)
 {
 	struct request_queue *q = bdev_get_queue(bi->bi_bdev);
 
-	if ((bi->bi_size>>9) > queue_max_sectors(q))
+	if (bio_sectors(bi) > queue_max_sectors(q))
 		return 0;
 	blk_recount_segments(q, bi);
 	if (bi->bi_phys_segments > queue_max_segments(q))
@@ -3988,7 +3987,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 						    0,
 						    &dd_idx, NULL);
 
-	end_sector = align_bi->bi_sector + (align_bi->bi_size >> 9);
+	end_sector = bio_end_sector(align_bi);
 	rcu_read_lock();
 	rdev = rcu_dereference(conf->disks[dd_idx].replacement);
 	if (!rdev || test_bit(Faulty, &rdev->flags) ||
@@ -4011,7 +4010,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+		    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
 				&first_bad, &bad_sectors)) {
 			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
@@ -4273,7 +4272,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 	}
 
 	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-	last_sector = bi->bi_sector + (bi->bi_size>>9);
+	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
 
@@ -4739,7 +4738,7 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
 				      0, &dd_idx, NULL);
-	last_sector = raid_bio->bi_sector + (raid_bio->bi_size>>9);
+	last_sector = bio_end_sector(raid_bio);
 
 	for (; logical_sector < last_sector;
 	     logical_sector += STRIPE_SECTORS,