path: root/drivers
author		Linus Torvalds <torvalds@linux-foundation.org>	2014-01-30 14:19:05 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-01-30 14:19:05 -0500
commit		f568849edac8611d603e00bd6cbbcfea09395ae6 (patch)
tree		b9472d640fe5d87426d38c9d81d946cf197ad3fb /drivers
parent		d9894c228b11273e720bb63ba120d1d326fe9d94 (diff)
parent		675675ada486dde5bf9aa51665e90706bff11a35 (diff)
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
 "The major piece in here is the immutable bio_vec series from Kent,
  the rest is fairly minor. It was supposed to go in last round, but
  various issues pushed it to this release instead. The pull request
  contains:

   - Various smaller blk-mq fixes from different folks. Nothing major
     here, just minor fixes and cleanups.

   - Fix for a memory leak in the error path in the block ioctl code
     from Christian Engelmayer.

   - Header export fix from CaiZhiyong.

   - Finally the immutable biovec changes from Kent Overstreet. This
     enables some nice future work on making arbitrarily sized bios
     possible, and splitting more efficient. Related fixes to immutable
     bio_vecs:

      - dm-cache immutable fixup from Mike Snitzer.
      - btrfs immutable fixup from Muthu Kumar.
      - bio-integrity fix from Nic Bellinger, which is also going to
        stable"

* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
  xtensa: fixup simdisk driver to work with immutable bio_vecs
  block/blk-mq-cpu.c: use hotcpu_notifier()
  blk-mq: for_each_* macro correctness
  block: Fix memory leak in rw_copy_check_uvector() handling
  bio-integrity: Fix bio_integrity_verify segment start bug
  block: remove unrelated header files and export symbol
  blk-mq: uses page->list incorrectly
  blk-mq: use __smp_call_function_single directly
  btrfs: fix missing increment of bi_remaining
  Revert "block: Warn and free bio if bi_end_io is not set"
  block: Warn and free bio if bi_end_io is not set
  blk-mq: fix initializing request's start time
  block: blk-mq: don't export blk_mq_free_queue()
  block: blk-mq: make blk_sync_queue support mq
  block: blk-mq: support draining mq queue
  dm cache: increment bi_remaining when bi_end_io is restored
  block: fixup for generic bio chaining
  block: Really silence spurious compiler warnings
  block: Silence spurious compiler warnings
  block: Kill bio_pair_split()
  ...
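The core of the immutable biovec work is a change to how drivers walk a bio's segments: iteration state moves out of the bio itself (the old bi_idx) into a caller-owned struct bvec_iter, and bio_for_each_segment() now yields each struct bio_vec by value instead of a pointer into bi_io_vec. A minimal sketch of the before/after idiom follows; handle_segment() is a placeholder for driver logic, not a kernel API, and the two walkers belong to different kernel versions, so they would not compile against the same headers.

#include <linux/bio.h>

/* Hypothetical per-segment consumer; stands in for real driver work. */
static void handle_segment(struct page *page, unsigned int off, unsigned int len)
{
	(void)page; (void)off; (void)len;
}

/* Old idiom (pre-3.14): an int index, pointers into the bio's own vector. */
static void walk_bio_old(struct bio *bio)
{
	struct bio_vec *bvec;
	int i;

	bio_for_each_segment(bvec, bio, i)
		handle_segment(bvec->bv_page, bvec->bv_offset, bvec->bv_len);
}

/* New idiom (3.14+): iteration state in a bvec_iter, segments by value,
 * so the biovec itself is never modified while it is being walked.
 */
static void walk_bio_new(struct bio *bio)
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter)
		handle_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
}

Because nothing in the bio is mutated during iteration, a bio can be advanced or split without cloning its vector, which is what lets the hand-rolled splitting code in nvme-core.c and pktcdvd.c below collapse into bio_split()/bio_chain().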
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/block/aoe/aoe.h                   |  10
-rw-r--r--  drivers/block/aoe/aoecmd.c                | 153
-rw-r--r--  drivers/block/brd.c                       |  16
-rw-r--r--  drivers/block/drbd/drbd_actlog.c          |   2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c          |   2
-rw-r--r--  drivers/block/drbd/drbd_main.c            |  27
-rw-r--r--  drivers/block/drbd/drbd_receiver.c        |  19
-rw-r--r--  drivers/block/drbd/drbd_req.c             |   6
-rw-r--r--  drivers/block/drbd/drbd_req.h             |   2
-rw-r--r--  drivers/block/drbd/drbd_worker.c          |   8
-rw-r--r--  drivers/block/floppy.c                    |  16
-rw-r--r--  drivers/block/loop.c                      |  27
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c         |  20
-rw-r--r--  drivers/block/nbd.c                       |  14
-rw-r--r--  drivers/block/nvme-core.c                 | 142
-rw-r--r--  drivers/block/pktcdvd.c                   | 182
-rw-r--r--  drivers/block/ps3disk.c                   |  17
-rw-r--r--  drivers/block/ps3vram.c                   |  12
-rw-r--r--  drivers/block/rbd.c                       |  91
-rw-r--r--  drivers/block/rsxx/dev.c                  |   6
-rw-r--r--  drivers/block/rsxx/dma.c                  |  15
-rw-r--r--  drivers/block/umem.c                      |  53
-rw-r--r--  drivers/block/xen-blkback/blkback.c       |   2
-rw-r--r--  drivers/block/xen-blkfront.c              |   2
-rw-r--r--  drivers/md/bcache/bcache.h                |   2
-rw-r--r--  drivers/md/bcache/btree.c                 |   8
-rw-r--r--  drivers/md/bcache/debug.c                 |  21
-rw-r--r--  drivers/md/bcache/io.c                    | 196
-rw-r--r--  drivers/md/bcache/journal.c               |  12
-rw-r--r--  drivers/md/bcache/movinggc.c              |   4
-rw-r--r--  drivers/md/bcache/request.c               | 131
-rw-r--r--  drivers/md/bcache/super.c                 |  20
-rw-r--r--  drivers/md/bcache/util.c                  |   4
-rw-r--r--  drivers/md/bcache/writeback.c             |   6
-rw-r--r--  drivers/md/bcache/writeback.h             |   2
-rw-r--r--  drivers/md/dm-bio-record.h                |  37
-rw-r--r--  drivers/md/dm-bufio.c                     |   2
-rw-r--r--  drivers/md/dm-cache-policy-mq.c           |   4
-rw-r--r--  drivers/md/dm-cache-target.c              |  28
-rw-r--r--  drivers/md/dm-crypt.c                     |  64
-rw-r--r--  drivers/md/dm-delay.c                     |   7
-rw-r--r--  drivers/md/dm-flakey.c                    |   7
-rw-r--r--  drivers/md/dm-io.c                        |  37
-rw-r--r--  drivers/md/dm-linear.c                    |   3
-rw-r--r--  drivers/md/dm-raid1.c                     |  20
-rw-r--r--  drivers/md/dm-region-hash.c               |   3
-rw-r--r--  drivers/md/dm-snap.c                      |  19
-rw-r--r--  drivers/md/dm-stripe.c                    |  13
-rw-r--r--  drivers/md/dm-switch.c                    |   4
-rw-r--r--  drivers/md/dm-thin.c                      |  30
-rw-r--r--  drivers/md/dm-verity.c                    |  62
-rw-r--r--  drivers/md/dm.c                           | 189
-rw-r--r--  drivers/md/faulty.c                       |  19
-rw-r--r--  drivers/md/linear.c                       |  96
-rw-r--r--  drivers/md/md.c                           |  12
-rw-r--r--  drivers/md/multipath.c                    |  13
-rw-r--r--  drivers/md/raid0.c                        |  79
-rw-r--r--  drivers/md/raid1.c                        |  73
-rw-r--r--  drivers/md/raid10.c                       | 194
-rw-r--r--  drivers/md/raid5.c                        |  84
-rw-r--r--  drivers/message/fusion/mptsas.c           |   8
-rw-r--r--  drivers/s390/block/dasd_diag.c            |  10
-rw-r--r--  drivers/s390/block/dasd_eckd.c            |  48
-rw-r--r--  drivers/s390/block/dasd_fba.c             |  26
-rw-r--r--  drivers/s390/block/dcssblk.c              |  21
-rw-r--r--  drivers/s390/block/scm_blk.c              |   8
-rw-r--r--  drivers/s390/block/scm_blk_cluster.c      |   4
-rw-r--r--  drivers/s390/block/xpram.c                |  19
-rw-r--r--  drivers/scsi/libsas/sas_expander.c        |   8
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c  |  41
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c  |  39
-rw-r--r--  drivers/scsi/osd/osd_initiator.c          |   2
-rw-r--r--  drivers/scsi/sd.c                         |   2
-rw-r--r--  drivers/scsi/sd_dif.c                     |  30
-rw-r--r--  drivers/staging/lustre/lustre/llite/lloop.c |  26
-rw-r--r--  drivers/staging/zram/zram_drv.c           |  33
-rw-r--r--  drivers/target/target_core_iblock.c       |   2
77 files changed, 1080 insertions, 1596 deletions
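Two of the larger conversions below (nvme-core.c and pktcdvd.c) delete driver-private bio-pair splitting machinery and use the generic bio_split()/bio_chain() pair instead. A hedged sketch of that pattern under the 3.14-era API, mirroring what the new nvme code does; resubmit_split() is an illustrative name, not a function from this diff.

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Illustrative helper (not from this diff): split the first 'sectors'
 * off 'bio' and submit both halves.  bio_split() allocates the front
 * half and advances the remainder's iterator; bio_chain() defers the
 * parent's completion until the split child has completed.
 */
static int resubmit_split(struct bio *bio, unsigned int sectors)
{
	struct bio *split;

	split = bio_split(bio, sectors, GFP_NOIO, NULL);
	if (!split)
		return -ENOMEM;

	bio_chain(split, bio);		/* 'bio' waits on 'split' */
	generic_make_request(split);	/* front half */
	generic_make_request(bio);	/* advanced remainder */
	return 0;
}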
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 14a9d1912318..9220f8e833d0 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -100,11 +100,8 @@ enum {
 
 struct buf {
 	ulong nframesout;
-	ulong resid;
-	ulong bv_resid;
-	sector_t sector;
 	struct bio *bio;
-	struct bio_vec *bv;
+	struct bvec_iter iter;
 	struct request *rq;
 };
 
@@ -120,13 +117,10 @@ struct frame {
 	ulong waited;
 	ulong waited_total;
 	struct aoetgt *t;		/* parent target I belong to */
-	sector_t lba;
 	struct sk_buff *skb;		/* command skb freed on module exit */
 	struct sk_buff *r_skb;		/* response skb for async processing */
 	struct buf *buf;
-	struct bio_vec *bv;
-	ulong bcnt;
-	ulong bv_off;
+	struct bvec_iter iter;
 	char flags;
 };
 
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d2515435e23f..8184451b57c0 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f)
 
 	t = f->t;
 	f->buf = NULL;
-	f->lba = 0;
-	f->bv = NULL;
+	memset(&f->iter, 0, sizeof(f->iter));
 	f->r_skb = NULL;
 	f->flags = 0;
 	list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@ newframe(struct aoedev *d)
 }
 
 static void
-skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt)
+skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
 {
 	int frag = 0;
-	ulong fcnt;
-loop:
-	fcnt = bv->bv_len - (off - bv->bv_offset);
-	if (fcnt > cnt)
-		fcnt = cnt;
-	skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
-	cnt -= fcnt;
-	if (cnt <= 0)
-		return;
-	bv++;
-	off = bv->bv_offset;
-	goto loop;
+	struct bio_vec bv;
+
+	__bio_for_each_segment(bv, bio, iter, iter)
+		skb_fill_page_desc(skb, frag++, bv.bv_page,
+				   bv.bv_offset, bv.bv_len);
 }
 
 static void
@@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f)
 	t->nout++;
 	f->waited = 0;
 	f->waited_total = 0;
-	if (f->buf)
-		f->lba = f->buf->sector;
 
 	/* set up ata header */
-	ah->scnt = f->bcnt >> 9;
-	put_lba(ah, f->lba);
+	ah->scnt = f->iter.bi_size >> 9;
+	put_lba(ah, f->iter.bi_sector);
 	if (t->d->flags & DEVFL_EXT) {
 		ah->aflags |= AOEAFL_EXT;
 	} else {
@@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f)
 		ah->lba3 |= 0xe0;	/* LBA bit + obsolete 0xa0 */
 	}
 	if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
-		skb_fillup(skb, f->bv, f->bv_off, f->bcnt);
+		skb_fillup(skb, f->buf->bio, f->iter);
 		ah->aflags |= AOEAFL_WRITE;
-		skb->len += f->bcnt;
-		skb->data_len = f->bcnt;
-		skb->truesize += f->bcnt;
+		skb->len += f->iter.bi_size;
+		skb->data_len = f->iter.bi_size;
+		skb->truesize += f->iter.bi_size;
 		t->wpkts++;
 	} else {
 		t->rpkts++;
@@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d)
 	struct buf *buf;
 	struct sk_buff *skb;
 	struct sk_buff_head queue;
-	ulong bcnt, fbcnt;
 
 	buf = nextbuf(d);
 	if (buf == NULL)
@@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d)
 	f = newframe(d);
 	if (f == NULL)
 		return 0;
-	bcnt = d->maxbcnt;
-	if (bcnt == 0)
-		bcnt = DEFAULTBCNT;
-	if (bcnt > buf->resid)
-		bcnt = buf->resid;
-	fbcnt = bcnt;
-	f->bv = buf->bv;
-	f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
-	do {
-		if (fbcnt < buf->bv_resid) {
-			buf->bv_resid -= fbcnt;
-			buf->resid -= fbcnt;
-			break;
-		}
-		fbcnt -= buf->bv_resid;
-		buf->resid -= buf->bv_resid;
-		if (buf->resid == 0) {
-			d->ip.buf = NULL;
-			break;
-		}
-		buf->bv++;
-		buf->bv_resid = buf->bv->bv_len;
-		WARN_ON(buf->bv_resid == 0);
-	} while (fbcnt);
 
 	/* initialize the headers & frame */
 	f->buf = buf;
-	f->bcnt = bcnt;
-	ata_rw_frameinit(f);
+	f->iter = buf->iter;
+	f->iter.bi_size = min_t(unsigned long,
+				d->maxbcnt ?: DEFAULTBCNT,
+				f->iter.bi_size);
+	bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
+
+	if (!buf->iter.bi_size)
+		d->ip.buf = NULL;
 
 	/* mark all tracking fields and load out */
 	buf->nframesout += 1;
-	buf->sector += bcnt >> 9;
+
+	ata_rw_frameinit(f);
 
 	skb = skb_clone(f->skb, GFP_ATOMIC);
 	if (skb) {
@@ -613,10 +585,7 @@ reassign_frame(struct frame *f)
 	skb = nf->skb;
 	nf->skb = f->skb;
 	nf->buf = f->buf;
-	nf->bcnt = f->bcnt;
-	nf->lba = f->lba;
-	nf->bv = f->bv;
-	nf->bv_off = f->bv_off;
+	nf->iter = f->iter;
 	nf->waited = 0;
 	nf->waited_total = f->waited_total;
 	nf->sent = f->sent;
@@ -648,19 +617,19 @@ probe(struct aoetgt *t)
 	}
 	f->flags |= FFL_PROBE;
 	ifrotate(t);
-	f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
+	f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
 	ata_rw_frameinit(f);
 	skb = f->skb;
-	for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) {
+	for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
 		if (n < PAGE_SIZE)
 			m = n;
 		else
 			m = PAGE_SIZE;
 		skb_fill_page_desc(skb, frag, empty_page, 0, m);
 	}
-	skb->len += f->bcnt;
-	skb->data_len = f->bcnt;
-	skb->truesize += f->bcnt;
+	skb->len += f->iter.bi_size;
+	skb->data_len = f->iter.bi_size;
+	skb->truesize += f->iter.bi_size;
 
 	skb = skb_clone(f->skb, GFP_ATOMIC);
 	if (skb) {
@@ -897,15 +866,15 @@ rqbiocnt(struct request *r)
 static void
 bio_pageinc(struct bio *bio)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	struct page *page;
-	int i;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(bv, bio, i) {
+	bio_for_each_segment(bv, bio, iter) {
 		/* Non-zero page count for non-head members of
 		 * compound pages is no longer allowed by the kernel.
 		 */
-		page = compound_trans_head(bv->bv_page);
+		page = compound_trans_head(bv.bv_page);
 		atomic_inc(&page->_count);
 	}
 }
@@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio)
 static void
 bio_pagedec(struct bio *bio)
 {
-	struct bio_vec *bv;
 	struct page *page;
-	int i;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(bv, bio, i) {
-		page = compound_trans_head(bv->bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		page = compound_trans_head(bv.bv_page);
 		atomic_dec(&page->_count);
 	}
 }
@@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
 	memset(buf, 0, sizeof(*buf));
 	buf->rq = rq;
 	buf->bio = bio;
-	buf->resid = bio->bi_size;
-	buf->sector = bio->bi_sector;
+	buf->iter = bio->bi_iter;
 	bio_pageinc(bio);
-	buf->bv = bio_iovec(bio);
-	buf->bv_resid = buf->bv->bv_len;
-	WARN_ON(buf->bv_resid == 0);
 }
 
 static struct buf *
@@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr)
 }
 
 static void
-bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt)
+bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
 {
-	ulong fcnt;
-	char *p;
 	int soff = 0;
-loop:
-	fcnt = bv->bv_len - (off - bv->bv_offset);
-	if (fcnt > cnt)
-		fcnt = cnt;
-	p = page_address(bv->bv_page) + off;
-	skb_copy_bits(skb, soff, p, fcnt);
-	soff += fcnt;
-	cnt -= fcnt;
-	if (cnt <= 0)
-		return;
-	bv++;
-	off = bv->bv_offset;
-	goto loop;
+	struct bio_vec bv;
+
+	iter.bi_size = cnt;
+
+	__bio_for_each_segment(bv, bio, iter, iter) {
+		char *p = page_address(bv.bv_page) + bv.bv_offset;
+		skb_copy_bits(skb, soff, p, bv.bv_len);
+		soff += bv.bv_len;
+	}
 }
 
 void
@@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 	do {
 		bio = rq->bio;
 		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
-	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
 
 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
 	if (!fastfail)
@@ -1229,7 +1188,15 @@ noskb:	if (buf)
 			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
 			break;
 		}
-		bvcpy(f->bv, f->bv_off, skb, n);
+		if (n > f->iter.bi_size) {
+			pr_err_ratelimited("%s e%ld.%d.  bytes=%ld need=%u\n",
+				"aoe: too-large data size in read from",
+				(long) d->aoemajor, d->aoeminor,
+				n, f->iter.bi_size);
+			clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
+			break;
+		}
+		bvcpy(skb, f->buf->bio, f->iter, n);
 	case ATA_CMD_PIO_WRITE:
 	case ATA_CMD_PIO_WRITE_EXT:
 		spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@ out:
 
 	aoe_freetframe(f);
 
-	if (buf && --buf->nframesout == 0 && buf->resid == 0)
+	if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
 		aoe_end_buf(d, buf);
 
 	spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
 {
 	if (buf == NULL)
 		return;
-	buf->resid = 0;
+	buf->iter.bi_size = 0;
 	clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
 	if (buf->nframesout == 0)
 		aoe_end_buf(d, buf);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d91f1a56e861..e73b85cf0756 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
 	struct block_device *bdev = bio->bi_bdev;
 	struct brd_device *brd = bdev->bd_disk->private_data;
 	int rw;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
 	sector_t sector;
-	int i;
+	struct bvec_iter iter;
 	int err = -EIO;
 
-	sector = bio->bi_sector;
+	sector = bio->bi_iter.bi_sector;
 	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
 		goto out;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		err = 0;
-		discard_from_brd(brd, sector, bio->bi_size);
+		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
 		goto out;
 	}
 
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
 	if (rw == READA)
 		rw = READ;
 
-	bio_for_each_segment(bvec, bio, i) {
-		unsigned int len = bvec->bv_len;
-		err = brd_do_bvec(brd, bvec->bv_page, len,
-					bvec->bv_offset, rw, sector);
+	bio_for_each_segment(bvec, bio, iter) {
+		unsigned int len = bvec.bv_len;
+		err = brd_do_bvec(brd, bvec.bv_page, len,
+					bvec.bv_offset, rw, sector);
 		if (err)
 			break;
 		sector += len >> SECTOR_SHIFT;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca320a8..a9b13f2cc420 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
 	bio = bio_alloc_drbd(GFP_NOIO);
 	bio->bi_bdev = bdev->md_bdev;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	err = -EIO;
 	if (bio_add_page(bio, page, size, 0) != size)
 		goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11ec4bd2..597f111df67b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 	} else
 		page = b->bm_pages[page_nr];
 	bio->bi_bdev = mdev->ldev->md_bdev;
-	bio->bi_sector = on_disk_sector;
+	bio->bi_iter.bi_sector = on_disk_sector;
 	/* bio_add_page of a single page to an empty bio will always succeed,
 	 * according to api.  Do we want to assert that? */
 	bio_add_page(bio, page, len, 0);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e3818b1bc83..929468e1512a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
 
 static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+
 	/* hint all but last page with MSG_MORE */
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		int err;
 
-		err = _drbd_no_send_page(mdev, bvec->bv_page,
-					 bvec->bv_offset, bvec->bv_len,
-					 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+		err = _drbd_no_send_page(mdev, bvec.bv_page,
+					 bvec.bv_offset, bvec.bv_len,
+					 bio_iter_last(bvec, iter)
+					 ? 0 : MSG_MORE);
 		if (err)
 			return err;
 	}
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
 
 static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
 {
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+
 	/* hint all but last page with MSG_MORE */
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		int err;
 
-		err = _drbd_send_page(mdev, bvec->bv_page,
-				      bvec->bv_offset, bvec->bv_len,
-				      i == bio->bi_vcnt - 1 ? 0 : MSG_MORE);
+		err = _drbd_send_page(mdev, bvec.bv_page,
+				      bvec.bv_offset, bvec.bv_len,
+				      bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
 		if (err)
 			return err;
 	}
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6fa6673b36b3..d073305ffd5e 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1333,7 +1333,7 @@ next_bio:
 		goto fail;
 	}
 	/* > peer_req->i.sector, unless this is the first bio */
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	bio->bi_rw = rw;
 	bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
 		dev_err(DEV,
 			"bio_add_page failed for len=%u, "
 			"bi_vcnt=0 (bi_sector=%llu)\n",
-			len, (unsigned long long)bio->bi_sector);
+			len, (uint64_t)bio->bi_iter.bi_sector);
 		err = -ENOSPC;
 		goto fail;
 	}
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
 static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 			   sector_t sector, int data_size)
 {
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct bio *bio;
-	int dgs, err, i, expect;
+	int dgs, err, expect;
 	void *dig_in = mdev->tconn->int_dig_in;
 	void *dig_vv = mdev->tconn->int_dig_vv;
 
@@ -1615,13 +1616,13 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 	mdev->recv_cnt += data_size>>9;
 
 	bio = req->master_bio;
-	D_ASSERT(sector == bio->bi_sector);
+	D_ASSERT(sector == bio->bi_iter.bi_sector);
 
-	bio_for_each_segment(bvec, bio, i) {
-		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
-		expect = min_t(int, data_size, bvec->bv_len);
+	bio_for_each_segment(bvec, bio, iter) {
+		void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
+		expect = min_t(int, data_size, bvec.bv_len);
 		err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
-		kunmap(bvec->bv_page);
+		kunmap(bvec.bv_page);
 		if (err)
 			return err;
 		data_size -= expect;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index fec7bef44994..104a040f24de 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
 	req->epoch = 0;
 
 	drbd_clear_interval(&req->i);
-	req->i.sector = bio_src->bi_sector;
-	req->i.size = bio_src->bi_size;
+	req->i.sector = bio_src->bi_iter.bi_sector;
+	req->i.size = bio_src->bi_iter.bi_size;
 	req->i.local = true;
 	req->i.waiting = false;
 
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 	/*
 	 * what we "blindly" assume:
 	 */
-	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+	D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
 
 	inc_ap_bio(mdev);
 	__drbd_make_request(mdev, bio, start_time);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1addc98..28e15d91197a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
 
 /* Short lived temporary struct on the stack.
  * We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
 struct bio_and_error {
 	struct bio *bio;
 	int error;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 891c0ecaa292..84d3175d493a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 {
 	struct hash_desc desc;
 	struct scatterlist sg;
-	struct bio_vec *bvec;
-	int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 
 	desc.tfm = tfm;
 	desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
 	sg_init_table(&sg, 1);
 	crypto_hash_init(&desc);
 
-	bio_for_each_segment(bvec, bio, i) {
-		sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset);
+	bio_for_each_segment(bvec, bio, iter) {
+		sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
 		crypto_hash_update(&desc, &sg, sg.length);
 	}
 	crypto_hash_final(&desc, digest);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 000abe2f105c..6b29c4422828 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
 /* Compute maximal contiguous buffer size. */
 static int buffer_chain_size(void)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	int size;
 	struct req_iterator iter;
 	char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
 	size = 0;
 
 	rq_for_each_segment(bv, current_req, iter) {
-		if (page_address(bv->bv_page) + bv->bv_offset != base + size)
+		if (page_address(bv.bv_page) + bv.bv_offset != base + size)
 			break;
 
-		size += bv->bv_len;
+		size += bv.bv_len;
 	}
 
 	return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
 static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 {
 	int remaining;		/* number of transferred 512-byte sectors */
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	char *buffer;
 	char *dma_buffer;
 	int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
 		if (!remaining)
 			break;
 
-		size = bv->bv_len;
+		size = bv.bv_len;
 		SUPBOUND(size, remaining);
 
-		buffer = page_address(bv->bv_page) + bv->bv_offset;
+		buffer = page_address(bv.bv_page) + bv.bv_offset;
 		if (dma_buffer + size >
 		    floppy_track_buffer + (max_buffer_sectors << 10) ||
 		    dma_buffer < floppy_track_buffer) {
@@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio_vec.bv_len = size;
 	bio_vec.bv_offset = 0;
 	bio.bi_vcnt = 1;
-	bio.bi_size = size;
+	bio.bi_iter.bi_size = size;
 	bio.bi_bdev = bdev;
-	bio.bi_sector = 0;
+	bio.bi_iter.bi_sector = 0;
 	bio.bi_flags = (1 << BIO_QUIET);
 	init_completion(&complete);
 	bio.bi_private = &complete;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c8dac7305244..33fde3a39759 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 {
 	int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
 			struct page *page);
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct page *page = NULL;
-	int i, ret = 0;
+	int ret = 0;
 
 	if (lo->transfer != transfer_none) {
 		page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
 		do_lo_send = do_lo_send_direct_write;
 	}
 
-	bio_for_each_segment(bvec, bio, i) {
-		ret = do_lo_send(lo, bvec, pos, page);
+	bio_for_each_segment(bvec, bio, iter) {
+		ret = do_lo_send(lo, &bvec, pos, page);
 		if (ret < 0)
 			break;
-		pos += bvec->bv_len;
+		pos += bvec.bv_len;
 	}
 	if (page) {
 		kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
 static int
 lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
 {
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	ssize_t s;
-	int i;
 
-	bio_for_each_segment(bvec, bio, i) {
-		s = do_lo_receive(lo, bvec, bsize, pos);
+	bio_for_each_segment(bvec, bio, iter) {
+		s = do_lo_receive(lo, &bvec, bsize, pos);
 		if (s < 0)
 			return s;
 
-		if (s != bvec->bv_len) {
+		if (s != bvec.bv_len) {
 			zero_fill_bio(bio);
 			break;
 		}
-		pos += bvec->bv_len;
+		pos += bvec.bv_len;
 	}
 	return 0;
 }
@@ -415,7 +416,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	loff_t pos;
 	int ret;
 
-	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+	pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 
 	if (bio_rw(bio) == WRITE) {
 		struct file *file = lo->lo_backing_file;
@@ -444,7 +445,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 			goto out;
 		}
 		ret = file->f_op->fallocate(file, mode, pos,
-					    bio->bi_size);
+					    bio->bi_iter.bi_size);
 		if (unlikely(ret && ret != -EINVAL &&
 			     ret != -EOPNOTSUPP))
 			ret = -EIO;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 050c71267f14..52b2f2a71470 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3962,8 +3962,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 {
 	struct driver_data *dd = queue->queuedata;
 	struct scatterlist *sg;
-	struct bio_vec *bvec;
-	int i, nents = 0;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
+	int nents = 0;
 	int tag = 0, unaligned = 0;
 
 	if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3993,7 +3994,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 	}
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
-		bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+		bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
 						bio_sectors(bio)));
 		return;
 	}
@@ -4006,7 +4007,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 
 	if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
 							dd->unal_qdepth) {
-		if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+		if (bio->bi_iter.bi_sector % 8 != 0)
+			/* Unaligned on 4k boundaries */
 			unaligned = 1;
 		else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
 			unaligned = 1;
@@ -4025,17 +4027,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 	}
 
 	/* Create the scatter list for this bio. */
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		sg_set_page(&sg[nents],
-				bvec->bv_page,
-				bvec->bv_len,
-				bvec->bv_offset);
+				bvec.bv_page,
+				bvec.bv_len,
+				bvec.bv_offset);
 		nents++;
 	}
 
 	/* Issue the read/write. */
 	mtip_hw_submit_io(dd,
-			bio->bi_sector,
+			bio->bi_iter.bi_sector,
 			bio_sectors(bio),
 			nents,
 			tag,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dc3b5153f0d..55298db36b2d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
 
 	if (nbd_cmd(req) == NBD_CMD_WRITE) {
 		struct req_iterator iter;
-		struct bio_vec *bvec;
+		struct bio_vec bvec;
 		/*
 		 * we are really probing at internals to determine
 		 * whether to set MSG_MORE or not...
 		 */
 		rq_for_each_segment(bvec, req, iter) {
 			flags = 0;
-			if (!rq_iter_last(req, iter))
+			if (!rq_iter_last(bvec, iter))
 				flags = MSG_MORE;
 			dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
-					nbd->disk->disk_name, req, bvec->bv_len);
-			result = sock_send_bvec(nbd, bvec, flags);
+					nbd->disk->disk_name, req, bvec.bv_len);
+			result = sock_send_bvec(nbd, &bvec, flags);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk),
 					"Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
 			nbd->disk->disk_name, req);
 	if (nbd_cmd(req) == NBD_CMD_READ) {
 		struct req_iterator iter;
-		struct bio_vec *bvec;
+		struct bio_vec bvec;
 
 		rq_for_each_segment(bvec, req, iter) {
-			result = sock_recv_bvec(nbd, bvec);
+			result = sock_recv_bvec(nbd, &bvec);
 			if (result <= 0) {
 				dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
 					result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
 				return req;
 			}
 			dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
-				nbd->disk->disk_name, req, bvec->bv_len);
+				nbd->disk->disk_name, req, bvec.bv_len);
 		}
 	}
 	return req;
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 26d03fa0bf26..1f14ac403945 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -441,104 +441,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
 	return total_len;
 }
 
-struct nvme_bio_pair {
-	struct bio b1, b2, *parent;
-	struct bio_vec *bv1, *bv2;
-	int err;
-	atomic_t cnt;
-};
-
-static void nvme_bio_pair_endio(struct bio *bio, int err)
-{
-	struct nvme_bio_pair *bp = bio->bi_private;
-
-	if (err)
-		bp->err = err;
-
-	if (atomic_dec_and_test(&bp->cnt)) {
-		bio_endio(bp->parent, bp->err);
-		kfree(bp->bv1);
-		kfree(bp->bv2);
-		kfree(bp);
-	}
-}
-
-static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
-							int len, int offset)
-{
-	struct nvme_bio_pair *bp;
-
-	BUG_ON(len > bio->bi_size);
-	BUG_ON(idx > bio->bi_vcnt);
-
-	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
-	if (!bp)
-		return NULL;
-	bp->err = 0;
-
-	bp->b1 = *bio;
-	bp->b2 = *bio;
-
-	bp->b1.bi_size = len;
-	bp->b2.bi_size -= len;
-	bp->b1.bi_vcnt = idx;
-	bp->b2.bi_idx = idx;
-	bp->b2.bi_sector += len >> 9;
-
-	if (offset) {
-		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-								GFP_ATOMIC);
-		if (!bp->bv1)
-			goto split_fail_1;
-
-		bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
-								GFP_ATOMIC);
-		if (!bp->bv2)
-			goto split_fail_2;
-
-		memcpy(bp->bv1, bio->bi_io_vec,
-			bio->bi_max_vecs * sizeof(struct bio_vec));
-		memcpy(bp->bv2, bio->bi_io_vec,
-			bio->bi_max_vecs * sizeof(struct bio_vec));
-
-		bp->b1.bi_io_vec = bp->bv1;
-		bp->b2.bi_io_vec = bp->bv2;
-		bp->b2.bi_io_vec[idx].bv_offset += offset;
-		bp->b2.bi_io_vec[idx].bv_len -= offset;
-		bp->b1.bi_io_vec[idx].bv_len = offset;
-		bp->b1.bi_vcnt++;
-	} else
-		bp->bv1 = bp->bv2 = NULL;
-
-	bp->b1.bi_private = bp;
-	bp->b2.bi_private = bp;
-
-	bp->b1.bi_end_io = nvme_bio_pair_endio;
-	bp->b2.bi_end_io = nvme_bio_pair_endio;
-
-	bp->parent = bio;
-	atomic_set(&bp->cnt, 2);
-
-	return bp;
-
- split_fail_2:
-	kfree(bp->bv1);
- split_fail_1:
-	kfree(bp);
-	return NULL;
-}
-
 static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
-						 int idx, int len, int offset)
+				 int len)
 {
-	struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset);
-	if (!bp)
+	struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
+	if (!split)
 		return -ENOMEM;
 
+	bio_chain(split, bio);
+
 	if (bio_list_empty(&nvmeq->sq_cong))
 		add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
-	bio_list_add(&nvmeq->sq_cong, &bp->b1);
-	bio_list_add(&nvmeq->sq_cong, &bp->b2);
+	bio_list_add(&nvmeq->sq_cong, split);
+	bio_list_add(&nvmeq->sq_cong, bio);
 
 	return 0;
 }
@@ -550,41 +465,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
 static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 		struct bio *bio, enum dma_data_direction dma_dir, int psegs)
 {
-	struct bio_vec *bvec, *bvprv = NULL;
+	struct bio_vec bvec, bvprv;
+	struct bvec_iter iter;
 	struct scatterlist *sg = NULL;
-	int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+	int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
+	int first = 1;
 
 	if (nvmeq->dev->stripe_size)
 		split_len = nvmeq->dev->stripe_size -
-			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+			((bio->bi_iter.bi_sector << 9) &
+			 (nvmeq->dev->stripe_size - 1));
 
 	sg_init_table(iod->sg, psegs);
-	bio_for_each_segment(bvec, bio, i) {
-		if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) {
-			sg->length += bvec->bv_len;
+	bio_for_each_segment(bvec, bio, iter) {
+		if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
+			sg->length += bvec.bv_len;
 		} else {
-			if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec))
-				return nvme_split_and_submit(bio, nvmeq, i,
-								length, 0);
+			if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
+				return nvme_split_and_submit(bio, nvmeq,
+							     length);
 
 			sg = sg ? sg + 1 : iod->sg;
-			sg_set_page(sg, bvec->bv_page, bvec->bv_len,
-							bvec->bv_offset);
+			sg_set_page(sg, bvec.bv_page,
+				    bvec.bv_len, bvec.bv_offset);
 			nsegs++;
 		}
 
-		if (split_len - length < bvec->bv_len)
-			return nvme_split_and_submit(bio, nvmeq, i, split_len,
-							split_len - length);
-		length += bvec->bv_len;
+		if (split_len - length < bvec.bv_len)
+			return nvme_split_and_submit(bio, nvmeq, split_len);
+		length += bvec.bv_len;
 		bvprv = bvec;
+		first = 0;
 	}
 	iod->nents = nsegs;
 	sg_mark_end(sg);
 	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
 		return -ENOMEM;
 
-	BUG_ON(length != bio->bi_size);
+	BUG_ON(length != bio->bi_iter.bi_size);
 	return length;
 }
 
@@ -608,8 +526,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	iod->npages = 0;
 
 	range->cattr = cpu_to_le32(0);
-	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
-	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+	range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 
 	memset(cmnd, 0, sizeof(*cmnd));
 	cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +592,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	}
 
 	result = -ENOMEM;
-	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
 	if (!iod)
 		goto nomem;
 	iod->private = bio;
@@ -723,7 +641,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
 	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
 								GFP_ATOMIC);
-	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c5efb1..3dda09a5ec41 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 
 	for (;;) {
 		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
-		if (s <= tmp->bio->bi_sector)
+		if (s <= tmp->bio->bi_iter.bi_sector)
 			next = n->rb_left;
 		else
 			next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 		n = next;
 	}
 
-	if (s > tmp->bio->bi_sector) {
+	if (s > tmp->bio->bi_iter.bi_sector) {
 		tmp = pkt_rbtree_next(tmp);
 		if (!tmp)
 			return NULL;
 	}
-	BUG_ON(s > tmp->bio->bi_sector);
+	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
 	return tmp;
 }
 
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
 {
 	struct rb_node **p = &pd->bio_queue.rb_node;
 	struct rb_node *parent = NULL;
-	sector_t s = node->bio->bi_sector;
+	sector_t s = node->bio->bi_iter.bi_sector;
 	struct pkt_rb_node *tmp;
 
 	while (*p) {
 		parent = *p;
 		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
-		if (s < tmp->bio->bi_sector)
+		if (s < tmp->bio->bi_iter.bi_sector)
 			p = &(*p)->rb_left;
 		else
 			p = &(*p)->rb_right;
@@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 		spin_lock(&pd->iosched.lock);
 		bio = bio_list_peek(&pd->iosched.write_queue);
 		spin_unlock(&pd->iosched.lock);
-		if (bio && (bio->bi_sector == pd->iosched.last_write))
+		if (bio && (bio->bi_iter.bi_sector ==
+			    pd->iosched.last_write))
 			need_write_seek = 0;
 		if (need_write_seek && reads_queued) {
 			if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 			continue;
 
 		if (bio_data_dir(bio) == READ)
-			pd->iosched.successive_reads += bio->bi_size >> 10;
+			pd->iosched.successive_reads +=
+				bio->bi_iter.bi_size >> 10;
 		else {
 			pd->iosched.successive_reads = 0;
 			pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
 
 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_sector, err);
+		(unsigned long long)bio->bi_iter.bi_sector, err);
 
 	if (err)
 		atomic_inc(&pkt->io_errors);
@@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 	memset(written, 0, sizeof(written));
 	spin_lock(&pkt->lock);
 	bio_list_for_each(bio, &pkt->orig_bios) {
-		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
-		int num_frames = bio->bi_size / CD_FRAMESIZE;
+		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+			(CD_FRAMESIZE >> 9);
+		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
 		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
 		BUG_ON(first_frame < 0);
 		BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 
 		bio = pkt->r_bios[f];
 		bio_reset(bio);
-		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
 		bio->bi_bdev = pd->bdev;
 		bio->bi_end_io = pkt_end_io_read;
 		bio->bi_private = pkt;
@@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
 	bio_reset(pkt->bio);
 	pkt->bio->bi_bdev = pd->bdev;
 	pkt->bio->bi_rw = REQ_WRITE;
-	pkt->bio->bi_sector = new_sector;
-	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+	pkt->bio->bi_iter.bi_sector = new_sector;
+	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
 	pkt->bio->bi_vcnt = pkt->frames;
 
 	pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
 	node = first_node;
 	while (node) {
 		bio = node->bio;
-		zone = get_zone(bio->bi_sector, pd);
+		zone = get_zone(bio->bi_iter.bi_sector, pd);
 		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
 			if (p->sector == zone) {
 				bio = NULL;
@@ -1252,14 +1255,14 @@ try_next_bio:
 	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
 	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
 		bio = node->bio;
-		pkt_dbg(2, pd, "found zone=%llx\n",
-			(unsigned long long)get_zone(bio->bi_sector, pd));
-		if (get_zone(bio->bi_sector, pd) != zone)
+		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+			get_zone(bio->bi_iter.bi_sector, pd));
+		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
 			break;
 		pkt_rbtree_erase(pd, node);
 		spin_lock(&pkt->lock);
 		bio_list_add(&pkt->orig_bios, bio);
-		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
 		spin_unlock(&pkt->lock);
 	}
 	/* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
 
 	bio_reset(pkt->w_bio);
-	pkt->w_bio->bi_sector = pkt->sector;
+	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
 	pkt->w_bio->bi_bdev = pd->bdev;
 	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
 	pkt->w_bio->bi_private = pkt;
@@ -2335,75 +2338,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
 	pkt_bio_finished(pd);
 }
 
-static void pkt_make_request(struct request_queue *q, struct bio *bio)
+static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
 {
-	struct pktcdvd_device *pd;
-	char b[BDEVNAME_SIZE];
+	struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
+	struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
+
+	psd->pd = pd;
+	psd->bio = bio;
+	cloned_bio->bi_bdev = pd->bdev;
+	cloned_bio->bi_private = psd;
+	cloned_bio->bi_end_io = pkt_end_io_read_cloned;
+	pd->stats.secs_r += bio_sectors(bio);
+	pkt_queue_bio(pd, cloned_bio);
+}
+
+static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
+{
+	struct pktcdvd_device *pd = q->queuedata;
 	sector_t zone;
 	struct packet_data *pkt;
 	int was_empty, blocked_bio;
 	struct pkt_rb_node *node;
 
-	pd = q->queuedata;
-	if (!pd) {
-		pr_err("%s incorrect request queue\n",
-		       bdevname(bio->bi_bdev, b));
-		goto end_io;
-	}
-
-	/*
-	 * Clone READ bios so we can have our own bi_end_io callback.
-	 */
-	if (bio_data_dir(bio) == READ) {
-		struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
-		struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
-
-		psd->pd = pd;
-		psd->bio = bio;
-		cloned_bio->bi_bdev = pd->bdev;
-		cloned_bio->bi_private = psd;
-		cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-		pd->stats.secs_r += bio_sectors(bio);
-		pkt_queue_bio(pd, cloned_bio);
-		return;
-	}
-
-	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
-		pkt_notice(pd, "WRITE for ro device (%llu)\n",
-			   (unsigned long long)bio->bi_sector);
-		goto end_io;
-	}
-
-	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
-		pkt_err(pd, "wrong bio size\n");
-		goto end_io;
-	}
-
-	blk_queue_bounce(q, &bio);
-
-	zone = get_zone(bio->bi_sector, pd);
-	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
-		(unsigned long long)bio->bi_sector,
-		(unsigned long long)bio_end_sector(bio));
-
-	/* Check if we have to split the bio */
-	{
-		struct bio_pair *bp;
-		sector_t last_zone;
-		int first_sectors;
-
-		last_zone = get_zone(bio_end_sector(bio) - 1, pd);
-		if (last_zone != zone) {
-			BUG_ON(last_zone != zone + pd->settings.size);
-			first_sectors = last_zone - bio->bi_sector;
-			bp = bio_split(bio, first_sectors);
-			BUG_ON(!bp);
-			pkt_make_request(q, &bp->bio1);
-			pkt_make_request(q, &bp->bio2);
-			bio_pair_release(bp);
-			return;
-		}
-	}
+	zone = get_zone(bio->bi_iter.bi_sector, pd);
 
 	/*
 	 * If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2417,7 +2374,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		if ((pkt->state == PACKET_WAITING_STATE) ||
 		    (pkt->state == PACKET_READ_WAIT_STATE)) {
 			bio_list_add(&pkt->orig_bios, bio);
-			pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+			pkt->write_size +=
+				bio->bi_iter.bi_size / CD_FRAMESIZE;
 			if ((pkt->write_size >= pkt->frames) &&
 			    (pkt->state == PACKET_WAITING_STATE)) {
 				atomic_inc(&pkt->run_sm);
@@ -2476,6 +2434,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 	 */
 	wake_up(&pd->wqueue);
 	}
+}
+
+static void pkt_make_request(struct request_queue *q, struct bio *bio)
+{
+	struct pktcdvd_device *pd;
+	char b[BDEVNAME_SIZE];
+	struct bio *split;
+
+	pd = q->queuedata;
+	if (!pd) {
+		pr_err("%s incorrect request queue\n",
+		       bdevname(bio->bi_bdev, b));
+		goto end_io;
+	}
+
+	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
+		(unsigned long long)bio->bi_iter.bi_sector,
+		(unsigned long long)bio_end_sector(bio));
+
+	/*
+	 * Clone READ bios so we can have our own bi_end_io callback.
+	 */
+	if (bio_data_dir(bio) == READ) {
+		pkt_make_request_read(pd, bio);
+		return;
+	}
+
+	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
+		pkt_notice(pd, "WRITE for ro device (%llu)\n",
+			   (unsigned long long)bio->bi_iter.bi_sector);
+		goto end_io;
+	}
+
+	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
+		pkt_err(pd, "wrong bio size\n");
+		goto end_io;
+	}
+
+	blk_queue_bounce(q, &bio);
+
+	do {
+		sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
+		sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
+
+		if (last_zone != zone) {
+			BUG_ON(last_zone != zone + pd->settings.size);
+
+			split = bio_split(bio, last_zone -
2485 bio->bi_iter.bi_sector,
2486 GFP_NOIO, fs_bio_set);
2487 bio_chain(split, bio);
2488 } else {
2489 split = bio;
2490 }
2491
2492 pkt_make_request_write(q, split);
2493 } while (split != bio);
2494
2479 return; 2495 return;
2480end_io: 2496end_io:
2481 bio_io_error(bio); 2497 bio_io_error(bio);
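The pktcdvd rewrite above shows the splitting idiom that immutable biovecs make possible: instead of the fixed two-way bio_pair_split(), the driver carves a fragment off the front with the new bio_split(), ties its completion to the parent with bio_chain(), and loops until the remainder fits in one zone. A minimal sketch of the same pattern, where my_zone(), my_zone_end() and my_submit() are hypothetical stand-ins for the driver's own helpers:

	/* Sketch only: split a bio at zone boundaries and submit each piece. */
	static void submit_split_by_zone(struct request_queue *q, struct bio *bio)
	{
		struct bio *split;

		do {
			sector_t zone = my_zone(bio->bi_iter.bi_sector);

			if (my_zone(bio_end_sector(bio) - 1) != zone) {
				/* front fragment, up to the end of this zone */
				split = bio_split(bio,
						  my_zone_end(zone) - bio->bi_iter.bi_sector,
						  GFP_NOIO, fs_bio_set);
				bio_chain(split, bio);	/* parent completes after fragment */
			} else {
				split = bio;
			}
			my_submit(q, split);
		} while (split != bio);
	}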
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88d7585..c120d70d3fb3 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 {
 	unsigned int offset = 0;
 	struct req_iterator iter;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
 	unsigned int i = 0;
 	size_t size;
 	void *buf;
 
 	rq_for_each_segment(bvec, req, iter) {
 		unsigned long flags;
-		dev_dbg(&dev->sbd.core,
-			"%s:%u: bio %u: %u segs %u sectors from %lu\n",
-			__func__, __LINE__, i, bio_segments(iter.bio),
-			bio_sectors(iter.bio), iter.bio->bi_sector);
+		dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
+			__func__, __LINE__, i, bio_sectors(iter.bio),
+			iter.bio->bi_iter.bi_sector);
 
-		size = bvec->bv_len;
-		buf = bvec_kmap_irq(bvec, &flags);
+		size = bvec.bv_len;
+		buf = bvec_kmap_irq(&bvec, &flags);
 		if (gather)
 			memcpy(dev->bounce_buf+offset, buf, size);
 		else
 			memcpy(buf, dev->bounce_buf+offset, size);
 		offset += size;
-		flush_kernel_dcache_page(bvec->bv_page);
+		flush_kernel_dcache_page(bvec.bv_page);
 		bvec_kunmap_irq(buf, &flags);
 		i++;
 	}
@@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
 
 #ifdef DEBUG
 	unsigned int n = 0;
-	struct bio_vec *bv;
+	struct bio_vec bv;
 	struct req_iterator iter;
 
 	rq_for_each_segment(bv, req, iter)
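With the new iterators, rq_for_each_segment() hands the loop a struct bio_vec by value instead of a pointer into the bio, so declarations drop the * and accesses switch from -> to dot. A hedged sketch of the post-3.14 calling convention, with process_segment() as a hypothetical consumer:

	/* Sketch: walking a request's segments after the conversion. */
	static void walk_request(struct request *req)
	{
		struct req_iterator iter;
		struct bio_vec bvec;	/* by value now, not a pointer */

		rq_for_each_segment(bvec, req, iter)
			process_segment(bvec.bv_page, bvec.bv_offset, bvec.bv_len);
	}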
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53e5f37..ef45cfb98fd2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -553,16 +553,16 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	int write = bio_data_dir(bio) == WRITE;
 	const char *op = write ? "write" : "read";
-	loff_t offset = bio->bi_sector << 9;
+	loff_t offset = bio->bi_iter.bi_sector << 9;
 	int error = 0;
-	struct bio_vec *bvec;
-	unsigned int i;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct bio *next;
 
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		/* PS3 is ppc64, so we don't handle highmem */
-		char *ptr = page_address(bvec->bv_page) + bvec->bv_offset;
-		size_t len = bvec->bv_len, retlen;
+		char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
+		size_t len = bvec.bv_len, retlen;
 
 		dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
 			len, offset);
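This hunk is the canonical shape of the conversion repeated across these drivers: the integer segment index becomes a struct bvec_iter, and the element changes from pointer to value. Illustrative before and after:

	/* Old (pre-3.14) iteration: */
	struct bio_vec *bvec;
	int i;
	bio_for_each_segment(bvec, bio, i)
		total += bvec->bv_len;

	/* New (3.14) iteration: */
	struct bio_vec bv;
	struct bvec_iter iter;
	bio_for_each_segment(bv, bio, iter)
		total += bv.bv_len;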
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 16cab6635163..b365e0dfccb6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1156,23 +1156,23 @@ static void bio_chain_put(struct bio *chain)
  */
 static void zero_bio_chain(struct bio *chain, int start_ofs)
 {
-	struct bio_vec *bv;
+	struct bio_vec bv;
+	struct bvec_iter iter;
 	unsigned long flags;
 	void *buf;
-	int i;
 	int pos = 0;
 
 	while (chain) {
-		bio_for_each_segment(bv, chain, i) {
-			if (pos + bv->bv_len > start_ofs) {
+		bio_for_each_segment(bv, chain, iter) {
+			if (pos + bv.bv_len > start_ofs) {
 				int remainder = max(start_ofs - pos, 0);
-				buf = bvec_kmap_irq(bv, &flags);
+				buf = bvec_kmap_irq(&bv, &flags);
 				memset(buf + remainder, 0,
-				       bv->bv_len - remainder);
-				flush_dcache_page(bv->bv_page);
+				       bv.bv_len - remainder);
+				flush_dcache_page(bv.bv_page);
 				bvec_kunmap_irq(buf, &flags);
 			}
-			pos += bv->bv_len;
+			pos += bv.bv_len;
 		}
 
 		chain = chain->bi_next;
@@ -1220,74 +1220,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 					unsigned int len,
 					gfp_t gfpmask)
 {
-	struct bio_vec *bv;
-	unsigned int resid;
-	unsigned short idx;
-	unsigned int voff;
-	unsigned short end_idx;
-	unsigned short vcnt;
 	struct bio *bio;
 
-	/* Handle the easy case for the caller */
-
-	if (!offset && len == bio_src->bi_size)
-		return bio_clone(bio_src, gfpmask);
-
-	if (WARN_ON_ONCE(!len))
-		return NULL;
-	if (WARN_ON_ONCE(len > bio_src->bi_size))
-		return NULL;
-	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
-		return NULL;
-
-	/* Find first affected segment... */
-
-	resid = offset;
-	bio_for_each_segment(bv, bio_src, idx) {
-		if (resid < bv->bv_len)
-			break;
-		resid -= bv->bv_len;
-	}
-	voff = resid;
-
-	/* ...and the last affected segment */
-
-	resid += len;
-	__bio_for_each_segment(bv, bio_src, end_idx, idx) {
-		if (resid <= bv->bv_len)
-			break;
-		resid -= bv->bv_len;
-	}
-	vcnt = end_idx - idx + 1;
-
-	/* Build the clone */
-
-	bio = bio_alloc(gfpmask, (unsigned int) vcnt);
+	bio = bio_clone(bio_src, gfpmask);
 	if (!bio)
 		return NULL;	/* ENOMEM */
 
-	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
-	bio->bi_rw = bio_src->bi_rw;
-	bio->bi_flags |= 1 << BIO_CLONED;
-
-	/*
-	 * Copy over our part of the bio_vec, then update the first
-	 * and last (or only) entries.
-	 */
-	memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
-			vcnt * sizeof (struct bio_vec));
-	bio->bi_io_vec[0].bv_offset += voff;
-	if (vcnt > 1) {
-		bio->bi_io_vec[0].bv_len -= voff;
-		bio->bi_io_vec[vcnt - 1].bv_len = resid;
-	} else {
-		bio->bi_io_vec[0].bv_len = len;
-	}
-
-	bio->bi_vcnt = vcnt;
-	bio->bi_size = len;
-	bio->bi_idx = 0;
+	bio_advance(bio, offset);
+	bio->bi_iter.bi_size = len;
 
 	return bio;
 }
@@ -1318,7 +1258,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 
 	/* Build up a chain of clone bios up to the limit */
 
-	if (!bi || off >= bi->bi_size || !len)
+	if (!bi || off >= bi->bi_iter.bi_size || !len)
 		return NULL;		/* Nothing to clone */
 
 	end = &chain;
@@ -1330,7 +1270,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
 			goto out_err;	/* EINVAL; ran out of bio's */
 		}
-		bi_size = min_t(unsigned int, bi->bi_size - off, len);
+		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
 		bio = bio_clone_range(bi, off, bi_size, gfpmask);
 		if (!bio)
 			goto out_err;	/* ENOMEM */
@@ -1339,7 +1279,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 		end = &bio->bi_next;
 
 		off += bi_size;
-		if (off == bi->bi_size) {
+		if (off == bi->bi_iter.bi_size) {
 			bi = bi->bi_next;
 			off = 0;
 		}
@@ -2227,7 +2167,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 
 	if (type == OBJ_REQUEST_BIO) {
 		bio_list = data_desc;
-		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+		rbd_assert(img_offset ==
+			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
 	} else {
 		rbd_assert(type == OBJ_REQUEST_PAGES);
 		pages = data_desc;
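The bio_clone_range() hunk is the payoff of immutable biovecs: because a clone shares the source's biovec and owns only its bvec_iter, sixty-odd lines of segment bookkeeping collapse to clone, advance, trim. The same idea in isolation (a sketch, not rbd's exact code):

	/* Sketch: clone the byte range [offset, offset + len) of src. */
	static struct bio *clone_byte_range(struct bio *src, unsigned int offset,
					    unsigned int len, gfp_t gfp)
	{
		struct bio *bio = bio_clone(src, gfp);

		if (!bio)
			return NULL;

		bio_advance(bio, offset);	/* moves bi_iter only; biovec is shared */
		bio->bi_iter.bi_size = len;	/* trim the tail */
		return bio;
	}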
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2284f5d3a54a..2839d37e5af7 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 	if (!card)
 		goto req_err;
 
-	if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+	if (bio_end_sector(bio) > get_capacity(card->gendisk))
 		goto req_err;
 
 	if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 		goto req_err;
 	}
 
-	if (bio->bi_size == 0) {
+	if (bio->bi_iter.bi_size == 0) {
 		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
 		goto req_err;
 	}
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 
 	dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
 		bio_data_dir(bio) ? 'W' : 'R', bio_meta,
-		(u64)bio->bi_sector << 9, bio->bi_size);
+		(u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
 
 	st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
 				bio_dma_done_cb, bio_meta);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index fc88ba3e1bd2..cf8cd293abb5 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			void *cb_data)
 {
 	struct list_head dma_list[RSXX_MAX_TARGETS];
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	unsigned long long addr8;
 	unsigned int laddr;
 	unsigned int bv_len;
@@ -696,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	int st;
 	int i;
 
-	addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
 	atomic_set(n_dmas, 0);
 
 	for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	}
 
 	if (bio->bi_rw & REQ_DISCARD) {
-		bv_len = bio->bi_size;
+		bv_len = bio->bi_iter.bi_size;
 
 		while (bv_len > 0) {
 			tgt = rsxx_get_dma_tgt(card, addr8);
@@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 			bv_len -= RSXX_HW_BLK_SIZE;
 		}
 	} else {
-		bio_for_each_segment(bvec, bio, i) {
-			bv_len = bvec->bv_len;
-			bv_off = bvec->bv_offset;
+		bio_for_each_segment(bvec, bio, iter) {
+			bv_len = bvec.bv_len;
+			bv_off = bvec.bv_offset;
 
 			while (bv_len > 0) {
 				tgt = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 				st = rsxx_queue_dma(card, &dma_list[tgt],
 							bio_data_dir(bio),
 							dma_off, dma_len,
-							laddr, bvec->bv_page,
+							laddr, bvec.bv_page,
 							bv_off, cb, cb_data);
 				if (st)
 					goto bvec_err;
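A subtlety visible in the rsxx hunks: REQ_DISCARD bios describe a sector range but carry no data pages, so the driver walks bi_iter.bi_size directly rather than iterating segments. Condensed sketch, with queue_one() and BLK_SIZE as hypothetical placeholders:

	/* Sketch: discards have no biovec to iterate, so walk the size. */
	if (bio->bi_rw & REQ_DISCARD) {
		unsigned int left = bio->bi_iter.bi_size;
		u64 addr = (u64)bio->bi_iter.bi_sector << 9;

		while (left > 0) {
			queue_one(addr, BLK_SIZE);	/* hypothetical */
			addr += BLK_SIZE;
			left -= BLK_SIZE;
		}
	}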
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index ad70868f8a96..4cf81b5bf0f7 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -108,8 +108,7 @@ struct cardinfo {
 						 * have been written
 						 */
 	struct bio	*bio, *currentbio, **biotail;
-	int		current_idx;
-	sector_t	current_sector;
+	struct bvec_iter current_iter;
 
 	struct request_queue *queue;
 
@@ -118,7 +117,7 @@ struct cardinfo {
 		struct mm_dma_desc	*desc;
 		int			cnt, headcnt;
 		struct bio		*bio, **biotail;
-		int			idx;
+		struct bvec_iter	iter;
 	} mm_pages[2];
#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
 
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
 	dma_addr_t dma_handle;
 	int offset;
 	struct bio *bio;
-	struct bio_vec *vec;
-	int idx;
+	struct bio_vec vec;
 	int rw;
-	int len;
 
 	bio = card->currentbio;
 	if (!bio && card->bio) {
 		card->currentbio = card->bio;
-		card->current_idx = card->bio->bi_idx;
-		card->current_sector = card->bio->bi_sector;
+		card->current_iter = card->bio->bi_iter;
 		card->bio = card->bio->bi_next;
 		if (card->bio == NULL)
 			card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
 	}
 	if (!bio)
 		return 0;
-	idx = card->current_idx;
 
 	rw = bio_rw(bio);
 	if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
 		return 0;
 
-	vec = bio_iovec_idx(bio, idx);
-	len = vec->bv_len;
+	vec = bio_iter_iovec(bio, card->current_iter);
+
 	dma_handle = pci_map_page(card->dev,
-				  vec->bv_page,
-				  vec->bv_offset,
-				  len,
+				  vec.bv_page,
+				  vec.bv_offset,
+				  vec.bv_len,
 				  (rw == READ) ?
 				  PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
 
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
 	desc = &p->desc[p->cnt];
 	p->cnt++;
 	if (p->bio == NULL)
-		p->idx = idx;
+		p->iter = card->current_iter;
 	if ((p->biotail) != &bio->bi_next) {
 		*(p->biotail) = bio;
 		p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
 	desc->data_dma_handle = dma_handle;
 
 	desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
-	desc->local_addr = cpu_to_le64(card->current_sector << 9);
-	desc->transfer_size = cpu_to_le32(len);
+	desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
+	desc->transfer_size = cpu_to_le32(vec.bv_len);
 	offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
 	desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
 	desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
 		desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
 	desc->sem_control_bits = desc->control_bits;
 
-	card->current_sector += (len >> 9);
-	idx++;
-	card->current_idx = idx;
-	if (idx >= bio->bi_vcnt)
+
+	bio_advance_iter(bio, &card->current_iter, vec.bv_len);
+	if (!card->current_iter.bi_size)
 		card->currentbio = NULL;
 
 	return 1;
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
 		struct mm_dma_desc *desc = &page->desc[page->headcnt];
 		int control = le32_to_cpu(desc->sem_control_bits);
 		int last = 0;
-		int idx;
+		struct bio_vec vec;
 
 		if (!(control & DMASCR_DMA_COMPLETE)) {
 			control = dma_status;
 			last = 1;
 		}
+
 		page->headcnt++;
-		idx = page->idx;
-		page->idx++;
-		if (page->idx >= bio->bi_vcnt) {
+		vec = bio_iter_iovec(bio, page->iter);
+		bio_advance_iter(bio, &page->iter, vec.bv_len);
+
+		if (!page->iter.bi_size) {
 			page->bio = bio->bi_next;
 			if (page->bio)
-				page->idx = page->bio->bi_idx;
+				page->iter = page->bio->bi_iter;
 		}
 
 		pci_unmap_page(card->dev, desc->data_dma_handle,
-			       bio_iovec_idx(bio, idx)->bv_len,
+			       vec.bv_len,
 			       (control & DMASCR_TRANSFER_READ) ?
 			       PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
 		if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
 	pr_debug("mm_make_request %llu %u\n",
-		 (unsigned long long)bio->bi_sector, bio->bi_size);
+		 (unsigned long long)bio->bi_iter.bi_sector,
+		 bio->bi_iter.bi_size);
 
 	spin_lock_irq(&card->lock);
 	*card->biotail = bio;
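umem keeps long-lived cursors into bios that it drains one DMA descriptor at a time, which is exactly what struct bvec_iter is for: snapshot the bio's bi_iter, read the current vector with bio_iter_iovec(), and step with bio_advance_iter(). The cursor pattern in isolation (handle_vec() is a stand-in):

	/* Sketch: consume a bio one segment per call via a saved iterator. */
	struct cursor {
		struct bio *bio;
		struct bvec_iter iter;	/* private copy; the bio itself is untouched */
	};

	static int consume_one(struct cursor *c)
	{
		struct bio_vec vec;

		if (!c->iter.bi_size)
			return 0;		/* nothing left */

		vec = bio_iter_iovec(c->bio, c->iter);
		handle_vec(vec.bv_page, vec.bv_offset, vec.bv_len);
		bio_advance_iter(c->bio, &c->iter, vec.bv_len);
		return 1;
	}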
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6620b73d0490..4b97b86da926 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 			bio->bi_bdev    = preq.bdev;
 			bio->bi_private = pending_req;
 			bio->bi_end_io  = end_block_io_op;
-			bio->bi_sector  = preq.sector_number;
+			bio->bi_iter.bi_sector  = preq.sector_number;
 		}
 
 		preq.sector_number += seg[i].nsec;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index f9c43f91f03e..8dcfb54f1603 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
 			for (i = 0; i < pending; i++) {
 				offset = (i * segs * PAGE_SIZE) >> 9;
 				size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
-					   (unsigned int)(bio->bi_size >> 9) - offset);
+					   (unsigned int)bio_sectors(bio) - offset);
 				cloned_bio = bio_clone(bio, GFP_NOIO);
 				BUG_ON(cloned_bio == NULL);
 				bio_trim(cloned_bio, offset, size);
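Several of the one-line fixes in this pull simply swap open-coded arithmetic for the accessor helpers, which stay correct wherever the bio's iterator currently points. Assuming the 3.14-era definitions:

	/* bio_sectors(bio)    expands to bio->bi_iter.bi_size >> 9            */
	/* bio_end_sector(bio) expands to bi_iter.bi_sector + bio_sectors(bio) */
	unsigned int nsec = bio_sectors(bio);
	sector_t end = bio_end_sector(bio);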
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 754f43177483..dbdbca5a9591 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -280,7 +280,6 @@ struct bcache_device {
 	unsigned long		sectors_dirty_last;
 	long			sectors_dirty_derivative;
 
-	mempool_t		*unaligned_bvec;
 	struct bio_set		*bio_split;
 
 	unsigned		data_csum:1;
@@ -902,7 +901,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
 void __bch_submit_bbio(struct bio *, struct cache_set *);
 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 31bb53fcc67a..946ecd3b048b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
 
 	bio = bch_bbio_alloc(b->c);
 	bio->bi_rw = REQ_META|READ_SYNC;
-	bio->bi_size = KEY_SIZE(&b->key) << 9;
+	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
 	bio->bi_end_io = btree_node_read_endio;
 	bio->bi_private = &cl;
 
@@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl)
 	struct bio_vec *bv;
 	int n;
 
-	__bio_for_each_segment(bv, b->bio, n, 0)
+	bio_for_each_segment_all(bv, b->bio, n)
 		__free_page(bv->bv_page);
 
 	__btree_node_write_done(cl);
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
 	b->bio->bi_end_io = btree_node_write_endio;
 	b->bio->bi_private = cl;
 	b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
-	b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+	b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
 	bch_bio_map(b->bio, i);
 
 	/*
@@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b)
 		struct bio_vec *bv;
 		void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
 
-		bio_for_each_segment(bv, b->bio, j)
+		bio_for_each_segment_all(bv, b->bio, j)
 			memcpy(page_address(bv->bv_page),
 			       base + j * PAGE_SIZE, PAGE_SIZE);
 
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..03cb4d114e16 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
 	char name[BDEVNAME_SIZE];
 	struct bio *check;
-	struct bio_vec *bv;
+	struct bio_vec bv, *bv2;
+	struct bvec_iter iter;
 	int i;
 
 	check = bio_clone(bio, GFP_NOIO);
@@ -185,23 +186,23 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 
 	submit_bio_wait(READ_SYNC, check);
 
-	bio_for_each_segment(bv, bio, i) {
-		void *p1 = kmap_atomic(bv->bv_page);
-		void *p2 = page_address(check->bi_io_vec[i].bv_page);
+	bio_for_each_segment(bv, bio, iter) {
+		void *p1 = kmap_atomic(bv.bv_page);
+		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
 
-		cache_set_err_on(memcmp(p1 + bv->bv_offset,
-					p2 + bv->bv_offset,
-					bv->bv_len),
+		cache_set_err_on(memcmp(p1 + bv.bv_offset,
+					p2 + bv.bv_offset,
+					bv.bv_len),
 				 dc->disk.c,
 				 "verify failed at dev %s sector %llu",
 				 bdevname(dc->bdev, name),
-				 (uint64_t) bio->bi_sector);
+				 (uint64_t) bio->bi_iter.bi_sector);
 
 		kunmap_atomic(p1);
 	}
 
-	bio_for_each_segment_all(bv, check, i)
-		__free_page(bv->bv_page);
+	bio_for_each_segment_all(bv2, check, i)
+		__free_page(bv2->bv_page);
 out_put:
 	bio_put(check);
 }
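bch_data_verify() is a reminder that two distinct iterators survive the series: bio_for_each_segment() yields struct bio_vec by value and starts from the bio's current bi_iter, while bio_for_each_segment_all() still yields pointers and visits every allocated vector, which is only legal on a bio you own and have not submitted. Sketch (use_page() is hypothetical):

	struct bio_vec bv;		/* by value */
	struct bvec_iter iter;
	struct bio_vec *bvp;		/* by pointer */
	int i;

	bio_for_each_segment(bv, bio, iter)	/* unprocessed data only */
		use_page(bv.bv_page);

	bio_for_each_segment_all(bvp, bio, i)	/* whole bi_io_vec; owner only */
		__free_page(bvp->bv_page);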
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..fa028fa82df4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,178 +11,40 @@
 
#include <linux/blkdev.h>
 
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
-	struct bio *p = bio->bi_private;
-
-	bio_endio(p, error);
-	bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
-	if (bio->bi_idx) {
-		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
-		memcpy(clone->bi_io_vec,
-		       bio_iovec(bio),
-		       bio_segments(bio) * sizeof(struct bio_vec));
-
-		clone->bi_sector	= bio->bi_sector;
-		clone->bi_bdev		= bio->bi_bdev;
-		clone->bi_rw		= bio->bi_rw;
-		clone->bi_vcnt		= bio_segments(bio);
-		clone->bi_size		= bio->bi_size;
-
-		clone->bi_private	= bio;
-		clone->bi_end_io	= bch_bi_idx_hack_endio;
-
-		bio = clone;
-	}
-
-	/*
-	 * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
-	 * bios might have had more than that (before we split them per device
-	 * limitations).
-	 *
-	 * To be taken out once immutable bvec stuff is in.
-	 */
-	bio->bi_max_vecs = bio->bi_vcnt;
-
-	generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio:	bio to split
- * @sectors:	number of sectors to split from the front of @bio
- * @gfp:	gfp mask
- * @bs:		bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundry; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
-			  gfp_t gfp, struct bio_set *bs)
-{
-	unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
-	struct bio_vec *bv;
-	struct bio *ret = NULL;
-
-	BUG_ON(sectors <= 0);
-
-	if (sectors >= bio_sectors(bio))
-		return bio;
-
-	if (bio->bi_rw & REQ_DISCARD) {
-		ret = bio_alloc_bioset(gfp, 1, bs);
-		if (!ret)
-			return NULL;
-		idx = 0;
-		goto out;
-	}
-
-	bio_for_each_segment(bv, bio, idx) {
-		vcnt = idx - bio->bi_idx;
-
-		if (!nbytes) {
-			ret = bio_alloc_bioset(gfp, vcnt, bs);
-			if (!ret)
-				return NULL;
-
-			memcpy(ret->bi_io_vec, bio_iovec(bio),
-			       sizeof(struct bio_vec) * vcnt);
-
-			break;
-		} else if (nbytes < bv->bv_len) {
-			ret = bio_alloc_bioset(gfp, ++vcnt, bs);
-			if (!ret)
-				return NULL;
-
-			memcpy(ret->bi_io_vec, bio_iovec(bio),
-			       sizeof(struct bio_vec) * vcnt);
-
-			ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
-			bv->bv_offset	+= nbytes;
-			bv->bv_len	-= nbytes;
-			break;
-		}
-
-		nbytes -= bv->bv_len;
-	}
-out:
-	ret->bi_bdev	= bio->bi_bdev;
-	ret->bi_sector	= bio->bi_sector;
-	ret->bi_size	= sectors << 9;
-	ret->bi_rw	= bio->bi_rw;
-	ret->bi_vcnt	= vcnt;
-	ret->bi_max_vecs = vcnt;
-
-	bio->bi_sector	+= sectors;
-	bio->bi_size	-= sectors << 9;
-	bio->bi_idx	 = idx;
-
-	if (bio_integrity(bio)) {
-		if (bio_integrity_clone(ret, bio, gfp)) {
-			bio_put(ret);
-			return NULL;
-		}
-
-		bio_integrity_trim(ret, 0, bio_sectors(ret));
-		bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
-	}
-
-	return ret;
-}
-
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-	unsigned ret = bio_sectors(bio);
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-				      queue_max_segments(q));
+	struct bio_vec bv;
+	struct bvec_iter iter;
+	unsigned ret = 0, seg = 0;
 
 	if (bio->bi_rw & REQ_DISCARD)
-		return min(ret, q->limits.max_discard_sectors);
+		return min(bio_sectors(bio), q->limits.max_discard_sectors);
 
-	if (bio_segments(bio) > max_segments ||
-	    q->merge_bvec_fn) {
-		struct bio_vec *bv;
-		int i, seg = 0;
-
-		ret = 0;
-
-		bio_for_each_segment(bv, bio, i) {
-			struct bvec_merge_data bvm = {
-				.bi_bdev	= bio->bi_bdev,
-				.bi_sector	= bio->bi_sector,
-				.bi_size	= ret << 9,
-				.bi_rw		= bio->bi_rw,
-			};
-
-			if (seg == max_segments)
-				break;
+	bio_for_each_segment(bv, bio, iter) {
+		struct bvec_merge_data bvm = {
+			.bi_bdev	= bio->bi_bdev,
+			.bi_sector	= bio->bi_iter.bi_sector,
+			.bi_size	= ret << 9,
+			.bi_rw		= bio->bi_rw,
+		};
+
+		if (seg == min_t(unsigned, BIO_MAX_PAGES,
+				 queue_max_segments(q)))
+			break;
 
-			if (q->merge_bvec_fn &&
-			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
-				break;
+		if (q->merge_bvec_fn &&
+		    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+			break;
 
-			seg++;
-			ret += bv->bv_len >> 9;
-		}
+		seg++;
+		ret += bv.bv_len >> 9;
 	}
 
 	ret = min(ret, queue_max_sectors(q));
 
 	WARN_ON(!ret);
-	ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+	ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
 
 	return ret;
 }
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
 
 	s->bio->bi_end_io = s->bi_end_io;
 	s->bio->bi_private = s->bi_private;
-	bio_endio(s->bio, 0);
+	bio_endio_nodec(s->bio, 0);
 
 	closure_debug_destroy(&s->cl);
 	mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 	bio_get(bio);
 
 	do {
-		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
-				  GFP_NOIO, s->p->bio_split);
+		n = bio_next_split(bio, bch_bio_max_sectors(bio),
+				   GFP_NOIO, s->p->bio_split);
 
 		n->bi_end_io	= bch_bio_submit_split_endio;
 		n->bi_private	= &s->cl;
 
 		closure_get(&s->cl);
-		bch_generic_make_request_hack(n);
+		generic_make_request(n);
 	} while (n != bio);
 
 	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
submit:
-	bch_generic_make_request_hack(bio);
+	generic_make_request(bio);
 }
 
 /* Bios with headers */
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 
-	bio->bi_sector	= PTR_OFFSET(&b->key, 0);
+	bio->bi_iter.bi_sector	= PTR_OFFSET(&b->key, 0);
 	bio->bi_bdev	= PTR_CACHE(c, &b->key, 0)->bdev;
 
 	b->submit_time_us = local_clock_us();
 	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
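The two hacks deleted here are exactly what the core now provides: bio_next_split() returns the bio itself once no split is needed, so a driver's submit loop shrinks to the shape below. This is a sketch, not bcache's code; bcache ties completions together with closures, while the generic route is bio_chain(), and max_sectors() stands in for a device limit:

	/* Sketch: split-and-submit loop built on bio_next_split(). */
	static void submit_in_pieces(struct bio *bio, struct bio_set *bs)
	{
		struct bio *n;

		do {
			n = bio_next_split(bio, max_sectors(bio), GFP_NOIO, bs);
			if (n != bio)
				bio_chain(n, bio);	/* assumed completion plumbing */
			generic_make_request(n);
		} while (n != bio);
	}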
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..7eafdf09a0ae 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset;
 		len = min_t(unsigned, left, PAGE_SECTORS * 8);
 
 		bio_reset(bio);
-		bio->bi_sector	= bucket + offset;
+		bio->bi_iter.bi_sector	= bucket + offset;
 		bio->bi_bdev	= ca->bdev;
 		bio->bi_rw	= READ;
-		bio->bi_size	= len << 9;
+		bio->bi_iter.bi_size	= len << 9;
 
 		bio->bi_end_io	= journal_read_endio;
 		bio->bi_private = &cl;
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
 		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
 		bio_init(bio);
-		bio->bi_sector		= bucket_to_sector(ca->set,
+		bio->bi_iter.bi_sector	= bucket_to_sector(ca->set,
 						ca->sb.d[ja->discard_idx]);
 		bio->bi_bdev		= ca->bdev;
 		bio->bi_rw		= REQ_WRITE|REQ_DISCARD;
 		bio->bi_max_vecs	= 1;
 		bio->bi_io_vec		= bio->bi_inline_vecs;
-		bio->bi_size		= bucket_bytes(ca);
+		bio->bi_iter.bi_size	= bucket_bytes(ca);
 		bio->bi_end_io		= journal_discard_endio;
 
 		closure_get(&ca->set->cl);
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
 		atomic_long_add(sectors, &ca->meta_sectors_written);
 
 		bio_reset(bio);
-		bio->bi_sector	= PTR_OFFSET(k, i);
+		bio->bi_iter.bi_sector	= PTR_OFFSET(k, i);
 		bio->bi_bdev	= ca->bdev;
 		bio->bi_rw	= REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
-		bio->bi_size	= sectors << 9;
+		bio->bi_iter.bi_size	= sectors << 9;
 
 		bio->bi_end_io	= journal_write_endio;
 		bio->bi_private = w;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index f2f0998c4a91..052bd24d24b4 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -86,7 +86,7 @@ static void moving_init(struct moving_io *io)
 	bio_get(bio);
 	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-	bio->bi_size		= KEY_SIZE(&io->w->key) << 9;
+	bio->bi_iter.bi_size	= KEY_SIZE(&io->w->key) << 9;
 	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&io->w->key),
 					       PAGE_SECTORS);
 	bio->bi_private		= &io->cl;
@@ -102,7 +102,7 @@ static void write_moving(struct closure *cl)
 	if (!op->error) {
 		moving_init(io);
 
-		io->bio.bio.bi_sector = KEY_START(&io->w->key);
+		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
 		op->write_prio = 1;
 		op->bio = &io->bio.bio;
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 61bcfc21d2a0..c906571997d7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -197,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
197 197
198static void bio_csum(struct bio *bio, struct bkey *k) 198static void bio_csum(struct bio *bio, struct bkey *k)
199{ 199{
200 struct bio_vec *bv; 200 struct bio_vec bv;
201 struct bvec_iter iter;
201 uint64_t csum = 0; 202 uint64_t csum = 0;
202 int i;
203 203
204 bio_for_each_segment(bv, bio, i) { 204 bio_for_each_segment(bv, bio, iter) {
205 void *d = kmap(bv->bv_page) + bv->bv_offset; 205 void *d = kmap(bv.bv_page) + bv.bv_offset;
206 csum = bch_crc64_update(csum, d, bv->bv_len); 206 csum = bch_crc64_update(csum, d, bv.bv_len);
207 kunmap(bv->bv_page); 207 kunmap(bv.bv_page);
208 } 208 }
209 209
210 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1); 210 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -260,7 +260,7 @@ static void bch_data_invalidate(struct closure *cl)
260 struct bio *bio = op->bio; 260 struct bio *bio = op->bio;
261 261
262 pr_debug("invalidating %i sectors from %llu", 262 pr_debug("invalidating %i sectors from %llu",
263 bio_sectors(bio), (uint64_t) bio->bi_sector); 263 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
264 264
265 while (bio_sectors(bio)) { 265 while (bio_sectors(bio)) {
266 unsigned sectors = min(bio_sectors(bio), 266 unsigned sectors = min(bio_sectors(bio),
@@ -269,11 +269,11 @@ static void bch_data_invalidate(struct closure *cl)
269 if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) 269 if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
270 goto out; 270 goto out;
271 271
272 bio->bi_sector += sectors; 272 bio->bi_iter.bi_sector += sectors;
273 bio->bi_size -= sectors << 9; 273 bio->bi_iter.bi_size -= sectors << 9;
274 274
275 bch_keylist_add(&op->insert_keys, 275 bch_keylist_add(&op->insert_keys,
276 &KEY(op->inode, bio->bi_sector, sectors)); 276 &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
277 } 277 }
278 278
279 op->insert_data_done = true; 279 op->insert_data_done = true;
@@ -363,14 +363,14 @@ static void bch_data_insert_start(struct closure *cl)
363 k = op->insert_keys.top; 363 k = op->insert_keys.top;
364 bkey_init(k); 364 bkey_init(k);
365 SET_KEY_INODE(k, op->inode); 365 SET_KEY_INODE(k, op->inode);
366 SET_KEY_OFFSET(k, bio->bi_sector); 366 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
367 367
368 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), 368 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
369 op->write_point, op->write_prio, 369 op->write_point, op->write_prio,
370 op->writeback)) 370 op->writeback))
371 goto err; 371 goto err;
372 372
373 n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); 373 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
374 374
375 n->bi_end_io = bch_data_insert_endio; 375 n->bi_end_io = bch_data_insert_endio;
376 n->bi_private = cl; 376 n->bi_private = cl;
@@ -521,7 +521,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
521 (bio->bi_rw & REQ_WRITE))) 521 (bio->bi_rw & REQ_WRITE)))
522 goto skip; 522 goto skip;
523 523
524 if (bio->bi_sector & (c->sb.block_size - 1) || 524 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
525 bio_sectors(bio) & (c->sb.block_size - 1)) { 525 bio_sectors(bio) & (c->sb.block_size - 1)) {
526 pr_debug("skipping unaligned io"); 526 pr_debug("skipping unaligned io");
527 goto skip; 527 goto skip;
@@ -545,8 +545,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
545 545
546 spin_lock(&dc->io_lock); 546 spin_lock(&dc->io_lock);
547 547
548 hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) 548 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
549 if (i->last == bio->bi_sector && 549 if (i->last == bio->bi_iter.bi_sector &&
550 time_before(jiffies, i->jiffies)) 550 time_before(jiffies, i->jiffies))
551 goto found; 551 goto found;
552 552
@@ -555,8 +555,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
555 add_sequential(task); 555 add_sequential(task);
556 i->sequential = 0; 556 i->sequential = 0;
557found: 557found:
558 if (i->sequential + bio->bi_size > i->sequential) 558 if (i->sequential + bio->bi_iter.bi_size > i->sequential)
559 i->sequential += bio->bi_size; 559 i->sequential += bio->bi_iter.bi_size;
560 560
561 i->last = bio_end_sector(bio); 561 i->last = bio_end_sector(bio);
562 i->jiffies = jiffies + msecs_to_jiffies(5000); 562 i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -605,7 +605,6 @@ struct search {
605 unsigned insert_bio_sectors; 605 unsigned insert_bio_sectors;
606 606
607 unsigned recoverable:1; 607 unsigned recoverable:1;
608 unsigned unaligned_bvec:1;
609 unsigned write:1; 608 unsigned write:1;
610 unsigned read_dirty_data:1; 609 unsigned read_dirty_data:1;
611 610
@@ -649,15 +648,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
649 struct bkey *bio_key; 648 struct bkey *bio_key;
650 unsigned ptr; 649 unsigned ptr;
651 650
652 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0) 651 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
653 return MAP_CONTINUE; 652 return MAP_CONTINUE;
654 653
655 if (KEY_INODE(k) != s->iop.inode || 654 if (KEY_INODE(k) != s->iop.inode ||
656 KEY_START(k) > bio->bi_sector) { 655 KEY_START(k) > bio->bi_iter.bi_sector) {
657 unsigned bio_sectors = bio_sectors(bio); 656 unsigned bio_sectors = bio_sectors(bio);
658 unsigned sectors = KEY_INODE(k) == s->iop.inode 657 unsigned sectors = KEY_INODE(k) == s->iop.inode
659 ? min_t(uint64_t, INT_MAX, 658 ? min_t(uint64_t, INT_MAX,
660 KEY_START(k) - bio->bi_sector) 659 KEY_START(k) - bio->bi_iter.bi_sector)
661 : INT_MAX; 660 : INT_MAX;
662 661
663 int ret = s->d->cache_miss(b, s, bio, sectors); 662 int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -679,14 +678,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
679 if (KEY_DIRTY(k)) 678 if (KEY_DIRTY(k))
680 s->read_dirty_data = true; 679 s->read_dirty_data = true;
681 680
682 n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, 681 n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
683 KEY_OFFSET(k) - bio->bi_sector), 682 KEY_OFFSET(k) - bio->bi_iter.bi_sector),
684 GFP_NOIO, s->d->bio_split); 683 GFP_NOIO, s->d->bio_split);
685 684
686 bio_key = &container_of(n, struct bbio, bio)->key; 685 bio_key = &container_of(n, struct bbio, bio)->key;
687 bch_bkey_copy_single_ptr(bio_key, k, ptr); 686 bch_bkey_copy_single_ptr(bio_key, k, ptr);
688 687
689 bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key); 688 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
690 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); 689 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
691 690
692 n->bi_end_io = bch_cache_read_endio; 691 n->bi_end_io = bch_cache_read_endio;
@@ -713,7 +712,7 @@ static void cache_lookup(struct closure *cl)
713 struct bio *bio = &s->bio.bio; 712 struct bio *bio = &s->bio.bio;
714 713
715 int ret = bch_btree_map_keys(&s->op, s->iop.c, 714 int ret = bch_btree_map_keys(&s->op, s->iop.c,
716 &KEY(s->iop.inode, bio->bi_sector, 0), 715 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
717 cache_lookup_fn, MAP_END_KEY); 716 cache_lookup_fn, MAP_END_KEY);
718 if (ret == -EAGAIN) 717 if (ret == -EAGAIN)
719 continue_at(cl, cache_lookup, bcache_wq); 718 continue_at(cl, cache_lookup, bcache_wq);
@@ -758,10 +757,12 @@ static void bio_complete(struct search *s)
758static void do_bio_hook(struct search *s) 757static void do_bio_hook(struct search *s)
759{ 758{
760 struct bio *bio = &s->bio.bio; 759 struct bio *bio = &s->bio.bio;
761 memcpy(bio, s->orig_bio, sizeof(struct bio));
762 760
761 bio_init(bio);
762 __bio_clone_fast(bio, s->orig_bio);
763 bio->bi_end_io = request_endio; 763 bio->bi_end_io = request_endio;
764 bio->bi_private = &s->cl; 764 bio->bi_private = &s->cl;
765
765 atomic_set(&bio->bi_cnt, 3); 766 atomic_set(&bio->bi_cnt, 3);
766} 767}
767 768
@@ -773,9 +774,6 @@ static void search_free(struct closure *cl)
773 if (s->iop.bio) 774 if (s->iop.bio)
774 bio_put(s->iop.bio); 775 bio_put(s->iop.bio);
775 776
776 if (s->unaligned_bvec)
777 mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
778
779 closure_debug_destroy(cl); 777 closure_debug_destroy(cl);
780 mempool_free(s, s->d->c->search); 778 mempool_free(s, s->d->c->search);
781} 779}
@@ -783,7 +781,6 @@ static void search_free(struct closure *cl)
783static struct search *search_alloc(struct bio *bio, struct bcache_device *d) 781static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
784{ 782{
785 struct search *s; 783 struct search *s;
786 struct bio_vec *bv;
787 784
788 s = mempool_alloc(d->c->search, GFP_NOIO); 785 s = mempool_alloc(d->c->search, GFP_NOIO);
789 memset(s, 0, offsetof(struct search, iop.insert_keys)); 786 memset(s, 0, offsetof(struct search, iop.insert_keys));
@@ -802,15 +799,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
802 s->start_time = jiffies; 799 s->start_time = jiffies;
803 do_bio_hook(s); 800 do_bio_hook(s);
804 801
805 if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
806 bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
807 memcpy(bv, bio_iovec(bio),
808 sizeof(struct bio_vec) * bio_segments(bio));
809
810 s->bio.bio.bi_io_vec = bv;
811 s->unaligned_bvec = 1;
812 }
813
814 return s; 802 return s;
815} 803}
816 804
@@ -849,26 +837,13 @@ static void cached_dev_read_error(struct closure *cl)
849{ 837{
850 struct search *s = container_of(cl, struct search, cl); 838 struct search *s = container_of(cl, struct search, cl);
851 struct bio *bio = &s->bio.bio; 839 struct bio *bio = &s->bio.bio;
852 struct bio_vec *bv;
853 int i;
854 840
855 if (s->recoverable) { 841 if (s->recoverable) {
856 /* Retry from the backing device: */ 842 /* Retry from the backing device: */
857 trace_bcache_read_retry(s->orig_bio); 843 trace_bcache_read_retry(s->orig_bio);
858 844
859 s->iop.error = 0; 845 s->iop.error = 0;
860 bv = s->bio.bio.bi_io_vec;
861 do_bio_hook(s); 846 do_bio_hook(s);
862 s->bio.bio.bi_io_vec = bv;
863
864 if (!s->unaligned_bvec)
865 bio_for_each_segment(bv, s->orig_bio, i)
866 bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
867 else
868 memcpy(s->bio.bio.bi_io_vec,
869 bio_iovec(s->orig_bio),
870 sizeof(struct bio_vec) *
871 bio_segments(s->orig_bio));
872 847
873 /* XXX: invalidate cache */ 848 /* XXX: invalidate cache */
874 849
@@ -893,9 +868,9 @@ static void cached_dev_read_done(struct closure *cl)
893 868
894 if (s->iop.bio) { 869 if (s->iop.bio) {
895 bio_reset(s->iop.bio); 870 bio_reset(s->iop.bio);
896 s->iop.bio->bi_sector = s->cache_miss->bi_sector; 871 s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
897 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; 872 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
898 s->iop.bio->bi_size = s->insert_bio_sectors << 9; 873 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
899 bch_bio_map(s->iop.bio, NULL); 874 bch_bio_map(s->iop.bio, NULL);
900 875
901 bio_copy_data(s->cache_miss, s->iop.bio); 876 bio_copy_data(s->cache_miss, s->iop.bio);
@@ -904,8 +879,7 @@ static void cached_dev_read_done(struct closure *cl)
904 s->cache_miss = NULL; 879 s->cache_miss = NULL;
905 } 880 }
906 881
907 if (verify(dc, &s->bio.bio) && s->recoverable && 882 if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
908 !s->unaligned_bvec && !s->read_dirty_data)
909 bch_data_verify(dc, s->orig_bio); 883 bch_data_verify(dc, s->orig_bio);
910 884
911 bio_complete(s); 885 bio_complete(s);
@@ -945,7 +919,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
945 struct bio *miss, *cache_bio; 919 struct bio *miss, *cache_bio;
946 920
947 if (s->cache_miss || s->iop.bypass) { 921 if (s->cache_miss || s->iop.bypass) {
948 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); 922 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
949 ret = miss == bio ? MAP_DONE : MAP_CONTINUE; 923 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
950 goto out_submit; 924 goto out_submit;
951 } 925 }
@@ -959,7 +933,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
959 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); 933 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
960 934
961 s->iop.replace_key = KEY(s->iop.inode, 935 s->iop.replace_key = KEY(s->iop.inode,
962 bio->bi_sector + s->insert_bio_sectors, 936 bio->bi_iter.bi_sector + s->insert_bio_sectors,
963 s->insert_bio_sectors); 937 s->insert_bio_sectors);
964 938
965 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); 939 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -968,7 +942,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
968 942
969 s->iop.replace = true; 943 s->iop.replace = true;
970 944
971 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); 945 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
972 946
973 /* btree_search_recurse()'s btree iterator is no good anymore */ 947 /* btree_search_recurse()'s btree iterator is no good anymore */
974 ret = miss == bio ? MAP_DONE : -EINTR; 948 ret = miss == bio ? MAP_DONE : -EINTR;
@@ -979,9 +953,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
979 if (!cache_bio) 953 if (!cache_bio)
980 goto out_submit; 954 goto out_submit;
981 955
982 cache_bio->bi_sector = miss->bi_sector; 956 cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
983 cache_bio->bi_bdev = miss->bi_bdev; 957 cache_bio->bi_bdev = miss->bi_bdev;
984 cache_bio->bi_size = s->insert_bio_sectors << 9; 958 cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
985 959
986 cache_bio->bi_end_io = request_endio; 960 cache_bio->bi_end_io = request_endio;
987 cache_bio->bi_private = &s->cl; 961 cache_bio->bi_private = &s->cl;
@@ -1031,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
1031{ 1005{
1032 struct closure *cl = &s->cl; 1006 struct closure *cl = &s->cl;
1033 struct bio *bio = &s->bio.bio; 1007 struct bio *bio = &s->bio.bio;
1034 struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0); 1008 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
1035 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); 1009 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
1036 1010
1037 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); 1011 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1087,8 +1061,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
1087 closure_bio_submit(flush, cl, s->d); 1061 closure_bio_submit(flush, cl, s->d);
1088 } 1062 }
1089 } else { 1063 } else {
1090 s->iop.bio = bio_clone_bioset(bio, GFP_NOIO, 1064 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
1091 dc->disk.bio_split);
1092 1065
1093 closure_bio_submit(bio, cl, s->d); 1066 closure_bio_submit(bio, cl, s->d);
1094 } 1067 }
@@ -1126,13 +1099,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
1126 part_stat_unlock(); 1099 part_stat_unlock();
1127 1100
1128 bio->bi_bdev = dc->bdev; 1101 bio->bi_bdev = dc->bdev;
1129 bio->bi_sector += dc->sb.data_offset; 1102 bio->bi_iter.bi_sector += dc->sb.data_offset;
1130 1103
1131 if (cached_dev_get(dc)) { 1104 if (cached_dev_get(dc)) {
1132 s = search_alloc(bio, d); 1105 s = search_alloc(bio, d);
1133 trace_bcache_request_start(s->d, bio); 1106 trace_bcache_request_start(s->d, bio);
1134 1107
1135 if (!bio->bi_size) { 1108 if (!bio->bi_iter.bi_size) {
1136 /* 1109 /*
1137 * can't call bch_journal_meta from under 1110 * can't call bch_journal_meta from under
1138 * generic_make_request 1111 * generic_make_request
@@ -1204,24 +1177,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
1204static int flash_dev_cache_miss(struct btree *b, struct search *s, 1177static int flash_dev_cache_miss(struct btree *b, struct search *s,
1205 struct bio *bio, unsigned sectors) 1178 struct bio *bio, unsigned sectors)
1206{ 1179{
1207 struct bio_vec *bv; 1180 struct bio_vec bv;
1208 int i; 1181 struct bvec_iter iter;
1209 1182
1210 /* Zero fill bio */ 1183 /* Zero fill bio */
1211 1184
1212 bio_for_each_segment(bv, bio, i) { 1185 bio_for_each_segment(bv, bio, iter) {
1213 unsigned j = min(bv->bv_len >> 9, sectors); 1186 unsigned j = min(bv.bv_len >> 9, sectors);
1214 1187
1215 void *p = kmap(bv->bv_page); 1188 void *p = kmap(bv.bv_page);
1216 memset(p + bv->bv_offset, 0, j << 9); 1189 memset(p + bv.bv_offset, 0, j << 9);
1217 kunmap(bv->bv_page); 1190 kunmap(bv.bv_page);
1218 1191
1219 sectors -= j; 1192 sectors -= j;
1220 } 1193 }
1221 1194
1222 bio_advance(bio, min(sectors << 9, bio->bi_size)); 1195 bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
1223 1196
1224 if (!bio->bi_size) 1197 if (!bio->bi_iter.bi_size)
1225 return MAP_DONE; 1198 return MAP_DONE;
1226 1199
1227 return MAP_CONTINUE; 1200 return MAP_CONTINUE;
@@ -1255,7 +1228,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1255 1228
1256 trace_bcache_request_start(s->d, bio); 1229 trace_bcache_request_start(s->d, bio);
1257 1230
1258 if (!bio->bi_size) { 1231 if (!bio->bi_iter.bi_size) {
1259 /* 1232 /*
1260 * can't call bch_journal_meta from under 1233 * can't call bch_journal_meta from under
1261 * generic_make_request 1234 * generic_make_request
@@ -1265,7 +1238,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1265 bcache_wq); 1238 bcache_wq);
1266 } else if (rw) { 1239 } else if (rw) {
1267 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, 1240 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1268 &KEY(d->id, bio->bi_sector, 0), 1241 &KEY(d->id, bio->bi_iter.bi_sector, 0),
1269 &KEY(d->id, bio_end_sector(bio), 0)); 1242 &KEY(d->id, bio_end_sector(bio), 0));
1270 1243
1271 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; 1244 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
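The request.c hunks above capture the heart of the immutable-biovec conversion: all position state (bi_sector, bi_size, bi_idx) moves into the embedded struct bvec_iter, clones are taken with bio_clone_fast() rather than bio_clone_bioset() since the vector is never modified, and bio_for_each_segment() now yields a struct bio_vec by value while a separate struct bvec_iter carries the cursor. A minimal sketch of the new iteration idiom, modeled on flash_dev_cache_miss() above (the helper name is invented for illustration):

        #include <linux/bio.h>
        #include <linux/highmem.h>
        #include <linux/kernel.h>
        #include <linux/string.h>

        /* Zero up to 'sectors' sectors of 'bio' in place.  'bv' is a value
         * copy of each segment; 'iter' carries all the position state that
         * used to live in bio->bi_idx and per-driver offset variables. */
        static void zero_fill_bio_sectors(struct bio *bio, unsigned sectors)
        {
                struct bio_vec bv;
                struct bvec_iter iter;

                bio_for_each_segment(bv, bio, iter) {
                        unsigned n = min(bv.bv_len >> 9, sectors);
                        void *p = kmap(bv.bv_page);

                        memset(p + bv.bv_offset, 0, n << 9);
                        kunmap(bv.bv_page);
                        sectors -= n;
                }
        }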
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c57bfa071a57..93d593f957f6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
233 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); 233 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
234 unsigned i; 234 unsigned i;
235 235
236 bio->bi_sector = SB_SECTOR; 236 bio->bi_iter.bi_sector = SB_SECTOR;
237 bio->bi_rw = REQ_SYNC|REQ_META; 237 bio->bi_rw = REQ_SYNC|REQ_META;
238 bio->bi_size = SB_SIZE; 238 bio->bi_iter.bi_size = SB_SIZE;
239 bch_bio_map(bio, NULL); 239 bch_bio_map(bio, NULL);
240 240
241 out->offset = cpu_to_le64(sb->offset); 241 out->offset = cpu_to_le64(sb->offset);
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
347 struct bio *bio = bch_bbio_alloc(c); 347 struct bio *bio = bch_bbio_alloc(c);
348 348
349 bio->bi_rw = REQ_SYNC|REQ_META|rw; 349 bio->bi_rw = REQ_SYNC|REQ_META|rw;
350 bio->bi_size = KEY_SIZE(k) << 9; 350 bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
351 351
352 bio->bi_end_io = uuid_endio; 352 bio->bi_end_io = uuid_endio;
353 bio->bi_private = cl; 353 bio->bi_private = cl;
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
503 503
504 closure_init_stack(cl); 504 closure_init_stack(cl);
505 505
506 bio->bi_sector = bucket * ca->sb.bucket_size; 506 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
507 bio->bi_bdev = ca->bdev; 507 bio->bi_bdev = ca->bdev;
508 bio->bi_rw = REQ_SYNC|REQ_META|rw; 508 bio->bi_rw = REQ_SYNC|REQ_META|rw;
509 bio->bi_size = bucket_bytes(ca); 509 bio->bi_iter.bi_size = bucket_bytes(ca);
510 510
511 bio->bi_end_io = prio_endio; 511 bio->bi_end_io = prio_endio;
512 bio->bi_private = ca; 512 bio->bi_private = ca;
@@ -739,8 +739,6 @@ static void bcache_device_free(struct bcache_device *d)
739 } 739 }
740 740
741 bio_split_pool_free(&d->bio_split_hook); 741 bio_split_pool_free(&d->bio_split_hook);
742 if (d->unaligned_bvec)
743 mempool_destroy(d->unaligned_bvec);
744 if (d->bio_split) 742 if (d->bio_split)
745 bioset_free(d->bio_split); 743 bioset_free(d->bio_split);
746 if (is_vmalloc_addr(d->full_dirty_stripes)) 744 if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +791,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
793 return minor; 791 return minor;
794 792
795 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 793 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
796 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
797 sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
798 bio_split_pool_init(&d->bio_split_hook) || 794 bio_split_pool_init(&d->bio_split_hook) ||
799 !(d->disk = alloc_disk(1))) { 795 !(d->disk = alloc_disk(1))) {
800 ida_simple_remove(&bcache_minor, minor); 796 ida_simple_remove(&bcache_minor, minor);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index bb37618e7664..db3ae4c2b223 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
224 224
225void bch_bio_map(struct bio *bio, void *base) 225void bch_bio_map(struct bio *bio, void *base)
226{ 226{
227 size_t size = bio->bi_size; 227 size_t size = bio->bi_iter.bi_size;
228 struct bio_vec *bv = bio->bi_io_vec; 228 struct bio_vec *bv = bio->bi_io_vec;
229 229
230 BUG_ON(!bio->bi_size); 230 BUG_ON(!bio->bi_iter.bi_size);
231 BUG_ON(bio->bi_vcnt); 231 BUG_ON(bio->bi_vcnt);
232 232
233 bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0; 233 bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6c44fe059c27..f4300e4c0114 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w)
111 if (!io->dc->writeback_percent) 111 if (!io->dc->writeback_percent)
112 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); 112 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
113 113
114 bio->bi_size = KEY_SIZE(&w->key) << 9; 114 bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
115 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); 115 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
116 bio->bi_private = w; 116 bio->bi_private = w;
117 bio->bi_io_vec = bio->bi_inline_vecs; 117 bio->bi_io_vec = bio->bi_inline_vecs;
@@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl)
184 184
185 dirty_init(w); 185 dirty_init(w);
186 io->bio.bi_rw = WRITE; 186 io->bio.bi_rw = WRITE;
187 io->bio.bi_sector = KEY_START(&w->key); 187 io->bio.bi_iter.bi_sector = KEY_START(&w->key);
188 io->bio.bi_bdev = io->dc->bdev; 188 io->bio.bi_bdev = io->dc->bdev;
189 io->bio.bi_end_io = dirty_endio; 189 io->bio.bi_end_io = dirty_endio;
190 190
@@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc)
253 io->dc = dc; 253 io->dc = dc;
254 254
255 dirty_init(w); 255 dirty_init(w);
256 io->bio.bi_sector = PTR_OFFSET(&w->key, 0); 256 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
257 io->bio.bi_bdev = PTR_CACHE(dc->disk.c, 257 io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
258 &w->key, 0)->bdev; 258 &w->key, 0)->bdev;
259 io->bio.bi_rw = READ; 259 io->bio.bi_rw = READ;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
50 return false; 50 return false;
51 51
52 if (dc->partial_stripes_expensive && 52 if (dc->partial_stripes_expensive &&
53 bcache_dev_stripe_dirty(dc, bio->bi_sector, 53 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
54 bio_sectors(bio))) 54 bio_sectors(bio)))
55 return true; 55 return true;
56 56
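Call sites such as should_writeback() keep using bio_sectors() and bio_end_sector() unchanged because those helpers were redefined on top of the iterator; roughly, paraphrasing the 3.14 include/linux/bio.h:

        #define bio_sectors(bio)    ((bio)->bi_iter.bi_size >> 9)
        #define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors(bio))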
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2645c7..dd3646111561 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -17,55 +17,24 @@
17 * original bio state. 17 * original bio state.
18 */ 18 */
19 19
20struct dm_bio_vec_details {
21#if PAGE_SIZE < 65536
22 __u16 bv_len;
23 __u16 bv_offset;
24#else
25 unsigned bv_len;
26 unsigned bv_offset;
27#endif
28};
29
30struct dm_bio_details { 20struct dm_bio_details {
31 sector_t bi_sector;
32 struct block_device *bi_bdev; 21 struct block_device *bi_bdev;
33 unsigned int bi_size;
34 unsigned short bi_idx;
35 unsigned long bi_flags; 22 unsigned long bi_flags;
36 struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES]; 23 struct bvec_iter bi_iter;
37}; 24};
38 25
39static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) 26static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
40{ 27{
41 unsigned i;
42
43 bd->bi_sector = bio->bi_sector;
44 bd->bi_bdev = bio->bi_bdev; 28 bd->bi_bdev = bio->bi_bdev;
45 bd->bi_size = bio->bi_size;
46 bd->bi_idx = bio->bi_idx;
47 bd->bi_flags = bio->bi_flags; 29 bd->bi_flags = bio->bi_flags;
48 30 bd->bi_iter = bio->bi_iter;
49 for (i = 0; i < bio->bi_vcnt; i++) {
50 bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
51 bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
52 }
53} 31}
54 32
55static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) 33static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
56{ 34{
57 unsigned i;
58
59 bio->bi_sector = bd->bi_sector;
60 bio->bi_bdev = bd->bi_bdev; 35 bio->bi_bdev = bd->bi_bdev;
61 bio->bi_size = bd->bi_size;
62 bio->bi_idx = bd->bi_idx;
63 bio->bi_flags = bd->bi_flags; 36 bio->bi_flags = bd->bi_flags;
64 37 bio->bi_iter = bd->bi_iter;
65 for (i = 0; i < bio->bi_vcnt; i++) {
66 bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
67 bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
68 }
69} 38}
70 39
71#endif 40#endif
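dm-bio-record.h is the clearest demonstration of why the iterator pays off: saving and restoring a bio's position used to mean copying bv_len/bv_offset for every vector entry (bounded by BIO_MAX_PAGES), and it now collapses to a single struct assignment, since device-mapper never touches the vector itself. A sketch of how a target that may retry a bio uses the pair (the two submit helpers are invented for illustration):

        struct dm_bio_details bd;
        int r;

        dm_bio_record(&bd, bio);          /* now one bvec_iter struct copy */
        r = try_primary_path(bio);        /* invented helper: may fail */
        if (r < 0) {
                dm_bio_restore(&bd, bio); /* rewind sector, size and index */
                try_fallback_path(bio);   /* invented helper */
        }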
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9ed42125514b..66c5d130c8c2 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -540,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
540 bio_init(&b->bio); 540 bio_init(&b->bio);
541 b->bio.bi_io_vec = b->bio_vec; 541 b->bio.bi_io_vec = b->bio_vec;
542 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; 542 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
543 b->bio.bi_sector = block << b->c->sectors_per_block_bits; 543 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
544 b->bio.bi_bdev = b->c->bdev; 544 b->bio.bi_bdev = b->c->bdev;
545 b->bio.bi_end_io = end_io; 545 b->bio.bi_end_io = end_io;
546 546
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 930e8c3d73e9..1e018e986610 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
72 72
73static void iot_update_stats(struct io_tracker *t, struct bio *bio) 73static void iot_update_stats(struct io_tracker *t, struct bio *bio)
74{ 74{
75 if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) 75 if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
76 t->nr_seq_samples++; 76 t->nr_seq_samples++;
77 else { 77 else {
78 /* 78 /*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
87 t->nr_rand_samples++; 87 t->nr_rand_samples++;
88 } 88 }
89 89
90 t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); 90 t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
91} 91}
92 92
93static void iot_check_for_pattern_switch(struct io_tracker *t) 93static void iot_check_for_pattern_switch(struct io_tracker *t)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 09334c275c79..ffd472e015ca 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
85{ 85{
86 bio->bi_end_io = h->bi_end_io; 86 bio->bi_end_io = h->bi_end_io;
87 bio->bi_private = h->bi_private; 87 bio->bi_private = h->bi_private;
88
89 /*
90 * Must bump bi_remaining to allow bio to complete with
91 * restored bi_end_io.
92 */
93 atomic_inc(&bio->bi_remaining);
88} 94}
89 95
90/*----------------------------------------------------------------*/ 96/*----------------------------------------------------------------*/
@@ -664,15 +670,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
664static void remap_to_cache(struct cache *cache, struct bio *bio, 670static void remap_to_cache(struct cache *cache, struct bio *bio,
665 dm_cblock_t cblock) 671 dm_cblock_t cblock)
666{ 672{
667 sector_t bi_sector = bio->bi_sector; 673 sector_t bi_sector = bio->bi_iter.bi_sector;
668 674
669 bio->bi_bdev = cache->cache_dev->bdev; 675 bio->bi_bdev = cache->cache_dev->bdev;
670 if (!block_size_is_power_of_two(cache)) 676 if (!block_size_is_power_of_two(cache))
671 bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + 677 bio->bi_iter.bi_sector =
672 sector_div(bi_sector, cache->sectors_per_block); 678 (from_cblock(cblock) * cache->sectors_per_block) +
679 sector_div(bi_sector, cache->sectors_per_block);
673 else 680 else
674 bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | 681 bio->bi_iter.bi_sector =
675 (bi_sector & (cache->sectors_per_block - 1)); 682 (from_cblock(cblock) << cache->sectors_per_block_shift) |
683 (bi_sector & (cache->sectors_per_block - 1));
676} 684}
677 685
678static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) 686static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +720,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
712 720
713static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) 721static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
714{ 722{
715 sector_t block_nr = bio->bi_sector; 723 sector_t block_nr = bio->bi_iter.bi_sector;
716 724
717 if (!block_size_is_power_of_two(cache)) 725 if (!block_size_is_power_of_two(cache))
718 (void) sector_div(block_nr, cache->sectors_per_block); 726 (void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1035,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
1027static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) 1035static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1028{ 1036{
1029 return (bio_data_dir(bio) == WRITE) && 1037 return (bio_data_dir(bio) == WRITE) &&
1030 (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); 1038 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1031} 1039}
1032 1040
1033static void avoid_copy(struct dm_cache_migration *mg) 1041static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1260,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
1252 size_t pb_data_size = get_per_bio_data_size(cache); 1260 size_t pb_data_size = get_per_bio_data_size(cache);
1253 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1261 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1254 1262
1255 BUG_ON(bio->bi_size); 1263 BUG_ON(bio->bi_iter.bi_size);
1256 if (!pb->req_nr) 1264 if (!pb->req_nr)
1257 remap_to_origin(cache, bio); 1265 remap_to_origin(cache, bio);
1258 else 1266 else
@@ -1275,9 +1283,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
1275 */ 1283 */
1276static void process_discard_bio(struct cache *cache, struct bio *bio) 1284static void process_discard_bio(struct cache *cache, struct bio *bio)
1277{ 1285{
1278 dm_block_t start_block = dm_sector_div_up(bio->bi_sector, 1286 dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
1279 cache->discard_block_size); 1287 cache->discard_block_size);
1280 dm_block_t end_block = bio->bi_sector + bio_sectors(bio); 1288 dm_block_t end_block = bio_end_sector(bio);
1281 dm_block_t b; 1289 dm_block_t b;
1282 1290
1283 end_block = block_div(end_block, cache->discard_block_size); 1291 end_block = block_div(end_block, cache->discard_block_size);
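The atomic_inc(&bio->bi_remaining) added to dm_unhook_bio() is the flip side of the generic bio chaining introduced by this series: bio_endio() now decrements bi_remaining and only invokes bi_end_io when it reaches zero, so code that stashes and later restores a completion handler must re-arm the counter or the restored handler never runs. The idiom, sketched after dm-cache's hook/unhook pair above:

        struct hook_info {                      /* mirrors dm_hook_info */
                bio_end_io_t *bi_end_io;
                void *bi_private;
        };

        static void hook_bio(struct hook_info *h, struct bio *bio,
                             bio_end_io_t *end_io, void *private)
        {
                h->bi_end_io = bio->bi_end_io;
                h->bi_private = bio->bi_private;
                bio->bi_end_io = end_io;
                bio->bi_private = private;
        }

        static void unhook_bio(struct hook_info *h, struct bio *bio)
        {
                bio->bi_end_io = h->bi_end_io;
                bio->bi_private = h->bi_private;
                /* Re-arm the completion count so the next bio_endio()
                 * actually calls the restored bi_end_io. */
                atomic_inc(&bio->bi_remaining);
        }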
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 81b0fa660452..784695d22fde 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -39,10 +39,8 @@ struct convert_context {
39 struct completion restart; 39 struct completion restart;
40 struct bio *bio_in; 40 struct bio *bio_in;
41 struct bio *bio_out; 41 struct bio *bio_out;
42 unsigned int offset_in; 42 struct bvec_iter iter_in;
43 unsigned int offset_out; 43 struct bvec_iter iter_out;
44 unsigned int idx_in;
45 unsigned int idx_out;
46 sector_t cc_sector; 44 sector_t cc_sector;
47 atomic_t cc_pending; 45 atomic_t cc_pending;
48}; 46};
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
826{ 824{
827 ctx->bio_in = bio_in; 825 ctx->bio_in = bio_in;
828 ctx->bio_out = bio_out; 826 ctx->bio_out = bio_out;
829 ctx->offset_in = 0; 827 if (bio_in)
830 ctx->offset_out = 0; 828 ctx->iter_in = bio_in->bi_iter;
831 ctx->idx_in = bio_in ? bio_in->bi_idx : 0; 829 if (bio_out)
832 ctx->idx_out = bio_out ? bio_out->bi_idx : 0; 830 ctx->iter_out = bio_out->bi_iter;
833 ctx->cc_sector = sector + cc->iv_offset; 831 ctx->cc_sector = sector + cc->iv_offset;
834 init_completion(&ctx->restart); 832 init_completion(&ctx->restart);
835} 833}
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
857 struct convert_context *ctx, 855 struct convert_context *ctx,
858 struct ablkcipher_request *req) 856 struct ablkcipher_request *req)
859{ 857{
860 struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); 858 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
861 struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); 859 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
862 struct dm_crypt_request *dmreq; 860 struct dm_crypt_request *dmreq;
863 u8 *iv; 861 u8 *iv;
864 int r; 862 int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
869 dmreq->iv_sector = ctx->cc_sector; 867 dmreq->iv_sector = ctx->cc_sector;
870 dmreq->ctx = ctx; 868 dmreq->ctx = ctx;
871 sg_init_table(&dmreq->sg_in, 1); 869 sg_init_table(&dmreq->sg_in, 1);
872 sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, 870 sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
873 bv_in->bv_offset + ctx->offset_in); 871 bv_in.bv_offset);
874 872
875 sg_init_table(&dmreq->sg_out, 1); 873 sg_init_table(&dmreq->sg_out, 1);
876 sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, 874 sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
877 bv_out->bv_offset + ctx->offset_out); 875 bv_out.bv_offset);
878 876
879 ctx->offset_in += 1 << SECTOR_SHIFT; 877 bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
880 if (ctx->offset_in >= bv_in->bv_len) { 878 bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
881 ctx->offset_in = 0;
882 ctx->idx_in++;
883 }
884
885 ctx->offset_out += 1 << SECTOR_SHIFT;
886 if (ctx->offset_out >= bv_out->bv_len) {
887 ctx->offset_out = 0;
888 ctx->idx_out++;
889 }
890 879
891 if (cc->iv_gen_ops) { 880 if (cc->iv_gen_ops) {
892 r = cc->iv_gen_ops->generator(cc, iv, dmreq); 881 r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
937 926
938 atomic_set(&ctx->cc_pending, 1); 927 atomic_set(&ctx->cc_pending, 1);
939 928
940 while(ctx->idx_in < ctx->bio_in->bi_vcnt && 929 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
941 ctx->idx_out < ctx->bio_out->bi_vcnt) {
942 930
943 crypt_alloc_req(cc, ctx); 931 crypt_alloc_req(cc, ctx);
944 932
@@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
1021 size -= len; 1009 size -= len;
1022 } 1010 }
1023 1011
1024 if (!clone->bi_size) { 1012 if (!clone->bi_iter.bi_size) {
1025 bio_put(clone); 1013 bio_put(clone);
1026 return NULL; 1014 return NULL;
1027 } 1015 }
@@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1161 crypt_inc_pending(io); 1149 crypt_inc_pending(io);
1162 1150
1163 clone_init(io, clone); 1151 clone_init(io, clone);
1164 clone->bi_sector = cc->start + io->sector; 1152 clone->bi_iter.bi_sector = cc->start + io->sector;
1165 1153
1166 generic_make_request(clone); 1154 generic_make_request(clone);
1167 return 0; 1155 return 0;
@@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1207 } 1195 }
1208 1196
1209 /* crypt_convert should have filled the clone bio */ 1197 /* crypt_convert should have filled the clone bio */
1210 BUG_ON(io->ctx.idx_out < clone->bi_vcnt); 1198 BUG_ON(io->ctx.iter_out.bi_size);
1211 1199
1212 clone->bi_sector = cc->start + io->sector; 1200 clone->bi_iter.bi_sector = cc->start + io->sector;
1213 1201
1214 if (async) 1202 if (async)
1215 kcryptd_queue_io(io); 1203 kcryptd_queue_io(io);
@@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1224 struct dm_crypt_io *new_io; 1212 struct dm_crypt_io *new_io;
1225 int crypt_finished; 1213 int crypt_finished;
1226 unsigned out_of_pages = 0; 1214 unsigned out_of_pages = 0;
1227 unsigned remaining = io->base_bio->bi_size; 1215 unsigned remaining = io->base_bio->bi_iter.bi_size;
1228 sector_t sector = io->sector; 1216 sector_t sector = io->sector;
1229 int r; 1217 int r;
1230 1218
@@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1246 } 1234 }
1247 1235
1248 io->ctx.bio_out = clone; 1236 io->ctx.bio_out = clone;
1249 io->ctx.idx_out = 0; 1237 io->ctx.iter_out = clone->bi_iter;
1250 1238
1251 remaining -= clone->bi_size; 1239 remaining -= clone->bi_iter.bi_size;
1252 sector += bio_sectors(clone); 1240 sector += bio_sectors(clone);
1253 1241
1254 crypt_inc_pending(io); 1242 crypt_inc_pending(io);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1290 crypt_inc_pending(new_io); 1278 crypt_inc_pending(new_io);
1291 crypt_convert_init(cc, &new_io->ctx, NULL, 1279 crypt_convert_init(cc, &new_io->ctx, NULL,
1292 io->base_bio, sector); 1280 io->base_bio, sector);
1293 new_io->ctx.idx_in = io->ctx.idx_in; 1281 new_io->ctx.iter_in = io->ctx.iter_in;
1294 new_io->ctx.offset_in = io->ctx.offset_in;
1295 1282
1296 /* 1283 /*
1297 * Fragments after the first use the base_io 1284 * Fragments after the first use the base_io
@@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
1869 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { 1856 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
1870 bio->bi_bdev = cc->dev->bdev; 1857 bio->bi_bdev = cc->dev->bdev;
1871 if (bio_sectors(bio)) 1858 if (bio_sectors(bio))
1872 bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); 1859 bio->bi_iter.bi_sector = cc->start +
1860 dm_target_offset(ti, bio->bi_iter.bi_sector);
1873 return DM_MAPIO_REMAPPED; 1861 return DM_MAPIO_REMAPPED;
1874 } 1862 }
1875 1863
1876 io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); 1864 io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
1877 1865
1878 if (bio_data_dir(io->base_bio) == READ) { 1866 if (bio_data_dir(io->base_bio) == READ) {
1879 if (kcryptd_io_read(io, GFP_NOWAIT)) 1867 if (kcryptd_io_read(io, GFP_NOWAIT))
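dm-crypt used to drag four hand-stepped cursor fields (offset_in/out, idx_in/out) through the convert path; with immutable biovecs it keeps two private struct bvec_iter copies and advances them through library calls, which also handle segment boundaries for it. The per-sector walk now looks like this in outline (512-byte steps as in crypt_convert_block() above; the encrypt helper is invented):

        struct bvec_iter in  = bio_in->bi_iter;   /* private cursor copies */
        struct bvec_iter out = bio_out->bi_iter;

        while (in.bi_size && out.bi_size) {
                /* Value copies of the current 512-byte window's segments: */
                struct bio_vec bv_in  = bio_iter_iovec(bio_in, in);
                struct bio_vec bv_out = bio_iter_iovec(bio_out, out);

                encrypt_one_sector(bv_in, bv_out);        /* invented helper */

                bio_advance_iter(bio_in,  &in,  1 << SECTOR_SHIFT);
                bio_advance_iter(bio_out, &out, 1 << SECTOR_SHIFT);
        }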
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index a8a511c053a5..42c3a27a14cc 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -277,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
277 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { 277 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
278 bio->bi_bdev = dc->dev_write->bdev; 278 bio->bi_bdev = dc->dev_write->bdev;
279 if (bio_sectors(bio)) 279 if (bio_sectors(bio))
280 bio->bi_sector = dc->start_write + 280 bio->bi_iter.bi_sector = dc->start_write +
281 dm_target_offset(ti, bio->bi_sector); 281 dm_target_offset(ti, bio->bi_iter.bi_sector);
282 282
283 return delay_bio(dc, dc->write_delay, bio); 283 return delay_bio(dc, dc->write_delay, bio);
284 } 284 }
285 285
286 bio->bi_bdev = dc->dev_read->bdev; 286 bio->bi_bdev = dc->dev_read->bdev;
287 bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector); 287 bio->bi_iter.bi_sector = dc->start_read +
288 dm_target_offset(ti, bio->bi_iter.bi_sector);
288 289
289 return delay_bio(dc, dc->read_delay, bio); 290 return delay_bio(dc, dc->read_delay, bio);
290} 291}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec5f126..b257e46876d3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
248 248
249 bio->bi_bdev = fc->dev->bdev; 249 bio->bi_bdev = fc->dev->bdev;
250 if (bio_sectors(bio)) 250 if (bio_sectors(bio))
251 bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); 251 bio->bi_iter.bi_sector =
252 flakey_map_sector(ti, bio->bi_iter.bi_sector);
252} 253}
253 254
254static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) 255static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
265 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " 266 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
266 "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", 267 "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
267 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, 268 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
268 (bio_data_dir(bio) == WRITE) ? 'w' : 'r', 269 (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
269 bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); 270 (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
270 } 271 }
271} 272}
272 273
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986a2fec..b2b8a10e8427 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
201/* 201/*
202 * Functions for getting the pages from a bvec. 202 * Functions for getting the pages from a bvec.
203 */ 203 */
204static void bvec_get_page(struct dpages *dp, 204static void bio_get_page(struct dpages *dp,
205 struct page **p, unsigned long *len, unsigned *offset) 205 struct page **p, unsigned long *len, unsigned *offset)
206{ 206{
207 struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; 207 struct bio *bio = dp->context_ptr;
208 *p = bvec->bv_page; 208 struct bio_vec bvec = bio_iovec(bio);
209 *len = bvec->bv_len; 209 *p = bvec.bv_page;
210 *offset = bvec->bv_offset; 210 *len = bvec.bv_len;
211 *offset = bvec.bv_offset;
211} 212}
212 213
213static void bvec_next_page(struct dpages *dp) 214static void bio_next_page(struct dpages *dp)
214{ 215{
215 struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; 216 struct bio *bio = dp->context_ptr;
216 dp->context_ptr = bvec + 1; 217 struct bio_vec bvec = bio_iovec(bio);
218
219 bio_advance(bio, bvec.bv_len);
217} 220}
218 221
219static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec) 222static void bio_dp_init(struct dpages *dp, struct bio *bio)
220{ 223{
221 dp->get_page = bvec_get_page; 224 dp->get_page = bio_get_page;
222 dp->next_page = bvec_next_page; 225 dp->next_page = bio_next_page;
223 dp->context_ptr = bvec; 226 dp->context_ptr = bio;
224} 227}
225 228
226/* 229/*
@@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
304 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); 307 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
305 308
306 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); 309 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
307 bio->bi_sector = where->sector + (where->count - remaining); 310 bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
308 bio->bi_bdev = where->bdev; 311 bio->bi_bdev = where->bdev;
309 bio->bi_end_io = endio; 312 bio->bi_end_io = endio;
310 store_io_and_region_in_bio(bio, io, region); 313 store_io_and_region_in_bio(bio, io, region);
311 314
312 if (rw & REQ_DISCARD) { 315 if (rw & REQ_DISCARD) {
313 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 316 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
314 bio->bi_size = num_sectors << SECTOR_SHIFT; 317 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
315 remaining -= num_sectors; 318 remaining -= num_sectors;
316 } else if (rw & REQ_WRITE_SAME) { 319 } else if (rw & REQ_WRITE_SAME) {
317 /* 320 /*
@@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
320 dp->get_page(dp, &page, &len, &offset); 323 dp->get_page(dp, &page, &len, &offset);
321 bio_add_page(bio, page, logical_block_size, offset); 324 bio_add_page(bio, page, logical_block_size, offset);
322 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); 325 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
323 bio->bi_size = num_sectors << SECTOR_SHIFT; 326 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
324 327
325 offset = 0; 328 offset = 0;
326 remaining -= num_sectors; 329 remaining -= num_sectors;
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
457 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); 460 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
458 break; 461 break;
459 462
460 case DM_IO_BVEC: 463 case DM_IO_BIO:
461 bvec_dp_init(dp, io_req->mem.ptr.bvec); 464 bio_dp_init(dp, io_req->mem.ptr.bio);
462 break; 465 break;
463 466
464 case DM_IO_VMA: 467 case DM_IO_VMA:
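In dm-io the dpages cursor used to be a raw bio_vec pointer that was simply incremented past the array; since a clone may now start mid-vector (bvec_iter tracks a byte offset into the current segment), the cursor becomes the bio itself and stepping is done with bio_advance(), which updates the iterator rather than the vector. A consumer drives the two callbacks roughly the way do_region()'s page loop does; in outline, under those assumptions:

        struct page *page;
        unsigned long len;
        unsigned offset;

        while (remaining) {
                dp->get_page(dp, &page, &len, &offset); /* current position */
                len = min(len, to_bytes(remaining));
                if (!bio_add_page(bio, page, len, offset))
                        break;                          /* bio full: submit */
                remaining -= to_sector(len);
                dp->next_page(dp);                      /* advance cursor */
        }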
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d267340c..53e848c10939 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
85 85
86 bio->bi_bdev = lc->dev->bdev; 86 bio->bi_bdev = lc->dev->bdev;
87 if (bio_sectors(bio)) 87 if (bio_sectors(bio))
88 bio->bi_sector = linear_map_sector(ti, bio->bi_sector); 88 bio->bi_iter.bi_sector =
89 linear_map_sector(ti, bio->bi_iter.bi_sector);
89} 90}
90 91
91static int linear_map(struct dm_target *ti, struct bio *bio) 92static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443c5614..f284e0bfb25f 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
432 region_t region = dm_rh_bio_to_region(ms->rh, bio); 432 region_t region = dm_rh_bio_to_region(ms->rh, bio);
433 433
434 if (log->type->in_sync(log, region, 0)) 434 if (log->type->in_sync(log, region, 0))
435 return choose_mirror(ms, bio->bi_sector) ? 1 : 0; 435 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
436 436
437 return 0; 437 return 0;
438} 438}
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
442 */ 442 */
443static sector_t map_sector(struct mirror *m, struct bio *bio) 443static sector_t map_sector(struct mirror *m, struct bio *bio)
444{ 444{
445 if (unlikely(!bio->bi_size)) 445 if (unlikely(!bio->bi_iter.bi_size))
446 return 0; 446 return 0;
447 return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector); 447 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
448} 448}
449 449
450static void map_bio(struct mirror *m, struct bio *bio) 450static void map_bio(struct mirror *m, struct bio *bio)
451{ 451{
452 bio->bi_bdev = m->dev->bdev; 452 bio->bi_bdev = m->dev->bdev;
453 bio->bi_sector = map_sector(m, bio); 453 bio->bi_iter.bi_sector = map_sector(m, bio);
454} 454}
455 455
456static void map_region(struct dm_io_region *io, struct mirror *m, 456static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
526 struct dm_io_region io; 526 struct dm_io_region io;
527 struct dm_io_request io_req = { 527 struct dm_io_request io_req = {
528 .bi_rw = READ, 528 .bi_rw = READ,
529 .mem.type = DM_IO_BVEC, 529 .mem.type = DM_IO_BIO,
530 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 530 .mem.ptr.bio = bio,
531 .notify.fn = read_callback, 531 .notify.fn = read_callback,
532 .notify.context = bio, 532 .notify.context = bio,
533 .client = m->ms->io_client, 533 .client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
559 * We can only read balance if the region is in sync. 559 * We can only read balance if the region is in sync.
560 */ 560 */
561 if (likely(region_in_sync(ms, region, 1))) 561 if (likely(region_in_sync(ms, region, 1)))
562 m = choose_mirror(ms, bio->bi_sector); 562 m = choose_mirror(ms, bio->bi_iter.bi_sector);
563 else if (m && atomic_read(&m->error_count)) 563 else if (m && atomic_read(&m->error_count))
564 m = NULL; 564 m = NULL;
565 565
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
629 struct mirror *m; 629 struct mirror *m;
630 struct dm_io_request io_req = { 630 struct dm_io_request io_req = {
631 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), 631 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
632 .mem.type = DM_IO_BVEC, 632 .mem.type = DM_IO_BIO,
633 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 633 .mem.ptr.bio = bio,
634 .notify.fn = write_callback, 634 .notify.fn = write_callback,
635 .notify.context = bio, 635 .notify.context = bio,
636 .client = ms->io_client, 636 .client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
1181 * The region is in-sync and we can perform reads directly. 1181 * The region is in-sync and we can perform reads directly.
1182 * Store enough information so we can retry if it fails. 1182 * Store enough information so we can retry if it fails.
1183 */ 1183 */
1184 m = choose_mirror(ms, bio->bi_sector); 1184 m = choose_mirror(ms, bio->bi_iter.bi_sector);
1185 if (unlikely(!m)) 1185 if (unlikely(!m))
1186 return -EIO; 1186 return -EIO;
1187 1187
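The mirror target's switch from DM_IO_BVEC to DM_IO_BIO is the user-visible half of that dm-io change: instead of handing dm-io a pointer into the vector (bio->bi_io_vec + bio->bi_idx, which no longer identifies a byte-accurate position), it hands over the whole bio. Filling out a read request now reads, as in read_async_bio() above:

        struct dm_io_request io_req = {
                .bi_rw          = READ,
                .mem.type       = DM_IO_BIO,    /* was DM_IO_BVEC */
                .mem.ptr.bio    = bio,          /* was bi_io_vec + bi_idx */
                .notify.fn      = read_callback,
                .notify.context = bio,
                .client         = ms->io_client,
        };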
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb34..b929fd5f4984 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
126 126
127region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) 127region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
128{ 128{
129 return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); 129 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
130 rh->target_begin);
130} 131}
131EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); 132EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
132 133
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 717718558bd9..ebddef5237e4 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1438,6 +1438,7 @@ out:
1438 if (full_bio) { 1438 if (full_bio) {
1439 full_bio->bi_end_io = pe->full_bio_end_io; 1439 full_bio->bi_end_io = pe->full_bio_end_io;
1440 full_bio->bi_private = pe->full_bio_private; 1440 full_bio->bi_private = pe->full_bio_private;
1441 atomic_inc(&full_bio->bi_remaining);
1441 } 1442 }
1442 free_pending_exception(pe); 1443 free_pending_exception(pe);
1443 1444
@@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1619 struct bio *bio, chunk_t chunk) 1620 struct bio *bio, chunk_t chunk)
1620{ 1621{
1621 bio->bi_bdev = s->cow->bdev; 1622 bio->bi_bdev = s->cow->bdev;
1622 bio->bi_sector = chunk_to_sector(s->store, 1623 bio->bi_iter.bi_sector =
1623 dm_chunk_number(e->new_chunk) + 1624 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
1624 (chunk - e->old_chunk)) + 1625 (chunk - e->old_chunk)) +
1625 (bio->bi_sector & 1626 (bio->bi_iter.bi_sector & s->store->chunk_mask);
1626 s->store->chunk_mask);
1627} 1627}
1628 1628
1629static int snapshot_map(struct dm_target *ti, struct bio *bio) 1629static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
1641 return DM_MAPIO_REMAPPED; 1641 return DM_MAPIO_REMAPPED;
1642 } 1642 }
1643 1643
1644 chunk = sector_to_chunk(s->store, bio->bi_sector); 1644 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1645 1645
1646 /* Full snapshots are not usable */ 1646 /* Full snapshots are not usable */
1647 /* To get here the table must be live so s->active is always set. */ 1647 /* To get here the table must be live so s->active is always set. */
@@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
1702 r = DM_MAPIO_SUBMITTED; 1702 r = DM_MAPIO_SUBMITTED;
1703 1703
1704 if (!pe->started && 1704 if (!pe->started &&
1705 bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { 1705 bio->bi_iter.bi_size ==
1706 (s->store->chunk_size << SECTOR_SHIFT)) {
1706 pe->started = 1; 1707 pe->started = 1;
1707 up_write(&s->lock); 1708 up_write(&s->lock);
1708 start_full_bio(pe, bio); 1709 start_full_bio(pe, bio);
@@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
1758 return DM_MAPIO_REMAPPED; 1759 return DM_MAPIO_REMAPPED;
1759 } 1760 }
1760 1761
1761 chunk = sector_to_chunk(s->store, bio->bi_sector); 1762 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1762 1763
1763 down_write(&s->lock); 1764 down_write(&s->lock);
1764 1765
@@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
2095 down_read(&_origins_lock); 2096 down_read(&_origins_lock);
2096 o = __lookup_origin(origin->bdev); 2097 o = __lookup_origin(origin->bdev);
2097 if (o) 2098 if (o)
2098 r = __origin_write(&o->snapshots, bio->bi_sector, bio); 2099 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
2099 up_read(&_origins_lock); 2100 up_read(&_origins_lock);
2100 2101
2101 return r; 2102 return r;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712dad96..d1600d2aa2e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
259{ 259{
260 sector_t begin, end; 260 sector_t begin, end;
261 261
262 stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); 262 stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
263 target_stripe, &begin);
263 stripe_map_range_sector(sc, bio_end_sector(bio), 264 stripe_map_range_sector(sc, bio_end_sector(bio),
264 target_stripe, &end); 265 target_stripe, &end);
265 if (begin < end) { 266 if (begin < end) {
266 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; 267 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
267 bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; 268 bio->bi_iter.bi_sector = begin +
268 bio->bi_size = to_bytes(end - begin); 269 sc->stripe[target_stripe].physical_start;
270 bio->bi_iter.bi_size = to_bytes(end - begin);
269 return DM_MAPIO_REMAPPED; 271 return DM_MAPIO_REMAPPED;
270 } else { 272 } else {
271 /* The range doesn't map to the target stripe */ 273 /* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
293 return stripe_map_range(sc, bio, target_bio_nr); 295 return stripe_map_range(sc, bio, target_bio_nr);
294 } 296 }
295 297
296 stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); 298 stripe_map_sector(sc, bio->bi_iter.bi_sector,
299 &stripe, &bio->bi_iter.bi_sector);
297 300
298 bio->bi_sector += sc->stripe[stripe].physical_start; 301 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
299 bio->bi_bdev = sc->stripe[stripe].dev->bdev; 302 bio->bi_bdev = sc->stripe[stripe].dev->bdev;
300 303
301 return DM_MAPIO_REMAPPED; 304 return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4be4721..09a688b3d48c 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -311,11 +311,11 @@ error:
311static int switch_map(struct dm_target *ti, struct bio *bio) 311static int switch_map(struct dm_target *ti, struct bio *bio)
312{ 312{
313 struct switch_ctx *sctx = ti->private; 313 struct switch_ctx *sctx = ti->private;
314 sector_t offset = dm_target_offset(ti, bio->bi_sector); 314 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
315 unsigned path_nr = switch_get_path_nr(sctx, offset); 315 unsigned path_nr = switch_get_path_nr(sctx, offset);
316 316
317 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; 317 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
318 bio->bi_sector = sctx->path_list[path_nr].start + offset; 318 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
319 319
320 return DM_MAPIO_REMAPPED; 320 return DM_MAPIO_REMAPPED;
321} 321}
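dm-linear, dm-delay, dm-flakey, dm-switch and (below) dm-thin all perform the same one-line remap, just against the relocated field. The generic shape of a simple remapping target's map method, with an invented context struct standing in for each target's private data:

        static int simple_map(struct dm_target *ti, struct bio *bio)
        {
                struct simple_ctx *sc = ti->private;  /* invented context */

                bio->bi_bdev = sc->dev->bdev;
                if (bio_sectors(bio))                 /* skip empty flushes */
                        bio->bi_iter.bi_sector = sc->start +
                                dm_target_offset(ti, bio->bi_iter.bi_sector);

                return DM_MAPIO_REMAPPED;
        }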
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 726228b33a01..faaf944597ab 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -414,7 +414,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
414static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) 414static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
415{ 415{
416 struct pool *pool = tc->pool; 416 struct pool *pool = tc->pool;
417 sector_t block_nr = bio->bi_sector; 417 sector_t block_nr = bio->bi_iter.bi_sector;
418 418
419 if (block_size_is_power_of_two(pool)) 419 if (block_size_is_power_of_two(pool))
420 block_nr >>= pool->sectors_per_block_shift; 420 block_nr >>= pool->sectors_per_block_shift;
@@ -427,14 +427,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
427static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) 427static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
428{ 428{
429 struct pool *pool = tc->pool; 429 struct pool *pool = tc->pool;
430 sector_t bi_sector = bio->bi_sector; 430 sector_t bi_sector = bio->bi_iter.bi_sector;
431 431
432 bio->bi_bdev = tc->pool_dev->bdev; 432 bio->bi_bdev = tc->pool_dev->bdev;
433 if (block_size_is_power_of_two(pool)) 433 if (block_size_is_power_of_two(pool))
434 bio->bi_sector = (block << pool->sectors_per_block_shift) | 434 bio->bi_iter.bi_sector =
435 (bi_sector & (pool->sectors_per_block - 1)); 435 (block << pool->sectors_per_block_shift) |
436 (bi_sector & (pool->sectors_per_block - 1));
436 else 437 else
437 bio->bi_sector = (block * pool->sectors_per_block) + 438 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
438 sector_div(bi_sector, pool->sectors_per_block); 439 sector_div(bi_sector, pool->sectors_per_block);
439} 440}
440 441
@@ -612,8 +613,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
612 613
613static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) 614static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
614{ 615{
615 if (m->bio) 616 if (m->bio) {
616 m->bio->bi_end_io = m->saved_bi_end_io; 617 m->bio->bi_end_io = m->saved_bi_end_io;
618 atomic_inc(&m->bio->bi_remaining);
619 }
617 cell_error(m->tc->pool, m->cell); 620 cell_error(m->tc->pool, m->cell);
618 list_del(&m->list); 621 list_del(&m->list);
619 mempool_free(m, m->tc->pool->mapping_pool); 622 mempool_free(m, m->tc->pool->mapping_pool);
@@ -627,8 +630,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
627 int r; 630 int r;
628 631
629 bio = m->bio; 632 bio = m->bio;
630 if (bio) 633 if (bio) {
631 bio->bi_end_io = m->saved_bi_end_io; 634 bio->bi_end_io = m->saved_bi_end_io;
635 atomic_inc(&bio->bi_remaining);
636 }
632 637
633 if (m->err) { 638 if (m->err) {
634 cell_error(pool, m->cell); 639 cell_error(pool, m->cell);
@@ -731,7 +736,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
731 */ 736 */
732static int io_overlaps_block(struct pool *pool, struct bio *bio) 737static int io_overlaps_block(struct pool *pool, struct bio *bio)
733{ 738{
734 return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); 739 return bio->bi_iter.bi_size ==
740 (pool->sectors_per_block << SECTOR_SHIFT);
735} 741}
736 742
737static int io_overwrites_block(struct pool *pool, struct bio *bio) 743static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1136,7 +1142,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1136 if (bio_detain(pool, &key, bio, &cell)) 1142 if (bio_detain(pool, &key, bio, &cell))
1137 return; 1143 return;
1138 1144
1139 if (bio_data_dir(bio) == WRITE && bio->bi_size) 1145 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
1140 break_sharing(tc, bio, block, &key, lookup_result, cell); 1146 break_sharing(tc, bio, block, &key, lookup_result, cell);
1141 else { 1147 else {
1142 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1148 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1159,7 +1165,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1159 /* 1165 /*
1160 * Remap empty bios (flushes) immediately, without provisioning. 1166 * Remap empty bios (flushes) immediately, without provisioning.
1161 */ 1167 */
1162 if (!bio->bi_size) { 1168 if (!bio->bi_iter.bi_size) {
1163 inc_all_io_entry(pool, bio); 1169 inc_all_io_entry(pool, bio);
1164 cell_defer_no_holder(tc, cell); 1170 cell_defer_no_holder(tc, cell);
1165 1171
@@ -1258,7 +1264,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1258 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1264 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1259 switch (r) { 1265 switch (r) {
1260 case 0: 1266 case 0:
1261 if (lookup_result.shared && (rw == WRITE) && bio->bi_size) 1267 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
1262 handle_unserviceable_bio(tc->pool, bio); 1268 handle_unserviceable_bio(tc->pool, bio);
1263 else { 1269 else {
1264 inc_all_io_entry(tc->pool, bio); 1270 inc_all_io_entry(tc->pool, bio);
@@ -2939,7 +2945,7 @@ out_unlock:
2939 2945
2940static int thin_map(struct dm_target *ti, struct bio *bio) 2946static int thin_map(struct dm_target *ti, struct bio *bio)
2941{ 2947{
2942 bio->bi_sector = dm_target_offset(ti, bio->bi_sector); 2948 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
2943 2949
2944 return thin_bio_map(ti, bio); 2950 return thin_bio_map(ti, bio);
2945} 2951}
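remap() in dm-thin shows the standard block-remapping arithmetic under the new field names: read the logical sector once from bi_iter.bi_sector, then write back either a shift/mask (power-of-two block size) or a multiply plus the sector_div() remainder. Reduced to its skeleton, with the pool fields shown as bare stand-in variables:

        static void remap_to_block(struct bio *bio, dm_block_t block)
        {
                sector_t s = bio->bi_iter.bi_sector;  /* read once */

                if (power_of_two_block_size)          /* stand-in flag */
                        bio->bi_iter.bi_sector =
                                (block << sectors_per_block_shift) |
                                (s & (sectors_per_block - 1));
                else
                        /* sector_div() divides 's' in place and returns
                         * the remainder, the offset within the block. */
                        bio->bi_iter.bi_sector =
                                (block * sectors_per_block) +
                                sector_div(s, sectors_per_block);
        }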
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941db3aff..796007a5e0e1 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -73,15 +73,10 @@ struct dm_verity_io {
73 sector_t block; 73 sector_t block;
74 unsigned n_blocks; 74 unsigned n_blocks;
75 75
76 /* saved bio vector */ 76 struct bvec_iter iter;
77 struct bio_vec *io_vec;
78 unsigned io_vec_size;
79 77
80 struct work_struct work; 78 struct work_struct work;
81 79
82 /* A space for short vectors; longer vectors are allocated separately. */
83 struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
84
85 /* 80 /*
86 * Three variably-size fields follow this struct: 81 * Three variably-size fields follow this struct:
87 * 82 *
@@ -284,9 +279,10 @@ release_ret_r:
284static int verity_verify_io(struct dm_verity_io *io) 279static int verity_verify_io(struct dm_verity_io *io)
285{ 280{
286 struct dm_verity *v = io->v; 281 struct dm_verity *v = io->v;
282 struct bio *bio = dm_bio_from_per_bio_data(io,
283 v->ti->per_bio_data_size);
287 unsigned b; 284 unsigned b;
288 int i; 285 int i;
289 unsigned vector = 0, offset = 0;
290 286
291 for (b = 0; b < io->n_blocks; b++) { 287 for (b = 0; b < io->n_blocks; b++) {
292 struct shash_desc *desc; 288 struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
336 } 332 }
337 333
338 todo = 1 << v->data_dev_block_bits; 334 todo = 1 << v->data_dev_block_bits;
339 do { 335 while (io->iter.bi_size) {
340 struct bio_vec *bv;
341 u8 *page; 336 u8 *page;
342 unsigned len; 337 struct bio_vec bv = bio_iter_iovec(bio, io->iter);
343 338
344 BUG_ON(vector >= io->io_vec_size); 339 page = kmap_atomic(bv.bv_page);
345 bv = &io->io_vec[vector]; 340 r = crypto_shash_update(desc, page + bv.bv_offset,
346 page = kmap_atomic(bv->bv_page); 341 bv.bv_len);
347 len = bv->bv_len - offset;
348 if (likely(len >= todo))
349 len = todo;
350 r = crypto_shash_update(desc,
351 page + bv->bv_offset + offset, len);
352 kunmap_atomic(page); 342 kunmap_atomic(page);
343
353 if (r < 0) { 344 if (r < 0) {
354 DMERR("crypto_shash_update failed: %d", r); 345 DMERR("crypto_shash_update failed: %d", r);
355 return r; 346 return r;
356 } 347 }
357 offset += len; 348
358 if (likely(offset == bv->bv_len)) { 349 bio_advance_iter(bio, &io->iter, bv.bv_len);
359 offset = 0; 350 }
360 vector++;
361 }
362 todo -= len;
363 } while (todo);
364 351
365 if (!v->version) { 352 if (!v->version) {
366 r = crypto_shash_update(desc, v->salt, v->salt_size); 353 r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
383 return -EIO; 370 return -EIO;
384 } 371 }
385 } 372 }
386 BUG_ON(vector != io->io_vec_size);
387 BUG_ON(offset);
388 373
389 return 0; 374 return 0;
390} 375}
@@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
400 bio->bi_end_io = io->orig_bi_end_io; 385 bio->bi_end_io = io->orig_bi_end_io;
401 bio->bi_private = io->orig_bi_private; 386 bio->bi_private = io->orig_bi_private;
402 387
403 if (io->io_vec != io->io_vec_inline) 388 bio_endio_nodec(bio, error);
404 mempool_free(io->io_vec, v->vec_mempool);
405
406 bio_endio(bio, error);
407} 389}
408 390
409static void verity_work(struct work_struct *w) 391static void verity_work(struct work_struct *w)
@@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
493 struct dm_verity_io *io; 475 struct dm_verity_io *io;
494 476
495 bio->bi_bdev = v->data_dev->bdev; 477 bio->bi_bdev = v->data_dev->bdev;
496 bio->bi_sector = verity_map_sector(v, bio->bi_sector); 478 bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
497 479
498 if (((unsigned)bio->bi_sector | bio_sectors(bio)) & 480 if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
499 ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) { 481 ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
500 DMERR_LIMIT("unaligned io"); 482 DMERR_LIMIT("unaligned io");
501 return -EIO; 483 return -EIO;
@@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
514 io->v = v; 496 io->v = v;
515 io->orig_bi_end_io = bio->bi_end_io; 497 io->orig_bi_end_io = bio->bi_end_io;
516 io->orig_bi_private = bio->bi_private; 498 io->orig_bi_private = bio->bi_private;
517 io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); 499 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
518 io->n_blocks = bio->bi_size >> v->data_dev_block_bits; 500 io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
519 501
520 bio->bi_end_io = verity_end_io; 502 bio->bi_end_io = verity_end_io;
521 bio->bi_private = io; 503 bio->bi_private = io;
522 io->io_vec_size = bio_segments(bio); 504 io->iter = bio->bi_iter;
523 if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
524 io->io_vec = io->io_vec_inline;
525 else
526 io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
527 memcpy(io->io_vec, bio_iovec(bio),
528 io->io_vec_size * sizeof(struct bio_vec));
529 505
530 verity_submit_prefetch(v, io); 506 verity_submit_prefetch(v, io);
531 507
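dm-verity previously memcpy'd the whole bio_vec array (with a mempool fallback for long vectors) so it could walk the data again from the workqueue once the read completed; since nothing modifies the vector anymore, a private struct bvec_iter copy taken at map time is enough, and the BUG_ON cross-checks on the hand-rolled cursor go away with it. The pattern, in sketch form:

        /* At map time: snapshot the position, cheaply. */
        io->iter = bio->bi_iter;

        /* Later, in the verify worker: walk the same data again. */
        while (io->iter.bi_size) {
                struct bio_vec bv = bio_iter_iovec(bio, io->iter);
                u8 *p = kmap_atomic(bv.bv_page);

                r = crypto_shash_update(desc, p + bv.bv_offset, bv.bv_len);
                kunmap_atomic(p);
                if (r < 0)
                        return r;
                bio_advance_iter(bio, &io->iter, bv.bv_len);
        }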
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index b49c76284241..8c53b09b9a2c 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
575 atomic_inc_return(&md->pending[rw])); 575 atomic_inc_return(&md->pending[rw]));
576 576
577 if (unlikely(dm_stats_used(&md->stats))) 577 if (unlikely(dm_stats_used(&md->stats)))
578 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, 578 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
579 bio_sectors(bio), false, 0, &io->stats_aux); 579 bio_sectors(bio), false, 0, &io->stats_aux);
580} 580}
581 581
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
593 part_stat_unlock(); 593 part_stat_unlock();
594 594
595 if (unlikely(dm_stats_used(&md->stats))) 595 if (unlikely(dm_stats_used(&md->stats)))
596 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, 596 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
597 bio_sectors(bio), true, duration, &io->stats_aux); 597 bio_sectors(bio), true, duration, &io->stats_aux);
598 598
599 /* 599 /*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
742 if (io_error == DM_ENDIO_REQUEUE) 742 if (io_error == DM_ENDIO_REQUEUE)
743 return; 743 return;
744 744
745 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { 745 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
746 /* 746 /*
747 * Preflush done for flush with data, reissue 747 * Preflush done for flush with data, reissue
748 * without REQ_FLUSH. 748 * without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
797 struct dm_rq_clone_bio_info *info = clone->bi_private; 797 struct dm_rq_clone_bio_info *info = clone->bi_private;
798 struct dm_rq_target_io *tio = info->tio; 798 struct dm_rq_target_io *tio = info->tio;
799 struct bio *bio = info->orig; 799 struct bio *bio = info->orig;
800 unsigned int nr_bytes = info->orig->bi_size; 800 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
801 801
802 bio_put(clone); 802 bio_put(clone);
803 803
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
1128 * this io. 1128 * this io.
1129 */ 1129 */
1130 atomic_inc(&tio->io->io_count); 1130 atomic_inc(&tio->io->io_count);
1131 sector = clone->bi_sector; 1131 sector = clone->bi_iter.bi_sector;
1132 r = ti->type->map(ti, clone); 1132 r = ti->type->map(ti, clone);
1133 if (r == DM_MAPIO_REMAPPED) { 1133 if (r == DM_MAPIO_REMAPPED) {
1134 /* the bio has been remapped so dispatch it */ 1134 /* the bio has been remapped so dispatch it */
@@ -1155,76 +1155,32 @@ struct clone_info {
1155 struct dm_io *io; 1155 struct dm_io *io;
1156 sector_t sector; 1156 sector_t sector;
1157 sector_t sector_count; 1157 sector_t sector_count;
1158 unsigned short idx;
1159}; 1158};
1160 1159
1161static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) 1160static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
1162{ 1161{
1163 bio->bi_sector = sector; 1162 bio->bi_iter.bi_sector = sector;
1164 bio->bi_size = to_bytes(len); 1163 bio->bi_iter.bi_size = to_bytes(len);
1165}
1166
1167static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
1168{
1169 bio->bi_idx = idx;
1170 bio->bi_vcnt = idx + bv_count;
1171 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
1172}
1173
1174static void clone_bio_integrity(struct bio *bio, struct bio *clone,
1175 unsigned short idx, unsigned len, unsigned offset,
1176 unsigned trim)
1177{
1178 if (!bio_integrity(bio))
1179 return;
1180
1181 bio_integrity_clone(clone, bio, GFP_NOIO);
1182
1183 if (trim)
1184 bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
1185}
1186
1187/*
1188 * Creates a little bio that just does part of a bvec.
1189 */
1190static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
1191 sector_t sector, unsigned short idx,
1192 unsigned offset, unsigned len)
1193{
1194 struct bio *clone = &tio->clone;
1195 struct bio_vec *bv = bio->bi_io_vec + idx;
1196
1197 *clone->bi_io_vec = *bv;
1198
1199 bio_setup_sector(clone, sector, len);
1200
1201 clone->bi_bdev = bio->bi_bdev;
1202 clone->bi_rw = bio->bi_rw;
1203 clone->bi_vcnt = 1;
1204 clone->bi_io_vec->bv_offset = offset;
1205 clone->bi_io_vec->bv_len = clone->bi_size;
1206 clone->bi_flags |= 1 << BIO_CLONED;
1207
1208 clone_bio_integrity(bio, clone, idx, len, offset, 1);
1209} 1164}
1210 1165
1211/* 1166/*
1212 * Creates a bio that consists of range of complete bvecs. 1167 * Creates a bio that consists of range of complete bvecs.
1213 */ 1168 */
1214static void clone_bio(struct dm_target_io *tio, struct bio *bio, 1169static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1215 sector_t sector, unsigned short idx, 1170 sector_t sector, unsigned len)
1216 unsigned short bv_count, unsigned len)
1217{ 1171{
1218 struct bio *clone = &tio->clone; 1172 struct bio *clone = &tio->clone;
1219 unsigned trim = 0;
1220 1173
1221 __bio_clone(clone, bio); 1174 __bio_clone_fast(clone, bio);
1222 bio_setup_sector(clone, sector, len); 1175
1223 bio_setup_bv(clone, idx, bv_count); 1176 if (bio_integrity(bio))
1177 bio_integrity_clone(clone, bio, GFP_NOIO);
1178
1179 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1180 clone->bi_iter.bi_size = to_bytes(len);
1224 1181
1225 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) 1182 if (bio_integrity(bio))
1226 trim = 1; 1183 bio_integrity_trim(clone, 0, len);
1227 clone_bio_integrity(bio, clone, idx, len, 0, trim);
1228} 1184}
1229 1185
1230static struct dm_target_io *alloc_tio(struct clone_info *ci, 1186static struct dm_target_io *alloc_tio(struct clone_info *ci,
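The rewritten clone_bio() is the heart of the dm.c change: instead of copying bvecs and bookkeeping indices (bio_setup_bv(), clone_split_bio()), a fast clone shares the parent's bvec array and is narrowed purely through its iterator. A minimal sketch of that idiom, using only the helpers visible in this hunk; narrow_clone() is a hypothetical name for illustration:

        /* Narrow a shared-bvec clone to [sector, sector + len) of the parent.
         * 'sector' and 'len' are in 512-byte sectors; to_bytes() converts to
         * bytes as elsewhere in dm.c. The bvec array itself is never touched. */
        static void narrow_clone(struct bio *clone, sector_t sector, unsigned len)
        {
                bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
                clone->bi_iter.bi_size = to_bytes(len);
        }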
@@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
1257 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush 1213 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1258 * and discard, so no need for concern about wasted bvec allocations. 1214 * and discard, so no need for concern about wasted bvec allocations.
1259 */ 1215 */
1260 __bio_clone(clone, ci->bio); 1216 __bio_clone_fast(clone, ci->bio);
1261 if (len) 1217 if (len)
1262 bio_setup_sector(clone, ci->sector, len); 1218 bio_setup_sector(clone, ci->sector, len);
1263 1219
@@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
1286} 1242}
1287 1243
1288static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1244static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1289 sector_t sector, int nr_iovecs, 1245 sector_t sector, unsigned len)
1290 unsigned short idx, unsigned short bv_count,
1291 unsigned offset, unsigned len,
1292 unsigned split_bvec)
1293{ 1246{
1294 struct bio *bio = ci->bio; 1247 struct bio *bio = ci->bio;
1295 struct dm_target_io *tio; 1248 struct dm_target_io *tio;
@@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
1303 num_target_bios = ti->num_write_bios(ti, bio); 1256 num_target_bios = ti->num_write_bios(ti, bio);
1304 1257
1305 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 1258 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1306 tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr); 1259 tio = alloc_tio(ci, ti, 0, target_bio_nr);
1307 if (split_bvec) 1260 clone_bio(tio, bio, sector, len);
1308 clone_split_bio(tio, bio, sector, idx, offset, len);
1309 else
1310 clone_bio(tio, bio, sector, idx, bv_count, len);
1311 __map_bio(tio); 1261 __map_bio(tio);
1312 } 1262 }
1313} 1263}
@@ -1379,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci)
1379} 1329}
1380 1330
1381/* 1331/*
1382 * Find maximum number of sectors / bvecs we can process with a single bio.
1383 */
1384static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
1385{
1386 struct bio *bio = ci->bio;
1387 sector_t bv_len, total_len = 0;
1388
1389 for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
1390 bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
1391
1392 if (bv_len > max)
1393 break;
1394
1395 max -= bv_len;
1396 total_len += bv_len;
1397 }
1398
1399 return total_len;
1400}
1401
1402static int __split_bvec_across_targets(struct clone_info *ci,
1403 struct dm_target *ti, sector_t max)
1404{
1405 struct bio *bio = ci->bio;
1406 struct bio_vec *bv = bio->bi_io_vec + ci->idx;
1407 sector_t remaining = to_sector(bv->bv_len);
1408 unsigned offset = 0;
1409 sector_t len;
1410
1411 do {
1412 if (offset) {
1413 ti = dm_table_find_target(ci->map, ci->sector);
1414 if (!dm_target_is_valid(ti))
1415 return -EIO;
1416
1417 max = max_io_len(ci->sector, ti);
1418 }
1419
1420 len = min(remaining, max);
1421
1422 __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
1423 bv->bv_offset + offset, len, 1);
1424
1425 ci->sector += len;
1426 ci->sector_count -= len;
1427 offset += to_bytes(len);
1428 } while (remaining -= len);
1429
1430 ci->idx++;
1431
1432 return 0;
1433}
1434
1435/*
1436 * Select the correct strategy for processing a non-flush bio. 1332 * Select the correct strategy for processing a non-flush bio.
1437 */ 1333 */
1438static int __split_and_process_non_flush(struct clone_info *ci) 1334static int __split_and_process_non_flush(struct clone_info *ci)
1439{ 1335{
1440 struct bio *bio = ci->bio; 1336 struct bio *bio = ci->bio;
1441 struct dm_target *ti; 1337 struct dm_target *ti;
1442 sector_t len, max; 1338 unsigned len;
1443 int idx;
1444 1339
1445 if (unlikely(bio->bi_rw & REQ_DISCARD)) 1340 if (unlikely(bio->bi_rw & REQ_DISCARD))
1446 return __send_discard(ci); 1341 return __send_discard(ci);
@@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
1451 if (!dm_target_is_valid(ti)) 1346 if (!dm_target_is_valid(ti))
1452 return -EIO; 1347 return -EIO;
1453 1348
1454 max = max_io_len(ci->sector, ti); 1349 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1455
1456 /*
1457 * Optimise for the simple case where we can do all of
1458 * the remaining io with a single clone.
1459 */
1460 if (ci->sector_count <= max) {
1461 __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
1462 ci->idx, bio->bi_vcnt - ci->idx, 0,
1463 ci->sector_count, 0);
1464 ci->sector_count = 0;
1465 return 0;
1466 }
1467
1468 /*
1469 * There are some bvecs that don't span targets.
1470 * Do as many of these as possible.
1471 */
1472 if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
1473 len = __len_within_target(ci, max, &idx);
1474
1475 __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
1476 ci->idx, idx - ci->idx, 0, len, 0);
1477 1350
1478 ci->sector += len; 1351 __clone_and_map_data_bio(ci, ti, ci->sector, len);
1479 ci->sector_count -= len;
1480 ci->idx = idx;
1481 1352
1482 return 0; 1353 ci->sector += len;
1483 } 1354 ci->sector_count -= len;
1484 1355
1485 /* 1356 return 0;
1486 * Handle a bvec that must be split between two or more targets.
1487 */
1488 return __split_bvec_across_targets(ci, ti, max);
1489} 1357}
1490 1358
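With splitting expressible as advance-and-resize, the three cases the old code distinguished (whole remainder fits, whole bvecs fit, one bvec straddles targets) collapse into a single step: clone as much as the current target accepts, advance, and let the caller loop. A sketch of the resulting flow, assuming the caller drives it the way dm.c's __split_and_process_bio() does:

        /* One pass: emit a clone covering min(target limit, what's left). */
        len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
        __clone_and_map_data_bio(ci, ti, ci->sector, len);
        ci->sector += len;
        ci->sector_count -= len;

        /* The caller keeps calling until the whole bio has been mapped:
         *      while (ci.sector_count && !error)
         *              error = __split_and_process_non_flush(&ci);
         */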
1491/* 1359/*
@@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md,
1510 ci.io->bio = bio; 1378 ci.io->bio = bio;
1511 ci.io->md = md; 1379 ci.io->md = md;
1512 spin_lock_init(&ci.io->endio_lock); 1380 spin_lock_init(&ci.io->endio_lock);
1513 ci.sector = bio->bi_sector; 1381 ci.sector = bio->bi_iter.bi_sector;
1514 ci.idx = bio->bi_idx;
1515 1382
1516 start_io_acct(ci.io); 1383 start_io_acct(ci.io);
1517 1384
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aefe982b..e8b4574956c7 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
74{ 74{
75 struct bio *b = bio->bi_private; 75 struct bio *b = bio->bi_private;
76 76
77 b->bi_size = bio->bi_size; 77 b->bi_iter.bi_size = bio->bi_iter.bi_size;
78 b->bi_sector = bio->bi_sector; 78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
79 79
80 bio_put(bio); 80 bio_put(bio);
81 81
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
185 return; 185 return;
186 } 186 }
187 187
188 if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE)) 188 if (check_sector(conf, bio->bi_iter.bi_sector,
189 bio_end_sector(bio), WRITE))
189 failit = 1; 190 failit = 1;
190 if (check_mode(conf, WritePersistent)) { 191 if (check_mode(conf, WritePersistent)) {
191 add_sector(conf, bio->bi_sector, WritePersistent); 192 add_sector(conf, bio->bi_iter.bi_sector,
193 WritePersistent);
192 failit = 1; 194 failit = 1;
193 } 195 }
194 if (check_mode(conf, WriteTransient)) 196 if (check_mode(conf, WriteTransient))
195 failit = 1; 197 failit = 1;
196 } else { 198 } else {
197 /* read request */ 199 /* read request */
198 if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ)) 200 if (check_sector(conf, bio->bi_iter.bi_sector,
201 bio_end_sector(bio), READ))
199 failit = 1; 202 failit = 1;
200 if (check_mode(conf, ReadTransient)) 203 if (check_mode(conf, ReadTransient))
201 failit = 1; 204 failit = 1;
202 if (check_mode(conf, ReadPersistent)) { 205 if (check_mode(conf, ReadPersistent)) {
203 add_sector(conf, bio->bi_sector, ReadPersistent); 206 add_sector(conf, bio->bi_iter.bi_sector,
207 ReadPersistent);
204 failit = 1; 208 failit = 1;
205 } 209 }
206 if (check_mode(conf, ReadFixable)) { 210 if (check_mode(conf, ReadFixable)) {
207 add_sector(conf, bio->bi_sector, ReadFixable); 211 add_sector(conf, bio->bi_iter.bi_sector,
212 ReadFixable);
208 failit = 1; 213 failit = 1;
209 } 214 }
210 } 215 }
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37b..56f534b4a2d2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev)
288 288
289static void linear_make_request(struct mddev *mddev, struct bio *bio) 289static void linear_make_request(struct mddev *mddev, struct bio *bio)
290{ 290{
291 char b[BDEVNAME_SIZE];
291 struct dev_info *tmp_dev; 292 struct dev_info *tmp_dev;
292 sector_t start_sector; 293 struct bio *split;
294 sector_t start_sector, end_sector, data_offset;
293 295
294 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 296 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
295 md_flush_request(mddev, bio); 297 md_flush_request(mddev, bio);
296 return; 298 return;
297 } 299 }
298 300
299 rcu_read_lock(); 301 do {
300 tmp_dev = which_dev(mddev, bio->bi_sector); 302 rcu_read_lock();
301 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
302
303
304 if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
305 || (bio->bi_sector < start_sector))) {
306 char b[BDEVNAME_SIZE];
307
308 printk(KERN_ERR
309 "md/linear:%s: make_request: Sector %llu out of bounds on "
310 "dev %s: %llu sectors, offset %llu\n",
311 mdname(mddev),
312 (unsigned long long)bio->bi_sector,
313 bdevname(tmp_dev->rdev->bdev, b),
314 (unsigned long long)tmp_dev->rdev->sectors,
315 (unsigned long long)start_sector);
316 rcu_read_unlock();
317 bio_io_error(bio);
318 return;
319 }
320 if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
321 /* This bio crosses a device boundary, so we have to
322 * split it.
323 */
324 struct bio_pair *bp;
325 sector_t end_sector = tmp_dev->end_sector;
326 303
327 rcu_read_unlock(); 304 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
328 305 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
329 bp = bio_split(bio, end_sector - bio->bi_sector); 306 end_sector = tmp_dev->end_sector;
307 data_offset = tmp_dev->rdev->data_offset;
308 bio->bi_bdev = tmp_dev->rdev->bdev;
330 309
331 linear_make_request(mddev, &bp->bio1); 310 rcu_read_unlock();
332 linear_make_request(mddev, &bp->bio2);
333 bio_pair_release(bp);
334 return;
335 }
336
337 bio->bi_bdev = tmp_dev->rdev->bdev;
338 bio->bi_sector = bio->bi_sector - start_sector
339 + tmp_dev->rdev->data_offset;
340 rcu_read_unlock();
341 311
342 if (unlikely((bio->bi_rw & REQ_DISCARD) && 312 if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
343 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { 313 bio->bi_iter.bi_sector < start_sector))
344 /* Just ignore it */ 314 goto out_of_bounds;
345 bio_endio(bio, 0); 315
346 return; 316 if (unlikely(bio_end_sector(bio) > end_sector)) {
347 } 317 /* This bio crosses a device boundary, so we have to
318 * split it.
319 */
320 split = bio_split(bio, end_sector -
321 bio->bi_iter.bi_sector,
322 GFP_NOIO, fs_bio_set);
323 bio_chain(split, bio);
324 } else {
325 split = bio;
326 }
348 327
349 generic_make_request(bio); 328 split->bi_iter.bi_sector = split->bi_iter.bi_sector -
329 start_sector + data_offset;
330
331 if (unlikely((split->bi_rw & REQ_DISCARD) &&
332 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
333 /* Just ignore it */
334 bio_endio(split, 0);
335 } else
336 generic_make_request(split);
337 } while (split != bio);
338 return;
339
340out_of_bounds:
341 printk(KERN_ERR
342 "md/linear:%s: make_request: Sector %llu out of bounds on "
343 "dev %s: %llu sectors, offset %llu\n",
344 mdname(mddev),
345 (unsigned long long)bio->bi_iter.bi_sector,
346 bdevname(tmp_dev->rdev->bdev, b),
347 (unsigned long long)tmp_dev->rdev->sectors,
348 (unsigned long long)start_sector);
349 bio_io_error(bio);
350} 350}
351 351
352static void linear_status (struct seq_file *seq, struct mddev *mddev) 352static void linear_status (struct seq_file *seq, struct mddev *mddev)
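The do/while above is the canonical split-and-chain pattern this series introduces: carve off the largest prefix the current device can take, chain it to the parent so the parent's completion waits for every fragment, submit, and loop on the remainder. Stripped of the md specifics (dev_end_sector() below is a hypothetical stand-in for the which_dev() bounds lookup):

        struct bio *split;

        do {
                sector_t end = dev_end_sector(bio);     /* hypothetical bounds lookup */

                if (bio_end_sector(bio) > end) {
                        /* prefix that fits this device; the remainder stays in 'bio' */
                        split = bio_split(bio, end - bio->bi_iter.bi_sector,
                                          GFP_NOIO, fs_bio_set);
                        bio_chain(split, bio);  /* parent completes after all fragments */
                } else {
                        split = bio;            /* last (or only) fragment */
                }

                generic_make_request(split);
        } while (split != bio);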
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 40c531359a15..4ad5cc4e63e8 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
393 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 393 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
394 struct bio *bio = mddev->flush_bio; 394 struct bio *bio = mddev->flush_bio;
395 395
396 if (bio->bi_size == 0) 396 if (bio->bi_iter.bi_size == 0)
397 /* an empty barrier - all done */ 397 /* an empty barrier - all done */
398 bio_endio(bio, 0); 398 bio_endio(bio, 0);
399 else { 399 else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
754 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); 754 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
755 755
756 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; 756 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
757 bio->bi_sector = sector; 757 bio->bi_iter.bi_sector = sector;
758 bio_add_page(bio, page, size, 0); 758 bio_add_page(bio, page, size, 0);
759 bio->bi_private = rdev; 759 bio->bi_private = rdev;
760 bio->bi_end_io = super_written; 760 bio->bi_end_io = super_written;
@@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
782 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); 782 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
783 int ret; 783 int ret;
784 784
785 rw |= REQ_SYNC;
786
787 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 785 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
788 rdev->meta_bdev : rdev->bdev; 786 rdev->meta_bdev : rdev->bdev;
789 if (metadata_op) 787 if (metadata_op)
790 bio->bi_sector = sector + rdev->sb_start; 788 bio->bi_iter.bi_sector = sector + rdev->sb_start;
791 else if (rdev->mddev->reshape_position != MaxSector && 789 else if (rdev->mddev->reshape_position != MaxSector &&
792 (rdev->mddev->reshape_backwards == 790 (rdev->mddev->reshape_backwards ==
793 (sector >= rdev->mddev->reshape_position))) 791 (sector >= rdev->mddev->reshape_position)))
794 bio->bi_sector = sector + rdev->new_data_offset; 792 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
795 else 793 else
796 bio->bi_sector = sector + rdev->data_offset; 794 bio->bi_iter.bi_sector = sector + rdev->data_offset;
797 bio_add_page(bio, page, size, 0); 795 bio_add_page(bio, page, size, 0);
798 submit_bio_wait(rw, bio); 796 submit_bio_wait(rw, bio);
799 797
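Note the quiet removal of "rw |= REQ_SYNC" in sync_page_io(): as far as I can tell, submit_bio_wait() ORs REQ_SYNC into the request flags itself as of this series, so setting it at the call site became redundant rather than a behavioral change.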
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae75a33..849ad39f547b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
100 md_error (mp_bh->mddev, rdev); 100 md_error (mp_bh->mddev, rdev);
101 printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 101 printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
102 bdevname(rdev->bdev,b), 102 bdevname(rdev->bdev,b),
103 (unsigned long long)bio->bi_sector); 103 (unsigned long long)bio->bi_iter.bi_sector);
104 multipath_reschedule_retry(mp_bh); 104 multipath_reschedule_retry(mp_bh);
105 } else 105 } else
106 multipath_end_bh_io(mp_bh, error); 106 multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
132 multipath = conf->multipaths + mp_bh->path; 132 multipath = conf->multipaths + mp_bh->path;
133 133
134 mp_bh->bio = *bio; 134 mp_bh->bio = *bio;
135 mp_bh->bio.bi_sector += multipath->rdev->data_offset; 135 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
136 mp_bh->bio.bi_bdev = multipath->rdev->bdev; 136 mp_bh->bio.bi_bdev = multipath->rdev->bdev;
137 mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; 137 mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
138 mp_bh->bio.bi_end_io = multipath_end_request; 138 mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
355 spin_unlock_irqrestore(&conf->device_lock, flags); 355 spin_unlock_irqrestore(&conf->device_lock, flags);
356 356
357 bio = &mp_bh->bio; 357 bio = &mp_bh->bio;
358 bio->bi_sector = mp_bh->master_bio->bi_sector; 358 bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
359 359
360 if ((mp_bh->path = multipath_map (conf))<0) { 360 if ((mp_bh->path = multipath_map (conf))<0) {
361 printk(KERN_ALERT "multipath: %s: unrecoverable IO read" 361 printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
362 " error for block %llu\n", 362 " error for block %llu\n",
363 bdevname(bio->bi_bdev,b), 363 bdevname(bio->bi_bdev,b),
364 (unsigned long long)bio->bi_sector); 364 (unsigned long long)bio->bi_iter.bi_sector);
365 multipath_end_bh_io(mp_bh, -EIO); 365 multipath_end_bh_io(mp_bh, -EIO);
366 } else { 366 } else {
367 printk(KERN_ERR "multipath: %s: redirecting sector %llu" 367 printk(KERN_ERR "multipath: %s: redirecting sector %llu"
368 " to another IO path\n", 368 " to another IO path\n",
369 bdevname(bio->bi_bdev,b), 369 bdevname(bio->bi_bdev,b),
370 (unsigned long long)bio->bi_sector); 370 (unsigned long long)bio->bi_iter.bi_sector);
371 *bio = *(mp_bh->master_bio); 371 *bio = *(mp_bh->master_bio);
372 bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; 372 bio->bi_iter.bi_sector +=
373 conf->multipaths[mp_bh->path].rdev->data_offset;
373 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; 374 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
374 bio->bi_rw |= REQ_FAILFAST_TRANSPORT; 375 bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
375 bio->bi_end_io = multipath_end_request; 376 bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b7d2f4..407a99e46f69 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
501 unsigned int chunk_sects, struct bio *bio) 501 unsigned int chunk_sects, struct bio *bio)
502{ 502{
503 if (likely(is_power_of_2(chunk_sects))) { 503 if (likely(is_power_of_2(chunk_sects))) {
504 return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) 504 return chunk_sects >=
505 ((bio->bi_iter.bi_sector & (chunk_sects-1))
505 + bio_sectors(bio)); 506 + bio_sectors(bio));
506 } else{ 507 } else{
507 sector_t sector = bio->bi_sector; 508 sector_t sector = bio->bi_iter.bi_sector;
508 return chunk_sects >= (sector_div(sector, chunk_sects) 509 return chunk_sects >= (sector_div(sector, chunk_sects)
509 + bio_sectors(bio)); 510 + bio_sectors(bio));
510 } 511 }
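The boundary test reads more easily as standalone arithmetic: the request fits within one chunk iff its offset inside the chunk plus its length does not exceed the chunk size. A runnable userspace model (sector_div() divides in place and returns the remainder, modelled here with %):

        #include <stdbool.h>
        #include <stdint.h>

        static bool io_in_chunk(uint64_t sector, unsigned int sectors,
                                unsigned int chunk_sects, bool pow2)
        {
                /* offset of the first sector within its chunk */
                uint64_t off = pow2 ? (sector & (chunk_sects - 1))
                                    : (sector % chunk_sects);

                return chunk_sects >= off + sectors;
        }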
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
512 513
513static void raid0_make_request(struct mddev *mddev, struct bio *bio) 514static void raid0_make_request(struct mddev *mddev, struct bio *bio)
514{ 515{
515 unsigned int chunk_sects;
516 sector_t sector_offset;
517 struct strip_zone *zone; 516 struct strip_zone *zone;
518 struct md_rdev *tmp_dev; 517 struct md_rdev *tmp_dev;
518 struct bio *split;
519 519
520 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 520 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
521 md_flush_request(mddev, bio); 521 md_flush_request(mddev, bio);
522 return; 522 return;
523 } 523 }
524 524
525 chunk_sects = mddev->chunk_sectors; 525 do {
526 if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { 526 sector_t sector = bio->bi_iter.bi_sector;
527 sector_t sector = bio->bi_sector; 527 unsigned chunk_sects = mddev->chunk_sectors;
528 struct bio_pair *bp;
529 /* Sanity check -- queue functions should prevent this happening */
530 if (bio_segments(bio) > 1)
531 goto bad_map;
532 /* This is a one page bio that upper layers
533 * refuse to split for us, so we need to split it.
534 */
535 if (likely(is_power_of_2(chunk_sects)))
536 bp = bio_split(bio, chunk_sects - (sector &
537 (chunk_sects-1)));
538 else
539 bp = bio_split(bio, chunk_sects -
540 sector_div(sector, chunk_sects));
541 raid0_make_request(mddev, &bp->bio1);
542 raid0_make_request(mddev, &bp->bio2);
543 bio_pair_release(bp);
544 return;
545 }
546 528
547 sector_offset = bio->bi_sector; 529 unsigned sectors = chunk_sects -
548 zone = find_zone(mddev->private, &sector_offset); 530 (likely(is_power_of_2(chunk_sects))
549 tmp_dev = map_sector(mddev, zone, bio->bi_sector, 531 ? (sector & (chunk_sects-1))
550 &sector_offset); 532 : sector_div(sector, chunk_sects));
551 bio->bi_bdev = tmp_dev->bdev;
552 bio->bi_sector = sector_offset + zone->dev_start +
553 tmp_dev->data_offset;
554
555 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
556 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
557 /* Just ignore it */
558 bio_endio(bio, 0);
559 return;
560 }
561 533
562 generic_make_request(bio); 534 if (sectors < bio_sectors(bio)) {
563 return; 535 split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
564 536 bio_chain(split, bio);
565bad_map: 537 } else {
566 printk("md/raid0:%s: make_request bug: can't convert block across chunks" 538 split = bio;
567 " or bigger than %dk %llu %d\n", 539 }
568 mdname(mddev), chunk_sects / 2,
569 (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
570 540
571 bio_io_error(bio); 541 zone = find_zone(mddev->private, &sector);
572 return; 542 tmp_dev = map_sector(mddev, zone, sector, &sector);
543 split->bi_bdev = tmp_dev->bdev;
544 split->bi_iter.bi_sector = sector + zone->dev_start +
545 tmp_dev->data_offset;
546
547 if (unlikely((split->bi_rw & REQ_DISCARD) &&
548 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
549 /* Just ignore it */
550 bio_endio(split, 0);
551 } else
552 generic_make_request(split);
553 } while (split != bio);
573} 554}
574 555
575static void raid0_status(struct seq_file *seq, struct mddev *mddev) 556static void raid0_status(struct seq_file *seq, struct mddev *mddev)
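Each pass of the new raid0 loop emits at most one chunk's worth, clipped to the next chunk boundary. A worked example with hypothetical numbers, chunk_sects = 128:

        bio at sector 100 covering 200 sectors:
                pass 1: sectors = 128 - (100 & 127) = 28   -> split covers [100, 128)
                pass 2: sectors = 128 - (128 & 127) = 128  -> split covers [128, 256)
                pass 3: 44 sectors left, 44 < 128          -> split == bio, loop ends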
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index a49cfcc7a343..fd3a2a14b587 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
229 int done; 229 int done;
230 struct r1conf *conf = r1_bio->mddev->private; 230 struct r1conf *conf = r1_bio->mddev->private;
231 sector_t start_next_window = r1_bio->start_next_window; 231 sector_t start_next_window = r1_bio->start_next_window;
232 sector_t bi_sector = bio->bi_sector; 232 sector_t bi_sector = bio->bi_iter.bi_sector;
233 233
234 if (bio->bi_phys_segments) { 234 if (bio->bi_phys_segments) {
235 unsigned long flags; 235 unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
265 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 265 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
266 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", 266 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
267 (bio_data_dir(bio) == WRITE) ? "write" : "read", 267 (bio_data_dir(bio) == WRITE) ? "write" : "read",
268 (unsigned long long) bio->bi_sector, 268 (unsigned long long) bio->bi_iter.bi_sector,
269 (unsigned long long) bio->bi_sector + 269 (unsigned long long) bio_end_sector(bio) - 1);
270 bio_sectors(bio) - 1);
271 270
272 call_bio_endio(r1_bio); 271 call_bio_endio(r1_bio);
273 } 272 }
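The open-coded "bi_sector + bio_sectors(bio) - 1" collapses into bio_end_sector(), which after this series is (paraphrasing include/linux/bio.h):

        #define bio_end_sector(bio)     ((bio)->bi_iter.bi_sector + bio_sectors(bio))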
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
466 struct bio *mbio = r1_bio->master_bio; 465 struct bio *mbio = r1_bio->master_bio;
467 pr_debug("raid1: behind end write sectors" 466 pr_debug("raid1: behind end write sectors"
468 " %llu-%llu\n", 467 " %llu-%llu\n",
469 (unsigned long long) mbio->bi_sector, 468 (unsigned long long) mbio->bi_iter.bi_sector,
470 (unsigned long long) mbio->bi_sector + 469 (unsigned long long) bio_end_sector(mbio) - 1);
471 bio_sectors(mbio) - 1);
472 call_bio_endio(r1_bio); 470 call_bio_endio(r1_bio);
473 } 471 }
474 } 472 }
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
875 else if ((conf->next_resync - RESYNC_WINDOW_SECTORS 873 else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
876 >= bio_end_sector(bio)) || 874 >= bio_end_sector(bio)) ||
877 (conf->next_resync + NEXT_NORMALIO_DISTANCE 875 (conf->next_resync + NEXT_NORMALIO_DISTANCE
878 <= bio->bi_sector)) 876 <= bio->bi_iter.bi_sector))
879 wait = false; 877 wait = false;
880 else 878 else
881 wait = true; 879 wait = true;
@@ -913,14 +911,14 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
913 911
914 if (bio && bio_data_dir(bio) == WRITE) { 912 if (bio && bio_data_dir(bio) == WRITE) {
915 if (conf->next_resync + NEXT_NORMALIO_DISTANCE 913 if (conf->next_resync + NEXT_NORMALIO_DISTANCE
916 <= bio->bi_sector) { 914 <= bio->bi_iter.bi_sector) {
917 if (conf->start_next_window == MaxSector) 915 if (conf->start_next_window == MaxSector)
918 conf->start_next_window = 916 conf->start_next_window =
919 conf->next_resync + 917 conf->next_resync +
920 NEXT_NORMALIO_DISTANCE; 918 NEXT_NORMALIO_DISTANCE;
921 919
922 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) 920 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
923 <= bio->bi_sector) 921 <= bio->bi_iter.bi_sector)
924 conf->next_window_requests++; 922 conf->next_window_requests++;
925 else 923 else
926 conf->current_window_requests++; 924 conf->current_window_requests++;
@@ -1027,7 +1025,8 @@ do_sync_io:
1027 if (bvecs[i].bv_page) 1025 if (bvecs[i].bv_page)
1028 put_page(bvecs[i].bv_page); 1026 put_page(bvecs[i].bv_page);
1029 kfree(bvecs); 1027 kfree(bvecs);
1030 pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 1028 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1029 bio->bi_iter.bi_size);
1031} 1030}
1032 1031
1033struct raid1_plug_cb { 1032struct raid1_plug_cb {
@@ -1107,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1107 1106
1108 if (bio_data_dir(bio) == WRITE && 1107 if (bio_data_dir(bio) == WRITE &&
1109 bio_end_sector(bio) > mddev->suspend_lo && 1108 bio_end_sector(bio) > mddev->suspend_lo &&
1110 bio->bi_sector < mddev->suspend_hi) { 1109 bio->bi_iter.bi_sector < mddev->suspend_hi) {
1111 /* As the suspend_* range is controlled by 1110 /* As the suspend_* range is controlled by
1112 * userspace, we want an interruptible 1111 * userspace, we want an interruptible
1113 * wait. 1112 * wait.
@@ -1118,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1118 prepare_to_wait(&conf->wait_barrier, 1117 prepare_to_wait(&conf->wait_barrier,
1119 &w, TASK_INTERRUPTIBLE); 1118 &w, TASK_INTERRUPTIBLE);
1120 if (bio_end_sector(bio) <= mddev->suspend_lo || 1119 if (bio_end_sector(bio) <= mddev->suspend_lo ||
1121 bio->bi_sector >= mddev->suspend_hi) 1120 bio->bi_iter.bi_sector >= mddev->suspend_hi)
1122 break; 1121 break;
1123 schedule(); 1122 schedule();
1124 } 1123 }
@@ -1140,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1140 r1_bio->sectors = bio_sectors(bio); 1139 r1_bio->sectors = bio_sectors(bio);
1141 r1_bio->state = 0; 1140 r1_bio->state = 0;
1142 r1_bio->mddev = mddev; 1141 r1_bio->mddev = mddev;
1143 r1_bio->sector = bio->bi_sector; 1142 r1_bio->sector = bio->bi_iter.bi_sector;
1144 1143
1145 /* We might need to issue multiple reads to different 1144 /* We might need to issue multiple reads to different
1146 * devices if there are bad blocks around, so we keep 1145 * devices if there are bad blocks around, so we keep
@@ -1180,12 +1179,13 @@ read_again:
1180 r1_bio->read_disk = rdisk; 1179 r1_bio->read_disk = rdisk;
1181 1180
1182 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1181 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1183 bio_trim(read_bio, r1_bio->sector - bio->bi_sector, 1182 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1184 max_sectors); 1183 max_sectors);
1185 1184
1186 r1_bio->bios[rdisk] = read_bio; 1185 r1_bio->bios[rdisk] = read_bio;
1187 1186
1188 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 1187 read_bio->bi_iter.bi_sector = r1_bio->sector +
1188 mirror->rdev->data_offset;
1189 read_bio->bi_bdev = mirror->rdev->bdev; 1189 read_bio->bi_bdev = mirror->rdev->bdev;
1190 read_bio->bi_end_io = raid1_end_read_request; 1190 read_bio->bi_end_io = raid1_end_read_request;
1191 read_bio->bi_rw = READ | do_sync; 1191 read_bio->bi_rw = READ | do_sync;
@@ -1197,7 +1197,7 @@ read_again:
1197 */ 1197 */
1198 1198
1199 sectors_handled = (r1_bio->sector + max_sectors 1199 sectors_handled = (r1_bio->sector + max_sectors
1200 - bio->bi_sector); 1200 - bio->bi_iter.bi_sector);
1201 r1_bio->sectors = max_sectors; 1201 r1_bio->sectors = max_sectors;
1202 spin_lock_irq(&conf->device_lock); 1202 spin_lock_irq(&conf->device_lock);
1203 if (bio->bi_phys_segments == 0) 1203 if (bio->bi_phys_segments == 0)
@@ -1218,7 +1218,8 @@ read_again:
1218 r1_bio->sectors = bio_sectors(bio) - sectors_handled; 1218 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1219 r1_bio->state = 0; 1219 r1_bio->state = 0;
1220 r1_bio->mddev = mddev; 1220 r1_bio->mddev = mddev;
1221 r1_bio->sector = bio->bi_sector + sectors_handled; 1221 r1_bio->sector = bio->bi_iter.bi_sector +
1222 sectors_handled;
1222 goto read_again; 1223 goto read_again;
1223 } else 1224 } else
1224 generic_make_request(read_bio); 1225 generic_make_request(read_bio);
@@ -1321,7 +1322,7 @@ read_again:
1321 if (r1_bio->bios[j]) 1322 if (r1_bio->bios[j])
1322 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 1323 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1323 r1_bio->state = 0; 1324 r1_bio->state = 0;
1324 allow_barrier(conf, start_next_window, bio->bi_sector); 1325 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
1325 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1326 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1326 start_next_window = wait_barrier(conf, bio); 1327 start_next_window = wait_barrier(conf, bio);
1327 /* 1328 /*
@@ -1348,7 +1349,7 @@ read_again:
1348 bio->bi_phys_segments++; 1349 bio->bi_phys_segments++;
1349 spin_unlock_irq(&conf->device_lock); 1350 spin_unlock_irq(&conf->device_lock);
1350 } 1351 }
1351 sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; 1352 sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
1352 1353
1353 atomic_set(&r1_bio->remaining, 1); 1354 atomic_set(&r1_bio->remaining, 1);
1354 atomic_set(&r1_bio->behind_remaining, 0); 1355 atomic_set(&r1_bio->behind_remaining, 0);
@@ -1360,7 +1361,7 @@ read_again:
1360 continue; 1361 continue;
1361 1362
1362 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1363 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1363 bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors); 1364 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
1364 1365
1365 if (first_clone) { 1366 if (first_clone) {
1366 /* do behind I/O ? 1367 /* do behind I/O ?
@@ -1394,7 +1395,7 @@ read_again:
1394 1395
1395 r1_bio->bios[i] = mbio; 1396 r1_bio->bios[i] = mbio;
1396 1397
1397 mbio->bi_sector = (r1_bio->sector + 1398 mbio->bi_iter.bi_sector = (r1_bio->sector +
1398 conf->mirrors[i].rdev->data_offset); 1399 conf->mirrors[i].rdev->data_offset);
1399 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1400 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1400 mbio->bi_end_io = raid1_end_write_request; 1401 mbio->bi_end_io = raid1_end_write_request;
@@ -1434,7 +1435,7 @@ read_again:
1434 r1_bio->sectors = bio_sectors(bio) - sectors_handled; 1435 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1435 r1_bio->state = 0; 1436 r1_bio->state = 0;
1436 r1_bio->mddev = mddev; 1437 r1_bio->mddev = mddev;
1437 r1_bio->sector = bio->bi_sector + sectors_handled; 1438 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1438 goto retry_write; 1439 goto retry_write;
1439 } 1440 }
1440 1441
@@ -1958,14 +1959,14 @@ static int process_checks(struct r1bio *r1_bio)
1958 /* fixup the bio for reuse */ 1959 /* fixup the bio for reuse */
1959 bio_reset(b); 1960 bio_reset(b);
1960 b->bi_vcnt = vcnt; 1961 b->bi_vcnt = vcnt;
1961 b->bi_size = r1_bio->sectors << 9; 1962 b->bi_iter.bi_size = r1_bio->sectors << 9;
1962 b->bi_sector = r1_bio->sector + 1963 b->bi_iter.bi_sector = r1_bio->sector +
1963 conf->mirrors[i].rdev->data_offset; 1964 conf->mirrors[i].rdev->data_offset;
1964 b->bi_bdev = conf->mirrors[i].rdev->bdev; 1965 b->bi_bdev = conf->mirrors[i].rdev->bdev;
1965 b->bi_end_io = end_sync_read; 1966 b->bi_end_io = end_sync_read;
1966 b->bi_private = r1_bio; 1967 b->bi_private = r1_bio;
1967 1968
1968 size = b->bi_size; 1969 size = b->bi_iter.bi_size;
1969 for (j = 0; j < vcnt ; j++) { 1970 for (j = 0; j < vcnt ; j++) {
1970 struct bio_vec *bi; 1971 struct bio_vec *bi;
1971 bi = &b->bi_io_vec[j]; 1972 bi = &b->bi_io_vec[j];
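The fixup-for-reuse sequence is worth spelling out: bio_reset() wipes the iterator and counters but, to my reading, preserves bi_io_vec (and bi_pool), so the attached pages survive and only the bookkeeping needs rebuilding. Annotated from the hunk above:

        bio_reset(b);                                   /* pages stay attached via bi_io_vec */
        b->bi_vcnt = vcnt;                              /* rebuild the vec count ... */
        b->bi_iter.bi_size = r1_bio->sectors << 9;      /* ... and the iterator */
        b->bi_iter.bi_sector = r1_bio->sector +
                conf->mirrors[i].rdev->data_offset;     /* member-disk coordinates */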
@@ -2220,11 +2221,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
2220 } 2221 }
2221 2222
2222 wbio->bi_rw = WRITE; 2223 wbio->bi_rw = WRITE;
2223 wbio->bi_sector = r1_bio->sector; 2224 wbio->bi_iter.bi_sector = r1_bio->sector;
2224 wbio->bi_size = r1_bio->sectors << 9; 2225 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2225 2226
2226 bio_trim(wbio, sector - r1_bio->sector, sectors); 2227 bio_trim(wbio, sector - r1_bio->sector, sectors);
2227 wbio->bi_sector += rdev->data_offset; 2228 wbio->bi_iter.bi_sector += rdev->data_offset;
2228 wbio->bi_bdev = rdev->bdev; 2229 wbio->bi_bdev = rdev->bdev;
2229 if (submit_bio_wait(WRITE, wbio) == 0) 2230 if (submit_bio_wait(WRITE, wbio) == 0)
2230 /* failure! */ 2231 /* failure! */
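Order matters in narrow_write_error(): the clone is first expressed in array coordinates, trimmed to the bad-block window, and only then rebased into the member disk's coordinate space. Annotated from the hunk above:

        wbio->bi_iter.bi_sector = r1_bio->sector;         /* array origin */
        wbio->bi_iter.bi_size = r1_bio->sectors << 9;     /* full r1_bio range */
        bio_trim(wbio, sector - r1_bio->sector, sectors); /* clip to bad-block window */
        wbio->bi_iter.bi_sector += rdev->data_offset;     /* rebase to member disk */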
@@ -2338,7 +2339,8 @@ read_more:
2338 } 2339 }
2339 r1_bio->read_disk = disk; 2340 r1_bio->read_disk = disk;
2340 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); 2341 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2341 bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors); 2342 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
2343 max_sectors);
2342 r1_bio->bios[r1_bio->read_disk] = bio; 2344 r1_bio->bios[r1_bio->read_disk] = bio;
2343 rdev = conf->mirrors[disk].rdev; 2345 rdev = conf->mirrors[disk].rdev;
2344 printk_ratelimited(KERN_ERR 2346 printk_ratelimited(KERN_ERR
@@ -2347,7 +2349,7 @@ read_more:
2347 mdname(mddev), 2349 mdname(mddev),
2348 (unsigned long long)r1_bio->sector, 2350 (unsigned long long)r1_bio->sector,
2349 bdevname(rdev->bdev, b)); 2351 bdevname(rdev->bdev, b));
2350 bio->bi_sector = r1_bio->sector + rdev->data_offset; 2352 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
2351 bio->bi_bdev = rdev->bdev; 2353 bio->bi_bdev = rdev->bdev;
2352 bio->bi_end_io = raid1_end_read_request; 2354 bio->bi_end_io = raid1_end_read_request;
2353 bio->bi_rw = READ | do_sync; 2355 bio->bi_rw = READ | do_sync;
@@ -2356,7 +2358,7 @@ read_more:
2356 /* Drat - have to split this up more */ 2358 /* Drat - have to split this up more */
2357 struct bio *mbio = r1_bio->master_bio; 2359 struct bio *mbio = r1_bio->master_bio;
2358 int sectors_handled = (r1_bio->sector + max_sectors 2360 int sectors_handled = (r1_bio->sector + max_sectors
2359 - mbio->bi_sector); 2361 - mbio->bi_iter.bi_sector);
2360 r1_bio->sectors = max_sectors; 2362 r1_bio->sectors = max_sectors;
2361 spin_lock_irq(&conf->device_lock); 2363 spin_lock_irq(&conf->device_lock);
2362 if (mbio->bi_phys_segments == 0) 2364 if (mbio->bi_phys_segments == 0)
@@ -2374,7 +2376,8 @@ read_more:
2374 r1_bio->state = 0; 2376 r1_bio->state = 0;
2375 set_bit(R1BIO_ReadError, &r1_bio->state); 2377 set_bit(R1BIO_ReadError, &r1_bio->state);
2376 r1_bio->mddev = mddev; 2378 r1_bio->mddev = mddev;
2377 r1_bio->sector = mbio->bi_sector + sectors_handled; 2379 r1_bio->sector = mbio->bi_iter.bi_sector +
2380 sectors_handled;
2378 2381
2379 goto read_more; 2382 goto read_more;
2380 } else 2383 } else
@@ -2598,7 +2601,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
2598 } 2601 }
2599 if (bio->bi_end_io) { 2602 if (bio->bi_end_io) {
2600 atomic_inc(&rdev->nr_pending); 2603 atomic_inc(&rdev->nr_pending);
2601 bio->bi_sector = sector_nr + rdev->data_offset; 2604 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2602 bio->bi_bdev = rdev->bdev; 2605 bio->bi_bdev = rdev->bdev;
2603 bio->bi_private = r1_bio; 2606 bio->bi_private = r1_bio;
2604 } 2607 }
@@ -2698,7 +2701,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
2698 continue; 2701 continue;
2699 /* remove last page from this bio */ 2702 /* remove last page from this bio */
2700 bio->bi_vcnt--; 2703 bio->bi_vcnt--;
2701 bio->bi_size -= len; 2704 bio->bi_iter.bi_size -= len;
2702 bio->bi_flags &= ~(1<< BIO_SEG_VALID); 2705 bio->bi_flags &= ~(1<< BIO_SEG_VALID);
2703 } 2706 }
2704 goto bio_full; 2707 goto bio_full;
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index 8d39d63281b9..33fc408e5eac 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1152 kfree(plug); 1152 kfree(plug);
1153} 1153}
1154 1154
1155static void make_request(struct mddev *mddev, struct bio * bio) 1155static void __make_request(struct mddev *mddev, struct bio *bio)
1156{ 1156{
1157 struct r10conf *conf = mddev->private; 1157 struct r10conf *conf = mddev->private;
1158 struct r10bio *r10_bio; 1158 struct r10bio *r10_bio;
1159 struct bio *read_bio; 1159 struct bio *read_bio;
1160 int i; 1160 int i;
1161 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1162 int chunk_sects = chunk_mask + 1;
1163 const int rw = bio_data_dir(bio); 1161 const int rw = bio_data_dir(bio);
1164 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 1162 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1165 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); 1163 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1174 int max_sectors; 1172 int max_sectors;
1175 int sectors; 1173 int sectors;
1176 1174
1177 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1178 md_flush_request(mddev, bio);
1179 return;
1180 }
1181
1182 /* If this request crosses a chunk boundary, we need to
1183 * split it. This will only happen for 1 PAGE (or less) requests.
1184 */
1185 if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
1186 > chunk_sects
1187 && (conf->geo.near_copies < conf->geo.raid_disks
1188 || conf->prev.near_copies < conf->prev.raid_disks))) {
1189 struct bio_pair *bp;
1190 /* Sanity check -- queue functions should prevent this happening */
1191 if (bio_segments(bio) > 1)
1192 goto bad_map;
1193 /* This is a one page bio that upper layers
1194 * refuse to split for us, so we need to split it.
1195 */
1196 bp = bio_split(bio,
1197 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
1198
1199 /* Each of these 'make_request' calls will call 'wait_barrier'.
1200 * If the first succeeds but the second blocks due to the resync
1201 * thread raising the barrier, we will deadlock because the
1202 * IO to the underlying device will be queued in generic_make_request
1203 * and will never complete, so will never reduce nr_pending.
1204 * So increment nr_waiting here so no new raise_barriers will
1205 * succeed, and so the second wait_barrier cannot block.
1206 */
1207 spin_lock_irq(&conf->resync_lock);
1208 conf->nr_waiting++;
1209 spin_unlock_irq(&conf->resync_lock);
1210
1211 make_request(mddev, &bp->bio1);
1212 make_request(mddev, &bp->bio2);
1213
1214 spin_lock_irq(&conf->resync_lock);
1215 conf->nr_waiting--;
1216 wake_up(&conf->wait_barrier);
1217 spin_unlock_irq(&conf->resync_lock);
1218
1219 bio_pair_release(bp);
1220 return;
1221 bad_map:
1222 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
1223 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
1224 (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
1225
1226 bio_io_error(bio);
1227 return;
1228 }
1229
1230 md_write_start(mddev, bio);
1231
1232 /*
1233 * Register the new request and wait if the reconstruction
1234 * thread has put up a bar for new requests.
1235 * Continue immediately if no resync is active currently.
1236 */
1237 wait_barrier(conf);
1238
1239 sectors = bio_sectors(bio); 1175 sectors = bio_sectors(bio);
1240 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1176 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1241 bio->bi_sector < conf->reshape_progress && 1177 bio->bi_iter.bi_sector < conf->reshape_progress &&
1242 bio->bi_sector + sectors > conf->reshape_progress) { 1178 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1243 /* IO spans the reshape position. Need to wait for 1179 /* IO spans the reshape position. Need to wait for
1244 * reshape to pass 1180 * reshape to pass
1245 */ 1181 */
1246 allow_barrier(conf); 1182 allow_barrier(conf);
1247 wait_event(conf->wait_barrier, 1183 wait_event(conf->wait_barrier,
1248 conf->reshape_progress <= bio->bi_sector || 1184 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1249 conf->reshape_progress >= bio->bi_sector + sectors); 1185 conf->reshape_progress >= bio->bi_iter.bi_sector +
1186 sectors);
1250 wait_barrier(conf); 1187 wait_barrier(conf);
1251 } 1188 }
1252 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1189 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1253 bio_data_dir(bio) == WRITE && 1190 bio_data_dir(bio) == WRITE &&
1254 (mddev->reshape_backwards 1191 (mddev->reshape_backwards
1255 ? (bio->bi_sector < conf->reshape_safe && 1192 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1256 bio->bi_sector + sectors > conf->reshape_progress) 1193 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1257 : (bio->bi_sector + sectors > conf->reshape_safe && 1194 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1258 bio->bi_sector < conf->reshape_progress))) { 1195 bio->bi_iter.bi_sector < conf->reshape_progress))) {
1259 /* Need to update reshape_position in metadata */ 1196 /* Need to update reshape_position in metadata */
1260 mddev->reshape_position = conf->reshape_progress; 1197 mddev->reshape_position = conf->reshape_progress;
1261 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1198 set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1273 r10_bio->sectors = sectors; 1210 r10_bio->sectors = sectors;
1274 1211
1275 r10_bio->mddev = mddev; 1212 r10_bio->mddev = mddev;
1276 r10_bio->sector = bio->bi_sector; 1213 r10_bio->sector = bio->bi_iter.bi_sector;
1277 r10_bio->state = 0; 1214 r10_bio->state = 0;
1278 1215
1279 /* We might need to issue multiple reads to different 1216 /* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@ read_again:
1302 slot = r10_bio->read_slot; 1239 slot = r10_bio->read_slot;
1303 1240
1304 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1241 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1305 bio_trim(read_bio, r10_bio->sector - bio->bi_sector, 1242 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
1306 max_sectors); 1243 max_sectors);
1307 1244
1308 r10_bio->devs[slot].bio = read_bio; 1245 r10_bio->devs[slot].bio = read_bio;
1309 r10_bio->devs[slot].rdev = rdev; 1246 r10_bio->devs[slot].rdev = rdev;
1310 1247
1311 read_bio->bi_sector = r10_bio->devs[slot].addr + 1248 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1312 choose_data_offset(r10_bio, rdev); 1249 choose_data_offset(r10_bio, rdev);
1313 read_bio->bi_bdev = rdev->bdev; 1250 read_bio->bi_bdev = rdev->bdev;
1314 read_bio->bi_end_io = raid10_end_read_request; 1251 read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1257,7 @@ read_again:
1320 * need another r10_bio. 1257 * need another r10_bio.
1321 */ 1258 */
1322 sectors_handled = (r10_bio->sector + max_sectors 1259 sectors_handled = (r10_bio->sector + max_sectors
1323 - bio->bi_sector); 1260 - bio->bi_iter.bi_sector);
1324 r10_bio->sectors = max_sectors; 1261 r10_bio->sectors = max_sectors;
1325 spin_lock_irq(&conf->device_lock); 1262 spin_lock_irq(&conf->device_lock);
1326 if (bio->bi_phys_segments == 0) 1263 if (bio->bi_phys_segments == 0)
@@ -1341,7 +1278,8 @@ read_again:
1341 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1278 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1342 r10_bio->state = 0; 1279 r10_bio->state = 0;
1343 r10_bio->mddev = mddev; 1280 r10_bio->mddev = mddev;
1344 r10_bio->sector = bio->bi_sector + sectors_handled; 1281 r10_bio->sector = bio->bi_iter.bi_sector +
1282 sectors_handled;
1345 goto read_again; 1283 goto read_again;
1346 } else 1284 } else
1347 generic_make_request(read_bio); 1285 generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@ retry_write:
1499 bio->bi_phys_segments++; 1437 bio->bi_phys_segments++;
1500 spin_unlock_irq(&conf->device_lock); 1438 spin_unlock_irq(&conf->device_lock);
1501 } 1439 }
1502 sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector; 1440 sectors_handled = r10_bio->sector + max_sectors -
1441 bio->bi_iter.bi_sector;
1503 1442
1504 atomic_set(&r10_bio->remaining, 1); 1443 atomic_set(&r10_bio->remaining, 1);
1505 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); 1444 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@ retry_write:
1510 if (r10_bio->devs[i].bio) { 1449 if (r10_bio->devs[i].bio) {
1511 struct md_rdev *rdev = conf->mirrors[d].rdev; 1450 struct md_rdev *rdev = conf->mirrors[d].rdev;
1512 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1451 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1513 bio_trim(mbio, r10_bio->sector - bio->bi_sector, 1452 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1514 max_sectors); 1453 max_sectors);
1515 r10_bio->devs[i].bio = mbio; 1454 r10_bio->devs[i].bio = mbio;
1516 1455
1517 mbio->bi_sector = (r10_bio->devs[i].addr+ 1456 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
1518 choose_data_offset(r10_bio, 1457 choose_data_offset(r10_bio,
1519 rdev)); 1458 rdev));
1520 mbio->bi_bdev = rdev->bdev; 1459 mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@ retry_write:
1553 rdev = conf->mirrors[d].rdev; 1492 rdev = conf->mirrors[d].rdev;
1554 } 1493 }
1555 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1494 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1556 bio_trim(mbio, r10_bio->sector - bio->bi_sector, 1495 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1557 max_sectors); 1496 max_sectors);
1558 r10_bio->devs[i].repl_bio = mbio; 1497 r10_bio->devs[i].repl_bio = mbio;
1559 1498
1560 mbio->bi_sector = (r10_bio->devs[i].addr + 1499 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
1561 choose_data_offset( 1500 choose_data_offset(
1562 r10_bio, rdev)); 1501 r10_bio, rdev));
1563 mbio->bi_bdev = rdev->bdev; 1502 mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@ retry_write:
1591 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1530 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1592 1531
1593 r10_bio->mddev = mddev; 1532 r10_bio->mddev = mddev;
1594 r10_bio->sector = bio->bi_sector + sectors_handled; 1533 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1595 r10_bio->state = 0; 1534 r10_bio->state = 0;
1596 goto retry_write; 1535 goto retry_write;
1597 } 1536 }
1598 one_write_done(r10_bio); 1537 one_write_done(r10_bio);
1538}
1539
1540static void make_request(struct mddev *mddev, struct bio *bio)
1541{
1542 struct r10conf *conf = mddev->private;
1543 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1544 int chunk_sects = chunk_mask + 1;
1545
1546 struct bio *split;
1547
1548 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1549 md_flush_request(mddev, bio);
1550 return;
1551 }
1552
1553 md_write_start(mddev, bio);
1554
1555 /*
1556 * Register the new request and wait if the reconstruction
1557 * thread has put up a bar for new requests.
1558 * Continue immediately if no resync is active currently.
1559 */
1560 wait_barrier(conf);
1561
1562 do {
1563
1564 /*
1565 * If this request crosses a chunk boundary, we need to split
1566 * it.
1567 */
1568 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1569 bio_sectors(bio) > chunk_sects
1570 && (conf->geo.near_copies < conf->geo.raid_disks
1571 || conf->prev.near_copies <
1572 conf->prev.raid_disks))) {
1573 split = bio_split(bio, chunk_sects -
1574 (bio->bi_iter.bi_sector &
1575 (chunk_sects - 1)),
1576 GFP_NOIO, fs_bio_set);
1577 bio_chain(split, bio);
1578 } else {
1579 split = bio;
1580 }
1581
1582 __make_request(mddev, split);
1583 } while (split != bio);
1599 1584
1600 /* In case raid10d snuck in to freeze_array */ 1585 /* In case raid10d snuck in to freeze_array */
1601 wake_up(&conf->wait_barrier); 1586 wake_up(&conf->wait_barrier);
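The deleted nr_waiting dance existed because the old code recursed into make_request() for both halves of a bio_pair, and the second recursion could block on a freshly raised barrier. The new loop never recurses: each fragment is chained and handed to generic_make_request(), and the parent completes only when its remaining count drains. bio_chain() itself is roughly (paraphrasing the series' fs/bio.c, so treat as a sketch):

        void bio_chain(struct bio *bio, struct bio *parent)
        {
                BUG_ON(bio->bi_private || bio->bi_end_io);

                bio->bi_private = parent;
                bio->bi_end_io = bio_chain_endio;   /* completes 'parent' on child endio */
                atomic_inc(&parent->bi_remaining);  /* parent waits for one more child */
        }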
@@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2124 bio_reset(tbio); 2109 bio_reset(tbio);
2125 2110
2126 tbio->bi_vcnt = vcnt; 2111 tbio->bi_vcnt = vcnt;
2127 tbio->bi_size = r10_bio->sectors << 9; 2112 tbio->bi_iter.bi_size = r10_bio->sectors << 9;
2128 tbio->bi_rw = WRITE; 2113 tbio->bi_rw = WRITE;
2129 tbio->bi_private = r10_bio; 2114 tbio->bi_private = r10_bio;
2130 tbio->bi_sector = r10_bio->devs[i].addr; 2115 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2131 2116
2132 for (j=0; j < vcnt ; j++) { 2117 for (j=0; j < vcnt ; j++) {
2133 tbio->bi_io_vec[j].bv_offset = 0; 2118 tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2144 atomic_inc(&r10_bio->remaining); 2129 atomic_inc(&r10_bio->remaining);
2145 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); 2130 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2146 2131
2147 tbio->bi_sector += conf->mirrors[d].rdev->data_offset; 2132 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2148 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2133 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2149 generic_make_request(tbio); 2134 generic_make_request(tbio);
2150 } 2135 }
@@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
2614 sectors = sect_to_write; 2599 sectors = sect_to_write;
2615 /* Write at 'sector' for 'sectors' */ 2600 /* Write at 'sector' for 'sectors' */
2616 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 2601 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2617 bio_trim(wbio, sector - bio->bi_sector, sectors); 2602 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2618 wbio->bi_sector = (r10_bio->devs[i].addr+ 2603 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
2619 choose_data_offset(r10_bio, rdev) + 2604 choose_data_offset(r10_bio, rdev) +
2620 (sector - r10_bio->sector)); 2605 (sector - r10_bio->sector));
2621 wbio->bi_bdev = rdev->bdev; 2606 wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2672,10 @@ read_more:
2687 (unsigned long long)r10_bio->sector); 2672 (unsigned long long)r10_bio->sector);
2688 bio = bio_clone_mddev(r10_bio->master_bio, 2673 bio = bio_clone_mddev(r10_bio->master_bio,
2689 GFP_NOIO, mddev); 2674 GFP_NOIO, mddev);
2690 bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors); 2675 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
2691 r10_bio->devs[slot].bio = bio; 2676 r10_bio->devs[slot].bio = bio;
2692 r10_bio->devs[slot].rdev = rdev; 2677 r10_bio->devs[slot].rdev = rdev;
2693 bio->bi_sector = r10_bio->devs[slot].addr 2678 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
2694 + choose_data_offset(r10_bio, rdev); 2679 + choose_data_offset(r10_bio, rdev);
2695 bio->bi_bdev = rdev->bdev; 2680 bio->bi_bdev = rdev->bdev;
2696 bio->bi_rw = READ | do_sync; 2681 bio->bi_rw = READ | do_sync;
@@ -2701,7 +2686,7 @@ read_more:
2701 struct bio *mbio = r10_bio->master_bio; 2686 struct bio *mbio = r10_bio->master_bio;
2702 int sectors_handled = 2687 int sectors_handled =
2703 r10_bio->sector + max_sectors 2688 r10_bio->sector + max_sectors
2704 - mbio->bi_sector; 2689 - mbio->bi_iter.bi_sector;
2705 r10_bio->sectors = max_sectors; 2690 r10_bio->sectors = max_sectors;
2706 spin_lock_irq(&conf->device_lock); 2691 spin_lock_irq(&conf->device_lock);
2707 if (mbio->bi_phys_segments == 0) 2692 if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2704,7 @@ read_more:
2719 set_bit(R10BIO_ReadError, 2704 set_bit(R10BIO_ReadError,
2720 &r10_bio->state); 2705 &r10_bio->state);
2721 r10_bio->mddev = mddev; 2706 r10_bio->mddev = mddev;
2722 r10_bio->sector = mbio->bi_sector 2707 r10_bio->sector = mbio->bi_iter.bi_sector
2723 + sectors_handled; 2708 + sectors_handled;
2724 2709
2725 goto read_more; 2710 goto read_more;
@@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3157 bio->bi_end_io = end_sync_read; 3142 bio->bi_end_io = end_sync_read;
3158 bio->bi_rw = READ; 3143 bio->bi_rw = READ;
3159 from_addr = r10_bio->devs[j].addr; 3144 from_addr = r10_bio->devs[j].addr;
3160 bio->bi_sector = from_addr + rdev->data_offset; 3145 bio->bi_iter.bi_sector = from_addr +
3146 rdev->data_offset;
3161 bio->bi_bdev = rdev->bdev; 3147 bio->bi_bdev = rdev->bdev;
3162 atomic_inc(&rdev->nr_pending); 3148 atomic_inc(&rdev->nr_pending);
3163 /* and we write to 'i' (if not in_sync) */ 3149 /* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3181 bio->bi_private = r10_bio; 3167 bio->bi_private = r10_bio;
3182 bio->bi_end_io = end_sync_write; 3168 bio->bi_end_io = end_sync_write;
3183 bio->bi_rw = WRITE; 3169 bio->bi_rw = WRITE;
3184 bio->bi_sector = to_addr 3170 bio->bi_iter.bi_sector = to_addr
3185 + rdev->data_offset; 3171 + rdev->data_offset;
3186 bio->bi_bdev = rdev->bdev; 3172 bio->bi_bdev = rdev->bdev;
3187 atomic_inc(&r10_bio->remaining); 3173 atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3210 bio->bi_private = r10_bio; 3196 bio->bi_private = r10_bio;
3211 bio->bi_end_io = end_sync_write; 3197 bio->bi_end_io = end_sync_write;
3212 bio->bi_rw = WRITE; 3198 bio->bi_rw = WRITE;
3213 bio->bi_sector = to_addr + rdev->data_offset; 3199 bio->bi_iter.bi_sector = to_addr +
3200 rdev->data_offset;
3214 bio->bi_bdev = rdev->bdev; 3201 bio->bi_bdev = rdev->bdev;
3215 atomic_inc(&r10_bio->remaining); 3202 atomic_inc(&r10_bio->remaining);
3216 break; 3203 break;
@@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3328 bio->bi_private = r10_bio; 3315 bio->bi_private = r10_bio;
3329 bio->bi_end_io = end_sync_read; 3316 bio->bi_end_io = end_sync_read;
3330 bio->bi_rw = READ; 3317 bio->bi_rw = READ;
3331 bio->bi_sector = sector + 3318 bio->bi_iter.bi_sector = sector +
3332 conf->mirrors[d].rdev->data_offset; 3319 conf->mirrors[d].rdev->data_offset;
3333 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 3320 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
3334 count++; 3321 count++;
@@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3350 bio->bi_private = r10_bio; 3337 bio->bi_private = r10_bio;
3351 bio->bi_end_io = end_sync_write; 3338 bio->bi_end_io = end_sync_write;
3352 bio->bi_rw = WRITE; 3339 bio->bi_rw = WRITE;
3353 bio->bi_sector = sector + 3340 bio->bi_iter.bi_sector = sector +
3354 conf->mirrors[d].replacement->data_offset; 3341 conf->mirrors[d].replacement->data_offset;
3355 bio->bi_bdev = conf->mirrors[d].replacement->bdev; 3342 bio->bi_bdev = conf->mirrors[d].replacement->bdev;
3356 count++; 3343 count++;
@@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3397 bio2 = bio2->bi_next) { 3384 bio2 = bio2->bi_next) {
3398 /* remove last page from this bio */ 3385 /* remove last page from this bio */
3399 bio2->bi_vcnt--; 3386 bio2->bi_vcnt--;
3400 bio2->bi_size -= len; 3387 bio2->bi_iter.bi_size -= len;
3401 bio2->bi_flags &= ~(1<< BIO_SEG_VALID); 3388 bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
3402 } 3389 }
3403 goto bio_full; 3390 goto bio_full;
@@ -4418,7 +4405,7 @@ read_more:
4418 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4405 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4419 4406
4420 read_bio->bi_bdev = rdev->bdev; 4407 read_bio->bi_bdev = rdev->bdev;
4421 read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4408 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4422 + rdev->data_offset); 4409 + rdev->data_offset);
4423 read_bio->bi_private = r10_bio; 4410 read_bio->bi_private = r10_bio;
4424 read_bio->bi_end_io = end_sync_read; 4411 read_bio->bi_end_io = end_sync_read;
@@ -4426,7 +4413,7 @@ read_more:
4426 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); 4413 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
4427 read_bio->bi_flags |= 1 << BIO_UPTODATE; 4414 read_bio->bi_flags |= 1 << BIO_UPTODATE;
4428 read_bio->bi_vcnt = 0; 4415 read_bio->bi_vcnt = 0;
4429 read_bio->bi_size = 0; 4416 read_bio->bi_iter.bi_size = 0;
4430 r10_bio->master_bio = read_bio; 4417 r10_bio->master_bio = read_bio;
4431 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; 4418 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4432 4419
@@ -4452,7 +4439,8 @@ read_more:
4452 4439
4453 bio_reset(b); 4440 bio_reset(b);
4454 b->bi_bdev = rdev2->bdev; 4441 b->bi_bdev = rdev2->bdev;
4455 b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; 4442 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4443 rdev2->new_data_offset;
4456 b->bi_private = r10_bio; 4444 b->bi_private = r10_bio;
4457 b->bi_end_io = end_reshape_write; 4445 b->bi_end_io = end_reshape_write;
4458 b->bi_rw = WRITE; 4446 b->bi_rw = WRITE;
@@ -4479,7 +4467,7 @@ read_more:
4479 bio2 = bio2->bi_next) { 4467 bio2 = bio2->bi_next) {
4480 /* Remove last page from this bio */ 4468 /* Remove last page from this bio */
4481 bio2->bi_vcnt--; 4469 bio2->bi_vcnt--;
4482 bio2->bi_size -= len; 4470 bio2->bi_iter.bi_size -= len;
4483 bio2->bi_flags &= ~(1<<BIO_SEG_VALID); 4471 bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
4484 } 4472 }
4485 goto bio_full; 4473 goto bio_full;
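Editor's note: the raid10 hunks above are mechanical renames from the removed bio fields to the embedded iterator — bi_sector, bi_size and bi_idx now live inside bio->bi_iter. A minimal userspace sketch of the layout these hunks assume (simplified stand-in structs with hypothetical values, not the kernel's real definitions):

#include <stdio.h>

typedef unsigned long long sector_t;

struct bvec_iter {
        sector_t     bi_sector;    /* device address in 512-byte sectors */
        unsigned int bi_size;      /* residual I/O count in bytes */
        unsigned int bi_idx;       /* current index into bi_io_vec */
        unsigned int bi_bvec_done; /* bytes completed in current bvec */
};

struct bio {
        struct bvec_iter bi_iter;  /* was: bi_sector/bi_size/bi_idx directly */
};

int main(void)
{
        struct bio bio = { { 0, 0, 0, 0 } };
        sector_t to_addr = 2048;        /* hypothetical array sector */
        sector_t data_offset = 262144;  /* hypothetical rdev->data_offset */

        /* same arithmetic as the raid10 resync paths above */
        bio.bi_iter.bi_sector = to_addr + data_offset;
        printf("device sector %llu\n", bio.bi_iter.bi_sector);
        return 0;
}

The point of gathering these fields into one iterator is that it can be copied and advanced independently of the bio_vec table, which is what later lets bios be split without cloning their vectors.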
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 03f82ab87d9e..67ca9c3d2939 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
133static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) 133static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
134{ 134{
135 int sectors = bio_sectors(bio); 135 int sectors = bio_sectors(bio);
136 if (bio->bi_sector + sectors < sector + STRIPE_SECTORS) 136 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
137 return bio->bi_next; 137 return bio->bi_next;
138 else 138 else
139 return NULL; 139 return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
225 225
226 return_bi = bi->bi_next; 226 return_bi = bi->bi_next;
227 bi->bi_next = NULL; 227 bi->bi_next = NULL;
228 bi->bi_size = 0; 228 bi->bi_iter.bi_size = 0;
229 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 229 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
230 bi, 0); 230 bi, 0);
231 bio_endio(bi, 0); 231 bio_endio(bi, 0);
@@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
852 bi->bi_rw, i); 852 bi->bi_rw, i);
853 atomic_inc(&sh->count); 853 atomic_inc(&sh->count);
854 if (use_new_offset(conf, sh)) 854 if (use_new_offset(conf, sh))
855 bi->bi_sector = (sh->sector 855 bi->bi_iter.bi_sector = (sh->sector
856 + rdev->new_data_offset); 856 + rdev->new_data_offset);
857 else 857 else
858 bi->bi_sector = (sh->sector 858 bi->bi_iter.bi_sector = (sh->sector
859 + rdev->data_offset); 859 + rdev->data_offset);
860 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 860 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
861 bi->bi_rw |= REQ_NOMERGE; 861 bi->bi_rw |= REQ_NOMERGE;
@@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
863 bi->bi_vcnt = 1; 863 bi->bi_vcnt = 1;
864 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 864 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
865 bi->bi_io_vec[0].bv_offset = 0; 865 bi->bi_io_vec[0].bv_offset = 0;
866 bi->bi_size = STRIPE_SIZE; 866 bi->bi_iter.bi_size = STRIPE_SIZE;
867 /* 867 /*
868 * If this is discard request, set bi_vcnt 0. We don't 868 * If this is discard request, set bi_vcnt 0. We don't
869 * want to confuse SCSI because SCSI will replace payload 869 * want to confuse SCSI because SCSI will replace payload
@@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
899 rbi->bi_rw, i); 899 rbi->bi_rw, i);
900 atomic_inc(&sh->count); 900 atomic_inc(&sh->count);
901 if (use_new_offset(conf, sh)) 901 if (use_new_offset(conf, sh))
902 rbi->bi_sector = (sh->sector 902 rbi->bi_iter.bi_sector = (sh->sector
903 + rrdev->new_data_offset); 903 + rrdev->new_data_offset);
904 else 904 else
905 rbi->bi_sector = (sh->sector 905 rbi->bi_iter.bi_sector = (sh->sector
906 + rrdev->data_offset); 906 + rrdev->data_offset);
907 rbi->bi_vcnt = 1; 907 rbi->bi_vcnt = 1;
908 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 908 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
909 rbi->bi_io_vec[0].bv_offset = 0; 909 rbi->bi_io_vec[0].bv_offset = 0;
910 rbi->bi_size = STRIPE_SIZE; 910 rbi->bi_iter.bi_size = STRIPE_SIZE;
911 /* 911 /*
912 * If this is discard request, set bi_vcnt 0. We don't 912 * If this is discard request, set bi_vcnt 0. We don't
913 * want to confuse SCSI because SCSI will replace payload 913 * want to confuse SCSI because SCSI will replace payload
@@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor *
935async_copy_data(int frombio, struct bio *bio, struct page *page, 935async_copy_data(int frombio, struct bio *bio, struct page *page,
936 sector_t sector, struct dma_async_tx_descriptor *tx) 936 sector_t sector, struct dma_async_tx_descriptor *tx)
937{ 937{
938 struct bio_vec *bvl; 938 struct bio_vec bvl;
939 struct bvec_iter iter;
939 struct page *bio_page; 940 struct page *bio_page;
940 int i;
941 int page_offset; 941 int page_offset;
942 struct async_submit_ctl submit; 942 struct async_submit_ctl submit;
943 enum async_tx_flags flags = 0; 943 enum async_tx_flags flags = 0;
944 944
945 if (bio->bi_sector >= sector) 945 if (bio->bi_iter.bi_sector >= sector)
946 page_offset = (signed)(bio->bi_sector - sector) * 512; 946 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
947 else 947 else
948 page_offset = (signed)(sector - bio->bi_sector) * -512; 948 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
949 949
950 if (frombio) 950 if (frombio)
951 flags |= ASYNC_TX_FENCE; 951 flags |= ASYNC_TX_FENCE;
952 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 952 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
953 953
954 bio_for_each_segment(bvl, bio, i) { 954 bio_for_each_segment(bvl, bio, iter) {
955 int len = bvl->bv_len; 955 int len = bvl.bv_len;
956 int clen; 956 int clen;
957 int b_offset = 0; 957 int b_offset = 0;
958 958
@@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
968 clen = len; 968 clen = len;
969 969
970 if (clen > 0) { 970 if (clen > 0) {
971 b_offset += bvl->bv_offset; 971 b_offset += bvl.bv_offset;
972 bio_page = bvl->bv_page; 972 bio_page = bvl.bv_page;
973 if (frombio) 973 if (frombio)
974 tx = async_memcpy(page, bio_page, page_offset, 974 tx = async_memcpy(page, bio_page, page_offset,
975 b_offset, clen, &submit); 975 b_offset, clen, &submit);
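Editor's note: async_copy_data() above shows the other half of the conversion — bio_for_each_segment() now hands back a struct bio_vec by value, driven by a struct bvec_iter, instead of a pointer plus an integer index. A userspace sketch of how such an iterator walks a vector table; cur_bvec() and iter_advance() are hypothetical stand-ins for the real bio.h helpers:

#include <stdio.h>

struct bio_vec { unsigned int bv_len, bv_offset; };

struct bvec_iter {
        unsigned int bi_size;      /* bytes left to walk */
        unsigned int bi_idx;       /* current vec */
        unsigned int bi_bvec_done; /* bytes consumed in current vec */
};

static struct bio_vec cur_bvec(const struct bio_vec *v, struct bvec_iter it)
{
        struct bio_vec bv = v[it.bi_idx];

        bv.bv_offset += it.bi_bvec_done;
        bv.bv_len    -= it.bi_bvec_done;
        if (bv.bv_len > it.bi_size)
                bv.bv_len = it.bi_size;  /* clamp to residual count */
        return bv;
}

static void iter_advance(const struct bio_vec *v, struct bvec_iter *it,
                         unsigned int bytes)
{
        it->bi_size -= bytes;
        it->bi_bvec_done += bytes;
        if (it->bi_bvec_done == v[it->bi_idx].bv_len) {
                it->bi_bvec_done = 0;
                it->bi_idx++;
        }
}

int main(void)
{
        struct bio_vec vecs[] = { { 4096, 0 }, { 2048, 0 } };
        struct bvec_iter it = { 6144, 0, 0 };

        while (it.bi_size) {               /* bio_for_each_segment analogue */
                struct bio_vec bv = cur_bvec(vecs, it);
                printf("segment: len %u offset %u\n", bv.bv_len, bv.bv_offset);
                iter_advance(vecs, &it, bv.bv_len);
        }
        return 0;
}

Because the walk is parameterized by (bi_idx, bi_bvec_done, bi_size) rather than by rewriting the table, two iterators can cover disjoint halves of the same immutable vector array.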
@@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
1012 BUG_ON(!dev->read); 1012 BUG_ON(!dev->read);
1013 rbi = dev->read; 1013 rbi = dev->read;
1014 dev->read = NULL; 1014 dev->read = NULL;
1015 while (rbi && rbi->bi_sector < 1015 while (rbi && rbi->bi_iter.bi_sector <
1016 dev->sector + STRIPE_SECTORS) { 1016 dev->sector + STRIPE_SECTORS) {
1017 rbi2 = r5_next_bio(rbi, dev->sector); 1017 rbi2 = r5_next_bio(rbi, dev->sector);
1018 if (!raid5_dec_bi_active_stripes(rbi)) { 1018 if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh)
1048 dev->read = rbi = dev->toread; 1048 dev->read = rbi = dev->toread;
1049 dev->toread = NULL; 1049 dev->toread = NULL;
1050 spin_unlock_irq(&sh->stripe_lock); 1050 spin_unlock_irq(&sh->stripe_lock);
1051 while (rbi && rbi->bi_sector < 1051 while (rbi && rbi->bi_iter.bi_sector <
1052 dev->sector + STRIPE_SECTORS) { 1052 dev->sector + STRIPE_SECTORS) {
1053 tx = async_copy_data(0, rbi, dev->page, 1053 tx = async_copy_data(0, rbi, dev->page,
1054 dev->sector, tx); 1054 dev->sector, tx);
@@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1390 wbi = dev->written = chosen; 1390 wbi = dev->written = chosen;
1391 spin_unlock_irq(&sh->stripe_lock); 1391 spin_unlock_irq(&sh->stripe_lock);
1392 1392
1393 while (wbi && wbi->bi_sector < 1393 while (wbi && wbi->bi_iter.bi_sector <
1394 dev->sector + STRIPE_SECTORS) { 1394 dev->sector + STRIPE_SECTORS) {
1395 if (wbi->bi_rw & REQ_FUA) 1395 if (wbi->bi_rw & REQ_FUA)
1396 set_bit(R5_WantFUA, &dev->flags); 1396 set_bit(R5_WantFUA, &dev->flags);
@@ -2615,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2615 int firstwrite=0; 2615 int firstwrite=0;
2616 2616
2617 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2617 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2618 (unsigned long long)bi->bi_sector, 2618 (unsigned long long)bi->bi_iter.bi_sector,
2619 (unsigned long long)sh->sector); 2619 (unsigned long long)sh->sector);
2620 2620
2621 /* 2621 /*
@@ -2633,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2633 firstwrite = 1; 2633 firstwrite = 1;
2634 } else 2634 } else
2635 bip = &sh->dev[dd_idx].toread; 2635 bip = &sh->dev[dd_idx].toread;
2636 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 2636 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
2637 if (bio_end_sector(*bip) > bi->bi_sector) 2637 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
2638 goto overlap; 2638 goto overlap;
2639 bip = & (*bip)->bi_next; 2639 bip = & (*bip)->bi_next;
2640 } 2640 }
2641 if (*bip && (*bip)->bi_sector < bio_end_sector(bi)) 2641 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
2642 goto overlap; 2642 goto overlap;
2643 2643
2644 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 2644 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2652,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2652 sector_t sector = sh->dev[dd_idx].sector; 2652 sector_t sector = sh->dev[dd_idx].sector;
2653 for (bi=sh->dev[dd_idx].towrite; 2653 for (bi=sh->dev[dd_idx].towrite;
2654 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 2654 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2655 bi && bi->bi_sector <= sector; 2655 bi && bi->bi_iter.bi_sector <= sector;
2656 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 2656 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2657 if (bio_end_sector(bi) >= sector) 2657 if (bio_end_sector(bi) >= sector)
2658 sector = bio_end_sector(bi); 2658 sector = bio_end_sector(bi);
@@ -2662,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2662 } 2662 }
2663 2663
2664 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2664 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2665 (unsigned long long)(*bip)->bi_sector, 2665 (unsigned long long)(*bip)->bi_iter.bi_sector,
2666 (unsigned long long)sh->sector, dd_idx); 2666 (unsigned long long)sh->sector, dd_idx);
2667 spin_unlock_irq(&sh->stripe_lock); 2667 spin_unlock_irq(&sh->stripe_lock);
2668 2668
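Editor's note: add_stripe_bio() above keeps its per-device bio lists sorted by bi_iter.bi_sector and rejects overlapping requests; only the field accesses changed. The list manipulation itself, modeled on plain C types — mini_bio is a hypothetical stand-in for struct bio, and bio_end_sector() is modeled as start plus length:

#include <stdio.h>
#include <stddef.h>

typedef unsigned long long sector_t;

struct mini_bio {
        sector_t bi_sector;   /* stands in for bi_iter.bi_sector */
        sector_t sectors;
        struct mini_bio *bi_next;
};

static sector_t bio_end_sector(const struct mini_bio *b)
{
        return b->bi_sector + b->sectors;
}

/* Returns 0 on success, -1 if the new bio overlaps an existing one. */
static int insert_sorted(struct mini_bio **head, struct mini_bio *bi)
{
        struct mini_bio **bip = head;

        while (*bip && (*bip)->bi_sector < bi->bi_sector) {
                if (bio_end_sector(*bip) > bi->bi_sector)
                        return -1;           /* overlaps predecessor */
                bip = &(*bip)->bi_next;
        }
        if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
                return -1;                   /* overlaps successor */
        bi->bi_next = *bip;
        *bip = bi;
        return 0;
}

int main(void)
{
        struct mini_bio a = { 0, 8, NULL }, b = { 16, 8, NULL }, c = { 4, 8, NULL };
        struct mini_bio *head = NULL;

        printf("%d %d %d\n", insert_sorted(&head, &a),
               insert_sorted(&head, &b), insert_sorted(&head, &c));
        /* prints "0 0 -1": c overlaps a */
        return 0;
}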
@@ -2737,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2737 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2737 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2738 wake_up(&conf->wait_for_overlap); 2738 wake_up(&conf->wait_for_overlap);
2739 2739
2740 while (bi && bi->bi_sector < 2740 while (bi && bi->bi_iter.bi_sector <
2741 sh->dev[i].sector + STRIPE_SECTORS) { 2741 sh->dev[i].sector + STRIPE_SECTORS) {
2742 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2742 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2743 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2743 clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2756,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2756 bi = sh->dev[i].written; 2756 bi = sh->dev[i].written;
2757 sh->dev[i].written = NULL; 2757 sh->dev[i].written = NULL;
2758 if (bi) bitmap_end = 1; 2758 if (bi) bitmap_end = 1;
2759 while (bi && bi->bi_sector < 2759 while (bi && bi->bi_iter.bi_sector <
2760 sh->dev[i].sector + STRIPE_SECTORS) { 2760 sh->dev[i].sector + STRIPE_SECTORS) {
2761 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2761 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2762 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2762 clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2780,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2780 spin_unlock_irq(&sh->stripe_lock); 2780 spin_unlock_irq(&sh->stripe_lock);
2781 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2781 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2782 wake_up(&conf->wait_for_overlap); 2782 wake_up(&conf->wait_for_overlap);
2783 while (bi && bi->bi_sector < 2783 while (bi && bi->bi_iter.bi_sector <
2784 sh->dev[i].sector + STRIPE_SECTORS) { 2784 sh->dev[i].sector + STRIPE_SECTORS) {
2785 struct bio *nextbi = 2785 struct bio *nextbi =
2786 r5_next_bio(bi, sh->dev[i].sector); 2786 r5_next_bio(bi, sh->dev[i].sector);
@@ -3004,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
3004 clear_bit(R5_UPTODATE, &dev->flags); 3004 clear_bit(R5_UPTODATE, &dev->flags);
3005 wbi = dev->written; 3005 wbi = dev->written;
3006 dev->written = NULL; 3006 dev->written = NULL;
3007 while (wbi && wbi->bi_sector < 3007 while (wbi && wbi->bi_iter.bi_sector <
3008 dev->sector + STRIPE_SECTORS) { 3008 dev->sector + STRIPE_SECTORS) {
3009 wbi2 = r5_next_bio(wbi, dev->sector); 3009 wbi2 = r5_next_bio(wbi, dev->sector);
3010 if (!raid5_dec_bi_active_stripes(wbi)) { 3010 if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4096,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
4096 4096
4097static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 4097static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
4098{ 4098{
4099 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 4099 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
4100 unsigned int chunk_sectors = mddev->chunk_sectors; 4100 unsigned int chunk_sectors = mddev->chunk_sectors;
4101 unsigned int bio_sectors = bio_sectors(bio); 4101 unsigned int bio_sectors = bio_sectors(bio);
4102 4102
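Editor's note: in_chunk_boundary() above now derives the absolute sector from bi_iter.bi_sector plus the partition start; the test it feeds is unchanged by this patch and not shown in the hunk. Roughly, it asks whether the whole bio fits inside one chunk — a sketch of that kind of check, assuming chunk_sectors is a power of two as raid5 requires:

#include <stdio.h>

typedef unsigned long long sector_t;

static int in_chunk_boundary(sector_t sector, unsigned int bio_sectors,
                             unsigned int chunk_sectors)
{
        /* the mask gives the offset into the current chunk */
        return ((sector & (chunk_sectors - 1)) + bio_sectors) <= chunk_sectors;
}

int main(void)
{
        printf("%d %d\n", in_chunk_boundary(1000, 24, 1024),
               in_chunk_boundary(1016, 24, 1024));
        /* prints "1 0": the second bio crosses the 1024-sector boundary */
        return 0;
}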
@@ -4233,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4233 /* 4233 /*
4234 * compute position 4234 * compute position
4235 */ 4235 */
4236 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 4236 align_bi->bi_iter.bi_sector =
4237 0, 4237 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
4238 &dd_idx, NULL); 4238 0, &dd_idx, NULL);
4239 4239
4240 end_sector = bio_end_sector(align_bi); 4240 end_sector = bio_end_sector(align_bi);
4241 rcu_read_lock(); 4241 rcu_read_lock();
@@ -4260,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4260 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 4260 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
4261 4261
4262 if (!bio_fits_rdev(align_bi) || 4262 if (!bio_fits_rdev(align_bi) ||
4263 is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi), 4263 is_badblock(rdev, align_bi->bi_iter.bi_sector,
4264 bio_sectors(align_bi),
4264 &first_bad, &bad_sectors)) { 4265 &first_bad, &bad_sectors)) {
4265 /* too big in some way, or has a known bad block */ 4266 /* too big in some way, or has a known bad block */
4266 bio_put(align_bi); 4267 bio_put(align_bi);
@@ -4269,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4269 } 4270 }
4270 4271
4271 /* No reshape active, so we can trust rdev->data_offset */ 4272 /* No reshape active, so we can trust rdev->data_offset */
4272 align_bi->bi_sector += rdev->data_offset; 4273 align_bi->bi_iter.bi_sector += rdev->data_offset;
4273 4274
4274 spin_lock_irq(&conf->device_lock); 4275 spin_lock_irq(&conf->device_lock);
4275 wait_event_lock_irq(conf->wait_for_stripe, 4276 wait_event_lock_irq(conf->wait_for_stripe,
@@ -4281,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4281 if (mddev->gendisk) 4282 if (mddev->gendisk)
4282 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 4283 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
4283 align_bi, disk_devt(mddev->gendisk), 4284 align_bi, disk_devt(mddev->gendisk),
4284 raid_bio->bi_sector); 4285 raid_bio->bi_iter.bi_sector);
4285 generic_make_request(align_bi); 4286 generic_make_request(align_bi);
4286 return 1; 4287 return 1;
4287 } else { 4288 } else {
@@ -4464,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
4464 /* Skip discard while reshape is happening */ 4465 /* Skip discard while reshape is happening */
4465 return; 4466 return;
4466 4467
4467 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4468 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4468 last_sector = bi->bi_sector + (bi->bi_size>>9); 4469 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
4469 4470
4470 bi->bi_next = NULL; 4471 bi->bi_next = NULL;
4471 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4472 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
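Editor's note: the discard path above rounds the start down to a stripe boundary and derives the end sector from the byte count in bi_iter.bi_size. A quick check of that arithmetic, assuming STRIPE_SECTORS is 8 (4K stripes of 512-byte sectors, as in this era's raid5) and hypothetical request values:

#include <stdio.h>

typedef unsigned long long sector_t;
#define STRIPE_SECTORS 8ULL

int main(void)
{
        sector_t bi_sector = 1234;        /* hypothetical request start */
        unsigned int bi_size = 64 * 1024; /* hypothetical request bytes */

        sector_t logical = bi_sector & ~(STRIPE_SECTORS - 1);
        sector_t last    = bi_sector + (bi_size >> 9); /* bytes -> sectors */

        printf("logical %llu last %llu\n", logical, last); /* 1232 1362 */
        return 0;
}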
@@ -4569,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
4569 return; 4570 return;
4570 } 4571 }
4571 4572
4572 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4573 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4573 last_sector = bio_end_sector(bi); 4574 last_sector = bio_end_sector(bi);
4574 bi->bi_next = NULL; 4575 bi->bi_next = NULL;
4575 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4576 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -5053,7 +5054,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
5053 int remaining; 5054 int remaining;
5054 int handled = 0; 5055 int handled = 0;
5055 5056
5056 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5057 logical_sector = raid_bio->bi_iter.bi_sector &
5058 ~((sector_t)STRIPE_SECTORS-1);
5057 sector = raid5_compute_sector(conf, logical_sector, 5059 sector = raid5_compute_sector(conf, logical_sector,
5058 0, &dd_idx, NULL); 5060 0, &dd_idx, NULL);
5059 last_sector = bio_end_sector(raid_bio); 5061 last_sector = bio_end_sector(raid_bio);
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index dd239bdbfcb4..00d339c361fc 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2235 } 2235 }
2236 2236
2237 /* do we need to support multiple segments? */ 2237 /* do we need to support multiple segments? */
2238 if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) { 2238 if (bio_multiple_segments(req->bio) ||
2239 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 2239 bio_multiple_segments(rsp->bio)) {
2240 ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req), 2240 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
2241 bio_segments(rsp->bio), blk_rq_bytes(rsp)); 2241 ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
2242 return -EINVAL; 2242 return -EINVAL;
2243 } 2243 }
2244 2244
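Editor's note: the SMP passthrough handlers in this merge (mptsas here, libsas and the mpt2/mpt3sas ones below) stop calling bio_segments(), which under immutable biovecs would mean walking the iterator, and instead only ask whether the bio spans more than one segment. A sketch of the idea behind bio_multiple_segments() — paraphrased, not copied, from this era's bio.h: a bio is single-segment exactly when its remaining byte count fits in the current vector.

#include <stdio.h>

struct bio_vec { unsigned int bv_len, bv_offset; };
struct bvec_iter { unsigned int bi_size, bi_idx, bi_bvec_done; };
struct bio { struct bio_vec *bi_io_vec; struct bvec_iter bi_iter; };

static int bio_multiple_segments(const struct bio *bio)
{
        unsigned int first = bio->bi_io_vec[bio->bi_iter.bi_idx].bv_len -
                             bio->bi_iter.bi_bvec_done;

        if (first > bio->bi_iter.bi_size)
                first = bio->bi_iter.bi_size;  /* clamp, as bio_iovec() does */
        return bio->bi_iter.bi_size != first;
}

int main(void)
{
        struct bio_vec one[] = { { 4096, 0 } };
        struct bio_vec two[] = { { 4096, 0 }, { 4096, 0 } };
        struct bio a = { one, { 4096, 0, 0 } };
        struct bio b = { two, { 8192, 0, 0 } };

        printf("%d %d\n", bio_multiple_segments(&a), bio_multiple_segments(&b));
        /* prints "0 1" */
        return 0;
}

This also explains the trimmed printk: the per-bio segment counts dropped out of the error message because they are no longer computed.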
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 92bd22ce6760..9cbc567698ce 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
504 struct dasd_diag_req *dreq; 504 struct dasd_diag_req *dreq;
505 struct dasd_diag_bio *dbio; 505 struct dasd_diag_bio *dbio;
506 struct req_iterator iter; 506 struct req_iterator iter;
507 struct bio_vec *bv; 507 struct bio_vec bv;
508 char *dst; 508 char *dst;
509 unsigned int count, datasize; 509 unsigned int count, datasize;
510 sector_t recid, first_rec, last_rec; 510 sector_t recid, first_rec, last_rec;
@@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
525 /* Check struct bio and count the number of blocks for the request. */ 525 /* Check struct bio and count the number of blocks for the request. */
526 count = 0; 526 count = 0;
527 rq_for_each_segment(bv, req, iter) { 527 rq_for_each_segment(bv, req, iter) {
528 if (bv->bv_len & (blksize - 1)) 528 if (bv.bv_len & (blksize - 1))
529 /* Fba can only do full blocks. */ 529 /* Fba can only do full blocks. */
530 return ERR_PTR(-EINVAL); 530 return ERR_PTR(-EINVAL);
531 count += bv->bv_len >> (block->s2b_shift + 9); 531 count += bv.bv_len >> (block->s2b_shift + 9);
532 } 532 }
533 /* Paranoia. */ 533 /* Paranoia. */
534 if (count != last_rec - first_rec + 1) 534 if (count != last_rec - first_rec + 1)
@@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
545 dbio = dreq->bio; 545 dbio = dreq->bio;
546 recid = first_rec; 546 recid = first_rec;
547 rq_for_each_segment(bv, req, iter) { 547 rq_for_each_segment(bv, req, iter) {
548 dst = page_address(bv->bv_page) + bv->bv_offset; 548 dst = page_address(bv.bv_page) + bv.bv_offset;
549 for (off = 0; off < bv->bv_len; off += blksize) { 549 for (off = 0; off < bv.bv_len; off += blksize) {
550 memset(dbio, 0, sizeof (struct dasd_diag_bio)); 550 memset(dbio, 0, sizeof (struct dasd_diag_bio));
551 dbio->type = rw_cmd; 551 dbio->type = rw_cmd;
552 dbio->block_number = recid + 1; 552 dbio->block_number = recid + 1;
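Editor's note: the DASD drivers in this merge all get the same treatment — rq_for_each_segment() now yields struct bio_vec by value, so every bv-> becomes bv. . A small model of the block-accounting loop above, assuming a 4K block size (blksize = 4096, s2b_shift = 3: eight 512-byte sectors per block) and hypothetical segment lengths:

#include <stdio.h>

struct bio_vec { unsigned int bv_len, bv_offset; };

int main(void)
{
        struct bio_vec segs[] = { { 8192, 0 }, { 4096, 0 } };
        unsigned int blksize = 4096, s2b_shift = 3;
        unsigned int i, count = 0;

        for (i = 0; i < 2; i++) {             /* rq_for_each_segment analogue */
                struct bio_vec bv = segs[i];  /* a copy, as the new code gets */

                if (bv.bv_len & (blksize - 1))
                        return 1;             /* only full blocks allowed */
                count += bv.bv_len >> (s2b_shift + 9); /* bytes -> blocks */
        }
        printf("count %u\n", count);          /* 3 blocks */
        return 0;
}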
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 95e45782692f..2e8e0755070b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
2551 struct dasd_ccw_req *cqr; 2551 struct dasd_ccw_req *cqr;
2552 struct ccw1 *ccw; 2552 struct ccw1 *ccw;
2553 struct req_iterator iter; 2553 struct req_iterator iter;
2554 struct bio_vec *bv; 2554 struct bio_vec bv;
2555 char *dst; 2555 char *dst;
2556 unsigned int off; 2556 unsigned int off;
2557 int count, cidaw, cplength, datasize; 2557 int count, cidaw, cplength, datasize;
@@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
2573 count = 0; 2573 count = 0;
2574 cidaw = 0; 2574 cidaw = 0;
2575 rq_for_each_segment(bv, req, iter) { 2575 rq_for_each_segment(bv, req, iter) {
2576 if (bv->bv_len & (blksize - 1)) 2576 if (bv.bv_len & (blksize - 1))
2577 /* Eckd can only do full blocks. */ 2577 /* Eckd can only do full blocks. */
2578 return ERR_PTR(-EINVAL); 2578 return ERR_PTR(-EINVAL);
2579 count += bv->bv_len >> (block->s2b_shift + 9); 2579 count += bv.bv_len >> (block->s2b_shift + 9);
2580#if defined(CONFIG_64BIT) 2580#if defined(CONFIG_64BIT)
2581 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 2581 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
2582 cidaw += bv->bv_len >> (block->s2b_shift + 9); 2582 cidaw += bv.bv_len >> (block->s2b_shift + 9);
2583#endif 2583#endif
2584 } 2584 }
2585 /* Paranoia. */ 2585 /* Paranoia. */
@@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
2650 last_rec - recid + 1, cmd, basedev, blksize); 2650 last_rec - recid + 1, cmd, basedev, blksize);
2651 } 2651 }
2652 rq_for_each_segment(bv, req, iter) { 2652 rq_for_each_segment(bv, req, iter) {
2653 dst = page_address(bv->bv_page) + bv->bv_offset; 2653 dst = page_address(bv.bv_page) + bv.bv_offset;
2654 if (dasd_page_cache) { 2654 if (dasd_page_cache) {
2655 char *copy = kmem_cache_alloc(dasd_page_cache, 2655 char *copy = kmem_cache_alloc(dasd_page_cache,
2656 GFP_DMA | __GFP_NOWARN); 2656 GFP_DMA | __GFP_NOWARN);
2657 if (copy && rq_data_dir(req) == WRITE) 2657 if (copy && rq_data_dir(req) == WRITE)
2658 memcpy(copy + bv->bv_offset, dst, bv->bv_len); 2658 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
2659 if (copy) 2659 if (copy)
2660 dst = copy + bv->bv_offset; 2660 dst = copy + bv.bv_offset;
2661 } 2661 }
2662 for (off = 0; off < bv->bv_len; off += blksize) { 2662 for (off = 0; off < bv.bv_len; off += blksize) {
2663 sector_t trkid = recid; 2663 sector_t trkid = recid;
2664 unsigned int recoffs = sector_div(trkid, blk_per_trk); 2664 unsigned int recoffs = sector_div(trkid, blk_per_trk);
2665 rcmd = cmd; 2665 rcmd = cmd;
@@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2735 struct dasd_ccw_req *cqr; 2735 struct dasd_ccw_req *cqr;
2736 struct ccw1 *ccw; 2736 struct ccw1 *ccw;
2737 struct req_iterator iter; 2737 struct req_iterator iter;
2738 struct bio_vec *bv; 2738 struct bio_vec bv;
2739 char *dst, *idaw_dst; 2739 char *dst, *idaw_dst;
2740 unsigned int cidaw, cplength, datasize; 2740 unsigned int cidaw, cplength, datasize;
2741 unsigned int tlf; 2741 unsigned int tlf;
@@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2813 idaw_dst = NULL; 2813 idaw_dst = NULL;
2814 idaw_len = 0; 2814 idaw_len = 0;
2815 rq_for_each_segment(bv, req, iter) { 2815 rq_for_each_segment(bv, req, iter) {
2816 dst = page_address(bv->bv_page) + bv->bv_offset; 2816 dst = page_address(bv.bv_page) + bv.bv_offset;
2817 seg_len = bv->bv_len; 2817 seg_len = bv.bv_len;
2818 while (seg_len) { 2818 while (seg_len) {
2819 if (new_track) { 2819 if (new_track) {
2820 trkid = recid; 2820 trkid = recid;
@@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3039{ 3039{
3040 struct dasd_ccw_req *cqr; 3040 struct dasd_ccw_req *cqr;
3041 struct req_iterator iter; 3041 struct req_iterator iter;
3042 struct bio_vec *bv; 3042 struct bio_vec bv;
3043 char *dst; 3043 char *dst;
3044 unsigned int trkcount, ctidaw; 3044 unsigned int trkcount, ctidaw;
3045 unsigned char cmd; 3045 unsigned char cmd;
@@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3125 new_track = 1; 3125 new_track = 1;
3126 recid = first_rec; 3126 recid = first_rec;
3127 rq_for_each_segment(bv, req, iter) { 3127 rq_for_each_segment(bv, req, iter) {
3128 dst = page_address(bv->bv_page) + bv->bv_offset; 3128 dst = page_address(bv.bv_page) + bv.bv_offset;
3129 seg_len = bv->bv_len; 3129 seg_len = bv.bv_len;
3130 while (seg_len) { 3130 while (seg_len) {
3131 if (new_track) { 3131 if (new_track) {
3132 trkid = recid; 3132 trkid = recid;
@@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3158 } 3158 }
3159 } else { 3159 } else {
3160 rq_for_each_segment(bv, req, iter) { 3160 rq_for_each_segment(bv, req, iter) {
3161 dst = page_address(bv->bv_page) + bv->bv_offset; 3161 dst = page_address(bv.bv_page) + bv.bv_offset;
3162 last_tidaw = itcw_add_tidaw(itcw, 0x00, 3162 last_tidaw = itcw_add_tidaw(itcw, 0x00,
3163 dst, bv->bv_len); 3163 dst, bv.bv_len);
3164 if (IS_ERR(last_tidaw)) { 3164 if (IS_ERR(last_tidaw)) {
3165 ret = -EINVAL; 3165 ret = -EINVAL;
3166 goto out_error; 3166 goto out_error;
@@ -3278,7 +3278,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3278 struct dasd_ccw_req *cqr; 3278 struct dasd_ccw_req *cqr;
3279 struct ccw1 *ccw; 3279 struct ccw1 *ccw;
3280 struct req_iterator iter; 3280 struct req_iterator iter;
3281 struct bio_vec *bv; 3281 struct bio_vec bv;
3282 char *dst; 3282 char *dst;
3283 unsigned char cmd; 3283 unsigned char cmd;
3284 unsigned int trkcount; 3284 unsigned int trkcount;
@@ -3378,8 +3378,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3378 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 3378 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
3379 } 3379 }
3380 rq_for_each_segment(bv, req, iter) { 3380 rq_for_each_segment(bv, req, iter) {
3381 dst = page_address(bv->bv_page) + bv->bv_offset; 3381 dst = page_address(bv.bv_page) + bv.bv_offset;
3382 seg_len = bv->bv_len; 3382 seg_len = bv.bv_len;
3383 if (cmd == DASD_ECKD_CCW_READ_TRACK) 3383 if (cmd == DASD_ECKD_CCW_READ_TRACK)
3384 memset(dst, 0, seg_len); 3384 memset(dst, 0, seg_len);
3385 if (!len_to_track_end) { 3385 if (!len_to_track_end) {
@@ -3424,7 +3424,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
3424 struct dasd_eckd_private *private; 3424 struct dasd_eckd_private *private;
3425 struct ccw1 *ccw; 3425 struct ccw1 *ccw;
3426 struct req_iterator iter; 3426 struct req_iterator iter;
3427 struct bio_vec *bv; 3427 struct bio_vec bv;
3428 char *dst, *cda; 3428 char *dst, *cda;
3429 unsigned int blksize, blk_per_trk, off; 3429 unsigned int blksize, blk_per_trk, off;
3430 sector_t recid; 3430 sector_t recid;
@@ -3442,8 +3442,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
3442 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 3442 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
3443 ccw++; 3443 ccw++;
3444 rq_for_each_segment(bv, req, iter) { 3444 rq_for_each_segment(bv, req, iter) {
3445 dst = page_address(bv->bv_page) + bv->bv_offset; 3445 dst = page_address(bv.bv_page) + bv.bv_offset;
3446 for (off = 0; off < bv->bv_len; off += blksize) { 3446 for (off = 0; off < bv.bv_len; off += blksize) {
3447 /* Skip locate record. */ 3447 /* Skip locate record. */
3448 if (private->uses_cdl && recid <= 2*blk_per_trk) 3448 if (private->uses_cdl && recid <= 2*blk_per_trk)
3449 ccw++; 3449 ccw++;
@@ -3454,7 +3454,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
3454 cda = (char *)((addr_t) ccw->cda); 3454 cda = (char *)((addr_t) ccw->cda);
3455 if (dst != cda) { 3455 if (dst != cda) {
3456 if (rq_data_dir(req) == READ) 3456 if (rq_data_dir(req) == READ)
3457 memcpy(dst, cda, bv->bv_len); 3457 memcpy(dst, cda, bv.bv_len);
3458 kmem_cache_free(dasd_page_cache, 3458 kmem_cache_free(dasd_page_cache,
3459 (void *)((addr_t)cda & PAGE_MASK)); 3459 (void *)((addr_t)cda & PAGE_MASK));
3460 } 3460 }
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 9cbc8c32ba59..2c8e68bf9a1c 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
260 struct dasd_ccw_req *cqr; 260 struct dasd_ccw_req *cqr;
261 struct ccw1 *ccw; 261 struct ccw1 *ccw;
262 struct req_iterator iter; 262 struct req_iterator iter;
263 struct bio_vec *bv; 263 struct bio_vec bv;
264 char *dst; 264 char *dst;
265 int count, cidaw, cplength, datasize; 265 int count, cidaw, cplength, datasize;
266 sector_t recid, first_rec, last_rec; 266 sector_t recid, first_rec, last_rec;
@@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
283 count = 0; 283 count = 0;
284 cidaw = 0; 284 cidaw = 0;
285 rq_for_each_segment(bv, req, iter) { 285 rq_for_each_segment(bv, req, iter) {
286 if (bv->bv_len & (blksize - 1)) 286 if (bv.bv_len & (blksize - 1))
287 /* Fba can only do full blocks. */ 287 /* Fba can only do full blocks. */
288 return ERR_PTR(-EINVAL); 288 return ERR_PTR(-EINVAL);
289 count += bv->bv_len >> (block->s2b_shift + 9); 289 count += bv.bv_len >> (block->s2b_shift + 9);
290#if defined(CONFIG_64BIT) 290#if defined(CONFIG_64BIT)
291 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 291 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
292 cidaw += bv->bv_len / blksize; 292 cidaw += bv.bv_len / blksize;
293#endif 293#endif
294 } 294 }
295 /* Paranoia. */ 295 /* Paranoia. */
@@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
326 } 326 }
327 recid = first_rec; 327 recid = first_rec;
328 rq_for_each_segment(bv, req, iter) { 328 rq_for_each_segment(bv, req, iter) {
329 dst = page_address(bv->bv_page) + bv->bv_offset; 329 dst = page_address(bv.bv_page) + bv.bv_offset;
330 if (dasd_page_cache) { 330 if (dasd_page_cache) {
331 char *copy = kmem_cache_alloc(dasd_page_cache, 331 char *copy = kmem_cache_alloc(dasd_page_cache,
332 GFP_DMA | __GFP_NOWARN); 332 GFP_DMA | __GFP_NOWARN);
333 if (copy && rq_data_dir(req) == WRITE) 333 if (copy && rq_data_dir(req) == WRITE)
334 memcpy(copy + bv->bv_offset, dst, bv->bv_len); 334 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
335 if (copy) 335 if (copy)
336 dst = copy + bv->bv_offset; 336 dst = copy + bv.bv_offset;
337 } 337 }
338 for (off = 0; off < bv->bv_len; off += blksize) { 338 for (off = 0; off < bv.bv_len; off += blksize) {
339 /* Locate record for stupid devices. */ 339 /* Locate record for stupid devices. */
340 if (private->rdc_data.mode.bits.data_chain == 0) { 340 if (private->rdc_data.mode.bits.data_chain == 0) {
341 ccw[-1].flags |= CCW_FLAG_CC; 341 ccw[-1].flags |= CCW_FLAG_CC;
@@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
384 struct dasd_fba_private *private; 384 struct dasd_fba_private *private;
385 struct ccw1 *ccw; 385 struct ccw1 *ccw;
386 struct req_iterator iter; 386 struct req_iterator iter;
387 struct bio_vec *bv; 387 struct bio_vec bv;
388 char *dst, *cda; 388 char *dst, *cda;
389 unsigned int blksize, off; 389 unsigned int blksize, off;
390 int status; 390 int status;
@@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
399 if (private->rdc_data.mode.bits.data_chain != 0) 399 if (private->rdc_data.mode.bits.data_chain != 0)
400 ccw++; 400 ccw++;
401 rq_for_each_segment(bv, req, iter) { 401 rq_for_each_segment(bv, req, iter) {
402 dst = page_address(bv->bv_page) + bv->bv_offset; 402 dst = page_address(bv.bv_page) + bv.bv_offset;
403 for (off = 0; off < bv->bv_len; off += blksize) { 403 for (off = 0; off < bv.bv_len; off += blksize) {
404 /* Skip locate record. */ 404 /* Skip locate record. */
405 if (private->rdc_data.mode.bits.data_chain == 0) 405 if (private->rdc_data.mode.bits.data_chain == 0)
406 ccw++; 406 ccw++;
@@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
411 cda = (char *)((addr_t) ccw->cda); 411 cda = (char *)((addr_t) ccw->cda);
412 if (dst != cda) { 412 if (dst != cda) {
413 if (rq_data_dir(req) == READ) 413 if (rq_data_dir(req) == READ)
414 memcpy(dst, cda, bv->bv_len); 414 memcpy(dst, cda, bv.bv_len);
415 kmem_cache_free(dasd_page_cache, 415 kmem_cache_free(dasd_page_cache,
416 (void *)((addr_t)cda & PAGE_MASK)); 416 (void *)((addr_t)cda & PAGE_MASK));
417 } 417 }
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 6eca019bcf30..ebf41e228e55 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -808,18 +808,19 @@ static void
808dcssblk_make_request(struct request_queue *q, struct bio *bio) 808dcssblk_make_request(struct request_queue *q, struct bio *bio)
809{ 809{
810 struct dcssblk_dev_info *dev_info; 810 struct dcssblk_dev_info *dev_info;
811 struct bio_vec *bvec; 811 struct bio_vec bvec;
812 struct bvec_iter iter;
812 unsigned long index; 813 unsigned long index;
813 unsigned long page_addr; 814 unsigned long page_addr;
814 unsigned long source_addr; 815 unsigned long source_addr;
815 unsigned long bytes_done; 816 unsigned long bytes_done;
816 int i;
817 817
818 bytes_done = 0; 818 bytes_done = 0;
819 dev_info = bio->bi_bdev->bd_disk->private_data; 819 dev_info = bio->bi_bdev->bd_disk->private_data;
820 if (dev_info == NULL) 820 if (dev_info == NULL)
821 goto fail; 821 goto fail;
822 if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) 822 if ((bio->bi_iter.bi_sector & 7) != 0 ||
823 (bio->bi_iter.bi_size & 4095) != 0)
823 /* Request is not page-aligned. */ 824 /* Request is not page-aligned. */
824 goto fail; 825 goto fail;
825 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { 826 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,22 +843,22 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
842 } 843 }
843 } 844 }
844 845
845 index = (bio->bi_sector >> 3); 846 index = (bio->bi_iter.bi_sector >> 3);
846 bio_for_each_segment(bvec, bio, i) { 847 bio_for_each_segment(bvec, bio, iter) {
847 page_addr = (unsigned long) 848 page_addr = (unsigned long)
848 page_address(bvec->bv_page) + bvec->bv_offset; 849 page_address(bvec.bv_page) + bvec.bv_offset;
849 source_addr = dev_info->start + (index<<12) + bytes_done; 850 source_addr = dev_info->start + (index<<12) + bytes_done;
850 if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0) 851 if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
851 // More paranoia. 852 // More paranoia.
852 goto fail; 853 goto fail;
853 if (bio_data_dir(bio) == READ) { 854 if (bio_data_dir(bio) == READ) {
854 memcpy((void*)page_addr, (void*)source_addr, 855 memcpy((void*)page_addr, (void*)source_addr,
855 bvec->bv_len); 856 bvec.bv_len);
856 } else { 857 } else {
857 memcpy((void*)source_addr, (void*)page_addr, 858 memcpy((void*)source_addr, (void*)page_addr,
858 bvec->bv_len); 859 bvec.bv_len);
859 } 860 }
860 bytes_done += bvec->bv_len; 861 bytes_done += bvec.bv_len;
861 } 862 }
862 bio_endio(bio, 0); 863 bio_endio(bio, 0);
863 return; 864 return;
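Editor's note: dcssblk (and xpram below) still reject requests that are not page aligned; only the field names moved under bi_iter. With 512-byte sectors, masking with 7 tests 4K alignment of the sector and masking with 4095 tests it for the byte count. A tiny check of that reasoning, with hypothetical inputs:

#include <stdio.h>

typedef unsigned long long sector_t;

static int page_aligned(sector_t bi_sector, unsigned int bi_size)
{
        return (bi_sector & 7) == 0 && (bi_size & 4095) == 0;
}

int main(void)
{
        printf("%d %d\n", page_aligned(16, 8192), page_aligned(17, 8192));
        /* prints "1 0": sector 17 is not on a 4K boundary */
        return 0;
}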
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index d0ab5019d885..76bed1743db1 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq)
130 struct aidaw *aidaw = scmrq->aidaw; 130 struct aidaw *aidaw = scmrq->aidaw;
131 struct msb *msb = &scmrq->aob->msb[0]; 131 struct msb *msb = &scmrq->aob->msb[0];
132 struct req_iterator iter; 132 struct req_iterator iter;
133 struct bio_vec *bv; 133 struct bio_vec bv;
134 134
135 msb->bs = MSB_BS_4K; 135 msb->bs = MSB_BS_4K;
136 scmrq->aob->request.msb_count = 1; 136 scmrq->aob->request.msb_count = 1;
@@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq)
142 msb->data_addr = (u64) aidaw; 142 msb->data_addr = (u64) aidaw;
143 143
144 rq_for_each_segment(bv, scmrq->request, iter) { 144 rq_for_each_segment(bv, scmrq->request, iter) {
145 WARN_ON(bv->bv_offset); 145 WARN_ON(bv.bv_offset);
146 msb->blk_count += bv->bv_len >> 12; 146 msb->blk_count += bv.bv_len >> 12;
147 aidaw->data_addr = (u64) page_address(bv->bv_page); 147 aidaw->data_addr = (u64) page_address(bv.bv_page);
148 aidaw++; 148 aidaw++;
149 } 149 }
150} 150}
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 27f930cd657f..9aae909d47a5 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
122 struct aidaw *aidaw = scmrq->aidaw; 122 struct aidaw *aidaw = scmrq->aidaw;
123 struct msb *msb = &scmrq->aob->msb[0]; 123 struct msb *msb = &scmrq->aob->msb[0];
124 struct req_iterator iter; 124 struct req_iterator iter;
125 struct bio_vec *bv; 125 struct bio_vec bv;
126 int i = 0; 126 int i = 0;
127 u64 addr; 127 u64 addr;
128 128
@@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
163 i++; 163 i++;
164 } 164 }
165 rq_for_each_segment(bv, req, iter) { 165 rq_for_each_segment(bv, req, iter) {
166 aidaw->data_addr = (u64) page_address(bv->bv_page); 166 aidaw->data_addr = (u64) page_address(bv.bv_page);
167 aidaw++; 167 aidaw++;
168 i++; 168 i++;
169 } 169 }
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 58141f0651f2..6969d39f1e2e 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -184,25 +184,26 @@ static unsigned long xpram_highest_page_index(void)
184static void xpram_make_request(struct request_queue *q, struct bio *bio) 184static void xpram_make_request(struct request_queue *q, struct bio *bio)
185{ 185{
186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; 186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
187 struct bio_vec *bvec; 187 struct bio_vec bvec;
188 struct bvec_iter iter;
188 unsigned int index; 189 unsigned int index;
189 unsigned long page_addr; 190 unsigned long page_addr;
190 unsigned long bytes; 191 unsigned long bytes;
191 int i;
192 192
193 if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) 193 if ((bio->bi_iter.bi_sector & 7) != 0 ||
194 (bio->bi_iter.bi_size & 4095) != 0)
194 /* Request is not page-aligned. */ 195 /* Request is not page-aligned. */
195 goto fail; 196 goto fail;
196 if ((bio->bi_size >> 12) > xdev->size) 197 if ((bio->bi_iter.bi_size >> 12) > xdev->size)
197 /* Request size is no page-aligned. */ 198 /* Request size is no page-aligned. */
198 goto fail; 199 goto fail;
199 if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset) 200 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
200 goto fail; 201 goto fail;
201 index = (bio->bi_sector >> 3) + xdev->offset; 202 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
202 bio_for_each_segment(bvec, bio, i) { 203 bio_for_each_segment(bvec, bio, iter) {
203 page_addr = (unsigned long) 204 page_addr = (unsigned long)
204 kmap(bvec->bv_page) + bvec->bv_offset; 205 kmap(bvec.bv_page) + bvec.bv_offset;
205 bytes = bvec->bv_len; 206 bytes = bvec.bv_len;
206 if ((page_addr & 4095) != 0 || (bytes & 4095) != 0) 207 if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
207 /* More paranoia. */ 208 /* More paranoia. */
208 goto fail; 209 goto fail;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 446b85110a1f..0cac7d8fd0f7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2163 } 2163 }
2164 2164
2165 /* do we need to support multiple segments? */ 2165 /* do we need to support multiple segments? */
2166 if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) { 2166 if (bio_multiple_segments(req->bio) ||
2167 printk("%s: multiple segments req %u %u, rsp %u %u\n", 2167 bio_multiple_segments(rsp->bio)) {
2168 __func__, bio_segments(req->bio), blk_rq_bytes(req), 2168 printk("%s: multiple segments req %u, rsp %u\n",
2169 bio_segments(rsp->bio), blk_rq_bytes(rsp)); 2169 __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
2170 return -EINVAL; 2170 return -EINVAL;
2171 } 2171 }
2172 2172
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 9d26637308be..410f4a3e8888 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1901 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1901 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
1902 Mpi2SmpPassthroughRequest_t *mpi_request; 1902 Mpi2SmpPassthroughRequest_t *mpi_request;
1903 Mpi2SmpPassthroughReply_t *mpi_reply; 1903 Mpi2SmpPassthroughReply_t *mpi_reply;
1904 int rc, i; 1904 int rc;
1905 u16 smid; 1905 u16 smid;
1906 u32 ioc_state; 1906 u32 ioc_state;
1907 unsigned long timeleft; 1907 unsigned long timeleft;
@@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1916 void *pci_addr_out = NULL; 1916 void *pci_addr_out = NULL;
1917 u16 wait_state_count; 1917 u16 wait_state_count;
1918 struct request *rsp = req->next_rq; 1918 struct request *rsp = req->next_rq;
1919 struct bio_vec *bvec = NULL; 1919 struct bio_vec bvec;
1920 struct bvec_iter iter;
1920 1921
1921 if (!rsp) { 1922 if (!rsp) {
1922 printk(MPT2SAS_ERR_FMT "%s: the smp response space is " 1923 printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
@@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1942 ioc->transport_cmds.status = MPT2_CMD_PENDING; 1943 ioc->transport_cmds.status = MPT2_CMD_PENDING;
1943 1944
1944 /* Check if the request is split across multiple segments */ 1945 /* Check if the request is split across multiple segments */
1945 if (bio_segments(req->bio) > 1) { 1946 if (bio_multiple_segments(req->bio)) {
1946 u32 offset = 0; 1947 u32 offset = 0;
1947 1948
1948 /* Allocate memory and copy the request */ 1949 /* Allocate memory and copy the request */
@@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1955 goto out; 1956 goto out;
1956 } 1957 }
1957 1958
1958 bio_for_each_segment(bvec, req->bio, i) { 1959 bio_for_each_segment(bvec, req->bio, iter) {
1959 memcpy(pci_addr_out + offset, 1960 memcpy(pci_addr_out + offset,
1960 page_address(bvec->bv_page) + bvec->bv_offset, 1961 page_address(bvec.bv_page) + bvec.bv_offset,
1961 bvec->bv_len); 1962 bvec.bv_len);
1962 offset += bvec->bv_len; 1963 offset += bvec.bv_len;
1963 } 1964 }
1964 } else { 1965 } else {
1965 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1966 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1974 1975
1975 /* Check if the response needs to be populated across 1976 /* Check if the response needs to be populated across
1976 * multiple segments */ 1977 * multiple segments */
1977 if (bio_segments(rsp->bio) > 1) { 1978 if (bio_multiple_segments(rsp->bio)) {
1978 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), 1979 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
1979 &pci_dma_in); 1980 &pci_dma_in);
1980 if (!pci_addr_in) { 1981 if (!pci_addr_in) {
@@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2041 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2042 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2042 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 2043 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
2043 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2044 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2044 if (bio_segments(req->bio) > 1) { 2045 if (bio_multiple_segments(req->bio)) {
2045 ioc->base_add_sg_single(psge, sgl_flags | 2046 ioc->base_add_sg_single(psge, sgl_flags |
2046 (blk_rq_bytes(req) - 4), pci_dma_out); 2047 (blk_rq_bytes(req) - 4), pci_dma_out);
2047 } else { 2048 } else {
@@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2057 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 2058 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2058 MPI2_SGE_FLAGS_END_OF_LIST); 2059 MPI2_SGE_FLAGS_END_OF_LIST);
2059 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2060 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2060 if (bio_segments(rsp->bio) > 1) { 2061 if (bio_multiple_segments(rsp->bio)) {
2061 ioc->base_add_sg_single(psge, sgl_flags | 2062 ioc->base_add_sg_single(psge, sgl_flags |
2062 (blk_rq_bytes(rsp) + 4), pci_dma_in); 2063 (blk_rq_bytes(rsp) + 4), pci_dma_in);
2063 } else { 2064 } else {
@@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2102 le16_to_cpu(mpi_reply->ResponseDataLength); 2103 le16_to_cpu(mpi_reply->ResponseDataLength);
2103 /* check if the resp needs to be copied from the allocated 2104 /* check if the resp needs to be copied from the allocated
2104 * pci mem */ 2105 * pci mem */
2105 if (bio_segments(rsp->bio) > 1) { 2106 if (bio_multiple_segments(rsp->bio)) {
2106 u32 offset = 0; 2107 u32 offset = 0;
2107 u32 bytes_to_copy = 2108 u32 bytes_to_copy =
2108 le16_to_cpu(mpi_reply->ResponseDataLength); 2109 le16_to_cpu(mpi_reply->ResponseDataLength);
2109 bio_for_each_segment(bvec, rsp->bio, i) { 2110 bio_for_each_segment(bvec, rsp->bio, iter) {
2110 if (bytes_to_copy <= bvec->bv_len) { 2111 if (bytes_to_copy <= bvec.bv_len) {
2111 memcpy(page_address(bvec->bv_page) + 2112 memcpy(page_address(bvec.bv_page) +
2112 bvec->bv_offset, pci_addr_in + 2113 bvec.bv_offset, pci_addr_in +
2113 offset, bytes_to_copy); 2114 offset, bytes_to_copy);
2114 break; 2115 break;
2115 } else { 2116 } else {
2116 memcpy(page_address(bvec->bv_page) + 2117 memcpy(page_address(bvec.bv_page) +
2117 bvec->bv_offset, pci_addr_in + 2118 bvec.bv_offset, pci_addr_in +
2118 offset, bvec->bv_len); 2119 offset, bvec.bv_len);
2119 bytes_to_copy -= bvec->bv_len; 2120 bytes_to_copy -= bvec.bv_len;
2120 } 2121 }
2121 offset += bvec->bv_len; 2122 offset += bvec.bv_len;
2122 } 2123 }
2123 } 2124 }
2124 } else { 2125 } else {
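Editor's note: the reply copy-out above walks the response bio segment by segment, draining a flat PCI buffer until ResponseDataLength is exhausted, with the last segment possibly filled only partially. A userspace model of that loop over plain buffers (hypothetical sizes; memcpy targets stand in for page_address() + bv_offset):

#include <stdio.h>
#include <string.h>

struct seg { unsigned char *buf; unsigned int len; };

static void copy_out(struct seg *segs, unsigned int nsegs,
                     const unsigned char *src, unsigned int bytes_to_copy)
{
        unsigned int i, offset = 0;

        for (i = 0; i < nsegs; i++) {         /* bio_for_each_segment analogue */
                if (bytes_to_copy <= segs[i].len) {
                        memcpy(segs[i].buf, src + offset, bytes_to_copy);
                        break;                /* last, possibly partial, segment */
                }
                memcpy(segs[i].buf, src + offset, segs[i].len);
                bytes_to_copy -= segs[i].len;
                offset += segs[i].len;
        }
}

int main(void)
{
        unsigned char a[4], b[4], src[6] = "hello";
        struct seg segs[] = { { a, sizeof(a) }, { b, sizeof(b) } };

        copy_out(segs, 2, src, 6);
        printf("%.4s%.2s\n", a, b);           /* prints "hello" */
        return 0;
}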
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e771a88c6a74..65170cb1a00f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1884 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 1884 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1885 Mpi2SmpPassthroughRequest_t *mpi_request; 1885 Mpi2SmpPassthroughRequest_t *mpi_request;
1886 Mpi2SmpPassthroughReply_t *mpi_reply; 1886 Mpi2SmpPassthroughReply_t *mpi_reply;
1887 int rc, i; 1887 int rc;
1888 u16 smid; 1888 u16 smid;
1889 u32 ioc_state; 1889 u32 ioc_state;
1890 unsigned long timeleft; 1890 unsigned long timeleft;
@@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1898 void *pci_addr_out = NULL; 1898 void *pci_addr_out = NULL;
1899 u16 wait_state_count; 1899 u16 wait_state_count;
1900 struct request *rsp = req->next_rq; 1900 struct request *rsp = req->next_rq;
1901 struct bio_vec *bvec = NULL; 1901 struct bio_vec bvec;
1902 struct bvec_iter iter;
1902 1903
1903 if (!rsp) { 1904 if (!rsp) {
1904 pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", 1905 pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
@@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1925 ioc->transport_cmds.status = MPT3_CMD_PENDING; 1926 ioc->transport_cmds.status = MPT3_CMD_PENDING;
1926 1927
1927 /* Check if the request is split across multiple segments */ 1928 /* Check if the request is split across multiple segments */
1928 if (req->bio->bi_vcnt > 1) { 1929 if (bio_multiple_segments(req->bio)) {
1929 u32 offset = 0; 1930 u32 offset = 0;
1930 1931
1931 /* Allocate memory and copy the request */ 1932 /* Allocate memory and copy the request */
@@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1938 goto out; 1939 goto out;
1939 } 1940 }
1940 1941
1941 bio_for_each_segment(bvec, req->bio, i) { 1942 bio_for_each_segment(bvec, req->bio, iter) {
1942 memcpy(pci_addr_out + offset, 1943 memcpy(pci_addr_out + offset,
1943 page_address(bvec->bv_page) + bvec->bv_offset, 1944 page_address(bvec.bv_page) + bvec.bv_offset,
1944 bvec->bv_len); 1945 bvec.bv_len);
1945 offset += bvec->bv_len; 1946 offset += bvec.bv_len;
1946 } 1947 }
1947 } else { 1948 } else {
1948 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1949 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1957 1958
1958 /* Check if the response needs to be populated across 1959 /* Check if the response needs to be populated across
1959 * multiple segments */ 1960 * multiple segments */
1960 if (rsp->bio->bi_vcnt > 1) { 1961 if (bio_multiple_segments(rsp->bio)) {
1961 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), 1962 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
1962 &pci_dma_in); 1963 &pci_dma_in);
1963 if (!pci_addr_in) { 1964 if (!pci_addr_in) {
@@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2018 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); 2019 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
2019 psge = &mpi_request->SGL; 2020 psge = &mpi_request->SGL;
2020 2021
2021 if (req->bio->bi_vcnt > 1) 2022 if (bio_multiple_segments(req->bio))
2022 ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4), 2023 ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
2023 pci_dma_in, (blk_rq_bytes(rsp) + 4)); 2024 pci_dma_in, (blk_rq_bytes(rsp) + 4));
2024 else 2025 else
@@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2063 2064
2064 /* check if the resp needs to be copied from the allocated 2065 /* check if the resp needs to be copied from the allocated
2065 * pci mem */ 2066 * pci mem */
2066 if (rsp->bio->bi_vcnt > 1) { 2067 if (bio_multiple_segments(rsp->bio)) {
2067 u32 offset = 0; 2068 u32 offset = 0;
2068 u32 bytes_to_copy = 2069 u32 bytes_to_copy =
2069 le16_to_cpu(mpi_reply->ResponseDataLength); 2070 le16_to_cpu(mpi_reply->ResponseDataLength);
2070 bio_for_each_segment(bvec, rsp->bio, i) { 2071 bio_for_each_segment(bvec, rsp->bio, iter) {
2071 if (bytes_to_copy <= bvec->bv_len) { 2072 if (bytes_to_copy <= bvec.bv_len) {
2072 memcpy(page_address(bvec->bv_page) + 2073 memcpy(page_address(bvec.bv_page) +
2073 bvec->bv_offset, pci_addr_in + 2074 bvec.bv_offset, pci_addr_in +
2074 offset, bytes_to_copy); 2075 offset, bytes_to_copy);
2075 break; 2076 break;
2076 } else { 2077 } else {
2077 memcpy(page_address(bvec->bv_page) + 2078 memcpy(page_address(bvec.bv_page) +
2078 bvec->bv_offset, pci_addr_in + 2079 bvec.bv_offset, pci_addr_in +
2079 offset, bvec->bv_len); 2080 offset, bvec.bv_len);
2080 bytes_to_copy -= bvec->bv_len; 2081 bytes_to_copy -= bvec.bv_len;
2081 } 2082 }
2082 offset += bvec->bv_len; 2083 offset += bvec.bv_len;
2083 } 2084 }
2084 } 2085 }
2085 } else { 2086 } else {
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index aa66361ed44b..bac04c2335aa 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,
731 731
732 bio->bi_rw &= ~REQ_WRITE; 732 bio->bi_rw &= ~REQ_WRITE;
733 or->in.bio = bio; 733 or->in.bio = bio;
734 or->in.total_bytes = bio->bi_size; 734 or->in.total_bytes = bio->bi_iter.bi_size;
735 return 0; 735 return 0;
736} 736}
737 737
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 9846c6ab2aaa..470954aba728 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -801,7 +801,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
801 if (sdkp->device->no_write_same) 801 if (sdkp->device->no_write_same)
802 return BLKPREP_KILL; 802 return BLKPREP_KILL;
803 803
804 BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size); 804 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
805 805
806 sector >>= ilog2(sdp->sector_size) - 9; 806 sector >>= ilog2(sdp->sector_size) - 9;
807 nr_sectors >>= ilog2(sdp->sector_size) - 9; 807 nr_sectors >>= ilog2(sdp->sector_size) - 9;
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 6174ca4ea275..a7a691d0af7d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
365 struct bio *bio; 365 struct bio *bio;
366 struct scsi_disk *sdkp; 366 struct scsi_disk *sdkp;
367 struct sd_dif_tuple *sdt; 367 struct sd_dif_tuple *sdt;
368 unsigned int i, j;
369 u32 phys, virt; 368 u32 phys, virt;
370 369
371 sdkp = rq->bio->bi_bdev->bd_disk->private_data; 370 sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
376 phys = hw_sector & 0xffffffff; 375 phys = hw_sector & 0xffffffff;
377 376
378 __rq_for_each_bio(bio, rq) { 377 __rq_for_each_bio(bio, rq) {
379 struct bio_vec *iv; 378 struct bio_vec iv;
379 struct bvec_iter iter;
380 unsigned int j;
380 381
381 /* Already remapped? */ 382 /* Already remapped? */
382 if (bio_flagged(bio, BIO_MAPPED_INTEGRITY)) 383 if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
383 break; 384 break;
384 385
385 virt = bio->bi_integrity->bip_sector & 0xffffffff; 386 virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
386 387
387 bip_for_each_vec(iv, bio->bi_integrity, i) { 388 bip_for_each_vec(iv, bio->bi_integrity, iter) {
388 sdt = kmap_atomic(iv->bv_page) 389 sdt = kmap_atomic(iv.bv_page)
389 + iv->bv_offset; 390 + iv.bv_offset;
390 391
391 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { 392 for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
392 393
393 if (be32_to_cpu(sdt->ref_tag) == virt) 394 if (be32_to_cpu(sdt->ref_tag) == virt)
394 sdt->ref_tag = cpu_to_be32(phys); 395 sdt->ref_tag = cpu_to_be32(phys);
@@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
414 struct scsi_disk *sdkp; 415 struct scsi_disk *sdkp;
415 struct bio *bio; 416 struct bio *bio;
416 struct sd_dif_tuple *sdt; 417 struct sd_dif_tuple *sdt;
417 unsigned int i, j, sectors, sector_sz; 418 unsigned int j, sectors, sector_sz;
418 u32 phys, virt; 419 u32 phys, virt;
419 420
420 sdkp = scsi_disk(scmd->request->rq_disk); 421 sdkp = scsi_disk(scmd->request->rq_disk);
@@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
 	phys >>= 3;
 
 	__rq_for_each_bio(bio, scmd->request) {
-		struct bio_vec *iv;
+		struct bio_vec iv;
+		struct bvec_iter iter;
 
-		virt = bio->bi_integrity->bip_sector & 0xffffffff;
+		virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
 
-		bip_for_each_vec(iv, bio->bi_integrity, i) {
-			sdt = kmap_atomic(iv->bv_page)
-				+ iv->bv_offset;
+		bip_for_each_vec(iv, bio->bi_integrity, iter) {
+			sdt = kmap_atomic(iv.bv_page)
+				+ iv.bv_offset;
 
-			for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) {
+			for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
 
 				if (sectors == 0) {
 					kunmap_atomic(sdt);
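
Both sd_dif hunks follow the same pattern: the integrity payload is now walked with bip_for_each_vec() taking a struct bvec_iter, and each segment arrives as a bio_vec value, so it is mapped through iv.bv_page rather than iv->bv_page. A minimal sketch of that loop shape, assuming the 3.14 bio-integrity API; walk_integrity_segments() is hypothetical:

    #include <linux/bio.h>
    #include <linux/highmem.h>

    static void walk_integrity_segments(struct bio *bio)
    {
    	struct bio_vec iv;
    	struct bvec_iter iter;

    	bip_for_each_vec(iv, bio->bi_integrity, iter) {
    		void *kaddr = kmap_atomic(iv.bv_page);

    		/* iv.bv_len bytes of protection data for this segment
    		 * live at kaddr + iv.bv_offset.
    		 */
    		kunmap_atomic(kaddr);
    	}
    }
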
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index 5338e8d4c50f..0718905adeb2 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 	struct cl_object *obj = ll_i2info(inode)->lli_clob;
 	pgoff_t offset;
 	int ret;
-	int i;
 	int rw;
 	obd_count page_count = 0;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 	struct bio *bio;
 	ssize_t bytes;
 
@@ -220,15 +220,15 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
 	for (bio = head; bio != NULL; bio = bio->bi_next) {
 		LASSERT(rw == bio->bi_rw);
 
-		offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset;
-		bio_for_each_segment(bvec, bio, i) {
-			BUG_ON(bvec->bv_offset != 0);
-			BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
+		offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
+		bio_for_each_segment(bvec, bio, iter) {
+			BUG_ON(bvec.bv_offset != 0);
+			BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
 
-			pages[page_count] = bvec->bv_page;
+			pages[page_count] = bvec.bv_page;
 			offsets[page_count] = offset;
 			page_count++;
-			offset += bvec->bv_len;
+			offset += bvec.bv_len;
 		}
 		LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
 	}
@@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
 	bio = &lo->lo_bio;
 	while (*bio && (*bio)->bi_rw == rw) {
 		CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
-		       (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size,
+		       (unsigned long long)(*bio)->bi_iter.bi_sector,
+		       (*bio)->bi_iter.bi_size,
 		       page_count, (*bio)->bi_vcnt);
 		if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
 			break;
@@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
 		goto err;
 
 	CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
-	       (unsigned long long)old_bio->bi_sector, old_bio->bi_size);
+	       (unsigned long long)old_bio->bi_iter.bi_sector,
+	       old_bio->bi_iter.bi_size);
 
 	spin_lock_irq(&lo->lo_lock);
 	inactive = (lo->lo_state != LLOOP_BOUND);
@@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
 	loop_add_bio(lo, old_bio);
 	return;
 err:
-	cfs_bio_io_error(old_bio, old_bio->bi_size);
+	cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
 }
 
 
@@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
 	while (bio) {
 		struct bio *tmp = bio->bi_next;
 		bio->bi_next = NULL;
-		cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
+		cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
 		bio = tmp;
 	}
 }
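
The lloop conversion shows the new canonical segment loop: bio_for_each_segment() is driven by a struct bvec_iter cursor instead of an integer index, and the loop variable is a bio_vec value. A minimal self-contained sketch of the idiom, assuming a 3.14+ kernel; count_bio_bytes() is an invented example, not driver code:

    #include <linux/bio.h>

    static unsigned int count_bio_bytes(struct bio *bio)
    {
    	struct bio_vec bvec;
    	struct bvec_iter iter;
    	unsigned int bytes = 0;

    	bio_for_each_segment(bvec, bio, iter)	/* was: (bvec, bio, i) */
    		bytes += bvec.bv_len;		/* was: bvec->bv_len   */

    	/* For an unprocessed bio this equals bio->bi_iter.bi_size. */
    	return bytes;
    }

Because the iterator starts from bio->bi_iter rather than index 0, the same loop works correctly on partially completed or split bios, which was the point of the series.
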
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 3277d9838f4e..108f2733106d 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -171,13 +171,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
 	u64 start, end, bound;
 
 	/* unaligned request */
-	if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
+	if (unlikely(bio->bi_iter.bi_sector &
+		     (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
 		return 0;
-	if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
+	if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
 		return 0;
 
-	start = bio->bi_sector;
-	end = start + (bio->bi_size >> SECTOR_SHIFT);
+	start = bio->bi_iter.bi_sector;
+	end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
 	bound = zram->disksize >> SECTOR_SHIFT;
 	/* out of range range */
 	if (unlikely(start >= bound || end > bound || start > end))
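
A worked instance of the bounds math above, with hypothetical numbers (512-byte sectors, i.e. a shift of 9, and a 64 MiB zram disk); this is a standalone userspace model, not kernel code:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint64_t disksize = 64ULL << 20;	/* 64 MiB device        */
    	uint64_t bound = disksize >> 9;		/* 131072 total sectors */
    	uint64_t start = 131064;		/* bio->bi_iter.bi_sector */
    	uint64_t size  = 8192;			/* bio->bi_iter.bi_size   */
    	uint64_t end   = start + (size >> 9);	/* 131064 + 16 = 131080   */

    	/* 131080 <= 131072 is false, so this request is rejected. */
    	printf("valid=%d\n", start < bound && end <= bound && start <= end);
    	return 0;
    }
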
@@ -680,9 +681,10 @@ out:
 
 static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
 {
-	int i, offset;
+	int offset;
 	u32 index;
-	struct bio_vec *bvec;
+	struct bio_vec bvec;
+	struct bvec_iter iter;
 
 	switch (rw) {
 	case READ:
@@ -693,36 +695,37 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
 		break;
 	}
 
-	index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT;
-	offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
+	index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
+	offset = (bio->bi_iter.bi_sector &
+		  (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
 
-	bio_for_each_segment(bvec, bio, i) {
+	bio_for_each_segment(bvec, bio, iter) {
 		int max_transfer_size = PAGE_SIZE - offset;
 
-		if (bvec->bv_len > max_transfer_size) {
+		if (bvec.bv_len > max_transfer_size) {
 			/*
 			 * zram_bvec_rw() can only make operation on a single
 			 * zram page. Split the bio vector.
 			 */
 			struct bio_vec bv;
 
-			bv.bv_page = bvec->bv_page;
+			bv.bv_page = bvec.bv_page;
 			bv.bv_len = max_transfer_size;
-			bv.bv_offset = bvec->bv_offset;
+			bv.bv_offset = bvec.bv_offset;
 
 			if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
 				goto out;
 
-			bv.bv_len = bvec->bv_len - max_transfer_size;
+			bv.bv_len = bvec.bv_len - max_transfer_size;
 			bv.bv_offset += max_transfer_size;
 			if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
 				goto out;
 		} else
-			if (zram_bvec_rw(zram, bvec, index, offset, bio, rw)
+			if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
 			    < 0)
 				goto out;
 
-		update_position(&index, &offset, bvec);
+		update_position(&index, &offset, &bvec);
 	}
 
 	set_bit(BIO_UPTODATE, &bio->bi_flags);
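
The zram hunk also illustrates a side benefit of value bio_vecs: a segment that straddles a zram page can be split by copying the local bio_vec and adjusting the copy, with no need to modify the bio's own (now immutable) biovec array. A hedged sketch of that split pattern; split_bvec() is an invented helper summarizing what the driver does inline above:

    #include <linux/bio.h>

    static void split_bvec(const struct bio_vec *src, unsigned int max,
    		       struct bio_vec *first, struct bio_vec *rest)
    {
    	*first = *src;
    	first->bv_len = max;		/* head: bytes up to the boundary */

    	*rest = *src;
    	rest->bv_offset += max;		/* tail: remainder after it */
    	rest->bv_len -= max;
    }
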
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c87959f12760..2d29356d0c85 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -319,7 +319,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
 	bio->bi_bdev = ib_dev->ibd_bd;
 	bio->bi_private = cmd;
 	bio->bi_end_io = &iblock_bio_done;
-	bio->bi_sector = lba;
+	bio->bi_iter.bi_sector = lba;
 
 	return bio;
 }
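
On the submission side the conversion is equally mechanical: when a driver builds a fresh bio, the starting LBA is now assigned through bio->bi_iter.bi_sector. A rough sketch of the allocation pattern on a 3.14-era kernel; alloc_io_bio() is a hypothetical wrapper, not the target driver's code:

    #include <linux/bio.h>

    static struct bio *alloc_io_bio(struct block_device *bdev, sector_t lba,
    				bio_end_io_t *done, void *priv)
    {
    	struct bio *bio = bio_alloc(GFP_KERNEL, 1);	/* room for 1 bvec */

    	if (!bio)
    		return NULL;

    	bio->bi_bdev = bdev;
    	bio->bi_private = priv;
    	bio->bi_end_io = done;
    	bio->bi_iter.bi_sector = lba;	/* was: bio->bi_sector = lba */
    	return bio;
    }
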