aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/block
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2014-01-30 14:19:05 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2014-01-30 14:19:05 -0500
commitf568849edac8611d603e00bd6cbbcfea09395ae6 (patch)
treeb9472d640fe5d87426d38c9d81d946cf197ad3fb /drivers/block
parentd9894c228b11273e720bb63ba120d1d326fe9d94 (diff)
parent675675ada486dde5bf9aa51665e90706bff11a35 (diff)
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe: "The major piece in here is the immutable bio_vec series from Kent, the rest is fairly minor. It was supposed to go in last round, but various issues pushed it to this release instead. The pull request contains: - Various smaller blk-mq fixes from different folks. Nothing major here, just minor fixes and cleanups. - Fix for a memory leak in the error path in the block ioctl code from Christian Engelmayer. - Header export fix from CaiZhiyong. - Finally the immutable biovec changes from Kent Overstreet. This enables some nice future work on making arbitrarily sized bios possible, and splitting more efficient. Related fixes to immutable bio_vecs: - dm-cache immutable fixup from Mike Snitzer. - btrfs immutable fixup from Muthu Kumar. - bio-integrity fix from Nic Bellinger, which is also going to stable" * 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits) xtensa: fixup simdisk driver to work with immutable bio_vecs block/blk-mq-cpu.c: use hotcpu_notifier() blk-mq: for_each_* macro correctness block: Fix memory leak in rw_copy_check_uvector() handling bio-integrity: Fix bio_integrity_verify segment start bug block: remove unrelated header files and export symbol blk-mq: uses page->list incorrectly blk-mq: use __smp_call_function_single directly btrfs: fix missing increment of bi_remaining Revert "block: Warn and free bio if bi_end_io is not set" block: Warn and free bio if bi_end_io is not set blk-mq: fix initializing request's start time block: blk-mq: don't export blk_mq_free_queue() block: blk-mq: make blk_sync_queue support mq block: blk-mq: support draining mq queue dm cache: increment bi_remaining when bi_end_io is restored block: fixup for generic bio chaining block: Really silence spurious compiler warnings block: Silence spurious compiler warnings block: Kill bio_pair_split() ...
Diffstat (limited to 'drivers/block')
-rw-r--r--drivers/block/aoe/aoe.h10
-rw-r--r--drivers/block/aoe/aoecmd.c153
-rw-r--r--drivers/block/brd.c16
-rw-r--r--drivers/block/drbd/drbd_actlog.c2
-rw-r--r--drivers/block/drbd/drbd_bitmap.c2
-rw-r--r--drivers/block/drbd/drbd_main.c27
-rw-r--r--drivers/block/drbd/drbd_receiver.c19
-rw-r--r--drivers/block/drbd/drbd_req.c6
-rw-r--r--drivers/block/drbd/drbd_req.h2
-rw-r--r--drivers/block/drbd/drbd_worker.c8
-rw-r--r--drivers/block/floppy.c16
-rw-r--r--drivers/block/loop.c27
-rw-r--r--drivers/block/mtip32xx/mtip32xx.c20
-rw-r--r--drivers/block/nbd.c14
-rw-r--r--drivers/block/nvme-core.c142
-rw-r--r--drivers/block/pktcdvd.c182
-rw-r--r--drivers/block/ps3disk.c17
-rw-r--r--drivers/block/ps3vram.c12
-rw-r--r--drivers/block/rbd.c91
-rw-r--r--drivers/block/rsxx/dev.c6
-rw-r--r--drivers/block/rsxx/dma.c15
-rw-r--r--drivers/block/umem.c53
-rw-r--r--drivers/block/xen-blkback/blkback.c2
-rw-r--r--drivers/block/xen-blkfront.c2
24 files changed, 342 insertions, 502 deletions
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 14a9d1912318..9220f8e833d0 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -100,11 +100,8 @@ enum {
100 100
101struct buf { 101struct buf {
102 ulong nframesout; 102 ulong nframesout;
103 ulong resid;
104 ulong bv_resid;
105 sector_t sector;
106 struct bio *bio; 103 struct bio *bio;
107 struct bio_vec *bv; 104 struct bvec_iter iter;
108 struct request *rq; 105 struct request *rq;
109}; 106};
110 107
@@ -120,13 +117,10 @@ struct frame {
120 ulong waited; 117 ulong waited;
121 ulong waited_total; 118 ulong waited_total;
122 struct aoetgt *t; /* parent target I belong to */ 119 struct aoetgt *t; /* parent target I belong to */
123 sector_t lba;
124 struct sk_buff *skb; /* command skb freed on module exit */ 120 struct sk_buff *skb; /* command skb freed on module exit */
125 struct sk_buff *r_skb; /* response skb for async processing */ 121 struct sk_buff *r_skb; /* response skb for async processing */
126 struct buf *buf; 122 struct buf *buf;
127 struct bio_vec *bv; 123 struct bvec_iter iter;
128 ulong bcnt;
129 ulong bv_off;
130 char flags; 124 char flags;
131}; 125};
132 126
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d2515435e23f..8184451b57c0 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f)
196 196
197 t = f->t; 197 t = f->t;
198 f->buf = NULL; 198 f->buf = NULL;
199 f->lba = 0; 199 memset(&f->iter, 0, sizeof(f->iter));
200 f->bv = NULL;
201 f->r_skb = NULL; 200 f->r_skb = NULL;
202 f->flags = 0; 201 f->flags = 0;
203 list_add(&f->head, &t->ffree); 202 list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@ newframe(struct aoedev *d)
295} 294}
296 295
297static void 296static void
298skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt) 297skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
299{ 298{
300 int frag = 0; 299 int frag = 0;
301 ulong fcnt; 300 struct bio_vec bv;
302loop: 301
303 fcnt = bv->bv_len - (off - bv->bv_offset); 302 __bio_for_each_segment(bv, bio, iter, iter)
304 if (fcnt > cnt) 303 skb_fill_page_desc(skb, frag++, bv.bv_page,
305 fcnt = cnt; 304 bv.bv_offset, bv.bv_len);
306 skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
307 cnt -= fcnt;
308 if (cnt <= 0)
309 return;
310 bv++;
311 off = bv->bv_offset;
312 goto loop;
313} 305}
314 306
315static void 307static void
@@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f)
346 t->nout++; 338 t->nout++;
347 f->waited = 0; 339 f->waited = 0;
348 f->waited_total = 0; 340 f->waited_total = 0;
349 if (f->buf)
350 f->lba = f->buf->sector;
351 341
352 /* set up ata header */ 342 /* set up ata header */
353 ah->scnt = f->bcnt >> 9; 343 ah->scnt = f->iter.bi_size >> 9;
354 put_lba(ah, f->lba); 344 put_lba(ah, f->iter.bi_sector);
355 if (t->d->flags & DEVFL_EXT) { 345 if (t->d->flags & DEVFL_EXT) {
356 ah->aflags |= AOEAFL_EXT; 346 ah->aflags |= AOEAFL_EXT;
357 } else { 347 } else {
@@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f)
360 ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */ 350 ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
361 } 351 }
362 if (f->buf && bio_data_dir(f->buf->bio) == WRITE) { 352 if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
363 skb_fillup(skb, f->bv, f->bv_off, f->bcnt); 353 skb_fillup(skb, f->buf->bio, f->iter);
364 ah->aflags |= AOEAFL_WRITE; 354 ah->aflags |= AOEAFL_WRITE;
365 skb->len += f->bcnt; 355 skb->len += f->iter.bi_size;
366 skb->data_len = f->bcnt; 356 skb->data_len = f->iter.bi_size;
367 skb->truesize += f->bcnt; 357 skb->truesize += f->iter.bi_size;
368 t->wpkts++; 358 t->wpkts++;
369 } else { 359 } else {
370 t->rpkts++; 360 t->rpkts++;
@@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d)
382 struct buf *buf; 372 struct buf *buf;
383 struct sk_buff *skb; 373 struct sk_buff *skb;
384 struct sk_buff_head queue; 374 struct sk_buff_head queue;
385 ulong bcnt, fbcnt;
386 375
387 buf = nextbuf(d); 376 buf = nextbuf(d);
388 if (buf == NULL) 377 if (buf == NULL)
@@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d)
390 f = newframe(d); 379 f = newframe(d);
391 if (f == NULL) 380 if (f == NULL)
392 return 0; 381 return 0;
393 bcnt = d->maxbcnt;
394 if (bcnt == 0)
395 bcnt = DEFAULTBCNT;
396 if (bcnt > buf->resid)
397 bcnt = buf->resid;
398 fbcnt = bcnt;
399 f->bv = buf->bv;
400 f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
401 do {
402 if (fbcnt < buf->bv_resid) {
403 buf->bv_resid -= fbcnt;
404 buf->resid -= fbcnt;
405 break;
406 }
407 fbcnt -= buf->bv_resid;
408 buf->resid -= buf->bv_resid;
409 if (buf->resid == 0) {
410 d->ip.buf = NULL;
411 break;
412 }
413 buf->bv++;
414 buf->bv_resid = buf->bv->bv_len;
415 WARN_ON(buf->bv_resid == 0);
416 } while (fbcnt);
417 382
418 /* initialize the headers & frame */ 383 /* initialize the headers & frame */
419 f->buf = buf; 384 f->buf = buf;
420 f->bcnt = bcnt; 385 f->iter = buf->iter;
421 ata_rw_frameinit(f); 386 f->iter.bi_size = min_t(unsigned long,
387 d->maxbcnt ?: DEFAULTBCNT,
388 f->iter.bi_size);
389 bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
390
391 if (!buf->iter.bi_size)
392 d->ip.buf = NULL;
422 393
423 /* mark all tracking fields and load out */ 394 /* mark all tracking fields and load out */
424 buf->nframesout += 1; 395 buf->nframesout += 1;
425 buf->sector += bcnt >> 9; 396
397 ata_rw_frameinit(f);
426 398
427 skb = skb_clone(f->skb, GFP_ATOMIC); 399 skb = skb_clone(f->skb, GFP_ATOMIC);
428 if (skb) { 400 if (skb) {
@@ -613,10 +585,7 @@ reassign_frame(struct frame *f)
613 skb = nf->skb; 585 skb = nf->skb;
614 nf->skb = f->skb; 586 nf->skb = f->skb;
615 nf->buf = f->buf; 587 nf->buf = f->buf;
616 nf->bcnt = f->bcnt; 588 nf->iter = f->iter;
617 nf->lba = f->lba;
618 nf->bv = f->bv;
619 nf->bv_off = f->bv_off;
620 nf->waited = 0; 589 nf->waited = 0;
621 nf->waited_total = f->waited_total; 590 nf->waited_total = f->waited_total;
622 nf->sent = f->sent; 591 nf->sent = f->sent;
@@ -648,19 +617,19 @@ probe(struct aoetgt *t)
648 } 617 }
649 f->flags |= FFL_PROBE; 618 f->flags |= FFL_PROBE;
650 ifrotate(t); 619 ifrotate(t);
651 f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT; 620 f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
652 ata_rw_frameinit(f); 621 ata_rw_frameinit(f);
653 skb = f->skb; 622 skb = f->skb;
654 for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) { 623 for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
655 if (n < PAGE_SIZE) 624 if (n < PAGE_SIZE)
656 m = n; 625 m = n;
657 else 626 else
658 m = PAGE_SIZE; 627 m = PAGE_SIZE;
659 skb_fill_page_desc(skb, frag, empty_page, 0, m); 628 skb_fill_page_desc(skb, frag, empty_page, 0, m);
660 } 629 }
661 skb->len += f->bcnt; 630 skb->len += f->iter.bi_size;
662 skb->data_len = f->bcnt; 631 skb->data_len = f->iter.bi_size;
663 skb->truesize += f->bcnt; 632 skb->truesize += f->iter.bi_size;
664 633
665 skb = skb_clone(f->skb, GFP_ATOMIC); 634 skb = skb_clone(f->skb, GFP_ATOMIC);
666 if (skb) { 635 if (skb) {
@@ -897,15 +866,15 @@ rqbiocnt(struct request *r)
897static void 866static void
898bio_pageinc(struct bio *bio) 867bio_pageinc(struct bio *bio)
899{ 868{
900 struct bio_vec *bv; 869 struct bio_vec bv;
901 struct page *page; 870 struct page *page;
902 int i; 871 struct bvec_iter iter;
903 872
904 bio_for_each_segment(bv, bio, i) { 873 bio_for_each_segment(bv, bio, iter) {
905 /* Non-zero page count for non-head members of 874 /* Non-zero page count for non-head members of
906 * compound pages is no longer allowed by the kernel. 875 * compound pages is no longer allowed by the kernel.
907 */ 876 */
908 page = compound_trans_head(bv->bv_page); 877 page = compound_trans_head(bv.bv_page);
909 atomic_inc(&page->_count); 878 atomic_inc(&page->_count);
910 } 879 }
911} 880}
@@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio)
913static void 882static void
914bio_pagedec(struct bio *bio) 883bio_pagedec(struct bio *bio)
915{ 884{
916 struct bio_vec *bv;
917 struct page *page; 885 struct page *page;
918 int i; 886 struct bio_vec bv;
887 struct bvec_iter iter;
919 888
920 bio_for_each_segment(bv, bio, i) { 889 bio_for_each_segment(bv, bio, iter) {
921 page = compound_trans_head(bv->bv_page); 890 page = compound_trans_head(bv.bv_page);
922 atomic_dec(&page->_count); 891 atomic_dec(&page->_count);
923 } 892 }
924} 893}
@@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
929 memset(buf, 0, sizeof(*buf)); 898 memset(buf, 0, sizeof(*buf));
930 buf->rq = rq; 899 buf->rq = rq;
931 buf->bio = bio; 900 buf->bio = bio;
932 buf->resid = bio->bi_size; 901 buf->iter = bio->bi_iter;
933 buf->sector = bio->bi_sector;
934 bio_pageinc(bio); 902 bio_pageinc(bio);
935 buf->bv = bio_iovec(bio);
936 buf->bv_resid = buf->bv->bv_len;
937 WARN_ON(buf->bv_resid == 0);
938} 903}
939 904
940static struct buf * 905static struct buf *
@@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr)
1119} 1084}
1120 1085
1121static void 1086static void
1122bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt) 1087bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
1123{ 1088{
1124 ulong fcnt;
1125 char *p;
1126 int soff = 0; 1089 int soff = 0;
1127loop: 1090 struct bio_vec bv;
1128 fcnt = bv->bv_len - (off - bv->bv_offset); 1091
1129 if (fcnt > cnt) 1092 iter.bi_size = cnt;
1130 fcnt = cnt; 1093
1131 p = page_address(bv->bv_page) + off; 1094 __bio_for_each_segment(bv, bio, iter, iter) {
1132 skb_copy_bits(skb, soff, p, fcnt); 1095 char *p = page_address(bv.bv_page) + bv.bv_offset;
1133 soff += fcnt; 1096 skb_copy_bits(skb, soff, p, bv.bv_len);
1134 cnt -= fcnt; 1097 soff += bv.bv_len;
1135 if (cnt <= 0) 1098 }
1136 return;
1137 bv++;
1138 off = bv->bv_offset;
1139 goto loop;
1140} 1099}
1141 1100
1142void 1101void
@@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
1152 do { 1111 do {
1153 bio = rq->bio; 1112 bio = rq->bio;
1154 bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); 1113 bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
1155 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size)); 1114 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
1156 1115
1157 /* cf. http://lkml.org/lkml/2006/10/31/28 */ 1116 /* cf. http://lkml.org/lkml/2006/10/31/28 */
1158 if (!fastfail) 1117 if (!fastfail)
@@ -1229,7 +1188,15 @@ noskb: if (buf)
1229 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); 1188 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
1230 break; 1189 break;
1231 } 1190 }
1232 bvcpy(f->bv, f->bv_off, skb, n); 1191 if (n > f->iter.bi_size) {
1192 pr_err_ratelimited("%s e%ld.%d. bytes=%ld need=%u\n",
1193 "aoe: too-large data size in read from",
1194 (long) d->aoemajor, d->aoeminor,
1195 n, f->iter.bi_size);
1196 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
1197 break;
1198 }
1199 bvcpy(skb, f->buf->bio, f->iter, n);
1233 case ATA_CMD_PIO_WRITE: 1200 case ATA_CMD_PIO_WRITE:
1234 case ATA_CMD_PIO_WRITE_EXT: 1201 case ATA_CMD_PIO_WRITE_EXT:
1235 spin_lock_irq(&d->lock); 1202 spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@ out:
1272 1239
1273 aoe_freetframe(f); 1240 aoe_freetframe(f);
1274 1241
1275 if (buf && --buf->nframesout == 0 && buf->resid == 0) 1242 if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
1276 aoe_end_buf(d, buf); 1243 aoe_end_buf(d, buf);
1277 1244
1278 spin_unlock_irq(&d->lock); 1245 spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
1727{ 1694{
1728 if (buf == NULL) 1695 if (buf == NULL)
1729 return; 1696 return;
1730 buf->resid = 0; 1697 buf->iter.bi_size = 0;
1731 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); 1698 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
1732 if (buf->nframesout == 0) 1699 if (buf->nframesout == 0)
1733 aoe_end_buf(d, buf); 1700 aoe_end_buf(d, buf);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d91f1a56e861..e73b85cf0756 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
328 struct block_device *bdev = bio->bi_bdev; 328 struct block_device *bdev = bio->bi_bdev;
329 struct brd_device *brd = bdev->bd_disk->private_data; 329 struct brd_device *brd = bdev->bd_disk->private_data;
330 int rw; 330 int rw;
331 struct bio_vec *bvec; 331 struct bio_vec bvec;
332 sector_t sector; 332 sector_t sector;
333 int i; 333 struct bvec_iter iter;
334 int err = -EIO; 334 int err = -EIO;
335 335
336 sector = bio->bi_sector; 336 sector = bio->bi_iter.bi_sector;
337 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) 337 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
338 goto out; 338 goto out;
339 339
340 if (unlikely(bio->bi_rw & REQ_DISCARD)) { 340 if (unlikely(bio->bi_rw & REQ_DISCARD)) {
341 err = 0; 341 err = 0;
342 discard_from_brd(brd, sector, bio->bi_size); 342 discard_from_brd(brd, sector, bio->bi_iter.bi_size);
343 goto out; 343 goto out;
344 } 344 }
345 345
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
347 if (rw == READA) 347 if (rw == READA)
348 rw = READ; 348 rw = READ;
349 349
350 bio_for_each_segment(bvec, bio, i) { 350 bio_for_each_segment(bvec, bio, iter) {
351 unsigned int len = bvec->bv_len; 351 unsigned int len = bvec.bv_len;
352 err = brd_do_bvec(brd, bvec->bv_page, len, 352 err = brd_do_bvec(brd, bvec.bv_page, len,
353 bvec->bv_offset, rw, sector); 353 bvec.bv_offset, rw, sector);
354 if (err) 354 if (err)
355 break; 355 break;
356 sector += len >> SECTOR_SHIFT; 356 sector += len >> SECTOR_SHIFT;
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca320a8..a9b13f2cc420 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
159 159
160 bio = bio_alloc_drbd(GFP_NOIO); 160 bio = bio_alloc_drbd(GFP_NOIO);
161 bio->bi_bdev = bdev->md_bdev; 161 bio->bi_bdev = bdev->md_bdev;
162 bio->bi_sector = sector; 162 bio->bi_iter.bi_sector = sector;
163 err = -EIO; 163 err = -EIO;
164 if (bio_add_page(bio, page, size, 0) != size) 164 if (bio_add_page(bio, page, size, 0) != size)
165 goto out; 165 goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11ec4bd2..597f111df67b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
1028 } else 1028 } else
1029 page = b->bm_pages[page_nr]; 1029 page = b->bm_pages[page_nr];
1030 bio->bi_bdev = mdev->ldev->md_bdev; 1030 bio->bi_bdev = mdev->ldev->md_bdev;
1031 bio->bi_sector = on_disk_sector; 1031 bio->bi_iter.bi_sector = on_disk_sector;
1032 /* bio_add_page of a single page to an empty bio will always succeed, 1032 /* bio_add_page of a single page to an empty bio will always succeed,
1033 * according to api. Do we want to assert that? */ 1033 * according to api. Do we want to assert that? */
1034 bio_add_page(bio, page, len, 0); 1034 bio_add_page(bio, page, len, 0);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e3818b1bc83..929468e1512a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
1537 1537
1538static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) 1538static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1539{ 1539{
1540 struct bio_vec *bvec; 1540 struct bio_vec bvec;
1541 int i; 1541 struct bvec_iter iter;
1542
1542 /* hint all but last page with MSG_MORE */ 1543 /* hint all but last page with MSG_MORE */
1543 bio_for_each_segment(bvec, bio, i) { 1544 bio_for_each_segment(bvec, bio, iter) {
1544 int err; 1545 int err;
1545 1546
1546 err = _drbd_no_send_page(mdev, bvec->bv_page, 1547 err = _drbd_no_send_page(mdev, bvec.bv_page,
1547 bvec->bv_offset, bvec->bv_len, 1548 bvec.bv_offset, bvec.bv_len,
1548 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); 1549 bio_iter_last(bvec, iter)
1550 ? 0 : MSG_MORE);
1549 if (err) 1551 if (err)
1550 return err; 1552 return err;
1551 } 1553 }
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1554 1556
1555static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) 1557static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1556{ 1558{
1557 struct bio_vec *bvec; 1559 struct bio_vec bvec;
1558 int i; 1560 struct bvec_iter iter;
1561
1559 /* hint all but last page with MSG_MORE */ 1562 /* hint all but last page with MSG_MORE */
1560 bio_for_each_segment(bvec, bio, i) { 1563 bio_for_each_segment(bvec, bio, iter) {
1561 int err; 1564 int err;
1562 1565
1563 err = _drbd_send_page(mdev, bvec->bv_page, 1566 err = _drbd_send_page(mdev, bvec.bv_page,
1564 bvec->bv_offset, bvec->bv_len, 1567 bvec.bv_offset, bvec.bv_len,
1565 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); 1568 bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
1566 if (err) 1569 if (err)
1567 return err; 1570 return err;
1568 } 1571 }
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6fa6673b36b3..d073305ffd5e 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1333,7 +1333,7 @@ next_bio:
1333 goto fail; 1333 goto fail;
1334 } 1334 }
1335 /* > peer_req->i.sector, unless this is the first bio */ 1335 /* > peer_req->i.sector, unless this is the first bio */
1336 bio->bi_sector = sector; 1336 bio->bi_iter.bi_sector = sector;
1337 bio->bi_bdev = mdev->ldev->backing_bdev; 1337 bio->bi_bdev = mdev->ldev->backing_bdev;
1338 bio->bi_rw = rw; 1338 bio->bi_rw = rw;
1339 bio->bi_private = peer_req; 1339 bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
1353 dev_err(DEV, 1353 dev_err(DEV,
1354 "bio_add_page failed for len=%u, " 1354 "bio_add_page failed for len=%u, "
1355 "bi_vcnt=0 (bi_sector=%llu)\n", 1355 "bi_vcnt=0 (bi_sector=%llu)\n",
1356 len, (unsigned long long)bio->bi_sector); 1356 len, (uint64_t)bio->bi_iter.bi_sector);
1357 err = -ENOSPC; 1357 err = -ENOSPC;
1358 goto fail; 1358 goto fail;
1359 } 1359 }
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1595static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, 1595static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1596 sector_t sector, int data_size) 1596 sector_t sector, int data_size)
1597{ 1597{
1598 struct bio_vec *bvec; 1598 struct bio_vec bvec;
1599 struct bvec_iter iter;
1599 struct bio *bio; 1600 struct bio *bio;
1600 int dgs, err, i, expect; 1601 int dgs, err, expect;
1601 void *dig_in = mdev->tconn->int_dig_in; 1602 void *dig_in = mdev->tconn->int_dig_in;
1602 void *dig_vv = mdev->tconn->int_dig_vv; 1603 void *dig_vv = mdev->tconn->int_dig_vv;
1603 1604
@@ -1615,13 +1616,13 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1615 mdev->recv_cnt += data_size>>9; 1616 mdev->recv_cnt += data_size>>9;
1616 1617
1617 bio = req->master_bio; 1618 bio = req->master_bio;
1618 D_ASSERT(sector == bio->bi_sector); 1619 D_ASSERT(sector == bio->bi_iter.bi_sector);
1619 1620
1620 bio_for_each_segment(bvec, bio, i) { 1621 bio_for_each_segment(bvec, bio, iter) {
1621 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset; 1622 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1622 expect = min_t(int, data_size, bvec->bv_len); 1623 expect = min_t(int, data_size, bvec.bv_len);
1623 err = drbd_recv_all_warn(mdev->tconn, mapped, expect); 1624 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
1624 kunmap(bvec->bv_page); 1625 kunmap(bvec.bv_page);
1625 if (err) 1626 if (err)
1626 return err; 1627 return err;
1627 data_size -= expect; 1628 data_size -= expect;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index fec7bef44994..104a040f24de 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
77 req->epoch = 0; 77 req->epoch = 0;
78 78
79 drbd_clear_interval(&req->i); 79 drbd_clear_interval(&req->i);
80 req->i.sector = bio_src->bi_sector; 80 req->i.sector = bio_src->bi_iter.bi_sector;
81 req->i.size = bio_src->bi_size; 81 req->i.size = bio_src->bi_iter.bi_size;
82 req->i.local = true; 82 req->i.local = true;
83 req->i.waiting = false; 83 req->i.waiting = false;
84 84
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
1280 /* 1280 /*
1281 * what we "blindly" assume: 1281 * what we "blindly" assume:
1282 */ 1282 */
1283 D_ASSERT(IS_ALIGNED(bio->bi_size, 512)); 1283 D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
1284 1284
1285 inc_ap_bio(mdev); 1285 inc_ap_bio(mdev);
1286 __drbd_make_request(mdev, bio, start_time); 1286 __drbd_make_request(mdev, bio, start_time);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1addc98..28e15d91197a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
269 269
270/* Short lived temporary struct on the stack. 270/* Short lived temporary struct on the stack.
271 * We could squirrel the error to be returned into 271 * We could squirrel the error to be returned into
272 * bio->bi_size, or similar. But that would be too ugly. */ 272 * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
273struct bio_and_error { 273struct bio_and_error {
274 struct bio *bio; 274 struct bio *bio;
275 int error; 275 int error;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 891c0ecaa292..84d3175d493a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
313{ 313{
314 struct hash_desc desc; 314 struct hash_desc desc;
315 struct scatterlist sg; 315 struct scatterlist sg;
316 struct bio_vec *bvec; 316 struct bio_vec bvec;
317 int i; 317 struct bvec_iter iter;
318 318
319 desc.tfm = tfm; 319 desc.tfm = tfm;
320 desc.flags = 0; 320 desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
322 sg_init_table(&sg, 1); 322 sg_init_table(&sg, 1);
323 crypto_hash_init(&desc); 323 crypto_hash_init(&desc);
324 324
325 bio_for_each_segment(bvec, bio, i) { 325 bio_for_each_segment(bvec, bio, iter) {
326 sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset); 326 sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
327 crypto_hash_update(&desc, &sg, sg.length); 327 crypto_hash_update(&desc, &sg, sg.length);
328 } 328 }
329 crypto_hash_final(&desc, digest); 329 crypto_hash_final(&desc, digest);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 000abe2f105c..6b29c4422828 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
2351/* Compute maximal contiguous buffer size. */ 2351/* Compute maximal contiguous buffer size. */
2352static int buffer_chain_size(void) 2352static int buffer_chain_size(void)
2353{ 2353{
2354 struct bio_vec *bv; 2354 struct bio_vec bv;
2355 int size; 2355 int size;
2356 struct req_iterator iter; 2356 struct req_iterator iter;
2357 char *base; 2357 char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
2360 size = 0; 2360 size = 0;
2361 2361
2362 rq_for_each_segment(bv, current_req, iter) { 2362 rq_for_each_segment(bv, current_req, iter) {
2363 if (page_address(bv->bv_page) + bv->bv_offset != base + size) 2363 if (page_address(bv.bv_page) + bv.bv_offset != base + size)
2364 break; 2364 break;
2365 2365
2366 size += bv->bv_len; 2366 size += bv.bv_len;
2367 } 2367 }
2368 2368
2369 return size >> 9; 2369 return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
2389static void copy_buffer(int ssize, int max_sector, int max_sector_2) 2389static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2390{ 2390{
2391 int remaining; /* number of transferred 512-byte sectors */ 2391 int remaining; /* number of transferred 512-byte sectors */
2392 struct bio_vec *bv; 2392 struct bio_vec bv;
2393 char *buffer; 2393 char *buffer;
2394 char *dma_buffer; 2394 char *dma_buffer;
2395 int size; 2395 int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2427 if (!remaining) 2427 if (!remaining)
2428 break; 2428 break;
2429 2429
2430 size = bv->bv_len; 2430 size = bv.bv_len;
2431 SUPBOUND(size, remaining); 2431 SUPBOUND(size, remaining);
2432 2432
2433 buffer = page_address(bv->bv_page) + bv->bv_offset; 2433 buffer = page_address(bv.bv_page) + bv.bv_offset;
2434 if (dma_buffer + size > 2434 if (dma_buffer + size >
2435 floppy_track_buffer + (max_buffer_sectors << 10) || 2435 floppy_track_buffer + (max_buffer_sectors << 10) ||
2436 dma_buffer < floppy_track_buffer) { 2436 dma_buffer < floppy_track_buffer) {
@@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev)
3775 bio_vec.bv_len = size; 3775 bio_vec.bv_len = size;
3776 bio_vec.bv_offset = 0; 3776 bio_vec.bv_offset = 0;
3777 bio.bi_vcnt = 1; 3777 bio.bi_vcnt = 1;
3778 bio.bi_size = size; 3778 bio.bi_iter.bi_size = size;
3779 bio.bi_bdev = bdev; 3779 bio.bi_bdev = bdev;
3780 bio.bi_sector = 0; 3780 bio.bi_iter.bi_sector = 0;
3781 bio.bi_flags = (1 << BIO_QUIET); 3781 bio.bi_flags = (1 << BIO_QUIET);
3782 init_completion(&complete); 3782 init_completion(&complete);
3783 bio.bi_private = &complete; 3783 bio.bi_private = &complete;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c8dac7305244..33fde3a39759 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
288{ 288{
289 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, 289 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
290 struct page *page); 290 struct page *page);
291 struct bio_vec *bvec; 291 struct bio_vec bvec;
292 struct bvec_iter iter;
292 struct page *page = NULL; 293 struct page *page = NULL;
293 int i, ret = 0; 294 int ret = 0;
294 295
295 if (lo->transfer != transfer_none) { 296 if (lo->transfer != transfer_none) {
296 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); 297 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
302 do_lo_send = do_lo_send_direct_write; 303 do_lo_send = do_lo_send_direct_write;
303 } 304 }
304 305
305 bio_for_each_segment(bvec, bio, i) { 306 bio_for_each_segment(bvec, bio, iter) {
306 ret = do_lo_send(lo, bvec, pos, page); 307 ret = do_lo_send(lo, &bvec, pos, page);
307 if (ret < 0) 308 if (ret < 0)
308 break; 309 break;
309 pos += bvec->bv_len; 310 pos += bvec.bv_len;
310 } 311 }
311 if (page) { 312 if (page) {
312 kunmap(page); 313 kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
392static int 393static int
393lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) 394lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
394{ 395{
395 struct bio_vec *bvec; 396 struct bio_vec bvec;
397 struct bvec_iter iter;
396 ssize_t s; 398 ssize_t s;
397 int i;
398 399
399 bio_for_each_segment(bvec, bio, i) { 400 bio_for_each_segment(bvec, bio, iter) {
400 s = do_lo_receive(lo, bvec, bsize, pos); 401 s = do_lo_receive(lo, &bvec, bsize, pos);
401 if (s < 0) 402 if (s < 0)
402 return s; 403 return s;
403 404
404 if (s != bvec->bv_len) { 405 if (s != bvec.bv_len) {
405 zero_fill_bio(bio); 406 zero_fill_bio(bio);
406 break; 407 break;
407 } 408 }
408 pos += bvec->bv_len; 409 pos += bvec.bv_len;
409 } 410 }
410 return 0; 411 return 0;
411} 412}
@@ -415,7 +416,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
415 loff_t pos; 416 loff_t pos;
416 int ret; 417 int ret;
417 418
418 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 419 pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
419 420
420 if (bio_rw(bio) == WRITE) { 421 if (bio_rw(bio) == WRITE) {
421 struct file *file = lo->lo_backing_file; 422 struct file *file = lo->lo_backing_file;
@@ -444,7 +445,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
444 goto out; 445 goto out;
445 } 446 }
446 ret = file->f_op->fallocate(file, mode, pos, 447 ret = file->f_op->fallocate(file, mode, pos,
447 bio->bi_size); 448 bio->bi_iter.bi_size);
448 if (unlikely(ret && ret != -EINVAL && 449 if (unlikely(ret && ret != -EINVAL &&
449 ret != -EOPNOTSUPP)) 450 ret != -EOPNOTSUPP))
450 ret = -EIO; 451 ret = -EIO;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 050c71267f14..52b2f2a71470 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3962,8 +3962,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3962{ 3962{
3963 struct driver_data *dd = queue->queuedata; 3963 struct driver_data *dd = queue->queuedata;
3964 struct scatterlist *sg; 3964 struct scatterlist *sg;
3965 struct bio_vec *bvec; 3965 struct bio_vec bvec;
3966 int i, nents = 0; 3966 struct bvec_iter iter;
3967 int nents = 0;
3967 int tag = 0, unaligned = 0; 3968 int tag = 0, unaligned = 0;
3968 3969
3969 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { 3970 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3993,7 +3994,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3993 } 3994 }
3994 3995
3995 if (unlikely(bio->bi_rw & REQ_DISCARD)) { 3996 if (unlikely(bio->bi_rw & REQ_DISCARD)) {
3996 bio_endio(bio, mtip_send_trim(dd, bio->bi_sector, 3997 bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
3997 bio_sectors(bio))); 3998 bio_sectors(bio)));
3998 return; 3999 return;
3999 } 4000 }
@@ -4006,7 +4007,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
4006 4007
4007 if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 && 4008 if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
4008 dd->unal_qdepth) { 4009 dd->unal_qdepth) {
4009 if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */ 4010 if (bio->bi_iter.bi_sector % 8 != 0)
4011 /* Unaligned on 4k boundaries */
4010 unaligned = 1; 4012 unaligned = 1;
4011 else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */ 4013 else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
4012 unaligned = 1; 4014 unaligned = 1;
@@ -4025,17 +4027,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
4025 } 4027 }
4026 4028
4027 /* Create the scatter list for this bio. */ 4029 /* Create the scatter list for this bio. */
4028 bio_for_each_segment(bvec, bio, i) { 4030 bio_for_each_segment(bvec, bio, iter) {
4029 sg_set_page(&sg[nents], 4031 sg_set_page(&sg[nents],
4030 bvec->bv_page, 4032 bvec.bv_page,
4031 bvec->bv_len, 4033 bvec.bv_len,
4032 bvec->bv_offset); 4034 bvec.bv_offset);
4033 nents++; 4035 nents++;
4034 } 4036 }
4035 4037
4036 /* Issue the read/write. */ 4038 /* Issue the read/write. */
4037 mtip_hw_submit_io(dd, 4039 mtip_hw_submit_io(dd,
4038 bio->bi_sector, 4040 bio->bi_iter.bi_sector,
4039 bio_sectors(bio), 4041 bio_sectors(bio),
4040 nents, 4042 nents,
4041 tag, 4043 tag,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dc3b5153f0d..55298db36b2d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
271 271
272 if (nbd_cmd(req) == NBD_CMD_WRITE) { 272 if (nbd_cmd(req) == NBD_CMD_WRITE) {
273 struct req_iterator iter; 273 struct req_iterator iter;
274 struct bio_vec *bvec; 274 struct bio_vec bvec;
275 /* 275 /*
276 * we are really probing at internals to determine 276 * we are really probing at internals to determine
277 * whether to set MSG_MORE or not... 277 * whether to set MSG_MORE or not...
278 */ 278 */
279 rq_for_each_segment(bvec, req, iter) { 279 rq_for_each_segment(bvec, req, iter) {
280 flags = 0; 280 flags = 0;
281 if (!rq_iter_last(req, iter)) 281 if (!rq_iter_last(bvec, iter))
282 flags = MSG_MORE; 282 flags = MSG_MORE;
283 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", 283 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
284 nbd->disk->disk_name, req, bvec->bv_len); 284 nbd->disk->disk_name, req, bvec.bv_len);
285 result = sock_send_bvec(nbd, bvec, flags); 285 result = sock_send_bvec(nbd, &bvec, flags);
286 if (result <= 0) { 286 if (result <= 0) {
287 dev_err(disk_to_dev(nbd->disk), 287 dev_err(disk_to_dev(nbd->disk),
288 "Send data failed (result %d)\n", 288 "Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
378 nbd->disk->disk_name, req); 378 nbd->disk->disk_name, req);
379 if (nbd_cmd(req) == NBD_CMD_READ) { 379 if (nbd_cmd(req) == NBD_CMD_READ) {
380 struct req_iterator iter; 380 struct req_iterator iter;
381 struct bio_vec *bvec; 381 struct bio_vec bvec;
382 382
383 rq_for_each_segment(bvec, req, iter) { 383 rq_for_each_segment(bvec, req, iter) {
384 result = sock_recv_bvec(nbd, bvec); 384 result = sock_recv_bvec(nbd, &bvec);
385 if (result <= 0) { 385 if (result <= 0) {
386 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 386 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
387 result); 387 result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
389 return req; 389 return req;
390 } 390 }
391 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", 391 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
392 nbd->disk->disk_name, req, bvec->bv_len); 392 nbd->disk->disk_name, req, bvec.bv_len);
393 } 393 }
394 } 394 }
395 return req; 395 return req;
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 26d03fa0bf26..1f14ac403945 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -441,104 +441,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
441 return total_len; 441 return total_len;
442} 442}
443 443
444struct nvme_bio_pair {
445 struct bio b1, b2, *parent;
446 struct bio_vec *bv1, *bv2;
447 int err;
448 atomic_t cnt;
449};
450
451static void nvme_bio_pair_endio(struct bio *bio, int err)
452{
453 struct nvme_bio_pair *bp = bio->bi_private;
454
455 if (err)
456 bp->err = err;
457
458 if (atomic_dec_and_test(&bp->cnt)) {
459 bio_endio(bp->parent, bp->err);
460 kfree(bp->bv1);
461 kfree(bp->bv2);
462 kfree(bp);
463 }
464}
465
466static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
467 int len, int offset)
468{
469 struct nvme_bio_pair *bp;
470
471 BUG_ON(len > bio->bi_size);
472 BUG_ON(idx > bio->bi_vcnt);
473
474 bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
475 if (!bp)
476 return NULL;
477 bp->err = 0;
478
479 bp->b1 = *bio;
480 bp->b2 = *bio;
481
482 bp->b1.bi_size = len;
483 bp->b2.bi_size -= len;
484 bp->b1.bi_vcnt = idx;
485 bp->b2.bi_idx = idx;
486 bp->b2.bi_sector += len >> 9;
487
488 if (offset) {
489 bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
490 GFP_ATOMIC);
491 if (!bp->bv1)
492 goto split_fail_1;
493
494 bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
495 GFP_ATOMIC);
496 if (!bp->bv2)
497 goto split_fail_2;
498
499 memcpy(bp->bv1, bio->bi_io_vec,
500 bio->bi_max_vecs * sizeof(struct bio_vec));
501 memcpy(bp->bv2, bio->bi_io_vec,
502 bio->bi_max_vecs * sizeof(struct bio_vec));
503
504 bp->b1.bi_io_vec = bp->bv1;
505 bp->b2.bi_io_vec = bp->bv2;
506 bp->b2.bi_io_vec[idx].bv_offset += offset;
507 bp->b2.bi_io_vec[idx].bv_len -= offset;
508 bp->b1.bi_io_vec[idx].bv_len = offset;
509 bp->b1.bi_vcnt++;
510 } else
511 bp->bv1 = bp->bv2 = NULL;
512
513 bp->b1.bi_private = bp;
514 bp->b2.bi_private = bp;
515
516 bp->b1.bi_end_io = nvme_bio_pair_endio;
517 bp->b2.bi_end_io = nvme_bio_pair_endio;
518
519 bp->parent = bio;
520 atomic_set(&bp->cnt, 2);
521
522 return bp;
523
524 split_fail_2:
525 kfree(bp->bv1);
526 split_fail_1:
527 kfree(bp);
528 return NULL;
529}
530
531static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, 444static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
532 int idx, int len, int offset) 445 int len)
533{ 446{
534 struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset); 447 struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
535 if (!bp) 448 if (!split)
536 return -ENOMEM; 449 return -ENOMEM;
537 450
451 bio_chain(split, bio);
452
538 if (bio_list_empty(&nvmeq->sq_cong)) 453 if (bio_list_empty(&nvmeq->sq_cong))
539 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 454 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
540 bio_list_add(&nvmeq->sq_cong, &bp->b1); 455 bio_list_add(&nvmeq->sq_cong, split);
541 bio_list_add(&nvmeq->sq_cong, &bp->b2); 456 bio_list_add(&nvmeq->sq_cong, bio);
542 457
543 return 0; 458 return 0;
544} 459}
@@ -550,41 +465,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
550static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, 465static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
551 struct bio *bio, enum dma_data_direction dma_dir, int psegs) 466 struct bio *bio, enum dma_data_direction dma_dir, int psegs)
552{ 467{
553 struct bio_vec *bvec, *bvprv = NULL; 468 struct bio_vec bvec, bvprv;
469 struct bvec_iter iter;
554 struct scatterlist *sg = NULL; 470 struct scatterlist *sg = NULL;
555 int i, length = 0, nsegs = 0, split_len = bio->bi_size; 471 int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
472 int first = 1;
556 473
557 if (nvmeq->dev->stripe_size) 474 if (nvmeq->dev->stripe_size)
558 split_len = nvmeq->dev->stripe_size - 475 split_len = nvmeq->dev->stripe_size -
559 ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1)); 476 ((bio->bi_iter.bi_sector << 9) &
477 (nvmeq->dev->stripe_size - 1));
560 478
561 sg_init_table(iod->sg, psegs); 479 sg_init_table(iod->sg, psegs);
562 bio_for_each_segment(bvec, bio, i) { 480 bio_for_each_segment(bvec, bio, iter) {
563 if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { 481 if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
564 sg->length += bvec->bv_len; 482 sg->length += bvec.bv_len;
565 } else { 483 } else {
566 if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) 484 if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
567 return nvme_split_and_submit(bio, nvmeq, i, 485 return nvme_split_and_submit(bio, nvmeq,
568 length, 0); 486 length);
569 487
570 sg = sg ? sg + 1 : iod->sg; 488 sg = sg ? sg + 1 : iod->sg;
571 sg_set_page(sg, bvec->bv_page, bvec->bv_len, 489 sg_set_page(sg, bvec.bv_page,
572 bvec->bv_offset); 490 bvec.bv_len, bvec.bv_offset);
573 nsegs++; 491 nsegs++;
574 } 492 }
575 493
576 if (split_len - length < bvec->bv_len) 494 if (split_len - length < bvec.bv_len)
577 return nvme_split_and_submit(bio, nvmeq, i, split_len, 495 return nvme_split_and_submit(bio, nvmeq, split_len);
578 split_len - length); 496 length += bvec.bv_len;
579 length += bvec->bv_len;
580 bvprv = bvec; 497 bvprv = bvec;
498 first = 0;
581 } 499 }
582 iod->nents = nsegs; 500 iod->nents = nsegs;
583 sg_mark_end(sg); 501 sg_mark_end(sg);
584 if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0) 502 if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
585 return -ENOMEM; 503 return -ENOMEM;
586 504
587 BUG_ON(length != bio->bi_size); 505 BUG_ON(length != bio->bi_iter.bi_size);
588 return length; 506 return length;
589} 507}
590 508
@@ -608,8 +526,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
608 iod->npages = 0; 526 iod->npages = 0;
609 527
610 range->cattr = cpu_to_le32(0); 528 range->cattr = cpu_to_le32(0);
611 range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift); 529 range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
612 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); 530 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
613 531
614 memset(cmnd, 0, sizeof(*cmnd)); 532 memset(cmnd, 0, sizeof(*cmnd));
615 cmnd->dsm.opcode = nvme_cmd_dsm; 533 cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +592,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
674 } 592 }
675 593
676 result = -ENOMEM; 594 result = -ENOMEM;
677 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); 595 iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
678 if (!iod) 596 if (!iod)
679 goto nomem; 597 goto nomem;
680 iod->private = bio; 598 iod->private = bio;
@@ -723,7 +641,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
723 cmnd->rw.nsid = cpu_to_le32(ns->ns_id); 641 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
724 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, 642 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
725 GFP_ATOMIC); 643 GFP_ATOMIC);
726 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); 644 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
727 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); 645 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
728 cmnd->rw.control = cpu_to_le16(control); 646 cmnd->rw.control = cpu_to_le16(control);
729 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 647 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c5efb1..3dda09a5ec41 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
651 651
652 for (;;) { 652 for (;;) {
653 tmp = rb_entry(n, struct pkt_rb_node, rb_node); 653 tmp = rb_entry(n, struct pkt_rb_node, rb_node);
654 if (s <= tmp->bio->bi_sector) 654 if (s <= tmp->bio->bi_iter.bi_sector)
655 next = n->rb_left; 655 next = n->rb_left;
656 else 656 else
657 next = n->rb_right; 657 next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
660 n = next; 660 n = next;
661 } 661 }
662 662
663 if (s > tmp->bio->bi_sector) { 663 if (s > tmp->bio->bi_iter.bi_sector) {
664 tmp = pkt_rbtree_next(tmp); 664 tmp = pkt_rbtree_next(tmp);
665 if (!tmp) 665 if (!tmp)
666 return NULL; 666 return NULL;
667 } 667 }
668 BUG_ON(s > tmp->bio->bi_sector); 668 BUG_ON(s > tmp->bio->bi_iter.bi_sector);
669 return tmp; 669 return tmp;
670} 670}
671 671
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
676{ 676{
677 struct rb_node **p = &pd->bio_queue.rb_node; 677 struct rb_node **p = &pd->bio_queue.rb_node;
678 struct rb_node *parent = NULL; 678 struct rb_node *parent = NULL;
679 sector_t s = node->bio->bi_sector; 679 sector_t s = node->bio->bi_iter.bi_sector;
680 struct pkt_rb_node *tmp; 680 struct pkt_rb_node *tmp;
681 681
682 while (*p) { 682 while (*p) {
683 parent = *p; 683 parent = *p;
684 tmp = rb_entry(parent, struct pkt_rb_node, rb_node); 684 tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
685 if (s < tmp->bio->bi_sector) 685 if (s < tmp->bio->bi_iter.bi_sector)
686 p = &(*p)->rb_left; 686 p = &(*p)->rb_left;
687 else 687 else
688 p = &(*p)->rb_right; 688 p = &(*p)->rb_right;
@@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
857 spin_lock(&pd->iosched.lock); 857 spin_lock(&pd->iosched.lock);
858 bio = bio_list_peek(&pd->iosched.write_queue); 858 bio = bio_list_peek(&pd->iosched.write_queue);
859 spin_unlock(&pd->iosched.lock); 859 spin_unlock(&pd->iosched.lock);
860 if (bio && (bio->bi_sector == pd->iosched.last_write)) 860 if (bio && (bio->bi_iter.bi_sector ==
861 pd->iosched.last_write))
861 need_write_seek = 0; 862 need_write_seek = 0;
862 if (need_write_seek && reads_queued) { 863 if (need_write_seek && reads_queued) {
863 if (atomic_read(&pd->cdrw.pending_bios) > 0) { 864 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
888 continue; 889 continue;
889 890
890 if (bio_data_dir(bio) == READ) 891 if (bio_data_dir(bio) == READ)
891 pd->iosched.successive_reads += bio->bi_size >> 10; 892 pd->iosched.successive_reads +=
893 bio->bi_iter.bi_size >> 10;
892 else { 894 else {
893 pd->iosched.successive_reads = 0; 895 pd->iosched.successive_reads = 0;
894 pd->iosched.last_write = bio_end_sector(bio); 896 pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
978 980
979 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", 981 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
980 bio, (unsigned long long)pkt->sector, 982 bio, (unsigned long long)pkt->sector,
981 (unsigned long long)bio->bi_sector, err); 983 (unsigned long long)bio->bi_iter.bi_sector, err);
982 984
983 if (err) 985 if (err)
984 atomic_inc(&pkt->io_errors); 986 atomic_inc(&pkt->io_errors);
@@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1026 memset(written, 0, sizeof(written)); 1028 memset(written, 0, sizeof(written));
1027 spin_lock(&pkt->lock); 1029 spin_lock(&pkt->lock);
1028 bio_list_for_each(bio, &pkt->orig_bios) { 1030 bio_list_for_each(bio, &pkt->orig_bios) {
1029 int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); 1031 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
1030 int num_frames = bio->bi_size / CD_FRAMESIZE; 1032 (CD_FRAMESIZE >> 9);
1033 int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
1031 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); 1034 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
1032 BUG_ON(first_frame < 0); 1035 BUG_ON(first_frame < 0);
1033 BUG_ON(first_frame + num_frames > pkt->frames); 1036 BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1053 1056
1054 bio = pkt->r_bios[f]; 1057 bio = pkt->r_bios[f];
1055 bio_reset(bio); 1058 bio_reset(bio);
1056 bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); 1059 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
1057 bio->bi_bdev = pd->bdev; 1060 bio->bi_bdev = pd->bdev;
1058 bio->bi_end_io = pkt_end_io_read; 1061 bio->bi_end_io = pkt_end_io_read;
1059 bio->bi_private = pkt; 1062 bio->bi_private = pkt;
@@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
1150 bio_reset(pkt->bio); 1153 bio_reset(pkt->bio);
1151 pkt->bio->bi_bdev = pd->bdev; 1154 pkt->bio->bi_bdev = pd->bdev;
1152 pkt->bio->bi_rw = REQ_WRITE; 1155 pkt->bio->bi_rw = REQ_WRITE;
1153 pkt->bio->bi_sector = new_sector; 1156 pkt->bio->bi_iter.bi_sector = new_sector;
1154 pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE; 1157 pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
1155 pkt->bio->bi_vcnt = pkt->frames; 1158 pkt->bio->bi_vcnt = pkt->frames;
1156 1159
1157 pkt->bio->bi_end_io = pkt_end_io_packet_write; 1160 pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
1213 node = first_node; 1216 node = first_node;
1214 while (node) { 1217 while (node) {
1215 bio = node->bio; 1218 bio = node->bio;
1216 zone = get_zone(bio->bi_sector, pd); 1219 zone = get_zone(bio->bi_iter.bi_sector, pd);
1217 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { 1220 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
1218 if (p->sector == zone) { 1221 if (p->sector == zone) {
1219 bio = NULL; 1222 bio = NULL;
@@ -1252,14 +1255,14 @@ try_next_bio:
1252 pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); 1255 pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
1253 while ((node = pkt_rbtree_find(pd, zone)) != NULL) { 1256 while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
1254 bio = node->bio; 1257 bio = node->bio;
1255 pkt_dbg(2, pd, "found zone=%llx\n", 1258 pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
1256 (unsigned long long)get_zone(bio->bi_sector, pd)); 1259 get_zone(bio->bi_iter.bi_sector, pd));
1257 if (get_zone(bio->bi_sector, pd) != zone) 1260 if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
1258 break; 1261 break;
1259 pkt_rbtree_erase(pd, node); 1262 pkt_rbtree_erase(pd, node);
1260 spin_lock(&pkt->lock); 1263 spin_lock(&pkt->lock);
1261 bio_list_add(&pkt->orig_bios, bio); 1264 bio_list_add(&pkt->orig_bios, bio);
1262 pkt->write_size += bio->bi_size / CD_FRAMESIZE; 1265 pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
1263 spin_unlock(&pkt->lock); 1266 spin_unlock(&pkt->lock);
1264 } 1267 }
1265 /* check write congestion marks, and if bio_queue_size is 1268 /* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1293 struct bio_vec *bvec = pkt->w_bio->bi_io_vec; 1296 struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
1294 1297
1295 bio_reset(pkt->w_bio); 1298 bio_reset(pkt->w_bio);
1296 pkt->w_bio->bi_sector = pkt->sector; 1299 pkt->w_bio->bi_iter.bi_sector = pkt->sector;
1297 pkt->w_bio->bi_bdev = pd->bdev; 1300 pkt->w_bio->bi_bdev = pd->bdev;
1298 pkt->w_bio->bi_end_io = pkt_end_io_packet_write; 1301 pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1299 pkt->w_bio->bi_private = pkt; 1302 pkt->w_bio->bi_private = pkt;
@@ -2335,75 +2338,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
2335 pkt_bio_finished(pd); 2338 pkt_bio_finished(pd);
2336} 2339}
2337 2340
2338static void pkt_make_request(struct request_queue *q, struct bio *bio) 2341static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
2339{ 2342{
2340 struct pktcdvd_device *pd; 2343 struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
2341 char b[BDEVNAME_SIZE]; 2344 struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
2345
2346 psd->pd = pd;
2347 psd->bio = bio;
2348 cloned_bio->bi_bdev = pd->bdev;
2349 cloned_bio->bi_private = psd;
2350 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2351 pd->stats.secs_r += bio_sectors(bio);
2352 pkt_queue_bio(pd, cloned_bio);
2353}
2354
2355static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
2356{
2357 struct pktcdvd_device *pd = q->queuedata;
2342 sector_t zone; 2358 sector_t zone;
2343 struct packet_data *pkt; 2359 struct packet_data *pkt;
2344 int was_empty, blocked_bio; 2360 int was_empty, blocked_bio;
2345 struct pkt_rb_node *node; 2361 struct pkt_rb_node *node;
2346 2362
2347 pd = q->queuedata; 2363 zone = get_zone(bio->bi_iter.bi_sector, pd);
2348 if (!pd) {
2349 pr_err("%s incorrect request queue\n",
2350 bdevname(bio->bi_bdev, b));
2351 goto end_io;
2352 }
2353
2354 /*
2355 * Clone READ bios so we can have our own bi_end_io callback.
2356 */
2357 if (bio_data_dir(bio) == READ) {
2358 struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
2359 struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
2360
2361 psd->pd = pd;
2362 psd->bio = bio;
2363 cloned_bio->bi_bdev = pd->bdev;
2364 cloned_bio->bi_private = psd;
2365 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2366 pd->stats.secs_r += bio_sectors(bio);
2367 pkt_queue_bio(pd, cloned_bio);
2368 return;
2369 }
2370
2371 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2372 pkt_notice(pd, "WRITE for ro device (%llu)\n",
2373 (unsigned long long)bio->bi_sector);
2374 goto end_io;
2375 }
2376
2377 if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
2378 pkt_err(pd, "wrong bio size\n");
2379 goto end_io;
2380 }
2381
2382 blk_queue_bounce(q, &bio);
2383
2384 zone = get_zone(bio->bi_sector, pd);
2385 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2386 (unsigned long long)bio->bi_sector,
2387 (unsigned long long)bio_end_sector(bio));
2388
2389 /* Check if we have to split the bio */
2390 {
2391 struct bio_pair *bp;
2392 sector_t last_zone;
2393 int first_sectors;
2394
2395 last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2396 if (last_zone != zone) {
2397 BUG_ON(last_zone != zone + pd->settings.size);
2398 first_sectors = last_zone - bio->bi_sector;
2399 bp = bio_split(bio, first_sectors);
2400 BUG_ON(!bp);
2401 pkt_make_request(q, &bp->bio1);
2402 pkt_make_request(q, &bp->bio2);
2403 bio_pair_release(bp);
2404 return;
2405 }
2406 }
2407 2364
2408 /* 2365 /*
2409 * If we find a matching packet in state WAITING or READ_WAIT, we can 2366 * If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2417,7 +2374,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
2417 if ((pkt->state == PACKET_WAITING_STATE) || 2374 if ((pkt->state == PACKET_WAITING_STATE) ||
2418 (pkt->state == PACKET_READ_WAIT_STATE)) { 2375 (pkt->state == PACKET_READ_WAIT_STATE)) {
2419 bio_list_add(&pkt->orig_bios, bio); 2376 bio_list_add(&pkt->orig_bios, bio);
2420 pkt->write_size += bio->bi_size / CD_FRAMESIZE; 2377 pkt->write_size +=
2378 bio->bi_iter.bi_size / CD_FRAMESIZE;
2421 if ((pkt->write_size >= pkt->frames) && 2379 if ((pkt->write_size >= pkt->frames) &&
2422 (pkt->state == PACKET_WAITING_STATE)) { 2380 (pkt->state == PACKET_WAITING_STATE)) {
2423 atomic_inc(&pkt->run_sm); 2381 atomic_inc(&pkt->run_sm);
@@ -2476,6 +2434,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
2476 */ 2434 */
2477 wake_up(&pd->wqueue); 2435 wake_up(&pd->wqueue);
2478 } 2436 }
2437}
2438
2439static void pkt_make_request(struct request_queue *q, struct bio *bio)
2440{
2441 struct pktcdvd_device *pd;
2442 char b[BDEVNAME_SIZE];
2443 struct bio *split;
2444
2445 pd = q->queuedata;
2446 if (!pd) {
2447 pr_err("%s incorrect request queue\n",
2448 bdevname(bio->bi_bdev, b));
2449 goto end_io;
2450 }
2451
2452 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2453 (unsigned long long)bio->bi_iter.bi_sector,
2454 (unsigned long long)bio_end_sector(bio));
2455
2456 /*
2457 * Clone READ bios so we can have our own bi_end_io callback.
2458 */
2459 if (bio_data_dir(bio) == READ) {
2460 pkt_make_request_read(pd, bio);
2461 return;
2462 }
2463
2464 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2465 pkt_notice(pd, "WRITE for ro device (%llu)\n",
2466 (unsigned long long)bio->bi_iter.bi_sector);
2467 goto end_io;
2468 }
2469
2470 if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
2471 pkt_err(pd, "wrong bio size\n");
2472 goto end_io;
2473 }
2474
2475 blk_queue_bounce(q, &bio);
2476
2477 do {
2478 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2479 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2480
2481 if (last_zone != zone) {
2482 BUG_ON(last_zone != zone + pd->settings.size);
2483
2484 split = bio_split(bio, last_zone -
2485 bio->bi_iter.bi_sector,
2486 GFP_NOIO, fs_bio_set);
2487 bio_chain(split, bio);
2488 } else {
2489 split = bio;
2490 }
2491
2492 pkt_make_request_write(q, split);
2493 } while (split != bio);
2494
2479 return; 2495 return;
2480end_io: 2496end_io:
2481 bio_io_error(bio); 2497 bio_io_error(bio);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88d7585..c120d70d3fb3 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
94{ 94{
95 unsigned int offset = 0; 95 unsigned int offset = 0;
96 struct req_iterator iter; 96 struct req_iterator iter;
97 struct bio_vec *bvec; 97 struct bio_vec bvec;
98 unsigned int i = 0; 98 unsigned int i = 0;
99 size_t size; 99 size_t size;
100 void *buf; 100 void *buf;
101 101
102 rq_for_each_segment(bvec, req, iter) { 102 rq_for_each_segment(bvec, req, iter) {
103 unsigned long flags; 103 unsigned long flags;
104 dev_dbg(&dev->sbd.core, 104 dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
105 "%s:%u: bio %u: %u segs %u sectors from %lu\n", 105 __func__, __LINE__, i, bio_sectors(iter.bio),
106 __func__, __LINE__, i, bio_segments(iter.bio), 106 iter.bio->bi_iter.bi_sector);
107 bio_sectors(iter.bio), iter.bio->bi_sector);
108 107
109 size = bvec->bv_len; 108 size = bvec.bv_len;
110 buf = bvec_kmap_irq(bvec, &flags); 109 buf = bvec_kmap_irq(&bvec, &flags);
111 if (gather) 110 if (gather)
112 memcpy(dev->bounce_buf+offset, buf, size); 111 memcpy(dev->bounce_buf+offset, buf, size);
113 else 112 else
114 memcpy(buf, dev->bounce_buf+offset, size); 113 memcpy(buf, dev->bounce_buf+offset, size);
115 offset += size; 114 offset += size;
116 flush_kernel_dcache_page(bvec->bv_page); 115 flush_kernel_dcache_page(bvec.bv_page);
117 bvec_kunmap_irq(buf, &flags); 116 bvec_kunmap_irq(buf, &flags);
118 i++; 117 i++;
119 } 118 }
@@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
130 129
131#ifdef DEBUG 130#ifdef DEBUG
132 unsigned int n = 0; 131 unsigned int n = 0;
133 struct bio_vec *bv; 132 struct bio_vec bv;
134 struct req_iterator iter; 133 struct req_iterator iter;
135 134
136 rq_for_each_segment(bv, req, iter) 135 rq_for_each_segment(bv, req, iter)
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53e5f37..ef45cfb98fd2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -553,16 +553,16 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
553 struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); 553 struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
554 int write = bio_data_dir(bio) == WRITE; 554 int write = bio_data_dir(bio) == WRITE;
555 const char *op = write ? "write" : "read"; 555 const char *op = write ? "write" : "read";
556 loff_t offset = bio->bi_sector << 9; 556 loff_t offset = bio->bi_iter.bi_sector << 9;
557 int error = 0; 557 int error = 0;
558 struct bio_vec *bvec; 558 struct bio_vec bvec;
559 unsigned int i; 559 struct bvec_iter iter;
560 struct bio *next; 560 struct bio *next;
561 561
562 bio_for_each_segment(bvec, bio, i) { 562 bio_for_each_segment(bvec, bio, iter) {
563 /* PS3 is ppc64, so we don't handle highmem */ 563 /* PS3 is ppc64, so we don't handle highmem */
564 char *ptr = page_address(bvec->bv_page) + bvec->bv_offset; 564 char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
565 size_t len = bvec->bv_len, retlen; 565 size_t len = bvec.bv_len, retlen;
566 566
567 dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op, 567 dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
568 len, offset); 568 len, offset);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index 16cab6635163..b365e0dfccb6 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1156,23 +1156,23 @@ static void bio_chain_put(struct bio *chain)
1156 */ 1156 */
1157static void zero_bio_chain(struct bio *chain, int start_ofs) 1157static void zero_bio_chain(struct bio *chain, int start_ofs)
1158{ 1158{
1159 struct bio_vec *bv; 1159 struct bio_vec bv;
1160 struct bvec_iter iter;
1160 unsigned long flags; 1161 unsigned long flags;
1161 void *buf; 1162 void *buf;
1162 int i;
1163 int pos = 0; 1163 int pos = 0;
1164 1164
1165 while (chain) { 1165 while (chain) {
1166 bio_for_each_segment(bv, chain, i) { 1166 bio_for_each_segment(bv, chain, iter) {
1167 if (pos + bv->bv_len > start_ofs) { 1167 if (pos + bv.bv_len > start_ofs) {
1168 int remainder = max(start_ofs - pos, 0); 1168 int remainder = max(start_ofs - pos, 0);
1169 buf = bvec_kmap_irq(bv, &flags); 1169 buf = bvec_kmap_irq(&bv, &flags);
1170 memset(buf + remainder, 0, 1170 memset(buf + remainder, 0,
1171 bv->bv_len - remainder); 1171 bv.bv_len - remainder);
1172 flush_dcache_page(bv->bv_page); 1172 flush_dcache_page(bv.bv_page);
1173 bvec_kunmap_irq(buf, &flags); 1173 bvec_kunmap_irq(buf, &flags);
1174 } 1174 }
1175 pos += bv->bv_len; 1175 pos += bv.bv_len;
1176 } 1176 }
1177 1177
1178 chain = chain->bi_next; 1178 chain = chain->bi_next;
@@ -1220,74 +1220,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
1220 unsigned int len, 1220 unsigned int len,
1221 gfp_t gfpmask) 1221 gfp_t gfpmask)
1222{ 1222{
1223 struct bio_vec *bv;
1224 unsigned int resid;
1225 unsigned short idx;
1226 unsigned int voff;
1227 unsigned short end_idx;
1228 unsigned short vcnt;
1229 struct bio *bio; 1223 struct bio *bio;
1230 1224
1231 /* Handle the easy case for the caller */ 1225 bio = bio_clone(bio_src, gfpmask);
1232
1233 if (!offset && len == bio_src->bi_size)
1234 return bio_clone(bio_src, gfpmask);
1235
1236 if (WARN_ON_ONCE(!len))
1237 return NULL;
1238 if (WARN_ON_ONCE(len > bio_src->bi_size))
1239 return NULL;
1240 if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
1241 return NULL;
1242
1243 /* Find first affected segment... */
1244
1245 resid = offset;
1246 bio_for_each_segment(bv, bio_src, idx) {
1247 if (resid < bv->bv_len)
1248 break;
1249 resid -= bv->bv_len;
1250 }
1251 voff = resid;
1252
1253 /* ...and the last affected segment */
1254
1255 resid += len;
1256 __bio_for_each_segment(bv, bio_src, end_idx, idx) {
1257 if (resid <= bv->bv_len)
1258 break;
1259 resid -= bv->bv_len;
1260 }
1261 vcnt = end_idx - idx + 1;
1262
1263 /* Build the clone */
1264
1265 bio = bio_alloc(gfpmask, (unsigned int) vcnt);
1266 if (!bio) 1226 if (!bio)
1267 return NULL; /* ENOMEM */ 1227 return NULL; /* ENOMEM */
1268 1228
1269 bio->bi_bdev = bio_src->bi_bdev; 1229 bio_advance(bio, offset);
1270 bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT); 1230 bio->bi_iter.bi_size = len;
1271 bio->bi_rw = bio_src->bi_rw;
1272 bio->bi_flags |= 1 << BIO_CLONED;
1273
1274 /*
1275 * Copy over our part of the bio_vec, then update the first
1276 * and last (or only) entries.
1277 */
1278 memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
1279 vcnt * sizeof (struct bio_vec));
1280 bio->bi_io_vec[0].bv_offset += voff;
1281 if (vcnt > 1) {
1282 bio->bi_io_vec[0].bv_len -= voff;
1283 bio->bi_io_vec[vcnt - 1].bv_len = resid;
1284 } else {
1285 bio->bi_io_vec[0].bv_len = len;
1286 }
1287
1288 bio->bi_vcnt = vcnt;
1289 bio->bi_size = len;
1290 bio->bi_idx = 0;
1291 1231
1292 return bio; 1232 return bio;
1293} 1233}
@@ -1318,7 +1258,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
1318 1258
1319 /* Build up a chain of clone bios up to the limit */ 1259 /* Build up a chain of clone bios up to the limit */
1320 1260
1321 if (!bi || off >= bi->bi_size || !len) 1261 if (!bi || off >= bi->bi_iter.bi_size || !len)
1322 return NULL; /* Nothing to clone */ 1262 return NULL; /* Nothing to clone */
1323 1263
1324 end = &chain; 1264 end = &chain;
@@ -1330,7 +1270,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
1330 rbd_warn(NULL, "bio_chain exhausted with %u left", len); 1270 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1331 goto out_err; /* EINVAL; ran out of bio's */ 1271 goto out_err; /* EINVAL; ran out of bio's */
1332 } 1272 }
1333 bi_size = min_t(unsigned int, bi->bi_size - off, len); 1273 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
1334 bio = bio_clone_range(bi, off, bi_size, gfpmask); 1274 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1335 if (!bio) 1275 if (!bio)
1336 goto out_err; /* ENOMEM */ 1276 goto out_err; /* ENOMEM */
@@ -1339,7 +1279,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
1339 end = &bio->bi_next; 1279 end = &bio->bi_next;
1340 1280
1341 off += bi_size; 1281 off += bi_size;
1342 if (off == bi->bi_size) { 1282 if (off == bi->bi_iter.bi_size) {
1343 bi = bi->bi_next; 1283 bi = bi->bi_next;
1344 off = 0; 1284 off = 0;
1345 } 1285 }
@@ -2227,7 +2167,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
2227 2167
2228 if (type == OBJ_REQUEST_BIO) { 2168 if (type == OBJ_REQUEST_BIO) {
2229 bio_list = data_desc; 2169 bio_list = data_desc;
2230 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT); 2170 rbd_assert(img_offset ==
2171 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2231 } else { 2172 } else {
2232 rbd_assert(type == OBJ_REQUEST_PAGES); 2173 rbd_assert(type == OBJ_REQUEST_PAGES);
2233 pages = data_desc; 2174 pages = data_desc;
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2284f5d3a54a..2839d37e5af7 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
174 if (!card) 174 if (!card)
175 goto req_err; 175 goto req_err;
176 176
177 if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk)) 177 if (bio_end_sector(bio) > get_capacity(card->gendisk))
178 goto req_err; 178 goto req_err;
179 179
180 if (unlikely(card->halt)) { 180 if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
187 goto req_err; 187 goto req_err;
188 } 188 }
189 189
190 if (bio->bi_size == 0) { 190 if (bio->bi_iter.bi_size == 0) {
191 dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); 191 dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
192 goto req_err; 192 goto req_err;
193 } 193 }
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
208 208
209 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", 209 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
210 bio_data_dir(bio) ? 'W' : 'R', bio_meta, 210 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
211 (u64)bio->bi_sector << 9, bio->bi_size); 211 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
212 212
213 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas, 213 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
214 bio_dma_done_cb, bio_meta); 214 bio_dma_done_cb, bio_meta);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index fc88ba3e1bd2..cf8cd293abb5 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
684 void *cb_data) 684 void *cb_data)
685{ 685{
686 struct list_head dma_list[RSXX_MAX_TARGETS]; 686 struct list_head dma_list[RSXX_MAX_TARGETS];
687 struct bio_vec *bvec; 687 struct bio_vec bvec;
688 struct bvec_iter iter;
688 unsigned long long addr8; 689 unsigned long long addr8;
689 unsigned int laddr; 690 unsigned int laddr;
690 unsigned int bv_len; 691 unsigned int bv_len;
@@ -696,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
696 int st; 697 int st;
697 int i; 698 int i;
698 699
699 addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */ 700 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
700 atomic_set(n_dmas, 0); 701 atomic_set(n_dmas, 0);
701 702
702 for (i = 0; i < card->n_targets; i++) { 703 for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
705 } 706 }
706 707
707 if (bio->bi_rw & REQ_DISCARD) { 708 if (bio->bi_rw & REQ_DISCARD) {
708 bv_len = bio->bi_size; 709 bv_len = bio->bi_iter.bi_size;
709 710
710 while (bv_len > 0) { 711 while (bv_len > 0) {
711 tgt = rsxx_get_dma_tgt(card, addr8); 712 tgt = rsxx_get_dma_tgt(card, addr8);
@@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
722 bv_len -= RSXX_HW_BLK_SIZE; 723 bv_len -= RSXX_HW_BLK_SIZE;
723 } 724 }
724 } else { 725 } else {
725 bio_for_each_segment(bvec, bio, i) { 726 bio_for_each_segment(bvec, bio, iter) {
726 bv_len = bvec->bv_len; 727 bv_len = bvec.bv_len;
727 bv_off = bvec->bv_offset; 728 bv_off = bvec.bv_offset;
728 729
729 while (bv_len > 0) { 730 while (bv_len > 0) {
730 tgt = rsxx_get_dma_tgt(card, addr8); 731 tgt = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
736 st = rsxx_queue_dma(card, &dma_list[tgt], 737 st = rsxx_queue_dma(card, &dma_list[tgt],
737 bio_data_dir(bio), 738 bio_data_dir(bio),
738 dma_off, dma_len, 739 dma_off, dma_len,
739 laddr, bvec->bv_page, 740 laddr, bvec.bv_page,
740 bv_off, cb, cb_data); 741 bv_off, cb, cb_data);
741 if (st) 742 if (st)
742 goto bvec_err; 743 goto bvec_err;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index ad70868f8a96..4cf81b5bf0f7 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -108,8 +108,7 @@ struct cardinfo {
108 * have been written 108 * have been written
109 */ 109 */
110 struct bio *bio, *currentbio, **biotail; 110 struct bio *bio, *currentbio, **biotail;
111 int current_idx; 111 struct bvec_iter current_iter;
112 sector_t current_sector;
113 112
114 struct request_queue *queue; 113 struct request_queue *queue;
115 114
@@ -118,7 +117,7 @@ struct cardinfo {
118 struct mm_dma_desc *desc; 117 struct mm_dma_desc *desc;
119 int cnt, headcnt; 118 int cnt, headcnt;
120 struct bio *bio, **biotail; 119 struct bio *bio, **biotail;
121 int idx; 120 struct bvec_iter iter;
122 } mm_pages[2]; 121 } mm_pages[2];
123#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc)) 122#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
124 123
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
344 dma_addr_t dma_handle; 343 dma_addr_t dma_handle;
345 int offset; 344 int offset;
346 struct bio *bio; 345 struct bio *bio;
347 struct bio_vec *vec; 346 struct bio_vec vec;
348 int idx;
349 int rw; 347 int rw;
350 int len;
351 348
352 bio = card->currentbio; 349 bio = card->currentbio;
353 if (!bio && card->bio) { 350 if (!bio && card->bio) {
354 card->currentbio = card->bio; 351 card->currentbio = card->bio;
355 card->current_idx = card->bio->bi_idx; 352 card->current_iter = card->bio->bi_iter;
356 card->current_sector = card->bio->bi_sector;
357 card->bio = card->bio->bi_next; 353 card->bio = card->bio->bi_next;
358 if (card->bio == NULL) 354 if (card->bio == NULL)
359 card->biotail = &card->bio; 355 card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
362 } 358 }
363 if (!bio) 359 if (!bio)
364 return 0; 360 return 0;
365 idx = card->current_idx;
366 361
367 rw = bio_rw(bio); 362 rw = bio_rw(bio);
368 if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE) 363 if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
369 return 0; 364 return 0;
370 365
371 vec = bio_iovec_idx(bio, idx); 366 vec = bio_iter_iovec(bio, card->current_iter);
372 len = vec->bv_len; 367
373 dma_handle = pci_map_page(card->dev, 368 dma_handle = pci_map_page(card->dev,
374 vec->bv_page, 369 vec.bv_page,
375 vec->bv_offset, 370 vec.bv_offset,
376 len, 371 vec.bv_len,
377 (rw == READ) ? 372 (rw == READ) ?
378 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); 373 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
379 374
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
381 desc = &p->desc[p->cnt]; 376 desc = &p->desc[p->cnt];
382 p->cnt++; 377 p->cnt++;
383 if (p->bio == NULL) 378 if (p->bio == NULL)
384 p->idx = idx; 379 p->iter = card->current_iter;
385 if ((p->biotail) != &bio->bi_next) { 380 if ((p->biotail) != &bio->bi_next) {
386 *(p->biotail) = bio; 381 *(p->biotail) = bio;
387 p->biotail = &(bio->bi_next); 382 p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
391 desc->data_dma_handle = dma_handle; 386 desc->data_dma_handle = dma_handle;
392 387
393 desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); 388 desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
394 desc->local_addr = cpu_to_le64(card->current_sector << 9); 389 desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
395 desc->transfer_size = cpu_to_le32(len); 390 desc->transfer_size = cpu_to_le32(vec.bv_len);
396 offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc)); 391 offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
397 desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); 392 desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
398 desc->zero1 = desc->zero2 = 0; 393 desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
407 desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ); 402 desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
408 desc->sem_control_bits = desc->control_bits; 403 desc->sem_control_bits = desc->control_bits;
409 404
410 card->current_sector += (len >> 9); 405
411 idx++; 406 bio_advance_iter(bio, &card->current_iter, vec.bv_len);
412 card->current_idx = idx; 407 if (!card->current_iter.bi_size)
413 if (idx >= bio->bi_vcnt)
414 card->currentbio = NULL; 408 card->currentbio = NULL;
415 409
416 return 1; 410 return 1;
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
439 struct mm_dma_desc *desc = &page->desc[page->headcnt]; 433 struct mm_dma_desc *desc = &page->desc[page->headcnt];
440 int control = le32_to_cpu(desc->sem_control_bits); 434 int control = le32_to_cpu(desc->sem_control_bits);
441 int last = 0; 435 int last = 0;
442 int idx; 436 struct bio_vec vec;
443 437
444 if (!(control & DMASCR_DMA_COMPLETE)) { 438 if (!(control & DMASCR_DMA_COMPLETE)) {
445 control = dma_status; 439 control = dma_status;
446 last = 1; 440 last = 1;
447 } 441 }
442
448 page->headcnt++; 443 page->headcnt++;
449 idx = page->idx; 444 vec = bio_iter_iovec(bio, page->iter);
450 page->idx++; 445 bio_advance_iter(bio, &page->iter, vec.bv_len);
451 if (page->idx >= bio->bi_vcnt) { 446
447 if (!page->iter.bi_size) {
452 page->bio = bio->bi_next; 448 page->bio = bio->bi_next;
453 if (page->bio) 449 if (page->bio)
454 page->idx = page->bio->bi_idx; 450 page->iter = page->bio->bi_iter;
455 } 451 }
456 452
457 pci_unmap_page(card->dev, desc->data_dma_handle, 453 pci_unmap_page(card->dev, desc->data_dma_handle,
458 bio_iovec_idx(bio, idx)->bv_len, 454 vec.bv_len,
459 (control & DMASCR_TRANSFER_READ) ? 455 (control & DMASCR_TRANSFER_READ) ?
460 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 456 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
461 if (control & DMASCR_HARD_ERROR) { 457 if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
532{ 528{
533 struct cardinfo *card = q->queuedata; 529 struct cardinfo *card = q->queuedata;
534 pr_debug("mm_make_request %llu %u\n", 530 pr_debug("mm_make_request %llu %u\n",
535 (unsigned long long)bio->bi_sector, bio->bi_size); 531 (unsigned long long)bio->bi_iter.bi_sector,
532 bio->bi_iter.bi_size);
536 533
537 spin_lock_irq(&card->lock); 534 spin_lock_irq(&card->lock);
538 *card->biotail = bio; 535 *card->biotail = bio;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6620b73d0490..4b97b86da926 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1257 bio->bi_bdev = preq.bdev; 1257 bio->bi_bdev = preq.bdev;
1258 bio->bi_private = pending_req; 1258 bio->bi_private = pending_req;
1259 bio->bi_end_io = end_block_io_op; 1259 bio->bi_end_io = end_block_io_op;
1260 bio->bi_sector = preq.sector_number; 1260 bio->bi_iter.bi_sector = preq.sector_number;
1261 } 1261 }
1262 1262
1263 preq.sector_number += seg[i].nsec; 1263 preq.sector_number += seg[i].nsec;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index f9c43f91f03e..8dcfb54f1603 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
1547 for (i = 0; i < pending; i++) { 1547 for (i = 0; i < pending; i++) {
1548 offset = (i * segs * PAGE_SIZE) >> 9; 1548 offset = (i * segs * PAGE_SIZE) >> 9;
1549 size = min((unsigned int)(segs * PAGE_SIZE) >> 9, 1549 size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
1550 (unsigned int)(bio->bi_size >> 9) - offset); 1550 (unsigned int)bio_sectors(bio) - offset);
1551 cloned_bio = bio_clone(bio, GFP_NOIO); 1551 cloned_bio = bio_clone(bio, GFP_NOIO);
1552 BUG_ON(cloned_bio == NULL); 1552 BUG_ON(cloned_bio == NULL);
1553 bio_trim(cloned_bio, offset, size); 1553 bio_trim(cloned_bio, offset, size);