author    Kent Overstreet <kmo@daterainc.com>	2013-10-11 18:44:27 -0400
committer Kent Overstreet <kmo@daterainc.com>	2013-11-24 01:33:47 -0500
commit    4f024f3797c43cb4b73cd2c50cec728842d0e49e (patch)
tree      3aedcab02d2ad723a189d01934d1e94fec7a54e1 /drivers/block
parent    ed9c47bebeeea4a468b07cfd745c690190f8014c (diff)
block: Abstract out bvec iterator
Immutable biovecs are going to require an explicit iterator. To implement immutable bvecs, a later patch is going to add a bi_bvec_done member to this struct; for now, this patch effectively just renames things.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>
Diffstat (limited to 'drivers/block')
-rw-r--r--  drivers/block/aoe/aoecmd.c           |  6
-rw-r--r--  drivers/block/brd.c                  |  4
-rw-r--r--  drivers/block/drbd/drbd_actlog.c     |  2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c     |  2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c   |  6
-rw-r--r--  drivers/block/drbd/drbd_req.c        |  6
-rw-r--r--  drivers/block/drbd/drbd_req.h        |  2
-rw-r--r--  drivers/block/floppy.c               |  4
-rw-r--r--  drivers/block/loop.c                 |  4
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c    |  7
-rw-r--r--  drivers/block/nvme-core.c            | 25
-rw-r--r--  drivers/block/pktcdvd.c              | 54
-rw-r--r--  drivers/block/ps3disk.c              |  2
-rw-r--r--  drivers/block/ps3vram.c              |  2
-rw-r--r--  drivers/block/rbd.c                  | 21
-rw-r--r--  drivers/block/rsxx/dev.c             |  6
-rw-r--r--  drivers/block/rsxx/dma.c             |  4
-rw-r--r--  drivers/block/umem.c                 |  9
-rw-r--r--  drivers/block/xen-blkback/blkback.c  |  2
-rw-r--r--  drivers/block/xen-blkfront.c         |  2
20 files changed, 89 insertions(+), 81 deletions(-)
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d2515435e23f..877ba119b3f8 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -929,8 +929,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
 	memset(buf, 0, sizeof(*buf));
 	buf->rq = rq;
 	buf->bio = bio;
-	buf->resid = bio->bi_size;
-	buf->sector = bio->bi_sector;
+	buf->resid = bio->bi_iter.bi_size;
+	buf->sector = bio->bi_iter.bi_sector;
 	bio_pageinc(bio);
 	buf->bv = bio_iovec(bio);
 	buf->bv_resid = buf->bv->bv_len;
@@ -1152,7 +1152,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 	do {
 		bio = rq->bio;
 		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
-	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
 
 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
 	if (!fastfail)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d91f1a56e861..66f5aaae15a2 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -333,13 +333,13 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
 	int i;
 	int err = -EIO;
 
-	sector = bio->bi_sector;
+	sector = bio->bi_iter.bi_sector;
 	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
 		goto out;
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		err = 0;
-		discard_from_brd(brd, sector, bio->bi_size);
+		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
 		goto out;
 	}
 
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca320a8..a9b13f2cc420 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 
 	bio = bio_alloc_drbd(GFP_NOIO);
 	bio->bi_bdev = bdev->md_bdev;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	err = -EIO;
 	if (bio_add_page(bio, page, size, 0) != size)
 		goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11ec4bd2..597f111df67b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 	} else
 		page = b->bm_pages[page_nr];
 	bio->bi_bdev = mdev->ldev->md_bdev;
-	bio->bi_sector = on_disk_sector;
+	bio->bi_iter.bi_sector = on_disk_sector;
 	/* bio_add_page of a single page to an empty bio will always succeed,
 	 * according to api. Do we want to assert that? */
 	bio_add_page(bio, page, len, 0);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6fa6673b36b3..5326c22cdb9d 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1333,7 +1333,7 @@ next_bio:
 		goto fail;
 	}
 	/* > peer_req->i.sector, unless this is the first bio */
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	bio->bi_rw = rw;
 	bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
 				dev_err(DEV,
 					"bio_add_page failed for len=%u, "
 					"bi_vcnt=0 (bi_sector=%llu)\n",
-					len, (unsigned long long)bio->bi_sector);
+					len, (uint64_t)bio->bi_iter.bi_sector);
 				err = -ENOSPC;
 				goto fail;
 			}
@@ -1615,7 +1615,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 	mdev->recv_cnt += data_size>>9;
 
 	bio = req->master_bio;
-	D_ASSERT(sector == bio->bi_sector);
+	D_ASSERT(sector == bio->bi_iter.bi_sector);
 
 	bio_for_each_segment(bvec, bio, i) {
 		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index fec7bef44994..104a040f24de 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
 	req->epoch = 0;
 
 	drbd_clear_interval(&req->i);
-	req->i.sector = bio_src->bi_sector;
-	req->i.size = bio_src->bi_size;
+	req->i.sector = bio_src->bi_iter.bi_sector;
+	req->i.size = bio_src->bi_iter.bi_size;
 	req->i.local = true;
 	req->i.waiting = false;
 
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 	/*
 	 * what we "blindly" assume:
 	 */
-	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+	D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
 
 	inc_ap_bio(mdev);
 	__drbd_make_request(mdev, bio, start_time);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1addc98..28e15d91197a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
 
 /* Short lived temporary struct on the stack.
  * We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
 struct bio_and_error {
 	struct bio *bio;
 	int error;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 000abe2f105c..6a86fe7b730f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio_vec.bv_len = size;
 	bio_vec.bv_offset = 0;
 	bio.bi_vcnt = 1;
-	bio.bi_size = size;
+	bio.bi_iter.bi_size = size;
 	bio.bi_bdev = bdev;
-	bio.bi_sector = 0;
+	bio.bi_iter.bi_sector = 0;
 	bio.bi_flags = (1 << BIO_QUIET);
 	init_completion(&complete);
 	bio.bi_private = &complete;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c8dac7305244..f5e39989adde 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -415,7 +415,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	loff_t pos;
 	int ret;
 
-	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+	pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
 
 	if (bio_rw(bio) == WRITE) {
 		struct file *file = lo->lo_backing_file;
@@ -444,7 +444,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 				goto out;
 			}
 			ret = file->f_op->fallocate(file, mode, pos,
-						    bio->bi_size);
+						    bio->bi_iter.bi_size);
 			if (unlikely(ret && ret != -EINVAL &&
 				     ret != -EOPNOTSUPP))
 				ret = -EIO;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 050c71267f14..69e9eb5a6b34 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3993,7 +3993,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 	}
 
 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
-		bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+		bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
 						bio_sectors(bio)));
 		return;
 	}
@@ -4006,7 +4006,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 
 	if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
 	    dd->unal_qdepth) {
-		if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+		if (bio->bi_iter.bi_sector % 8 != 0)
+			/* Unaligned on 4k boundaries */
 			unaligned = 1;
 		else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
 			unaligned = 1;
@@ -4035,7 +4036,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 
 	/* Issue the read/write. */
 	mtip_hw_submit_io(dd,
-			bio->bi_sector,
+			bio->bi_iter.bi_sector,
 			bio_sectors(bio),
 			nents,
 			tag,
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 26d03fa0bf26..53d217381873 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -468,7 +468,7 @@ static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
 {
 	struct nvme_bio_pair *bp;
 
-	BUG_ON(len > bio->bi_size);
+	BUG_ON(len > bio->bi_iter.bi_size);
 	BUG_ON(idx > bio->bi_vcnt);
 
 	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
@@ -479,11 +479,11 @@ static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
 	bp->b1 = *bio;
 	bp->b2 = *bio;
 
-	bp->b1.bi_size = len;
-	bp->b2.bi_size -= len;
+	bp->b1.bi_iter.bi_size = len;
+	bp->b2.bi_iter.bi_size -= len;
 	bp->b1.bi_vcnt = idx;
-	bp->b2.bi_idx = idx;
-	bp->b2.bi_sector += len >> 9;
+	bp->b2.bi_iter.bi_idx = idx;
+	bp->b2.bi_iter.bi_sector += len >> 9;
 
 	if (offset) {
 		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
@@ -552,11 +552,12 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 {
 	struct bio_vec *bvec, *bvprv = NULL;
 	struct scatterlist *sg = NULL;
-	int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+	int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
 
 	if (nvmeq->dev->stripe_size)
 		split_len = nvmeq->dev->stripe_size -
-			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+			((bio->bi_iter.bi_sector << 9) &
+			 (nvmeq->dev->stripe_size - 1));
 
 	sg_init_table(iod->sg, psegs);
 	bio_for_each_segment(bvec, bio, i) {
@@ -584,7 +585,7 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
 		return -ENOMEM;
 
-	BUG_ON(length != bio->bi_size);
+	BUG_ON(length != bio->bi_iter.bi_size);
 	return length;
 }
 
@@ -608,8 +609,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	iod->npages = 0;
 
 	range->cattr = cpu_to_le32(0);
-	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
-	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+	range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 
 	memset(cmnd, 0, sizeof(*cmnd));
 	cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +675,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	}
 
 	result = -ENOMEM;
-	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
 	if (!iod)
 		goto nomem;
 	iod->private = bio;
@@ -723,7 +724,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
 	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
 								GFP_ATOMIC);
-	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c5efb1..ce986bacf7b7 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 
 	for (;;) {
 		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
-		if (s <= tmp->bio->bi_sector)
+		if (s <= tmp->bio->bi_iter.bi_sector)
 			next = n->rb_left;
 		else
 			next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 		n = next;
 	}
 
-	if (s > tmp->bio->bi_sector) {
+	if (s > tmp->bio->bi_iter.bi_sector) {
 		tmp = pkt_rbtree_next(tmp);
 		if (!tmp)
 			return NULL;
 	}
-	BUG_ON(s > tmp->bio->bi_sector);
+	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
 	return tmp;
 }
 
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
 {
 	struct rb_node **p = &pd->bio_queue.rb_node;
 	struct rb_node *parent = NULL;
-	sector_t s = node->bio->bi_sector;
+	sector_t s = node->bio->bi_iter.bi_sector;
 	struct pkt_rb_node *tmp;
 
 	while (*p) {
 		parent = *p;
 		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
-		if (s < tmp->bio->bi_sector)
+		if (s < tmp->bio->bi_iter.bi_sector)
 			p = &(*p)->rb_left;
 		else
 			p = &(*p)->rb_right;
@@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 			spin_lock(&pd->iosched.lock);
 			bio = bio_list_peek(&pd->iosched.write_queue);
 			spin_unlock(&pd->iosched.lock);
-			if (bio && (bio->bi_sector == pd->iosched.last_write))
+			if (bio && (bio->bi_iter.bi_sector ==
+				    pd->iosched.last_write))
 				need_write_seek = 0;
 			if (need_write_seek && reads_queued) {
 				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 			continue;
 
 		if (bio_data_dir(bio) == READ)
-			pd->iosched.successive_reads += bio->bi_size >> 10;
+			pd->iosched.successive_reads +=
+				bio->bi_iter.bi_size >> 10;
 		else {
 			pd->iosched.successive_reads = 0;
 			pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
 
 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_sector, err);
+		(unsigned long long)bio->bi_iter.bi_sector, err);
 
 	if (err)
 		atomic_inc(&pkt->io_errors);
@@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 	memset(written, 0, sizeof(written));
 	spin_lock(&pkt->lock);
 	bio_list_for_each(bio, &pkt->orig_bios) {
-		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
-		int num_frames = bio->bi_size / CD_FRAMESIZE;
+		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+			(CD_FRAMESIZE >> 9);
+		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
 		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
 		BUG_ON(first_frame < 0);
 		BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 
 		bio = pkt->r_bios[f];
 		bio_reset(bio);
-		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
 		bio->bi_bdev = pd->bdev;
 		bio->bi_end_io = pkt_end_io_read;
 		bio->bi_private = pkt;
@@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
 	bio_reset(pkt->bio);
 	pkt->bio->bi_bdev = pd->bdev;
 	pkt->bio->bi_rw = REQ_WRITE;
-	pkt->bio->bi_sector = new_sector;
-	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+	pkt->bio->bi_iter.bi_sector = new_sector;
+	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
 	pkt->bio->bi_vcnt = pkt->frames;
 
 	pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
 	node = first_node;
 	while (node) {
 		bio = node->bio;
-		zone = get_zone(bio->bi_sector, pd);
+		zone = get_zone(bio->bi_iter.bi_sector, pd);
 		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
 			if (p->sector == zone) {
 				bio = NULL;
@@ -1252,14 +1255,14 @@ try_next_bio:
 	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
 	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
 		bio = node->bio;
-		pkt_dbg(2, pd, "found zone=%llx\n",
-			(unsigned long long)get_zone(bio->bi_sector, pd));
-		if (get_zone(bio->bi_sector, pd) != zone)
+		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+			get_zone(bio->bi_iter.bi_sector, pd));
+		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
 			break;
 		pkt_rbtree_erase(pd, node);
 		spin_lock(&pkt->lock);
 		bio_list_add(&pkt->orig_bios, bio);
-		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
 		spin_unlock(&pkt->lock);
 	}
 	/* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
 
 	bio_reset(pkt->w_bio);
-	pkt->w_bio->bi_sector = pkt->sector;
+	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
 	pkt->w_bio->bi_bdev = pd->bdev;
 	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
 	pkt->w_bio->bi_private = pkt;
@@ -2370,20 +2373,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 
 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
 		pkt_notice(pd, "WRITE for ro device (%llu)\n",
-			   (unsigned long long)bio->bi_sector);
+			   (unsigned long long)bio->bi_iter.bi_sector);
 		goto end_io;
 	}
 
-	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
+	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
 		pkt_err(pd, "wrong bio size\n");
 		goto end_io;
 	}
 
 	blk_queue_bounce(q, &bio);
 
-	zone = get_zone(bio->bi_sector, pd);
+	zone = get_zone(bio->bi_iter.bi_sector, pd);
 	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
-		(unsigned long long)bio->bi_sector,
+		(unsigned long long)bio->bi_iter.bi_sector,
 		(unsigned long long)bio_end_sector(bio));
 
 	/* Check if we have to split the bio */
@@ -2395,7 +2398,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 	last_zone = get_zone(bio_end_sector(bio) - 1, pd);
 	if (last_zone != zone) {
 		BUG_ON(last_zone != zone + pd->settings.size);
-		first_sectors = last_zone - bio->bi_sector;
+		first_sectors = last_zone - bio->bi_iter.bi_sector;
 		bp = bio_split(bio, first_sectors);
 		BUG_ON(!bp);
 		pkt_make_request(q, &bp->bio1);
@@ -2417,7 +2420,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 			if ((pkt->state == PACKET_WAITING_STATE) ||
 			    (pkt->state == PACKET_READ_WAIT_STATE)) {
 				bio_list_add(&pkt->orig_bios, bio);
-				pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+				pkt->write_size +=
+					bio->bi_iter.bi_size / CD_FRAMESIZE;
 				if ((pkt->write_size >= pkt->frames) &&
 				    (pkt->state == PACKET_WAITING_STATE)) {
 					atomic_inc(&pkt->run_sm);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88d7585..464be78a0836 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -104,7 +104,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 		dev_dbg(&dev->sbd.core,
 			"%s:%u: bio %u: %u segs %u sectors from %lu\n",
 			__func__, __LINE__, i, bio_segments(iter.bio),
-			bio_sectors(iter.bio), iter.bio->bi_sector);
+			bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector);
 
 		size = bvec->bv_len;
 		buf = bvec_kmap_irq(bvec, &flags);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53e5f37..320bbfc9b902 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -553,7 +553,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	int write = bio_data_dir(bio) == WRITE;
 	const char *op = write ? "write" : "read";
-	loff_t offset = bio->bi_sector << 9;
+	loff_t offset = bio->bi_iter.bi_sector << 9;
 	int error = 0;
 	struct bio_vec *bvec;
 	unsigned int i;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index cb1db2979d3d..a8f4fe2d4d1b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1183,14 +1183,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 
 	/* Handle the easy case for the caller */
 
-	if (!offset && len == bio_src->bi_size)
+	if (!offset && len == bio_src->bi_iter.bi_size)
 		return bio_clone(bio_src, gfpmask);
 
 	if (WARN_ON_ONCE(!len))
 		return NULL;
-	if (WARN_ON_ONCE(len > bio_src->bi_size))
+	if (WARN_ON_ONCE(len > bio_src->bi_iter.bi_size))
 		return NULL;
-	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
+	if (WARN_ON_ONCE(offset > bio_src->bi_iter.bi_size - len))
 		return NULL;
 
 	/* Find first affected segment... */
@@ -1220,7 +1220,8 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 		return NULL;	/* ENOMEM */
 
 	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
+	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector +
+				 (offset >> SECTOR_SHIFT);
 	bio->bi_rw = bio_src->bi_rw;
 	bio->bi_flags |= 1 << BIO_CLONED;
 
@@ -1239,8 +1240,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 	}
 
 	bio->bi_vcnt = vcnt;
-	bio->bi_size = len;
-	bio->bi_idx = 0;
+	bio->bi_iter.bi_size = len;
 
 	return bio;
 }
@@ -1271,7 +1271,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 
 	/* Build up a chain of clone bios up to the limit */
 
-	if (!bi || off >= bi->bi_size || !len)
+	if (!bi || off >= bi->bi_iter.bi_size || !len)
 		return NULL;		/* Nothing to clone */
 
 	end = &chain;
@@ -1283,7 +1283,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
 			goto out_err;	/* EINVAL; ran out of bio's */
 		}
-		bi_size = min_t(unsigned int, bi->bi_size - off, len);
+		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
 		bio = bio_clone_range(bi, off, bi_size, gfpmask);
 		if (!bio)
 			goto out_err;	/* ENOMEM */
@@ -1292,7 +1292,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 		end = &bio->bi_next;
 
 		off += bi_size;
-		if (off == bi->bi_size) {
+		if (off == bi->bi_iter.bi_size) {
 			bi = bi->bi_next;
 			off = 0;
 		}
@@ -2186,7 +2186,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
 
 	if (type == OBJ_REQUEST_BIO) {
 		bio_list = data_desc;
-		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+		rbd_assert(img_offset ==
+			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
 	} else {
 		rbd_assert(type == OBJ_REQUEST_PAGES);
 		pages = data_desc;
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2284f5d3a54a..2839d37e5af7 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 	if (!card)
 		goto req_err;
 
-	if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk))
+	if (bio_end_sector(bio) > get_capacity(card->gendisk))
 		goto req_err;
 
 	if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 		goto req_err;
 	}
 
-	if (bio->bi_size == 0) {
+	if (bio->bi_iter.bi_size == 0) {
 		dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
 		goto req_err;
 	}
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
 
 	dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
 		bio_data_dir(bio) ? 'W' : 'R', bio_meta,
-		(u64)bio->bi_sector << 9, bio->bi_size);
+		(u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
 
 	st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
 				bio_dma_done_cb, bio_meta);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index fc88ba3e1bd2..3716633be3c2 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -696,7 +696,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	int st;
 	int i;
 
-	addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */
+	addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
 	atomic_set(n_dmas, 0);
 
 	for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
 	}
 
 	if (bio->bi_rw & REQ_DISCARD) {
-		bv_len = bio->bi_size;
+		bv_len = bio->bi_iter.bi_size;
 
 		while (bv_len > 0) {
 			tgt = rsxx_get_dma_tgt(card, addr8);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index ad70868f8a96..dab4f1afeae9 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -352,8 +352,8 @@ static int add_bio(struct cardinfo *card)
 	bio = card->currentbio;
 	if (!bio && card->bio) {
 		card->currentbio = card->bio;
-		card->current_idx = card->bio->bi_idx;
-		card->current_sector = card->bio->bi_sector;
+		card->current_idx = card->bio->bi_iter.bi_idx;
+		card->current_sector = card->bio->bi_iter.bi_sector;
 		card->bio = card->bio->bi_next;
 		if (card->bio == NULL)
 			card->biotail = &card->bio;
@@ -451,7 +451,7 @@ static void process_page(unsigned long data)
 		if (page->idx >= bio->bi_vcnt) {
 			page->bio = bio->bi_next;
 			if (page->bio)
-				page->idx = page->bio->bi_idx;
+				page->idx = page->bio->bi_iter.bi_idx;
 		}
 
 		pci_unmap_page(card->dev, desc->data_dma_handle,
@@ -532,7 +532,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
 {
 	struct cardinfo *card = q->queuedata;
 	pr_debug("mm_make_request %llu %u\n",
-		 (unsigned long long)bio->bi_sector, bio->bi_size);
+		 (unsigned long long)bio->bi_iter.bi_sector,
+		 bio->bi_iter.bi_size);
 
 	spin_lock_irq(&card->lock);
 	*card->biotail = bio;
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6620b73d0490..4b97b86da926 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
 			bio->bi_bdev = preq.bdev;
 			bio->bi_private = pending_req;
 			bio->bi_end_io = end_block_io_op;
-			bio->bi_sector = preq.sector_number;
+			bio->bi_iter.bi_sector = preq.sector_number;
 		}
 
 		preq.sector_number += seg[i].nsec;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 432db1b59b00..80e86307dd4b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
 		for (i = 0; i < pending; i++) {
 			offset = (i * segs * PAGE_SIZE) >> 9;
 			size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
-				   (unsigned int)(bio->bi_size >> 9) - offset);
+				   (unsigned int)bio_sectors(bio) - offset);
 			cloned_bio = bio_clone(bio, GFP_NOIO);
 			BUG_ON(cloned_bio == NULL);
 			bio_trim(cloned_bio, offset, size);