-rw-r--r--  Documentation/block/biodoc.txt | 7
-rw-r--r--  arch/m68k/emu/nfblock.c | 2
-rw-r--r--  arch/powerpc/sysdev/axonram.c | 3
-rw-r--r--  block/blk-core.c | 36
-rw-r--r--  block/blk-flush.c | 2
-rw-r--r--  block/blk-lib.c | 12
-rw-r--r--  block/blk-map.c | 6
-rw-r--r--  block/blk-merge.c | 4
-rw-r--r--  block/blk-mq.c | 2
-rw-r--r--  block/blk-throttle.c | 14
-rw-r--r--  block/elevator.c | 2
-rw-r--r--  drivers/block/aoe/aoecmd.c | 6
-rw-r--r--  drivers/block/brd.c | 4
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 2
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 6
-rw-r--r--  drivers/block/drbd/drbd_req.c | 6
-rw-r--r--  drivers/block/drbd/drbd_req.h | 2
-rw-r--r--  drivers/block/floppy.c | 4
-rw-r--r--  drivers/block/loop.c | 4
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 7
-rw-r--r--  drivers/block/nvme-core.c | 25
-rw-r--r--  drivers/block/pktcdvd.c | 54
-rw-r--r--  drivers/block/ps3disk.c | 2
-rw-r--r--  drivers/block/ps3vram.c | 2
-rw-r--r--  drivers/block/rbd.c | 21
-rw-r--r--  drivers/block/rsxx/dev.c | 6
-rw-r--r--  drivers/block/rsxx/dma.c | 4
-rw-r--r--  drivers/block/umem.c | 9
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 2
-rw-r--r--  drivers/md/bcache/btree.c | 4
-rw-r--r--  drivers/md/bcache/debug.c | 2
-rw-r--r--  drivers/md/bcache/io.c | 26
-rw-r--r--  drivers/md/bcache/journal.c | 12
-rw-r--r--  drivers/md/bcache/movinggc.c | 4
-rw-r--r--  drivers/md/bcache/request.c | 58
-rw-r--r--  drivers/md/bcache/super.c | 16
-rw-r--r--  drivers/md/bcache/util.c | 4
-rw-r--r--  drivers/md/bcache/writeback.c | 6
-rw-r--r--  drivers/md/bcache/writeback.h | 2
-rw-r--r--  drivers/md/dm-bio-record.h | 12
-rw-r--r--  drivers/md/dm-bufio.c | 2
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 4
-rw-r--r--  drivers/md/dm-cache-target.c | 22
-rw-r--r--  drivers/md/dm-crypt.c | 19
-rw-r--r--  drivers/md/dm-delay.c | 7
-rw-r--r--  drivers/md/dm-flakey.c | 7
-rw-r--r--  drivers/md/dm-io.c | 6
-rw-r--r--  drivers/md/dm-linear.c | 3
-rw-r--r--  drivers/md/dm-raid1.c | 16
-rw-r--r--  drivers/md/dm-region-hash.c | 3
-rw-r--r--  drivers/md/dm-snap.c | 18
-rw-r--r--  drivers/md/dm-stripe.c | 13
-rw-r--r--  drivers/md/dm-switch.c | 4
-rw-r--r--  drivers/md/dm-thin.c | 22
-rw-r--r--  drivers/md/dm-verity.c | 8
-rw-r--r--  drivers/md/dm.c | 25
-rw-r--r--  drivers/md/faulty.c | 19
-rw-r--r--  drivers/md/linear.c | 12
-rw-r--r--  drivers/md/md.c | 10
-rw-r--r--  drivers/md/multipath.c | 13
-rw-r--r--  drivers/md/raid0.c | 16
-rw-r--r--  drivers/md/raid1.c | 75
-rw-r--r--  drivers/md/raid10.c | 91
-rw-r--r--  drivers/md/raid5.c | 72
-rw-r--r--  drivers/s390/block/dcssblk.c | 5
-rw-r--r--  drivers/s390/block/xpram.c | 9
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 2
-rw-r--r--  drivers/staging/lustre/lustre/llite/lloop.c | 12
-rw-r--r--  drivers/staging/zram/zram_drv.c | 14
-rw-r--r--  drivers/target/target_core_iblock.c | 2
-rw-r--r--  fs/bio-integrity.c | 8
-rw-r--r--  fs/bio.c | 56
-rw-r--r--  fs/btrfs/check-integrity.c | 8
-rw-r--r--  fs/btrfs/compression.c | 17
-rw-r--r--  fs/btrfs/extent_io.c | 14
-rw-r--r--  fs/btrfs/file-item.c | 19
-rw-r--r--  fs/btrfs/inode.c | 22
-rw-r--r--  fs/btrfs/raid56.c | 22
-rw-r--r--  fs/btrfs/scrub.c | 12
-rw-r--r--  fs/btrfs/volumes.c | 12
-rw-r--r--  fs/buffer.c | 12
-rw-r--r--  fs/direct-io.c | 4
-rw-r--r--  fs/ext4/page-io.c | 4
-rw-r--r--  fs/f2fs/data.c | 2
-rw-r--r--  fs/f2fs/segment.c | 2
-rw-r--r--  fs/gfs2/lops.c | 2
-rw-r--r--  fs/gfs2/ops_fstype.c | 2
-rw-r--r--  fs/hfsplus/wrapper.c | 2
-rw-r--r--  fs/jfs/jfs_logmgr.c | 12
-rw-r--r--  fs/jfs/jfs_metapage.c | 9
-rw-r--r--  fs/logfs/dev_bdev.c | 20
-rw-r--r--  fs/mpage.c | 2
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 9
-rw-r--r--  fs/nilfs2/segbuf.c | 3
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 2
-rw-r--r--  fs/xfs/xfs_aops.c | 2
-rw-r--r--  fs/xfs/xfs_buf.c | 4
-rw-r--r--  include/linux/bio.h | 16
-rw-r--r--  include/linux/blk_types.h | 19
-rw-r--r--  include/trace/events/bcache.h | 26
-rw-r--r--  include/trace/events/block.h | 26
-rw-r--r--  include/trace/events/f2fs.h | 4
-rw-r--r--  kernel/power/block_io.c | 2
-rw-r--r--  kernel/trace/blktrace.c | 15
-rw-r--r--  mm/page_io.c | 10
107 files changed, 700 insertions(+), 638 deletions(-)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 8df5e8e6dceb..2101e718670d 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -447,14 +447,13 @@ struct bio_vec {
  * main unit of I/O for the block layer and lower layers (ie drivers)
  */
 struct bio {
-	sector_t		bi_sector;
 	struct bio		*bi_next;	/* request queue link */
 	struct block_device	*bi_bdev;	/* target device */
 	unsigned long		bi_flags;	/* status, command, etc */
 	unsigned long		bi_rw;		/* low bits: r/w, high: priority */

 	unsigned int		bi_vcnt;	/* how may bio_vec's */
-	unsigned int		bi_idx;		/* current index into bio_vec array */
+	struct bvec_iter	bi_iter;	/* current index into bio_vec array */

 	unsigned int		bi_size;	/* total size in bytes */
 	unsigned short		bi_phys_segments; /* segments after physaddr coalesce*/
@@ -480,7 +479,7 @@ With this multipage bio design:
 - Code that traverses the req list can find all the segments of a bio
   by using rq_for_each_segment. This handles the fact that a request
   has multiple bios, each of which can have multiple segments.
-- Drivers which can't process a large bio in one shot can use the bi_idx
+- Drivers which can't process a large bio in one shot can use the bi_iter
   field to keep track of the next bio_vec entry to process.
   (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
   [TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying
@@ -589,7 +588,7 @@ driver should not modify these values. The block layer sets up the
 nr_sectors and current_nr_sectors fields (based on the corresponding
 hard_xxx values and the number of bytes transferred) and updates it on
 every transfer that invokes end_that_request_first. It does the same for the
-buffer, bio, bio->bi_idx fields too.
+buffer, bio, bio->bi_iter fields too.

 The buffer field is just a virtual address mapping of the current segment
 of the i/o buffer in cases where the buffer resides in low-memory. For high
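
The biodoc.txt hunks above describe the core structural change of this series: the positional state a driver needs while walking a bio (starting sector, residual byte count, current biovec index) moves out of struct bio itself and into an embedded iterator. A minimal sketch of that iterator, reconstructed only from the bi_iter.* accesses visible in the hunks below (member order and comments are assumptions, not a verbatim copy of include/linux/blk_types.h):

  struct bvec_iter {
  	sector_t	bi_sector;	/* device address in 512-byte sectors */
  	unsigned int	bi_size;	/* residual I/O count, in bytes */
  	unsigned int	bi_idx;		/* current index into bi_io_vec[] */
  };

  /*
   * The mechanical conversion repeated throughout the patch:
   *   bio->bi_sector  ->  bio->bi_iter.bi_sector
   *   bio->bi_size    ->  bio->bi_iter.bi_size
   *   bio->bi_idx     ->  bio->bi_iter.bi_idx
   */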
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 0721858fbd1e..0a9d0b3c794b 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -64,7 +64,7 @@ static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
 	struct nfhd_device *dev = queue->queuedata;
 	struct bio_vec *bvec;
 	int i, dir, len, shift;
-	sector_t sec = bio->bi_sector;
+	sector_t sec = bio->bi_iter.bi_sector;

 	dir = bio_data_dir(bio);
 	shift = dev->bshift;
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 1c16141c031c..f33bcbaa6a07 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -113,7 +113,8 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
 	unsigned int transfered;
 	unsigned short idx;

-	phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT);
+	phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
+				    AXON_RAM_SECTOR_SHIFT);
 	phys_end = bank->io_addr + bank->size;
 	transfered = 0;
 	bio_for_each_segment(vec, bio, idx) {
diff --git a/block/blk-core.c b/block/blk-core.c
index 8bdd0121212a..5c2ab2c74066 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -130,7 +130,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 	bio_advance(bio, nbytes);

 	/* don't actually finish bio if it's part of flush sequence */
-	if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
+	if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
 		bio_endio(bio, error);
 }

@@ -1326,7 +1326,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
 	bio->bi_io_vec->bv_offset = 0;
 	bio->bi_io_vec->bv_len = len;

-	bio->bi_size = len;
+	bio->bi_iter.bi_size = len;
 	bio->bi_vcnt = 1;
 	bio->bi_phys_segments = 1;

@@ -1351,7 +1351,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,

 	req->biotail->bi_next = bio;
 	req->biotail = bio;
-	req->__data_len += bio->bi_size;
+	req->__data_len += bio->bi_iter.bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

 	blk_account_io_start(req, false);
@@ -1380,8 +1380,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
 	 * not touch req->buffer either...
 	 */
 	req->buffer = bio_data(bio);
-	req->__sector = bio->bi_sector;
-	req->__data_len += bio->bi_size;
+	req->__sector = bio->bi_iter.bi_sector;
+	req->__data_len += bio->bi_iter.bi_size;
 	req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));

 	blk_account_io_start(req, false);
@@ -1459,7 +1459,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
 		req->cmd_flags |= REQ_FAILFAST_MASK;

 	req->errors = 0;
-	req->__sector = bio->bi_sector;
+	req->__sector = bio->bi_iter.bi_sector;
 	req->ioprio = bio_prio(bio);
 	blk_rq_bio_prep(req->q, req, bio);
 }
@@ -1583,12 +1583,12 @@ static inline void blk_partition_remap(struct bio *bio)
 	if (bio_sectors(bio) && bdev != bdev->bd_contains) {
 		struct hd_struct *p = bdev->bd_part;

-		bio->bi_sector += p->start_sect;
+		bio->bi_iter.bi_sector += p->start_sect;
 		bio->bi_bdev = bdev->bd_contains;

 		trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
 				      bdev->bd_dev,
-				      bio->bi_sector - p->start_sect);
+				      bio->bi_iter.bi_sector - p->start_sect);
 	}
 }

@@ -1654,7 +1654,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 	/* Test device or partition size, when known. */
 	maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
 	if (maxsector) {
-		sector_t sector = bio->bi_sector;
+		sector_t sector = bio->bi_iter.bi_sector;

 		if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
 			/*
@@ -1690,7 +1690,7 @@ generic_make_request_checks(struct bio *bio)
 			"generic_make_request: Trying to access "
 			"nonexistent block-device %s (%Lu)\n",
 			bdevname(bio->bi_bdev, b),
-			(long long) bio->bi_sector);
+			(long long) bio->bi_iter.bi_sector);
 		goto end_io;
 	}

@@ -1704,9 +1704,9 @@ generic_make_request_checks(struct bio *bio)
 	}

 	part = bio->bi_bdev->bd_part;
-	if (should_fail_request(part, bio->bi_size) ||
+	if (should_fail_request(part, bio->bi_iter.bi_size) ||
 	    should_fail_request(&part_to_disk(part)->part0,
-				bio->bi_size))
+				bio->bi_iter.bi_size))
 		goto end_io;

 	/*
@@ -1865,7 +1865,7 @@ void submit_bio(int rw, struct bio *bio)
 	if (rw & WRITE) {
 		count_vm_events(PGPGOUT, count);
 	} else {
-		task_io_account_read(bio->bi_size);
+		task_io_account_read(bio->bi_iter.bi_size);
 		count_vm_events(PGPGIN, count);
 	}

@@ -1874,7 +1874,7 @@ void submit_bio(int rw, struct bio *bio)
 		printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
 			current->comm, task_pid_nr(current),
 			(rw & WRITE) ? "WRITE" : "READ",
-			(unsigned long long)bio->bi_sector,
+			(unsigned long long)bio->bi_iter.bi_sector,
 			bdevname(bio->bi_bdev, b),
 			count);
 	}
@@ -2007,7 +2007,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
 	for (bio = rq->bio; bio; bio = bio->bi_next) {
 		if ((bio->bi_rw & ff) != ff)
 			break;
-		bytes += bio->bi_size;
+		bytes += bio->bi_iter.bi_size;
 	}

 	/* this could lead to infinite loop */
@@ -2378,9 +2378,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
 	total_bytes = 0;
 	while (req->bio) {
 		struct bio *bio = req->bio;
-		unsigned bio_bytes = min(bio->bi_size, nr_bytes);
+		unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);

-		if (bio_bytes == bio->bi_size)
+		if (bio_bytes == bio->bi_iter.bi_size)
 			req->bio = bio->bi_next;

 		req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2728,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
 		rq->nr_phys_segments = bio_phys_segments(q, bio);
 		rq->buffer = bio_data(bio);
 	}
-	rq->__data_len = bio->bi_size;
+	rq->__data_len = bio->bi_iter.bi_size;
 	rq->bio = rq->biotail = bio;

 	if (bio->bi_bdev)
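
The blk-core.c hunks above also show how partial completion is expressed once the byte count lives in the iterator: req_bio_endio() calls bio_advance() to consume nbytes from the front of the bio, and a bio is finished exactly when bi_iter.bi_size reaches zero. A hedged sketch of that loop, condensed from blk_update_request() and req_bio_endio() above (the helper name complete_bytes is illustrative, not kernel API):

  /* Illustrative sketch of the blk_update_request() pattern above. */
  static void complete_bytes(struct request *req, unsigned int nr_bytes, int error)
  {
  	while (req->bio && nr_bytes) {
  		struct bio *bio = req->bio;
  		unsigned int n = min(bio->bi_iter.bi_size, nr_bytes);

  		if (n == bio->bi_iter.bi_size)	/* bio fully consumed */
  			req->bio = bio->bi_next;

  		bio_advance(bio, n);		/* updates bi_iter in place */
  		if (bio->bi_iter.bi_size == 0)
  			bio_endio(bio, error);

  		nr_bytes -= n;
  	}
  }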
diff --git a/block/blk-flush.c b/block/blk-flush.c
index fb6f3c0ffa49..9288aaf35c21 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
 	 * copied from blk_rq_pos(rq).
 	 */
 	if (error_sector)
-		*error_sector = bio->bi_sector;
+		*error_sector = bio->bi_iter.bi_sector;

 	bio_put(bio);
 	return ret;
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9b5b561cb928..2da76c999ef3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 			req_sects = end_sect - sector;
 		}

-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;

-		bio->bi_size = req_sects << 9;
+		bio->bi_iter.bi_size = req_sects << 9;
 		nr_sects -= req_sects;
 		sector = end_sect;

@@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 			break;
 		}

-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_bdev = bdev;
 		bio->bi_private = &bb;
@@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
 		bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);

 		if (nr_sects > max_write_same_sectors) {
-			bio->bi_size = max_write_same_sectors << 9;
+			bio->bi_iter.bi_size = max_write_same_sectors << 9;
 			nr_sects -= max_write_same_sectors;
 			sector += max_write_same_sectors;
 		} else {
-			bio->bi_size = nr_sects << 9;
+			bio->bi_iter.bi_size = nr_sects << 9;
 			nr_sects = 0;
 		}

@@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
 			break;
 		}

-		bio->bi_sector = sector;
+		bio->bi_iter.bi_sector = sector;
 		bio->bi_bdev = bdev;
 		bio->bi_end_io = bio_batch_end_io;
 		bio->bi_private = &bb;
diff --git a/block/blk-map.c b/block/blk-map.c
index 623e1cd4cffe..ae4ae1047fd9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
 		rq->biotail->bi_next = bio;
 		rq->biotail = bio;

-		rq->__data_len += bio->bi_size;
+		rq->__data_len += bio->bi_iter.bi_size;
 	}
 	return 0;
 }
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,

 	ret = blk_rq_append_bio(q, rq, bio);
 	if (!ret)
-		return bio->bi_size;
+		return bio->bi_iter.bi_size;

 	/* if it was boucned we must call the end io function */
 	bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 	if (IS_ERR(bio))
 		return PTR_ERR(bio);

-	if (bio->bi_size != len) {
+	if (bio->bi_iter.bi_size != len) {
 		/*
 		 * Grab an extra reference to this bio, as bio_unmap_user()
 		 * expects to be able to drop it twice as it happens on the
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1ffc58977835..03bc083c28cf 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -543,9 +543,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)

 int blk_try_merge(struct request *rq, struct bio *bio)
 {
-	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector)
+	if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
 		return ELEVATOR_BACK_MERGE;
-	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector)
+	else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
 		return ELEVATOR_FRONT_MERGE;
 	return ELEVATOR_NO_MERGE;
 }
diff --git a/block/blk-mq.c b/block/blk-mq.c
index cdc629cf075b..e4fbcc3fd2db 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -301,7 +301,7 @@ void blk_mq_complete_request(struct request *rq, int error)
 		struct bio *next = bio->bi_next;

 		bio->bi_next = NULL;
-		bytes += bio->bi_size;
+		bytes += bio->bi_iter.bi_size;
 		blk_mq_bio_endio(rq, bio, error);
 		bio = next;
 	}
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 06534049afba..20f820037775 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;

-	if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) {
+	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
 		if (wait)
 			*wait = 0;
 		return 1;
 	}

 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed;
+	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);

 	if (!jiffy_wait)
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	bool rw = bio_data_dir(bio);

 	/* Charge the bio to the group */
-	tg->bytes_disp[rw] += bio->bi_size;
+	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
 	tg->io_disp[rw]++;

 	/*
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 	 */
 	if (!(bio->bi_rw & REQ_THROTTLED)) {
 		bio->bi_rw |= REQ_THROTTLED;
-		throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size,
-					     bio->bi_rw);
+		throtl_update_dispatch_stats(tg_to_blkg(tg),
+					     bio->bi_iter.bi_size, bio->bi_rw);
 	}
 }

@@ -1508,7 +1508,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	if (tg) {
 		if (!tg->has_rules[rw]) {
 			throtl_update_dispatch_stats(tg_to_blkg(tg),
-						     bio->bi_size, bio->bi_rw);
+						     bio->bi_iter.bi_size, bio->bi_rw);
 			goto out_unlock_rcu;
 		}
 	}
@@ -1564,7 +1564,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 	/* out-of-limit, queue to @tg */
 	throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
 		   rw == READ ? 'R' : 'W',
-		   tg->bytes_disp[rw], bio->bi_size, tg->bps[rw],
+		   tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
 		   tg->io_disp[rw], tg->iops[rw],
 		   sq->nr_queued[READ], sq->nr_queued[WRITE]);

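
For the blk-throttle.c hunks above, the arithmetic is easier to check with concrete numbers: a group earns tg->bps[rw] * elapsed_jiffies / HZ bytes of budget, and an over-budget bio waits extra_bytes * HZ / tg->bps[rw] jiffies. A worked example with assumed values:

  /*
   * Assumed values, for illustration only:
   *   tg->bps[rw] = 1048576 (1 MiB/s), elapsed = HZ/4 jiffies (0.25 s)
   *   bytes_allowed = 1048576 * (HZ/4) / HZ = 262144
   *   tg->bytes_disp[rw] = 200000, bio->bi_iter.bi_size = 131072
   *
   * 200000 + 131072 = 331072 > 262144, so the bio is throttled:
   *   extra_bytes = 331072 - 262144 = 68928
   *   jiffy_wait  = 68928 * HZ / 1048576, i.e. about 0.066 s worth of jiffies
   */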
diff --git a/block/elevator.c b/block/elevator.c
index b7ff2861b6bd..42c45a7d6714 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
 	/*
 	 * See if our hash lookup can find a potential backmerge.
 	 */
-	__rq = elv_rqhash_find(q, bio->bi_sector);
+	__rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
 	if (__rq && elv_rq_merge_ok(__rq, bio)) {
 		*req = __rq;
 		return ELEVATOR_BACK_MERGE;
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d2515435e23f..877ba119b3f8 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -929,8 +929,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
 	memset(buf, 0, sizeof(*buf));
 	buf->rq = rq;
 	buf->bio = bio;
-	buf->resid = bio->bi_size;
-	buf->sector = bio->bi_sector;
+	buf->resid = bio->bi_iter.bi_size;
+	buf->sector = bio->bi_iter.bi_sector;
 	bio_pageinc(bio);
 	buf->bv = bio_iovec(bio);
 	buf->bv_resid = buf->bv->bv_len;
@@ -1152,7 +1152,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 	do {
 		bio = rq->bio;
 		bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
-	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size));
+	} while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));

 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
 	if (!fastfail)
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d91f1a56e861..66f5aaae15a2 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -333,13 +333,13 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
 	int i;
 	int err = -EIO;

-	sector = bio->bi_sector;
+	sector = bio->bi_iter.bi_sector;
 	if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
 		goto out;

 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
 		err = 0;
-		discard_from_brd(brd, sector, bio->bi_size);
+		discard_from_brd(brd, sector, bio->bi_iter.bi_size);
 		goto out;
 	}

diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca320a8..a9b13f2cc420 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,

 	bio = bio_alloc_drbd(GFP_NOIO);
 	bio->bi_bdev = bdev->md_bdev;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	err = -EIO;
 	if (bio_add_page(bio, page, size, 0) != size)
 		goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11ec4bd2..597f111df67b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
 	} else
 		page = b->bm_pages[page_nr];
 	bio->bi_bdev = mdev->ldev->md_bdev;
-	bio->bi_sector = on_disk_sector;
+	bio->bi_iter.bi_sector = on_disk_sector;
 	/* bio_add_page of a single page to an empty bio will always succeed,
 	 * according to api. Do we want to assert that? */
 	bio_add_page(bio, page, len, 0);
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6fa6673b36b3..5326c22cdb9d 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1333,7 +1333,7 @@ next_bio:
 		goto fail;
 	}
 	/* > peer_req->i.sector, unless this is the first bio */
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = mdev->ldev->backing_bdev;
 	bio->bi_rw = rw;
 	bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
 			dev_err(DEV,
 				"bio_add_page failed for len=%u, "
 				"bi_vcnt=0 (bi_sector=%llu)\n",
-				len, (unsigned long long)bio->bi_sector);
+				len, (uint64_t)bio->bi_iter.bi_sector);
 			err = -ENOSPC;
 			goto fail;
 		}
@@ -1615,7 +1615,7 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
 	mdev->recv_cnt += data_size>>9;

 	bio = req->master_bio;
-	D_ASSERT(sector == bio->bi_sector);
+	D_ASSERT(sector == bio->bi_iter.bi_sector);

 	bio_for_each_segment(bvec, bio, i) {
 		void *mapped = kmap(bvec->bv_page) + bvec->bv_offset;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index fec7bef44994..104a040f24de 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
 	req->epoch = 0;

 	drbd_clear_interval(&req->i);
-	req->i.sector = bio_src->bi_sector;
-	req->i.size = bio_src->bi_size;
+	req->i.sector = bio_src->bi_iter.bi_sector;
+	req->i.size = bio_src->bi_iter.bi_size;
 	req->i.local = true;
 	req->i.waiting = false;

@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
 	/*
 	 * what we "blindly" assume:
 	 */
-	D_ASSERT(IS_ALIGNED(bio->bi_size, 512));
+	D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));

 	inc_ap_bio(mdev);
 	__drbd_make_request(mdev, bio, start_time);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1addc98..28e15d91197a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi

 /* Short lived temporary struct on the stack.
  * We could squirrel the error to be returned into
- * bio->bi_size, or similar. But that would be too ugly. */
+ * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
 struct bio_and_error {
 	struct bio *bio;
 	int error;
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 000abe2f105c..6a86fe7b730f 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev)
 	bio_vec.bv_len = size;
 	bio_vec.bv_offset = 0;
 	bio.bi_vcnt = 1;
-	bio.bi_size = size;
+	bio.bi_iter.bi_size = size;
 	bio.bi_bdev = bdev;
-	bio.bi_sector = 0;
+	bio.bi_iter.bi_sector = 0;
 	bio.bi_flags = (1 << BIO_QUIET);
 	init_completion(&complete);
 	bio.bi_private = &complete;
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c8dac7305244..f5e39989adde 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -415,7 +415,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 	loff_t pos;
 	int ret;

-	pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset;
+	pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;

 	if (bio_rw(bio) == WRITE) {
 		struct file *file = lo->lo_backing_file;
@@ -444,7 +444,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
 				goto out;
 			}
 			ret = file->f_op->fallocate(file, mode, pos,
-						    bio->bi_size);
+						    bio->bi_iter.bi_size);
 			if (unlikely(ret && ret != -EINVAL &&
 				     ret != -EOPNOTSUPP))
 				ret = -EIO;
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 050c71267f14..69e9eb5a6b34 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3993,7 +3993,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
 	}

 	if (unlikely(bio->bi_rw & REQ_DISCARD)) {
-		bio_endio(bio, mtip_send_trim(dd, bio->bi_sector,
+		bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
 					      bio_sectors(bio)));
 		return;
 	}
@@ -4006,7 +4006,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)

 	if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
 	    dd->unal_qdepth) {
-		if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */
+		if (bio->bi_iter.bi_sector % 8 != 0)
+			/* Unaligned on 4k boundaries */
 			unaligned = 1;
 		else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
 			unaligned = 1;
@@ -4035,7 +4036,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)

 	/* Issue the read/write. */
 	mtip_hw_submit_io(dd,
-			bio->bi_sector,
+			bio->bi_iter.bi_sector,
 			bio_sectors(bio),
 			nents,
 			tag,
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 26d03fa0bf26..53d217381873 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -468,7 +468,7 @@ static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
 {
 	struct nvme_bio_pair *bp;

-	BUG_ON(len > bio->bi_size);
+	BUG_ON(len > bio->bi_iter.bi_size);
 	BUG_ON(idx > bio->bi_vcnt);

 	bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
@@ -479,11 +479,11 @@ static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
 	bp->b1 = *bio;
 	bp->b2 = *bio;

-	bp->b1.bi_size = len;
-	bp->b2.bi_size -= len;
+	bp->b1.bi_iter.bi_size = len;
+	bp->b2.bi_iter.bi_size -= len;
 	bp->b1.bi_vcnt = idx;
-	bp->b2.bi_idx = idx;
-	bp->b2.bi_sector += len >> 9;
+	bp->b2.bi_iter.bi_idx = idx;
+	bp->b2.bi_iter.bi_sector += len >> 9;

 	if (offset) {
 		bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
@@ -552,11 +552,12 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 {
 	struct bio_vec *bvec, *bvprv = NULL;
 	struct scatterlist *sg = NULL;
-	int i, length = 0, nsegs = 0, split_len = bio->bi_size;
+	int i, length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;

 	if (nvmeq->dev->stripe_size)
 		split_len = nvmeq->dev->stripe_size -
-			((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1));
+			((bio->bi_iter.bi_sector << 9) &
+			 (nvmeq->dev->stripe_size - 1));

 	sg_init_table(iod->sg, psegs);
 	bio_for_each_segment(bvec, bio, i) {
@@ -584,7 +585,7 @@ static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
 	if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
 		return -ENOMEM;

-	BUG_ON(length != bio->bi_size);
+	BUG_ON(length != bio->bi_iter.bi_size);
 	return length;
 }

@@ -608,8 +609,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	iod->npages = 0;

 	range->cattr = cpu_to_le32(0);
-	range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift);
-	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+	range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
+	range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));

 	memset(cmnd, 0, sizeof(*cmnd));
 	cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +675,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	}

 	result = -ENOMEM;
-	iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC);
+	iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
 	if (!iod)
 		goto nomem;
 	iod->private = bio;
@@ -723,7 +724,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
 	cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
 	length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
 								GFP_ATOMIC);
-	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector));
+	cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
 	cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
 	cmnd->rw.control = cpu_to_le16(control);
 	cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
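
The nvme_bio_split() hunk above is a good illustration of why the iterator matters: a split is expressed entirely in iterator terms. Schematically (split_iters is an illustrative name, not kernel API; len is in bytes, so the sector advance is len >> 9):

  /* Schematic sketch of the iterator updates in nvme_bio_split() above. */
  static void split_iters(struct bio *b1, struct bio *b2, unsigned int len, int idx)
  {
  	b1->bi_iter.bi_size = len;		/* first half: bytes [0, len) */
  	b2->bi_iter.bi_size -= len;		/* second half: the remainder */
  	b2->bi_iter.bi_idx = idx;		/* resume at biovec idx */
  	b2->bi_iter.bi_sector += len >> 9;	/* advance the start sector */
  }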
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c5efb1..ce986bacf7b7 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s

 	for (;;) {
 		tmp = rb_entry(n, struct pkt_rb_node, rb_node);
-		if (s <= tmp->bio->bi_sector)
+		if (s <= tmp->bio->bi_iter.bi_sector)
 			next = n->rb_left;
 		else
 			next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
 		n = next;
 	}

-	if (s > tmp->bio->bi_sector) {
+	if (s > tmp->bio->bi_iter.bi_sector) {
 		tmp = pkt_rbtree_next(tmp);
 		if (!tmp)
 			return NULL;
 	}
-	BUG_ON(s > tmp->bio->bi_sector);
+	BUG_ON(s > tmp->bio->bi_iter.bi_sector);
 	return tmp;
 }

@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
 {
 	struct rb_node **p = &pd->bio_queue.rb_node;
 	struct rb_node *parent = NULL;
-	sector_t s = node->bio->bi_sector;
+	sector_t s = node->bio->bi_iter.bi_sector;
 	struct pkt_rb_node *tmp;

 	while (*p) {
 		parent = *p;
 		tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
-		if (s < tmp->bio->bi_sector)
+		if (s < tmp->bio->bi_iter.bi_sector)
 			p = &(*p)->rb_left;
 		else
 			p = &(*p)->rb_right;
@@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 			spin_lock(&pd->iosched.lock);
 			bio = bio_list_peek(&pd->iosched.write_queue);
 			spin_unlock(&pd->iosched.lock);
-			if (bio && (bio->bi_sector == pd->iosched.last_write))
+			if (bio && (bio->bi_iter.bi_sector ==
+				    pd->iosched.last_write))
 				need_write_seek = 0;
 			if (need_write_seek && reads_queued) {
 				if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
 			continue;

 		if (bio_data_dir(bio) == READ)
-			pd->iosched.successive_reads += bio->bi_size >> 10;
+			pd->iosched.successive_reads +=
+				bio->bi_iter.bi_size >> 10;
 		else {
 			pd->iosched.successive_reads = 0;
 			pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err)

 	pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
 		bio, (unsigned long long)pkt->sector,
-		(unsigned long long)bio->bi_sector, err);
+		(unsigned long long)bio->bi_iter.bi_sector, err);

 	if (err)
 		atomic_inc(&pkt->io_errors);
@@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
 	memset(written, 0, sizeof(written));
 	spin_lock(&pkt->lock);
 	bio_list_for_each(bio, &pkt->orig_bios) {
-		int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9);
-		int num_frames = bio->bi_size / CD_FRAMESIZE;
+		int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
+			(CD_FRAMESIZE >> 9);
+		int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
 		pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
 		BUG_ON(first_frame < 0);
 		BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)

 		bio = pkt->r_bios[f];
 		bio_reset(bio);
-		bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
+		bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
 		bio->bi_bdev = pd->bdev;
 		bio->bi_end_io = pkt_end_io_read;
 		bio->bi_private = pkt;
@@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
 	bio_reset(pkt->bio);
 	pkt->bio->bi_bdev = pd->bdev;
 	pkt->bio->bi_rw = REQ_WRITE;
-	pkt->bio->bi_sector = new_sector;
-	pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE;
+	pkt->bio->bi_iter.bi_sector = new_sector;
+	pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
 	pkt->bio->bi_vcnt = pkt->frames;

 	pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
 	node = first_node;
 	while (node) {
 		bio = node->bio;
-		zone = get_zone(bio->bi_sector, pd);
+		zone = get_zone(bio->bi_iter.bi_sector, pd);
 		list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
 			if (p->sector == zone) {
 				bio = NULL;
@@ -1252,14 +1255,14 @@ try_next_bio:
 	pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
 	while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
 		bio = node->bio;
-		pkt_dbg(2, pd, "found zone=%llx\n",
-			(unsigned long long)get_zone(bio->bi_sector, pd));
-		if (get_zone(bio->bi_sector, pd) != zone)
+		pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
+			get_zone(bio->bi_iter.bi_sector, pd));
+		if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
 			break;
 		pkt_rbtree_erase(pd, node);
 		spin_lock(&pkt->lock);
 		bio_list_add(&pkt->orig_bios, bio);
-		pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+		pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
 		spin_unlock(&pkt->lock);
 	}
 	/* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
 	struct bio_vec *bvec = pkt->w_bio->bi_io_vec;

 	bio_reset(pkt->w_bio);
-	pkt->w_bio->bi_sector = pkt->sector;
+	pkt->w_bio->bi_iter.bi_sector = pkt->sector;
 	pkt->w_bio->bi_bdev = pd->bdev;
 	pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
 	pkt->w_bio->bi_private = pkt;
@@ -2370,20 +2373,20 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)

 	if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
 		pkt_notice(pd, "WRITE for ro device (%llu)\n",
-			   (unsigned long long)bio->bi_sector);
+			   (unsigned long long)bio->bi_iter.bi_sector);
 		goto end_io;
 	}

-	if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
+	if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
 		pkt_err(pd, "wrong bio size\n");
 		goto end_io;
 	}

 	blk_queue_bounce(q, &bio);

-	zone = get_zone(bio->bi_sector, pd);
+	zone = get_zone(bio->bi_iter.bi_sector, pd);
 	pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
-		(unsigned long long)bio->bi_sector,
+		(unsigned long long)bio->bi_iter.bi_sector,
 		(unsigned long long)bio_end_sector(bio));

 	/* Check if we have to split the bio */
@@ -2395,7 +2398,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		last_zone = get_zone(bio_end_sector(bio) - 1, pd);
 		if (last_zone != zone) {
 			BUG_ON(last_zone != zone + pd->settings.size);
-			first_sectors = last_zone - bio->bi_sector;
+			first_sectors = last_zone - bio->bi_iter.bi_sector;
 			bp = bio_split(bio, first_sectors);
 			BUG_ON(!bp);
 			pkt_make_request(q, &bp->bio1);
@@ -2417,7 +2420,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
 		if ((pkt->state == PACKET_WAITING_STATE) ||
 		    (pkt->state == PACKET_READ_WAIT_STATE)) {
 			bio_list_add(&pkt->orig_bios, bio);
-			pkt->write_size += bio->bi_size / CD_FRAMESIZE;
+			pkt->write_size +=
+				bio->bi_iter.bi_size / CD_FRAMESIZE;
 			if ((pkt->write_size >= pkt->frames) &&
 			    (pkt->state == PACKET_WAITING_STATE)) {
 				atomic_inc(&pkt->run_sm);
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88d7585..464be78a0836 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -104,7 +104,7 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
 		dev_dbg(&dev->sbd.core,
 			"%s:%u: bio %u: %u segs %u sectors from %lu\n",
 			__func__, __LINE__, i, bio_segments(iter.bio),
-			bio_sectors(iter.bio), iter.bio->bi_sector);
+			bio_sectors(iter.bio), iter.bio->bi_iter.bi_sector);

 		size = bvec->bv_len;
 		buf = bvec_kmap_irq(bvec, &flags);
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53e5f37..320bbfc9b902 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -553,7 +553,7 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
 	struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
 	int write = bio_data_dir(bio) == WRITE;
 	const char *op = write ? "write" : "read";
-	loff_t offset = bio->bi_sector << 9;
+	loff_t offset = bio->bi_iter.bi_sector << 9;
 	int error = 0;
 	struct bio_vec *bvec;
 	unsigned int i;
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index cb1db2979d3d..a8f4fe2d4d1b 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1183,14 +1183,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,

 	/* Handle the easy case for the caller */

-	if (!offset && len == bio_src->bi_size)
+	if (!offset && len == bio_src->bi_iter.bi_size)
 		return bio_clone(bio_src, gfpmask);

 	if (WARN_ON_ONCE(!len))
 		return NULL;
-	if (WARN_ON_ONCE(len > bio_src->bi_size))
+	if (WARN_ON_ONCE(len > bio_src->bi_iter.bi_size))
 		return NULL;
-	if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
+	if (WARN_ON_ONCE(offset > bio_src->bi_iter.bi_size - len))
 		return NULL;

 	/* Find first affected segment... */
@@ -1220,7 +1220,8 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 		return NULL;	/* ENOMEM */

 	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT);
+	bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector +
+		(offset >> SECTOR_SHIFT);
 	bio->bi_rw = bio_src->bi_rw;
 	bio->bi_flags |= 1 << BIO_CLONED;

@@ -1239,8 +1240,7 @@ static struct bio *bio_clone_range(struct bio *bio_src,
 	}

 	bio->bi_vcnt = vcnt;
-	bio->bi_size = len;
-	bio->bi_idx = 0;
+	bio->bi_iter.bi_size = len;

 	return bio;
 }
@@ -1271,7 +1271,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,

 	/* Build up a chain of clone bios up to the limit */

-	if (!bi || off >= bi->bi_size || !len)
+	if (!bi || off >= bi->bi_iter.bi_size || !len)
 		return NULL;		/* Nothing to clone */

 	end = &chain;
@@ -1283,7 +1283,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 			rbd_warn(NULL, "bio_chain exhausted with %u left", len);
 			goto out_err;	/* EINVAL; ran out of bio's */
 		}
-		bi_size = min_t(unsigned int, bi->bi_size - off, len);
+		bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
 		bio = bio_clone_range(bi, off, bi_size, gfpmask);
 		if (!bio)
 			goto out_err;	/* ENOMEM */
@@ -1292,7 +1292,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
 		end = &bio->bi_next;

 		off += bi_size;
-		if (off == bi->bi_size) {
+		if (off == bi->bi_iter.bi_size) {
 			bi = bi->bi_next;
 			off = 0;
 		}
@@ -2186,7 +2186,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,

 	if (type == OBJ_REQUEST_BIO) {
 		bio_list = data_desc;
-		rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT);
+		rbd_assert(img_offset ==
+			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
 	} else {
 		rbd_assert(type == OBJ_REQUEST_PAGES);
 		pages = data_desc;
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2284f5d3a54a..2839d37e5af7 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
174 if (!card) 174 if (!card)
175 goto req_err; 175 goto req_err;
176 176
177 if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk)) 177 if (bio_end_sector(bio) > get_capacity(card->gendisk))
178 goto req_err; 178 goto req_err;
179 179
180 if (unlikely(card->halt)) { 180 if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
187 goto req_err; 187 goto req_err;
188 } 188 }
189 189
190 if (bio->bi_size == 0) { 190 if (bio->bi_iter.bi_size == 0) {
191 dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); 191 dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
192 goto req_err; 192 goto req_err;
193 } 193 }
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
208 208
209 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", 209 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
210 bio_data_dir(bio) ? 'W' : 'R', bio_meta, 210 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
211 (u64)bio->bi_sector << 9, bio->bi_size); 211 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
212 212
213 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas, 213 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
214 bio_dma_done_cb, bio_meta); 214 bio_dma_done_cb, bio_meta);
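Besides the rename, several call sites switch to accessor helpers, as in the rsxx capacity check above: the open-coded bio->bi_sector + (bio->bi_size >> 9) becomes bio_end_sector(bio). A standalone sketch of the two helpers as I recall them from include/linux/bio.h; the point is that call sites written this way need no edits when the underlying fields move again:

#include <assert.h>

typedef unsigned long long sector_t;
struct bvec_iter { sector_t bi_sector; unsigned int bi_size; };
struct bio { struct bvec_iter bi_iter; };

#define bio_sectors(bio)    ((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors(bio))

int main(void)
{
    struct bio b = { { .bi_sector = 100, .bi_size = 3 << 9 } };
    sector_t capacity = 103;

    /* same test as the old open-coded form:
     *   b.bi_sector + (b.bi_size >> 9) > capacity */
    assert(!(bio_end_sector(&b) > capacity));
    return 0;
}

The same motivation shows up below wherever bio_sectors(bio) replaces bi_size >> 9.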
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index fc88ba3e1bd2..3716633be3c2 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -696,7 +696,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
696 int st; 696 int st;
697 int i; 697 int i;
698 698
699 addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */ 699 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
700 atomic_set(n_dmas, 0); 700 atomic_set(n_dmas, 0);
701 701
702 for (i = 0; i < card->n_targets; i++) { 702 for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +705,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
705 } 705 }
706 706
707 if (bio->bi_rw & REQ_DISCARD) { 707 if (bio->bi_rw & REQ_DISCARD) {
708 bv_len = bio->bi_size; 708 bv_len = bio->bi_iter.bi_size;
709 709
710 while (bv_len > 0) { 710 while (bv_len > 0) {
711 tgt = rsxx_get_dma_tgt(card, addr8); 711 tgt = rsxx_get_dma_tgt(card, addr8);
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index ad70868f8a96..dab4f1afeae9 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -352,8 +352,8 @@ static int add_bio(struct cardinfo *card)
352 bio = card->currentbio; 352 bio = card->currentbio;
353 if (!bio && card->bio) { 353 if (!bio && card->bio) {
354 card->currentbio = card->bio; 354 card->currentbio = card->bio;
355 card->current_idx = card->bio->bi_idx; 355 card->current_idx = card->bio->bi_iter.bi_idx;
356 card->current_sector = card->bio->bi_sector; 356 card->current_sector = card->bio->bi_iter.bi_sector;
357 card->bio = card->bio->bi_next; 357 card->bio = card->bio->bi_next;
358 if (card->bio == NULL) 358 if (card->bio == NULL)
359 card->biotail = &card->bio; 359 card->biotail = &card->bio;
@@ -451,7 +451,7 @@ static void process_page(unsigned long data)
451 if (page->idx >= bio->bi_vcnt) { 451 if (page->idx >= bio->bi_vcnt) {
452 page->bio = bio->bi_next; 452 page->bio = bio->bi_next;
453 if (page->bio) 453 if (page->bio)
454 page->idx = page->bio->bi_idx; 454 page->idx = page->bio->bi_iter.bi_idx;
455 } 455 }
456 456
457 pci_unmap_page(card->dev, desc->data_dma_handle, 457 pci_unmap_page(card->dev, desc->data_dma_handle,
@@ -532,7 +532,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
532{ 532{
533 struct cardinfo *card = q->queuedata; 533 struct cardinfo *card = q->queuedata;
534 pr_debug("mm_make_request %llu %u\n", 534 pr_debug("mm_make_request %llu %u\n",
535 (unsigned long long)bio->bi_sector, bio->bi_size); 535 (unsigned long long)bio->bi_iter.bi_sector,
536 bio->bi_iter.bi_size);
536 537
537 spin_lock_irq(&card->lock); 538 spin_lock_irq(&card->lock);
538 *card->biotail = bio; 539 *card->biotail = bio;
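umem services bios incrementally from a tasklet, so it snapshots the bio's starting index and sector into driver-private fields and advances those copies as descriptors complete; only the two snapshot sites need the rename. A sketch of that resumable-cursor pattern; the cursor type and function names here are hypothetical, only the idea of copying the bi_iter fields out of the bio comes from the driver:

#include <stdio.h>

typedef unsigned long long sector_t;
struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; };
struct bio { struct bvec_iter bi_iter; };

struct cursor {
    unsigned int idx;       /* plays the role of card->current_idx */
    sector_t     sector;    /* plays the role of card->current_sector */
};

static void cursor_start(struct cursor *c, const struct bio *bio)
{
    c->idx = bio->bi_iter.bi_idx;
    c->sector = bio->bi_iter.bi_sector;
}

static void cursor_step(struct cursor *c, unsigned int bytes)
{
    c->idx++;                   /* one bio_vec consumed */
    c->sector += bytes >> 9;    /* device position advances */
}

int main(void)
{
    struct bio bio = { { 8, 8192, 0 } };
    struct cursor c;

    cursor_start(&c, &bio);     /* like add_bio() picking up card->bio */
    cursor_step(&c, 4096);      /* one page-sized descriptor issued */
    printf("next segment: idx %u at sector %llu\n", c.idx, c.sector);
    return 0;
}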
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6620b73d0490..4b97b86da926 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1257 bio->bi_bdev = preq.bdev; 1257 bio->bi_bdev = preq.bdev;
1258 bio->bi_private = pending_req; 1258 bio->bi_private = pending_req;
1259 bio->bi_end_io = end_block_io_op; 1259 bio->bi_end_io = end_block_io_op;
1260 bio->bi_sector = preq.sector_number; 1260 bio->bi_iter.bi_sector = preq.sector_number;
1261 } 1261 }
1262 1262
1263 preq.sector_number += seg[i].nsec; 1263 preq.sector_number += seg[i].nsec;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index 432db1b59b00..80e86307dd4b 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
1547 for (i = 0; i < pending; i++) { 1547 for (i = 0; i < pending; i++) {
1548 offset = (i * segs * PAGE_SIZE) >> 9; 1548 offset = (i * segs * PAGE_SIZE) >> 9;
1549 size = min((unsigned int)(segs * PAGE_SIZE) >> 9, 1549 size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
1550 (unsigned int)(bio->bi_size >> 9) - offset); 1550 (unsigned int)bio_sectors(bio) - offset);
1551 cloned_bio = bio_clone(bio, GFP_NOIO); 1551 cloned_bio = bio_clone(bio, GFP_NOIO);
1552 BUG_ON(cloned_bio == NULL); 1552 BUG_ON(cloned_bio == NULL);
1553 bio_trim(cloned_bio, offset, size); 1553 bio_trim(cloned_bio, offset, size);
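The blkif_recover() hunk combines two of the new helpers: bio_sectors(bio) for the residual length, and bio_trim() to cut each clone down to one backend-sized piece. A standalone model of the sector arithmetic bio_trim() performs, assuming its offset and size arguments are in sectors (the real helper also fixes up the biovec, which this sketch omits):

#include <assert.h>

typedef unsigned long long sector_t;
struct bvec_iter { sector_t bi_sector; unsigned int bi_size; };
struct bio { struct bvec_iter bi_iter; };

static void bio_trim_sketch(struct bio *bio, unsigned int offset,
                            unsigned int size)
{
    bio->bi_iter.bi_sector += offset;   /* skip 'offset' sectors */
    bio->bi_iter.bi_size = size << 9;   /* keep 'size' sectors */
}

int main(void)
{
    /* carve the second 16-sector piece out of a 64-sector bio, as the
     * recovery loop does for each clone */
    struct bio clone = { { .bi_sector = 0, .bi_size = 64 << 9 } };

    bio_trim_sketch(&clone, 16, 16);
    assert(clone.bi_iter.bi_sector == 16);
    assert(clone.bi_iter.bi_size == 16 << 9);
    return 0;
}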
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..038a6d2aced3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
299 299
300 bio = bch_bbio_alloc(b->c); 300 bio = bch_bbio_alloc(b->c);
301 bio->bi_rw = REQ_META|READ_SYNC; 301 bio->bi_rw = REQ_META|READ_SYNC;
302 bio->bi_size = KEY_SIZE(&b->key) << 9; 302 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
303 bio->bi_end_io = btree_node_read_endio; 303 bio->bi_end_io = btree_node_read_endio;
304 bio->bi_private = &cl; 304 bio->bi_private = &cl;
305 305
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
395 b->bio->bi_end_io = btree_node_write_endio; 395 b->bio->bi_end_io = btree_node_write_endio;
396 b->bio->bi_private = cl; 396 b->bio->bi_private = cl;
397 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; 397 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
398 b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); 398 b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
399 bch_bio_map(b->bio, i); 399 bch_bio_map(b->bio, i);
400 400
401 /* 401 /*
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..92b3fd468a03 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -195,7 +195,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
195 dc->disk.c, 195 dc->disk.c,
196 "verify failed at dev %s sector %llu", 196 "verify failed at dev %s sector %llu",
197 bdevname(dc->bdev, name), 197 bdevname(dc->bdev, name),
198 (uint64_t) bio->bi_sector); 198 (uint64_t) bio->bi_iter.bi_sector);
199 199
200 kunmap_atomic(p1); 200 kunmap_atomic(p1);
201 } 201 }
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..cc4ba2da5fb6 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -21,18 +21,18 @@ static void bch_bi_idx_hack_endio(struct bio *bio, int error)
21 21
22static void bch_generic_make_request_hack(struct bio *bio) 22static void bch_generic_make_request_hack(struct bio *bio)
23{ 23{
24 if (bio->bi_idx) { 24 if (bio->bi_iter.bi_idx) {
25 struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); 25 struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
26 26
27 memcpy(clone->bi_io_vec, 27 memcpy(clone->bi_io_vec,
28 bio_iovec(bio), 28 bio_iovec(bio),
29 bio_segments(bio) * sizeof(struct bio_vec)); 29 bio_segments(bio) * sizeof(struct bio_vec));
30 30
31 clone->bi_sector = bio->bi_sector; 31 clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
32 clone->bi_bdev = bio->bi_bdev; 32 clone->bi_bdev = bio->bi_bdev;
33 clone->bi_rw = bio->bi_rw; 33 clone->bi_rw = bio->bi_rw;
34 clone->bi_vcnt = bio_segments(bio); 34 clone->bi_vcnt = bio_segments(bio);
35 clone->bi_size = bio->bi_size; 35 clone->bi_iter.bi_size = bio->bi_iter.bi_size;
36 36
37 clone->bi_private = bio; 37 clone->bi_private = bio;
38 clone->bi_end_io = bch_bi_idx_hack_endio; 38 clone->bi_end_io = bch_bi_idx_hack_endio;
@@ -72,7 +72,7 @@ static void bch_generic_make_request_hack(struct bio *bio)
72struct bio *bch_bio_split(struct bio *bio, int sectors, 72struct bio *bch_bio_split(struct bio *bio, int sectors,
73 gfp_t gfp, struct bio_set *bs) 73 gfp_t gfp, struct bio_set *bs)
74{ 74{
75 unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9; 75 unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9;
76 struct bio_vec *bv; 76 struct bio_vec *bv;
77 struct bio *ret = NULL; 77 struct bio *ret = NULL;
78 78
@@ -90,7 +90,7 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
90 } 90 }
91 91
92 bio_for_each_segment(bv, bio, idx) { 92 bio_for_each_segment(bv, bio, idx) {
93 vcnt = idx - bio->bi_idx; 93 vcnt = idx - bio->bi_iter.bi_idx;
94 94
95 if (!nbytes) { 95 if (!nbytes) {
96 ret = bio_alloc_bioset(gfp, vcnt, bs); 96 ret = bio_alloc_bioset(gfp, vcnt, bs);
@@ -119,15 +119,15 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
119 } 119 }
120out: 120out:
121 ret->bi_bdev = bio->bi_bdev; 121 ret->bi_bdev = bio->bi_bdev;
122 ret->bi_sector = bio->bi_sector; 122 ret->bi_iter.bi_sector = bio->bi_iter.bi_sector;
123 ret->bi_size = sectors << 9; 123 ret->bi_iter.bi_size = sectors << 9;
124 ret->bi_rw = bio->bi_rw; 124 ret->bi_rw = bio->bi_rw;
125 ret->bi_vcnt = vcnt; 125 ret->bi_vcnt = vcnt;
126 ret->bi_max_vecs = vcnt; 126 ret->bi_max_vecs = vcnt;
127 127
128 bio->bi_sector += sectors; 128 bio->bi_iter.bi_sector += sectors;
129 bio->bi_size -= sectors << 9; 129 bio->bi_iter.bi_size -= sectors << 9;
130 bio->bi_idx = idx; 130 bio->bi_iter.bi_idx = idx;
131 131
132 if (bio_integrity(bio)) { 132 if (bio_integrity(bio)) {
133 if (bio_integrity_clone(ret, bio, gfp)) { 133 if (bio_integrity_clone(ret, bio, gfp)) {
@@ -162,7 +162,7 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
162 bio_for_each_segment(bv, bio, i) { 162 bio_for_each_segment(bv, bio, i) {
163 struct bvec_merge_data bvm = { 163 struct bvec_merge_data bvm = {
164 .bi_bdev = bio->bi_bdev, 164 .bi_bdev = bio->bi_bdev,
165 .bi_sector = bio->bi_sector, 165 .bi_sector = bio->bi_iter.bi_sector,
166 .bi_size = ret << 9, 166 .bi_size = ret << 9,
167 .bi_rw = bio->bi_rw, 167 .bi_rw = bio->bi_rw,
168 }; 168 };
@@ -272,8 +272,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
272{ 272{
273 struct bbio *b = container_of(bio, struct bbio, bio); 273 struct bbio *b = container_of(bio, struct bbio, bio);
274 274
275 bio->bi_sector = PTR_OFFSET(&b->key, 0); 275 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
276 bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; 276 bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
277 277
278 b->submit_time_us = local_clock_us(); 278 b->submit_time_us = local_clock_us();
279 closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); 279 closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
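At this point in the series bcache still splits bios by hand, so bch_bio_split() carries the bookkeeping explicitly: the child takes the parent's current sector and sectors << 9 bytes, and the parent is advanced past it in place. The invariant that code maintains, reduced to a standalone model with no kernel API:

#include <assert.h>

typedef unsigned long long sector_t;
struct bvec_iter { sector_t bi_sector; unsigned int bi_size; };
struct bio { struct bvec_iter bi_iter; };

static struct bio split(struct bio *parent, unsigned int sectors)
{
    struct bio child = *parent;             /* child starts where parent is */

    child.bi_iter.bi_size = sectors << 9;   /* child keeps the front piece */
    parent->bi_iter.bi_sector += sectors;   /* parent advances past it */
    parent->bi_iter.bi_size -= sectors << 9;
    return child;
}

int main(void)
{
    struct bio p = { { 1000, 32 << 9 } };
    unsigned int total = p.bi_iter.bi_size;
    struct bio c = split(&p, 8);

    /* nothing lost, nothing duplicated, and the pieces are adjacent */
    assert(c.bi_iter.bi_size + p.bi_iter.bi_size == total);
    assert(c.bi_iter.bi_sector + 8 == p.bi_iter.bi_sector);
    return 0;
}

Total size is conserved and the child ends exactly where the parent now begins, so submitting both covers the original range exactly once.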
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..7eafdf09a0ae 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset;
51 len = min_t(unsigned, left, PAGE_SECTORS * 8); 51 len = min_t(unsigned, left, PAGE_SECTORS * 8);
52 52
53 bio_reset(bio); 53 bio_reset(bio);
54 bio->bi_sector = bucket + offset; 54 bio->bi_iter.bi_sector = bucket + offset;
55 bio->bi_bdev = ca->bdev; 55 bio->bi_bdev = ca->bdev;
56 bio->bi_rw = READ; 56 bio->bi_rw = READ;
57 bio->bi_size = len << 9; 57 bio->bi_iter.bi_size = len << 9;
58 58
59 bio->bi_end_io = journal_read_endio; 59 bio->bi_end_io = journal_read_endio;
60 bio->bi_private = &cl; 60 bio->bi_private = &cl;
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
437 atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); 437 atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
438 438
439 bio_init(bio); 439 bio_init(bio);
440 bio->bi_sector = bucket_to_sector(ca->set, 440 bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
441 ca->sb.d[ja->discard_idx]); 441 ca->sb.d[ja->discard_idx]);
442 bio->bi_bdev = ca->bdev; 442 bio->bi_bdev = ca->bdev;
443 bio->bi_rw = REQ_WRITE|REQ_DISCARD; 443 bio->bi_rw = REQ_WRITE|REQ_DISCARD;
444 bio->bi_max_vecs = 1; 444 bio->bi_max_vecs = 1;
445 bio->bi_io_vec = bio->bi_inline_vecs; 445 bio->bi_io_vec = bio->bi_inline_vecs;
446 bio->bi_size = bucket_bytes(ca); 446 bio->bi_iter.bi_size = bucket_bytes(ca);
447 bio->bi_end_io = journal_discard_endio; 447 bio->bi_end_io = journal_discard_endio;
448 448
449 closure_get(&ca->set->cl); 449 closure_get(&ca->set->cl);
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
608 atomic_long_add(sectors, &ca->meta_sectors_written); 608 atomic_long_add(sectors, &ca->meta_sectors_written);
609 609
610 bio_reset(bio); 610 bio_reset(bio);
611 bio->bi_sector = PTR_OFFSET(k, i); 611 bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
612 bio->bi_bdev = ca->bdev; 612 bio->bi_bdev = ca->bdev;
613 bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; 613 bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
614 bio->bi_size = sectors << 9; 614 bio->bi_iter.bi_size = sectors << 9;
615 615
616 bio->bi_end_io = journal_write_endio; 616 bio->bi_end_io = journal_write_endio;
617 bio->bi_private = w; 617 bio->bi_private = w;
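The journal code reuses a single embedded bio, and bio_reset() clears nearly everything, so each pass has to repopulate bi_iter.bi_sector and bi_iter.bi_size along with the device and end_io hook. A loose standalone sketch of that reinit-and-resubmit loop; bio_reset_sketch() and submit() are stand-ins for the real bio_reset() and closure submission plumbing, and the sizes are made up:

#include <stdio.h>

typedef unsigned long long sector_t;
struct bvec_iter { sector_t bi_sector; unsigned int bi_size; };
struct bio { struct bvec_iter bi_iter; };

static void bio_reset_sketch(struct bio *bio)
{
    bio->bi_iter.bi_sector = 0;     /* the real bio_reset() clears */
    bio->bi_iter.bi_size = 0;       /* (almost) the whole bio */
}

static void submit(struct bio *bio)
{
    printf("read %u bytes at sector %llu\n",
           bio->bi_iter.bi_size, bio->bi_iter.bi_sector);
}

int main(void)
{
    struct bio bio = { { 0, 0 } };
    sector_t bucket = 4096;                 /* bucket start, made up */
    unsigned int bucket_size = 256, offset, len;

    for (offset = 0; offset < bucket_size; offset += len) {
        len = bucket_size - offset < 64 ? bucket_size - offset : 64;
        bio_reset_sketch(&bio);
        bio.bi_iter.bi_sector = bucket + offset;
        bio.bi_iter.bi_size = len << 9;
        submit(&bio);
    }
    return 0;
}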
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..581f95df8265 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io)
82 bio_get(bio); 82 bio_get(bio);
83 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); 83 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
84 84
85 bio->bi_size = KEY_SIZE(&io->w->key) << 9; 85 bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
86 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), 86 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
87 PAGE_SECTORS); 87 PAGE_SECTORS);
88 bio->bi_private = &io->cl; 88 bio->bi_private = &io->cl;
@@ -98,7 +98,7 @@ static void write_moving(struct closure *cl)
98 if (!op->error) { 98 if (!op->error) {
99 moving_init(io); 99 moving_init(io);
100 100
101 io->bio.bio.bi_sector = KEY_START(&io->w->key); 101 io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
102 op->write_prio = 1; 102 op->write_prio = 1;
103 op->bio = &io->bio.bio; 103 op->bio = &io->bio.bio;
104 104
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 78bab4154e97..47a9bbc75124 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -261,7 +261,7 @@ static void bch_data_invalidate(struct closure *cl)
261 struct bio *bio = op->bio; 261 struct bio *bio = op->bio;
262 262
263 pr_debug("invalidating %i sectors from %llu", 263 pr_debug("invalidating %i sectors from %llu",
264 bio_sectors(bio), (uint64_t) bio->bi_sector); 264 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
265 265
266 while (bio_sectors(bio)) { 266 while (bio_sectors(bio)) {
267 unsigned sectors = min(bio_sectors(bio), 267 unsigned sectors = min(bio_sectors(bio),
@@ -270,11 +270,11 @@ static void bch_data_invalidate(struct closure *cl)
270 if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) 270 if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
271 goto out; 271 goto out;
272 272
273 bio->bi_sector += sectors; 273 bio->bi_iter.bi_sector += sectors;
274 bio->bi_size -= sectors << 9; 274 bio->bi_iter.bi_size -= sectors << 9;
275 275
276 bch_keylist_add(&op->insert_keys, 276 bch_keylist_add(&op->insert_keys,
277 &KEY(op->inode, bio->bi_sector, sectors)); 277 &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
278 } 278 }
279 279
280 op->insert_data_done = true; 280 op->insert_data_done = true;
@@ -364,7 +364,7 @@ static void bch_data_insert_start(struct closure *cl)
364 k = op->insert_keys.top; 364 k = op->insert_keys.top;
365 bkey_init(k); 365 bkey_init(k);
366 SET_KEY_INODE(k, op->inode); 366 SET_KEY_INODE(k, op->inode);
367 SET_KEY_OFFSET(k, bio->bi_sector); 367 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
368 368
369 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), 369 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
370 op->write_point, op->write_prio, 370 op->write_point, op->write_prio,
@@ -522,7 +522,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
522 (bio->bi_rw & REQ_WRITE))) 522 (bio->bi_rw & REQ_WRITE)))
523 goto skip; 523 goto skip;
524 524
525 if (bio->bi_sector & (c->sb.block_size - 1) || 525 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
526 bio_sectors(bio) & (c->sb.block_size - 1)) { 526 bio_sectors(bio) & (c->sb.block_size - 1)) {
527 pr_debug("skipping unaligned io"); 527 pr_debug("skipping unaligned io");
528 goto skip; 528 goto skip;
@@ -546,8 +546,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
546 546
547 spin_lock(&dc->io_lock); 547 spin_lock(&dc->io_lock);
548 548
549 hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) 549 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
550 if (i->last == bio->bi_sector && 550 if (i->last == bio->bi_iter.bi_sector &&
551 time_before(jiffies, i->jiffies)) 551 time_before(jiffies, i->jiffies))
552 goto found; 552 goto found;
553 553
@@ -556,8 +556,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
556 add_sequential(task); 556 add_sequential(task);
557 i->sequential = 0; 557 i->sequential = 0;
558found: 558found:
559 if (i->sequential + bio->bi_size > i->sequential) 559 if (i->sequential + bio->bi_iter.bi_size > i->sequential)
560 i->sequential += bio->bi_size; 560 i->sequential += bio->bi_iter.bi_size;
561 561
562 i->last = bio_end_sector(bio); 562 i->last = bio_end_sector(bio);
563 i->jiffies = jiffies + msecs_to_jiffies(5000); 563 i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -650,15 +650,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
650 struct bkey *bio_key; 650 struct bkey *bio_key;
651 unsigned ptr; 651 unsigned ptr;
652 652
653 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0) 653 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
654 return MAP_CONTINUE; 654 return MAP_CONTINUE;
655 655
656 if (KEY_INODE(k) != s->iop.inode || 656 if (KEY_INODE(k) != s->iop.inode ||
657 KEY_START(k) > bio->bi_sector) { 657 KEY_START(k) > bio->bi_iter.bi_sector) {
658 unsigned bio_sectors = bio_sectors(bio); 658 unsigned bio_sectors = bio_sectors(bio);
659 unsigned sectors = KEY_INODE(k) == s->iop.inode 659 unsigned sectors = KEY_INODE(k) == s->iop.inode
660 ? min_t(uint64_t, INT_MAX, 660 ? min_t(uint64_t, INT_MAX,
661 KEY_START(k) - bio->bi_sector) 661 KEY_START(k) - bio->bi_iter.bi_sector)
662 : INT_MAX; 662 : INT_MAX;
663 663
664 int ret = s->d->cache_miss(b, s, bio, sectors); 664 int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -681,13 +681,13 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
681 s->read_dirty_data = true; 681 s->read_dirty_data = true;
682 682
683 n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, 683 n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
684 KEY_OFFSET(k) - bio->bi_sector), 684 KEY_OFFSET(k) - bio->bi_iter.bi_sector),
685 GFP_NOIO, s->d->bio_split); 685 GFP_NOIO, s->d->bio_split);
686 686
687 bio_key = &container_of(n, struct bbio, bio)->key; 687 bio_key = &container_of(n, struct bbio, bio)->key;
688 bch_bkey_copy_single_ptr(bio_key, k, ptr); 688 bch_bkey_copy_single_ptr(bio_key, k, ptr);
689 689
690 bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key); 690 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
691 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); 691 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
692 692
693 n->bi_end_io = bch_cache_read_endio; 693 n->bi_end_io = bch_cache_read_endio;
@@ -714,7 +714,7 @@ static void cache_lookup(struct closure *cl)
714 struct bio *bio = &s->bio.bio; 714 struct bio *bio = &s->bio.bio;
715 715
716 int ret = bch_btree_map_keys(&s->op, s->iop.c, 716 int ret = bch_btree_map_keys(&s->op, s->iop.c,
717 &KEY(s->iop.inode, bio->bi_sector, 0), 717 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
718 cache_lookup_fn, MAP_END_KEY); 718 cache_lookup_fn, MAP_END_KEY);
719 if (ret == -EAGAIN) 719 if (ret == -EAGAIN)
720 continue_at(cl, cache_lookup, bcache_wq); 720 continue_at(cl, cache_lookup, bcache_wq);
@@ -872,9 +872,9 @@ static void cached_dev_read_done(struct closure *cl)
872 872
873 if (s->iop.bio) { 873 if (s->iop.bio) {
874 bio_reset(s->iop.bio); 874 bio_reset(s->iop.bio);
875 s->iop.bio->bi_sector = s->cache_miss->bi_sector; 875 s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
876 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; 876 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
877 s->iop.bio->bi_size = s->insert_bio_sectors << 9; 877 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
878 bch_bio_map(s->iop.bio, NULL); 878 bch_bio_map(s->iop.bio, NULL);
879 879
880 bio_copy_data(s->cache_miss, s->iop.bio); 880 bio_copy_data(s->cache_miss, s->iop.bio);
@@ -937,7 +937,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
937 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); 937 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
938 938
939 s->iop.replace_key = KEY(s->iop.inode, 939 s->iop.replace_key = KEY(s->iop.inode,
940 bio->bi_sector + s->insert_bio_sectors, 940 bio->bi_iter.bi_sector + s->insert_bio_sectors,
941 s->insert_bio_sectors); 941 s->insert_bio_sectors);
942 942
943 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); 943 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -957,9 +957,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
957 if (!cache_bio) 957 if (!cache_bio)
958 goto out_submit; 958 goto out_submit;
959 959
960 cache_bio->bi_sector = miss->bi_sector; 960 cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
961 cache_bio->bi_bdev = miss->bi_bdev; 961 cache_bio->bi_bdev = miss->bi_bdev;
962 cache_bio->bi_size = s->insert_bio_sectors << 9; 962 cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
963 963
964 cache_bio->bi_end_io = request_endio; 964 cache_bio->bi_end_io = request_endio;
965 cache_bio->bi_private = &s->cl; 965 cache_bio->bi_private = &s->cl;
@@ -1009,7 +1009,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
1009{ 1009{
1010 struct closure *cl = &s->cl; 1010 struct closure *cl = &s->cl;
1011 struct bio *bio = &s->bio.bio; 1011 struct bio *bio = &s->bio.bio;
1012 struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0); 1012 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
1013 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); 1013 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
1014 1014
1015 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); 1015 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1104,13 +1104,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
1104 part_stat_unlock(); 1104 part_stat_unlock();
1105 1105
1106 bio->bi_bdev = dc->bdev; 1106 bio->bi_bdev = dc->bdev;
1107 bio->bi_sector += dc->sb.data_offset; 1107 bio->bi_iter.bi_sector += dc->sb.data_offset;
1108 1108
1109 if (cached_dev_get(dc)) { 1109 if (cached_dev_get(dc)) {
1110 s = search_alloc(bio, d); 1110 s = search_alloc(bio, d);
1111 trace_bcache_request_start(s->d, bio); 1111 trace_bcache_request_start(s->d, bio);
1112 1112
1113 if (!bio->bi_size) { 1113 if (!bio->bi_iter.bi_size) {
1114 /* 1114 /*
1115 * can't call bch_journal_meta from under 1115 * can't call bch_journal_meta from under
1116 * generic_make_request 1116 * generic_make_request
@@ -1197,9 +1197,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
1197 sectors -= j; 1197 sectors -= j;
1198 } 1198 }
1199 1199
1200 bio_advance(bio, min(sectors << 9, bio->bi_size)); 1200 bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
1201 1201
1202 if (!bio->bi_size) 1202 if (!bio->bi_iter.bi_size)
1203 return MAP_DONE; 1203 return MAP_DONE;
1204 1204
1205 return MAP_CONTINUE; 1205 return MAP_CONTINUE;
@@ -1233,7 +1233,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1233 1233
1234 trace_bcache_request_start(s->d, bio); 1234 trace_bcache_request_start(s->d, bio);
1235 1235
1236 if (!bio->bi_size) { 1236 if (!bio->bi_iter.bi_size) {
1237 /* 1237 /*
1238 * can't call bch_journal_meta from under 1238 * can't call bch_journal_meta from under
1239 * generic_make_request 1239 * generic_make_request
@@ -1243,7 +1243,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1243 bcache_wq); 1243 bcache_wq);
1244 } else if (rw) { 1244 } else if (rw) {
1245 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, 1245 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1246 &KEY(d->id, bio->bi_sector, 0), 1246 &KEY(d->id, bio->bi_iter.bi_sector, 0),
1247 &KEY(d->id, bio_end_sector(bio), 0)); 1247 &KEY(d->id, bio_end_sector(bio), 0));
1248 1248
1249 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; 1249 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
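check_should_bypass() is the one spot in this file that reads the iterator for policy rather than positioning: a stream counts as sequential when a bio starts exactly where the previous one ended (i->last, recorded from bio_end_sector()), and accumulated sequential bytes push the I/O past the cache. That heuristic reduced to its core, without the hash table or the 5-second expiry:

#include <assert.h>

typedef unsigned long long sector_t;
struct bvec_iter { sector_t bi_sector; unsigned int bi_size; };
struct bio { struct bvec_iter bi_iter; };

#define bio_end_sector(b) ((b)->bi_iter.bi_sector + ((b)->bi_iter.bi_size >> 9))

struct stream { sector_t last; unsigned int sequential; };

static void account(struct stream *s, const struct bio *bio)
{
    if (bio->bi_iter.bi_sector == s->last)
        s->sequential += bio->bi_iter.bi_size;  /* growing: may bypass */
    else
        s->sequential = 0;                      /* random: worth caching */
    s->last = bio_end_sector(bio);
}

int main(void)
{
    struct stream s = { 0, 0 };
    struct bio a = { { 0, 8 << 9 } };   /* 8 sectors at 0 */
    struct bio b = { { 8, 8 << 9 } };   /* starts where 'a' ended */

    account(&s, &a);
    account(&s, &b);
    assert(s.sequential == 2 * (8 << 9));
    return 0;
}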
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1d9ee67d14ec..60fb6044b953 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
233 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); 233 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
234 unsigned i; 234 unsigned i;
235 235
236 bio->bi_sector = SB_SECTOR; 236 bio->bi_iter.bi_sector = SB_SECTOR;
237 bio->bi_rw = REQ_SYNC|REQ_META; 237 bio->bi_rw = REQ_SYNC|REQ_META;
238 bio->bi_size = SB_SIZE; 238 bio->bi_iter.bi_size = SB_SIZE;
239 bch_bio_map(bio, NULL); 239 bch_bio_map(bio, NULL);
240 240
241 out->offset = cpu_to_le64(sb->offset); 241 out->offset = cpu_to_le64(sb->offset);
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
347 struct bio *bio = bch_bbio_alloc(c); 347 struct bio *bio = bch_bbio_alloc(c);
348 348
349 bio->bi_rw = REQ_SYNC|REQ_META|rw; 349 bio->bi_rw = REQ_SYNC|REQ_META|rw;
350 bio->bi_size = KEY_SIZE(k) << 9; 350 bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
351 351
352 bio->bi_end_io = uuid_endio; 352 bio->bi_end_io = uuid_endio;
353 bio->bi_private = cl; 353 bio->bi_private = cl;
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
503 503
504 closure_init_stack(cl); 504 closure_init_stack(cl);
505 505
506 bio->bi_sector = bucket * ca->sb.bucket_size; 506 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
507 bio->bi_bdev = ca->bdev; 507 bio->bi_bdev = ca->bdev;
508 bio->bi_rw = REQ_SYNC|REQ_META|rw; 508 bio->bi_rw = REQ_SYNC|REQ_META|rw;
509 bio->bi_size = bucket_bytes(ca); 509 bio->bi_iter.bi_size = bucket_bytes(ca);
510 510
511 bio->bi_end_io = prio_endio; 511 bio->bi_end_io = prio_endio;
512 bio->bi_private = ca; 512 bio->bi_private = ca;
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..c57621e49dc0 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -218,10 +218,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
218 218
219void bch_bio_map(struct bio *bio, void *base) 219void bch_bio_map(struct bio *bio, void *base)
220{ 220{
221 size_t size = bio->bi_size; 221 size_t size = bio->bi_iter.bi_size;
222 struct bio_vec *bv = bio->bi_io_vec; 222 struct bio_vec *bv = bio->bi_io_vec;
223 223
224 BUG_ON(!bio->bi_size); 224 BUG_ON(!bio->bi_iter.bi_size);
225 BUG_ON(bio->bi_vcnt); 225 BUG_ON(bio->bi_vcnt);
226 226
227 bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0; 227 bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..04657e93f4fd 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -113,7 +113,7 @@ static void dirty_init(struct keybuf_key *w)
113 if (!io->dc->writeback_percent) 113 if (!io->dc->writeback_percent)
114 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); 114 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
115 115
116 bio->bi_size = KEY_SIZE(&w->key) << 9; 116 bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
117 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); 117 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
118 bio->bi_private = w; 118 bio->bi_private = w;
119 bio->bi_io_vec = bio->bi_inline_vecs; 119 bio->bi_io_vec = bio->bi_inline_vecs;
@@ -186,7 +186,7 @@ static void write_dirty(struct closure *cl)
186 186
187 dirty_init(w); 187 dirty_init(w);
188 io->bio.bi_rw = WRITE; 188 io->bio.bi_rw = WRITE;
189 io->bio.bi_sector = KEY_START(&w->key); 189 io->bio.bi_iter.bi_sector = KEY_START(&w->key);
190 io->bio.bi_bdev = io->dc->bdev; 190 io->bio.bi_bdev = io->dc->bdev;
191 io->bio.bi_end_io = dirty_endio; 191 io->bio.bi_end_io = dirty_endio;
192 192
@@ -255,7 +255,7 @@ static void read_dirty(struct cached_dev *dc)
255 io->dc = dc; 255 io->dc = dc;
256 256
257 dirty_init(w); 257 dirty_init(w);
258 io->bio.bi_sector = PTR_OFFSET(&w->key, 0); 258 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
259 io->bio.bi_bdev = PTR_CACHE(dc->disk.c, 259 io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
260 &w->key, 0)->bdev; 260 &w->key, 0)->bdev;
261 io->bio.bi_rw = READ; 261 io->bio.bi_rw = READ;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
50 return false; 50 return false;
51 51
52 if (dc->partial_stripes_expensive && 52 if (dc->partial_stripes_expensive &&
53 bcache_dev_stripe_dirty(dc, bio->bi_sector, 53 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
54 bio_sectors(bio))) 54 bio_sectors(bio)))
55 return true; 55 return true;
56 56
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2645c7..5ace48ee9f58 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -40,10 +40,10 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
40{ 40{
41 unsigned i; 41 unsigned i;
42 42
43 bd->bi_sector = bio->bi_sector; 43 bd->bi_sector = bio->bi_iter.bi_sector;
44 bd->bi_bdev = bio->bi_bdev; 44 bd->bi_bdev = bio->bi_bdev;
45 bd->bi_size = bio->bi_size; 45 bd->bi_size = bio->bi_iter.bi_size;
46 bd->bi_idx = bio->bi_idx; 46 bd->bi_idx = bio->bi_iter.bi_idx;
47 bd->bi_flags = bio->bi_flags; 47 bd->bi_flags = bio->bi_flags;
48 48
49 for (i = 0; i < bio->bi_vcnt; i++) { 49 for (i = 0; i < bio->bi_vcnt; i++) {
@@ -56,10 +56,10 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
56{ 56{
57 unsigned i; 57 unsigned i;
58 58
59 bio->bi_sector = bd->bi_sector; 59 bio->bi_iter.bi_sector = bd->bi_sector;
60 bio->bi_bdev = bd->bi_bdev; 60 bio->bi_bdev = bd->bi_bdev;
61 bio->bi_size = bd->bi_size; 61 bio->bi_iter.bi_size = bd->bi_size;
62 bio->bi_idx = bd->bi_idx; 62 bio->bi_iter.bi_idx = bd->bi_idx;
63 bio->bi_flags = bd->bi_flags; 63 bio->bi_flags = bd->bi_flags;
64 64
65 for (i = 0; i < bio->bi_vcnt; i++) { 65 for (i = 0; i < bio->bi_vcnt; i++) {
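dm_bio_record()/dm_bio_restore() exist so a target can remap a bio, fail, and resubmit the original; they save exactly the fields a remap may touch. Once position, size, and index share one struct, the four separate saves can collapse into a single struct assignment, which is roughly what later kernels do, if memory serves. A simplified model of that end state (not the dm header verbatim):

#include <assert.h>

typedef unsigned long long sector_t;
struct bvec_iter { sector_t bi_sector; unsigned int bi_size; unsigned int bi_idx; };
struct bio { struct bvec_iter bi_iter; unsigned long bi_flags; };

struct dm_bio_details { struct bvec_iter bi_iter; unsigned long bi_flags; };

static void record(struct dm_bio_details *bd, const struct bio *bio)
{
    bd->bi_iter = bio->bi_iter;     /* one copy instead of four saves */
    bd->bi_flags = bio->bi_flags;
}

static void restore(const struct dm_bio_details *bd, struct bio *bio)
{
    bio->bi_iter = bd->bi_iter;
    bio->bi_flags = bd->bi_flags;
}

int main(void)
{
    struct bio bio = { { 64, 4096, 0 }, 0 };
    struct dm_bio_details bd;

    record(&bd, &bio);
    bio.bi_iter.bi_sector = 9999;   /* a target remaps the bio ... */
    restore(&bd, &bio);             /* ... and the retry path undoes it */
    assert(bio.bi_iter.bi_sector == 64);
    return 0;
}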
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 173cbb20d104..4113b6044b80 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -538,7 +538,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
538 bio_init(&b->bio); 538 bio_init(&b->bio);
539 b->bio.bi_io_vec = b->bio_vec; 539 b->bio.bi_io_vec = b->bio_vec;
540 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; 540 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
541 b->bio.bi_sector = block << b->c->sectors_per_block_bits; 541 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
542 b->bio.bi_bdev = b->c->bdev; 542 b->bio.bi_bdev = b->c->bdev;
543 b->bio.bi_end_io = end_io; 543 b->bio.bi_end_io = end_io;
544 544
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 416b7b752a6e..bfba97dcde2d 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
72 72
73static void iot_update_stats(struct io_tracker *t, struct bio *bio) 73static void iot_update_stats(struct io_tracker *t, struct bio *bio)
74{ 74{
75 if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) 75 if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
76 t->nr_seq_samples++; 76 t->nr_seq_samples++;
77 else { 77 else {
78 /* 78 /*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
87 t->nr_rand_samples++; 87 t->nr_rand_samples++;
88 } 88 }
89 89
90 t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); 90 t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
91} 91}
92 92
93static void iot_check_for_pattern_switch(struct io_tracker *t) 93static void iot_check_for_pattern_switch(struct io_tracker *t)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9efcf1059b99..86f9c83eb30c 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -664,15 +664,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
664static void remap_to_cache(struct cache *cache, struct bio *bio, 664static void remap_to_cache(struct cache *cache, struct bio *bio,
665 dm_cblock_t cblock) 665 dm_cblock_t cblock)
666{ 666{
667 sector_t bi_sector = bio->bi_sector; 667 sector_t bi_sector = bio->bi_iter.bi_sector;
668 668
669 bio->bi_bdev = cache->cache_dev->bdev; 669 bio->bi_bdev = cache->cache_dev->bdev;
670 if (!block_size_is_power_of_two(cache)) 670 if (!block_size_is_power_of_two(cache))
671 bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + 671 bio->bi_iter.bi_sector =
672 sector_div(bi_sector, cache->sectors_per_block); 672 (from_cblock(cblock) * cache->sectors_per_block) +
673 sector_div(bi_sector, cache->sectors_per_block);
673 else 674 else
674 bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | 675 bio->bi_iter.bi_sector =
675 (bi_sector & (cache->sectors_per_block - 1)); 676 (from_cblock(cblock) << cache->sectors_per_block_shift) |
677 (bi_sector & (cache->sectors_per_block - 1));
676} 678}
677 679
678static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) 680static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +714,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
712 714
713static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) 715static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
714{ 716{
715 sector_t block_nr = bio->bi_sector; 717 sector_t block_nr = bio->bi_iter.bi_sector;
716 718
717 if (!block_size_is_power_of_two(cache)) 719 if (!block_size_is_power_of_two(cache))
718 (void) sector_div(block_nr, cache->sectors_per_block); 720 (void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1029,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
1027static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) 1029static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1028{ 1030{
1029 return (bio_data_dir(bio) == WRITE) && 1031 return (bio_data_dir(bio) == WRITE) &&
1030 (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); 1032 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1031} 1033}
1032 1034
1033static void avoid_copy(struct dm_cache_migration *mg) 1035static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1254,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
1252 size_t pb_data_size = get_per_bio_data_size(cache); 1254 size_t pb_data_size = get_per_bio_data_size(cache);
1253 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1255 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1254 1256
1255 BUG_ON(bio->bi_size); 1257 BUG_ON(bio->bi_iter.bi_size);
1256 if (!pb->req_nr) 1258 if (!pb->req_nr)
1257 remap_to_origin(cache, bio); 1259 remap_to_origin(cache, bio);
1258 else 1260 else
@@ -1275,9 +1277,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
1275 */ 1277 */
1276static void process_discard_bio(struct cache *cache, struct bio *bio) 1278static void process_discard_bio(struct cache *cache, struct bio *bio)
1277{ 1279{
1278 dm_block_t start_block = dm_sector_div_up(bio->bi_sector, 1280 dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
1279 cache->discard_block_size); 1281 cache->discard_block_size);
1280 dm_block_t end_block = bio->bi_sector + bio_sectors(bio); 1282 dm_block_t end_block = bio_end_sector(bio);
1281 dm_block_t b; 1283 dm_block_t b;
1282 1284
1283 end_block = block_div(end_block, cache->discard_block_size); 1285 end_block = block_div(end_block, cache->discard_block_size);
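remap_to_cache() rewrites the bio's sector into the cache device as block * sectors_per_block plus the offset within the block, with a shift-and-mask fast path when the block size is a power of two. The two paths have to agree; a standalone check of that equivalence (the kernel uses sector_div() where this sketch uses the % operator):

#include <assert.h>

typedef unsigned long long sector_t;

static sector_t remap_general(sector_t bi_sector, sector_t block,
                              sector_t sectors_per_block)
{
    return block * sectors_per_block + bi_sector % sectors_per_block;
}

static sector_t remap_pow2(sector_t bi_sector, sector_t block,
                           unsigned int shift)
{
    return (block << shift) | (bi_sector & ((1ULL << shift) - 1));
}

int main(void)
{
    /* 128-sector cache blocks: a bio at origin sector 1000 mapped to
     * cache block 5 must land at 5*128 + (1000 % 128) = 744 */
    assert(remap_general(1000, 5, 128) == 744);
    assert(remap_pow2(1000, 5, 7) == 744);
    return 0;
}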
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 81b0fa660452..1e2e5465d28e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -828,8 +828,8 @@ static void crypt_convert_init(struct crypt_config *cc,
828 ctx->bio_out = bio_out; 828 ctx->bio_out = bio_out;
829 ctx->offset_in = 0; 829 ctx->offset_in = 0;
830 ctx->offset_out = 0; 830 ctx->offset_out = 0;
831 ctx->idx_in = bio_in ? bio_in->bi_idx : 0; 831 ctx->idx_in = bio_in ? bio_in->bi_iter.bi_idx : 0;
832 ctx->idx_out = bio_out ? bio_out->bi_idx : 0; 832 ctx->idx_out = bio_out ? bio_out->bi_iter.bi_idx : 0;
833 ctx->cc_sector = sector + cc->iv_offset; 833 ctx->cc_sector = sector + cc->iv_offset;
834 init_completion(&ctx->restart); 834 init_completion(&ctx->restart);
835} 835}
@@ -1021,7 +1021,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
1021 size -= len; 1021 size -= len;
1022 } 1022 }
1023 1023
1024 if (!clone->bi_size) { 1024 if (!clone->bi_iter.bi_size) {
1025 bio_put(clone); 1025 bio_put(clone);
1026 return NULL; 1026 return NULL;
1027 } 1027 }
@@ -1161,7 +1161,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1161 crypt_inc_pending(io); 1161 crypt_inc_pending(io);
1162 1162
1163 clone_init(io, clone); 1163 clone_init(io, clone);
1164 clone->bi_sector = cc->start + io->sector; 1164 clone->bi_iter.bi_sector = cc->start + io->sector;
1165 1165
1166 generic_make_request(clone); 1166 generic_make_request(clone);
1167 return 0; 1167 return 0;
@@ -1209,7 +1209,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1209 /* crypt_convert should have filled the clone bio */ 1209 /* crypt_convert should have filled the clone bio */
1210 BUG_ON(io->ctx.idx_out < clone->bi_vcnt); 1210 BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
1211 1211
1212 clone->bi_sector = cc->start + io->sector; 1212 clone->bi_iter.bi_sector = cc->start + io->sector;
1213 1213
1214 if (async) 1214 if (async)
1215 kcryptd_queue_io(io); 1215 kcryptd_queue_io(io);
@@ -1224,7 +1224,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1224 struct dm_crypt_io *new_io; 1224 struct dm_crypt_io *new_io;
1225 int crypt_finished; 1225 int crypt_finished;
1226 unsigned out_of_pages = 0; 1226 unsigned out_of_pages = 0;
1227 unsigned remaining = io->base_bio->bi_size; 1227 unsigned remaining = io->base_bio->bi_iter.bi_size;
1228 sector_t sector = io->sector; 1228 sector_t sector = io->sector;
1229 int r; 1229 int r;
1230 1230
@@ -1248,7 +1248,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1248 io->ctx.bio_out = clone; 1248 io->ctx.bio_out = clone;
1249 io->ctx.idx_out = 0; 1249 io->ctx.idx_out = 0;
1250 1250
1251 remaining -= clone->bi_size; 1251 remaining -= clone->bi_iter.bi_size;
1252 sector += bio_sectors(clone); 1252 sector += bio_sectors(clone);
1253 1253
1254 crypt_inc_pending(io); 1254 crypt_inc_pending(io);
@@ -1869,11 +1869,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
1869 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { 1869 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
1870 bio->bi_bdev = cc->dev->bdev; 1870 bio->bi_bdev = cc->dev->bdev;
1871 if (bio_sectors(bio)) 1871 if (bio_sectors(bio))
1872 bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); 1872 bio->bi_iter.bi_sector = cc->start +
1873 dm_target_offset(ti, bio->bi_iter.bi_sector);
1873 return DM_MAPIO_REMAPPED; 1874 return DM_MAPIO_REMAPPED;
1874 } 1875 }
1875 1876
1876 io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); 1877 io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
1877 1878
1878 if (bio_data_dir(io->base_bio) == READ) { 1879 if (bio_data_dir(io->base_bio) == READ) {
1879 if (kcryptd_io_read(io, GFP_NOWAIT)) 1880 if (kcryptd_io_read(io, GFP_NOWAIT))
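crypt_map() remaps empty flush and discard bios directly: the backing device's start plus dm_target_offset(), i.e. the bio's sector made relative to where this target begins in the mapped device. Modeling dm_target_offset() as (sector - ti->begin), which matches the device-mapper header as I recall it; the numbers are made up:

#include <assert.h>

typedef unsigned long long sector_t;

struct dm_target { sector_t begin; };

static sector_t dm_target_offset(const struct dm_target *ti, sector_t sector)
{
    return sector - ti->begin;      /* sector relative to the target */
}

int main(void)
{
    struct dm_target ti = { .begin = 2048 };    /* where the target starts */
    sector_t cc_start = 16384;                  /* cc->start, made up */
    sector_t bi_sector = 2048 + 100;            /* bio 100 sectors in */

    /* bio->bi_iter.bi_sector = cc->start + dm_target_offset(ti, ...) */
    assert(cc_start + dm_target_offset(&ti, bi_sector) == 16384 + 100);
    return 0;
}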
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 496d5f3646a5..84c860191a2e 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -281,14 +281,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
281 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { 281 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
282 bio->bi_bdev = dc->dev_write->bdev; 282 bio->bi_bdev = dc->dev_write->bdev;
283 if (bio_sectors(bio)) 283 if (bio_sectors(bio))
284 bio->bi_sector = dc->start_write + 284 bio->bi_iter.bi_sector = dc->start_write +
285 dm_target_offset(ti, bio->bi_sector); 285 dm_target_offset(ti, bio->bi_iter.bi_sector);
286 286
287 return delay_bio(dc, dc->write_delay, bio); 287 return delay_bio(dc, dc->write_delay, bio);
288 } 288 }
289 289
290 bio->bi_bdev = dc->dev_read->bdev; 290 bio->bi_bdev = dc->dev_read->bdev;
291 bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector); 291 bio->bi_iter.bi_sector = dc->start_read +
292 dm_target_offset(ti, bio->bi_iter.bi_sector);
292 293
293 return delay_bio(dc, dc->read_delay, bio); 294 return delay_bio(dc, dc->read_delay, bio);
294} 295}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec5f126..b257e46876d3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
248 248
249 bio->bi_bdev = fc->dev->bdev; 249 bio->bi_bdev = fc->dev->bdev;
250 if (bio_sectors(bio)) 250 if (bio_sectors(bio))
251 bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); 251 bio->bi_iter.bi_sector =
252 flakey_map_sector(ti, bio->bi_iter.bi_sector);
252} 253}
253 254
254static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) 255static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
265 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " 266 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
266 "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", 267 "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
267 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, 268 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
268 (bio_data_dir(bio) == WRITE) ? 'w' : 'r', 269 (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
269 bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); 270 (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
270 } 271 }
271} 272}
272 273
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986a2fec..01558b093307 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -304,14 +304,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
304 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); 304 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
305 305
306 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); 306 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
307 bio->bi_sector = where->sector + (where->count - remaining); 307 bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
308 bio->bi_bdev = where->bdev; 308 bio->bi_bdev = where->bdev;
309 bio->bi_end_io = endio; 309 bio->bi_end_io = endio;
310 store_io_and_region_in_bio(bio, io, region); 310 store_io_and_region_in_bio(bio, io, region);
311 311
312 if (rw & REQ_DISCARD) { 312 if (rw & REQ_DISCARD) {
313 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 313 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
314 bio->bi_size = num_sectors << SECTOR_SHIFT; 314 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
315 remaining -= num_sectors; 315 remaining -= num_sectors;
316 } else if (rw & REQ_WRITE_SAME) { 316 } else if (rw & REQ_WRITE_SAME) {
317 /* 317 /*
@@ -320,7 +320,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
320 dp->get_page(dp, &page, &len, &offset); 320 dp->get_page(dp, &page, &len, &offset);
321 bio_add_page(bio, page, logical_block_size, offset); 321 bio_add_page(bio, page, logical_block_size, offset);
322 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); 322 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
323 bio->bi_size = num_sectors << SECTOR_SHIFT; 323 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
324 324
325 offset = 0; 325 offset = 0;
326 remaining -= num_sectors; 326 remaining -= num_sectors;
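For REQ_DISCARD and REQ_WRITE_SAME, do_region() sets bi_iter.bi_size directly instead of adding data pages, since the size is essentially the whole payload description for such bios (write-same additionally carries a single page), and it caps each bio at the queue's discard or write-same limit. A chunking sketch of the discard path; the limit value is invented:

#include <stdio.h>

typedef unsigned long long sector_t;

#define SECTOR_SHIFT 9

int main(void)
{
    sector_t start = 0, remaining = 300;
    sector_t max_discard_sectors = 128;     /* queue limit, made up */

    while (remaining) {
        sector_t n = remaining < max_discard_sectors
                   ? remaining : max_discard_sectors;

        /* per bio: bi_iter.bi_sector = start, no pages added, and
         * bi_iter.bi_size = n << SECTOR_SHIFT as in the hunk above */
        printf("discard %llu sectors at %llu (bi_size=%llu)\n",
               n, start, n << SECTOR_SHIFT);
        start += n;
        remaining -= n;
    }
    return 0;
}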
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d267340c..53e848c10939 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
85 85
86 bio->bi_bdev = lc->dev->bdev; 86 bio->bi_bdev = lc->dev->bdev;
87 if (bio_sectors(bio)) 87 if (bio_sectors(bio))
88 bio->bi_sector = linear_map_sector(ti, bio->bi_sector); 88 bio->bi_iter.bi_sector =
89 linear_map_sector(ti, bio->bi_iter.bi_sector);
89} 90}
90 91
91static int linear_map(struct dm_target *ti, struct bio *bio) 92static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443c5614..9f6d8e6baa7d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
432 region_t region = dm_rh_bio_to_region(ms->rh, bio); 432 region_t region = dm_rh_bio_to_region(ms->rh, bio);
433 433
434 if (log->type->in_sync(log, region, 0)) 434 if (log->type->in_sync(log, region, 0))
435 return choose_mirror(ms, bio->bi_sector) ? 1 : 0; 435 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
436 436
437 return 0; 437 return 0;
438} 438}
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
442 */ 442 */
443static sector_t map_sector(struct mirror *m, struct bio *bio) 443static sector_t map_sector(struct mirror *m, struct bio *bio)
444{ 444{
445 if (unlikely(!bio->bi_size)) 445 if (unlikely(!bio->bi_iter.bi_size))
446 return 0; 446 return 0;
447 return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector); 447 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
448} 448}
449 449
450static void map_bio(struct mirror *m, struct bio *bio) 450static void map_bio(struct mirror *m, struct bio *bio)
451{ 451{
452 bio->bi_bdev = m->dev->bdev; 452 bio->bi_bdev = m->dev->bdev;
453 bio->bi_sector = map_sector(m, bio); 453 bio->bi_iter.bi_sector = map_sector(m, bio);
454} 454}
455 455
456static void map_region(struct dm_io_region *io, struct mirror *m, 456static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -527,7 +527,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
527 struct dm_io_request io_req = { 527 struct dm_io_request io_req = {
528 .bi_rw = READ, 528 .bi_rw = READ,
529 .mem.type = DM_IO_BVEC, 529 .mem.type = DM_IO_BVEC,
530 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 530 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx,
531 .notify.fn = read_callback, 531 .notify.fn = read_callback,
532 .notify.context = bio, 532 .notify.context = bio,
533 .client = m->ms->io_client, 533 .client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
559 * We can only read balance if the region is in sync. 559 * We can only read balance if the region is in sync.
560 */ 560 */
561 if (likely(region_in_sync(ms, region, 1))) 561 if (likely(region_in_sync(ms, region, 1)))
562 m = choose_mirror(ms, bio->bi_sector); 562 m = choose_mirror(ms, bio->bi_iter.bi_sector);
563 else if (m && atomic_read(&m->error_count)) 563 else if (m && atomic_read(&m->error_count))
564 m = NULL; 564 m = NULL;
565 565
@@ -630,7 +630,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
630 struct dm_io_request io_req = { 630 struct dm_io_request io_req = {
631 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), 631 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
632 .mem.type = DM_IO_BVEC, 632 .mem.type = DM_IO_BVEC,
633 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 633 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx,
634 .notify.fn = write_callback, 634 .notify.fn = write_callback,
635 .notify.context = bio, 635 .notify.context = bio,
636 .client = ms->io_client, 636 .client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
1181 * The region is in-sync and we can perform reads directly. 1181 * The region is in-sync and we can perform reads directly.
1182 * Store enough information so we can retry if it fails. 1182 * Store enough information so we can retry if it fails.
1183 */ 1183 */
1184 m = choose_mirror(ms, bio->bi_sector); 1184 m = choose_mirror(ms, bio->bi_iter.bi_sector);
1185 if (unlikely(!m)) 1185 if (unlikely(!m))
1186 return -EIO; 1186 return -EIO;
1187 1187
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb34..b929fd5f4984 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
126 126
127region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) 127region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
128{ 128{
129 return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); 129 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
130 rh->target_begin);
130} 131}
131EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); 132EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
132 133
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index aec57d76db5d..3ded8c729dfb 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1562,11 +1562,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1562 struct bio *bio, chunk_t chunk) 1562 struct bio *bio, chunk_t chunk)
1563{ 1563{
1564 bio->bi_bdev = s->cow->bdev; 1564 bio->bi_bdev = s->cow->bdev;
1565 bio->bi_sector = chunk_to_sector(s->store, 1565 bio->bi_iter.bi_sector =
1566 dm_chunk_number(e->new_chunk) + 1566 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
1567 (chunk - e->old_chunk)) + 1567 (chunk - e->old_chunk)) +
1568 (bio->bi_sector & 1568 (bio->bi_iter.bi_sector & s->store->chunk_mask);
1569 s->store->chunk_mask);
1570} 1569}
1571 1570
1572static int snapshot_map(struct dm_target *ti, struct bio *bio) 1571static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1584,7 +1583,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
1584 return DM_MAPIO_REMAPPED; 1583 return DM_MAPIO_REMAPPED;
1585 } 1584 }
1586 1585
1587 chunk = sector_to_chunk(s->store, bio->bi_sector); 1586 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1588 1587
1589 /* Full snapshots are not usable */ 1588 /* Full snapshots are not usable */
1590 /* To get here the table must be live so s->active is always set. */ 1589 /* To get here the table must be live so s->active is always set. */
@@ -1645,7 +1644,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
1645 r = DM_MAPIO_SUBMITTED; 1644 r = DM_MAPIO_SUBMITTED;
1646 1645
1647 if (!pe->started && 1646 if (!pe->started &&
1648 bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { 1647 bio->bi_iter.bi_size ==
1648 (s->store->chunk_size << SECTOR_SHIFT)) {
1649 pe->started = 1; 1649 pe->started = 1;
1650 up_write(&s->lock); 1650 up_write(&s->lock);
1651 start_full_bio(pe, bio); 1651 start_full_bio(pe, bio);
@@ -1701,7 +1701,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
1701 return DM_MAPIO_REMAPPED; 1701 return DM_MAPIO_REMAPPED;
1702 } 1702 }
1703 1703
1704 chunk = sector_to_chunk(s->store, bio->bi_sector); 1704 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1705 1705
1706 down_write(&s->lock); 1706 down_write(&s->lock);
1707 1707
@@ -2038,7 +2038,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
2038 down_read(&_origins_lock); 2038 down_read(&_origins_lock);
2039 o = __lookup_origin(origin->bdev); 2039 o = __lookup_origin(origin->bdev);
2040 if (o) 2040 if (o)
2041 r = __origin_write(&o->snapshots, bio->bi_sector, bio); 2041 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
2042 up_read(&_origins_lock); 2042 up_read(&_origins_lock);
2043 2043
2044 return r; 2044 return r;
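remap_exception() redirects a snapshot bio into the COW device by swapping the chunk number while keeping the bio's offset within its chunk, which is what the & s->store->chunk_mask term preserves. The arithmetic in isolation, assuming a power-of-two chunk size so the mask is chunk_size - 1:

#include <assert.h>

typedef unsigned long long sector_t;

static sector_t remap(sector_t bi_sector, sector_t new_chunk,
                      unsigned int chunk_shift)
{
    sector_t chunk_mask = (1ULL << chunk_shift) - 1;

    return (new_chunk << chunk_shift) + (bi_sector & chunk_mask);
}

int main(void)
{
    /* 16-sector chunks: sector 35 sits at offset 3 within its chunk,
     * so remapping it to COW chunk 9 lands at 9*16 + 3 = 147 */
    assert(remap(35, 9, 4) == 147);
    return 0;
}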
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712dad96..d1600d2aa2e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
259{ 259{
260 sector_t begin, end; 260 sector_t begin, end;
261 261
262 stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); 262 stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
263 target_stripe, &begin);
263 stripe_map_range_sector(sc, bio_end_sector(bio), 264 stripe_map_range_sector(sc, bio_end_sector(bio),
264 target_stripe, &end); 265 target_stripe, &end);
265 if (begin < end) { 266 if (begin < end) {
266 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; 267 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
267 bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; 268 bio->bi_iter.bi_sector = begin +
268 bio->bi_size = to_bytes(end - begin); 269 sc->stripe[target_stripe].physical_start;
270 bio->bi_iter.bi_size = to_bytes(end - begin);
269 return DM_MAPIO_REMAPPED; 271 return DM_MAPIO_REMAPPED;
270 } else { 272 } else {
271 /* The range doesn't map to the target stripe */ 273 /* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
293 return stripe_map_range(sc, bio, target_bio_nr); 295 return stripe_map_range(sc, bio, target_bio_nr);
294 } 296 }
295 297
296 stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); 298 stripe_map_sector(sc, bio->bi_iter.bi_sector,
299 &stripe, &bio->bi_iter.bi_sector);
297 300
298 bio->bi_sector += sc->stripe[stripe].physical_start; 301 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
299 bio->bi_bdev = sc->stripe[stripe].dev->bdev; 302 bio->bi_bdev = sc->stripe[stripe].dev->bdev;
300 303
301 return DM_MAPIO_REMAPPED; 304 return DM_MAPIO_REMAPPED;
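stripe_map() leans on stripe_map_sector(), which is not in this hunk: it splits the target-relative sector into a stripe index and an offset within that stripe, after which the chosen stripe's physical_start is added. A guess at that decomposition for the chunked round-robin layout dm-stripe implements, simplified to power-of-two chunk sizes (the driver also handles the general case via sector_div):

#include <assert.h>

typedef unsigned long long sector_t;

static void stripe_map_sector(sector_t sector, unsigned int chunk_shift,
                              unsigned int stripes, unsigned int *stripe,
                              sector_t *result)
{
    sector_t chunk = sector >> chunk_shift;                 /* which chunk */
    sector_t offset = sector & ((1ULL << chunk_shift) - 1);

    *stripe = chunk % stripes;                              /* round robin */
    *result = ((chunk / stripes) << chunk_shift) + offset;
}

int main(void)
{
    unsigned int stripe;
    sector_t result;

    /* 8-sector chunks over 4 stripes: sector 70 is chunk 8, offset 6.
     * Chunks 0, 4, 8 land on stripe 0, so chunk 8 is the third chunk
     * there and starts at sector 16 of that stripe: result = 22. */
    stripe_map_sector(70, 3, 4, &stripe, &result);
    assert(stripe == 0 && result == 22);
    return 0;
}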
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4be4721..09a688b3d48c 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -311,11 +311,11 @@ error:
311static int switch_map(struct dm_target *ti, struct bio *bio) 311static int switch_map(struct dm_target *ti, struct bio *bio)
312{ 312{
313 struct switch_ctx *sctx = ti->private; 313 struct switch_ctx *sctx = ti->private;
314 sector_t offset = dm_target_offset(ti, bio->bi_sector); 314 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
315 unsigned path_nr = switch_get_path_nr(sctx, offset); 315 unsigned path_nr = switch_get_path_nr(sctx, offset);
316 316
317 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; 317 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
318 bio->bi_sector = sctx->path_list[path_nr].start + offset; 318 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
319 319
320 return DM_MAPIO_REMAPPED; 320 return DM_MAPIO_REMAPPED;
321} 321}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2c0cf511ec23..a65402480c8c 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -413,7 +413,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	sector_t block_nr = bio->bi_sector;
+	sector_t block_nr = bio->bi_iter.bi_sector;
 
 	if (block_size_is_power_of_two(pool))
 		block_nr >>= pool->sectors_per_block_shift;
@@ -426,14 +426,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 {
 	struct pool *pool = tc->pool;
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	bio->bi_bdev = tc->pool_dev->bdev;
 	if (block_size_is_power_of_two(pool))
-		bio->bi_sector = (block << pool->sectors_per_block_shift) |
-				(bi_sector & (pool->sectors_per_block - 1));
+		bio->bi_iter.bi_sector =
+			(block << pool->sectors_per_block_shift) |
+			(bi_sector & (pool->sectors_per_block - 1));
 	else
-		bio->bi_sector = (block * pool->sectors_per_block) +
+		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
 				 sector_div(bi_sector, pool->sectors_per_block);
 }
 
@@ -721,7 +722,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
  */
 static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-	return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+	return bio->bi_iter.bi_size ==
+		(pool->sectors_per_block << SECTOR_SHIFT);
 }
 
 static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1130,7 +1132,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 	if (bio_detain(pool, &key, bio, &cell))
 		return;
 
-	if (bio_data_dir(bio) == WRITE && bio->bi_size)
+	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
 		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1153,7 +1155,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	/*
 	 * Remap empty bios (flushes) immediately, without provisioning.
 	 */
-	if (!bio->bi_size) {
+	if (!bio->bi_iter.bi_size) {
 		inc_all_io_entry(pool, bio);
 		cell_defer_no_holder(tc, cell);
 
@@ -1253,7 +1255,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
 	switch (r) {
 	case 0:
-		if (lookup_result.shared && (rw == WRITE) && bio->bi_size)
+		if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
 			bio_io_error(bio);
 		else {
 			inc_all_io_entry(tc->pool, bio);
@@ -2867,7 +2869,7 @@ out_unlock:
 
 static int thin_map(struct dm_target *ti, struct bio *bio)
 {
-	bio->bi_sector = dm_target_offset(ti, bio->bi_sector);
+	bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
 
 	return thin_bio_map(ti, bio);
 }
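The remap() change above carries the interesting arithmetic: the block number replaces the high bits (or the quotient) of the sector while the offset into the block is preserved. The same math as a standalone sketch (remap_sector() is a hypothetical helper name, not part of the patch):

	/* Hypothetical standalone form of the remap() math above. */
	static sector_t remap_sector(sector_t bi_sector, dm_block_t block,
				     struct pool *pool)
	{
		if (block_size_is_power_of_two(pool))
			/* keep the offset within the block, swap the block number */
			return (block << pool->sectors_per_block_shift) |
			       (bi_sector & (pool->sectors_per_block - 1));
		/* otherwise the in-block offset is the division remainder;
		 * sector_div() divides in place and returns it */
		return block * pool->sectors_per_block +
		       sector_div(bi_sector, pool->sectors_per_block);
	}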
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941db3aff..132b3154d466 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -493,9 +493,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 	struct dm_verity_io *io;
 
 	bio->bi_bdev = v->data_dev->bdev;
-	bio->bi_sector = verity_map_sector(v, bio->bi_sector);
+	bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
 
-	if (((unsigned)bio->bi_sector | bio_sectors(bio)) &
+	if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
 	    ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
 		DMERR_LIMIT("unaligned io");
 		return -EIO;
@@ -514,8 +514,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
 	io->v = v;
 	io->orig_bi_end_io = bio->bi_end_io;
 	io->orig_bi_private = bio->bi_private;
-	io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
-	io->n_blocks = bio->bi_size >> v->data_dev_block_bits;
+	io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
+	io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
 
 	bio->bi_end_io = verity_end_io;
 	bio->bi_private = io;
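The alignment test in verity_map() uses a common idiom: OR the start sector and the length together and check them against the block mask in one go, since the result is unaligned iff either operand is. The same check as a hypothetical standalone helper (a sketch, not part of the patch):

	/* Hypothetical helper mirroring the verity_map() check: both the start
	 * sector and the length must be multiples of the data block size. */
	static bool io_block_aligned(struct bio *bio, unsigned data_dev_block_bits)
	{
		unsigned mask = (1 << (data_dev_block_bits - SECTOR_SHIFT)) - 1;

		return (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & mask) == 0;
	}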
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0704c523a76b..ccd064ea4fe6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
 		    atomic_inc_return(&md->pending[rw]));
 
 	if (unlikely(dm_stats_used(&md->stats)))
-		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
 				    bio_sectors(bio), false, 0, &io->stats_aux);
 }
 
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
 	part_stat_unlock();
 
 	if (unlikely(dm_stats_used(&md->stats)))
-		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector,
+		dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
 				    bio_sectors(bio), true, duration, &io->stats_aux);
 
 	/*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
 		if (io_error == DM_ENDIO_REQUEUE)
 			return;
 
-		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) {
+		if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
 			/*
 			 * Preflush done for flush with data, reissue
 			 * without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
 	struct dm_rq_clone_bio_info *info = clone->bi_private;
 	struct dm_rq_target_io *tio = info->tio;
 	struct bio *bio = info->orig;
-	unsigned int nr_bytes = info->orig->bi_size;
+	unsigned int nr_bytes = info->orig->bi_iter.bi_size;
 
 	bio_put(clone);
 
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
 	 * this io.
 	 */
 	atomic_inc(&tio->io->io_count);
-	sector = clone->bi_sector;
+	sector = clone->bi_iter.bi_sector;
 	r = ti->type->map(ti, clone);
 	if (r == DM_MAPIO_REMAPPED) {
 		/* the bio has been remapped so dispatch it */
@@ -1160,13 +1160,13 @@ struct clone_info {
 
 static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
 {
-	bio->bi_sector = sector;
-	bio->bi_size = to_bytes(len);
+	bio->bi_iter.bi_sector = sector;
+	bio->bi_iter.bi_size = to_bytes(len);
 }
 
 static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
 {
-	bio->bi_idx = idx;
+	bio->bi_iter.bi_idx = idx;
 	bio->bi_vcnt = idx + bv_count;
 	bio->bi_flags &= ~(1 << BIO_SEG_VALID);
 }
@@ -1202,7 +1202,7 @@ static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
 	clone->bi_rw = bio->bi_rw;
 	clone->bi_vcnt = 1;
 	clone->bi_io_vec->bv_offset = offset;
-	clone->bi_io_vec->bv_len = clone->bi_size;
+	clone->bi_io_vec->bv_len = clone->bi_iter.bi_size;
 	clone->bi_flags |= 1 << BIO_CLONED;
 
 	clone_bio_integrity(bio, clone, idx, len, offset, 1);
@@ -1222,7 +1222,8 @@ static void clone_bio(struct dm_target_io *tio, struct bio *bio,
 	bio_setup_sector(clone, sector, len);
 	bio_setup_bv(clone, idx, bv_count);
 
-	if (idx != bio->bi_idx || clone->bi_size < bio->bi_size)
+	if (idx != bio->bi_iter.bi_idx ||
+	    clone->bi_iter.bi_size < bio->bi_iter.bi_size)
 		trim = 1;
 	clone_bio_integrity(bio, clone, idx, len, 0, trim);
 }
@@ -1510,8 +1511,8 @@ static void __split_and_process_bio(struct mapped_device *md,
 	ci.io->bio = bio;
 	ci.io->md = md;
 	spin_lock_init(&ci.io->endio_lock);
-	ci.sector = bio->bi_sector;
-	ci.idx = bio->bi_idx;
+	ci.sector = bio->bi_iter.bi_sector;
+	ci.idx = bio->bi_iter.bi_idx;
 
 	start_io_acct(ci.io);
 
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aefe982b..e8b4574956c7 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
 {
 	struct bio *b = bio->bi_private;
 
-	b->bi_size = bio->bi_size;
-	b->bi_sector = bio->bi_sector;
+	b->bi_iter.bi_size = bio->bi_iter.bi_size;
+	b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 
 	bio_put(bio);
 
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
 			return;
 		}
 
-		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE))
+		if (check_sector(conf, bio->bi_iter.bi_sector,
+				 bio_end_sector(bio), WRITE))
 			failit = 1;
 		if (check_mode(conf, WritePersistent)) {
-			add_sector(conf, bio->bi_sector, WritePersistent);
+			add_sector(conf, bio->bi_iter.bi_sector,
+				   WritePersistent);
 			failit = 1;
 		}
 		if (check_mode(conf, WriteTransient))
 			failit = 1;
 	} else {
 		/* read request */
-		if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ))
+		if (check_sector(conf, bio->bi_iter.bi_sector,
+				 bio_end_sector(bio), READ))
 			failit = 1;
 		if (check_mode(conf, ReadTransient))
 			failit = 1;
 		if (check_mode(conf, ReadPersistent)) {
-			add_sector(conf, bio->bi_sector, ReadPersistent);
+			add_sector(conf, bio->bi_iter.bi_sector,
+				   ReadPersistent);
 			failit = 1;
 		}
 		if (check_mode(conf, ReadFixable)) {
-			add_sector(conf, bio->bi_sector, ReadFixable);
+			add_sector(conf, bio->bi_iter.bi_sector,
+				   ReadFixable);
 			failit = 1;
 		}
 	}
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37b..fb3b0d04edfb 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -297,19 +297,19 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 	}
 
 	rcu_read_lock();
-	tmp_dev = which_dev(mddev, bio->bi_sector);
+	tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
 	start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
 
 
-	if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
-		     || (bio->bi_sector < start_sector))) {
+	if (unlikely(bio->bi_iter.bi_sector >= (tmp_dev->end_sector)
+		     || (bio->bi_iter.bi_sector < start_sector))) {
 		char b[BDEVNAME_SIZE];
 
 		printk(KERN_ERR
 		       "md/linear:%s: make_request: Sector %llu out of bounds on "
 		       "dev %s: %llu sectors, offset %llu\n",
 		       mdname(mddev),
-		       (unsigned long long)bio->bi_sector,
+		       (unsigned long long)bio->bi_iter.bi_sector,
 		       bdevname(tmp_dev->rdev->bdev, b),
 		       (unsigned long long)tmp_dev->rdev->sectors,
 		       (unsigned long long)start_sector);
@@ -326,7 +326,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 
 		rcu_read_unlock();
 
-		bp = bio_split(bio, end_sector - bio->bi_sector);
+		bp = bio_split(bio, end_sector - bio->bi_iter.bi_sector);
 
 		linear_make_request(mddev, &bp->bio1);
 		linear_make_request(mddev, &bp->bio2);
@@ -335,7 +335,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
 	}
 
 	bio->bi_bdev = tmp_dev->rdev->bdev;
-	bio->bi_sector = bio->bi_sector - start_sector
+	bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - start_sector
 		+ tmp_dev->rdev->data_offset;
 	rcu_read_unlock();
 
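linear_make_request() still splits with the old bio_pair interface at this point in the tree; the calling pattern, as a sketch (the bio_pair_release() after the two submissions is assumed from the surrounding, unshown context):

	/* Usage pattern of the bio_pair-style split used above (sketch). */
	struct bio_pair *bp = bio_split(bio, end_sector - bio->bi_iter.bi_sector);

	linear_make_request(mddev, &bp->bio1);	/* half below the device boundary */
	linear_make_request(mddev, &bp->bio2);	/* half above it */
	bio_pair_release(bp);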
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 739b1ec54e28..b07fed398fd7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
 	struct mddev *mddev = container_of(ws, struct mddev, flush_work);
 	struct bio *bio = mddev->flush_bio;
 
-	if (bio->bi_size == 0)
+	if (bio->bi_iter.bi_size == 0)
 		/* an empty barrier - all done */
 		bio_endio(bio, 0);
 	else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
 	struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
 
 	bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio_add_page(bio, page, size, 0);
 	bio->bi_private = rdev;
 	bio->bi_end_io = super_written;
@@ -785,13 +785,13 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
 	bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
 		rdev->meta_bdev : rdev->bdev;
 	if (metadata_op)
-		bio->bi_sector = sector + rdev->sb_start;
+		bio->bi_iter.bi_sector = sector + rdev->sb_start;
 	else if (rdev->mddev->reshape_position != MaxSector &&
 		 (rdev->mddev->reshape_backwards ==
 		  (sector >= rdev->mddev->reshape_position)))
-		bio->bi_sector = sector + rdev->new_data_offset;
+		bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
 	else
-		bio->bi_sector = sector + rdev->data_offset;
+		bio->bi_iter.bi_sector = sector + rdev->data_offset;
 	bio_add_page(bio, page, size, 0);
 	submit_bio_wait(rw, bio);
 
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae75a33..849ad39f547b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
 		md_error (mp_bh->mddev, rdev);
 		printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
 		       bdevname(rdev->bdev,b),
-		       (unsigned long long)bio->bi_sector);
+		       (unsigned long long)bio->bi_iter.bi_sector);
 		multipath_reschedule_retry(mp_bh);
 	} else
 		multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
 	multipath = conf->multipaths + mp_bh->path;
 
 	mp_bh->bio = *bio;
-	mp_bh->bio.bi_sector += multipath->rdev->data_offset;
+	mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
 	mp_bh->bio.bi_bdev = multipath->rdev->bdev;
 	mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
 	mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
 		spin_unlock_irqrestore(&conf->device_lock, flags);
 
 		bio = &mp_bh->bio;
-		bio->bi_sector = mp_bh->master_bio->bi_sector;
+		bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
 
 		if ((mp_bh->path = multipath_map (conf))<0) {
 			printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
 			       " error for block %llu\n",
 			       bdevname(bio->bi_bdev,b),
-			       (unsigned long long)bio->bi_sector);
+			       (unsigned long long)bio->bi_iter.bi_sector);
 			multipath_end_bh_io(mp_bh, -EIO);
 		} else {
 			printk(KERN_ERR "multipath: %s: redirecting sector %llu"
 			       " to another IO path\n",
 			       bdevname(bio->bi_bdev,b),
-			       (unsigned long long)bio->bi_sector);
+			       (unsigned long long)bio->bi_iter.bi_sector);
 			*bio = *(mp_bh->master_bio);
-			bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset;
+			bio->bi_iter.bi_sector +=
+				conf->multipaths[mp_bh->path].rdev->data_offset;
 			bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
 			bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
 			bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b7d2f4..e38d1d3226f3 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
 			       unsigned int chunk_sects, struct bio *bio)
 {
 	if (likely(is_power_of_2(chunk_sects))) {
-		return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
+		return chunk_sects >=
+			((bio->bi_iter.bi_sector & (chunk_sects-1))
 					+ bio_sectors(bio));
 	} else{
-		sector_t sector = bio->bi_sector;
+		sector_t sector = bio->bi_iter.bi_sector;
 		return chunk_sects >= (sector_div(sector, chunk_sects)
 					+ bio_sectors(bio));
 	}
@@ -524,7 +525,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 
 	chunk_sects = mddev->chunk_sectors;
 	if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
-		sector_t sector = bio->bi_sector;
+		sector_t sector = bio->bi_iter.bi_sector;
 		struct bio_pair *bp;
 		/* Sanity check -- queue functions should prevent this happening */
 		if (bio_segments(bio) > 1)
@@ -544,12 +545,12 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
 		return;
 	}
 
-	sector_offset = bio->bi_sector;
+	sector_offset = bio->bi_iter.bi_sector;
 	zone = find_zone(mddev->private, &sector_offset);
-	tmp_dev = map_sector(mddev, zone, bio->bi_sector,
+	tmp_dev = map_sector(mddev, zone, bio->bi_iter.bi_sector,
 			     &sector_offset);
 	bio->bi_bdev = tmp_dev->bdev;
-	bio->bi_sector = sector_offset + zone->dev_start +
+	bio->bi_iter.bi_sector = sector_offset + zone->dev_start +
 		tmp_dev->data_offset;
 
 	if (unlikely((bio->bi_rw & REQ_DISCARD) &&
@@ -566,7 +567,8 @@ bad_map:
 	printk("md/raid0:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n",
 	       mdname(mddev), chunk_sects / 2,
-	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+	       (unsigned long long)bio->bi_iter.bi_sector,
+	       bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
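The power-of-two fast path in is_io_in_chunk_boundary() works because the offset within a chunk is then a simple mask. A worked example with made-up numbers: chunk_sects = 128, bi_sector = 1000, bio_sectors = 24, so (1000 & 127) + 24 = 104 + 24 = 128 <= 128 and the bio just fits inside one chunk. As a hypothetical standalone form of that branch:

	/* Sketch of the power-of-2 branch above; chunk_sects must be a
	 * power of two for the mask to be valid. */
	static bool fits_in_chunk(sector_t bi_sector, unsigned sectors,
				  unsigned chunk_sects)
	{
		return chunk_sects >= (bi_sector & (chunk_sects - 1)) + sectors;
	}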
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1e5a540995e9..db3b9d7314f1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
 	int done;
 	struct r1conf *conf = r1_bio->mddev->private;
 	sector_t start_next_window = r1_bio->start_next_window;
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	if (bio->bi_phys_segments) {
 		unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
 	if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
 		pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
 			 (bio_data_dir(bio) == WRITE) ? "write" : "read",
-			 (unsigned long long) bio->bi_sector,
-			 (unsigned long long) bio->bi_sector +
-			 bio_sectors(bio) - 1);
+			 (unsigned long long) bio->bi_iter.bi_sector,
+			 (unsigned long long) bio_end_sector(bio) - 1);
 
 		call_bio_endio(r1_bio);
 	}
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
 			struct bio *mbio = r1_bio->master_bio;
 			pr_debug("raid1: behind end write sectors"
 				 " %llu-%llu\n",
-				 (unsigned long long) mbio->bi_sector,
-				 (unsigned long long) mbio->bi_sector +
-				 bio_sectors(mbio) - 1);
+				 (unsigned long long) mbio->bi_iter.bi_sector,
+				 (unsigned long long) bio_end_sector(mbio) - 1);
 			call_bio_endio(r1_bio);
 		}
 	}
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
 	else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
 		  >= bio_end_sector(bio)) ||
 		 (conf->next_resync + NEXT_NORMALIO_DISTANCE
-		  <= bio->bi_sector))
+		  <= bio->bi_iter.bi_sector))
 		wait = false;
 	else
 		wait = true;
@@ -913,19 +911,19 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
 
 	if (bio && bio_data_dir(bio) == WRITE) {
 		if (conf->next_resync + NEXT_NORMALIO_DISTANCE
-		    <= bio->bi_sector) {
+		    <= bio->bi_iter.bi_sector) {
 			if (conf->start_next_window == MaxSector)
 				conf->start_next_window =
 					conf->next_resync +
 					NEXT_NORMALIO_DISTANCE;
 
 			if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
-			    <= bio->bi_sector)
+			    <= bio->bi_iter.bi_sector)
 				conf->next_window_requests++;
 			else
 				conf->current_window_requests++;
 		}
-		if (bio->bi_sector >= conf->start_next_window)
+		if (bio->bi_iter.bi_sector >= conf->start_next_window)
 			sector = conf->start_next_window;
 	}
 
@@ -1028,7 +1026,8 @@ do_sync_io:
 		if (bvecs[i].bv_page)
 			put_page(bvecs[i].bv_page);
 	kfree(bvecs);
-	pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size);
+	pr_debug("%dB behind alloc failed, doing sync I/O\n",
+		 bio->bi_iter.bi_size);
 }
 
 struct raid1_plug_cb {
@@ -1108,7 +1107,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 
 	if (bio_data_dir(bio) == WRITE &&
 	    bio_end_sector(bio) > mddev->suspend_lo &&
-	    bio->bi_sector < mddev->suspend_hi) {
+	    bio->bi_iter.bi_sector < mddev->suspend_hi) {
 		/* As the suspend_* range is controlled by
 		 * userspace, we want an interruptible
 		 * wait.
@@ -1119,7 +1118,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 			prepare_to_wait(&conf->wait_barrier,
 					&w, TASK_INTERRUPTIBLE);
 			if (bio_end_sector(bio) <= mddev->suspend_lo ||
-			    bio->bi_sector >= mddev->suspend_hi)
+			    bio->bi_iter.bi_sector >= mddev->suspend_hi)
 				break;
 			schedule();
 		}
@@ -1141,7 +1140,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	r1_bio->sectors = bio_sectors(bio);
 	r1_bio->state = 0;
 	r1_bio->mddev = mddev;
-	r1_bio->sector = bio->bi_sector;
+	r1_bio->sector = bio->bi_iter.bi_sector;
 
 	/* We might need to issue multiple reads to different
 	 * devices if there are bad blocks around, so we keep
@@ -1181,12 +1180,13 @@ read_again:
 		r1_bio->read_disk = rdisk;
 
 		read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(read_bio, r1_bio->sector - bio->bi_sector,
+		bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
 			 max_sectors);
 
 		r1_bio->bios[rdisk] = read_bio;
 
-		read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset;
+		read_bio->bi_iter.bi_sector = r1_bio->sector +
+			mirror->rdev->data_offset;
 		read_bio->bi_bdev = mirror->rdev->bdev;
 		read_bio->bi_end_io = raid1_end_read_request;
 		read_bio->bi_rw = READ | do_sync;
@@ -1198,7 +1198,7 @@ read_again:
 		 */
 
 		sectors_handled = (r1_bio->sector + max_sectors
-				   - bio->bi_sector);
+				   - bio->bi_iter.bi_sector);
 		r1_bio->sectors = max_sectors;
 		spin_lock_irq(&conf->device_lock);
 		if (bio->bi_phys_segments == 0)
@@ -1219,7 +1219,8 @@ read_again:
 			r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 			r1_bio->state = 0;
 			r1_bio->mddev = mddev;
-			r1_bio->sector = bio->bi_sector + sectors_handled;
+			r1_bio->sector = bio->bi_iter.bi_sector +
+				sectors_handled;
 			goto read_again;
 		} else
 			generic_make_request(read_bio);
@@ -1322,7 +1323,7 @@ read_again:
 			if (r1_bio->bios[j])
 				rdev_dec_pending(conf->mirrors[j].rdev, mddev);
 		r1_bio->state = 0;
-		allow_barrier(conf, start_next_window, bio->bi_sector);
+		allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
 		md_wait_for_blocked_rdev(blocked_rdev, mddev);
 		start_next_window = wait_barrier(conf, bio);
 		/*
@@ -1349,7 +1350,7 @@ read_again:
 			bio->bi_phys_segments++;
 		spin_unlock_irq(&conf->device_lock);
 	}
-	sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector;
+	sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
 
 	atomic_set(&r1_bio->remaining, 1);
 	atomic_set(&r1_bio->behind_remaining, 0);
@@ -1361,7 +1362,7 @@ read_again:
 			continue;
 
 		mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors);
+		bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
 
 		if (first_clone) {
 			/* do behind I/O ?
@@ -1395,7 +1396,7 @@ read_again:
 
 		r1_bio->bios[i] = mbio;
 
-		mbio->bi_sector	= (r1_bio->sector +
+		mbio->bi_iter.bi_sector	= (r1_bio->sector +
 				   conf->mirrors[i].rdev->data_offset);
 		mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
 		mbio->bi_end_io	= raid1_end_write_request;
@@ -1435,7 +1436,7 @@ read_again:
 		r1_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r1_bio->state = 0;
 		r1_bio->mddev = mddev;
-		r1_bio->sector = bio->bi_sector + sectors_handled;
+		r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
 		goto retry_write;
 	}
 
@@ -1959,14 +1960,14 @@ static int process_checks(struct r1bio *r1_bio)
 		/* fixup the bio for reuse */
 		bio_reset(b);
 		b->bi_vcnt = vcnt;
-		b->bi_size = r1_bio->sectors << 9;
-		b->bi_sector = r1_bio->sector +
+		b->bi_iter.bi_size = r1_bio->sectors << 9;
+		b->bi_iter.bi_sector = r1_bio->sector +
 			conf->mirrors[i].rdev->data_offset;
 		b->bi_bdev = conf->mirrors[i].rdev->bdev;
 		b->bi_end_io = end_sync_read;
 		b->bi_private = r1_bio;
 
-		size = b->bi_size;
+		size = b->bi_iter.bi_size;
 		for (j = 0; j < vcnt ; j++) {
 			struct bio_vec *bi;
 			bi = &b->bi_io_vec[j];
@@ -2221,11 +2222,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
 		}
 
 		wbio->bi_rw = WRITE;
-		wbio->bi_sector = r1_bio->sector;
-		wbio->bi_size = r1_bio->sectors << 9;
+		wbio->bi_iter.bi_sector = r1_bio->sector;
+		wbio->bi_iter.bi_size = r1_bio->sectors << 9;
 
 		bio_trim(wbio, sector - r1_bio->sector, sectors);
-		wbio->bi_sector += rdev->data_offset;
+		wbio->bi_iter.bi_sector += rdev->data_offset;
 		wbio->bi_bdev = rdev->bdev;
 		if (submit_bio_wait(WRITE, wbio) == 0)
 			/* failure! */
@@ -2339,7 +2340,8 @@ read_more:
 		}
 		r1_bio->read_disk = disk;
 		bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
-		bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors);
+		bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
+			 max_sectors);
 		r1_bio->bios[r1_bio->read_disk] = bio;
 		rdev = conf->mirrors[disk].rdev;
 		printk_ratelimited(KERN_ERR
@@ -2348,7 +2350,7 @@ read_more:
 				   mdname(mddev),
 				   (unsigned long long)r1_bio->sector,
 				   bdevname(rdev->bdev, b));
-		bio->bi_sector = r1_bio->sector + rdev->data_offset;
+		bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
 		bio->bi_bdev = rdev->bdev;
 		bio->bi_end_io = raid1_end_read_request;
 		bio->bi_rw = READ | do_sync;
@@ -2357,7 +2359,7 @@ read_more:
 			/* Drat - have to split this up more */
 			struct bio *mbio = r1_bio->master_bio;
 			int sectors_handled = (r1_bio->sector + max_sectors
-					       - mbio->bi_sector);
+					       - mbio->bi_iter.bi_sector);
 			r1_bio->sectors = max_sectors;
 			spin_lock_irq(&conf->device_lock);
 			if (mbio->bi_phys_segments == 0)
@@ -2375,7 +2377,8 @@ read_more:
 			r1_bio->state = 0;
 			set_bit(R1BIO_ReadError, &r1_bio->state);
 			r1_bio->mddev = mddev;
-			r1_bio->sector = mbio->bi_sector + sectors_handled;
+			r1_bio->sector = mbio->bi_iter.bi_sector +
+				sectors_handled;
 
 			goto read_more;
 		} else
@@ -2599,7 +2602,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 		}
 		if (bio->bi_end_io) {
 			atomic_inc(&rdev->nr_pending);
-			bio->bi_sector = sector_nr + rdev->data_offset;
+			bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
 			bio->bi_bdev = rdev->bdev;
 			bio->bi_private = r1_bio;
 		}
@@ -2699,7 +2702,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 				continue;
 			/* remove last page from this bio */
 			bio->bi_vcnt--;
-			bio->bi_size -= len;
+			bio->bi_iter.bi_size -= len;
 			bio->bi_flags &= ~(1<< BIO_SEG_VALID);
 		}
 		goto bio_full;
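Several hunks above (the pr_debug pair, need_to_wait_for_sync()) lean on the bio accessors rather than open-coded arithmetic. The relationships they assume, quoted for orientation (see include/linux/bio.h for the real definitions):

	#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
	#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors(bio))

so bio_end_sector(bio) - 1 is exactly the old bi_sector + bio_sectors(bio) - 1.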
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c504e8389e69..dbf3b63c2754 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1182,7 +1182,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	/* If this request crosses a chunk boundary, we need to
 	 * split it.  This will only happen for 1 PAGE (or less) requests.
 	 */
-	if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
+	if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + bio_sectors(bio)
 		     > chunk_sects
 		     && (conf->geo.near_copies < conf->geo.raid_disks
 			 || conf->prev.near_copies < conf->prev.raid_disks))) {
@@ -1193,8 +1193,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 		/* This is a one page bio that upper layers
 		 * refuse to split for us, so we need to split it.
 		 */
-		bp = bio_split(bio,
-			       chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
+		bp = bio_split(bio, chunk_sects -
+			       (bio->bi_iter.bi_sector & (chunk_sects - 1)));
 
 		/* Each of these 'make_request' calls will call 'wait_barrier'.
 		 * If the first succeeds but the second blocks due to the resync
@@ -1221,7 +1221,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	bad_map:
 	printk("md/raid10:%s: make_request bug: can't convert block across chunks"
 	       " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-	       (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
+	       (unsigned long long)bio->bi_iter.bi_sector,
+	       bio_sectors(bio) / 2);
 
 	bio_io_error(bio);
 	return;
@@ -1238,24 +1239,25 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 
 	sectors = bio_sectors(bio);
 	while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
-	    bio->bi_sector < conf->reshape_progress &&
-	    bio->bi_sector + sectors > conf->reshape_progress) {
+	    bio->bi_iter.bi_sector < conf->reshape_progress &&
+	    bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
 		/* IO spans the reshape position.  Need to wait for
 		 * reshape to pass
 		 */
 		allow_barrier(conf);
 		wait_event(conf->wait_barrier,
-			   conf->reshape_progress <= bio->bi_sector ||
-			   conf->reshape_progress >= bio->bi_sector + sectors);
+			   conf->reshape_progress <= bio->bi_iter.bi_sector ||
+			   conf->reshape_progress >= bio->bi_iter.bi_sector +
+			   sectors);
 		wait_barrier(conf);
 	}
 	if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
 	    bio_data_dir(bio) == WRITE &&
 	    (mddev->reshape_backwards
-	     ? (bio->bi_sector < conf->reshape_safe &&
-		bio->bi_sector + sectors > conf->reshape_progress)
-	     : (bio->bi_sector + sectors > conf->reshape_safe &&
-		bio->bi_sector < conf->reshape_progress))) {
+	     ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
+		bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
+	     : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
+		bio->bi_iter.bi_sector < conf->reshape_progress))) {
 		/* Need to update reshape_position in metadata */
 		mddev->reshape_position = conf->reshape_progress;
 		set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1275,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	r10_bio->sectors = sectors;
 
 	r10_bio->mddev = mddev;
-	r10_bio->sector = bio->bi_sector;
+	r10_bio->sector = bio->bi_iter.bi_sector;
 	r10_bio->state = 0;
 
 	/* We might need to issue multiple reads to different
@@ -1302,13 +1304,13 @@ read_again:
 	slot = r10_bio->read_slot;
 
 	read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-	bio_trim(read_bio, r10_bio->sector - bio->bi_sector,
+	bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
 		 max_sectors);
 
 	r10_bio->devs[slot].bio = read_bio;
 	r10_bio->devs[slot].rdev = rdev;
 
-	read_bio->bi_sector = r10_bio->devs[slot].addr +
+	read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
 		choose_data_offset(r10_bio, rdev);
 	read_bio->bi_bdev = rdev->bdev;
 	read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1322,7 @@ read_again:
 		 * need another r10_bio.
 		 */
 		sectors_handled = (r10_bio->sectors + max_sectors
-				   - bio->bi_sector);
+				   - bio->bi_iter.bi_sector);
 		r10_bio->sectors = max_sectors;
 		spin_lock_irq(&conf->device_lock);
 		if (bio->bi_phys_segments == 0)
@@ -1341,7 +1343,8 @@ read_again:
 		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 		r10_bio->state = 0;
 		r10_bio->mddev = mddev;
-		r10_bio->sector = bio->bi_sector + sectors_handled;
+		r10_bio->sector = bio->bi_iter.bi_sector +
+			sectors_handled;
 		goto read_again;
 	} else
 		generic_make_request(read_bio);
@@ -1499,7 +1502,8 @@ retry_write:
 			bio->bi_phys_segments++;
 		spin_unlock_irq(&conf->device_lock);
 	}
-	sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector;
+	sectors_handled = r10_bio->sector + max_sectors -
+		bio->bi_iter.bi_sector;
 
 	atomic_set(&r10_bio->remaining, 1);
 	bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1514,11 @@ retry_write:
 		if (r10_bio->devs[i].bio) {
 			struct md_rdev *rdev = conf->mirrors[d].rdev;
 			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-			bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
 				 max_sectors);
 			r10_bio->devs[i].bio = mbio;
 
-			mbio->bi_sector	= (r10_bio->devs[i].addr+
+			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr+
 					   choose_data_offset(r10_bio,
 							      rdev));
 			mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1557,11 @@ retry_write:
 				rdev = conf->mirrors[d].rdev;
 			}
 			mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-			bio_trim(mbio, r10_bio->sector - bio->bi_sector,
+			bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
 				 max_sectors);
 			r10_bio->devs[i].repl_bio = mbio;
 
-			mbio->bi_sector	= (r10_bio->devs[i].addr +
+			mbio->bi_iter.bi_sector	= (r10_bio->devs[i].addr +
 					   choose_data_offset(
 						r10_bio, rdev));
 			mbio->bi_bdev = rdev->bdev;
@@ -1591,7 +1595,7 @@ retry_write:
 		r10_bio->sectors = bio_sectors(bio) - sectors_handled;
 
 		r10_bio->mddev = mddev;
-		r10_bio->sector = bio->bi_sector + sectors_handled;
+		r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
 		r10_bio->state = 0;
 		goto retry_write;
 	}
@@ -2124,10 +2128,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		bio_reset(tbio);
 
 		tbio->bi_vcnt = vcnt;
-		tbio->bi_size = r10_bio->sectors << 9;
+		tbio->bi_iter.bi_size = r10_bio->sectors << 9;
 		tbio->bi_rw = WRITE;
 		tbio->bi_private = r10_bio;
-		tbio->bi_sector = r10_bio->devs[i].addr;
+		tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
 
 		for (j=0; j < vcnt ; j++) {
 			tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2148,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
 		atomic_inc(&r10_bio->remaining);
 		md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
 
-		tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
+		tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
 		tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
 		generic_make_request(tbio);
 	}
@@ -2614,8 +2618,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
 			sectors = sect_to_write;
 		/* Write at 'sector' for 'sectors' */
 		wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
-		bio_trim(wbio, sector - bio->bi_sector, sectors);
-		wbio->bi_sector = (r10_bio->devs[i].addr+
+		bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
+		wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
 				   choose_data_offset(r10_bio, rdev) +
 				   (sector - r10_bio->sector));
 		wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2691,10 @@ read_more:
 		   (unsigned long long)r10_bio->sector);
 	bio = bio_clone_mddev(r10_bio->master_bio,
 			      GFP_NOIO, mddev);
-	bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors);
+	bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
 	r10_bio->devs[slot].bio = bio;
 	r10_bio->devs[slot].rdev = rdev;
-	bio->bi_sector = r10_bio->devs[slot].addr
+	bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
 		+ choose_data_offset(r10_bio, rdev);
 	bio->bi_bdev = rdev->bdev;
 	bio->bi_rw = READ | do_sync;
@@ -2701,7 +2705,7 @@ read_more:
 		struct bio *mbio = r10_bio->master_bio;
 		int sectors_handled =
 			r10_bio->sector + max_sectors
-			- mbio->bi_sector;
+			- mbio->bi_iter.bi_sector;
 		r10_bio->sectors = max_sectors;
 		spin_lock_irq(&conf->device_lock);
 		if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2723,7 @@ read_more:
 		set_bit(R10BIO_ReadError,
 			&r10_bio->state);
 		r10_bio->mddev = mddev;
-		r10_bio->sector = mbio->bi_sector
+		r10_bio->sector = mbio->bi_iter.bi_sector
 			+ sectors_handled;
 
 		goto read_more;
@@ -3157,7 +3161,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 				bio->bi_end_io = end_sync_read;
 				bio->bi_rw = READ;
 				from_addr = r10_bio->devs[j].addr;
-				bio->bi_sector = from_addr + rdev->data_offset;
+				bio->bi_iter.bi_sector = from_addr +
+					rdev->data_offset;
 				bio->bi_bdev = rdev->bdev;
 				atomic_inc(&rdev->nr_pending);
 				/* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3186,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 					bio->bi_private = r10_bio;
 					bio->bi_end_io = end_sync_write;
 					bio->bi_rw = WRITE;
-					bio->bi_sector = to_addr
+					bio->bi_iter.bi_sector = to_addr
 						+ rdev->data_offset;
 					bio->bi_bdev = rdev->bdev;
 					atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3215,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 				bio->bi_private = r10_bio;
 				bio->bi_end_io = end_sync_write;
 				bio->bi_rw = WRITE;
-				bio->bi_sector = to_addr + rdev->data_offset;
+				bio->bi_iter.bi_sector = to_addr +
+					rdev->data_offset;
 				bio->bi_bdev = rdev->bdev;
 				atomic_inc(&r10_bio->remaining);
 				break;
@@ -3328,7 +3334,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_read;
 			bio->bi_rw = READ;
-			bio->bi_sector = sector +
+			bio->bi_iter.bi_sector = sector +
 				conf->mirrors[d].rdev->data_offset;
 			bio->bi_bdev = conf->mirrors[d].rdev->bdev;
 			count++;
@@ -3350,7 +3356,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			bio->bi_private = r10_bio;
 			bio->bi_end_io = end_sync_write;
 			bio->bi_rw = WRITE;
-			bio->bi_sector = sector +
+			bio->bi_iter.bi_sector = sector +
 				conf->mirrors[d].replacement->data_offset;
 			bio->bi_bdev = conf->mirrors[d].replacement->bdev;
 			count++;
@@ -3397,7 +3403,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 			     bio2 = bio2->bi_next) {
 				/* remove last page from this bio */
 				bio2->bi_vcnt--;
-				bio2->bi_size -= len;
+				bio2->bi_iter.bi_size -= len;
 				bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
 			}
 			goto bio_full;
@@ -4417,7 +4423,7 @@ read_more:
 	read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
 
 	read_bio->bi_bdev = rdev->bdev;
-	read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
+	read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
 			       + rdev->data_offset);
 	read_bio->bi_private = r10_bio;
 	read_bio->bi_end_io = end_sync_read;
@@ -4425,7 +4431,7 @@ read_more:
 	read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
 	read_bio->bi_flags |= 1 << BIO_UPTODATE;
 	read_bio->bi_vcnt = 0;
-	read_bio->bi_size = 0;
+	read_bio->bi_iter.bi_size = 0;
 	r10_bio->master_bio = read_bio;
 	r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
 
@@ -4451,7 +4457,8 @@ read_more:
 
 		bio_reset(b);
 		b->bi_bdev = rdev2->bdev;
-		b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+			rdev2->new_data_offset;
 		b->bi_private = r10_bio;
 		b->bi_end_io = end_reshape_write;
 		b->bi_rw = WRITE;
@@ -4478,7 +4485,7 @@ read_more:
 		     bio2 = bio2->bi_next) {
 			/* Remove last page from this bio */
 			bio2->bi_vcnt--;
-			bio2->bi_size -= len;
+			bio2->bi_iter.bi_size -= len;
 			bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
 		}
 		goto bio_full;
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 47da0af6322b..a5d9c0ee4d60 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
133 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) 133 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
134 { 134 {
135 int sectors = bio_sectors(bio); 135 int sectors = bio_sectors(bio);
136 if (bio->bi_sector + sectors < sector + STRIPE_SECTORS) 136 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
137 return bio->bi_next; 137 return bio->bi_next;
138 else 138 else
139 return NULL; 139 return NULL;
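
The hunk above shows the pattern this entire series applies: the bio's cursor fields (bi_sector, bi_size, bi_idx) move into an embedded iterator at bio->bi_iter. A minimal userspace sketch of the new layout, with all types mocked for illustration rather than taken from kernel headers:

#include <stdio.h>

typedef unsigned long long sector_t;

struct bvec_iter {
        sector_t     bi_sector;   /* device address, in 512-byte sectors */
        unsigned int bi_size;     /* residual I/O count, in bytes */
        unsigned int bi_idx;      /* current index into bi_io_vec */
};

struct bio {
        struct bvec_iter bi_iter; /* replaces the old bi_sector/bi_size/bi_idx */
};

int main(void)
{
        struct bio b = { .bi_iter = { .bi_sector = 2048, .bi_size = 4096 } };
        /* old spelling: b.bi_sector; new spelling: b.bi_iter.bi_sector */
        printf("sector %llu size %u\n", b.bi_iter.bi_sector, b.bi_iter.bi_size);
        return 0;
}
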
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
225 225
226 return_bi = bi->bi_next; 226 return_bi = bi->bi_next;
227 bi->bi_next = NULL; 227 bi->bi_next = NULL;
228 bi->bi_size = 0; 228 bi->bi_iter.bi_size = 0;
229 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 229 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
230 bi, 0); 230 bi, 0);
231 bio_endio(bi, 0); 231 bio_endio(bi, 0);
@@ -854,10 +854,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
854 bi->bi_rw, i); 854 bi->bi_rw, i);
855 atomic_inc(&sh->count); 855 atomic_inc(&sh->count);
856 if (use_new_offset(conf, sh)) 856 if (use_new_offset(conf, sh))
857 bi->bi_sector = (sh->sector 857 bi->bi_iter.bi_sector = (sh->sector
858 + rdev->new_data_offset); 858 + rdev->new_data_offset);
859 else 859 else
860 bi->bi_sector = (sh->sector 860 bi->bi_iter.bi_sector = (sh->sector
861 + rdev->data_offset); 861 + rdev->data_offset);
862 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 862 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
863 bi->bi_rw |= REQ_NOMERGE; 863 bi->bi_rw |= REQ_NOMERGE;
@@ -865,7 +865,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
865 bi->bi_vcnt = 1; 865 bi->bi_vcnt = 1;
866 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 866 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
867 bi->bi_io_vec[0].bv_offset = 0; 867 bi->bi_io_vec[0].bv_offset = 0;
868 bi->bi_size = STRIPE_SIZE; 868 bi->bi_iter.bi_size = STRIPE_SIZE;
869 /* 869 /*
870 * If this is a discard request, set bi_vcnt 0. We don't 870 * If this is a discard request, set bi_vcnt 0. We don't
871 * want to confuse SCSI because SCSI will replace payload 871 * want to confuse SCSI because SCSI will replace payload
@@ -901,15 +901,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
901 rbi->bi_rw, i); 901 rbi->bi_rw, i);
902 atomic_inc(&sh->count); 902 atomic_inc(&sh->count);
903 if (use_new_offset(conf, sh)) 903 if (use_new_offset(conf, sh))
904 rbi->bi_sector = (sh->sector 904 rbi->bi_iter.bi_sector = (sh->sector
905 + rrdev->new_data_offset); 905 + rrdev->new_data_offset);
906 else 906 else
907 rbi->bi_sector = (sh->sector 907 rbi->bi_iter.bi_sector = (sh->sector
908 + rrdev->data_offset); 908 + rrdev->data_offset);
909 rbi->bi_vcnt = 1; 909 rbi->bi_vcnt = 1;
910 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 910 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
911 rbi->bi_io_vec[0].bv_offset = 0; 911 rbi->bi_io_vec[0].bv_offset = 0;
912 rbi->bi_size = STRIPE_SIZE; 912 rbi->bi_iter.bi_size = STRIPE_SIZE;
913 /* 913 /*
914 * If this is a discard request, set bi_vcnt 0. We don't 914 * If this is a discard request, set bi_vcnt 0. We don't
915 * want to confuse SCSI because SCSI will replace payload 915 * want to confuse SCSI because SCSI will replace payload
@@ -944,10 +944,10 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
944 struct async_submit_ctl submit; 944 struct async_submit_ctl submit;
945 enum async_tx_flags flags = 0; 945 enum async_tx_flags flags = 0;
946 946
947 if (bio->bi_sector >= sector) 947 if (bio->bi_iter.bi_sector >= sector)
948 page_offset = (signed)(bio->bi_sector - sector) * 512; 948 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
949 else 949 else
950 page_offset = (signed)(sector - bio->bi_sector) * -512; 950 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
951 951
952 if (frombio) 952 if (frombio)
953 flags |= ASYNC_TX_FENCE; 953 flags |= ASYNC_TX_FENCE;
@@ -1014,7 +1014,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
1014 BUG_ON(!dev->read); 1014 BUG_ON(!dev->read);
1015 rbi = dev->read; 1015 rbi = dev->read;
1016 dev->read = NULL; 1016 dev->read = NULL;
1017 while (rbi && rbi->bi_sector < 1017 while (rbi && rbi->bi_iter.bi_sector <
1018 dev->sector + STRIPE_SECTORS) { 1018 dev->sector + STRIPE_SECTORS) {
1019 rbi2 = r5_next_bio(rbi, dev->sector); 1019 rbi2 = r5_next_bio(rbi, dev->sector);
1020 if (!raid5_dec_bi_active_stripes(rbi)) { 1020 if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1050,7 +1050,7 @@ static void ops_run_biofill(struct stripe_head *sh)
1050 dev->read = rbi = dev->toread; 1050 dev->read = rbi = dev->toread;
1051 dev->toread = NULL; 1051 dev->toread = NULL;
1052 spin_unlock_irq(&sh->stripe_lock); 1052 spin_unlock_irq(&sh->stripe_lock);
1053 while (rbi && rbi->bi_sector < 1053 while (rbi && rbi->bi_iter.bi_sector <
1054 dev->sector + STRIPE_SECTORS) { 1054 dev->sector + STRIPE_SECTORS) {
1055 tx = async_copy_data(0, rbi, dev->page, 1055 tx = async_copy_data(0, rbi, dev->page,
1056 dev->sector, tx); 1056 dev->sector, tx);
@@ -1392,7 +1392,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1392 wbi = dev->written = chosen; 1392 wbi = dev->written = chosen;
1393 spin_unlock_irq(&sh->stripe_lock); 1393 spin_unlock_irq(&sh->stripe_lock);
1394 1394
1395 while (wbi && wbi->bi_sector < 1395 while (wbi && wbi->bi_iter.bi_sector <
1396 dev->sector + STRIPE_SECTORS) { 1396 dev->sector + STRIPE_SECTORS) {
1397 if (wbi->bi_rw & REQ_FUA) 1397 if (wbi->bi_rw & REQ_FUA)
1398 set_bit(R5_WantFUA, &dev->flags); 1398 set_bit(R5_WantFUA, &dev->flags);
@@ -2616,7 +2616,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2616 int firstwrite=0; 2616 int firstwrite=0;
2617 2617
2618 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2618 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2619 (unsigned long long)bi->bi_sector, 2619 (unsigned long long)bi->bi_iter.bi_sector,
2620 (unsigned long long)sh->sector); 2620 (unsigned long long)sh->sector);
2621 2621
2622 /* 2622 /*
@@ -2634,12 +2634,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2634 firstwrite = 1; 2634 firstwrite = 1;
2635 } else 2635 } else
2636 bip = &sh->dev[dd_idx].toread; 2636 bip = &sh->dev[dd_idx].toread;
2637 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 2637 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
2638 if (bio_end_sector(*bip) > bi->bi_sector) 2638 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
2639 goto overlap; 2639 goto overlap;
2640 bip = & (*bip)->bi_next; 2640 bip = & (*bip)->bi_next;
2641 } 2641 }
2642 if (*bip && (*bip)->bi_sector < bio_end_sector(bi)) 2642 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
2643 goto overlap; 2643 goto overlap;
2644 2644
2645 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 2645 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
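
The add_stripe_bio hunk above walks a per-device bio list kept sorted by bi_iter.bi_sector and bails out on any overlapping range. A self-contained sketch of that insert-with-overlap-check; the types and names (req, insert_sorted) are illustrative, not the kernel's:

#include <stdio.h>

struct req {
        unsigned long long sector;  /* start, in sectors */
        unsigned int sectors;       /* length, in sectors */
        struct req *next;
};

static unsigned long long end_sector(const struct req *r)
{
        return r->sector + r->sectors;
}

/* Link r into the sorted list at *head; return -1 on overlap. */
static int insert_sorted(struct req **head, struct req *r)
{
        struct req **bip = head;

        while (*bip && (*bip)->sector < r->sector) {
                if (end_sector(*bip) > r->sector)
                        return -1;          /* tail of *bip overlaps r */
                bip = &(*bip)->next;
        }
        if (*bip && (*bip)->sector < end_sector(r))
                return -1;                  /* r overlaps head of *bip */
        r->next = *bip;
        *bip = r;
        return 0;
}

int main(void)
{
        struct req a = { 0, 8, NULL }, b = { 16, 8, NULL }, c = { 4, 8, NULL };
        struct req *head = NULL;
        printf("%d %d %d\n", insert_sorted(&head, &a),
               insert_sorted(&head, &b), insert_sorted(&head, &c)); /* 0 0 -1 */
        return 0;
}
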
@@ -2653,7 +2653,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2653 sector_t sector = sh->dev[dd_idx].sector; 2653 sector_t sector = sh->dev[dd_idx].sector;
2654 for (bi=sh->dev[dd_idx].towrite; 2654 for (bi=sh->dev[dd_idx].towrite;
2655 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 2655 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2656 bi && bi->bi_sector <= sector; 2656 bi && bi->bi_iter.bi_sector <= sector;
2657 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 2657 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2658 if (bio_end_sector(bi) >= sector) 2658 if (bio_end_sector(bi) >= sector)
2659 sector = bio_end_sector(bi); 2659 sector = bio_end_sector(bi);
@@ -2663,7 +2663,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2663 } 2663 }
2664 2664
2665 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2665 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2666 (unsigned long long)(*bip)->bi_sector, 2666 (unsigned long long)(*bip)->bi_iter.bi_sector,
2667 (unsigned long long)sh->sector, dd_idx); 2667 (unsigned long long)sh->sector, dd_idx);
2668 spin_unlock_irq(&sh->stripe_lock); 2668 spin_unlock_irq(&sh->stripe_lock);
2669 2669
@@ -2738,7 +2738,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2738 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2738 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2739 wake_up(&conf->wait_for_overlap); 2739 wake_up(&conf->wait_for_overlap);
2740 2740
2741 while (bi && bi->bi_sector < 2741 while (bi && bi->bi_iter.bi_sector <
2742 sh->dev[i].sector + STRIPE_SECTORS) { 2742 sh->dev[i].sector + STRIPE_SECTORS) {
2743 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2743 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2744 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2744 clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2757,7 +2757,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2757 bi = sh->dev[i].written; 2757 bi = sh->dev[i].written;
2758 sh->dev[i].written = NULL; 2758 sh->dev[i].written = NULL;
2759 if (bi) bitmap_end = 1; 2759 if (bi) bitmap_end = 1;
2760 while (bi && bi->bi_sector < 2760 while (bi && bi->bi_iter.bi_sector <
2761 sh->dev[i].sector + STRIPE_SECTORS) { 2761 sh->dev[i].sector + STRIPE_SECTORS) {
2762 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2762 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2763 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2763 clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2781,7 +2781,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2781 spin_unlock_irq(&sh->stripe_lock); 2781 spin_unlock_irq(&sh->stripe_lock);
2782 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2782 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2783 wake_up(&conf->wait_for_overlap); 2783 wake_up(&conf->wait_for_overlap);
2784 while (bi && bi->bi_sector < 2784 while (bi && bi->bi_iter.bi_sector <
2785 sh->dev[i].sector + STRIPE_SECTORS) { 2785 sh->dev[i].sector + STRIPE_SECTORS) {
2786 struct bio *nextbi = 2786 struct bio *nextbi =
2787 r5_next_bio(bi, sh->dev[i].sector); 2787 r5_next_bio(bi, sh->dev[i].sector);
@@ -3005,7 +3005,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
3005 clear_bit(R5_UPTODATE, &dev->flags); 3005 clear_bit(R5_UPTODATE, &dev->flags);
3006 wbi = dev->written; 3006 wbi = dev->written;
3007 dev->written = NULL; 3007 dev->written = NULL;
3008 while (wbi && wbi->bi_sector < 3008 while (wbi && wbi->bi_iter.bi_sector <
3009 dev->sector + STRIPE_SECTORS) { 3009 dev->sector + STRIPE_SECTORS) {
3010 wbi2 = r5_next_bio(wbi, dev->sector); 3010 wbi2 = r5_next_bio(wbi, dev->sector);
3011 if (!raid5_dec_bi_active_stripes(wbi)) { 3011 if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4097,7 +4097,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
4097 4097
4098 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 4098 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
4099 { 4099 {
4100 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 4100 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
4101 unsigned int chunk_sectors = mddev->chunk_sectors; 4101 unsigned int chunk_sectors = mddev->chunk_sectors;
4102 unsigned int bio_sectors = bio_sectors(bio); 4102 unsigned int bio_sectors = bio_sectors(bio);
4103 4103
@@ -4234,9 +4234,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4234 /* 4234 /*
4235 * compute position 4235 * compute position
4236 */ 4236 */
4237 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 4237 align_bi->bi_iter.bi_sector =
4238 0, 4238 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
4239 &dd_idx, NULL); 4239 0, &dd_idx, NULL);
4240 4240
4241 end_sector = bio_end_sector(align_bi); 4241 end_sector = bio_end_sector(align_bi);
4242 rcu_read_lock(); 4242 rcu_read_lock();
@@ -4261,7 +4261,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4261 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 4261 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
4262 4262
4263 if (!bio_fits_rdev(align_bi) || 4263 if (!bio_fits_rdev(align_bi) ||
4264 is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi), 4264 is_badblock(rdev, align_bi->bi_iter.bi_sector,
4265 bio_sectors(align_bi),
4265 &first_bad, &bad_sectors)) { 4266 &first_bad, &bad_sectors)) {
4266 /* too big in some way, or has a known bad block */ 4267 /* too big in some way, or has a known bad block */
4267 bio_put(align_bi); 4268 bio_put(align_bi);
@@ -4270,7 +4271,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4270 } 4271 }
4271 4272
4272 /* No reshape active, so we can trust rdev->data_offset */ 4273 /* No reshape active, so we can trust rdev->data_offset */
4273 align_bi->bi_sector += rdev->data_offset; 4274 align_bi->bi_iter.bi_sector += rdev->data_offset;
4274 4275
4275 spin_lock_irq(&conf->device_lock); 4276 spin_lock_irq(&conf->device_lock);
4276 wait_event_lock_irq(conf->wait_for_stripe, 4277 wait_event_lock_irq(conf->wait_for_stripe,
@@ -4282,7 +4283,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4282 if (mddev->gendisk) 4283 if (mddev->gendisk)
4283 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 4284 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
4284 align_bi, disk_devt(mddev->gendisk), 4285 align_bi, disk_devt(mddev->gendisk),
4285 raid_bio->bi_sector); 4286 raid_bio->bi_iter.bi_sector);
4286 generic_make_request(align_bi); 4287 generic_make_request(align_bi);
4287 return 1; 4288 return 1;
4288 } else { 4289 } else {
@@ -4465,8 +4466,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
4465 /* Skip discard while reshape is happening */ 4466 /* Skip discard while reshape is happening */
4466 return; 4467 return;
4467 4468
4468 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4469 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4469 last_sector = bi->bi_sector + (bi->bi_size>>9); 4470 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
4470 4471
4471 bi->bi_next = NULL; 4472 bi->bi_next = NULL;
4472 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4473 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
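
Both make_discard_request and make_request round the starting sector down to a stripe boundary with a mask; this only works because STRIPE_SECTORS is a power of two. A standalone illustration of the rounding (the value 8 is an assumption here, matching a 4K stripe of 512-byte sectors):

#include <stdio.h>

#define STRIPE_SECTORS 8ULL

int main(void)
{
        unsigned long long sector = 1234567;
        /* clear the low log2(STRIPE_SECTORS) bits to align down */
        unsigned long long logical = sector & ~(STRIPE_SECTORS - 1);
        printf("%llu -> %llu\n", sector, logical); /* 1234567 -> 1234560 */
        return 0;
}
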
@@ -4570,7 +4571,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
4570 return; 4571 return;
4571 } 4572 }
4572 4573
4573 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4574 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4574 last_sector = bio_end_sector(bi); 4575 last_sector = bio_end_sector(bi);
4575 bi->bi_next = NULL; 4576 bi->bi_next = NULL;
4576 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4577 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -5054,7 +5055,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
5054 int remaining; 5055 int remaining;
5055 int handled = 0; 5056 int handled = 0;
5056 5057
5057 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5058 logical_sector = raid_bio->bi_iter.bi_sector &
5059 ~((sector_t)STRIPE_SECTORS-1);
5058 sector = raid5_compute_sector(conf, logical_sector, 5060 sector = raid5_compute_sector(conf, logical_sector,
5059 0, &dd_idx, NULL); 5061 0, &dd_idx, NULL);
5060 last_sector = bio_end_sector(raid_bio); 5062 last_sector = bio_end_sector(raid_bio);
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 6eca019bcf30..16814a8457f8 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -819,7 +819,8 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
819 dev_info = bio->bi_bdev->bd_disk->private_data; 819 dev_info = bio->bi_bdev->bd_disk->private_data;
820 if (dev_info == NULL) 820 if (dev_info == NULL)
821 goto fail; 821 goto fail;
822 if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) 822 if ((bio->bi_iter.bi_sector & 7) != 0 ||
823 (bio->bi_iter.bi_size & 4095) != 0)
823 /* Request is not page-aligned. */ 824 /* Request is not page-aligned. */
824 goto fail; 825 goto fail;
825 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { 826 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,7 +843,7 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
842 } 843 }
843 } 844 }
844 845
845 index = (bio->bi_sector >> 3); 846 index = (bio->bi_iter.bi_sector >> 3);
846 bio_for_each_segment(bvec, bio, i) { 847 bio_for_each_segment(bvec, bio, i) {
847 page_addr = (unsigned long) 848 page_addr = (unsigned long)
848 page_address(bvec->bv_page) + bvec->bv_offset; 849 page_address(bvec->bv_page) + bvec->bv_offset;
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 464dd29d06c0..dd4e73fdb323 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -190,15 +190,16 @@ static void xpram_make_request(struct request_queue *q, struct bio *bio)
190 unsigned long bytes; 190 unsigned long bytes;
191 int i; 191 int i;
192 192
193 if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) 193 if ((bio->bi_iter.bi_sector & 7) != 0 ||
194 (bio->bi_iter.bi_size & 4095) != 0)
194 /* Request is not page-aligned. */ 195 /* Request is not page-aligned. */
195 goto fail; 196 goto fail;
196 if ((bio->bi_size >> 12) > xdev->size) 197 if ((bio->bi_iter.bi_size >> 12) > xdev->size)
197 /* Request size is not page-aligned. */ 198 /* Request size is not page-aligned. */
198 goto fail; 199 goto fail;
199 if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset) 200 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
200 goto fail; 201 goto fail;
201 index = (bio->bi_sector >> 3) + xdev->offset; 202 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
202 bio_for_each_segment(bvec, bio, i) { 203 bio_for_each_segment(bvec, bio, i) {
203 page_addr = (unsigned long) 204 page_addr = (unsigned long)
204 kmap(bvec->bv_page) + bvec->bv_offset; 205 kmap(bvec->bv_page) + bvec->bv_offset;
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index aa66361ed44b..bac04c2335aa 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,
731 731
732 bio->bi_rw &= ~REQ_WRITE; 732 bio->bi_rw &= ~REQ_WRITE;
733 or->in.bio = bio; 733 or->in.bio = bio;
734 or->in.total_bytes = bio->bi_size; 734 or->in.total_bytes = bio->bi_iter.bi_size;
735 return 0; 735 return 0;
736 } 736 }
737 737
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index e2421ea61352..53741be754b4 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -220,7 +220,7 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
220 for (bio = head; bio != NULL; bio = bio->bi_next) { 220 for (bio = head; bio != NULL; bio = bio->bi_next) {
221 LASSERT(rw == bio->bi_rw); 221 LASSERT(rw == bio->bi_rw);
222 222
223 offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset; 223 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
224 bio_for_each_segment(bvec, bio, i) { 224 bio_for_each_segment(bvec, bio, i) {
225 BUG_ON(bvec->bv_offset != 0); 225 BUG_ON(bvec->bv_offset != 0);
226 BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); 226 BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE);
@@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
313 bio = &lo->lo_bio; 313 bio = &lo->lo_bio;
314 while (*bio && (*bio)->bi_rw == rw) { 314 while (*bio && (*bio)->bi_rw == rw) {
315 CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt %u\n", 315 CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt %u\n",
316 (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size, 316 (unsigned long long)(*bio)->bi_iter.bi_sector,
317 (*bio)->bi_iter.bi_size,
317 page_count, (*bio)->bi_vcnt); 318 page_count, (*bio)->bi_vcnt);
318 if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS) 319 if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
319 break; 320 break;
@@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
347 goto err; 348 goto err;
348 349
349 CDEBUG(D_INFO, "submit bio sector %llu size %u\n", 350 CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
350 (unsigned long long)old_bio->bi_sector, old_bio->bi_size); 351 (unsigned long long)old_bio->bi_iter.bi_sector,
352 old_bio->bi_iter.bi_size);
351 353
352 spin_lock_irq(&lo->lo_lock); 354 spin_lock_irq(&lo->lo_lock);
353 inactive = (lo->lo_state != LLOOP_BOUND); 355 inactive = (lo->lo_state != LLOOP_BOUND);
@@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
367 loop_add_bio(lo, old_bio); 369 loop_add_bio(lo, old_bio);
368 return; 370 return;
369 err: 371 err:
370 cfs_bio_io_error(old_bio, old_bio->bi_size); 372 cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
371 } 373 }
372 374
373 375
@@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
378 while (bio) { 380 while (bio) {
379 struct bio *tmp = bio->bi_next; 381 struct bio *tmp = bio->bi_next;
380 bio->bi_next = NULL; 382 bio->bi_next = NULL;
381 cfs_bio_endio(bio, bio->bi_size, ret); 383 cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
382 bio = tmp; 384 bio = tmp;
383 } 385 }
384 } 386 }
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 79ce363b2ea9..e9e6f984092b 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -171,13 +171,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
171 u64 start, end, bound; 171 u64 start, end, bound;
172 172
173 /* unaligned request */ 173 /* unaligned request */
174 if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))) 174 if (unlikely(bio->bi_iter.bi_sector &
175 (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
175 return 0; 176 return 0;
176 if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))) 177 if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
177 return 0; 178 return 0;
178 179
179 start = bio->bi_sector; 180 start = bio->bi_iter.bi_sector;
180 end = start + (bio->bi_size >> SECTOR_SHIFT); 181 end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
181 bound = zram->disksize >> SECTOR_SHIFT; 182 bound = zram->disksize >> SECTOR_SHIFT;
182 /* out of range */ 183 /* out of range */
183 if (unlikely(start >= bound || end > bound || start > end)) 184 if (unlikely(start >= bound || end > bound || start > end))
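
valid_io_request above is a pure predicate: the start sector must be aligned to the logical block, the size must be a whole number of logical blocks, and the whole range must sit inside the disk. A hedged sketch with illustrative names (valid_io, SECTORS_PER_BLOCK) and zram-like constants:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SECTOR_SHIFT       9
#define LOGICAL_BLOCK_SIZE 4096
#define SECTORS_PER_BLOCK  (LOGICAL_BLOCK_SIZE >> SECTOR_SHIFT)

static bool valid_io(uint64_t sector, uint32_t bytes, uint64_t disksize)
{
        if (sector & (SECTORS_PER_BLOCK - 1))      /* unaligned start */
                return false;
        if (bytes & (LOGICAL_BLOCK_SIZE - 1))      /* unaligned length */
                return false;
        uint64_t start = sector, bound = disksize >> SECTOR_SHIFT;
        uint64_t end = start + (bytes >> SECTOR_SHIFT);
        /* the kernel version also guards wraparound with start > end */
        return start < bound && end <= bound;      /* in range */
}

int main(void)
{
        printf("%d\n", valid_io(8, 4096, 1 << 20)); /* 1: aligned, in range */
        printf("%d\n", valid_io(3, 4096, 1 << 20)); /* 0: unaligned sector */
        return 0;
}
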
@@ -684,8 +685,9 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
684 break; 685 break;
685 } 686 }
686 687
687 index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; 688 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
688 offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; 689 offset = (bio->bi_iter.bi_sector &
690 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
689 691
690 bio_for_each_segment(bvec, bio, i) { 692 bio_for_each_segment(bvec, bio, i) {
691 int max_transfer_size = PAGE_SIZE - offset; 693 int max_transfer_size = PAGE_SIZE - offset;
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c87959f12760..2d29356d0c85 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -319,7 +319,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
319 bio->bi_bdev = ib_dev->ibd_bd; 319 bio->bi_bdev = ib_dev->ibd_bd;
320 bio->bi_private = cmd; 320 bio->bi_private = cmd;
321 bio->bi_end_io = &iblock_bio_done; 321 bio->bi_end_io = &iblock_bio_done;
322 bio->bi_sector = lba; 322 bio->bi_iter.bi_sector = lba;
323 323
324 return bio; 324 return bio;
325 } 325 }
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index fc60b31453ee..08e3d1388c65 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -215,9 +215,9 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
215 { 215 {
216 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 216 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
217 217
218 BUG_ON(bio->bi_size == 0); 218 BUG_ON(bio->bi_iter.bi_size == 0);
219 219
220 return bi->tag_size * (bio->bi_size / bi->sector_size); 220 return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
221} 221}
222EXPORT_SYMBOL(bio_integrity_tag_size); 222EXPORT_SYMBOL(bio_integrity_tag_size);
223 223
@@ -300,7 +300,7 @@ static void bio_integrity_generate(struct bio *bio)
300 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 300 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
301 struct blk_integrity_exchg bix; 301 struct blk_integrity_exchg bix;
302 struct bio_vec *bv; 302 struct bio_vec *bv;
303 sector_t sector = bio->bi_sector; 303 sector_t sector = bio->bi_iter.bi_sector;
304 unsigned int i, sectors, total; 304 unsigned int i, sectors, total;
305 void *prot_buf = bio->bi_integrity->bip_buf; 305 void *prot_buf = bio->bi_integrity->bip_buf;
306 306
@@ -387,7 +387,7 @@ int bio_integrity_prep(struct bio *bio)
387 bip->bip_owns_buf = 1; 387 bip->bip_owns_buf = 1;
388 bip->bip_buf = buf; 388 bip->bip_buf = buf;
389 bip->bip_size = len; 389 bip->bip_size = len;
390 bip->bip_sector = bio->bi_sector; 390 bip->bip_sector = bio->bi_iter.bi_sector;
391 391
392 /* Map it */ 392 /* Map it */
393 offset = offset_in_page(buf); 393 offset = offset_in_page(buf);
diff --git a/fs/bio.c b/fs/bio.c
index 33d79a4eb92d..a402ad6e753f 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -532,13 +532,13 @@ void __bio_clone(struct bio *bio, struct bio *bio_src)
532 * most users will be overriding ->bi_bdev with a new target, 532 * most users will be overriding ->bi_bdev with a new target,
533 * so we don't set or calculate new physical/hw segment counts here 533 * so we don't set or calculate new physical/hw segment counts here
534 */ 534 */
535 bio->bi_sector = bio_src->bi_sector; 535 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
536 bio->bi_bdev = bio_src->bi_bdev; 536 bio->bi_bdev = bio_src->bi_bdev;
537 bio->bi_flags |= 1 << BIO_CLONED; 537 bio->bi_flags |= 1 << BIO_CLONED;
538 bio->bi_rw = bio_src->bi_rw; 538 bio->bi_rw = bio_src->bi_rw;
539 bio->bi_vcnt = bio_src->bi_vcnt; 539 bio->bi_vcnt = bio_src->bi_vcnt;
540 bio->bi_size = bio_src->bi_size; 540 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
541 bio->bi_idx = bio_src->bi_idx; 541 bio->bi_iter.bi_idx = bio_src->bi_iter.bi_idx;
542 } 542 }
543 EXPORT_SYMBOL(__bio_clone); 543 EXPORT_SYMBOL(__bio_clone);
544 544
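
A side benefit visible in the __bio_clone hunk: once the cursor lives in one plain struct, the three field copies above could collapse into a single struct assignment. A hedged sketch of that idea with mocked types (this is an illustration, not what the patch itself does):

#include <stdio.h>

struct bvec_iter { unsigned long long bi_sector; unsigned int bi_size, bi_idx; };
struct bio { struct bvec_iter bi_iter; };

static void clone_cursor(struct bio *dst, const struct bio *src)
{
        dst->bi_iter = src->bi_iter; /* sector, size and idx in one assignment */
}

int main(void)
{
        struct bio a = { { 4096, 8192, 1 } }, b = { { 0, 0, 0 } };
        clone_cursor(&b, &a);
        printf("%llu %u %u\n", b.bi_iter.bi_sector,
               b.bi_iter.bi_size, b.bi_iter.bi_idx);
        return 0;
}
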
@@ -612,7 +612,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
612 if (unlikely(bio_flagged(bio, BIO_CLONED))) 612 if (unlikely(bio_flagged(bio, BIO_CLONED)))
613 return 0; 613 return 0;
614 614
615 if (((bio->bi_size + len) >> 9) > max_sectors) 615 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
616 return 0; 616 return 0;
617 617
618 /* 618 /*
@@ -635,8 +635,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
635 simulate merging updated prev_bvec 635 simulate merging updated prev_bvec
636 as new bvec. */ 636 as new bvec. */
637 .bi_bdev = bio->bi_bdev, 637 .bi_bdev = bio->bi_bdev,
638 .bi_sector = bio->bi_sector, 638 .bi_sector = bio->bi_iter.bi_sector,
639 .bi_size = bio->bi_size - prev_bv_len, 639 .bi_size = bio->bi_iter.bi_size -
640 prev_bv_len,
640 .bi_rw = bio->bi_rw, 641 .bi_rw = bio->bi_rw,
641 }; 642 };
642 643
@@ -684,8 +685,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
684 if (q->merge_bvec_fn) { 685 if (q->merge_bvec_fn) {
685 struct bvec_merge_data bvm = { 686 struct bvec_merge_data bvm = {
686 .bi_bdev = bio->bi_bdev, 687 .bi_bdev = bio->bi_bdev,
687 .bi_sector = bio->bi_sector, 688 .bi_sector = bio->bi_iter.bi_sector,
688 .bi_size = bio->bi_size, 689 .bi_size = bio->bi_iter.bi_size,
689 .bi_rw = bio->bi_rw, 690 .bi_rw = bio->bi_rw,
690 }; 691 };
691 692
@@ -708,7 +709,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
708 bio->bi_vcnt++; 709 bio->bi_vcnt++;
709 bio->bi_phys_segments++; 710 bio->bi_phys_segments++;
710 done: 711 done:
711 bio->bi_size += len; 712 bio->bi_iter.bi_size += len;
712 return len; 713 return len;
713} 714}
714 715
@@ -807,22 +808,22 @@ void bio_advance(struct bio *bio, unsigned bytes)
807 if (bio_integrity(bio)) 808 if (bio_integrity(bio))
808 bio_integrity_advance(bio, bytes); 809 bio_integrity_advance(bio, bytes);
809 810
810 bio->bi_sector += bytes >> 9; 811 bio->bi_iter.bi_sector += bytes >> 9;
811 bio->bi_size -= bytes; 812 bio->bi_iter.bi_size -= bytes;
812 813
813 if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK) 814 if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
814 return; 815 return;
815 816
816 while (bytes) { 817 while (bytes) {
817 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) { 818 if (unlikely(bio->bi_iter.bi_idx >= bio->bi_vcnt)) {
818 WARN_ONCE(1, "bio idx %d >= vcnt %d\n", 819 WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
819 bio->bi_idx, bio->bi_vcnt); 820 bio->bi_iter.bi_idx, bio->bi_vcnt);
820 break; 821 break;
821 } 822 }
822 823
823 if (bytes >= bio_iovec(bio)->bv_len) { 824 if (bytes >= bio_iovec(bio)->bv_len) {
824 bytes -= bio_iovec(bio)->bv_len; 825 bytes -= bio_iovec(bio)->bv_len;
825 bio->bi_idx++; 826 bio->bi_iter.bi_idx++;
826 } else { 827 } else {
827 bio_iovec(bio)->bv_len -= bytes; 828 bio_iovec(bio)->bv_len -= bytes;
828 bio_iovec(bio)->bv_offset += bytes; 829 bio_iovec(bio)->bv_offset += bytes;
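
bio_advance above moves the cursor forward by a byte count: bi_sector and bi_size always advance, and unless the bio carries no iterable payload (the BIO_NO_ADVANCE_ITER_MASK check) the segment index advances too. The kernel code mutates bv_len/bv_offset in place; the sketch below models the same walk with a separate cursor, closer to the eventual iter style, using illustrative names only:

#include <assert.h>
#include <stdio.h>

struct seg { unsigned int len; };
struct cursor { unsigned int idx, off, size; };

static void advance(struct cursor *c, const struct seg *v,
                    unsigned int nsegs, unsigned int bytes)
{
        assert(bytes <= c->size);
        c->size -= bytes;
        while (bytes) {
                assert(c->idx < nsegs);
                unsigned int left = v[c->idx].len - c->off;
                if (bytes >= left) {        /* consume rest of this segment */
                        bytes -= left;
                        c->idx++;
                        c->off = 0;
                } else {                    /* stop partway into a segment */
                        c->off += bytes;
                        bytes = 0;
                }
        }
}

int main(void)
{
        struct seg v[] = { {4096}, {4096} };
        struct cursor c = { 0, 0, 8192 };
        advance(&c, v, 2, 5120);
        printf("idx=%u off=%u size=%u\n", c.idx, c.off, c.size); /* 1 1024 3072 */
        return 0;
}
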
@@ -1485,7 +1486,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1485 if (IS_ERR(bio)) 1486 if (IS_ERR(bio))
1486 return bio; 1487 return bio;
1487 1488
1488 if (bio->bi_size == len) 1489 if (bio->bi_iter.bi_size == len)
1489 return bio; 1490 return bio;
1490 1491
1491 /* 1492 /*
@@ -1763,16 +1764,16 @@ struct bio_pair *bio_split(struct bio *bi, int first_sectors)
1763 return bp; 1764 return bp;
1764 1765
1765 trace_block_split(bdev_get_queue(bi->bi_bdev), bi, 1766 trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
1766 bi->bi_sector + first_sectors); 1767 bi->bi_iter.bi_sector + first_sectors);
1767 1768
1768 BUG_ON(bio_segments(bi) > 1); 1769 BUG_ON(bio_segments(bi) > 1);
1769 atomic_set(&bp->cnt, 3); 1770 atomic_set(&bp->cnt, 3);
1770 bp->error = 0; 1771 bp->error = 0;
1771 bp->bio1 = *bi; 1772 bp->bio1 = *bi;
1772 bp->bio2 = *bi; 1773 bp->bio2 = *bi;
1773 bp->bio2.bi_sector += first_sectors; 1774 bp->bio2.bi_iter.bi_sector += first_sectors;
1774 bp->bio2.bi_size -= first_sectors << 9; 1775 bp->bio2.bi_iter.bi_size -= first_sectors << 9;
1775 bp->bio1.bi_size = first_sectors << 9; 1776 bp->bio1.bi_iter.bi_size = first_sectors << 9;
1776 1777
1777 if (bi->bi_vcnt != 0) { 1778 if (bi->bi_vcnt != 0) {
1778 bp->bv1 = *bio_iovec(bi); 1779 bp->bv1 = *bio_iovec(bi);
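
The bio_split arithmetic above: bio1 keeps the first first_sectors worth of bytes, while bio2 starts first_sectors later and shrinks by the same amount. A standalone restatement with mocked types:

#include <stdio.h>

struct iter { unsigned long long sector; unsigned int size; };

static void split(const struct iter *src, unsigned int n,
                  struct iter *a, struct iter *b)
{
        *a = *src;
        *b = *src;
        a->size = n << 9;   /* first half: exactly n sectors */
        b->sector += n;     /* second half starts n sectors in */
        b->size -= n << 9;
}

int main(void)
{
        struct iter bi = { .sector = 100, .size = 16 << 9 }, a, b;
        split(&bi, 4, &a, &b);
        printf("a: %llu+%u b: %llu+%u\n", a.sector, a.size >> 9,
               b.sector, b.size >> 9); /* a: 100+4 b: 104+12 */
        return 0;
}
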
@@ -1821,21 +1822,22 @@ void bio_trim(struct bio *bio, int offset, int size)
1821 int sofar = 0; 1822 int sofar = 0;
1822 1823
1823 size <<= 9; 1824 size <<= 9;
1824 if (offset == 0 && size == bio->bi_size) 1825 if (offset == 0 && size == bio->bi_iter.bi_size)
1825 return; 1826 return;
1826 1827
1827 clear_bit(BIO_SEG_VALID, &bio->bi_flags); 1828 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1828 1829
1829 bio_advance(bio, offset << 9); 1830 bio_advance(bio, offset << 9);
1830 1831
1831 bio->bi_size = size; 1832 bio->bi_iter.bi_size = size;
1832 1833
1833 /* avoid any complications with bi_idx being non-zero */ 1834 /* avoid any complications with bi_idx being non-zero */
1834 if (bio->bi_idx) { 1835 if (bio->bi_iter.bi_idx) {
1835 memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx, 1836 memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_iter.bi_idx,
1836 (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec)); 1837 (bio->bi_vcnt - bio->bi_iter.bi_idx) *
1837 bio->bi_vcnt -= bio->bi_idx; 1838 sizeof(struct bio_vec));
1838 bio->bi_idx = 0; 1839 bio->bi_vcnt -= bio->bi_iter.bi_idx;
1840 bio->bi_iter.bi_idx = 0;
1839 } 1841 }
1840 /* Make sure vcnt and last bv are not too big */ 1842 /* Make sure vcnt and last bv are not too big */
1841 bio_for_each_segment(bvec, bio, i) { 1843 bio_for_each_segment(bvec, bio, i) {
@@ -1871,7 +1873,7 @@ sector_t bio_sector_offset(struct bio *bio, unsigned short index,
1871 sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue); 1873 sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
1872 sectors = 0; 1874 sectors = 0;
1873 1875
1874 if (index >= bio->bi_idx) 1876 if (index >= bio->bi_iter.bi_idx)
1875 index = bio->bi_vcnt - 1; 1877 index = bio->bi_vcnt - 1;
1876 1878
1877 bio_for_each_segment_all(bv, bio, i) { 1879 bio_for_each_segment_all(bv, bio, i) {
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 131d82800b3a..cb05e1c842c5 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1695,7 +1695,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1695 return -1; 1695 return -1;
1696 } 1696 }
1697 bio->bi_bdev = block_ctx->dev->bdev; 1697 bio->bi_bdev = block_ctx->dev->bdev;
1698 bio->bi_sector = dev_bytenr >> 9; 1698 bio->bi_iter.bi_sector = dev_bytenr >> 9;
1699 1699
1700 for (j = i; j < num_pages; j++) { 1700 for (j = i; j < num_pages; j++) {
1701 ret = bio_add_page(bio, block_ctx->pagev[j], 1701 ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -3013,7 +3013,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
3013 int bio_is_patched; 3013 int bio_is_patched;
3014 char **mapped_datav; 3014 char **mapped_datav;
3015 3015
3016 dev_bytenr = 512 * bio->bi_sector; 3016 dev_bytenr = 512 * bio->bi_iter.bi_sector;
3017 bio_is_patched = 0; 3017 bio_is_patched = 0;
3018 if (dev_state->state->print_mask & 3018 if (dev_state->state->print_mask &
3019 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 3019 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
@@ -3021,8 +3021,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
3021 "submit_bio(rw=0x%x, bi_vcnt=%u," 3021 "submit_bio(rw=0x%x, bi_vcnt=%u,"
3022 " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", 3022 " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
3023 rw, bio->bi_vcnt, 3023 rw, bio->bi_vcnt,
3024 (unsigned long long)bio->bi_sector, dev_bytenr, 3024 (unsigned long long)bio->bi_iter.bi_sector,
3025 bio->bi_bdev); 3025 dev_bytenr, bio->bi_bdev);
3026 3026
3027 mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt, 3027 mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
3028 GFP_NOFS); 3028 GFP_NOFS);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index eac6784e43d7..f5cdeb4b5538 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -172,7 +172,8 @@ static void end_compressed_bio_read(struct bio *bio, int err)
172 goto out; 172 goto out;
173 173
174 inode = cb->inode; 174 inode = cb->inode;
175 ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9); 175 ret = check_compressed_csum(inode, cb,
176 (u64)bio->bi_iter.bi_sector << 9);
176 if (ret) 177 if (ret)
177 goto csum_failed; 178 goto csum_failed;
178 179
@@ -370,7 +371,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
370 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { 371 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
371 page = compressed_pages[pg_index]; 372 page = compressed_pages[pg_index];
372 page->mapping = inode->i_mapping; 373 page->mapping = inode->i_mapping;
373 if (bio->bi_size) 374 if (bio->bi_iter.bi_size)
374 ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, 375 ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
375 PAGE_CACHE_SIZE, 376 PAGE_CACHE_SIZE,
376 bio, 0); 377 bio, 0);
@@ -504,7 +505,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
504 505
505 if (!em || last_offset < em->start || 506 if (!em || last_offset < em->start ||
506 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || 507 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
507 (em->block_start >> 9) != cb->orig_bio->bi_sector) { 508 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
508 free_extent_map(em); 509 free_extent_map(em);
509 unlock_extent(tree, last_offset, end); 510 unlock_extent(tree, last_offset, end);
510 unlock_page(page); 511 unlock_page(page);
@@ -550,7 +551,7 @@ next:
550 * in it. We don't actually do IO on those pages but allocate new ones 551 * in it. We don't actually do IO on those pages but allocate new ones
551 * to hold the compressed pages on disk. 552 * to hold the compressed pages on disk.
552 * 553 *
553 * bio->bi_sector points to the compressed extent on disk 554 * bio->bi_iter.bi_sector points to the compressed extent on disk
554 * bio->bi_io_vec points to all of the inode pages 555 * bio->bi_io_vec points to all of the inode pages
555 * bio->bi_vcnt is a count of pages 556 * bio->bi_vcnt is a count of pages
556 * 557 *
@@ -571,7 +572,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
571 struct page *page; 572 struct page *page;
572 struct block_device *bdev; 573 struct block_device *bdev;
573 struct bio *comp_bio; 574 struct bio *comp_bio;
574 u64 cur_disk_byte = (u64)bio->bi_sector << 9; 575 u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
575 u64 em_len; 576 u64 em_len;
576 u64 em_start; 577 u64 em_start;
577 struct extent_map *em; 578 struct extent_map *em;
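
The (u64)bi_sector << 9 pattern in these btrfs hunks converts a sector number to a byte offset. Casting before the shift matters on configs where sector_t is only 32 bits wide: shifting first would be done in 32-bit arithmetic and silently truncate. A small demonstration with an assumed 32-bit sector type:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
        uint32_t sector = 0x01000000;              /* 16M sectors = 8 GiB in */
        uint64_t wrong = (uint64_t)(sector << 9);  /* shifted in 32 bits: 0 */
        uint64_t right = (uint64_t)sector << 9;    /* 0x200000000 */
        printf("wrong=%" PRIu64 " right=%" PRIu64 "\n", wrong, right);
        return 0;
}
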
@@ -657,7 +658,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
657 page->mapping = inode->i_mapping; 658 page->mapping = inode->i_mapping;
658 page->index = em_start >> PAGE_CACHE_SHIFT; 659 page->index = em_start >> PAGE_CACHE_SHIFT;
659 660
660 if (comp_bio->bi_size) 661 if (comp_bio->bi_iter.bi_size)
661 ret = tree->ops->merge_bio_hook(READ, page, 0, 662 ret = tree->ops->merge_bio_hook(READ, page, 0,
662 PAGE_CACHE_SIZE, 663 PAGE_CACHE_SIZE,
663 comp_bio, 0); 664 comp_bio, 0);
@@ -685,8 +686,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
685 comp_bio, sums); 686 comp_bio, sums);
686 BUG_ON(ret); /* -ENOMEM */ 687 BUG_ON(ret); /* -ENOMEM */
687 } 688 }
688 sums += (comp_bio->bi_size + root->sectorsize - 1) / 689 sums += (comp_bio->bi_iter.bi_size +
689 root->sectorsize; 690 root->sectorsize - 1) / root->sectorsize;
690 691
691 ret = btrfs_map_bio(root, READ, comp_bio, 692 ret = btrfs_map_bio(root, READ, comp_bio,
692 mirror_num, 0); 693 mirror_num, 0);
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index 8b5f9e1d1f0e..bcb6f1b780d6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1984 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); 1984 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1985 if (!bio) 1985 if (!bio)
1986 return -EIO; 1986 return -EIO;
1987 bio->bi_size = 0; 1987 bio->bi_iter.bi_size = 0;
1988 map_length = length; 1988 map_length = length;
1989 1989
1990 ret = btrfs_map_block(fs_info, WRITE, logical, 1990 ret = btrfs_map_block(fs_info, WRITE, logical,
@@ -1995,7 +1995,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1995 } 1995 }
1996 BUG_ON(mirror_num != bbio->mirror_num); 1996 BUG_ON(mirror_num != bbio->mirror_num);
1997 sector = bbio->stripes[mirror_num-1].physical >> 9; 1997 sector = bbio->stripes[mirror_num-1].physical >> 9;
1998 bio->bi_sector = sector; 1998 bio->bi_iter.bi_sector = sector;
1999 dev = bbio->stripes[mirror_num-1].dev; 1999 dev = bbio->stripes[mirror_num-1].dev;
2000 kfree(bbio); 2000 kfree(bbio);
2001 if (!dev || !dev->bdev || !dev->writeable) { 2001 if (!dev || !dev->bdev || !dev->writeable) {
@@ -2268,9 +2268,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2268 return -EIO; 2268 return -EIO;
2269 } 2269 }
2270 bio->bi_end_io = failed_bio->bi_end_io; 2270 bio->bi_end_io = failed_bio->bi_end_io;
2271 bio->bi_sector = failrec->logical >> 9; 2271 bio->bi_iter.bi_sector = failrec->logical >> 9;
2272 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; 2272 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2273 bio->bi_size = 0; 2273 bio->bi_iter.bi_size = 0;
2274 2274
2275 btrfs_failed_bio = btrfs_io_bio(failed_bio); 2275 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2276 if (btrfs_failed_bio->csum) { 2276 if (btrfs_failed_bio->csum) {
@@ -2412,7 +2412,7 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2412 struct inode *inode = page->mapping->host; 2412 struct inode *inode = page->mapping->host;
2413 2413
2414 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " 2414 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2415 "mirror=%lu\n", (u64)bio->bi_sector, err, 2415 "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
2416 io_bio->mirror_num); 2416 io_bio->mirror_num);
2417 tree = &BTRFS_I(inode)->io_tree; 2417 tree = &BTRFS_I(inode)->io_tree;
2418 2418
@@ -2543,7 +2543,7 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2543 2543
2544 if (bio) { 2544 if (bio) {
2545 bio->bi_bdev = bdev; 2545 bio->bi_bdev = bdev;
2546 bio->bi_sector = first_sector; 2546 bio->bi_iter.bi_sector = first_sector;
2547 btrfs_bio = btrfs_io_bio(bio); 2547 btrfs_bio = btrfs_io_bio(bio);
2548 btrfs_bio->csum = NULL; 2548 btrfs_bio->csum = NULL;
2549 btrfs_bio->csum_allocated = NULL; 2549 btrfs_bio->csum_allocated = NULL;
@@ -2637,7 +2637,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2637 if (bio_ret && *bio_ret) { 2637 if (bio_ret && *bio_ret) {
2638 bio = *bio_ret; 2638 bio = *bio_ret;
2639 if (old_compressed) 2639 if (old_compressed)
2640 contig = bio->bi_sector == sector; 2640 contig = bio->bi_iter.bi_sector == sector;
2641 else 2641 else
2642 contig = bio_end_sector(bio) == sector; 2642 contig = bio_end_sector(bio) == sector;
2643 2643
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 6f3848860283..84a46a42d262 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
182 if (!path) 182 if (!path)
183 return -ENOMEM; 183 return -ENOMEM;
184 184
185 nblocks = bio->bi_size >> inode->i_sb->s_blocksize_bits; 185 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
186 if (!dst) { 186 if (!dst) {
187 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { 187 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
188 btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size, 188 btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
@@ -201,7 +201,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
201 csum = (u8 *)dst; 201 csum = (u8 *)dst;
202 } 202 }
203 203
204 if (bio->bi_size > PAGE_CACHE_SIZE * 8) 204 if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
205 path->reada = 2; 205 path->reada = 2;
206 206
207 WARN_ON(bio->bi_vcnt <= 0); 207 WARN_ON(bio->bi_vcnt <= 0);
@@ -217,7 +217,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
217 path->skip_locking = 1; 217 path->skip_locking = 1;
218 } 218 }
219 219
220 disk_bytenr = (u64)bio->bi_sector << 9; 220 disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
221 if (dio) 221 if (dio)
222 offset = logical_offset; 222 offset = logical_offset;
223 while (bio_index < bio->bi_vcnt) { 223 while (bio_index < bio->bi_vcnt) {
@@ -302,7 +302,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
302 struct btrfs_dio_private *dip, struct bio *bio, 302 struct btrfs_dio_private *dip, struct bio *bio,
303 u64 offset) 303 u64 offset)
304 { 304 {
305 int len = (bio->bi_sector << 9) - dip->disk_bytenr; 305 int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
306 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); 306 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
307 int ret; 307 int ret;
308 308
@@ -447,11 +447,12 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
447 u64 offset; 447 u64 offset;
448 448
449 WARN_ON(bio->bi_vcnt <= 0); 449 WARN_ON(bio->bi_vcnt <= 0);
450 sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); 450 sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
451 GFP_NOFS);
451 if (!sums) 452 if (!sums)
452 return -ENOMEM; 453 return -ENOMEM;
453 454
454 sums->len = bio->bi_size; 455 sums->len = bio->bi_iter.bi_size;
455 INIT_LIST_HEAD(&sums->list); 456 INIT_LIST_HEAD(&sums->list);
456 457
457 if (contig) 458 if (contig)
@@ -461,7 +462,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
461 462
462 ordered = btrfs_lookup_ordered_extent(inode, offset); 463 ordered = btrfs_lookup_ordered_extent(inode, offset);
463 BUG_ON(!ordered); /* Logic error */ 464 BUG_ON(!ordered); /* Logic error */
464 sums->bytenr = (u64)bio->bi_sector << 9; 465 sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
465 index = 0; 466 index = 0;
466 467
467 while (bio_index < bio->bi_vcnt) { 468 while (bio_index < bio->bi_vcnt) {
@@ -476,7 +477,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
476 btrfs_add_ordered_sum(inode, ordered, sums); 477 btrfs_add_ordered_sum(inode, ordered, sums);
477 btrfs_put_ordered_extent(ordered); 478 btrfs_put_ordered_extent(ordered);
478 479
479 bytes_left = bio->bi_size - total_bytes; 480 bytes_left = bio->bi_iter.bi_size - total_bytes;
480 481
481 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), 482 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
482 GFP_NOFS); 483 GFP_NOFS);
@@ -484,7 +485,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
484 sums->len = bytes_left; 485 sums->len = bytes_left;
485 ordered = btrfs_lookup_ordered_extent(inode, offset); 486 ordered = btrfs_lookup_ordered_extent(inode, offset);
486 BUG_ON(!ordered); /* Logic error */ 487 BUG_ON(!ordered); /* Logic error */
487 sums->bytenr = ((u64)bio->bi_sector << 9) + 488 sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
488 total_bytes; 489 total_bytes;
489 index = 0; 490 index = 0;
490 } 491 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index d6630dc130ba..7ab0e94ad492 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1577,7 +1577,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1577 unsigned long bio_flags) 1577 unsigned long bio_flags)
1578 { 1578 {
1579 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; 1579 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1580 u64 logical = (u64)bio->bi_sector << 9; 1580 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1581 u64 length = 0; 1581 u64 length = 0;
1582 u64 map_length; 1582 u64 map_length;
1583 int ret; 1583 int ret;
@@ -1585,7 +1585,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1585 if (bio_flags & EXTENT_BIO_COMPRESSED) 1585 if (bio_flags & EXTENT_BIO_COMPRESSED)
1586 return 0; 1586 return 0;
1587 1587
1588 length = bio->bi_size; 1588 length = bio->bi_iter.bi_size;
1589 map_length = length; 1589 map_length = length;
1590 ret = btrfs_map_block(root->fs_info, rw, logical, 1590 ret = btrfs_map_block(root->fs_info, rw, logical,
1591 &map_length, NULL, 0); 1591 &map_length, NULL, 0);
@@ -6894,7 +6894,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
6894 printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " 6894 printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
6895 "sector %#Lx len %u err no %d\n", 6895 "sector %#Lx len %u err no %d\n",
6896 btrfs_ino(dip->inode), bio->bi_rw, 6896 btrfs_ino(dip->inode), bio->bi_rw,
6897 (unsigned long long)bio->bi_sector, bio->bi_size, err); 6897 (unsigned long long)bio->bi_iter.bi_sector,
6898 bio->bi_iter.bi_size, err);
6898 dip->errors = 1; 6899 dip->errors = 1;
6899 6900
6900 /* 6901 /*
@@ -6985,7 +6986,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6985 struct bio *bio; 6986 struct bio *bio;
6986 struct bio *orig_bio = dip->orig_bio; 6987 struct bio *orig_bio = dip->orig_bio;
6987 struct bio_vec *bvec = orig_bio->bi_io_vec; 6988 struct bio_vec *bvec = orig_bio->bi_io_vec;
6988 u64 start_sector = orig_bio->bi_sector; 6989 u64 start_sector = orig_bio->bi_iter.bi_sector;
6989 u64 file_offset = dip->logical_offset; 6990 u64 file_offset = dip->logical_offset;
6990 u64 submit_len = 0; 6991 u64 submit_len = 0;
6991 u64 map_length; 6992 u64 map_length;
@@ -6993,7 +6994,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6993 int ret = 0; 6994 int ret = 0;
6994 int async_submit = 0; 6995 int async_submit = 0;
6995 6996
6996 map_length = orig_bio->bi_size; 6997 map_length = orig_bio->bi_iter.bi_size;
6997 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, 6998 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
6998 &map_length, NULL, 0); 6999 &map_length, NULL, 0);
6999 if (ret) { 7000 if (ret) {
@@ -7001,7 +7002,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
7001 return -EIO; 7002 return -EIO;
7002 } 7003 }
7003 7004
7004 if (map_length >= orig_bio->bi_size) { 7005 if (map_length >= orig_bio->bi_iter.bi_size) {
7005 bio = orig_bio; 7006 bio = orig_bio;
7006 goto submit; 7007 goto submit;
7007 } 7008 }
@@ -7053,7 +7054,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
7053 bio->bi_private = dip; 7054 bio->bi_private = dip;
7054 bio->bi_end_io = btrfs_end_dio_bio; 7055 bio->bi_end_io = btrfs_end_dio_bio;
7055 7056
7056 map_length = orig_bio->bi_size; 7057 map_length = orig_bio->bi_iter.bi_size;
7057 ret = btrfs_map_block(root->fs_info, rw, 7058 ret = btrfs_map_block(root->fs_info, rw,
7058 start_sector << 9, 7059 start_sector << 9,
7059 &map_length, NULL, 0); 7060 &map_length, NULL, 0);
@@ -7111,7 +7112,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
7111 7112
7112 if (!skip_sum && !write) { 7113 if (!skip_sum && !write) {
7113 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); 7114 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
7114 sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits; 7115 sum_len = dio_bio->bi_iter.bi_size >>
7116 inode->i_sb->s_blocksize_bits;
7115 sum_len *= csum_size; 7117 sum_len *= csum_size;
7116 } else { 7118 } else {
7117 sum_len = 0; 7119 sum_len = 0;
@@ -7126,8 +7128,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
7126 dip->private = dio_bio->bi_private; 7128 dip->private = dio_bio->bi_private;
7127 dip->inode = inode; 7129 dip->inode = inode;
7128 dip->logical_offset = file_offset; 7130 dip->logical_offset = file_offset;
7129 dip->bytes = dio_bio->bi_size; 7131 dip->bytes = dio_bio->bi_iter.bi_size;
7130 dip->disk_bytenr = (u64)dio_bio->bi_sector << 9; 7132 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
7131 io_bio->bi_private = dip; 7133 io_bio->bi_private = dip;
7132 dip->errors = 0; 7134 dip->errors = 0;
7133 dip->orig_bio = io_bio; 7135 dip->orig_bio = io_bio;
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 24ac21840a9a..9af0b25d991a 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1032,8 +1032,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1032 1032
1033 /* see if we can add this page onto our existing bio */ 1033 /* see if we can add this page onto our existing bio */
1034 if (last) { 1034 if (last) {
1035 last_end = (u64)last->bi_sector << 9; 1035 last_end = (u64)last->bi_iter.bi_sector << 9;
1036 last_end += last->bi_size; 1036 last_end += last->bi_iter.bi_size;
1037 1037
1038 /* 1038 /*
1039 * we can't merge these if they are from different 1039 * we can't merge these if they are from different
@@ -1053,9 +1053,9 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1053 if (!bio) 1053 if (!bio)
1054 return -ENOMEM; 1054 return -ENOMEM;
1055 1055
1056 bio->bi_size = 0; 1056 bio->bi_iter.bi_size = 0;
1057 bio->bi_bdev = stripe->dev->bdev; 1057 bio->bi_bdev = stripe->dev->bdev;
1058 bio->bi_sector = disk_start >> 9; 1058 bio->bi_iter.bi_sector = disk_start >> 9;
1059 set_bit(BIO_UPTODATE, &bio->bi_flags); 1059 set_bit(BIO_UPTODATE, &bio->bi_flags);
1060 1060
1061 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 1061 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -1111,7 +1111,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1111 1111
1112 spin_lock_irq(&rbio->bio_list_lock); 1112 spin_lock_irq(&rbio->bio_list_lock);
1113 bio_list_for_each(bio, &rbio->bio_list) { 1113 bio_list_for_each(bio, &rbio->bio_list) {
1114 start = (u64)bio->bi_sector << 9; 1114 start = (u64)bio->bi_iter.bi_sector << 9;
1115 stripe_offset = start - rbio->raid_map[0]; 1115 stripe_offset = start - rbio->raid_map[0];
1116 page_index = stripe_offset >> PAGE_CACHE_SHIFT; 1116 page_index = stripe_offset >> PAGE_CACHE_SHIFT;
1117 1117
@@ -1272,7 +1272,7 @@ cleanup:
1272 static int find_bio_stripe(struct btrfs_raid_bio *rbio, 1272 static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1273 struct bio *bio) 1273 struct bio *bio)
1274 { 1274 {
1275 u64 physical = bio->bi_sector; 1275 u64 physical = bio->bi_iter.bi_sector;
1276 u64 stripe_start; 1276 u64 stripe_start;
1277 int i; 1277 int i;
1278 struct btrfs_bio_stripe *stripe; 1278 struct btrfs_bio_stripe *stripe;
@@ -1298,7 +1298,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1298 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, 1298 static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1299 struct bio *bio) 1299 struct bio *bio)
1300 { 1300 {
1301 u64 logical = bio->bi_sector; 1301 u64 logical = bio->bi_iter.bi_sector;
1302 u64 stripe_start; 1302 u64 stripe_start;
1303 int i; 1303 int i;
1304 1304
@@ -1602,8 +1602,8 @@ static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1602 plug_list); 1602 plug_list);
1603 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, 1603 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1604 plug_list); 1604 plug_list);
1605 u64 a_sector = ra->bio_list.head->bi_sector; 1605 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1606 u64 b_sector = rb->bio_list.head->bi_sector; 1606 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1607 1607
1608 if (a_sector < b_sector) 1608 if (a_sector < b_sector)
1609 return -1; 1609 return -1;
@@ -1691,7 +1691,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1691 if (IS_ERR(rbio)) 1691 if (IS_ERR(rbio))
1692 return PTR_ERR(rbio); 1692 return PTR_ERR(rbio);
1693 bio_list_add(&rbio->bio_list, bio); 1693 bio_list_add(&rbio->bio_list, bio);
1694 rbio->bio_list_bytes = bio->bi_size; 1694 rbio->bio_list_bytes = bio->bi_iter.bi_size;
1695 1695
1696 /* 1696 /*
1697 * don't plug on full rbios, just get them out the door 1697 * don't plug on full rbios, just get them out the door
@@ -2044,7 +2044,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2044 2044
2045 rbio->read_rebuild = 1; 2045 rbio->read_rebuild = 1;
2046 bio_list_add(&rbio->bio_list, bio); 2046 bio_list_add(&rbio->bio_list, bio);
2047 rbio->bio_list_bytes = bio->bi_size; 2047 rbio->bio_list_bytes = bio->bi_iter.bi_size;
2048 2048
2049 rbio->faila = find_logical_bio_stripe(rbio, bio); 2049 rbio->faila = find_logical_bio_stripe(rbio, bio);
2050 if (rbio->faila == -1) { 2050 if (rbio->faila == -1) {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 1fd3f33c330a..bb9a928fa3a8 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1308,7 +1308,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1308 continue; 1308 continue;
1309 } 1309 }
1310 bio->bi_bdev = page->dev->bdev; 1310 bio->bi_bdev = page->dev->bdev;
1311 bio->bi_sector = page->physical >> 9; 1311 bio->bi_iter.bi_sector = page->physical >> 9;
1312 1312
1313 bio_add_page(bio, page->page, PAGE_SIZE, 0); 1313 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1314 if (btrfsic_submit_bio_wait(READ, bio)) 1314 if (btrfsic_submit_bio_wait(READ, bio))
@@ -1427,7 +1427,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
 	if (!bio)
 		return -EIO;
 	bio->bi_bdev = page_bad->dev->bdev;
-	bio->bi_sector = page_bad->physical >> 9;
+	bio->bi_iter.bi_sector = page_bad->physical >> 9;
 
 	ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
 	if (PAGE_SIZE != ret) {
@@ -1520,7 +1520,7 @@ again:
 		bio->bi_private = sbio;
 		bio->bi_end_io = scrub_wr_bio_end_io;
 		bio->bi_bdev = sbio->dev->bdev;
-		bio->bi_sector = sbio->physical >> 9;
+		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		sbio->err = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical_for_dev_replace ||
@@ -1926,7 +1926,7 @@ again:
 		bio->bi_private = sbio;
 		bio->bi_end_io = scrub_bio_end_io;
 		bio->bi_bdev = sbio->dev->bdev;
-		bio->bi_sector = sbio->physical >> 9;
+		bio->bi_iter.bi_sector = sbio->physical >> 9;
 		sbio->err = 0;
 	} else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
 		   spage->physical ||
@@ -3371,8 +3371,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
 		spin_unlock(&sctx->stat_lock);
 		return -ENOMEM;
 	}
-	bio->bi_size = 0;
-	bio->bi_sector = physical_for_dev_replace >> 9;
+	bio->bi_iter.bi_size = 0;
+	bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
 	bio->bi_bdev = dev->bdev;
 	ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
 	if (ret != PAGE_CACHE_SIZE) {
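 
The write_page_nocow() hunk shows the common idiom: zero the residual count, set the start sector, then let bio_add_page() grow the size. A toy model of that accounting (toy_add_page() and TOY_PAGE_SIZE are illustrative, not kernel APIs):

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

struct bvec_iter { uint64_t bi_sector; unsigned int bi_size; unsigned int bi_idx; };
struct toy_bio { struct bvec_iter bi_iter; unsigned short bi_vcnt; };

static unsigned int toy_add_page(struct toy_bio *bio, unsigned int len)
{
	bio->bi_vcnt++;
	bio->bi_iter.bi_size += len;	/* the field this patch renames */
	return len;			/* real bio_add_page() returns bytes added */
}

int main(void)
{
	struct toy_bio bio = { .bi_iter = { .bi_sector = 2048, .bi_size = 0 } };

	if (toy_add_page(&bio, TOY_PAGE_SIZE) != TOY_PAGE_SIZE)
		return 1;
	printf("%u bytes queued at sector %llu\n", bio.bi_iter.bi_size,
	       (unsigned long long)bio.bi_iter.bi_sector);
	return 0;
}
 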
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 92303f42baaa..f2130de0ddc2 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5411,7 +5411,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
 	if (!q->merge_bvec_fn)
 		return 1;
 
-	bvm.bi_size = bio->bi_size - prev->bv_len;
+	bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
 	if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
 		return 0;
 	return 1;
@@ -5426,7 +5426,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
 	bio->bi_private = bbio;
 	btrfs_io_bio(bio)->stripe_index = dev_nr;
 	bio->bi_end_io = btrfs_end_bio;
-	bio->bi_sector = physical >> 9;
+	bio->bi_iter.bi_sector = physical >> 9;
 #ifdef DEBUG
 	{
 		struct rcu_string *name;
@@ -5464,7 +5464,7 @@ again:
 	while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
 		if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
 				 bvec->bv_offset) < bvec->bv_len) {
-			u64 len = bio->bi_size;
+			u64 len = bio->bi_iter.bi_size;
 
 			atomic_inc(&bbio->stripes_pending);
 			submit_stripe_bio(root, bbio, bio, physical, dev_nr,
@@ -5486,7 +5486,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
 	bio->bi_private = bbio->private;
 	bio->bi_end_io = bbio->end_io;
 	btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
-	bio->bi_sector = logical >> 9;
+	bio->bi_iter.bi_sector = logical >> 9;
 	kfree(bbio);
 	bio_endio(bio, -EIO);
 }
@@ -5497,7 +5497,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 {
 	struct btrfs_device *dev;
 	struct bio *first_bio = bio;
-	u64 logical = (u64)bio->bi_sector << 9;
+	u64 logical = (u64)bio->bi_iter.bi_sector << 9;
 	u64 length = 0;
 	u64 map_length;
 	u64 *raid_map = NULL;
@@ -5506,7 +5506,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
 	int total_devs = 1;
 	struct btrfs_bio *bbio = NULL;
 
-	length = bio->bi_size;
+	length = bio->bi_iter.bi_size;
 	map_length = length;
 
 	ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
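 
btrfs_map_bio() derives byte quantities from the iterator: the logical byte address is the 512-byte sector number shifted left by 9, and the mapping length is the residual size. The arithmetic in isolation, with made-up sample values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t bi_sector = 123456;		/* bio->bi_iter.bi_sector */
	unsigned int bi_size = 64 * 1024;	/* bio->bi_iter.bi_size   */

	uint64_t logical = bi_sector << 9;	/* byte address of the I/O */
	uint64_t map_length = bi_size;		/* bytes to map            */

	printf("logical=%llu map_length=%llu\n",
	       (unsigned long long)logical, (unsigned long long)map_length);
	return 0;	/* 123456 << 9 == 63209472 */
}
 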
diff --git a/fs/buffer.c b/fs/buffer.c
index 6024877335ca..1c04ec66974e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2982,11 +2982,11 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
 	 * let it through, and the IO layer will turn it into
 	 * an EIO.
 	 */
-	if (unlikely(bio->bi_sector >= maxsector))
+	if (unlikely(bio->bi_iter.bi_sector >= maxsector))
 		return;
 
-	maxsector -= bio->bi_sector;
-	bytes = bio->bi_size;
+	maxsector -= bio->bi_iter.bi_sector;
+	bytes = bio->bi_iter.bi_size;
 	if (likely((bytes >> 9) <= maxsector))
 		return;
 
@@ -2994,7 +2994,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
 	bytes = maxsector << 9;
 
 	/* Truncate the bio.. */
-	bio->bi_size = bytes;
+	bio->bi_iter.bi_size = bytes;
 	bio->bi_io_vec[0].bv_len = bytes;
 
 	/* ..and clear the end of the buffer for reads */
@@ -3029,14 +3029,14 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
 	 */
 	bio = bio_alloc(GFP_NOIO, 1);
 
-	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_io_vec[0].bv_page = bh->b_page;
 	bio->bi_io_vec[0].bv_len = bh->b_size;
 	bio->bi_io_vec[0].bv_offset = bh_offset(bh);
 
 	bio->bi_vcnt = 1;
-	bio->bi_size = bh->b_size;
+	bio->bi_iter.bi_size = bh->b_size;
 
 	bio->bi_end_io = end_bio_bh_io_sync;
 	bio->bi_private = bh;
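 
_submit_bh() is the canonical single-segment setup: sector from block number times blocks-per-sector, size from the buffer. A userspace model of the same computation (struct toy_bio mirrors the patched layout only loosely):

#include <stdint.h>
#include <stdio.h>

struct bvec_iter { uint64_t bi_sector; unsigned int bi_size; unsigned int bi_idx; };
struct toy_bio { struct bvec_iter bi_iter; unsigned short bi_vcnt; };

int main(void)
{
	uint64_t b_blocknr = 10;	/* buffer head block number */
	unsigned int b_size = 4096;	/* block size in bytes      */

	struct toy_bio bio = {
		.bi_iter = {
			.bi_sector = b_blocknr * (b_size >> 9),	/* 10 * 8 = 80 */
			.bi_size   = b_size,
		},
		.bi_vcnt = 1,
	};

	printf("sector=%llu size=%u\n",
	       (unsigned long long)bio.bi_iter.bi_sector, bio.bi_iter.bi_size);
	return 0;
}
 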
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0e04142d5962..160a5489a939 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -375,7 +375,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
 	bio = bio_alloc(GFP_KERNEL, nr_vecs);
 
 	bio->bi_bdev = bdev;
-	bio->bi_sector = first_sector;
+	bio->bi_iter.bi_sector = first_sector;
 	if (dio->is_async)
 		bio->bi_end_io = dio_bio_end_aio;
 	else
@@ -719,7 +719,7 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
 	if (sdio->bio) {
 		loff_t cur_offset = sdio->cur_page_fs_offset;
 		loff_t bio_next_offset = sdio->logical_offset_in_bio +
-			sdio->bio->bi_size;
+			sdio->bio->bi_iter.bi_size;
 
 		/*
 		 * See whether this new request is contiguous with the old.
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index a31e4da14508..ab95508e3d40 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -298,7 +298,7 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
 static void ext4_end_bio(struct bio *bio, int error)
 {
 	ext4_io_end_t *io_end = bio->bi_private;
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	BUG_ON(!io_end);
 	bio->bi_end_io = NULL;
@@ -366,7 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
 	bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
 	if (!bio)
 		return -ENOMEM;
-	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	bio->bi_end_io = ext4_end_bio;
 	bio->bi_private = ext4_get_io_end(io->io_end);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index a4949096cf4c..a2c8de8ba6ce 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -386,7 +386,7 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
 	bio = f2fs_bio_alloc(bdev, 1);
 
 	/* Initialize the bio */
-	bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+	bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 	bio->bi_end_io = read_end_io;
 
 	if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a90c6bc0d129..36e8afd8e1e4 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -682,7 +682,7 @@ retry:
 
 	bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
 	sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
-	sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
+	sbi->bio[type]->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
 	sbi->bio[type]->bi_private = priv;
 	/*
 	 * The end_io will be assigned at the sumbission phase.
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 010b9fb9fec6..985da945f0b5 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -272,7 +272,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
 		nrvecs = max(nrvecs/2, 1U);
 	}
 
-	bio->bi_sector = blkno * (sb->s_blocksize >> 9);
+	bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
 	bio->bi_bdev = sb->s_bdev;
 	bio->bi_end_io = gfs2_end_log_write;
 	bio->bi_private = sdp;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 82303b474958..16194da91652 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
 	lock_page(page);
 
 	bio = bio_alloc(GFP_NOFS, 1);
-	bio->bi_sector = sector * (sb->s_blocksize >> 9);
+	bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
 	bio->bi_bdev = sb->s_bdev;
 	bio_add_page(bio, page, PAGE_SIZE, 0);
 
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index e9a97a0d4314..3f999649587f 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -63,7 +63,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
 	sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
 
 	bio = bio_alloc(GFP_NOIO, 1);
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = sb->s_bdev;
 
 	if (!(rw & WRITE) && data)
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 360d27c48887..8d811e02b4b9 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1998,20 +1998,20 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
 
 	bio = bio_alloc(GFP_NOFS, 1);
 
-	bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
+	bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
 	bio->bi_bdev = log->bdev;
 	bio->bi_io_vec[0].bv_page = bp->l_page;
 	bio->bi_io_vec[0].bv_len = LOGPSIZE;
 	bio->bi_io_vec[0].bv_offset = bp->l_offset;
 
 	bio->bi_vcnt = 1;
-	bio->bi_size = LOGPSIZE;
+	bio->bi_iter.bi_size = LOGPSIZE;
 
 	bio->bi_end_io = lbmIODone;
 	bio->bi_private = bp;
 	/*check if journaling to disk has been disabled*/
 	if (log->no_integrity) {
-		bio->bi_size = 0;
+		bio->bi_iter.bi_size = 0;
 		lbmIODone(bio, 0);
 	} else {
 		submit_bio(READ_SYNC, bio);
@@ -2144,21 +2144,21 @@ static void lbmStartIO(struct lbuf * bp)
2144 jfs_info("lbmStartIO\n"); 2144 jfs_info("lbmStartIO\n");
2145 2145
2146 bio = bio_alloc(GFP_NOFS, 1); 2146 bio = bio_alloc(GFP_NOFS, 1);
2147 bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); 2147 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
2148 bio->bi_bdev = log->bdev; 2148 bio->bi_bdev = log->bdev;
2149 bio->bi_io_vec[0].bv_page = bp->l_page; 2149 bio->bi_io_vec[0].bv_page = bp->l_page;
2150 bio->bi_io_vec[0].bv_len = LOGPSIZE; 2150 bio->bi_io_vec[0].bv_len = LOGPSIZE;
2151 bio->bi_io_vec[0].bv_offset = bp->l_offset; 2151 bio->bi_io_vec[0].bv_offset = bp->l_offset;
2152 2152
2153 bio->bi_vcnt = 1; 2153 bio->bi_vcnt = 1;
2154 bio->bi_size = LOGPSIZE; 2154 bio->bi_iter.bi_size = LOGPSIZE;
2155 2155
2156 bio->bi_end_io = lbmIODone; 2156 bio->bi_end_io = lbmIODone;
2157 bio->bi_private = bp; 2157 bio->bi_private = bp;
2158 2158
2159 /* check if journaling to disk has been disabled */ 2159 /* check if journaling to disk has been disabled */
2160 if (log->no_integrity) { 2160 if (log->no_integrity) {
2161 bio->bi_size = 0; 2161 bio->bi_iter.bi_size = 0;
2162 lbmIODone(bio, 0); 2162 lbmIODone(bio, 0);
2163 } else { 2163 } else {
2164 submit_bio(WRITE_SYNC, bio); 2164 submit_bio(WRITE_SYNC, bio);
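 
The jfs "no_integrity" path above is worth noting: instead of submitting the bio, it zeroes the residual count and calls the completion handler directly, so the end_io sees a fully consumed bio. A minimal model of that shortcut (all names here are local stand-ins for the kernel types):

#include <stdint.h>
#include <stdio.h>

struct bvec_iter { uint64_t bi_sector; unsigned int bi_size; unsigned int bi_idx; };
struct toy_bio { struct bvec_iter bi_iter; void (*end_io)(struct toy_bio *, int); };

static void toy_end_io(struct toy_bio *bio, int err)
{
	printf("completed, residual=%u err=%d\n", bio->bi_iter.bi_size, err);
}

int main(void)
{
	struct toy_bio bio = { .bi_iter = { .bi_size = 4096 }, .end_io = toy_end_io };
	int no_integrity = 1;

	if (no_integrity) {
		bio.bi_iter.bi_size = 0;	/* nothing outstanding */
		bio.end_io(&bio, 0);		/* complete immediately */
	}
	return 0;
}
 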
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index d165cde0c68d..49ba7ff1bbb9 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 	 * count from hitting zero before we're through
 	 */
 	inc_io(page);
-	if (!bio->bi_size)
+	if (!bio->bi_iter.bi_size)
 		goto dump_bio;
 	submit_bio(WRITE, bio);
 	nr_underway++;
@@ -438,7 +438,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 
 	bio = bio_alloc(GFP_NOFS, 1);
 	bio->bi_bdev = inode->i_sb->s_bdev;
-	bio->bi_sector = pblock << (inode->i_blkbits - 9);
+	bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
 	bio->bi_end_io = metapage_write_end_io;
 	bio->bi_private = page;
 
@@ -452,7 +452,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
 	if (bio) {
 		if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
 			goto add_failed;
-		if (!bio->bi_size)
+		if (!bio->bi_iter.bi_size)
 			goto dump_bio;
 
 		submit_bio(WRITE, bio);
@@ -517,7 +517,8 @@ static int metapage_readpage(struct file *fp, struct page *page)
 
 		bio = bio_alloc(GFP_NOFS, 1);
 		bio->bi_bdev = inode->i_sb->s_bdev;
-		bio->bi_sector = pblock << (inode->i_blkbits - 9);
+		bio->bi_iter.bi_sector =
+			pblock << (inode->i_blkbits - 9);
 		bio->bi_end_io = metapage_read_end_io;
 		bio->bi_private = page;
 		len = xlen << inode->i_blkbits;
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index e6df3be3b31b..76279e11982d 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -26,9 +26,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
 	bio_vec.bv_len = PAGE_SIZE;
 	bio_vec.bv_offset = 0;
 	bio.bi_vcnt = 1;
-	bio.bi_size = PAGE_SIZE;
 	bio.bi_bdev = bdev;
-	bio.bi_sector = page->index * (PAGE_SIZE >> 9);
+	bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
+	bio.bi_iter.bi_size = PAGE_SIZE;
 
 	return submit_bio_wait(rw, &bio);
 }
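 
The sync_request() hunk above converts an on-stack, single-page synchronous request; the page index is turned into a sector count by multiplying by sectors-per-page. The same computation as a standalone sketch (TOY_PAGE_SIZE and struct toy_bio are illustrative):

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SIZE 4096u

struct bvec_iter { uint64_t bi_sector; unsigned int bi_size; unsigned int bi_idx; };
struct toy_bio { struct bvec_iter bi_iter; unsigned short bi_vcnt; };

int main(void)
{
	uint64_t page_index = 7;
	struct toy_bio bio = {
		.bi_vcnt = 1,
		.bi_iter = {
			/* page->index * (PAGE_SIZE >> 9): pages to sectors */
			.bi_sector = page_index * (TOY_PAGE_SIZE >> 9),
			.bi_size   = TOY_PAGE_SIZE,
		},
	};
	printf("sector=%llu size=%u\n",
	       (unsigned long long)bio.bi_iter.bi_sector, bio.bi_iter.bi_size);
	return 0;	/* sector = 7 * 8 = 56 */
}
 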
@@ -92,9 +92,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
 		if (i >= max_pages) {
 			/* Block layer cannot split bios :( */
 			bio->bi_vcnt = i;
-			bio->bi_size = i * PAGE_SIZE;
+			bio->bi_iter.bi_size = i * PAGE_SIZE;
 			bio->bi_bdev = super->s_bdev;
-			bio->bi_sector = ofs >> 9;
+			bio->bi_iter.bi_sector = ofs >> 9;
 			bio->bi_private = sb;
 			bio->bi_end_io = writeseg_end_io;
 			atomic_inc(&super->s_pending_writes);
@@ -119,9 +119,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
 		unlock_page(page);
 	}
 	bio->bi_vcnt = nr_pages;
-	bio->bi_size = nr_pages * PAGE_SIZE;
+	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
 	bio->bi_bdev = super->s_bdev;
-	bio->bi_sector = ofs >> 9;
+	bio->bi_iter.bi_sector = ofs >> 9;
 	bio->bi_private = sb;
 	bio->bi_end_io = writeseg_end_io;
 	atomic_inc(&super->s_pending_writes);
@@ -184,9 +184,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
 		if (i >= max_pages) {
 			/* Block layer cannot split bios :( */
 			bio->bi_vcnt = i;
-			bio->bi_size = i * PAGE_SIZE;
+			bio->bi_iter.bi_size = i * PAGE_SIZE;
 			bio->bi_bdev = super->s_bdev;
-			bio->bi_sector = ofs >> 9;
+			bio->bi_iter.bi_sector = ofs >> 9;
 			bio->bi_private = sb;
 			bio->bi_end_io = erase_end_io;
 			atomic_inc(&super->s_pending_writes);
@@ -205,9 +205,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
 		bio->bi_io_vec[i].bv_offset = 0;
 	}
 	bio->bi_vcnt = nr_pages;
-	bio->bi_size = nr_pages * PAGE_SIZE;
+	bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
 	bio->bi_bdev = super->s_bdev;
-	bio->bi_sector = ofs >> 9;
+	bio->bi_iter.bi_sector = ofs >> 9;
 	bio->bi_private = sb;
 	bio->bi_end_io = erase_end_io;
 	atomic_inc(&super->s_pending_writes);
diff --git a/fs/mpage.c b/fs/mpage.c
index dd6d5878f4d9..4979ffa60aaa 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -93,7 +93,7 @@ mpage_alloc(struct block_device *bdev,
 
 	if (bio) {
 		bio->bi_bdev = bdev;
-		bio->bi_sector = first_sector;
+		bio->bi_iter.bi_sector = first_sector;
 	}
 	return bio;
 }
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index da768923bf7c..56ff823ca82e 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -134,8 +134,8 @@ bl_submit_bio(int rw, struct bio *bio)
 	if (bio) {
 		get_parallel(bio->bi_private);
 		dprintk("%s submitting %s bio %u@%llu\n", __func__,
-			rw == READ ? "read" : "write",
-			bio->bi_size, (unsigned long long)bio->bi_sector);
+			rw == READ ? "read" : "write", bio->bi_iter.bi_size,
+			(unsigned long long)bio->bi_iter.bi_sector);
 		submit_bio(rw, bio);
 	}
 	return NULL;
@@ -156,7 +156,8 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
 	}
 
 	if (bio) {
-		bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+		bio->bi_iter.bi_sector = isect - be->be_f_offset +
+			be->be_v_offset;
 		bio->bi_bdev = be->be_mdev;
 		bio->bi_end_io = end_io;
 		bio->bi_private = par;
@@ -511,7 +512,7 @@ bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
 	isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
 		(offset / SECTOR_SIZE);
 
-	bio->bi_sector = isect - be->be_f_offset + be->be_v_offset;
+	bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
 	bio->bi_bdev = be->be_mdev;
 	bio->bi_end_io = bl_read_single_end_io;
 
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 2d8be51f90dc..dc3a9efdaab8 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -416,7 +416,8 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
 	}
 	if (likely(bio)) {
 		bio->bi_bdev = nilfs->ns_bdev;
-		bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9);
+		bio->bi_iter.bi_sector =
+			start << (nilfs->ns_blocksize_bits - 9);
 	}
 	return bio;
 }
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 73920ffda05b..bf482dfed14f 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -413,7 +413,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
 	}
 
 	/* Must put everything in 512 byte sectors for the bio... */
-	bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9);
+	bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
 	bio->bi_bdev = reg->hr_bdev;
 	bio->bi_private = wc;
 	bio->bi_end_io = o2hb_bio_end_io;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 71c8c9d2b882..1b19b9cd692a 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -407,7 +407,7 @@ xfs_alloc_ioend_bio(
 	struct bio		*bio = bio_alloc(GFP_NOIO, nvecs);
 
 	ASSERT(bio->bi_private == NULL);
-	bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
+	bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
 	bio->bi_bdev = bh->b_bdev;
 	return bio;
 }
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index c7f0b77dcb00..5f3ea443ebbe 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1255,7 +1255,7 @@ next_chunk:
 
 	bio = bio_alloc(GFP_NOIO, nr_pages);
 	bio->bi_bdev = bp->b_target->bt_bdev;
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_end_io = xfs_buf_bio_end_io;
 	bio->bi_private = bp;
 
@@ -1277,7 +1277,7 @@ next_chunk:
 		total_nr_pages--;
 	}
 
-	if (likely(bio->bi_size)) {
+	if (likely(bio->bi_iter.bi_size)) {
 		if (xfs_buf_is_vmapped(bp)) {
 			flush_kernel_vmap_range(bp->b_addr,
 						xfs_buf_vmap_len(bp));
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 060ff695085c..e2e0bc642ed1 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -62,19 +62,19 @@
  * on highmem page vectors
  */
 #define bio_iovec_idx(bio, idx)	(&((bio)->bi_io_vec[(idx)]))
-#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_idx)
+#define bio_iovec(bio)		bio_iovec_idx((bio), (bio)->bi_iter.bi_idx)
 #define bio_page(bio)		bio_iovec((bio))->bv_page
 #define bio_offset(bio)		bio_iovec((bio))->bv_offset
-#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_idx)
-#define bio_sectors(bio)	((bio)->bi_size >> 9)
-#define bio_end_sector(bio)	((bio)->bi_sector + bio_sectors((bio)))
+#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_iter.bi_idx)
+#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
+#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
 
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
 	if (bio->bi_vcnt)
 		return bio_iovec(bio)->bv_len;
 	else /* dataless requests such as discard */
-		return bio->bi_iter.bi_size;
 }
 
 static inline void *bio_data(struct bio *bio)
@@ -108,7 +108,7 @@ static inline void *bio_data(struct bio *bio)
  */
 
 #define __BVEC_END(bio)		bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
-#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_idx)
+#define __BVEC_START(bio)	bio_iovec_idx((bio), (bio)->bi_iter.bi_idx)
 
 /* Default implementation of BIOVEC_PHYS_MERGEABLE */
 #define __BIOVEC_PHYS_MERGEABLE(vec1, vec2)	\
@@ -150,7 +150,7 @@ static inline void *bio_data(struct bio *bio)
 	     i++)
 
 #define bio_for_each_segment(bvl, bio, i)				\
-	for (i = (bio)->bi_idx;						\
+	for (i = (bio)->bi_iter.bi_idx;					\
 	     bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt;	\
 	     i++)
 
@@ -365,7 +365,7 @@ static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx,
 #define __bio_kunmap_irq(buf, flags)	bvec_kunmap_irq(buf, flags)
 
 #define bio_kmap_irq(bio, flags) \
-	__bio_kmap_irq((bio), (bio)->bi_idx, (flags))
+	__bio_kmap_irq((bio), (bio)->bi_iter.bi_idx, (flags))
 #define bio_kunmap_irq(buf,flags)	__bio_kunmap_irq(buf, flags)
 
 /*
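 
The bio.h accessors above all become one level deeper but keep their semantics. Re-deriving them over a toy struct lets the arithmetic be checked in isolation; the macro names match the header, the types do not:

#include <stdint.h>
#include <stdio.h>

struct bvec_iter { uint64_t bi_sector; unsigned int bi_size; unsigned int bi_idx; };
struct toy_bio { struct bvec_iter bi_iter; unsigned short bi_vcnt; };

#define bio_sectors(bio)	((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio)	((bio)->bi_iter.bi_sector + bio_sectors((bio)))
#define bio_segments(bio)	((bio)->bi_vcnt - (bio)->bi_iter.bi_idx)

int main(void)
{
	struct toy_bio bio = {
		.bi_iter = { .bi_sector = 1000, .bi_size = 8192, .bi_idx = 1 },
		.bi_vcnt = 3,
	};
	printf("sectors=%llu end=%llu segs=%u\n",
	       (unsigned long long)bio_sectors(&bio),
	       (unsigned long long)bio_end_sector(&bio),
	       (unsigned)bio_segments(&bio));
	return 0;	/* sectors=16 end=1016 segs=2 */
}
 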
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 238ef0ed62f8..29b5b84d8a29 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -28,13 +28,19 @@ struct bio_vec {
 	unsigned int	bv_offset;
 };
 
+struct bvec_iter {
+	sector_t		bi_sector;	/* device address in 512 byte
+						   sectors */
+	unsigned int		bi_size;	/* residual I/O count */
+
+	unsigned int		bi_idx;		/* current index into bvl_vec */
+};
+
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
  */
 struct bio {
-	sector_t		bi_sector;	/* device address in 512 byte
-						   sectors */
 	struct bio		*bi_next;	/* request queue link */
 	struct block_device	*bi_bdev;
 	unsigned long		bi_flags;	/* status, command, etc */
@@ -42,16 +48,13 @@ struct bio {
 						 * top bits priority
 						 */
 
-	unsigned short		bi_vcnt;	/* how many bio_vec's */
-	unsigned short		bi_idx;		/* current index into bvl_vec */
+	struct bvec_iter	bi_iter;
 
 	/* Number of segments in this BIO after
 	 * physical address coalescing is performed.
 	 */
 	unsigned int		bi_phys_segments;
 
-	unsigned int		bi_size;	/* residual I/O count */
-
 	/*
 	 * To keep track of the max segment size, we account for the
 	 * sizes of the first and last mergeable segments in this bio.
@@ -74,11 +77,13 @@ struct bio {
 	struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+	unsigned short		bi_vcnt;	/* how many bio_vec's */
+
 	/*
 	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
 	 */
 
-	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
+	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */
 
 	atomic_t		bi_cnt;		/* pin count */
 
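 
This blk_types.h hunk is the heart of the patch: the three fields that describe where a bio currently points (bi_sector, bi_size, bi_idx) move into an embedded struct bvec_iter. The payoff, per the series this commit belongs to, is that the whole iteration state can now be copied, saved, and advanced as one value. A standalone sketch of that property (toy definitions, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct bvec_iter {
	uint64_t	bi_sector;	/* device address in 512 byte sectors */
	unsigned int	bi_size;	/* residual I/O count */
	unsigned int	bi_idx;		/* current index into bvl_vec */
};

struct toy_bio {
	struct toy_bio	*bi_next;
	struct bvec_iter bi_iter;	/* replaces bi_sector/bi_size/bi_idx */
	unsigned short	bi_vcnt;
};

int main(void)
{
	struct toy_bio bio = { .bi_iter = { .bi_sector = 64, .bi_size = 4096 } };

	/* The iteration state is now copyable in a single assignment,
	 * e.g. to snapshot it before a driver advances the bio: */
	struct bvec_iter saved = bio.bi_iter;

	bio.bi_iter.bi_size = 0;	/* pretend the I/O was consumed */
	bio.bi_iter = saved;		/* and restore the snapshot      */
	printf("restored size=%u\n", bio.bi_iter.bi_size);
	return 0;
}
 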
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index e2b9576d00e2..095c6e4fe1e8 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -24,10 +24,10 @@ DECLARE_EVENT_CLASS(bcache_request,
 		__entry->dev		= bio->bi_bdev->bd_dev;
 		__entry->orig_major	= d->disk->major;
 		__entry->orig_minor	= d->disk->first_minor;
-		__entry->sector		= bio->bi_sector;
-		__entry->orig_sector	= bio->bi_sector - 16;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -99,9 +99,9 @@ DECLARE_EVENT_CLASS(bcache_bio,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u",
@@ -134,9 +134,9 @@ TRACE_EVENT(bcache_read,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->cache_hit = hit;
 		__entry->bypass = bypass;
 	),
@@ -162,9 +162,9 @@ TRACE_EVENT(bcache_write,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		__entry->writeback = writeback;
 		__entry->bypass = bypass;
 	),
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 4c2301d2ef1a..e76ae19a8d6f 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -243,9 +243,9 @@ TRACE_EVENT(block_bio_bounce,
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev ?
 					  bio->bi_bdev->bd_dev : 0;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -280,10 +280,10 @@ TRACE_EVENT(block_bio_complete,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->error		= error;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u [%d]",
@@ -308,9 +308,9 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -375,9 +375,9 @@ TRACE_EVENT(block_bio_queue,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -403,7 +403,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
 
 	TP_fast_assign(
 		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
-		__entry->sector		= bio ? bio->bi_sector : 0;
+		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
 		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
 		blk_fill_rwbs(__entry->rwbs,
 			      bio ? bio->bi_rw : 0, __entry->nr_sector);
@@ -538,9 +538,9 @@ TRACE_EVENT(block_split,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->new_sector	= new_sector;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
 	),
 
@@ -579,11 +579,11 @@ TRACE_EVENT(block_bio_remap,
 
 	TP_fast_assign(
 		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
 		__entry->nr_sector	= bio_sectors(bio);
 		__entry->old_dev	= dev;
 		__entry->old_sector	= from;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
 	),
 
 	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
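 
All of these tracepoints record the same two derived values: the start sector and the sector count (residual bytes shifted right by 9). Computed standalone, with toy types and sample numbers:

#include <stdint.h>
#include <stdio.h>

struct bvec_iter { uint64_t bi_sector; unsigned int bi_size; unsigned int bi_idx; };
struct toy_bio { struct bvec_iter bi_iter; };

int main(void)
{
	struct toy_bio bio = { .bi_iter = { .bi_sector = 2048, .bi_size = 12288 } };

	uint64_t sector    = bio.bi_iter.bi_sector;
	unsigned nr_sector = bio.bi_iter.bi_size >> 9;	/* bytes -> sectors */

	/* Mirrors the TP_printk("%d,%d %s %llu + %u", ...) format above. */
	printf("8,0 W %llu + %u\n", (unsigned long long)sector, nr_sector);
	return 0;	/* prints "8,0 W 2048 + 24" */
}
 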
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index e0dc355fa317..bd3ee4fbe7a7 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -616,8 +616,8 @@ TRACE_EVENT(f2fs_do_submit_bio,
 		__entry->dev		= sb->s_dev;
 		__entry->btype		= btype;
 		__entry->sync		= sync;
-		__entry->sector		= bio->bi_sector;
-		__entry->size		= bio->bi_size;
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->size		= bio->bi_iter.bi_size;
 	),
 
 	TP_printk("dev = (%d,%d), type = %s, io = %s, sector = %lld, size = %u",
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index d09dd10c5a5e..9a58bc258810 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -32,7 +32,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector,
 	struct bio *bio;
 
 	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
 	bio->bi_bdev = bdev;
 	bio->bi_end_io = end_swap_bio_read;
 
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index f785aef65799..b418cb0d7242 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -781,8 +781,8 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
 	if (!error && !bio_flagged(bio, BIO_UPTODATE))
 		error = EIO;
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-			error, 0, NULL);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -885,8 +885,9 @@ static void blk_add_trace_split(void *ignore,
 	if (bt) {
 		__be64 rpdu = cpu_to_be64(pdu);
 
-		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+		__blk_add_trace(bt, bio->bi_iter.bi_sector,
+				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+				!bio_flagged(bio, BIO_UPTODATE),
 				sizeof(rpdu), &rpdu);
 	}
 }
@@ -918,9 +919,9 @@ static void blk_add_trace_bio_remap(void *ignore,
 	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
 	r.sector_from = cpu_to_be64(from);
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
-			sizeof(r), &r);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, BLK_TA_REMAP,
+			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
 /**
diff --git a/mm/page_io.c b/mm/page_io.c
index 8c79a4764be0..f14eded987fa 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -31,13 +31,13 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 
 	bio = bio_alloc(gfp_flags, 1);
 	if (bio) {
-		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
-		bio->bi_sector <<= PAGE_SHIFT - 9;
+		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
 		bio->bi_io_vec[0].bv_page = page;
 		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
 		bio->bi_io_vec[0].bv_offset = 0;
 		bio->bi_vcnt = 1;
-		bio->bi_size = PAGE_SIZE;
+		bio->bi_iter.bi_size = PAGE_SIZE;
 		bio->bi_end_io = end_io;
 	}
 	return bio;
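 
get_swap_bio() converts a page-granular swap slot into a sector by shifting left PAGE_SHIFT - 9. The same conversion as a standalone sketch (swap_slot and the TOY_* names stand in for what map_swap_page() would supply):

#include <stdint.h>
#include <stdio.h>

#define TOY_PAGE_SHIFT	12	/* 4 KiB pages */
#define TOY_PAGE_SIZE	(1u << TOY_PAGE_SHIFT)

struct bvec_iter { uint64_t bi_sector; unsigned int bi_size; unsigned int bi_idx; };
struct toy_bio { struct bvec_iter bi_iter; unsigned short bi_vcnt; };

int main(void)
{
	uint64_t swap_slot = 42;	/* what map_swap_page() would return */
	struct toy_bio bio = { .bi_vcnt = 1 };

	bio.bi_iter.bi_sector = swap_slot << (TOY_PAGE_SHIFT - 9);	/* 42 * 8 */
	bio.bi_iter.bi_size = TOY_PAGE_SIZE;

	printf("sector=%llu size=%u\n",
	       (unsigned long long)bio.bi_iter.bi_sector, bio.bi_iter.bi_size);
	return 0;
}
 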
@@ -62,7 +62,7 @@ void end_swap_bio_write(struct bio *bio, int err)
 		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
 		       imajor(bio->bi_bdev->bd_inode),
 		       iminor(bio->bi_bdev->bd_inode),
-		       (unsigned long long)bio->bi_sector);
+		       (unsigned long long)bio->bi_iter.bi_sector);
 		ClearPageReclaim(page);
 	}
 	end_page_writeback(page);
@@ -80,7 +80,7 @@ void end_swap_bio_read(struct bio *bio, int err)
 		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
 		       imajor(bio->bi_bdev->bd_inode),
 		       iminor(bio->bi_bdev->bd_inode),
-		       (unsigned long long)bio->bi_sector);
+		       (unsigned long long)bio->bi_iter.bi_sector);
 		goto out;
 	}
 