-rw-r--r--  Documentation/block/biodoc.txt | 7
-rw-r--r--  Documentation/block/biovecs.txt | 111
-rw-r--r--  arch/m68k/emu/nfblock.c | 13
-rw-r--r--  arch/powerpc/sysdev/axonram.c | 21
-rw-r--r--  block/blk-core.c | 40
-rw-r--r--  block/blk-flush.c | 2
-rw-r--r--  block/blk-integrity.c | 40
-rw-r--r--  block/blk-lib.c | 12
-rw-r--r--  block/blk-map.c | 6
-rw-r--r--  block/blk-merge.c | 66
-rw-r--r--  block/blk-mq.c | 2
-rw-r--r--  block/blk-throttle.c | 14
-rw-r--r--  block/elevator.c | 2
-rw-r--r--  drivers/block/aoe/aoe.h | 10
-rw-r--r--  drivers/block/aoe/aoecmd.c | 153
-rw-r--r--  drivers/block/brd.c | 16
-rw-r--r--  drivers/block/drbd/drbd_actlog.c | 2
-rw-r--r--  drivers/block/drbd/drbd_bitmap.c | 2
-rw-r--r--  drivers/block/drbd/drbd_main.c | 27
-rw-r--r--  drivers/block/drbd/drbd_receiver.c | 19
-rw-r--r--  drivers/block/drbd/drbd_req.c | 6
-rw-r--r--  drivers/block/drbd/drbd_req.h | 2
-rw-r--r--  drivers/block/drbd/drbd_worker.c | 8
-rw-r--r--  drivers/block/floppy.c | 16
-rw-r--r--  drivers/block/loop.c | 27
-rw-r--r--  drivers/block/mtip32xx/mtip32xx.c | 20
-rw-r--r--  drivers/block/nbd.c | 14
-rw-r--r--  drivers/block/nvme-core.c | 142
-rw-r--r--  drivers/block/pktcdvd.c | 182
-rw-r--r--  drivers/block/ps3disk.c | 17
-rw-r--r--  drivers/block/ps3vram.c | 12
-rw-r--r--  drivers/block/rbd.c | 91
-rw-r--r--  drivers/block/rsxx/dev.c | 6
-rw-r--r--  drivers/block/rsxx/dma.c | 15
-rw-r--r--  drivers/block/umem.c | 53
-rw-r--r--  drivers/block/xen-blkback/blkback.c | 2
-rw-r--r--  drivers/block/xen-blkfront.c | 2
-rw-r--r--  drivers/md/bcache/bcache.h | 2
-rw-r--r--  drivers/md/bcache/btree.c | 8
-rw-r--r--  drivers/md/bcache/debug.c | 21
-rw-r--r--  drivers/md/bcache/io.c | 196
-rw-r--r--  drivers/md/bcache/journal.c | 12
-rw-r--r--  drivers/md/bcache/movinggc.c | 4
-rw-r--r--  drivers/md/bcache/request.c | 131
-rw-r--r--  drivers/md/bcache/super.c | 20
-rw-r--r--  drivers/md/bcache/util.c | 4
-rw-r--r--  drivers/md/bcache/writeback.c | 6
-rw-r--r--  drivers/md/bcache/writeback.h | 2
-rw-r--r--  drivers/md/dm-bio-record.h | 37
-rw-r--r--  drivers/md/dm-bufio.c | 2
-rw-r--r--  drivers/md/dm-cache-policy-mq.c | 4
-rw-r--r--  drivers/md/dm-cache-target.c | 28
-rw-r--r--  drivers/md/dm-crypt.c | 64
-rw-r--r--  drivers/md/dm-delay.c | 7
-rw-r--r--  drivers/md/dm-flakey.c | 7
-rw-r--r--  drivers/md/dm-io.c | 37
-rw-r--r--  drivers/md/dm-linear.c | 3
-rw-r--r--  drivers/md/dm-raid1.c | 20
-rw-r--r--  drivers/md/dm-region-hash.c | 3
-rw-r--r--  drivers/md/dm-snap.c | 19
-rw-r--r--  drivers/md/dm-stripe.c | 13
-rw-r--r--  drivers/md/dm-switch.c | 4
-rw-r--r--  drivers/md/dm-thin.c | 30
-rw-r--r--  drivers/md/dm-verity.c | 62
-rw-r--r--  drivers/md/dm.c | 189
-rw-r--r--  drivers/md/faulty.c | 19
-rw-r--r--  drivers/md/linear.c | 96
-rw-r--r--  drivers/md/md.c | 12
-rw-r--r--  drivers/md/multipath.c | 13
-rw-r--r--  drivers/md/raid0.c | 79
-rw-r--r--  drivers/md/raid1.c | 75
-rw-r--r--  drivers/md/raid10.c | 194
-rw-r--r--  drivers/md/raid5.c | 84
-rw-r--r--  drivers/message/fusion/mptsas.c | 8
-rw-r--r--  drivers/s390/block/dasd_diag.c | 10
-rw-r--r--  drivers/s390/block/dasd_eckd.c | 48
-rw-r--r--  drivers/s390/block/dasd_fba.c | 26
-rw-r--r--  drivers/s390/block/dcssblk.c | 21
-rw-r--r--  drivers/s390/block/scm_blk.c | 8
-rw-r--r--  drivers/s390/block/scm_blk_cluster.c | 4
-rw-r--r--  drivers/s390/block/xpram.c | 19
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 8
-rw-r--r--  drivers/scsi/mpt2sas/mpt2sas_transport.c | 41
-rw-r--r--  drivers/scsi/mpt3sas/mpt3sas_transport.c | 39
-rw-r--r--  drivers/scsi/osd/osd_initiator.c | 2
-rw-r--r--  drivers/scsi/sd.c | 2
-rw-r--r--  drivers/scsi/sd_dif.c | 30
-rw-r--r--  drivers/staging/lustre/lustre/llite/lloop.c | 26
-rw-r--r--  drivers/staging/zram/zram_drv.c | 33
-rw-r--r--  drivers/target/target_core_iblock.c | 2
-rw-r--r--  fs/bio-integrity.c | 179
-rw-r--r--  fs/bio.c | 502
-rw-r--r--  fs/btrfs/check-integrity.c | 8
-rw-r--r--  fs/btrfs/compression.c | 27
-rw-r--r--  fs/btrfs/disk-io.c | 13
-rw-r--r--  fs/btrfs/extent_io.c | 49
-rw-r--r--  fs/btrfs/file-item.c | 19
-rw-r--r--  fs/btrfs/inode.c | 37
-rw-r--r--  fs/btrfs/raid56.c | 22
-rw-r--r--  fs/btrfs/scrub.c | 12
-rw-r--r--  fs/btrfs/volumes.c | 14
-rw-r--r--  fs/buffer.c | 12
-rw-r--r--  fs/direct-io.c | 4
-rw-r--r--  fs/ext4/page-io.c | 8
-rw-r--r--  fs/f2fs/data.c | 15
-rw-r--r--  fs/f2fs/segment.c | 14
-rw-r--r--  fs/gfs2/lops.c | 2
-rw-r--r--  fs/gfs2/ops_fstype.c | 2
-rw-r--r--  fs/hfsplus/wrapper.c | 2
-rw-r--r--  fs/jfs/jfs_logmgr.c | 12
-rw-r--r--  fs/jfs/jfs_metapage.c | 9
-rw-r--r--  fs/logfs/dev_bdev.c | 38
-rw-r--r--  fs/mpage.c | 19
-rw-r--r--  fs/nfs/blocklayout/blocklayout.c | 43
-rw-r--r--  fs/nilfs2/segbuf.c | 3
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 2
-rw-r--r--  fs/xfs/xfs_aops.c | 2
-rw-r--r--  fs/xfs/xfs_buf.c | 4
-rw-r--r--  include/linux/bio.h | 283
-rw-r--r--  include/linux/blk_types.h | 24
-rw-r--r--  include/linux/blkdev.h | 9
-rw-r--r--  include/linux/ceph/messenger.h | 4
-rw-r--r--  include/linux/dm-io.h | 4
-rw-r--r--  include/trace/events/bcache.h | 26
-rw-r--r--  include/trace/events/block.h | 26
-rw-r--r--  include/trace/events/f2fs.h | 4
-rw-r--r--  kernel/power/block_io.c | 2
-rw-r--r--  kernel/trace/blktrace.c | 15
-rw-r--r--  mm/bounce.c | 44
-rw-r--r--  mm/page_io.c | 10
-rw-r--r--  net/ceph/messenger.c | 43
131 files changed, 2041 insertions(+), 2540 deletions(-)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 8df5e8e6dceb..2101e718670d 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -447,14 +447,13 @@ struct bio_vec {
  * main unit of I/O for the block layer and lower layers (ie drivers)
  */
 struct bio {
-       sector_t            bi_sector;
        struct bio          *bi_next;    /* request queue link */
        struct block_device *bi_bdev;    /* target device */
        unsigned long       bi_flags;    /* status, command, etc */
        unsigned long       bi_rw;       /* low bits: r/w, high: priority */

        unsigned int        bi_vcnt;     /* how may bio_vec's */
-       unsigned int        bi_idx;      /* current index into bio_vec array */
+       struct bvec_iter    bi_iter;     /* current index into bio_vec array */

        unsigned int        bi_size;     /* total size in bytes */
        unsigned short      bi_phys_segments; /* segments after physaddr coalesce*/
@@ -480,7 +479,7 @@ With this multipage bio design:
 - Code that traverses the req list can find all the segments of a bio
   by using rq_for_each_segment. This handles the fact that a request
   has multiple bios, each of which can have multiple segments.
-- Drivers which can't process a large bio in one shot can use the bi_idx
+- Drivers which can't process a large bio in one shot can use the bi_iter
   field to keep track of the next bio_vec entry to process.
   (e.g a 1MB bio_vec needs to be handled in max 128kB chunks for IDE)
   [TBD: Should preferably also have a bi_voffset and bi_vlen to avoid modifying
@@ -589,7 +588,7 @@ driver should not modify these values. The block layer sets up the
 nr_sectors and current_nr_sectors fields (based on the corresponding
 hard_xxx values and the number of bytes transferred) and updates it on
 every transfer that invokes end_that_request_first. It does the same for the
-buffer, bio, bio->bi_idx fields too.
+buffer, bio, bio->bi_iter fields too.

 The buffer field is just a virtual address mapping of the current segment
 of the i/o buffer in cases where the buffer resides in low-memory. For high
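
For reference, here is a sketch of the iterator type these biodoc.txt hunks now point at, put together from the fields named in the new Documentation/block/biovecs.txt added below. The exact declaration and comments in include/linux/bio.h may differ; this is only meant to show which per-I/O state moves out of struct bio:

        struct bvec_iter {
                sector_t        bi_sector;      /* device address, in 512-byte sectors */
                unsigned int    bi_size;        /* residual I/O count, in bytes */
                unsigned int    bi_idx;         /* current index into the bvec array */
                unsigned int    bi_bvec_done;   /* bytes completed in the current bvec */
        };

struct bio embeds this as bio->bi_iter, which is the field the updated documentation above refers to.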
diff --git a/Documentation/block/biovecs.txt b/Documentation/block/biovecs.txt
new file mode 100644
index 000000000000..74a32ad52f53
--- /dev/null
+++ b/Documentation/block/biovecs.txt
@@ -0,0 +1,111 @@
+
+Immutable biovecs and biovec iterators:
+=======================================
+
+Kent Overstreet <kmo@daterainc.com>
+
+As of 3.13, biovecs should never be modified after a bio has been submitted.
+Instead, we have a new struct bvec_iter which represents a range of a biovec -
+the iterator will be modified as the bio is completed, not the biovec.
+
+More specifically, old code that needed to partially complete a bio would
+update bi_sector and bi_size, and advance bi_idx to the next biovec. If it
+ended up partway through a biovec, it would increment bv_offset and decrement
+bv_len by the number of bytes completed in that biovec.
+
+In the new scheme of things, everything that must be mutated in order to
+partially complete a bio is segregated into struct bvec_iter: bi_sector,
+bi_size and bi_idx have been moved there; and instead of modifying bv_offset
+and bv_len, struct bvec_iter has bi_bvec_done, which represents the number of
+bytes completed in the current bvec.
+
+There are a bunch of new helper macros for hiding the gory details - in
+particular, presenting the illusion of partially completed biovecs so that
+normal code doesn't have to deal with bi_bvec_done.
+
+ * Driver code should no longer refer to biovecs directly; we now have
+   bio_iovec() and bio_iovec_iter() macros that return literal struct biovecs,
+   constructed from the raw biovecs but taking into account bi_bvec_done and
+   bi_size.
+
+   bio_for_each_segment() has been updated to take a bvec_iter argument
+   instead of an integer (that corresponded to bi_idx); for a lot of code the
+   conversion just required changing the types of the arguments to
+   bio_for_each_segment().
+
+ * Advancing a bvec_iter is done with bio_advance_iter(); bio_advance() is a
+   wrapper around bio_advance_iter() that operates on bio->bi_iter, and also
+   advances the bio integrity's iter if present.
+
+   There is a lower level advance function - bvec_iter_advance() - which takes
+   a pointer to a biovec, not a bio; this is used by the bio integrity code.
+
+What's all this get us?
+=======================
+
+Having a real iterator, and making biovecs immutable, has a number of
+advantages:
+
+ * Before, iterating over bios was very awkward when you weren't processing
+   exactly one bvec at a time - for example, bio_copy_data() in fs/bio.c,
+   which copies the contents of one bio into another. Because the biovecs
+   wouldn't necessarily be the same size, the old code was tricky and
+   convoluted - it had to walk two different bios at the same time, keeping
+   both bi_idx and an offset into the current biovec for each.
+
+   The new code is much more straightforward - have a look. This sort of
+   pattern comes up in a lot of places; a lot of drivers were essentially open
+   coding bvec iterators before, and having a common implementation
+   considerably simplifies a lot of code.
+
+ * Before, any code that might need to use the biovec after the bio had been
+   completed (perhaps to copy the data somewhere else, or perhaps to resubmit
+   it somewhere else if there was an error) had to save the entire bvec array
+   - again, this was being done in a fair number of places.
+
+ * Biovecs can be shared between multiple bios - a bvec iter can represent an
+   arbitrary range of an existing biovec, both starting and ending midway
+   through biovecs. This is what enables efficient splitting of arbitrary
+   bios. Note that this means we _only_ use bi_size to determine when we've
+   reached the end of a bio, not bi_vcnt - and the bio_iovec() macro takes
+   bi_size into account when constructing biovecs.
+
+ * Splitting bios is now much simpler. The old bio_split() didn't even work on
+   bios with more than a single bvec! Now, we can efficiently split arbitrary
+   size bios - because the new bio can share the old bio's biovec.
+
+   Care must be taken to ensure the biovec isn't freed while the split bio is
+   still using it, in case the original bio completes first, though. Using
+   bio_chain() when splitting bios helps with this.
+
+ * Submitting partially completed bios is now perfectly fine - this comes up
+   occasionally in stacking block drivers and various code (e.g. md and
+   bcache) had some ugly workarounds for this.
+
+   It used to be the case that submitting a partially completed bio would work
+   fine with _most_ devices, but since accessing the raw bvec array was the
+   norm, not all drivers would respect bi_idx and those would break. Now,
+   since all drivers _must_ go through the bvec iterator - and have been
+   audited to make sure they are - submitting partially completed bios is
+   perfectly fine.
+
+Other implications:
+===================
+
+ * Almost all usage of bi_idx is now incorrect and has been removed; instead,
+   where previously you would have used bi_idx you'd now use a bvec_iter,
+   probably passing it to one of the helper macros.
+
+   I.e. instead of using bio_iovec_idx() (or bio->bi_iovec[bio->bi_idx]), you
+   now use bio_iter_iovec(), which takes a bvec_iter and returns a
+   literal struct bio_vec - constructed on the fly from the raw biovec but
+   taking into account bi_bvec_done (and bi_size).
+
+ * bi_vcnt can't be trusted or relied upon by driver code - i.e. anything that
+   doesn't actually own the bio. The reason is twofold: firstly, it's not
+   actually needed for iterating over the bio anymore - we only use bi_size.
+   Secondly, when cloning a bio and reusing (a portion of) the original bio's
+   biovec, in order to calculate bi_vcnt for the new bio we'd have to iterate
+   over all the biovecs in the new bio - which is silly as it's not needed.
+
+So, don't use bi_vcnt anymore.
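
Before the driver-by-driver hunks that follow, a minimal before/after sketch of the per-segment iteration change they all apply. do_segment() here is a placeholder for whatever a driver does with each segment, not a real helper:

        /* Old style: bio_for_each_segment() indexed the raw bvec array. */
        struct bio_vec *bv;
        int i;

        bio_for_each_segment(bv, bio, i)
                do_segment(bv->bv_page, bv->bv_offset, bv->bv_len);

        /* New style: iterate with a struct bvec_iter.  bv is now a value,
         * constructed on the fly and already clamped to bi_size/bi_bvec_done. */
        struct bio_vec bv;
        struct bvec_iter iter;

        bio_for_each_segment(bv, bio, iter)
                do_segment(bv.bv_page, bv.bv_offset, bv.bv_len);

Partial completion then becomes a call to bio_advance() (or bio_advance_iter() on a private iterator) instead of hand-rolled bv_offset/bv_len arithmetic, which is exactly the open-coded logic removed from aoecmd.c and other drivers below.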
diff --git a/arch/m68k/emu/nfblock.c b/arch/m68k/emu/nfblock.c
index 0721858fbd1e..2d75ae246167 100644
--- a/arch/m68k/emu/nfblock.c
+++ b/arch/m68k/emu/nfblock.c
@@ -62,17 +62,18 @@ struct nfhd_device {
62static void nfhd_make_request(struct request_queue *queue, struct bio *bio) 62static void nfhd_make_request(struct request_queue *queue, struct bio *bio)
63{ 63{
64 struct nfhd_device *dev = queue->queuedata; 64 struct nfhd_device *dev = queue->queuedata;
65 struct bio_vec *bvec; 65 struct bio_vec bvec;
66 int i, dir, len, shift; 66 struct bvec_iter iter;
67 sector_t sec = bio->bi_sector; 67 int dir, len, shift;
68 sector_t sec = bio->bi_iter.bi_sector;
68 69
69 dir = bio_data_dir(bio); 70 dir = bio_data_dir(bio);
70 shift = dev->bshift; 71 shift = dev->bshift;
71 bio_for_each_segment(bvec, bio, i) { 72 bio_for_each_segment(bvec, bio, iter) {
72 len = bvec->bv_len; 73 len = bvec.bv_len;
73 len >>= 9; 74 len >>= 9;
74 nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift, 75 nfhd_read_write(dev->id, 0, dir, sec >> shift, len >> shift,
75 bvec_to_phys(bvec)); 76 bvec_to_phys(&bvec));
76 sec += len; 77 sec += len;
77 } 78 }
78 bio_endio(bio, 0); 79 bio_endio(bio, 0);
diff --git a/arch/powerpc/sysdev/axonram.c b/arch/powerpc/sysdev/axonram.c
index 1c16141c031c..47b6b9f81d43 100644
--- a/arch/powerpc/sysdev/axonram.c
+++ b/arch/powerpc/sysdev/axonram.c
@@ -109,27 +109,28 @@ axon_ram_make_request(struct request_queue *queue, struct bio *bio)
109 struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; 109 struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data;
110 unsigned long phys_mem, phys_end; 110 unsigned long phys_mem, phys_end;
111 void *user_mem; 111 void *user_mem;
112 struct bio_vec *vec; 112 struct bio_vec vec;
113 unsigned int transfered; 113 unsigned int transfered;
114 unsigned short idx; 114 struct bvec_iter iter;
115 115
116 phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT); 116 phys_mem = bank->io_addr + (bio->bi_iter.bi_sector <<
117 AXON_RAM_SECTOR_SHIFT);
117 phys_end = bank->io_addr + bank->size; 118 phys_end = bank->io_addr + bank->size;
118 transfered = 0; 119 transfered = 0;
119 bio_for_each_segment(vec, bio, idx) { 120 bio_for_each_segment(vec, bio, iter) {
120 if (unlikely(phys_mem + vec->bv_len > phys_end)) { 121 if (unlikely(phys_mem + vec.bv_len > phys_end)) {
121 bio_io_error(bio); 122 bio_io_error(bio);
122 return; 123 return;
123 } 124 }
124 125
125 user_mem = page_address(vec->bv_page) + vec->bv_offset; 126 user_mem = page_address(vec.bv_page) + vec.bv_offset;
126 if (bio_data_dir(bio) == READ) 127 if (bio_data_dir(bio) == READ)
127 memcpy(user_mem, (void *) phys_mem, vec->bv_len); 128 memcpy(user_mem, (void *) phys_mem, vec.bv_len);
128 else 129 else
129 memcpy((void *) phys_mem, user_mem, vec->bv_len); 130 memcpy((void *) phys_mem, user_mem, vec.bv_len);
130 131
131 phys_mem += vec->bv_len; 132 phys_mem += vec.bv_len;
132 transfered += vec->bv_len; 133 transfered += vec.bv_len;
133 } 134 }
134 bio_endio(bio, 0); 135 bio_endio(bio, 0);
135} 136}
diff --git a/block/blk-core.c b/block/blk-core.c
index 8bdd0121212a..5da8e900d3b1 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -130,7 +130,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
130 bio_advance(bio, nbytes); 130 bio_advance(bio, nbytes);
131 131
132 /* don't actually finish bio if it's part of flush sequence */ 132 /* don't actually finish bio if it's part of flush sequence */
133 if (bio->bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ)) 133 if (bio->bi_iter.bi_size == 0 && !(rq->cmd_flags & REQ_FLUSH_SEQ))
134 bio_endio(bio, error); 134 bio_endio(bio, error);
135} 135}
136 136
@@ -1326,7 +1326,7 @@ void blk_add_request_payload(struct request *rq, struct page *page,
1326 bio->bi_io_vec->bv_offset = 0; 1326 bio->bi_io_vec->bv_offset = 0;
1327 bio->bi_io_vec->bv_len = len; 1327 bio->bi_io_vec->bv_len = len;
1328 1328
1329 bio->bi_size = len; 1329 bio->bi_iter.bi_size = len;
1330 bio->bi_vcnt = 1; 1330 bio->bi_vcnt = 1;
1331 bio->bi_phys_segments = 1; 1331 bio->bi_phys_segments = 1;
1332 1332
@@ -1351,7 +1351,7 @@ bool bio_attempt_back_merge(struct request_queue *q, struct request *req,
1351 1351
1352 req->biotail->bi_next = bio; 1352 req->biotail->bi_next = bio;
1353 req->biotail = bio; 1353 req->biotail = bio;
1354 req->__data_len += bio->bi_size; 1354 req->__data_len += bio->bi_iter.bi_size;
1355 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1355 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1356 1356
1357 blk_account_io_start(req, false); 1357 blk_account_io_start(req, false);
@@ -1380,8 +1380,8 @@ bool bio_attempt_front_merge(struct request_queue *q, struct request *req,
1380 * not touch req->buffer either... 1380 * not touch req->buffer either...
1381 */ 1381 */
1382 req->buffer = bio_data(bio); 1382 req->buffer = bio_data(bio);
1383 req->__sector = bio->bi_sector; 1383 req->__sector = bio->bi_iter.bi_sector;
1384 req->__data_len += bio->bi_size; 1384 req->__data_len += bio->bi_iter.bi_size;
1385 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio)); 1385 req->ioprio = ioprio_best(req->ioprio, bio_prio(bio));
1386 1386
1387 blk_account_io_start(req, false); 1387 blk_account_io_start(req, false);
@@ -1459,7 +1459,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
1459 req->cmd_flags |= REQ_FAILFAST_MASK; 1459 req->cmd_flags |= REQ_FAILFAST_MASK;
1460 1460
1461 req->errors = 0; 1461 req->errors = 0;
1462 req->__sector = bio->bi_sector; 1462 req->__sector = bio->bi_iter.bi_sector;
1463 req->ioprio = bio_prio(bio); 1463 req->ioprio = bio_prio(bio);
1464 blk_rq_bio_prep(req->q, req, bio); 1464 blk_rq_bio_prep(req->q, req, bio);
1465} 1465}
@@ -1583,12 +1583,12 @@ static inline void blk_partition_remap(struct bio *bio)
1583 if (bio_sectors(bio) && bdev != bdev->bd_contains) { 1583 if (bio_sectors(bio) && bdev != bdev->bd_contains) {
1584 struct hd_struct *p = bdev->bd_part; 1584 struct hd_struct *p = bdev->bd_part;
1585 1585
1586 bio->bi_sector += p->start_sect; 1586 bio->bi_iter.bi_sector += p->start_sect;
1587 bio->bi_bdev = bdev->bd_contains; 1587 bio->bi_bdev = bdev->bd_contains;
1588 1588
1589 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio, 1589 trace_block_bio_remap(bdev_get_queue(bio->bi_bdev), bio,
1590 bdev->bd_dev, 1590 bdev->bd_dev,
1591 bio->bi_sector - p->start_sect); 1591 bio->bi_iter.bi_sector - p->start_sect);
1592 } 1592 }
1593} 1593}
1594 1594
@@ -1654,7 +1654,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
1654 /* Test device or partition size, when known. */ 1654 /* Test device or partition size, when known. */
1655 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9; 1655 maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
1656 if (maxsector) { 1656 if (maxsector) {
1657 sector_t sector = bio->bi_sector; 1657 sector_t sector = bio->bi_iter.bi_sector;
1658 1658
1659 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) { 1659 if (maxsector < nr_sectors || maxsector - nr_sectors < sector) {
1660 /* 1660 /*
@@ -1690,7 +1690,7 @@ generic_make_request_checks(struct bio *bio)
1690 "generic_make_request: Trying to access " 1690 "generic_make_request: Trying to access "
1691 "nonexistent block-device %s (%Lu)\n", 1691 "nonexistent block-device %s (%Lu)\n",
1692 bdevname(bio->bi_bdev, b), 1692 bdevname(bio->bi_bdev, b),
1693 (long long) bio->bi_sector); 1693 (long long) bio->bi_iter.bi_sector);
1694 goto end_io; 1694 goto end_io;
1695 } 1695 }
1696 1696
@@ -1704,9 +1704,9 @@ generic_make_request_checks(struct bio *bio)
1704 } 1704 }
1705 1705
1706 part = bio->bi_bdev->bd_part; 1706 part = bio->bi_bdev->bd_part;
1707 if (should_fail_request(part, bio->bi_size) || 1707 if (should_fail_request(part, bio->bi_iter.bi_size) ||
1708 should_fail_request(&part_to_disk(part)->part0, 1708 should_fail_request(&part_to_disk(part)->part0,
1709 bio->bi_size)) 1709 bio->bi_iter.bi_size))
1710 goto end_io; 1710 goto end_io;
1711 1711
1712 /* 1712 /*
@@ -1865,7 +1865,7 @@ void submit_bio(int rw, struct bio *bio)
1865 if (rw & WRITE) { 1865 if (rw & WRITE) {
1866 count_vm_events(PGPGOUT, count); 1866 count_vm_events(PGPGOUT, count);
1867 } else { 1867 } else {
1868 task_io_account_read(bio->bi_size); 1868 task_io_account_read(bio->bi_iter.bi_size);
1869 count_vm_events(PGPGIN, count); 1869 count_vm_events(PGPGIN, count);
1870 } 1870 }
1871 1871
@@ -1874,7 +1874,7 @@ void submit_bio(int rw, struct bio *bio)
1874 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n", 1874 printk(KERN_DEBUG "%s(%d): %s block %Lu on %s (%u sectors)\n",
1875 current->comm, task_pid_nr(current), 1875 current->comm, task_pid_nr(current),
1876 (rw & WRITE) ? "WRITE" : "READ", 1876 (rw & WRITE) ? "WRITE" : "READ",
1877 (unsigned long long)bio->bi_sector, 1877 (unsigned long long)bio->bi_iter.bi_sector,
1878 bdevname(bio->bi_bdev, b), 1878 bdevname(bio->bi_bdev, b),
1879 count); 1879 count);
1880 } 1880 }
@@ -2007,7 +2007,7 @@ unsigned int blk_rq_err_bytes(const struct request *rq)
2007 for (bio = rq->bio; bio; bio = bio->bi_next) { 2007 for (bio = rq->bio; bio; bio = bio->bi_next) {
2008 if ((bio->bi_rw & ff) != ff) 2008 if ((bio->bi_rw & ff) != ff)
2009 break; 2009 break;
2010 bytes += bio->bi_size; 2010 bytes += bio->bi_iter.bi_size;
2011 } 2011 }
2012 2012
2013 /* this could lead to infinite loop */ 2013 /* this could lead to infinite loop */
@@ -2378,9 +2378,9 @@ bool blk_update_request(struct request *req, int error, unsigned int nr_bytes)
2378 total_bytes = 0; 2378 total_bytes = 0;
2379 while (req->bio) { 2379 while (req->bio) {
2380 struct bio *bio = req->bio; 2380 struct bio *bio = req->bio;
2381 unsigned bio_bytes = min(bio->bi_size, nr_bytes); 2381 unsigned bio_bytes = min(bio->bi_iter.bi_size, nr_bytes);
2382 2382
2383 if (bio_bytes == bio->bi_size) 2383 if (bio_bytes == bio->bi_iter.bi_size)
2384 req->bio = bio->bi_next; 2384 req->bio = bio->bi_next;
2385 2385
2386 req_bio_endio(req, bio, bio_bytes, error); 2386 req_bio_endio(req, bio, bio_bytes, error);
@@ -2728,7 +2728,7 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2728 rq->nr_phys_segments = bio_phys_segments(q, bio); 2728 rq->nr_phys_segments = bio_phys_segments(q, bio);
2729 rq->buffer = bio_data(bio); 2729 rq->buffer = bio_data(bio);
2730 } 2730 }
2731 rq->__data_len = bio->bi_size; 2731 rq->__data_len = bio->bi_iter.bi_size;
2732 rq->bio = rq->biotail = bio; 2732 rq->bio = rq->biotail = bio;
2733 2733
2734 if (bio->bi_bdev) 2734 if (bio->bi_bdev)
@@ -2746,10 +2746,10 @@ void blk_rq_bio_prep(struct request_queue *q, struct request *rq,
2746void rq_flush_dcache_pages(struct request *rq) 2746void rq_flush_dcache_pages(struct request *rq)
2747{ 2747{
2748 struct req_iterator iter; 2748 struct req_iterator iter;
2749 struct bio_vec *bvec; 2749 struct bio_vec bvec;
2750 2750
2751 rq_for_each_segment(bvec, rq, iter) 2751 rq_for_each_segment(bvec, rq, iter)
2752 flush_dcache_page(bvec->bv_page); 2752 flush_dcache_page(bvec.bv_page);
2753} 2753}
2754EXPORT_SYMBOL_GPL(rq_flush_dcache_pages); 2754EXPORT_SYMBOL_GPL(rq_flush_dcache_pages);
2755#endif 2755#endif
diff --git a/block/blk-flush.c b/block/blk-flush.c
index fb6f3c0ffa49..9288aaf35c21 100644
--- a/block/blk-flush.c
+++ b/block/blk-flush.c
@@ -548,7 +548,7 @@ int blkdev_issue_flush(struct block_device *bdev, gfp_t gfp_mask,
548 * copied from blk_rq_pos(rq). 548 * copied from blk_rq_pos(rq).
549 */ 549 */
550 if (error_sector) 550 if (error_sector)
551 *error_sector = bio->bi_sector; 551 *error_sector = bio->bi_iter.bi_sector;
552 552
553 bio_put(bio); 553 bio_put(bio);
554 return ret; 554 return ret;
diff --git a/block/blk-integrity.c b/block/blk-integrity.c
index 03cf7179e8ef..7fbab84399e6 100644
--- a/block/blk-integrity.c
+++ b/block/blk-integrity.c
@@ -43,30 +43,32 @@ static const char *bi_unsupported_name = "unsupported";
43 */ 43 */
44int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio) 44int blk_rq_count_integrity_sg(struct request_queue *q, struct bio *bio)
45{ 45{
46 struct bio_vec *iv, *ivprv = NULL; 46 struct bio_vec iv, ivprv = { NULL };
47 unsigned int segments = 0; 47 unsigned int segments = 0;
48 unsigned int seg_size = 0; 48 unsigned int seg_size = 0;
49 unsigned int i = 0; 49 struct bvec_iter iter;
50 int prev = 0;
50 51
51 bio_for_each_integrity_vec(iv, bio, i) { 52 bio_for_each_integrity_vec(iv, bio, iter) {
52 53
53 if (ivprv) { 54 if (prev) {
54 if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv)) 55 if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
55 goto new_segment; 56 goto new_segment;
56 57
57 if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv)) 58 if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
58 goto new_segment; 59 goto new_segment;
59 60
60 if (seg_size + iv->bv_len > queue_max_segment_size(q)) 61 if (seg_size + iv.bv_len > queue_max_segment_size(q))
61 goto new_segment; 62 goto new_segment;
62 63
63 seg_size += iv->bv_len; 64 seg_size += iv.bv_len;
64 } else { 65 } else {
65new_segment: 66new_segment:
66 segments++; 67 segments++;
67 seg_size = iv->bv_len; 68 seg_size = iv.bv_len;
68 } 69 }
69 70
71 prev = 1;
70 ivprv = iv; 72 ivprv = iv;
71 } 73 }
72 74
@@ -87,24 +89,25 @@ EXPORT_SYMBOL(blk_rq_count_integrity_sg);
87int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio, 89int blk_rq_map_integrity_sg(struct request_queue *q, struct bio *bio,
88 struct scatterlist *sglist) 90 struct scatterlist *sglist)
89{ 91{
90 struct bio_vec *iv, *ivprv = NULL; 92 struct bio_vec iv, ivprv = { NULL };
91 struct scatterlist *sg = NULL; 93 struct scatterlist *sg = NULL;
92 unsigned int segments = 0; 94 unsigned int segments = 0;
93 unsigned int i = 0; 95 struct bvec_iter iter;
96 int prev = 0;
94 97
95 bio_for_each_integrity_vec(iv, bio, i) { 98 bio_for_each_integrity_vec(iv, bio, iter) {
96 99
97 if (ivprv) { 100 if (prev) {
98 if (!BIOVEC_PHYS_MERGEABLE(ivprv, iv)) 101 if (!BIOVEC_PHYS_MERGEABLE(&ivprv, &iv))
99 goto new_segment; 102 goto new_segment;
100 103
101 if (!BIOVEC_SEG_BOUNDARY(q, ivprv, iv)) 104 if (!BIOVEC_SEG_BOUNDARY(q, &ivprv, &iv))
102 goto new_segment; 105 goto new_segment;
103 106
104 if (sg->length + iv->bv_len > queue_max_segment_size(q)) 107 if (sg->length + iv.bv_len > queue_max_segment_size(q))
105 goto new_segment; 108 goto new_segment;
106 109
107 sg->length += iv->bv_len; 110 sg->length += iv.bv_len;
108 } else { 111 } else {
109new_segment: 112new_segment:
110 if (!sg) 113 if (!sg)
@@ -114,10 +117,11 @@ new_segment:
114 sg = sg_next(sg); 117 sg = sg_next(sg);
115 } 118 }
116 119
117 sg_set_page(sg, iv->bv_page, iv->bv_len, iv->bv_offset); 120 sg_set_page(sg, iv.bv_page, iv.bv_len, iv.bv_offset);
118 segments++; 121 segments++;
119 } 122 }
120 123
124 prev = 1;
121 ivprv = iv; 125 ivprv = iv;
122 } 126 }
123 127
diff --git a/block/blk-lib.c b/block/blk-lib.c
index 9b5b561cb928..2da76c999ef3 100644
--- a/block/blk-lib.c
+++ b/block/blk-lib.c
@@ -108,12 +108,12 @@ int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
108 req_sects = end_sect - sector; 108 req_sects = end_sect - sector;
109 } 109 }
110 110
111 bio->bi_sector = sector; 111 bio->bi_iter.bi_sector = sector;
112 bio->bi_end_io = bio_batch_end_io; 112 bio->bi_end_io = bio_batch_end_io;
113 bio->bi_bdev = bdev; 113 bio->bi_bdev = bdev;
114 bio->bi_private = &bb; 114 bio->bi_private = &bb;
115 115
116 bio->bi_size = req_sects << 9; 116 bio->bi_iter.bi_size = req_sects << 9;
117 nr_sects -= req_sects; 117 nr_sects -= req_sects;
118 sector = end_sect; 118 sector = end_sect;
119 119
@@ -174,7 +174,7 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
174 break; 174 break;
175 } 175 }
176 176
177 bio->bi_sector = sector; 177 bio->bi_iter.bi_sector = sector;
178 bio->bi_end_io = bio_batch_end_io; 178 bio->bi_end_io = bio_batch_end_io;
179 bio->bi_bdev = bdev; 179 bio->bi_bdev = bdev;
180 bio->bi_private = &bb; 180 bio->bi_private = &bb;
@@ -184,11 +184,11 @@ int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
184 bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev); 184 bio->bi_io_vec->bv_len = bdev_logical_block_size(bdev);
185 185
186 if (nr_sects > max_write_same_sectors) { 186 if (nr_sects > max_write_same_sectors) {
187 bio->bi_size = max_write_same_sectors << 9; 187 bio->bi_iter.bi_size = max_write_same_sectors << 9;
188 nr_sects -= max_write_same_sectors; 188 nr_sects -= max_write_same_sectors;
189 sector += max_write_same_sectors; 189 sector += max_write_same_sectors;
190 } else { 190 } else {
191 bio->bi_size = nr_sects << 9; 191 bio->bi_iter.bi_size = nr_sects << 9;
192 nr_sects = 0; 192 nr_sects = 0;
193 } 193 }
194 194
@@ -240,7 +240,7 @@ int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
240 break; 240 break;
241 } 241 }
242 242
243 bio->bi_sector = sector; 243 bio->bi_iter.bi_sector = sector;
244 bio->bi_bdev = bdev; 244 bio->bi_bdev = bdev;
245 bio->bi_end_io = bio_batch_end_io; 245 bio->bi_end_io = bio_batch_end_io;
246 bio->bi_private = &bb; 246 bio->bi_private = &bb;
diff --git a/block/blk-map.c b/block/blk-map.c
index 623e1cd4cffe..ae4ae1047fd9 100644
--- a/block/blk-map.c
+++ b/block/blk-map.c
@@ -20,7 +20,7 @@ int blk_rq_append_bio(struct request_queue *q, struct request *rq,
20 rq->biotail->bi_next = bio; 20 rq->biotail->bi_next = bio;
21 rq->biotail = bio; 21 rq->biotail = bio;
22 22
23 rq->__data_len += bio->bi_size; 23 rq->__data_len += bio->bi_iter.bi_size;
24 } 24 }
25 return 0; 25 return 0;
26} 26}
@@ -76,7 +76,7 @@ static int __blk_rq_map_user(struct request_queue *q, struct request *rq,
76 76
77 ret = blk_rq_append_bio(q, rq, bio); 77 ret = blk_rq_append_bio(q, rq, bio);
78 if (!ret) 78 if (!ret)
79 return bio->bi_size; 79 return bio->bi_iter.bi_size;
80 80
81 /* if it was boucned we must call the end io function */ 81 /* if it was boucned we must call the end io function */
82 bio_endio(bio, 0); 82 bio_endio(bio, 0);
@@ -220,7 +220,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
220 if (IS_ERR(bio)) 220 if (IS_ERR(bio))
221 return PTR_ERR(bio); 221 return PTR_ERR(bio);
222 222
223 if (bio->bi_size != len) { 223 if (bio->bi_iter.bi_size != len) {
224 /* 224 /*
225 * Grab an extra reference to this bio, as bio_unmap_user() 225 * Grab an extra reference to this bio, as bio_unmap_user()
226 * expects to be able to drop it twice as it happens on the 226 * expects to be able to drop it twice as it happens on the
diff --git a/block/blk-merge.c b/block/blk-merge.c
index 1ffc58977835..8f8adaa95466 100644
--- a/block/blk-merge.c
+++ b/block/blk-merge.c
@@ -12,10 +12,11 @@
12static unsigned int __blk_recalc_rq_segments(struct request_queue *q, 12static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
13 struct bio *bio) 13 struct bio *bio)
14{ 14{
15 struct bio_vec *bv, *bvprv = NULL; 15 struct bio_vec bv, bvprv = { NULL };
16 int cluster, i, high, highprv = 1; 16 int cluster, high, highprv = 1;
17 unsigned int seg_size, nr_phys_segs; 17 unsigned int seg_size, nr_phys_segs;
18 struct bio *fbio, *bbio; 18 struct bio *fbio, *bbio;
19 struct bvec_iter iter;
19 20
20 if (!bio) 21 if (!bio)
21 return 0; 22 return 0;
@@ -25,25 +26,23 @@ static unsigned int __blk_recalc_rq_segments(struct request_queue *q,
25 seg_size = 0; 26 seg_size = 0;
26 nr_phys_segs = 0; 27 nr_phys_segs = 0;
27 for_each_bio(bio) { 28 for_each_bio(bio) {
28 bio_for_each_segment(bv, bio, i) { 29 bio_for_each_segment(bv, bio, iter) {
29 /* 30 /*
30 * the trick here is making sure that a high page is 31 * the trick here is making sure that a high page is
31 * never considered part of another segment, since that 32 * never considered part of another segment, since that
32 * might change with the bounce page. 33 * might change with the bounce page.
33 */ 34 */
34 high = page_to_pfn(bv->bv_page) > queue_bounce_pfn(q); 35 high = page_to_pfn(bv.bv_page) > queue_bounce_pfn(q);
35 if (high || highprv) 36 if (!high && !highprv && cluster) {
36 goto new_segment; 37 if (seg_size + bv.bv_len
37 if (cluster) {
38 if (seg_size + bv->bv_len
39 > queue_max_segment_size(q)) 38 > queue_max_segment_size(q))
40 goto new_segment; 39 goto new_segment;
41 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bv)) 40 if (!BIOVEC_PHYS_MERGEABLE(&bvprv, &bv))
42 goto new_segment; 41 goto new_segment;
43 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bv)) 42 if (!BIOVEC_SEG_BOUNDARY(q, &bvprv, &bv))
44 goto new_segment; 43 goto new_segment;
45 44
46 seg_size += bv->bv_len; 45 seg_size += bv.bv_len;
47 bvprv = bv; 46 bvprv = bv;
48 continue; 47 continue;
49 } 48 }
@@ -54,7 +53,7 @@ new_segment:
54 53
55 nr_phys_segs++; 54 nr_phys_segs++;
56 bvprv = bv; 55 bvprv = bv;
57 seg_size = bv->bv_len; 56 seg_size = bv.bv_len;
58 highprv = high; 57 highprv = high;
59 } 58 }
60 bbio = bio; 59 bbio = bio;
@@ -87,6 +86,9 @@ EXPORT_SYMBOL(blk_recount_segments);
87static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio, 86static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
88 struct bio *nxt) 87 struct bio *nxt)
89{ 88{
89 struct bio_vec end_bv = { NULL }, nxt_bv;
90 struct bvec_iter iter;
91
90 if (!blk_queue_cluster(q)) 92 if (!blk_queue_cluster(q))
91 return 0; 93 return 0;
92 94
@@ -97,34 +99,40 @@ static int blk_phys_contig_segment(struct request_queue *q, struct bio *bio,
97 if (!bio_has_data(bio)) 99 if (!bio_has_data(bio))
98 return 1; 100 return 1;
99 101
100 if (!BIOVEC_PHYS_MERGEABLE(__BVEC_END(bio), __BVEC_START(nxt))) 102 bio_for_each_segment(end_bv, bio, iter)
103 if (end_bv.bv_len == iter.bi_size)
104 break;
105
106 nxt_bv = bio_iovec(nxt);
107
108 if (!BIOVEC_PHYS_MERGEABLE(&end_bv, &nxt_bv))
101 return 0; 109 return 0;
102 110
103 /* 111 /*
104 * bio and nxt are contiguous in memory; check if the queue allows 112 * bio and nxt are contiguous in memory; check if the queue allows
105 * these two to be merged into one 113 * these two to be merged into one
106 */ 114 */
107 if (BIO_SEG_BOUNDARY(q, bio, nxt)) 115 if (BIOVEC_SEG_BOUNDARY(q, &end_bv, &nxt_bv))
108 return 1; 116 return 1;
109 117
110 return 0; 118 return 0;
111} 119}
112 120
113static void 121static inline void
114__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec, 122__blk_segment_map_sg(struct request_queue *q, struct bio_vec *bvec,
115 struct scatterlist *sglist, struct bio_vec **bvprv, 123 struct scatterlist *sglist, struct bio_vec *bvprv,
116 struct scatterlist **sg, int *nsegs, int *cluster) 124 struct scatterlist **sg, int *nsegs, int *cluster)
117{ 125{
118 126
119 int nbytes = bvec->bv_len; 127 int nbytes = bvec->bv_len;
120 128
121 if (*bvprv && *cluster) { 129 if (*sg && *cluster) {
122 if ((*sg)->length + nbytes > queue_max_segment_size(q)) 130 if ((*sg)->length + nbytes > queue_max_segment_size(q))
123 goto new_segment; 131 goto new_segment;
124 132
125 if (!BIOVEC_PHYS_MERGEABLE(*bvprv, bvec)) 133 if (!BIOVEC_PHYS_MERGEABLE(bvprv, bvec))
126 goto new_segment; 134 goto new_segment;
127 if (!BIOVEC_SEG_BOUNDARY(q, *bvprv, bvec)) 135 if (!BIOVEC_SEG_BOUNDARY(q, bvprv, bvec))
128 goto new_segment; 136 goto new_segment;
129 137
130 (*sg)->length += nbytes; 138 (*sg)->length += nbytes;
@@ -150,7 +158,7 @@ new_segment:
150 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset); 158 sg_set_page(*sg, bvec->bv_page, nbytes, bvec->bv_offset);
151 (*nsegs)++; 159 (*nsegs)++;
152 } 160 }
153 *bvprv = bvec; 161 *bvprv = *bvec;
154} 162}
155 163
156/* 164/*
@@ -160,7 +168,7 @@ new_segment:
160int blk_rq_map_sg(struct request_queue *q, struct request *rq, 168int blk_rq_map_sg(struct request_queue *q, struct request *rq,
161 struct scatterlist *sglist) 169 struct scatterlist *sglist)
162{ 170{
163 struct bio_vec *bvec, *bvprv; 171 struct bio_vec bvec, bvprv = { NULL };
164 struct req_iterator iter; 172 struct req_iterator iter;
165 struct scatterlist *sg; 173 struct scatterlist *sg;
166 int nsegs, cluster; 174 int nsegs, cluster;
@@ -171,10 +179,9 @@ int blk_rq_map_sg(struct request_queue *q, struct request *rq,
171 /* 179 /*
172 * for each bio in rq 180 * for each bio in rq
173 */ 181 */
174 bvprv = NULL;
175 sg = NULL; 182 sg = NULL;
176 rq_for_each_segment(bvec, rq, iter) { 183 rq_for_each_segment(bvec, rq, iter) {
177 __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, 184 __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
178 &nsegs, &cluster); 185 &nsegs, &cluster);
179 } /* segments in rq */ 186 } /* segments in rq */
180 187
@@ -223,18 +230,17 @@ EXPORT_SYMBOL(blk_rq_map_sg);
223int blk_bio_map_sg(struct request_queue *q, struct bio *bio, 230int blk_bio_map_sg(struct request_queue *q, struct bio *bio,
224 struct scatterlist *sglist) 231 struct scatterlist *sglist)
225{ 232{
226 struct bio_vec *bvec, *bvprv; 233 struct bio_vec bvec, bvprv = { NULL };
227 struct scatterlist *sg; 234 struct scatterlist *sg;
228 int nsegs, cluster; 235 int nsegs, cluster;
229 unsigned long i; 236 struct bvec_iter iter;
230 237
231 nsegs = 0; 238 nsegs = 0;
232 cluster = blk_queue_cluster(q); 239 cluster = blk_queue_cluster(q);
233 240
234 bvprv = NULL;
235 sg = NULL; 241 sg = NULL;
236 bio_for_each_segment(bvec, bio, i) { 242 bio_for_each_segment(bvec, bio, iter) {
237 __blk_segment_map_sg(q, bvec, sglist, &bvprv, &sg, 243 __blk_segment_map_sg(q, &bvec, sglist, &bvprv, &sg,
238 &nsegs, &cluster); 244 &nsegs, &cluster);
239 } /* segments in bio */ 245 } /* segments in bio */
240 246
@@ -543,9 +549,9 @@ bool blk_rq_merge_ok(struct request *rq, struct bio *bio)
543 549
544int blk_try_merge(struct request *rq, struct bio *bio) 550int blk_try_merge(struct request *rq, struct bio *bio)
545{ 551{
546 if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_sector) 552 if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
547 return ELEVATOR_BACK_MERGE; 553 return ELEVATOR_BACK_MERGE;
548 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_sector) 554 else if (blk_rq_pos(rq) - bio_sectors(bio) == bio->bi_iter.bi_sector)
549 return ELEVATOR_FRONT_MERGE; 555 return ELEVATOR_FRONT_MERGE;
550 return ELEVATOR_NO_MERGE; 556 return ELEVATOR_NO_MERGE;
551} 557}
diff --git a/block/blk-mq.c b/block/blk-mq.c
index c79126e11030..3929f43d0b03 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -305,7 +305,7 @@ void blk_mq_complete_request(struct request *rq, int error)
305 struct bio *next = bio->bi_next; 305 struct bio *next = bio->bi_next;
306 306
307 bio->bi_next = NULL; 307 bio->bi_next = NULL;
308 bytes += bio->bi_size; 308 bytes += bio->bi_iter.bi_size;
309 blk_mq_bio_endio(rq, bio, error); 309 blk_mq_bio_endio(rq, bio, error);
310 bio = next; 310 bio = next;
311 } 311 }
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index 06534049afba..20f820037775 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -877,14 +877,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
877 do_div(tmp, HZ); 877 do_div(tmp, HZ);
878 bytes_allowed = tmp; 878 bytes_allowed = tmp;
879 879
880 if (tg->bytes_disp[rw] + bio->bi_size <= bytes_allowed) { 880 if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
881 if (wait) 881 if (wait)
882 *wait = 0; 882 *wait = 0;
883 return 1; 883 return 1;
884 } 884 }
885 885
886 /* Calc approx time to dispatch */ 886 /* Calc approx time to dispatch */
887 extra_bytes = tg->bytes_disp[rw] + bio->bi_size - bytes_allowed; 887 extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
888 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]); 888 jiffy_wait = div64_u64(extra_bytes * HZ, tg->bps[rw]);
889 889
890 if (!jiffy_wait) 890 if (!jiffy_wait)
@@ -987,7 +987,7 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
987 bool rw = bio_data_dir(bio); 987 bool rw = bio_data_dir(bio);
988 988
989 /* Charge the bio to the group */ 989 /* Charge the bio to the group */
990 tg->bytes_disp[rw] += bio->bi_size; 990 tg->bytes_disp[rw] += bio->bi_iter.bi_size;
991 tg->io_disp[rw]++; 991 tg->io_disp[rw]++;
992 992
993 /* 993 /*
@@ -1003,8 +1003,8 @@ static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
1003 */ 1003 */
1004 if (!(bio->bi_rw & REQ_THROTTLED)) { 1004 if (!(bio->bi_rw & REQ_THROTTLED)) {
1005 bio->bi_rw |= REQ_THROTTLED; 1005 bio->bi_rw |= REQ_THROTTLED;
1006 throtl_update_dispatch_stats(tg_to_blkg(tg), bio->bi_size, 1006 throtl_update_dispatch_stats(tg_to_blkg(tg),
1007 bio->bi_rw); 1007 bio->bi_iter.bi_size, bio->bi_rw);
1008 } 1008 }
1009} 1009}
1010 1010
@@ -1508,7 +1508,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1508 if (tg) { 1508 if (tg) {
1509 if (!tg->has_rules[rw]) { 1509 if (!tg->has_rules[rw]) {
1510 throtl_update_dispatch_stats(tg_to_blkg(tg), 1510 throtl_update_dispatch_stats(tg_to_blkg(tg),
1511 bio->bi_size, bio->bi_rw); 1511 bio->bi_iter.bi_size, bio->bi_rw);
1512 goto out_unlock_rcu; 1512 goto out_unlock_rcu;
1513 } 1513 }
1514 } 1514 }
@@ -1564,7 +1564,7 @@ bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
1564 /* out-of-limit, queue to @tg */ 1564 /* out-of-limit, queue to @tg */
1565 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d", 1565 throtl_log(sq, "[%c] bio. bdisp=%llu sz=%u bps=%llu iodisp=%u iops=%u queued=%d/%d",
1566 rw == READ ? 'R' : 'W', 1566 rw == READ ? 'R' : 'W',
1567 tg->bytes_disp[rw], bio->bi_size, tg->bps[rw], 1567 tg->bytes_disp[rw], bio->bi_iter.bi_size, tg->bps[rw],
1568 tg->io_disp[rw], tg->iops[rw], 1568 tg->io_disp[rw], tg->iops[rw],
1569 sq->nr_queued[READ], sq->nr_queued[WRITE]); 1569 sq->nr_queued[READ], sq->nr_queued[WRITE]);
1570 1570
diff --git a/block/elevator.c b/block/elevator.c
index b7ff2861b6bd..42c45a7d6714 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -440,7 +440,7 @@ int elv_merge(struct request_queue *q, struct request **req, struct bio *bio)
440 /* 440 /*
441 * See if our hash lookup can find a potential backmerge. 441 * See if our hash lookup can find a potential backmerge.
442 */ 442 */
443 __rq = elv_rqhash_find(q, bio->bi_sector); 443 __rq = elv_rqhash_find(q, bio->bi_iter.bi_sector);
444 if (__rq && elv_rq_merge_ok(__rq, bio)) { 444 if (__rq && elv_rq_merge_ok(__rq, bio)) {
445 *req = __rq; 445 *req = __rq;
446 return ELEVATOR_BACK_MERGE; 446 return ELEVATOR_BACK_MERGE;
diff --git a/drivers/block/aoe/aoe.h b/drivers/block/aoe/aoe.h
index 14a9d1912318..9220f8e833d0 100644
--- a/drivers/block/aoe/aoe.h
+++ b/drivers/block/aoe/aoe.h
@@ -100,11 +100,8 @@ enum {
100 100
101struct buf { 101struct buf {
102 ulong nframesout; 102 ulong nframesout;
103 ulong resid;
104 ulong bv_resid;
105 sector_t sector;
106 struct bio *bio; 103 struct bio *bio;
107 struct bio_vec *bv; 104 struct bvec_iter iter;
108 struct request *rq; 105 struct request *rq;
109}; 106};
110 107
@@ -120,13 +117,10 @@ struct frame {
120 ulong waited; 117 ulong waited;
121 ulong waited_total; 118 ulong waited_total;
122 struct aoetgt *t; /* parent target I belong to */ 119 struct aoetgt *t; /* parent target I belong to */
123 sector_t lba;
124 struct sk_buff *skb; /* command skb freed on module exit */ 120 struct sk_buff *skb; /* command skb freed on module exit */
125 struct sk_buff *r_skb; /* response skb for async processing */ 121 struct sk_buff *r_skb; /* response skb for async processing */
126 struct buf *buf; 122 struct buf *buf;
127 struct bio_vec *bv; 123 struct bvec_iter iter;
128 ulong bcnt;
129 ulong bv_off;
130 char flags; 124 char flags;
131}; 125};
132 126
diff --git a/drivers/block/aoe/aoecmd.c b/drivers/block/aoe/aoecmd.c
index d2515435e23f..8184451b57c0 100644
--- a/drivers/block/aoe/aoecmd.c
+++ b/drivers/block/aoe/aoecmd.c
@@ -196,8 +196,7 @@ aoe_freetframe(struct frame *f)
196 196
197 t = f->t; 197 t = f->t;
198 f->buf = NULL; 198 f->buf = NULL;
199 f->lba = 0; 199 memset(&f->iter, 0, sizeof(f->iter));
200 f->bv = NULL;
201 f->r_skb = NULL; 200 f->r_skb = NULL;
202 f->flags = 0; 201 f->flags = 0;
203 list_add(&f->head, &t->ffree); 202 list_add(&f->head, &t->ffree);
@@ -295,21 +294,14 @@ newframe(struct aoedev *d)
295} 294}
296 295
297static void 296static void
298skb_fillup(struct sk_buff *skb, struct bio_vec *bv, ulong off, ulong cnt) 297skb_fillup(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter)
299{ 298{
300 int frag = 0; 299 int frag = 0;
301 ulong fcnt; 300 struct bio_vec bv;
302loop: 301
303 fcnt = bv->bv_len - (off - bv->bv_offset); 302 __bio_for_each_segment(bv, bio, iter, iter)
304 if (fcnt > cnt) 303 skb_fill_page_desc(skb, frag++, bv.bv_page,
305 fcnt = cnt; 304 bv.bv_offset, bv.bv_len);
306 skb_fill_page_desc(skb, frag++, bv->bv_page, off, fcnt);
307 cnt -= fcnt;
308 if (cnt <= 0)
309 return;
310 bv++;
311 off = bv->bv_offset;
312 goto loop;
313} 305}
314 306
315static void 307static void
@@ -346,12 +338,10 @@ ata_rw_frameinit(struct frame *f)
346 t->nout++; 338 t->nout++;
347 f->waited = 0; 339 f->waited = 0;
348 f->waited_total = 0; 340 f->waited_total = 0;
349 if (f->buf)
350 f->lba = f->buf->sector;
351 341
352 /* set up ata header */ 342 /* set up ata header */
353 ah->scnt = f->bcnt >> 9; 343 ah->scnt = f->iter.bi_size >> 9;
354 put_lba(ah, f->lba); 344 put_lba(ah, f->iter.bi_sector);
355 if (t->d->flags & DEVFL_EXT) { 345 if (t->d->flags & DEVFL_EXT) {
356 ah->aflags |= AOEAFL_EXT; 346 ah->aflags |= AOEAFL_EXT;
357 } else { 347 } else {
@@ -360,11 +350,11 @@ ata_rw_frameinit(struct frame *f)
360 ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */ 350 ah->lba3 |= 0xe0; /* LBA bit + obsolete 0xa0 */
361 } 351 }
362 if (f->buf && bio_data_dir(f->buf->bio) == WRITE) { 352 if (f->buf && bio_data_dir(f->buf->bio) == WRITE) {
363 skb_fillup(skb, f->bv, f->bv_off, f->bcnt); 353 skb_fillup(skb, f->buf->bio, f->iter);
364 ah->aflags |= AOEAFL_WRITE; 354 ah->aflags |= AOEAFL_WRITE;
365 skb->len += f->bcnt; 355 skb->len += f->iter.bi_size;
366 skb->data_len = f->bcnt; 356 skb->data_len = f->iter.bi_size;
367 skb->truesize += f->bcnt; 357 skb->truesize += f->iter.bi_size;
368 t->wpkts++; 358 t->wpkts++;
369 } else { 359 } else {
370 t->rpkts++; 360 t->rpkts++;
@@ -382,7 +372,6 @@ aoecmd_ata_rw(struct aoedev *d)
382 struct buf *buf; 372 struct buf *buf;
383 struct sk_buff *skb; 373 struct sk_buff *skb;
384 struct sk_buff_head queue; 374 struct sk_buff_head queue;
385 ulong bcnt, fbcnt;
386 375
387 buf = nextbuf(d); 376 buf = nextbuf(d);
388 if (buf == NULL) 377 if (buf == NULL)
@@ -390,39 +379,22 @@ aoecmd_ata_rw(struct aoedev *d)
390 f = newframe(d); 379 f = newframe(d);
391 if (f == NULL) 380 if (f == NULL)
392 return 0; 381 return 0;
393 bcnt = d->maxbcnt;
394 if (bcnt == 0)
395 bcnt = DEFAULTBCNT;
396 if (bcnt > buf->resid)
397 bcnt = buf->resid;
398 fbcnt = bcnt;
399 f->bv = buf->bv;
400 f->bv_off = f->bv->bv_offset + (f->bv->bv_len - buf->bv_resid);
401 do {
402 if (fbcnt < buf->bv_resid) {
403 buf->bv_resid -= fbcnt;
404 buf->resid -= fbcnt;
405 break;
406 }
407 fbcnt -= buf->bv_resid;
408 buf->resid -= buf->bv_resid;
409 if (buf->resid == 0) {
410 d->ip.buf = NULL;
411 break;
412 }
413 buf->bv++;
414 buf->bv_resid = buf->bv->bv_len;
415 WARN_ON(buf->bv_resid == 0);
416 } while (fbcnt);
417 382
418 /* initialize the headers & frame */ 383 /* initialize the headers & frame */
419 f->buf = buf; 384 f->buf = buf;
420 f->bcnt = bcnt; 385 f->iter = buf->iter;
421 ata_rw_frameinit(f); 386 f->iter.bi_size = min_t(unsigned long,
387 d->maxbcnt ?: DEFAULTBCNT,
388 f->iter.bi_size);
389 bio_advance_iter(buf->bio, &buf->iter, f->iter.bi_size);
390
391 if (!buf->iter.bi_size)
392 d->ip.buf = NULL;
422 393
423 /* mark all tracking fields and load out */ 394 /* mark all tracking fields and load out */
424 buf->nframesout += 1; 395 buf->nframesout += 1;
425 buf->sector += bcnt >> 9; 396
397 ata_rw_frameinit(f);
426 398
427 skb = skb_clone(f->skb, GFP_ATOMIC); 399 skb = skb_clone(f->skb, GFP_ATOMIC);
428 if (skb) { 400 if (skb) {
@@ -613,10 +585,7 @@ reassign_frame(struct frame *f)
613 skb = nf->skb; 585 skb = nf->skb;
614 nf->skb = f->skb; 586 nf->skb = f->skb;
615 nf->buf = f->buf; 587 nf->buf = f->buf;
616 nf->bcnt = f->bcnt; 588 nf->iter = f->iter;
617 nf->lba = f->lba;
618 nf->bv = f->bv;
619 nf->bv_off = f->bv_off;
620 nf->waited = 0; 589 nf->waited = 0;
621 nf->waited_total = f->waited_total; 590 nf->waited_total = f->waited_total;
622 nf->sent = f->sent; 591 nf->sent = f->sent;
@@ -648,19 +617,19 @@ probe(struct aoetgt *t)
648 } 617 }
649 f->flags |= FFL_PROBE; 618 f->flags |= FFL_PROBE;
650 ifrotate(t); 619 ifrotate(t);
651 f->bcnt = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT; 620 f->iter.bi_size = t->d->maxbcnt ? t->d->maxbcnt : DEFAULTBCNT;
652 ata_rw_frameinit(f); 621 ata_rw_frameinit(f);
653 skb = f->skb; 622 skb = f->skb;
654 for (frag = 0, n = f->bcnt; n > 0; ++frag, n -= m) { 623 for (frag = 0, n = f->iter.bi_size; n > 0; ++frag, n -= m) {
655 if (n < PAGE_SIZE) 624 if (n < PAGE_SIZE)
656 m = n; 625 m = n;
657 else 626 else
658 m = PAGE_SIZE; 627 m = PAGE_SIZE;
659 skb_fill_page_desc(skb, frag, empty_page, 0, m); 628 skb_fill_page_desc(skb, frag, empty_page, 0, m);
660 } 629 }
661 skb->len += f->bcnt; 630 skb->len += f->iter.bi_size;
662 skb->data_len = f->bcnt; 631 skb->data_len = f->iter.bi_size;
663 skb->truesize += f->bcnt; 632 skb->truesize += f->iter.bi_size;
664 633
665 skb = skb_clone(f->skb, GFP_ATOMIC); 634 skb = skb_clone(f->skb, GFP_ATOMIC);
666 if (skb) { 635 if (skb) {
@@ -897,15 +866,15 @@ rqbiocnt(struct request *r)
897static void 866static void
898bio_pageinc(struct bio *bio) 867bio_pageinc(struct bio *bio)
899{ 868{
900 struct bio_vec *bv; 869 struct bio_vec bv;
901 struct page *page; 870 struct page *page;
902 int i; 871 struct bvec_iter iter;
903 872
904 bio_for_each_segment(bv, bio, i) { 873 bio_for_each_segment(bv, bio, iter) {
905 /* Non-zero page count for non-head members of 874 /* Non-zero page count for non-head members of
906 * compound pages is no longer allowed by the kernel. 875 * compound pages is no longer allowed by the kernel.
907 */ 876 */
908 page = compound_trans_head(bv->bv_page); 877 page = compound_trans_head(bv.bv_page);
909 atomic_inc(&page->_count); 878 atomic_inc(&page->_count);
910 } 879 }
911} 880}
@@ -913,12 +882,12 @@ bio_pageinc(struct bio *bio)
913static void 882static void
914bio_pagedec(struct bio *bio) 883bio_pagedec(struct bio *bio)
915{ 884{
916 struct bio_vec *bv;
917 struct page *page; 885 struct page *page;
918 int i; 886 struct bio_vec bv;
887 struct bvec_iter iter;
919 888
920 bio_for_each_segment(bv, bio, i) { 889 bio_for_each_segment(bv, bio, iter) {
921 page = compound_trans_head(bv->bv_page); 890 page = compound_trans_head(bv.bv_page);
922 atomic_dec(&page->_count); 891 atomic_dec(&page->_count);
923 } 892 }
924} 893}
@@ -929,12 +898,8 @@ bufinit(struct buf *buf, struct request *rq, struct bio *bio)
929 memset(buf, 0, sizeof(*buf)); 898 memset(buf, 0, sizeof(*buf));
930 buf->rq = rq; 899 buf->rq = rq;
931 buf->bio = bio; 900 buf->bio = bio;
932 buf->resid = bio->bi_size; 901 buf->iter = bio->bi_iter;
933 buf->sector = bio->bi_sector;
934 bio_pageinc(bio); 902 bio_pageinc(bio);
935 buf->bv = bio_iovec(bio);
936 buf->bv_resid = buf->bv->bv_len;
937 WARN_ON(buf->bv_resid == 0);
938} 903}
939 904
940static struct buf * 905static struct buf *
@@ -1119,24 +1084,18 @@ gettgt(struct aoedev *d, char *addr)
1119} 1084}
1120 1085
1121static void 1086static void
1122bvcpy(struct bio_vec *bv, ulong off, struct sk_buff *skb, long cnt) 1087bvcpy(struct sk_buff *skb, struct bio *bio, struct bvec_iter iter, long cnt)
1123{ 1088{
1124 ulong fcnt;
1125 char *p;
1126 int soff = 0; 1089 int soff = 0;
1127loop: 1090 struct bio_vec bv;
1128 fcnt = bv->bv_len - (off - bv->bv_offset); 1091
1129 if (fcnt > cnt) 1092 iter.bi_size = cnt;
1130 fcnt = cnt; 1093
1131 p = page_address(bv->bv_page) + off; 1094 __bio_for_each_segment(bv, bio, iter, iter) {
1132 skb_copy_bits(skb, soff, p, fcnt); 1095 char *p = page_address(bv.bv_page) + bv.bv_offset;
1133 soff += fcnt; 1096 skb_copy_bits(skb, soff, p, bv.bv_len);
1134 cnt -= fcnt; 1097 soff += bv.bv_len;
1135 if (cnt <= 0) 1098 }
1136 return;
1137 bv++;
1138 off = bv->bv_offset;
1139 goto loop;
1140} 1099}
1141 1100
1142void 1101void
@@ -1152,7 +1111,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
1152 do { 1111 do {
1153 bio = rq->bio; 1112 bio = rq->bio;
1154 bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags); 1113 bok = !fastfail && test_bit(BIO_UPTODATE, &bio->bi_flags);
1155 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_size)); 1114 } while (__blk_end_request(rq, bok ? 0 : -EIO, bio->bi_iter.bi_size));
1156 1115
1157 /* cf. http://lkml.org/lkml/2006/10/31/28 */ 1116 /* cf. http://lkml.org/lkml/2006/10/31/28 */
1158 if (!fastfail) 1117 if (!fastfail)
@@ -1229,7 +1188,15 @@ noskb: if (buf)
1229 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); 1188 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
1230 break; 1189 break;
1231 } 1190 }
1232 bvcpy(f->bv, f->bv_off, skb, n); 1191 if (n > f->iter.bi_size) {
1192 pr_err_ratelimited("%s e%ld.%d. bytes=%ld need=%u\n",
1193 "aoe: too-large data size in read from",
1194 (long) d->aoemajor, d->aoeminor,
1195 n, f->iter.bi_size);
1196 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
1197 break;
1198 }
1199 bvcpy(skb, f->buf->bio, f->iter, n);
1233 case ATA_CMD_PIO_WRITE: 1200 case ATA_CMD_PIO_WRITE:
1234 case ATA_CMD_PIO_WRITE_EXT: 1201 case ATA_CMD_PIO_WRITE_EXT:
1235 spin_lock_irq(&d->lock); 1202 spin_lock_irq(&d->lock);
@@ -1272,7 +1239,7 @@ out:
1272 1239
1273 aoe_freetframe(f); 1240 aoe_freetframe(f);
1274 1241
1275 if (buf && --buf->nframesout == 0 && buf->resid == 0) 1242 if (buf && --buf->nframesout == 0 && buf->iter.bi_size == 0)
1276 aoe_end_buf(d, buf); 1243 aoe_end_buf(d, buf);
1277 1244
1278 spin_unlock_irq(&d->lock); 1245 spin_unlock_irq(&d->lock);
@@ -1727,7 +1694,7 @@ aoe_failbuf(struct aoedev *d, struct buf *buf)
1727{ 1694{
1728 if (buf == NULL) 1695 if (buf == NULL)
1729 return; 1696 return;
1730 buf->resid = 0; 1697 buf->iter.bi_size = 0;
1731 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags); 1698 clear_bit(BIO_UPTODATE, &buf->bio->bi_flags);
1732 if (buf->nframesout == 0) 1699 if (buf->nframesout == 0)
1733 aoe_end_buf(d, buf); 1700 aoe_end_buf(d, buf);
diff --git a/drivers/block/brd.c b/drivers/block/brd.c
index d91f1a56e861..e73b85cf0756 100644
--- a/drivers/block/brd.c
+++ b/drivers/block/brd.c
@@ -328,18 +328,18 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
328 struct block_device *bdev = bio->bi_bdev; 328 struct block_device *bdev = bio->bi_bdev;
329 struct brd_device *brd = bdev->bd_disk->private_data; 329 struct brd_device *brd = bdev->bd_disk->private_data;
330 int rw; 330 int rw;
331 struct bio_vec *bvec; 331 struct bio_vec bvec;
332 sector_t sector; 332 sector_t sector;
333 int i; 333 struct bvec_iter iter;
334 int err = -EIO; 334 int err = -EIO;
335 335
336 sector = bio->bi_sector; 336 sector = bio->bi_iter.bi_sector;
337 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk)) 337 if (bio_end_sector(bio) > get_capacity(bdev->bd_disk))
338 goto out; 338 goto out;
339 339
340 if (unlikely(bio->bi_rw & REQ_DISCARD)) { 340 if (unlikely(bio->bi_rw & REQ_DISCARD)) {
341 err = 0; 341 err = 0;
342 discard_from_brd(brd, sector, bio->bi_size); 342 discard_from_brd(brd, sector, bio->bi_iter.bi_size);
343 goto out; 343 goto out;
344 } 344 }
345 345
@@ -347,10 +347,10 @@ static void brd_make_request(struct request_queue *q, struct bio *bio)
347 if (rw == READA) 347 if (rw == READA)
348 rw = READ; 348 rw = READ;
349 349
350 bio_for_each_segment(bvec, bio, i) { 350 bio_for_each_segment(bvec, bio, iter) {
351 unsigned int len = bvec->bv_len; 351 unsigned int len = bvec.bv_len;
352 err = brd_do_bvec(brd, bvec->bv_page, len, 352 err = brd_do_bvec(brd, bvec.bv_page, len,
353 bvec->bv_offset, rw, sector); 353 bvec.bv_offset, rw, sector);
354 if (err) 354 if (err)
355 break; 355 break;
356 sector += len >> SECTOR_SHIFT; 356 sector += len >> SECTOR_SHIFT;
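
Editorial sketch (not from the patch): brd_make_request() shows the recurring conversion — bio_for_each_segment() is now driven by a struct bvec_iter and hands back struct bio_vec by value, so segment fields are accessed with '.' and no explicit index variable is kept. count_payload() below is a hypothetical helper illustrating the same loop shape.

#include <linux/bio.h>

/* Hypothetical helper: walk a bio the new way and total its payload. */
static unsigned int count_payload(struct bio *bio)
{
	struct bio_vec bvec;		/* by-value copy of each segment  */
	struct bvec_iter iter;		/* replaces the old integer index */
	unsigned int bytes = 0;

	bio_for_each_segment(bvec, bio, iter)
		bytes += bvec.bv_len;

	/* With immutable biovecs this always equals bio->bi_iter.bi_size. */
	return bytes;
}
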
diff --git a/drivers/block/drbd/drbd_actlog.c b/drivers/block/drbd/drbd_actlog.c
index 28c73ca320a8..a9b13f2cc420 100644
--- a/drivers/block/drbd/drbd_actlog.c
+++ b/drivers/block/drbd/drbd_actlog.c
@@ -159,7 +159,7 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
159 159
160 bio = bio_alloc_drbd(GFP_NOIO); 160 bio = bio_alloc_drbd(GFP_NOIO);
161 bio->bi_bdev = bdev->md_bdev; 161 bio->bi_bdev = bdev->md_bdev;
162 bio->bi_sector = sector; 162 bio->bi_iter.bi_sector = sector;
163 err = -EIO; 163 err = -EIO;
164 if (bio_add_page(bio, page, size, 0) != size) 164 if (bio_add_page(bio, page, size, 0) != size)
165 goto out; 165 goto out;
diff --git a/drivers/block/drbd/drbd_bitmap.c b/drivers/block/drbd/drbd_bitmap.c
index b12c11ec4bd2..597f111df67b 100644
--- a/drivers/block/drbd/drbd_bitmap.c
+++ b/drivers/block/drbd/drbd_bitmap.c
@@ -1028,7 +1028,7 @@ static void bm_page_io_async(struct bm_aio_ctx *ctx, int page_nr, int rw) __must
1028 } else 1028 } else
1029 page = b->bm_pages[page_nr]; 1029 page = b->bm_pages[page_nr];
1030 bio->bi_bdev = mdev->ldev->md_bdev; 1030 bio->bi_bdev = mdev->ldev->md_bdev;
1031 bio->bi_sector = on_disk_sector; 1031 bio->bi_iter.bi_sector = on_disk_sector;
1032 /* bio_add_page of a single page to an empty bio will always succeed, 1032 /* bio_add_page of a single page to an empty bio will always succeed,
1033 * according to api. Do we want to assert that? */ 1033 * according to api. Do we want to assert that? */
1034 bio_add_page(bio, page, len, 0); 1034 bio_add_page(bio, page, len, 0);
diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
index 9e3818b1bc83..929468e1512a 100644
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1537,15 +1537,17 @@ static int _drbd_send_page(struct drbd_conf *mdev, struct page *page,
1537 1537
1538static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio) 1538static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1539{ 1539{
1540 struct bio_vec *bvec; 1540 struct bio_vec bvec;
1541 int i; 1541 struct bvec_iter iter;
1542
1542 /* hint all but last page with MSG_MORE */ 1543 /* hint all but last page with MSG_MORE */
1543 bio_for_each_segment(bvec, bio, i) { 1544 bio_for_each_segment(bvec, bio, iter) {
1544 int err; 1545 int err;
1545 1546
1546 err = _drbd_no_send_page(mdev, bvec->bv_page, 1547 err = _drbd_no_send_page(mdev, bvec.bv_page,
1547 bvec->bv_offset, bvec->bv_len, 1548 bvec.bv_offset, bvec.bv_len,
1548 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); 1549 bio_iter_last(bvec, iter)
1550 ? 0 : MSG_MORE);
1549 if (err) 1551 if (err)
1550 return err; 1552 return err;
1551 } 1553 }
@@ -1554,15 +1556,16 @@ static int _drbd_send_bio(struct drbd_conf *mdev, struct bio *bio)
1554 1556
1555static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio) 1557static int _drbd_send_zc_bio(struct drbd_conf *mdev, struct bio *bio)
1556{ 1558{
1557 struct bio_vec *bvec; 1559 struct bio_vec bvec;
1558 int i; 1560 struct bvec_iter iter;
1561
1559 /* hint all but last page with MSG_MORE */ 1562 /* hint all but last page with MSG_MORE */
1560 bio_for_each_segment(bvec, bio, i) { 1563 bio_for_each_segment(bvec, bio, iter) {
1561 int err; 1564 int err;
1562 1565
1563 err = _drbd_send_page(mdev, bvec->bv_page, 1566 err = _drbd_send_page(mdev, bvec.bv_page,
1564 bvec->bv_offset, bvec->bv_len, 1567 bvec.bv_offset, bvec.bv_len,
1565 i == bio->bi_vcnt - 1 ? 0 : MSG_MORE); 1568 bio_iter_last(bvec, iter) ? 0 : MSG_MORE);
1566 if (err) 1569 if (err)
1567 return err; 1570 return err;
1568 } 1571 }
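
Editorial sketch (not from the patch): the drbd senders above drop the `i == bio->bi_vcnt - 1` test because a bio reaching a driver may be a split whose biovec extends past the range it owns; bio_iter_last() asks the iterator instead. The socket-callback shape below is an assumption; MSG_MORE comes from <linux/socket.h>.

#include <linux/bio.h>
#include <linux/socket.h>

/* Hypothetical sender: flag every segment but the last with MSG_MORE. */
static int send_all_segments(struct bio *bio,
			     int (*send_page)(struct page *page, int offset,
					      size_t len, unsigned int flags))
{
	struct bio_vec bvec;
	struct bvec_iter iter;

	bio_for_each_segment(bvec, bio, iter) {
		unsigned int flags = bio_iter_last(bvec, iter) ? 0 : MSG_MORE;
		int err = send_page(bvec.bv_page, bvec.bv_offset,
				    bvec.bv_len, flags);

		if (err)
			return err;
	}
	return 0;
}
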
diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
index 6fa6673b36b3..d073305ffd5e 100644
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -1333,7 +1333,7 @@ next_bio:
1333 goto fail; 1333 goto fail;
1334 } 1334 }
1335 /* > peer_req->i.sector, unless this is the first bio */ 1335 /* > peer_req->i.sector, unless this is the first bio */
1336 bio->bi_sector = sector; 1336 bio->bi_iter.bi_sector = sector;
1337 bio->bi_bdev = mdev->ldev->backing_bdev; 1337 bio->bi_bdev = mdev->ldev->backing_bdev;
1338 bio->bi_rw = rw; 1338 bio->bi_rw = rw;
1339 bio->bi_private = peer_req; 1339 bio->bi_private = peer_req;
@@ -1353,7 +1353,7 @@ next_bio:
1353 dev_err(DEV, 1353 dev_err(DEV,
1354 "bio_add_page failed for len=%u, " 1354 "bio_add_page failed for len=%u, "
1355 "bi_vcnt=0 (bi_sector=%llu)\n", 1355 "bi_vcnt=0 (bi_sector=%llu)\n",
1356 len, (unsigned long long)bio->bi_sector); 1356 len, (uint64_t)bio->bi_iter.bi_sector);
1357 err = -ENOSPC; 1357 err = -ENOSPC;
1358 goto fail; 1358 goto fail;
1359 } 1359 }
@@ -1595,9 +1595,10 @@ static int drbd_drain_block(struct drbd_conf *mdev, int data_size)
1595static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req, 1595static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1596 sector_t sector, int data_size) 1596 sector_t sector, int data_size)
1597{ 1597{
1598 struct bio_vec *bvec; 1598 struct bio_vec bvec;
1599 struct bvec_iter iter;
1599 struct bio *bio; 1600 struct bio *bio;
1600 int dgs, err, i, expect; 1601 int dgs, err, expect;
1601 void *dig_in = mdev->tconn->int_dig_in; 1602 void *dig_in = mdev->tconn->int_dig_in;
1602 void *dig_vv = mdev->tconn->int_dig_vv; 1603 void *dig_vv = mdev->tconn->int_dig_vv;
1603 1604
@@ -1615,13 +1616,13 @@ static int recv_dless_read(struct drbd_conf *mdev, struct drbd_request *req,
1615 mdev->recv_cnt += data_size>>9; 1616 mdev->recv_cnt += data_size>>9;
1616 1617
1617 bio = req->master_bio; 1618 bio = req->master_bio;
1618 D_ASSERT(sector == bio->bi_sector); 1619 D_ASSERT(sector == bio->bi_iter.bi_sector);
1619 1620
1620 bio_for_each_segment(bvec, bio, i) { 1621 bio_for_each_segment(bvec, bio, iter) {
1621 void *mapped = kmap(bvec->bv_page) + bvec->bv_offset; 1622 void *mapped = kmap(bvec.bv_page) + bvec.bv_offset;
1622 expect = min_t(int, data_size, bvec->bv_len); 1623 expect = min_t(int, data_size, bvec.bv_len);
1623 err = drbd_recv_all_warn(mdev->tconn, mapped, expect); 1624 err = drbd_recv_all_warn(mdev->tconn, mapped, expect);
1624 kunmap(bvec->bv_page); 1625 kunmap(bvec.bv_page);
1625 if (err) 1626 if (err)
1626 return err; 1627 return err;
1627 data_size -= expect; 1628 data_size -= expect;
diff --git a/drivers/block/drbd/drbd_req.c b/drivers/block/drbd/drbd_req.c
index fec7bef44994..104a040f24de 100644
--- a/drivers/block/drbd/drbd_req.c
+++ b/drivers/block/drbd/drbd_req.c
@@ -77,8 +77,8 @@ static struct drbd_request *drbd_req_new(struct drbd_conf *mdev,
77 req->epoch = 0; 77 req->epoch = 0;
78 78
79 drbd_clear_interval(&req->i); 79 drbd_clear_interval(&req->i);
80 req->i.sector = bio_src->bi_sector; 80 req->i.sector = bio_src->bi_iter.bi_sector;
81 req->i.size = bio_src->bi_size; 81 req->i.size = bio_src->bi_iter.bi_size;
82 req->i.local = true; 82 req->i.local = true;
83 req->i.waiting = false; 83 req->i.waiting = false;
84 84
@@ -1280,7 +1280,7 @@ void drbd_make_request(struct request_queue *q, struct bio *bio)
1280 /* 1280 /*
1281 * what we "blindly" assume: 1281 * what we "blindly" assume:
1282 */ 1282 */
1283 D_ASSERT(IS_ALIGNED(bio->bi_size, 512)); 1283 D_ASSERT(IS_ALIGNED(bio->bi_iter.bi_size, 512));
1284 1284
1285 inc_ap_bio(mdev); 1285 inc_ap_bio(mdev);
1286 __drbd_make_request(mdev, bio, start_time); 1286 __drbd_make_request(mdev, bio, start_time);
diff --git a/drivers/block/drbd/drbd_req.h b/drivers/block/drbd/drbd_req.h
index 978cb1addc98..28e15d91197a 100644
--- a/drivers/block/drbd/drbd_req.h
+++ b/drivers/block/drbd/drbd_req.h
@@ -269,7 +269,7 @@ static inline void drbd_req_make_private_bio(struct drbd_request *req, struct bi
269 269
270/* Short lived temporary struct on the stack. 270/* Short lived temporary struct on the stack.
271 * We could squirrel the error to be returned into 271 * We could squirrel the error to be returned into
272 * bio->bi_size, or similar. But that would be too ugly. */ 272 * bio->bi_iter.bi_size, or similar. But that would be too ugly. */
273struct bio_and_error { 273struct bio_and_error {
274 struct bio *bio; 274 struct bio *bio;
275 int error; 275 int error;
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index 891c0ecaa292..84d3175d493a 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -313,8 +313,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
313{ 313{
314 struct hash_desc desc; 314 struct hash_desc desc;
315 struct scatterlist sg; 315 struct scatterlist sg;
316 struct bio_vec *bvec; 316 struct bio_vec bvec;
317 int i; 317 struct bvec_iter iter;
318 318
319 desc.tfm = tfm; 319 desc.tfm = tfm;
320 desc.flags = 0; 320 desc.flags = 0;
@@ -322,8 +322,8 @@ void drbd_csum_bio(struct drbd_conf *mdev, struct crypto_hash *tfm, struct bio *
322 sg_init_table(&sg, 1); 322 sg_init_table(&sg, 1);
323 crypto_hash_init(&desc); 323 crypto_hash_init(&desc);
324 324
325 bio_for_each_segment(bvec, bio, i) { 325 bio_for_each_segment(bvec, bio, iter) {
326 sg_set_page(&sg, bvec->bv_page, bvec->bv_len, bvec->bv_offset); 326 sg_set_page(&sg, bvec.bv_page, bvec.bv_len, bvec.bv_offset);
327 crypto_hash_update(&desc, &sg, sg.length); 327 crypto_hash_update(&desc, &sg, sg.length);
328 } 328 }
329 crypto_hash_final(&desc, digest); 329 crypto_hash_final(&desc, digest);
diff --git a/drivers/block/floppy.c b/drivers/block/floppy.c
index 000abe2f105c..6b29c4422828 100644
--- a/drivers/block/floppy.c
+++ b/drivers/block/floppy.c
@@ -2351,7 +2351,7 @@ static void rw_interrupt(void)
2351/* Compute maximal contiguous buffer size. */ 2351/* Compute maximal contiguous buffer size. */
2352static int buffer_chain_size(void) 2352static int buffer_chain_size(void)
2353{ 2353{
2354 struct bio_vec *bv; 2354 struct bio_vec bv;
2355 int size; 2355 int size;
2356 struct req_iterator iter; 2356 struct req_iterator iter;
2357 char *base; 2357 char *base;
@@ -2360,10 +2360,10 @@ static int buffer_chain_size(void)
2360 size = 0; 2360 size = 0;
2361 2361
2362 rq_for_each_segment(bv, current_req, iter) { 2362 rq_for_each_segment(bv, current_req, iter) {
2363 if (page_address(bv->bv_page) + bv->bv_offset != base + size) 2363 if (page_address(bv.bv_page) + bv.bv_offset != base + size)
2364 break; 2364 break;
2365 2365
2366 size += bv->bv_len; 2366 size += bv.bv_len;
2367 } 2367 }
2368 2368
2369 return size >> 9; 2369 return size >> 9;
@@ -2389,7 +2389,7 @@ static int transfer_size(int ssize, int max_sector, int max_size)
2389static void copy_buffer(int ssize, int max_sector, int max_sector_2) 2389static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2390{ 2390{
2391 int remaining; /* number of transferred 512-byte sectors */ 2391 int remaining; /* number of transferred 512-byte sectors */
2392 struct bio_vec *bv; 2392 struct bio_vec bv;
2393 char *buffer; 2393 char *buffer;
2394 char *dma_buffer; 2394 char *dma_buffer;
2395 int size; 2395 int size;
@@ -2427,10 +2427,10 @@ static void copy_buffer(int ssize, int max_sector, int max_sector_2)
2427 if (!remaining) 2427 if (!remaining)
2428 break; 2428 break;
2429 2429
2430 size = bv->bv_len; 2430 size = bv.bv_len;
2431 SUPBOUND(size, remaining); 2431 SUPBOUND(size, remaining);
2432 2432
2433 buffer = page_address(bv->bv_page) + bv->bv_offset; 2433 buffer = page_address(bv.bv_page) + bv.bv_offset;
2434 if (dma_buffer + size > 2434 if (dma_buffer + size >
2435 floppy_track_buffer + (max_buffer_sectors << 10) || 2435 floppy_track_buffer + (max_buffer_sectors << 10) ||
2436 dma_buffer < floppy_track_buffer) { 2436 dma_buffer < floppy_track_buffer) {
@@ -3775,9 +3775,9 @@ static int __floppy_read_block_0(struct block_device *bdev)
3775 bio_vec.bv_len = size; 3775 bio_vec.bv_len = size;
3776 bio_vec.bv_offset = 0; 3776 bio_vec.bv_offset = 0;
3777 bio.bi_vcnt = 1; 3777 bio.bi_vcnt = 1;
3778 bio.bi_size = size; 3778 bio.bi_iter.bi_size = size;
3779 bio.bi_bdev = bdev; 3779 bio.bi_bdev = bdev;
3780 bio.bi_sector = 0; 3780 bio.bi_iter.bi_sector = 0;
3781 bio.bi_flags = (1 << BIO_QUIET); 3781 bio.bi_flags = (1 << BIO_QUIET);
3782 init_completion(&complete); 3782 init_completion(&complete);
3783 bio.bi_private = &complete; 3783 bio.bi_private = &complete;
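
Editorial sketch (an assumption-laden illustration, using the single-argument bio_init() of this kernel era): the on-stack bio in __floppy_read_block_0() shows where the position and size fields moved — drivers that build a bio by hand now fill in bio->bi_iter instead of bi_sector/bi_size directly.

#include <linux/bio.h>

/* Hypothetical helper: hand-build a single-segment bio on the stack. */
static void init_one_page_bio(struct bio *bio, struct bio_vec *bv,
			      struct block_device *bdev,
			      struct page *page, unsigned int size)
{
	bio_init(bio);
	bio->bi_io_vec = bv;
	bv->bv_page = page;
	bv->bv_len = size;
	bv->bv_offset = 0;

	bio->bi_vcnt = 1;
	bio->bi_bdev = bdev;
	bio->bi_iter.bi_sector = 0;	/* was bio->bi_sector */
	bio->bi_iter.bi_size = size;	/* was bio->bi_size   */
}
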
diff --git a/drivers/block/loop.c b/drivers/block/loop.c
index c8dac7305244..33fde3a39759 100644
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -288,9 +288,10 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
288{ 288{
289 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t, 289 int (*do_lo_send)(struct loop_device *, struct bio_vec *, loff_t,
290 struct page *page); 290 struct page *page);
291 struct bio_vec *bvec; 291 struct bio_vec bvec;
292 struct bvec_iter iter;
292 struct page *page = NULL; 293 struct page *page = NULL;
293 int i, ret = 0; 294 int ret = 0;
294 295
295 if (lo->transfer != transfer_none) { 296 if (lo->transfer != transfer_none) {
296 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM); 297 page = alloc_page(GFP_NOIO | __GFP_HIGHMEM);
@@ -302,11 +303,11 @@ static int lo_send(struct loop_device *lo, struct bio *bio, loff_t pos)
302 do_lo_send = do_lo_send_direct_write; 303 do_lo_send = do_lo_send_direct_write;
303 } 304 }
304 305
305 bio_for_each_segment(bvec, bio, i) { 306 bio_for_each_segment(bvec, bio, iter) {
306 ret = do_lo_send(lo, bvec, pos, page); 307 ret = do_lo_send(lo, &bvec, pos, page);
307 if (ret < 0) 308 if (ret < 0)
308 break; 309 break;
309 pos += bvec->bv_len; 310 pos += bvec.bv_len;
310 } 311 }
311 if (page) { 312 if (page) {
312 kunmap(page); 313 kunmap(page);
@@ -392,20 +393,20 @@ do_lo_receive(struct loop_device *lo,
392static int 393static int
393lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos) 394lo_receive(struct loop_device *lo, struct bio *bio, int bsize, loff_t pos)
394{ 395{
395 struct bio_vec *bvec; 396 struct bio_vec bvec;
397 struct bvec_iter iter;
396 ssize_t s; 398 ssize_t s;
397 int i;
398 399
399 bio_for_each_segment(bvec, bio, i) { 400 bio_for_each_segment(bvec, bio, iter) {
400 s = do_lo_receive(lo, bvec, bsize, pos); 401 s = do_lo_receive(lo, &bvec, bsize, pos);
401 if (s < 0) 402 if (s < 0)
402 return s; 403 return s;
403 404
404 if (s != bvec->bv_len) { 405 if (s != bvec.bv_len) {
405 zero_fill_bio(bio); 406 zero_fill_bio(bio);
406 break; 407 break;
407 } 408 }
408 pos += bvec->bv_len; 409 pos += bvec.bv_len;
409 } 410 }
410 return 0; 411 return 0;
411} 412}
@@ -415,7 +416,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
415 loff_t pos; 416 loff_t pos;
416 int ret; 417 int ret;
417 418
418 pos = ((loff_t) bio->bi_sector << 9) + lo->lo_offset; 419 pos = ((loff_t) bio->bi_iter.bi_sector << 9) + lo->lo_offset;
419 420
420 if (bio_rw(bio) == WRITE) { 421 if (bio_rw(bio) == WRITE) {
421 struct file *file = lo->lo_backing_file; 422 struct file *file = lo->lo_backing_file;
@@ -444,7 +445,7 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
444 goto out; 445 goto out;
445 } 446 }
446 ret = file->f_op->fallocate(file, mode, pos, 447 ret = file->f_op->fallocate(file, mode, pos,
447 bio->bi_size); 448 bio->bi_iter.bi_size);
448 if (unlikely(ret && ret != -EINVAL && 449 if (unlikely(ret && ret != -EINVAL &&
449 ret != -EOPNOTSUPP)) 450 ret != -EOPNOTSUPP))
450 ret = -EIO; 451 ret = -EIO;
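
Editorial sketch (not from the patch): helpers that still take a struct bio_vec *, like do_lo_send/do_lo_receive above, keep their signatures; callers simply pass the address of the iterator's on-stack copy. Because that copy is local, writing to it does not modify the bio's biovec. handle_segment() below is hypothetical.

#include <linux/bio.h>

static int handle_segment(struct bio_vec *bvec, loff_t pos)
{
	/* operates on one segment; bvec points at a local, per-iteration copy */
	return 0;
}

/* Hypothetical caller in the new style. */
static int handle_bio(struct bio *bio, loff_t pos)
{
	struct bio_vec bvec;
	struct bvec_iter iter;
	int ret = 0;

	bio_for_each_segment(bvec, bio, iter) {
		ret = handle_segment(&bvec, pos);
		if (ret < 0)
			break;
		pos += bvec.bv_len;
	}
	return ret;
}
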
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c
index 050c71267f14..52b2f2a71470 100644
--- a/drivers/block/mtip32xx/mtip32xx.c
+++ b/drivers/block/mtip32xx/mtip32xx.c
@@ -3962,8 +3962,9 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3962{ 3962{
3963 struct driver_data *dd = queue->queuedata; 3963 struct driver_data *dd = queue->queuedata;
3964 struct scatterlist *sg; 3964 struct scatterlist *sg;
3965 struct bio_vec *bvec; 3965 struct bio_vec bvec;
3966 int i, nents = 0; 3966 struct bvec_iter iter;
3967 int nents = 0;
3967 int tag = 0, unaligned = 0; 3968 int tag = 0, unaligned = 0;
3968 3969
3969 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) { 3970 if (unlikely(dd->dd_flag & MTIP_DDF_STOP_IO)) {
@@ -3993,7 +3994,7 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
3993 } 3994 }
3994 3995
3995 if (unlikely(bio->bi_rw & REQ_DISCARD)) { 3996 if (unlikely(bio->bi_rw & REQ_DISCARD)) {
3996 bio_endio(bio, mtip_send_trim(dd, bio->bi_sector, 3997 bio_endio(bio, mtip_send_trim(dd, bio->bi_iter.bi_sector,
3997 bio_sectors(bio))); 3998 bio_sectors(bio)));
3998 return; 3999 return;
3999 } 4000 }
@@ -4006,7 +4007,8 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
4006 4007
4007 if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 && 4008 if (bio_data_dir(bio) == WRITE && bio_sectors(bio) <= 64 &&
4008 dd->unal_qdepth) { 4009 dd->unal_qdepth) {
4009 if (bio->bi_sector % 8 != 0) /* Unaligned on 4k boundaries */ 4010 if (bio->bi_iter.bi_sector % 8 != 0)
4011 /* Unaligned on 4k boundaries */
4010 unaligned = 1; 4012 unaligned = 1;
4011 else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */ 4013 else if (bio_sectors(bio) % 8 != 0) /* Aligned but not 4k/8k */
4012 unaligned = 1; 4014 unaligned = 1;
@@ -4025,17 +4027,17 @@ static void mtip_make_request(struct request_queue *queue, struct bio *bio)
4025 } 4027 }
4026 4028
4027 /* Create the scatter list for this bio. */ 4029 /* Create the scatter list for this bio. */
4028 bio_for_each_segment(bvec, bio, i) { 4030 bio_for_each_segment(bvec, bio, iter) {
4029 sg_set_page(&sg[nents], 4031 sg_set_page(&sg[nents],
4030 bvec->bv_page, 4032 bvec.bv_page,
4031 bvec->bv_len, 4033 bvec.bv_len,
4032 bvec->bv_offset); 4034 bvec.bv_offset);
4033 nents++; 4035 nents++;
4034 } 4036 }
4035 4037
4036 /* Issue the read/write. */ 4038 /* Issue the read/write. */
4037 mtip_hw_submit_io(dd, 4039 mtip_hw_submit_io(dd,
4038 bio->bi_sector, 4040 bio->bi_iter.bi_sector,
4039 bio_sectors(bio), 4041 bio_sectors(bio),
4040 nents, 4042 nents,
4041 tag, 4043 tag,
diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c
index 2dc3b5153f0d..55298db36b2d 100644
--- a/drivers/block/nbd.c
+++ b/drivers/block/nbd.c
@@ -271,18 +271,18 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req)
271 271
272 if (nbd_cmd(req) == NBD_CMD_WRITE) { 272 if (nbd_cmd(req) == NBD_CMD_WRITE) {
273 struct req_iterator iter; 273 struct req_iterator iter;
274 struct bio_vec *bvec; 274 struct bio_vec bvec;
275 /* 275 /*
276 * we are really probing at internals to determine 276 * we are really probing at internals to determine
277 * whether to set MSG_MORE or not... 277 * whether to set MSG_MORE or not...
278 */ 278 */
279 rq_for_each_segment(bvec, req, iter) { 279 rq_for_each_segment(bvec, req, iter) {
280 flags = 0; 280 flags = 0;
281 if (!rq_iter_last(req, iter)) 281 if (!rq_iter_last(bvec, iter))
282 flags = MSG_MORE; 282 flags = MSG_MORE;
283 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n", 283 dprintk(DBG_TX, "%s: request %p: sending %d bytes data\n",
284 nbd->disk->disk_name, req, bvec->bv_len); 284 nbd->disk->disk_name, req, bvec.bv_len);
285 result = sock_send_bvec(nbd, bvec, flags); 285 result = sock_send_bvec(nbd, &bvec, flags);
286 if (result <= 0) { 286 if (result <= 0) {
287 dev_err(disk_to_dev(nbd->disk), 287 dev_err(disk_to_dev(nbd->disk),
288 "Send data failed (result %d)\n", 288 "Send data failed (result %d)\n",
@@ -378,10 +378,10 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
378 nbd->disk->disk_name, req); 378 nbd->disk->disk_name, req);
379 if (nbd_cmd(req) == NBD_CMD_READ) { 379 if (nbd_cmd(req) == NBD_CMD_READ) {
380 struct req_iterator iter; 380 struct req_iterator iter;
381 struct bio_vec *bvec; 381 struct bio_vec bvec;
382 382
383 rq_for_each_segment(bvec, req, iter) { 383 rq_for_each_segment(bvec, req, iter) {
384 result = sock_recv_bvec(nbd, bvec); 384 result = sock_recv_bvec(nbd, &bvec);
385 if (result <= 0) { 385 if (result <= 0) {
386 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", 386 dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n",
387 result); 387 result);
@@ -389,7 +389,7 @@ static struct request *nbd_read_stat(struct nbd_device *nbd)
389 return req; 389 return req;
390 } 390 }
391 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n", 391 dprintk(DBG_RX, "%s: request %p: got %d bytes data\n",
392 nbd->disk->disk_name, req, bvec->bv_len); 392 nbd->disk->disk_name, req, bvec.bv_len);
393 } 393 }
394 } 394 }
395 return req; 395 return req;
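
Editorial sketch (not from the patch): request iteration follows the same by-value convention, and rq_iter_last() now takes the current bio_vec plus the iterator rather than the request, as the nbd change above shows. The send callback below is an assumption; MSG_MORE is from <linux/socket.h>.

#include <linux/blkdev.h>
#include <linux/socket.h>

/* Hypothetical transmit loop over a request's data. */
static void send_request_payload(struct request *req,
				 void (*send_page)(struct page *page,
						   unsigned int offset,
						   unsigned int len,
						   unsigned int msg_flags))
{
	struct req_iterator iter;
	struct bio_vec bvec;

	rq_for_each_segment(bvec, req, iter) {
		unsigned int flags = rq_iter_last(bvec, iter) ? 0 : MSG_MORE;

		send_page(bvec.bv_page, bvec.bv_offset, bvec.bv_len, flags);
	}
}
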
diff --git a/drivers/block/nvme-core.c b/drivers/block/nvme-core.c
index 26d03fa0bf26..1f14ac403945 100644
--- a/drivers/block/nvme-core.c
+++ b/drivers/block/nvme-core.c
@@ -441,104 +441,19 @@ int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
441 return total_len; 441 return total_len;
442} 442}
443 443
444struct nvme_bio_pair {
445 struct bio b1, b2, *parent;
446 struct bio_vec *bv1, *bv2;
447 int err;
448 atomic_t cnt;
449};
450
451static void nvme_bio_pair_endio(struct bio *bio, int err)
452{
453 struct nvme_bio_pair *bp = bio->bi_private;
454
455 if (err)
456 bp->err = err;
457
458 if (atomic_dec_and_test(&bp->cnt)) {
459 bio_endio(bp->parent, bp->err);
460 kfree(bp->bv1);
461 kfree(bp->bv2);
462 kfree(bp);
463 }
464}
465
466static struct nvme_bio_pair *nvme_bio_split(struct bio *bio, int idx,
467 int len, int offset)
468{
469 struct nvme_bio_pair *bp;
470
471 BUG_ON(len > bio->bi_size);
472 BUG_ON(idx > bio->bi_vcnt);
473
474 bp = kmalloc(sizeof(*bp), GFP_ATOMIC);
475 if (!bp)
476 return NULL;
477 bp->err = 0;
478
479 bp->b1 = *bio;
480 bp->b2 = *bio;
481
482 bp->b1.bi_size = len;
483 bp->b2.bi_size -= len;
484 bp->b1.bi_vcnt = idx;
485 bp->b2.bi_idx = idx;
486 bp->b2.bi_sector += len >> 9;
487
488 if (offset) {
489 bp->bv1 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
490 GFP_ATOMIC);
491 if (!bp->bv1)
492 goto split_fail_1;
493
494 bp->bv2 = kmalloc(bio->bi_max_vecs * sizeof(struct bio_vec),
495 GFP_ATOMIC);
496 if (!bp->bv2)
497 goto split_fail_2;
498
499 memcpy(bp->bv1, bio->bi_io_vec,
500 bio->bi_max_vecs * sizeof(struct bio_vec));
501 memcpy(bp->bv2, bio->bi_io_vec,
502 bio->bi_max_vecs * sizeof(struct bio_vec));
503
504 bp->b1.bi_io_vec = bp->bv1;
505 bp->b2.bi_io_vec = bp->bv2;
506 bp->b2.bi_io_vec[idx].bv_offset += offset;
507 bp->b2.bi_io_vec[idx].bv_len -= offset;
508 bp->b1.bi_io_vec[idx].bv_len = offset;
509 bp->b1.bi_vcnt++;
510 } else
511 bp->bv1 = bp->bv2 = NULL;
512
513 bp->b1.bi_private = bp;
514 bp->b2.bi_private = bp;
515
516 bp->b1.bi_end_io = nvme_bio_pair_endio;
517 bp->b2.bi_end_io = nvme_bio_pair_endio;
518
519 bp->parent = bio;
520 atomic_set(&bp->cnt, 2);
521
522 return bp;
523
524 split_fail_2:
525 kfree(bp->bv1);
526 split_fail_1:
527 kfree(bp);
528 return NULL;
529}
530
531static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq, 444static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
532 int idx, int len, int offset) 445 int len)
533{ 446{
534 struct nvme_bio_pair *bp = nvme_bio_split(bio, idx, len, offset); 447 struct bio *split = bio_split(bio, len >> 9, GFP_ATOMIC, NULL);
535 if (!bp) 448 if (!split)
536 return -ENOMEM; 449 return -ENOMEM;
537 450
451 bio_chain(split, bio);
452
538 if (bio_list_empty(&nvmeq->sq_cong)) 453 if (bio_list_empty(&nvmeq->sq_cong))
539 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait); 454 add_wait_queue(&nvmeq->sq_full, &nvmeq->sq_cong_wait);
540 bio_list_add(&nvmeq->sq_cong, &bp->b1); 455 bio_list_add(&nvmeq->sq_cong, split);
541 bio_list_add(&nvmeq->sq_cong, &bp->b2); 456 bio_list_add(&nvmeq->sq_cong, bio);
542 457
543 return 0; 458 return 0;
544} 459}
@@ -550,41 +465,44 @@ static int nvme_split_and_submit(struct bio *bio, struct nvme_queue *nvmeq,
550static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod, 465static int nvme_map_bio(struct nvme_queue *nvmeq, struct nvme_iod *iod,
551 struct bio *bio, enum dma_data_direction dma_dir, int psegs) 466 struct bio *bio, enum dma_data_direction dma_dir, int psegs)
552{ 467{
553 struct bio_vec *bvec, *bvprv = NULL; 468 struct bio_vec bvec, bvprv;
469 struct bvec_iter iter;
554 struct scatterlist *sg = NULL; 470 struct scatterlist *sg = NULL;
555 int i, length = 0, nsegs = 0, split_len = bio->bi_size; 471 int length = 0, nsegs = 0, split_len = bio->bi_iter.bi_size;
472 int first = 1;
556 473
557 if (nvmeq->dev->stripe_size) 474 if (nvmeq->dev->stripe_size)
558 split_len = nvmeq->dev->stripe_size - 475 split_len = nvmeq->dev->stripe_size -
559 ((bio->bi_sector << 9) & (nvmeq->dev->stripe_size - 1)); 476 ((bio->bi_iter.bi_sector << 9) &
477 (nvmeq->dev->stripe_size - 1));
560 478
561 sg_init_table(iod->sg, psegs); 479 sg_init_table(iod->sg, psegs);
562 bio_for_each_segment(bvec, bio, i) { 480 bio_for_each_segment(bvec, bio, iter) {
563 if (bvprv && BIOVEC_PHYS_MERGEABLE(bvprv, bvec)) { 481 if (!first && BIOVEC_PHYS_MERGEABLE(&bvprv, &bvec)) {
564 sg->length += bvec->bv_len; 482 sg->length += bvec.bv_len;
565 } else { 483 } else {
566 if (bvprv && BIOVEC_NOT_VIRT_MERGEABLE(bvprv, bvec)) 484 if (!first && BIOVEC_NOT_VIRT_MERGEABLE(&bvprv, &bvec))
567 return nvme_split_and_submit(bio, nvmeq, i, 485 return nvme_split_and_submit(bio, nvmeq,
568 length, 0); 486 length);
569 487
570 sg = sg ? sg + 1 : iod->sg; 488 sg = sg ? sg + 1 : iod->sg;
571 sg_set_page(sg, bvec->bv_page, bvec->bv_len, 489 sg_set_page(sg, bvec.bv_page,
572 bvec->bv_offset); 490 bvec.bv_len, bvec.bv_offset);
573 nsegs++; 491 nsegs++;
574 } 492 }
575 493
576 if (split_len - length < bvec->bv_len) 494 if (split_len - length < bvec.bv_len)
577 return nvme_split_and_submit(bio, nvmeq, i, split_len, 495 return nvme_split_and_submit(bio, nvmeq, split_len);
578 split_len - length); 496 length += bvec.bv_len;
579 length += bvec->bv_len;
580 bvprv = bvec; 497 bvprv = bvec;
498 first = 0;
581 } 499 }
582 iod->nents = nsegs; 500 iod->nents = nsegs;
583 sg_mark_end(sg); 501 sg_mark_end(sg);
584 if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0) 502 if (dma_map_sg(nvmeq->q_dmadev, iod->sg, iod->nents, dma_dir) == 0)
585 return -ENOMEM; 503 return -ENOMEM;
586 504
587 BUG_ON(length != bio->bi_size); 505 BUG_ON(length != bio->bi_iter.bi_size);
588 return length; 506 return length;
589} 507}
590 508
@@ -608,8 +526,8 @@ static int nvme_submit_discard(struct nvme_queue *nvmeq, struct nvme_ns *ns,
608 iod->npages = 0; 526 iod->npages = 0;
609 527
610 range->cattr = cpu_to_le32(0); 528 range->cattr = cpu_to_le32(0);
611 range->nlb = cpu_to_le32(bio->bi_size >> ns->lba_shift); 529 range->nlb = cpu_to_le32(bio->bi_iter.bi_size >> ns->lba_shift);
612 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); 530 range->slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
613 531
614 memset(cmnd, 0, sizeof(*cmnd)); 532 memset(cmnd, 0, sizeof(*cmnd));
615 cmnd->dsm.opcode = nvme_cmd_dsm; 533 cmnd->dsm.opcode = nvme_cmd_dsm;
@@ -674,7 +592,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
674 } 592 }
675 593
676 result = -ENOMEM; 594 result = -ENOMEM;
677 iod = nvme_alloc_iod(psegs, bio->bi_size, GFP_ATOMIC); 595 iod = nvme_alloc_iod(psegs, bio->bi_iter.bi_size, GFP_ATOMIC);
678 if (!iod) 596 if (!iod)
679 goto nomem; 597 goto nomem;
680 iod->private = bio; 598 iod->private = bio;
@@ -723,7 +641,7 @@ static int nvme_submit_bio_queue(struct nvme_queue *nvmeq, struct nvme_ns *ns,
723 cmnd->rw.nsid = cpu_to_le32(ns->ns_id); 641 cmnd->rw.nsid = cpu_to_le32(ns->ns_id);
724 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length, 642 length = nvme_setup_prps(nvmeq->dev, &cmnd->common, iod, length,
725 GFP_ATOMIC); 643 GFP_ATOMIC);
726 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_sector)); 644 cmnd->rw.slba = cpu_to_le64(nvme_block_nr(ns, bio->bi_iter.bi_sector));
727 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1); 645 cmnd->rw.length = cpu_to_le16((length >> ns->lba_shift) - 1);
728 cmnd->rw.control = cpu_to_le16(control); 646 cmnd->rw.control = cpu_to_le16(control);
729 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt); 647 cmnd->rw.dsmgmt = cpu_to_le32(dsmgmt);
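
Editorial sketch (hypothetical queueing function, mirroring the nvme change above): splitting a bio at an arbitrary byte count no longer needs a private "bio pair" structure — bio_split() clones the front, bio_chain() makes the parent's completion wait for the split, and both halves are resubmitted. The resubmit() callback and bio_set parameter are assumptions for illustration.

#include <linux/bio.h>

static int split_and_resubmit(struct bio *bio, unsigned int bytes,
			      struct bio_set *bs,
			      void (*resubmit)(struct bio *))
{
	struct bio *front = bio_split(bio, bytes >> 9, GFP_ATOMIC, bs);

	if (!front)
		return -ENOMEM;

	bio_chain(front, bio);	/* ties the parent's completion to 'front'    */

	resubmit(front);	/* the first part, up to the split point      */
	resubmit(bio);		/* the rest; its iterator is already advanced */
	return 0;
}
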
diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
index ff8668c5efb1..3dda09a5ec41 100644
--- a/drivers/block/pktcdvd.c
+++ b/drivers/block/pktcdvd.c
@@ -651,7 +651,7 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
651 651
652 for (;;) { 652 for (;;) {
653 tmp = rb_entry(n, struct pkt_rb_node, rb_node); 653 tmp = rb_entry(n, struct pkt_rb_node, rb_node);
654 if (s <= tmp->bio->bi_sector) 654 if (s <= tmp->bio->bi_iter.bi_sector)
655 next = n->rb_left; 655 next = n->rb_left;
656 else 656 else
657 next = n->rb_right; 657 next = n->rb_right;
@@ -660,12 +660,12 @@ static struct pkt_rb_node *pkt_rbtree_find(struct pktcdvd_device *pd, sector_t s
660 n = next; 660 n = next;
661 } 661 }
662 662
663 if (s > tmp->bio->bi_sector) { 663 if (s > tmp->bio->bi_iter.bi_sector) {
664 tmp = pkt_rbtree_next(tmp); 664 tmp = pkt_rbtree_next(tmp);
665 if (!tmp) 665 if (!tmp)
666 return NULL; 666 return NULL;
667 } 667 }
668 BUG_ON(s > tmp->bio->bi_sector); 668 BUG_ON(s > tmp->bio->bi_iter.bi_sector);
669 return tmp; 669 return tmp;
670} 670}
671 671
@@ -676,13 +676,13 @@ static void pkt_rbtree_insert(struct pktcdvd_device *pd, struct pkt_rb_node *nod
676{ 676{
677 struct rb_node **p = &pd->bio_queue.rb_node; 677 struct rb_node **p = &pd->bio_queue.rb_node;
678 struct rb_node *parent = NULL; 678 struct rb_node *parent = NULL;
679 sector_t s = node->bio->bi_sector; 679 sector_t s = node->bio->bi_iter.bi_sector;
680 struct pkt_rb_node *tmp; 680 struct pkt_rb_node *tmp;
681 681
682 while (*p) { 682 while (*p) {
683 parent = *p; 683 parent = *p;
684 tmp = rb_entry(parent, struct pkt_rb_node, rb_node); 684 tmp = rb_entry(parent, struct pkt_rb_node, rb_node);
685 if (s < tmp->bio->bi_sector) 685 if (s < tmp->bio->bi_iter.bi_sector)
686 p = &(*p)->rb_left; 686 p = &(*p)->rb_left;
687 else 687 else
688 p = &(*p)->rb_right; 688 p = &(*p)->rb_right;
@@ -857,7 +857,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
857 spin_lock(&pd->iosched.lock); 857 spin_lock(&pd->iosched.lock);
858 bio = bio_list_peek(&pd->iosched.write_queue); 858 bio = bio_list_peek(&pd->iosched.write_queue);
859 spin_unlock(&pd->iosched.lock); 859 spin_unlock(&pd->iosched.lock);
860 if (bio && (bio->bi_sector == pd->iosched.last_write)) 860 if (bio && (bio->bi_iter.bi_sector ==
861 pd->iosched.last_write))
861 need_write_seek = 0; 862 need_write_seek = 0;
862 if (need_write_seek && reads_queued) { 863 if (need_write_seek && reads_queued) {
863 if (atomic_read(&pd->cdrw.pending_bios) > 0) { 864 if (atomic_read(&pd->cdrw.pending_bios) > 0) {
@@ -888,7 +889,8 @@ static void pkt_iosched_process_queue(struct pktcdvd_device *pd)
888 continue; 889 continue;
889 890
890 if (bio_data_dir(bio) == READ) 891 if (bio_data_dir(bio) == READ)
891 pd->iosched.successive_reads += bio->bi_size >> 10; 892 pd->iosched.successive_reads +=
893 bio->bi_iter.bi_size >> 10;
892 else { 894 else {
893 pd->iosched.successive_reads = 0; 895 pd->iosched.successive_reads = 0;
894 pd->iosched.last_write = bio_end_sector(bio); 896 pd->iosched.last_write = bio_end_sector(bio);
@@ -978,7 +980,7 @@ static void pkt_end_io_read(struct bio *bio, int err)
978 980
979 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n", 981 pkt_dbg(2, pd, "bio=%p sec0=%llx sec=%llx err=%d\n",
980 bio, (unsigned long long)pkt->sector, 982 bio, (unsigned long long)pkt->sector,
981 (unsigned long long)bio->bi_sector, err); 983 (unsigned long long)bio->bi_iter.bi_sector, err);
982 984
983 if (err) 985 if (err)
984 atomic_inc(&pkt->io_errors); 986 atomic_inc(&pkt->io_errors);
@@ -1026,8 +1028,9 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1026 memset(written, 0, sizeof(written)); 1028 memset(written, 0, sizeof(written));
1027 spin_lock(&pkt->lock); 1029 spin_lock(&pkt->lock);
1028 bio_list_for_each(bio, &pkt->orig_bios) { 1030 bio_list_for_each(bio, &pkt->orig_bios) {
1029 int first_frame = (bio->bi_sector - pkt->sector) / (CD_FRAMESIZE >> 9); 1031 int first_frame = (bio->bi_iter.bi_sector - pkt->sector) /
1030 int num_frames = bio->bi_size / CD_FRAMESIZE; 1032 (CD_FRAMESIZE >> 9);
1033 int num_frames = bio->bi_iter.bi_size / CD_FRAMESIZE;
1031 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9); 1034 pd->stats.secs_w += num_frames * (CD_FRAMESIZE >> 9);
1032 BUG_ON(first_frame < 0); 1035 BUG_ON(first_frame < 0);
1033 BUG_ON(first_frame + num_frames > pkt->frames); 1036 BUG_ON(first_frame + num_frames > pkt->frames);
@@ -1053,7 +1056,7 @@ static void pkt_gather_data(struct pktcdvd_device *pd, struct packet_data *pkt)
1053 1056
1054 bio = pkt->r_bios[f]; 1057 bio = pkt->r_bios[f];
1055 bio_reset(bio); 1058 bio_reset(bio);
1056 bio->bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9); 1059 bio->bi_iter.bi_sector = pkt->sector + f * (CD_FRAMESIZE >> 9);
1057 bio->bi_bdev = pd->bdev; 1060 bio->bi_bdev = pd->bdev;
1058 bio->bi_end_io = pkt_end_io_read; 1061 bio->bi_end_io = pkt_end_io_read;
1059 bio->bi_private = pkt; 1062 bio->bi_private = pkt;
@@ -1150,8 +1153,8 @@ static int pkt_start_recovery(struct packet_data *pkt)
1150 bio_reset(pkt->bio); 1153 bio_reset(pkt->bio);
1151 pkt->bio->bi_bdev = pd->bdev; 1154 pkt->bio->bi_bdev = pd->bdev;
1152 pkt->bio->bi_rw = REQ_WRITE; 1155 pkt->bio->bi_rw = REQ_WRITE;
1153 pkt->bio->bi_sector = new_sector; 1156 pkt->bio->bi_iter.bi_sector = new_sector;
1154 pkt->bio->bi_size = pkt->frames * CD_FRAMESIZE; 1157 pkt->bio->bi_iter.bi_size = pkt->frames * CD_FRAMESIZE;
1155 pkt->bio->bi_vcnt = pkt->frames; 1158 pkt->bio->bi_vcnt = pkt->frames;
1156 1159
1157 pkt->bio->bi_end_io = pkt_end_io_packet_write; 1160 pkt->bio->bi_end_io = pkt_end_io_packet_write;
@@ -1213,7 +1216,7 @@ static int pkt_handle_queue(struct pktcdvd_device *pd)
1213 node = first_node; 1216 node = first_node;
1214 while (node) { 1217 while (node) {
1215 bio = node->bio; 1218 bio = node->bio;
1216 zone = get_zone(bio->bi_sector, pd); 1219 zone = get_zone(bio->bi_iter.bi_sector, pd);
1217 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) { 1220 list_for_each_entry(p, &pd->cdrw.pkt_active_list, list) {
1218 if (p->sector == zone) { 1221 if (p->sector == zone) {
1219 bio = NULL; 1222 bio = NULL;
@@ -1252,14 +1255,14 @@ try_next_bio:
1252 pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone); 1255 pkt_dbg(2, pd, "looking for zone %llx\n", (unsigned long long)zone);
1253 while ((node = pkt_rbtree_find(pd, zone)) != NULL) { 1256 while ((node = pkt_rbtree_find(pd, zone)) != NULL) {
1254 bio = node->bio; 1257 bio = node->bio;
1255 pkt_dbg(2, pd, "found zone=%llx\n", 1258 pkt_dbg(2, pd, "found zone=%llx\n", (unsigned long long)
1256 (unsigned long long)get_zone(bio->bi_sector, pd)); 1259 get_zone(bio->bi_iter.bi_sector, pd));
1257 if (get_zone(bio->bi_sector, pd) != zone) 1260 if (get_zone(bio->bi_iter.bi_sector, pd) != zone)
1258 break; 1261 break;
1259 pkt_rbtree_erase(pd, node); 1262 pkt_rbtree_erase(pd, node);
1260 spin_lock(&pkt->lock); 1263 spin_lock(&pkt->lock);
1261 bio_list_add(&pkt->orig_bios, bio); 1264 bio_list_add(&pkt->orig_bios, bio);
1262 pkt->write_size += bio->bi_size / CD_FRAMESIZE; 1265 pkt->write_size += bio->bi_iter.bi_size / CD_FRAMESIZE;
1263 spin_unlock(&pkt->lock); 1266 spin_unlock(&pkt->lock);
1264 } 1267 }
1265 /* check write congestion marks, and if bio_queue_size is 1268 /* check write congestion marks, and if bio_queue_size is
@@ -1293,7 +1296,7 @@ static void pkt_start_write(struct pktcdvd_device *pd, struct packet_data *pkt)
1293 struct bio_vec *bvec = pkt->w_bio->bi_io_vec; 1296 struct bio_vec *bvec = pkt->w_bio->bi_io_vec;
1294 1297
1295 bio_reset(pkt->w_bio); 1298 bio_reset(pkt->w_bio);
1296 pkt->w_bio->bi_sector = pkt->sector; 1299 pkt->w_bio->bi_iter.bi_sector = pkt->sector;
1297 pkt->w_bio->bi_bdev = pd->bdev; 1300 pkt->w_bio->bi_bdev = pd->bdev;
1298 pkt->w_bio->bi_end_io = pkt_end_io_packet_write; 1301 pkt->w_bio->bi_end_io = pkt_end_io_packet_write;
1299 pkt->w_bio->bi_private = pkt; 1302 pkt->w_bio->bi_private = pkt;
@@ -2335,75 +2338,29 @@ static void pkt_end_io_read_cloned(struct bio *bio, int err)
2335 pkt_bio_finished(pd); 2338 pkt_bio_finished(pd);
2336} 2339}
2337 2340
2338static void pkt_make_request(struct request_queue *q, struct bio *bio) 2341static void pkt_make_request_read(struct pktcdvd_device *pd, struct bio *bio)
2339{ 2342{
2340 struct pktcdvd_device *pd; 2343 struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
2341 char b[BDEVNAME_SIZE]; 2344 struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
2345
2346 psd->pd = pd;
2347 psd->bio = bio;
2348 cloned_bio->bi_bdev = pd->bdev;
2349 cloned_bio->bi_private = psd;
2350 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2351 pd->stats.secs_r += bio_sectors(bio);
2352 pkt_queue_bio(pd, cloned_bio);
2353}
2354
2355static void pkt_make_request_write(struct request_queue *q, struct bio *bio)
2356{
2357 struct pktcdvd_device *pd = q->queuedata;
2342 sector_t zone; 2358 sector_t zone;
2343 struct packet_data *pkt; 2359 struct packet_data *pkt;
2344 int was_empty, blocked_bio; 2360 int was_empty, blocked_bio;
2345 struct pkt_rb_node *node; 2361 struct pkt_rb_node *node;
2346 2362
2347 pd = q->queuedata; 2363 zone = get_zone(bio->bi_iter.bi_sector, pd);
2348 if (!pd) {
2349 pr_err("%s incorrect request queue\n",
2350 bdevname(bio->bi_bdev, b));
2351 goto end_io;
2352 }
2353
2354 /*
2355 * Clone READ bios so we can have our own bi_end_io callback.
2356 */
2357 if (bio_data_dir(bio) == READ) {
2358 struct bio *cloned_bio = bio_clone(bio, GFP_NOIO);
2359 struct packet_stacked_data *psd = mempool_alloc(psd_pool, GFP_NOIO);
2360
2361 psd->pd = pd;
2362 psd->bio = bio;
2363 cloned_bio->bi_bdev = pd->bdev;
2364 cloned_bio->bi_private = psd;
2365 cloned_bio->bi_end_io = pkt_end_io_read_cloned;
2366 pd->stats.secs_r += bio_sectors(bio);
2367 pkt_queue_bio(pd, cloned_bio);
2368 return;
2369 }
2370
2371 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2372 pkt_notice(pd, "WRITE for ro device (%llu)\n",
2373 (unsigned long long)bio->bi_sector);
2374 goto end_io;
2375 }
2376
2377 if (!bio->bi_size || (bio->bi_size % CD_FRAMESIZE)) {
2378 pkt_err(pd, "wrong bio size\n");
2379 goto end_io;
2380 }
2381
2382 blk_queue_bounce(q, &bio);
2383
2384 zone = get_zone(bio->bi_sector, pd);
2385 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2386 (unsigned long long)bio->bi_sector,
2387 (unsigned long long)bio_end_sector(bio));
2388
2389 /* Check if we have to split the bio */
2390 {
2391 struct bio_pair *bp;
2392 sector_t last_zone;
2393 int first_sectors;
2394
2395 last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2396 if (last_zone != zone) {
2397 BUG_ON(last_zone != zone + pd->settings.size);
2398 first_sectors = last_zone - bio->bi_sector;
2399 bp = bio_split(bio, first_sectors);
2400 BUG_ON(!bp);
2401 pkt_make_request(q, &bp->bio1);
2402 pkt_make_request(q, &bp->bio2);
2403 bio_pair_release(bp);
2404 return;
2405 }
2406 }
2407 2364
2408 /* 2365 /*
2409 * If we find a matching packet in state WAITING or READ_WAIT, we can 2366 * If we find a matching packet in state WAITING or READ_WAIT, we can
@@ -2417,7 +2374,8 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
2417 if ((pkt->state == PACKET_WAITING_STATE) || 2374 if ((pkt->state == PACKET_WAITING_STATE) ||
2418 (pkt->state == PACKET_READ_WAIT_STATE)) { 2375 (pkt->state == PACKET_READ_WAIT_STATE)) {
2419 bio_list_add(&pkt->orig_bios, bio); 2376 bio_list_add(&pkt->orig_bios, bio);
2420 pkt->write_size += bio->bi_size / CD_FRAMESIZE; 2377 pkt->write_size +=
2378 bio->bi_iter.bi_size / CD_FRAMESIZE;
2421 if ((pkt->write_size >= pkt->frames) && 2379 if ((pkt->write_size >= pkt->frames) &&
2422 (pkt->state == PACKET_WAITING_STATE)) { 2380 (pkt->state == PACKET_WAITING_STATE)) {
2423 atomic_inc(&pkt->run_sm); 2381 atomic_inc(&pkt->run_sm);
@@ -2476,6 +2434,64 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
2476 */ 2434 */
2477 wake_up(&pd->wqueue); 2435 wake_up(&pd->wqueue);
2478 } 2436 }
2437}
2438
2439static void pkt_make_request(struct request_queue *q, struct bio *bio)
2440{
2441 struct pktcdvd_device *pd;
2442 char b[BDEVNAME_SIZE];
2443 struct bio *split;
2444
2445 pd = q->queuedata;
2446 if (!pd) {
2447 pr_err("%s incorrect request queue\n",
2448 bdevname(bio->bi_bdev, b));
2449 goto end_io;
2450 }
2451
2452 pkt_dbg(2, pd, "start = %6llx stop = %6llx\n",
2453 (unsigned long long)bio->bi_iter.bi_sector,
2454 (unsigned long long)bio_end_sector(bio));
2455
2456 /*
2457 * Clone READ bios so we can have our own bi_end_io callback.
2458 */
2459 if (bio_data_dir(bio) == READ) {
2460 pkt_make_request_read(pd, bio);
2461 return;
2462 }
2463
2464 if (!test_bit(PACKET_WRITABLE, &pd->flags)) {
2465 pkt_notice(pd, "WRITE for ro device (%llu)\n",
2466 (unsigned long long)bio->bi_iter.bi_sector);
2467 goto end_io;
2468 }
2469
2470 if (!bio->bi_iter.bi_size || (bio->bi_iter.bi_size % CD_FRAMESIZE)) {
2471 pkt_err(pd, "wrong bio size\n");
2472 goto end_io;
2473 }
2474
2475 blk_queue_bounce(q, &bio);
2476
2477 do {
2478 sector_t zone = get_zone(bio->bi_iter.bi_sector, pd);
2479 sector_t last_zone = get_zone(bio_end_sector(bio) - 1, pd);
2480
2481 if (last_zone != zone) {
2482 BUG_ON(last_zone != zone + pd->settings.size);
2483
2484 split = bio_split(bio, last_zone -
2485 bio->bi_iter.bi_sector,
2486 GFP_NOIO, fs_bio_set);
2487 bio_chain(split, bio);
2488 } else {
2489 split = bio;
2490 }
2491
2492 pkt_make_request_write(q, split);
2493 } while (split != bio);
2494
2479 return; 2495 return;
2480end_io: 2496end_io:
2481 bio_io_error(bio); 2497 bio_io_error(bio);
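
Editorial sketch (not from the patch): the new pkt_make_request() replaces bio_pair splitting with a loop that peels pieces off the front of the bio until the remainder fits in one zone. The generalized shape is below; boundary_sectors (assumed to be a power of two so round_up() applies) and submit_piece() are assumptions for illustration.

#include <linux/bio.h>
#include <linux/kernel.h>

static void submit_in_pieces(struct bio *bio, sector_t boundary_sectors,
			     void (*submit_piece)(struct bio *))
{
	struct bio *split;

	do {
		sector_t start = bio->bi_iter.bi_sector;
		sector_t next = round_up(start + 1, boundary_sectors);

		if (bio_end_sector(bio) > next) {
			/* peel off everything up to the boundary */
			split = bio_split(bio, next - start, GFP_NOIO,
					  fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;	/* last piece */
		}

		submit_piece(split);
	} while (split != bio);
}
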
diff --git a/drivers/block/ps3disk.c b/drivers/block/ps3disk.c
index d754a88d7585..c120d70d3fb3 100644
--- a/drivers/block/ps3disk.c
+++ b/drivers/block/ps3disk.c
@@ -94,26 +94,25 @@ static void ps3disk_scatter_gather(struct ps3_storage_device *dev,
94{ 94{
95 unsigned int offset = 0; 95 unsigned int offset = 0;
96 struct req_iterator iter; 96 struct req_iterator iter;
97 struct bio_vec *bvec; 97 struct bio_vec bvec;
98 unsigned int i = 0; 98 unsigned int i = 0;
99 size_t size; 99 size_t size;
100 void *buf; 100 void *buf;
101 101
102 rq_for_each_segment(bvec, req, iter) { 102 rq_for_each_segment(bvec, req, iter) {
103 unsigned long flags; 103 unsigned long flags;
104 dev_dbg(&dev->sbd.core, 104 dev_dbg(&dev->sbd.core, "%s:%u: bio %u: %u sectors from %lu\n",
105 "%s:%u: bio %u: %u segs %u sectors from %lu\n", 105 __func__, __LINE__, i, bio_sectors(iter.bio),
106 __func__, __LINE__, i, bio_segments(iter.bio), 106 iter.bio->bi_iter.bi_sector);
107 bio_sectors(iter.bio), iter.bio->bi_sector);
108 107
109 size = bvec->bv_len; 108 size = bvec.bv_len;
110 buf = bvec_kmap_irq(bvec, &flags); 109 buf = bvec_kmap_irq(&bvec, &flags);
111 if (gather) 110 if (gather)
112 memcpy(dev->bounce_buf+offset, buf, size); 111 memcpy(dev->bounce_buf+offset, buf, size);
113 else 112 else
114 memcpy(buf, dev->bounce_buf+offset, size); 113 memcpy(buf, dev->bounce_buf+offset, size);
115 offset += size; 114 offset += size;
116 flush_kernel_dcache_page(bvec->bv_page); 115 flush_kernel_dcache_page(bvec.bv_page);
117 bvec_kunmap_irq(buf, &flags); 116 bvec_kunmap_irq(buf, &flags);
118 i++; 117 i++;
119 } 118 }
@@ -130,7 +129,7 @@ static int ps3disk_submit_request_sg(struct ps3_storage_device *dev,
130 129
131#ifdef DEBUG 130#ifdef DEBUG
132 unsigned int n = 0; 131 unsigned int n = 0;
133 struct bio_vec *bv; 132 struct bio_vec bv;
134 struct req_iterator iter; 133 struct req_iterator iter;
135 134
136 rq_for_each_segment(bv, req, iter) 135 rq_for_each_segment(bv, req, iter)
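
Editorial sketch (not from the patch): bvec_kmap_irq() and bvec_kunmap_irq() keep taking a struct bio_vec *, so drivers pass the address of the on-stack copy, as ps3disk_scatter_gather() does above. The bounce-buffer gather below is hypothetical.

#include <linux/blkdev.h>
#include <linux/highmem.h>
#include <linux/string.h>

/* Hypothetical gather: copy a request's data into a contiguous bounce buffer. */
static void gather_to_bounce(struct request *req, void *bounce)
{
	struct req_iterator iter;
	struct bio_vec bvec;
	unsigned int offset = 0;

	rq_for_each_segment(bvec, req, iter) {
		unsigned long flags;
		void *buf = bvec_kmap_irq(&bvec, &flags);

		memcpy(bounce + offset, buf, bvec.bv_len);
		offset += bvec.bv_len;
		flush_kernel_dcache_page(bvec.bv_page);
		bvec_kunmap_irq(buf, &flags);
	}
}
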
diff --git a/drivers/block/ps3vram.c b/drivers/block/ps3vram.c
index 06a2e53e5f37..ef45cfb98fd2 100644
--- a/drivers/block/ps3vram.c
+++ b/drivers/block/ps3vram.c
@@ -553,16 +553,16 @@ static struct bio *ps3vram_do_bio(struct ps3_system_bus_device *dev,
553 struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev); 553 struct ps3vram_priv *priv = ps3_system_bus_get_drvdata(dev);
554 int write = bio_data_dir(bio) == WRITE; 554 int write = bio_data_dir(bio) == WRITE;
555 const char *op = write ? "write" : "read"; 555 const char *op = write ? "write" : "read";
556 loff_t offset = bio->bi_sector << 9; 556 loff_t offset = bio->bi_iter.bi_sector << 9;
557 int error = 0; 557 int error = 0;
558 struct bio_vec *bvec; 558 struct bio_vec bvec;
559 unsigned int i; 559 struct bvec_iter iter;
560 struct bio *next; 560 struct bio *next;
561 561
562 bio_for_each_segment(bvec, bio, i) { 562 bio_for_each_segment(bvec, bio, iter) {
563 /* PS3 is ppc64, so we don't handle highmem */ 563 /* PS3 is ppc64, so we don't handle highmem */
564 char *ptr = page_address(bvec->bv_page) + bvec->bv_offset; 564 char *ptr = page_address(bvec.bv_page) + bvec.bv_offset;
565 size_t len = bvec->bv_len, retlen; 565 size_t len = bvec.bv_len, retlen;
566 566
567 dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op, 567 dev_dbg(&dev->core, " %s %zu bytes at offset %llu\n", op,
568 len, offset); 568 len, offset);
diff --git a/drivers/block/rbd.c b/drivers/block/rbd.c
index cb1db2979d3d..3624368b910d 100644
--- a/drivers/block/rbd.c
+++ b/drivers/block/rbd.c
@@ -1109,23 +1109,23 @@ static void bio_chain_put(struct bio *chain)
1109 */ 1109 */
1110static void zero_bio_chain(struct bio *chain, int start_ofs) 1110static void zero_bio_chain(struct bio *chain, int start_ofs)
1111{ 1111{
1112 struct bio_vec *bv; 1112 struct bio_vec bv;
1113 struct bvec_iter iter;
1113 unsigned long flags; 1114 unsigned long flags;
1114 void *buf; 1115 void *buf;
1115 int i;
1116 int pos = 0; 1116 int pos = 0;
1117 1117
1118 while (chain) { 1118 while (chain) {
1119 bio_for_each_segment(bv, chain, i) { 1119 bio_for_each_segment(bv, chain, iter) {
1120 if (pos + bv->bv_len > start_ofs) { 1120 if (pos + bv.bv_len > start_ofs) {
1121 int remainder = max(start_ofs - pos, 0); 1121 int remainder = max(start_ofs - pos, 0);
1122 buf = bvec_kmap_irq(bv, &flags); 1122 buf = bvec_kmap_irq(&bv, &flags);
1123 memset(buf + remainder, 0, 1123 memset(buf + remainder, 0,
1124 bv->bv_len - remainder); 1124 bv.bv_len - remainder);
1125 flush_dcache_page(bv->bv_page); 1125 flush_dcache_page(bv.bv_page);
1126 bvec_kunmap_irq(buf, &flags); 1126 bvec_kunmap_irq(buf, &flags);
1127 } 1127 }
1128 pos += bv->bv_len; 1128 pos += bv.bv_len;
1129 } 1129 }
1130 1130
1131 chain = chain->bi_next; 1131 chain = chain->bi_next;
@@ -1173,74 +1173,14 @@ static struct bio *bio_clone_range(struct bio *bio_src,
1173 unsigned int len, 1173 unsigned int len,
1174 gfp_t gfpmask) 1174 gfp_t gfpmask)
1175{ 1175{
1176 struct bio_vec *bv;
1177 unsigned int resid;
1178 unsigned short idx;
1179 unsigned int voff;
1180 unsigned short end_idx;
1181 unsigned short vcnt;
1182 struct bio *bio; 1176 struct bio *bio;
1183 1177
1184 /* Handle the easy case for the caller */ 1178 bio = bio_clone(bio_src, gfpmask);
1185
1186 if (!offset && len == bio_src->bi_size)
1187 return bio_clone(bio_src, gfpmask);
1188
1189 if (WARN_ON_ONCE(!len))
1190 return NULL;
1191 if (WARN_ON_ONCE(len > bio_src->bi_size))
1192 return NULL;
1193 if (WARN_ON_ONCE(offset > bio_src->bi_size - len))
1194 return NULL;
1195
1196 /* Find first affected segment... */
1197
1198 resid = offset;
1199 bio_for_each_segment(bv, bio_src, idx) {
1200 if (resid < bv->bv_len)
1201 break;
1202 resid -= bv->bv_len;
1203 }
1204 voff = resid;
1205
1206 /* ...and the last affected segment */
1207
1208 resid += len;
1209 __bio_for_each_segment(bv, bio_src, end_idx, idx) {
1210 if (resid <= bv->bv_len)
1211 break;
1212 resid -= bv->bv_len;
1213 }
1214 vcnt = end_idx - idx + 1;
1215
1216 /* Build the clone */
1217
1218 bio = bio_alloc(gfpmask, (unsigned int) vcnt);
1219 if (!bio) 1179 if (!bio)
1220 return NULL; /* ENOMEM */ 1180 return NULL; /* ENOMEM */
1221 1181
1222 bio->bi_bdev = bio_src->bi_bdev; 1182 bio_advance(bio, offset);
1223 bio->bi_sector = bio_src->bi_sector + (offset >> SECTOR_SHIFT); 1183 bio->bi_iter.bi_size = len;
1224 bio->bi_rw = bio_src->bi_rw;
1225 bio->bi_flags |= 1 << BIO_CLONED;
1226
1227 /*
1228 * Copy over our part of the bio_vec, then update the first
1229 * and last (or only) entries.
1230 */
1231 memcpy(&bio->bi_io_vec[0], &bio_src->bi_io_vec[idx],
1232 vcnt * sizeof (struct bio_vec));
1233 bio->bi_io_vec[0].bv_offset += voff;
1234 if (vcnt > 1) {
1235 bio->bi_io_vec[0].bv_len -= voff;
1236 bio->bi_io_vec[vcnt - 1].bv_len = resid;
1237 } else {
1238 bio->bi_io_vec[0].bv_len = len;
1239 }
1240
1241 bio->bi_vcnt = vcnt;
1242 bio->bi_size = len;
1243 bio->bi_idx = 0;
1244 1184
1245 return bio; 1185 return bio;
1246} 1186}
@@ -1271,7 +1211,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
1271 1211
1272 /* Build up a chain of clone bios up to the limit */ 1212 /* Build up a chain of clone bios up to the limit */
1273 1213
1274 if (!bi || off >= bi->bi_size || !len) 1214 if (!bi || off >= bi->bi_iter.bi_size || !len)
1275 return NULL; /* Nothing to clone */ 1215 return NULL; /* Nothing to clone */
1276 1216
1277 end = &chain; 1217 end = &chain;
@@ -1283,7 +1223,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
1283 rbd_warn(NULL, "bio_chain exhausted with %u left", len); 1223 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1284 goto out_err; /* EINVAL; ran out of bio's */ 1224 goto out_err; /* EINVAL; ran out of bio's */
1285 } 1225 }
1286 bi_size = min_t(unsigned int, bi->bi_size - off, len); 1226 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
1287 bio = bio_clone_range(bi, off, bi_size, gfpmask); 1227 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1288 if (!bio) 1228 if (!bio)
1289 goto out_err; /* ENOMEM */ 1229 goto out_err; /* ENOMEM */
@@ -1292,7 +1232,7 @@ static struct bio *bio_chain_clone_range(struct bio **bio_src,
1292 end = &bio->bi_next; 1232 end = &bio->bi_next;
1293 1233
1294 off += bi_size; 1234 off += bi_size;
1295 if (off == bi->bi_size) { 1235 if (off == bi->bi_iter.bi_size) {
1296 bi = bi->bi_next; 1236 bi = bi->bi_next;
1297 off = 0; 1237 off = 0;
1298 } 1238 }
@@ -2186,7 +2126,8 @@ static int rbd_img_request_fill(struct rbd_img_request *img_request,
2186 2126
2187 if (type == OBJ_REQUEST_BIO) { 2127 if (type == OBJ_REQUEST_BIO) {
2188 bio_list = data_desc; 2128 bio_list = data_desc;
2189 rbd_assert(img_offset == bio_list->bi_sector << SECTOR_SHIFT); 2129 rbd_assert(img_offset ==
2130 bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
2190 } else { 2131 } else {
2191 rbd_assert(type == OBJ_REQUEST_PAGES); 2132 rbd_assert(type == OBJ_REQUEST_PAGES);
2192 pages = data_desc; 2133 pages = data_desc;
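
Editorial sketch (not from the patch): the roughly sixty lines removed from bio_clone_range() above reduce to three steps — clone, advance past the offset, trim the size — because a bio's position now lives entirely in bi_iter. clone_byte_range() is a hypothetical wrapper with the same shape.

#include <linux/bio.h>

static struct bio *clone_byte_range(struct bio *src, unsigned int offset,
				    unsigned int len, gfp_t gfp)
{
	struct bio *clone = bio_clone(src, gfp);

	if (!clone)
		return NULL;		/* ENOMEM */

	bio_advance(clone, offset);	/* move bi_iter forward by 'offset' bytes */
	clone->bi_iter.bi_size = len;	/* trim the clone to 'len' bytes */

	return clone;
}
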
diff --git a/drivers/block/rsxx/dev.c b/drivers/block/rsxx/dev.c
index 2284f5d3a54a..2839d37e5af7 100644
--- a/drivers/block/rsxx/dev.c
+++ b/drivers/block/rsxx/dev.c
@@ -174,7 +174,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
174 if (!card) 174 if (!card)
175 goto req_err; 175 goto req_err;
176 176
177 if (bio->bi_sector + (bio->bi_size >> 9) > get_capacity(card->gendisk)) 177 if (bio_end_sector(bio) > get_capacity(card->gendisk))
178 goto req_err; 178 goto req_err;
179 179
180 if (unlikely(card->halt)) { 180 if (unlikely(card->halt)) {
@@ -187,7 +187,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
187 goto req_err; 187 goto req_err;
188 } 188 }
189 189
190 if (bio->bi_size == 0) { 190 if (bio->bi_iter.bi_size == 0) {
191 dev_err(CARD_TO_DEV(card), "size zero BIO!\n"); 191 dev_err(CARD_TO_DEV(card), "size zero BIO!\n");
192 goto req_err; 192 goto req_err;
193 } 193 }
@@ -208,7 +208,7 @@ static void rsxx_make_request(struct request_queue *q, struct bio *bio)
208 208
209 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n", 209 dev_dbg(CARD_TO_DEV(card), "BIO[%c]: meta: %p addr8: x%llx size: %d\n",
210 bio_data_dir(bio) ? 'W' : 'R', bio_meta, 210 bio_data_dir(bio) ? 'W' : 'R', bio_meta,
211 (u64)bio->bi_sector << 9, bio->bi_size); 211 (u64)bio->bi_iter.bi_sector << 9, bio->bi_iter.bi_size);
212 212
213 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas, 213 st = rsxx_dma_queue_bio(card, bio, &bio_meta->pending_dmas,
214 bio_dma_done_cb, bio_meta); 214 bio_dma_done_cb, bio_meta);
diff --git a/drivers/block/rsxx/dma.c b/drivers/block/rsxx/dma.c
index fc88ba3e1bd2..cf8cd293abb5 100644
--- a/drivers/block/rsxx/dma.c
+++ b/drivers/block/rsxx/dma.c
@@ -684,7 +684,8 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
684 void *cb_data) 684 void *cb_data)
685{ 685{
686 struct list_head dma_list[RSXX_MAX_TARGETS]; 686 struct list_head dma_list[RSXX_MAX_TARGETS];
687 struct bio_vec *bvec; 687 struct bio_vec bvec;
688 struct bvec_iter iter;
688 unsigned long long addr8; 689 unsigned long long addr8;
689 unsigned int laddr; 690 unsigned int laddr;
690 unsigned int bv_len; 691 unsigned int bv_len;
@@ -696,7 +697,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
696 int st; 697 int st;
697 int i; 698 int i;
698 699
699 addr8 = bio->bi_sector << 9; /* sectors are 512 bytes */ 700 addr8 = bio->bi_iter.bi_sector << 9; /* sectors are 512 bytes */
700 atomic_set(n_dmas, 0); 701 atomic_set(n_dmas, 0);
701 702
702 for (i = 0; i < card->n_targets; i++) { 703 for (i = 0; i < card->n_targets; i++) {
@@ -705,7 +706,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
705 } 706 }
706 707
707 if (bio->bi_rw & REQ_DISCARD) { 708 if (bio->bi_rw & REQ_DISCARD) {
708 bv_len = bio->bi_size; 709 bv_len = bio->bi_iter.bi_size;
709 710
710 while (bv_len > 0) { 711 while (bv_len > 0) {
711 tgt = rsxx_get_dma_tgt(card, addr8); 712 tgt = rsxx_get_dma_tgt(card, addr8);
@@ -722,9 +723,9 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
722 bv_len -= RSXX_HW_BLK_SIZE; 723 bv_len -= RSXX_HW_BLK_SIZE;
723 } 724 }
724 } else { 725 } else {
725 bio_for_each_segment(bvec, bio, i) { 726 bio_for_each_segment(bvec, bio, iter) {
726 bv_len = bvec->bv_len; 727 bv_len = bvec.bv_len;
727 bv_off = bvec->bv_offset; 728 bv_off = bvec.bv_offset;
728 729
729 while (bv_len > 0) { 730 while (bv_len > 0) {
730 tgt = rsxx_get_dma_tgt(card, addr8); 731 tgt = rsxx_get_dma_tgt(card, addr8);
@@ -736,7 +737,7 @@ int rsxx_dma_queue_bio(struct rsxx_cardinfo *card,
736 st = rsxx_queue_dma(card, &dma_list[tgt], 737 st = rsxx_queue_dma(card, &dma_list[tgt],
737 bio_data_dir(bio), 738 bio_data_dir(bio),
738 dma_off, dma_len, 739 dma_off, dma_len,
739 laddr, bvec->bv_page, 740 laddr, bvec.bv_page,
740 bv_off, cb, cb_data); 741 bv_off, cb, cb_data);
741 if (st) 742 if (st)
742 goto bvec_err; 743 goto bvec_err;
diff --git a/drivers/block/umem.c b/drivers/block/umem.c
index ad70868f8a96..4cf81b5bf0f7 100644
--- a/drivers/block/umem.c
+++ b/drivers/block/umem.c
@@ -108,8 +108,7 @@ struct cardinfo {
108 * have been written 108 * have been written
109 */ 109 */
110 struct bio *bio, *currentbio, **biotail; 110 struct bio *bio, *currentbio, **biotail;
111 int current_idx; 111 struct bvec_iter current_iter;
112 sector_t current_sector;
113 112
114 struct request_queue *queue; 113 struct request_queue *queue;
115 114
@@ -118,7 +117,7 @@ struct cardinfo {
118 struct mm_dma_desc *desc; 117 struct mm_dma_desc *desc;
119 int cnt, headcnt; 118 int cnt, headcnt;
120 struct bio *bio, **biotail; 119 struct bio *bio, **biotail;
121 int idx; 120 struct bvec_iter iter;
122 } mm_pages[2]; 121 } mm_pages[2];
123#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc)) 122#define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc))
124 123
@@ -344,16 +343,13 @@ static int add_bio(struct cardinfo *card)
344 dma_addr_t dma_handle; 343 dma_addr_t dma_handle;
345 int offset; 344 int offset;
346 struct bio *bio; 345 struct bio *bio;
347 struct bio_vec *vec; 346 struct bio_vec vec;
348 int idx;
349 int rw; 347 int rw;
350 int len;
351 348
352 bio = card->currentbio; 349 bio = card->currentbio;
353 if (!bio && card->bio) { 350 if (!bio && card->bio) {
354 card->currentbio = card->bio; 351 card->currentbio = card->bio;
355 card->current_idx = card->bio->bi_idx; 352 card->current_iter = card->bio->bi_iter;
356 card->current_sector = card->bio->bi_sector;
357 card->bio = card->bio->bi_next; 353 card->bio = card->bio->bi_next;
358 if (card->bio == NULL) 354 if (card->bio == NULL)
359 card->biotail = &card->bio; 355 card->biotail = &card->bio;
@@ -362,18 +358,17 @@ static int add_bio(struct cardinfo *card)
362 } 358 }
363 if (!bio) 359 if (!bio)
364 return 0; 360 return 0;
365 idx = card->current_idx;
366 361
367 rw = bio_rw(bio); 362 rw = bio_rw(bio);
368 if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE) 363 if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE)
369 return 0; 364 return 0;
370 365
371 vec = bio_iovec_idx(bio, idx); 366 vec = bio_iter_iovec(bio, card->current_iter);
372 len = vec->bv_len; 367
373 dma_handle = pci_map_page(card->dev, 368 dma_handle = pci_map_page(card->dev,
374 vec->bv_page, 369 vec.bv_page,
375 vec->bv_offset, 370 vec.bv_offset,
376 len, 371 vec.bv_len,
377 (rw == READ) ? 372 (rw == READ) ?
378 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); 373 PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE);
379 374
@@ -381,7 +376,7 @@ static int add_bio(struct cardinfo *card)
381 desc = &p->desc[p->cnt]; 376 desc = &p->desc[p->cnt];
382 p->cnt++; 377 p->cnt++;
383 if (p->bio == NULL) 378 if (p->bio == NULL)
384 p->idx = idx; 379 p->iter = card->current_iter;
385 if ((p->biotail) != &bio->bi_next) { 380 if ((p->biotail) != &bio->bi_next) {
386 *(p->biotail) = bio; 381 *(p->biotail) = bio;
387 p->biotail = &(bio->bi_next); 382 p->biotail = &(bio->bi_next);
@@ -391,8 +386,8 @@ static int add_bio(struct cardinfo *card)
391 desc->data_dma_handle = dma_handle; 386 desc->data_dma_handle = dma_handle;
392 387
393 desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); 388 desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle);
394 desc->local_addr = cpu_to_le64(card->current_sector << 9); 389 desc->local_addr = cpu_to_le64(card->current_iter.bi_sector << 9);
395 desc->transfer_size = cpu_to_le32(len); 390 desc->transfer_size = cpu_to_le32(vec.bv_len);
396 offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc)); 391 offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc));
397 desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); 392 desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset));
398 desc->zero1 = desc->zero2 = 0; 393 desc->zero1 = desc->zero2 = 0;
@@ -407,10 +402,9 @@ static int add_bio(struct cardinfo *card)
407 desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ); 402 desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ);
408 desc->sem_control_bits = desc->control_bits; 403 desc->sem_control_bits = desc->control_bits;
409 404
410 card->current_sector += (len >> 9); 405
411 idx++; 406 bio_advance_iter(bio, &card->current_iter, vec.bv_len);
412 card->current_idx = idx; 407 if (!card->current_iter.bi_size)
413 if (idx >= bio->bi_vcnt)
414 card->currentbio = NULL; 408 card->currentbio = NULL;
415 409
416 return 1; 410 return 1;
@@ -439,23 +433,25 @@ static void process_page(unsigned long data)
439 struct mm_dma_desc *desc = &page->desc[page->headcnt]; 433 struct mm_dma_desc *desc = &page->desc[page->headcnt];
440 int control = le32_to_cpu(desc->sem_control_bits); 434 int control = le32_to_cpu(desc->sem_control_bits);
441 int last = 0; 435 int last = 0;
442 int idx; 436 struct bio_vec vec;
443 437
444 if (!(control & DMASCR_DMA_COMPLETE)) { 438 if (!(control & DMASCR_DMA_COMPLETE)) {
445 control = dma_status; 439 control = dma_status;
446 last = 1; 440 last = 1;
447 } 441 }
442
448 page->headcnt++; 443 page->headcnt++;
449 idx = page->idx; 444 vec = bio_iter_iovec(bio, page->iter);
450 page->idx++; 445 bio_advance_iter(bio, &page->iter, vec.bv_len);
451 if (page->idx >= bio->bi_vcnt) { 446
447 if (!page->iter.bi_size) {
452 page->bio = bio->bi_next; 448 page->bio = bio->bi_next;
453 if (page->bio) 449 if (page->bio)
454 page->idx = page->bio->bi_idx; 450 page->iter = page->bio->bi_iter;
455 } 451 }
456 452
457 pci_unmap_page(card->dev, desc->data_dma_handle, 453 pci_unmap_page(card->dev, desc->data_dma_handle,
458 bio_iovec_idx(bio, idx)->bv_len, 454 vec.bv_len,
459 (control & DMASCR_TRANSFER_READ) ? 455 (control & DMASCR_TRANSFER_READ) ?
460 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); 456 PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE);
461 if (control & DMASCR_HARD_ERROR) { 457 if (control & DMASCR_HARD_ERROR) {
@@ -532,7 +528,8 @@ static void mm_make_request(struct request_queue *q, struct bio *bio)
532{ 528{
533 struct cardinfo *card = q->queuedata; 529 struct cardinfo *card = q->queuedata;
534 pr_debug("mm_make_request %llu %u\n", 530 pr_debug("mm_make_request %llu %u\n",
535 (unsigned long long)bio->bi_sector, bio->bi_size); 531 (unsigned long long)bio->bi_iter.bi_sector,
532 bio->bi_iter.bi_size);
536 533
537 spin_lock_irq(&card->lock); 534 spin_lock_irq(&card->lock);
538 *card->biotail = bio; 535 *card->biotail = bio;
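umem previously tracked its position with a bare biovec index plus a separately maintained current_sector; the patch collapses both into a single struct bvec_iter advanced by bio_advance_iter(), which keeps bi_sector and bi_size in step automatically. A rough sketch of that idiom, assuming the bio_iter_iovec()/bio_advance_iter() helpers introduced earlier in this series (one_step() is a made-up name for illustration):

#include <linux/bio.h>
#include <linux/printk.h>

/*
 * Hypothetical: look at one segment of @bio starting at *iter, then step
 * the iterator past it - roughly what add_bio() above now does per call.
 */
static bool one_step(struct bio *bio, struct bvec_iter *iter)
{
	struct bio_vec vec = bio_iter_iovec(bio, *iter);

	pr_debug("segment: offset %u, len %u, sector %llu\n",
		 vec.bv_offset, vec.bv_len,
		 (unsigned long long)iter->bi_sector);

	/* advancing the iterator also advances bi_sector and shrinks bi_size */
	bio_advance_iter(bio, iter, vec.bv_len);

	return iter->bi_size != 0;	/* more segments left? */
}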
diff --git a/drivers/block/xen-blkback/blkback.c b/drivers/block/xen-blkback/blkback.c
index 6620b73d0490..4b97b86da926 100644
--- a/drivers/block/xen-blkback/blkback.c
+++ b/drivers/block/xen-blkback/blkback.c
@@ -1257,7 +1257,7 @@ static int dispatch_rw_block_io(struct xen_blkif *blkif,
1257 bio->bi_bdev = preq.bdev; 1257 bio->bi_bdev = preq.bdev;
1258 bio->bi_private = pending_req; 1258 bio->bi_private = pending_req;
1259 bio->bi_end_io = end_block_io_op; 1259 bio->bi_end_io = end_block_io_op;
1260 bio->bi_sector = preq.sector_number; 1260 bio->bi_iter.bi_sector = preq.sector_number;
1261 } 1261 }
1262 1262
1263 preq.sector_number += seg[i].nsec; 1263 preq.sector_number += seg[i].nsec;
diff --git a/drivers/block/xen-blkfront.c b/drivers/block/xen-blkfront.c
index c4a4c9006288..26ad7923e331 100644
--- a/drivers/block/xen-blkfront.c
+++ b/drivers/block/xen-blkfront.c
@@ -1547,7 +1547,7 @@ static int blkif_recover(struct blkfront_info *info)
1547 for (i = 0; i < pending; i++) { 1547 for (i = 0; i < pending; i++) {
1548 offset = (i * segs * PAGE_SIZE) >> 9; 1548 offset = (i * segs * PAGE_SIZE) >> 9;
1549 size = min((unsigned int)(segs * PAGE_SIZE) >> 9, 1549 size = min((unsigned int)(segs * PAGE_SIZE) >> 9,
1550 (unsigned int)(bio->bi_size >> 9) - offset); 1550 (unsigned int)bio_sectors(bio) - offset);
1551 cloned_bio = bio_clone(bio, GFP_NOIO); 1551 cloned_bio = bio_clone(bio, GFP_NOIO);
1552 BUG_ON(cloned_bio == NULL); 1552 BUG_ON(cloned_bio == NULL);
1553 bio_trim(cloned_bio, offset, size); 1553 bio_trim(cloned_bio, offset, size);
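The xen-blkfront change is purely cosmetic once bi_size has moved: bio_sectors() already expresses "remaining size in 512-byte sectors". As a reference point, the accessors this series leans on reduce to roughly the following (a paraphrase of the include/linux/bio.h definitions, renamed here to make clear it is not a verbatim copy):

/* Approximate shape of the accessors after this series (see include/linux/bio.h). */
#define my_bio_sectors(bio)     ((bio)->bi_iter.bi_size >> 9)
#define my_bio_end_sector(bio)  ((bio)->bi_iter.bi_sector + my_bio_sectors(bio))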
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 754f43177483..dbdbca5a9591 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -280,7 +280,6 @@ struct bcache_device {
280 unsigned long sectors_dirty_last; 280 unsigned long sectors_dirty_last;
281 long sectors_dirty_derivative; 281 long sectors_dirty_derivative;
282 282
283 mempool_t *unaligned_bvec;
284 struct bio_set *bio_split; 283 struct bio_set *bio_split;
285 284
286 unsigned data_csum:1; 285 unsigned data_csum:1;
@@ -902,7 +901,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
902void bch_bbio_free(struct bio *, struct cache_set *); 901void bch_bbio_free(struct bio *, struct cache_set *);
903struct bio *bch_bbio_alloc(struct cache_set *); 902struct bio *bch_bbio_alloc(struct cache_set *);
904 903
905struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
906void bch_generic_make_request(struct bio *, struct bio_split_pool *); 904void bch_generic_make_request(struct bio *, struct bio_split_pool *);
907void __bch_submit_bbio(struct bio *, struct cache_set *); 905void __bch_submit_bbio(struct bio *, struct cache_set *);
908void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned); 906void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 31bb53fcc67a..946ecd3b048b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
299 299
300 bio = bch_bbio_alloc(b->c); 300 bio = bch_bbio_alloc(b->c);
301 bio->bi_rw = REQ_META|READ_SYNC; 301 bio->bi_rw = REQ_META|READ_SYNC;
302 bio->bi_size = KEY_SIZE(&b->key) << 9; 302 bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
303 bio->bi_end_io = btree_node_read_endio; 303 bio->bi_end_io = btree_node_read_endio;
304 bio->bi_private = &cl; 304 bio->bi_private = &cl;
305 305
@@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl)
362 struct bio_vec *bv; 362 struct bio_vec *bv;
363 int n; 363 int n;
364 364
365 __bio_for_each_segment(bv, b->bio, n, 0) 365 bio_for_each_segment_all(bv, b->bio, n)
366 __free_page(bv->bv_page); 366 __free_page(bv->bv_page);
367 367
368 __btree_node_write_done(cl); 368 __btree_node_write_done(cl);
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
395 b->bio->bi_end_io = btree_node_write_endio; 395 b->bio->bi_end_io = btree_node_write_endio;
396 b->bio->bi_private = cl; 396 b->bio->bi_private = cl;
397 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; 397 b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
398 b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); 398 b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
399 bch_bio_map(b->bio, i); 399 bch_bio_map(b->bio, i);
400 400
401 /* 401 /*
@@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b)
421 struct bio_vec *bv; 421 struct bio_vec *bv;
422 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); 422 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
423 423
424 bio_for_each_segment(bv, b->bio, j) 424 bio_for_each_segment_all(bv, b->bio, j)
425 memcpy(page_address(bv->bv_page), 425 memcpy(page_address(bv->bv_page),
426 base + j * PAGE_SIZE, PAGE_SIZE); 426 base + j * PAGE_SIZE, PAGE_SIZE);
427 427
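btree_node_write_done() and do_btree_node_write() switch to bio_for_each_segment_all(), which is the right primitive here: these loops want every biovec the bio owns (to free or fill its pages), not just the segments remaining from the iterator's point of view. A side-by-side sketch of the two idioms, assuming the post-series macros (both helper names are hypothetical):

#include <linux/bio.h>
#include <linux/gfp.h>

/* Owner-only walk: hands out pointers into bi_io_vec and ignores bi_iter. */
static void free_all_pages(struct bio *bio)
{
	struct bio_vec *bv;
	int i;

	bio_for_each_segment_all(bv, bio, i)
		__free_page(bv->bv_page);
}

/* Iterator walk: honours bi_iter, so it respects splits and partial completion. */
static unsigned int remaining_bytes(struct bio *bio)
{
	struct bio_vec bv;
	struct bvec_iter iter;
	unsigned int bytes = 0;

	bio_for_each_segment(bv, bio, iter)
		bytes += bv.bv_len;

	return bytes;
}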
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..03cb4d114e16 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
173{ 173{
174 char name[BDEVNAME_SIZE]; 174 char name[BDEVNAME_SIZE];
175 struct bio *check; 175 struct bio *check;
176 struct bio_vec *bv; 176 struct bio_vec bv, *bv2;
177 struct bvec_iter iter;
177 int i; 178 int i;
178 179
179 check = bio_clone(bio, GFP_NOIO); 180 check = bio_clone(bio, GFP_NOIO);
@@ -185,23 +186,23 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
185 186
186 submit_bio_wait(READ_SYNC, check); 187 submit_bio_wait(READ_SYNC, check);
187 188
188 bio_for_each_segment(bv, bio, i) { 189 bio_for_each_segment(bv, bio, iter) {
189 void *p1 = kmap_atomic(bv->bv_page); 190 void *p1 = kmap_atomic(bv.bv_page);
190 void *p2 = page_address(check->bi_io_vec[i].bv_page); 191 void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
191 192
192 cache_set_err_on(memcmp(p1 + bv->bv_offset, 193 cache_set_err_on(memcmp(p1 + bv.bv_offset,
193 p2 + bv->bv_offset, 194 p2 + bv.bv_offset,
194 bv->bv_len), 195 bv.bv_len),
195 dc->disk.c, 196 dc->disk.c,
196 "verify failed at dev %s sector %llu", 197 "verify failed at dev %s sector %llu",
197 bdevname(dc->bdev, name), 198 bdevname(dc->bdev, name),
198 (uint64_t) bio->bi_sector); 199 (uint64_t) bio->bi_iter.bi_sector);
199 200
200 kunmap_atomic(p1); 201 kunmap_atomic(p1);
201 } 202 }
202 203
203 bio_for_each_segment_all(bv, check, i) 204 bio_for_each_segment_all(bv2, check, i)
204 __free_page(bv->bv_page); 205 __free_page(bv2->bv_page);
205out_put: 206out_put:
206 bio_put(check); 207 bio_put(check);
207} 208}
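bch_data_verify() still indexes the cloned bio's vector array directly (check->bi_io_vec[iter.bi_idx]), which is safe here because bio_clone() duplicates the biovec array with the same layout, so the iterator's bi_idx lines up one-to-one with the clone. A minimal sketch of that pairing under the same assumption (segments_match() is a hypothetical helper):

#include <linux/bio.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Hypothetical: compare @bio's data against a same-layout clone @check. */
static bool segments_match(struct bio *bio, struct bio *check)
{
	struct bio_vec bv;
	struct bvec_iter iter;

	bio_for_each_segment(bv, bio, iter) {
		void *p1 = kmap_atomic(bv.bv_page);
		void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
		bool ok = !memcmp(p1 + bv.bv_offset, p2 + bv.bv_offset, bv.bv_len);

		kunmap_atomic(p1);
		if (!ok)
			return false;
	}
	return true;
}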
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..fa028fa82df4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,178 +11,40 @@
11 11
12#include <linux/blkdev.h> 12#include <linux/blkdev.h>
13 13
14static void bch_bi_idx_hack_endio(struct bio *bio, int error)
15{
16 struct bio *p = bio->bi_private;
17
18 bio_endio(p, error);
19 bio_put(bio);
20}
21
22static void bch_generic_make_request_hack(struct bio *bio)
23{
24 if (bio->bi_idx) {
25 struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
26
27 memcpy(clone->bi_io_vec,
28 bio_iovec(bio),
29 bio_segments(bio) * sizeof(struct bio_vec));
30
31 clone->bi_sector = bio->bi_sector;
32 clone->bi_bdev = bio->bi_bdev;
33 clone->bi_rw = bio->bi_rw;
34 clone->bi_vcnt = bio_segments(bio);
35 clone->bi_size = bio->bi_size;
36
37 clone->bi_private = bio;
38 clone->bi_end_io = bch_bi_idx_hack_endio;
39
40 bio = clone;
41 }
42
43 /*
44 * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
45 * bios might have had more than that (before we split them per device
46 * limitations).
47 *
48 * To be taken out once immutable bvec stuff is in.
49 */
50 bio->bi_max_vecs = bio->bi_vcnt;
51
52 generic_make_request(bio);
53}
54
55/**
56 * bch_bio_split - split a bio
57 * @bio: bio to split
58 * @sectors: number of sectors to split from the front of @bio
59 * @gfp: gfp mask
60 * @bs: bio set to allocate from
61 *
62 * Allocates and returns a new bio which represents @sectors from the start of
63 * @bio, and updates @bio to represent the remaining sectors.
64 *
65 * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
66 * unchanged.
67 *
68 * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
 69 * bvec boundary; it is the caller's responsibility to ensure that @bio is not
70 * freed before the split.
71 */
72struct bio *bch_bio_split(struct bio *bio, int sectors,
73 gfp_t gfp, struct bio_set *bs)
74{
75 unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
76 struct bio_vec *bv;
77 struct bio *ret = NULL;
78
79 BUG_ON(sectors <= 0);
80
81 if (sectors >= bio_sectors(bio))
82 return bio;
83
84 if (bio->bi_rw & REQ_DISCARD) {
85 ret = bio_alloc_bioset(gfp, 1, bs);
86 if (!ret)
87 return NULL;
88 idx = 0;
89 goto out;
90 }
91
92 bio_for_each_segment(bv, bio, idx) {
93 vcnt = idx - bio->bi_idx;
94
95 if (!nbytes) {
96 ret = bio_alloc_bioset(gfp, vcnt, bs);
97 if (!ret)
98 return NULL;
99
100 memcpy(ret->bi_io_vec, bio_iovec(bio),
101 sizeof(struct bio_vec) * vcnt);
102
103 break;
104 } else if (nbytes < bv->bv_len) {
105 ret = bio_alloc_bioset(gfp, ++vcnt, bs);
106 if (!ret)
107 return NULL;
108
109 memcpy(ret->bi_io_vec, bio_iovec(bio),
110 sizeof(struct bio_vec) * vcnt);
111
112 ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
113 bv->bv_offset += nbytes;
114 bv->bv_len -= nbytes;
115 break;
116 }
117
118 nbytes -= bv->bv_len;
119 }
120out:
121 ret->bi_bdev = bio->bi_bdev;
122 ret->bi_sector = bio->bi_sector;
123 ret->bi_size = sectors << 9;
124 ret->bi_rw = bio->bi_rw;
125 ret->bi_vcnt = vcnt;
126 ret->bi_max_vecs = vcnt;
127
128 bio->bi_sector += sectors;
129 bio->bi_size -= sectors << 9;
130 bio->bi_idx = idx;
131
132 if (bio_integrity(bio)) {
133 if (bio_integrity_clone(ret, bio, gfp)) {
134 bio_put(ret);
135 return NULL;
136 }
137
138 bio_integrity_trim(ret, 0, bio_sectors(ret));
139 bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
140 }
141
142 return ret;
143}
144
145static unsigned bch_bio_max_sectors(struct bio *bio) 14static unsigned bch_bio_max_sectors(struct bio *bio)
146{ 15{
147 unsigned ret = bio_sectors(bio);
148 struct request_queue *q = bdev_get_queue(bio->bi_bdev); 16 struct request_queue *q = bdev_get_queue(bio->bi_bdev);
149 unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES, 17 struct bio_vec bv;
150 queue_max_segments(q)); 18 struct bvec_iter iter;
19 unsigned ret = 0, seg = 0;
151 20
152 if (bio->bi_rw & REQ_DISCARD) 21 if (bio->bi_rw & REQ_DISCARD)
153 return min(ret, q->limits.max_discard_sectors); 22 return min(bio_sectors(bio), q->limits.max_discard_sectors);
154 23
155 if (bio_segments(bio) > max_segments || 24 bio_for_each_segment(bv, bio, iter) {
156 q->merge_bvec_fn) { 25 struct bvec_merge_data bvm = {
157 struct bio_vec *bv; 26 .bi_bdev = bio->bi_bdev,
158 int i, seg = 0; 27 .bi_sector = bio->bi_iter.bi_sector,
159 28 .bi_size = ret << 9,
160 ret = 0; 29 .bi_rw = bio->bi_rw,
161 30 };
162 bio_for_each_segment(bv, bio, i) { 31
163 struct bvec_merge_data bvm = { 32 if (seg == min_t(unsigned, BIO_MAX_PAGES,
164 .bi_bdev = bio->bi_bdev, 33 queue_max_segments(q)))
165 .bi_sector = bio->bi_sector, 34 break;
166 .bi_size = ret << 9,
167 .bi_rw = bio->bi_rw,
168 };
169
170 if (seg == max_segments)
171 break;
172 35
173 if (q->merge_bvec_fn && 36 if (q->merge_bvec_fn &&
174 q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len) 37 q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
175 break; 38 break;
176 39
177 seg++; 40 seg++;
178 ret += bv->bv_len >> 9; 41 ret += bv.bv_len >> 9;
179 }
180 } 42 }
181 43
182 ret = min(ret, queue_max_sectors(q)); 44 ret = min(ret, queue_max_sectors(q));
183 45
184 WARN_ON(!ret); 46 WARN_ON(!ret);
185 ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9); 47 ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
186 48
187 return ret; 49 return ret;
188} 50}
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
193 55
194 s->bio->bi_end_io = s->bi_end_io; 56 s->bio->bi_end_io = s->bi_end_io;
195 s->bio->bi_private = s->bi_private; 57 s->bio->bi_private = s->bi_private;
196 bio_endio(s->bio, 0); 58 bio_endio_nodec(s->bio, 0);
197 59
198 closure_debug_destroy(&s->cl); 60 closure_debug_destroy(&s->cl);
199 mempool_free(s, s->p->bio_split_hook); 61 mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
232 bio_get(bio); 94 bio_get(bio);
233 95
234 do { 96 do {
235 n = bch_bio_split(bio, bch_bio_max_sectors(bio), 97 n = bio_next_split(bio, bch_bio_max_sectors(bio),
236 GFP_NOIO, s->p->bio_split); 98 GFP_NOIO, s->p->bio_split);
237 99
238 n->bi_end_io = bch_bio_submit_split_endio; 100 n->bi_end_io = bch_bio_submit_split_endio;
239 n->bi_private = &s->cl; 101 n->bi_private = &s->cl;
240 102
241 closure_get(&s->cl); 103 closure_get(&s->cl);
242 bch_generic_make_request_hack(n); 104 generic_make_request(n);
243 } while (n != bio); 105 } while (n != bio);
244 106
245 continue_at(&s->cl, bch_bio_submit_split_done, NULL); 107 continue_at(&s->cl, bch_bio_submit_split_done, NULL);
246submit: 108submit:
247 bch_generic_make_request_hack(bio); 109 generic_make_request(bio);
248} 110}
249 111
250/* Bios with headers */ 112/* Bios with headers */
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
272{ 134{
273 struct bbio *b = container_of(bio, struct bbio, bio); 135 struct bbio *b = container_of(bio, struct bbio, bio);
274 136
275 bio->bi_sector = PTR_OFFSET(&b->key, 0); 137 bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
276 bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; 138 bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
277 139
278 b->submit_time_us = local_clock_us(); 140 b->submit_time_us = local_clock_us();
279 closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); 141 closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
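With bch_bio_split() and the bi_idx resubmission hack removed, bcache sizes each chunk with bch_bio_max_sectors() and carves it off with bio_next_split(), which simply returns the original bio when no split is needed - hence the do/while loop terminating on n == bio. A condensed sketch of that loop shape, assuming bio_next_split() from this series and omitting the closure refcounting the real code layers on top (submit_in_chunks() is a hypothetical name):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Hypothetical: submit @bio in chunks of at most @max_sectors, relying on
 * the split-or-return-original contract of bio_next_split().
 */
static void submit_in_chunks(struct bio *bio, unsigned max_sectors,
			     struct bio_set *bs)
{
	struct bio *n;

	do {
		n = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
		generic_make_request(n);	/* n == bio on the final chunk */
	} while (n != bio);
}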
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..7eafdf09a0ae 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset;
51 len = min_t(unsigned, left, PAGE_SECTORS * 8); 51 len = min_t(unsigned, left, PAGE_SECTORS * 8);
52 52
53 bio_reset(bio); 53 bio_reset(bio);
54 bio->bi_sector = bucket + offset; 54 bio->bi_iter.bi_sector = bucket + offset;
55 bio->bi_bdev = ca->bdev; 55 bio->bi_bdev = ca->bdev;
56 bio->bi_rw = READ; 56 bio->bi_rw = READ;
57 bio->bi_size = len << 9; 57 bio->bi_iter.bi_size = len << 9;
58 58
59 bio->bi_end_io = journal_read_endio; 59 bio->bi_end_io = journal_read_endio;
60 bio->bi_private = &cl; 60 bio->bi_private = &cl;
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
437 atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); 437 atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
438 438
439 bio_init(bio); 439 bio_init(bio);
440 bio->bi_sector = bucket_to_sector(ca->set, 440 bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
441 ca->sb.d[ja->discard_idx]); 441 ca->sb.d[ja->discard_idx]);
442 bio->bi_bdev = ca->bdev; 442 bio->bi_bdev = ca->bdev;
443 bio->bi_rw = REQ_WRITE|REQ_DISCARD; 443 bio->bi_rw = REQ_WRITE|REQ_DISCARD;
444 bio->bi_max_vecs = 1; 444 bio->bi_max_vecs = 1;
445 bio->bi_io_vec = bio->bi_inline_vecs; 445 bio->bi_io_vec = bio->bi_inline_vecs;
446 bio->bi_size = bucket_bytes(ca); 446 bio->bi_iter.bi_size = bucket_bytes(ca);
447 bio->bi_end_io = journal_discard_endio; 447 bio->bi_end_io = journal_discard_endio;
448 448
449 closure_get(&ca->set->cl); 449 closure_get(&ca->set->cl);
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
608 atomic_long_add(sectors, &ca->meta_sectors_written); 608 atomic_long_add(sectors, &ca->meta_sectors_written);
609 609
610 bio_reset(bio); 610 bio_reset(bio);
611 bio->bi_sector = PTR_OFFSET(k, i); 611 bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
612 bio->bi_bdev = ca->bdev; 612 bio->bi_bdev = ca->bdev;
613 bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; 613 bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
614 bio->bi_size = sectors << 9; 614 bio->bi_iter.bi_size = sectors << 9;
615 615
616 bio->bi_end_io = journal_write_endio; 616 bio->bi_end_io = journal_write_endio;
617 bio->bi_private = w; 617 bio->bi_private = w;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index f2f0998c4a91..052bd24d24b4 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -86,7 +86,7 @@ static void moving_init(struct moving_io *io)
86 bio_get(bio); 86 bio_get(bio);
87 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); 87 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
88 88
89 bio->bi_size = KEY_SIZE(&io->w->key) << 9; 89 bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
90 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), 90 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
91 PAGE_SECTORS); 91 PAGE_SECTORS);
92 bio->bi_private = &io->cl; 92 bio->bi_private = &io->cl;
@@ -102,7 +102,7 @@ static void write_moving(struct closure *cl)
102 if (!op->error) { 102 if (!op->error) {
103 moving_init(io); 103 moving_init(io);
104 104
105 io->bio.bio.bi_sector = KEY_START(&io->w->key); 105 io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
106 op->write_prio = 1; 106 op->write_prio = 1;
107 op->bio = &io->bio.bio; 107 op->bio = &io->bio.bio;
108 108
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index fbcc851ed5a5..5878cdb39529 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -198,14 +198,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
198 198
199static void bio_csum(struct bio *bio, struct bkey *k) 199static void bio_csum(struct bio *bio, struct bkey *k)
200{ 200{
201 struct bio_vec *bv; 201 struct bio_vec bv;
202 struct bvec_iter iter;
202 uint64_t csum = 0; 203 uint64_t csum = 0;
203 int i;
204 204
205 bio_for_each_segment(bv, bio, i) { 205 bio_for_each_segment(bv, bio, iter) {
206 void *d = kmap(bv->bv_page) + bv->bv_offset; 206 void *d = kmap(bv.bv_page) + bv.bv_offset;
207 csum = bch_crc64_update(csum, d, bv->bv_len); 207 csum = bch_crc64_update(csum, d, bv.bv_len);
208 kunmap(bv->bv_page); 208 kunmap(bv.bv_page);
209 } 209 }
210 210
211 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1); 211 k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -261,7 +261,7 @@ static void bch_data_invalidate(struct closure *cl)
261 struct bio *bio = op->bio; 261 struct bio *bio = op->bio;
262 262
263 pr_debug("invalidating %i sectors from %llu", 263 pr_debug("invalidating %i sectors from %llu",
264 bio_sectors(bio), (uint64_t) bio->bi_sector); 264 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
265 265
266 while (bio_sectors(bio)) { 266 while (bio_sectors(bio)) {
267 unsigned sectors = min(bio_sectors(bio), 267 unsigned sectors = min(bio_sectors(bio),
@@ -270,11 +270,11 @@ static void bch_data_invalidate(struct closure *cl)
270 if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) 270 if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
271 goto out; 271 goto out;
272 272
273 bio->bi_sector += sectors; 273 bio->bi_iter.bi_sector += sectors;
274 bio->bi_size -= sectors << 9; 274 bio->bi_iter.bi_size -= sectors << 9;
275 275
276 bch_keylist_add(&op->insert_keys, 276 bch_keylist_add(&op->insert_keys,
277 &KEY(op->inode, bio->bi_sector, sectors)); 277 &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
278 } 278 }
279 279
280 op->insert_data_done = true; 280 op->insert_data_done = true;
@@ -364,14 +364,14 @@ static void bch_data_insert_start(struct closure *cl)
364 k = op->insert_keys.top; 364 k = op->insert_keys.top;
365 bkey_init(k); 365 bkey_init(k);
366 SET_KEY_INODE(k, op->inode); 366 SET_KEY_INODE(k, op->inode);
367 SET_KEY_OFFSET(k, bio->bi_sector); 367 SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
368 368
369 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), 369 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
370 op->write_point, op->write_prio, 370 op->write_point, op->write_prio,
371 op->writeback)) 371 op->writeback))
372 goto err; 372 goto err;
373 373
374 n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); 374 n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
375 375
376 n->bi_end_io = bch_data_insert_endio; 376 n->bi_end_io = bch_data_insert_endio;
377 n->bi_private = cl; 377 n->bi_private = cl;
@@ -522,7 +522,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
522 (bio->bi_rw & REQ_WRITE))) 522 (bio->bi_rw & REQ_WRITE)))
523 goto skip; 523 goto skip;
524 524
525 if (bio->bi_sector & (c->sb.block_size - 1) || 525 if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
526 bio_sectors(bio) & (c->sb.block_size - 1)) { 526 bio_sectors(bio) & (c->sb.block_size - 1)) {
527 pr_debug("skipping unaligned io"); 527 pr_debug("skipping unaligned io");
528 goto skip; 528 goto skip;
@@ -546,8 +546,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
546 546
547 spin_lock(&dc->io_lock); 547 spin_lock(&dc->io_lock);
548 548
549 hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) 549 hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
550 if (i->last == bio->bi_sector && 550 if (i->last == bio->bi_iter.bi_sector &&
551 time_before(jiffies, i->jiffies)) 551 time_before(jiffies, i->jiffies))
552 goto found; 552 goto found;
553 553
@@ -556,8 +556,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
556 add_sequential(task); 556 add_sequential(task);
557 i->sequential = 0; 557 i->sequential = 0;
558found: 558found:
559 if (i->sequential + bio->bi_size > i->sequential) 559 if (i->sequential + bio->bi_iter.bi_size > i->sequential)
560 i->sequential += bio->bi_size; 560 i->sequential += bio->bi_iter.bi_size;
561 561
562 i->last = bio_end_sector(bio); 562 i->last = bio_end_sector(bio);
563 i->jiffies = jiffies + msecs_to_jiffies(5000); 563 i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -606,7 +606,6 @@ struct search {
606 unsigned insert_bio_sectors; 606 unsigned insert_bio_sectors;
607 607
608 unsigned recoverable:1; 608 unsigned recoverable:1;
609 unsigned unaligned_bvec:1;
610 unsigned write:1; 609 unsigned write:1;
611 unsigned read_dirty_data:1; 610 unsigned read_dirty_data:1;
612 611
@@ -650,15 +649,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
650 struct bkey *bio_key; 649 struct bkey *bio_key;
651 unsigned ptr; 650 unsigned ptr;
652 651
653 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0) 652 if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
654 return MAP_CONTINUE; 653 return MAP_CONTINUE;
655 654
656 if (KEY_INODE(k) != s->iop.inode || 655 if (KEY_INODE(k) != s->iop.inode ||
657 KEY_START(k) > bio->bi_sector) { 656 KEY_START(k) > bio->bi_iter.bi_sector) {
658 unsigned bio_sectors = bio_sectors(bio); 657 unsigned bio_sectors = bio_sectors(bio);
659 unsigned sectors = KEY_INODE(k) == s->iop.inode 658 unsigned sectors = KEY_INODE(k) == s->iop.inode
660 ? min_t(uint64_t, INT_MAX, 659 ? min_t(uint64_t, INT_MAX,
661 KEY_START(k) - bio->bi_sector) 660 KEY_START(k) - bio->bi_iter.bi_sector)
662 : INT_MAX; 661 : INT_MAX;
663 662
664 int ret = s->d->cache_miss(b, s, bio, sectors); 663 int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -680,14 +679,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
680 if (KEY_DIRTY(k)) 679 if (KEY_DIRTY(k))
681 s->read_dirty_data = true; 680 s->read_dirty_data = true;
682 681
683 n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, 682 n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
684 KEY_OFFSET(k) - bio->bi_sector), 683 KEY_OFFSET(k) - bio->bi_iter.bi_sector),
685 GFP_NOIO, s->d->bio_split); 684 GFP_NOIO, s->d->bio_split);
686 685
687 bio_key = &container_of(n, struct bbio, bio)->key; 686 bio_key = &container_of(n, struct bbio, bio)->key;
688 bch_bkey_copy_single_ptr(bio_key, k, ptr); 687 bch_bkey_copy_single_ptr(bio_key, k, ptr);
689 688
690 bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key); 689 bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
691 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); 690 bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
692 691
693 n->bi_end_io = bch_cache_read_endio; 692 n->bi_end_io = bch_cache_read_endio;
@@ -714,7 +713,7 @@ static void cache_lookup(struct closure *cl)
714 struct bio *bio = &s->bio.bio; 713 struct bio *bio = &s->bio.bio;
715 714
716 int ret = bch_btree_map_keys(&s->op, s->iop.c, 715 int ret = bch_btree_map_keys(&s->op, s->iop.c,
717 &KEY(s->iop.inode, bio->bi_sector, 0), 716 &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
718 cache_lookup_fn, MAP_END_KEY); 717 cache_lookup_fn, MAP_END_KEY);
719 if (ret == -EAGAIN) 718 if (ret == -EAGAIN)
720 continue_at(cl, cache_lookup, bcache_wq); 719 continue_at(cl, cache_lookup, bcache_wq);
@@ -759,10 +758,12 @@ static void bio_complete(struct search *s)
759static void do_bio_hook(struct search *s) 758static void do_bio_hook(struct search *s)
760{ 759{
761 struct bio *bio = &s->bio.bio; 760 struct bio *bio = &s->bio.bio;
762 memcpy(bio, s->orig_bio, sizeof(struct bio));
763 761
762 bio_init(bio);
763 __bio_clone_fast(bio, s->orig_bio);
764 bio->bi_end_io = request_endio; 764 bio->bi_end_io = request_endio;
765 bio->bi_private = &s->cl; 765 bio->bi_private = &s->cl;
766
766 atomic_set(&bio->bi_cnt, 3); 767 atomic_set(&bio->bi_cnt, 3);
767} 768}
768 769
@@ -774,9 +775,6 @@ static void search_free(struct closure *cl)
774 if (s->iop.bio) 775 if (s->iop.bio)
775 bio_put(s->iop.bio); 776 bio_put(s->iop.bio);
776 777
777 if (s->unaligned_bvec)
778 mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
779
780 closure_debug_destroy(cl); 778 closure_debug_destroy(cl);
781 mempool_free(s, s->d->c->search); 779 mempool_free(s, s->d->c->search);
782} 780}
@@ -784,7 +782,6 @@ static void search_free(struct closure *cl)
784static struct search *search_alloc(struct bio *bio, struct bcache_device *d) 782static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
785{ 783{
786 struct search *s; 784 struct search *s;
787 struct bio_vec *bv;
788 785
789 s = mempool_alloc(d->c->search, GFP_NOIO); 786 s = mempool_alloc(d->c->search, GFP_NOIO);
790 memset(s, 0, offsetof(struct search, iop.insert_keys)); 787 memset(s, 0, offsetof(struct search, iop.insert_keys));
@@ -803,15 +800,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
803 s->start_time = jiffies; 800 s->start_time = jiffies;
804 do_bio_hook(s); 801 do_bio_hook(s);
805 802
806 if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
807 bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
808 memcpy(bv, bio_iovec(bio),
809 sizeof(struct bio_vec) * bio_segments(bio));
810
811 s->bio.bio.bi_io_vec = bv;
812 s->unaligned_bvec = 1;
813 }
814
815 return s; 803 return s;
816} 804}
817 805
@@ -850,26 +838,13 @@ static void cached_dev_read_error(struct closure *cl)
850{ 838{
851 struct search *s = container_of(cl, struct search, cl); 839 struct search *s = container_of(cl, struct search, cl);
852 struct bio *bio = &s->bio.bio; 840 struct bio *bio = &s->bio.bio;
853 struct bio_vec *bv;
854 int i;
855 841
856 if (s->recoverable) { 842 if (s->recoverable) {
857 /* Retry from the backing device: */ 843 /* Retry from the backing device: */
858 trace_bcache_read_retry(s->orig_bio); 844 trace_bcache_read_retry(s->orig_bio);
859 845
860 s->iop.error = 0; 846 s->iop.error = 0;
861 bv = s->bio.bio.bi_io_vec;
862 do_bio_hook(s); 847 do_bio_hook(s);
863 s->bio.bio.bi_io_vec = bv;
864
865 if (!s->unaligned_bvec)
866 bio_for_each_segment(bv, s->orig_bio, i)
867 bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
868 else
869 memcpy(s->bio.bio.bi_io_vec,
870 bio_iovec(s->orig_bio),
871 sizeof(struct bio_vec) *
872 bio_segments(s->orig_bio));
873 848
874 /* XXX: invalidate cache */ 849 /* XXX: invalidate cache */
875 850
@@ -894,9 +869,9 @@ static void cached_dev_read_done(struct closure *cl)
894 869
895 if (s->iop.bio) { 870 if (s->iop.bio) {
896 bio_reset(s->iop.bio); 871 bio_reset(s->iop.bio);
897 s->iop.bio->bi_sector = s->cache_miss->bi_sector; 872 s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
898 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; 873 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
899 s->iop.bio->bi_size = s->insert_bio_sectors << 9; 874 s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
900 bch_bio_map(s->iop.bio, NULL); 875 bch_bio_map(s->iop.bio, NULL);
901 876
902 bio_copy_data(s->cache_miss, s->iop.bio); 877 bio_copy_data(s->cache_miss, s->iop.bio);
@@ -905,8 +880,7 @@ static void cached_dev_read_done(struct closure *cl)
905 s->cache_miss = NULL; 880 s->cache_miss = NULL;
906 } 881 }
907 882
908 if (verify(dc, &s->bio.bio) && s->recoverable && 883 if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
909 !s->unaligned_bvec && !s->read_dirty_data)
910 bch_data_verify(dc, s->orig_bio); 884 bch_data_verify(dc, s->orig_bio);
911 885
912 bio_complete(s); 886 bio_complete(s);
@@ -946,7 +920,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
946 struct bio *miss, *cache_bio; 920 struct bio *miss, *cache_bio;
947 921
948 if (s->cache_miss || s->iop.bypass) { 922 if (s->cache_miss || s->iop.bypass) {
949 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); 923 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
950 ret = miss == bio ? MAP_DONE : MAP_CONTINUE; 924 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
951 goto out_submit; 925 goto out_submit;
952 } 926 }
@@ -960,7 +934,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
960 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); 934 s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
961 935
962 s->iop.replace_key = KEY(s->iop.inode, 936 s->iop.replace_key = KEY(s->iop.inode,
963 bio->bi_sector + s->insert_bio_sectors, 937 bio->bi_iter.bi_sector + s->insert_bio_sectors,
964 s->insert_bio_sectors); 938 s->insert_bio_sectors);
965 939
966 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); 940 ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -969,7 +943,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
969 943
970 s->iop.replace = true; 944 s->iop.replace = true;
971 945
972 miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); 946 miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
973 947
974 /* btree_search_recurse()'s btree iterator is no good anymore */ 948 /* btree_search_recurse()'s btree iterator is no good anymore */
975 ret = miss == bio ? MAP_DONE : -EINTR; 949 ret = miss == bio ? MAP_DONE : -EINTR;
@@ -980,9 +954,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
980 if (!cache_bio) 954 if (!cache_bio)
981 goto out_submit; 955 goto out_submit;
982 956
983 cache_bio->bi_sector = miss->bi_sector; 957 cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
984 cache_bio->bi_bdev = miss->bi_bdev; 958 cache_bio->bi_bdev = miss->bi_bdev;
985 cache_bio->bi_size = s->insert_bio_sectors << 9; 959 cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
986 960
987 cache_bio->bi_end_io = request_endio; 961 cache_bio->bi_end_io = request_endio;
988 cache_bio->bi_private = &s->cl; 962 cache_bio->bi_private = &s->cl;
@@ -1032,7 +1006,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
1032{ 1006{
1033 struct closure *cl = &s->cl; 1007 struct closure *cl = &s->cl;
1034 struct bio *bio = &s->bio.bio; 1008 struct bio *bio = &s->bio.bio;
1035 struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0); 1009 struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
1036 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); 1010 struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
1037 1011
1038 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); 1012 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1088,8 +1062,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
1088 closure_bio_submit(flush, cl, s->d); 1062 closure_bio_submit(flush, cl, s->d);
1089 } 1063 }
1090 } else { 1064 } else {
1091 s->iop.bio = bio_clone_bioset(bio, GFP_NOIO, 1065 s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
1092 dc->disk.bio_split);
1093 1066
1094 closure_bio_submit(bio, cl, s->d); 1067 closure_bio_submit(bio, cl, s->d);
1095 } 1068 }
@@ -1127,13 +1100,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
1127 part_stat_unlock(); 1100 part_stat_unlock();
1128 1101
1129 bio->bi_bdev = dc->bdev; 1102 bio->bi_bdev = dc->bdev;
1130 bio->bi_sector += dc->sb.data_offset; 1103 bio->bi_iter.bi_sector += dc->sb.data_offset;
1131 1104
1132 if (cached_dev_get(dc)) { 1105 if (cached_dev_get(dc)) {
1133 s = search_alloc(bio, d); 1106 s = search_alloc(bio, d);
1134 trace_bcache_request_start(s->d, bio); 1107 trace_bcache_request_start(s->d, bio);
1135 1108
1136 if (!bio->bi_size) { 1109 if (!bio->bi_iter.bi_size) {
1137 /* 1110 /*
1138 * can't call bch_journal_meta from under 1111 * can't call bch_journal_meta from under
1139 * generic_make_request 1112 * generic_make_request
@@ -1205,24 +1178,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
1205static int flash_dev_cache_miss(struct btree *b, struct search *s, 1178static int flash_dev_cache_miss(struct btree *b, struct search *s,
1206 struct bio *bio, unsigned sectors) 1179 struct bio *bio, unsigned sectors)
1207{ 1180{
1208 struct bio_vec *bv; 1181 struct bio_vec bv;
1209 int i; 1182 struct bvec_iter iter;
1210 1183
1211 /* Zero fill bio */ 1184 /* Zero fill bio */
1212 1185
1213 bio_for_each_segment(bv, bio, i) { 1186 bio_for_each_segment(bv, bio, iter) {
1214 unsigned j = min(bv->bv_len >> 9, sectors); 1187 unsigned j = min(bv.bv_len >> 9, sectors);
1215 1188
1216 void *p = kmap(bv->bv_page); 1189 void *p = kmap(bv.bv_page);
1217 memset(p + bv->bv_offset, 0, j << 9); 1190 memset(p + bv.bv_offset, 0, j << 9);
1218 kunmap(bv->bv_page); 1191 kunmap(bv.bv_page);
1219 1192
1220 sectors -= j; 1193 sectors -= j;
1221 } 1194 }
1222 1195
1223 bio_advance(bio, min(sectors << 9, bio->bi_size)); 1196 bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
1224 1197
1225 if (!bio->bi_size) 1198 if (!bio->bi_iter.bi_size)
1226 return MAP_DONE; 1199 return MAP_DONE;
1227 1200
1228 return MAP_CONTINUE; 1201 return MAP_CONTINUE;
@@ -1256,7 +1229,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1256 1229
1257 trace_bcache_request_start(s->d, bio); 1230 trace_bcache_request_start(s->d, bio);
1258 1231
1259 if (!bio->bi_size) { 1232 if (!bio->bi_iter.bi_size) {
1260 /* 1233 /*
1261 * can't call bch_journal_meta from under 1234 * can't call bch_journal_meta from under
1262 * generic_make_request 1235 * generic_make_request
@@ -1266,7 +1239,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
1266 bcache_wq); 1239 bcache_wq);
1267 } else if (rw) { 1240 } else if (rw) {
1268 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, 1241 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
1269 &KEY(d->id, bio->bi_sector, 0), 1242 &KEY(d->id, bio->bi_iter.bi_sector, 0),
1270 &KEY(d->id, bio_end_sector(bio), 0)); 1243 &KEY(d->id, bio_end_sector(bio), 0));
1271 1244
1272 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; 1245 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
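do_bio_hook() and cached_dev_write() stop memcpy()ing struct bio or cloning the full biovec array and use the new fast-clone helpers, which only copy the iterator and point the clone at the victim's bi_io_vec. A rough sketch of that trade-off, assuming __bio_clone_fast()/bio_clone_fast() as introduced earlier in this series (take_fast_clone() is a hypothetical wrapper):

#include <linux/bio.h>

/* Hypothetical: take a lightweight clone of @src for resubmission. */
static struct bio *take_fast_clone(struct bio *src, struct bio_set *bs)
{
	struct bio *clone = bio_clone_fast(src, GFP_NOIO, bs);

	if (!clone)
		return NULL;

	/*
	 * The clone copies src->bi_iter and shares src->bi_io_vec rather than
	 * duplicating the vector, so it is cheap - but src (and its pages)
	 * must outlive the clone. The caller still sets bi_end_io/bi_private
	 * before submitting it.
	 */
	return clone;
}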
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c57bfa071a57..93d593f957f6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
233 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); 233 struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
234 unsigned i; 234 unsigned i;
235 235
236 bio->bi_sector = SB_SECTOR; 236 bio->bi_iter.bi_sector = SB_SECTOR;
237 bio->bi_rw = REQ_SYNC|REQ_META; 237 bio->bi_rw = REQ_SYNC|REQ_META;
238 bio->bi_size = SB_SIZE; 238 bio->bi_iter.bi_size = SB_SIZE;
239 bch_bio_map(bio, NULL); 239 bch_bio_map(bio, NULL);
240 240
241 out->offset = cpu_to_le64(sb->offset); 241 out->offset = cpu_to_le64(sb->offset);
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
347 struct bio *bio = bch_bbio_alloc(c); 347 struct bio *bio = bch_bbio_alloc(c);
348 348
349 bio->bi_rw = REQ_SYNC|REQ_META|rw; 349 bio->bi_rw = REQ_SYNC|REQ_META|rw;
350 bio->bi_size = KEY_SIZE(k) << 9; 350 bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
351 351
352 bio->bi_end_io = uuid_endio; 352 bio->bi_end_io = uuid_endio;
353 bio->bi_private = cl; 353 bio->bi_private = cl;
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
503 503
504 closure_init_stack(cl); 504 closure_init_stack(cl);
505 505
506 bio->bi_sector = bucket * ca->sb.bucket_size; 506 bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
507 bio->bi_bdev = ca->bdev; 507 bio->bi_bdev = ca->bdev;
508 bio->bi_rw = REQ_SYNC|REQ_META|rw; 508 bio->bi_rw = REQ_SYNC|REQ_META|rw;
509 bio->bi_size = bucket_bytes(ca); 509 bio->bi_iter.bi_size = bucket_bytes(ca);
510 510
511 bio->bi_end_io = prio_endio; 511 bio->bi_end_io = prio_endio;
512 bio->bi_private = ca; 512 bio->bi_private = ca;
@@ -739,8 +739,6 @@ static void bcache_device_free(struct bcache_device *d)
739 } 739 }
740 740
741 bio_split_pool_free(&d->bio_split_hook); 741 bio_split_pool_free(&d->bio_split_hook);
742 if (d->unaligned_bvec)
743 mempool_destroy(d->unaligned_bvec);
744 if (d->bio_split) 742 if (d->bio_split)
745 bioset_free(d->bio_split); 743 bioset_free(d->bio_split);
746 if (is_vmalloc_addr(d->full_dirty_stripes)) 744 if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +791,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
793 return minor; 791 return minor;
794 792
795 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || 793 if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
796 !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
797 sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
798 bio_split_pool_init(&d->bio_split_hook) || 794 bio_split_pool_init(&d->bio_split_hook) ||
799 !(d->disk = alloc_disk(1))) { 795 !(d->disk = alloc_disk(1))) {
800 ida_simple_remove(&bcache_minor, minor); 796 ida_simple_remove(&bcache_minor, minor);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index bb37618e7664..db3ae4c2b223 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
224 224
225void bch_bio_map(struct bio *bio, void *base) 225void bch_bio_map(struct bio *bio, void *base)
226{ 226{
227 size_t size = bio->bi_size; 227 size_t size = bio->bi_iter.bi_size;
228 struct bio_vec *bv = bio->bi_io_vec; 228 struct bio_vec *bv = bio->bi_io_vec;
229 229
230 BUG_ON(!bio->bi_size); 230 BUG_ON(!bio->bi_iter.bi_size);
231 BUG_ON(bio->bi_vcnt); 231 BUG_ON(bio->bi_vcnt);
232 232
233 bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0; 233 bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6c44fe059c27..f4300e4c0114 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w)
111 if (!io->dc->writeback_percent) 111 if (!io->dc->writeback_percent)
112 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); 112 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
113 113
114 bio->bi_size = KEY_SIZE(&w->key) << 9; 114 bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
115 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); 115 bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
116 bio->bi_private = w; 116 bio->bi_private = w;
117 bio->bi_io_vec = bio->bi_inline_vecs; 117 bio->bi_io_vec = bio->bi_inline_vecs;
@@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl)
184 184
185 dirty_init(w); 185 dirty_init(w);
186 io->bio.bi_rw = WRITE; 186 io->bio.bi_rw = WRITE;
187 io->bio.bi_sector = KEY_START(&w->key); 187 io->bio.bi_iter.bi_sector = KEY_START(&w->key);
188 io->bio.bi_bdev = io->dc->bdev; 188 io->bio.bi_bdev = io->dc->bdev;
189 io->bio.bi_end_io = dirty_endio; 189 io->bio.bi_end_io = dirty_endio;
190 190
@@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc)
253 io->dc = dc; 253 io->dc = dc;
254 254
255 dirty_init(w); 255 dirty_init(w);
256 io->bio.bi_sector = PTR_OFFSET(&w->key, 0); 256 io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
257 io->bio.bi_bdev = PTR_CACHE(dc->disk.c, 257 io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
258 &w->key, 0)->bdev; 258 &w->key, 0)->bdev;
259 io->bio.bi_rw = READ; 259 io->bio.bi_rw = READ;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
50 return false; 50 return false;
51 51
52 if (dc->partial_stripes_expensive && 52 if (dc->partial_stripes_expensive &&
53 bcache_dev_stripe_dirty(dc, bio->bi_sector, 53 bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
54 bio_sectors(bio))) 54 bio_sectors(bio)))
55 return true; 55 return true;
56 56
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2645c7..dd3646111561 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -17,55 +17,24 @@
17 * original bio state. 17 * original bio state.
18 */ 18 */
19 19
20struct dm_bio_vec_details {
21#if PAGE_SIZE < 65536
22 __u16 bv_len;
23 __u16 bv_offset;
24#else
25 unsigned bv_len;
26 unsigned bv_offset;
27#endif
28};
29
30struct dm_bio_details { 20struct dm_bio_details {
31 sector_t bi_sector;
32 struct block_device *bi_bdev; 21 struct block_device *bi_bdev;
33 unsigned int bi_size;
34 unsigned short bi_idx;
35 unsigned long bi_flags; 22 unsigned long bi_flags;
36 struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES]; 23 struct bvec_iter bi_iter;
37}; 24};
38 25
39static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) 26static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
40{ 27{
41 unsigned i;
42
43 bd->bi_sector = bio->bi_sector;
44 bd->bi_bdev = bio->bi_bdev; 28 bd->bi_bdev = bio->bi_bdev;
45 bd->bi_size = bio->bi_size;
46 bd->bi_idx = bio->bi_idx;
47 bd->bi_flags = bio->bi_flags; 29 bd->bi_flags = bio->bi_flags;
48 30 bd->bi_iter = bio->bi_iter;
49 for (i = 0; i < bio->bi_vcnt; i++) {
50 bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len;
51 bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset;
52 }
53} 31}
54 32
55static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) 33static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
56{ 34{
57 unsigned i;
58
59 bio->bi_sector = bd->bi_sector;
60 bio->bi_bdev = bd->bi_bdev; 35 bio->bi_bdev = bd->bi_bdev;
61 bio->bi_size = bd->bi_size;
62 bio->bi_idx = bd->bi_idx;
63 bio->bi_flags = bd->bi_flags; 36 bio->bi_flags = bd->bi_flags;
64 37 bio->bi_iter = bd->bi_iter;
65 for (i = 0; i < bio->bi_vcnt; i++) {
66 bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len;
67 bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset;
68 }
69} 38}
70 39
71#endif 40#endif
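The whole dm_bio_details structure collapses because everything it used to snapshot by hand (sector, size, index, and every per-vector offset/length) is now captured by a single value-copyable struct bvec_iter. For reference, the iterator is roughly the following (paraphrasing include/linux/blk_types.h from this series; the struct is renamed here and the exact layout should be checked against the header):

/* Approximate layout of the new iterator (see include/linux/blk_types.h). */
struct bvec_iter_example {
	sector_t	bi_sector;	/* device address, in 512-byte sectors */
	unsigned int	bi_size;	/* residual I/O count, in bytes */
	unsigned int	bi_idx;		/* current index into bi_io_vec */
	unsigned int	bi_bvec_done;	/* bytes completed within the current bvec */
};

Saving and restoring bio state is then a plain struct assignment in each direction, which is exactly what the new dm_bio_record()/dm_bio_restore() above do.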
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 54bdd923316f..a1b58a65d8ed 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -538,7 +538,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
538 bio_init(&b->bio); 538 bio_init(&b->bio);
539 b->bio.bi_io_vec = b->bio_vec; 539 b->bio.bi_io_vec = b->bio_vec;
540 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; 540 b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
541 b->bio.bi_sector = block << b->c->sectors_per_block_bits; 541 b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
542 b->bio.bi_bdev = b->c->bdev; 542 b->bio.bi_bdev = b->c->bdev;
543 b->bio.bi_end_io = end_io; 543 b->bio.bi_end_io = end_io;
544 544
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 64780ad73bb0..d13a16865d03 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
72 72
73static void iot_update_stats(struct io_tracker *t, struct bio *bio) 73static void iot_update_stats(struct io_tracker *t, struct bio *bio)
74{ 74{
75 if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) 75 if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
76 t->nr_seq_samples++; 76 t->nr_seq_samples++;
77 else { 77 else {
78 /* 78 /*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
87 t->nr_rand_samples++; 87 t->nr_rand_samples++;
88 } 88 }
89 89
90 t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); 90 t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
91} 91}
92 92
93static void iot_check_for_pattern_switch(struct io_tracker *t) 93static void iot_check_for_pattern_switch(struct io_tracker *t)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 1b1469ebe5cb..99f91628a33a 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio)
85{ 85{
86 bio->bi_end_io = h->bi_end_io; 86 bio->bi_end_io = h->bi_end_io;
87 bio->bi_private = h->bi_private; 87 bio->bi_private = h->bi_private;
88
89 /*
90 * Must bump bi_remaining to allow bio to complete with
91 * restored bi_end_io.
92 */
93 atomic_inc(&bio->bi_remaining);
88} 94}
89 95
90/*----------------------------------------------------------------*/ 96/*----------------------------------------------------------------*/
@@ -664,15 +670,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
664static void remap_to_cache(struct cache *cache, struct bio *bio, 670static void remap_to_cache(struct cache *cache, struct bio *bio,
665 dm_cblock_t cblock) 671 dm_cblock_t cblock)
666{ 672{
667 sector_t bi_sector = bio->bi_sector; 673 sector_t bi_sector = bio->bi_iter.bi_sector;
668 674
669 bio->bi_bdev = cache->cache_dev->bdev; 675 bio->bi_bdev = cache->cache_dev->bdev;
670 if (!block_size_is_power_of_two(cache)) 676 if (!block_size_is_power_of_two(cache))
671 bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + 677 bio->bi_iter.bi_sector =
672 sector_div(bi_sector, cache->sectors_per_block); 678 (from_cblock(cblock) * cache->sectors_per_block) +
679 sector_div(bi_sector, cache->sectors_per_block);
673 else 680 else
674 bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | 681 bio->bi_iter.bi_sector =
675 (bi_sector & (cache->sectors_per_block - 1)); 682 (from_cblock(cblock) << cache->sectors_per_block_shift) |
683 (bi_sector & (cache->sectors_per_block - 1));
676} 684}
677 685
678static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) 686static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +720,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
712 720
713static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) 721static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
714{ 722{
715 sector_t block_nr = bio->bi_sector; 723 sector_t block_nr = bio->bi_iter.bi_sector;
716 724
717 if (!block_size_is_power_of_two(cache)) 725 if (!block_size_is_power_of_two(cache))
718 (void) sector_div(block_nr, cache->sectors_per_block); 726 (void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1035,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
1027static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) 1035static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
1028{ 1036{
1029 return (bio_data_dir(bio) == WRITE) && 1037 return (bio_data_dir(bio) == WRITE) &&
1030 (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); 1038 (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
1031} 1039}
1032 1040
1033static void avoid_copy(struct dm_cache_migration *mg) 1041static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1260,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
1252 size_t pb_data_size = get_per_bio_data_size(cache); 1260 size_t pb_data_size = get_per_bio_data_size(cache);
1253 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); 1261 struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
1254 1262
1255 BUG_ON(bio->bi_size); 1263 BUG_ON(bio->bi_iter.bi_size);
1256 if (!pb->req_nr) 1264 if (!pb->req_nr)
1257 remap_to_origin(cache, bio); 1265 remap_to_origin(cache, bio);
1258 else 1266 else
@@ -1275,9 +1283,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
1275 */ 1283 */
1276static void process_discard_bio(struct cache *cache, struct bio *bio) 1284static void process_discard_bio(struct cache *cache, struct bio *bio)
1277{ 1285{
1278 dm_block_t start_block = dm_sector_div_up(bio->bi_sector, 1286 dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
1279 cache->discard_block_size); 1287 cache->discard_block_size);
1280 dm_block_t end_block = bio->bi_sector + bio_sectors(bio); 1288 dm_block_t end_block = bio_end_sector(bio);
1281 dm_block_t b; 1289 dm_block_t b;
1282 1290
1283 end_block = block_div(end_block, cache->discard_block_size); 1291 end_block = block_div(end_block, cache->discard_block_size);
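The new atomic_inc(&bio->bi_remaining) in dm_unhook_bio() exists because, with the chained completions added by this series, each bio_endio() call decrements bi_remaining; a bio whose bi_end_io has been stolen and later restored will be "ended" one extra time, so the count has to be topped up before the restored completion can run. A hedged sketch of the hook/unhook pairing (struct and function names below are illustrative, modelled on the dm code above):

#include <linux/bio.h>

struct hook_info_example {
	bio_end_io_t	*saved_end_io;
	void		*saved_private;
};

/* Hypothetical: steal a bio's completion, then give it back safely. */
static void hook_bio(struct hook_info_example *h, struct bio *bio,
		     bio_end_io_t *end_io, void *private)
{
	h->saved_end_io  = bio->bi_end_io;
	h->saved_private = bio->bi_private;
	bio->bi_end_io   = end_io;
	bio->bi_private  = private;
}

static void unhook_bio(struct hook_info_example *h, struct bio *bio)
{
	bio->bi_end_io  = h->saved_end_io;
	bio->bi_private = h->saved_private;

	/* the restored bi_end_io will be driven by a second bio_endio() call */
	atomic_inc(&bio->bi_remaining);
}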
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 81b0fa660452..784695d22fde 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -39,10 +39,8 @@ struct convert_context {
39 struct completion restart; 39 struct completion restart;
40 struct bio *bio_in; 40 struct bio *bio_in;
41 struct bio *bio_out; 41 struct bio *bio_out;
42 unsigned int offset_in; 42 struct bvec_iter iter_in;
43 unsigned int offset_out; 43 struct bvec_iter iter_out;
44 unsigned int idx_in;
45 unsigned int idx_out;
46 sector_t cc_sector; 44 sector_t cc_sector;
47 atomic_t cc_pending; 45 atomic_t cc_pending;
48}; 46};
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc,
826{ 824{
827 ctx->bio_in = bio_in; 825 ctx->bio_in = bio_in;
828 ctx->bio_out = bio_out; 826 ctx->bio_out = bio_out;
829 ctx->offset_in = 0; 827 if (bio_in)
830 ctx->offset_out = 0; 828 ctx->iter_in = bio_in->bi_iter;
831 ctx->idx_in = bio_in ? bio_in->bi_idx : 0; 829 if (bio_out)
832 ctx->idx_out = bio_out ? bio_out->bi_idx : 0; 830 ctx->iter_out = bio_out->bi_iter;
833 ctx->cc_sector = sector + cc->iv_offset; 831 ctx->cc_sector = sector + cc->iv_offset;
834 init_completion(&ctx->restart); 832 init_completion(&ctx->restart);
835} 833}
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc,
857 struct convert_context *ctx, 855 struct convert_context *ctx,
858 struct ablkcipher_request *req) 856 struct ablkcipher_request *req)
859{ 857{
860 struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); 858 struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in);
861 struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); 859 struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out);
862 struct dm_crypt_request *dmreq; 860 struct dm_crypt_request *dmreq;
863 u8 *iv; 861 u8 *iv;
864 int r; 862 int r;
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc,
869 dmreq->iv_sector = ctx->cc_sector; 867 dmreq->iv_sector = ctx->cc_sector;
870 dmreq->ctx = ctx; 868 dmreq->ctx = ctx;
871 sg_init_table(&dmreq->sg_in, 1); 869 sg_init_table(&dmreq->sg_in, 1);
872 sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, 870 sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT,
873 bv_in->bv_offset + ctx->offset_in); 871 bv_in.bv_offset);
874 872
875 sg_init_table(&dmreq->sg_out, 1); 873 sg_init_table(&dmreq->sg_out, 1);
876 sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, 874 sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT,
877 bv_out->bv_offset + ctx->offset_out); 875 bv_out.bv_offset);
878 876
879 ctx->offset_in += 1 << SECTOR_SHIFT; 877 bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT);
880 if (ctx->offset_in >= bv_in->bv_len) { 878 bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT);
881 ctx->offset_in = 0;
882 ctx->idx_in++;
883 }
884
885 ctx->offset_out += 1 << SECTOR_SHIFT;
886 if (ctx->offset_out >= bv_out->bv_len) {
887 ctx->offset_out = 0;
888 ctx->idx_out++;
889 }
890 879
891 if (cc->iv_gen_ops) { 880 if (cc->iv_gen_ops) {
892 r = cc->iv_gen_ops->generator(cc, iv, dmreq); 881 r = cc->iv_gen_ops->generator(cc, iv, dmreq);
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc,
937 926
938 atomic_set(&ctx->cc_pending, 1); 927 atomic_set(&ctx->cc_pending, 1);
939 928
940 while(ctx->idx_in < ctx->bio_in->bi_vcnt && 929 while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) {
941 ctx->idx_out < ctx->bio_out->bi_vcnt) {
942 930
943 crypt_alloc_req(cc, ctx); 931 crypt_alloc_req(cc, ctx);
944 932
@@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
1021 size -= len; 1009 size -= len;
1022 } 1010 }
1023 1011
1024 if (!clone->bi_size) { 1012 if (!clone->bi_iter.bi_size) {
1025 bio_put(clone); 1013 bio_put(clone);
1026 return NULL; 1014 return NULL;
1027 } 1015 }
@@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
1161 crypt_inc_pending(io); 1149 crypt_inc_pending(io);
1162 1150
1163 clone_init(io, clone); 1151 clone_init(io, clone);
1164 clone->bi_sector = cc->start + io->sector; 1152 clone->bi_iter.bi_sector = cc->start + io->sector;
1165 1153
1166 generic_make_request(clone); 1154 generic_make_request(clone);
1167 return 0; 1155 return 0;
@@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
1207 } 1195 }
1208 1196
1209 /* crypt_convert should have filled the clone bio */ 1197 /* crypt_convert should have filled the clone bio */
1210 BUG_ON(io->ctx.idx_out < clone->bi_vcnt); 1198 BUG_ON(io->ctx.iter_out.bi_size);
1211 1199
1212 clone->bi_sector = cc->start + io->sector; 1200 clone->bi_iter.bi_sector = cc->start + io->sector;
1213 1201
1214 if (async) 1202 if (async)
1215 kcryptd_queue_io(io); 1203 kcryptd_queue_io(io);
@@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1224 struct dm_crypt_io *new_io; 1212 struct dm_crypt_io *new_io;
1225 int crypt_finished; 1213 int crypt_finished;
1226 unsigned out_of_pages = 0; 1214 unsigned out_of_pages = 0;
1227 unsigned remaining = io->base_bio->bi_size; 1215 unsigned remaining = io->base_bio->bi_iter.bi_size;
1228 sector_t sector = io->sector; 1216 sector_t sector = io->sector;
1229 int r; 1217 int r;
1230 1218
@@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1246 } 1234 }
1247 1235
1248 io->ctx.bio_out = clone; 1236 io->ctx.bio_out = clone;
1249 io->ctx.idx_out = 0; 1237 io->ctx.iter_out = clone->bi_iter;
1250 1238
1251 remaining -= clone->bi_size; 1239 remaining -= clone->bi_iter.bi_size;
1252 sector += bio_sectors(clone); 1240 sector += bio_sectors(clone);
1253 1241
1254 crypt_inc_pending(io); 1242 crypt_inc_pending(io);
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
1290 crypt_inc_pending(new_io); 1278 crypt_inc_pending(new_io);
1291 crypt_convert_init(cc, &new_io->ctx, NULL, 1279 crypt_convert_init(cc, &new_io->ctx, NULL,
1292 io->base_bio, sector); 1280 io->base_bio, sector);
1293 new_io->ctx.idx_in = io->ctx.idx_in; 1281 new_io->ctx.iter_in = io->ctx.iter_in;
1294 new_io->ctx.offset_in = io->ctx.offset_in;
1295 1282
1296 /* 1283 /*
1297 * Fragments after the first use the base_io 1284 * Fragments after the first use the base_io
@@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
1869 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { 1856 if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
1870 bio->bi_bdev = cc->dev->bdev; 1857 bio->bi_bdev = cc->dev->bdev;
1871 if (bio_sectors(bio)) 1858 if (bio_sectors(bio))
1872 bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); 1859 bio->bi_iter.bi_sector = cc->start +
1860 dm_target_offset(ti, bio->bi_iter.bi_sector);
1873 return DM_MAPIO_REMAPPED; 1861 return DM_MAPIO_REMAPPED;
1874 } 1862 }
1875 1863
1876 io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); 1864 io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
1877 1865
1878 if (bio_data_dir(io->base_bio) == READ) { 1866 if (bio_data_dir(io->base_bio) == READ) {
1879 if (kcryptd_io_read(io, GFP_NOWAIT)) 1867 if (kcryptd_io_read(io, GFP_NOWAIT))
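The dm-crypt conversion above is the more interesting case: the hand-rolled idx_in/offset_in bookkeeping is replaced by a private struct bvec_iter per direction, advanced with bio_advance_iter() and dereferenced with bio_iter_iovec(). A minimal sketch of that iteration pattern, assuming a hypothetical process_sector() callback that is not part of this patch:

    void walk_bio_by_sector(struct bio *bio)
    {
            struct bvec_iter iter = bio->bi_iter;   /* private copy; the bio itself is untouched */

            while (iter.bi_size) {
                    /* bio_iter_iovec() returns a bio_vec clamped to the current segment */
                    struct bio_vec bv = bio_iter_iovec(bio, iter);
                    u8 *data = kmap_atomic(bv.bv_page);

                    process_sector(data + bv.bv_offset);    /* hypothetical, one 512-byte sector */
                    kunmap_atomic(data);

                    /* steps bi_idx/bi_bvec_done and shrinks bi_size by one sector */
                    bio_advance_iter(bio, &iter, 1 << SECTOR_SHIFT);
            }
    }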
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 2f91d6d4a2cc..fc8482a65dd2 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -289,14 +289,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
289 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { 289 if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
290 bio->bi_bdev = dc->dev_write->bdev; 290 bio->bi_bdev = dc->dev_write->bdev;
291 if (bio_sectors(bio)) 291 if (bio_sectors(bio))
292 bio->bi_sector = dc->start_write + 292 bio->bi_iter.bi_sector = dc->start_write +
293 dm_target_offset(ti, bio->bi_sector); 293 dm_target_offset(ti, bio->bi_iter.bi_sector);
294 294
295 return delay_bio(dc, dc->write_delay, bio); 295 return delay_bio(dc, dc->write_delay, bio);
296 } 296 }
297 297
298 bio->bi_bdev = dc->dev_read->bdev; 298 bio->bi_bdev = dc->dev_read->bdev;
299 bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector); 299 bio->bi_iter.bi_sector = dc->start_read +
300 dm_target_offset(ti, bio->bi_iter.bi_sector);
300 301
301 return delay_bio(dc, dc->read_delay, bio); 302 return delay_bio(dc, dc->read_delay, bio);
302} 303}
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec5f126..b257e46876d3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
248 248
249 bio->bi_bdev = fc->dev->bdev; 249 bio->bi_bdev = fc->dev->bdev;
250 if (bio_sectors(bio)) 250 if (bio_sectors(bio))
251 bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); 251 bio->bi_iter.bi_sector =
252 flakey_map_sector(ti, bio->bi_iter.bi_sector);
252} 253}
253 254
254static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) 255static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
265 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " 266 DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
266 "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", 267 "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
267 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, 268 bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
268 (bio_data_dir(bio) == WRITE) ? 'w' : 'r', 269 (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
269 bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); 270 (unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
270 } 271 }
271} 272}
272 273
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986a2fec..b2b8a10e8427 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse
201/* 201/*
202 * Functions for getting the pages from a bvec. 202 * Functions for getting the pages from a bvec.
203 */ 203 */
204static void bvec_get_page(struct dpages *dp, 204static void bio_get_page(struct dpages *dp,
205 struct page **p, unsigned long *len, unsigned *offset) 205 struct page **p, unsigned long *len, unsigned *offset)
206{ 206{
207 struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; 207 struct bio *bio = dp->context_ptr;
208 *p = bvec->bv_page; 208 struct bio_vec bvec = bio_iovec(bio);
209 *len = bvec->bv_len; 209 *p = bvec.bv_page;
210 *offset = bvec->bv_offset; 210 *len = bvec.bv_len;
211 *offset = bvec.bv_offset;
211} 212}
212 213
213static void bvec_next_page(struct dpages *dp) 214static void bio_next_page(struct dpages *dp)
214{ 215{
215 struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; 216 struct bio *bio = dp->context_ptr;
216 dp->context_ptr = bvec + 1; 217 struct bio_vec bvec = bio_iovec(bio);
218
219 bio_advance(bio, bvec.bv_len);
217} 220}
218 221
219static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec) 222static void bio_dp_init(struct dpages *dp, struct bio *bio)
220{ 223{
221 dp->get_page = bvec_get_page; 224 dp->get_page = bio_get_page;
222 dp->next_page = bvec_next_page; 225 dp->next_page = bio_next_page;
223 dp->context_ptr = bvec; 226 dp->context_ptr = bio;
224} 227}
225 228
226/* 229/*
@@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
304 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); 307 dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
305 308
306 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); 309 bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
307 bio->bi_sector = where->sector + (where->count - remaining); 310 bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
308 bio->bi_bdev = where->bdev; 311 bio->bi_bdev = where->bdev;
309 bio->bi_end_io = endio; 312 bio->bi_end_io = endio;
310 store_io_and_region_in_bio(bio, io, region); 313 store_io_and_region_in_bio(bio, io, region);
311 314
312 if (rw & REQ_DISCARD) { 315 if (rw & REQ_DISCARD) {
313 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); 316 num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
314 bio->bi_size = num_sectors << SECTOR_SHIFT; 317 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
315 remaining -= num_sectors; 318 remaining -= num_sectors;
316 } else if (rw & REQ_WRITE_SAME) { 319 } else if (rw & REQ_WRITE_SAME) {
317 /* 320 /*
@@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
320 dp->get_page(dp, &page, &len, &offset); 323 dp->get_page(dp, &page, &len, &offset);
321 bio_add_page(bio, page, logical_block_size, offset); 324 bio_add_page(bio, page, logical_block_size, offset);
322 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); 325 num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
323 bio->bi_size = num_sectors << SECTOR_SHIFT; 326 bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
324 327
325 offset = 0; 328 offset = 0;
326 remaining -= num_sectors; 329 remaining -= num_sectors;
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp,
457 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); 460 list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset);
458 break; 461 break;
459 462
460 case DM_IO_BVEC: 463 case DM_IO_BIO:
461 bvec_dp_init(dp, io_req->mem.ptr.bvec); 464 bio_dp_init(dp, io_req->mem.ptr.bio);
462 break; 465 break;
463 466
464 case DM_IO_VMA: 467 case DM_IO_VMA:
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d267340c..53e848c10939 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
85 85
86 bio->bi_bdev = lc->dev->bdev; 86 bio->bi_bdev = lc->dev->bdev;
87 if (bio_sectors(bio)) 87 if (bio_sectors(bio))
88 bio->bi_sector = linear_map_sector(ti, bio->bi_sector); 88 bio->bi_iter.bi_sector =
89 linear_map_sector(ti, bio->bi_iter.bi_sector);
89} 90}
90 91
91static int linear_map(struct dm_target *ti, struct bio *bio) 92static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443c5614..f284e0bfb25f 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
432 region_t region = dm_rh_bio_to_region(ms->rh, bio); 432 region_t region = dm_rh_bio_to_region(ms->rh, bio);
433 433
434 if (log->type->in_sync(log, region, 0)) 434 if (log->type->in_sync(log, region, 0))
435 return choose_mirror(ms, bio->bi_sector) ? 1 : 0; 435 return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
436 436
437 return 0; 437 return 0;
438} 438}
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
442 */ 442 */
443static sector_t map_sector(struct mirror *m, struct bio *bio) 443static sector_t map_sector(struct mirror *m, struct bio *bio)
444{ 444{
445 if (unlikely(!bio->bi_size)) 445 if (unlikely(!bio->bi_iter.bi_size))
446 return 0; 446 return 0;
447 return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector); 447 return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
448} 448}
449 449
450static void map_bio(struct mirror *m, struct bio *bio) 450static void map_bio(struct mirror *m, struct bio *bio)
451{ 451{
452 bio->bi_bdev = m->dev->bdev; 452 bio->bi_bdev = m->dev->bdev;
453 bio->bi_sector = map_sector(m, bio); 453 bio->bi_iter.bi_sector = map_sector(m, bio);
454} 454}
455 455
456static void map_region(struct dm_io_region *io, struct mirror *m, 456static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
526 struct dm_io_region io; 526 struct dm_io_region io;
527 struct dm_io_request io_req = { 527 struct dm_io_request io_req = {
528 .bi_rw = READ, 528 .bi_rw = READ,
529 .mem.type = DM_IO_BVEC, 529 .mem.type = DM_IO_BIO,
530 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 530 .mem.ptr.bio = bio,
531 .notify.fn = read_callback, 531 .notify.fn = read_callback,
532 .notify.context = bio, 532 .notify.context = bio,
533 .client = m->ms->io_client, 533 .client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
559 * We can only read balance if the region is in sync. 559 * We can only read balance if the region is in sync.
560 */ 560 */
561 if (likely(region_in_sync(ms, region, 1))) 561 if (likely(region_in_sync(ms, region, 1)))
562 m = choose_mirror(ms, bio->bi_sector); 562 m = choose_mirror(ms, bio->bi_iter.bi_sector);
563 else if (m && atomic_read(&m->error_count)) 563 else if (m && atomic_read(&m->error_count))
564 m = NULL; 564 m = NULL;
565 565
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
629 struct mirror *m; 629 struct mirror *m;
630 struct dm_io_request io_req = { 630 struct dm_io_request io_req = {
631 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), 631 .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
632 .mem.type = DM_IO_BVEC, 632 .mem.type = DM_IO_BIO,
633 .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, 633 .mem.ptr.bio = bio,
634 .notify.fn = write_callback, 634 .notify.fn = write_callback,
635 .notify.context = bio, 635 .notify.context = bio,
636 .client = ms->io_client, 636 .client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
1181 * The region is in-sync and we can perform reads directly. 1181 * The region is in-sync and we can perform reads directly.
1182 * Store enough information so we can retry if it fails. 1182 * Store enough information so we can retry if it fails.
1183 */ 1183 */
1184 m = choose_mirror(ms, bio->bi_sector); 1184 m = choose_mirror(ms, bio->bi_iter.bi_sector);
1185 if (unlikely(!m)) 1185 if (unlikely(!m))
1186 return -EIO; 1186 return -EIO;
1187 1187
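The dm-io and dm-raid1 hunks pair up: dm-io's page provider now stores the bio itself and consumes it in place with bio_iovec()/bio_advance(), so callers pass the whole bio through the new DM_IO_BIO memory type instead of a bio_vec pointer. A hedged sketch of the caller side, loosely modelled on read_async_bio() above (names taken from the hunks; region setup and error handling trimmed):

    struct dm_io_region io = {
            .bdev   = m->dev->bdev,
            .sector = map_sector(m, bio),
            .count  = bio_sectors(bio),
    };
    struct dm_io_request io_req = {
            .bi_rw          = READ,
            .mem.type       = DM_IO_BIO,    /* was DM_IO_BVEC with .mem.ptr.bvec */
            .mem.ptr.bio    = bio,          /* dm-io walks it with bio_iovec()/bio_advance() */
            .notify.fn      = read_callback,
            .notify.context = bio,
            .client         = m->ms->io_client,
    };

    BUG_ON(dm_io(&io_req, 1, &io, NULL));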
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb34..b929fd5f4984 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
126 126
127region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) 127region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
128{ 128{
129 return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); 129 return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
130 rh->target_begin);
130} 131}
131EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); 132EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
132 133
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index 944690bafd93..01b6a11813f2 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1438,6 +1438,7 @@ out:
1438 if (full_bio) { 1438 if (full_bio) {
1439 full_bio->bi_end_io = pe->full_bio_end_io; 1439 full_bio->bi_end_io = pe->full_bio_end_io;
1440 full_bio->bi_private = pe->full_bio_private; 1440 full_bio->bi_private = pe->full_bio_private;
1441 atomic_inc(&full_bio->bi_remaining);
1441 } 1442 }
1442 free_pending_exception(pe); 1443 free_pending_exception(pe);
1443 1444
@@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
1619 struct bio *bio, chunk_t chunk) 1620 struct bio *bio, chunk_t chunk)
1620{ 1621{
1621 bio->bi_bdev = s->cow->bdev; 1622 bio->bi_bdev = s->cow->bdev;
1622 bio->bi_sector = chunk_to_sector(s->store, 1623 bio->bi_iter.bi_sector =
1623 dm_chunk_number(e->new_chunk) + 1624 chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
1624 (chunk - e->old_chunk)) + 1625 (chunk - e->old_chunk)) +
1625 (bio->bi_sector & 1626 (bio->bi_iter.bi_sector & s->store->chunk_mask);
1626 s->store->chunk_mask);
1627} 1627}
1628 1628
1629static int snapshot_map(struct dm_target *ti, struct bio *bio) 1629static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
1641 return DM_MAPIO_REMAPPED; 1641 return DM_MAPIO_REMAPPED;
1642 } 1642 }
1643 1643
1644 chunk = sector_to_chunk(s->store, bio->bi_sector); 1644 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1645 1645
1646 /* Full snapshots are not usable */ 1646 /* Full snapshots are not usable */
1647 /* To get here the table must be live so s->active is always set. */ 1647 /* To get here the table must be live so s->active is always set. */
@@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
1702 r = DM_MAPIO_SUBMITTED; 1702 r = DM_MAPIO_SUBMITTED;
1703 1703
1704 if (!pe->started && 1704 if (!pe->started &&
1705 bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { 1705 bio->bi_iter.bi_size ==
1706 (s->store->chunk_size << SECTOR_SHIFT)) {
1706 pe->started = 1; 1707 pe->started = 1;
1707 up_write(&s->lock); 1708 up_write(&s->lock);
1708 start_full_bio(pe, bio); 1709 start_full_bio(pe, bio);
@@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
1758 return DM_MAPIO_REMAPPED; 1759 return DM_MAPIO_REMAPPED;
1759 } 1760 }
1760 1761
1761 chunk = sector_to_chunk(s->store, bio->bi_sector); 1762 chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
1762 1763
1763 down_write(&s->lock); 1764 down_write(&s->lock);
1764 1765
@@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
2095 down_read(&_origins_lock); 2096 down_read(&_origins_lock);
2096 o = __lookup_origin(origin->bdev); 2097 o = __lookup_origin(origin->bdev);
2097 if (o) 2098 if (o)
2098 r = __origin_write(&o->snapshots, bio->bi_sector, bio); 2099 r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
2099 up_read(&_origins_lock); 2100 up_read(&_origins_lock);
2100 2101
2101 return r; 2102 return r;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712dad96..d1600d2aa2e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
259{ 259{
260 sector_t begin, end; 260 sector_t begin, end;
261 261
262 stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); 262 stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
263 target_stripe, &begin);
263 stripe_map_range_sector(sc, bio_end_sector(bio), 264 stripe_map_range_sector(sc, bio_end_sector(bio),
264 target_stripe, &end); 265 target_stripe, &end);
265 if (begin < end) { 266 if (begin < end) {
266 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; 267 bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
267 bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; 268 bio->bi_iter.bi_sector = begin +
268 bio->bi_size = to_bytes(end - begin); 269 sc->stripe[target_stripe].physical_start;
270 bio->bi_iter.bi_size = to_bytes(end - begin);
269 return DM_MAPIO_REMAPPED; 271 return DM_MAPIO_REMAPPED;
270 } else { 272 } else {
271 /* The range doesn't map to the target stripe */ 273 /* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
293 return stripe_map_range(sc, bio, target_bio_nr); 295 return stripe_map_range(sc, bio, target_bio_nr);
294 } 296 }
295 297
296 stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); 298 stripe_map_sector(sc, bio->bi_iter.bi_sector,
299 &stripe, &bio->bi_iter.bi_sector);
297 300
298 bio->bi_sector += sc->stripe[stripe].physical_start; 301 bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
299 bio->bi_bdev = sc->stripe[stripe].dev->bdev; 302 bio->bi_bdev = sc->stripe[stripe].dev->bdev;
300 303
301 return DM_MAPIO_REMAPPED; 304 return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4be4721..09a688b3d48c 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -311,11 +311,11 @@ error:
311static int switch_map(struct dm_target *ti, struct bio *bio) 311static int switch_map(struct dm_target *ti, struct bio *bio)
312{ 312{
313 struct switch_ctx *sctx = ti->private; 313 struct switch_ctx *sctx = ti->private;
314 sector_t offset = dm_target_offset(ti, bio->bi_sector); 314 sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
315 unsigned path_nr = switch_get_path_nr(sctx, offset); 315 unsigned path_nr = switch_get_path_nr(sctx, offset);
316 316
317 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; 317 bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
318 bio->bi_sector = sctx->path_list[path_nr].start + offset; 318 bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
319 319
320 return DM_MAPIO_REMAPPED; 320 return DM_MAPIO_REMAPPED;
321} 321}
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index ee29037ffc2e..357eb272dbd9 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -413,7 +413,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
413static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) 413static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
414{ 414{
415 struct pool *pool = tc->pool; 415 struct pool *pool = tc->pool;
416 sector_t block_nr = bio->bi_sector; 416 sector_t block_nr = bio->bi_iter.bi_sector;
417 417
418 if (block_size_is_power_of_two(pool)) 418 if (block_size_is_power_of_two(pool))
419 block_nr >>= pool->sectors_per_block_shift; 419 block_nr >>= pool->sectors_per_block_shift;
@@ -426,14 +426,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
426static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) 426static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
427{ 427{
428 struct pool *pool = tc->pool; 428 struct pool *pool = tc->pool;
429 sector_t bi_sector = bio->bi_sector; 429 sector_t bi_sector = bio->bi_iter.bi_sector;
430 430
431 bio->bi_bdev = tc->pool_dev->bdev; 431 bio->bi_bdev = tc->pool_dev->bdev;
432 if (block_size_is_power_of_two(pool)) 432 if (block_size_is_power_of_two(pool))
433 bio->bi_sector = (block << pool->sectors_per_block_shift) | 433 bio->bi_iter.bi_sector =
434 (bi_sector & (pool->sectors_per_block - 1)); 434 (block << pool->sectors_per_block_shift) |
435 (bi_sector & (pool->sectors_per_block - 1));
435 else 436 else
436 bio->bi_sector = (block * pool->sectors_per_block) + 437 bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
437 sector_div(bi_sector, pool->sectors_per_block); 438 sector_div(bi_sector, pool->sectors_per_block);
438} 439}
439 440
@@ -610,8 +611,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c
610 611
611static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) 612static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m)
612{ 613{
613 if (m->bio) 614 if (m->bio) {
614 m->bio->bi_end_io = m->saved_bi_end_io; 615 m->bio->bi_end_io = m->saved_bi_end_io;
616 atomic_inc(&m->bio->bi_remaining);
617 }
615 cell_error(m->tc->pool, m->cell); 618 cell_error(m->tc->pool, m->cell);
616 list_del(&m->list); 619 list_del(&m->list);
617 mempool_free(m, m->tc->pool->mapping_pool); 620 mempool_free(m, m->tc->pool->mapping_pool);
@@ -625,8 +628,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
625 int r; 628 int r;
626 629
627 bio = m->bio; 630 bio = m->bio;
628 if (bio) 631 if (bio) {
629 bio->bi_end_io = m->saved_bi_end_io; 632 bio->bi_end_io = m->saved_bi_end_io;
633 atomic_inc(&bio->bi_remaining);
634 }
630 635
631 if (m->err) { 636 if (m->err) {
632 cell_error(pool, m->cell); 637 cell_error(pool, m->cell);
@@ -723,7 +728,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
723 */ 728 */
724static int io_overlaps_block(struct pool *pool, struct bio *bio) 729static int io_overlaps_block(struct pool *pool, struct bio *bio)
725{ 730{
726 return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); 731 return bio->bi_iter.bi_size ==
732 (pool->sectors_per_block << SECTOR_SHIFT);
727} 733}
728 734
729static int io_overwrites_block(struct pool *pool, struct bio *bio) 735static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1133,7 +1139,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
1133 if (bio_detain(pool, &key, bio, &cell)) 1139 if (bio_detain(pool, &key, bio, &cell))
1134 return; 1140 return;
1135 1141
1136 if (bio_data_dir(bio) == WRITE && bio->bi_size) 1142 if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
1137 break_sharing(tc, bio, block, &key, lookup_result, cell); 1143 break_sharing(tc, bio, block, &key, lookup_result, cell);
1138 else { 1144 else {
1139 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); 1145 struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1156,7 +1162,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
1156 /* 1162 /*
1157 * Remap empty bios (flushes) immediately, without provisioning. 1163 * Remap empty bios (flushes) immediately, without provisioning.
1158 */ 1164 */
1159 if (!bio->bi_size) { 1165 if (!bio->bi_iter.bi_size) {
1160 inc_all_io_entry(pool, bio); 1166 inc_all_io_entry(pool, bio);
1161 cell_defer_no_holder(tc, cell); 1167 cell_defer_no_holder(tc, cell);
1162 1168
@@ -1256,7 +1262,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
1256 r = dm_thin_find_block(tc->td, block, 1, &lookup_result); 1262 r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
1257 switch (r) { 1263 switch (r) {
1258 case 0: 1264 case 0:
1259 if (lookup_result.shared && (rw == WRITE) && bio->bi_size) 1265 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
1260 bio_io_error(bio); 1266 bio_io_error(bio);
1261 else { 1267 else {
1262 inc_all_io_entry(tc->pool, bio); 1268 inc_all_io_entry(tc->pool, bio);
@@ -2879,7 +2885,7 @@ out_unlock:
2879 2885
2880static int thin_map(struct dm_target *ti, struct bio *bio) 2886static int thin_map(struct dm_target *ti, struct bio *bio)
2881{ 2887{
2882 bio->bi_sector = dm_target_offset(ti, bio->bi_sector); 2888 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
2883 2889
2884 return thin_bio_map(ti, bio); 2890 return thin_bio_map(ti, bio);
2885} 2891}
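The atomic_inc(&bio->bi_remaining) lines added in the dm-snap and dm-thin hunks compensate for the new chained-completion accounting: bio_endio() now decrements bi_remaining and only finishes the bio when it reaches zero, so a target that stashed bi_end_io and later restores it must take an extra reference before re-driving the completion. A condensed composite of the dm-thin hunks above (overwrite_endio stands in for whatever interception hook the target installs):

    /* hook the completion when the mapping is set up */
    m->saved_bi_end_io = bio->bi_end_io;
    bio->bi_end_io = overwrite_endio;

    /* ... later, once the deferred work has finished ... */
    if (m->bio) {
            m->bio->bi_end_io = m->saved_bi_end_io;
            atomic_inc(&m->bio->bi_remaining);  /* balance the decrement from the first bio_endio() */
    }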
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941db3aff..796007a5e0e1 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -73,15 +73,10 @@ struct dm_verity_io {
73 sector_t block; 73 sector_t block;
74 unsigned n_blocks; 74 unsigned n_blocks;
75 75
76 /* saved bio vector */ 76 struct bvec_iter iter;
77 struct bio_vec *io_vec;
78 unsigned io_vec_size;
79 77
80 struct work_struct work; 78 struct work_struct work;
81 79
82 /* A space for short vectors; longer vectors are allocated separately. */
83 struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE];
84
85 /* 80 /*
86 * Three variably-size fields follow this struct: 81 * Three variably-size fields follow this struct:
87 * 82 *
@@ -284,9 +279,10 @@ release_ret_r:
284static int verity_verify_io(struct dm_verity_io *io) 279static int verity_verify_io(struct dm_verity_io *io)
285{ 280{
286 struct dm_verity *v = io->v; 281 struct dm_verity *v = io->v;
282 struct bio *bio = dm_bio_from_per_bio_data(io,
283 v->ti->per_bio_data_size);
287 unsigned b; 284 unsigned b;
288 int i; 285 int i;
289 unsigned vector = 0, offset = 0;
290 286
291 for (b = 0; b < io->n_blocks; b++) { 287 for (b = 0; b < io->n_blocks; b++) {
292 struct shash_desc *desc; 288 struct shash_desc *desc;
@@ -336,31 +332,22 @@ test_block_hash:
336 } 332 }
337 333
338 todo = 1 << v->data_dev_block_bits; 334 todo = 1 << v->data_dev_block_bits;
339 do { 335 while (io->iter.bi_size) {
340 struct bio_vec *bv;
341 u8 *page; 336 u8 *page;
342 unsigned len; 337 struct bio_vec bv = bio_iter_iovec(bio, io->iter);
343 338
344 BUG_ON(vector >= io->io_vec_size); 339 page = kmap_atomic(bv.bv_page);
345 bv = &io->io_vec[vector]; 340 r = crypto_shash_update(desc, page + bv.bv_offset,
346 page = kmap_atomic(bv->bv_page); 341 bv.bv_len);
347 len = bv->bv_len - offset;
348 if (likely(len >= todo))
349 len = todo;
350 r = crypto_shash_update(desc,
351 page + bv->bv_offset + offset, len);
352 kunmap_atomic(page); 342 kunmap_atomic(page);
343
353 if (r < 0) { 344 if (r < 0) {
354 DMERR("crypto_shash_update failed: %d", r); 345 DMERR("crypto_shash_update failed: %d", r);
355 return r; 346 return r;
356 } 347 }
357 offset += len; 348
358 if (likely(offset == bv->bv_len)) { 349 bio_advance_iter(bio, &io->iter, bv.bv_len);
359 offset = 0; 350 }
360 vector++;
361 }
362 todo -= len;
363 } while (todo);
364 351
365 if (!v->version) { 352 if (!v->version) {
366 r = crypto_shash_update(desc, v->salt, v->salt_size); 353 r = crypto_shash_update(desc, v->salt, v->salt_size);
@@ -383,8 +370,6 @@ test_block_hash:
383 return -EIO; 370 return -EIO;
384 } 371 }
385 } 372 }
386 BUG_ON(vector != io->io_vec_size);
387 BUG_ON(offset);
388 373
389 return 0; 374 return 0;
390} 375}
@@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error)
400 bio->bi_end_io = io->orig_bi_end_io; 385 bio->bi_end_io = io->orig_bi_end_io;
401 bio->bi_private = io->orig_bi_private; 386 bio->bi_private = io->orig_bi_private;
402 387
403 if (io->io_vec != io->io_vec_inline) 388 bio_endio_nodec(bio, error);
404 mempool_free(io->io_vec, v->vec_mempool);
405
406 bio_endio(bio, error);
407} 389}
408 390
409static void verity_work(struct work_struct *w) 391static void verity_work(struct work_struct *w)
@@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
493 struct dm_verity_io *io; 475 struct dm_verity_io *io;
494 476
495 bio->bi_bdev = v->data_dev->bdev; 477 bio->bi_bdev = v->data_dev->bdev;
496 bio->bi_sector = verity_map_sector(v, bio->bi_sector); 478 bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
497 479
498 if (((unsigned)bio->bi_sector | bio_sectors(bio)) & 480 if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
499 ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) { 481 ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
500 DMERR_LIMIT("unaligned io"); 482 DMERR_LIMIT("unaligned io");
501 return -EIO; 483 return -EIO;
@@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
514 io->v = v; 496 io->v = v;
515 io->orig_bi_end_io = bio->bi_end_io; 497 io->orig_bi_end_io = bio->bi_end_io;
516 io->orig_bi_private = bio->bi_private; 498 io->orig_bi_private = bio->bi_private;
517 io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); 499 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
518 io->n_blocks = bio->bi_size >> v->data_dev_block_bits; 500 io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
519 501
520 bio->bi_end_io = verity_end_io; 502 bio->bi_end_io = verity_end_io;
521 bio->bi_private = io; 503 bio->bi_private = io;
522 io->io_vec_size = bio_segments(bio); 504 io->iter = bio->bi_iter;
523 if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE)
524 io->io_vec = io->io_vec_inline;
525 else
526 io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO);
527 memcpy(io->io_vec, bio_iovec(bio),
528 io->io_vec_size * sizeof(struct bio_vec));
529 505
530 verity_submit_prefetch(v, io); 506 verity_submit_prefetch(v, io);
531 507
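dm-verity no longer copies the bio's bvec array (and so no longer needs the inline/mempool fallback for long vectors): it records bio->bi_iter at map time and replays it from the workqueue with bio_iter_iovec()/bio_advance_iter(). A rough sketch of the worker-side walk, with a hypothetical hash_block_bytes() helper standing in for the crypto_shash calls:

    static int verity_walk_data(struct dm_verity_io *io, struct bio *bio)
    {
            while (io->iter.bi_size) {
                    struct bio_vec bv = bio_iter_iovec(bio, io->iter);
                    u8 *page = kmap_atomic(bv.bv_page);
                    int r = hash_block_bytes(io, page + bv.bv_offset, bv.bv_len); /* hypothetical */

                    kunmap_atomic(page);
                    if (r < 0)
                            return r;

                    bio_advance_iter(bio, &io->iter, bv.bv_len);
            }
            return 0;
    }

The switch to bio_endio_nodec() in verity_finish_io() completes the bio without dropping bi_remaining a second time, since the count was already consumed when the underlying read completed and bounced into the verity workqueue; it is the same balance dm-snap and dm-thin achieve by re-incrementing bi_remaining.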
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0704c523a76b..44a2fa6814ce 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
575 atomic_inc_return(&md->pending[rw])); 575 atomic_inc_return(&md->pending[rw]));
576 576
577 if (unlikely(dm_stats_used(&md->stats))) 577 if (unlikely(dm_stats_used(&md->stats)))
578 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, 578 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
579 bio_sectors(bio), false, 0, &io->stats_aux); 579 bio_sectors(bio), false, 0, &io->stats_aux);
580} 580}
581 581
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
593 part_stat_unlock(); 593 part_stat_unlock();
594 594
595 if (unlikely(dm_stats_used(&md->stats))) 595 if (unlikely(dm_stats_used(&md->stats)))
596 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, 596 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
597 bio_sectors(bio), true, duration, &io->stats_aux); 597 bio_sectors(bio), true, duration, &io->stats_aux);
598 598
599 /* 599 /*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
742 if (io_error == DM_ENDIO_REQUEUE) 742 if (io_error == DM_ENDIO_REQUEUE)
743 return; 743 return;
744 744
745 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { 745 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
746 /* 746 /*
747 * Preflush done for flush with data, reissue 747 * Preflush done for flush with data, reissue
748 * without REQ_FLUSH. 748 * without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
797 struct dm_rq_clone_bio_info *info = clone->bi_private; 797 struct dm_rq_clone_bio_info *info = clone->bi_private;
798 struct dm_rq_target_io *tio = info->tio; 798 struct dm_rq_target_io *tio = info->tio;
799 struct bio *bio = info->orig; 799 struct bio *bio = info->orig;
800 unsigned int nr_bytes = info->orig->bi_size; 800 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
801 801
802 bio_put(clone); 802 bio_put(clone);
803 803
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
1128 * this io. 1128 * this io.
1129 */ 1129 */
1130 atomic_inc(&tio->io->io_count); 1130 atomic_inc(&tio->io->io_count);
1131 sector = clone->bi_sector; 1131 sector = clone->bi_iter.bi_sector;
1132 r = ti->type->map(ti, clone); 1132 r = ti->type->map(ti, clone);
1133 if (r == DM_MAPIO_REMAPPED) { 1133 if (r == DM_MAPIO_REMAPPED) {
1134 /* the bio has been remapped so dispatch it */ 1134 /* the bio has been remapped so dispatch it */
@@ -1155,76 +1155,32 @@ struct clone_info {
1155 struct dm_io *io; 1155 struct dm_io *io;
1156 sector_t sector; 1156 sector_t sector;
1157 sector_t sector_count; 1157 sector_t sector_count;
1158 unsigned short idx;
1159}; 1158};
1160 1159
1161static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) 1160static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
1162{ 1161{
1163 bio->bi_sector = sector; 1162 bio->bi_iter.bi_sector = sector;
1164 bio->bi_size = to_bytes(len); 1163 bio->bi_iter.bi_size = to_bytes(len);
1165}
1166
1167static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
1168{
1169 bio->bi_idx = idx;
1170 bio->bi_vcnt = idx + bv_count;
1171 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
1172}
1173
1174static void clone_bio_integrity(struct bio *bio, struct bio *clone,
1175 unsigned short idx, unsigned len, unsigned offset,
1176 unsigned trim)
1177{
1178 if (!bio_integrity(bio))
1179 return;
1180
1181 bio_integrity_clone(clone, bio, GFP_NOIO);
1182
1183 if (trim)
1184 bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len);
1185}
1186
1187/*
1188 * Creates a little bio that just does part of a bvec.
1189 */
1190static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
1191 sector_t sector, unsigned short idx,
1192 unsigned offset, unsigned len)
1193{
1194 struct bio *clone = &tio->clone;
1195 struct bio_vec *bv = bio->bi_io_vec + idx;
1196
1197 *clone->bi_io_vec = *bv;
1198
1199 bio_setup_sector(clone, sector, len);
1200
1201 clone->bi_bdev = bio->bi_bdev;
1202 clone->bi_rw = bio->bi_rw;
1203 clone->bi_vcnt = 1;
1204 clone->bi_io_vec->bv_offset = offset;
1205 clone->bi_io_vec->bv_len = clone->bi_size;
1206 clone->bi_flags |= 1 << BIO_CLONED;
1207
1208 clone_bio_integrity(bio, clone, idx, len, offset, 1);
1209} 1164}
1210 1165
1211/* 1166/*
1212 * Creates a bio that consists of range of complete bvecs. 1167 * Creates a bio that consists of range of complete bvecs.
1213 */ 1168 */
1214static void clone_bio(struct dm_target_io *tio, struct bio *bio, 1169static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1215 sector_t sector, unsigned short idx, 1170 sector_t sector, unsigned len)
1216 unsigned short bv_count, unsigned len)
1217{ 1171{
1218 struct bio *clone = &tio->clone; 1172 struct bio *clone = &tio->clone;
1219 unsigned trim = 0;
1220 1173
1221 __bio_clone(clone, bio); 1174 __bio_clone_fast(clone, bio);
1222 bio_setup_sector(clone, sector, len); 1175
1223 bio_setup_bv(clone, idx, bv_count); 1176 if (bio_integrity(bio))
1177 bio_integrity_clone(clone, bio, GFP_NOIO);
1178
1179 bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
1180 clone->bi_iter.bi_size = to_bytes(len);
1224 1181
1225 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) 1182 if (bio_integrity(bio))
1226 trim = 1; 1183 bio_integrity_trim(clone, 0, len);
1227 clone_bio_integrity(bio, clone, idx, len, 0, trim);
1228} 1184}
1229 1185
1230static struct dm_target_io *alloc_tio(struct clone_info *ci, 1186static struct dm_target_io *alloc_tio(struct clone_info *ci,
@@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci,
1257 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush 1213 * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush
1258 * and discard, so no need for concern about wasted bvec allocations. 1214 * and discard, so no need for concern about wasted bvec allocations.
1259 */ 1215 */
1260 __bio_clone(clone, ci->bio); 1216 __bio_clone_fast(clone, ci->bio);
1261 if (len) 1217 if (len)
1262 bio_setup_sector(clone, ci->sector, len); 1218 bio_setup_sector(clone, ci->sector, len);
1263 1219
@@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci)
1286} 1242}
1287 1243
1288static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, 1244static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti,
1289 sector_t sector, int nr_iovecs, 1245 sector_t sector, unsigned len)
1290 unsigned short idx, unsigned short bv_count,
1291 unsigned offset, unsigned len,
1292 unsigned split_bvec)
1293{ 1246{
1294 struct bio *bio = ci->bio; 1247 struct bio *bio = ci->bio;
1295 struct dm_target_io *tio; 1248 struct dm_target_io *tio;
@@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti
1303 num_target_bios = ti->num_write_bios(ti, bio); 1256 num_target_bios = ti->num_write_bios(ti, bio);
1304 1257
1305 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { 1258 for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) {
1306 tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr); 1259 tio = alloc_tio(ci, ti, 0, target_bio_nr);
1307 if (split_bvec) 1260 clone_bio(tio, bio, sector, len);
1308 clone_split_bio(tio, bio, sector, idx, offset, len);
1309 else
1310 clone_bio(tio, bio, sector, idx, bv_count, len);
1311 __map_bio(tio); 1261 __map_bio(tio);
1312 } 1262 }
1313} 1263}
@@ -1379,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci)
1379} 1329}
1380 1330
1381/* 1331/*
1382 * Find maximum number of sectors / bvecs we can process with a single bio.
1383 */
1384static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx)
1385{
1386 struct bio *bio = ci->bio;
1387 sector_t bv_len, total_len = 0;
1388
1389 for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) {
1390 bv_len = to_sector(bio->bi_io_vec[*idx].bv_len);
1391
1392 if (bv_len > max)
1393 break;
1394
1395 max -= bv_len;
1396 total_len += bv_len;
1397 }
1398
1399 return total_len;
1400}
1401
1402static int __split_bvec_across_targets(struct clone_info *ci,
1403 struct dm_target *ti, sector_t max)
1404{
1405 struct bio *bio = ci->bio;
1406 struct bio_vec *bv = bio->bi_io_vec + ci->idx;
1407 sector_t remaining = to_sector(bv->bv_len);
1408 unsigned offset = 0;
1409 sector_t len;
1410
1411 do {
1412 if (offset) {
1413 ti = dm_table_find_target(ci->map, ci->sector);
1414 if (!dm_target_is_valid(ti))
1415 return -EIO;
1416
1417 max = max_io_len(ci->sector, ti);
1418 }
1419
1420 len = min(remaining, max);
1421
1422 __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0,
1423 bv->bv_offset + offset, len, 1);
1424
1425 ci->sector += len;
1426 ci->sector_count -= len;
1427 offset += to_bytes(len);
1428 } while (remaining -= len);
1429
1430 ci->idx++;
1431
1432 return 0;
1433}
1434
1435/*
1436 * Select the correct strategy for processing a non-flush bio. 1332 * Select the correct strategy for processing a non-flush bio.
1437 */ 1333 */
1438static int __split_and_process_non_flush(struct clone_info *ci) 1334static int __split_and_process_non_flush(struct clone_info *ci)
1439{ 1335{
1440 struct bio *bio = ci->bio; 1336 struct bio *bio = ci->bio;
1441 struct dm_target *ti; 1337 struct dm_target *ti;
1442 sector_t len, max; 1338 unsigned len;
1443 int idx;
1444 1339
1445 if (unlikely(bio->bi_rw & REQ_DISCARD)) 1340 if (unlikely(bio->bi_rw & REQ_DISCARD))
1446 return __send_discard(ci); 1341 return __send_discard(ci);
@@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci)
1451 if (!dm_target_is_valid(ti)) 1346 if (!dm_target_is_valid(ti))
1452 return -EIO; 1347 return -EIO;
1453 1348
1454 max = max_io_len(ci->sector, ti); 1349 len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count);
1455
1456 /*
1457 * Optimise for the simple case where we can do all of
1458 * the remaining io with a single clone.
1459 */
1460 if (ci->sector_count <= max) {
1461 __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
1462 ci->idx, bio->bi_vcnt - ci->idx, 0,
1463 ci->sector_count, 0);
1464 ci->sector_count = 0;
1465 return 0;
1466 }
1467
1468 /*
1469 * There are some bvecs that don't span targets.
1470 * Do as many of these as possible.
1471 */
1472 if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) {
1473 len = __len_within_target(ci, max, &idx);
1474
1475 __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs,
1476 ci->idx, idx - ci->idx, 0, len, 0);
1477 1350
1478 ci->sector += len; 1351 __clone_and_map_data_bio(ci, ti, ci->sector, len);
1479 ci->sector_count -= len;
1480 ci->idx = idx;
1481 1352
1482 return 0; 1353 ci->sector += len;
1483 } 1354 ci->sector_count -= len;
1484 1355
1485 /* 1356 return 0;
1486 * Handle a bvec that must be split between two or more targets.
1487 */
1488 return __split_bvec_across_targets(ci, ti, max);
1489} 1357}
1490 1358
1491/* 1359/*
@@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md,
1510 ci.io->bio = bio; 1378 ci.io->bio = bio;
1511 ci.io->md = md; 1379 ci.io->md = md;
1512 spin_lock_init(&ci.io->endio_lock); 1380 spin_lock_init(&ci.io->endio_lock);
1513 ci.sector = bio->bi_sector; 1381 ci.sector = bio->bi_iter.bi_sector;
1514 ci.idx = bio->bi_idx;
1515 1382
1516 start_io_acct(ci.io); 1383 start_io_acct(ci.io);
1517 1384
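With the iterator in place, dm.c's clone_split_bio()/bio_setup_bv() machinery collapses into a single clone path: __bio_clone_fast() shares the parent's bvec array, and the clone is narrowed to the wanted range purely through its iterator. A condensed restatement of the new clone_bio() with the integrity handling left out:

    static void clone_trimmed_bio(struct bio *clone, struct bio *bio,
                                  sector_t sector, unsigned len)
    {
            __bio_clone_fast(clone, bio);           /* share bi_io_vec, copy bi_iter */

            /* skip forward to the target sector ... */
            bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector));
            /* ... and cap the clone at len sectors */
            clone->bi_iter.bi_size = to_bytes(len);
    }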
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aefe982b..e8b4574956c7 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
74{ 74{
75 struct bio *b = bio->bi_private; 75 struct bio *b = bio->bi_private;
76 76
77 b->bi_size = bio->bi_size; 77 b->bi_iter.bi_size = bio->bi_iter.bi_size;
78 b->bi_sector = bio->bi_sector; 78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
79 79
80 bio_put(bio); 80 bio_put(bio);
81 81
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
185 return; 185 return;
186 } 186 }
187 187
188 if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE)) 188 if (check_sector(conf, bio->bi_iter.bi_sector,
189 bio_end_sector(bio), WRITE))
189 failit = 1; 190 failit = 1;
190 if (check_mode(conf, WritePersistent)) { 191 if (check_mode(conf, WritePersistent)) {
191 add_sector(conf, bio->bi_sector, WritePersistent); 192 add_sector(conf, bio->bi_iter.bi_sector,
193 WritePersistent);
192 failit = 1; 194 failit = 1;
193 } 195 }
194 if (check_mode(conf, WriteTransient)) 196 if (check_mode(conf, WriteTransient))
195 failit = 1; 197 failit = 1;
196 } else { 198 } else {
197 /* read request */ 199 /* read request */
198 if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ)) 200 if (check_sector(conf, bio->bi_iter.bi_sector,
201 bio_end_sector(bio), READ))
199 failit = 1; 202 failit = 1;
200 if (check_mode(conf, ReadTransient)) 203 if (check_mode(conf, ReadTransient))
201 failit = 1; 204 failit = 1;
202 if (check_mode(conf, ReadPersistent)) { 205 if (check_mode(conf, ReadPersistent)) {
203 add_sector(conf, bio->bi_sector, ReadPersistent); 206 add_sector(conf, bio->bi_iter.bi_sector,
207 ReadPersistent);
204 failit = 1; 208 failit = 1;
205 } 209 }
206 if (check_mode(conf, ReadFixable)) { 210 if (check_mode(conf, ReadFixable)) {
207 add_sector(conf, bio->bi_sector, ReadFixable); 211 add_sector(conf, bio->bi_iter.bi_sector,
212 ReadFixable);
208 failit = 1; 213 failit = 1;
209 } 214 }
210 } 215 }
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37b..56f534b4a2d2 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev)
288 288
289static void linear_make_request(struct mddev *mddev, struct bio *bio) 289static void linear_make_request(struct mddev *mddev, struct bio *bio)
290{ 290{
291 char b[BDEVNAME_SIZE];
291 struct dev_info *tmp_dev; 292 struct dev_info *tmp_dev;
292 sector_t start_sector; 293 struct bio *split;
294 sector_t start_sector, end_sector, data_offset;
293 295
294 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 296 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
295 md_flush_request(mddev, bio); 297 md_flush_request(mddev, bio);
296 return; 298 return;
297 } 299 }
298 300
299 rcu_read_lock(); 301 do {
300 tmp_dev = which_dev(mddev, bio->bi_sector); 302 rcu_read_lock();
301 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
302
303
304 if (unlikely(bio->bi_sector >= (tmp_dev->end_sector)
305 || (bio->bi_sector < start_sector))) {
306 char b[BDEVNAME_SIZE];
307
308 printk(KERN_ERR
309 "md/linear:%s: make_request: Sector %llu out of bounds on "
310 "dev %s: %llu sectors, offset %llu\n",
311 mdname(mddev),
312 (unsigned long long)bio->bi_sector,
313 bdevname(tmp_dev->rdev->bdev, b),
314 (unsigned long long)tmp_dev->rdev->sectors,
315 (unsigned long long)start_sector);
316 rcu_read_unlock();
317 bio_io_error(bio);
318 return;
319 }
320 if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) {
321 /* This bio crosses a device boundary, so we have to
322 * split it.
323 */
324 struct bio_pair *bp;
325 sector_t end_sector = tmp_dev->end_sector;
326 303
327 rcu_read_unlock(); 304 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
328 305 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
329 bp = bio_split(bio, end_sector - bio->bi_sector); 306 end_sector = tmp_dev->end_sector;
307 data_offset = tmp_dev->rdev->data_offset;
308 bio->bi_bdev = tmp_dev->rdev->bdev;
330 309
331 linear_make_request(mddev, &bp->bio1); 310 rcu_read_unlock();
332 linear_make_request(mddev, &bp->bio2);
333 bio_pair_release(bp);
334 return;
335 }
336
337 bio->bi_bdev = tmp_dev->rdev->bdev;
338 bio->bi_sector = bio->bi_sector - start_sector
339 + tmp_dev->rdev->data_offset;
340 rcu_read_unlock();
341 311
342 if (unlikely((bio->bi_rw & REQ_DISCARD) && 312 if (unlikely(bio->bi_iter.bi_sector >= end_sector ||
343 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { 313 bio->bi_iter.bi_sector < start_sector))
344 /* Just ignore it */ 314 goto out_of_bounds;
345 bio_endio(bio, 0); 315
346 return; 316 if (unlikely(bio_end_sector(bio) > end_sector)) {
347 } 317 /* This bio crosses a device boundary, so we have to
318 * split it.
319 */
320 split = bio_split(bio, end_sector -
321 bio->bi_iter.bi_sector,
322 GFP_NOIO, fs_bio_set);
323 bio_chain(split, bio);
324 } else {
325 split = bio;
326 }
348 327
349 generic_make_request(bio); 328 split->bi_iter.bi_sector = split->bi_iter.bi_sector -
329 start_sector + data_offset;
330
331 if (unlikely((split->bi_rw & REQ_DISCARD) &&
332 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
333 /* Just ignore it */
334 bio_endio(split, 0);
335 } else
336 generic_make_request(split);
337 } while (split != bio);
338 return;
339
340out_of_bounds:
341 printk(KERN_ERR
342 "md/linear:%s: make_request: Sector %llu out of bounds on "
343 "dev %s: %llu sectors, offset %llu\n",
344 mdname(mddev),
345 (unsigned long long)bio->bi_iter.bi_sector,
346 bdevname(tmp_dev->rdev->bdev, b),
347 (unsigned long long)tmp_dev->rdev->sectors,
348 (unsigned long long)start_sector);
349 bio_io_error(bio);
350} 350}
351 351
352static void linear_status (struct seq_file *seq, struct mddev *mddev) 352static void linear_status (struct seq_file *seq, struct mddev *mddev)
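md/linear above (and md/raid0 further below) replace the old bio_pair splitting with the new bio_split()/bio_chain() helpers: carve off as much as the current device or chunk can take, chain it to the parent so completion is tracked through bi_remaining, and loop until the parent itself is the final piece. A stripped-down sketch of the loop; max_sectors_here(), target_bdev and target_offset are placeholders for the per-device boundary and remapping logic:

    struct bio *split;

    do {
            unsigned max = max_sectors_here(bio);   /* hypothetical per-device limit */

            if (max < bio_sectors(bio)) {
                    split = bio_split(bio, max, GFP_NOIO, fs_bio_set);
                    bio_chain(split, bio);          /* parent completes only after every piece does */
            } else {
                    split = bio;                    /* last (or only) piece */
            }

            split->bi_bdev = target_bdev;
            split->bi_iter.bi_sector += target_offset;
            generic_make_request(split);
    } while (split != bio);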
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 21f4d7ff0da2..16d84e091e2d 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
393 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 393 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
394 struct bio *bio = mddev->flush_bio; 394 struct bio *bio = mddev->flush_bio;
395 395
396 if (bio->bi_size == 0) 396 if (bio->bi_iter.bi_size == 0)
397 /* an empty barrier - all done */ 397 /* an empty barrier - all done */
398 bio_endio(bio, 0); 398 bio_endio(bio, 0);
399 else { 399 else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
754 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); 754 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
755 755
756 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; 756 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
757 bio->bi_sector = sector; 757 bio->bi_iter.bi_sector = sector;
758 bio_add_page(bio, page, size, 0); 758 bio_add_page(bio, page, size, 0);
759 bio->bi_private = rdev; 759 bio->bi_private = rdev;
760 bio->bi_end_io = super_written; 760 bio->bi_end_io = super_written;
@@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
782 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); 782 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev);
783 int ret; 783 int ret;
784 784
785 rw |= REQ_SYNC;
786
787 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 785 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
788 rdev->meta_bdev : rdev->bdev; 786 rdev->meta_bdev : rdev->bdev;
789 if (metadata_op) 787 if (metadata_op)
790 bio->bi_sector = sector + rdev->sb_start; 788 bio->bi_iter.bi_sector = sector + rdev->sb_start;
791 else if (rdev->mddev->reshape_position != MaxSector && 789 else if (rdev->mddev->reshape_position != MaxSector &&
792 (rdev->mddev->reshape_backwards == 790 (rdev->mddev->reshape_backwards ==
793 (sector >= rdev->mddev->reshape_position))) 791 (sector >= rdev->mddev->reshape_position)))
794 bio->bi_sector = sector + rdev->new_data_offset; 792 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
795 else 793 else
796 bio->bi_sector = sector + rdev->data_offset; 794 bio->bi_iter.bi_sector = sector + rdev->data_offset;
797 bio_add_page(bio, page, size, 0); 795 bio_add_page(bio, page, size, 0);
798 submit_bio_wait(rw, bio); 796 submit_bio_wait(rw, bio);
799 797
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae75a33..849ad39f547b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
100 md_error (mp_bh->mddev, rdev); 100 md_error (mp_bh->mddev, rdev);
101 printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 101 printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
102 bdevname(rdev->bdev,b), 102 bdevname(rdev->bdev,b),
103 (unsigned long long)bio->bi_sector); 103 (unsigned long long)bio->bi_iter.bi_sector);
104 multipath_reschedule_retry(mp_bh); 104 multipath_reschedule_retry(mp_bh);
105 } else 105 } else
106 multipath_end_bh_io(mp_bh, error); 106 multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
132 multipath = conf->multipaths + mp_bh->path; 132 multipath = conf->multipaths + mp_bh->path;
133 133
134 mp_bh->bio = *bio; 134 mp_bh->bio = *bio;
135 mp_bh->bio.bi_sector += multipath->rdev->data_offset; 135 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
136 mp_bh->bio.bi_bdev = multipath->rdev->bdev; 136 mp_bh->bio.bi_bdev = multipath->rdev->bdev;
137 mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; 137 mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
138 mp_bh->bio.bi_end_io = multipath_end_request; 138 mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
355 spin_unlock_irqrestore(&conf->device_lock, flags); 355 spin_unlock_irqrestore(&conf->device_lock, flags);
356 356
357 bio = &mp_bh->bio; 357 bio = &mp_bh->bio;
358 bio->bi_sector = mp_bh->master_bio->bi_sector; 358 bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
359 359
360 if ((mp_bh->path = multipath_map (conf))<0) { 360 if ((mp_bh->path = multipath_map (conf))<0) {
361 printk(KERN_ALERT "multipath: %s: unrecoverable IO read" 361 printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
362 " error for block %llu\n", 362 " error for block %llu\n",
363 bdevname(bio->bi_bdev,b), 363 bdevname(bio->bi_bdev,b),
364 (unsigned long long)bio->bi_sector); 364 (unsigned long long)bio->bi_iter.bi_sector);
365 multipath_end_bh_io(mp_bh, -EIO); 365 multipath_end_bh_io(mp_bh, -EIO);
366 } else { 366 } else {
367 printk(KERN_ERR "multipath: %s: redirecting sector %llu" 367 printk(KERN_ERR "multipath: %s: redirecting sector %llu"
368 " to another IO path\n", 368 " to another IO path\n",
369 bdevname(bio->bi_bdev,b), 369 bdevname(bio->bi_bdev,b),
370 (unsigned long long)bio->bi_sector); 370 (unsigned long long)bio->bi_iter.bi_sector);
371 *bio = *(mp_bh->master_bio); 371 *bio = *(mp_bh->master_bio);
372 bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; 372 bio->bi_iter.bi_sector +=
373 conf->multipaths[mp_bh->path].rdev->data_offset;
373 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; 374 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
374 bio->bi_rw |= REQ_FAILFAST_TRANSPORT; 375 bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
375 bio->bi_end_io = multipath_end_request; 376 bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b7d2f4..407a99e46f69 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
501 unsigned int chunk_sects, struct bio *bio) 501 unsigned int chunk_sects, struct bio *bio)
502{ 502{
503 if (likely(is_power_of_2(chunk_sects))) { 503 if (likely(is_power_of_2(chunk_sects))) {
504 return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) 504 return chunk_sects >=
505 ((bio->bi_iter.bi_sector & (chunk_sects-1))
505 + bio_sectors(bio)); 506 + bio_sectors(bio));
506 } else{ 507 } else{
507 sector_t sector = bio->bi_sector; 508 sector_t sector = bio->bi_iter.bi_sector;
508 return chunk_sects >= (sector_div(sector, chunk_sects) 509 return chunk_sects >= (sector_div(sector, chunk_sects)
509 + bio_sectors(bio)); 510 + bio_sectors(bio));
510 } 511 }
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
512 513
513static void raid0_make_request(struct mddev *mddev, struct bio *bio) 514static void raid0_make_request(struct mddev *mddev, struct bio *bio)
514{ 515{
515 unsigned int chunk_sects;
516 sector_t sector_offset;
517 struct strip_zone *zone; 516 struct strip_zone *zone;
518 struct md_rdev *tmp_dev; 517 struct md_rdev *tmp_dev;
518 struct bio *split;
519 519
520 if (unlikely(bio->bi_rw & REQ_FLUSH)) { 520 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
521 md_flush_request(mddev, bio); 521 md_flush_request(mddev, bio);
522 return; 522 return;
523 } 523 }
524 524
525 chunk_sects = mddev->chunk_sectors; 525 do {
526 if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { 526 sector_t sector = bio->bi_iter.bi_sector;
527 sector_t sector = bio->bi_sector; 527 unsigned chunk_sects = mddev->chunk_sectors;
528 struct bio_pair *bp;
529 /* Sanity check -- queue functions should prevent this happening */
530 if (bio_segments(bio) > 1)
531 goto bad_map;
532 /* This is a one page bio that upper layers
533 * refuse to split for us, so we need to split it.
534 */
535 if (likely(is_power_of_2(chunk_sects)))
536 bp = bio_split(bio, chunk_sects - (sector &
537 (chunk_sects-1)));
538 else
539 bp = bio_split(bio, chunk_sects -
540 sector_div(sector, chunk_sects));
541 raid0_make_request(mddev, &bp->bio1);
542 raid0_make_request(mddev, &bp->bio2);
543 bio_pair_release(bp);
544 return;
545 }
546 528
547 sector_offset = bio->bi_sector; 529 unsigned sectors = chunk_sects -
548 zone = find_zone(mddev->private, &sector_offset); 530 (likely(is_power_of_2(chunk_sects))
549 tmp_dev = map_sector(mddev, zone, bio->bi_sector, 531 ? (sector & (chunk_sects-1))
550 &sector_offset); 532 : sector_div(sector, chunk_sects));
551 bio->bi_bdev = tmp_dev->bdev;
552 bio->bi_sector = sector_offset + zone->dev_start +
553 tmp_dev->data_offset;
554
555 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
556 !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) {
557 /* Just ignore it */
558 bio_endio(bio, 0);
559 return;
560 }
561 533
562 generic_make_request(bio); 534 if (sectors < bio_sectors(bio)) {
563 return; 535 split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
564 536 bio_chain(split, bio);
565bad_map: 537 } else {
566 printk("md/raid0:%s: make_request bug: can't convert block across chunks" 538 split = bio;
567 " or bigger than %dk %llu %d\n", 539 }
568 mdname(mddev), chunk_sects / 2,
569 (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
570 540
571 bio_io_error(bio); 541 zone = find_zone(mddev->private, &sector);
572 return; 542 tmp_dev = map_sector(mddev, zone, sector, &sector);
543 split->bi_bdev = tmp_dev->bdev;
544 split->bi_iter.bi_sector = sector + zone->dev_start +
545 tmp_dev->data_offset;
546
547 if (unlikely((split->bi_rw & REQ_DISCARD) &&
548 !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) {
549 /* Just ignore it */
550 bio_endio(split, 0);
551 } else
552 generic_make_request(split);
553 } while (split != bio);
573} 554}
574 555
575static void raid0_status(struct seq_file *seq, struct mddev *mddev) 556static void raid0_status(struct seq_file *seq, struct mddev *mddev)
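The raid0 rewrite above drops the old bio_pair machinery in favour of the new bio_split()/bio_chain() pair: the request is split at each chunk boundary, each split piece is chained to the parent so completion propagates correctly, and the loop continues until the remainder fits in one chunk. The general shape of that loop, sketched with an assumed per-iteration limit rather than the real zone/chunk mapping:

#include <linux/bio.h>
#include <linux/blkdev.h>

/* Split 'bio' into pieces of at most 'max_sectors' and submit each one.
 * bio_split() clones the front of the bio and advances the parent;
 * bio_chain() makes the parent complete only after the split piece does. */
static void example_split_loop(struct bio *bio, unsigned int max_sectors)
{
	struct bio *split;

	do {
		if (bio_sectors(bio) > max_sectors) {
			split = bio_split(bio, max_sectors, GFP_NOIO, fs_bio_set);
			bio_chain(split, bio);
		} else {
			split = bio;
		}
		/* per-piece remapping of split->bi_bdev and
		 * split->bi_iter.bi_sector would go here */
		generic_make_request(split);
	} while (split != bio);
}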
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1e5a540995e9..db3b9d7314f1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
229 int done; 229 int done;
230 struct r1conf *conf = r1_bio->mddev->private; 230 struct r1conf *conf = r1_bio->mddev->private;
231 sector_t start_next_window = r1_bio->start_next_window; 231 sector_t start_next_window = r1_bio->start_next_window;
232 sector_t bi_sector = bio->bi_sector; 232 sector_t bi_sector = bio->bi_iter.bi_sector;
233 233
234 if (bio->bi_phys_segments) { 234 if (bio->bi_phys_segments) {
235 unsigned long flags; 235 unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
265 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 265 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
266 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", 266 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
267 (bio_data_dir(bio) == WRITE) ? "write" : "read", 267 (bio_data_dir(bio) == WRITE) ? "write" : "read",
268 (unsigned long long) bio->bi_sector, 268 (unsigned long long) bio->bi_iter.bi_sector,
269 (unsigned long long) bio->bi_sector + 269 (unsigned long long) bio_end_sector(bio) - 1);
270 bio_sectors(bio) - 1);
271 270
272 call_bio_endio(r1_bio); 271 call_bio_endio(r1_bio);
273 } 272 }
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
466 struct bio *mbio = r1_bio->master_bio; 465 struct bio *mbio = r1_bio->master_bio;
467 pr_debug("raid1: behind end write sectors" 466 pr_debug("raid1: behind end write sectors"
468 " %llu-%llu\n", 467 " %llu-%llu\n",
469 (unsigned long long) mbio->bi_sector, 468 (unsigned long long) mbio->bi_iter.bi_sector,
470 (unsigned long long) mbio->bi_sector + 469 (unsigned long long) bio_end_sector(mbio) - 1);
471 bio_sectors(mbio) - 1);
472 call_bio_endio(r1_bio); 470 call_bio_endio(r1_bio);
473 } 471 }
474 } 472 }
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
875 else if ((conf->next_resync - RESYNC_WINDOW_SECTORS 873 else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
876 >= bio_end_sector(bio)) || 874 >= bio_end_sector(bio)) ||
877 (conf->next_resync + NEXT_NORMALIO_DISTANCE 875 (conf->next_resync + NEXT_NORMALIO_DISTANCE
878 <= bio->bi_sector)) 876 <= bio->bi_iter.bi_sector))
879 wait = false; 877 wait = false;
880 else 878 else
881 wait = true; 879 wait = true;
@@ -913,19 +911,19 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
913 911
914 if (bio && bio_data_dir(bio) == WRITE) { 912 if (bio && bio_data_dir(bio) == WRITE) {
915 if (conf->next_resync + NEXT_NORMALIO_DISTANCE 913 if (conf->next_resync + NEXT_NORMALIO_DISTANCE
916 <= bio->bi_sector) { 914 <= bio->bi_iter.bi_sector) {
917 if (conf->start_next_window == MaxSector) 915 if (conf->start_next_window == MaxSector)
918 conf->start_next_window = 916 conf->start_next_window =
919 conf->next_resync + 917 conf->next_resync +
920 NEXT_NORMALIO_DISTANCE; 918 NEXT_NORMALIO_DISTANCE;
921 919
922 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) 920 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
923 <= bio->bi_sector) 921 <= bio->bi_iter.bi_sector)
924 conf->next_window_requests++; 922 conf->next_window_requests++;
925 else 923 else
926 conf->current_window_requests++; 924 conf->current_window_requests++;
927 } 925 }
928 if (bio->bi_sector >= conf->start_next_window) 926 if (bio->bi_iter.bi_sector >= conf->start_next_window)
929 sector = conf->start_next_window; 927 sector = conf->start_next_window;
930 } 928 }
931 929
@@ -1028,7 +1026,8 @@ do_sync_io:
1028 if (bvecs[i].bv_page) 1026 if (bvecs[i].bv_page)
1029 put_page(bvecs[i].bv_page); 1027 put_page(bvecs[i].bv_page);
1030 kfree(bvecs); 1028 kfree(bvecs);
1031 pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 1029 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1030 bio->bi_iter.bi_size);
1032} 1031}
1033 1032
1034struct raid1_plug_cb { 1033struct raid1_plug_cb {
@@ -1108,7 +1107,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1108 1107
1109 if (bio_data_dir(bio) == WRITE && 1108 if (bio_data_dir(bio) == WRITE &&
1110 bio_end_sector(bio) > mddev->suspend_lo && 1109 bio_end_sector(bio) > mddev->suspend_lo &&
1111 bio->bi_sector < mddev->suspend_hi) { 1110 bio->bi_iter.bi_sector < mddev->suspend_hi) {
1112 /* As the suspend_* range is controlled by 1111 /* As the suspend_* range is controlled by
1113 * userspace, we want an interruptible 1112 * userspace, we want an interruptible
1114 * wait. 1113 * wait.
@@ -1119,7 +1118,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1119 prepare_to_wait(&conf->wait_barrier, 1118 prepare_to_wait(&conf->wait_barrier,
1120 &w, TASK_INTERRUPTIBLE); 1119 &w, TASK_INTERRUPTIBLE);
1121 if (bio_end_sector(bio) <= mddev->suspend_lo || 1120 if (bio_end_sector(bio) <= mddev->suspend_lo ||
1122 bio->bi_sector >= mddev->suspend_hi) 1121 bio->bi_iter.bi_sector >= mddev->suspend_hi)
1123 break; 1122 break;
1124 schedule(); 1123 schedule();
1125 } 1124 }
@@ -1141,7 +1140,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1141 r1_bio->sectors = bio_sectors(bio); 1140 r1_bio->sectors = bio_sectors(bio);
1142 r1_bio->state = 0; 1141 r1_bio->state = 0;
1143 r1_bio->mddev = mddev; 1142 r1_bio->mddev = mddev;
1144 r1_bio->sector = bio->bi_sector; 1143 r1_bio->sector = bio->bi_iter.bi_sector;
1145 1144
1146 /* We might need to issue multiple reads to different 1145 /* We might need to issue multiple reads to different
1147 * devices if there are bad blocks around, so we keep 1146 * devices if there are bad blocks around, so we keep
@@ -1181,12 +1180,13 @@ read_again:
1181 r1_bio->read_disk = rdisk; 1180 r1_bio->read_disk = rdisk;
1182 1181
1183 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1182 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1184 bio_trim(read_bio, r1_bio->sector - bio->bi_sector, 1183 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1185 max_sectors); 1184 max_sectors);
1186 1185
1187 r1_bio->bios[rdisk] = read_bio; 1186 r1_bio->bios[rdisk] = read_bio;
1188 1187
1189 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 1188 read_bio->bi_iter.bi_sector = r1_bio->sector +
1189 mirror->rdev->data_offset;
1190 read_bio->bi_bdev = mirror->rdev->bdev; 1190 read_bio->bi_bdev = mirror->rdev->bdev;
1191 read_bio->bi_end_io = raid1_end_read_request; 1191 read_bio->bi_end_io = raid1_end_read_request;
1192 read_bio->bi_rw = READ | do_sync; 1192 read_bio->bi_rw = READ | do_sync;
@@ -1198,7 +1198,7 @@ read_again:
1198 */ 1198 */
1199 1199
1200 sectors_handled = (r1_bio->sector + max_sectors 1200 sectors_handled = (r1_bio->sector + max_sectors
1201 - bio->bi_sector); 1201 - bio->bi_iter.bi_sector);
1202 r1_bio->sectors = max_sectors; 1202 r1_bio->sectors = max_sectors;
1203 spin_lock_irq(&conf->device_lock); 1203 spin_lock_irq(&conf->device_lock);
1204 if (bio->bi_phys_segments == 0) 1204 if (bio->bi_phys_segments == 0)
@@ -1219,7 +1219,8 @@ read_again:
1219 r1_bio->sectors = bio_sectors(bio) - sectors_handled; 1219 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1220 r1_bio->state = 0; 1220 r1_bio->state = 0;
1221 r1_bio->mddev = mddev; 1221 r1_bio->mddev = mddev;
1222 r1_bio->sector = bio->bi_sector + sectors_handled; 1222 r1_bio->sector = bio->bi_iter.bi_sector +
1223 sectors_handled;
1223 goto read_again; 1224 goto read_again;
1224 } else 1225 } else
1225 generic_make_request(read_bio); 1226 generic_make_request(read_bio);
@@ -1322,7 +1323,7 @@ read_again:
1322 if (r1_bio->bios[j]) 1323 if (r1_bio->bios[j])
1323 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 1324 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1324 r1_bio->state = 0; 1325 r1_bio->state = 0;
1325 allow_barrier(conf, start_next_window, bio->bi_sector); 1326 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
1326 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1327 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1327 start_next_window = wait_barrier(conf, bio); 1328 start_next_window = wait_barrier(conf, bio);
1328 /* 1329 /*
@@ -1349,7 +1350,7 @@ read_again:
1349 bio->bi_phys_segments++; 1350 bio->bi_phys_segments++;
1350 spin_unlock_irq(&conf->device_lock); 1351 spin_unlock_irq(&conf->device_lock);
1351 } 1352 }
1352 sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; 1353 sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
1353 1354
1354 atomic_set(&r1_bio->remaining, 1); 1355 atomic_set(&r1_bio->remaining, 1);
1355 atomic_set(&r1_bio->behind_remaining, 0); 1356 atomic_set(&r1_bio->behind_remaining, 0);
@@ -1361,7 +1362,7 @@ read_again:
1361 continue; 1362 continue;
1362 1363
1363 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1364 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1364 bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors); 1365 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
1365 1366
1366 if (first_clone) { 1367 if (first_clone) {
1367 /* do behind I/O ? 1368 /* do behind I/O ?
@@ -1395,7 +1396,7 @@ read_again:
1395 1396
1396 r1_bio->bios[i] = mbio; 1397 r1_bio->bios[i] = mbio;
1397 1398
1398 mbio->bi_sector = (r1_bio->sector + 1399 mbio->bi_iter.bi_sector = (r1_bio->sector +
1399 conf->mirrors[i].rdev->data_offset); 1400 conf->mirrors[i].rdev->data_offset);
1400 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1401 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1401 mbio->bi_end_io = raid1_end_write_request; 1402 mbio->bi_end_io = raid1_end_write_request;
@@ -1435,7 +1436,7 @@ read_again:
1435 r1_bio->sectors = bio_sectors(bio) - sectors_handled; 1436 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1436 r1_bio->state = 0; 1437 r1_bio->state = 0;
1437 r1_bio->mddev = mddev; 1438 r1_bio->mddev = mddev;
1438 r1_bio->sector = bio->bi_sector + sectors_handled; 1439 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1439 goto retry_write; 1440 goto retry_write;
1440 } 1441 }
1441 1442
@@ -1959,14 +1960,14 @@ static int process_checks(struct r1bio *r1_bio)
1959 /* fixup the bio for reuse */ 1960 /* fixup the bio for reuse */
1960 bio_reset(b); 1961 bio_reset(b);
1961 b->bi_vcnt = vcnt; 1962 b->bi_vcnt = vcnt;
1962 b->bi_size = r1_bio->sectors << 9; 1963 b->bi_iter.bi_size = r1_bio->sectors << 9;
1963 b->bi_sector = r1_bio->sector + 1964 b->bi_iter.bi_sector = r1_bio->sector +
1964 conf->mirrors[i].rdev->data_offset; 1965 conf->mirrors[i].rdev->data_offset;
1965 b->bi_bdev = conf->mirrors[i].rdev->bdev; 1966 b->bi_bdev = conf->mirrors[i].rdev->bdev;
1966 b->bi_end_io = end_sync_read; 1967 b->bi_end_io = end_sync_read;
1967 b->bi_private = r1_bio; 1968 b->bi_private = r1_bio;
1968 1969
1969 size = b->bi_size; 1970 size = b->bi_iter.bi_size;
1970 for (j = 0; j < vcnt ; j++) { 1971 for (j = 0; j < vcnt ; j++) {
1971 struct bio_vec *bi; 1972 struct bio_vec *bi;
1972 bi = &b->bi_io_vec[j]; 1973 bi = &b->bi_io_vec[j];
@@ -2221,11 +2222,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
2221 } 2222 }
2222 2223
2223 wbio->bi_rw = WRITE; 2224 wbio->bi_rw = WRITE;
2224 wbio->bi_sector = r1_bio->sector; 2225 wbio->bi_iter.bi_sector = r1_bio->sector;
2225 wbio->bi_size = r1_bio->sectors << 9; 2226 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2226 2227
2227 bio_trim(wbio, sector - r1_bio->sector, sectors); 2228 bio_trim(wbio, sector - r1_bio->sector, sectors);
2228 wbio->bi_sector += rdev->data_offset; 2229 wbio->bi_iter.bi_sector += rdev->data_offset;
2229 wbio->bi_bdev = rdev->bdev; 2230 wbio->bi_bdev = rdev->bdev;
2230 if (submit_bio_wait(WRITE, wbio) == 0) 2231 if (submit_bio_wait(WRITE, wbio) == 0)
2231 /* failure! */ 2232 /* failure! */
@@ -2339,7 +2340,8 @@ read_more:
2339 } 2340 }
2340 r1_bio->read_disk = disk; 2341 r1_bio->read_disk = disk;
2341 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); 2342 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2342 bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors); 2343 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
2344 max_sectors);
2343 r1_bio->bios[r1_bio->read_disk] = bio; 2345 r1_bio->bios[r1_bio->read_disk] = bio;
2344 rdev = conf->mirrors[disk].rdev; 2346 rdev = conf->mirrors[disk].rdev;
2345 printk_ratelimited(KERN_ERR 2347 printk_ratelimited(KERN_ERR
@@ -2348,7 +2350,7 @@ read_more:
2348 mdname(mddev), 2350 mdname(mddev),
2349 (unsigned long long)r1_bio->sector, 2351 (unsigned long long)r1_bio->sector,
2350 bdevname(rdev->bdev, b)); 2352 bdevname(rdev->bdev, b));
2351 bio->bi_sector = r1_bio->sector + rdev->data_offset; 2353 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
2352 bio->bi_bdev = rdev->bdev; 2354 bio->bi_bdev = rdev->bdev;
2353 bio->bi_end_io = raid1_end_read_request; 2355 bio->bi_end_io = raid1_end_read_request;
2354 bio->bi_rw = READ | do_sync; 2356 bio->bi_rw = READ | do_sync;
@@ -2357,7 +2359,7 @@ read_more:
2357 /* Drat - have to split this up more */ 2359 /* Drat - have to split this up more */
2358 struct bio *mbio = r1_bio->master_bio; 2360 struct bio *mbio = r1_bio->master_bio;
2359 int sectors_handled = (r1_bio->sector + max_sectors 2361 int sectors_handled = (r1_bio->sector + max_sectors
2360 - mbio->bi_sector); 2362 - mbio->bi_iter.bi_sector);
2361 r1_bio->sectors = max_sectors; 2363 r1_bio->sectors = max_sectors;
2362 spin_lock_irq(&conf->device_lock); 2364 spin_lock_irq(&conf->device_lock);
2363 if (mbio->bi_phys_segments == 0) 2365 if (mbio->bi_phys_segments == 0)
@@ -2375,7 +2377,8 @@ read_more:
2375 r1_bio->state = 0; 2377 r1_bio->state = 0;
2376 set_bit(R1BIO_ReadError, &r1_bio->state); 2378 set_bit(R1BIO_ReadError, &r1_bio->state);
2377 r1_bio->mddev = mddev; 2379 r1_bio->mddev = mddev;
2378 r1_bio->sector = mbio->bi_sector + sectors_handled; 2380 r1_bio->sector = mbio->bi_iter.bi_sector +
2381 sectors_handled;
2379 2382
2380 goto read_more; 2383 goto read_more;
2381 } else 2384 } else
@@ -2599,7 +2602,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
2599 } 2602 }
2600 if (bio->bi_end_io) { 2603 if (bio->bi_end_io) {
2601 atomic_inc(&rdev->nr_pending); 2604 atomic_inc(&rdev->nr_pending);
2602 bio->bi_sector = sector_nr + rdev->data_offset; 2605 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2603 bio->bi_bdev = rdev->bdev; 2606 bio->bi_bdev = rdev->bdev;
2604 bio->bi_private = r1_bio; 2607 bio->bi_private = r1_bio;
2605 } 2608 }
@@ -2699,7 +2702,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
2699 continue; 2702 continue;
2700 /* remove last page from this bio */ 2703 /* remove last page from this bio */
2701 bio->bi_vcnt--; 2704 bio->bi_vcnt--;
2702 bio->bi_size -= len; 2705 bio->bi_iter.bi_size -= len;
2703 bio->bi_flags &= ~(1<< BIO_SEG_VALID); 2706 bio->bi_flags &= ~(1<< BIO_SEG_VALID);
2704 } 2707 }
2705 goto bio_full; 2708 goto bio_full;
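Throughout the raid1 conversion the per-device clones are positioned with bio_trim() relative to the master bio's bi_iter.bi_sector, and the open-coded "bi_sector + bio_sectors() - 1" end calculations become bio_end_sector(). A hedged sketch of the clone-and-trim step, using the generic bio_clone_bioset() in place of md's private clone helper:

#include <linux/bio.h>

/* Clone the master bio and keep only max_sectors starting at r_sector.
 * bio_trim() takes its offset in sectors, relative to the clone's
 * current bi_iter.bi_sector. */
static struct bio *example_clone_range(struct bio *master, sector_t r_sector,
				       unsigned int max_sectors,
				       struct bio_set *bs)
{
	struct bio *clone = bio_clone_bioset(master, GFP_NOIO, bs);

	if (!clone)
		return NULL;
	bio_trim(clone, r_sector - master->bi_iter.bi_sector, max_sectors);
	return clone;
}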
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c504e8389e69..6d43d88657aa 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule)
1152 kfree(plug); 1152 kfree(plug);
1153} 1153}
1154 1154
1155static void make_request(struct mddev *mddev, struct bio * bio) 1155static void __make_request(struct mddev *mddev, struct bio *bio)
1156{ 1156{
1157 struct r10conf *conf = mddev->private; 1157 struct r10conf *conf = mddev->private;
1158 struct r10bio *r10_bio; 1158 struct r10bio *r10_bio;
1159 struct bio *read_bio; 1159 struct bio *read_bio;
1160 int i; 1160 int i;
1161 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1162 int chunk_sects = chunk_mask + 1;
1163 const int rw = bio_data_dir(bio); 1161 const int rw = bio_data_dir(bio);
1164 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); 1162 const unsigned long do_sync = (bio->bi_rw & REQ_SYNC);
1165 const unsigned long do_fua = (bio->bi_rw & REQ_FUA); 1163 const unsigned long do_fua = (bio->bi_rw & REQ_FUA);
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1174 int max_sectors; 1172 int max_sectors;
1175 int sectors; 1173 int sectors;
1176 1174
1177 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1178 md_flush_request(mddev, bio);
1179 return;
1180 }
1181
1182 /* If this request crosses a chunk boundary, we need to
1183 * split it. This will only happen for 1 PAGE (or less) requests.
1184 */
1185 if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
1186 > chunk_sects
1187 && (conf->geo.near_copies < conf->geo.raid_disks
1188 || conf->prev.near_copies < conf->prev.raid_disks))) {
1189 struct bio_pair *bp;
1190 /* Sanity check -- queue functions should prevent this happening */
1191 if (bio_segments(bio) > 1)
1192 goto bad_map;
1193 /* This is a one page bio that upper layers
1194 * refuse to split for us, so we need to split it.
1195 */
1196 bp = bio_split(bio,
1197 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) );
1198
1199 /* Each of these 'make_request' calls will call 'wait_barrier'.
1200 * If the first succeeds but the second blocks due to the resync
1201 * thread raising the barrier, we will deadlock because the
1202 * IO to the underlying device will be queued in generic_make_request
1203 * and will never complete, so will never reduce nr_pending.
1204 * So increment nr_waiting here so no new raise_barriers will
1205 * succeed, and so the second wait_barrier cannot block.
1206 */
1207 spin_lock_irq(&conf->resync_lock);
1208 conf->nr_waiting++;
1209 spin_unlock_irq(&conf->resync_lock);
1210
1211 make_request(mddev, &bp->bio1);
1212 make_request(mddev, &bp->bio2);
1213
1214 spin_lock_irq(&conf->resync_lock);
1215 conf->nr_waiting--;
1216 wake_up(&conf->wait_barrier);
1217 spin_unlock_irq(&conf->resync_lock);
1218
1219 bio_pair_release(bp);
1220 return;
1221 bad_map:
1222 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
1223 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
1224 (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);
1225
1226 bio_io_error(bio);
1227 return;
1228 }
1229
1230 md_write_start(mddev, bio);
1231
1232 /*
1233 * Register the new request and wait if the reconstruction
1234 * thread has put up a bar for new requests.
1235 * Continue immediately if no resync is active currently.
1236 */
1237 wait_barrier(conf);
1238
1239 sectors = bio_sectors(bio); 1175 sectors = bio_sectors(bio);
1240 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1176 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1241 bio->bi_sector < conf->reshape_progress && 1177 bio->bi_iter.bi_sector < conf->reshape_progress &&
1242 bio->bi_sector + sectors > conf->reshape_progress) { 1178 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1243 /* IO spans the reshape position. Need to wait for 1179 /* IO spans the reshape position. Need to wait for
1244 * reshape to pass 1180 * reshape to pass
1245 */ 1181 */
1246 allow_barrier(conf); 1182 allow_barrier(conf);
1247 wait_event(conf->wait_barrier, 1183 wait_event(conf->wait_barrier,
1248 conf->reshape_progress <= bio->bi_sector || 1184 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1249 conf->reshape_progress >= bio->bi_sector + sectors); 1185 conf->reshape_progress >= bio->bi_iter.bi_sector +
1186 sectors);
1250 wait_barrier(conf); 1187 wait_barrier(conf);
1251 } 1188 }
1252 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1189 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1253 bio_data_dir(bio) == WRITE && 1190 bio_data_dir(bio) == WRITE &&
1254 (mddev->reshape_backwards 1191 (mddev->reshape_backwards
1255 ? (bio->bi_sector < conf->reshape_safe && 1192 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1256 bio->bi_sector + sectors > conf->reshape_progress) 1193 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1257 : (bio->bi_sector + sectors > conf->reshape_safe && 1194 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1258 bio->bi_sector < conf->reshape_progress))) { 1195 bio->bi_iter.bi_sector < conf->reshape_progress))) {
1259 /* Need to update reshape_position in metadata */ 1196 /* Need to update reshape_position in metadata */
1260 mddev->reshape_position = conf->reshape_progress; 1197 mddev->reshape_position = conf->reshape_progress;
1261 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1198 set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1273 r10_bio->sectors = sectors; 1210 r10_bio->sectors = sectors;
1274 1211
1275 r10_bio->mddev = mddev; 1212 r10_bio->mddev = mddev;
1276 r10_bio->sector = bio->bi_sector; 1213 r10_bio->sector = bio->bi_iter.bi_sector;
1277 r10_bio->state = 0; 1214 r10_bio->state = 0;
1278 1215
1279 /* We might need to issue multiple reads to different 1216 /* We might need to issue multiple reads to different
@@ -1302,13 +1239,13 @@ read_again:
1302 slot = r10_bio->read_slot; 1239 slot = r10_bio->read_slot;
1303 1240
1304 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1241 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1305 bio_trim(read_bio, r10_bio->sector - bio->bi_sector, 1242 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
1306 max_sectors); 1243 max_sectors);
1307 1244
1308 r10_bio->devs[slot].bio = read_bio; 1245 r10_bio->devs[slot].bio = read_bio;
1309 r10_bio->devs[slot].rdev = rdev; 1246 r10_bio->devs[slot].rdev = rdev;
1310 1247
1311 read_bio->bi_sector = r10_bio->devs[slot].addr + 1248 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1312 choose_data_offset(r10_bio, rdev); 1249 choose_data_offset(r10_bio, rdev);
1313 read_bio->bi_bdev = rdev->bdev; 1250 read_bio->bi_bdev = rdev->bdev;
1314 read_bio->bi_end_io = raid10_end_read_request; 1251 read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1257,7 @@ read_again:
1320 * need another r10_bio. 1257 * need another r10_bio.
1321 */ 1258 */
1322 sectors_handled = (r10_bio->sectors + max_sectors 1259 sectors_handled = (r10_bio->sectors + max_sectors
1323 - bio->bi_sector); 1260 - bio->bi_iter.bi_sector);
1324 r10_bio->sectors = max_sectors; 1261 r10_bio->sectors = max_sectors;
1325 spin_lock_irq(&conf->device_lock); 1262 spin_lock_irq(&conf->device_lock);
1326 if (bio->bi_phys_segments == 0) 1263 if (bio->bi_phys_segments == 0)
@@ -1341,7 +1278,8 @@ read_again:
1341 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1278 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1342 r10_bio->state = 0; 1279 r10_bio->state = 0;
1343 r10_bio->mddev = mddev; 1280 r10_bio->mddev = mddev;
1344 r10_bio->sector = bio->bi_sector + sectors_handled; 1281 r10_bio->sector = bio->bi_iter.bi_sector +
1282 sectors_handled;
1345 goto read_again; 1283 goto read_again;
1346 } else 1284 } else
1347 generic_make_request(read_bio); 1285 generic_make_request(read_bio);
@@ -1499,7 +1437,8 @@ retry_write:
1499 bio->bi_phys_segments++; 1437 bio->bi_phys_segments++;
1500 spin_unlock_irq(&conf->device_lock); 1438 spin_unlock_irq(&conf->device_lock);
1501 } 1439 }
1502 sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector; 1440 sectors_handled = r10_bio->sector + max_sectors -
1441 bio->bi_iter.bi_sector;
1503 1442
1504 atomic_set(&r10_bio->remaining, 1); 1443 atomic_set(&r10_bio->remaining, 1);
1505 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); 1444 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1449,11 @@ retry_write:
1510 if (r10_bio->devs[i].bio) { 1449 if (r10_bio->devs[i].bio) {
1511 struct md_rdev *rdev = conf->mirrors[d].rdev; 1450 struct md_rdev *rdev = conf->mirrors[d].rdev;
1512 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1451 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1513 bio_trim(mbio, r10_bio->sector - bio->bi_sector, 1452 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1514 max_sectors); 1453 max_sectors);
1515 r10_bio->devs[i].bio = mbio; 1454 r10_bio->devs[i].bio = mbio;
1516 1455
1517 mbio->bi_sector = (r10_bio->devs[i].addr+ 1456 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
1518 choose_data_offset(r10_bio, 1457 choose_data_offset(r10_bio,
1519 rdev)); 1458 rdev));
1520 mbio->bi_bdev = rdev->bdev; 1459 mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1492,11 @@ retry_write:
1553 rdev = conf->mirrors[d].rdev; 1492 rdev = conf->mirrors[d].rdev;
1554 } 1493 }
1555 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1494 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1556 bio_trim(mbio, r10_bio->sector - bio->bi_sector, 1495 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1557 max_sectors); 1496 max_sectors);
1558 r10_bio->devs[i].repl_bio = mbio; 1497 r10_bio->devs[i].repl_bio = mbio;
1559 1498
1560 mbio->bi_sector = (r10_bio->devs[i].addr + 1499 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
1561 choose_data_offset( 1500 choose_data_offset(
1562 r10_bio, rdev)); 1501 r10_bio, rdev));
1563 mbio->bi_bdev = rdev->bdev; 1502 mbio->bi_bdev = rdev->bdev;
@@ -1591,11 +1530,57 @@ retry_write:
1591 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1530 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1592 1531
1593 r10_bio->mddev = mddev; 1532 r10_bio->mddev = mddev;
1594 r10_bio->sector = bio->bi_sector + sectors_handled; 1533 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1595 r10_bio->state = 0; 1534 r10_bio->state = 0;
1596 goto retry_write; 1535 goto retry_write;
1597 } 1536 }
1598 one_write_done(r10_bio); 1537 one_write_done(r10_bio);
1538}
1539
1540static void make_request(struct mddev *mddev, struct bio *bio)
1541{
1542 struct r10conf *conf = mddev->private;
1543 sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask);
1544 int chunk_sects = chunk_mask + 1;
1545
1546 struct bio *split;
1547
1548 if (unlikely(bio->bi_rw & REQ_FLUSH)) {
1549 md_flush_request(mddev, bio);
1550 return;
1551 }
1552
1553 md_write_start(mddev, bio);
1554
1555 /*
1556 * Register the new request and wait if the reconstruction
1557 * thread has put up a bar for new requests.
1558 * Continue immediately if no resync is active currently.
1559 */
1560 wait_barrier(conf);
1561
1562 do {
1563
1564 /*
1565 * If this request crosses a chunk boundary, we need to split
1566 * it.
1567 */
1568 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) +
1569 bio_sectors(bio) > chunk_sects
1570 && (conf->geo.near_copies < conf->geo.raid_disks
1571 || conf->prev.near_copies <
1572 conf->prev.raid_disks))) {
1573 split = bio_split(bio, chunk_sects -
1574 (bio->bi_iter.bi_sector &
1575 (chunk_sects - 1)),
1576 GFP_NOIO, fs_bio_set);
1577 bio_chain(split, bio);
1578 } else {
1579 split = bio;
1580 }
1581
1582 __make_request(mddev, split);
1583 } while (split != bio);
1599 1584
1600 /* In case raid10d snuck in to freeze_array */ 1585 /* In case raid10d snuck in to freeze_array */
1601 wake_up(&conf->wait_barrier); 1586 wake_up(&conf->wait_barrier);
@@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2124 bio_reset(tbio); 2109 bio_reset(tbio);
2125 2110
2126 tbio->bi_vcnt = vcnt; 2111 tbio->bi_vcnt = vcnt;
2127 tbio->bi_size = r10_bio->sectors << 9; 2112 tbio->bi_iter.bi_size = r10_bio->sectors << 9;
2128 tbio->bi_rw = WRITE; 2113 tbio->bi_rw = WRITE;
2129 tbio->bi_private = r10_bio; 2114 tbio->bi_private = r10_bio;
2130 tbio->bi_sector = r10_bio->devs[i].addr; 2115 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2131 2116
2132 for (j=0; j < vcnt ; j++) { 2117 for (j=0; j < vcnt ; j++) {
2133 tbio->bi_io_vec[j].bv_offset = 0; 2118 tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2144 atomic_inc(&r10_bio->remaining); 2129 atomic_inc(&r10_bio->remaining);
2145 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); 2130 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2146 2131
2147 tbio->bi_sector += conf->mirrors[d].rdev->data_offset; 2132 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2148 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2133 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2149 generic_make_request(tbio); 2134 generic_make_request(tbio);
2150 } 2135 }
@@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
2614 sectors = sect_to_write; 2599 sectors = sect_to_write;
2615 /* Write at 'sector' for 'sectors' */ 2600 /* Write at 'sector' for 'sectors' */
2616 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 2601 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2617 bio_trim(wbio, sector - bio->bi_sector, sectors); 2602 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2618 wbio->bi_sector = (r10_bio->devs[i].addr+ 2603 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
2619 choose_data_offset(r10_bio, rdev) + 2604 choose_data_offset(r10_bio, rdev) +
2620 (sector - r10_bio->sector)); 2605 (sector - r10_bio->sector));
2621 wbio->bi_bdev = rdev->bdev; 2606 wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2672,10 @@ read_more:
2687 (unsigned long long)r10_bio->sector); 2672 (unsigned long long)r10_bio->sector);
2688 bio = bio_clone_mddev(r10_bio->master_bio, 2673 bio = bio_clone_mddev(r10_bio->master_bio,
2689 GFP_NOIO, mddev); 2674 GFP_NOIO, mddev);
2690 bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors); 2675 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
2691 r10_bio->devs[slot].bio = bio; 2676 r10_bio->devs[slot].bio = bio;
2692 r10_bio->devs[slot].rdev = rdev; 2677 r10_bio->devs[slot].rdev = rdev;
2693 bio->bi_sector = r10_bio->devs[slot].addr 2678 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
2694 + choose_data_offset(r10_bio, rdev); 2679 + choose_data_offset(r10_bio, rdev);
2695 bio->bi_bdev = rdev->bdev; 2680 bio->bi_bdev = rdev->bdev;
2696 bio->bi_rw = READ | do_sync; 2681 bio->bi_rw = READ | do_sync;
@@ -2701,7 +2686,7 @@ read_more:
2701 struct bio *mbio = r10_bio->master_bio; 2686 struct bio *mbio = r10_bio->master_bio;
2702 int sectors_handled = 2687 int sectors_handled =
2703 r10_bio->sector + max_sectors 2688 r10_bio->sector + max_sectors
2704 - mbio->bi_sector; 2689 - mbio->bi_iter.bi_sector;
2705 r10_bio->sectors = max_sectors; 2690 r10_bio->sectors = max_sectors;
2706 spin_lock_irq(&conf->device_lock); 2691 spin_lock_irq(&conf->device_lock);
2707 if (mbio->bi_phys_segments == 0) 2692 if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2704,7 @@ read_more:
2719 set_bit(R10BIO_ReadError, 2704 set_bit(R10BIO_ReadError,
2720 &r10_bio->state); 2705 &r10_bio->state);
2721 r10_bio->mddev = mddev; 2706 r10_bio->mddev = mddev;
2722 r10_bio->sector = mbio->bi_sector 2707 r10_bio->sector = mbio->bi_iter.bi_sector
2723 + sectors_handled; 2708 + sectors_handled;
2724 2709
2725 goto read_more; 2710 goto read_more;
@@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3157 bio->bi_end_io = end_sync_read; 3142 bio->bi_end_io = end_sync_read;
3158 bio->bi_rw = READ; 3143 bio->bi_rw = READ;
3159 from_addr = r10_bio->devs[j].addr; 3144 from_addr = r10_bio->devs[j].addr;
3160 bio->bi_sector = from_addr + rdev->data_offset; 3145 bio->bi_iter.bi_sector = from_addr +
3146 rdev->data_offset;
3161 bio->bi_bdev = rdev->bdev; 3147 bio->bi_bdev = rdev->bdev;
3162 atomic_inc(&rdev->nr_pending); 3148 atomic_inc(&rdev->nr_pending);
3163 /* and we write to 'i' (if not in_sync) */ 3149 /* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3181 bio->bi_private = r10_bio; 3167 bio->bi_private = r10_bio;
3182 bio->bi_end_io = end_sync_write; 3168 bio->bi_end_io = end_sync_write;
3183 bio->bi_rw = WRITE; 3169 bio->bi_rw = WRITE;
3184 bio->bi_sector = to_addr 3170 bio->bi_iter.bi_sector = to_addr
3185 + rdev->data_offset; 3171 + rdev->data_offset;
3186 bio->bi_bdev = rdev->bdev; 3172 bio->bi_bdev = rdev->bdev;
3187 atomic_inc(&r10_bio->remaining); 3173 atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3210 bio->bi_private = r10_bio; 3196 bio->bi_private = r10_bio;
3211 bio->bi_end_io = end_sync_write; 3197 bio->bi_end_io = end_sync_write;
3212 bio->bi_rw = WRITE; 3198 bio->bi_rw = WRITE;
3213 bio->bi_sector = to_addr + rdev->data_offset; 3199 bio->bi_iter.bi_sector = to_addr +
3200 rdev->data_offset;
3214 bio->bi_bdev = rdev->bdev; 3201 bio->bi_bdev = rdev->bdev;
3215 atomic_inc(&r10_bio->remaining); 3202 atomic_inc(&r10_bio->remaining);
3216 break; 3203 break;
@@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3328 bio->bi_private = r10_bio; 3315 bio->bi_private = r10_bio;
3329 bio->bi_end_io = end_sync_read; 3316 bio->bi_end_io = end_sync_read;
3330 bio->bi_rw = READ; 3317 bio->bi_rw = READ;
3331 bio->bi_sector = sector + 3318 bio->bi_iter.bi_sector = sector +
3332 conf->mirrors[d].rdev->data_offset; 3319 conf->mirrors[d].rdev->data_offset;
3333 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 3320 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
3334 count++; 3321 count++;
@@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3350 bio->bi_private = r10_bio; 3337 bio->bi_private = r10_bio;
3351 bio->bi_end_io = end_sync_write; 3338 bio->bi_end_io = end_sync_write;
3352 bio->bi_rw = WRITE; 3339 bio->bi_rw = WRITE;
3353 bio->bi_sector = sector + 3340 bio->bi_iter.bi_sector = sector +
3354 conf->mirrors[d].replacement->data_offset; 3341 conf->mirrors[d].replacement->data_offset;
3355 bio->bi_bdev = conf->mirrors[d].replacement->bdev; 3342 bio->bi_bdev = conf->mirrors[d].replacement->bdev;
3356 count++; 3343 count++;
@@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3397 bio2 = bio2->bi_next) { 3384 bio2 = bio2->bi_next) {
3398 /* remove last page from this bio */ 3385 /* remove last page from this bio */
3399 bio2->bi_vcnt--; 3386 bio2->bi_vcnt--;
3400 bio2->bi_size -= len; 3387 bio2->bi_iter.bi_size -= len;
3401 bio2->bi_flags &= ~(1<< BIO_SEG_VALID); 3388 bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
3402 } 3389 }
3403 goto bio_full; 3390 goto bio_full;
@@ -4417,7 +4404,7 @@ read_more:
4417 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4404 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4418 4405
4419 read_bio->bi_bdev = rdev->bdev; 4406 read_bio->bi_bdev = rdev->bdev;
4420 read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4407 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4421 + rdev->data_offset); 4408 + rdev->data_offset);
4422 read_bio->bi_private = r10_bio; 4409 read_bio->bi_private = r10_bio;
4423 read_bio->bi_end_io = end_sync_read; 4410 read_bio->bi_end_io = end_sync_read;
@@ -4425,7 +4412,7 @@ read_more:
4425 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); 4412 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
4426 read_bio->bi_flags |= 1 << BIO_UPTODATE; 4413 read_bio->bi_flags |= 1 << BIO_UPTODATE;
4427 read_bio->bi_vcnt = 0; 4414 read_bio->bi_vcnt = 0;
4428 read_bio->bi_size = 0; 4415 read_bio->bi_iter.bi_size = 0;
4429 r10_bio->master_bio = read_bio; 4416 r10_bio->master_bio = read_bio;
4430 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; 4417 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4431 4418
@@ -4451,7 +4438,8 @@ read_more:
4451 4438
4452 bio_reset(b); 4439 bio_reset(b);
4453 b->bi_bdev = rdev2->bdev; 4440 b->bi_bdev = rdev2->bdev;
4454 b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; 4441 b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
4442 rdev2->new_data_offset;
4455 b->bi_private = r10_bio; 4443 b->bi_private = r10_bio;
4456 b->bi_end_io = end_reshape_write; 4444 b->bi_end_io = end_reshape_write;
4457 b->bi_rw = WRITE; 4445 b->bi_rw = WRITE;
@@ -4478,7 +4466,7 @@ read_more:
4478 bio2 = bio2->bi_next) { 4466 bio2 = bio2->bi_next) {
4479 /* Remove last page from this bio */ 4467 /* Remove last page from this bio */
4480 bio2->bi_vcnt--; 4468 bio2->bi_vcnt--;
4481 bio2->bi_size -= len; 4469 bio2->bi_iter.bi_size -= len;
4482 bio2->bi_flags &= ~(1<<BIO_SEG_VALID); 4470 bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
4483 } 4471 }
4484 goto bio_full; 4472 goto bio_full;
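The raid10 change follows the same pattern as raid0: the chunk-boundary check and the bio_split()/bio_chain() loop move into a thin make_request() wrapper, while the former body becomes __make_request() and only ever sees bios that fit inside one chunk. The boundary arithmetic itself, sketched for the power-of-two chunk case with illustrative names:

#include <linux/bio.h>

/* Sectors remaining until the next chunk boundary; if the bio covers
 * more than this, it has to be split before being mapped. */
static unsigned int example_sectors_to_boundary(struct bio *bio,
						unsigned int chunk_sects)
{
	unsigned int offset = bio->bi_iter.bi_sector & (chunk_sects - 1);

	return chunk_sects - offset;
}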
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index cc055da02e2a..eea63372e4d3 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
133static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) 133static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
134{ 134{
135 int sectors = bio_sectors(bio); 135 int sectors = bio_sectors(bio);
136 if (bio->bi_sector + sectors < sector + STRIPE_SECTORS) 136 if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
137 return bio->bi_next; 137 return bio->bi_next;
138 else 138 else
139 return NULL; 139 return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
225 225
226 return_bi = bi->bi_next; 226 return_bi = bi->bi_next;
227 bi->bi_next = NULL; 227 bi->bi_next = NULL;
228 bi->bi_size = 0; 228 bi->bi_iter.bi_size = 0;
229 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), 229 trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
230 bi, 0); 230 bi, 0);
231 bio_endio(bi, 0); 231 bio_endio(bi, 0);
@@ -851,10 +851,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
851 bi->bi_rw, i); 851 bi->bi_rw, i);
852 atomic_inc(&sh->count); 852 atomic_inc(&sh->count);
853 if (use_new_offset(conf, sh)) 853 if (use_new_offset(conf, sh))
854 bi->bi_sector = (sh->sector 854 bi->bi_iter.bi_sector = (sh->sector
855 + rdev->new_data_offset); 855 + rdev->new_data_offset);
856 else 856 else
857 bi->bi_sector = (sh->sector 857 bi->bi_iter.bi_sector = (sh->sector
858 + rdev->data_offset); 858 + rdev->data_offset);
859 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) 859 if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
860 bi->bi_rw |= REQ_NOMERGE; 860 bi->bi_rw |= REQ_NOMERGE;
@@ -862,7 +862,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
862 bi->bi_vcnt = 1; 862 bi->bi_vcnt = 1;
863 bi->bi_io_vec[0].bv_len = STRIPE_SIZE; 863 bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
864 bi->bi_io_vec[0].bv_offset = 0; 864 bi->bi_io_vec[0].bv_offset = 0;
865 bi->bi_size = STRIPE_SIZE; 865 bi->bi_iter.bi_size = STRIPE_SIZE;
866 /* 866 /*
867 * If this is discard request, set bi_vcnt 0. We don't 867 * If this is discard request, set bi_vcnt 0. We don't
868 * want to confuse SCSI because SCSI will replace payload 868 * want to confuse SCSI because SCSI will replace payload
@@ -898,15 +898,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
898 rbi->bi_rw, i); 898 rbi->bi_rw, i);
899 atomic_inc(&sh->count); 899 atomic_inc(&sh->count);
900 if (use_new_offset(conf, sh)) 900 if (use_new_offset(conf, sh))
901 rbi->bi_sector = (sh->sector 901 rbi->bi_iter.bi_sector = (sh->sector
902 + rrdev->new_data_offset); 902 + rrdev->new_data_offset);
903 else 903 else
904 rbi->bi_sector = (sh->sector 904 rbi->bi_iter.bi_sector = (sh->sector
905 + rrdev->data_offset); 905 + rrdev->data_offset);
906 rbi->bi_vcnt = 1; 906 rbi->bi_vcnt = 1;
907 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; 907 rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
908 rbi->bi_io_vec[0].bv_offset = 0; 908 rbi->bi_io_vec[0].bv_offset = 0;
909 rbi->bi_size = STRIPE_SIZE; 909 rbi->bi_iter.bi_size = STRIPE_SIZE;
910 /* 910 /*
911 * If this is discard request, set bi_vcnt 0. We don't 911 * If this is discard request, set bi_vcnt 0. We don't
912 * want to confuse SCSI because SCSI will replace payload 912 * want to confuse SCSI because SCSI will replace payload
@@ -934,24 +934,24 @@ static struct dma_async_tx_descriptor *
934async_copy_data(int frombio, struct bio *bio, struct page *page, 934async_copy_data(int frombio, struct bio *bio, struct page *page,
935 sector_t sector, struct dma_async_tx_descriptor *tx) 935 sector_t sector, struct dma_async_tx_descriptor *tx)
936{ 936{
937 struct bio_vec *bvl; 937 struct bio_vec bvl;
938 struct bvec_iter iter;
938 struct page *bio_page; 939 struct page *bio_page;
939 int i;
940 int page_offset; 940 int page_offset;
941 struct async_submit_ctl submit; 941 struct async_submit_ctl submit;
942 enum async_tx_flags flags = 0; 942 enum async_tx_flags flags = 0;
943 943
944 if (bio->bi_sector >= sector) 944 if (bio->bi_iter.bi_sector >= sector)
945 page_offset = (signed)(bio->bi_sector - sector) * 512; 945 page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
946 else 946 else
947 page_offset = (signed)(sector - bio->bi_sector) * -512; 947 page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
948 948
949 if (frombio) 949 if (frombio)
950 flags |= ASYNC_TX_FENCE; 950 flags |= ASYNC_TX_FENCE;
951 init_async_submit(&submit, flags, tx, NULL, NULL, NULL); 951 init_async_submit(&submit, flags, tx, NULL, NULL, NULL);
952 952
953 bio_for_each_segment(bvl, bio, i) { 953 bio_for_each_segment(bvl, bio, iter) {
954 int len = bvl->bv_len; 954 int len = bvl.bv_len;
955 int clen; 955 int clen;
956 int b_offset = 0; 956 int b_offset = 0;
957 957
@@ -967,8 +967,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
967 clen = len; 967 clen = len;
968 968
969 if (clen > 0) { 969 if (clen > 0) {
970 b_offset += bvl->bv_offset; 970 b_offset += bvl.bv_offset;
971 bio_page = bvl->bv_page; 971 bio_page = bvl.bv_page;
972 if (frombio) 972 if (frombio)
973 tx = async_memcpy(page, bio_page, page_offset, 973 tx = async_memcpy(page, bio_page, page_offset,
974 b_offset, clen, &submit); 974 b_offset, clen, &submit);
@@ -1011,7 +1011,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
1011 BUG_ON(!dev->read); 1011 BUG_ON(!dev->read);
1012 rbi = dev->read; 1012 rbi = dev->read;
1013 dev->read = NULL; 1013 dev->read = NULL;
1014 while (rbi && rbi->bi_sector < 1014 while (rbi && rbi->bi_iter.bi_sector <
1015 dev->sector + STRIPE_SECTORS) { 1015 dev->sector + STRIPE_SECTORS) {
1016 rbi2 = r5_next_bio(rbi, dev->sector); 1016 rbi2 = r5_next_bio(rbi, dev->sector);
1017 if (!raid5_dec_bi_active_stripes(rbi)) { 1017 if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1047,7 +1047,7 @@ static void ops_run_biofill(struct stripe_head *sh)
1047 dev->read = rbi = dev->toread; 1047 dev->read = rbi = dev->toread;
1048 dev->toread = NULL; 1048 dev->toread = NULL;
1049 spin_unlock_irq(&sh->stripe_lock); 1049 spin_unlock_irq(&sh->stripe_lock);
1050 while (rbi && rbi->bi_sector < 1050 while (rbi && rbi->bi_iter.bi_sector <
1051 dev->sector + STRIPE_SECTORS) { 1051 dev->sector + STRIPE_SECTORS) {
1052 tx = async_copy_data(0, rbi, dev->page, 1052 tx = async_copy_data(0, rbi, dev->page,
1053 dev->sector, tx); 1053 dev->sector, tx);
@@ -1389,7 +1389,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
1389 wbi = dev->written = chosen; 1389 wbi = dev->written = chosen;
1390 spin_unlock_irq(&sh->stripe_lock); 1390 spin_unlock_irq(&sh->stripe_lock);
1391 1391
1392 while (wbi && wbi->bi_sector < 1392 while (wbi && wbi->bi_iter.bi_sector <
1393 dev->sector + STRIPE_SECTORS) { 1393 dev->sector + STRIPE_SECTORS) {
1394 if (wbi->bi_rw & REQ_FUA) 1394 if (wbi->bi_rw & REQ_FUA)
1395 set_bit(R5_WantFUA, &dev->flags); 1395 set_bit(R5_WantFUA, &dev->flags);
@@ -2613,7 +2613,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2613 int firstwrite=0; 2613 int firstwrite=0;
2614 2614
2615 pr_debug("adding bi b#%llu to stripe s#%llu\n", 2615 pr_debug("adding bi b#%llu to stripe s#%llu\n",
2616 (unsigned long long)bi->bi_sector, 2616 (unsigned long long)bi->bi_iter.bi_sector,
2617 (unsigned long long)sh->sector); 2617 (unsigned long long)sh->sector);
2618 2618
2619 /* 2619 /*
@@ -2631,12 +2631,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2631 firstwrite = 1; 2631 firstwrite = 1;
2632 } else 2632 } else
2633 bip = &sh->dev[dd_idx].toread; 2633 bip = &sh->dev[dd_idx].toread;
2634 while (*bip && (*bip)->bi_sector < bi->bi_sector) { 2634 while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
2635 if (bio_end_sector(*bip) > bi->bi_sector) 2635 if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
2636 goto overlap; 2636 goto overlap;
2637 bip = & (*bip)->bi_next; 2637 bip = & (*bip)->bi_next;
2638 } 2638 }
2639 if (*bip && (*bip)->bi_sector < bio_end_sector(bi)) 2639 if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
2640 goto overlap; 2640 goto overlap;
2641 2641
2642 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); 2642 BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2650,7 +2650,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2650 sector_t sector = sh->dev[dd_idx].sector; 2650 sector_t sector = sh->dev[dd_idx].sector;
2651 for (bi=sh->dev[dd_idx].towrite; 2651 for (bi=sh->dev[dd_idx].towrite;
2652 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && 2652 sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
2653 bi && bi->bi_sector <= sector; 2653 bi && bi->bi_iter.bi_sector <= sector;
2654 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { 2654 bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
2655 if (bio_end_sector(bi) >= sector) 2655 if (bio_end_sector(bi) >= sector)
2656 sector = bio_end_sector(bi); 2656 sector = bio_end_sector(bi);
@@ -2660,7 +2660,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
2660 } 2660 }
2661 2661
2662 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", 2662 pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
2663 (unsigned long long)(*bip)->bi_sector, 2663 (unsigned long long)(*bip)->bi_iter.bi_sector,
2664 (unsigned long long)sh->sector, dd_idx); 2664 (unsigned long long)sh->sector, dd_idx);
2665 spin_unlock_irq(&sh->stripe_lock); 2665 spin_unlock_irq(&sh->stripe_lock);
2666 2666
@@ -2735,7 +2735,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2735 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2735 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2736 wake_up(&conf->wait_for_overlap); 2736 wake_up(&conf->wait_for_overlap);
2737 2737
2738 while (bi && bi->bi_sector < 2738 while (bi && bi->bi_iter.bi_sector <
2739 sh->dev[i].sector + STRIPE_SECTORS) { 2739 sh->dev[i].sector + STRIPE_SECTORS) {
2740 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); 2740 struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
2741 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2741 clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2754,7 +2754,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2754 bi = sh->dev[i].written; 2754 bi = sh->dev[i].written;
2755 sh->dev[i].written = NULL; 2755 sh->dev[i].written = NULL;
2756 if (bi) bitmap_end = 1; 2756 if (bi) bitmap_end = 1;
2757 while (bi && bi->bi_sector < 2757 while (bi && bi->bi_iter.bi_sector <
2758 sh->dev[i].sector + STRIPE_SECTORS) { 2758 sh->dev[i].sector + STRIPE_SECTORS) {
2759 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); 2759 struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
2760 clear_bit(BIO_UPTODATE, &bi->bi_flags); 2760 clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2778,7 +2778,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
2778 spin_unlock_irq(&sh->stripe_lock); 2778 spin_unlock_irq(&sh->stripe_lock);
2779 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) 2779 if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
2780 wake_up(&conf->wait_for_overlap); 2780 wake_up(&conf->wait_for_overlap);
2781 while (bi && bi->bi_sector < 2781 while (bi && bi->bi_iter.bi_sector <
2782 sh->dev[i].sector + STRIPE_SECTORS) { 2782 sh->dev[i].sector + STRIPE_SECTORS) {
2783 struct bio *nextbi = 2783 struct bio *nextbi =
2784 r5_next_bio(bi, sh->dev[i].sector); 2784 r5_next_bio(bi, sh->dev[i].sector);
@@ -3002,7 +3002,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
3002 clear_bit(R5_UPTODATE, &dev->flags); 3002 clear_bit(R5_UPTODATE, &dev->flags);
3003 wbi = dev->written; 3003 wbi = dev->written;
3004 dev->written = NULL; 3004 dev->written = NULL;
3005 while (wbi && wbi->bi_sector < 3005 while (wbi && wbi->bi_iter.bi_sector <
3006 dev->sector + STRIPE_SECTORS) { 3006 dev->sector + STRIPE_SECTORS) {
3007 wbi2 = r5_next_bio(wbi, dev->sector); 3007 wbi2 = r5_next_bio(wbi, dev->sector);
3008 if (!raid5_dec_bi_active_stripes(wbi)) { 3008 if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4094,7 +4094,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
4094 4094
4095static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) 4095static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
4096{ 4096{
4097 sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); 4097 sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
4098 unsigned int chunk_sectors = mddev->chunk_sectors; 4098 unsigned int chunk_sectors = mddev->chunk_sectors;
4099 unsigned int bio_sectors = bio_sectors(bio); 4099 unsigned int bio_sectors = bio_sectors(bio);
4100 4100
@@ -4231,9 +4231,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4231 /* 4231 /*
4232 * compute position 4232 * compute position
4233 */ 4233 */
4234 align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, 4234 align_bi->bi_iter.bi_sector =
4235 0, 4235 raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
4236 &dd_idx, NULL); 4236 0, &dd_idx, NULL);
4237 4237
4238 end_sector = bio_end_sector(align_bi); 4238 end_sector = bio_end_sector(align_bi);
4239 rcu_read_lock(); 4239 rcu_read_lock();
@@ -4258,7 +4258,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4258 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); 4258 align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
4259 4259
4260 if (!bio_fits_rdev(align_bi) || 4260 if (!bio_fits_rdev(align_bi) ||
4261 is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi), 4261 is_badblock(rdev, align_bi->bi_iter.bi_sector,
4262 bio_sectors(align_bi),
4262 &first_bad, &bad_sectors)) { 4263 &first_bad, &bad_sectors)) {
4263 /* too big in some way, or has a known bad block */ 4264 /* too big in some way, or has a known bad block */
4264 bio_put(align_bi); 4265 bio_put(align_bi);
@@ -4267,7 +4268,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4267 } 4268 }
4268 4269
4269 /* No reshape active, so we can trust rdev->data_offset */ 4270 /* No reshape active, so we can trust rdev->data_offset */
4270 align_bi->bi_sector += rdev->data_offset; 4271 align_bi->bi_iter.bi_sector += rdev->data_offset;
4271 4272
4272 spin_lock_irq(&conf->device_lock); 4273 spin_lock_irq(&conf->device_lock);
4273 wait_event_lock_irq(conf->wait_for_stripe, 4274 wait_event_lock_irq(conf->wait_for_stripe,
@@ -4279,7 +4280,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
4279 if (mddev->gendisk) 4280 if (mddev->gendisk)
4280 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), 4281 trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
4281 align_bi, disk_devt(mddev->gendisk), 4282 align_bi, disk_devt(mddev->gendisk),
4282 raid_bio->bi_sector); 4283 raid_bio->bi_iter.bi_sector);
4283 generic_make_request(align_bi); 4284 generic_make_request(align_bi);
4284 return 1; 4285 return 1;
4285 } else { 4286 } else {
@@ -4462,8 +4463,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
4462 /* Skip discard while reshape is happening */ 4463 /* Skip discard while reshape is happening */
4463 return; 4464 return;
4464 4465
4465 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4466 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4466 last_sector = bi->bi_sector + (bi->bi_size>>9); 4467 last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
4467 4468
4468 bi->bi_next = NULL; 4469 bi->bi_next = NULL;
4469 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4470 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -4567,7 +4568,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
4567 return; 4568 return;
4568 } 4569 }
4569 4570
4570 logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 4571 logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
4571 last_sector = bio_end_sector(bi); 4572 last_sector = bio_end_sector(bi);
4572 bi->bi_next = NULL; 4573 bi->bi_next = NULL;
4573 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ 4574 bi->bi_phys_segments = 1; /* over-loaded to count active stripes */
@@ -5051,7 +5052,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
5051 int remaining; 5052 int remaining;
5052 int handled = 0; 5053 int handled = 0;
5053 5054
5054 logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); 5055 logical_sector = raid_bio->bi_iter.bi_sector &
5056 ~((sector_t)STRIPE_SECTORS-1);
5055 sector = raid5_compute_sector(conf, logical_sector, 5057 sector = raid5_compute_sector(conf, logical_sector,
5056 0, &dd_idx, NULL); 5058 0, &dd_idx, NULL);
5057 last_sector = bio_end_sector(raid_bio); 5059 last_sector = bio_end_sector(raid_bio);
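The raid5 hunks above all apply the same substitution: the request geometry that used to live directly in struct bio (bi_sector, bi_size) is now read through the embedded iterator, bio->bi_iter. A minimal sketch of the new field names, assuming the post-series <linux/bio.h>; the example_log_bio() helper and its pr_debug output are illustrative, not taken from the patch:

#include <linux/bio.h>
#include <linux/printk.h>

/* Illustrative only: report the range covered by a bio using the
 * post-conversion field names. */
static void example_log_bio(struct bio *bio)
{
        sector_t first = bio->bi_iter.bi_sector;        /* was bio->bi_sector */
        unsigned int bytes = bio->bi_iter.bi_size;      /* was bio->bi_size   */

        pr_debug("bio spans sectors %llu..%llu (%u bytes)\n",
                 (unsigned long long)first,
                 (unsigned long long)(bio_end_sector(bio) - 1), bytes);
}

Call sites such as retry_aligned_read() above keep using bio_end_sector(), whose definition is updated elsewhere in the series to read bi_iter.bi_sector internally.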
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index dd239bdbfcb4..00d339c361fc 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -2235,10 +2235,10 @@ static int mptsas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2235 } 2235 }
2236 2236
2237 /* do we need to support multiple segments? */ 2237 /* do we need to support multiple segments? */
2238 if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) { 2238 if (bio_multiple_segments(req->bio) ||
2239 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u %u, rsp %u %u\n", 2239 bio_multiple_segments(rsp->bio)) {
2240 ioc->name, __func__, bio_segments(req->bio), blk_rq_bytes(req), 2240 printk(MYIOC_s_ERR_FMT "%s: multiple segments req %u, rsp %u\n",
2241 bio_segments(rsp->bio), blk_rq_bytes(rsp)); 2241 ioc->name, __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
2242 return -EINVAL; 2242 return -EINVAL;
2243 } 2243 }
2244 2244
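The SMP passthrough handlers (here and in sas_expander, mpt2sas and mpt3sas below) trade the open-coded bio_segments(bio) > 1 test for bio_multiple_segments(bio), a helper added elsewhere in this series that simply asks whether the bio's data extends beyond its first segment; with immutable biovecs, bio_segments() has to walk the iterator, so the cheap predicate is preferred. A sketch of the check, with example_check_single_segment() as an invented name:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/printk.h>

/* Illustrative: reject SMP requests whose bios span more than one segment. */
static int example_check_single_segment(struct request *req, struct request *rsp)
{
        if (bio_multiple_segments(req->bio) ||
            bio_multiple_segments(rsp->bio)) {
                pr_err("%s: multiple segments req %u, rsp %u\n", __func__,
                       blk_rq_bytes(req), blk_rq_bytes(rsp));
                return -EINVAL;
        }
        return 0;
}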
diff --git a/drivers/s390/block/dasd_diag.c b/drivers/s390/block/dasd_diag.c
index 92bd22ce6760..9cbc567698ce 100644
--- a/drivers/s390/block/dasd_diag.c
+++ b/drivers/s390/block/dasd_diag.c
@@ -504,7 +504,7 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
504 struct dasd_diag_req *dreq; 504 struct dasd_diag_req *dreq;
505 struct dasd_diag_bio *dbio; 505 struct dasd_diag_bio *dbio;
506 struct req_iterator iter; 506 struct req_iterator iter;
507 struct bio_vec *bv; 507 struct bio_vec bv;
508 char *dst; 508 char *dst;
509 unsigned int count, datasize; 509 unsigned int count, datasize;
510 sector_t recid, first_rec, last_rec; 510 sector_t recid, first_rec, last_rec;
@@ -525,10 +525,10 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
525 /* Check struct bio and count the number of blocks for the request. */ 525 /* Check struct bio and count the number of blocks for the request. */
526 count = 0; 526 count = 0;
527 rq_for_each_segment(bv, req, iter) { 527 rq_for_each_segment(bv, req, iter) {
528 if (bv->bv_len & (blksize - 1)) 528 if (bv.bv_len & (blksize - 1))
529 /* Fba can only do full blocks. */ 529 /* Fba can only do full blocks. */
530 return ERR_PTR(-EINVAL); 530 return ERR_PTR(-EINVAL);
531 count += bv->bv_len >> (block->s2b_shift + 9); 531 count += bv.bv_len >> (block->s2b_shift + 9);
532 } 532 }
533 /* Paranoia. */ 533 /* Paranoia. */
534 if (count != last_rec - first_rec + 1) 534 if (count != last_rec - first_rec + 1)
@@ -545,8 +545,8 @@ static struct dasd_ccw_req *dasd_diag_build_cp(struct dasd_device *memdev,
545 dbio = dreq->bio; 545 dbio = dreq->bio;
546 recid = first_rec; 546 recid = first_rec;
547 rq_for_each_segment(bv, req, iter) { 547 rq_for_each_segment(bv, req, iter) {
548 dst = page_address(bv->bv_page) + bv->bv_offset; 548 dst = page_address(bv.bv_page) + bv.bv_offset;
549 for (off = 0; off < bv->bv_len; off += blksize) { 549 for (off = 0; off < bv.bv_len; off += blksize) {
550 memset(dbio, 0, sizeof (struct dasd_diag_bio)); 550 memset(dbio, 0, sizeof (struct dasd_diag_bio));
551 dbio->type = rw_cmd; 551 dbio->type = rw_cmd;
552 dbio->block_number = recid + 1; 552 dbio->block_number = recid + 1;
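The DASD conversions show the other half of the interface change: rq_for_each_segment() now hands back each struct bio_vec by value, so drivers declare a plain struct bio_vec and use bv.bv_len instead of keeping a pointer into the bio's vector. A compressed sketch of that loop shape, assuming the usual <linux/blkdev.h> request iterator; example_count_blocks() is an invented name and the -EINVAL policy merely mirrors the DASD code:

#include <linux/blkdev.h>
#include <linux/bio.h>

/* Illustrative: count full blocks in a request with the by-value iterator. */
static int example_count_blocks(struct request *req, unsigned int blksize)
{
        struct req_iterator iter;
        struct bio_vec bv;                      /* was: struct bio_vec *bv */
        int count = 0;

        rq_for_each_segment(bv, req, iter) {
                if (bv.bv_len & (blksize - 1))  /* was: bv->bv_len */
                        return -EINVAL;         /* only full blocks allowed */
                count += bv.bv_len / blksize;
        }
        return count;
}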
diff --git a/drivers/s390/block/dasd_eckd.c b/drivers/s390/block/dasd_eckd.c
index 95e45782692f..2e8e0755070b 100644
--- a/drivers/s390/block/dasd_eckd.c
+++ b/drivers/s390/block/dasd_eckd.c
@@ -2551,7 +2551,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
2551 struct dasd_ccw_req *cqr; 2551 struct dasd_ccw_req *cqr;
2552 struct ccw1 *ccw; 2552 struct ccw1 *ccw;
2553 struct req_iterator iter; 2553 struct req_iterator iter;
2554 struct bio_vec *bv; 2554 struct bio_vec bv;
2555 char *dst; 2555 char *dst;
2556 unsigned int off; 2556 unsigned int off;
2557 int count, cidaw, cplength, datasize; 2557 int count, cidaw, cplength, datasize;
@@ -2573,13 +2573,13 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
2573 count = 0; 2573 count = 0;
2574 cidaw = 0; 2574 cidaw = 0;
2575 rq_for_each_segment(bv, req, iter) { 2575 rq_for_each_segment(bv, req, iter) {
2576 if (bv->bv_len & (blksize - 1)) 2576 if (bv.bv_len & (blksize - 1))
2577 /* Eckd can only do full blocks. */ 2577 /* Eckd can only do full blocks. */
2578 return ERR_PTR(-EINVAL); 2578 return ERR_PTR(-EINVAL);
2579 count += bv->bv_len >> (block->s2b_shift + 9); 2579 count += bv.bv_len >> (block->s2b_shift + 9);
2580#if defined(CONFIG_64BIT) 2580#if defined(CONFIG_64BIT)
2581 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 2581 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
2582 cidaw += bv->bv_len >> (block->s2b_shift + 9); 2582 cidaw += bv.bv_len >> (block->s2b_shift + 9);
2583#endif 2583#endif
2584 } 2584 }
2585 /* Paranoia. */ 2585 /* Paranoia. */
@@ -2650,16 +2650,16 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_single(
2650 last_rec - recid + 1, cmd, basedev, blksize); 2650 last_rec - recid + 1, cmd, basedev, blksize);
2651 } 2651 }
2652 rq_for_each_segment(bv, req, iter) { 2652 rq_for_each_segment(bv, req, iter) {
2653 dst = page_address(bv->bv_page) + bv->bv_offset; 2653 dst = page_address(bv.bv_page) + bv.bv_offset;
2654 if (dasd_page_cache) { 2654 if (dasd_page_cache) {
2655 char *copy = kmem_cache_alloc(dasd_page_cache, 2655 char *copy = kmem_cache_alloc(dasd_page_cache,
2656 GFP_DMA | __GFP_NOWARN); 2656 GFP_DMA | __GFP_NOWARN);
2657 if (copy && rq_data_dir(req) == WRITE) 2657 if (copy && rq_data_dir(req) == WRITE)
2658 memcpy(copy + bv->bv_offset, dst, bv->bv_len); 2658 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
2659 if (copy) 2659 if (copy)
2660 dst = copy + bv->bv_offset; 2660 dst = copy + bv.bv_offset;
2661 } 2661 }
2662 for (off = 0; off < bv->bv_len; off += blksize) { 2662 for (off = 0; off < bv.bv_len; off += blksize) {
2663 sector_t trkid = recid; 2663 sector_t trkid = recid;
2664 unsigned int recoffs = sector_div(trkid, blk_per_trk); 2664 unsigned int recoffs = sector_div(trkid, blk_per_trk);
2665 rcmd = cmd; 2665 rcmd = cmd;
@@ -2735,7 +2735,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2735 struct dasd_ccw_req *cqr; 2735 struct dasd_ccw_req *cqr;
2736 struct ccw1 *ccw; 2736 struct ccw1 *ccw;
2737 struct req_iterator iter; 2737 struct req_iterator iter;
2738 struct bio_vec *bv; 2738 struct bio_vec bv;
2739 char *dst, *idaw_dst; 2739 char *dst, *idaw_dst;
2740 unsigned int cidaw, cplength, datasize; 2740 unsigned int cidaw, cplength, datasize;
2741 unsigned int tlf; 2741 unsigned int tlf;
@@ -2813,8 +2813,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_cmd_track(
2813 idaw_dst = NULL; 2813 idaw_dst = NULL;
2814 idaw_len = 0; 2814 idaw_len = 0;
2815 rq_for_each_segment(bv, req, iter) { 2815 rq_for_each_segment(bv, req, iter) {
2816 dst = page_address(bv->bv_page) + bv->bv_offset; 2816 dst = page_address(bv.bv_page) + bv.bv_offset;
2817 seg_len = bv->bv_len; 2817 seg_len = bv.bv_len;
2818 while (seg_len) { 2818 while (seg_len) {
2819 if (new_track) { 2819 if (new_track) {
2820 trkid = recid; 2820 trkid = recid;
@@ -3039,7 +3039,7 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3039{ 3039{
3040 struct dasd_ccw_req *cqr; 3040 struct dasd_ccw_req *cqr;
3041 struct req_iterator iter; 3041 struct req_iterator iter;
3042 struct bio_vec *bv; 3042 struct bio_vec bv;
3043 char *dst; 3043 char *dst;
3044 unsigned int trkcount, ctidaw; 3044 unsigned int trkcount, ctidaw;
3045 unsigned char cmd; 3045 unsigned char cmd;
@@ -3125,8 +3125,8 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3125 new_track = 1; 3125 new_track = 1;
3126 recid = first_rec; 3126 recid = first_rec;
3127 rq_for_each_segment(bv, req, iter) { 3127 rq_for_each_segment(bv, req, iter) {
3128 dst = page_address(bv->bv_page) + bv->bv_offset; 3128 dst = page_address(bv.bv_page) + bv.bv_offset;
3129 seg_len = bv->bv_len; 3129 seg_len = bv.bv_len;
3130 while (seg_len) { 3130 while (seg_len) {
3131 if (new_track) { 3131 if (new_track) {
3132 trkid = recid; 3132 trkid = recid;
@@ -3158,9 +3158,9 @@ static struct dasd_ccw_req *dasd_eckd_build_cp_tpm_track(
3158 } 3158 }
3159 } else { 3159 } else {
3160 rq_for_each_segment(bv, req, iter) { 3160 rq_for_each_segment(bv, req, iter) {
3161 dst = page_address(bv->bv_page) + bv->bv_offset; 3161 dst = page_address(bv.bv_page) + bv.bv_offset;
3162 last_tidaw = itcw_add_tidaw(itcw, 0x00, 3162 last_tidaw = itcw_add_tidaw(itcw, 0x00,
3163 dst, bv->bv_len); 3163 dst, bv.bv_len);
3164 if (IS_ERR(last_tidaw)) { 3164 if (IS_ERR(last_tidaw)) {
3165 ret = -EINVAL; 3165 ret = -EINVAL;
3166 goto out_error; 3166 goto out_error;
@@ -3278,7 +3278,7 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3278 struct dasd_ccw_req *cqr; 3278 struct dasd_ccw_req *cqr;
3279 struct ccw1 *ccw; 3279 struct ccw1 *ccw;
3280 struct req_iterator iter; 3280 struct req_iterator iter;
3281 struct bio_vec *bv; 3281 struct bio_vec bv;
3282 char *dst; 3282 char *dst;
3283 unsigned char cmd; 3283 unsigned char cmd;
3284 unsigned int trkcount; 3284 unsigned int trkcount;
@@ -3378,8 +3378,8 @@ static struct dasd_ccw_req *dasd_raw_build_cp(struct dasd_device *startdev,
3378 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE); 3378 idaws = idal_create_words(idaws, rawpadpage, PAGE_SIZE);
3379 } 3379 }
3380 rq_for_each_segment(bv, req, iter) { 3380 rq_for_each_segment(bv, req, iter) {
3381 dst = page_address(bv->bv_page) + bv->bv_offset; 3381 dst = page_address(bv.bv_page) + bv.bv_offset;
3382 seg_len = bv->bv_len; 3382 seg_len = bv.bv_len;
3383 if (cmd == DASD_ECKD_CCW_READ_TRACK) 3383 if (cmd == DASD_ECKD_CCW_READ_TRACK)
3384 memset(dst, 0, seg_len); 3384 memset(dst, 0, seg_len);
3385 if (!len_to_track_end) { 3385 if (!len_to_track_end) {
@@ -3424,7 +3424,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
3424 struct dasd_eckd_private *private; 3424 struct dasd_eckd_private *private;
3425 struct ccw1 *ccw; 3425 struct ccw1 *ccw;
3426 struct req_iterator iter; 3426 struct req_iterator iter;
3427 struct bio_vec *bv; 3427 struct bio_vec bv;
3428 char *dst, *cda; 3428 char *dst, *cda;
3429 unsigned int blksize, blk_per_trk, off; 3429 unsigned int blksize, blk_per_trk, off;
3430 sector_t recid; 3430 sector_t recid;
@@ -3442,8 +3442,8 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
3442 if (private->uses_cdl == 0 || recid > 2*blk_per_trk) 3442 if (private->uses_cdl == 0 || recid > 2*blk_per_trk)
3443 ccw++; 3443 ccw++;
3444 rq_for_each_segment(bv, req, iter) { 3444 rq_for_each_segment(bv, req, iter) {
3445 dst = page_address(bv->bv_page) + bv->bv_offset; 3445 dst = page_address(bv.bv_page) + bv.bv_offset;
3446 for (off = 0; off < bv->bv_len; off += blksize) { 3446 for (off = 0; off < bv.bv_len; off += blksize) {
3447 /* Skip locate record. */ 3447 /* Skip locate record. */
3448 if (private->uses_cdl && recid <= 2*blk_per_trk) 3448 if (private->uses_cdl && recid <= 2*blk_per_trk)
3449 ccw++; 3449 ccw++;
@@ -3454,7 +3454,7 @@ dasd_eckd_free_cp(struct dasd_ccw_req *cqr, struct request *req)
3454 cda = (char *)((addr_t) ccw->cda); 3454 cda = (char *)((addr_t) ccw->cda);
3455 if (dst != cda) { 3455 if (dst != cda) {
3456 if (rq_data_dir(req) == READ) 3456 if (rq_data_dir(req) == READ)
3457 memcpy(dst, cda, bv->bv_len); 3457 memcpy(dst, cda, bv.bv_len);
3458 kmem_cache_free(dasd_page_cache, 3458 kmem_cache_free(dasd_page_cache,
3459 (void *)((addr_t)cda & PAGE_MASK)); 3459 (void *)((addr_t)cda & PAGE_MASK));
3460 } 3460 }
diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c
index 9cbc8c32ba59..2c8e68bf9a1c 100644
--- a/drivers/s390/block/dasd_fba.c
+++ b/drivers/s390/block/dasd_fba.c
@@ -260,7 +260,7 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
260 struct dasd_ccw_req *cqr; 260 struct dasd_ccw_req *cqr;
261 struct ccw1 *ccw; 261 struct ccw1 *ccw;
262 struct req_iterator iter; 262 struct req_iterator iter;
263 struct bio_vec *bv; 263 struct bio_vec bv;
264 char *dst; 264 char *dst;
265 int count, cidaw, cplength, datasize; 265 int count, cidaw, cplength, datasize;
266 sector_t recid, first_rec, last_rec; 266 sector_t recid, first_rec, last_rec;
@@ -283,13 +283,13 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
283 count = 0; 283 count = 0;
284 cidaw = 0; 284 cidaw = 0;
285 rq_for_each_segment(bv, req, iter) { 285 rq_for_each_segment(bv, req, iter) {
286 if (bv->bv_len & (blksize - 1)) 286 if (bv.bv_len & (blksize - 1))
287 /* Fba can only do full blocks. */ 287 /* Fba can only do full blocks. */
288 return ERR_PTR(-EINVAL); 288 return ERR_PTR(-EINVAL);
289 count += bv->bv_len >> (block->s2b_shift + 9); 289 count += bv.bv_len >> (block->s2b_shift + 9);
290#if defined(CONFIG_64BIT) 290#if defined(CONFIG_64BIT)
291 if (idal_is_needed (page_address(bv->bv_page), bv->bv_len)) 291 if (idal_is_needed (page_address(bv.bv_page), bv.bv_len))
292 cidaw += bv->bv_len / blksize; 292 cidaw += bv.bv_len / blksize;
293#endif 293#endif
294 } 294 }
295 /* Paranoia. */ 295 /* Paranoia. */
@@ -326,16 +326,16 @@ static struct dasd_ccw_req *dasd_fba_build_cp(struct dasd_device * memdev,
326 } 326 }
327 recid = first_rec; 327 recid = first_rec;
328 rq_for_each_segment(bv, req, iter) { 328 rq_for_each_segment(bv, req, iter) {
329 dst = page_address(bv->bv_page) + bv->bv_offset; 329 dst = page_address(bv.bv_page) + bv.bv_offset;
330 if (dasd_page_cache) { 330 if (dasd_page_cache) {
331 char *copy = kmem_cache_alloc(dasd_page_cache, 331 char *copy = kmem_cache_alloc(dasd_page_cache,
332 GFP_DMA | __GFP_NOWARN); 332 GFP_DMA | __GFP_NOWARN);
333 if (copy && rq_data_dir(req) == WRITE) 333 if (copy && rq_data_dir(req) == WRITE)
334 memcpy(copy + bv->bv_offset, dst, bv->bv_len); 334 memcpy(copy + bv.bv_offset, dst, bv.bv_len);
335 if (copy) 335 if (copy)
336 dst = copy + bv->bv_offset; 336 dst = copy + bv.bv_offset;
337 } 337 }
338 for (off = 0; off < bv->bv_len; off += blksize) { 338 for (off = 0; off < bv.bv_len; off += blksize) {
339 /* Locate record for stupid devices. */ 339 /* Locate record for stupid devices. */
340 if (private->rdc_data.mode.bits.data_chain == 0) { 340 if (private->rdc_data.mode.bits.data_chain == 0) {
341 ccw[-1].flags |= CCW_FLAG_CC; 341 ccw[-1].flags |= CCW_FLAG_CC;
@@ -384,7 +384,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
384 struct dasd_fba_private *private; 384 struct dasd_fba_private *private;
385 struct ccw1 *ccw; 385 struct ccw1 *ccw;
386 struct req_iterator iter; 386 struct req_iterator iter;
387 struct bio_vec *bv; 387 struct bio_vec bv;
388 char *dst, *cda; 388 char *dst, *cda;
389 unsigned int blksize, off; 389 unsigned int blksize, off;
390 int status; 390 int status;
@@ -399,8 +399,8 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
399 if (private->rdc_data.mode.bits.data_chain != 0) 399 if (private->rdc_data.mode.bits.data_chain != 0)
400 ccw++; 400 ccw++;
401 rq_for_each_segment(bv, req, iter) { 401 rq_for_each_segment(bv, req, iter) {
402 dst = page_address(bv->bv_page) + bv->bv_offset; 402 dst = page_address(bv.bv_page) + bv.bv_offset;
403 for (off = 0; off < bv->bv_len; off += blksize) { 403 for (off = 0; off < bv.bv_len; off += blksize) {
404 /* Skip locate record. */ 404 /* Skip locate record. */
405 if (private->rdc_data.mode.bits.data_chain == 0) 405 if (private->rdc_data.mode.bits.data_chain == 0)
406 ccw++; 406 ccw++;
@@ -411,7 +411,7 @@ dasd_fba_free_cp(struct dasd_ccw_req *cqr, struct request *req)
411 cda = (char *)((addr_t) ccw->cda); 411 cda = (char *)((addr_t) ccw->cda);
412 if (dst != cda) { 412 if (dst != cda) {
413 if (rq_data_dir(req) == READ) 413 if (rq_data_dir(req) == READ)
414 memcpy(dst, cda, bv->bv_len); 414 memcpy(dst, cda, bv.bv_len);
415 kmem_cache_free(dasd_page_cache, 415 kmem_cache_free(dasd_page_cache,
416 (void *)((addr_t)cda & PAGE_MASK)); 416 (void *)((addr_t)cda & PAGE_MASK));
417 } 417 }
diff --git a/drivers/s390/block/dcssblk.c b/drivers/s390/block/dcssblk.c
index 6eca019bcf30..ebf41e228e55 100644
--- a/drivers/s390/block/dcssblk.c
+++ b/drivers/s390/block/dcssblk.c
@@ -808,18 +808,19 @@ static void
808dcssblk_make_request(struct request_queue *q, struct bio *bio) 808dcssblk_make_request(struct request_queue *q, struct bio *bio)
809{ 809{
810 struct dcssblk_dev_info *dev_info; 810 struct dcssblk_dev_info *dev_info;
811 struct bio_vec *bvec; 811 struct bio_vec bvec;
812 struct bvec_iter iter;
812 unsigned long index; 813 unsigned long index;
813 unsigned long page_addr; 814 unsigned long page_addr;
814 unsigned long source_addr; 815 unsigned long source_addr;
815 unsigned long bytes_done; 816 unsigned long bytes_done;
816 int i;
817 817
818 bytes_done = 0; 818 bytes_done = 0;
819 dev_info = bio->bi_bdev->bd_disk->private_data; 819 dev_info = bio->bi_bdev->bd_disk->private_data;
820 if (dev_info == NULL) 820 if (dev_info == NULL)
821 goto fail; 821 goto fail;
822 if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) 822 if ((bio->bi_iter.bi_sector & 7) != 0 ||
823 (bio->bi_iter.bi_size & 4095) != 0)
823 /* Request is not page-aligned. */ 824 /* Request is not page-aligned. */
824 goto fail; 825 goto fail;
825 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) { 826 if (bio_end_sector(bio) > get_capacity(bio->bi_bdev->bd_disk)) {
@@ -842,22 +843,22 @@ dcssblk_make_request(struct request_queue *q, struct bio *bio)
842 } 843 }
843 } 844 }
844 845
845 index = (bio->bi_sector >> 3); 846 index = (bio->bi_iter.bi_sector >> 3);
846 bio_for_each_segment(bvec, bio, i) { 847 bio_for_each_segment(bvec, bio, iter) {
847 page_addr = (unsigned long) 848 page_addr = (unsigned long)
848 page_address(bvec->bv_page) + bvec->bv_offset; 849 page_address(bvec.bv_page) + bvec.bv_offset;
849 source_addr = dev_info->start + (index<<12) + bytes_done; 850 source_addr = dev_info->start + (index<<12) + bytes_done;
850 if (unlikely((page_addr & 4095) != 0) || (bvec->bv_len & 4095) != 0) 851 if (unlikely((page_addr & 4095) != 0) || (bvec.bv_len & 4095) != 0)
851 // More paranoia. 852 // More paranoia.
852 goto fail; 853 goto fail;
853 if (bio_data_dir(bio) == READ) { 854 if (bio_data_dir(bio) == READ) {
854 memcpy((void*)page_addr, (void*)source_addr, 855 memcpy((void*)page_addr, (void*)source_addr,
855 bvec->bv_len); 856 bvec.bv_len);
856 } else { 857 } else {
857 memcpy((void*)source_addr, (void*)page_addr, 858 memcpy((void*)source_addr, (void*)page_addr,
858 bvec->bv_len); 859 bvec.bv_len);
859 } 860 }
860 bytes_done += bvec->bv_len; 861 bytes_done += bvec.bv_len;
861 } 862 }
862 bio_endio(bio, 0); 863 bio_endio(bio, 0);
863 return; 864 return;
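dcssblk (and xpram just below) illustrate the open-coded bio walk: the integer segment index is replaced by a struct bvec_iter, and bio_for_each_segment() yields each bio_vec by value. A minimal sketch of copying a bio's payload into a flat buffer with the new loop, assuming <linux/bio.h> and <linux/mm.h>; example_copy_bio() is an invented name:

#include <linux/bio.h>
#include <linux/mm.h>
#include <linux/string.h>

/* Illustrative: copy every segment of a bio into dst, in order. */
static void example_copy_bio(struct bio *bio, void *dst)
{
        struct bio_vec bvec;            /* was: struct bio_vec *bvec */
        struct bvec_iter iter;          /* was: int i */
        unsigned int done = 0;

        bio_for_each_segment(bvec, bio, iter) {
                memcpy(dst + done,
                       page_address(bvec.bv_page) + bvec.bv_offset,
                       bvec.bv_len);
                done += bvec.bv_len;
        }
}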
diff --git a/drivers/s390/block/scm_blk.c b/drivers/s390/block/scm_blk.c
index d0ab5019d885..76bed1743db1 100644
--- a/drivers/s390/block/scm_blk.c
+++ b/drivers/s390/block/scm_blk.c
@@ -130,7 +130,7 @@ static void scm_request_prepare(struct scm_request *scmrq)
130 struct aidaw *aidaw = scmrq->aidaw; 130 struct aidaw *aidaw = scmrq->aidaw;
131 struct msb *msb = &scmrq->aob->msb[0]; 131 struct msb *msb = &scmrq->aob->msb[0];
132 struct req_iterator iter; 132 struct req_iterator iter;
133 struct bio_vec *bv; 133 struct bio_vec bv;
134 134
135 msb->bs = MSB_BS_4K; 135 msb->bs = MSB_BS_4K;
136 scmrq->aob->request.msb_count = 1; 136 scmrq->aob->request.msb_count = 1;
@@ -142,9 +142,9 @@ static void scm_request_prepare(struct scm_request *scmrq)
142 msb->data_addr = (u64) aidaw; 142 msb->data_addr = (u64) aidaw;
143 143
144 rq_for_each_segment(bv, scmrq->request, iter) { 144 rq_for_each_segment(bv, scmrq->request, iter) {
145 WARN_ON(bv->bv_offset); 145 WARN_ON(bv.bv_offset);
146 msb->blk_count += bv->bv_len >> 12; 146 msb->blk_count += bv.bv_len >> 12;
147 aidaw->data_addr = (u64) page_address(bv->bv_page); 147 aidaw->data_addr = (u64) page_address(bv.bv_page);
148 aidaw++; 148 aidaw++;
149 } 149 }
150} 150}
diff --git a/drivers/s390/block/scm_blk_cluster.c b/drivers/s390/block/scm_blk_cluster.c
index 27f930cd657f..9aae909d47a5 100644
--- a/drivers/s390/block/scm_blk_cluster.c
+++ b/drivers/s390/block/scm_blk_cluster.c
@@ -122,7 +122,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
122 struct aidaw *aidaw = scmrq->aidaw; 122 struct aidaw *aidaw = scmrq->aidaw;
123 struct msb *msb = &scmrq->aob->msb[0]; 123 struct msb *msb = &scmrq->aob->msb[0];
124 struct req_iterator iter; 124 struct req_iterator iter;
125 struct bio_vec *bv; 125 struct bio_vec bv;
126 int i = 0; 126 int i = 0;
127 u64 addr; 127 u64 addr;
128 128
@@ -163,7 +163,7 @@ static void scm_prepare_cluster_request(struct scm_request *scmrq)
163 i++; 163 i++;
164 } 164 }
165 rq_for_each_segment(bv, req, iter) { 165 rq_for_each_segment(bv, req, iter) {
166 aidaw->data_addr = (u64) page_address(bv->bv_page); 166 aidaw->data_addr = (u64) page_address(bv.bv_page);
167 aidaw++; 167 aidaw++;
168 i++; 168 i++;
169 } 169 }
diff --git a/drivers/s390/block/xpram.c b/drivers/s390/block/xpram.c
index 464dd29d06c0..3e530f9da8c4 100644
--- a/drivers/s390/block/xpram.c
+++ b/drivers/s390/block/xpram.c
@@ -184,25 +184,26 @@ static unsigned long xpram_highest_page_index(void)
184static void xpram_make_request(struct request_queue *q, struct bio *bio) 184static void xpram_make_request(struct request_queue *q, struct bio *bio)
185{ 185{
186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data; 186 xpram_device_t *xdev = bio->bi_bdev->bd_disk->private_data;
187 struct bio_vec *bvec; 187 struct bio_vec bvec;
188 struct bvec_iter iter;
188 unsigned int index; 189 unsigned int index;
189 unsigned long page_addr; 190 unsigned long page_addr;
190 unsigned long bytes; 191 unsigned long bytes;
191 int i;
192 192
193 if ((bio->bi_sector & 7) != 0 || (bio->bi_size & 4095) != 0) 193 if ((bio->bi_iter.bi_sector & 7) != 0 ||
194 (bio->bi_iter.bi_size & 4095) != 0)
194 /* Request is not page-aligned. */ 195 /* Request is not page-aligned. */
195 goto fail; 196 goto fail;
196 if ((bio->bi_size >> 12) > xdev->size) 197 if ((bio->bi_iter.bi_size >> 12) > xdev->size)
197 /* Request size is no page-aligned. */ 198 /* Request size is no page-aligned. */
198 goto fail; 199 goto fail;
199 if ((bio->bi_sector >> 3) > 0xffffffffU - xdev->offset) 200 if ((bio->bi_iter.bi_sector >> 3) > 0xffffffffU - xdev->offset)
200 goto fail; 201 goto fail;
201 index = (bio->bi_sector >> 3) + xdev->offset; 202 index = (bio->bi_iter.bi_sector >> 3) + xdev->offset;
202 bio_for_each_segment(bvec, bio, i) { 203 bio_for_each_segment(bvec, bio, iter) {
203 page_addr = (unsigned long) 204 page_addr = (unsigned long)
204 kmap(bvec->bv_page) + bvec->bv_offset; 205 kmap(bvec.bv_page) + bvec.bv_offset;
205 bytes = bvec->bv_len; 206 bytes = bvec.bv_len;
206 if ((page_addr & 4095) != 0 || (bytes & 4095) != 0) 207 if ((page_addr & 4095) != 0 || (bytes & 4095) != 0)
207 /* More paranoia. */ 208 /* More paranoia. */
208 goto fail; 209 goto fail;
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index 446b85110a1f..0cac7d8fd0f7 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -2163,10 +2163,10 @@ int sas_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2163 } 2163 }
2164 2164
2165 /* do we need to support multiple segments? */ 2165 /* do we need to support multiple segments? */
2166 if (bio_segments(req->bio) > 1 || bio_segments(rsp->bio) > 1) { 2166 if (bio_multiple_segments(req->bio) ||
2167 printk("%s: multiple segments req %u %u, rsp %u %u\n", 2167 bio_multiple_segments(rsp->bio)) {
2168 __func__, bio_segments(req->bio), blk_rq_bytes(req), 2168 printk("%s: multiple segments req %u, rsp %u\n",
2169 bio_segments(rsp->bio), blk_rq_bytes(rsp)); 2169 __func__, blk_rq_bytes(req), blk_rq_bytes(rsp));
2170 return -EINVAL; 2170 return -EINVAL;
2171 } 2171 }
2172 2172
diff --git a/drivers/scsi/mpt2sas/mpt2sas_transport.c b/drivers/scsi/mpt2sas/mpt2sas_transport.c
index 9d26637308be..410f4a3e8888 100644
--- a/drivers/scsi/mpt2sas/mpt2sas_transport.c
+++ b/drivers/scsi/mpt2sas/mpt2sas_transport.c
@@ -1901,7 +1901,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1901 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost); 1901 struct MPT2SAS_ADAPTER *ioc = shost_priv(shost);
1902 Mpi2SmpPassthroughRequest_t *mpi_request; 1902 Mpi2SmpPassthroughRequest_t *mpi_request;
1903 Mpi2SmpPassthroughReply_t *mpi_reply; 1903 Mpi2SmpPassthroughReply_t *mpi_reply;
1904 int rc, i; 1904 int rc;
1905 u16 smid; 1905 u16 smid;
1906 u32 ioc_state; 1906 u32 ioc_state;
1907 unsigned long timeleft; 1907 unsigned long timeleft;
@@ -1916,7 +1916,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1916 void *pci_addr_out = NULL; 1916 void *pci_addr_out = NULL;
1917 u16 wait_state_count; 1917 u16 wait_state_count;
1918 struct request *rsp = req->next_rq; 1918 struct request *rsp = req->next_rq;
1919 struct bio_vec *bvec = NULL; 1919 struct bio_vec bvec;
1920 struct bvec_iter iter;
1920 1921
1921 if (!rsp) { 1922 if (!rsp) {
1922 printk(MPT2SAS_ERR_FMT "%s: the smp response space is " 1923 printk(MPT2SAS_ERR_FMT "%s: the smp response space is "
@@ -1942,7 +1943,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1942 ioc->transport_cmds.status = MPT2_CMD_PENDING; 1943 ioc->transport_cmds.status = MPT2_CMD_PENDING;
1943 1944
1944 /* Check if the request is split across multiple segments */ 1945 /* Check if the request is split across multiple segments */
1945 if (bio_segments(req->bio) > 1) { 1946 if (bio_multiple_segments(req->bio)) {
1946 u32 offset = 0; 1947 u32 offset = 0;
1947 1948
1948 /* Allocate memory and copy the request */ 1949 /* Allocate memory and copy the request */
@@ -1955,11 +1956,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1955 goto out; 1956 goto out;
1956 } 1957 }
1957 1958
1958 bio_for_each_segment(bvec, req->bio, i) { 1959 bio_for_each_segment(bvec, req->bio, iter) {
1959 memcpy(pci_addr_out + offset, 1960 memcpy(pci_addr_out + offset,
1960 page_address(bvec->bv_page) + bvec->bv_offset, 1961 page_address(bvec.bv_page) + bvec.bv_offset,
1961 bvec->bv_len); 1962 bvec.bv_len);
1962 offset += bvec->bv_len; 1963 offset += bvec.bv_len;
1963 } 1964 }
1964 } else { 1965 } else {
1965 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1966 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1974,7 +1975,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1974 1975
1975 /* Check if the response needs to be populated across 1976 /* Check if the response needs to be populated across
1976 * multiple segments */ 1977 * multiple segments */
1977 if (bio_segments(rsp->bio) > 1) { 1978 if (bio_multiple_segments(rsp->bio)) {
1978 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), 1979 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
1979 &pci_dma_in); 1980 &pci_dma_in);
1980 if (!pci_addr_in) { 1981 if (!pci_addr_in) {
@@ -2041,7 +2042,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2041 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT | 2042 sgl_flags = (MPI2_SGE_FLAGS_SIMPLE_ELEMENT |
2042 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC); 2043 MPI2_SGE_FLAGS_END_OF_BUFFER | MPI2_SGE_FLAGS_HOST_TO_IOC);
2043 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2044 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2044 if (bio_segments(req->bio) > 1) { 2045 if (bio_multiple_segments(req->bio)) {
2045 ioc->base_add_sg_single(psge, sgl_flags | 2046 ioc->base_add_sg_single(psge, sgl_flags |
2046 (blk_rq_bytes(req) - 4), pci_dma_out); 2047 (blk_rq_bytes(req) - 4), pci_dma_out);
2047 } else { 2048 } else {
@@ -2057,7 +2058,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2057 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER | 2058 MPI2_SGE_FLAGS_LAST_ELEMENT | MPI2_SGE_FLAGS_END_OF_BUFFER |
2058 MPI2_SGE_FLAGS_END_OF_LIST); 2059 MPI2_SGE_FLAGS_END_OF_LIST);
2059 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT; 2060 sgl_flags = sgl_flags << MPI2_SGE_FLAGS_SHIFT;
2060 if (bio_segments(rsp->bio) > 1) { 2061 if (bio_multiple_segments(rsp->bio)) {
2061 ioc->base_add_sg_single(psge, sgl_flags | 2062 ioc->base_add_sg_single(psge, sgl_flags |
2062 (blk_rq_bytes(rsp) + 4), pci_dma_in); 2063 (blk_rq_bytes(rsp) + 4), pci_dma_in);
2063 } else { 2064 } else {
@@ -2102,23 +2103,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2102 le16_to_cpu(mpi_reply->ResponseDataLength); 2103 le16_to_cpu(mpi_reply->ResponseDataLength);
2103 /* check if the resp needs to be copied from the allocated 2104 /* check if the resp needs to be copied from the allocated
2104 * pci mem */ 2105 * pci mem */
2105 if (bio_segments(rsp->bio) > 1) { 2106 if (bio_multiple_segments(rsp->bio)) {
2106 u32 offset = 0; 2107 u32 offset = 0;
2107 u32 bytes_to_copy = 2108 u32 bytes_to_copy =
2108 le16_to_cpu(mpi_reply->ResponseDataLength); 2109 le16_to_cpu(mpi_reply->ResponseDataLength);
2109 bio_for_each_segment(bvec, rsp->bio, i) { 2110 bio_for_each_segment(bvec, rsp->bio, iter) {
2110 if (bytes_to_copy <= bvec->bv_len) { 2111 if (bytes_to_copy <= bvec.bv_len) {
2111 memcpy(page_address(bvec->bv_page) + 2112 memcpy(page_address(bvec.bv_page) +
2112 bvec->bv_offset, pci_addr_in + 2113 bvec.bv_offset, pci_addr_in +
2113 offset, bytes_to_copy); 2114 offset, bytes_to_copy);
2114 break; 2115 break;
2115 } else { 2116 } else {
2116 memcpy(page_address(bvec->bv_page) + 2117 memcpy(page_address(bvec.bv_page) +
2117 bvec->bv_offset, pci_addr_in + 2118 bvec.bv_offset, pci_addr_in +
2118 offset, bvec->bv_len); 2119 offset, bvec.bv_len);
2119 bytes_to_copy -= bvec->bv_len; 2120 bytes_to_copy -= bvec.bv_len;
2120 } 2121 }
2121 offset += bvec->bv_len; 2122 offset += bvec.bv_len;
2122 } 2123 }
2123 } 2124 }
2124 } else { 2125 } else {
diff --git a/drivers/scsi/mpt3sas/mpt3sas_transport.c b/drivers/scsi/mpt3sas/mpt3sas_transport.c
index e771a88c6a74..65170cb1a00f 100644
--- a/drivers/scsi/mpt3sas/mpt3sas_transport.c
+++ b/drivers/scsi/mpt3sas/mpt3sas_transport.c
@@ -1884,7 +1884,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1884 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost); 1884 struct MPT3SAS_ADAPTER *ioc = shost_priv(shost);
1885 Mpi2SmpPassthroughRequest_t *mpi_request; 1885 Mpi2SmpPassthroughRequest_t *mpi_request;
1886 Mpi2SmpPassthroughReply_t *mpi_reply; 1886 Mpi2SmpPassthroughReply_t *mpi_reply;
1887 int rc, i; 1887 int rc;
1888 u16 smid; 1888 u16 smid;
1889 u32 ioc_state; 1889 u32 ioc_state;
1890 unsigned long timeleft; 1890 unsigned long timeleft;
@@ -1898,7 +1898,8 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1898 void *pci_addr_out = NULL; 1898 void *pci_addr_out = NULL;
1899 u16 wait_state_count; 1899 u16 wait_state_count;
1900 struct request *rsp = req->next_rq; 1900 struct request *rsp = req->next_rq;
1901 struct bio_vec *bvec = NULL; 1901 struct bio_vec bvec;
1902 struct bvec_iter iter;
1902 1903
1903 if (!rsp) { 1904 if (!rsp) {
1904 pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n", 1905 pr_err(MPT3SAS_FMT "%s: the smp response space is missing\n",
@@ -1925,7 +1926,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1925 ioc->transport_cmds.status = MPT3_CMD_PENDING; 1926 ioc->transport_cmds.status = MPT3_CMD_PENDING;
1926 1927
1927 /* Check if the request is split across multiple segments */ 1928 /* Check if the request is split across multiple segments */
1928 if (req->bio->bi_vcnt > 1) { 1929 if (bio_multiple_segments(req->bio)) {
1929 u32 offset = 0; 1930 u32 offset = 0;
1930 1931
1931 /* Allocate memory and copy the request */ 1932 /* Allocate memory and copy the request */
@@ -1938,11 +1939,11 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1938 goto out; 1939 goto out;
1939 } 1940 }
1940 1941
1941 bio_for_each_segment(bvec, req->bio, i) { 1942 bio_for_each_segment(bvec, req->bio, iter) {
1942 memcpy(pci_addr_out + offset, 1943 memcpy(pci_addr_out + offset,
1943 page_address(bvec->bv_page) + bvec->bv_offset, 1944 page_address(bvec.bv_page) + bvec.bv_offset,
1944 bvec->bv_len); 1945 bvec.bv_len);
1945 offset += bvec->bv_len; 1946 offset += bvec.bv_len;
1946 } 1947 }
1947 } else { 1948 } else {
1948 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio), 1949 dma_addr_out = pci_map_single(ioc->pdev, bio_data(req->bio),
@@ -1957,7 +1958,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
1957 1958
1958 /* Check if the response needs to be populated across 1959 /* Check if the response needs to be populated across
1959 * multiple segments */ 1960 * multiple segments */
1960 if (rsp->bio->bi_vcnt > 1) { 1961 if (bio_multiple_segments(rsp->bio)) {
1961 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp), 1962 pci_addr_in = pci_alloc_consistent(ioc->pdev, blk_rq_bytes(rsp),
1962 &pci_dma_in); 1963 &pci_dma_in);
1963 if (!pci_addr_in) { 1964 if (!pci_addr_in) {
@@ -2018,7 +2019,7 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2018 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4); 2019 mpi_request->RequestDataLength = cpu_to_le16(blk_rq_bytes(req) - 4);
2019 psge = &mpi_request->SGL; 2020 psge = &mpi_request->SGL;
2020 2021
2021 if (req->bio->bi_vcnt > 1) 2022 if (bio_multiple_segments(req->bio))
2022 ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4), 2023 ioc->build_sg(ioc, psge, pci_dma_out, (blk_rq_bytes(req) - 4),
2023 pci_dma_in, (blk_rq_bytes(rsp) + 4)); 2024 pci_dma_in, (blk_rq_bytes(rsp) + 4));
2024 else 2025 else
@@ -2063,23 +2064,23 @@ _transport_smp_handler(struct Scsi_Host *shost, struct sas_rphy *rphy,
2063 2064
2064 /* check if the resp needs to be copied from the allocated 2065 /* check if the resp needs to be copied from the allocated
2065 * pci mem */ 2066 * pci mem */
2066 if (rsp->bio->bi_vcnt > 1) { 2067 if (bio_multiple_segments(rsp->bio)) {
2067 u32 offset = 0; 2068 u32 offset = 0;
2068 u32 bytes_to_copy = 2069 u32 bytes_to_copy =
2069 le16_to_cpu(mpi_reply->ResponseDataLength); 2070 le16_to_cpu(mpi_reply->ResponseDataLength);
2070 bio_for_each_segment(bvec, rsp->bio, i) { 2071 bio_for_each_segment(bvec, rsp->bio, iter) {
2071 if (bytes_to_copy <= bvec->bv_len) { 2072 if (bytes_to_copy <= bvec.bv_len) {
2072 memcpy(page_address(bvec->bv_page) + 2073 memcpy(page_address(bvec.bv_page) +
2073 bvec->bv_offset, pci_addr_in + 2074 bvec.bv_offset, pci_addr_in +
2074 offset, bytes_to_copy); 2075 offset, bytes_to_copy);
2075 break; 2076 break;
2076 } else { 2077 } else {
2077 memcpy(page_address(bvec->bv_page) + 2078 memcpy(page_address(bvec.bv_page) +
2078 bvec->bv_offset, pci_addr_in + 2079 bvec.bv_offset, pci_addr_in +
2079 offset, bvec->bv_len); 2080 offset, bvec.bv_len);
2080 bytes_to_copy -= bvec->bv_len; 2081 bytes_to_copy -= bvec.bv_len;
2081 } 2082 }
2082 offset += bvec->bv_len; 2083 offset += bvec.bv_len;
2083 } 2084 }
2084 } 2085 }
2085 } else { 2086 } else {
diff --git a/drivers/scsi/osd/osd_initiator.c b/drivers/scsi/osd/osd_initiator.c
index aa66361ed44b..bac04c2335aa 100644
--- a/drivers/scsi/osd/osd_initiator.c
+++ b/drivers/scsi/osd/osd_initiator.c
@@ -731,7 +731,7 @@ static int _osd_req_list_objects(struct osd_request *or,
731 731
732 bio->bi_rw &= ~REQ_WRITE; 732 bio->bi_rw &= ~REQ_WRITE;
733 or->in.bio = bio; 733 or->in.bio = bio;
734 or->in.total_bytes = bio->bi_size; 734 or->in.total_bytes = bio->bi_iter.bi_size;
735 return 0; 735 return 0;
736} 736}
737 737
diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
index 69725f7c32c1..5c8a3b696a1d 100644
--- a/drivers/scsi/sd.c
+++ b/drivers/scsi/sd.c
@@ -801,7 +801,7 @@ static int sd_setup_write_same_cmnd(struct scsi_device *sdp, struct request *rq)
801 if (sdkp->device->no_write_same) 801 if (sdkp->device->no_write_same)
802 return BLKPREP_KILL; 802 return BLKPREP_KILL;
803 803
804 BUG_ON(bio_offset(bio) || bio_iovec(bio)->bv_len != sdp->sector_size); 804 BUG_ON(bio_offset(bio) || bio_iovec(bio).bv_len != sdp->sector_size);
805 805
806 sector >>= ilog2(sdp->sector_size) - 9; 806 sector >>= ilog2(sdp->sector_size) - 9;
807 nr_sectors >>= ilog2(sdp->sector_size) - 9; 807 nr_sectors >>= ilog2(sdp->sector_size) - 9;
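The single-character-looking change in sd.c is really a type change: bio_iovec() now evaluates to a struct bio_vec value (the current segment as seen through bi_iter) rather than a pointer into bi_io_vec, hence the '.' instead of '->'. A tiny sketch, with example_first_seg_len() as an invented name:

#include <linux/bio.h>

/* Illustrative: bio_iovec() returns a bio_vec by value after the series,
 * so callers take members with '.' and copy it to a local to keep it. */
static unsigned int example_first_seg_len(struct bio *bio)
{
        struct bio_vec first = bio_iovec(bio);  /* was: struct bio_vec *first */

        return first.bv_len;
}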
diff --git a/drivers/scsi/sd_dif.c b/drivers/scsi/sd_dif.c
index 6174ca4ea275..a7a691d0af7d 100644
--- a/drivers/scsi/sd_dif.c
+++ b/drivers/scsi/sd_dif.c
@@ -365,7 +365,6 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
365 struct bio *bio; 365 struct bio *bio;
366 struct scsi_disk *sdkp; 366 struct scsi_disk *sdkp;
367 struct sd_dif_tuple *sdt; 367 struct sd_dif_tuple *sdt;
368 unsigned int i, j;
369 u32 phys, virt; 368 u32 phys, virt;
370 369
371 sdkp = rq->bio->bi_bdev->bd_disk->private_data; 370 sdkp = rq->bio->bi_bdev->bd_disk->private_data;
@@ -376,19 +375,21 @@ void sd_dif_prepare(struct request *rq, sector_t hw_sector,
376 phys = hw_sector & 0xffffffff; 375 phys = hw_sector & 0xffffffff;
377 376
378 __rq_for_each_bio(bio, rq) { 377 __rq_for_each_bio(bio, rq) {
379 struct bio_vec *iv; 378 struct bio_vec iv;
379 struct bvec_iter iter;
380 unsigned int j;
380 381
381 /* Already remapped? */ 382 /* Already remapped? */
382 if (bio_flagged(bio, BIO_MAPPED_INTEGRITY)) 383 if (bio_flagged(bio, BIO_MAPPED_INTEGRITY))
383 break; 384 break;
384 385
385 virt = bio->bi_integrity->bip_sector & 0xffffffff; 386 virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
386 387
387 bip_for_each_vec(iv, bio->bi_integrity, i) { 388 bip_for_each_vec(iv, bio->bi_integrity, iter) {
388 sdt = kmap_atomic(iv->bv_page) 389 sdt = kmap_atomic(iv.bv_page)
389 + iv->bv_offset; 390 + iv.bv_offset;
390 391
391 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { 392 for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
392 393
393 if (be32_to_cpu(sdt->ref_tag) == virt) 394 if (be32_to_cpu(sdt->ref_tag) == virt)
394 sdt->ref_tag = cpu_to_be32(phys); 395 sdt->ref_tag = cpu_to_be32(phys);
@@ -414,7 +415,7 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
414 struct scsi_disk *sdkp; 415 struct scsi_disk *sdkp;
415 struct bio *bio; 416 struct bio *bio;
416 struct sd_dif_tuple *sdt; 417 struct sd_dif_tuple *sdt;
417 unsigned int i, j, sectors, sector_sz; 418 unsigned int j, sectors, sector_sz;
418 u32 phys, virt; 419 u32 phys, virt;
419 420
420 sdkp = scsi_disk(scmd->request->rq_disk); 421 sdkp = scsi_disk(scmd->request->rq_disk);
@@ -430,15 +431,16 @@ void sd_dif_complete(struct scsi_cmnd *scmd, unsigned int good_bytes)
430 phys >>= 3; 431 phys >>= 3;
431 432
432 __rq_for_each_bio(bio, scmd->request) { 433 __rq_for_each_bio(bio, scmd->request) {
433 struct bio_vec *iv; 434 struct bio_vec iv;
435 struct bvec_iter iter;
434 436
435 virt = bio->bi_integrity->bip_sector & 0xffffffff; 437 virt = bio->bi_integrity->bip_iter.bi_sector & 0xffffffff;
436 438
437 bip_for_each_vec(iv, bio->bi_integrity, i) { 439 bip_for_each_vec(iv, bio->bi_integrity, iter) {
438 sdt = kmap_atomic(iv->bv_page) 440 sdt = kmap_atomic(iv.bv_page)
439 + iv->bv_offset; 441 + iv.bv_offset;
440 442
441 for (j = 0 ; j < iv->bv_len ; j += tuple_sz, sdt++) { 443 for (j = 0; j < iv.bv_len; j += tuple_sz, sdt++) {
442 444
443 if (sectors == 0) { 445 if (sectors == 0) {
444 kunmap_atomic(sdt); 446 kunmap_atomic(sdt);
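sd_dif shows the same conversion applied to the integrity payload: bip_sector moves into bip->bip_iter.bi_sector, and bip_for_each_vec() now takes a struct bvec_iter and yields bio_vecs by value. A minimal sketch of walking the protection buffer with the new names; example_walk_integrity() is an invented name and the 8-byte tuple size is an assumption matching the DIF tuples used here:

#include <linux/bio.h>
#include <linux/highmem.h>

/* Illustrative: visit each chunk of protection information attached to a
 * bio, tracking the virtual reference tag the way sd_dif does. */
static void example_walk_integrity(struct bio *bio)
{
        struct bio_integrity_payload *bip = bio->bi_integrity;
        struct bio_vec iv;                       /* was: struct bio_vec *iv */
        struct bvec_iter iter;                   /* was: unsigned int i */
        sector_t virt = bip->bip_iter.bi_sector; /* was: bip->bip_sector */

        bip_for_each_vec(iv, bip, iter) {
                void *buf = kmap_atomic(iv.bv_page) + iv.bv_offset;

                /* iv.bv_len bytes of tuples for the sectors starting at
                 * 'virt' are mapped at 'buf' here. */
                kunmap_atomic(buf);
                virt += iv.bv_len / 8;           /* assumes 8-byte DIF tuples */
        }
}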
diff --git a/drivers/staging/lustre/lustre/llite/lloop.c b/drivers/staging/lustre/lustre/llite/lloop.c
index e2421ea61352..581ff78be1a2 100644
--- a/drivers/staging/lustre/lustre/llite/lloop.c
+++ b/drivers/staging/lustre/lustre/llite/lloop.c
@@ -194,10 +194,10 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
194 struct cl_object *obj = ll_i2info(inode)->lli_clob; 194 struct cl_object *obj = ll_i2info(inode)->lli_clob;
195 pgoff_t offset; 195 pgoff_t offset;
196 int ret; 196 int ret;
197 int i;
198 int rw; 197 int rw;
199 obd_count page_count = 0; 198 obd_count page_count = 0;
200 struct bio_vec *bvec; 199 struct bio_vec bvec;
200 struct bvec_iter iter;
201 struct bio *bio; 201 struct bio *bio;
202 ssize_t bytes; 202 ssize_t bytes;
203 203
@@ -220,15 +220,15 @@ static int do_bio_lustrebacked(struct lloop_device *lo, struct bio *head)
220 for (bio = head; bio != NULL; bio = bio->bi_next) { 220 for (bio = head; bio != NULL; bio = bio->bi_next) {
221 LASSERT(rw == bio->bi_rw); 221 LASSERT(rw == bio->bi_rw);
222 222
223 offset = (pgoff_t)(bio->bi_sector << 9) + lo->lo_offset; 223 offset = (pgoff_t)(bio->bi_iter.bi_sector << 9) + lo->lo_offset;
224 bio_for_each_segment(bvec, bio, i) { 224 bio_for_each_segment(bvec, bio, iter) {
225 BUG_ON(bvec->bv_offset != 0); 225 BUG_ON(bvec.bv_offset != 0);
226 BUG_ON(bvec->bv_len != PAGE_CACHE_SIZE); 226 BUG_ON(bvec.bv_len != PAGE_CACHE_SIZE);
227 227
228 pages[page_count] = bvec->bv_page; 228 pages[page_count] = bvec.bv_page;
229 offsets[page_count] = offset; 229 offsets[page_count] = offset;
230 page_count++; 230 page_count++;
231 offset += bvec->bv_len; 231 offset += bvec.bv_len;
232 } 232 }
233 LASSERT(page_count <= LLOOP_MAX_SEGMENTS); 233 LASSERT(page_count <= LLOOP_MAX_SEGMENTS);
234 } 234 }
@@ -313,7 +313,8 @@ static unsigned int loop_get_bio(struct lloop_device *lo, struct bio **req)
313 bio = &lo->lo_bio; 313 bio = &lo->lo_bio;
314 while (*bio && (*bio)->bi_rw == rw) { 314 while (*bio && (*bio)->bi_rw == rw) {
315 CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n", 315 CDEBUG(D_INFO, "bio sector %llu size %u count %u vcnt%u \n",
316 (unsigned long long)(*bio)->bi_sector, (*bio)->bi_size, 316 (unsigned long long)(*bio)->bi_iter.bi_sector,
317 (*bio)->bi_iter.bi_size,
317 page_count, (*bio)->bi_vcnt); 318 page_count, (*bio)->bi_vcnt);
318 if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS) 319 if (page_count + (*bio)->bi_vcnt > LLOOP_MAX_SEGMENTS)
319 break; 320 break;
@@ -347,7 +348,8 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
347 goto err; 348 goto err;
348 349
349 CDEBUG(D_INFO, "submit bio sector %llu size %u\n", 350 CDEBUG(D_INFO, "submit bio sector %llu size %u\n",
350 (unsigned long long)old_bio->bi_sector, old_bio->bi_size); 351 (unsigned long long)old_bio->bi_iter.bi_sector,
352 old_bio->bi_iter.bi_size);
351 353
352 spin_lock_irq(&lo->lo_lock); 354 spin_lock_irq(&lo->lo_lock);
353 inactive = (lo->lo_state != LLOOP_BOUND); 355 inactive = (lo->lo_state != LLOOP_BOUND);
@@ -367,7 +369,7 @@ static void loop_make_request(struct request_queue *q, struct bio *old_bio)
367 loop_add_bio(lo, old_bio); 369 loop_add_bio(lo, old_bio);
368 return; 370 return;
369err: 371err:
370 cfs_bio_io_error(old_bio, old_bio->bi_size); 372 cfs_bio_io_error(old_bio, old_bio->bi_iter.bi_size);
371} 373}
372 374
373 375
@@ -378,7 +380,7 @@ static inline void loop_handle_bio(struct lloop_device *lo, struct bio *bio)
378 while (bio) { 380 while (bio) {
379 struct bio *tmp = bio->bi_next; 381 struct bio *tmp = bio->bi_next;
380 bio->bi_next = NULL; 382 bio->bi_next = NULL;
381 cfs_bio_endio(bio, bio->bi_size, ret); 383 cfs_bio_endio(bio, bio->bi_iter.bi_size, ret);
382 bio = tmp; 384 bio = tmp;
383 } 385 }
384} 386}
diff --git a/drivers/staging/zram/zram_drv.c b/drivers/staging/zram/zram_drv.c
index 3277d9838f4e..108f2733106d 100644
--- a/drivers/staging/zram/zram_drv.c
+++ b/drivers/staging/zram/zram_drv.c
@@ -171,13 +171,14 @@ static inline int valid_io_request(struct zram *zram, struct bio *bio)
171 u64 start, end, bound; 171 u64 start, end, bound;
172 172
173 /* unaligned request */ 173 /* unaligned request */
174 if (unlikely(bio->bi_sector & (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1))) 174 if (unlikely(bio->bi_iter.bi_sector &
175 (ZRAM_SECTOR_PER_LOGICAL_BLOCK - 1)))
175 return 0; 176 return 0;
176 if (unlikely(bio->bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1))) 177 if (unlikely(bio->bi_iter.bi_size & (ZRAM_LOGICAL_BLOCK_SIZE - 1)))
177 return 0; 178 return 0;
178 179
179 start = bio->bi_sector; 180 start = bio->bi_iter.bi_sector;
180 end = start + (bio->bi_size >> SECTOR_SHIFT); 181 end = start + (bio->bi_iter.bi_size >> SECTOR_SHIFT);
181 bound = zram->disksize >> SECTOR_SHIFT; 182 bound = zram->disksize >> SECTOR_SHIFT;
182 /* out of range range */ 183 /* out of range range */
183 if (unlikely(start >= bound || end > bound || start > end)) 184 if (unlikely(start >= bound || end > bound || start > end))
@@ -680,9 +681,10 @@ out:
680 681
681static void __zram_make_request(struct zram *zram, struct bio *bio, int rw) 682static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
682{ 683{
683 int i, offset; 684 int offset;
684 u32 index; 685 u32 index;
685 struct bio_vec *bvec; 686 struct bio_vec bvec;
687 struct bvec_iter iter;
686 688
687 switch (rw) { 689 switch (rw) {
688 case READ: 690 case READ:
@@ -693,36 +695,37 @@ static void __zram_make_request(struct zram *zram, struct bio *bio, int rw)
693 break; 695 break;
694 } 696 }
695 697
696 index = bio->bi_sector >> SECTORS_PER_PAGE_SHIFT; 698 index = bio->bi_iter.bi_sector >> SECTORS_PER_PAGE_SHIFT;
697 offset = (bio->bi_sector & (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT; 699 offset = (bio->bi_iter.bi_sector &
700 (SECTORS_PER_PAGE - 1)) << SECTOR_SHIFT;
698 701
699 bio_for_each_segment(bvec, bio, i) { 702 bio_for_each_segment(bvec, bio, iter) {
700 int max_transfer_size = PAGE_SIZE - offset; 703 int max_transfer_size = PAGE_SIZE - offset;
701 704
702 if (bvec->bv_len > max_transfer_size) { 705 if (bvec.bv_len > max_transfer_size) {
703 /* 706 /*
704 * zram_bvec_rw() can only make operation on a single 707 * zram_bvec_rw() can only make operation on a single
705 * zram page. Split the bio vector. 708 * zram page. Split the bio vector.
706 */ 709 */
707 struct bio_vec bv; 710 struct bio_vec bv;
708 711
709 bv.bv_page = bvec->bv_page; 712 bv.bv_page = bvec.bv_page;
710 bv.bv_len = max_transfer_size; 713 bv.bv_len = max_transfer_size;
711 bv.bv_offset = bvec->bv_offset; 714 bv.bv_offset = bvec.bv_offset;
712 715
713 if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0) 716 if (zram_bvec_rw(zram, &bv, index, offset, bio, rw) < 0)
714 goto out; 717 goto out;
715 718
716 bv.bv_len = bvec->bv_len - max_transfer_size; 719 bv.bv_len = bvec.bv_len - max_transfer_size;
717 bv.bv_offset += max_transfer_size; 720 bv.bv_offset += max_transfer_size;
718 if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0) 721 if (zram_bvec_rw(zram, &bv, index+1, 0, bio, rw) < 0)
719 goto out; 722 goto out;
720 } else 723 } else
721 if (zram_bvec_rw(zram, bvec, index, offset, bio, rw) 724 if (zram_bvec_rw(zram, &bvec, index, offset, bio, rw)
722 < 0) 725 < 0)
723 goto out; 726 goto out;
724 727
725 update_position(&index, &offset, bvec); 728 update_position(&index, &offset, &bvec);
726 } 729 }
727 730
728 set_bit(BIO_UPTODATE, &bio->bi_flags); 731 set_bit(BIO_UPTODATE, &bio->bi_flags);
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index c87959f12760..2d29356d0c85 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -319,7 +319,7 @@ iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
319 bio->bi_bdev = ib_dev->ibd_bd; 319 bio->bi_bdev = ib_dev->ibd_bd;
320 bio->bi_private = cmd; 320 bio->bi_private = cmd;
321 bio->bi_end_io = &iblock_bio_done; 321 bio->bi_end_io = &iblock_bio_done;
322 bio->bi_sector = lba; 322 bio->bi_iter.bi_sector = lba;
323 323
324 return bio; 324 return bio;
325} 325}
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index fc60b31453ee..80d972d739e5 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -134,8 +134,7 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
134 return 0; 134 return 0;
135 } 135 }
136 136
137 iv = bip_vec_idx(bip, bip->bip_vcnt); 137 iv = bip->bip_vec + bip->bip_vcnt;
138 BUG_ON(iv == NULL);
139 138
140 iv->bv_page = page; 139 iv->bv_page = page;
141 iv->bv_len = len; 140 iv->bv_len = len;
@@ -203,6 +202,12 @@ static inline unsigned int bio_integrity_hw_sectors(struct blk_integrity *bi,
203 return sectors; 202 return sectors;
204} 203}
205 204
205static inline unsigned int bio_integrity_bytes(struct blk_integrity *bi,
206 unsigned int sectors)
207{
208 return bio_integrity_hw_sectors(bi, sectors) * bi->tuple_size;
209}
210
206/** 211/**
207 * bio_integrity_tag_size - Retrieve integrity tag space 212 * bio_integrity_tag_size - Retrieve integrity tag space
208 * @bio: bio to inspect 213 * @bio: bio to inspect
@@ -215,9 +220,9 @@ unsigned int bio_integrity_tag_size(struct bio *bio)
215{ 220{
216 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 221 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
217 222
218 BUG_ON(bio->bi_size == 0); 223 BUG_ON(bio->bi_iter.bi_size == 0);
219 224
220 return bi->tag_size * (bio->bi_size / bi->sector_size); 225 return bi->tag_size * (bio->bi_iter.bi_size / bi->sector_size);
221} 226}
222EXPORT_SYMBOL(bio_integrity_tag_size); 227EXPORT_SYMBOL(bio_integrity_tag_size);
223 228
@@ -235,9 +240,9 @@ int bio_integrity_tag(struct bio *bio, void *tag_buf, unsigned int len, int set)
235 nr_sectors = bio_integrity_hw_sectors(bi, 240 nr_sectors = bio_integrity_hw_sectors(bi,
236 DIV_ROUND_UP(len, bi->tag_size)); 241 DIV_ROUND_UP(len, bi->tag_size));
237 242
238 if (nr_sectors * bi->tuple_size > bip->bip_size) { 243 if (nr_sectors * bi->tuple_size > bip->bip_iter.bi_size) {
239 printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", 244 printk(KERN_ERR "%s: tag too big for bio: %u > %u\n", __func__,
240 __func__, nr_sectors * bi->tuple_size, bip->bip_size); 245 nr_sectors * bi->tuple_size, bip->bip_iter.bi_size);
241 return -1; 246 return -1;
242 } 247 }
243 248
@@ -299,29 +304,30 @@ static void bio_integrity_generate(struct bio *bio)
299{ 304{
300 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 305 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
301 struct blk_integrity_exchg bix; 306 struct blk_integrity_exchg bix;
302 struct bio_vec *bv; 307 struct bio_vec bv;
303 sector_t sector = bio->bi_sector; 308 struct bvec_iter iter;
304 unsigned int i, sectors, total; 309 sector_t sector = bio->bi_iter.bi_sector;
310 unsigned int sectors, total;
305 void *prot_buf = bio->bi_integrity->bip_buf; 311 void *prot_buf = bio->bi_integrity->bip_buf;
306 312
307 total = 0; 313 total = 0;
308 bix.disk_name = bio->bi_bdev->bd_disk->disk_name; 314 bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
309 bix.sector_size = bi->sector_size; 315 bix.sector_size = bi->sector_size;
310 316
311 bio_for_each_segment(bv, bio, i) { 317 bio_for_each_segment(bv, bio, iter) {
312 void *kaddr = kmap_atomic(bv->bv_page); 318 void *kaddr = kmap_atomic(bv.bv_page);
313 bix.data_buf = kaddr + bv->bv_offset; 319 bix.data_buf = kaddr + bv.bv_offset;
314 bix.data_size = bv->bv_len; 320 bix.data_size = bv.bv_len;
315 bix.prot_buf = prot_buf; 321 bix.prot_buf = prot_buf;
316 bix.sector = sector; 322 bix.sector = sector;
317 323
318 bi->generate_fn(&bix); 324 bi->generate_fn(&bix);
319 325
320 sectors = bv->bv_len / bi->sector_size; 326 sectors = bv.bv_len / bi->sector_size;
321 sector += sectors; 327 sector += sectors;
322 prot_buf += sectors * bi->tuple_size; 328 prot_buf += sectors * bi->tuple_size;
323 total += sectors * bi->tuple_size; 329 total += sectors * bi->tuple_size;
324 BUG_ON(total > bio->bi_integrity->bip_size); 330 BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
325 331
326 kunmap_atomic(kaddr); 332 kunmap_atomic(kaddr);
327 } 333 }
@@ -386,8 +392,8 @@ int bio_integrity_prep(struct bio *bio)
386 392
387 bip->bip_owns_buf = 1; 393 bip->bip_owns_buf = 1;
388 bip->bip_buf = buf; 394 bip->bip_buf = buf;
389 bip->bip_size = len; 395 bip->bip_iter.bi_size = len;
390 bip->bip_sector = bio->bi_sector; 396 bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;
391 397
392 /* Map it */ 398 /* Map it */
393 offset = offset_in_page(buf); 399 offset = offset_in_page(buf);
@@ -441,19 +447,20 @@ static int bio_integrity_verify(struct bio *bio)
441{ 447{
442 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 448 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
443 struct blk_integrity_exchg bix; 449 struct blk_integrity_exchg bix;
444 struct bio_vec *bv; 450 struct bio_vec bv;
445 sector_t sector = bio->bi_integrity->bip_sector; 451 struct bvec_iter iter;
446 unsigned int i, sectors, total, ret; 452 sector_t sector = bio->bi_integrity->bip_iter.bi_sector;
453 unsigned int sectors, total, ret;
447 void *prot_buf = bio->bi_integrity->bip_buf; 454 void *prot_buf = bio->bi_integrity->bip_buf;
448 455
449 ret = total = 0; 456 ret = total = 0;
450 bix.disk_name = bio->bi_bdev->bd_disk->disk_name; 457 bix.disk_name = bio->bi_bdev->bd_disk->disk_name;
451 bix.sector_size = bi->sector_size; 458 bix.sector_size = bi->sector_size;
452 459
453 bio_for_each_segment(bv, bio, i) { 460 bio_for_each_segment(bv, bio, iter) {
454 void *kaddr = kmap_atomic(bv->bv_page); 461 void *kaddr = kmap_atomic(bv.bv_page);
455 bix.data_buf = kaddr + bv->bv_offset; 462 bix.data_buf = kaddr + bv.bv_offset;
456 bix.data_size = bv->bv_len; 463 bix.data_size = bv.bv_len;
457 bix.prot_buf = prot_buf; 464 bix.prot_buf = prot_buf;
458 bix.sector = sector; 465 bix.sector = sector;
459 466
@@ -464,11 +471,11 @@ static int bio_integrity_verify(struct bio *bio)
464 return ret; 471 return ret;
465 } 472 }
466 473
467 sectors = bv->bv_len / bi->sector_size; 474 sectors = bv.bv_len / bi->sector_size;
468 sector += sectors; 475 sector += sectors;
469 prot_buf += sectors * bi->tuple_size; 476 prot_buf += sectors * bi->tuple_size;
470 total += sectors * bi->tuple_size; 477 total += sectors * bi->tuple_size;
471 BUG_ON(total > bio->bi_integrity->bip_size); 478 BUG_ON(total > bio->bi_integrity->bip_iter.bi_size);
472 479
473 kunmap_atomic(kaddr); 480 kunmap_atomic(kaddr);
474 } 481 }
@@ -495,7 +502,7 @@ static void bio_integrity_verify_fn(struct work_struct *work)
495 502
496 /* Restore original bio completion handler */ 503 /* Restore original bio completion handler */
497 bio->bi_end_io = bip->bip_end_io; 504 bio->bi_end_io = bip->bip_end_io;
498 bio_endio(bio, error); 505 bio_endio_nodec(bio, error);
499} 506}
500 507
501/** 508/**
@@ -533,56 +540,6 @@ void bio_integrity_endio(struct bio *bio, int error)
533EXPORT_SYMBOL(bio_integrity_endio); 540EXPORT_SYMBOL(bio_integrity_endio);
534 541
535/** 542/**
536 * bio_integrity_mark_head - Advance bip_vec skip bytes
537 * @bip: Integrity vector to advance
538 * @skip: Number of bytes to advance it
539 */
540void bio_integrity_mark_head(struct bio_integrity_payload *bip,
541 unsigned int skip)
542{
543 struct bio_vec *iv;
544 unsigned int i;
545
546 bip_for_each_vec(iv, bip, i) {
547 if (skip == 0) {
548 bip->bip_idx = i;
549 return;
550 } else if (skip >= iv->bv_len) {
551 skip -= iv->bv_len;
552 } else { /* skip < iv->bv_len) */
553 iv->bv_offset += skip;
554 iv->bv_len -= skip;
555 bip->bip_idx = i;
556 return;
557 }
558 }
559}
560
561/**
562 * bio_integrity_mark_tail - Truncate bip_vec to be len bytes long
563 * @bip: Integrity vector to truncate
564 * @len: New length of integrity vector
565 */
566void bio_integrity_mark_tail(struct bio_integrity_payload *bip,
567 unsigned int len)
568{
569 struct bio_vec *iv;
570 unsigned int i;
571
572 bip_for_each_vec(iv, bip, i) {
573 if (len == 0) {
574 bip->bip_vcnt = i;
575 return;
576 } else if (len >= iv->bv_len) {
577 len -= iv->bv_len;
578 } else { /* len < iv->bv_len) */
579 iv->bv_len = len;
580 len = 0;
581 }
582 }
583}
584
585/**
586 * bio_integrity_advance - Advance integrity vector 543 * bio_integrity_advance - Advance integrity vector
587 * @bio: bio whose integrity vector to update 544 * @bio: bio whose integrity vector to update
588 * @bytes_done: number of data bytes that have been completed 545 * @bytes_done: number of data bytes that have been completed
@@ -595,13 +552,9 @@ void bio_integrity_advance(struct bio *bio, unsigned int bytes_done)
595{ 552{
596 struct bio_integrity_payload *bip = bio->bi_integrity; 553 struct bio_integrity_payload *bip = bio->bi_integrity;
597 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 554 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
598 unsigned int nr_sectors; 555 unsigned bytes = bio_integrity_bytes(bi, bytes_done >> 9);
599
600 BUG_ON(bip == NULL);
601 BUG_ON(bi == NULL);
602 556
603 nr_sectors = bio_integrity_hw_sectors(bi, bytes_done >> 9); 557 bvec_iter_advance(bip->bip_vec, &bip->bip_iter, bytes);
604 bio_integrity_mark_head(bip, nr_sectors * bi->tuple_size);
605} 558}
606EXPORT_SYMBOL(bio_integrity_advance); 559EXPORT_SYMBOL(bio_integrity_advance);
607 560
@@ -621,64 +574,13 @@ void bio_integrity_trim(struct bio *bio, unsigned int offset,
621{ 574{
622 struct bio_integrity_payload *bip = bio->bi_integrity; 575 struct bio_integrity_payload *bip = bio->bi_integrity;
623 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev); 576 struct blk_integrity *bi = bdev_get_integrity(bio->bi_bdev);
624 unsigned int nr_sectors;
625 577
626 BUG_ON(bip == NULL); 578 bio_integrity_advance(bio, offset << 9);
627 BUG_ON(bi == NULL); 579 bip->bip_iter.bi_size = bio_integrity_bytes(bi, sectors);
628 BUG_ON(!bio_flagged(bio, BIO_CLONED));
629
630 nr_sectors = bio_integrity_hw_sectors(bi, sectors);
631 bip->bip_sector = bip->bip_sector + offset;
632 bio_integrity_mark_head(bip, offset * bi->tuple_size);
633 bio_integrity_mark_tail(bip, sectors * bi->tuple_size);
634} 580}
635EXPORT_SYMBOL(bio_integrity_trim); 581EXPORT_SYMBOL(bio_integrity_trim);
636 582
637/** 583/**
638 * bio_integrity_split - Split integrity metadata
639 * @bio: Protected bio
640 * @bp: Resulting bio_pair
641 * @sectors: Offset
642 *
643 * Description: Splits an integrity page into a bio_pair.
644 */
645void bio_integrity_split(struct bio *bio, struct bio_pair *bp, int sectors)
646{
647 struct blk_integrity *bi;
648 struct bio_integrity_payload *bip = bio->bi_integrity;
649 unsigned int nr_sectors;
650
651 if (bio_integrity(bio) == 0)
652 return;
653
654 bi = bdev_get_integrity(bio->bi_bdev);
655 BUG_ON(bi == NULL);
656 BUG_ON(bip->bip_vcnt != 1);
657
658 nr_sectors = bio_integrity_hw_sectors(bi, sectors);
659
660 bp->bio1.bi_integrity = &bp->bip1;
661 bp->bio2.bi_integrity = &bp->bip2;
662
663 bp->iv1 = bip->bip_vec[bip->bip_idx];
664 bp->iv2 = bip->bip_vec[bip->bip_idx];
665
666 bp->bip1.bip_vec = &bp->iv1;
667 bp->bip2.bip_vec = &bp->iv2;
668
669 bp->iv1.bv_len = sectors * bi->tuple_size;
670 bp->iv2.bv_offset += sectors * bi->tuple_size;
671 bp->iv2.bv_len -= sectors * bi->tuple_size;
672
673 bp->bip1.bip_sector = bio->bi_integrity->bip_sector;
674 bp->bip2.bip_sector = bio->bi_integrity->bip_sector + nr_sectors;
675
676 bp->bip1.bip_vcnt = bp->bip2.bip_vcnt = 1;
677 bp->bip1.bip_idx = bp->bip2.bip_idx = 0;
678}
679EXPORT_SYMBOL(bio_integrity_split);
680
681/**
682 * bio_integrity_clone - Callback for cloning bios with integrity metadata 584 * bio_integrity_clone - Callback for cloning bios with integrity metadata
683 * @bio: New bio 585 * @bio: New bio
684 * @bio_src: Original bio 586 * @bio_src: Original bio
@@ -702,9 +604,8 @@ int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
702 memcpy(bip->bip_vec, bip_src->bip_vec, 604 memcpy(bip->bip_vec, bip_src->bip_vec,
703 bip_src->bip_vcnt * sizeof(struct bio_vec)); 605 bip_src->bip_vcnt * sizeof(struct bio_vec));
704 606
705 bip->bip_sector = bip_src->bip_sector;
706 bip->bip_vcnt = bip_src->bip_vcnt; 607 bip->bip_vcnt = bip_src->bip_vcnt;
707 bip->bip_idx = bip_src->bip_idx; 608 bip->bip_iter = bip_src->bip_iter;
708 609
709 return 0; 610 return 0;
710} 611}
diff --git a/fs/bio.c b/fs/bio.c
index 33d79a4eb92d..75c49a382239 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -38,8 +38,6 @@
38 */ 38 */
39#define BIO_INLINE_VECS 4 39#define BIO_INLINE_VECS 4
40 40
41static mempool_t *bio_split_pool __read_mostly;
42
43/* 41/*
44 * if you change this list, also change bvec_alloc or things will 42 * if you change this list, also change bvec_alloc or things will
45 * break badly! cannot be bigger than what you can fit into an 43 * break badly! cannot be bigger than what you can fit into an
@@ -273,6 +271,7 @@ void bio_init(struct bio *bio)
273{ 271{
274 memset(bio, 0, sizeof(*bio)); 272 memset(bio, 0, sizeof(*bio));
275 bio->bi_flags = 1 << BIO_UPTODATE; 273 bio->bi_flags = 1 << BIO_UPTODATE;
274 atomic_set(&bio->bi_remaining, 1);
276 atomic_set(&bio->bi_cnt, 1); 275 atomic_set(&bio->bi_cnt, 1);
277} 276}
278EXPORT_SYMBOL(bio_init); 277EXPORT_SYMBOL(bio_init);
@@ -295,9 +294,35 @@ void bio_reset(struct bio *bio)
295 294
296 memset(bio, 0, BIO_RESET_BYTES); 295 memset(bio, 0, BIO_RESET_BYTES);
297 bio->bi_flags = flags|(1 << BIO_UPTODATE); 296 bio->bi_flags = flags|(1 << BIO_UPTODATE);
297 atomic_set(&bio->bi_remaining, 1);
298} 298}
299EXPORT_SYMBOL(bio_reset); 299EXPORT_SYMBOL(bio_reset);
300 300
301static void bio_chain_endio(struct bio *bio, int error)
302{
303 bio_endio(bio->bi_private, error);
304 bio_put(bio);
305}
306
307/**
308 * bio_chain - chain bio completions
309 *
310 * The caller won't have a bi_end_io called when @bio completes - instead,
311 * @parent's bi_end_io won't be called until both @parent and @bio have
312 * completed; the chained bio will also be freed when it completes.
313 *
314 * The caller must not set bi_private or bi_end_io in @bio.
315 */
316void bio_chain(struct bio *bio, struct bio *parent)
317{
318 BUG_ON(bio->bi_private || bio->bi_end_io);
319
320 bio->bi_private = parent;
321 bio->bi_end_io = bio_chain_endio;
322 atomic_inc(&parent->bi_remaining);
323}
324EXPORT_SYMBOL(bio_chain);
325
301static void bio_alloc_rescue(struct work_struct *work) 326static void bio_alloc_rescue(struct work_struct *work)
302{ 327{
303 struct bio_set *bs = container_of(work, struct bio_set, rescue_work); 328 struct bio_set *bs = container_of(work, struct bio_set, rescue_work);
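The bio_chain() helper added in the hunk above replaces ad-hoc parent/child completion plumbing: the parent's bi_end_io only runs once bi_remaining drops to zero. A minimal, hypothetical sketch of a driver issuing a side bio against its parent (issue_child_io() and its arguments are invented for illustration; page setup and error handling are elided):

	static void issue_child_io(struct bio *parent, struct block_device *bdev,
				   sector_t sector, struct bio_set *bs)
	{
		struct bio *child = bio_alloc_bioset(GFP_NOIO, 1, bs);

		child->bi_bdev = bdev;
		child->bi_iter.bi_sector = sector;
		/* ... bio_add_page() the payload here ... */

		/* Do not set bi_end_io/bi_private on the child; bio_chain()
		 * owns both, and the child is freed when it completes. */
		bio_chain(child, parent);
		submit_bio(parent->bi_rw, child);
	}

The parent is then submitted or completed as usual; its end_io fires only after the chained child has also finished.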
@@ -473,13 +498,13 @@ EXPORT_SYMBOL(bio_alloc_bioset);
473void zero_fill_bio(struct bio *bio) 498void zero_fill_bio(struct bio *bio)
474{ 499{
475 unsigned long flags; 500 unsigned long flags;
476 struct bio_vec *bv; 501 struct bio_vec bv;
477 int i; 502 struct bvec_iter iter;
478 503
479 bio_for_each_segment(bv, bio, i) { 504 bio_for_each_segment(bv, bio, iter) {
480 char *data = bvec_kmap_irq(bv, &flags); 505 char *data = bvec_kmap_irq(&bv, &flags);
481 memset(data, 0, bv->bv_len); 506 memset(data, 0, bv.bv_len);
482 flush_dcache_page(bv->bv_page); 507 flush_dcache_page(bv.bv_page);
483 bvec_kunmap_irq(data, &flags); 508 bvec_kunmap_irq(data, &flags);
484 } 509 }
485} 510}
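The zero_fill_bio() conversion above is the template for every iterator change in this series: bio_for_each_segment() now yields a struct bio_vec by value and keeps its position in a caller-owned struct bvec_iter, so walking a bio no longer touches bi_idx or the biovec itself. A small illustrative read-only walk in the same style (the helper name is made up, and in practice bio->bi_iter.bi_size already holds this total; the point is only the iteration idiom):

	static unsigned int bio_payload_bytes(struct bio *bio)
	{
		struct bio_vec bv;
		struct bvec_iter iter;
		unsigned int bytes = 0;

		bio_for_each_segment(bv, bio, iter)
			bytes += bv.bv_len;	/* bv is a copy; the bio is untouched */

		return bytes;
	}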
@@ -515,51 +540,49 @@ inline int bio_phys_segments(struct request_queue *q, struct bio *bio)
515EXPORT_SYMBOL(bio_phys_segments); 540EXPORT_SYMBOL(bio_phys_segments);
516 541
517/** 542/**
518 * __bio_clone - clone a bio 543 * __bio_clone_fast - clone a bio that shares the original bio's biovec
519 * @bio: destination bio 544 * @bio: destination bio
520 * @bio_src: bio to clone 545 * @bio_src: bio to clone
521 * 546 *
522 * Clone a &bio. Caller will own the returned bio, but not 547 * Clone a &bio. Caller will own the returned bio, but not
523 * the actual data it points to. Reference count of returned 548 * the actual data it points to. Reference count of returned
524 * bio will be one. 549 * bio will be one.
550 *
551 * Caller must ensure that @bio_src is not freed before @bio.
525 */ 552 */
526void __bio_clone(struct bio *bio, struct bio *bio_src) 553void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
527{ 554{
528 memcpy(bio->bi_io_vec, bio_src->bi_io_vec, 555 BUG_ON(bio->bi_pool && BIO_POOL_IDX(bio) != BIO_POOL_NONE);
529 bio_src->bi_max_vecs * sizeof(struct bio_vec));
530 556
531 /* 557 /*
532 * most users will be overriding ->bi_bdev with a new target, 558 * most users will be overriding ->bi_bdev with a new target,
533 * so we don't set nor calculate new physical/hw segment counts here 559 * so we don't set nor calculate new physical/hw segment counts here
534 */ 560 */
535 bio->bi_sector = bio_src->bi_sector;
536 bio->bi_bdev = bio_src->bi_bdev; 561 bio->bi_bdev = bio_src->bi_bdev;
537 bio->bi_flags |= 1 << BIO_CLONED; 562 bio->bi_flags |= 1 << BIO_CLONED;
538 bio->bi_rw = bio_src->bi_rw; 563 bio->bi_rw = bio_src->bi_rw;
539 bio->bi_vcnt = bio_src->bi_vcnt; 564 bio->bi_iter = bio_src->bi_iter;
540 bio->bi_size = bio_src->bi_size; 565 bio->bi_io_vec = bio_src->bi_io_vec;
541 bio->bi_idx = bio_src->bi_idx;
542} 566}
543EXPORT_SYMBOL(__bio_clone); 567EXPORT_SYMBOL(__bio_clone_fast);
544 568
545/** 569/**
546 * bio_clone_bioset - clone a bio 570 * bio_clone_fast - clone a bio that shares the original bio's biovec
547 * @bio: bio to clone 571 * @bio: bio to clone
548 * @gfp_mask: allocation priority 572 * @gfp_mask: allocation priority
549 * @bs: bio_set to allocate from 573 * @bs: bio_set to allocate from
550 * 574 *
551 * Like __bio_clone, only also allocates the returned bio 575 * Like __bio_clone_fast, only also allocates the returned bio
552 */ 576 */
553struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask, 577struct bio *bio_clone_fast(struct bio *bio, gfp_t gfp_mask, struct bio_set *bs)
554 struct bio_set *bs)
555{ 578{
556 struct bio *b; 579 struct bio *b;
557 580
558 b = bio_alloc_bioset(gfp_mask, bio->bi_max_vecs, bs); 581 b = bio_alloc_bioset(gfp_mask, 0, bs);
559 if (!b) 582 if (!b)
560 return NULL; 583 return NULL;
561 584
562 __bio_clone(b, bio); 585 __bio_clone_fast(b, bio);
563 586
564 if (bio_integrity(bio)) { 587 if (bio_integrity(bio)) {
565 int ret; 588 int ret;
@@ -574,6 +597,74 @@ struct bio *bio_clone_bioset(struct bio *bio, gfp_t gfp_mask,
574 597
575 return b; 598 return b;
576} 599}
600EXPORT_SYMBOL(bio_clone_fast);
601
602/**
603 * bio_clone_bioset - clone a bio
604 * @bio_src: bio to clone
605 * @gfp_mask: allocation priority
606 * @bs: bio_set to allocate from
607 *
608 * Clone bio. Caller will own the returned bio, but not the actual data it
609 * points to. Reference count of returned bio will be one.
610 */
611struct bio *bio_clone_bioset(struct bio *bio_src, gfp_t gfp_mask,
612 struct bio_set *bs)
613{
614 unsigned nr_iovecs = 0;
615 struct bvec_iter iter;
616 struct bio_vec bv;
617 struct bio *bio;
618
619 /*
620 * Pre immutable biovecs, __bio_clone() used to just do a memcpy from
621 * bio_src->bi_io_vec to bio->bi_io_vec.
622 *
623 * We can't do that anymore, because:
624 *
625 * - The point of cloning the biovec is to produce a bio with a biovec
626 * the caller can modify: bi_idx and bi_bvec_done should be 0.
627 *
628 * - The original bio could've had more than BIO_MAX_PAGES biovecs; if
629 * we tried to clone the whole thing bio_alloc_bioset() would fail.
630 * But the clone should succeed as long as the number of biovecs we
631 * actually need to allocate is fewer than BIO_MAX_PAGES.
632 *
633 * - Lastly, bi_vcnt should not be looked at or relied upon by code
634 * that does not own the bio - reason being drivers don't use it for
635 * iterating over the biovec anymore, so expecting it to be kept up
636 * to date (i.e. for clones that share the parent biovec) is just
637 * asking for trouble and would force extra work on
638 * __bio_clone_fast() anyways.
639 */
640
641 bio_for_each_segment(bv, bio_src, iter)
642 nr_iovecs++;
643
644 bio = bio_alloc_bioset(gfp_mask, nr_iovecs, bs);
645 if (!bio)
646 return NULL;
647
648 bio->bi_bdev = bio_src->bi_bdev;
649 bio->bi_rw = bio_src->bi_rw;
650 bio->bi_iter.bi_sector = bio_src->bi_iter.bi_sector;
651 bio->bi_iter.bi_size = bio_src->bi_iter.bi_size;
652
653 bio_for_each_segment(bv, bio_src, iter)
654 bio->bi_io_vec[bio->bi_vcnt++] = bv;
655
656 if (bio_integrity(bio_src)) {
657 int ret;
658
659 ret = bio_integrity_clone(bio, bio_src, gfp_mask);
660 if (ret < 0) {
661 bio_put(bio);
662 return NULL;
663 }
664 }
665
666 return bio;
667}
577EXPORT_SYMBOL(bio_clone_bioset); 668EXPORT_SYMBOL(bio_clone_bioset);
578 669
579/** 670/**
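The comment block in the new bio_clone_bioset() explains why a full clone now copies only the segments the source iterator still covers. As a hedged sketch of how the two clone flavours are meant to be chosen (remap_bio() and its flag are hypothetical; bio_clone_fast() shares the source biovec and so requires the source to outlive the clone, while bio_clone_bioset() hands back an independent copy):

	static struct bio *remap_bio(struct bio *src, struct block_device *target,
				     bool src_outlives_clone, struct bio_set *bs)
	{
		struct bio *clone = src_outlives_clone
			? bio_clone_fast(src, GFP_NOIO, bs)	/* shares src->bi_io_vec */
			: bio_clone_bioset(src, GFP_NOIO, bs);	/* copies remaining segments */

		if (!clone)
			return NULL;

		clone->bi_bdev = target;
		return clone;
	}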
@@ -612,7 +703,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
612 if (unlikely(bio_flagged(bio, BIO_CLONED))) 703 if (unlikely(bio_flagged(bio, BIO_CLONED)))
613 return 0; 704 return 0;
614 705
615 if (((bio->bi_size + len) >> 9) > max_sectors) 706 if (((bio->bi_iter.bi_size + len) >> 9) > max_sectors)
616 return 0; 707 return 0;
617 708
618 /* 709 /*
@@ -635,8 +726,9 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
635 simulate merging updated prev_bvec 726 simulate merging updated prev_bvec
636 as new bvec. */ 727 as new bvec. */
637 .bi_bdev = bio->bi_bdev, 728 .bi_bdev = bio->bi_bdev,
638 .bi_sector = bio->bi_sector, 729 .bi_sector = bio->bi_iter.bi_sector,
639 .bi_size = bio->bi_size - prev_bv_len, 730 .bi_size = bio->bi_iter.bi_size -
731 prev_bv_len,
640 .bi_rw = bio->bi_rw, 732 .bi_rw = bio->bi_rw,
641 }; 733 };
642 734
@@ -684,8 +776,8 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
684 if (q->merge_bvec_fn) { 776 if (q->merge_bvec_fn) {
685 struct bvec_merge_data bvm = { 777 struct bvec_merge_data bvm = {
686 .bi_bdev = bio->bi_bdev, 778 .bi_bdev = bio->bi_bdev,
687 .bi_sector = bio->bi_sector, 779 .bi_sector = bio->bi_iter.bi_sector,
688 .bi_size = bio->bi_size, 780 .bi_size = bio->bi_iter.bi_size,
689 .bi_rw = bio->bi_rw, 781 .bi_rw = bio->bi_rw,
690 }; 782 };
691 783
@@ -708,7 +800,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
708 bio->bi_vcnt++; 800 bio->bi_vcnt++;
709 bio->bi_phys_segments++; 801 bio->bi_phys_segments++;
710 done: 802 done:
711 bio->bi_size += len; 803 bio->bi_iter.bi_size += len;
712 return len; 804 return len;
713} 805}
714 806
@@ -807,28 +899,7 @@ void bio_advance(struct bio *bio, unsigned bytes)
807 if (bio_integrity(bio)) 899 if (bio_integrity(bio))
808 bio_integrity_advance(bio, bytes); 900 bio_integrity_advance(bio, bytes);
809 901
810 bio->bi_sector += bytes >> 9; 902 bio_advance_iter(bio, &bio->bi_iter, bytes);
811 bio->bi_size -= bytes;
812
813 if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
814 return;
815
816 while (bytes) {
817 if (unlikely(bio->bi_idx >= bio->bi_vcnt)) {
818 WARN_ONCE(1, "bio idx %d >= vcnt %d\n",
819 bio->bi_idx, bio->bi_vcnt);
820 break;
821 }
822
823 if (bytes >= bio_iovec(bio)->bv_len) {
824 bytes -= bio_iovec(bio)->bv_len;
825 bio->bi_idx++;
826 } else {
827 bio_iovec(bio)->bv_len -= bytes;
828 bio_iovec(bio)->bv_offset += bytes;
829 bytes = 0;
830 }
831 }
832} 903}
833EXPORT_SYMBOL(bio_advance); 904EXPORT_SYMBOL(bio_advance);
834 905
@@ -874,117 +945,80 @@ EXPORT_SYMBOL(bio_alloc_pages);
874 */ 945 */
875void bio_copy_data(struct bio *dst, struct bio *src) 946void bio_copy_data(struct bio *dst, struct bio *src)
876{ 947{
877 struct bio_vec *src_bv, *dst_bv; 948 struct bvec_iter src_iter, dst_iter;
878 unsigned src_offset, dst_offset, bytes; 949 struct bio_vec src_bv, dst_bv;
879 void *src_p, *dst_p; 950 void *src_p, *dst_p;
951 unsigned bytes;
880 952
881 src_bv = bio_iovec(src); 953 src_iter = src->bi_iter;
882 dst_bv = bio_iovec(dst); 954 dst_iter = dst->bi_iter;
883
884 src_offset = src_bv->bv_offset;
885 dst_offset = dst_bv->bv_offset;
886 955
887 while (1) { 956 while (1) {
888 if (src_offset == src_bv->bv_offset + src_bv->bv_len) { 957 if (!src_iter.bi_size) {
889 src_bv++; 958 src = src->bi_next;
890 if (src_bv == bio_iovec_idx(src, src->bi_vcnt)) { 959 if (!src)
891 src = src->bi_next; 960 break;
892 if (!src)
893 break;
894
895 src_bv = bio_iovec(src);
896 }
897 961
898 src_offset = src_bv->bv_offset; 962 src_iter = src->bi_iter;
899 } 963 }
900 964
901 if (dst_offset == dst_bv->bv_offset + dst_bv->bv_len) { 965 if (!dst_iter.bi_size) {
902 dst_bv++; 966 dst = dst->bi_next;
903 if (dst_bv == bio_iovec_idx(dst, dst->bi_vcnt)) { 967 if (!dst)
904 dst = dst->bi_next; 968 break;
905 if (!dst)
906 break;
907
908 dst_bv = bio_iovec(dst);
909 }
910 969
911 dst_offset = dst_bv->bv_offset; 970 dst_iter = dst->bi_iter;
912 } 971 }
913 972
914 bytes = min(dst_bv->bv_offset + dst_bv->bv_len - dst_offset, 973 src_bv = bio_iter_iovec(src, src_iter);
915 src_bv->bv_offset + src_bv->bv_len - src_offset); 974 dst_bv = bio_iter_iovec(dst, dst_iter);
975
976 bytes = min(src_bv.bv_len, dst_bv.bv_len);
916 977
917 src_p = kmap_atomic(src_bv->bv_page); 978 src_p = kmap_atomic(src_bv.bv_page);
918 dst_p = kmap_atomic(dst_bv->bv_page); 979 dst_p = kmap_atomic(dst_bv.bv_page);
919 980
920 memcpy(dst_p + dst_offset, 981 memcpy(dst_p + dst_bv.bv_offset,
921 src_p + src_offset, 982 src_p + src_bv.bv_offset,
922 bytes); 983 bytes);
923 984
924 kunmap_atomic(dst_p); 985 kunmap_atomic(dst_p);
925 kunmap_atomic(src_p); 986 kunmap_atomic(src_p);
926 987
927 src_offset += bytes; 988 bio_advance_iter(src, &src_iter, bytes);
928 dst_offset += bytes; 989 bio_advance_iter(dst, &dst_iter, bytes);
929 } 990 }
930} 991}
931EXPORT_SYMBOL(bio_copy_data); 992EXPORT_SYMBOL(bio_copy_data);
932 993
933struct bio_map_data { 994struct bio_map_data {
934 struct bio_vec *iovecs;
935 struct sg_iovec *sgvecs;
936 int nr_sgvecs; 995 int nr_sgvecs;
937 int is_our_pages; 996 int is_our_pages;
997 struct sg_iovec sgvecs[];
938}; 998};
939 999
940static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio, 1000static void bio_set_map_data(struct bio_map_data *bmd, struct bio *bio,
941 struct sg_iovec *iov, int iov_count, 1001 struct sg_iovec *iov, int iov_count,
942 int is_our_pages) 1002 int is_our_pages)
943{ 1003{
944 memcpy(bmd->iovecs, bio->bi_io_vec, sizeof(struct bio_vec) * bio->bi_vcnt);
945 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count); 1004 memcpy(bmd->sgvecs, iov, sizeof(struct sg_iovec) * iov_count);
946 bmd->nr_sgvecs = iov_count; 1005 bmd->nr_sgvecs = iov_count;
947 bmd->is_our_pages = is_our_pages; 1006 bmd->is_our_pages = is_our_pages;
948 bio->bi_private = bmd; 1007 bio->bi_private = bmd;
949} 1008}
950 1009
951static void bio_free_map_data(struct bio_map_data *bmd)
952{
953 kfree(bmd->iovecs);
954 kfree(bmd->sgvecs);
955 kfree(bmd);
956}
957
958static struct bio_map_data *bio_alloc_map_data(int nr_segs, 1010static struct bio_map_data *bio_alloc_map_data(int nr_segs,
959 unsigned int iov_count, 1011 unsigned int iov_count,
960 gfp_t gfp_mask) 1012 gfp_t gfp_mask)
961{ 1013{
962 struct bio_map_data *bmd;
963
964 if (iov_count > UIO_MAXIOV) 1014 if (iov_count > UIO_MAXIOV)
965 return NULL; 1015 return NULL;
966 1016
967 bmd = kmalloc(sizeof(*bmd), gfp_mask); 1017 return kmalloc(sizeof(struct bio_map_data) +
968 if (!bmd) 1018 sizeof(struct sg_iovec) * iov_count, gfp_mask);
969 return NULL;
970
971 bmd->iovecs = kmalloc(sizeof(struct bio_vec) * nr_segs, gfp_mask);
972 if (!bmd->iovecs) {
973 kfree(bmd);
974 return NULL;
975 }
976
977 bmd->sgvecs = kmalloc(sizeof(struct sg_iovec) * iov_count, gfp_mask);
978 if (bmd->sgvecs)
979 return bmd;
980
981 kfree(bmd->iovecs);
982 kfree(bmd);
983 return NULL;
984} 1019}
985 1020
986static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs, 1021static int __bio_copy_iov(struct bio *bio, struct sg_iovec *iov, int iov_count,
987 struct sg_iovec *iov, int iov_count,
988 int to_user, int from_user, int do_free_page) 1022 int to_user, int from_user, int do_free_page)
989{ 1023{
990 int ret = 0, i; 1024 int ret = 0, i;
@@ -994,7 +1028,7 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs,
994 1028
995 bio_for_each_segment_all(bvec, bio, i) { 1029 bio_for_each_segment_all(bvec, bio, i) {
996 char *bv_addr = page_address(bvec->bv_page); 1030 char *bv_addr = page_address(bvec->bv_page);
997 unsigned int bv_len = iovecs[i].bv_len; 1031 unsigned int bv_len = bvec->bv_len;
998 1032
999 while (bv_len && iov_idx < iov_count) { 1033 while (bv_len && iov_idx < iov_count) {
1000 unsigned int bytes; 1034 unsigned int bytes;
@@ -1054,14 +1088,14 @@ int bio_uncopy_user(struct bio *bio)
1054 * don't copy into a random user address space, just free. 1088 * don't copy into a random user address space, just free.
1055 */ 1089 */
1056 if (current->mm) 1090 if (current->mm)
1057 ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, 1091 ret = __bio_copy_iov(bio, bmd->sgvecs, bmd->nr_sgvecs,
1058 bmd->nr_sgvecs, bio_data_dir(bio) == READ, 1092 bio_data_dir(bio) == READ,
1059 0, bmd->is_our_pages); 1093 0, bmd->is_our_pages);
1060 else if (bmd->is_our_pages) 1094 else if (bmd->is_our_pages)
1061 bio_for_each_segment_all(bvec, bio, i) 1095 bio_for_each_segment_all(bvec, bio, i)
1062 __free_page(bvec->bv_page); 1096 __free_page(bvec->bv_page);
1063 } 1097 }
1064 bio_free_map_data(bmd); 1098 kfree(bmd);
1065 bio_put(bio); 1099 bio_put(bio);
1066 return ret; 1100 return ret;
1067} 1101}
@@ -1175,7 +1209,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
1175 */ 1209 */
1176 if ((!write_to_vm && (!map_data || !map_data->null_mapped)) || 1210 if ((!write_to_vm && (!map_data || !map_data->null_mapped)) ||
1177 (map_data && map_data->from_user)) { 1211 (map_data && map_data->from_user)) {
1178 ret = __bio_copy_iov(bio, bio->bi_io_vec, iov, iov_count, 0, 1, 0); 1212 ret = __bio_copy_iov(bio, iov, iov_count, 0, 1, 0);
1179 if (ret) 1213 if (ret)
1180 goto cleanup; 1214 goto cleanup;
1181 } 1215 }
@@ -1189,7 +1223,7 @@ cleanup:
1189 1223
1190 bio_put(bio); 1224 bio_put(bio);
1191out_bmd: 1225out_bmd:
1192 bio_free_map_data(bmd); 1226 kfree(bmd);
1193 return ERR_PTR(ret); 1227 return ERR_PTR(ret);
1194} 1228}
1195 1229
@@ -1485,7 +1519,7 @@ struct bio *bio_map_kern(struct request_queue *q, void *data, unsigned int len,
1485 if (IS_ERR(bio)) 1519 if (IS_ERR(bio))
1486 return bio; 1520 return bio;
1487 1521
1488 if (bio->bi_size == len) 1522 if (bio->bi_iter.bi_size == len)
1489 return bio; 1523 return bio;
1490 1524
1491 /* 1525 /*
@@ -1506,16 +1540,15 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
1506 1540
1507 bio_for_each_segment_all(bvec, bio, i) { 1541 bio_for_each_segment_all(bvec, bio, i) {
1508 char *addr = page_address(bvec->bv_page); 1542 char *addr = page_address(bvec->bv_page);
1509 int len = bmd->iovecs[i].bv_len;
1510 1543
1511 if (read) 1544 if (read)
1512 memcpy(p, addr, len); 1545 memcpy(p, addr, bvec->bv_len);
1513 1546
1514 __free_page(bvec->bv_page); 1547 __free_page(bvec->bv_page);
1515 p += len; 1548 p += bvec->bv_len;
1516 } 1549 }
1517 1550
1518 bio_free_map_data(bmd); 1551 kfree(bmd);
1519 bio_put(bio); 1552 bio_put(bio);
1520} 1553}
1521 1554
@@ -1686,11 +1719,11 @@ void bio_check_pages_dirty(struct bio *bio)
1686#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1719#if ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
1687void bio_flush_dcache_pages(struct bio *bi) 1720void bio_flush_dcache_pages(struct bio *bi)
1688{ 1721{
1689 int i; 1722 struct bio_vec bvec;
1690 struct bio_vec *bvec; 1723 struct bvec_iter iter;
1691 1724
1692 bio_for_each_segment(bvec, bi, i) 1725 bio_for_each_segment(bvec, bi, iter)
1693 flush_dcache_page(bvec->bv_page); 1726 flush_dcache_page(bvec.bv_page);
1694} 1727}
1695EXPORT_SYMBOL(bio_flush_dcache_pages); 1728EXPORT_SYMBOL(bio_flush_dcache_pages);
1696#endif 1729#endif
@@ -1711,96 +1744,86 @@ EXPORT_SYMBOL(bio_flush_dcache_pages);
1711 **/ 1744 **/
1712void bio_endio(struct bio *bio, int error) 1745void bio_endio(struct bio *bio, int error)
1713{ 1746{
1714 if (error) 1747 while (bio) {
1715 clear_bit(BIO_UPTODATE, &bio->bi_flags); 1748 BUG_ON(atomic_read(&bio->bi_remaining) <= 0);
1716 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1717 error = -EIO;
1718 1749
1719 if (bio->bi_end_io) 1750 if (error)
1720 bio->bi_end_io(bio, error); 1751 clear_bit(BIO_UPTODATE, &bio->bi_flags);
1721} 1752 else if (!test_bit(BIO_UPTODATE, &bio->bi_flags))
1722EXPORT_SYMBOL(bio_endio); 1753 error = -EIO;
1723 1754
1724void bio_pair_release(struct bio_pair *bp) 1755 if (!atomic_dec_and_test(&bio->bi_remaining))
1725{ 1756 return;
1726 if (atomic_dec_and_test(&bp->cnt)) {
1727 struct bio *master = bp->bio1.bi_private;
1728 1757
1729 bio_endio(master, bp->error); 1758 /*
1730 mempool_free(bp, bp->bio2.bi_private); 1759 * Need to have a real endio function for chained bios,
1760 * otherwise various corner cases will break (like stacking
1761 * block devices that save/restore bi_end_io) - however, we want
1762 * to avoid unbounded recursion and blowing the stack. Tail call
1763 * optimization would handle this, but compiling with frame
1764 * pointers also disables gcc's sibling call optimization.
1765 */
1766 if (bio->bi_end_io == bio_chain_endio) {
1767 struct bio *parent = bio->bi_private;
1768 bio_put(bio);
1769 bio = parent;
1770 } else {
1771 if (bio->bi_end_io)
1772 bio->bi_end_io(bio, error);
1773 bio = NULL;
1774 }
1731 } 1775 }
1732} 1776}
1733EXPORT_SYMBOL(bio_pair_release); 1777EXPORT_SYMBOL(bio_endio);
1734 1778
1735static void bio_pair_end_1(struct bio *bi, int err) 1779/**
1780 * bio_endio_nodec - end I/O on a bio, without decrementing bi_remaining
1781 * @bio: bio
1782 * @error: error, if any
1783 *
 1784 * For code that has saved and restored bi_end_io; think hard before using this
1785 * function, probably you should've cloned the entire bio.
1786 **/
1787void bio_endio_nodec(struct bio *bio, int error)
1736{ 1788{
1737 struct bio_pair *bp = container_of(bi, struct bio_pair, bio1); 1789 atomic_inc(&bio->bi_remaining);
1738 1790 bio_endio(bio, error);
1739 if (err)
1740 bp->error = err;
1741
1742 bio_pair_release(bp);
1743} 1791}
1792EXPORT_SYMBOL(bio_endio_nodec);
1744 1793
1745static void bio_pair_end_2(struct bio *bi, int err) 1794/**
1746{ 1795 * bio_split - split a bio
1747 struct bio_pair *bp = container_of(bi, struct bio_pair, bio2); 1796 * @bio: bio to split
1748 1797 * @sectors: number of sectors to split from the front of @bio
1749 if (err) 1798 * @gfp: gfp mask
1750 bp->error = err; 1799 * @bs: bio set to allocate from
1751 1800 *
1752 bio_pair_release(bp); 1801 * Allocates and returns a new bio which represents @sectors from the start of
1753} 1802 * @bio, and updates @bio to represent the remaining sectors.
1754 1803 *
1755/* 1804 * The newly allocated bio will point to @bio's bi_io_vec; it is the caller's
1756 * split a bio - only worry about a bio with a single page in its iovec 1805 * responsibility to ensure that @bio is not freed before the split.
1757 */ 1806 */
1758struct bio_pair *bio_split(struct bio *bi, int first_sectors) 1807struct bio *bio_split(struct bio *bio, int sectors,
1808 gfp_t gfp, struct bio_set *bs)
1759{ 1809{
1760 struct bio_pair *bp = mempool_alloc(bio_split_pool, GFP_NOIO); 1810 struct bio *split = NULL;
1761
1762 if (!bp)
1763 return bp;
1764
1765 trace_block_split(bdev_get_queue(bi->bi_bdev), bi,
1766 bi->bi_sector + first_sectors);
1767
1768 BUG_ON(bio_segments(bi) > 1);
1769 atomic_set(&bp->cnt, 3);
1770 bp->error = 0;
1771 bp->bio1 = *bi;
1772 bp->bio2 = *bi;
1773 bp->bio2.bi_sector += first_sectors;
1774 bp->bio2.bi_size -= first_sectors << 9;
1775 bp->bio1.bi_size = first_sectors << 9;
1776
1777 if (bi->bi_vcnt != 0) {
1778 bp->bv1 = *bio_iovec(bi);
1779 bp->bv2 = *bio_iovec(bi);
1780
1781 if (bio_is_rw(bi)) {
1782 bp->bv2.bv_offset += first_sectors << 9;
1783 bp->bv2.bv_len -= first_sectors << 9;
1784 bp->bv1.bv_len = first_sectors << 9;
1785 }
1786 1811
1787 bp->bio1.bi_io_vec = &bp->bv1; 1812 BUG_ON(sectors <= 0);
1788 bp->bio2.bi_io_vec = &bp->bv2; 1813 BUG_ON(sectors >= bio_sectors(bio));
1789 1814
1790 bp->bio1.bi_max_vecs = 1; 1815 split = bio_clone_fast(bio, gfp, bs);
1791 bp->bio2.bi_max_vecs = 1; 1816 if (!split)
1792 } 1817 return NULL;
1793 1818
1794 bp->bio1.bi_end_io = bio_pair_end_1; 1819 split->bi_iter.bi_size = sectors << 9;
1795 bp->bio2.bi_end_io = bio_pair_end_2;
1796 1820
1797 bp->bio1.bi_private = bi; 1821 if (bio_integrity(split))
1798 bp->bio2.bi_private = bio_split_pool; 1822 bio_integrity_trim(split, 0, sectors);
1799 1823
1800 if (bio_integrity(bi)) 1824 bio_advance(bio, split->bi_iter.bi_size);
1801 bio_integrity_split(bi, bp, first_sectors);
1802 1825
1803 return bp; 1826 return split;
1804} 1827}
1805EXPORT_SYMBOL(bio_split); 1828EXPORT_SYMBOL(bio_split);
1806 1829
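Taken together, bio_split(), bio_chain() and the loop-based bio_endio() replace the old bio_pair machinery: splitting becomes clone, advance, chain, submit, with no fixed limit on how many pieces a bio can be cut into. A minimal sketch of the pattern a stacking driver might use (split_and_submit() and chunk_sectors are invented for illustration; allocations from a bio_set with GFP_NOIO are mempool-backed, so NULL handling is elided):

	static void split_and_submit(struct bio *bio, unsigned int chunk_sectors,
				     struct bio_set *bs)
	{
		while (bio_sectors(bio) > chunk_sectors) {
			struct bio *split = bio_split(bio, chunk_sectors,
						      GFP_NOIO, bs);

			bio_chain(split, bio);	/* parent completes last */
			submit_bio(bio->bi_rw, split);
		}

		submit_bio(bio->bi_rw, bio);
	}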
@@ -1814,80 +1837,20 @@ void bio_trim(struct bio *bio, int offset, int size)
1814{ 1837{
1815 /* 'bio' is a cloned bio which we need to trim to match 1838 /* 'bio' is a cloned bio which we need to trim to match
1816 * the given offset and size. 1839 * the given offset and size.
1817 * This requires adjusting bi_sector, bi_size, and bi_io_vec
1818 */ 1840 */
1819 int i;
1820 struct bio_vec *bvec;
1821 int sofar = 0;
1822 1841
1823 size <<= 9; 1842 size <<= 9;
1824 if (offset == 0 && size == bio->bi_size) 1843 if (offset == 0 && size == bio->bi_iter.bi_size)
1825 return; 1844 return;
1826 1845
1827 clear_bit(BIO_SEG_VALID, &bio->bi_flags); 1846 clear_bit(BIO_SEG_VALID, &bio->bi_flags);
1828 1847
1829 bio_advance(bio, offset << 9); 1848 bio_advance(bio, offset << 9);
1830 1849
1831 bio->bi_size = size; 1850 bio->bi_iter.bi_size = size;
1832
1833 /* avoid any complications with bi_idx being non-zero*/
1834 if (bio->bi_idx) {
1835 memmove(bio->bi_io_vec, bio->bi_io_vec+bio->bi_idx,
1836 (bio->bi_vcnt - bio->bi_idx) * sizeof(struct bio_vec));
1837 bio->bi_vcnt -= bio->bi_idx;
1838 bio->bi_idx = 0;
1839 }
1840 /* Make sure vcnt and last bv are not too big */
1841 bio_for_each_segment(bvec, bio, i) {
1842 if (sofar + bvec->bv_len > size)
1843 bvec->bv_len = size - sofar;
1844 if (bvec->bv_len == 0) {
1845 bio->bi_vcnt = i;
1846 break;
1847 }
1848 sofar += bvec->bv_len;
1849 }
1850} 1851}
1851EXPORT_SYMBOL_GPL(bio_trim); 1852EXPORT_SYMBOL_GPL(bio_trim);
1852 1853
1853/**
1854 * bio_sector_offset - Find hardware sector offset in bio
1855 * @bio: bio to inspect
1856 * @index: bio_vec index
1857 * @offset: offset in bv_page
1858 *
1859 * Return the number of hardware sectors between beginning of bio
1860 * and an end point indicated by a bio_vec index and an offset
1861 * within that vector's page.
1862 */
1863sector_t bio_sector_offset(struct bio *bio, unsigned short index,
1864 unsigned int offset)
1865{
1866 unsigned int sector_sz;
1867 struct bio_vec *bv;
1868 sector_t sectors;
1869 int i;
1870
1871 sector_sz = queue_logical_block_size(bio->bi_bdev->bd_disk->queue);
1872 sectors = 0;
1873
1874 if (index >= bio->bi_idx)
1875 index = bio->bi_vcnt - 1;
1876
1877 bio_for_each_segment_all(bv, bio, i) {
1878 if (i == index) {
1879 if (offset > bv->bv_offset)
1880 sectors += (offset - bv->bv_offset) / sector_sz;
1881 break;
1882 }
1883
1884 sectors += bv->bv_len / sector_sz;
1885 }
1886
1887 return sectors;
1888}
1889EXPORT_SYMBOL(bio_sector_offset);
1890
1891/* 1854/*
1892 * create memory pools for biovec's in a bio_set. 1855 * create memory pools for biovec's in a bio_set.
1893 * use the global biovec slabs created for general use. 1856 * use the global biovec slabs created for general use.
@@ -2065,11 +2028,6 @@ static int __init init_bio(void)
2065 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE)) 2028 if (bioset_integrity_create(fs_bio_set, BIO_POOL_SIZE))
2066 panic("bio: can't create integrity pool\n"); 2029 panic("bio: can't create integrity pool\n");
2067 2030
2068 bio_split_pool = mempool_create_kmalloc_pool(BIO_SPLIT_ENTRIES,
2069 sizeof(struct bio_pair));
2070 if (!bio_split_pool)
2071 panic("bio: can't create split pool\n");
2072
2073 return 0; 2031 return 0;
2074} 2032}
2075subsys_initcall(init_bio); 2033subsys_initcall(init_bio);
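The filesystem hunks that follow are largely mechanical: every bi_sector, bi_size and bi_idx access moves under bio->bi_iter, and open-coded biovec walks become bio_for_each_segment() or bio_for_each_segment_all(). For orientation, the iterator those fields moved into carries roughly the following state (sketched from its use in this patch; include/linux/blk_types.h is authoritative):

	struct bvec_iter {
		sector_t	bi_sector;	/* device address, in 512-byte sectors */
		unsigned int	bi_size;	/* residual I/O count, in bytes */
		unsigned int	bi_idx;		/* current index into bi_io_vec */
		unsigned int	bi_bvec_done;	/* bytes completed in the current bvec */
	};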
diff --git a/fs/btrfs/check-integrity.c b/fs/btrfs/check-integrity.c
index 131d82800b3a..cb05e1c842c5 100644
--- a/fs/btrfs/check-integrity.c
+++ b/fs/btrfs/check-integrity.c
@@ -1695,7 +1695,7 @@ static int btrfsic_read_block(struct btrfsic_state *state,
1695 return -1; 1695 return -1;
1696 } 1696 }
1697 bio->bi_bdev = block_ctx->dev->bdev; 1697 bio->bi_bdev = block_ctx->dev->bdev;
1698 bio->bi_sector = dev_bytenr >> 9; 1698 bio->bi_iter.bi_sector = dev_bytenr >> 9;
1699 1699
1700 for (j = i; j < num_pages; j++) { 1700 for (j = i; j < num_pages; j++) {
1701 ret = bio_add_page(bio, block_ctx->pagev[j], 1701 ret = bio_add_page(bio, block_ctx->pagev[j],
@@ -3013,7 +3013,7 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
3013 int bio_is_patched; 3013 int bio_is_patched;
3014 char **mapped_datav; 3014 char **mapped_datav;
3015 3015
3016 dev_bytenr = 512 * bio->bi_sector; 3016 dev_bytenr = 512 * bio->bi_iter.bi_sector;
3017 bio_is_patched = 0; 3017 bio_is_patched = 0;
3018 if (dev_state->state->print_mask & 3018 if (dev_state->state->print_mask &
3019 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH) 3019 BTRFSIC_PRINT_MASK_SUBMIT_BIO_BH)
@@ -3021,8 +3021,8 @@ static void __btrfsic_submit_bio(int rw, struct bio *bio)
3021 "submit_bio(rw=0x%x, bi_vcnt=%u," 3021 "submit_bio(rw=0x%x, bi_vcnt=%u,"
3022 " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n", 3022 " bi_sector=%llu (bytenr %llu), bi_bdev=%p)\n",
3023 rw, bio->bi_vcnt, 3023 rw, bio->bi_vcnt,
3024 (unsigned long long)bio->bi_sector, dev_bytenr, 3024 (unsigned long long)bio->bi_iter.bi_sector,
3025 bio->bi_bdev); 3025 dev_bytenr, bio->bi_bdev);
3026 3026
3027 mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt, 3027 mapped_datav = kmalloc(sizeof(*mapped_datav) * bio->bi_vcnt,
3028 GFP_NOFS); 3028 GFP_NOFS);
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 1499b27b4186..f5cdeb4b5538 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -172,7 +172,8 @@ static void end_compressed_bio_read(struct bio *bio, int err)
172 goto out; 172 goto out;
173 173
174 inode = cb->inode; 174 inode = cb->inode;
175 ret = check_compressed_csum(inode, cb, (u64)bio->bi_sector << 9); 175 ret = check_compressed_csum(inode, cb,
176 (u64)bio->bi_iter.bi_sector << 9);
176 if (ret) 177 if (ret)
177 goto csum_failed; 178 goto csum_failed;
178 179
@@ -201,18 +202,16 @@ csum_failed:
201 if (cb->errors) { 202 if (cb->errors) {
202 bio_io_error(cb->orig_bio); 203 bio_io_error(cb->orig_bio);
203 } else { 204 } else {
204 int bio_index = 0; 205 int i;
205 struct bio_vec *bvec = cb->orig_bio->bi_io_vec; 206 struct bio_vec *bvec;
206 207
207 /* 208 /*
208 * we have verified the checksum already, set page 209 * we have verified the checksum already, set page
209 * checked so the end_io handlers know about it 210 * checked so the end_io handlers know about it
210 */ 211 */
211 while (bio_index < cb->orig_bio->bi_vcnt) { 212 bio_for_each_segment_all(bvec, cb->orig_bio, i)
212 SetPageChecked(bvec->bv_page); 213 SetPageChecked(bvec->bv_page);
213 bvec++; 214
214 bio_index++;
215 }
216 bio_endio(cb->orig_bio, 0); 215 bio_endio(cb->orig_bio, 0);
217 } 216 }
218 217
@@ -372,7 +371,7 @@ int btrfs_submit_compressed_write(struct inode *inode, u64 start,
372 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) { 371 for (pg_index = 0; pg_index < cb->nr_pages; pg_index++) {
373 page = compressed_pages[pg_index]; 372 page = compressed_pages[pg_index];
374 page->mapping = inode->i_mapping; 373 page->mapping = inode->i_mapping;
375 if (bio->bi_size) 374 if (bio->bi_iter.bi_size)
376 ret = io_tree->ops->merge_bio_hook(WRITE, page, 0, 375 ret = io_tree->ops->merge_bio_hook(WRITE, page, 0,
377 PAGE_CACHE_SIZE, 376 PAGE_CACHE_SIZE,
378 bio, 0); 377 bio, 0);
@@ -506,7 +505,7 @@ static noinline int add_ra_bio_pages(struct inode *inode,
506 505
507 if (!em || last_offset < em->start || 506 if (!em || last_offset < em->start ||
508 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) || 507 (last_offset + PAGE_CACHE_SIZE > extent_map_end(em)) ||
509 (em->block_start >> 9) != cb->orig_bio->bi_sector) { 508 (em->block_start >> 9) != cb->orig_bio->bi_iter.bi_sector) {
510 free_extent_map(em); 509 free_extent_map(em);
511 unlock_extent(tree, last_offset, end); 510 unlock_extent(tree, last_offset, end);
512 unlock_page(page); 511 unlock_page(page);
@@ -552,7 +551,7 @@ next:
552 * in it. We don't actually do IO on those pages but allocate new ones 551 * in it. We don't actually do IO on those pages but allocate new ones
553 * to hold the compressed pages on disk. 552 * to hold the compressed pages on disk.
554 * 553 *
555 * bio->bi_sector points to the compressed extent on disk 554 * bio->bi_iter.bi_sector points to the compressed extent on disk
556 * bio->bi_io_vec points to all of the inode pages 555 * bio->bi_io_vec points to all of the inode pages
557 * bio->bi_vcnt is a count of pages 556 * bio->bi_vcnt is a count of pages
558 * 557 *
@@ -573,7 +572,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
573 struct page *page; 572 struct page *page;
574 struct block_device *bdev; 573 struct block_device *bdev;
575 struct bio *comp_bio; 574 struct bio *comp_bio;
576 u64 cur_disk_byte = (u64)bio->bi_sector << 9; 575 u64 cur_disk_byte = (u64)bio->bi_iter.bi_sector << 9;
577 u64 em_len; 576 u64 em_len;
578 u64 em_start; 577 u64 em_start;
579 struct extent_map *em; 578 struct extent_map *em;
@@ -659,7 +658,7 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
659 page->mapping = inode->i_mapping; 658 page->mapping = inode->i_mapping;
660 page->index = em_start >> PAGE_CACHE_SHIFT; 659 page->index = em_start >> PAGE_CACHE_SHIFT;
661 660
662 if (comp_bio->bi_size) 661 if (comp_bio->bi_iter.bi_size)
663 ret = tree->ops->merge_bio_hook(READ, page, 0, 662 ret = tree->ops->merge_bio_hook(READ, page, 0,
664 PAGE_CACHE_SIZE, 663 PAGE_CACHE_SIZE,
665 comp_bio, 0); 664 comp_bio, 0);
@@ -687,8 +686,8 @@ int btrfs_submit_compressed_read(struct inode *inode, struct bio *bio,
687 comp_bio, sums); 686 comp_bio, sums);
688 BUG_ON(ret); /* -ENOMEM */ 687 BUG_ON(ret); /* -ENOMEM */
689 } 688 }
690 sums += (comp_bio->bi_size + root->sectorsize - 1) / 689 sums += (comp_bio->bi_iter.bi_size +
691 root->sectorsize; 690 root->sectorsize - 1) / root->sectorsize;
692 691
693 ret = btrfs_map_bio(root, READ, comp_bio, 692 ret = btrfs_map_bio(root, READ, comp_bio,
694 mirror_num, 0); 693 mirror_num, 0);
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 8072cfa8a3b1..e71039ea66cf 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -842,20 +842,17 @@ int btrfs_wq_submit_bio(struct btrfs_fs_info *fs_info, struct inode *inode,
842 842
843static int btree_csum_one_bio(struct bio *bio) 843static int btree_csum_one_bio(struct bio *bio)
844{ 844{
845 struct bio_vec *bvec = bio->bi_io_vec; 845 struct bio_vec *bvec;
846 int bio_index = 0;
847 struct btrfs_root *root; 846 struct btrfs_root *root;
848 int ret = 0; 847 int i, ret = 0;
849 848
850 WARN_ON(bio->bi_vcnt <= 0); 849 bio_for_each_segment_all(bvec, bio, i) {
851 while (bio_index < bio->bi_vcnt) {
852 root = BTRFS_I(bvec->bv_page->mapping->host)->root; 850 root = BTRFS_I(bvec->bv_page->mapping->host)->root;
853 ret = csum_dirty_buffer(root, bvec->bv_page); 851 ret = csum_dirty_buffer(root, bvec->bv_page);
854 if (ret) 852 if (ret)
855 break; 853 break;
856 bio_index++;
857 bvec++;
858 } 854 }
855
859 return ret; 856 return ret;
860} 857}
861 858
@@ -1695,7 +1692,7 @@ static void end_workqueue_fn(struct btrfs_work *work)
1695 bio->bi_private = end_io_wq->private; 1692 bio->bi_private = end_io_wq->private;
1696 bio->bi_end_io = end_io_wq->end_io; 1693 bio->bi_end_io = end_io_wq->end_io;
1697 kfree(end_io_wq); 1694 kfree(end_io_wq);
1698 bio_endio(bio, error); 1695 bio_endio_nodec(bio, error);
1699} 1696}
1700 1697
1701static int cleaner_kthread(void *arg) 1698static int cleaner_kthread(void *arg)
diff --git a/fs/btrfs/extent_io.c b/fs/btrfs/extent_io.c
index ff43802a7c88..bcb6f1b780d6 100644
--- a/fs/btrfs/extent_io.c
+++ b/fs/btrfs/extent_io.c
@@ -1984,7 +1984,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1984 bio = btrfs_io_bio_alloc(GFP_NOFS, 1); 1984 bio = btrfs_io_bio_alloc(GFP_NOFS, 1);
1985 if (!bio) 1985 if (!bio)
1986 return -EIO; 1986 return -EIO;
1987 bio->bi_size = 0; 1987 bio->bi_iter.bi_size = 0;
1988 map_length = length; 1988 map_length = length;
1989 1989
1990 ret = btrfs_map_block(fs_info, WRITE, logical, 1990 ret = btrfs_map_block(fs_info, WRITE, logical,
@@ -1995,7 +1995,7 @@ int repair_io_failure(struct btrfs_fs_info *fs_info, u64 start,
1995 } 1995 }
1996 BUG_ON(mirror_num != bbio->mirror_num); 1996 BUG_ON(mirror_num != bbio->mirror_num);
1997 sector = bbio->stripes[mirror_num-1].physical >> 9; 1997 sector = bbio->stripes[mirror_num-1].physical >> 9;
1998 bio->bi_sector = sector; 1998 bio->bi_iter.bi_sector = sector;
1999 dev = bbio->stripes[mirror_num-1].dev; 1999 dev = bbio->stripes[mirror_num-1].dev;
2000 kfree(bbio); 2000 kfree(bbio);
2001 if (!dev || !dev->bdev || !dev->writeable) { 2001 if (!dev || !dev->bdev || !dev->writeable) {
@@ -2268,9 +2268,9 @@ static int bio_readpage_error(struct bio *failed_bio, u64 phy_offset,
2268 return -EIO; 2268 return -EIO;
2269 } 2269 }
2270 bio->bi_end_io = failed_bio->bi_end_io; 2270 bio->bi_end_io = failed_bio->bi_end_io;
2271 bio->bi_sector = failrec->logical >> 9; 2271 bio->bi_iter.bi_sector = failrec->logical >> 9;
2272 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev; 2272 bio->bi_bdev = BTRFS_I(inode)->root->fs_info->fs_devices->latest_bdev;
2273 bio->bi_size = 0; 2273 bio->bi_iter.bi_size = 0;
2274 2274
2275 btrfs_failed_bio = btrfs_io_bio(failed_bio); 2275 btrfs_failed_bio = btrfs_io_bio(failed_bio);
2276 if (btrfs_failed_bio->csum) { 2276 if (btrfs_failed_bio->csum) {
@@ -2332,12 +2332,13 @@ int end_extent_writepage(struct page *page, int err, u64 start, u64 end)
2332 */ 2332 */
2333static void end_bio_extent_writepage(struct bio *bio, int err) 2333static void end_bio_extent_writepage(struct bio *bio, int err)
2334{ 2334{
2335 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 2335 struct bio_vec *bvec;
2336 struct extent_io_tree *tree; 2336 struct extent_io_tree *tree;
2337 u64 start; 2337 u64 start;
2338 u64 end; 2338 u64 end;
2339 int i;
2339 2340
2340 do { 2341 bio_for_each_segment_all(bvec, bio, i) {
2341 struct page *page = bvec->bv_page; 2342 struct page *page = bvec->bv_page;
2342 tree = &BTRFS_I(page->mapping->host)->io_tree; 2343 tree = &BTRFS_I(page->mapping->host)->io_tree;
2343 2344
@@ -2355,14 +2356,11 @@ static void end_bio_extent_writepage(struct bio *bio, int err)
2355 start = page_offset(page); 2356 start = page_offset(page);
2356 end = start + bvec->bv_offset + bvec->bv_len - 1; 2357 end = start + bvec->bv_offset + bvec->bv_len - 1;
2357 2358
2358 if (--bvec >= bio->bi_io_vec)
2359 prefetchw(&bvec->bv_page->flags);
2360
2361 if (end_extent_writepage(page, err, start, end)) 2359 if (end_extent_writepage(page, err, start, end))
2362 continue; 2360 continue;
2363 2361
2364 end_page_writeback(page); 2362 end_page_writeback(page);
2365 } while (bvec >= bio->bi_io_vec); 2363 }
2366 2364
2367 bio_put(bio); 2365 bio_put(bio);
2368} 2366}
@@ -2392,9 +2390,8 @@ endio_readpage_release_extent(struct extent_io_tree *tree, u64 start, u64 len,
2392 */ 2390 */
2393static void end_bio_extent_readpage(struct bio *bio, int err) 2391static void end_bio_extent_readpage(struct bio *bio, int err)
2394{ 2392{
2393 struct bio_vec *bvec;
2395 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 2394 int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
2396 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1;
2397 struct bio_vec *bvec = bio->bi_io_vec;
2398 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio); 2395 struct btrfs_io_bio *io_bio = btrfs_io_bio(bio);
2399 struct extent_io_tree *tree; 2396 struct extent_io_tree *tree;
2400 u64 offset = 0; 2397 u64 offset = 0;
@@ -2405,16 +2402,17 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2405 u64 extent_len = 0; 2402 u64 extent_len = 0;
2406 int mirror; 2403 int mirror;
2407 int ret; 2404 int ret;
2405 int i;
2408 2406
2409 if (err) 2407 if (err)
2410 uptodate = 0; 2408 uptodate = 0;
2411 2409
2412 do { 2410 bio_for_each_segment_all(bvec, bio, i) {
2413 struct page *page = bvec->bv_page; 2411 struct page *page = bvec->bv_page;
2414 struct inode *inode = page->mapping->host; 2412 struct inode *inode = page->mapping->host;
2415 2413
2416 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, " 2414 pr_debug("end_bio_extent_readpage: bi_sector=%llu, err=%d, "
2417 "mirror=%lu\n", (u64)bio->bi_sector, err, 2415 "mirror=%lu\n", (u64)bio->bi_iter.bi_sector, err,
2418 io_bio->mirror_num); 2416 io_bio->mirror_num);
2419 tree = &BTRFS_I(inode)->io_tree; 2417 tree = &BTRFS_I(inode)->io_tree;
2420 2418
@@ -2433,9 +2431,6 @@ static void end_bio_extent_readpage(struct bio *bio, int err)
2433 end = start + bvec->bv_offset + bvec->bv_len - 1; 2431 end = start + bvec->bv_offset + bvec->bv_len - 1;
2434 len = bvec->bv_len; 2432 len = bvec->bv_len;
2435 2433
2436 if (++bvec <= bvec_end)
2437 prefetchw(&bvec->bv_page->flags);
2438
2439 mirror = io_bio->mirror_num; 2434 mirror = io_bio->mirror_num;
2440 if (likely(uptodate && tree->ops && 2435 if (likely(uptodate && tree->ops &&
2441 tree->ops->readpage_end_io_hook)) { 2436 tree->ops->readpage_end_io_hook)) {
@@ -2516,7 +2511,7 @@ readpage_ok:
2516 extent_start = start; 2511 extent_start = start;
2517 extent_len = end + 1 - start; 2512 extent_len = end + 1 - start;
2518 } 2513 }
2519 } while (bvec <= bvec_end); 2514 }
2520 2515
2521 if (extent_len) 2516 if (extent_len)
2522 endio_readpage_release_extent(tree, extent_start, extent_len, 2517 endio_readpage_release_extent(tree, extent_start, extent_len,
@@ -2547,9 +2542,8 @@ btrfs_bio_alloc(struct block_device *bdev, u64 first_sector, int nr_vecs,
2547 } 2542 }
2548 2543
2549 if (bio) { 2544 if (bio) {
2550 bio->bi_size = 0;
2551 bio->bi_bdev = bdev; 2545 bio->bi_bdev = bdev;
2552 bio->bi_sector = first_sector; 2546 bio->bi_iter.bi_sector = first_sector;
2553 btrfs_bio = btrfs_io_bio(bio); 2547 btrfs_bio = btrfs_io_bio(bio);
2554 btrfs_bio->csum = NULL; 2548 btrfs_bio->csum = NULL;
2555 btrfs_bio->csum_allocated = NULL; 2549 btrfs_bio->csum_allocated = NULL;
@@ -2643,7 +2637,7 @@ static int submit_extent_page(int rw, struct extent_io_tree *tree,
2643 if (bio_ret && *bio_ret) { 2637 if (bio_ret && *bio_ret) {
2644 bio = *bio_ret; 2638 bio = *bio_ret;
2645 if (old_compressed) 2639 if (old_compressed)
2646 contig = bio->bi_sector == sector; 2640 contig = bio->bi_iter.bi_sector == sector;
2647 else 2641 else
2648 contig = bio_end_sector(bio) == sector; 2642 contig = bio_end_sector(bio) == sector;
2649 2643
@@ -3410,20 +3404,18 @@ static void end_extent_buffer_writeback(struct extent_buffer *eb)
3410 3404
3411static void end_bio_extent_buffer_writepage(struct bio *bio, int err) 3405static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3412{ 3406{
3413 int uptodate = err == 0; 3407 struct bio_vec *bvec;
3414 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
3415 struct extent_buffer *eb; 3408 struct extent_buffer *eb;
3416 int done; 3409 int i, done;
3417 3410
3418 do { 3411 bio_for_each_segment_all(bvec, bio, i) {
3419 struct page *page = bvec->bv_page; 3412 struct page *page = bvec->bv_page;
3420 3413
3421 bvec--;
3422 eb = (struct extent_buffer *)page->private; 3414 eb = (struct extent_buffer *)page->private;
3423 BUG_ON(!eb); 3415 BUG_ON(!eb);
3424 done = atomic_dec_and_test(&eb->io_pages); 3416 done = atomic_dec_and_test(&eb->io_pages);
3425 3417
3426 if (!uptodate || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) { 3418 if (err || test_bit(EXTENT_BUFFER_IOERR, &eb->bflags)) {
3427 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags); 3419 set_bit(EXTENT_BUFFER_IOERR, &eb->bflags);
3428 ClearPageUptodate(page); 3420 ClearPageUptodate(page);
3429 SetPageError(page); 3421 SetPageError(page);
@@ -3435,10 +3427,9 @@ static void end_bio_extent_buffer_writepage(struct bio *bio, int err)
3435 continue; 3427 continue;
3436 3428
3437 end_extent_buffer_writeback(eb); 3429 end_extent_buffer_writeback(eb);
3438 } while (bvec >= bio->bi_io_vec); 3430 }
3439 3431
3440 bio_put(bio); 3432 bio_put(bio);
3441
3442} 3433}
3443 3434
3444static int write_one_eb(struct extent_buffer *eb, 3435static int write_one_eb(struct extent_buffer *eb,
diff --git a/fs/btrfs/file-item.c b/fs/btrfs/file-item.c
index 6f3848860283..84a46a42d262 100644
--- a/fs/btrfs/file-item.c
+++ b/fs/btrfs/file-item.c
@@ -182,7 +182,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
182 if (!path) 182 if (!path)
183 return -ENOMEM; 183 return -ENOMEM;
184 184
185 nblocks = bio->bi_size >> inode->i_sb->s_blocksize_bits; 185 nblocks = bio->bi_iter.bi_size >> inode->i_sb->s_blocksize_bits;
186 if (!dst) { 186 if (!dst) {
187 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) { 187 if (nblocks * csum_size > BTRFS_BIO_INLINE_CSUM_SIZE) {
188 btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size, 188 btrfs_bio->csum_allocated = kmalloc(nblocks * csum_size,
@@ -201,7 +201,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
201 csum = (u8 *)dst; 201 csum = (u8 *)dst;
202 } 202 }
203 203
204 if (bio->bi_size > PAGE_CACHE_SIZE * 8) 204 if (bio->bi_iter.bi_size > PAGE_CACHE_SIZE * 8)
205 path->reada = 2; 205 path->reada = 2;
206 206
207 WARN_ON(bio->bi_vcnt <= 0); 207 WARN_ON(bio->bi_vcnt <= 0);
@@ -217,7 +217,7 @@ static int __btrfs_lookup_bio_sums(struct btrfs_root *root,
217 path->skip_locking = 1; 217 path->skip_locking = 1;
218 } 218 }
219 219
220 disk_bytenr = (u64)bio->bi_sector << 9; 220 disk_bytenr = (u64)bio->bi_iter.bi_sector << 9;
221 if (dio) 221 if (dio)
222 offset = logical_offset; 222 offset = logical_offset;
223 while (bio_index < bio->bi_vcnt) { 223 while (bio_index < bio->bi_vcnt) {
@@ -302,7 +302,7 @@ int btrfs_lookup_bio_sums_dio(struct btrfs_root *root, struct inode *inode,
302 struct btrfs_dio_private *dip, struct bio *bio, 302 struct btrfs_dio_private *dip, struct bio *bio,
303 u64 offset) 303 u64 offset)
304{ 304{
305 int len = (bio->bi_sector << 9) - dip->disk_bytenr; 305 int len = (bio->bi_iter.bi_sector << 9) - dip->disk_bytenr;
306 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); 306 u16 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
307 int ret; 307 int ret;
308 308
@@ -447,11 +447,12 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
447 u64 offset; 447 u64 offset;
448 448
449 WARN_ON(bio->bi_vcnt <= 0); 449 WARN_ON(bio->bi_vcnt <= 0);
450 sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_size), GFP_NOFS); 450 sums = kzalloc(btrfs_ordered_sum_size(root, bio->bi_iter.bi_size),
451 GFP_NOFS);
451 if (!sums) 452 if (!sums)
452 return -ENOMEM; 453 return -ENOMEM;
453 454
454 sums->len = bio->bi_size; 455 sums->len = bio->bi_iter.bi_size;
455 INIT_LIST_HEAD(&sums->list); 456 INIT_LIST_HEAD(&sums->list);
456 457
457 if (contig) 458 if (contig)
@@ -461,7 +462,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
461 462
462 ordered = btrfs_lookup_ordered_extent(inode, offset); 463 ordered = btrfs_lookup_ordered_extent(inode, offset);
463 BUG_ON(!ordered); /* Logic error */ 464 BUG_ON(!ordered); /* Logic error */
464 sums->bytenr = (u64)bio->bi_sector << 9; 465 sums->bytenr = (u64)bio->bi_iter.bi_sector << 9;
465 index = 0; 466 index = 0;
466 467
467 while (bio_index < bio->bi_vcnt) { 468 while (bio_index < bio->bi_vcnt) {
@@ -476,7 +477,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
476 btrfs_add_ordered_sum(inode, ordered, sums); 477 btrfs_add_ordered_sum(inode, ordered, sums);
477 btrfs_put_ordered_extent(ordered); 478 btrfs_put_ordered_extent(ordered);
478 479
479 bytes_left = bio->bi_size - total_bytes; 480 bytes_left = bio->bi_iter.bi_size - total_bytes;
480 481
481 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left), 482 sums = kzalloc(btrfs_ordered_sum_size(root, bytes_left),
482 GFP_NOFS); 483 GFP_NOFS);
@@ -484,7 +485,7 @@ int btrfs_csum_one_bio(struct btrfs_root *root, struct inode *inode,
484 sums->len = bytes_left; 485 sums->len = bytes_left;
485 ordered = btrfs_lookup_ordered_extent(inode, offset); 486 ordered = btrfs_lookup_ordered_extent(inode, offset);
486 BUG_ON(!ordered); /* Logic error */ 487 BUG_ON(!ordered); /* Logic error */
487 sums->bytenr = ((u64)bio->bi_sector << 9) + 488 sums->bytenr = ((u64)bio->bi_iter.bi_sector << 9) +
488 total_bytes; 489 total_bytes;
489 index = 0; 490 index = 0;
490 } 491 }
diff --git a/fs/btrfs/inode.c b/fs/btrfs/inode.c
index f1a77449d032..7ab0e94ad492 100644
--- a/fs/btrfs/inode.c
+++ b/fs/btrfs/inode.c
@@ -1577,7 +1577,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1577 unsigned long bio_flags) 1577 unsigned long bio_flags)
1578{ 1578{
1579 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root; 1579 struct btrfs_root *root = BTRFS_I(page->mapping->host)->root;
1580 u64 logical = (u64)bio->bi_sector << 9; 1580 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
1581 u64 length = 0; 1581 u64 length = 0;
1582 u64 map_length; 1582 u64 map_length;
1583 int ret; 1583 int ret;
@@ -1585,7 +1585,7 @@ int btrfs_merge_bio_hook(int rw, struct page *page, unsigned long offset,
1585 if (bio_flags & EXTENT_BIO_COMPRESSED) 1585 if (bio_flags & EXTENT_BIO_COMPRESSED)
1586 return 0; 1586 return 0;
1587 1587
1588 length = bio->bi_size; 1588 length = bio->bi_iter.bi_size;
1589 map_length = length; 1589 map_length = length;
1590 ret = btrfs_map_block(root->fs_info, rw, logical, 1590 ret = btrfs_map_block(root->fs_info, rw, logical,
1591 &map_length, NULL, 0); 1591 &map_length, NULL, 0);
@@ -6779,17 +6779,16 @@ unlock_err:
6779static void btrfs_endio_direct_read(struct bio *bio, int err) 6779static void btrfs_endio_direct_read(struct bio *bio, int err)
6780{ 6780{
6781 struct btrfs_dio_private *dip = bio->bi_private; 6781 struct btrfs_dio_private *dip = bio->bi_private;
6782 struct bio_vec *bvec_end = bio->bi_io_vec + bio->bi_vcnt - 1; 6782 struct bio_vec *bvec;
6783 struct bio_vec *bvec = bio->bi_io_vec;
6784 struct inode *inode = dip->inode; 6783 struct inode *inode = dip->inode;
6785 struct btrfs_root *root = BTRFS_I(inode)->root; 6784 struct btrfs_root *root = BTRFS_I(inode)->root;
6786 struct bio *dio_bio; 6785 struct bio *dio_bio;
6787 u32 *csums = (u32 *)dip->csum; 6786 u32 *csums = (u32 *)dip->csum;
6788 int index = 0;
6789 u64 start; 6787 u64 start;
6788 int i;
6790 6789
6791 start = dip->logical_offset; 6790 start = dip->logical_offset;
6792 do { 6791 bio_for_each_segment_all(bvec, bio, i) {
6793 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) { 6792 if (!(BTRFS_I(inode)->flags & BTRFS_INODE_NODATASUM)) {
6794 struct page *page = bvec->bv_page; 6793 struct page *page = bvec->bv_page;
6795 char *kaddr; 6794 char *kaddr;
@@ -6805,18 +6804,16 @@ static void btrfs_endio_direct_read(struct bio *bio, int err)
6805 local_irq_restore(flags); 6804 local_irq_restore(flags);
6806 6805
6807 flush_dcache_page(bvec->bv_page); 6806 flush_dcache_page(bvec->bv_page);
6808 if (csum != csums[index]) { 6807 if (csum != csums[i]) {
6809 btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u", 6808 btrfs_err(root->fs_info, "csum failed ino %llu off %llu csum %u expected csum %u",
6810 btrfs_ino(inode), start, csum, 6809 btrfs_ino(inode), start, csum,
6811 csums[index]); 6810 csums[i]);
6812 err = -EIO; 6811 err = -EIO;
6813 } 6812 }
6814 } 6813 }
6815 6814
6816 start += bvec->bv_len; 6815 start += bvec->bv_len;
6817 bvec++; 6816 }
6818 index++;
6819 } while (bvec <= bvec_end);
6820 6817
6821 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset, 6818 unlock_extent(&BTRFS_I(inode)->io_tree, dip->logical_offset,
6822 dip->logical_offset + dip->bytes - 1); 6819 dip->logical_offset + dip->bytes - 1);
@@ -6897,7 +6894,8 @@ static void btrfs_end_dio_bio(struct bio *bio, int err)
6897 printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu " 6894 printk(KERN_ERR "btrfs direct IO failed ino %llu rw %lu "
6898 "sector %#Lx len %u err no %d\n", 6895 "sector %#Lx len %u err no %d\n",
6899 btrfs_ino(dip->inode), bio->bi_rw, 6896 btrfs_ino(dip->inode), bio->bi_rw,
6900 (unsigned long long)bio->bi_sector, bio->bi_size, err); 6897 (unsigned long long)bio->bi_iter.bi_sector,
6898 bio->bi_iter.bi_size, err);
6901 dip->errors = 1; 6899 dip->errors = 1;
6902 6900
6903 /* 6901 /*
@@ -6988,7 +6986,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6988 struct bio *bio; 6986 struct bio *bio;
6989 struct bio *orig_bio = dip->orig_bio; 6987 struct bio *orig_bio = dip->orig_bio;
6990 struct bio_vec *bvec = orig_bio->bi_io_vec; 6988 struct bio_vec *bvec = orig_bio->bi_io_vec;
6991 u64 start_sector = orig_bio->bi_sector; 6989 u64 start_sector = orig_bio->bi_iter.bi_sector;
6992 u64 file_offset = dip->logical_offset; 6990 u64 file_offset = dip->logical_offset;
6993 u64 submit_len = 0; 6991 u64 submit_len = 0;
6994 u64 map_length; 6992 u64 map_length;
@@ -6996,7 +6994,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
6996 int ret = 0; 6994 int ret = 0;
6997 int async_submit = 0; 6995 int async_submit = 0;
6998 6996
6999 map_length = orig_bio->bi_size; 6997 map_length = orig_bio->bi_iter.bi_size;
7000 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9, 6998 ret = btrfs_map_block(root->fs_info, rw, start_sector << 9,
7001 &map_length, NULL, 0); 6999 &map_length, NULL, 0);
7002 if (ret) { 7000 if (ret) {
@@ -7004,7 +7002,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
7004 return -EIO; 7002 return -EIO;
7005 } 7003 }
7006 7004
7007 if (map_length >= orig_bio->bi_size) { 7005 if (map_length >= orig_bio->bi_iter.bi_size) {
7008 bio = orig_bio; 7006 bio = orig_bio;
7009 goto submit; 7007 goto submit;
7010 } 7008 }
@@ -7056,7 +7054,7 @@ static int btrfs_submit_direct_hook(int rw, struct btrfs_dio_private *dip,
7056 bio->bi_private = dip; 7054 bio->bi_private = dip;
7057 bio->bi_end_io = btrfs_end_dio_bio; 7055 bio->bi_end_io = btrfs_end_dio_bio;
7058 7056
7059 map_length = orig_bio->bi_size; 7057 map_length = orig_bio->bi_iter.bi_size;
7060 ret = btrfs_map_block(root->fs_info, rw, 7058 ret = btrfs_map_block(root->fs_info, rw,
7061 start_sector << 9, 7059 start_sector << 9,
7062 &map_length, NULL, 0); 7060 &map_length, NULL, 0);
@@ -7114,7 +7112,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
7114 7112
7115 if (!skip_sum && !write) { 7113 if (!skip_sum && !write) {
7116 csum_size = btrfs_super_csum_size(root->fs_info->super_copy); 7114 csum_size = btrfs_super_csum_size(root->fs_info->super_copy);
7117 sum_len = dio_bio->bi_size >> inode->i_sb->s_blocksize_bits; 7115 sum_len = dio_bio->bi_iter.bi_size >>
7116 inode->i_sb->s_blocksize_bits;
7118 sum_len *= csum_size; 7117 sum_len *= csum_size;
7119 } else { 7118 } else {
7120 sum_len = 0; 7119 sum_len = 0;
@@ -7129,8 +7128,8 @@ static void btrfs_submit_direct(int rw, struct bio *dio_bio,
7129 dip->private = dio_bio->bi_private; 7128 dip->private = dio_bio->bi_private;
7130 dip->inode = inode; 7129 dip->inode = inode;
7131 dip->logical_offset = file_offset; 7130 dip->logical_offset = file_offset;
7132 dip->bytes = dio_bio->bi_size; 7131 dip->bytes = dio_bio->bi_iter.bi_size;
7133 dip->disk_bytenr = (u64)dio_bio->bi_sector << 9; 7132 dip->disk_bytenr = (u64)dio_bio->bi_iter.bi_sector << 9;
7134 io_bio->bi_private = dip; 7133 io_bio->bi_private = dip;
7135 dip->errors = 0; 7134 dip->errors = 0;
7136 dip->orig_bio = io_bio; 7135 dip->orig_bio = io_bio;
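The inode.c hunks above drop btrfs's hand-rolled walk over bi_io_vec (bvec++/index++ up to bvec_end) in favour of bio_for_each_segment_all(), and read the bio's position from bio->bi_iter instead of the old top-level fields. A minimal sketch of that completion-handler idiom, with a hypothetical handler name and none of the btrfs checksum logic:

#include <linux/bio.h>
#include <linux/pagemap.h>

/* hypothetical read-completion handler, not part of the patch */
static void my_read_end_io(struct bio *bio, int err)
{
        struct bio_vec *bvec;
        int i;

        /*
         * bio_for_each_segment_all() walks bi_io_vec[0..bi_vcnt) directly,
         * so it is only valid for the bio's owner, never for a bio that a
         * stacking driver may have split.
         */
        bio_for_each_segment_all(bvec, bio, i) {
                struct page *page = bvec->bv_page;

                if (err) {
                        ClearPageUptodate(page);
                        SetPageError(page);
                } else {
                        SetPageUptodate(page);
                }
                unlock_page(page);
        }

        bio_put(bio);
}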
diff --git a/fs/btrfs/raid56.c b/fs/btrfs/raid56.c
index 24ac21840a9a..9af0b25d991a 100644
--- a/fs/btrfs/raid56.c
+++ b/fs/btrfs/raid56.c
@@ -1032,8 +1032,8 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1032 1032
1033 /* see if we can add this page onto our existing bio */ 1033 /* see if we can add this page onto our existing bio */
1034 if (last) { 1034 if (last) {
1035 last_end = (u64)last->bi_sector << 9; 1035 last_end = (u64)last->bi_iter.bi_sector << 9;
1036 last_end += last->bi_size; 1036 last_end += last->bi_iter.bi_size;
1037 1037
1038 /* 1038 /*
1039 * we can't merge these if they are from different 1039 * we can't merge these if they are from different
@@ -1053,9 +1053,9 @@ static int rbio_add_io_page(struct btrfs_raid_bio *rbio,
1053 if (!bio) 1053 if (!bio)
1054 return -ENOMEM; 1054 return -ENOMEM;
1055 1055
1056 bio->bi_size = 0; 1056 bio->bi_iter.bi_size = 0;
1057 bio->bi_bdev = stripe->dev->bdev; 1057 bio->bi_bdev = stripe->dev->bdev;
1058 bio->bi_sector = disk_start >> 9; 1058 bio->bi_iter.bi_sector = disk_start >> 9;
1059 set_bit(BIO_UPTODATE, &bio->bi_flags); 1059 set_bit(BIO_UPTODATE, &bio->bi_flags);
1060 1060
1061 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 1061 bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
@@ -1111,7 +1111,7 @@ static void index_rbio_pages(struct btrfs_raid_bio *rbio)
1111 1111
1112 spin_lock_irq(&rbio->bio_list_lock); 1112 spin_lock_irq(&rbio->bio_list_lock);
1113 bio_list_for_each(bio, &rbio->bio_list) { 1113 bio_list_for_each(bio, &rbio->bio_list) {
1114 start = (u64)bio->bi_sector << 9; 1114 start = (u64)bio->bi_iter.bi_sector << 9;
1115 stripe_offset = start - rbio->raid_map[0]; 1115 stripe_offset = start - rbio->raid_map[0];
1116 page_index = stripe_offset >> PAGE_CACHE_SHIFT; 1116 page_index = stripe_offset >> PAGE_CACHE_SHIFT;
1117 1117
@@ -1272,7 +1272,7 @@ cleanup:
1272static int find_bio_stripe(struct btrfs_raid_bio *rbio, 1272static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1273 struct bio *bio) 1273 struct bio *bio)
1274{ 1274{
1275 u64 physical = bio->bi_sector; 1275 u64 physical = bio->bi_iter.bi_sector;
1276 u64 stripe_start; 1276 u64 stripe_start;
1277 int i; 1277 int i;
1278 struct btrfs_bio_stripe *stripe; 1278 struct btrfs_bio_stripe *stripe;
@@ -1298,7 +1298,7 @@ static int find_bio_stripe(struct btrfs_raid_bio *rbio,
1298static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio, 1298static int find_logical_bio_stripe(struct btrfs_raid_bio *rbio,
1299 struct bio *bio) 1299 struct bio *bio)
1300{ 1300{
1301 u64 logical = bio->bi_sector; 1301 u64 logical = bio->bi_iter.bi_sector;
1302 u64 stripe_start; 1302 u64 stripe_start;
1303 int i; 1303 int i;
1304 1304
@@ -1602,8 +1602,8 @@ static int plug_cmp(void *priv, struct list_head *a, struct list_head *b)
1602 plug_list); 1602 plug_list);
1603 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio, 1603 struct btrfs_raid_bio *rb = container_of(b, struct btrfs_raid_bio,
1604 plug_list); 1604 plug_list);
1605 u64 a_sector = ra->bio_list.head->bi_sector; 1605 u64 a_sector = ra->bio_list.head->bi_iter.bi_sector;
1606 u64 b_sector = rb->bio_list.head->bi_sector; 1606 u64 b_sector = rb->bio_list.head->bi_iter.bi_sector;
1607 1607
1608 if (a_sector < b_sector) 1608 if (a_sector < b_sector)
1609 return -1; 1609 return -1;
@@ -1691,7 +1691,7 @@ int raid56_parity_write(struct btrfs_root *root, struct bio *bio,
1691 if (IS_ERR(rbio)) 1691 if (IS_ERR(rbio))
1692 return PTR_ERR(rbio); 1692 return PTR_ERR(rbio);
1693 bio_list_add(&rbio->bio_list, bio); 1693 bio_list_add(&rbio->bio_list, bio);
1694 rbio->bio_list_bytes = bio->bi_size; 1694 rbio->bio_list_bytes = bio->bi_iter.bi_size;
1695 1695
1696 /* 1696 /*
1697 * don't plug on full rbios, just get them out the door 1697 * don't plug on full rbios, just get them out the door
@@ -2044,7 +2044,7 @@ int raid56_parity_recover(struct btrfs_root *root, struct bio *bio,
2044 2044
2045 rbio->read_rebuild = 1; 2045 rbio->read_rebuild = 1;
2046 bio_list_add(&rbio->bio_list, bio); 2046 bio_list_add(&rbio->bio_list, bio);
2047 rbio->bio_list_bytes = bio->bi_size; 2047 rbio->bio_list_bytes = bio->bi_iter.bi_size;
2048 2048
2049 rbio->faila = find_logical_bio_stripe(rbio, bio); 2049 rbio->faila = find_logical_bio_stripe(rbio, bio);
2050 if (rbio->faila == -1) { 2050 if (rbio->faila == -1) {
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index 1fd3f33c330a..bb9a928fa3a8 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -1308,7 +1308,7 @@ static void scrub_recheck_block(struct btrfs_fs_info *fs_info,
1308 continue; 1308 continue;
1309 } 1309 }
1310 bio->bi_bdev = page->dev->bdev; 1310 bio->bi_bdev = page->dev->bdev;
1311 bio->bi_sector = page->physical >> 9; 1311 bio->bi_iter.bi_sector = page->physical >> 9;
1312 1312
1313 bio_add_page(bio, page->page, PAGE_SIZE, 0); 1313 bio_add_page(bio, page->page, PAGE_SIZE, 0);
1314 if (btrfsic_submit_bio_wait(READ, bio)) 1314 if (btrfsic_submit_bio_wait(READ, bio))
@@ -1427,7 +1427,7 @@ static int scrub_repair_page_from_good_copy(struct scrub_block *sblock_bad,
1427 if (!bio) 1427 if (!bio)
1428 return -EIO; 1428 return -EIO;
1429 bio->bi_bdev = page_bad->dev->bdev; 1429 bio->bi_bdev = page_bad->dev->bdev;
1430 bio->bi_sector = page_bad->physical >> 9; 1430 bio->bi_iter.bi_sector = page_bad->physical >> 9;
1431 1431
1432 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0); 1432 ret = bio_add_page(bio, page_good->page, PAGE_SIZE, 0);
1433 if (PAGE_SIZE != ret) { 1433 if (PAGE_SIZE != ret) {
@@ -1520,7 +1520,7 @@ again:
1520 bio->bi_private = sbio; 1520 bio->bi_private = sbio;
1521 bio->bi_end_io = scrub_wr_bio_end_io; 1521 bio->bi_end_io = scrub_wr_bio_end_io;
1522 bio->bi_bdev = sbio->dev->bdev; 1522 bio->bi_bdev = sbio->dev->bdev;
1523 bio->bi_sector = sbio->physical >> 9; 1523 bio->bi_iter.bi_sector = sbio->physical >> 9;
1524 sbio->err = 0; 1524 sbio->err = 0;
1525 } else if (sbio->physical + sbio->page_count * PAGE_SIZE != 1525 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1526 spage->physical_for_dev_replace || 1526 spage->physical_for_dev_replace ||
@@ -1926,7 +1926,7 @@ again:
1926 bio->bi_private = sbio; 1926 bio->bi_private = sbio;
1927 bio->bi_end_io = scrub_bio_end_io; 1927 bio->bi_end_io = scrub_bio_end_io;
1928 bio->bi_bdev = sbio->dev->bdev; 1928 bio->bi_bdev = sbio->dev->bdev;
1929 bio->bi_sector = sbio->physical >> 9; 1929 bio->bi_iter.bi_sector = sbio->physical >> 9;
1930 sbio->err = 0; 1930 sbio->err = 0;
1931 } else if (sbio->physical + sbio->page_count * PAGE_SIZE != 1931 } else if (sbio->physical + sbio->page_count * PAGE_SIZE !=
1932 spage->physical || 1932 spage->physical ||
@@ -3371,8 +3371,8 @@ static int write_page_nocow(struct scrub_ctx *sctx,
3371 spin_unlock(&sctx->stat_lock); 3371 spin_unlock(&sctx->stat_lock);
3372 return -ENOMEM; 3372 return -ENOMEM;
3373 } 3373 }
3374 bio->bi_size = 0; 3374 bio->bi_iter.bi_size = 0;
3375 bio->bi_sector = physical_for_dev_replace >> 9; 3375 bio->bi_iter.bi_sector = physical_for_dev_replace >> 9;
3376 bio->bi_bdev = dev->bdev; 3376 bio->bi_bdev = dev->bdev;
3377 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0); 3377 ret = bio_add_page(bio, page, PAGE_CACHE_SIZE, 0);
3378 if (ret != PAGE_CACHE_SIZE) { 3378 if (ret != PAGE_CACHE_SIZE) {
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 92303f42baaa..37972d5db737 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -5297,6 +5297,8 @@ static void btrfs_end_bio(struct bio *bio, int err)
5297 if (!is_orig_bio) { 5297 if (!is_orig_bio) {
5298 bio_put(bio); 5298 bio_put(bio);
5299 bio = bbio->orig_bio; 5299 bio = bbio->orig_bio;
5300 } else {
5301 atomic_inc(&bio->bi_remaining);
5300 } 5302 }
5301 bio->bi_private = bbio->private; 5303 bio->bi_private = bbio->private;
5302 bio->bi_end_io = bbio->end_io; 5304 bio->bi_end_io = bbio->end_io;
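The two lines added to btrfs_end_bio() lean on the new chained-completion accounting: bio_endio() now decrements bi_remaining and only invokes bi_end_io once the count reaches zero, so a handler that hands the same bio to bio_endio() a second time must restore the count first (or use bio_endio_nodec()). A rough sketch of that pattern, not the btrfs code itself:

#include <linux/bio.h>

/* hypothetical endio handler that re-completes the bio it was called on */
static void my_end_io(struct bio *bio, int err)
{
        /* ... restore bio->bi_end_io / bio->bi_private for the upper layer ... */

        /*
         * bio_endio() already dropped bi_remaining to zero on the way into
         * this handler; bump it back so the second bio_endio() completes
         * the bio instead of underflowing the count.
         */
        atomic_inc(&bio->bi_remaining);
        bio_endio(bio, err);
}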
@@ -5411,7 +5413,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
5411 if (!q->merge_bvec_fn) 5413 if (!q->merge_bvec_fn)
5412 return 1; 5414 return 1;
5413 5415
5414 bvm.bi_size = bio->bi_size - prev->bv_len; 5416 bvm.bi_size = bio->bi_iter.bi_size - prev->bv_len;
5415 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len) 5417 if (q->merge_bvec_fn(q, &bvm, prev) < prev->bv_len)
5416 return 0; 5418 return 0;
5417 return 1; 5419 return 1;
@@ -5426,7 +5428,7 @@ static void submit_stripe_bio(struct btrfs_root *root, struct btrfs_bio *bbio,
5426 bio->bi_private = bbio; 5428 bio->bi_private = bbio;
5427 btrfs_io_bio(bio)->stripe_index = dev_nr; 5429 btrfs_io_bio(bio)->stripe_index = dev_nr;
5428 bio->bi_end_io = btrfs_end_bio; 5430 bio->bi_end_io = btrfs_end_bio;
5429 bio->bi_sector = physical >> 9; 5431 bio->bi_iter.bi_sector = physical >> 9;
5430#ifdef DEBUG 5432#ifdef DEBUG
5431 { 5433 {
5432 struct rcu_string *name; 5434 struct rcu_string *name;
@@ -5464,7 +5466,7 @@ again:
5464 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) { 5466 while (bvec <= (first_bio->bi_io_vec + first_bio->bi_vcnt - 1)) {
5465 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len, 5467 if (bio_add_page(bio, bvec->bv_page, bvec->bv_len,
5466 bvec->bv_offset) < bvec->bv_len) { 5468 bvec->bv_offset) < bvec->bv_len) {
5467 u64 len = bio->bi_size; 5469 u64 len = bio->bi_iter.bi_size;
5468 5470
5469 atomic_inc(&bbio->stripes_pending); 5471 atomic_inc(&bbio->stripes_pending);
5470 submit_stripe_bio(root, bbio, bio, physical, dev_nr, 5472 submit_stripe_bio(root, bbio, bio, physical, dev_nr,
@@ -5486,7 +5488,7 @@ static void bbio_error(struct btrfs_bio *bbio, struct bio *bio, u64 logical)
5486 bio->bi_private = bbio->private; 5488 bio->bi_private = bbio->private;
5487 bio->bi_end_io = bbio->end_io; 5489 bio->bi_end_io = bbio->end_io;
5488 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num; 5490 btrfs_io_bio(bio)->mirror_num = bbio->mirror_num;
5489 bio->bi_sector = logical >> 9; 5491 bio->bi_iter.bi_sector = logical >> 9;
5490 kfree(bbio); 5492 kfree(bbio);
5491 bio_endio(bio, -EIO); 5493 bio_endio(bio, -EIO);
5492 } 5494 }
@@ -5497,7 +5499,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5497{ 5499{
5498 struct btrfs_device *dev; 5500 struct btrfs_device *dev;
5499 struct bio *first_bio = bio; 5501 struct bio *first_bio = bio;
5500 u64 logical = (u64)bio->bi_sector << 9; 5502 u64 logical = (u64)bio->bi_iter.bi_sector << 9;
5501 u64 length = 0; 5503 u64 length = 0;
5502 u64 map_length; 5504 u64 map_length;
5503 u64 *raid_map = NULL; 5505 u64 *raid_map = NULL;
@@ -5506,7 +5508,7 @@ int btrfs_map_bio(struct btrfs_root *root, int rw, struct bio *bio,
5506 int total_devs = 1; 5508 int total_devs = 1;
5507 struct btrfs_bio *bbio = NULL; 5509 struct btrfs_bio *bbio = NULL;
5508 5510
5509 length = bio->bi_size; 5511 length = bio->bi_iter.bi_size;
5510 map_length = length; 5512 map_length = length;
5511 5513
5512 ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio, 5514 ret = __btrfs_map_block(root->fs_info, rw, logical, &map_length, &bbio,
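btrfs_map_bio() above now derives the bio's byte range from the iterator rather than from the removed bi_sector/bi_size fields; the same two expressions in isolation, as a trivial sketch:

#include <linux/bio.h>
#include <linux/kernel.h>

static void my_log_bio_range(struct bio *bio)
{
        u64 start = (u64)bio->bi_iter.bi_sector << 9;   /* byte offset on disk */
        u64 len = bio->bi_iter.bi_size;                 /* bytes still to transfer */

        pr_debug("bio covers [%llu, %llu)\n",
                 (unsigned long long)start,
                 (unsigned long long)(start + len));
}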
diff --git a/fs/buffer.c b/fs/buffer.c
index 6024877335ca..1c04ec66974e 100644
--- a/fs/buffer.c
+++ b/fs/buffer.c
@@ -2982,11 +2982,11 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
2982 * let it through, and the IO layer will turn it into 2982 * let it through, and the IO layer will turn it into
2983 * an EIO. 2983 * an EIO.
2984 */ 2984 */
2985 if (unlikely(bio->bi_sector >= maxsector)) 2985 if (unlikely(bio->bi_iter.bi_sector >= maxsector))
2986 return; 2986 return;
2987 2987
2988 maxsector -= bio->bi_sector; 2988 maxsector -= bio->bi_iter.bi_sector;
2989 bytes = bio->bi_size; 2989 bytes = bio->bi_iter.bi_size;
2990 if (likely((bytes >> 9) <= maxsector)) 2990 if (likely((bytes >> 9) <= maxsector))
2991 return; 2991 return;
2992 2992
@@ -2994,7 +2994,7 @@ static void guard_bh_eod(int rw, struct bio *bio, struct buffer_head *bh)
2994 bytes = maxsector << 9; 2994 bytes = maxsector << 9;
2995 2995
2996 /* Truncate the bio.. */ 2996 /* Truncate the bio.. */
2997 bio->bi_size = bytes; 2997 bio->bi_iter.bi_size = bytes;
2998 bio->bi_io_vec[0].bv_len = bytes; 2998 bio->bi_io_vec[0].bv_len = bytes;
2999 2999
3000 /* ..and clear the end of the buffer for reads */ 3000 /* ..and clear the end of the buffer for reads */
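_submit_bh() above fills in a single-segment bio field by field; an equivalent one-page synchronous read can be built more simply with the renamed fields. A sketch with hypothetical arguments and no error handling:

#include <linux/bio.h>

static int my_read_one_page(struct block_device *bdev, struct page *page,
                            sector_t sector)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int ret;

        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;        /* was bio->bi_sector */
        bio_add_page(bio, page, PAGE_SIZE, 0);  /* accounted into bi_iter.bi_size */

        ret = submit_bio_wait(READ, bio);       /* 0 on success, -Exxx on error */
        bio_put(bio);
        return ret;
}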
@@ -3029,14 +3029,14 @@ int _submit_bh(int rw, struct buffer_head *bh, unsigned long bio_flags)
3029 */ 3029 */
3030 bio = bio_alloc(GFP_NOIO, 1); 3030 bio = bio_alloc(GFP_NOIO, 1);
3031 3031
3032 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 3032 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
3033 bio->bi_bdev = bh->b_bdev; 3033 bio->bi_bdev = bh->b_bdev;
3034 bio->bi_io_vec[0].bv_page = bh->b_page; 3034 bio->bi_io_vec[0].bv_page = bh->b_page;
3035 bio->bi_io_vec[0].bv_len = bh->b_size; 3035 bio->bi_io_vec[0].bv_len = bh->b_size;
3036 bio->bi_io_vec[0].bv_offset = bh_offset(bh); 3036 bio->bi_io_vec[0].bv_offset = bh_offset(bh);
3037 3037
3038 bio->bi_vcnt = 1; 3038 bio->bi_vcnt = 1;
3039 bio->bi_size = bh->b_size; 3039 bio->bi_iter.bi_size = bh->b_size;
3040 3040
3041 bio->bi_end_io = end_bio_bh_io_sync; 3041 bio->bi_end_io = end_bio_bh_io_sync;
3042 bio->bi_private = bh; 3042 bio->bi_private = bh;
diff --git a/fs/direct-io.c b/fs/direct-io.c
index 0e04142d5962..160a5489a939 100644
--- a/fs/direct-io.c
+++ b/fs/direct-io.c
@@ -375,7 +375,7 @@ dio_bio_alloc(struct dio *dio, struct dio_submit *sdio,
375 bio = bio_alloc(GFP_KERNEL, nr_vecs); 375 bio = bio_alloc(GFP_KERNEL, nr_vecs);
376 376
377 bio->bi_bdev = bdev; 377 bio->bi_bdev = bdev;
378 bio->bi_sector = first_sector; 378 bio->bi_iter.bi_sector = first_sector;
379 if (dio->is_async) 379 if (dio->is_async)
380 bio->bi_end_io = dio_bio_end_aio; 380 bio->bi_end_io = dio_bio_end_aio;
381 else 381 else
@@ -719,7 +719,7 @@ static inline int dio_send_cur_page(struct dio *dio, struct dio_submit *sdio,
719 if (sdio->bio) { 719 if (sdio->bio) {
720 loff_t cur_offset = sdio->cur_page_fs_offset; 720 loff_t cur_offset = sdio->cur_page_fs_offset;
721 loff_t bio_next_offset = sdio->logical_offset_in_bio + 721 loff_t bio_next_offset = sdio->logical_offset_in_bio +
722 sdio->bio->bi_size; 722 sdio->bio->bi_iter.bi_size;
723 723
724 /* 724 /*
725 * See whether this new request is contiguous with the old. 725 * See whether this new request is contiguous with the old.
diff --git a/fs/ext4/page-io.c b/fs/ext4/page-io.c
index d488f80ee32d..ab95508e3d40 100644
--- a/fs/ext4/page-io.c
+++ b/fs/ext4/page-io.c
@@ -65,9 +65,9 @@ static void ext4_finish_bio(struct bio *bio)
65{ 65{
66 int i; 66 int i;
67 int error = !test_bit(BIO_UPTODATE, &bio->bi_flags); 67 int error = !test_bit(BIO_UPTODATE, &bio->bi_flags);
68 struct bio_vec *bvec;
68 69
69 for (i = 0; i < bio->bi_vcnt; i++) { 70 bio_for_each_segment_all(bvec, bio, i) {
70 struct bio_vec *bvec = &bio->bi_io_vec[i];
71 struct page *page = bvec->bv_page; 71 struct page *page = bvec->bv_page;
72 struct buffer_head *bh, *head; 72 struct buffer_head *bh, *head;
73 unsigned bio_start = bvec->bv_offset; 73 unsigned bio_start = bvec->bv_offset;
@@ -298,7 +298,7 @@ ext4_io_end_t *ext4_get_io_end(ext4_io_end_t *io_end)
298static void ext4_end_bio(struct bio *bio, int error) 298static void ext4_end_bio(struct bio *bio, int error)
299{ 299{
300 ext4_io_end_t *io_end = bio->bi_private; 300 ext4_io_end_t *io_end = bio->bi_private;
301 sector_t bi_sector = bio->bi_sector; 301 sector_t bi_sector = bio->bi_iter.bi_sector;
302 302
303 BUG_ON(!io_end); 303 BUG_ON(!io_end);
304 bio->bi_end_io = NULL; 304 bio->bi_end_io = NULL;
@@ -366,7 +366,7 @@ static int io_submit_init_bio(struct ext4_io_submit *io,
366 bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES)); 366 bio = bio_alloc(GFP_NOIO, min(nvecs, BIO_MAX_PAGES));
367 if (!bio) 367 if (!bio)
368 return -ENOMEM; 368 return -ENOMEM;
369 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 369 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
370 bio->bi_bdev = bh->b_bdev; 370 bio->bi_bdev = bh->b_bdev;
371 bio->bi_end_io = ext4_end_bio; 371 bio->bi_end_io = ext4_end_bio;
372 bio->bi_private = ext4_get_io_end(io->io_end); 372 bio->bi_private = ext4_get_io_end(io->io_end);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index aa3438c571fa..a2c8de8ba6ce 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -351,23 +351,20 @@ repeat:
351 351
352static void read_end_io(struct bio *bio, int err) 352static void read_end_io(struct bio *bio, int err)
353{ 353{
354 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 354 struct bio_vec *bvec;
355 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 355 int i;
356 356
357 do { 357 bio_for_each_segment_all(bvec, bio, i) {
358 struct page *page = bvec->bv_page; 358 struct page *page = bvec->bv_page;
359 359
360 if (--bvec >= bio->bi_io_vec) 360 if (!err) {
361 prefetchw(&bvec->bv_page->flags);
362
363 if (uptodate) {
364 SetPageUptodate(page); 361 SetPageUptodate(page);
365 } else { 362 } else {
366 ClearPageUptodate(page); 363 ClearPageUptodate(page);
367 SetPageError(page); 364 SetPageError(page);
368 } 365 }
369 unlock_page(page); 366 unlock_page(page);
370 } while (bvec >= bio->bi_io_vec); 367 }
371 bio_put(bio); 368 bio_put(bio);
372} 369}
373 370
@@ -389,7 +386,7 @@ int f2fs_readpage(struct f2fs_sb_info *sbi, struct page *page,
389 bio = f2fs_bio_alloc(bdev, 1); 386 bio = f2fs_bio_alloc(bdev, 1);
390 387
391 /* Initialize the bio */ 388 /* Initialize the bio */
392 bio->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); 389 bio->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
393 bio->bi_end_io = read_end_io; 390 bio->bi_end_io = read_end_io;
394 391
395 if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) { 392 if (bio_add_page(bio, page, PAGE_CACHE_SIZE, 0) < PAGE_CACHE_SIZE) {
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index fa284d397199..36e8afd8e1e4 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -575,16 +575,14 @@ static const struct segment_allocation default_salloc_ops = {
575 575
576static void f2fs_end_io_write(struct bio *bio, int err) 576static void f2fs_end_io_write(struct bio *bio, int err)
577{ 577{
578 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
579 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1;
580 struct bio_private *p = bio->bi_private; 578 struct bio_private *p = bio->bi_private;
579 struct bio_vec *bvec;
580 int i;
581 581
582 do { 582 bio_for_each_segment_all(bvec, bio, i) {
583 struct page *page = bvec->bv_page; 583 struct page *page = bvec->bv_page;
584 584
585 if (--bvec >= bio->bi_io_vec) 585 if (err) {
586 prefetchw(&bvec->bv_page->flags);
587 if (!uptodate) {
588 SetPageError(page); 586 SetPageError(page);
589 if (page->mapping) 587 if (page->mapping)
590 set_bit(AS_EIO, &page->mapping->flags); 588 set_bit(AS_EIO, &page->mapping->flags);
@@ -593,7 +591,7 @@ static void f2fs_end_io_write(struct bio *bio, int err)
593 } 591 }
594 end_page_writeback(page); 592 end_page_writeback(page);
595 dec_page_count(p->sbi, F2FS_WRITEBACK); 593 dec_page_count(p->sbi, F2FS_WRITEBACK);
596 } while (bvec >= bio->bi_io_vec); 594 }
597 595
598 if (p->is_sync) 596 if (p->is_sync)
599 complete(p->wait); 597 complete(p->wait);
@@ -684,7 +682,7 @@ retry:
684 682
685 bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi)); 683 bio_blocks = MAX_BIO_BLOCKS(max_hw_blocks(sbi));
686 sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks); 684 sbi->bio[type] = f2fs_bio_alloc(bdev, bio_blocks);
687 sbi->bio[type]->bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr); 685 sbi->bio[type]->bi_iter.bi_sector = SECTOR_FROM_BLOCK(sbi, blk_addr);
688 sbi->bio[type]->bi_private = priv; 686 sbi->bio[type]->bi_private = priv;
689 /* 687 /*
 690 * The end_io will be assigned at the submission phase. 688
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 010b9fb9fec6..985da945f0b5 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -272,7 +272,7 @@ static struct bio *gfs2_log_alloc_bio(struct gfs2_sbd *sdp, u64 blkno)
272 nrvecs = max(nrvecs/2, 1U); 272 nrvecs = max(nrvecs/2, 1U);
273 } 273 }
274 274
275 bio->bi_sector = blkno * (sb->s_blocksize >> 9); 275 bio->bi_iter.bi_sector = blkno * (sb->s_blocksize >> 9);
276 bio->bi_bdev = sb->s_bdev; 276 bio->bi_bdev = sb->s_bdev;
277 bio->bi_end_io = gfs2_end_log_write; 277 bio->bi_end_io = gfs2_end_log_write;
278 bio->bi_private = sdp; 278 bio->bi_private = sdp;
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index 82303b474958..16194da91652 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -224,7 +224,7 @@ static int gfs2_read_super(struct gfs2_sbd *sdp, sector_t sector, int silent)
224 lock_page(page); 224 lock_page(page);
225 225
226 bio = bio_alloc(GFP_NOFS, 1); 226 bio = bio_alloc(GFP_NOFS, 1);
227 bio->bi_sector = sector * (sb->s_blocksize >> 9); 227 bio->bi_iter.bi_sector = sector * (sb->s_blocksize >> 9);
228 bio->bi_bdev = sb->s_bdev; 228 bio->bi_bdev = sb->s_bdev;
229 bio_add_page(bio, page, PAGE_SIZE, 0); 229 bio_add_page(bio, page, PAGE_SIZE, 0);
230 230
diff --git a/fs/hfsplus/wrapper.c b/fs/hfsplus/wrapper.c
index e9a97a0d4314..3f999649587f 100644
--- a/fs/hfsplus/wrapper.c
+++ b/fs/hfsplus/wrapper.c
@@ -63,7 +63,7 @@ int hfsplus_submit_bio(struct super_block *sb, sector_t sector,
63 sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1); 63 sector &= ~((io_size >> HFSPLUS_SECTOR_SHIFT) - 1);
64 64
65 bio = bio_alloc(GFP_NOIO, 1); 65 bio = bio_alloc(GFP_NOIO, 1);
66 bio->bi_sector = sector; 66 bio->bi_iter.bi_sector = sector;
67 bio->bi_bdev = sb->s_bdev; 67 bio->bi_bdev = sb->s_bdev;
68 68
69 if (!(rw & WRITE) && data) 69 if (!(rw & WRITE) && data)
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 360d27c48887..8d811e02b4b9 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -1998,20 +1998,20 @@ static int lbmRead(struct jfs_log * log, int pn, struct lbuf ** bpp)
1998 1998
1999 bio = bio_alloc(GFP_NOFS, 1); 1999 bio = bio_alloc(GFP_NOFS, 1);
2000 2000
2001 bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); 2001 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
2002 bio->bi_bdev = log->bdev; 2002 bio->bi_bdev = log->bdev;
2003 bio->bi_io_vec[0].bv_page = bp->l_page; 2003 bio->bi_io_vec[0].bv_page = bp->l_page;
2004 bio->bi_io_vec[0].bv_len = LOGPSIZE; 2004 bio->bi_io_vec[0].bv_len = LOGPSIZE;
2005 bio->bi_io_vec[0].bv_offset = bp->l_offset; 2005 bio->bi_io_vec[0].bv_offset = bp->l_offset;
2006 2006
2007 bio->bi_vcnt = 1; 2007 bio->bi_vcnt = 1;
2008 bio->bi_size = LOGPSIZE; 2008 bio->bi_iter.bi_size = LOGPSIZE;
2009 2009
2010 bio->bi_end_io = lbmIODone; 2010 bio->bi_end_io = lbmIODone;
2011 bio->bi_private = bp; 2011 bio->bi_private = bp;
2012 /*check if journaling to disk has been disabled*/ 2012 /*check if journaling to disk has been disabled*/
2013 if (log->no_integrity) { 2013 if (log->no_integrity) {
2014 bio->bi_size = 0; 2014 bio->bi_iter.bi_size = 0;
2015 lbmIODone(bio, 0); 2015 lbmIODone(bio, 0);
2016 } else { 2016 } else {
2017 submit_bio(READ_SYNC, bio); 2017 submit_bio(READ_SYNC, bio);
@@ -2144,21 +2144,21 @@ static void lbmStartIO(struct lbuf * bp)
2144 jfs_info("lbmStartIO\n"); 2144 jfs_info("lbmStartIO\n");
2145 2145
2146 bio = bio_alloc(GFP_NOFS, 1); 2146 bio = bio_alloc(GFP_NOFS, 1);
2147 bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); 2147 bio->bi_iter.bi_sector = bp->l_blkno << (log->l2bsize - 9);
2148 bio->bi_bdev = log->bdev; 2148 bio->bi_bdev = log->bdev;
2149 bio->bi_io_vec[0].bv_page = bp->l_page; 2149 bio->bi_io_vec[0].bv_page = bp->l_page;
2150 bio->bi_io_vec[0].bv_len = LOGPSIZE; 2150 bio->bi_io_vec[0].bv_len = LOGPSIZE;
2151 bio->bi_io_vec[0].bv_offset = bp->l_offset; 2151 bio->bi_io_vec[0].bv_offset = bp->l_offset;
2152 2152
2153 bio->bi_vcnt = 1; 2153 bio->bi_vcnt = 1;
2154 bio->bi_size = LOGPSIZE; 2154 bio->bi_iter.bi_size = LOGPSIZE;
2155 2155
2156 bio->bi_end_io = lbmIODone; 2156 bio->bi_end_io = lbmIODone;
2157 bio->bi_private = bp; 2157 bio->bi_private = bp;
2158 2158
2159 /* check if journaling to disk has been disabled */ 2159 /* check if journaling to disk has been disabled */
2160 if (log->no_integrity) { 2160 if (log->no_integrity) {
2161 bio->bi_size = 0; 2161 bio->bi_iter.bi_size = 0;
2162 lbmIODone(bio, 0); 2162 lbmIODone(bio, 0);
2163 } else { 2163 } else {
2164 submit_bio(WRITE_SYNC, bio); 2164 submit_bio(WRITE_SYNC, bio);
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index d165cde0c68d..49ba7ff1bbb9 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -416,7 +416,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
416 * count from hitting zero before we're through 416 * count from hitting zero before we're through
417 */ 417 */
418 inc_io(page); 418 inc_io(page);
419 if (!bio->bi_size) 419 if (!bio->bi_iter.bi_size)
420 goto dump_bio; 420 goto dump_bio;
421 submit_bio(WRITE, bio); 421 submit_bio(WRITE, bio);
422 nr_underway++; 422 nr_underway++;
@@ -438,7 +438,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
438 438
439 bio = bio_alloc(GFP_NOFS, 1); 439 bio = bio_alloc(GFP_NOFS, 1);
440 bio->bi_bdev = inode->i_sb->s_bdev; 440 bio->bi_bdev = inode->i_sb->s_bdev;
441 bio->bi_sector = pblock << (inode->i_blkbits - 9); 441 bio->bi_iter.bi_sector = pblock << (inode->i_blkbits - 9);
442 bio->bi_end_io = metapage_write_end_io; 442 bio->bi_end_io = metapage_write_end_io;
443 bio->bi_private = page; 443 bio->bi_private = page;
444 444
@@ -452,7 +452,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
452 if (bio) { 452 if (bio) {
453 if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes) 453 if (bio_add_page(bio, page, bio_bytes, bio_offset) < bio_bytes)
454 goto add_failed; 454 goto add_failed;
455 if (!bio->bi_size) 455 if (!bio->bi_iter.bi_size)
456 goto dump_bio; 456 goto dump_bio;
457 457
458 submit_bio(WRITE, bio); 458 submit_bio(WRITE, bio);
@@ -517,7 +517,8 @@ static int metapage_readpage(struct file *fp, struct page *page)
517 517
518 bio = bio_alloc(GFP_NOFS, 1); 518 bio = bio_alloc(GFP_NOFS, 1);
519 bio->bi_bdev = inode->i_sb->s_bdev; 519 bio->bi_bdev = inode->i_sb->s_bdev;
520 bio->bi_sector = pblock << (inode->i_blkbits - 9); 520 bio->bi_iter.bi_sector =
521 pblock << (inode->i_blkbits - 9);
521 bio->bi_end_io = metapage_read_end_io; 522 bio->bi_end_io = metapage_read_end_io;
522 bio->bi_private = page; 523 bio->bi_private = page;
523 len = xlen << inode->i_blkbits; 524 len = xlen << inode->i_blkbits;
diff --git a/fs/logfs/dev_bdev.c b/fs/logfs/dev_bdev.c
index 0f95f0d0b313..76279e11982d 100644
--- a/fs/logfs/dev_bdev.c
+++ b/fs/logfs/dev_bdev.c
@@ -26,9 +26,9 @@ static int sync_request(struct page *page, struct block_device *bdev, int rw)
26 bio_vec.bv_len = PAGE_SIZE; 26 bio_vec.bv_len = PAGE_SIZE;
27 bio_vec.bv_offset = 0; 27 bio_vec.bv_offset = 0;
28 bio.bi_vcnt = 1; 28 bio.bi_vcnt = 1;
29 bio.bi_size = PAGE_SIZE;
30 bio.bi_bdev = bdev; 29 bio.bi_bdev = bdev;
31 bio.bi_sector = page->index * (PAGE_SIZE >> 9); 30 bio.bi_iter.bi_sector = page->index * (PAGE_SIZE >> 9);
31 bio.bi_iter.bi_size = PAGE_SIZE;
32 32
33 return submit_bio_wait(rw, &bio); 33 return submit_bio_wait(rw, &bio);
34} 34}
@@ -56,22 +56,18 @@ static DECLARE_WAIT_QUEUE_HEAD(wq);
56static void writeseg_end_io(struct bio *bio, int err) 56static void writeseg_end_io(struct bio *bio, int err)
57{ 57{
58 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 58 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
59 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 59 struct bio_vec *bvec;
60 int i;
60 struct super_block *sb = bio->bi_private; 61 struct super_block *sb = bio->bi_private;
61 struct logfs_super *super = logfs_super(sb); 62 struct logfs_super *super = logfs_super(sb);
62 struct page *page;
63 63
64 BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */ 64 BUG_ON(!uptodate); /* FIXME: Retry io or write elsewhere */
65 BUG_ON(err); 65 BUG_ON(err);
66 BUG_ON(bio->bi_vcnt == 0); 66
67 do { 67 bio_for_each_segment_all(bvec, bio, i) {
68 page = bvec->bv_page; 68 end_page_writeback(bvec->bv_page);
69 if (--bvec >= bio->bi_io_vec) 69 page_cache_release(bvec->bv_page);
70 prefetchw(&bvec->bv_page->flags); 70 }
71
72 end_page_writeback(page);
73 page_cache_release(page);
74 } while (bvec >= bio->bi_io_vec);
75 bio_put(bio); 71 bio_put(bio);
76 if (atomic_dec_and_test(&super->s_pending_writes)) 72 if (atomic_dec_and_test(&super->s_pending_writes))
77 wake_up(&wq); 73 wake_up(&wq);
@@ -96,9 +92,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
96 if (i >= max_pages) { 92 if (i >= max_pages) {
97 /* Block layer cannot split bios :( */ 93 /* Block layer cannot split bios :( */
98 bio->bi_vcnt = i; 94 bio->bi_vcnt = i;
99 bio->bi_size = i * PAGE_SIZE; 95 bio->bi_iter.bi_size = i * PAGE_SIZE;
100 bio->bi_bdev = super->s_bdev; 96 bio->bi_bdev = super->s_bdev;
101 bio->bi_sector = ofs >> 9; 97 bio->bi_iter.bi_sector = ofs >> 9;
102 bio->bi_private = sb; 98 bio->bi_private = sb;
103 bio->bi_end_io = writeseg_end_io; 99 bio->bi_end_io = writeseg_end_io;
104 atomic_inc(&super->s_pending_writes); 100 atomic_inc(&super->s_pending_writes);
@@ -123,9 +119,9 @@ static int __bdev_writeseg(struct super_block *sb, u64 ofs, pgoff_t index,
123 unlock_page(page); 119 unlock_page(page);
124 } 120 }
125 bio->bi_vcnt = nr_pages; 121 bio->bi_vcnt = nr_pages;
126 bio->bi_size = nr_pages * PAGE_SIZE; 122 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
127 bio->bi_bdev = super->s_bdev; 123 bio->bi_bdev = super->s_bdev;
128 bio->bi_sector = ofs >> 9; 124 bio->bi_iter.bi_sector = ofs >> 9;
129 bio->bi_private = sb; 125 bio->bi_private = sb;
130 bio->bi_end_io = writeseg_end_io; 126 bio->bi_end_io = writeseg_end_io;
131 atomic_inc(&super->s_pending_writes); 127 atomic_inc(&super->s_pending_writes);
@@ -188,9 +184,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
188 if (i >= max_pages) { 184 if (i >= max_pages) {
189 /* Block layer cannot split bios :( */ 185 /* Block layer cannot split bios :( */
190 bio->bi_vcnt = i; 186 bio->bi_vcnt = i;
191 bio->bi_size = i * PAGE_SIZE; 187 bio->bi_iter.bi_size = i * PAGE_SIZE;
192 bio->bi_bdev = super->s_bdev; 188 bio->bi_bdev = super->s_bdev;
193 bio->bi_sector = ofs >> 9; 189 bio->bi_iter.bi_sector = ofs >> 9;
194 bio->bi_private = sb; 190 bio->bi_private = sb;
195 bio->bi_end_io = erase_end_io; 191 bio->bi_end_io = erase_end_io;
196 atomic_inc(&super->s_pending_writes); 192 atomic_inc(&super->s_pending_writes);
@@ -209,9 +205,9 @@ static int do_erase(struct super_block *sb, u64 ofs, pgoff_t index,
209 bio->bi_io_vec[i].bv_offset = 0; 205 bio->bi_io_vec[i].bv_offset = 0;
210 } 206 }
211 bio->bi_vcnt = nr_pages; 207 bio->bi_vcnt = nr_pages;
212 bio->bi_size = nr_pages * PAGE_SIZE; 208 bio->bi_iter.bi_size = nr_pages * PAGE_SIZE;
213 bio->bi_bdev = super->s_bdev; 209 bio->bi_bdev = super->s_bdev;
214 bio->bi_sector = ofs >> 9; 210 bio->bi_iter.bi_sector = ofs >> 9;
215 bio->bi_private = sb; 211 bio->bi_private = sb;
216 bio->bi_end_io = erase_end_io; 212 bio->bi_end_io = erase_end_io;
217 atomic_inc(&super->s_pending_writes); 213 atomic_inc(&super->s_pending_writes);
diff --git a/fs/mpage.c b/fs/mpage.c
index 0face1c4d4c6..4979ffa60aaa 100644
--- a/fs/mpage.c
+++ b/fs/mpage.c
@@ -43,16 +43,14 @@
43 */ 43 */
44static void mpage_end_io(struct bio *bio, int err) 44static void mpage_end_io(struct bio *bio, int err)
45{ 45{
46 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 46 struct bio_vec *bv;
47 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 47 int i;
48 48
49 do { 49 bio_for_each_segment_all(bv, bio, i) {
50 struct page *page = bvec->bv_page; 50 struct page *page = bv->bv_page;
51 51
52 if (--bvec >= bio->bi_io_vec)
53 prefetchw(&bvec->bv_page->flags);
54 if (bio_data_dir(bio) == READ) { 52 if (bio_data_dir(bio) == READ) {
55 if (uptodate) { 53 if (!err) {
56 SetPageUptodate(page); 54 SetPageUptodate(page);
57 } else { 55 } else {
58 ClearPageUptodate(page); 56 ClearPageUptodate(page);
@@ -60,14 +58,15 @@ static void mpage_end_io(struct bio *bio, int err)
60 } 58 }
61 unlock_page(page); 59 unlock_page(page);
62 } else { /* bio_data_dir(bio) == WRITE */ 60 } else { /* bio_data_dir(bio) == WRITE */
63 if (!uptodate) { 61 if (err) {
64 SetPageError(page); 62 SetPageError(page);
65 if (page->mapping) 63 if (page->mapping)
66 set_bit(AS_EIO, &page->mapping->flags); 64 set_bit(AS_EIO, &page->mapping->flags);
67 } 65 }
68 end_page_writeback(page); 66 end_page_writeback(page);
69 } 67 }
70 } while (bvec >= bio->bi_io_vec); 68 }
69
71 bio_put(bio); 70 bio_put(bio);
72} 71}
73 72
@@ -94,7 +93,7 @@ mpage_alloc(struct block_device *bdev,
94 93
95 if (bio) { 94 if (bio) {
96 bio->bi_bdev = bdev; 95 bio->bi_bdev = bdev;
97 bio->bi_sector = first_sector; 96 bio->bi_iter.bi_sector = first_sector;
98 } 97 }
99 return bio; 98 return bio;
100} 99}
diff --git a/fs/nfs/blocklayout/blocklayout.c b/fs/nfs/blocklayout/blocklayout.c
index e242bbf72972..56ff823ca82e 100644
--- a/fs/nfs/blocklayout/blocklayout.c
+++ b/fs/nfs/blocklayout/blocklayout.c
@@ -134,8 +134,8 @@ bl_submit_bio(int rw, struct bio *bio)
134 if (bio) { 134 if (bio) {
135 get_parallel(bio->bi_private); 135 get_parallel(bio->bi_private);
136 dprintk("%s submitting %s bio %u@%llu\n", __func__, 136 dprintk("%s submitting %s bio %u@%llu\n", __func__,
137 rw == READ ? "read" : "write", 137 rw == READ ? "read" : "write", bio->bi_iter.bi_size,
138 bio->bi_size, (unsigned long long)bio->bi_sector); 138 (unsigned long long)bio->bi_iter.bi_sector);
139 submit_bio(rw, bio); 139 submit_bio(rw, bio);
140 } 140 }
141 return NULL; 141 return NULL;
@@ -156,7 +156,8 @@ static struct bio *bl_alloc_init_bio(int npg, sector_t isect,
156 } 156 }
157 157
158 if (bio) { 158 if (bio) {
159 bio->bi_sector = isect - be->be_f_offset + be->be_v_offset; 159 bio->bi_iter.bi_sector = isect - be->be_f_offset +
160 be->be_v_offset;
160 bio->bi_bdev = be->be_mdev; 161 bio->bi_bdev = be->be_mdev;
161 bio->bi_end_io = end_io; 162 bio->bi_end_io = end_io;
162 bio->bi_private = par; 163 bio->bi_private = par;
@@ -201,18 +202,14 @@ static struct bio *bl_add_page_to_bio(struct bio *bio, int npg, int rw,
201static void bl_end_io_read(struct bio *bio, int err) 202static void bl_end_io_read(struct bio *bio, int err)
202{ 203{
203 struct parallel_io *par = bio->bi_private; 204 struct parallel_io *par = bio->bi_private;
204 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 205 struct bio_vec *bvec;
205 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 206 int i;
206 207
207 do { 208 if (!err)
208 struct page *page = bvec->bv_page; 209 bio_for_each_segment_all(bvec, bio, i)
210 SetPageUptodate(bvec->bv_page);
209 211
210 if (--bvec >= bio->bi_io_vec) 212 if (err) {
211 prefetchw(&bvec->bv_page->flags);
212 if (uptodate)
213 SetPageUptodate(page);
214 } while (bvec >= bio->bi_io_vec);
215 if (!uptodate) {
216 struct nfs_read_data *rdata = par->data; 213 struct nfs_read_data *rdata = par->data;
217 struct nfs_pgio_header *header = rdata->header; 214 struct nfs_pgio_header *header = rdata->header;
218 215
@@ -383,20 +380,16 @@ static void mark_extents_written(struct pnfs_block_layout *bl,
383static void bl_end_io_write_zero(struct bio *bio, int err) 380static void bl_end_io_write_zero(struct bio *bio, int err)
384{ 381{
385 struct parallel_io *par = bio->bi_private; 382 struct parallel_io *par = bio->bi_private;
386 const int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); 383 struct bio_vec *bvec;
387 struct bio_vec *bvec = bio->bi_io_vec + bio->bi_vcnt - 1; 384 int i;
388
389 do {
390 struct page *page = bvec->bv_page;
391 385
392 if (--bvec >= bio->bi_io_vec) 386 bio_for_each_segment_all(bvec, bio, i) {
393 prefetchw(&bvec->bv_page->flags);
394 /* This is the zeroing page we added */ 387 /* This is the zeroing page we added */
395 end_page_writeback(page); 388 end_page_writeback(bvec->bv_page);
396 page_cache_release(page); 389 page_cache_release(bvec->bv_page);
397 } while (bvec >= bio->bi_io_vec); 390 }
398 391
399 if (unlikely(!uptodate)) { 392 if (unlikely(err)) {
400 struct nfs_write_data *data = par->data; 393 struct nfs_write_data *data = par->data;
401 struct nfs_pgio_header *header = data->header; 394 struct nfs_pgio_header *header = data->header;
402 395
@@ -519,7 +512,7 @@ bl_do_readpage_sync(struct page *page, struct pnfs_block_extent *be,
519 isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) + 512 isect = (page->index << PAGE_CACHE_SECTOR_SHIFT) +
520 (offset / SECTOR_SIZE); 513 (offset / SECTOR_SIZE);
521 514
522 bio->bi_sector = isect - be->be_f_offset + be->be_v_offset; 515 bio->bi_iter.bi_sector = isect - be->be_f_offset + be->be_v_offset;
523 bio->bi_bdev = be->be_mdev; 516 bio->bi_bdev = be->be_mdev;
524 bio->bi_end_io = bl_read_single_end_io; 517 bio->bi_end_io = bl_read_single_end_io;
525 518
diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c
index 2d8be51f90dc..dc3a9efdaab8 100644
--- a/fs/nilfs2/segbuf.c
+++ b/fs/nilfs2/segbuf.c
@@ -416,7 +416,8 @@ static struct bio *nilfs_alloc_seg_bio(struct the_nilfs *nilfs, sector_t start,
416 } 416 }
417 if (likely(bio)) { 417 if (likely(bio)) {
418 bio->bi_bdev = nilfs->ns_bdev; 418 bio->bi_bdev = nilfs->ns_bdev;
419 bio->bi_sector = start << (nilfs->ns_blocksize_bits - 9); 419 bio->bi_iter.bi_sector =
420 start << (nilfs->ns_blocksize_bits - 9);
420 } 421 }
421 return bio; 422 return bio;
422} 423}
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 73920ffda05b..bf482dfed14f 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -413,7 +413,7 @@ static struct bio *o2hb_setup_one_bio(struct o2hb_region *reg,
413 } 413 }
414 414
415 /* Must put everything in 512 byte sectors for the bio... */ 415 /* Must put everything in 512 byte sectors for the bio... */
416 bio->bi_sector = (reg->hr_start_block + cs) << (bits - 9); 416 bio->bi_iter.bi_sector = (reg->hr_start_block + cs) << (bits - 9);
417 bio->bi_bdev = reg->hr_bdev; 417 bio->bi_bdev = reg->hr_bdev;
418 bio->bi_private = wc; 418 bio->bi_private = wc;
419 bio->bi_end_io = o2hb_bio_end_io; 419 bio->bi_end_io = o2hb_bio_end_io;
diff --git a/fs/xfs/xfs_aops.c b/fs/xfs/xfs_aops.c
index 71c8c9d2b882..1b19b9cd692a 100644
--- a/fs/xfs/xfs_aops.c
+++ b/fs/xfs/xfs_aops.c
@@ -407,7 +407,7 @@ xfs_alloc_ioend_bio(
407 struct bio *bio = bio_alloc(GFP_NOIO, nvecs); 407 struct bio *bio = bio_alloc(GFP_NOIO, nvecs);
408 408
409 ASSERT(bio->bi_private == NULL); 409 ASSERT(bio->bi_private == NULL);
410 bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); 410 bio->bi_iter.bi_sector = bh->b_blocknr * (bh->b_size >> 9);
411 bio->bi_bdev = bh->b_bdev; 411 bio->bi_bdev = bh->b_bdev;
412 return bio; 412 return bio;
413} 413}
diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c
index afe7645e4b2b..2a941ab623cb 100644
--- a/fs/xfs/xfs_buf.c
+++ b/fs/xfs/xfs_buf.c
@@ -1240,7 +1240,7 @@ next_chunk:
1240 1240
1241 bio = bio_alloc(GFP_NOIO, nr_pages); 1241 bio = bio_alloc(GFP_NOIO, nr_pages);
1242 bio->bi_bdev = bp->b_target->bt_bdev; 1242 bio->bi_bdev = bp->b_target->bt_bdev;
1243 bio->bi_sector = sector; 1243 bio->bi_iter.bi_sector = sector;
1244 bio->bi_end_io = xfs_buf_bio_end_io; 1244 bio->bi_end_io = xfs_buf_bio_end_io;
1245 bio->bi_private = bp; 1245 bio->bi_private = bp;
1246 1246
@@ -1262,7 +1262,7 @@ next_chunk:
1262 total_nr_pages--; 1262 total_nr_pages--;
1263 } 1263 }
1264 1264
1265 if (likely(bio->bi_size)) { 1265 if (likely(bio->bi_iter.bi_size)) {
1266 if (xfs_buf_is_vmapped(bp)) { 1266 if (xfs_buf_is_vmapped(bp)) {
1267 flush_kernel_vmap_range(bp->b_addr, 1267 flush_kernel_vmap_range(bp->b_addr,
1268 xfs_buf_vmap_len(bp)); 1268 xfs_buf_vmap_len(bp));
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 060ff695085c..70654521dab6 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -61,25 +61,87 @@
61 * various member access, note that bio_data should of course not be used 61 * various member access, note that bio_data should of course not be used
62 * on highmem page vectors 62 * on highmem page vectors
63 */ 63 */
64#define bio_iovec_idx(bio, idx) (&((bio)->bi_io_vec[(idx)])) 64#define __bvec_iter_bvec(bvec, iter) (&(bvec)[(iter).bi_idx])
65#define bio_iovec(bio) bio_iovec_idx((bio), (bio)->bi_idx) 65
66#define bio_page(bio) bio_iovec((bio))->bv_page 66#define bvec_iter_page(bvec, iter) \
67#define bio_offset(bio) bio_iovec((bio))->bv_offset 67 (__bvec_iter_bvec((bvec), (iter))->bv_page)
68#define bio_segments(bio) ((bio)->bi_vcnt - (bio)->bi_idx) 68
69#define bio_sectors(bio) ((bio)->bi_size >> 9) 69#define bvec_iter_len(bvec, iter) \
70#define bio_end_sector(bio) ((bio)->bi_sector + bio_sectors((bio))) 70 min((iter).bi_size, \
71 __bvec_iter_bvec((bvec), (iter))->bv_len - (iter).bi_bvec_done)
72
73#define bvec_iter_offset(bvec, iter) \
74 (__bvec_iter_bvec((bvec), (iter))->bv_offset + (iter).bi_bvec_done)
75
76#define bvec_iter_bvec(bvec, iter) \
77((struct bio_vec) { \
78 .bv_page = bvec_iter_page((bvec), (iter)), \
79 .bv_len = bvec_iter_len((bvec), (iter)), \
80 .bv_offset = bvec_iter_offset((bvec), (iter)), \
81})
82
83#define bio_iter_iovec(bio, iter) \
84 bvec_iter_bvec((bio)->bi_io_vec, (iter))
85
86#define bio_iter_page(bio, iter) \
87 bvec_iter_page((bio)->bi_io_vec, (iter))
88#define bio_iter_len(bio, iter) \
89 bvec_iter_len((bio)->bi_io_vec, (iter))
90#define bio_iter_offset(bio, iter) \
91 bvec_iter_offset((bio)->bi_io_vec, (iter))
92
93#define bio_page(bio) bio_iter_page((bio), (bio)->bi_iter)
94#define bio_offset(bio) bio_iter_offset((bio), (bio)->bi_iter)
95#define bio_iovec(bio) bio_iter_iovec((bio), (bio)->bi_iter)
96
97#define bio_multiple_segments(bio) \
98 ((bio)->bi_iter.bi_size != bio_iovec(bio).bv_len)
99#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
100#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
101
102/*
103 * Check whether this bio carries any data or not. A NULL bio is allowed.
104 */
105static inline bool bio_has_data(struct bio *bio)
106{
107 if (bio &&
108 bio->bi_iter.bi_size &&
109 !(bio->bi_rw & REQ_DISCARD))
110 return true;
111
112 return false;
113}
114
115static inline bool bio_is_rw(struct bio *bio)
116{
117 if (!bio_has_data(bio))
118 return false;
119
120 if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
121 return false;
122
123 return true;
124}
125
126static inline bool bio_mergeable(struct bio *bio)
127{
128 if (bio->bi_rw & REQ_NOMERGE_FLAGS)
129 return false;
130
131 return true;
132}
71 133
72static inline unsigned int bio_cur_bytes(struct bio *bio) 134static inline unsigned int bio_cur_bytes(struct bio *bio)
73{ 135{
74 if (bio->bi_vcnt) 136 if (bio_has_data(bio))
75 return bio_iovec(bio)->bv_len; 137 return bio_iovec(bio).bv_len;
76 else /* dataless requests such as discard */ 138 else /* dataless requests such as discard */
77 return bio->bi_size; 139 return bio->bi_iter.bi_size;
78} 140}
79 141
80static inline void *bio_data(struct bio *bio) 142static inline void *bio_data(struct bio *bio)
81{ 143{
82 if (bio->bi_vcnt) 144 if (bio_has_data(bio))
83 return page_address(bio_page(bio)) + bio_offset(bio); 145 return page_address(bio_page(bio)) + bio_offset(bio);
84 146
85 return NULL; 147 return NULL;
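An easy-to-miss consequence of the macros above: bio_iovec() no longer returns a pointer into bi_io_vec but a struct bio_vec built from the iterator, already clamped by bi_bvec_done and bi_size. A small sketch of the new accessor semantics:

#include <linux/bio.h>
#include <linux/kernel.h>

static void my_show_current_segment(struct bio *bio)
{
        struct bio_vec bv = bio_iovec(bio);     /* by value, not &bio->bi_io_vec[0] */

        pr_info("next unprocessed segment: offset=%u len=%u\n",
                bv.bv_offset, bv.bv_len);
}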
@@ -97,19 +159,16 @@ static inline void *bio_data(struct bio *bio)
97 * permanent PIO fall back, user is probably better off disabling highmem 159 * permanent PIO fall back, user is probably better off disabling highmem
98 * I/O completely on that queue (see ide-dma for example) 160 * I/O completely on that queue (see ide-dma for example)
99 */ 161 */
100#define __bio_kmap_atomic(bio, idx) \ 162#define __bio_kmap_atomic(bio, iter) \
101 (kmap_atomic(bio_iovec_idx((bio), (idx))->bv_page) + \ 163 (kmap_atomic(bio_iter_iovec((bio), (iter)).bv_page) + \
102 bio_iovec_idx((bio), (idx))->bv_offset) 164 bio_iter_iovec((bio), (iter)).bv_offset)
103 165
104#define __bio_kunmap_atomic(addr) kunmap_atomic(addr) 166#define __bio_kunmap_atomic(addr) kunmap_atomic(addr)
105 167
106/* 168/*
107 * merge helpers etc 169 * merge helpers etc
108 */ 170 */
109 171
110#define __BVEC_END(bio) bio_iovec_idx((bio), (bio)->bi_vcnt - 1)
111#define __BVEC_START(bio) bio_iovec_idx((bio), (bio)->bi_idx)
112
113/* Default implementation of BIOVEC_PHYS_MERGEABLE */ 172/* Default implementation of BIOVEC_PHYS_MERGEABLE */
114#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \ 173#define __BIOVEC_PHYS_MERGEABLE(vec1, vec2) \
115 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2))) 174 ((bvec_to_phys((vec1)) + (vec1)->bv_len) == bvec_to_phys((vec2)))
@@ -126,33 +185,76 @@ static inline void *bio_data(struct bio *bio)
126 (((addr1) | (mask)) == (((addr2) - 1) | (mask))) 185 (((addr1) | (mask)) == (((addr2) - 1) | (mask)))
127#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \ 186#define BIOVEC_SEG_BOUNDARY(q, b1, b2) \
128 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q))) 187 __BIO_SEG_BOUNDARY(bvec_to_phys((b1)), bvec_to_phys((b2)) + (b2)->bv_len, queue_segment_boundary((q)))
129#define BIO_SEG_BOUNDARY(q, b1, b2) \
130 BIOVEC_SEG_BOUNDARY((q), __BVEC_END((b1)), __BVEC_START((b2)))
131 188
132#define bio_io_error(bio) bio_endio((bio), -EIO) 189#define bio_io_error(bio) bio_endio((bio), -EIO)
133 190
134/* 191/*
135 * drivers should not use the __ version unless they _really_ know what
136 * they're doing
137 */
138#define __bio_for_each_segment(bvl, bio, i, start_idx) \
139 for (bvl = bio_iovec_idx((bio), (start_idx)), i = (start_idx); \
140 i < (bio)->bi_vcnt; \
141 bvl++, i++)
142
143/*
144 * drivers should _never_ use the all version - the bio may have been split 192 * drivers should _never_ use the all version - the bio may have been split
145 * before it got to the driver and the driver won't own all of it 193 * before it got to the driver and the driver won't own all of it
146 */ 194 */
147#define bio_for_each_segment_all(bvl, bio, i) \ 195#define bio_for_each_segment_all(bvl, bio, i) \
148 for (i = 0; \ 196 for (i = 0, bvl = (bio)->bi_io_vec; i < (bio)->bi_vcnt; i++, bvl++)
149 bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ 197
150 i++) 198static inline void bvec_iter_advance(struct bio_vec *bv, struct bvec_iter *iter,
199 unsigned bytes)
200{
201 WARN_ONCE(bytes > iter->bi_size,
202 "Attempted to advance past end of bvec iter\n");
203
204 while (bytes) {
205 unsigned len = min(bytes, bvec_iter_len(bv, *iter));
206
207 bytes -= len;
208 iter->bi_size -= len;
209 iter->bi_bvec_done += len;
210
211 if (iter->bi_bvec_done == __bvec_iter_bvec(bv, *iter)->bv_len) {
212 iter->bi_bvec_done = 0;
213 iter->bi_idx++;
214 }
215 }
216}
217
218#define for_each_bvec(bvl, bio_vec, iter, start) \
219 for ((iter) = start; \
220 (bvl) = bvec_iter_bvec((bio_vec), (iter)), \
221 (iter).bi_size; \
222 bvec_iter_advance((bio_vec), &(iter), (bvl).bv_len))
223
224
225static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
226 unsigned bytes)
227{
228 iter->bi_sector += bytes >> 9;
229
230 if (bio->bi_rw & BIO_NO_ADVANCE_ITER_MASK)
231 iter->bi_size -= bytes;
232 else
233 bvec_iter_advance(bio->bi_io_vec, iter, bytes);
234}
151 235
152#define bio_for_each_segment(bvl, bio, i) \ 236#define __bio_for_each_segment(bvl, bio, iter, start) \
153 for (i = (bio)->bi_idx; \ 237 for (iter = (start); \
154 bvl = bio_iovec_idx((bio), (i)), i < (bio)->bi_vcnt; \ 238 (iter).bi_size && \
155 i++) 239 ((bvl = bio_iter_iovec((bio), (iter))), 1); \
240 bio_advance_iter((bio), &(iter), (bvl).bv_len))
241
242#define bio_for_each_segment(bvl, bio, iter) \
243 __bio_for_each_segment(bvl, bio, iter, (bio)->bi_iter)
244
245#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
246
247static inline unsigned bio_segments(struct bio *bio)
248{
249 unsigned segs = 0;
250 struct bio_vec bv;
251 struct bvec_iter iter;
252
253 bio_for_each_segment(bv, bio, iter)
254 segs++;
255
256 return segs;
257}
156 258
157/* 259/*
158 * get a reference to a bio, so it won't disappear. the intended use is 260 * get a reference to a bio, so it won't disappear. the intended use is
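Taken together, the definitions above mean iteration carries its own struct bvec_iter and yields bio_vecs by value, so it is safe on bios the caller does not own (bio_for_each_segment_all() above remains owner-only). A minimal sketch for an ordinary data bio:

#include <linux/bio.h>

static unsigned my_count_bytes(struct bio *bio)
{
        struct bio_vec bv;
        struct bvec_iter iter;
        unsigned bytes = 0;

        bio_for_each_segment(bv, bio, iter)     /* copies bi_iter; bio untouched */
                bytes += bv.bv_len;

        /* for an unprocessed data bio this equals bio->bi_iter.bi_size */
        return bytes;
}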
@@ -177,16 +279,15 @@ static inline void *bio_data(struct bio *bio)
177struct bio_integrity_payload { 279struct bio_integrity_payload {
178 struct bio *bip_bio; /* parent bio */ 280 struct bio *bip_bio; /* parent bio */
179 281
180 sector_t bip_sector; /* virtual start sector */ 282 struct bvec_iter bip_iter;
181 283
284 /* kill - should just use bip_vec */
182 void *bip_buf; /* generated integrity data */ 285 void *bip_buf; /* generated integrity data */
183 bio_end_io_t *bip_end_io; /* saved I/O completion fn */
184 286
185 unsigned int bip_size; 287 bio_end_io_t *bip_end_io; /* saved I/O completion fn */
186 288
187 unsigned short bip_slab; /* slab the bip came from */ 289 unsigned short bip_slab; /* slab the bip came from */
188 unsigned short bip_vcnt; /* # of integrity bio_vecs */ 290 unsigned short bip_vcnt; /* # of integrity bio_vecs */
189 unsigned short bip_idx; /* current bip_vec index */
190 unsigned bip_owns_buf:1; /* should free bip_buf */ 291 unsigned bip_owns_buf:1; /* should free bip_buf */
191 292
192 struct work_struct bip_work; /* I/O completion */ 293 struct work_struct bip_work; /* I/O completion */
@@ -196,29 +297,28 @@ struct bio_integrity_payload {
196}; 297};
197#endif /* CONFIG_BLK_DEV_INTEGRITY */ 298#endif /* CONFIG_BLK_DEV_INTEGRITY */
198 299
199/* 300extern void bio_trim(struct bio *bio, int offset, int size);
200 * A bio_pair is used when we need to split a bio. 301extern struct bio *bio_split(struct bio *bio, int sectors,
201 * This can only happen for a bio that refers to just one 302 gfp_t gfp, struct bio_set *bs);
202 * page of data, and in the unusual situation when the 303
203 * page crosses a chunk/device boundary 304/**
305 * bio_next_split - get next @sectors from a bio, splitting if necessary
306 * @bio: bio to split
307 * @sectors: number of sectors to split from the front of @bio
308 * @gfp: gfp mask
309 * @bs: bio set to allocate from
204 * 310 *
205 * The address of the master bio is stored in bio1.bi_private 311 * Returns a bio representing the next @sectors of @bio - if the bio is smaller
206 * The address of the pool the pair was allocated from is stored 312 * than @sectors, returns the original bio unchanged.
207 * in bio2.bi_private
208 */ 313 */
209struct bio_pair { 314static inline struct bio *bio_next_split(struct bio *bio, int sectors,
210 struct bio bio1, bio2; 315 gfp_t gfp, struct bio_set *bs)
211 struct bio_vec bv1, bv2; 316{
212#if defined(CONFIG_BLK_DEV_INTEGRITY) 317 if (sectors >= bio_sectors(bio))
213 struct bio_integrity_payload bip1, bip2; 318 return bio;
214 struct bio_vec iv1, iv2; 319
215#endif 320 return bio_split(bio, sectors, gfp, bs);
216 atomic_t cnt; 321}
217 int error;
218};
219extern struct bio_pair *bio_split(struct bio *bi, int first_sectors);
220extern void bio_pair_release(struct bio_pair *dbio);
221extern void bio_trim(struct bio *bio, int offset, int size);
222 322
223extern struct bio_set *bioset_create(unsigned int, unsigned int); 323extern struct bio_set *bioset_create(unsigned int, unsigned int);
224extern void bioset_free(struct bio_set *); 324extern void bioset_free(struct bio_set *);
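With struct bio_pair gone, splitting is no longer restricted to single-page bios; a stacking driver can peel chunk-sized pieces off the front and chain them. A rough sketch of that pattern (MY_CHUNK_SECTORS and the function name are hypothetical; fs_bio_set is the generic bio_set declared just below):

#include <linux/bio.h>
#include <linux/blkdev.h>

static void my_make_request(struct request_queue *q, struct bio *bio)
{
        struct bio *split;

        do {
                /* returns @bio itself once the remainder fits in one chunk */
                split = bio_next_split(bio, MY_CHUNK_SECTORS,
                                       GFP_NOIO, fs_bio_set);
                if (split != bio)
                        bio_chain(split, bio);  /* @bio completes after @split */

                generic_make_request(split);
        } while (split != bio);
}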
@@ -227,7 +327,8 @@ extern mempool_t *biovec_create_pool(struct bio_set *bs, int pool_entries);
227extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *); 327extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
228extern void bio_put(struct bio *); 328extern void bio_put(struct bio *);
229 329
230extern void __bio_clone(struct bio *, struct bio *); 330extern void __bio_clone_fast(struct bio *, struct bio *);
331extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
231extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs); 332extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
232 333
233extern struct bio_set *fs_bio_set; 334extern struct bio_set *fs_bio_set;
@@ -254,6 +355,7 @@ static inline struct bio *bio_clone_kmalloc(struct bio *bio, gfp_t gfp_mask)
254} 355}
255 356
256extern void bio_endio(struct bio *, int); 357extern void bio_endio(struct bio *, int);
358extern void bio_endio_nodec(struct bio *, int);
257struct request_queue; 359struct request_queue;
258extern int bio_phys_segments(struct request_queue *, struct bio *); 360extern int bio_phys_segments(struct request_queue *, struct bio *);
259 361
@@ -262,12 +364,12 @@ extern void bio_advance(struct bio *, unsigned);
262 364
263extern void bio_init(struct bio *); 365extern void bio_init(struct bio *);
264extern void bio_reset(struct bio *); 366extern void bio_reset(struct bio *);
367void bio_chain(struct bio *, struct bio *);
265 368
266extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int); 369extern int bio_add_page(struct bio *, struct page *, unsigned int,unsigned int);
267extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *, 370extern int bio_add_pc_page(struct request_queue *, struct bio *, struct page *,
268 unsigned int, unsigned int); 371 unsigned int, unsigned int);
269extern int bio_get_nr_vecs(struct block_device *); 372extern int bio_get_nr_vecs(struct block_device *);
270extern sector_t bio_sector_offset(struct bio *, unsigned short, unsigned int);
271extern struct bio *bio_map_user(struct request_queue *, struct block_device *, 373extern struct bio *bio_map_user(struct request_queue *, struct block_device *,
272 unsigned long, unsigned int, int, gfp_t); 374 unsigned long, unsigned int, int, gfp_t);
273struct sg_iovec; 375struct sg_iovec;
@@ -357,48 +459,18 @@ static inline void bvec_kunmap_irq(char *buffer, unsigned long *flags)
357} 459}
358#endif 460#endif
359 461
360static inline char *__bio_kmap_irq(struct bio *bio, unsigned short idx, 462static inline char *__bio_kmap_irq(struct bio *bio, struct bvec_iter iter,
361 unsigned long *flags) 463 unsigned long *flags)
362{ 464{
363 return bvec_kmap_irq(bio_iovec_idx(bio, idx), flags); 465 return bvec_kmap_irq(&bio_iter_iovec(bio, iter), flags);
364} 466}
365#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags) 467#define __bio_kunmap_irq(buf, flags) bvec_kunmap_irq(buf, flags)
366 468
367#define bio_kmap_irq(bio, flags) \ 469#define bio_kmap_irq(bio, flags) \
368 __bio_kmap_irq((bio), (bio)->bi_idx, (flags)) 470 __bio_kmap_irq((bio), (bio)->bi_iter, (flags))
369#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags) 471#define bio_kunmap_irq(buf,flags) __bio_kunmap_irq(buf, flags)
370 472
371/* 473/*
372 * Check whether this bio carries any data or not. A NULL bio is allowed.
373 */
374static inline bool bio_has_data(struct bio *bio)
375{
376 if (bio && bio->bi_vcnt)
377 return true;
378
379 return false;
380}
381
382static inline bool bio_is_rw(struct bio *bio)
383{
384 if (!bio_has_data(bio))
385 return false;
386
387 if (bio->bi_rw & REQ_WRITE_SAME)
388 return false;
389
390 return true;
391}
392
393static inline bool bio_mergeable(struct bio *bio)
394{
395 if (bio->bi_rw & REQ_NOMERGE_FLAGS)
396 return false;
397
398 return true;
399}
400
401/*
402 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop. 474 * BIO list management for use by remapping drivers (e.g. DM or MD) and loop.
403 * 475 *
404 * A bio_list anchors a singly-linked list of bios chained through the bi_next 476 * A bio_list anchors a singly-linked list of bios chained through the bi_next
@@ -559,16 +631,12 @@ struct biovec_slab {
 
 #if defined(CONFIG_BLK_DEV_INTEGRITY)
 
-#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
-#define bip_vec(bip)		bip_vec_idx(bip, 0)
 
-#define __bip_for_each_vec(bvl, bip, i, start_idx)			\
-	for (bvl = bip_vec_idx((bip), (start_idx)), i = (start_idx);	\
-	     i < (bip)->bip_vcnt;					\
-	     bvl++, i++)
 
-#define bip_for_each_vec(bvl, bip, i)					\
-	__bip_for_each_vec(bvl, bip, i, (bip)->bip_idx)
+#define bip_vec_idx(bip, idx)	(&(bip->bip_vec[(idx)]))
+
+#define bip_for_each_vec(bvl, bip, iter)				\
+	for_each_bvec(bvl, (bip)->bip_vec, iter, (bip)->bip_iter)
 
 #define bio_for_each_integrity_vec(_bvl, _bio, _iter)			\
	for_each_bio(_bio)						\
@@ -586,7 +654,6 @@ extern int bio_integrity_prep(struct bio *);
 extern void bio_integrity_endio(struct bio *, int);
 extern void bio_integrity_advance(struct bio *, unsigned int);
 extern void bio_integrity_trim(struct bio *, unsigned int, unsigned int);
-extern void bio_integrity_split(struct bio *, struct bio_pair *, int);
 extern int bio_integrity_clone(struct bio *, struct bio *, gfp_t);
 extern int bioset_integrity_create(struct bio_set *, int);
 extern void bioset_integrity_free(struct bio_set *);
@@ -630,12 +697,6 @@ static inline int bio_integrity_clone(struct bio *bio, struct bio *bio_src,
	return 0;
 }
 
-static inline void bio_integrity_split(struct bio *bio, struct bio_pair *bp,
-				       int sectors)
-{
-	return;
-}
-
 static inline void bio_integrity_advance(struct bio *bio,
					 unsigned int bytes_done)
 {
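
For orientation while reading the bio.h hunks above: the integrity payload is now walked with the same iterator scheme, so bip_for_each_vec() takes a struct bvec_iter rather than an integer index. The sketch below is illustrative only (the helper name is made up, and it assumes a CONFIG_BLK_DEV_INTEGRITY build and that for_each_bvec() hands back each bio_vec by value, as the new macro suggests).

    /* Hypothetical helper: sum integrity segment lengths with the new iterator. */
    #include <linux/bio.h>

    static unsigned int example_bip_bytes(struct bio_integrity_payload *bip)
    {
            struct bio_vec bv;      /* segment handed back by value */
            struct bvec_iter iter;  /* on-stack iteration state */
            unsigned int bytes = 0;

            bip_for_each_vec(bv, bip, iter)
                    bytes += bv.bv_len;

            return bytes;
    }
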
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index 238ef0ed62f8..bbc3a6c88fce 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -28,13 +28,22 @@ struct bio_vec {
	unsigned int	bv_offset;
 };
 
+struct bvec_iter {
+	sector_t		bi_sector;	/* device address in 512 byte
+						   sectors */
+	unsigned int		bi_size;	/* residual I/O count */
+
+	unsigned int		bi_idx;		/* current index into bvl_vec */
+
+	unsigned int		bi_bvec_done;	/* number of bytes completed in
+						   current bvec */
+};
+
 /*
  * main unit of I/O for the block layer and lower layers (ie drivers and
  * stacking drivers)
  */
 struct bio {
-	sector_t		bi_sector;	/* device address in 512 byte
-						   sectors */
	struct bio		*bi_next;	/* request queue link */
	struct block_device	*bi_bdev;
	unsigned long		bi_flags;	/* status, command, etc */
@@ -42,16 +51,13 @@ struct bio {
	 * top bits priority
	 */
 
-	unsigned short		bi_vcnt;	/* how many bio_vec's */
-	unsigned short		bi_idx;		/* current index into bvl_vec */
+	struct bvec_iter	bi_iter;
 
	/* Number of segments in this BIO after
	 * physical address coalescing is performed.
	 */
	unsigned int		bi_phys_segments;
 
-	unsigned int		bi_size;	/* residual I/O count */
-
	/*
	 * To keep track of the max segment size, we account for the
	 * sizes of the first and last mergeable segments in this bio.
@@ -59,6 +65,8 @@ struct bio {
	unsigned int		bi_seg_front_size;
	unsigned int		bi_seg_back_size;
 
+	atomic_t		bi_remaining;
+
	bio_end_io_t		*bi_end_io;
 
	void			*bi_private;
@@ -74,11 +82,13 @@ struct bio {
	struct bio_integrity_payload *bi_integrity;  /* data integrity */
 #endif
 
+	unsigned short		bi_vcnt;	/* how many bio_vec's */
+
	/*
	 * Everything starting with bi_max_vecs will be preserved by bio_reset()
	 */
 
-	unsigned int		bi_max_vecs;	/* max bvl_vecs we can hold */
+	unsigned short		bi_max_vecs;	/* max bvl_vecs we can hold */
 
	atomic_t		bi_cnt;		/* pin count */
 
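
Reading note: with struct bvec_iter in place, the starting sector, residual byte count and current index move out of struct bio into bio->bi_iter, and bio_for_each_segment() yields each bio_vec by value through an on-stack iterator. A minimal sketch of the post-conversion iteration style, under those assumptions (the helper name is hypothetical, not from the patch):

    /* Hypothetical helper: count the data bytes a bio still describes. */
    #include <linux/bio.h>
    #include <linux/printk.h>

    static unsigned int example_bio_bytes(struct bio *bio)
    {
            struct bio_vec bvec;    /* segment handed back by value */
            struct bvec_iter iter;  /* on-stack iteration state */
            unsigned int bytes = 0;

            pr_debug("bio @ sector %llu, %u bytes\n",
                     (unsigned long long)bio->bi_iter.bi_sector,
                     bio->bi_iter.bi_size);

            bio_for_each_segment(bvec, bio, iter)
                    bytes += bvec.bv_len;

            return bytes;
    }
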
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 1b135d49b279..02cb6f0ea71d 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -735,7 +735,7 @@ struct rq_map_data {
 };
 
 struct req_iterator {
-	int i;
+	struct bvec_iter iter;
	struct bio *bio;
 };
 
@@ -748,10 +748,11 @@ struct req_iterator {
 
 #define rq_for_each_segment(bvl, _rq, _iter)			\
	__rq_for_each_bio(_iter.bio, _rq)			\
-		bio_for_each_segment(bvl, _iter.bio, _iter.i)
+		bio_for_each_segment(bvl, _iter.bio, _iter.iter)
 
-#define rq_iter_last(rq, _iter)					\
-	(_iter.bio->bi_next == NULL && _iter.i == _iter.bio->bi_vcnt-1)
+#define rq_iter_last(bvec, _iter)				\
+	(_iter.bio->bi_next == NULL &&				\
+	 bio_iter_last(bvec, _iter.iter))
 
 #ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
 # error	"You should define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE for your platform"
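
Similarly, the request iterator now carries a struct bvec_iter, so rq_for_each_segment() walks segments by value and rq_iter_last() takes the current bvec instead of comparing indices against bi_vcnt. A hedged sketch of that usage (hypothetical helper name, not part of the patch):

    /* Hypothetical helper: count the bytes covered by a request's segments. */
    #include <linux/blkdev.h>

    static unsigned int example_rq_bytes(struct request *rq)
    {
            struct req_iterator iter;       /* embeds a struct bvec_iter */
            struct bio_vec bvec;
            unsigned int bytes = 0;

            rq_for_each_segment(bvec, rq, iter) {
                    bytes += bvec.bv_len;
                    if (rq_iter_last(bvec, iter))
                            pr_debug("last segment of request\n");
            }

            return bytes;
    }
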
diff --git a/include/linux/ceph/messenger.h b/include/linux/ceph/messenger.h
index 7c1420bb1dce..091fdb600d55 100644
--- a/include/linux/ceph/messenger.h
+++ b/include/linux/ceph/messenger.h
@@ -1,6 +1,7 @@
 #ifndef __FS_CEPH_MESSENGER_H
 #define __FS_CEPH_MESSENGER_H
 
+#include <linux/blk_types.h>
 #include <linux/kref.h>
 #include <linux/mutex.h>
 #include <linux/net.h>
@@ -119,8 +120,7 @@ struct ceph_msg_data_cursor {
 #ifdef CONFIG_BLOCK
	struct {				/* bio */
		struct bio	*bio;		/* bio from list */
-		unsigned int	vector_index;	/* vector from bio */
-		unsigned int	vector_offset;	/* bytes from vector */
+		struct bvec_iter bvec_iter;
	};
 #endif /* CONFIG_BLOCK */
	struct {				/* pages */
diff --git a/include/linux/dm-io.h b/include/linux/dm-io.h
index f4b0aa3126f5..a68cbe59e6ad 100644
--- a/include/linux/dm-io.h
+++ b/include/linux/dm-io.h
@@ -29,7 +29,7 @@ typedef void (*io_notify_fn)(unsigned long error, void *context);
 
 enum dm_io_mem_type {
	DM_IO_PAGE_LIST,/* Page list */
-	DM_IO_BVEC,	/* Bio vector */
+	DM_IO_BIO,	/* Bio vector */
	DM_IO_VMA,	/* Virtual memory area */
	DM_IO_KMEM,	/* Kernel memory */
 };
@@ -41,7 +41,7 @@ struct dm_io_memory {
 
	union {
		struct page_list *pl;
-		struct bio_vec *bvec;
+		struct bio *bio;
		void *vma;
		void *addr;
	} ptr;
diff --git a/include/trace/events/bcache.h b/include/trace/events/bcache.h
index e2b9576d00e2..095c6e4fe1e8 100644
--- a/include/trace/events/bcache.h
+++ b/include/trace/events/bcache.h
@@ -24,10 +24,10 @@ DECLARE_EVENT_CLASS(bcache_request,
		__entry->dev		= bio->bi_bdev->bd_dev;
		__entry->orig_major	= d->disk->major;
		__entry->orig_minor	= d->disk->first_minor;
-		__entry->sector		= bio->bi_sector;
-		__entry->orig_sector	= bio->bi_sector - 16;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->orig_sector	= bio->bi_iter.bi_sector - 16;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),
 
	TP_printk("%d,%d %s %llu + %u (from %d,%d @ %llu)",
@@ -99,9 +99,9 @@ DECLARE_EVENT_CLASS(bcache_bio,
 
	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),
 
	TP_printk("%d,%d %s %llu + %u",
@@ -134,9 +134,9 @@ TRACE_EVENT(bcache_read,
 
	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->cache_hit = hit;
		__entry->bypass = bypass;
	),
@@ -162,9 +162,9 @@ TRACE_EVENT(bcache_write,
 
	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
-		__entry->nr_sector	= bio->bi_size >> 9;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->nr_sector	= bio->bi_iter.bi_size >> 9;
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		__entry->writeback = writeback;
		__entry->bypass = bypass;
	),
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index 4c2301d2ef1a..e76ae19a8d6f 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -243,9 +243,9 @@ TRACE_EVENT(block_bio_bounce,
	TP_fast_assign(
		__entry->dev		= bio->bi_bdev ?
					  bio->bi_bdev->bd_dev : 0;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),
 
@@ -280,10 +280,10 @@ TRACE_EVENT(block_bio_complete,
 
	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->error		= error;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),
 
	TP_printk("%d,%d %s %llu + %u [%d]",
@@ -308,9 +308,9 @@ DECLARE_EVENT_CLASS(block_bio_merge,
 
	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),
 
@@ -375,9 +375,9 @@ TRACE_EVENT(block_bio_queue,
 
	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),
 
@@ -403,7 +403,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
 
	TP_fast_assign(
		__entry->dev		= bio ? bio->bi_bdev->bd_dev : 0;
-		__entry->sector		= bio ? bio->bi_sector : 0;
+		__entry->sector		= bio ? bio->bi_iter.bi_sector : 0;
		__entry->nr_sector	= bio ? bio_sectors(bio) : 0;
		blk_fill_rwbs(__entry->rwbs,
			      bio ? bio->bi_rw : 0, __entry->nr_sector);
@@ -538,9 +538,9 @@ TRACE_EVENT(block_split,
 
	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->new_sector	= new_sector;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
		memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
	),
 
@@ -579,11 +579,11 @@ TRACE_EVENT(block_bio_remap,
 
	TP_fast_assign(
		__entry->dev		= bio->bi_bdev->bd_dev;
-		__entry->sector		= bio->bi_sector;
+		__entry->sector		= bio->bi_iter.bi_sector;
		__entry->nr_sector	= bio_sectors(bio);
		__entry->old_dev	= dev;
		__entry->old_sector	= from;
-		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
+		blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_iter.bi_size);
	),
 
	TP_printk("%d,%d %s %llu + %u <- (%d,%d) %llu",
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index e0dc355fa317..bd3ee4fbe7a7 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -616,8 +616,8 @@ TRACE_EVENT(f2fs_do_submit_bio,
		__entry->dev		= sb->s_dev;
		__entry->btype		= btype;
		__entry->sync		= sync;
-		__entry->sector		= bio->bi_sector;
-		__entry->size		= bio->bi_size;
+		__entry->sector		= bio->bi_iter.bi_sector;
+		__entry->size		= bio->bi_iter.bi_size;
	),
 
	TP_printk("dev = (%d,%d), type = %s, io = %s, sector = %lld, size = %u",
diff --git a/kernel/power/block_io.c b/kernel/power/block_io.c
index d09dd10c5a5e..9a58bc258810 100644
--- a/kernel/power/block_io.c
+++ b/kernel/power/block_io.c
@@ -32,7 +32,7 @@ static int submit(int rw, struct block_device *bdev, sector_t sector,
	struct bio *bio;
 
	bio = bio_alloc(__GFP_WAIT | __GFP_HIGH, 1);
-	bio->bi_sector = sector;
+	bio->bi_iter.bi_sector = sector;
	bio->bi_bdev = bdev;
	bio->bi_end_io = end_swap_bio_read;
 
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index f785aef65799..b418cb0d7242 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -781,8 +781,8 @@ static void blk_add_trace_bio(struct request_queue *q, struct bio *bio,
	if (!error && !bio_flagged(bio, BIO_UPTODATE))
		error = EIO;
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw, what,
-			error, 0, NULL);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, what, error, 0, NULL);
 }
 
 static void blk_add_trace_bio_bounce(void *ignore,
@@ -885,8 +885,9 @@ static void blk_add_trace_split(void *ignore,
	if (bt) {
		__be64 rpdu = cpu_to_be64(pdu);
 
-		__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-				BLK_TA_SPLIT, !bio_flagged(bio, BIO_UPTODATE),
+		__blk_add_trace(bt, bio->bi_iter.bi_sector,
+				bio->bi_iter.bi_size, bio->bi_rw, BLK_TA_SPLIT,
+				!bio_flagged(bio, BIO_UPTODATE),
				sizeof(rpdu), &rpdu);
	}
 }
@@ -918,9 +919,9 @@ static void blk_add_trace_bio_remap(void *ignore,
	r.device_to   = cpu_to_be32(bio->bi_bdev->bd_dev);
	r.sector_from = cpu_to_be64(from);
 
-	__blk_add_trace(bt, bio->bi_sector, bio->bi_size, bio->bi_rw,
-			BLK_TA_REMAP, !bio_flagged(bio, BIO_UPTODATE),
-			sizeof(r), &r);
+	__blk_add_trace(bt, bio->bi_iter.bi_sector, bio->bi_iter.bi_size,
+			bio->bi_rw, BLK_TA_REMAP,
+			!bio_flagged(bio, BIO_UPTODATE), sizeof(r), &r);
 }
 
 /**
diff --git a/mm/bounce.c b/mm/bounce.c
index 5a7d58fb883b..523918b8c6dc 100644
--- a/mm/bounce.c
+++ b/mm/bounce.c
@@ -98,27 +98,24 @@ int init_emergency_isa_pool(void)
 static void copy_to_high_bio_irq(struct bio *to, struct bio *from)
 {
	unsigned char *vfrom;
-	struct bio_vec *tovec, *fromvec;
-	int i;
+	struct bio_vec tovec, *fromvec = from->bi_io_vec;
+	struct bvec_iter iter;
 
-	bio_for_each_segment(tovec, to, i) {
-		fromvec = from->bi_io_vec + i;
-
-		/*
-		 * not bounced
-		 */
-		if (tovec->bv_page == fromvec->bv_page)
-			continue;
-
-		/*
-		 * fromvec->bv_offset and fromvec->bv_len might have been
-		 * modified by the block layer, so use the original copy,
-		 * bounce_copy_vec already uses tovec->bv_len
-		 */
-		vfrom = page_address(fromvec->bv_page) + tovec->bv_offset;
+	bio_for_each_segment(tovec, to, iter) {
+		if (tovec.bv_page != fromvec->bv_page) {
+			/*
+			 * fromvec->bv_offset and fromvec->bv_len might have
+			 * been modified by the block layer, so use the original
+			 * copy, bounce_copy_vec already uses tovec->bv_len
+			 */
+			vfrom = page_address(fromvec->bv_page) +
+				tovec.bv_offset;
+
+			bounce_copy_vec(&tovec, vfrom);
+			flush_dcache_page(tovec.bv_page);
+		}
 
-		bounce_copy_vec(tovec, vfrom);
-		flush_dcache_page(tovec->bv_page);
+		fromvec++;
	}
 }
 
@@ -201,13 +198,14 @@ static void __blk_queue_bounce(struct request_queue *q, struct bio **bio_orig,
 {
	struct bio *bio;
	int rw = bio_data_dir(*bio_orig);
-	struct bio_vec *to, *from;
+	struct bio_vec *to, from;
+	struct bvec_iter iter;
	unsigned i;
 
	if (force)
		goto bounce;
-	bio_for_each_segment(from, *bio_orig, i)
-		if (page_to_pfn(from->bv_page) > queue_bounce_pfn(q))
+	bio_for_each_segment(from, *bio_orig, iter)
+		if (page_to_pfn(from.bv_page) > queue_bounce_pfn(q))
			goto bounce;
 
	return;
diff --git a/mm/page_io.c b/mm/page_io.c
index 8c79a4764be0..f14eded987fa 100644
--- a/mm/page_io.c
+++ b/mm/page_io.c
@@ -31,13 +31,13 @@ static struct bio *get_swap_bio(gfp_t gfp_flags,
 
	bio = bio_alloc(gfp_flags, 1);
	if (bio) {
-		bio->bi_sector = map_swap_page(page, &bio->bi_bdev);
-		bio->bi_sector <<= PAGE_SHIFT - 9;
+		bio->bi_iter.bi_sector = map_swap_page(page, &bio->bi_bdev);
+		bio->bi_iter.bi_sector <<= PAGE_SHIFT - 9;
		bio->bi_io_vec[0].bv_page = page;
		bio->bi_io_vec[0].bv_len = PAGE_SIZE;
		bio->bi_io_vec[0].bv_offset = 0;
		bio->bi_vcnt = 1;
-		bio->bi_size = PAGE_SIZE;
+		bio->bi_iter.bi_size = PAGE_SIZE;
		bio->bi_end_io = end_io;
	}
	return bio;
@@ -62,7 +62,7 @@ void end_swap_bio_write(struct bio *bio, int err)
		printk(KERN_ALERT "Write-error on swap-device (%u:%u:%Lu)\n",
			imajor(bio->bi_bdev->bd_inode),
			iminor(bio->bi_bdev->bd_inode),
-			(unsigned long long)bio->bi_sector);
+			(unsigned long long)bio->bi_iter.bi_sector);
		ClearPageReclaim(page);
	}
	end_page_writeback(page);
@@ -80,7 +80,7 @@ void end_swap_bio_read(struct bio *bio, int err)
		printk(KERN_ALERT "Read-error on swap-device (%u:%u:%Lu)\n",
			imajor(bio->bi_bdev->bd_inode),
			iminor(bio->bi_bdev->bd_inode),
-			(unsigned long long)bio->bi_sector);
+			(unsigned long long)bio->bi_iter.bi_sector);
		goto out;
	}
 
diff --git a/net/ceph/messenger.c b/net/ceph/messenger.c
index 4a5df7b1cc9f..18c039b95c22 100644
--- a/net/ceph/messenger.c
+++ b/net/ceph/messenger.c
@@ -777,13 +777,12 @@ static void ceph_msg_data_bio_cursor_init(struct ceph_msg_data_cursor *cursor,
 
	bio = data->bio;
	BUG_ON(!bio);
-	BUG_ON(!bio->bi_vcnt);
 
	cursor->resid = min(length, data->bio_length);
	cursor->bio = bio;
-	cursor->vector_index = 0;
-	cursor->vector_offset = 0;
-	cursor->last_piece = length <= bio->bi_io_vec[0].bv_len;
+	cursor->bvec_iter = bio->bi_iter;
+	cursor->last_piece =
+		cursor->resid <= bio_iter_len(bio, cursor->bvec_iter);
 }
 
 static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
@@ -792,71 +791,63 @@ static struct page *ceph_msg_data_bio_next(struct ceph_msg_data_cursor *cursor,
 {
	struct ceph_msg_data *data = cursor->data;
	struct bio *bio;
-	struct bio_vec *bio_vec;
-	unsigned int index;
+	struct bio_vec bio_vec;
 
	BUG_ON(data->type != CEPH_MSG_DATA_BIO);
 
	bio = cursor->bio;
	BUG_ON(!bio);
 
-	index = cursor->vector_index;
-	BUG_ON(index >= (unsigned int) bio->bi_vcnt);
+	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
-	bio_vec = &bio->bi_io_vec[index];
-	BUG_ON(cursor->vector_offset >= bio_vec->bv_len);
-	*page_offset = (size_t) (bio_vec->bv_offset + cursor->vector_offset);
+	*page_offset = (size_t) bio_vec.bv_offset;
	BUG_ON(*page_offset >= PAGE_SIZE);
	if (cursor->last_piece) /* pagelist offset is always 0 */
		*length = cursor->resid;
	else
-		*length = (size_t) (bio_vec->bv_len - cursor->vector_offset);
+		*length = (size_t) bio_vec.bv_len;
	BUG_ON(*length > cursor->resid);
	BUG_ON(*page_offset + *length > PAGE_SIZE);
 
-	return bio_vec->bv_page;
+	return bio_vec.bv_page;
 }
 
 static bool ceph_msg_data_bio_advance(struct ceph_msg_data_cursor *cursor,
					size_t bytes)
 {
	struct bio *bio;
-	struct bio_vec *bio_vec;
-	unsigned int index;
+	struct bio_vec bio_vec;
 
	BUG_ON(cursor->data->type != CEPH_MSG_DATA_BIO);
 
	bio = cursor->bio;
	BUG_ON(!bio);
 
-	index = cursor->vector_index;
-	BUG_ON(index >= (unsigned int) bio->bi_vcnt);
-	bio_vec = &bio->bi_io_vec[index];
+	bio_vec = bio_iter_iovec(bio, cursor->bvec_iter);
 
	/* Advance the cursor offset */
 
	BUG_ON(cursor->resid < bytes);
	cursor->resid -= bytes;
-	cursor->vector_offset += bytes;
-	if (cursor->vector_offset < bio_vec->bv_len)
+
+	bio_advance_iter(bio, &cursor->bvec_iter, bytes);
+
+	if (bytes < bio_vec.bv_len)
		return false;	/* more bytes to process in this segment */
-	BUG_ON(cursor->vector_offset != bio_vec->bv_len);
 
	/* Move on to the next segment, and possibly the next bio */
 
-	if (++index == (unsigned int) bio->bi_vcnt) {
+	if (!cursor->bvec_iter.bi_size) {
		bio = bio->bi_next;
-		index = 0;
+		cursor->bvec_iter = bio->bi_iter;
	}
	cursor->bio = bio;
-	cursor->vector_index = index;
-	cursor->vector_offset = 0;
 
	if (!cursor->last_piece) {
		BUG_ON(!cursor->resid);
		BUG_ON(!bio);
		/* A short read is OK, so use <= rather than == */
-		if (cursor->resid <= bio->bi_io_vec[index].bv_len)
+		if (cursor->resid <= bio_iter_len(bio, cursor->bvec_iter))
			cursor->last_piece = true;
	}
 