author		Kent Overstreet <kmo@daterainc.com>	2013-10-11 18:44:27 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-24 01:33:47 -0500
commit		4f024f3797c43cb4b73cd2c50cec728842d0e49e (patch)
tree		3aedcab02d2ad723a189d01934d1e94fec7a54e1 /drivers/md
parent		ed9c47bebeeea4a468b07cfd745c690190f8014c (diff)
block: Abstract out bvec iterator
Immutable biovecs are going to require an explicit iterator. To implement immutable bvecs, a later patch is going to add a bi_bvec_done member to this struct; for now, this patch effectively just renames things.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>
Diffstat (limited to 'drivers/md')
 drivers/md/bcache/btree.c       |  4
 drivers/md/bcache/debug.c       |  2
 drivers/md/bcache/io.c          | 26
 drivers/md/bcache/journal.c     | 12
 drivers/md/bcache/movinggc.c    |  4
 drivers/md/bcache/request.c     | 58
 drivers/md/bcache/super.c       | 16
 drivers/md/bcache/util.c        |  4
 drivers/md/bcache/writeback.c   |  6
 drivers/md/bcache/writeback.h   |  2
 drivers/md/dm-bio-record.h      | 12
 drivers/md/dm-bufio.c           |  2
 drivers/md/dm-cache-policy-mq.c |  4
 drivers/md/dm-cache-target.c    | 22
 drivers/md/dm-crypt.c           | 19
 drivers/md/dm-delay.c           |  7
 drivers/md/dm-flakey.c          |  7
 drivers/md/dm-io.c              |  6
 drivers/md/dm-linear.c          |  3
 drivers/md/dm-raid1.c           | 16
 drivers/md/dm-region-hash.c     |  3
 drivers/md/dm-snap.c            | 18
 drivers/md/dm-stripe.c          | 13
 drivers/md/dm-switch.c          |  4
 drivers/md/dm-thin.c            | 22
 drivers/md/dm-verity.c          |  8
 drivers/md/dm.c                 | 25
 drivers/md/faulty.c             | 19
 drivers/md/linear.c             | 12
 drivers/md/md.c                 | 10
 drivers/md/multipath.c          | 13
 drivers/md/raid0.c              | 16
 drivers/md/raid1.c              | 75
 drivers/md/raid10.c             | 91
 drivers/md/raid5.c              | 72
 35 files changed, 333 insertions(+), 300 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..038a6d2aced3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
 
 	bio = bch_bbio_alloc(b->c);
 	bio->bi_rw = REQ_META|READ_SYNC;
-	bio->bi_size = KEY_SIZE(&b->key) << 9;
+	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
 	bio->bi_end_io = btree_node_read_endio;
 	bio->bi_private = &cl;
 
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
 	b->bio->bi_end_io = btree_node_write_endio;
 	b->bio->bi_private = cl;
 	b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
-	b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+	b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
 	bch_bio_map(b->bio, i);
 
 	/*
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..92b3fd468a03 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -195,7 +195,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 				dc->disk.c,
 				"verify failed at dev %s sector %llu",
 				bdevname(dc->bdev, name),
-				(uint64_t) bio->bi_sector);
+				(uint64_t) bio->bi_iter.bi_sector);
 
 	kunmap_atomic(p1);
 }
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..cc4ba2da5fb6 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -21,18 +21,18 @@ static void bch_bi_idx_hack_endio(struct bio *bio, int error)
 
 static void bch_generic_make_request_hack(struct bio *bio)
 {
-	if (bio->bi_idx) {
+	if (bio->bi_iter.bi_idx) {
 		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
 
 		memcpy(clone->bi_io_vec,
 		       bio_iovec(bio),
 		       bio_segments(bio) * sizeof(struct bio_vec));
 
-		clone->bi_sector = bio->bi_sector;
+		clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 		clone->bi_bdev = bio->bi_bdev;
 		clone->bi_rw = bio->bi_rw;
 		clone->bi_vcnt = bio_segments(bio);
-		clone->bi_size = bio->bi_size;
+		clone->bi_iter.bi_size = bio->bi_iter.bi_size;
 
 		clone->bi_private = bio;
 		clone->bi_end_io = bch_bi_idx_hack_endio;
@@ -72,7 +72,7 @@ static void bch_generic_make_request_hack(struct bio *bio)
 struct bio *bch_bio_split(struct bio *bio, int sectors,
 			  gfp_t gfp, struct bio_set *bs)
 {
-	unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
+	unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9;
 	struct bio_vec *bv;
 	struct bio *ret = NULL;
 
@@ -90,7 +90,7 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
 	}
 
 	bio_for_each_segment(bv, bio, idx) {
-		vcnt = idx - bio->bi_idx;
+		vcnt = idx - bio->bi_iter.bi_idx;
 
 		if (!nbytes) {
 			ret = bio_alloc_bioset(gfp, vcnt, bs);
@@ -119,15 +119,15 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
 	}
 out:
 	ret->bi_bdev = bio->bi_bdev;
-	ret->bi_sector = bio->bi_sector;
-	ret->bi_size = sectors << 9;
+	ret->bi_iter.bi_sector = bio->bi_iter.bi_sector;
+	ret->bi_iter.bi_size = sectors << 9;
 	ret->bi_rw = bio->bi_rw;
 	ret->bi_vcnt = vcnt;
 	ret->bi_max_vecs = vcnt;
 
-	bio->bi_sector += sectors;
-	bio->bi_size -= sectors << 9;
-	bio->bi_idx = idx;
+	bio->bi_iter.bi_sector += sectors;
+	bio->bi_iter.bi_size -= sectors << 9;
+	bio->bi_iter.bi_idx = idx;
 
 	if (bio_integrity(bio)) {
 		if (bio_integrity_clone(ret, bio, gfp)) {
@@ -162,7 +162,7 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
 	bio_for_each_segment(bv, bio, i) {
 		struct bvec_merge_data bvm = {
 			.bi_bdev	= bio->bi_bdev,
-			.bi_sector	= bio->bi_sector,
+			.bi_sector	= bio->bi_iter.bi_sector,
 			.bi_size	= ret << 9,
 			.bi_rw		= bio->bi_rw,
 		};
@@ -272,8 +272,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 
-	bio->bi_sector = PTR_OFFSET(&b->key, 0);
+	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
 	bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
 
 	b->submit_time_us = local_clock_us();
 	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..7eafdf09a0ae 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset;
 		len = min_t(unsigned, left, PAGE_SECTORS * 8);
 
 		bio_reset(bio);
-		bio->bi_sector = bucket + offset;
+		bio->bi_iter.bi_sector = bucket + offset;
 		bio->bi_bdev = ca->bdev;
 		bio->bi_rw = READ;
-		bio->bi_size = len << 9;
+		bio->bi_iter.bi_size = len << 9;
 
 		bio->bi_end_io = journal_read_endio;
 		bio->bi_private = &cl;
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
 		atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
 		bio_init(bio);
-		bio->bi_sector = bucket_to_sector(ca->set,
+		bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
						  ca->sb.d[ja->discard_idx]);
 		bio->bi_bdev = ca->bdev;
 		bio->bi_rw = REQ_WRITE|REQ_DISCARD;
 		bio->bi_max_vecs = 1;
 		bio->bi_io_vec = bio->bi_inline_vecs;
-		bio->bi_size = bucket_bytes(ca);
+		bio->bi_iter.bi_size = bucket_bytes(ca);
 		bio->bi_end_io = journal_discard_endio;
 
 		closure_get(&ca->set->cl);
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
 		atomic_long_add(sectors, &ca->meta_sectors_written);
 
 		bio_reset(bio);
-		bio->bi_sector = PTR_OFFSET(k, i);
+		bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
 		bio->bi_bdev = ca->bdev;
 		bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
-		bio->bi_size = sectors << 9;
+		bio->bi_iter.bi_size = sectors << 9;
 
 		bio->bi_end_io = journal_write_endio;
 		bio->bi_private = w;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..581f95df8265 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io)
 	bio_get(bio);
 	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-	bio->bi_size = KEY_SIZE(&io->w->key) << 9;
+	bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
 	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
					PAGE_SECTORS);
 	bio->bi_private = &io->cl;
@@ -98,7 +98,7 @@ static void write_moving(struct closure *cl)
 	if (!op->error) {
 		moving_init(io);
 
-		io->bio.bio.bi_sector = KEY_START(&io->w->key);
+		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
 		op->write_prio = 1;
 		op->bio = &io->bio.bio;
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 78bab4154e97..47a9bbc75124 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -261,7 +261,7 @@ static void bch_data_invalidate(struct closure *cl)
 	struct bio *bio = op->bio;
 
 	pr_debug("invalidating %i sectors from %llu",
-		 bio_sectors(bio), (uint64_t) bio->bi_sector);
+		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
 	while (bio_sectors(bio)) {
 		unsigned sectors = min(bio_sectors(bio),
@@ -270,11 +270,11 @@ static void bch_data_invalidate(struct closure *cl)
 		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
 			goto out;
 
-		bio->bi_sector += sectors;
-		bio->bi_size -= sectors << 9;
+		bio->bi_iter.bi_sector += sectors;
+		bio->bi_iter.bi_size -= sectors << 9;
 
 		bch_keylist_add(&op->insert_keys,
-				&KEY(op->inode, bio->bi_sector, sectors));
+				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
 	}
 
 	op->insert_data_done = true;
@@ -364,7 +364,7 @@ static void bch_data_insert_start(struct closure *cl)
 		k = op->insert_keys.top;
 		bkey_init(k);
 		SET_KEY_INODE(k, op->inode);
-		SET_KEY_OFFSET(k, bio->bi_sector);
+		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
 		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 				       op->write_point, op->write_prio,
@@ -522,7 +522,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	     (bio->bi_rw & REQ_WRITE)))
 		goto skip;
 
-	if (bio->bi_sector & (c->sb.block_size - 1) ||
+	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
 		pr_debug("skipping unaligned io");
 		goto skip;
@@ -546,8 +546,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	spin_lock(&dc->io_lock);
 
-	hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-		if (i->last == bio->bi_sector &&
+	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+		if (i->last == bio->bi_iter.bi_sector &&
 		    time_before(jiffies, i->jiffies))
 			goto found;
 
@@ -556,8 +556,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	add_sequential(task);
 	i->sequential = 0;
 found:
-	if (i->sequential + bio->bi_size > i->sequential)
-		i->sequential	+= bio->bi_size;
+	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+		i->sequential	+= bio->bi_iter.bi_size;
 
 	i->last			 = bio_end_sector(bio);
 	i->jiffies		 = jiffies + msecs_to_jiffies(5000);
@@ -650,15 +650,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	struct bkey *bio_key;
 	unsigned ptr;
 
-	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 		return MAP_CONTINUE;
 
 	if (KEY_INODE(k) != s->iop.inode ||
-	    KEY_START(k) > bio->bi_sector) {
+	    KEY_START(k) > bio->bi_iter.bi_sector) {
 		unsigned bio_sectors = bio_sectors(bio);
 		unsigned sectors = KEY_INODE(k) == s->iop.inode
 			? min_t(uint64_t, INT_MAX,
-				KEY_START(k) - bio->bi_sector)
+				KEY_START(k) - bio->bi_iter.bi_sector)
 			: INT_MAX;
 
 		int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -681,13 +681,13 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 		s->read_dirty_data = true;
 
 	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-				     KEY_OFFSET(k) - bio->bi_sector),
+				     KEY_OFFSET(k) - bio->bi_iter.bi_sector),
 			  GFP_NOIO, s->d->bio_split);
 
 	bio_key = &container_of(n, struct bbio, bio)->key;
 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 
 	n->bi_end_io = bch_cache_read_endio;
@@ -714,7 +714,7 @@ static void cache_lookup(struct closure *cl)
 	struct bio *bio = &s->bio.bio;
 
 	int ret = bch_btree_map_keys(&s->op, s->iop.c,
-				     &KEY(s->iop.inode, bio->bi_sector, 0),
+				     &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
@@ -872,9 +872,9 @@ static void cached_dev_read_done(struct closure *cl)
 
 	if (s->iop.bio) {
 		bio_reset(s->iop.bio);
-		s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
 		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
-		s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 		bch_bio_map(s->iop.bio, NULL);
 
 		bio_copy_data(s->cache_miss, s->iop.bio);
@@ -937,7 +937,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
 	s->iop.replace_key = KEY(s->iop.inode,
-				 bio->bi_sector + s->insert_bio_sectors,
+				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
 				 s->insert_bio_sectors);
 
 	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -957,9 +957,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	if (!cache_bio)
 		goto out_submit;
 
-	cache_bio->bi_sector = miss->bi_sector;
+	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
 	cache_bio->bi_bdev = miss->bi_bdev;
-	cache_bio->bi_size = s->insert_bio_sectors << 9;
+	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 
 	cache_bio->bi_end_io = request_endio;
 	cache_bio->bi_private = &s->cl;
@@ -1009,7 +1009,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
-	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
 	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1104,13 +1104,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	part_stat_unlock();
 
 	bio->bi_bdev = dc->bdev;
-	bio->bi_sector += dc->sb.data_offset;
+	bio->bi_iter.bi_sector += dc->sb.data_offset;
 
 	if (cached_dev_get(dc)) {
 		s = search_alloc(bio, d);
 		trace_bcache_request_start(s->d, bio);
 
-		if (!bio->bi_size) {
+		if (!bio->bi_iter.bi_size) {
 			/*
 			 * can't call bch_journal_meta from under
 			 * generic_make_request
@@ -1197,9 +1197,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
 		sectors	-= j;
 	}
 
-	bio_advance(bio, min(sectors << 9, bio->bi_size));
+	bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
 
-	if (!bio->bi_size)
+	if (!bio->bi_iter.bi_size)
 		return MAP_DONE;
 
 	return MAP_CONTINUE;
@@ -1233,7 +1233,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
 	trace_bcache_request_start(s->d, bio);
 
-	if (!bio->bi_size) {
+	if (!bio->bi_iter.bi_size) {
 		/*
 		 * can't call bch_journal_meta from under
 		 * generic_make_request
@@ -1243,7 +1243,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 			    bcache_wq);
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
-					&KEY(d->id, bio->bi_sector, 0),
+					&KEY(d->id, bio->bi_iter.bi_sector, 0),
 					&KEY(d->id, bio_end_sector(bio), 0));
 
 		s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1d9ee67d14ec..60fb6044b953 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
 	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
 	unsigned i;
 
-	bio->bi_sector	= SB_SECTOR;
+	bio->bi_iter.bi_sector	= SB_SECTOR;
 	bio->bi_rw	= REQ_SYNC|REQ_META;
-	bio->bi_size	= SB_SIZE;
+	bio->bi_iter.bi_size	= SB_SIZE;
 	bch_bio_map(bio, NULL);
 
 	out->offset		= cpu_to_le64(sb->offset);
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
 		struct bio *bio = bch_bbio_alloc(c);
 
 		bio->bi_rw	= REQ_SYNC|REQ_META|rw;
-		bio->bi_size	= KEY_SIZE(k) << 9;
+		bio->bi_iter.bi_size	= KEY_SIZE(k) << 9;
 
 		bio->bi_end_io	= uuid_endio;
 		bio->bi_private = cl;
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 
 	closure_init_stack(cl);
 
-	bio->bi_sector	= bucket * ca->sb.bucket_size;
+	bio->bi_iter.bi_sector	= bucket * ca->sb.bucket_size;
 	bio->bi_bdev	= ca->bdev;
 	bio->bi_rw	= REQ_SYNC|REQ_META|rw;
-	bio->bi_size	= bucket_bytes(ca);
+	bio->bi_iter.bi_size	= bucket_bytes(ca);
 
 	bio->bi_end_io	= prio_endio;
 	bio->bi_private = ca;
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..c57621e49dc0 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -218,10 +218,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 
 void bch_bio_map(struct bio *bio, void *base)
 {
-	size_t size = bio->bi_size;
+	size_t size = bio->bi_iter.bi_size;
 	struct bio_vec *bv = bio->bi_io_vec;
 
-	BUG_ON(!bio->bi_size);
+	BUG_ON(!bio->bi_iter.bi_size);
 	BUG_ON(bio->bi_vcnt);
 
 	bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..04657e93f4fd 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -113,7 +113,7 @@ static void dirty_init(struct keybuf_key *w)
 	if (!io->dc->writeback_percent)
 		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-	bio->bi_size		= KEY_SIZE(&w->key) << 9;
+	bio->bi_iter.bi_size	= KEY_SIZE(&w->key) << 9;
 	bio->bi_max_vecs	= DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
 	bio->bi_private		= w;
 	bio->bi_io_vec		= bio->bi_inline_vecs;
@@ -186,7 +186,7 @@ static void write_dirty(struct closure *cl)
 
 	dirty_init(w);
 	io->bio.bi_rw		= WRITE;
-	io->bio.bi_sector	= KEY_START(&w->key);
+	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
 	io->bio.bi_bdev		= io->dc->bdev;
 	io->bio.bi_end_io	= dirty_endio;
 
@@ -255,7 +255,7 @@ static void read_dirty(struct cached_dev *dc)
 		io->dc		= dc;
 
 		dirty_init(w);
-		io->bio.bi_sector	= PTR_OFFSET(&w->key, 0);
+		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
 		io->bio.bi_bdev		= PTR_CACHE(dc->disk.c,
 						    &w->key, 0)->bdev;
 		io->bio.bi_rw		= READ;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 		return false;
 
 	if (dc->partial_stripes_expensive &&
-	    bcache_dev_stripe_dirty(dc, bio->bi_sector,
+	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
 				    bio_sectors(bio)))
 		return true;
 
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2645c7..5ace48ee9f58 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -40,10 +40,10 @@ static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio)
 {
 	unsigned i;
 
-	bd->bi_sector = bio->bi_sector;
+	bd->bi_sector = bio->bi_iter.bi_sector;
 	bd->bi_bdev = bio->bi_bdev;
-	bd->bi_size = bio->bi_size;
-	bd->bi_idx = bio->bi_idx;
+	bd->bi_size = bio->bi_iter.bi_size;
+	bd->bi_idx = bio->bi_iter.bi_idx;
 	bd->bi_flags = bio->bi_flags;
 
 	for (i = 0; i < bio->bi_vcnt; i++) {
@@ -56,10 +56,10 @@ static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio)
 {
 	unsigned i;
 
-	bio->bi_sector = bd->bi_sector;
+	bio->bi_iter.bi_sector = bd->bi_sector;
 	bio->bi_bdev = bd->bi_bdev;
-	bio->bi_size = bd->bi_size;
-	bio->bi_idx = bd->bi_idx;
+	bio->bi_iter.bi_size = bd->bi_size;
+	bio->bi_iter.bi_idx = bd->bi_idx;
 	bio->bi_flags = bd->bi_flags;
 
 	for (i = 0; i < bio->bi_vcnt; i++) {
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 173cbb20d104..4113b6044b80 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -538,7 +538,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block,
 	bio_init(&b->bio);
 	b->bio.bi_io_vec = b->bio_vec;
 	b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS;
-	b->bio.bi_sector = block << b->c->sectors_per_block_bits;
+	b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits;
 	b->bio.bi_bdev = b->c->bdev;
 	b->bio.bi_end_io = end_io;
 
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 416b7b752a6e..bfba97dcde2d 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t)
 
 static void iot_update_stats(struct io_tracker *t, struct bio *bio)
 {
-	if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1)
+	if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1)
 		t->nr_seq_samples++;
 	else {
 		/*
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio)
 		t->nr_rand_samples++;
 	}
 
-	t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1);
+	t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1);
 }
 
 static void iot_check_for_pattern_switch(struct io_tracker *t)
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 9efcf1059b99..86f9c83eb30c 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -664,15 +664,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio)
 static void remap_to_cache(struct cache *cache, struct bio *bio,
 			   dm_cblock_t cblock)
 {
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	bio->bi_bdev = cache->cache_dev->bdev;
 	if (!block_size_is_power_of_two(cache))
-		bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) +
-				sector_div(bi_sector, cache->sectors_per_block);
+		bio->bi_iter.bi_sector =
+			(from_cblock(cblock) * cache->sectors_per_block) +
+			sector_div(bi_sector, cache->sectors_per_block);
 	else
-		bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) |
-				(bi_sector & (cache->sectors_per_block - 1));
+		bio->bi_iter.bi_sector =
+			(from_cblock(cblock) << cache->sectors_per_block_shift) |
+			(bi_sector & (cache->sectors_per_block - 1));
 }
 
 static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio)
@@ -712,7 +714,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio,
 
 static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio)
 {
-	sector_t block_nr = bio->bi_sector;
+	sector_t block_nr = bio->bi_iter.bi_sector;
 
 	if (!block_size_is_power_of_two(cache))
 		(void) sector_div(block_nr, cache->sectors_per_block);
@@ -1027,7 +1029,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio)
 static bool bio_writes_complete_block(struct cache *cache, struct bio *bio)
 {
 	return (bio_data_dir(bio) == WRITE) &&
-		(bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
+		(bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT));
 }
 
 static void avoid_copy(struct dm_cache_migration *mg)
@@ -1252,7 +1254,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
 	size_t pb_data_size = get_per_bio_data_size(cache);
 	struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size);
 
-	BUG_ON(bio->bi_size);
+	BUG_ON(bio->bi_iter.bi_size);
 	if (!pb->req_nr)
 		remap_to_origin(cache, bio);
 	else
@@ -1275,9 +1277,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio)
  */
 static void process_discard_bio(struct cache *cache, struct bio *bio)
 {
-	dm_block_t start_block = dm_sector_div_up(bio->bi_sector,
+	dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector,
 						  cache->discard_block_size);
-	dm_block_t end_block = bio->bi_sector + bio_sectors(bio);
+	dm_block_t end_block = bio_end_sector(bio);
 	dm_block_t b;
 
 	end_block = block_div(end_block, cache->discard_block_size);
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c
index 81b0fa660452..1e2e5465d28e 100644
--- a/drivers/md/dm-crypt.c
+++ b/drivers/md/dm-crypt.c
@@ -828,8 +828,8 @@ static void crypt_convert_init(struct crypt_config *cc,
 	ctx->bio_out = bio_out;
 	ctx->offset_in = 0;
 	ctx->offset_out = 0;
-	ctx->idx_in = bio_in ? bio_in->bi_idx : 0;
-	ctx->idx_out = bio_out ? bio_out->bi_idx : 0;
+	ctx->idx_in = bio_in ? bio_in->bi_iter.bi_idx : 0;
+	ctx->idx_out = bio_out ? bio_out->bi_iter.bi_idx : 0;
 	ctx->cc_sector = sector + cc->iv_offset;
 	init_completion(&ctx->restart);
 }
@@ -1021,7 +1021,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size,
 		size -= len;
 	}
 
-	if (!clone->bi_size) {
+	if (!clone->bi_iter.bi_size) {
 		bio_put(clone);
 		return NULL;
 	}
@@ -1161,7 +1161,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp)
 	crypt_inc_pending(io);
 
 	clone_init(io, clone);
-	clone->bi_sector = cc->start + io->sector;
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 
 	generic_make_request(clone);
 	return 0;
@@ -1209,7 +1209,7 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async)
 	/* crypt_convert should have filled the clone bio */
 	BUG_ON(io->ctx.idx_out < clone->bi_vcnt);
 
-	clone->bi_sector = cc->start + io->sector;
+	clone->bi_iter.bi_sector = cc->start + io->sector;
 
 	if (async)
 		kcryptd_queue_io(io);
@@ -1224,7 +1224,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 	struct dm_crypt_io *new_io;
 	int crypt_finished;
 	unsigned out_of_pages = 0;
-	unsigned remaining = io->base_bio->bi_size;
+	unsigned remaining = io->base_bio->bi_iter.bi_size;
 	sector_t sector = io->sector;
 	int r;
 
@@ -1248,7 +1248,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io)
 		io->ctx.bio_out = clone;
 		io->ctx.idx_out = 0;
 
-		remaining -= clone->bi_size;
+		remaining -= clone->bi_iter.bi_size;
 		sector += bio_sectors(clone);
 
 		crypt_inc_pending(io);
@@ -1869,11 +1869,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio)
 	if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) {
 		bio->bi_bdev = cc->dev->bdev;
 		if (bio_sectors(bio))
-			bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector);
+			bio->bi_iter.bi_sector = cc->start +
+				dm_target_offset(ti, bio->bi_iter.bi_sector);
 		return DM_MAPIO_REMAPPED;
 	}
 
-	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector));
+	io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector));
 
 	if (bio_data_dir(io->base_bio) == READ) {
 		if (kcryptd_io_read(io, GFP_NOWAIT))
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c
index 496d5f3646a5..84c860191a2e 100644
--- a/drivers/md/dm-delay.c
+++ b/drivers/md/dm-delay.c
@@ -281,14 +281,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio)
 	if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) {
 		bio->bi_bdev = dc->dev_write->bdev;
 		if (bio_sectors(bio))
-			bio->bi_sector = dc->start_write +
-					 dm_target_offset(ti, bio->bi_sector);
+			bio->bi_iter.bi_sector = dc->start_write +
+				dm_target_offset(ti, bio->bi_iter.bi_sector);
 
 		return delay_bio(dc, dc->write_delay, bio);
 	}
 
 	bio->bi_bdev = dc->dev_read->bdev;
-	bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector);
+	bio->bi_iter.bi_sector = dc->start_read +
+		dm_target_offset(ti, bio->bi_iter.bi_sector);
 
 	return delay_bio(dc, dc->read_delay, bio);
 }
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c
index c80a0ec5f126..b257e46876d3 100644
--- a/drivers/md/dm-flakey.c
+++ b/drivers/md/dm-flakey.c
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio)
 
 	bio->bi_bdev = fc->dev->bdev;
 	if (bio_sectors(bio))
-		bio->bi_sector = flakey_map_sector(ti, bio->bi_sector);
+		bio->bi_iter.bi_sector =
+			flakey_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc)
 		DMDEBUG("Corrupting data bio=%p by writing %u to byte %u "
 			"(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n",
 			bio, fc->corrupt_bio_value, fc->corrupt_bio_byte,
-			(bio_data_dir(bio) == WRITE) ? 'w' : 'r',
-			bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes);
+			(bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw,
+			(unsigned long long)bio->bi_iter.bi_sector, bio_bytes);
 	}
 }
 
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c
index 2a20986a2fec..01558b093307 100644
--- a/drivers/md/dm-io.c
+++ b/drivers/md/dm-io.c
@@ -304,14 +304,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 				      dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT)));
 
 		bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios);
-		bio->bi_sector = where->sector + (where->count - remaining);
+		bio->bi_iter.bi_sector = where->sector + (where->count - remaining);
 		bio->bi_bdev = where->bdev;
 		bio->bi_end_io = endio;
 		store_io_and_region_in_bio(bio, io, region);
 
 		if (rw & REQ_DISCARD) {
 			num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining);
-			bio->bi_size = num_sectors << SECTOR_SHIFT;
+			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 			remaining -= num_sectors;
 		} else if (rw & REQ_WRITE_SAME) {
 			/*
@@ -320,7 +320,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where,
 			dp->get_page(dp, &page, &len, &offset);
 			bio_add_page(bio, page, logical_block_size, offset);
 			num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining);
-			bio->bi_size = num_sectors << SECTOR_SHIFT;
+			bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT;
 
 			offset = 0;
 			remaining -= num_sectors;
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c
index 4f99d267340c..53e848c10939 100644
--- a/drivers/md/dm-linear.c
+++ b/drivers/md/dm-linear.c
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio)
 
 	bio->bi_bdev = lc->dev->bdev;
 	if (bio_sectors(bio))
-		bio->bi_sector = linear_map_sector(ti, bio->bi_sector);
+		bio->bi_iter.bi_sector =
+			linear_map_sector(ti, bio->bi_iter.bi_sector);
 }
 
 static int linear_map(struct dm_target *ti, struct bio *bio)
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9584443c5614..9f6d8e6baa7d 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
 	region_t region = dm_rh_bio_to_region(ms->rh, bio);
 
 	if (log->type->in_sync(log, region, 0))
-		return choose_mirror(ms, bio->bi_sector) ? 1 : 0;
+		return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0;
 
 	return 0;
 }
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio)
  */
 static sector_t map_sector(struct mirror *m, struct bio *bio)
 {
-	if (unlikely(!bio->bi_size))
+	if (unlikely(!bio->bi_iter.bi_size))
 		return 0;
-	return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector);
+	return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector);
 }
 
 static void map_bio(struct mirror *m, struct bio *bio)
 {
 	bio->bi_bdev = m->dev->bdev;
-	bio->bi_sector = map_sector(m, bio);
+	bio->bi_iter.bi_sector = map_sector(m, bio);
 }
 
 static void map_region(struct dm_io_region *io, struct mirror *m,
@@ -527,7 +527,7 @@ static void read_async_bio(struct mirror *m, struct bio *bio)
 	struct dm_io_request io_req = {
 		.bi_rw = READ,
 		.mem.type = DM_IO_BVEC,
-		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx,
 		.notify.fn = read_callback,
 		.notify.context = bio,
 		.client = m->ms->io_client,
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
 		 * We can only read balance if the region is in sync.
 		 */
 		if (likely(region_in_sync(ms, region, 1)))
-			m = choose_mirror(ms, bio->bi_sector);
+			m = choose_mirror(ms, bio->bi_iter.bi_sector);
 		else if (m && atomic_read(&m->error_count))
 			m = NULL;
 
@@ -630,7 +630,7 @@ static void do_write(struct mirror_set *ms, struct bio *bio)
 	struct dm_io_request io_req = {
 		.bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA),
 		.mem.type = DM_IO_BVEC,
-		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx,
+		.mem.ptr.bvec = bio->bi_io_vec + bio->bi_iter.bi_idx,
 		.notify.fn = write_callback,
 		.notify.context = bio,
 		.client = ms->io_client,
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio)
 	 * The region is in-sync and we can perform reads directly.
 	 * Store enough information so we can retry if it fails.
 	 */
-	m = choose_mirror(ms, bio->bi_sector);
+	m = choose_mirror(ms, bio->bi_iter.bi_sector);
 	if (unlikely(!m))
 		return -EIO;
 
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 69732e03eb34..b929fd5f4984 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector);
 
 region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio)
 {
-	return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin);
+	return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector -
+				      rh->target_begin);
 }
 EXPORT_SYMBOL_GPL(dm_rh_bio_to_region);
 
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c
index aec57d76db5d..3ded8c729dfb 100644
--- a/drivers/md/dm-snap.c
+++ b/drivers/md/dm-snap.c
@@ -1562,11 +1562,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e,
 			    struct bio *bio, chunk_t chunk)
 {
 	bio->bi_bdev = s->cow->bdev;
-	bio->bi_sector = chunk_to_sector(s->store,
-					 dm_chunk_number(e->new_chunk) +
-					 (chunk - e->old_chunk)) +
-					 (bio->bi_sector &
-					 s->store->chunk_mask);
+	bio->bi_iter.bi_sector =
+		chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) +
+				(chunk - e->old_chunk)) +
+		(bio->bi_iter.bi_sector & s->store->chunk_mask);
 }
 
 static int snapshot_map(struct dm_target *ti, struct bio *bio)
@@ -1584,7 +1583,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}
 
-	chunk = sector_to_chunk(s->store, bio->bi_sector);
+	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
 	/* Full snapshots are not usable */
 	/* To get here the table must be live so s->active is always set. */
@@ -1645,7 +1644,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio)
 		r = DM_MAPIO_SUBMITTED;
 
 		if (!pe->started &&
-		    bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) {
+		    bio->bi_iter.bi_size ==
+		    (s->store->chunk_size << SECTOR_SHIFT)) {
 			pe->started = 1;
 			up_write(&s->lock);
 			start_full_bio(pe, bio);
@@ -1701,7 +1701,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio)
 		return DM_MAPIO_REMAPPED;
 	}
 
-	chunk = sector_to_chunk(s->store, bio->bi_sector);
+	chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector);
 
 	down_write(&s->lock);
 
@@ -2038,7 +2038,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio)
 	down_read(&_origins_lock);
 	o = __lookup_origin(origin->bdev);
 	if (o)
-		r = __origin_write(&o->snapshots, bio->bi_sector, bio);
+		r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio);
 	up_read(&_origins_lock);
 
 	return r;
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
index 73c1712dad96..d1600d2aa2e2 100644
--- a/drivers/md/dm-stripe.c
+++ b/drivers/md/dm-stripe.c
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio,
 {
 	sector_t begin, end;
 
-	stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin);
+	stripe_map_range_sector(sc, bio->bi_iter.bi_sector,
+				target_stripe, &begin);
 	stripe_map_range_sector(sc, bio_end_sector(bio),
 				target_stripe, &end);
 	if (begin < end) {
 		bio->bi_bdev = sc->stripe[target_stripe].dev->bdev;
-		bio->bi_sector = begin + sc->stripe[target_stripe].physical_start;
-		bio->bi_size = to_bytes(end - begin);
+		bio->bi_iter.bi_sector = begin +
+			sc->stripe[target_stripe].physical_start;
+		bio->bi_iter.bi_size = to_bytes(end - begin);
 		return DM_MAPIO_REMAPPED;
 	} else {
 		/* The range doesn't map to the target stripe */
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio)
 		return stripe_map_range(sc, bio, target_bio_nr);
 	}
 
-	stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector);
+	stripe_map_sector(sc, bio->bi_iter.bi_sector,
+			  &stripe, &bio->bi_iter.bi_sector);
 
-	bio->bi_sector += sc->stripe[stripe].physical_start;
+	bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start;
 	bio->bi_bdev = sc->stripe[stripe].dev->bdev;
 
 	return DM_MAPIO_REMAPPED;
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c
index ff9ac4be4721..09a688b3d48c 100644
--- a/drivers/md/dm-switch.c
+++ b/drivers/md/dm-switch.c
@@ -311,11 +311,11 @@ error:
 static int switch_map(struct dm_target *ti, struct bio *bio)
 {
 	struct switch_ctx *sctx = ti->private;
-	sector_t offset = dm_target_offset(ti, bio->bi_sector);
+	sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector);
 	unsigned path_nr = switch_get_path_nr(sctx, offset);
 
 	bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev;
-	bio->bi_sector = sctx->path_list[path_nr].start + offset;
+	bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset;
 
 	return DM_MAPIO_REMAPPED;
 }
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
index 2c0cf511ec23..a65402480c8c 100644
--- a/drivers/md/dm-thin.c
+++ b/drivers/md/dm-thin.c
@@ -413,7 +413,7 @@ static bool block_size_is_power_of_two(struct pool *pool)
 static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 {
 	struct pool *pool = tc->pool;
-	sector_t block_nr = bio->bi_sector;
+	sector_t block_nr = bio->bi_iter.bi_sector;
 
 	if (block_size_is_power_of_two(pool))
 		block_nr >>= pool->sectors_per_block_shift;
@@ -426,14 +426,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio)
 static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block)
 {
 	struct pool *pool = tc->pool;
-	sector_t bi_sector = bio->bi_sector;
+	sector_t bi_sector = bio->bi_iter.bi_sector;
 
 	bio->bi_bdev = tc->pool_dev->bdev;
 	if (block_size_is_power_of_two(pool))
-		bio->bi_sector = (block << pool->sectors_per_block_shift) |
-				(bi_sector & (pool->sectors_per_block - 1));
+		bio->bi_iter.bi_sector =
+			(block << pool->sectors_per_block_shift) |
+			(bi_sector & (pool->sectors_per_block - 1));
 	else
-		bio->bi_sector = (block * pool->sectors_per_block) +
+		bio->bi_iter.bi_sector = (block * pool->sectors_per_block) +
 				 sector_div(bi_sector, pool->sectors_per_block);
 }
 
@@ -721,7 +722,8 @@ static void process_prepared(struct pool *pool, struct list_head *head,
  */
 static int io_overlaps_block(struct pool *pool, struct bio *bio)
 {
-	return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT);
+	return bio->bi_iter.bi_size ==
+		(pool->sectors_per_block << SECTOR_SHIFT);
 }
 
 static int io_overwrites_block(struct pool *pool, struct bio *bio)
@@ -1130,7 +1132,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio,
 	if (bio_detain(pool, &key, bio, &cell))
 		return;
 
-	if (bio_data_dir(bio) == WRITE && bio->bi_size)
+	if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size)
 		break_sharing(tc, bio, block, &key, lookup_result, cell);
 	else {
 		struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook));
@@ -1153,7 +1155,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block
 	/*
 	 * Remap empty bios (flushes) immediately, without provisioning.
 	 */
-	if (!bio->bi_size) {
+	if (!bio->bi_iter.bi_size) {
 		inc_all_io_entry(pool, bio);
 		cell_defer_no_holder(tc, cell);
 
@@ -1253,7 +1255,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio)
 	r = dm_thin_find_block(tc->td, block, 1, &lookup_result);
 	switch (r) {
 	case 0:
1256 if (lookup_result.shared && (rw == WRITE) && bio->bi_size) 1258 if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size)
1257 bio_io_error(bio); 1259 bio_io_error(bio);
1258 else { 1260 else {
1259 inc_all_io_entry(tc->pool, bio); 1261 inc_all_io_entry(tc->pool, bio);
@@ -2867,7 +2869,7 @@ out_unlock:
2867 2869
2868static int thin_map(struct dm_target *ti, struct bio *bio) 2870static int thin_map(struct dm_target *ti, struct bio *bio)
2869{ 2871{
2870 bio->bi_sector = dm_target_offset(ti, bio->bi_sector); 2872 bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector);
2871 2873
2872 return thin_bio_map(ti, bio); 2874 return thin_bio_map(ti, bio);
2873} 2875}
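
get_bio_block() and remap() above take one of two arithmetic paths depending on whether the pool's block size is a power of two: shift-and-mask when it is, divide-and-remainder (via sector_div()) when it is not. A standalone sketch of both, with sector_div() modelled as plain division; for readability the example maps a block onto itself, whereas remap() substitutes the pool block returned by the metadata lookup:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long bi_sector = 10000;	/* bio start, in sectors */
		unsigned long long spb = 128;		/* sectors_per_block (power of two) */
		unsigned int shift = 7;			/* sectors_per_block_shift */

		/* Power-of-two pool: shift and mask. */
		unsigned long long block    = bi_sector >> shift;		/* 78 */
		unsigned long long remapped = (block << shift) | (bi_sector & (spb - 1));

		/* General pool: divide and take the remainder, as sector_div() does. */
		unsigned long long block2    = bi_sector / spb;
		unsigned long long remapped2 = block2 * spb + bi_sector % spb;

		printf("block %llu, remapped %llu (%llu)\n", block, remapped, remapped2);
		return 0;	/* prints: block 78, remapped 10000 (10000) */
	}
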
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c
index 4b7941db3aff..132b3154d466 100644
--- a/drivers/md/dm-verity.c
+++ b/drivers/md/dm-verity.c
@@ -493,9 +493,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
493 struct dm_verity_io *io; 493 struct dm_verity_io *io;
494 494
495 bio->bi_bdev = v->data_dev->bdev; 495 bio->bi_bdev = v->data_dev->bdev;
496 bio->bi_sector = verity_map_sector(v, bio->bi_sector); 496 bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector);
497 497
498 if (((unsigned)bio->bi_sector | bio_sectors(bio)) & 498 if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) &
499 ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) { 499 ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) {
500 DMERR_LIMIT("unaligned io"); 500 DMERR_LIMIT("unaligned io");
501 return -EIO; 501 return -EIO;
@@ -514,8 +514,8 @@ static int verity_map(struct dm_target *ti, struct bio *bio)
514 io->v = v; 514 io->v = v;
515 io->orig_bi_end_io = bio->bi_end_io; 515 io->orig_bi_end_io = bio->bi_end_io;
516 io->orig_bi_private = bio->bi_private; 516 io->orig_bi_private = bio->bi_private;
517 io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); 517 io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT);
518 io->n_blocks = bio->bi_size >> v->data_dev_block_bits; 518 io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits;
519 519
520 bio->bi_end_io = verity_end_io; 520 bio->bi_end_io = verity_end_io;
521 bio->bi_private = io; 521 bio->bi_private = io;
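
verity_map() above derives the starting block and the number of blocks straight from the iterator. With the common case of 4 KiB verity blocks (data_dev_block_bits = 12) and 512-byte sectors (SECTOR_SHIFT = 9) -- both assumed example values -- the shifts work out as follows:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long bi_sector = 4096;	/* bio start, in sectors */
		unsigned int bi_size = 32768;		/* 32 KiB of data */

		unsigned long long block   = bi_sector >> (12 - 9);	/* io->block    = 512 */
		unsigned int       nblocks = bi_size >> 12;		/* io->n_blocks = 8   */

		printf("block %llu, n_blocks %u\n", block, nblocks);
		return 0;
	}
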
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 0704c523a76b..ccd064ea4fe6 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io)
575 atomic_inc_return(&md->pending[rw])); 575 atomic_inc_return(&md->pending[rw]));
576 576
577 if (unlikely(dm_stats_used(&md->stats))) 577 if (unlikely(dm_stats_used(&md->stats)))
578 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, 578 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
579 bio_sectors(bio), false, 0, &io->stats_aux); 579 bio_sectors(bio), false, 0, &io->stats_aux);
580} 580}
581 581
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io)
593 part_stat_unlock(); 593 part_stat_unlock();
594 594
595 if (unlikely(dm_stats_used(&md->stats))) 595 if (unlikely(dm_stats_used(&md->stats)))
596 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, 596 dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector,
597 bio_sectors(bio), true, duration, &io->stats_aux); 597 bio_sectors(bio), true, duration, &io->stats_aux);
598 598
599 /* 599 /*
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error)
742 if (io_error == DM_ENDIO_REQUEUE) 742 if (io_error == DM_ENDIO_REQUEUE)
743 return; 743 return;
744 744
745 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { 745 if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) {
746 /* 746 /*
747 * Preflush done for flush with data, reissue 747 * Preflush done for flush with data, reissue
748 * without REQ_FLUSH. 748 * without REQ_FLUSH.
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error)
797 struct dm_rq_clone_bio_info *info = clone->bi_private; 797 struct dm_rq_clone_bio_info *info = clone->bi_private;
798 struct dm_rq_target_io *tio = info->tio; 798 struct dm_rq_target_io *tio = info->tio;
799 struct bio *bio = info->orig; 799 struct bio *bio = info->orig;
800 unsigned int nr_bytes = info->orig->bi_size; 800 unsigned int nr_bytes = info->orig->bi_iter.bi_size;
801 801
802 bio_put(clone); 802 bio_put(clone);
803 803
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio)
1128 * this io. 1128 * this io.
1129 */ 1129 */
1130 atomic_inc(&tio->io->io_count); 1130 atomic_inc(&tio->io->io_count);
1131 sector = clone->bi_sector; 1131 sector = clone->bi_iter.bi_sector;
1132 r = ti->type->map(ti, clone); 1132 r = ti->type->map(ti, clone);
1133 if (r == DM_MAPIO_REMAPPED) { 1133 if (r == DM_MAPIO_REMAPPED) {
1134 /* the bio has been remapped so dispatch it */ 1134 /* the bio has been remapped so dispatch it */
@@ -1160,13 +1160,13 @@ struct clone_info {
1160 1160
1161static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) 1161static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len)
1162{ 1162{
1163 bio->bi_sector = sector; 1163 bio->bi_iter.bi_sector = sector;
1164 bio->bi_size = to_bytes(len); 1164 bio->bi_iter.bi_size = to_bytes(len);
1165} 1165}
1166 1166
1167static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count) 1167static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count)
1168{ 1168{
1169 bio->bi_idx = idx; 1169 bio->bi_iter.bi_idx = idx;
1170 bio->bi_vcnt = idx + bv_count; 1170 bio->bi_vcnt = idx + bv_count;
1171 bio->bi_flags &= ~(1 << BIO_SEG_VALID); 1171 bio->bi_flags &= ~(1 << BIO_SEG_VALID);
1172} 1172}
@@ -1202,7 +1202,7 @@ static void clone_split_bio(struct dm_target_io *tio, struct bio *bio,
1202 clone->bi_rw = bio->bi_rw; 1202 clone->bi_rw = bio->bi_rw;
1203 clone->bi_vcnt = 1; 1203 clone->bi_vcnt = 1;
1204 clone->bi_io_vec->bv_offset = offset; 1204 clone->bi_io_vec->bv_offset = offset;
1205 clone->bi_io_vec->bv_len = clone->bi_size; 1205 clone->bi_io_vec->bv_len = clone->bi_iter.bi_size;
1206 clone->bi_flags |= 1 << BIO_CLONED; 1206 clone->bi_flags |= 1 << BIO_CLONED;
1207 1207
1208 clone_bio_integrity(bio, clone, idx, len, offset, 1); 1208 clone_bio_integrity(bio, clone, idx, len, offset, 1);
@@ -1222,7 +1222,8 @@ static void clone_bio(struct dm_target_io *tio, struct bio *bio,
1222 bio_setup_sector(clone, sector, len); 1222 bio_setup_sector(clone, sector, len);
1223 bio_setup_bv(clone, idx, bv_count); 1223 bio_setup_bv(clone, idx, bv_count);
1224 1224
1225 if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) 1225 if (idx != bio->bi_iter.bi_idx ||
1226 clone->bi_iter.bi_size < bio->bi_iter.bi_size)
1226 trim = 1; 1227 trim = 1;
1227 clone_bio_integrity(bio, clone, idx, len, 0, trim); 1228 clone_bio_integrity(bio, clone, idx, len, 0, trim);
1228} 1229}
@@ -1510,8 +1511,8 @@ static void __split_and_process_bio(struct mapped_device *md,
1510 ci.io->bio = bio; 1511 ci.io->bio = bio;
1511 ci.io->md = md; 1512 ci.io->md = md;
1512 spin_lock_init(&ci.io->endio_lock); 1513 spin_lock_init(&ci.io->endio_lock);
1513 ci.sector = bio->bi_sector; 1514 ci.sector = bio->bi_iter.bi_sector;
1514 ci.idx = bio->bi_idx; 1515 ci.idx = bio->bi_iter.bi_idx;
1515 1516
1516 start_io_acct(ci.io); 1517 start_io_acct(ci.io);
1517 1518
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c
index 3193aefe982b..e8b4574956c7 100644
--- a/drivers/md/faulty.c
+++ b/drivers/md/faulty.c
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error)
74{ 74{
75 struct bio *b = bio->bi_private; 75 struct bio *b = bio->bi_private;
76 76
77 b->bi_size = bio->bi_size; 77 b->bi_iter.bi_size = bio->bi_iter.bi_size;
78 b->bi_sector = bio->bi_sector; 78 b->bi_iter.bi_sector = bio->bi_iter.bi_sector;
79 79
80 bio_put(bio); 80 bio_put(bio);
81 81
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio)
185 return; 185 return;
186 } 186 }
187 187
188 if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE)) 188 if (check_sector(conf, bio->bi_iter.bi_sector,
189 bio_end_sector(bio), WRITE))
189 failit = 1; 190 failit = 1;
190 if (check_mode(conf, WritePersistent)) { 191 if (check_mode(conf, WritePersistent)) {
191 add_sector(conf, bio->bi_sector, WritePersistent); 192 add_sector(conf, bio->bi_iter.bi_sector,
193 WritePersistent);
192 failit = 1; 194 failit = 1;
193 } 195 }
194 if (check_mode(conf, WriteTransient)) 196 if (check_mode(conf, WriteTransient))
195 failit = 1; 197 failit = 1;
196 } else { 198 } else {
197 /* read request */ 199 /* read request */
198 if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ)) 200 if (check_sector(conf, bio->bi_iter.bi_sector,
201 bio_end_sector(bio), READ))
199 failit = 1; 202 failit = 1;
200 if (check_mode(conf, ReadTransient)) 203 if (check_mode(conf, ReadTransient))
201 failit = 1; 204 failit = 1;
202 if (check_mode(conf, ReadPersistent)) { 205 if (check_mode(conf, ReadPersistent)) {
203 add_sector(conf, bio->bi_sector, ReadPersistent); 206 add_sector(conf, bio->bi_iter.bi_sector,
207 ReadPersistent);
204 failit = 1; 208 failit = 1;
205 } 209 }
206 if (check_mode(conf, ReadFixable)) { 210 if (check_mode(conf, ReadFixable)) {
207 add_sector(conf, bio->bi_sector, ReadFixable); 211 add_sector(conf, bio->bi_iter.bi_sector,
212 ReadFixable);
208 failit = 1; 213 failit = 1;
209 } 214 }
210 } 215 }
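
The faulty personality above checks whether a request overlaps a recorded fault by comparing the half-open range [bi_iter.bi_sector, bio_end_sector(bio)). Expressed against the iterator fields, the two accessors it relies on amount to the following sketch (treat the exact kernel definitions as assumptions):

	#define SECTOR_SHIFT 9

	/* Sketch of the accessors used above, in terms of the iterator fields. */
	static inline unsigned int model_bio_sectors(unsigned int bi_size)
	{
		return bi_size >> SECTOR_SHIFT;
	}

	static inline unsigned long long model_bio_end_sector(unsigned long long bi_sector,
							      unsigned int bi_size)
	{
		return bi_sector + model_bio_sectors(bi_size);
	}
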
diff --git a/drivers/md/linear.c b/drivers/md/linear.c
index f03fabd2b37b..fb3b0d04edfb 100644
--- a/drivers/md/linear.c
+++ b/drivers/md/linear.c
@@ -297,19 +297,19 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
297 } 297 }
298 298
299 rcu_read_lock(); 299 rcu_read_lock();
300 tmp_dev = which_dev(mddev, bio->bi_sector); 300 tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector);
301 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; 301 start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors;
302 302
303 303
304 if (unlikely(bio->bi_sector >= (tmp_dev->end_sector) 304 if (unlikely(bio->bi_iter.bi_sector >= (tmp_dev->end_sector)
305 || (bio->bi_sector < start_sector))) { 305 || (bio->bi_iter.bi_sector < start_sector))) {
306 char b[BDEVNAME_SIZE]; 306 char b[BDEVNAME_SIZE];
307 307
308 printk(KERN_ERR 308 printk(KERN_ERR
309 "md/linear:%s: make_request: Sector %llu out of bounds on " 309 "md/linear:%s: make_request: Sector %llu out of bounds on "
310 "dev %s: %llu sectors, offset %llu\n", 310 "dev %s: %llu sectors, offset %llu\n",
311 mdname(mddev), 311 mdname(mddev),
312 (unsigned long long)bio->bi_sector, 312 (unsigned long long)bio->bi_iter.bi_sector,
313 bdevname(tmp_dev->rdev->bdev, b), 313 bdevname(tmp_dev->rdev->bdev, b),
314 (unsigned long long)tmp_dev->rdev->sectors, 314 (unsigned long long)tmp_dev->rdev->sectors,
315 (unsigned long long)start_sector); 315 (unsigned long long)start_sector);
@@ -326,7 +326,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
326 326
327 rcu_read_unlock(); 327 rcu_read_unlock();
328 328
329 bp = bio_split(bio, end_sector - bio->bi_sector); 329 bp = bio_split(bio, end_sector - bio->bi_iter.bi_sector);
330 330
331 linear_make_request(mddev, &bp->bio1); 331 linear_make_request(mddev, &bp->bio1);
332 linear_make_request(mddev, &bp->bio2); 332 linear_make_request(mddev, &bp->bio2);
@@ -335,7 +335,7 @@ static void linear_make_request(struct mddev *mddev, struct bio *bio)
335 } 335 }
336 336
337 bio->bi_bdev = tmp_dev->rdev->bdev; 337 bio->bi_bdev = tmp_dev->rdev->bdev;
338 bio->bi_sector = bio->bi_sector - start_sector 338 bio->bi_iter.bi_sector = bio->bi_iter.bi_sector - start_sector
339 + tmp_dev->rdev->data_offset; 339 + tmp_dev->rdev->data_offset;
340 rcu_read_unlock(); 340 rcu_read_unlock();
341 341
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 739b1ec54e28..b07fed398fd7 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws)
393 struct mddev *mddev = container_of(ws, struct mddev, flush_work); 393 struct mddev *mddev = container_of(ws, struct mddev, flush_work);
394 struct bio *bio = mddev->flush_bio; 394 struct bio *bio = mddev->flush_bio;
395 395
396 if (bio->bi_size == 0) 396 if (bio->bi_iter.bi_size == 0)
397 /* an empty barrier - all done */ 397 /* an empty barrier - all done */
398 bio_endio(bio, 0); 398 bio_endio(bio, 0);
399 else { 399 else {
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev,
754 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); 754 struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev);
755 755
756 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; 756 bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev;
757 bio->bi_sector = sector; 757 bio->bi_iter.bi_sector = sector;
758 bio_add_page(bio, page, size, 0); 758 bio_add_page(bio, page, size, 0);
759 bio->bi_private = rdev; 759 bio->bi_private = rdev;
760 bio->bi_end_io = super_written; 760 bio->bi_end_io = super_written;
@@ -785,13 +785,13 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size,
785 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? 785 bio->bi_bdev = (metadata_op && rdev->meta_bdev) ?
786 rdev->meta_bdev : rdev->bdev; 786 rdev->meta_bdev : rdev->bdev;
787 if (metadata_op) 787 if (metadata_op)
788 bio->bi_sector = sector + rdev->sb_start; 788 bio->bi_iter.bi_sector = sector + rdev->sb_start;
789 else if (rdev->mddev->reshape_position != MaxSector && 789 else if (rdev->mddev->reshape_position != MaxSector &&
790 (rdev->mddev->reshape_backwards == 790 (rdev->mddev->reshape_backwards ==
791 (sector >= rdev->mddev->reshape_position))) 791 (sector >= rdev->mddev->reshape_position)))
792 bio->bi_sector = sector + rdev->new_data_offset; 792 bio->bi_iter.bi_sector = sector + rdev->new_data_offset;
793 else 793 else
794 bio->bi_sector = sector + rdev->data_offset; 794 bio->bi_iter.bi_sector = sector + rdev->data_offset;
795 bio_add_page(bio, page, size, 0); 795 bio_add_page(bio, page, size, 0);
796 submit_bio_wait(rw, bio); 796 submit_bio_wait(rw, bio);
797 797
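
sync_page_io() above picks one of three bases for the on-disk sector: the superblock start for metadata operations, the new data offset when the I/O lies on the already-reshaped side of a reshape, and the old data offset otherwise. A condensed, self-contained sketch of that selection:

	/* Simplified sketch of the base-offset selection in sync_page_io(). */
	unsigned long long choose_base(unsigned long long sector, int metadata_op,
				       int reshaping, int reshape_backwards,
				       unsigned long long reshape_position,
				       unsigned long long sb_start,
				       unsigned long long data_offset,
				       unsigned long long new_data_offset)
	{
		if (metadata_op)
			return sector + sb_start;
		if (reshaping && reshape_backwards == (sector >= reshape_position))
			return sector + new_data_offset;
		return sector + data_offset;
	}
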
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c
index 1642eae75a33..849ad39f547b 100644
--- a/drivers/md/multipath.c
+++ b/drivers/md/multipath.c
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error)
100 md_error (mp_bh->mddev, rdev); 100 md_error (mp_bh->mddev, rdev);
101 printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", 101 printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n",
102 bdevname(rdev->bdev,b), 102 bdevname(rdev->bdev,b),
103 (unsigned long long)bio->bi_sector); 103 (unsigned long long)bio->bi_iter.bi_sector);
104 multipath_reschedule_retry(mp_bh); 104 multipath_reschedule_retry(mp_bh);
105 } else 105 } else
106 multipath_end_bh_io(mp_bh, error); 106 multipath_end_bh_io(mp_bh, error);
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio)
132 multipath = conf->multipaths + mp_bh->path; 132 multipath = conf->multipaths + mp_bh->path;
133 133
134 mp_bh->bio = *bio; 134 mp_bh->bio = *bio;
135 mp_bh->bio.bi_sector += multipath->rdev->data_offset; 135 mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset;
136 mp_bh->bio.bi_bdev = multipath->rdev->bdev; 136 mp_bh->bio.bi_bdev = multipath->rdev->bdev;
137 mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; 137 mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT;
138 mp_bh->bio.bi_end_io = multipath_end_request; 138 mp_bh->bio.bi_end_io = multipath_end_request;
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread)
355 spin_unlock_irqrestore(&conf->device_lock, flags); 355 spin_unlock_irqrestore(&conf->device_lock, flags);
356 356
357 bio = &mp_bh->bio; 357 bio = &mp_bh->bio;
358 bio->bi_sector = mp_bh->master_bio->bi_sector; 358 bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector;
359 359
360 if ((mp_bh->path = multipath_map (conf))<0) { 360 if ((mp_bh->path = multipath_map (conf))<0) {
361 printk(KERN_ALERT "multipath: %s: unrecoverable IO read" 361 printk(KERN_ALERT "multipath: %s: unrecoverable IO read"
362 " error for block %llu\n", 362 " error for block %llu\n",
363 bdevname(bio->bi_bdev,b), 363 bdevname(bio->bi_bdev,b),
364 (unsigned long long)bio->bi_sector); 364 (unsigned long long)bio->bi_iter.bi_sector);
365 multipath_end_bh_io(mp_bh, -EIO); 365 multipath_end_bh_io(mp_bh, -EIO);
366 } else { 366 } else {
367 printk(KERN_ERR "multipath: %s: redirecting sector %llu" 367 printk(KERN_ERR "multipath: %s: redirecting sector %llu"
368 " to another IO path\n", 368 " to another IO path\n",
369 bdevname(bio->bi_bdev,b), 369 bdevname(bio->bi_bdev,b),
370 (unsigned long long)bio->bi_sector); 370 (unsigned long long)bio->bi_iter.bi_sector);
371 *bio = *(mp_bh->master_bio); 371 *bio = *(mp_bh->master_bio);
372 bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; 372 bio->bi_iter.bi_sector +=
373 conf->multipaths[mp_bh->path].rdev->data_offset;
373 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; 374 bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev;
374 bio->bi_rw |= REQ_FAILFAST_TRANSPORT; 375 bio->bi_rw |= REQ_FAILFAST_TRANSPORT;
375 bio->bi_end_io = multipath_end_request; 376 bio->bi_end_io = multipath_end_request;
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c
index c4d420b7d2f4..e38d1d3226f3 100644
--- a/drivers/md/raid0.c
+++ b/drivers/md/raid0.c
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
501 unsigned int chunk_sects, struct bio *bio) 501 unsigned int chunk_sects, struct bio *bio)
502{ 502{
503 if (likely(is_power_of_2(chunk_sects))) { 503 if (likely(is_power_of_2(chunk_sects))) {
504 return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) 504 return chunk_sects >=
505 ((bio->bi_iter.bi_sector & (chunk_sects-1))
505 + bio_sectors(bio)); 506 + bio_sectors(bio));
506 } else{ 507 } else{
507 sector_t sector = bio->bi_sector; 508 sector_t sector = bio->bi_iter.bi_sector;
508 return chunk_sects >= (sector_div(sector, chunk_sects) 509 return chunk_sects >= (sector_div(sector, chunk_sects)
509 + bio_sectors(bio)); 510 + bio_sectors(bio));
510 } 511 }
@@ -524,7 +525,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
524 525
525 chunk_sects = mddev->chunk_sectors; 526 chunk_sects = mddev->chunk_sectors;
526 if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { 527 if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) {
527 sector_t sector = bio->bi_sector; 528 sector_t sector = bio->bi_iter.bi_sector;
528 struct bio_pair *bp; 529 struct bio_pair *bp;
529 /* Sanity check -- queue functions should prevent this happening */ 530 /* Sanity check -- queue functions should prevent this happening */
530 if (bio_segments(bio) > 1) 531 if (bio_segments(bio) > 1)
@@ -544,12 +545,12 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
544 return; 545 return;
545 } 546 }
546 547
547 sector_offset = bio->bi_sector; 548 sector_offset = bio->bi_iter.bi_sector;
548 zone = find_zone(mddev->private, &sector_offset); 549 zone = find_zone(mddev->private, &sector_offset);
549 tmp_dev = map_sector(mddev, zone, bio->bi_sector, 550 tmp_dev = map_sector(mddev, zone, bio->bi_iter.bi_sector,
550 &sector_offset); 551 &sector_offset);
551 bio->bi_bdev = tmp_dev->bdev; 552 bio->bi_bdev = tmp_dev->bdev;
552 bio->bi_sector = sector_offset + zone->dev_start + 553 bio->bi_iter.bi_sector = sector_offset + zone->dev_start +
553 tmp_dev->data_offset; 554 tmp_dev->data_offset;
554 555
555 if (unlikely((bio->bi_rw & REQ_DISCARD) && 556 if (unlikely((bio->bi_rw & REQ_DISCARD) &&
@@ -566,7 +567,8 @@ bad_map:
566 printk("md/raid0:%s: make_request bug: can't convert block across chunks" 567 printk("md/raid0:%s: make_request bug: can't convert block across chunks"
567 " or bigger than %dk %llu %d\n", 568 " or bigger than %dk %llu %d\n",
568 mdname(mddev), chunk_sects / 2, 569 mdname(mddev), chunk_sects / 2,
569 (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); 570 (unsigned long long)bio->bi_iter.bi_sector,
571 bio_sectors(bio) / 2);
570 572
571 bio_io_error(bio); 573 bio_io_error(bio);
572 return; 574 return;
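
is_io_in_chunk_boundary() above only accepts a bio that stays inside one chunk. For a power-of-two chunk size the offset inside the chunk is bi_iter.bi_sector & (chunk_sects - 1), and the request fits when that offset plus its length does not exceed the chunk. A worked example with 64 KiB chunks (128 sectors, an assumed value):

	#include <stdio.h>

	int main(void)
	{
		unsigned long long chunk_sects = 128;	/* 64 KiB chunks */
		unsigned long long bi_sector = 1000;	/* bio start */
		unsigned int nr_sectors = 24;		/* bio_sectors(bio) */

		unsigned long long offset = bi_sector & (chunk_sects - 1);	/* 104 */
		int fits = chunk_sects >= offset + nr_sectors;			/* 128 >= 128 -> 1 */

		printf("offset %llu, fits %d\n", offset, fits);
		return 0;
	}
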
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
index 1e5a540995e9..db3b9d7314f1 100644
--- a/drivers/md/raid1.c
+++ b/drivers/md/raid1.c
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio)
229 int done; 229 int done;
230 struct r1conf *conf = r1_bio->mddev->private; 230 struct r1conf *conf = r1_bio->mddev->private;
231 sector_t start_next_window = r1_bio->start_next_window; 231 sector_t start_next_window = r1_bio->start_next_window;
232 sector_t bi_sector = bio->bi_sector; 232 sector_t bi_sector = bio->bi_iter.bi_sector;
233 233
234 if (bio->bi_phys_segments) { 234 if (bio->bi_phys_segments) {
235 unsigned long flags; 235 unsigned long flags;
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
265 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { 265 if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) {
266 pr_debug("raid1: sync end %s on sectors %llu-%llu\n", 266 pr_debug("raid1: sync end %s on sectors %llu-%llu\n",
267 (bio_data_dir(bio) == WRITE) ? "write" : "read", 267 (bio_data_dir(bio) == WRITE) ? "write" : "read",
268 (unsigned long long) bio->bi_sector, 268 (unsigned long long) bio->bi_iter.bi_sector,
269 (unsigned long long) bio->bi_sector + 269 (unsigned long long) bio_end_sector(bio) - 1);
270 bio_sectors(bio) - 1);
271 270
272 call_bio_endio(r1_bio); 271 call_bio_endio(r1_bio);
273 } 272 }
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error)
466 struct bio *mbio = r1_bio->master_bio; 465 struct bio *mbio = r1_bio->master_bio;
467 pr_debug("raid1: behind end write sectors" 466 pr_debug("raid1: behind end write sectors"
468 " %llu-%llu\n", 467 " %llu-%llu\n",
469 (unsigned long long) mbio->bi_sector, 468 (unsigned long long) mbio->bi_iter.bi_sector,
470 (unsigned long long) mbio->bi_sector + 469 (unsigned long long) bio_end_sector(mbio) - 1);
471 bio_sectors(mbio) - 1);
472 call_bio_endio(r1_bio); 470 call_bio_endio(r1_bio);
473 } 471 }
474 } 472 }
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio)
875 else if ((conf->next_resync - RESYNC_WINDOW_SECTORS 873 else if ((conf->next_resync - RESYNC_WINDOW_SECTORS
876 >= bio_end_sector(bio)) || 874 >= bio_end_sector(bio)) ||
877 (conf->next_resync + NEXT_NORMALIO_DISTANCE 875 (conf->next_resync + NEXT_NORMALIO_DISTANCE
878 <= bio->bi_sector)) 876 <= bio->bi_iter.bi_sector))
879 wait = false; 877 wait = false;
880 else 878 else
881 wait = true; 879 wait = true;
@@ -913,19 +911,19 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio)
913 911
914 if (bio && bio_data_dir(bio) == WRITE) { 912 if (bio && bio_data_dir(bio) == WRITE) {
915 if (conf->next_resync + NEXT_NORMALIO_DISTANCE 913 if (conf->next_resync + NEXT_NORMALIO_DISTANCE
916 <= bio->bi_sector) { 914 <= bio->bi_iter.bi_sector) {
917 if (conf->start_next_window == MaxSector) 915 if (conf->start_next_window == MaxSector)
918 conf->start_next_window = 916 conf->start_next_window =
919 conf->next_resync + 917 conf->next_resync +
920 NEXT_NORMALIO_DISTANCE; 918 NEXT_NORMALIO_DISTANCE;
921 919
922 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) 920 if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE)
923 <= bio->bi_sector) 921 <= bio->bi_iter.bi_sector)
924 conf->next_window_requests++; 922 conf->next_window_requests++;
925 else 923 else
926 conf->current_window_requests++; 924 conf->current_window_requests++;
927 } 925 }
928 if (bio->bi_sector >= conf->start_next_window) 926 if (bio->bi_iter.bi_sector >= conf->start_next_window)
929 sector = conf->start_next_window; 927 sector = conf->start_next_window;
930 } 928 }
931 929
@@ -1028,7 +1026,8 @@ do_sync_io:
1028 if (bvecs[i].bv_page) 1026 if (bvecs[i].bv_page)
1029 put_page(bvecs[i].bv_page); 1027 put_page(bvecs[i].bv_page);
1030 kfree(bvecs); 1028 kfree(bvecs);
1031 pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); 1029 pr_debug("%dB behind alloc failed, doing sync I/O\n",
1030 bio->bi_iter.bi_size);
1032} 1031}
1033 1032
1034struct raid1_plug_cb { 1033struct raid1_plug_cb {
@@ -1108,7 +1107,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1108 1107
1109 if (bio_data_dir(bio) == WRITE && 1108 if (bio_data_dir(bio) == WRITE &&
1110 bio_end_sector(bio) > mddev->suspend_lo && 1109 bio_end_sector(bio) > mddev->suspend_lo &&
1111 bio->bi_sector < mddev->suspend_hi) { 1110 bio->bi_iter.bi_sector < mddev->suspend_hi) {
1112 /* As the suspend_* range is controlled by 1111 /* As the suspend_* range is controlled by
1113 * userspace, we want an interruptible 1112 * userspace, we want an interruptible
1114 * wait. 1113 * wait.
@@ -1119,7 +1118,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1119 prepare_to_wait(&conf->wait_barrier, 1118 prepare_to_wait(&conf->wait_barrier,
1120 &w, TASK_INTERRUPTIBLE); 1119 &w, TASK_INTERRUPTIBLE);
1121 if (bio_end_sector(bio) <= mddev->suspend_lo || 1120 if (bio_end_sector(bio) <= mddev->suspend_lo ||
1122 bio->bi_sector >= mddev->suspend_hi) 1121 bio->bi_iter.bi_sector >= mddev->suspend_hi)
1123 break; 1122 break;
1124 schedule(); 1123 schedule();
1125 } 1124 }
@@ -1141,7 +1140,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1141 r1_bio->sectors = bio_sectors(bio); 1140 r1_bio->sectors = bio_sectors(bio);
1142 r1_bio->state = 0; 1141 r1_bio->state = 0;
1143 r1_bio->mddev = mddev; 1142 r1_bio->mddev = mddev;
1144 r1_bio->sector = bio->bi_sector; 1143 r1_bio->sector = bio->bi_iter.bi_sector;
1145 1144
1146 /* We might need to issue multiple reads to different 1145 /* We might need to issue multiple reads to different
1147 * devices if there are bad blocks around, so we keep 1146 * devices if there are bad blocks around, so we keep
@@ -1181,12 +1180,13 @@ read_again:
1181 r1_bio->read_disk = rdisk; 1180 r1_bio->read_disk = rdisk;
1182 1181
1183 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1182 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1184 bio_trim(read_bio, r1_bio->sector - bio->bi_sector, 1183 bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector,
1185 max_sectors); 1184 max_sectors);
1186 1185
1187 r1_bio->bios[rdisk] = read_bio; 1186 r1_bio->bios[rdisk] = read_bio;
1188 1187
1189 read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; 1188 read_bio->bi_iter.bi_sector = r1_bio->sector +
1189 mirror->rdev->data_offset;
1190 read_bio->bi_bdev = mirror->rdev->bdev; 1190 read_bio->bi_bdev = mirror->rdev->bdev;
1191 read_bio->bi_end_io = raid1_end_read_request; 1191 read_bio->bi_end_io = raid1_end_read_request;
1192 read_bio->bi_rw = READ | do_sync; 1192 read_bio->bi_rw = READ | do_sync;
@@ -1198,7 +1198,7 @@ read_again:
1198 */ 1198 */
1199 1199
1200 sectors_handled = (r1_bio->sector + max_sectors 1200 sectors_handled = (r1_bio->sector + max_sectors
1201 - bio->bi_sector); 1201 - bio->bi_iter.bi_sector);
1202 r1_bio->sectors = max_sectors; 1202 r1_bio->sectors = max_sectors;
1203 spin_lock_irq(&conf->device_lock); 1203 spin_lock_irq(&conf->device_lock);
1204 if (bio->bi_phys_segments == 0) 1204 if (bio->bi_phys_segments == 0)
@@ -1219,7 +1219,8 @@ read_again:
1219 r1_bio->sectors = bio_sectors(bio) - sectors_handled; 1219 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1220 r1_bio->state = 0; 1220 r1_bio->state = 0;
1221 r1_bio->mddev = mddev; 1221 r1_bio->mddev = mddev;
1222 r1_bio->sector = bio->bi_sector + sectors_handled; 1222 r1_bio->sector = bio->bi_iter.bi_sector +
1223 sectors_handled;
1223 goto read_again; 1224 goto read_again;
1224 } else 1225 } else
1225 generic_make_request(read_bio); 1226 generic_make_request(read_bio);
@@ -1322,7 +1323,7 @@ read_again:
1322 if (r1_bio->bios[j]) 1323 if (r1_bio->bios[j])
1323 rdev_dec_pending(conf->mirrors[j].rdev, mddev); 1324 rdev_dec_pending(conf->mirrors[j].rdev, mddev);
1324 r1_bio->state = 0; 1325 r1_bio->state = 0;
1325 allow_barrier(conf, start_next_window, bio->bi_sector); 1326 allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector);
1326 md_wait_for_blocked_rdev(blocked_rdev, mddev); 1327 md_wait_for_blocked_rdev(blocked_rdev, mddev);
1327 start_next_window = wait_barrier(conf, bio); 1328 start_next_window = wait_barrier(conf, bio);
1328 /* 1329 /*
@@ -1349,7 +1350,7 @@ read_again:
1349 bio->bi_phys_segments++; 1350 bio->bi_phys_segments++;
1350 spin_unlock_irq(&conf->device_lock); 1351 spin_unlock_irq(&conf->device_lock);
1351 } 1352 }
1352 sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; 1353 sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector;
1353 1354
1354 atomic_set(&r1_bio->remaining, 1); 1355 atomic_set(&r1_bio->remaining, 1);
1355 atomic_set(&r1_bio->behind_remaining, 0); 1356 atomic_set(&r1_bio->behind_remaining, 0);
@@ -1361,7 +1362,7 @@ read_again:
1361 continue; 1362 continue;
1362 1363
1363 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1364 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1364 bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors); 1365 bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors);
1365 1366
1366 if (first_clone) { 1367 if (first_clone) {
1367 /* do behind I/O ? 1368 /* do behind I/O ?
@@ -1395,7 +1396,7 @@ read_again:
1395 1396
1396 r1_bio->bios[i] = mbio; 1397 r1_bio->bios[i] = mbio;
1397 1398
1398 mbio->bi_sector = (r1_bio->sector + 1399 mbio->bi_iter.bi_sector = (r1_bio->sector +
1399 conf->mirrors[i].rdev->data_offset); 1400 conf->mirrors[i].rdev->data_offset);
1400 mbio->bi_bdev = conf->mirrors[i].rdev->bdev; 1401 mbio->bi_bdev = conf->mirrors[i].rdev->bdev;
1401 mbio->bi_end_io = raid1_end_write_request; 1402 mbio->bi_end_io = raid1_end_write_request;
@@ -1435,7 +1436,7 @@ read_again:
1435 r1_bio->sectors = bio_sectors(bio) - sectors_handled; 1436 r1_bio->sectors = bio_sectors(bio) - sectors_handled;
1436 r1_bio->state = 0; 1437 r1_bio->state = 0;
1437 r1_bio->mddev = mddev; 1438 r1_bio->mddev = mddev;
1438 r1_bio->sector = bio->bi_sector + sectors_handled; 1439 r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1439 goto retry_write; 1440 goto retry_write;
1440 } 1441 }
1441 1442
@@ -1959,14 +1960,14 @@ static int process_checks(struct r1bio *r1_bio)
1959 /* fixup the bio for reuse */ 1960 /* fixup the bio for reuse */
1960 bio_reset(b); 1961 bio_reset(b);
1961 b->bi_vcnt = vcnt; 1962 b->bi_vcnt = vcnt;
1962 b->bi_size = r1_bio->sectors << 9; 1963 b->bi_iter.bi_size = r1_bio->sectors << 9;
1963 b->bi_sector = r1_bio->sector + 1964 b->bi_iter.bi_sector = r1_bio->sector +
1964 conf->mirrors[i].rdev->data_offset; 1965 conf->mirrors[i].rdev->data_offset;
1965 b->bi_bdev = conf->mirrors[i].rdev->bdev; 1966 b->bi_bdev = conf->mirrors[i].rdev->bdev;
1966 b->bi_end_io = end_sync_read; 1967 b->bi_end_io = end_sync_read;
1967 b->bi_private = r1_bio; 1968 b->bi_private = r1_bio;
1968 1969
1969 size = b->bi_size; 1970 size = b->bi_iter.bi_size;
1970 for (j = 0; j < vcnt ; j++) { 1971 for (j = 0; j < vcnt ; j++) {
1971 struct bio_vec *bi; 1972 struct bio_vec *bi;
1972 bi = &b->bi_io_vec[j]; 1973 bi = &b->bi_io_vec[j];
@@ -2221,11 +2222,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i)
2221 } 2222 }
2222 2223
2223 wbio->bi_rw = WRITE; 2224 wbio->bi_rw = WRITE;
2224 wbio->bi_sector = r1_bio->sector; 2225 wbio->bi_iter.bi_sector = r1_bio->sector;
2225 wbio->bi_size = r1_bio->sectors << 9; 2226 wbio->bi_iter.bi_size = r1_bio->sectors << 9;
2226 2227
2227 bio_trim(wbio, sector - r1_bio->sector, sectors); 2228 bio_trim(wbio, sector - r1_bio->sector, sectors);
2228 wbio->bi_sector += rdev->data_offset; 2229 wbio->bi_iter.bi_sector += rdev->data_offset;
2229 wbio->bi_bdev = rdev->bdev; 2230 wbio->bi_bdev = rdev->bdev;
2230 if (submit_bio_wait(WRITE, wbio) == 0) 2231 if (submit_bio_wait(WRITE, wbio) == 0)
2231 /* failure! */ 2232 /* failure! */
@@ -2339,7 +2340,8 @@ read_more:
2339 } 2340 }
2340 r1_bio->read_disk = disk; 2341 r1_bio->read_disk = disk;
2341 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); 2342 bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev);
2342 bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors); 2343 bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector,
2344 max_sectors);
2343 r1_bio->bios[r1_bio->read_disk] = bio; 2345 r1_bio->bios[r1_bio->read_disk] = bio;
2344 rdev = conf->mirrors[disk].rdev; 2346 rdev = conf->mirrors[disk].rdev;
2345 printk_ratelimited(KERN_ERR 2347 printk_ratelimited(KERN_ERR
@@ -2348,7 +2350,7 @@ read_more:
2348 mdname(mddev), 2350 mdname(mddev),
2349 (unsigned long long)r1_bio->sector, 2351 (unsigned long long)r1_bio->sector,
2350 bdevname(rdev->bdev, b)); 2352 bdevname(rdev->bdev, b));
2351 bio->bi_sector = r1_bio->sector + rdev->data_offset; 2353 bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset;
2352 bio->bi_bdev = rdev->bdev; 2354 bio->bi_bdev = rdev->bdev;
2353 bio->bi_end_io = raid1_end_read_request; 2355 bio->bi_end_io = raid1_end_read_request;
2354 bio->bi_rw = READ | do_sync; 2356 bio->bi_rw = READ | do_sync;
@@ -2357,7 +2359,7 @@ read_more:
2357 /* Drat - have to split this up more */ 2359 /* Drat - have to split this up more */
2358 struct bio *mbio = r1_bio->master_bio; 2360 struct bio *mbio = r1_bio->master_bio;
2359 int sectors_handled = (r1_bio->sector + max_sectors 2361 int sectors_handled = (r1_bio->sector + max_sectors
2360 - mbio->bi_sector); 2362 - mbio->bi_iter.bi_sector);
2361 r1_bio->sectors = max_sectors; 2363 r1_bio->sectors = max_sectors;
2362 spin_lock_irq(&conf->device_lock); 2364 spin_lock_irq(&conf->device_lock);
2363 if (mbio->bi_phys_segments == 0) 2365 if (mbio->bi_phys_segments == 0)
@@ -2375,7 +2377,8 @@ read_more:
2375 r1_bio->state = 0; 2377 r1_bio->state = 0;
2376 set_bit(R1BIO_ReadError, &r1_bio->state); 2378 set_bit(R1BIO_ReadError, &r1_bio->state);
2377 r1_bio->mddev = mddev; 2379 r1_bio->mddev = mddev;
2378 r1_bio->sector = mbio->bi_sector + sectors_handled; 2380 r1_bio->sector = mbio->bi_iter.bi_sector +
2381 sectors_handled;
2379 2382
2380 goto read_more; 2383 goto read_more;
2381 } else 2384 } else
@@ -2599,7 +2602,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
2599 } 2602 }
2600 if (bio->bi_end_io) { 2603 if (bio->bi_end_io) {
2601 atomic_inc(&rdev->nr_pending); 2604 atomic_inc(&rdev->nr_pending);
2602 bio->bi_sector = sector_nr + rdev->data_offset; 2605 bio->bi_iter.bi_sector = sector_nr + rdev->data_offset;
2603 bio->bi_bdev = rdev->bdev; 2606 bio->bi_bdev = rdev->bdev;
2604 bio->bi_private = r1_bio; 2607 bio->bi_private = r1_bio;
2605 } 2608 }
@@ -2699,7 +2702,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
2699 continue; 2702 continue;
2700 /* remove last page from this bio */ 2703 /* remove last page from this bio */
2701 bio->bi_vcnt--; 2704 bio->bi_vcnt--;
2702 bio->bi_size -= len; 2705 bio->bi_iter.bi_size -= len;
2703 bio->bi_flags &= ~(1<< BIO_SEG_VALID); 2706 bio->bi_flags &= ~(1<< BIO_SEG_VALID);
2704 } 2707 }
2705 goto bio_full; 2708 goto bio_full;
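
When raid1 has to work around bad blocks it clones the master bio, trims the clone to max_sectors, and tracks its progress through the original request as sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector; the follow-up r1_bio then starts at bi_sector + sectors_handled. A tiny sketch of that bookkeeping with assumed numbers:

	#include <stdio.h>

	int main(void)
	{
		unsigned long long bi_sector   = 5000;	/* master bio start */
		unsigned long long r1_sector   = 5000;	/* start of the current r1_bio */
		unsigned long long max_sectors = 48;	/* clean run before the bad block */

		unsigned long long sectors_handled = r1_sector + max_sectors - bi_sector;
		unsigned long long next_start      = bi_sector + sectors_handled;

		printf("handled %llu, next r1_bio starts at %llu\n",
		       sectors_handled, next_start);	/* handled 48, next at 5048 */
		return 0;
	}
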
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
index c504e8389e69..dbf3b63c2754 100644
--- a/drivers/md/raid10.c
+++ b/drivers/md/raid10.c
@@ -1182,7 +1182,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1182 /* If this request crosses a chunk boundary, we need to 1182 /* If this request crosses a chunk boundary, we need to
1183 * split it. This will only happen for 1 PAGE (or less) requests. 1183 * split it. This will only happen for 1 PAGE (or less) requests.
1184 */ 1184 */
1185 if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio) 1185 if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + bio_sectors(bio)
1186 > chunk_sects 1186 > chunk_sects
1187 && (conf->geo.near_copies < conf->geo.raid_disks 1187 && (conf->geo.near_copies < conf->geo.raid_disks
1188 || conf->prev.near_copies < conf->prev.raid_disks))) { 1188 || conf->prev.near_copies < conf->prev.raid_disks))) {
@@ -1193,8 +1193,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1193 /* This is a one page bio that upper layers 1193 /* This is a one page bio that upper layers
1194 * refuse to split for us, so we need to split it. 1194 * refuse to split for us, so we need to split it.
1195 */ 1195 */
1196 bp = bio_split(bio, 1196 bp = bio_split(bio, chunk_sects -
1197 chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); 1197 (bio->bi_iter.bi_sector & (chunk_sects - 1)));
1198 1198
1199 /* Each of these 'make_request' calls will call 'wait_barrier'. 1199 /* Each of these 'make_request' calls will call 'wait_barrier'.
1200 * If the first succeeds but the second blocks due to the resync 1200 * If the first succeeds but the second blocks due to the resync
@@ -1221,7 +1221,8 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1221 bad_map: 1221 bad_map:
1222 printk("md/raid10:%s: make_request bug: can't convert block across chunks" 1222 printk("md/raid10:%s: make_request bug: can't convert block across chunks"
1223 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, 1223 " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
1224 (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); 1224 (unsigned long long)bio->bi_iter.bi_sector,
1225 bio_sectors(bio) / 2);
1225 1226
1226 bio_io_error(bio); 1227 bio_io_error(bio);
1227 return; 1228 return;
@@ -1238,24 +1239,25 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1238 1239
1239 sectors = bio_sectors(bio); 1240 sectors = bio_sectors(bio);
1240 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1241 while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1241 bio->bi_sector < conf->reshape_progress && 1242 bio->bi_iter.bi_sector < conf->reshape_progress &&
1242 bio->bi_sector + sectors > conf->reshape_progress) { 1243 bio->bi_iter.bi_sector + sectors > conf->reshape_progress) {
1243 /* IO spans the reshape position. Need to wait for 1244 /* IO spans the reshape position. Need to wait for
1244 * reshape to pass 1245 * reshape to pass
1245 */ 1246 */
1246 allow_barrier(conf); 1247 allow_barrier(conf);
1247 wait_event(conf->wait_barrier, 1248 wait_event(conf->wait_barrier,
1248 conf->reshape_progress <= bio->bi_sector || 1249 conf->reshape_progress <= bio->bi_iter.bi_sector ||
1249 conf->reshape_progress >= bio->bi_sector + sectors); 1250 conf->reshape_progress >= bio->bi_iter.bi_sector +
1251 sectors);
1250 wait_barrier(conf); 1252 wait_barrier(conf);
1251 } 1253 }
1252 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && 1254 if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
1253 bio_data_dir(bio) == WRITE && 1255 bio_data_dir(bio) == WRITE &&
1254 (mddev->reshape_backwards 1256 (mddev->reshape_backwards
1255 ? (bio->bi_sector < conf->reshape_safe && 1257 ? (bio->bi_iter.bi_sector < conf->reshape_safe &&
1256 bio->bi_sector + sectors > conf->reshape_progress) 1258 bio->bi_iter.bi_sector + sectors > conf->reshape_progress)
1257 : (bio->bi_sector + sectors > conf->reshape_safe && 1259 : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe &&
1258 bio->bi_sector < conf->reshape_progress))) { 1260 bio->bi_iter.bi_sector < conf->reshape_progress))) {
1259 /* Need to update reshape_position in metadata */ 1261 /* Need to update reshape_position in metadata */
1260 mddev->reshape_position = conf->reshape_progress; 1262 mddev->reshape_position = conf->reshape_progress;
1261 set_bit(MD_CHANGE_DEVS, &mddev->flags); 1263 set_bit(MD_CHANGE_DEVS, &mddev->flags);
@@ -1273,7 +1275,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
1273 r10_bio->sectors = sectors; 1275 r10_bio->sectors = sectors;
1274 1276
1275 r10_bio->mddev = mddev; 1277 r10_bio->mddev = mddev;
1276 r10_bio->sector = bio->bi_sector; 1278 r10_bio->sector = bio->bi_iter.bi_sector;
1277 r10_bio->state = 0; 1279 r10_bio->state = 0;
1278 1280
1279 /* We might need to issue multiple reads to different 1281 /* We might need to issue multiple reads to different
@@ -1302,13 +1304,13 @@ read_again:
1302 slot = r10_bio->read_slot; 1304 slot = r10_bio->read_slot;
1303 1305
1304 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1306 read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1305 bio_trim(read_bio, r10_bio->sector - bio->bi_sector, 1307 bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector,
1306 max_sectors); 1308 max_sectors);
1307 1309
1308 r10_bio->devs[slot].bio = read_bio; 1310 r10_bio->devs[slot].bio = read_bio;
1309 r10_bio->devs[slot].rdev = rdev; 1311 r10_bio->devs[slot].rdev = rdev;
1310 1312
1311 read_bio->bi_sector = r10_bio->devs[slot].addr + 1313 read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr +
1312 choose_data_offset(r10_bio, rdev); 1314 choose_data_offset(r10_bio, rdev);
1313 read_bio->bi_bdev = rdev->bdev; 1315 read_bio->bi_bdev = rdev->bdev;
1314 read_bio->bi_end_io = raid10_end_read_request; 1316 read_bio->bi_end_io = raid10_end_read_request;
@@ -1320,7 +1322,7 @@ read_again:
1320 * need another r10_bio. 1322 * need another r10_bio.
1321 */ 1323 */
1322 sectors_handled = (r10_bio->sectors + max_sectors 1324 sectors_handled = (r10_bio->sectors + max_sectors
1323 - bio->bi_sector); 1325 - bio->bi_iter.bi_sector);
1324 r10_bio->sectors = max_sectors; 1326 r10_bio->sectors = max_sectors;
1325 spin_lock_irq(&conf->device_lock); 1327 spin_lock_irq(&conf->device_lock);
1326 if (bio->bi_phys_segments == 0) 1328 if (bio->bi_phys_segments == 0)
@@ -1341,7 +1343,8 @@ read_again:
1341 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1343 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1342 r10_bio->state = 0; 1344 r10_bio->state = 0;
1343 r10_bio->mddev = mddev; 1345 r10_bio->mddev = mddev;
1344 r10_bio->sector = bio->bi_sector + sectors_handled; 1346 r10_bio->sector = bio->bi_iter.bi_sector +
1347 sectors_handled;
1345 goto read_again; 1348 goto read_again;
1346 } else 1349 } else
1347 generic_make_request(read_bio); 1350 generic_make_request(read_bio);
@@ -1499,7 +1502,8 @@ retry_write:
1499 bio->bi_phys_segments++; 1502 bio->bi_phys_segments++;
1500 spin_unlock_irq(&conf->device_lock); 1503 spin_unlock_irq(&conf->device_lock);
1501 } 1504 }
1502 sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector; 1505 sectors_handled = r10_bio->sector + max_sectors -
1506 bio->bi_iter.bi_sector;
1503 1507
1504 atomic_set(&r10_bio->remaining, 1); 1508 atomic_set(&r10_bio->remaining, 1);
1505 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); 1509 bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0);
@@ -1510,11 +1514,11 @@ retry_write:
1510 if (r10_bio->devs[i].bio) { 1514 if (r10_bio->devs[i].bio) {
1511 struct md_rdev *rdev = conf->mirrors[d].rdev; 1515 struct md_rdev *rdev = conf->mirrors[d].rdev;
1512 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1516 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1513 bio_trim(mbio, r10_bio->sector - bio->bi_sector, 1517 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1514 max_sectors); 1518 max_sectors);
1515 r10_bio->devs[i].bio = mbio; 1519 r10_bio->devs[i].bio = mbio;
1516 1520
1517 mbio->bi_sector = (r10_bio->devs[i].addr+ 1521 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
1518 choose_data_offset(r10_bio, 1522 choose_data_offset(r10_bio,
1519 rdev)); 1523 rdev));
1520 mbio->bi_bdev = rdev->bdev; 1524 mbio->bi_bdev = rdev->bdev;
@@ -1553,11 +1557,11 @@ retry_write:
1553 rdev = conf->mirrors[d].rdev; 1557 rdev = conf->mirrors[d].rdev;
1554 } 1558 }
1555 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 1559 mbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
1556 bio_trim(mbio, r10_bio->sector - bio->bi_sector, 1560 bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector,
1557 max_sectors); 1561 max_sectors);
1558 r10_bio->devs[i].repl_bio = mbio; 1562 r10_bio->devs[i].repl_bio = mbio;
1559 1563
1560 mbio->bi_sector = (r10_bio->devs[i].addr + 1564 mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr +
1561 choose_data_offset( 1565 choose_data_offset(
1562 r10_bio, rdev)); 1566 r10_bio, rdev));
1563 mbio->bi_bdev = rdev->bdev; 1567 mbio->bi_bdev = rdev->bdev;
@@ -1591,7 +1595,7 @@ retry_write:
1591 r10_bio->sectors = bio_sectors(bio) - sectors_handled; 1595 r10_bio->sectors = bio_sectors(bio) - sectors_handled;
1592 1596
1593 r10_bio->mddev = mddev; 1597 r10_bio->mddev = mddev;
1594 r10_bio->sector = bio->bi_sector + sectors_handled; 1598 r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled;
1595 r10_bio->state = 0; 1599 r10_bio->state = 0;
1596 goto retry_write; 1600 goto retry_write;
1597 } 1601 }
@@ -2124,10 +2128,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2124 bio_reset(tbio); 2128 bio_reset(tbio);
2125 2129
2126 tbio->bi_vcnt = vcnt; 2130 tbio->bi_vcnt = vcnt;
2127 tbio->bi_size = r10_bio->sectors << 9; 2131 tbio->bi_iter.bi_size = r10_bio->sectors << 9;
2128 tbio->bi_rw = WRITE; 2132 tbio->bi_rw = WRITE;
2129 tbio->bi_private = r10_bio; 2133 tbio->bi_private = r10_bio;
2130 tbio->bi_sector = r10_bio->devs[i].addr; 2134 tbio->bi_iter.bi_sector = r10_bio->devs[i].addr;
2131 2135
2132 for (j=0; j < vcnt ; j++) { 2136 for (j=0; j < vcnt ; j++) {
2133 tbio->bi_io_vec[j].bv_offset = 0; 2137 tbio->bi_io_vec[j].bv_offset = 0;
@@ -2144,7 +2148,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
2144 atomic_inc(&r10_bio->remaining); 2148 atomic_inc(&r10_bio->remaining);
2145 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); 2149 md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));
2146 2150
2147 tbio->bi_sector += conf->mirrors[d].rdev->data_offset; 2151 tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset;
2148 tbio->bi_bdev = conf->mirrors[d].rdev->bdev; 2152 tbio->bi_bdev = conf->mirrors[d].rdev->bdev;
2149 generic_make_request(tbio); 2153 generic_make_request(tbio);
2150 } 2154 }
@@ -2614,8 +2618,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i)
2614 sectors = sect_to_write; 2618 sectors = sect_to_write;
2615 /* Write at 'sector' for 'sectors' */ 2619 /* Write at 'sector' for 'sectors' */
2616 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); 2620 wbio = bio_clone_mddev(bio, GFP_NOIO, mddev);
2617 bio_trim(wbio, sector - bio->bi_sector, sectors); 2621 bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors);
2618 wbio->bi_sector = (r10_bio->devs[i].addr+ 2622 wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+
2619 choose_data_offset(r10_bio, rdev) + 2623 choose_data_offset(r10_bio, rdev) +
2620 (sector - r10_bio->sector)); 2624 (sector - r10_bio->sector));
2621 wbio->bi_bdev = rdev->bdev; 2625 wbio->bi_bdev = rdev->bdev;
@@ -2687,10 +2691,10 @@ read_more:
2687 (unsigned long long)r10_bio->sector); 2691 (unsigned long long)r10_bio->sector);
2688 bio = bio_clone_mddev(r10_bio->master_bio, 2692 bio = bio_clone_mddev(r10_bio->master_bio,
2689 GFP_NOIO, mddev); 2693 GFP_NOIO, mddev);
2690 bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors); 2694 bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors);
2691 r10_bio->devs[slot].bio = bio; 2695 r10_bio->devs[slot].bio = bio;
2692 r10_bio->devs[slot].rdev = rdev; 2696 r10_bio->devs[slot].rdev = rdev;
2693 bio->bi_sector = r10_bio->devs[slot].addr 2697 bio->bi_iter.bi_sector = r10_bio->devs[slot].addr
2694 + choose_data_offset(r10_bio, rdev); 2698 + choose_data_offset(r10_bio, rdev);
2695 bio->bi_bdev = rdev->bdev; 2699 bio->bi_bdev = rdev->bdev;
2696 bio->bi_rw = READ | do_sync; 2700 bio->bi_rw = READ | do_sync;
@@ -2701,7 +2705,7 @@ read_more:
2701 struct bio *mbio = r10_bio->master_bio; 2705 struct bio *mbio = r10_bio->master_bio;
2702 int sectors_handled = 2706 int sectors_handled =
2703 r10_bio->sector + max_sectors 2707 r10_bio->sector + max_sectors
2704 - mbio->bi_sector; 2708 - mbio->bi_iter.bi_sector;
2705 r10_bio->sectors = max_sectors; 2709 r10_bio->sectors = max_sectors;
2706 spin_lock_irq(&conf->device_lock); 2710 spin_lock_irq(&conf->device_lock);
2707 if (mbio->bi_phys_segments == 0) 2711 if (mbio->bi_phys_segments == 0)
@@ -2719,7 +2723,7 @@ read_more:
2719 set_bit(R10BIO_ReadError, 2723 set_bit(R10BIO_ReadError,
2720 &r10_bio->state); 2724 &r10_bio->state);
2721 r10_bio->mddev = mddev; 2725 r10_bio->mddev = mddev;
2722 r10_bio->sector = mbio->bi_sector 2726 r10_bio->sector = mbio->bi_iter.bi_sector
2723 + sectors_handled; 2727 + sectors_handled;
2724 2728
2725 goto read_more; 2729 goto read_more;
@@ -3157,7 +3161,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3157 bio->bi_end_io = end_sync_read; 3161 bio->bi_end_io = end_sync_read;
3158 bio->bi_rw = READ; 3162 bio->bi_rw = READ;
3159 from_addr = r10_bio->devs[j].addr; 3163 from_addr = r10_bio->devs[j].addr;
3160 bio->bi_sector = from_addr + rdev->data_offset; 3164 bio->bi_iter.bi_sector = from_addr +
3165 rdev->data_offset;
3161 bio->bi_bdev = rdev->bdev; 3166 bio->bi_bdev = rdev->bdev;
3162 atomic_inc(&rdev->nr_pending); 3167 atomic_inc(&rdev->nr_pending);
3163 /* and we write to 'i' (if not in_sync) */ 3168 /* and we write to 'i' (if not in_sync) */
@@ -3181,7 +3186,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3181 bio->bi_private = r10_bio; 3186 bio->bi_private = r10_bio;
3182 bio->bi_end_io = end_sync_write; 3187 bio->bi_end_io = end_sync_write;
3183 bio->bi_rw = WRITE; 3188 bio->bi_rw = WRITE;
3184 bio->bi_sector = to_addr 3189 bio->bi_iter.bi_sector = to_addr
3185 + rdev->data_offset; 3190 + rdev->data_offset;
3186 bio->bi_bdev = rdev->bdev; 3191 bio->bi_bdev = rdev->bdev;
3187 atomic_inc(&r10_bio->remaining); 3192 atomic_inc(&r10_bio->remaining);
@@ -3210,7 +3215,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3210 bio->bi_private = r10_bio; 3215 bio->bi_private = r10_bio;
3211 bio->bi_end_io = end_sync_write; 3216 bio->bi_end_io = end_sync_write;
3212 bio->bi_rw = WRITE; 3217 bio->bi_rw = WRITE;
3213 bio->bi_sector = to_addr + rdev->data_offset; 3218 bio->bi_iter.bi_sector = to_addr +
3219 rdev->data_offset;
3214 bio->bi_bdev = rdev->bdev; 3220 bio->bi_bdev = rdev->bdev;
3215 atomic_inc(&r10_bio->remaining); 3221 atomic_inc(&r10_bio->remaining);
3216 break; 3222 break;
@@ -3328,7 +3334,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3328 bio->bi_private = r10_bio; 3334 bio->bi_private = r10_bio;
3329 bio->bi_end_io = end_sync_read; 3335 bio->bi_end_io = end_sync_read;
3330 bio->bi_rw = READ; 3336 bio->bi_rw = READ;
3331 bio->bi_sector = sector + 3337 bio->bi_iter.bi_sector = sector +
3332 conf->mirrors[d].rdev->data_offset; 3338 conf->mirrors[d].rdev->data_offset;
3333 bio->bi_bdev = conf->mirrors[d].rdev->bdev; 3339 bio->bi_bdev = conf->mirrors[d].rdev->bdev;
3334 count++; 3340 count++;
@@ -3350,7 +3356,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3350 bio->bi_private = r10_bio; 3356 bio->bi_private = r10_bio;
3351 bio->bi_end_io = end_sync_write; 3357 bio->bi_end_io = end_sync_write;
3352 bio->bi_rw = WRITE; 3358 bio->bi_rw = WRITE;
3353 bio->bi_sector = sector + 3359 bio->bi_iter.bi_sector = sector +
3354 conf->mirrors[d].replacement->data_offset; 3360 conf->mirrors[d].replacement->data_offset;
3355 bio->bi_bdev = conf->mirrors[d].replacement->bdev; 3361 bio->bi_bdev = conf->mirrors[d].replacement->bdev;
3356 count++; 3362 count++;
@@ -3397,7 +3403,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
3397 bio2 = bio2->bi_next) { 3403 bio2 = bio2->bi_next) {
3398 /* remove last page from this bio */ 3404 /* remove last page from this bio */
3399 bio2->bi_vcnt--; 3405 bio2->bi_vcnt--;
3400 bio2->bi_size -= len; 3406 bio2->bi_iter.bi_size -= len;
3401 bio2->bi_flags &= ~(1<< BIO_SEG_VALID); 3407 bio2->bi_flags &= ~(1<< BIO_SEG_VALID);
3402 } 3408 }
3403 goto bio_full; 3409 goto bio_full;
@@ -4417,7 +4423,7 @@ read_more:
4417 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); 4423 read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev);
4418 4424
4419 read_bio->bi_bdev = rdev->bdev; 4425 read_bio->bi_bdev = rdev->bdev;
4420 read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr 4426 read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr
4421 + rdev->data_offset); 4427 + rdev->data_offset);
4422 read_bio->bi_private = r10_bio; 4428 read_bio->bi_private = r10_bio;
4423 read_bio->bi_end_io = end_sync_read; 4429 read_bio->bi_end_io = end_sync_read;
@@ -4425,7 +4431,7 @@ read_more:
4425 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); 4431 read_bio->bi_flags &= ~(BIO_POOL_MASK - 1);
4426 read_bio->bi_flags |= 1 << BIO_UPTODATE; 4432 read_bio->bi_flags |= 1 << BIO_UPTODATE;
4427 read_bio->bi_vcnt = 0; 4433 read_bio->bi_vcnt = 0;
4428 read_bio->bi_size = 0; 4434 read_bio->bi_iter.bi_size = 0;
4429 r10_bio->master_bio = read_bio; 4435 r10_bio->master_bio = read_bio;
4430 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; 4436 r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum;
4431 4437
@@ -4451,7 +4457,8 @@ read_more:
 
 		bio_reset(b);
 		b->bi_bdev = rdev2->bdev;
-		b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset;
+		b->bi_iter.bi_sector = r10_bio->devs[s/2].addr +
+			rdev2->new_data_offset;
 		b->bi_private = r10_bio;
 		b->bi_end_io = end_reshape_write;
 		b->bi_rw = WRITE;
@@ -4478,7 +4485,7 @@ read_more:
 			     bio2 = bio2->bi_next) {
 				/* Remove last page from this bio */
 				bio2->bi_vcnt--;
-				bio2->bi_size -= len;
+				bio2->bi_iter.bi_size -= len;
 				bio2->bi_flags &= ~(1<<BIO_SEG_VALID);
 			}
 			goto bio_full;
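The raid10 conversions above are mechanical: every field the driver used to poke directly on struct bio (bi_sector, bi_size, bi_idx) now lives inside the embedded iterator bio->bi_iter. As a rough orientation aid only, a minimal sketch of that relationship follows; the authoritative definition is the one this series adds to the block layer headers, and the typedef below is merely a stand-in so the fragment compiles outside the kernel.

	typedef unsigned long long sector_t;	/* stand-in for the kernel type */

	struct bvec_iter {
		sector_t	bi_sector;	/* device address, in 512-byte sectors */
		unsigned int	bi_size;	/* residual I/O count, in bytes */
		unsigned int	bi_idx;		/* current index into bi_io_vec[] */
	};

	/* Before this patch a driver wrote, for example:
	 *	bio->bi_sector = to_addr + rdev->data_offset;
	 * afterwards the same state is reached through the iterator:
	 *	bio->bi_iter.bi_sector = to_addr + rdev->data_offset;
	 */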
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index 47da0af6322b..a5d9c0ee4d60 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf)
 static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
 {
 	int sectors = bio_sectors(bio);
-	if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
+	if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS)
 		return bio->bi_next;
 	else
 		return NULL;
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi)
 
 		return_bi = bi->bi_next;
 		bi->bi_next = NULL;
-		bi->bi_size = 0;
+		bi->bi_iter.bi_size = 0;
 		trace_block_bio_complete(bdev_get_queue(bi->bi_bdev),
 					 bi, 0);
 		bio_endio(bi, 0);
@@ -854,10 +854,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				bi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->new_data_offset);
 			else
-				bi->bi_sector = (sh->sector
+				bi->bi_iter.bi_sector = (sh->sector
 						 + rdev->data_offset);
 			if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
 				bi->bi_rw |= REQ_NOMERGE;
@@ -865,7 +865,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 			bi->bi_vcnt = 1;
 			bi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			bi->bi_io_vec[0].bv_offset = 0;
-			bi->bi_size = STRIPE_SIZE;
+			bi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -901,15 +901,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s)
 				rbi->bi_rw, i);
 			atomic_inc(&sh->count);
 			if (use_new_offset(conf, sh))
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->new_data_offset);
 			else
-				rbi->bi_sector = (sh->sector
+				rbi->bi_iter.bi_sector = (sh->sector
 						  + rrdev->data_offset);
 			rbi->bi_vcnt = 1;
 			rbi->bi_io_vec[0].bv_len = STRIPE_SIZE;
 			rbi->bi_io_vec[0].bv_offset = 0;
-			rbi->bi_size = STRIPE_SIZE;
+			rbi->bi_iter.bi_size = STRIPE_SIZE;
 			/*
 			 * If this is discard request, set bi_vcnt 0. We don't
 			 * want to confuse SCSI because SCSI will replace payload
@@ -944,10 +944,10 @@ async_copy_data(int frombio, struct bio *bio, struct page *page,
 	struct async_submit_ctl submit;
 	enum async_tx_flags flags = 0;
 
-	if (bio->bi_sector >= sector)
-		page_offset = (signed)(bio->bi_sector - sector) * 512;
+	if (bio->bi_iter.bi_sector >= sector)
+		page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512;
 	else
-		page_offset = (signed)(sector - bio->bi_sector) * -512;
+		page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512;
 
 	if (frombio)
 		flags |= ASYNC_TX_FENCE;
@@ -1014,7 +1014,7 @@ static void ops_complete_biofill(void *stripe_head_ref)
 			BUG_ON(!dev->read);
 			rbi = dev->read;
 			dev->read = NULL;
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				rbi2 = r5_next_bio(rbi, dev->sector);
 				if (!raid5_dec_bi_active_stripes(rbi)) {
@@ -1050,7 +1050,7 @@ static void ops_run_biofill(struct stripe_head *sh)
 			dev->read = rbi = dev->toread;
 			dev->toread = NULL;
 			spin_unlock_irq(&sh->stripe_lock);
-			while (rbi && rbi->bi_sector <
+			while (rbi && rbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				tx = async_copy_data(0, rbi, dev->page,
 					dev->sector, tx);
@@ -1392,7 +1392,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx)
 			wbi = dev->written = chosen;
 			spin_unlock_irq(&sh->stripe_lock);
 
-			while (wbi && wbi->bi_sector <
+			while (wbi && wbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				if (wbi->bi_rw & REQ_FUA)
 					set_bit(R5_WantFUA, &dev->flags);
@@ -2616,7 +2616,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	int firstwrite=0;
 
 	pr_debug("adding bi b#%llu to stripe s#%llu\n",
-		(unsigned long long)bi->bi_sector,
+		(unsigned long long)bi->bi_iter.bi_sector,
 		(unsigned long long)sh->sector);
 
 	/*
@@ -2634,12 +2634,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		firstwrite = 1;
 	} else
 		bip = &sh->dev[dd_idx].toread;
-	while (*bip && (*bip)->bi_sector < bi->bi_sector) {
-		if (bio_end_sector(*bip) > bi->bi_sector)
+	while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) {
+		if (bio_end_sector(*bip) > bi->bi_iter.bi_sector)
 			goto overlap;
 		bip = & (*bip)->bi_next;
 	}
-	if (*bip && (*bip)->bi_sector < bio_end_sector(bi))
+	if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi))
 		goto overlap;
 
 	BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next);
@@ -2653,7 +2653,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 		sector_t sector = sh->dev[dd_idx].sector;
 		for (bi=sh->dev[dd_idx].towrite;
 		     sector < sh->dev[dd_idx].sector + STRIPE_SECTORS &&
-			     bi && bi->bi_sector <= sector;
+			     bi && bi->bi_iter.bi_sector <= sector;
 		     bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) {
 			if (bio_end_sector(bi) >= sector)
 				sector = bio_end_sector(bi);
@@ -2663,7 +2663,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in
 	}
 
 	pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
-		(unsigned long long)(*bip)->bi_sector,
+		(unsigned long long)(*bip)->bi_iter.bi_sector,
 		(unsigned long long)sh->sector, dd_idx);
 	spin_unlock_irq(&sh->stripe_lock);
 
@@ -2738,7 +2738,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 			wake_up(&conf->wait_for_overlap);
 
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 			sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2757,7 +2757,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 		bi = sh->dev[i].written;
 		sh->dev[i].written = NULL;
 		if (bi) bitmap_end = 1;
-		while (bi && bi->bi_sector <
+		while (bi && bi->bi_iter.bi_sector <
 		       sh->dev[i].sector + STRIPE_SECTORS) {
 			struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector);
 			clear_bit(BIO_UPTODATE, &bi->bi_flags);
@@ -2781,7 +2781,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh,
 			spin_unlock_irq(&sh->stripe_lock);
 			if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
 				wake_up(&conf->wait_for_overlap);
-			while (bi && bi->bi_sector <
+			while (bi && bi->bi_iter.bi_sector <
 			       sh->dev[i].sector + STRIPE_SECTORS) {
 				struct bio *nextbi =
 					r5_next_bio(bi, sh->dev[i].sector);
@@ -3005,7 +3005,7 @@ static void handle_stripe_clean_event(struct r5conf *conf,
 			clear_bit(R5_UPTODATE, &dev->flags);
 			wbi = dev->written;
 			dev->written = NULL;
-			while (wbi && wbi->bi_sector <
+			while (wbi && wbi->bi_iter.bi_sector <
 				dev->sector + STRIPE_SECTORS) {
 				wbi2 = r5_next_bio(wbi, dev->sector);
 				if (!raid5_dec_bi_active_stripes(wbi)) {
@@ -4097,7 +4097,7 @@ static int raid5_mergeable_bvec(struct request_queue *q,
 
 static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
 {
-	sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
+	sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev);
 	unsigned int chunk_sectors = mddev->chunk_sectors;
 	unsigned int bio_sectors = bio_sectors(bio);
 
@@ -4234,9 +4234,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 	/*
 	 * compute position
 	 */
-	align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector,
-						   0,
-						   &dd_idx, NULL);
+	align_bi->bi_iter.bi_sector =
+		raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector,
+				     0, &dd_idx, NULL);
 
 	end_sector = bio_end_sector(align_bi);
 	rcu_read_lock();
@@ -4261,7 +4261,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);
 
 		if (!bio_fits_rdev(align_bi) ||
-		    is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
+		    is_badblock(rdev, align_bi->bi_iter.bi_sector,
+				bio_sectors(align_bi),
 			    &first_bad, &bad_sectors)) {
 			/* too big in some way, or has a known bad block */
 			bio_put(align_bi);
@@ -4270,7 +4271,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		}
 
 		/* No reshape active, so we can trust rdev->data_offset */
-		align_bi->bi_sector += rdev->data_offset;
+		align_bi->bi_iter.bi_sector += rdev->data_offset;
 
 		spin_lock_irq(&conf->device_lock);
 		wait_event_lock_irq(conf->wait_for_stripe,
@@ -4282,7 +4283,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		if (mddev->gendisk)
 			trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev),
 					      align_bi, disk_devt(mddev->gendisk),
-					      raid_bio->bi_sector);
+					      raid_bio->bi_iter.bi_sector);
 		generic_make_request(align_bi);
 		return 1;
 	} else {
@@ -4465,8 +4466,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi)
 		/* Skip discard while reshape is happening */
 		return;
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
-	last_sector = bi->bi_sector + (bi->bi_size>>9);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9);
 
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
@@ -4570,7 +4571,7 @@ static void make_request(struct mddev *mddev, struct bio * bi)
 		return;
 	}
 
-	logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1);
 	last_sector = bio_end_sector(bi);
 	bi->bi_next = NULL;
 	bi->bi_phys_segments = 1;	/* over-loaded to count active stripes */
@@ -5054,7 +5055,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio)
 	int remaining;
 	int handled = 0;
 
-	logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1);
+	logical_sector = raid_bio->bi_iter.bi_sector &
+		~((sector_t)STRIPE_SECTORS-1);
 	sector = raid5_compute_sector(conf, logical_sector,
 				      0, &dd_idx, NULL);
 	last_sector = bio_end_sector(raid_bio);
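Throughout raid5.c the rename leaves the arithmetic untouched: a request's range is still derived from the starting sector plus the remaining byte count shifted down by nine, and stripe alignment still masks with STRIPE_SECTORS-1. The stand-alone fragment below is a sketch of that calculation as it appears in make_discard_request() and retry_aligned_read() above; it is not kernel code, STRIPE_SECTORS is hard-coded to the 8-sector (4 KiB page) value, and the struct is a pared-down stand-in for the real iterator.

	#include <stdio.h>

	typedef unsigned long long sector_t;	/* stand-in for the kernel type */
	#define STRIPE_SECTORS 8ULL		/* one 4 KiB stripe page = 8 sectors */

	struct bvec_iter {
		sector_t	bi_sector;	/* first un-completed sector */
		unsigned int	bi_size;	/* bytes still to transfer */
	};

	int main(void)
	{
		struct bvec_iter it = { .bi_sector = 1234, .bi_size = 64 * 1024 };

		/* same computation as the md paths above */
		sector_t logical_sector = it.bi_sector & ~((sector_t)STRIPE_SECTORS - 1);
		sector_t last_sector = it.bi_sector + (it.bi_size >> 9);

		printf("aligned start %llu, end %llu\n", logical_sector, last_sector);
		return 0;
	}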