author		Kent Overstreet <kmo@daterainc.com>	2013-10-11 18:44:27 -0400
committer	Kent Overstreet <kmo@daterainc.com>	2013-11-24 01:33:47 -0500
commit		4f024f3797c43cb4b73cd2c50cec728842d0e49e
tree		3aedcab02d2ad723a189d01934d1e94fec7a54e1 /drivers/md/bcache
parent		ed9c47bebeeea4a468b07cfd745c690190f8014c
block: Abstract out bvec iterator

Immutable biovecs are going to require an explicit iterator. To implement
immutable bvecs, a later patch is going to add a bi_bvec_done member to this
struct; for now, this patch effectively just renames things.

Signed-off-by: Kent Overstreet <kmo@daterainc.com>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Ed L. Cashin" <ecashin@coraid.com>
Cc: Nick Piggin <npiggin@kernel.dk>
Cc: Lars Ellenberg <drbd-dev@lists.linbit.com>
Cc: Jiri Kosina <jkosina@suse.cz>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Geoff Levand <geoff@infradead.org>
Cc: Yehuda Sadeh <yehuda@inktank.com>
Cc: Sage Weil <sage@inktank.com>
Cc: Alex Elder <elder@inktank.com>
Cc: ceph-devel@vger.kernel.org
Cc: Joshua Morris <josh.h.morris@us.ibm.com>
Cc: Philip Kelleher <pjk1939@linux.vnet.ibm.com>
Cc: Rusty Russell <rusty@rustcorp.com.au>
Cc: "Michael S. Tsirkin" <mst@redhat.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Jeremy Fitzhardinge <jeremy@goop.org>
Cc: Neil Brown <neilb@suse.de>
Cc: Alasdair Kergon <agk@redhat.com>
Cc: Mike Snitzer <snitzer@redhat.com>
Cc: dm-devel@redhat.com
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Cc: linux390@de.ibm.com
Cc: Boaz Harrosh <bharrosh@panasas.com>
Cc: Benny Halevy <bhalevy@tonian.com>
Cc: "James E.J. Bottomley" <JBottomley@parallels.com>
Cc: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Cc: "Nicholas A. Bellinger" <nab@linux-iscsi.org>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Chris Mason <chris.mason@fusionio.com>
Cc: "Theodore Ts'o" <tytso@mit.edu>
Cc: Andreas Dilger <adilger.kernel@dilger.ca>
Cc: Jaegeuk Kim <jaegeuk.kim@samsung.com>
Cc: Steven Whitehouse <swhiteho@redhat.com>
Cc: Dave Kleikamp <shaggy@kernel.org>
Cc: Joern Engel <joern@logfs.org>
Cc: Prasad Joshi <prasadjoshi.linux@gmail.com>
Cc: Trond Myklebust <Trond.Myklebust@netapp.com>
Cc: KONISHI Ryusuke <konishi.ryusuke@lab.ntt.co.jp>
Cc: Mark Fasheh <mfasheh@suse.com>
Cc: Joel Becker <jlbec@evilplan.org>
Cc: Ben Myers <bpm@sgi.com>
Cc: xfs@oss.sgi.com
Cc: Steven Rostedt <rostedt@goodmis.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Len Brown <len.brown@intel.com>
Cc: Pavel Machek <pavel@ucw.cz>
Cc: "Rafael J. Wysocki" <rjw@sisk.pl>
Cc: Herton Ronaldo Krzesinski <herton.krzesinski@canonical.com>
Cc: Ben Hutchings <ben@decadent.org.uk>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Guo Chao <yan@linux.vnet.ibm.com>
Cc: Tejun Heo <tj@kernel.org>
Cc: Asai Thambi S P <asamymuthupa@micron.com>
Cc: Selvan Mani <smani@micron.com>
Cc: Sam Bradshaw <sbradshaw@micron.com>
Cc: Wei Yongjun <yongjun_wei@trendmicro.com.cn>
Cc: "Roger Pau Monné" <roger.pau@citrix.com>
Cc: Jan Beulich <jbeulich@suse.com>
Cc: Stefano Stabellini <stefano.stabellini@eu.citrix.com>
Cc: Ian Campbell <Ian.Campbell@citrix.com>
Cc: Sebastian Ott <sebott@linux.vnet.ibm.com>
Cc: Christian Borntraeger <borntraeger@de.ibm.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Jiang Liu <jiang.liu@huawei.com>
Cc: Nitin Gupta <ngupta@vflare.org>
Cc: Jerome Marchand <jmarchand@redhat.com>
Cc: Joe Perches <joe@perches.com>
Cc: Peng Tao <tao.peng@emc.com>
Cc: Andy Adamson <andros@netapp.com>
Cc: fanchaoting <fanchaoting@cn.fujitsu.com>
Cc: Jie Liu <jeff.liu@oracle.com>
Cc: Sunil Mushran <sunil.mushran@gmail.com>
Cc: "Martin K. Petersen" <martin.petersen@oracle.com>
Cc: Namjae Jeon <namjae.jeon@samsung.com>
Cc: Pankaj Kumar <pankaj.km@samsung.com>
Cc: Dan Magenheimer <dan.magenheimer@oracle.com>
Cc: Mel Gorman <mgorman@suse.de>
Diffstat (limited to 'drivers/md/bcache')
-rw-r--r--	drivers/md/bcache/btree.c	|  4
-rw-r--r--	drivers/md/bcache/debug.c	|  2
-rw-r--r--	drivers/md/bcache/io.c		| 26
-rw-r--r--	drivers/md/bcache/journal.c	| 12
-rw-r--r--	drivers/md/bcache/movinggc.c	|  4
-rw-r--r--	drivers/md/bcache/request.c	| 58
-rw-r--r--	drivers/md/bcache/super.c	| 16
-rw-r--r--	drivers/md/bcache/util.c	|  4
-rw-r--r--	drivers/md/bcache/writeback.c	|  6
-rw-r--r--	drivers/md/bcache/writeback.h	|  2
10 files changed, 67 insertions(+), 67 deletions(-)
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 5e2765aadce1..038a6d2aced3 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
 
 	bio = bch_bbio_alloc(b->c);
 	bio->bi_rw = REQ_META|READ_SYNC;
-	bio->bi_size = KEY_SIZE(&b->key) << 9;
+	bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
 	bio->bi_end_io = btree_node_read_endio;
 	bio->bi_private = &cl;
 
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
 	b->bio->bi_end_io = btree_node_write_endio;
 	b->bio->bi_private = cl;
 	b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA;
-	b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c);
+	b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
 	bch_bio_map(b->bio, i);
 
 	/*
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..92b3fd468a03 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -195,7 +195,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 				dc->disk.c,
 				"verify failed at dev %s sector %llu",
 				bdevname(dc->bdev, name),
-				(uint64_t) bio->bi_sector);
+				(uint64_t) bio->bi_iter.bi_sector);
 
 	kunmap_atomic(p1);
 }
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..cc4ba2da5fb6 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -21,18 +21,18 @@ static void bch_bi_idx_hack_endio(struct bio *bio, int error)
 
 static void bch_generic_make_request_hack(struct bio *bio)
 {
-	if (bio->bi_idx) {
+	if (bio->bi_iter.bi_idx) {
 		struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
 
 		memcpy(clone->bi_io_vec,
 		       bio_iovec(bio),
 		       bio_segments(bio) * sizeof(struct bio_vec));
 
-		clone->bi_sector = bio->bi_sector;
+		clone->bi_iter.bi_sector = bio->bi_iter.bi_sector;
 		clone->bi_bdev = bio->bi_bdev;
 		clone->bi_rw = bio->bi_rw;
 		clone->bi_vcnt = bio_segments(bio);
-		clone->bi_size = bio->bi_size;
+		clone->bi_iter.bi_size = bio->bi_iter.bi_size;
 
 		clone->bi_private = bio;
 		clone->bi_end_io = bch_bi_idx_hack_endio;
@@ -72,7 +72,7 @@ static void bch_generic_make_request_hack(struct bio *bio)
 struct bio *bch_bio_split(struct bio *bio, int sectors,
 			  gfp_t gfp, struct bio_set *bs)
 {
-	unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
+	unsigned idx = bio->bi_iter.bi_idx, vcnt = 0, nbytes = sectors << 9;
 	struct bio_vec *bv;
 	struct bio *ret = NULL;
 
@@ -90,7 +90,7 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
 	}
 
 	bio_for_each_segment(bv, bio, idx) {
-		vcnt = idx - bio->bi_idx;
+		vcnt = idx - bio->bi_iter.bi_idx;
 
 		if (!nbytes) {
 			ret = bio_alloc_bioset(gfp, vcnt, bs);
@@ -119,15 +119,15 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
 	}
 out:
 	ret->bi_bdev = bio->bi_bdev;
-	ret->bi_sector = bio->bi_sector;
-	ret->bi_size = sectors << 9;
+	ret->bi_iter.bi_sector = bio->bi_iter.bi_sector;
+	ret->bi_iter.bi_size = sectors << 9;
 	ret->bi_rw = bio->bi_rw;
 	ret->bi_vcnt = vcnt;
 	ret->bi_max_vecs = vcnt;
 
-	bio->bi_sector += sectors;
-	bio->bi_size -= sectors << 9;
-	bio->bi_idx = idx;
+	bio->bi_iter.bi_sector += sectors;
+	bio->bi_iter.bi_size -= sectors << 9;
+	bio->bi_iter.bi_idx = idx;
 
 	if (bio_integrity(bio)) {
 		if (bio_integrity_clone(ret, bio, gfp)) {
@@ -162,7 +162,7 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
 	bio_for_each_segment(bv, bio, i) {
 		struct bvec_merge_data bvm = {
 			.bi_bdev = bio->bi_bdev,
-			.bi_sector = bio->bi_sector,
+			.bi_sector = bio->bi_iter.bi_sector,
 			.bi_size = ret << 9,
 			.bi_rw = bio->bi_rw,
 		};
@@ -272,8 +272,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 {
 	struct bbio *b = container_of(bio, struct bbio, bio);
 
-	bio->bi_sector = PTR_OFFSET(&b->key, 0);
+	bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
 	bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev;
 
 	b->submit_time_us = local_clock_us();
 	closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..7eafdf09a0ae 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset;
 		len = min_t(unsigned, left, PAGE_SECTORS * 8);
 
 		bio_reset(bio);
-		bio->bi_sector = bucket + offset;
+		bio->bi_iter.bi_sector = bucket + offset;
 		bio->bi_bdev = ca->bdev;
 		bio->bi_rw = READ;
-		bio->bi_size = len << 9;
+		bio->bi_iter.bi_size = len << 9;
 
 		bio->bi_end_io = journal_read_endio;
 		bio->bi_private = &cl;
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
 	atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
 	bio_init(bio);
-	bio->bi_sector = bucket_to_sector(ca->set,
-					  ca->sb.d[ja->discard_idx]);
+	bio->bi_iter.bi_sector = bucket_to_sector(ca->set,
+						  ca->sb.d[ja->discard_idx]);
 	bio->bi_bdev = ca->bdev;
 	bio->bi_rw = REQ_WRITE|REQ_DISCARD;
 	bio->bi_max_vecs = 1;
 	bio->bi_io_vec = bio->bi_inline_vecs;
-	bio->bi_size = bucket_bytes(ca);
+	bio->bi_iter.bi_size = bucket_bytes(ca);
 	bio->bi_end_io = journal_discard_endio;
 
 	closure_get(&ca->set->cl);
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
 		atomic_long_add(sectors, &ca->meta_sectors_written);
 
 		bio_reset(bio);
-		bio->bi_sector = PTR_OFFSET(k, i);
+		bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
 		bio->bi_bdev = ca->bdev;
 		bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
-		bio->bi_size = sectors << 9;
+		bio->bi_iter.bi_size = sectors << 9;
 
 		bio->bi_end_io = journal_write_endio;
 		bio->bi_private = w;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index 7c1275e66025..581f95df8265 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -82,7 +82,7 @@ static void moving_init(struct moving_io *io)
 	bio_get(bio);
 	bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-	bio->bi_size = KEY_SIZE(&io->w->key) << 9;
+	bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9;
 	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
 					PAGE_SECTORS);
 	bio->bi_private = &io->cl;
@@ -98,7 +98,7 @@ static void write_moving(struct closure *cl)
 	if (!op->error) {
 		moving_init(io);
 
-		io->bio.bio.bi_sector = KEY_START(&io->w->key);
+		io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
 		op->write_prio = 1;
 		op->bio = &io->bio.bio;
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 78bab4154e97..47a9bbc75124 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -261,7 +261,7 @@ static void bch_data_invalidate(struct closure *cl)
 	struct bio *bio = op->bio;
 
 	pr_debug("invalidating %i sectors from %llu",
-		 bio_sectors(bio), (uint64_t) bio->bi_sector);
+		 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
 	while (bio_sectors(bio)) {
 		unsigned sectors = min(bio_sectors(bio),
@@ -270,11 +270,11 @@ static void bch_data_invalidate(struct closure *cl)
 		if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
 			goto out;
 
-		bio->bi_sector += sectors;
-		bio->bi_size -= sectors << 9;
+		bio->bi_iter.bi_sector += sectors;
+		bio->bi_iter.bi_size -= sectors << 9;
 
 		bch_keylist_add(&op->insert_keys,
-				&KEY(op->inode, bio->bi_sector, sectors));
+				&KEY(op->inode, bio->bi_iter.bi_sector, sectors));
 	}
 
 	op->insert_data_done = true;
@@ -364,7 +364,7 @@ static void bch_data_insert_start(struct closure *cl)
 		k = op->insert_keys.top;
 		bkey_init(k);
 		SET_KEY_INODE(k, op->inode);
-		SET_KEY_OFFSET(k, bio->bi_sector);
+		SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
 		if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
 				       op->write_point, op->write_prio,
@@ -522,7 +522,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	     (bio->bi_rw & REQ_WRITE)))
 		goto skip;
 
-	if (bio->bi_sector & (c->sb.block_size - 1) ||
+	if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
 	    bio_sectors(bio) & (c->sb.block_size - 1)) {
 		pr_debug("skipping unaligned io");
 		goto skip;
@@ -546,8 +546,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
 	spin_lock(&dc->io_lock);
 
-	hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-		if (i->last == bio->bi_sector &&
+	hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+		if (i->last == bio->bi_iter.bi_sector &&
 		    time_before(jiffies, i->jiffies))
 			goto found;
 
@@ -556,8 +556,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 	add_sequential(task);
 	i->sequential = 0;
 found:
-	if (i->sequential + bio->bi_size > i->sequential)
-		i->sequential += bio->bi_size;
+	if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+		i->sequential += bio->bi_iter.bi_size;
 
 	i->last = bio_end_sector(bio);
 	i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -650,15 +650,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 	struct bkey *bio_key;
 	unsigned ptr;
 
-	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+	if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
 		return MAP_CONTINUE;
 
 	if (KEY_INODE(k) != s->iop.inode ||
-	    KEY_START(k) > bio->bi_sector) {
+	    KEY_START(k) > bio->bi_iter.bi_sector) {
 		unsigned bio_sectors = bio_sectors(bio);
 		unsigned sectors = KEY_INODE(k) == s->iop.inode
 			? min_t(uint64_t, INT_MAX,
-				KEY_START(k) - bio->bi_sector)
+				KEY_START(k) - bio->bi_iter.bi_sector)
 			: INT_MAX;
 
 		int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -681,13 +681,13 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
 		s->read_dirty_data = true;
 
 	n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-				     KEY_OFFSET(k) - bio->bi_sector),
+				     KEY_OFFSET(k) - bio->bi_iter.bi_sector),
 			  GFP_NOIO, s->d->bio_split);
 
 	bio_key = &container_of(n, struct bbio, bio)->key;
 	bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-	bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+	bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
 	bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 
 	n->bi_end_io = bch_cache_read_endio;
@@ -714,7 +714,7 @@ static void cache_lookup(struct closure *cl)
 	struct bio *bio = &s->bio.bio;
 
 	int ret = bch_btree_map_keys(&s->op, s->iop.c,
-				     &KEY(s->iop.inode, bio->bi_sector, 0),
+				     &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
 				     cache_lookup_fn, MAP_END_KEY);
 	if (ret == -EAGAIN)
 		continue_at(cl, cache_lookup, bcache_wq);
@@ -872,9 +872,9 @@ static void cached_dev_read_done(struct closure *cl)
 
 	if (s->iop.bio) {
 		bio_reset(s->iop.bio);
-		s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+		s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
 		s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
-		s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+		s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 		bch_bio_map(s->iop.bio, NULL);
 
 		bio_copy_data(s->cache_miss, s->iop.bio);
@@ -937,7 +937,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
 	s->iop.replace_key = KEY(s->iop.inode,
-				 bio->bi_sector + s->insert_bio_sectors,
+				 bio->bi_iter.bi_sector + s->insert_bio_sectors,
 				 s->insert_bio_sectors);
 
 	ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -957,9 +957,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 	if (!cache_bio)
 		goto out_submit;
 
-	cache_bio->bi_sector = miss->bi_sector;
+	cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
 	cache_bio->bi_bdev = miss->bi_bdev;
-	cache_bio->bi_size = s->insert_bio_sectors << 9;
+	cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 
 	cache_bio->bi_end_io = request_endio;
 	cache_bio->bi_private = &s->cl;
@@ -1009,7 +1009,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
 	struct closure *cl = &s->cl;
 	struct bio *bio = &s->bio.bio;
-	struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+	struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
 	struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
 	bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1104,13 +1104,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
 	part_stat_unlock();
 
 	bio->bi_bdev = dc->bdev;
-	bio->bi_sector += dc->sb.data_offset;
+	bio->bi_iter.bi_sector += dc->sb.data_offset;
 
 	if (cached_dev_get(dc)) {
 		s = search_alloc(bio, d);
 		trace_bcache_request_start(s->d, bio);
 
-		if (!bio->bi_size) {
+		if (!bio->bi_iter.bi_size) {
 			/*
 			 * can't call bch_journal_meta from under
 			 * generic_make_request
@@ -1197,9 +1197,9 @@ static int flash_dev_cache_miss(struct btree *b, struct search *s,
 		sectors -= j;
 	}
 
-	bio_advance(bio, min(sectors << 9, bio->bi_size));
+	bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
 
-	if (!bio->bi_size)
+	if (!bio->bi_iter.bi_size)
 		return MAP_DONE;
 
 	return MAP_CONTINUE;
@@ -1233,7 +1233,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
 	trace_bcache_request_start(s->d, bio);
 
-	if (!bio->bi_size) {
+	if (!bio->bi_iter.bi_size) {
 		/*
 		 * can't call bch_journal_meta from under
 		 * generic_make_request
@@ -1243,7 +1243,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 			    bcache_wq);
 	} else if (rw) {
 		bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
-					     &KEY(d->id, bio->bi_sector, 0),
+					     &KEY(d->id, bio->bi_iter.bi_sector, 0),
 					     &KEY(d->id, bio_end_sector(bio), 0));
 
 		s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index 1d9ee67d14ec..60fb6044b953 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
 	struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
 	unsigned i;
 
-	bio->bi_sector = SB_SECTOR;
+	bio->bi_iter.bi_sector = SB_SECTOR;
 	bio->bi_rw = REQ_SYNC|REQ_META;
-	bio->bi_size = SB_SIZE;
+	bio->bi_iter.bi_size = SB_SIZE;
 	bch_bio_map(bio, NULL);
 
 	out->offset = cpu_to_le64(sb->offset);
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
 	struct bio *bio = bch_bbio_alloc(c);
 
 	bio->bi_rw = REQ_SYNC|REQ_META|rw;
-	bio->bi_size = KEY_SIZE(k) << 9;
+	bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
 	bio->bi_end_io = uuid_endio;
 	bio->bi_private = cl;
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 
 	closure_init_stack(cl);
 
-	bio->bi_sector = bucket * ca->sb.bucket_size;
+	bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
 	bio->bi_bdev = ca->bdev;
 	bio->bi_rw = REQ_SYNC|REQ_META|rw;
-	bio->bi_size = bucket_bytes(ca);
+	bio->bi_iter.bi_size = bucket_bytes(ca);
 
 	bio->bi_end_io = prio_endio;
 	bio->bi_private = ca;
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index 462214eeacbe..c57621e49dc0 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -218,10 +218,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 
 void bch_bio_map(struct bio *bio, void *base)
 {
-	size_t size = bio->bi_size;
+	size_t size = bio->bi_iter.bi_size;
 	struct bio_vec *bv = bio->bi_io_vec;
 
-	BUG_ON(!bio->bi_size);
+	BUG_ON(!bio->bi_iter.bi_size);
 	BUG_ON(bio->bi_vcnt);
 
 	bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 99053b1251be..04657e93f4fd 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -113,7 +113,7 @@ static void dirty_init(struct keybuf_key *w)
 	if (!io->dc->writeback_percent)
 		bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-	bio->bi_size = KEY_SIZE(&w->key) << 9;
+	bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9;
 	bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
 	bio->bi_private = w;
 	bio->bi_io_vec = bio->bi_inline_vecs;
@@ -186,7 +186,7 @@ static void write_dirty(struct closure *cl)
 
 	dirty_init(w);
 	io->bio.bi_rw = WRITE;
-	io->bio.bi_sector = KEY_START(&w->key);
+	io->bio.bi_iter.bi_sector = KEY_START(&w->key);
 	io->bio.bi_bdev = io->dc->bdev;
 	io->bio.bi_end_io = dirty_endio;
 
@@ -255,7 +255,7 @@ static void read_dirty(struct cached_dev *dc)
 		io->dc = dc;
 
 		dirty_init(w);
-		io->bio.bi_sector = PTR_OFFSET(&w->key, 0);
+		io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
 		io->bio.bi_bdev = PTR_CACHE(dc->disk.c,
 					    &w->key, 0)->bdev;
 		io->bio.bi_rw = READ;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
 		return false;
 
 	if (dc->partial_stripes_expensive &&
-	    bcache_dev_stripe_dirty(dc, bio->bi_sector,
+	    bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
 				    bio_sectors(bio)))
 		return true;
 