author    Linus Torvalds <torvalds@linux-foundation.org>    2014-01-30 14:19:05 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>    2014-01-30 14:19:05 -0500
commit    f568849edac8611d603e00bd6cbbcfea09395ae6 (patch)
tree      b9472d640fe5d87426d38c9d81d946cf197ad3fb /drivers/md/bcache
parent    d9894c228b11273e720bb63ba120d1d326fe9d94 (diff)
parent    675675ada486dde5bf9aa51665e90706bff11a35 (diff)
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
 "The major piece in here is the immutable bio_vec series from Kent, the
  rest is fairly minor. It was supposed to go in last round, but various
  issues pushed it to this release instead. The pull request contains:

   - Various smaller blk-mq fixes from different folks. Nothing major
     here, just minor fixes and cleanups.

   - Fix for a memory leak in the error path in the block ioctl code
     from Christian Engelmayer.

   - Header export fix from CaiZhiyong.

   - Finally the immutable biovec changes from Kent Overstreet. This
     enables some nice future work on making arbitrarily sized bios
     possible, and splitting more efficient. Related fixes to immutable
     bio_vecs:
      - dm-cache immutable fixup from Mike Snitzer.
      - btrfs immutable fixup from Muthu Kumar.
      - bio-integrity fix from Nic Bellinger, which is also going to
        stable"

* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
  xtensa: fixup simdisk driver to work with immutable bio_vecs
  block/blk-mq-cpu.c: use hotcpu_notifier()
  blk-mq: for_each_* macro correctness
  block: Fix memory leak in rw_copy_check_uvector() handling
  bio-integrity: Fix bio_integrity_verify segment start bug
  block: remove unrelated header files and export symbol
  blk-mq: uses page->list incorrectly
  blk-mq: use __smp_call_function_single directly
  btrfs: fix missing increment of bi_remaining
  Revert "block: Warn and free bio if bi_end_io is not set"
  block: Warn and free bio if bi_end_io is not set
  blk-mq: fix initializing request's start time
  block: blk-mq: don't export blk_mq_free_queue()
  block: blk-mq: make blk_sync_queue support mq
  block: blk-mq: support draining mq queue
  dm cache: increment bi_remaining when bi_end_io is restored
  block: fixup for generic bio chaining
  block: Really silence spurious compiler warnings
  block: Silence spurious compiler warnings
  block: Kill bio_pair_split()
  ...
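The conversion applied throughout the bcache diff below follows one pattern: the fields bi_sector, bi_size and bi_idx move into the embedded iterator (bio->bi_iter), and segment iteration stops handing out pointers into bi_io_vec. A minimal sketch of the old and new iteration idioms, assuming the pre-3.14 and 3.14 bio_for_each_segment() signatures respectively (the count_data_bytes_*() helpers are hypothetical, not part of this merge):

    #include <linux/bio.h>

    /* Old idiom: integer index, bv points into bio->bi_io_vec. */
    static unsigned count_data_bytes_old(struct bio *bio)
    {
            struct bio_vec *bv;
            unsigned bytes = 0;
            int i;

            bio_for_each_segment(bv, bio, i)
                    bytes += bv->bv_len;

            return bytes;
    }

    /* New idiom: separate bvec_iter cursor; bv is a by-value copy, so
     * the underlying bio_vec array is never modified ("immutable"). */
    static unsigned count_data_bytes_new(struct bio *bio)
    {
            struct bio_vec bv;
            struct bvec_iter iter;
            unsigned bytes = 0;

            bio_for_each_segment(bv, bio, iter)
                    bytes += bv.bv_len;

            return bytes;
    }

The by-value iteration is what lets a consumer start partway through a bio by advancing its own iterator copy, without splitting and reallocating bio_vecs the way the removed bch_generic_make_request_hack()/bch_bio_split() code in drivers/md/bcache/io.c had to do by hand.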
Diffstat (limited to 'drivers/md/bcache')
-rw-r--r--  drivers/md/bcache/bcache.h    |   2
-rw-r--r--  drivers/md/bcache/btree.c     |   8
-rw-r--r--  drivers/md/bcache/debug.c     |  21
-rw-r--r--  drivers/md/bcache/io.c        | 196
-rw-r--r--  drivers/md/bcache/journal.c   |  12
-rw-r--r--  drivers/md/bcache/movinggc.c  |   4
-rw-r--r--  drivers/md/bcache/request.c   | 131
-rw-r--r--  drivers/md/bcache/super.c     |  20
-rw-r--r--  drivers/md/bcache/util.c      |   4
-rw-r--r--  drivers/md/bcache/writeback.c |   6
-rw-r--r--  drivers/md/bcache/writeback.h |   2
11 files changed, 118 insertions(+), 288 deletions(-)
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 754f43177483..dbdbca5a9591 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -280,7 +280,6 @@ struct bcache_device {
         unsigned long           sectors_dirty_last;
         long                    sectors_dirty_derivative;
 
-        mempool_t               *unaligned_bvec;
         struct bio_set          *bio_split;
 
         unsigned                data_csum:1;
@@ -902,7 +901,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *);
 void bch_bbio_free(struct bio *, struct cache_set *);
 struct bio *bch_bbio_alloc(struct cache_set *);
 
-struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *);
 void bch_generic_make_request(struct bio *, struct bio_split_pool *);
 void __bch_submit_bbio(struct bio *, struct cache_set *);
 void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned);
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 31bb53fcc67a..946ecd3b048b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b)
 
         bio = bch_bbio_alloc(b->c);
         bio->bi_rw      = REQ_META|READ_SYNC;
-        bio->bi_size    = KEY_SIZE(&b->key) << 9;
+        bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9;
         bio->bi_end_io  = btree_node_read_endio;
         bio->bi_private = &cl;
 
@@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl)
         struct bio_vec *bv;
         int n;
 
-        __bio_for_each_segment(bv, b->bio, n, 0)
+        bio_for_each_segment_all(bv, b->bio, n)
                 __free_page(bv->bv_page);
 
         __btree_node_write_done(cl);
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b)
         b->bio->bi_end_io  = btree_node_write_endio;
         b->bio->bi_private = cl;
         b->bio->bi_rw      = REQ_META|WRITE_SYNC|REQ_FUA;
-        b->bio->bi_size    = set_blocks(i, b->c) * block_bytes(b->c);
+        b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c);
         bch_bio_map(b->bio, i);
 
         /*
@@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b)
                 struct bio_vec *bv;
                 void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1));
 
-                bio_for_each_segment(bv, b->bio, j)
+                bio_for_each_segment_all(bv, b->bio, j)
                         memcpy(page_address(bv->bv_page),
                                base + j * PAGE_SIZE, PAGE_SIZE);
 
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..03cb4d114e16 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 {
         char name[BDEVNAME_SIZE];
         struct bio *check;
-        struct bio_vec *bv;
+        struct bio_vec bv, *bv2;
+        struct bvec_iter iter;
         int i;
 
         check = bio_clone(bio, GFP_NOIO);
@@ -185,23 +186,23 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio)
 
         submit_bio_wait(READ_SYNC, check);
 
-        bio_for_each_segment(bv, bio, i) {
-                void *p1 = kmap_atomic(bv->bv_page);
-                void *p2 = page_address(check->bi_io_vec[i].bv_page);
+        bio_for_each_segment(bv, bio, iter) {
+                void *p1 = kmap_atomic(bv.bv_page);
+                void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page);
 
-                cache_set_err_on(memcmp(p1 + bv->bv_offset,
-                                        p2 + bv->bv_offset,
-                                        bv->bv_len),
+                cache_set_err_on(memcmp(p1 + bv.bv_offset,
+                                        p2 + bv.bv_offset,
+                                        bv.bv_len),
                                  dc->disk.c,
                                  "verify failed at dev %s sector %llu",
                                  bdevname(dc->bdev, name),
-                                 (uint64_t) bio->bi_sector);
+                                 (uint64_t) bio->bi_iter.bi_sector);
 
                 kunmap_atomic(p1);
         }
 
-        bio_for_each_segment_all(bv, check, i)
-                __free_page(bv->bv_page);
+        bio_for_each_segment_all(bv2, check, i)
+                __free_page(bv2->bv_page);
 out_put:
         bio_put(check);
 }
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..fa028fa82df4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,178 +11,40 @@
 
 #include <linux/blkdev.h>
 
-static void bch_bi_idx_hack_endio(struct bio *bio, int error)
-{
-        struct bio *p = bio->bi_private;
-
-        bio_endio(p, error);
-        bio_put(bio);
-}
-
-static void bch_generic_make_request_hack(struct bio *bio)
-{
-        if (bio->bi_idx) {
-                struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio));
-
-                memcpy(clone->bi_io_vec,
-                       bio_iovec(bio),
-                       bio_segments(bio) * sizeof(struct bio_vec));
-
-                clone->bi_sector        = bio->bi_sector;
-                clone->bi_bdev          = bio->bi_bdev;
-                clone->bi_rw            = bio->bi_rw;
-                clone->bi_vcnt          = bio_segments(bio);
-                clone->bi_size          = bio->bi_size;
-
-                clone->bi_private       = bio;
-                clone->bi_end_io        = bch_bi_idx_hack_endio;
-
-                bio = clone;
-        }
-
-        /*
-         * Hack, since drivers that clone bios clone up to bi_max_vecs, but our
-         * bios might have had more than that (before we split them per device
-         * limitations).
-         *
-         * To be taken out once immutable bvec stuff is in.
-         */
-        bio->bi_max_vecs = bio->bi_vcnt;
-
-        generic_make_request(bio);
-}
-
-/**
- * bch_bio_split - split a bio
- * @bio:        bio to split
- * @sectors:    number of sectors to split from the front of @bio
- * @gfp:        gfp mask
- * @bs:         bio set to allocate from
- *
- * Allocates and returns a new bio which represents @sectors from the start of
- * @bio, and updates @bio to represent the remaining sectors.
- *
- * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio
- * unchanged.
- *
- * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
- * bvec boundary; it is the caller's responsibility to ensure that @bio is not
- * freed before the split.
- */
-struct bio *bch_bio_split(struct bio *bio, int sectors,
-                          gfp_t gfp, struct bio_set *bs)
-{
-        unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9;
-        struct bio_vec *bv;
-        struct bio *ret = NULL;
-
-        BUG_ON(sectors <= 0);
-
-        if (sectors >= bio_sectors(bio))
-                return bio;
-
-        if (bio->bi_rw & REQ_DISCARD) {
-                ret = bio_alloc_bioset(gfp, 1, bs);
-                if (!ret)
-                        return NULL;
-                idx = 0;
-                goto out;
-        }
-
-        bio_for_each_segment(bv, bio, idx) {
-                vcnt = idx - bio->bi_idx;
-
-                if (!nbytes) {
-                        ret = bio_alloc_bioset(gfp, vcnt, bs);
-                        if (!ret)
-                                return NULL;
-
-                        memcpy(ret->bi_io_vec, bio_iovec(bio),
-                               sizeof(struct bio_vec) * vcnt);
-
-                        break;
-                } else if (nbytes < bv->bv_len) {
-                        ret = bio_alloc_bioset(gfp, ++vcnt, bs);
-                        if (!ret)
-                                return NULL;
-
-                        memcpy(ret->bi_io_vec, bio_iovec(bio),
-                               sizeof(struct bio_vec) * vcnt);
-
-                        ret->bi_io_vec[vcnt - 1].bv_len = nbytes;
-                        bv->bv_offset += nbytes;
-                        bv->bv_len    -= nbytes;
-                        break;
-                }
-
-                nbytes -= bv->bv_len;
-        }
-out:
-        ret->bi_bdev    = bio->bi_bdev;
-        ret->bi_sector  = bio->bi_sector;
-        ret->bi_size    = sectors << 9;
-        ret->bi_rw      = bio->bi_rw;
-        ret->bi_vcnt    = vcnt;
-        ret->bi_max_vecs = vcnt;
-
-        bio->bi_sector  += sectors;
-        bio->bi_size    -= sectors << 9;
-        bio->bi_idx      = idx;
-
-        if (bio_integrity(bio)) {
-                if (bio_integrity_clone(ret, bio, gfp)) {
-                        bio_put(ret);
-                        return NULL;
-                }
-
-                bio_integrity_trim(ret, 0, bio_sectors(ret));
-                bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio));
-        }
-
-        return ret;
-}
-
 static unsigned bch_bio_max_sectors(struct bio *bio)
 {
-        unsigned ret = bio_sectors(bio);
         struct request_queue *q = bdev_get_queue(bio->bi_bdev);
-        unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
-                                      queue_max_segments(q));
+        struct bio_vec bv;
+        struct bvec_iter iter;
+        unsigned ret = 0, seg = 0;
 
         if (bio->bi_rw & REQ_DISCARD)
-                return min(ret, q->limits.max_discard_sectors);
+                return min(bio_sectors(bio), q->limits.max_discard_sectors);
 
-        if (bio_segments(bio) > max_segments ||
-            q->merge_bvec_fn) {
-                struct bio_vec *bv;
-                int i, seg = 0;
-
-                ret = 0;
-
-                bio_for_each_segment(bv, bio, i) {
-                        struct bvec_merge_data bvm = {
-                                .bi_bdev        = bio->bi_bdev,
-                                .bi_sector      = bio->bi_sector,
-                                .bi_size        = ret << 9,
-                                .bi_rw          = bio->bi_rw,
-                        };
-
-                        if (seg == max_segments)
-                                break;
+        bio_for_each_segment(bv, bio, iter) {
+                struct bvec_merge_data bvm = {
+                        .bi_bdev        = bio->bi_bdev,
+                        .bi_sector      = bio->bi_iter.bi_sector,
+                        .bi_size        = ret << 9,
+                        .bi_rw          = bio->bi_rw,
+                };
+
+                if (seg == min_t(unsigned, BIO_MAX_PAGES,
+                                 queue_max_segments(q)))
+                        break;
 
-                        if (q->merge_bvec_fn &&
-                            q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
-                                break;
+                if (q->merge_bvec_fn &&
+                    q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len)
+                        break;
 
-                        seg++;
-                        ret += bv->bv_len >> 9;
-                }
+                seg++;
+                ret += bv.bv_len >> 9;
         }
 
         ret = min(ret, queue_max_sectors(q));
 
         WARN_ON(!ret);
-        ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9);
+        ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9);
 
         return ret;
 }
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl)
 
         s->bio->bi_end_io  = s->bi_end_io;
         s->bio->bi_private = s->bi_private;
-        bio_endio(s->bio, 0);
+        bio_endio_nodec(s->bio, 0);
 
         closure_debug_destroy(&s->cl);
         mempool_free(s, s->p->bio_split_hook);
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
         bio_get(bio);
 
         do {
-                n = bch_bio_split(bio, bch_bio_max_sectors(bio),
+                n = bio_next_split(bio, bch_bio_max_sectors(bio),
                                   GFP_NOIO, s->p->bio_split);
 
                 n->bi_end_io  = bch_bio_submit_split_endio;
                 n->bi_private = &s->cl;
 
                 closure_get(&s->cl);
-                bch_generic_make_request_hack(n);
+                generic_make_request(n);
         } while (n != bio);
 
         continue_at(&s->cl, bch_bio_submit_split_done, NULL);
 submit:
-        bch_generic_make_request_hack(bio);
+        generic_make_request(bio);
 }
 
 /* Bios with headers */
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c)
 {
         struct bbio *b = container_of(bio, struct bbio, bio);
 
-        bio->bi_sector = PTR_OFFSET(&b->key, 0);
+        bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0);
         bio->bi_bdev   = PTR_CACHE(c, &b->key, 0)->bdev;
 
         b->submit_time_us = local_clock_us();
         closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0));
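With the local bch_bio_split() gone, bcache splits oversized bios with the generic bio_next_split() helper added by the immutable biovec series: it returns a clone covering the front sectors of the bio, or the bio itself once no split is needed, without copying bio_vecs. A sketch of the caller-side loop, modeled on bch_generic_make_request() above (the fixed max_sectors parameter is a simplification; the real code recomputes bch_bio_max_sectors() each iteration):

    #include <linux/bio.h>
    #include <linux/blkdev.h>

    /* Submit an arbitrarily sized bio as a chain of device-sized pieces. */
    static void submit_in_pieces(struct bio *bio, unsigned max_sectors,
                                 struct bio_set *bs)
    {
            struct bio *n;

            do {
                    /* Returns bio itself once the remainder fits. */
                    n = bio_next_split(bio, max_sectors, GFP_NOIO, bs);
                    generic_make_request(n);
            } while (n != bio);
    }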
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..7eafdf09a0ae 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset;
                 len = min_t(unsigned, left, PAGE_SECTORS * 8);
 
                 bio_reset(bio);
-                bio->bi_sector  = bucket + offset;
+                bio->bi_iter.bi_sector = bucket + offset;
                 bio->bi_bdev    = ca->bdev;
                 bio->bi_rw      = READ;
-                bio->bi_size    = len << 9;
+                bio->bi_iter.bi_size = len << 9;
 
                 bio->bi_end_io  = journal_read_endio;
                 bio->bi_private = &cl;
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca)
                 atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT);
 
                 bio_init(bio);
-                bio->bi_sector          = bucket_to_sector(ca->set,
+                bio->bi_iter.bi_sector  = bucket_to_sector(ca->set,
                                                 ca->sb.d[ja->discard_idx]);
                 bio->bi_bdev            = ca->bdev;
                 bio->bi_rw              = REQ_WRITE|REQ_DISCARD;
                 bio->bi_max_vecs        = 1;
                 bio->bi_io_vec          = bio->bi_inline_vecs;
-                bio->bi_size            = bucket_bytes(ca);
+                bio->bi_iter.bi_size    = bucket_bytes(ca);
                 bio->bi_end_io          = journal_discard_endio;
 
                 closure_get(&ca->set->cl);
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl)
                 atomic_long_add(sectors, &ca->meta_sectors_written);
 
                 bio_reset(bio);
-                bio->bi_sector  = PTR_OFFSET(k, i);
+                bio->bi_iter.bi_sector = PTR_OFFSET(k, i);
                 bio->bi_bdev    = ca->bdev;
                 bio->bi_rw      = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA;
-                bio->bi_size    = sectors << 9;
+                bio->bi_iter.bi_size = sectors << 9;
 
                 bio->bi_end_io  = journal_write_endio;
                 bio->bi_private = w;
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index f2f0998c4a91..052bd24d24b4 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -86,7 +86,7 @@ static void moving_init(struct moving_io *io)
         bio_get(bio);
         bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-        bio->bi_size            = KEY_SIZE(&io->w->key) << 9;
+        bio->bi_iter.bi_size    = KEY_SIZE(&io->w->key) << 9;
         bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&io->w->key),
                                                PAGE_SECTORS);
         bio->bi_private         = &io->cl;
@@ -102,7 +102,7 @@ static void write_moving(struct closure *cl)
         if (!op->error) {
                 moving_init(io);
 
-                io->bio.bio.bi_sector = KEY_START(&io->w->key);
+                io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key);
                 op->write_prio  = 1;
                 op->bio         = &io->bio.bio;
 
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 61bcfc21d2a0..c906571997d7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -197,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio)
 
 static void bio_csum(struct bio *bio, struct bkey *k)
 {
-        struct bio_vec *bv;
+        struct bio_vec bv;
+        struct bvec_iter iter;
         uint64_t csum = 0;
-        int i;
 
-        bio_for_each_segment(bv, bio, i) {
-                void *d = kmap(bv->bv_page) + bv->bv_offset;
-                csum = bch_crc64_update(csum, d, bv->bv_len);
-                kunmap(bv->bv_page);
+        bio_for_each_segment(bv, bio, iter) {
+                void *d = kmap(bv.bv_page) + bv.bv_offset;
+                csum = bch_crc64_update(csum, d, bv.bv_len);
+                kunmap(bv.bv_page);
         }
 
         k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1);
@@ -260,7 +260,7 @@ static void bch_data_invalidate(struct closure *cl)
         struct bio *bio = op->bio;
 
         pr_debug("invalidating %i sectors from %llu",
-                 bio_sectors(bio), (uint64_t) bio->bi_sector);
+                 bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector);
 
         while (bio_sectors(bio)) {
                 unsigned sectors = min(bio_sectors(bio),
@@ -269,11 +269,11 @@ static void bch_data_invalidate(struct closure *cl)
                 if (bch_keylist_realloc(&op->insert_keys, 0, op->c))
                         goto out;
 
-                bio->bi_sector  += sectors;
-                bio->bi_size    -= sectors << 9;
+                bio->bi_iter.bi_sector  += sectors;
+                bio->bi_iter.bi_size    -= sectors << 9;
 
                 bch_keylist_add(&op->insert_keys,
-                                &KEY(op->inode, bio->bi_sector, sectors));
+                                &KEY(op->inode, bio->bi_iter.bi_sector, sectors));
         }
 
         op->insert_data_done = true;
@@ -363,14 +363,14 @@ static void bch_data_insert_start(struct closure *cl)
                 k = op->insert_keys.top;
                 bkey_init(k);
                 SET_KEY_INODE(k, op->inode);
-                SET_KEY_OFFSET(k, bio->bi_sector);
+                SET_KEY_OFFSET(k, bio->bi_iter.bi_sector);
 
                 if (!bch_alloc_sectors(op->c, k, bio_sectors(bio),
                                        op->write_point, op->write_prio,
                                        op->writeback))
                         goto err;
 
-                n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split);
+                n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split);
 
                 n->bi_end_io  = bch_data_insert_endio;
                 n->bi_private = cl;
@@ -521,7 +521,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
              (bio->bi_rw & REQ_WRITE)))
                 goto skip;
 
-        if (bio->bi_sector & (c->sb.block_size - 1) ||
+        if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) ||
             bio_sectors(bio) & (c->sb.block_size - 1)) {
                 pr_debug("skipping unaligned io");
                 goto skip;
@@ -545,8 +545,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
 
         spin_lock(&dc->io_lock);
 
-        hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash)
-                if (i->last == bio->bi_sector &&
+        hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash)
+                if (i->last == bio->bi_iter.bi_sector &&
                     time_before(jiffies, i->jiffies))
                         goto found;
 
@@ -555,8 +555,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio)
         add_sequential(task);
         i->sequential = 0;
 found:
-        if (i->sequential + bio->bi_size > i->sequential)
-                i->sequential += bio->bi_size;
+        if (i->sequential + bio->bi_iter.bi_size > i->sequential)
+                i->sequential += bio->bi_iter.bi_size;
 
         i->last    = bio_end_sector(bio);
         i->jiffies = jiffies + msecs_to_jiffies(5000);
@@ -605,7 +605,6 @@ struct search {
         unsigned                insert_bio_sectors;
 
         unsigned                recoverable:1;
-        unsigned                unaligned_bvec:1;
         unsigned                write:1;
         unsigned                read_dirty_data:1;
 
@@ -649,15 +648,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
         struct bkey *bio_key;
         unsigned ptr;
 
-        if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0)
+        if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0)
                 return MAP_CONTINUE;
 
         if (KEY_INODE(k) != s->iop.inode ||
-            KEY_START(k) > bio->bi_sector) {
+            KEY_START(k) > bio->bi_iter.bi_sector) {
                 unsigned bio_sectors = bio_sectors(bio);
                 unsigned sectors = KEY_INODE(k) == s->iop.inode
                         ? min_t(uint64_t, INT_MAX,
-                                KEY_START(k) - bio->bi_sector)
+                                KEY_START(k) - bio->bi_iter.bi_sector)
                         : INT_MAX;
 
                 int ret = s->d->cache_miss(b, s, bio, sectors);
@@ -679,14 +678,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k)
         if (KEY_DIRTY(k))
                 s->read_dirty_data = true;
 
-        n = bch_bio_split(bio, min_t(uint64_t, INT_MAX,
-                                     KEY_OFFSET(k) - bio->bi_sector),
+        n = bio_next_split(bio, min_t(uint64_t, INT_MAX,
+                                      KEY_OFFSET(k) - bio->bi_iter.bi_sector),
                           GFP_NOIO, s->d->bio_split);
 
         bio_key = &container_of(n, struct bbio, bio)->key;
         bch_bkey_copy_single_ptr(bio_key, k, ptr);
 
-        bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key);
+        bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key);
         bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key);
 
         n->bi_end_io = bch_cache_read_endio;
@@ -713,7 +712,7 @@ static void cache_lookup(struct closure *cl)
         struct bio *bio = &s->bio.bio;
 
         int ret = bch_btree_map_keys(&s->op, s->iop.c,
-                                     &KEY(s->iop.inode, bio->bi_sector, 0),
+                                     &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0),
                                      cache_lookup_fn, MAP_END_KEY);
         if (ret == -EAGAIN)
                 continue_at(cl, cache_lookup, bcache_wq);
@@ -758,10 +757,12 @@ static void bio_complete(struct search *s)
 static void do_bio_hook(struct search *s)
 {
         struct bio *bio = &s->bio.bio;
-        memcpy(bio, s->orig_bio, sizeof(struct bio));
 
+        bio_init(bio);
+        __bio_clone_fast(bio, s->orig_bio);
         bio->bi_end_io  = request_endio;
         bio->bi_private = &s->cl;
+
         atomic_set(&bio->bi_cnt, 3);
 }
 
@@ -773,9 +774,6 @@ static void search_free(struct closure *cl)
         if (s->iop.bio)
                 bio_put(s->iop.bio);
 
-        if (s->unaligned_bvec)
-                mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec);
-
         closure_debug_destroy(cl);
         mempool_free(s, s->d->c->search);
 }
@@ -783,7 +781,6 @@ static void search_free(struct closure *cl)
 static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
 {
         struct search *s;
-        struct bio_vec *bv;
 
         s = mempool_alloc(d->c->search, GFP_NOIO);
         memset(s, 0, offsetof(struct search, iop.insert_keys));
@@ -802,15 +799,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d)
         s->start_time = jiffies;
         do_bio_hook(s);
 
-        if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) {
-                bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO);
-                memcpy(bv, bio_iovec(bio),
-                       sizeof(struct bio_vec) * bio_segments(bio));
-
-                s->bio.bio.bi_io_vec = bv;
-                s->unaligned_bvec    = 1;
-        }
-
         return s;
 }
 
@@ -849,26 +837,13 @@ static void cached_dev_read_error(struct closure *cl)
 {
         struct search *s = container_of(cl, struct search, cl);
         struct bio *bio = &s->bio.bio;
-        struct bio_vec *bv;
-        int i;
 
         if (s->recoverable) {
                 /* Retry from the backing device: */
                 trace_bcache_read_retry(s->orig_bio);
 
                 s->iop.error = 0;
-                bv = s->bio.bio.bi_io_vec;
                 do_bio_hook(s);
-                s->bio.bio.bi_io_vec = bv;
-
-                if (!s->unaligned_bvec)
-                        bio_for_each_segment(bv, s->orig_bio, i)
-                                bv->bv_offset = 0, bv->bv_len = PAGE_SIZE;
-                else
-                        memcpy(s->bio.bio.bi_io_vec,
-                               bio_iovec(s->orig_bio),
-                               sizeof(struct bio_vec) *
-                               bio_segments(s->orig_bio));
 
                 /* XXX: invalidate cache */
 
@@ -893,9 +868,9 @@ static void cached_dev_read_done(struct closure *cl)
 
         if (s->iop.bio) {
                 bio_reset(s->iop.bio);
-                s->iop.bio->bi_sector = s->cache_miss->bi_sector;
+                s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector;
                 s->iop.bio->bi_bdev = s->cache_miss->bi_bdev;
-                s->iop.bio->bi_size = s->insert_bio_sectors << 9;
+                s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
                 bch_bio_map(s->iop.bio, NULL);
 
                 bio_copy_data(s->cache_miss, s->iop.bio);
@@ -904,8 +879,7 @@ static void cached_dev_read_done(struct closure *cl)
                 s->cache_miss = NULL;
         }
 
-        if (verify(dc, &s->bio.bio) && s->recoverable &&
-            !s->unaligned_bvec && !s->read_dirty_data)
+        if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data)
                 bch_data_verify(dc, s->orig_bio);
 
         bio_complete(s);
@@ -945,7 +919,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
         struct bio *miss, *cache_bio;
 
         if (s->cache_miss || s->iop.bypass) {
-                miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+                miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
                 ret = miss == bio ? MAP_DONE : MAP_CONTINUE;
                 goto out_submit;
         }
@@ -959,7 +933,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
         s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada);
 
         s->iop.replace_key = KEY(s->iop.inode,
-                                 bio->bi_sector + s->insert_bio_sectors,
+                                 bio->bi_iter.bi_sector + s->insert_bio_sectors,
                                  s->insert_bio_sectors);
 
         ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key);
@@ -968,7 +942,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
 
         s->iop.replace = true;
 
-        miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split);
+        miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split);
 
         /* btree_search_recurse()'s btree iterator is no good anymore */
         ret = miss == bio ? MAP_DONE : -EINTR;
@@ -979,9 +953,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s,
         if (!cache_bio)
                 goto out_submit;
 
-        cache_bio->bi_sector    = miss->bi_sector;
+        cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector;
         cache_bio->bi_bdev      = miss->bi_bdev;
-        cache_bio->bi_size      = s->insert_bio_sectors << 9;
+        cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9;
 
         cache_bio->bi_end_io    = request_endio;
         cache_bio->bi_private   = &s->cl;
@@ -1031,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
 {
         struct closure *cl = &s->cl;
         struct bio *bio = &s->bio.bio;
-        struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0);
+        struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0);
         struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0);
 
         bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end);
@@ -1087,8 +1061,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s)
                         closure_bio_submit(flush, cl, s->d);
                 }
         } else {
-                s->iop.bio = bio_clone_bioset(bio, GFP_NOIO,
-                                              dc->disk.bio_split);
+                s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split);
 
                 closure_bio_submit(bio, cl, s->d);
         }
@@ -1126,13 +1099,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio)
         part_stat_unlock();
 
         bio->bi_bdev = dc->bdev;
-        bio->bi_sector += dc->sb.data_offset;
+        bio->bi_iter.bi_sector += dc->sb.data_offset;
 
         if (cached_dev_get(dc)) {
                 s = search_alloc(bio, d);
                 trace_bcache_request_start(s->d, bio);
 
-                if (!bio->bi_size) {
+                if (!bio->bi_iter.bi_size) {
                         /*
                          * can't call bch_journal_meta from under
                          * generic_make_request
@@ -1204,24 +1177,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc)
 static int flash_dev_cache_miss(struct btree *b, struct search *s,
                                 struct bio *bio, unsigned sectors)
 {
-        struct bio_vec *bv;
-        int i;
+        struct bio_vec bv;
+        struct bvec_iter iter;
 
         /* Zero fill bio */
 
-        bio_for_each_segment(bv, bio, i) {
-                unsigned j = min(bv->bv_len >> 9, sectors);
+        bio_for_each_segment(bv, bio, iter) {
+                unsigned j = min(bv.bv_len >> 9, sectors);
 
-                void *p = kmap(bv->bv_page);
-                memset(p + bv->bv_offset, 0, j << 9);
-                kunmap(bv->bv_page);
+                void *p = kmap(bv.bv_page);
+                memset(p + bv.bv_offset, 0, j << 9);
+                kunmap(bv.bv_page);
 
                 sectors -= j;
         }
 
-        bio_advance(bio, min(sectors << 9, bio->bi_size));
+        bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size));
 
-        if (!bio->bi_size)
+        if (!bio->bi_iter.bi_size)
                 return MAP_DONE;
 
         return MAP_CONTINUE;
@@ -1255,7 +1228,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
 
         trace_bcache_request_start(s->d, bio);
 
-        if (!bio->bi_size) {
+        if (!bio->bi_iter.bi_size) {
                 /*
                  * can't call bch_journal_meta from under
                  * generic_make_request
@@ -1265,7 +1238,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio)
                             bcache_wq);
         } else if (rw) {
                 bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys,
-                                        &KEY(d->id, bio->bi_sector, 0),
+                                        &KEY(d->id, bio->bi_iter.bi_sector, 0),
                                         &KEY(d->id, bio_end_sector(bio), 0));
 
                 s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0;
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c57bfa071a57..93d593f957f6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio)
         struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page);
         unsigned i;
 
-        bio->bi_sector  = SB_SECTOR;
+        bio->bi_iter.bi_sector = SB_SECTOR;
         bio->bi_rw      = REQ_SYNC|REQ_META;
-        bio->bi_size    = SB_SIZE;
+        bio->bi_iter.bi_size = SB_SIZE;
         bch_bio_map(bio, NULL);
 
         out->offset = cpu_to_le64(sb->offset);
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw,
                 struct bio *bio = bch_bbio_alloc(c);
 
                 bio->bi_rw      = REQ_SYNC|REQ_META|rw;
-                bio->bi_size    = KEY_SIZE(k) << 9;
+                bio->bi_iter.bi_size = KEY_SIZE(k) << 9;
 
                 bio->bi_end_io  = uuid_endio;
                 bio->bi_private = cl;
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw)
 
         closure_init_stack(cl);
 
-        bio->bi_sector  = bucket * ca->sb.bucket_size;
+        bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size;
         bio->bi_bdev    = ca->bdev;
         bio->bi_rw      = REQ_SYNC|REQ_META|rw;
-        bio->bi_size    = bucket_bytes(ca);
+        bio->bi_iter.bi_size = bucket_bytes(ca);
 
         bio->bi_end_io  = prio_endio;
         bio->bi_private = ca;
@@ -739,8 +739,6 @@ static void bcache_device_free(struct bcache_device *d)
         }
 
         bio_split_pool_free(&d->bio_split_hook);
-        if (d->unaligned_bvec)
-                mempool_destroy(d->unaligned_bvec);
         if (d->bio_split)
                 bioset_free(d->bio_split);
         if (is_vmalloc_addr(d->full_dirty_stripes))
@@ -793,8 +791,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size,
                 return minor;
 
         if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) ||
-            !(d->unaligned_bvec = mempool_create_kmalloc_pool(1,
-                                sizeof(struct bio_vec) * BIO_MAX_PAGES)) ||
             bio_split_pool_init(&d->bio_split_hook) ||
             !(d->disk = alloc_disk(1))) {
                 ida_simple_remove(&bcache_minor, minor);
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index bb37618e7664..db3ae4c2b223 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done)
 
 void bch_bio_map(struct bio *bio, void *base)
 {
-        size_t size = bio->bi_size;
+        size_t size = bio->bi_iter.bi_size;
         struct bio_vec *bv = bio->bi_io_vec;
 
-        BUG_ON(!bio->bi_size);
+        BUG_ON(!bio->bi_iter.bi_size);
         BUG_ON(bio->bi_vcnt);
 
         bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0;
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6c44fe059c27..f4300e4c0114 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w)
         if (!io->dc->writeback_percent)
                 bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0));
 
-        bio->bi_size            = KEY_SIZE(&w->key) << 9;
+        bio->bi_iter.bi_size    = KEY_SIZE(&w->key) << 9;
         bio->bi_max_vecs        = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS);
         bio->bi_private         = w;
         bio->bi_io_vec          = bio->bi_inline_vecs;
@@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl)
 
         dirty_init(w);
         io->bio.bi_rw           = WRITE;
-        io->bio.bi_sector       = KEY_START(&w->key);
+        io->bio.bi_iter.bi_sector = KEY_START(&w->key);
         io->bio.bi_bdev         = io->dc->bdev;
         io->bio.bi_end_io       = dirty_endio;
 
@@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc)
                 io->dc          = dc;
 
                 dirty_init(w);
-                io->bio.bi_sector       = PTR_OFFSET(&w->key, 0);
+                io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0);
                 io->bio.bi_bdev         = PTR_CACHE(dc->disk.c,
                                                     &w->key, 0)->bdev;
                 io->bio.bi_rw           = READ;
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio,
                 return false;
 
         if (dc->partial_stripes_expensive &&
-            bcache_dev_stripe_dirty(dc, bio->bi_sector,
+            bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector,
                                     bio_sectors(bio)))
                 return true;
 