author     Linus Torvalds <torvalds@linux-foundation.org>   2014-01-30 14:19:05 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>   2014-01-30 14:19:05 -0500
commit     f568849edac8611d603e00bd6cbbcfea09395ae6
tree       b9472d640fe5d87426d38c9d81d946cf197ad3fb /drivers/md
parent     d9894c228b11273e720bb63ba120d1d326fe9d94
parent     675675ada486dde5bf9aa51665e90706bff11a35
Merge branch 'for-3.14/core' of git://git.kernel.dk/linux-block
Pull core block IO changes from Jens Axboe:
"The major piece in here is the immutable bio_ve series from Kent, the
rest is fairly minor. It was supposed to go in last round, but
various issues pushed it to this release instead. The pull request
contains:
- Various smaller blk-mq fixes from different folks. Nothing major
here, just minor fixes and cleanups.
- Fix for a memory leak in the error path in the block ioctl code
from Christian Engelmayer.
- Header export fix from CaiZhiyong.
- Finally the immutable biovec changes from Kent Overstreet. This
enables some nice future work on making arbitrarily sized bios
possible, and splitting more efficient. Related fixes to immutable
bio_vecs:
- dm-cache immutable fixup from Mike Snitzer.
- btrfs immutable fixup from Muthu Kumar.
- bio-integrity fix from Nic Bellinger, which is also going to stable"
* 'for-3.14/core' of git://git.kernel.dk/linux-block: (44 commits)
xtensa: fixup simdisk driver to work with immutable bio_vecs
block/blk-mq-cpu.c: use hotcpu_notifier()
blk-mq: for_each_* macro correctness
block: Fix memory leak in rw_copy_check_uvector() handling
bio-integrity: Fix bio_integrity_verify segment start bug
block: remove unrelated header files and export symbol
blk-mq: uses page->list incorrectly
blk-mq: use __smp_call_function_single directly
btrfs: fix missing increment of bi_remaining
Revert "block: Warn and free bio if bi_end_io is not set"
block: Warn and free bio if bi_end_io is not set
blk-mq: fix initializing request's start time
block: blk-mq: don't export blk_mq_free_queue()
block: blk-mq: make blk_sync_queue support mq
block: blk-mq: support draining mq queue
dm cache: increment bi_remaining when bi_end_io is restored
block: fixup for generic bio chaining
block: Really silence spurious compiler warnings
block: Silence spurious compiler warnings
block: Kill bio_pair_split()
...
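The conversion driving most of the drivers/md churn below is the immutable biovec change described in the message above: drivers stop indexing bio->bi_io_vec and mutating bi_sector/bi_size/bi_idx directly, and instead walk a bio through a struct bvec_iter, with the position fields moved under bio->bi_iter. A minimal sketch of the new-style iteration, assuming the post-3.14 bio API; the helper name is hypothetical and not taken from this merge:

#include <linux/bio.h>
#include <linux/highmem.h>

/*
 * Illustrative only: a made-up helper showing the post-immutable-biovec
 * iteration style that the hunks below convert to.  bio_for_each_segment()
 * now takes a struct bvec_iter and yields bio_vec *copies*, so the bio's
 * bi_io_vec is never modified; the fields formerly accessed as
 * bio->bi_sector, bio->bi_size and bio->bi_idx live in bio->bi_iter.
 */
static u32 sum_first_bytes(struct bio *bio)
{
        struct bio_vec bv;              /* a copy, not a pointer into bi_io_vec */
        struct bvec_iter iter;
        u32 sum = 0;

        bio_for_each_segment(bv, bio, iter) {
                void *p = kmap_atomic(bv.bv_page);

                sum += *((u8 *)p + bv.bv_offset);
                kunmap_atomic(p);
        }

        return sum;
}

The old pattern (struct bio_vec *bv; int i; bio_for_each_segment(bv, bio, i), with bv dereferenced as a pointer) is what the left-hand columns of the hunks below still show.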
Diffstat (limited to 'drivers/md')
36 files changed, 569 insertions(+), 936 deletions(-)
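The other recurring change, most visible in the bcache/io.c and bcache/request.c hunks below, is dropping bcache's open-coded bch_bio_split() and generic_make_request hack in favour of the generic bio_next_split(), which returns the original bio once nothing is left to split. A rough sketch of the resulting submit loop, with a placeholder bioset, completion callback, and a fixed per-piece limit (the real code recomputes the limit each iteration via bch_bio_max_sectors()):

#include <linux/bio.h>
#include <linux/blkdev.h>

/*
 * Sketch only: split @bio into pieces of at most @max_sectors and submit
 * each one.  bio_next_split() hands back @bio itself when no split is
 * needed, which is what terminates the loop.  The bioset and end_io
 * callback are placeholders, not names from this merge.
 */
static void submit_in_pieces(struct bio *bio, unsigned max_sectors,
                             struct bio_set *split_bs,
                             bio_end_io_t *end_io, void *private)
{
        struct bio *n;

        do {
                n = bio_next_split(bio, max_sectors, GFP_NOIO, split_bs);

                n->bi_end_io = end_io;
                n->bi_private = private;

                generic_make_request(n);
        } while (n != bio);
}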
diff --git a/drivers/md/bcache/bcache.h b/drivers/md/bcache/bcache.h
index 754f43177483..dbdbca5a9591 100644
--- a/drivers/md/bcache/bcache.h
+++ b/drivers/md/bcache/bcache.h
@@ -280,7 +280,6 @@ struct bcache_device { | |||
280 | unsigned long sectors_dirty_last; | 280 | unsigned long sectors_dirty_last; |
281 | long sectors_dirty_derivative; | 281 | long sectors_dirty_derivative; |
282 | 282 | ||
283 | mempool_t *unaligned_bvec; | ||
284 | struct bio_set *bio_split; | 283 | struct bio_set *bio_split; |
285 | 284 | ||
286 | unsigned data_csum:1; | 285 | unsigned data_csum:1; |
@@ -902,7 +901,6 @@ void bch_bbio_endio(struct cache_set *, struct bio *, int, const char *); | |||
902 | void bch_bbio_free(struct bio *, struct cache_set *); | 901 | void bch_bbio_free(struct bio *, struct cache_set *); |
903 | struct bio *bch_bbio_alloc(struct cache_set *); | 902 | struct bio *bch_bbio_alloc(struct cache_set *); |
904 | 903 | ||
905 | struct bio *bch_bio_split(struct bio *, int, gfp_t, struct bio_set *); | ||
906 | void bch_generic_make_request(struct bio *, struct bio_split_pool *); | 904 | void bch_generic_make_request(struct bio *, struct bio_split_pool *); |
907 | void __bch_submit_bbio(struct bio *, struct cache_set *); | 905 | void __bch_submit_bbio(struct bio *, struct cache_set *); |
908 | void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned); | 906 | void bch_submit_bbio(struct bio *, struct cache_set *, struct bkey *, unsigned); |
diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c
index 31bb53fcc67a..946ecd3b048b 100644
--- a/drivers/md/bcache/btree.c
+++ b/drivers/md/bcache/btree.c
@@ -299,7 +299,7 @@ void bch_btree_node_read(struct btree *b) | |||
299 | 299 | ||
300 | bio = bch_bbio_alloc(b->c); | 300 | bio = bch_bbio_alloc(b->c); |
301 | bio->bi_rw = REQ_META|READ_SYNC; | 301 | bio->bi_rw = REQ_META|READ_SYNC; |
302 | bio->bi_size = KEY_SIZE(&b->key) << 9; | 302 | bio->bi_iter.bi_size = KEY_SIZE(&b->key) << 9; |
303 | bio->bi_end_io = btree_node_read_endio; | 303 | bio->bi_end_io = btree_node_read_endio; |
304 | bio->bi_private = &cl; | 304 | bio->bi_private = &cl; |
305 | 305 | ||
@@ -362,7 +362,7 @@ static void btree_node_write_done(struct closure *cl) | |||
362 | struct bio_vec *bv; | 362 | struct bio_vec *bv; |
363 | int n; | 363 | int n; |
364 | 364 | ||
365 | __bio_for_each_segment(bv, b->bio, n, 0) | 365 | bio_for_each_segment_all(bv, b->bio, n) |
366 | __free_page(bv->bv_page); | 366 | __free_page(bv->bv_page); |
367 | 367 | ||
368 | __btree_node_write_done(cl); | 368 | __btree_node_write_done(cl); |
@@ -395,7 +395,7 @@ static void do_btree_node_write(struct btree *b) | |||
395 | b->bio->bi_end_io = btree_node_write_endio; | 395 | b->bio->bi_end_io = btree_node_write_endio; |
396 | b->bio->bi_private = cl; | 396 | b->bio->bi_private = cl; |
397 | b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; | 397 | b->bio->bi_rw = REQ_META|WRITE_SYNC|REQ_FUA; |
398 | b->bio->bi_size = set_blocks(i, b->c) * block_bytes(b->c); | 398 | b->bio->bi_iter.bi_size = set_blocks(i, b->c) * block_bytes(b->c); |
399 | bch_bio_map(b->bio, i); | 399 | bch_bio_map(b->bio, i); |
400 | 400 | ||
401 | /* | 401 | /* |
@@ -421,7 +421,7 @@ static void do_btree_node_write(struct btree *b) | |||
421 | struct bio_vec *bv; | 421 | struct bio_vec *bv; |
422 | void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); | 422 | void *base = (void *) ((unsigned long) i & ~(PAGE_SIZE - 1)); |
423 | 423 | ||
424 | bio_for_each_segment(bv, b->bio, j) | 424 | bio_for_each_segment_all(bv, b->bio, j) |
425 | memcpy(page_address(bv->bv_page), | 425 | memcpy(page_address(bv->bv_page), |
426 | base + j * PAGE_SIZE, PAGE_SIZE); | 426 | base + j * PAGE_SIZE, PAGE_SIZE); |
427 | 427 | ||
diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c
index 264fcfbd6290..03cb4d114e16 100644
--- a/drivers/md/bcache/debug.c
+++ b/drivers/md/bcache/debug.c
@@ -173,7 +173,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) | |||
173 | { | 173 | { |
174 | char name[BDEVNAME_SIZE]; | 174 | char name[BDEVNAME_SIZE]; |
175 | struct bio *check; | 175 | struct bio *check; |
176 | struct bio_vec *bv; | 176 | struct bio_vec bv, *bv2; |
177 | struct bvec_iter iter; | ||
177 | int i; | 178 | int i; |
178 | 179 | ||
179 | check = bio_clone(bio, GFP_NOIO); | 180 | check = bio_clone(bio, GFP_NOIO); |
@@ -185,23 +186,23 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) | |||
185 | 186 | ||
186 | submit_bio_wait(READ_SYNC, check); | 187 | submit_bio_wait(READ_SYNC, check); |
187 | 188 | ||
188 | bio_for_each_segment(bv, bio, i) { | 189 | bio_for_each_segment(bv, bio, iter) { |
189 | void *p1 = kmap_atomic(bv->bv_page); | 190 | void *p1 = kmap_atomic(bv.bv_page); |
190 | void *p2 = page_address(check->bi_io_vec[i].bv_page); | 191 | void *p2 = page_address(check->bi_io_vec[iter.bi_idx].bv_page); |
191 | 192 | ||
192 | cache_set_err_on(memcmp(p1 + bv->bv_offset, | 193 | cache_set_err_on(memcmp(p1 + bv.bv_offset, |
193 | p2 + bv->bv_offset, | 194 | p2 + bv.bv_offset, |
194 | bv->bv_len), | 195 | bv.bv_len), |
195 | dc->disk.c, | 196 | dc->disk.c, |
196 | "verify failed at dev %s sector %llu", | 197 | "verify failed at dev %s sector %llu", |
197 | bdevname(dc->bdev, name), | 198 | bdevname(dc->bdev, name), |
198 | (uint64_t) bio->bi_sector); | 199 | (uint64_t) bio->bi_iter.bi_sector); |
199 | 200 | ||
200 | kunmap_atomic(p1); | 201 | kunmap_atomic(p1); |
201 | } | 202 | } |
202 | 203 | ||
203 | bio_for_each_segment_all(bv, check, i) | 204 | bio_for_each_segment_all(bv2, check, i) |
204 | __free_page(bv->bv_page); | 205 | __free_page(bv2->bv_page); |
205 | out_put: | 206 | out_put: |
206 | bio_put(check); | 207 | bio_put(check); |
207 | } | 208 | } |
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 9056632995b1..fa028fa82df4 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -11,178 +11,40 @@ | |||
11 | 11 | ||
12 | #include <linux/blkdev.h> | 12 | #include <linux/blkdev.h> |
13 | 13 | ||
14 | static void bch_bi_idx_hack_endio(struct bio *bio, int error) | ||
15 | { | ||
16 | struct bio *p = bio->bi_private; | ||
17 | |||
18 | bio_endio(p, error); | ||
19 | bio_put(bio); | ||
20 | } | ||
21 | |||
22 | static void bch_generic_make_request_hack(struct bio *bio) | ||
23 | { | ||
24 | if (bio->bi_idx) { | ||
25 | struct bio *clone = bio_alloc(GFP_NOIO, bio_segments(bio)); | ||
26 | |||
27 | memcpy(clone->bi_io_vec, | ||
28 | bio_iovec(bio), | ||
29 | bio_segments(bio) * sizeof(struct bio_vec)); | ||
30 | |||
31 | clone->bi_sector = bio->bi_sector; | ||
32 | clone->bi_bdev = bio->bi_bdev; | ||
33 | clone->bi_rw = bio->bi_rw; | ||
34 | clone->bi_vcnt = bio_segments(bio); | ||
35 | clone->bi_size = bio->bi_size; | ||
36 | |||
37 | clone->bi_private = bio; | ||
38 | clone->bi_end_io = bch_bi_idx_hack_endio; | ||
39 | |||
40 | bio = clone; | ||
41 | } | ||
42 | |||
43 | /* | ||
44 | * Hack, since drivers that clone bios clone up to bi_max_vecs, but our | ||
45 | * bios might have had more than that (before we split them per device | ||
46 | * limitations). | ||
47 | * | ||
48 | * To be taken out once immutable bvec stuff is in. | ||
49 | */ | ||
50 | bio->bi_max_vecs = bio->bi_vcnt; | ||
51 | |||
52 | generic_make_request(bio); | ||
53 | } | ||
54 | |||
55 | /** | ||
56 | * bch_bio_split - split a bio | ||
57 | * @bio: bio to split | ||
58 | * @sectors: number of sectors to split from the front of @bio | ||
59 | * @gfp: gfp mask | ||
60 | * @bs: bio set to allocate from | ||
61 | * | ||
62 | * Allocates and returns a new bio which represents @sectors from the start of | ||
63 | * @bio, and updates @bio to represent the remaining sectors. | ||
64 | * | ||
65 | * If bio_sectors(@bio) was less than or equal to @sectors, returns @bio | ||
66 | * unchanged. | ||
67 | * | ||
68 | * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a | ||
69 | * bvec boundry; it is the caller's responsibility to ensure that @bio is not | ||
70 | * freed before the split. | ||
71 | */ | ||
72 | struct bio *bch_bio_split(struct bio *bio, int sectors, | ||
73 | gfp_t gfp, struct bio_set *bs) | ||
74 | { | ||
75 | unsigned idx = bio->bi_idx, vcnt = 0, nbytes = sectors << 9; | ||
76 | struct bio_vec *bv; | ||
77 | struct bio *ret = NULL; | ||
78 | |||
79 | BUG_ON(sectors <= 0); | ||
80 | |||
81 | if (sectors >= bio_sectors(bio)) | ||
82 | return bio; | ||
83 | |||
84 | if (bio->bi_rw & REQ_DISCARD) { | ||
85 | ret = bio_alloc_bioset(gfp, 1, bs); | ||
86 | if (!ret) | ||
87 | return NULL; | ||
88 | idx = 0; | ||
89 | goto out; | ||
90 | } | ||
91 | |||
92 | bio_for_each_segment(bv, bio, idx) { | ||
93 | vcnt = idx - bio->bi_idx; | ||
94 | |||
95 | if (!nbytes) { | ||
96 | ret = bio_alloc_bioset(gfp, vcnt, bs); | ||
97 | if (!ret) | ||
98 | return NULL; | ||
99 | |||
100 | memcpy(ret->bi_io_vec, bio_iovec(bio), | ||
101 | sizeof(struct bio_vec) * vcnt); | ||
102 | |||
103 | break; | ||
104 | } else if (nbytes < bv->bv_len) { | ||
105 | ret = bio_alloc_bioset(gfp, ++vcnt, bs); | ||
106 | if (!ret) | ||
107 | return NULL; | ||
108 | |||
109 | memcpy(ret->bi_io_vec, bio_iovec(bio), | ||
110 | sizeof(struct bio_vec) * vcnt); | ||
111 | |||
112 | ret->bi_io_vec[vcnt - 1].bv_len = nbytes; | ||
113 | bv->bv_offset += nbytes; | ||
114 | bv->bv_len -= nbytes; | ||
115 | break; | ||
116 | } | ||
117 | |||
118 | nbytes -= bv->bv_len; | ||
119 | } | ||
120 | out: | ||
121 | ret->bi_bdev = bio->bi_bdev; | ||
122 | ret->bi_sector = bio->bi_sector; | ||
123 | ret->bi_size = sectors << 9; | ||
124 | ret->bi_rw = bio->bi_rw; | ||
125 | ret->bi_vcnt = vcnt; | ||
126 | ret->bi_max_vecs = vcnt; | ||
127 | |||
128 | bio->bi_sector += sectors; | ||
129 | bio->bi_size -= sectors << 9; | ||
130 | bio->bi_idx = idx; | ||
131 | |||
132 | if (bio_integrity(bio)) { | ||
133 | if (bio_integrity_clone(ret, bio, gfp)) { | ||
134 | bio_put(ret); | ||
135 | return NULL; | ||
136 | } | ||
137 | |||
138 | bio_integrity_trim(ret, 0, bio_sectors(ret)); | ||
139 | bio_integrity_trim(bio, bio_sectors(ret), bio_sectors(bio)); | ||
140 | } | ||
141 | |||
142 | return ret; | ||
143 | } | ||
144 | |||
145 | static unsigned bch_bio_max_sectors(struct bio *bio) | 14 | static unsigned bch_bio_max_sectors(struct bio *bio) |
146 | { | 15 | { |
147 | unsigned ret = bio_sectors(bio); | ||
148 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); | 16 | struct request_queue *q = bdev_get_queue(bio->bi_bdev); |
149 | unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES, | 17 | struct bio_vec bv; |
150 | queue_max_segments(q)); | 18 | struct bvec_iter iter; |
19 | unsigned ret = 0, seg = 0; | ||
151 | 20 | ||
152 | if (bio->bi_rw & REQ_DISCARD) | 21 | if (bio->bi_rw & REQ_DISCARD) |
153 | return min(ret, q->limits.max_discard_sectors); | 22 | return min(bio_sectors(bio), q->limits.max_discard_sectors); |
154 | 23 | ||
155 | if (bio_segments(bio) > max_segments || | 24 | bio_for_each_segment(bv, bio, iter) { |
156 | q->merge_bvec_fn) { | 25 | struct bvec_merge_data bvm = { |
157 | struct bio_vec *bv; | 26 | .bi_bdev = bio->bi_bdev, |
158 | int i, seg = 0; | 27 | .bi_sector = bio->bi_iter.bi_sector, |
159 | 28 | .bi_size = ret << 9, | |
160 | ret = 0; | 29 | .bi_rw = bio->bi_rw, |
161 | 30 | }; | |
162 | bio_for_each_segment(bv, bio, i) { | 31 | |
163 | struct bvec_merge_data bvm = { | 32 | if (seg == min_t(unsigned, BIO_MAX_PAGES, |
164 | .bi_bdev = bio->bi_bdev, | 33 | queue_max_segments(q))) |
165 | .bi_sector = bio->bi_sector, | 34 | break; |
166 | .bi_size = ret << 9, | ||
167 | .bi_rw = bio->bi_rw, | ||
168 | }; | ||
169 | |||
170 | if (seg == max_segments) | ||
171 | break; | ||
172 | 35 | ||
173 | if (q->merge_bvec_fn && | 36 | if (q->merge_bvec_fn && |
174 | q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len) | 37 | q->merge_bvec_fn(q, &bvm, &bv) < (int) bv.bv_len) |
175 | break; | 38 | break; |
176 | 39 | ||
177 | seg++; | 40 | seg++; |
178 | ret += bv->bv_len >> 9; | 41 | ret += bv.bv_len >> 9; |
179 | } | ||
180 | } | 42 | } |
181 | 43 | ||
182 | ret = min(ret, queue_max_sectors(q)); | 44 | ret = min(ret, queue_max_sectors(q)); |
183 | 45 | ||
184 | WARN_ON(!ret); | 46 | WARN_ON(!ret); |
185 | ret = max_t(int, ret, bio_iovec(bio)->bv_len >> 9); | 47 | ret = max_t(int, ret, bio_iovec(bio).bv_len >> 9); |
186 | 48 | ||
187 | return ret; | 49 | return ret; |
188 | } | 50 | } |
@@ -193,7 +55,7 @@ static void bch_bio_submit_split_done(struct closure *cl) | |||
193 | 55 | ||
194 | s->bio->bi_end_io = s->bi_end_io; | 56 | s->bio->bi_end_io = s->bi_end_io; |
195 | s->bio->bi_private = s->bi_private; | 57 | s->bio->bi_private = s->bi_private; |
196 | bio_endio(s->bio, 0); | 58 | bio_endio_nodec(s->bio, 0); |
197 | 59 | ||
198 | closure_debug_destroy(&s->cl); | 60 | closure_debug_destroy(&s->cl); |
199 | mempool_free(s, s->p->bio_split_hook); | 61 | mempool_free(s, s->p->bio_split_hook); |
@@ -232,19 +94,19 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p) | |||
232 | bio_get(bio); | 94 | bio_get(bio); |
233 | 95 | ||
234 | do { | 96 | do { |
235 | n = bch_bio_split(bio, bch_bio_max_sectors(bio), | 97 | n = bio_next_split(bio, bch_bio_max_sectors(bio), |
236 | GFP_NOIO, s->p->bio_split); | 98 | GFP_NOIO, s->p->bio_split); |
237 | 99 | ||
238 | n->bi_end_io = bch_bio_submit_split_endio; | 100 | n->bi_end_io = bch_bio_submit_split_endio; |
239 | n->bi_private = &s->cl; | 101 | n->bi_private = &s->cl; |
240 | 102 | ||
241 | closure_get(&s->cl); | 103 | closure_get(&s->cl); |
242 | bch_generic_make_request_hack(n); | 104 | generic_make_request(n); |
243 | } while (n != bio); | 105 | } while (n != bio); |
244 | 106 | ||
245 | continue_at(&s->cl, bch_bio_submit_split_done, NULL); | 107 | continue_at(&s->cl, bch_bio_submit_split_done, NULL); |
246 | submit: | 108 | submit: |
247 | bch_generic_make_request_hack(bio); | 109 | generic_make_request(bio); |
248 | } | 110 | } |
249 | 111 | ||
250 | /* Bios with headers */ | 112 | /* Bios with headers */ |
@@ -272,8 +134,8 @@ void __bch_submit_bbio(struct bio *bio, struct cache_set *c) | |||
272 | { | 134 | { |
273 | struct bbio *b = container_of(bio, struct bbio, bio); | 135 | struct bbio *b = container_of(bio, struct bbio, bio); |
274 | 136 | ||
275 | bio->bi_sector = PTR_OFFSET(&b->key, 0); | 137 | bio->bi_iter.bi_sector = PTR_OFFSET(&b->key, 0); |
276 | bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; | 138 | bio->bi_bdev = PTR_CACHE(c, &b->key, 0)->bdev; |
277 | 139 | ||
278 | b->submit_time_us = local_clock_us(); | 140 | b->submit_time_us = local_clock_us(); |
279 | closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); | 141 | closure_bio_submit(bio, bio->bi_private, PTR_CACHE(c, &b->key, 0)); |
diff --git a/drivers/md/bcache/journal.c b/drivers/md/bcache/journal.c
index ecdaa671bd50..7eafdf09a0ae 100644
--- a/drivers/md/bcache/journal.c
+++ b/drivers/md/bcache/journal.c
@@ -51,10 +51,10 @@ reread: left = ca->sb.bucket_size - offset; | |||
51 | len = min_t(unsigned, left, PAGE_SECTORS * 8); | 51 | len = min_t(unsigned, left, PAGE_SECTORS * 8); |
52 | 52 | ||
53 | bio_reset(bio); | 53 | bio_reset(bio); |
54 | bio->bi_sector = bucket + offset; | 54 | bio->bi_iter.bi_sector = bucket + offset; |
55 | bio->bi_bdev = ca->bdev; | 55 | bio->bi_bdev = ca->bdev; |
56 | bio->bi_rw = READ; | 56 | bio->bi_rw = READ; |
57 | bio->bi_size = len << 9; | 57 | bio->bi_iter.bi_size = len << 9; |
58 | 58 | ||
59 | bio->bi_end_io = journal_read_endio; | 59 | bio->bi_end_io = journal_read_endio; |
60 | bio->bi_private = &cl; | 60 | bio->bi_private = &cl; |
@@ -437,13 +437,13 @@ static void do_journal_discard(struct cache *ca) | |||
437 | atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); | 437 | atomic_set(&ja->discard_in_flight, DISCARD_IN_FLIGHT); |
438 | 438 | ||
439 | bio_init(bio); | 439 | bio_init(bio); |
440 | bio->bi_sector = bucket_to_sector(ca->set, | 440 | bio->bi_iter.bi_sector = bucket_to_sector(ca->set, |
441 | ca->sb.d[ja->discard_idx]); | 441 | ca->sb.d[ja->discard_idx]); |
442 | bio->bi_bdev = ca->bdev; | 442 | bio->bi_bdev = ca->bdev; |
443 | bio->bi_rw = REQ_WRITE|REQ_DISCARD; | 443 | bio->bi_rw = REQ_WRITE|REQ_DISCARD; |
444 | bio->bi_max_vecs = 1; | 444 | bio->bi_max_vecs = 1; |
445 | bio->bi_io_vec = bio->bi_inline_vecs; | 445 | bio->bi_io_vec = bio->bi_inline_vecs; |
446 | bio->bi_size = bucket_bytes(ca); | 446 | bio->bi_iter.bi_size = bucket_bytes(ca); |
447 | bio->bi_end_io = journal_discard_endio; | 447 | bio->bi_end_io = journal_discard_endio; |
448 | 448 | ||
449 | closure_get(&ca->set->cl); | 449 | closure_get(&ca->set->cl); |
@@ -608,10 +608,10 @@ static void journal_write_unlocked(struct closure *cl) | |||
608 | atomic_long_add(sectors, &ca->meta_sectors_written); | 608 | atomic_long_add(sectors, &ca->meta_sectors_written); |
609 | 609 | ||
610 | bio_reset(bio); | 610 | bio_reset(bio); |
611 | bio->bi_sector = PTR_OFFSET(k, i); | 611 | bio->bi_iter.bi_sector = PTR_OFFSET(k, i); |
612 | bio->bi_bdev = ca->bdev; | 612 | bio->bi_bdev = ca->bdev; |
613 | bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; | 613 | bio->bi_rw = REQ_WRITE|REQ_SYNC|REQ_META|REQ_FLUSH|REQ_FUA; |
614 | bio->bi_size = sectors << 9; | 614 | bio->bi_iter.bi_size = sectors << 9; |
615 | 615 | ||
616 | bio->bi_end_io = journal_write_endio; | 616 | bio->bi_end_io = journal_write_endio; |
617 | bio->bi_private = w; | 617 | bio->bi_private = w; |
diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c
index f2f0998c4a91..052bd24d24b4 100644
--- a/drivers/md/bcache/movinggc.c
+++ b/drivers/md/bcache/movinggc.c
@@ -86,7 +86,7 @@ static void moving_init(struct moving_io *io) | |||
86 | bio_get(bio); | 86 | bio_get(bio); |
87 | bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); | 87 | bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); |
88 | 88 | ||
89 | bio->bi_size = KEY_SIZE(&io->w->key) << 9; | 89 | bio->bi_iter.bi_size = KEY_SIZE(&io->w->key) << 9; |
90 | bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), | 90 | bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&io->w->key), |
91 | PAGE_SECTORS); | 91 | PAGE_SECTORS); |
92 | bio->bi_private = &io->cl; | 92 | bio->bi_private = &io->cl; |
@@ -102,7 +102,7 @@ static void write_moving(struct closure *cl) | |||
102 | if (!op->error) { | 102 | if (!op->error) { |
103 | moving_init(io); | 103 | moving_init(io); |
104 | 104 | ||
105 | io->bio.bio.bi_sector = KEY_START(&io->w->key); | 105 | io->bio.bio.bi_iter.bi_sector = KEY_START(&io->w->key); |
106 | op->write_prio = 1; | 106 | op->write_prio = 1; |
107 | op->bio = &io->bio.bio; | 107 | op->bio = &io->bio.bio; |
108 | 108 | ||
diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c
index 61bcfc21d2a0..c906571997d7 100644
--- a/drivers/md/bcache/request.c
+++ b/drivers/md/bcache/request.c
@@ -197,14 +197,14 @@ static bool verify(struct cached_dev *dc, struct bio *bio) | |||
197 | 197 | ||
198 | static void bio_csum(struct bio *bio, struct bkey *k) | 198 | static void bio_csum(struct bio *bio, struct bkey *k) |
199 | { | 199 | { |
200 | struct bio_vec *bv; | 200 | struct bio_vec bv; |
201 | struct bvec_iter iter; | ||
201 | uint64_t csum = 0; | 202 | uint64_t csum = 0; |
202 | int i; | ||
203 | 203 | ||
204 | bio_for_each_segment(bv, bio, i) { | 204 | bio_for_each_segment(bv, bio, iter) { |
205 | void *d = kmap(bv->bv_page) + bv->bv_offset; | 205 | void *d = kmap(bv.bv_page) + bv.bv_offset; |
206 | csum = bch_crc64_update(csum, d, bv->bv_len); | 206 | csum = bch_crc64_update(csum, d, bv.bv_len); |
207 | kunmap(bv->bv_page); | 207 | kunmap(bv.bv_page); |
208 | } | 208 | } |
209 | 209 | ||
210 | k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1); | 210 | k->ptr[KEY_PTRS(k)] = csum & (~0ULL >> 1); |
@@ -260,7 +260,7 @@ static void bch_data_invalidate(struct closure *cl) | |||
260 | struct bio *bio = op->bio; | 260 | struct bio *bio = op->bio; |
261 | 261 | ||
262 | pr_debug("invalidating %i sectors from %llu", | 262 | pr_debug("invalidating %i sectors from %llu", |
263 | bio_sectors(bio), (uint64_t) bio->bi_sector); | 263 | bio_sectors(bio), (uint64_t) bio->bi_iter.bi_sector); |
264 | 264 | ||
265 | while (bio_sectors(bio)) { | 265 | while (bio_sectors(bio)) { |
266 | unsigned sectors = min(bio_sectors(bio), | 266 | unsigned sectors = min(bio_sectors(bio), |
@@ -269,11 +269,11 @@ static void bch_data_invalidate(struct closure *cl) | |||
269 | if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) | 269 | if (bch_keylist_realloc(&op->insert_keys, 0, op->c)) |
270 | goto out; | 270 | goto out; |
271 | 271 | ||
272 | bio->bi_sector += sectors; | 272 | bio->bi_iter.bi_sector += sectors; |
273 | bio->bi_size -= sectors << 9; | 273 | bio->bi_iter.bi_size -= sectors << 9; |
274 | 274 | ||
275 | bch_keylist_add(&op->insert_keys, | 275 | bch_keylist_add(&op->insert_keys, |
276 | &KEY(op->inode, bio->bi_sector, sectors)); | 276 | &KEY(op->inode, bio->bi_iter.bi_sector, sectors)); |
277 | } | 277 | } |
278 | 278 | ||
279 | op->insert_data_done = true; | 279 | op->insert_data_done = true; |
@@ -363,14 +363,14 @@ static void bch_data_insert_start(struct closure *cl) | |||
363 | k = op->insert_keys.top; | 363 | k = op->insert_keys.top; |
364 | bkey_init(k); | 364 | bkey_init(k); |
365 | SET_KEY_INODE(k, op->inode); | 365 | SET_KEY_INODE(k, op->inode); |
366 | SET_KEY_OFFSET(k, bio->bi_sector); | 366 | SET_KEY_OFFSET(k, bio->bi_iter.bi_sector); |
367 | 367 | ||
368 | if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), | 368 | if (!bch_alloc_sectors(op->c, k, bio_sectors(bio), |
369 | op->write_point, op->write_prio, | 369 | op->write_point, op->write_prio, |
370 | op->writeback)) | 370 | op->writeback)) |
371 | goto err; | 371 | goto err; |
372 | 372 | ||
373 | n = bch_bio_split(bio, KEY_SIZE(k), GFP_NOIO, split); | 373 | n = bio_next_split(bio, KEY_SIZE(k), GFP_NOIO, split); |
374 | 374 | ||
375 | n->bi_end_io = bch_data_insert_endio; | 375 | n->bi_end_io = bch_data_insert_endio; |
376 | n->bi_private = cl; | 376 | n->bi_private = cl; |
@@ -521,7 +521,7 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) | |||
521 | (bio->bi_rw & REQ_WRITE))) | 521 | (bio->bi_rw & REQ_WRITE))) |
522 | goto skip; | 522 | goto skip; |
523 | 523 | ||
524 | if (bio->bi_sector & (c->sb.block_size - 1) || | 524 | if (bio->bi_iter.bi_sector & (c->sb.block_size - 1) || |
525 | bio_sectors(bio) & (c->sb.block_size - 1)) { | 525 | bio_sectors(bio) & (c->sb.block_size - 1)) { |
526 | pr_debug("skipping unaligned io"); | 526 | pr_debug("skipping unaligned io"); |
527 | goto skip; | 527 | goto skip; |
@@ -545,8 +545,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) | |||
545 | 545 | ||
546 | spin_lock(&dc->io_lock); | 546 | spin_lock(&dc->io_lock); |
547 | 547 | ||
548 | hlist_for_each_entry(i, iohash(dc, bio->bi_sector), hash) | 548 | hlist_for_each_entry(i, iohash(dc, bio->bi_iter.bi_sector), hash) |
549 | if (i->last == bio->bi_sector && | 549 | if (i->last == bio->bi_iter.bi_sector && |
550 | time_before(jiffies, i->jiffies)) | 550 | time_before(jiffies, i->jiffies)) |
551 | goto found; | 551 | goto found; |
552 | 552 | ||
@@ -555,8 +555,8 @@ static bool check_should_bypass(struct cached_dev *dc, struct bio *bio) | |||
555 | add_sequential(task); | 555 | add_sequential(task); |
556 | i->sequential = 0; | 556 | i->sequential = 0; |
557 | found: | 557 | found: |
558 | if (i->sequential + bio->bi_size > i->sequential) | 558 | if (i->sequential + bio->bi_iter.bi_size > i->sequential) |
559 | i->sequential += bio->bi_size; | 559 | i->sequential += bio->bi_iter.bi_size; |
560 | 560 | ||
561 | i->last = bio_end_sector(bio); | 561 | i->last = bio_end_sector(bio); |
562 | i->jiffies = jiffies + msecs_to_jiffies(5000); | 562 | i->jiffies = jiffies + msecs_to_jiffies(5000); |
@@ -605,7 +605,6 @@ struct search { | |||
605 | unsigned insert_bio_sectors; | 605 | unsigned insert_bio_sectors; |
606 | 606 | ||
607 | unsigned recoverable:1; | 607 | unsigned recoverable:1; |
608 | unsigned unaligned_bvec:1; | ||
609 | unsigned write:1; | 608 | unsigned write:1; |
610 | unsigned read_dirty_data:1; | 609 | unsigned read_dirty_data:1; |
611 | 610 | ||
@@ -649,15 +648,15 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) | |||
649 | struct bkey *bio_key; | 648 | struct bkey *bio_key; |
650 | unsigned ptr; | 649 | unsigned ptr; |
651 | 650 | ||
652 | if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_sector, 0)) <= 0) | 651 | if (bkey_cmp(k, &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0)) <= 0) |
653 | return MAP_CONTINUE; | 652 | return MAP_CONTINUE; |
654 | 653 | ||
655 | if (KEY_INODE(k) != s->iop.inode || | 654 | if (KEY_INODE(k) != s->iop.inode || |
656 | KEY_START(k) > bio->bi_sector) { | 655 | KEY_START(k) > bio->bi_iter.bi_sector) { |
657 | unsigned bio_sectors = bio_sectors(bio); | 656 | unsigned bio_sectors = bio_sectors(bio); |
658 | unsigned sectors = KEY_INODE(k) == s->iop.inode | 657 | unsigned sectors = KEY_INODE(k) == s->iop.inode |
659 | ? min_t(uint64_t, INT_MAX, | 658 | ? min_t(uint64_t, INT_MAX, |
660 | KEY_START(k) - bio->bi_sector) | 659 | KEY_START(k) - bio->bi_iter.bi_sector) |
661 | : INT_MAX; | 660 | : INT_MAX; |
662 | 661 | ||
663 | int ret = s->d->cache_miss(b, s, bio, sectors); | 662 | int ret = s->d->cache_miss(b, s, bio, sectors); |
@@ -679,14 +678,14 @@ static int cache_lookup_fn(struct btree_op *op, struct btree *b, struct bkey *k) | |||
679 | if (KEY_DIRTY(k)) | 678 | if (KEY_DIRTY(k)) |
680 | s->read_dirty_data = true; | 679 | s->read_dirty_data = true; |
681 | 680 | ||
682 | n = bch_bio_split(bio, min_t(uint64_t, INT_MAX, | 681 | n = bio_next_split(bio, min_t(uint64_t, INT_MAX, |
683 | KEY_OFFSET(k) - bio->bi_sector), | 682 | KEY_OFFSET(k) - bio->bi_iter.bi_sector), |
684 | GFP_NOIO, s->d->bio_split); | 683 | GFP_NOIO, s->d->bio_split); |
685 | 684 | ||
686 | bio_key = &container_of(n, struct bbio, bio)->key; | 685 | bio_key = &container_of(n, struct bbio, bio)->key; |
687 | bch_bkey_copy_single_ptr(bio_key, k, ptr); | 686 | bch_bkey_copy_single_ptr(bio_key, k, ptr); |
688 | 687 | ||
689 | bch_cut_front(&KEY(s->iop.inode, n->bi_sector, 0), bio_key); | 688 | bch_cut_front(&KEY(s->iop.inode, n->bi_iter.bi_sector, 0), bio_key); |
690 | bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); | 689 | bch_cut_back(&KEY(s->iop.inode, bio_end_sector(n), 0), bio_key); |
691 | 690 | ||
692 | n->bi_end_io = bch_cache_read_endio; | 691 | n->bi_end_io = bch_cache_read_endio; |
@@ -713,7 +712,7 @@ static void cache_lookup(struct closure *cl) | |||
713 | struct bio *bio = &s->bio.bio; | 712 | struct bio *bio = &s->bio.bio; |
714 | 713 | ||
715 | int ret = bch_btree_map_keys(&s->op, s->iop.c, | 714 | int ret = bch_btree_map_keys(&s->op, s->iop.c, |
716 | &KEY(s->iop.inode, bio->bi_sector, 0), | 715 | &KEY(s->iop.inode, bio->bi_iter.bi_sector, 0), |
717 | cache_lookup_fn, MAP_END_KEY); | 716 | cache_lookup_fn, MAP_END_KEY); |
718 | if (ret == -EAGAIN) | 717 | if (ret == -EAGAIN) |
719 | continue_at(cl, cache_lookup, bcache_wq); | 718 | continue_at(cl, cache_lookup, bcache_wq); |
@@ -758,10 +757,12 @@ static void bio_complete(struct search *s) | |||
758 | static void do_bio_hook(struct search *s) | 757 | static void do_bio_hook(struct search *s) |
759 | { | 758 | { |
760 | struct bio *bio = &s->bio.bio; | 759 | struct bio *bio = &s->bio.bio; |
761 | memcpy(bio, s->orig_bio, sizeof(struct bio)); | ||
762 | 760 | ||
761 | bio_init(bio); | ||
762 | __bio_clone_fast(bio, s->orig_bio); | ||
763 | bio->bi_end_io = request_endio; | 763 | bio->bi_end_io = request_endio; |
764 | bio->bi_private = &s->cl; | 764 | bio->bi_private = &s->cl; |
765 | |||
765 | atomic_set(&bio->bi_cnt, 3); | 766 | atomic_set(&bio->bi_cnt, 3); |
766 | } | 767 | } |
767 | 768 | ||
@@ -773,9 +774,6 @@ static void search_free(struct closure *cl) | |||
773 | if (s->iop.bio) | 774 | if (s->iop.bio) |
774 | bio_put(s->iop.bio); | 775 | bio_put(s->iop.bio); |
775 | 776 | ||
776 | if (s->unaligned_bvec) | ||
777 | mempool_free(s->bio.bio.bi_io_vec, s->d->unaligned_bvec); | ||
778 | |||
779 | closure_debug_destroy(cl); | 777 | closure_debug_destroy(cl); |
780 | mempool_free(s, s->d->c->search); | 778 | mempool_free(s, s->d->c->search); |
781 | } | 779 | } |
@@ -783,7 +781,6 @@ static void search_free(struct closure *cl) | |||
783 | static struct search *search_alloc(struct bio *bio, struct bcache_device *d) | 781 | static struct search *search_alloc(struct bio *bio, struct bcache_device *d) |
784 | { | 782 | { |
785 | struct search *s; | 783 | struct search *s; |
786 | struct bio_vec *bv; | ||
787 | 784 | ||
788 | s = mempool_alloc(d->c->search, GFP_NOIO); | 785 | s = mempool_alloc(d->c->search, GFP_NOIO); |
789 | memset(s, 0, offsetof(struct search, iop.insert_keys)); | 786 | memset(s, 0, offsetof(struct search, iop.insert_keys)); |
@@ -802,15 +799,6 @@ static struct search *search_alloc(struct bio *bio, struct bcache_device *d) | |||
802 | s->start_time = jiffies; | 799 | s->start_time = jiffies; |
803 | do_bio_hook(s); | 800 | do_bio_hook(s); |
804 | 801 | ||
805 | if (bio->bi_size != bio_segments(bio) * PAGE_SIZE) { | ||
806 | bv = mempool_alloc(d->unaligned_bvec, GFP_NOIO); | ||
807 | memcpy(bv, bio_iovec(bio), | ||
808 | sizeof(struct bio_vec) * bio_segments(bio)); | ||
809 | |||
810 | s->bio.bio.bi_io_vec = bv; | ||
811 | s->unaligned_bvec = 1; | ||
812 | } | ||
813 | |||
814 | return s; | 802 | return s; |
815 | } | 803 | } |
816 | 804 | ||
@@ -849,26 +837,13 @@ static void cached_dev_read_error(struct closure *cl) | |||
849 | { | 837 | { |
850 | struct search *s = container_of(cl, struct search, cl); | 838 | struct search *s = container_of(cl, struct search, cl); |
851 | struct bio *bio = &s->bio.bio; | 839 | struct bio *bio = &s->bio.bio; |
852 | struct bio_vec *bv; | ||
853 | int i; | ||
854 | 840 | ||
855 | if (s->recoverable) { | 841 | if (s->recoverable) { |
856 | /* Retry from the backing device: */ | 842 | /* Retry from the backing device: */ |
857 | trace_bcache_read_retry(s->orig_bio); | 843 | trace_bcache_read_retry(s->orig_bio); |
858 | 844 | ||
859 | s->iop.error = 0; | 845 | s->iop.error = 0; |
860 | bv = s->bio.bio.bi_io_vec; | ||
861 | do_bio_hook(s); | 846 | do_bio_hook(s); |
862 | s->bio.bio.bi_io_vec = bv; | ||
863 | |||
864 | if (!s->unaligned_bvec) | ||
865 | bio_for_each_segment(bv, s->orig_bio, i) | ||
866 | bv->bv_offset = 0, bv->bv_len = PAGE_SIZE; | ||
867 | else | ||
868 | memcpy(s->bio.bio.bi_io_vec, | ||
869 | bio_iovec(s->orig_bio), | ||
870 | sizeof(struct bio_vec) * | ||
871 | bio_segments(s->orig_bio)); | ||
872 | 847 | ||
873 | /* XXX: invalidate cache */ | 848 | /* XXX: invalidate cache */ |
874 | 849 | ||
@@ -893,9 +868,9 @@ static void cached_dev_read_done(struct closure *cl) | |||
893 | 868 | ||
894 | if (s->iop.bio) { | 869 | if (s->iop.bio) { |
895 | bio_reset(s->iop.bio); | 870 | bio_reset(s->iop.bio); |
896 | s->iop.bio->bi_sector = s->cache_miss->bi_sector; | 871 | s->iop.bio->bi_iter.bi_sector = s->cache_miss->bi_iter.bi_sector; |
897 | s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; | 872 | s->iop.bio->bi_bdev = s->cache_miss->bi_bdev; |
898 | s->iop.bio->bi_size = s->insert_bio_sectors << 9; | 873 | s->iop.bio->bi_iter.bi_size = s->insert_bio_sectors << 9; |
899 | bch_bio_map(s->iop.bio, NULL); | 874 | bch_bio_map(s->iop.bio, NULL); |
900 | 875 | ||
901 | bio_copy_data(s->cache_miss, s->iop.bio); | 876 | bio_copy_data(s->cache_miss, s->iop.bio); |
@@ -904,8 +879,7 @@ static void cached_dev_read_done(struct closure *cl) | |||
904 | s->cache_miss = NULL; | 879 | s->cache_miss = NULL; |
905 | } | 880 | } |
906 | 881 | ||
907 | if (verify(dc, &s->bio.bio) && s->recoverable && | 882 | if (verify(dc, &s->bio.bio) && s->recoverable && !s->read_dirty_data) |
908 | !s->unaligned_bvec && !s->read_dirty_data) | ||
909 | bch_data_verify(dc, s->orig_bio); | 883 | bch_data_verify(dc, s->orig_bio); |
910 | 884 | ||
911 | bio_complete(s); | 885 | bio_complete(s); |
@@ -945,7 +919,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, | |||
945 | struct bio *miss, *cache_bio; | 919 | struct bio *miss, *cache_bio; |
946 | 920 | ||
947 | if (s->cache_miss || s->iop.bypass) { | 921 | if (s->cache_miss || s->iop.bypass) { |
948 | miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); | 922 | miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); |
949 | ret = miss == bio ? MAP_DONE : MAP_CONTINUE; | 923 | ret = miss == bio ? MAP_DONE : MAP_CONTINUE; |
950 | goto out_submit; | 924 | goto out_submit; |
951 | } | 925 | } |
@@ -959,7 +933,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, | |||
959 | s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); | 933 | s->insert_bio_sectors = min(sectors, bio_sectors(bio) + reada); |
960 | 934 | ||
961 | s->iop.replace_key = KEY(s->iop.inode, | 935 | s->iop.replace_key = KEY(s->iop.inode, |
962 | bio->bi_sector + s->insert_bio_sectors, | 936 | bio->bi_iter.bi_sector + s->insert_bio_sectors, |
963 | s->insert_bio_sectors); | 937 | s->insert_bio_sectors); |
964 | 938 | ||
965 | ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); | 939 | ret = bch_btree_insert_check_key(b, &s->op, &s->iop.replace_key); |
@@ -968,7 +942,7 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, | |||
968 | 942 | ||
969 | s->iop.replace = true; | 943 | s->iop.replace = true; |
970 | 944 | ||
971 | miss = bch_bio_split(bio, sectors, GFP_NOIO, s->d->bio_split); | 945 | miss = bio_next_split(bio, sectors, GFP_NOIO, s->d->bio_split); |
972 | 946 | ||
973 | /* btree_search_recurse()'s btree iterator is no good anymore */ | 947 | /* btree_search_recurse()'s btree iterator is no good anymore */ |
974 | ret = miss == bio ? MAP_DONE : -EINTR; | 948 | ret = miss == bio ? MAP_DONE : -EINTR; |
@@ -979,9 +953,9 @@ static int cached_dev_cache_miss(struct btree *b, struct search *s, | |||
979 | if (!cache_bio) | 953 | if (!cache_bio) |
980 | goto out_submit; | 954 | goto out_submit; |
981 | 955 | ||
982 | cache_bio->bi_sector = miss->bi_sector; | 956 | cache_bio->bi_iter.bi_sector = miss->bi_iter.bi_sector; |
983 | cache_bio->bi_bdev = miss->bi_bdev; | 957 | cache_bio->bi_bdev = miss->bi_bdev; |
984 | cache_bio->bi_size = s->insert_bio_sectors << 9; | 958 | cache_bio->bi_iter.bi_size = s->insert_bio_sectors << 9; |
985 | 959 | ||
986 | cache_bio->bi_end_io = request_endio; | 960 | cache_bio->bi_end_io = request_endio; |
987 | cache_bio->bi_private = &s->cl; | 961 | cache_bio->bi_private = &s->cl; |
@@ -1031,7 +1005,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) | |||
1031 | { | 1005 | { |
1032 | struct closure *cl = &s->cl; | 1006 | struct closure *cl = &s->cl; |
1033 | struct bio *bio = &s->bio.bio; | 1007 | struct bio *bio = &s->bio.bio; |
1034 | struct bkey start = KEY(dc->disk.id, bio->bi_sector, 0); | 1008 | struct bkey start = KEY(dc->disk.id, bio->bi_iter.bi_sector, 0); |
1035 | struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); | 1009 | struct bkey end = KEY(dc->disk.id, bio_end_sector(bio), 0); |
1036 | 1010 | ||
1037 | bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); | 1011 | bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, &start, &end); |
@@ -1087,8 +1061,7 @@ static void cached_dev_write(struct cached_dev *dc, struct search *s) | |||
1087 | closure_bio_submit(flush, cl, s->d); | 1061 | closure_bio_submit(flush, cl, s->d); |
1088 | } | 1062 | } |
1089 | } else { | 1063 | } else { |
1090 | s->iop.bio = bio_clone_bioset(bio, GFP_NOIO, | 1064 | s->iop.bio = bio_clone_fast(bio, GFP_NOIO, dc->disk.bio_split); |
1091 | dc->disk.bio_split); | ||
1092 | 1065 | ||
1093 | closure_bio_submit(bio, cl, s->d); | 1066 | closure_bio_submit(bio, cl, s->d); |
1094 | } | 1067 | } |
@@ -1126,13 +1099,13 @@ static void cached_dev_make_request(struct request_queue *q, struct bio *bio) | |||
1126 | part_stat_unlock(); | 1099 | part_stat_unlock(); |
1127 | 1100 | ||
1128 | bio->bi_bdev = dc->bdev; | 1101 | bio->bi_bdev = dc->bdev; |
1129 | bio->bi_sector += dc->sb.data_offset; | 1102 | bio->bi_iter.bi_sector += dc->sb.data_offset; |
1130 | 1103 | ||
1131 | if (cached_dev_get(dc)) { | 1104 | if (cached_dev_get(dc)) { |
1132 | s = search_alloc(bio, d); | 1105 | s = search_alloc(bio, d); |
1133 | trace_bcache_request_start(s->d, bio); | 1106 | trace_bcache_request_start(s->d, bio); |
1134 | 1107 | ||
1135 | if (!bio->bi_size) { | 1108 | if (!bio->bi_iter.bi_size) { |
1136 | /* | 1109 | /* |
1137 | * can't call bch_journal_meta from under | 1110 | * can't call bch_journal_meta from under |
1138 | * generic_make_request | 1111 | * generic_make_request |
@@ -1204,24 +1177,24 @@ void bch_cached_dev_request_init(struct cached_dev *dc) | |||
1204 | static int flash_dev_cache_miss(struct btree *b, struct search *s, | 1177 | static int flash_dev_cache_miss(struct btree *b, struct search *s, |
1205 | struct bio *bio, unsigned sectors) | 1178 | struct bio *bio, unsigned sectors) |
1206 | { | 1179 | { |
1207 | struct bio_vec *bv; | 1180 | struct bio_vec bv; |
1208 | int i; | 1181 | struct bvec_iter iter; |
1209 | 1182 | ||
1210 | /* Zero fill bio */ | 1183 | /* Zero fill bio */ |
1211 | 1184 | ||
1212 | bio_for_each_segment(bv, bio, i) { | 1185 | bio_for_each_segment(bv, bio, iter) { |
1213 | unsigned j = min(bv->bv_len >> 9, sectors); | 1186 | unsigned j = min(bv.bv_len >> 9, sectors); |
1214 | 1187 | ||
1215 | void *p = kmap(bv->bv_page); | 1188 | void *p = kmap(bv.bv_page); |
1216 | memset(p + bv->bv_offset, 0, j << 9); | 1189 | memset(p + bv.bv_offset, 0, j << 9); |
1217 | kunmap(bv->bv_page); | 1190 | kunmap(bv.bv_page); |
1218 | 1191 | ||
1219 | sectors -= j; | 1192 | sectors -= j; |
1220 | } | 1193 | } |
1221 | 1194 | ||
1222 | bio_advance(bio, min(sectors << 9, bio->bi_size)); | 1195 | bio_advance(bio, min(sectors << 9, bio->bi_iter.bi_size)); |
1223 | 1196 | ||
1224 | if (!bio->bi_size) | 1197 | if (!bio->bi_iter.bi_size) |
1225 | return MAP_DONE; | 1198 | return MAP_DONE; |
1226 | 1199 | ||
1227 | return MAP_CONTINUE; | 1200 | return MAP_CONTINUE; |
@@ -1255,7 +1228,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) | |||
1255 | 1228 | ||
1256 | trace_bcache_request_start(s->d, bio); | 1229 | trace_bcache_request_start(s->d, bio); |
1257 | 1230 | ||
1258 | if (!bio->bi_size) { | 1231 | if (!bio->bi_iter.bi_size) { |
1259 | /* | 1232 | /* |
1260 | * can't call bch_journal_meta from under | 1233 | * can't call bch_journal_meta from under |
1261 | * generic_make_request | 1234 | * generic_make_request |
@@ -1265,7 +1238,7 @@ static void flash_dev_make_request(struct request_queue *q, struct bio *bio) | |||
1265 | bcache_wq); | 1238 | bcache_wq); |
1266 | } else if (rw) { | 1239 | } else if (rw) { |
1267 | bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, | 1240 | bch_keybuf_check_overlapping(&s->iop.c->moving_gc_keys, |
1268 | &KEY(d->id, bio->bi_sector, 0), | 1241 | &KEY(d->id, bio->bi_iter.bi_sector, 0), |
1269 | &KEY(d->id, bio_end_sector(bio), 0)); | 1242 | &KEY(d->id, bio_end_sector(bio), 0)); |
1270 | 1243 | ||
1271 | s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; | 1244 | s->iop.bypass = (bio->bi_rw & REQ_DISCARD) != 0; |
diff --git a/drivers/md/bcache/super.c b/drivers/md/bcache/super.c
index c57bfa071a57..93d593f957f6 100644
--- a/drivers/md/bcache/super.c
+++ b/drivers/md/bcache/super.c
@@ -233,9 +233,9 @@ static void __write_super(struct cache_sb *sb, struct bio *bio) | |||
233 | struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); | 233 | struct cache_sb *out = page_address(bio->bi_io_vec[0].bv_page); |
234 | unsigned i; | 234 | unsigned i; |
235 | 235 | ||
236 | bio->bi_sector = SB_SECTOR; | 236 | bio->bi_iter.bi_sector = SB_SECTOR; |
237 | bio->bi_rw = REQ_SYNC|REQ_META; | 237 | bio->bi_rw = REQ_SYNC|REQ_META; |
238 | bio->bi_size = SB_SIZE; | 238 | bio->bi_iter.bi_size = SB_SIZE; |
239 | bch_bio_map(bio, NULL); | 239 | bch_bio_map(bio, NULL); |
240 | 240 | ||
241 | out->offset = cpu_to_le64(sb->offset); | 241 | out->offset = cpu_to_le64(sb->offset); |
@@ -347,7 +347,7 @@ static void uuid_io(struct cache_set *c, unsigned long rw, | |||
347 | struct bio *bio = bch_bbio_alloc(c); | 347 | struct bio *bio = bch_bbio_alloc(c); |
348 | 348 | ||
349 | bio->bi_rw = REQ_SYNC|REQ_META|rw; | 349 | bio->bi_rw = REQ_SYNC|REQ_META|rw; |
350 | bio->bi_size = KEY_SIZE(k) << 9; | 350 | bio->bi_iter.bi_size = KEY_SIZE(k) << 9; |
351 | 351 | ||
352 | bio->bi_end_io = uuid_endio; | 352 | bio->bi_end_io = uuid_endio; |
353 | bio->bi_private = cl; | 353 | bio->bi_private = cl; |
@@ -503,10 +503,10 @@ static void prio_io(struct cache *ca, uint64_t bucket, unsigned long rw) | |||
503 | 503 | ||
504 | closure_init_stack(cl); | 504 | closure_init_stack(cl); |
505 | 505 | ||
506 | bio->bi_sector = bucket * ca->sb.bucket_size; | 506 | bio->bi_iter.bi_sector = bucket * ca->sb.bucket_size; |
507 | bio->bi_bdev = ca->bdev; | 507 | bio->bi_bdev = ca->bdev; |
508 | bio->bi_rw = REQ_SYNC|REQ_META|rw; | 508 | bio->bi_rw = REQ_SYNC|REQ_META|rw; |
509 | bio->bi_size = bucket_bytes(ca); | 509 | bio->bi_iter.bi_size = bucket_bytes(ca); |
510 | 510 | ||
511 | bio->bi_end_io = prio_endio; | 511 | bio->bi_end_io = prio_endio; |
512 | bio->bi_private = ca; | 512 | bio->bi_private = ca; |
@@ -739,8 +739,6 @@ static void bcache_device_free(struct bcache_device *d) | |||
739 | } | 739 | } |
740 | 740 | ||
741 | bio_split_pool_free(&d->bio_split_hook); | 741 | bio_split_pool_free(&d->bio_split_hook); |
742 | if (d->unaligned_bvec) | ||
743 | mempool_destroy(d->unaligned_bvec); | ||
744 | if (d->bio_split) | 742 | if (d->bio_split) |
745 | bioset_free(d->bio_split); | 743 | bioset_free(d->bio_split); |
746 | if (is_vmalloc_addr(d->full_dirty_stripes)) | 744 | if (is_vmalloc_addr(d->full_dirty_stripes)) |
@@ -793,8 +791,6 @@ static int bcache_device_init(struct bcache_device *d, unsigned block_size, | |||
793 | return minor; | 791 | return minor; |
794 | 792 | ||
795 | if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || | 793 | if (!(d->bio_split = bioset_create(4, offsetof(struct bbio, bio))) || |
796 | !(d->unaligned_bvec = mempool_create_kmalloc_pool(1, | ||
797 | sizeof(struct bio_vec) * BIO_MAX_PAGES)) || | ||
798 | bio_split_pool_init(&d->bio_split_hook) || | 794 | bio_split_pool_init(&d->bio_split_hook) || |
799 | !(d->disk = alloc_disk(1))) { | 795 | !(d->disk = alloc_disk(1))) { |
800 | ida_simple_remove(&bcache_minor, minor); | 796 | ida_simple_remove(&bcache_minor, minor); |
diff --git a/drivers/md/bcache/util.c b/drivers/md/bcache/util.c
index bb37618e7664..db3ae4c2b223 100644
--- a/drivers/md/bcache/util.c
+++ b/drivers/md/bcache/util.c
@@ -224,10 +224,10 @@ uint64_t bch_next_delay(struct bch_ratelimit *d, uint64_t done) | |||
224 | 224 | ||
225 | void bch_bio_map(struct bio *bio, void *base) | 225 | void bch_bio_map(struct bio *bio, void *base) |
226 | { | 226 | { |
227 | size_t size = bio->bi_size; | 227 | size_t size = bio->bi_iter.bi_size; |
228 | struct bio_vec *bv = bio->bi_io_vec; | 228 | struct bio_vec *bv = bio->bi_io_vec; |
229 | 229 | ||
230 | BUG_ON(!bio->bi_size); | 230 | BUG_ON(!bio->bi_iter.bi_size); |
231 | BUG_ON(bio->bi_vcnt); | 231 | BUG_ON(bio->bi_vcnt); |
232 | 232 | ||
233 | bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0; | 233 | bv->bv_offset = base ? ((unsigned long) base) % PAGE_SIZE : 0; |
diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c
index 6c44fe059c27..f4300e4c0114 100644
--- a/drivers/md/bcache/writeback.c
+++ b/drivers/md/bcache/writeback.c
@@ -111,7 +111,7 @@ static void dirty_init(struct keybuf_key *w) | |||
111 | if (!io->dc->writeback_percent) | 111 | if (!io->dc->writeback_percent) |
112 | bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); | 112 | bio_set_prio(bio, IOPRIO_PRIO_VALUE(IOPRIO_CLASS_IDLE, 0)); |
113 | 113 | ||
114 | bio->bi_size = KEY_SIZE(&w->key) << 9; | 114 | bio->bi_iter.bi_size = KEY_SIZE(&w->key) << 9; |
115 | bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); | 115 | bio->bi_max_vecs = DIV_ROUND_UP(KEY_SIZE(&w->key), PAGE_SECTORS); |
116 | bio->bi_private = w; | 116 | bio->bi_private = w; |
117 | bio->bi_io_vec = bio->bi_inline_vecs; | 117 | bio->bi_io_vec = bio->bi_inline_vecs; |
@@ -184,7 +184,7 @@ static void write_dirty(struct closure *cl) | |||
184 | 184 | ||
185 | dirty_init(w); | 185 | dirty_init(w); |
186 | io->bio.bi_rw = WRITE; | 186 | io->bio.bi_rw = WRITE; |
187 | io->bio.bi_sector = KEY_START(&w->key); | 187 | io->bio.bi_iter.bi_sector = KEY_START(&w->key); |
188 | io->bio.bi_bdev = io->dc->bdev; | 188 | io->bio.bi_bdev = io->dc->bdev; |
189 | io->bio.bi_end_io = dirty_endio; | 189 | io->bio.bi_end_io = dirty_endio; |
190 | 190 | ||
@@ -253,7 +253,7 @@ static void read_dirty(struct cached_dev *dc) | |||
253 | io->dc = dc; | 253 | io->dc = dc; |
254 | 254 | ||
255 | dirty_init(w); | 255 | dirty_init(w); |
256 | io->bio.bi_sector = PTR_OFFSET(&w->key, 0); | 256 | io->bio.bi_iter.bi_sector = PTR_OFFSET(&w->key, 0); |
257 | io->bio.bi_bdev = PTR_CACHE(dc->disk.c, | 257 | io->bio.bi_bdev = PTR_CACHE(dc->disk.c, |
258 | &w->key, 0)->bdev; | 258 | &w->key, 0)->bdev; |
259 | io->bio.bi_rw = READ; | 259 | io->bio.bi_rw = READ; |
diff --git a/drivers/md/bcache/writeback.h b/drivers/md/bcache/writeback.h
index c9ddcf4614b9..e2f8598937ac 100644
--- a/drivers/md/bcache/writeback.h
+++ b/drivers/md/bcache/writeback.h
@@ -50,7 +50,7 @@ static inline bool should_writeback(struct cached_dev *dc, struct bio *bio, | |||
50 | return false; | 50 | return false; |
51 | 51 | ||
52 | if (dc->partial_stripes_expensive && | 52 | if (dc->partial_stripes_expensive && |
53 | bcache_dev_stripe_dirty(dc, bio->bi_sector, | 53 | bcache_dev_stripe_dirty(dc, bio->bi_iter.bi_sector, |
54 | bio_sectors(bio))) | 54 | bio_sectors(bio))) |
55 | return true; | 55 | return true; |
56 | 56 | ||
diff --git a/drivers/md/dm-bio-record.h b/drivers/md/dm-bio-record.h
index 3a8cfa2645c7..dd3646111561 100644
--- a/drivers/md/dm-bio-record.h
+++ b/drivers/md/dm-bio-record.h
@@ -17,55 +17,24 @@ | |||
17 | * original bio state. | 17 | * original bio state. |
18 | */ | 18 | */ |
19 | 19 | ||
20 | struct dm_bio_vec_details { | ||
21 | #if PAGE_SIZE < 65536 | ||
22 | __u16 bv_len; | ||
23 | __u16 bv_offset; | ||
24 | #else | ||
25 | unsigned bv_len; | ||
26 | unsigned bv_offset; | ||
27 | #endif | ||
28 | }; | ||
29 | |||
30 | struct dm_bio_details { | 20 | struct dm_bio_details { |
31 | sector_t bi_sector; | ||
32 | struct block_device *bi_bdev; | 21 | struct block_device *bi_bdev; |
33 | unsigned int bi_size; | ||
34 | unsigned short bi_idx; | ||
35 | unsigned long bi_flags; | 22 | unsigned long bi_flags; |
36 | struct dm_bio_vec_details bi_io_vec[BIO_MAX_PAGES]; | 23 | struct bvec_iter bi_iter; |
37 | }; | 24 | }; |
38 | 25 | ||
39 | static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) | 26 | static inline void dm_bio_record(struct dm_bio_details *bd, struct bio *bio) |
40 | { | 27 | { |
41 | unsigned i; | ||
42 | |||
43 | bd->bi_sector = bio->bi_sector; | ||
44 | bd->bi_bdev = bio->bi_bdev; | 28 | bd->bi_bdev = bio->bi_bdev; |
45 | bd->bi_size = bio->bi_size; | ||
46 | bd->bi_idx = bio->bi_idx; | ||
47 | bd->bi_flags = bio->bi_flags; | 29 | bd->bi_flags = bio->bi_flags; |
48 | 30 | bd->bi_iter = bio->bi_iter; | |
49 | for (i = 0; i < bio->bi_vcnt; i++) { | ||
50 | bd->bi_io_vec[i].bv_len = bio->bi_io_vec[i].bv_len; | ||
51 | bd->bi_io_vec[i].bv_offset = bio->bi_io_vec[i].bv_offset; | ||
52 | } | ||
53 | } | 31 | } |
54 | 32 | ||
55 | static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) | 33 | static inline void dm_bio_restore(struct dm_bio_details *bd, struct bio *bio) |
56 | { | 34 | { |
57 | unsigned i; | ||
58 | |||
59 | bio->bi_sector = bd->bi_sector; | ||
60 | bio->bi_bdev = bd->bi_bdev; | 35 | bio->bi_bdev = bd->bi_bdev; |
61 | bio->bi_size = bd->bi_size; | ||
62 | bio->bi_idx = bd->bi_idx; | ||
63 | bio->bi_flags = bd->bi_flags; | 36 | bio->bi_flags = bd->bi_flags; |
64 | 37 | bio->bi_iter = bd->bi_iter; | |
65 | for (i = 0; i < bio->bi_vcnt; i++) { | ||
66 | bio->bi_io_vec[i].bv_len = bd->bi_io_vec[i].bv_len; | ||
67 | bio->bi_io_vec[i].bv_offset = bd->bi_io_vec[i].bv_offset; | ||
68 | } | ||
69 | } | 38 | } |
70 | 39 | ||
71 | #endif | 40 | #endif |
diff --git a/drivers/md/dm-bufio.c b/drivers/md/dm-bufio.c
index 9ed42125514b..66c5d130c8c2 100644
--- a/drivers/md/dm-bufio.c
+++ b/drivers/md/dm-bufio.c
@@ -540,7 +540,7 @@ static void use_inline_bio(struct dm_buffer *b, int rw, sector_t block, | |||
540 | bio_init(&b->bio); | 540 | bio_init(&b->bio); |
541 | b->bio.bi_io_vec = b->bio_vec; | 541 | b->bio.bi_io_vec = b->bio_vec; |
542 | b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; | 542 | b->bio.bi_max_vecs = DM_BUFIO_INLINE_VECS; |
543 | b->bio.bi_sector = block << b->c->sectors_per_block_bits; | 543 | b->bio.bi_iter.bi_sector = block << b->c->sectors_per_block_bits; |
544 | b->bio.bi_bdev = b->c->bdev; | 544 | b->bio.bi_bdev = b->c->bdev; |
545 | b->bio.bi_end_io = end_io; | 545 | b->bio.bi_end_io = end_io; |
546 | 546 | ||
diff --git a/drivers/md/dm-cache-policy-mq.c b/drivers/md/dm-cache-policy-mq.c
index 930e8c3d73e9..1e018e986610 100644
--- a/drivers/md/dm-cache-policy-mq.c
+++ b/drivers/md/dm-cache-policy-mq.c
@@ -72,7 +72,7 @@ static enum io_pattern iot_pattern(struct io_tracker *t) | |||
72 | 72 | ||
73 | static void iot_update_stats(struct io_tracker *t, struct bio *bio) | 73 | static void iot_update_stats(struct io_tracker *t, struct bio *bio) |
74 | { | 74 | { |
75 | if (bio->bi_sector == from_oblock(t->last_end_oblock) + 1) | 75 | if (bio->bi_iter.bi_sector == from_oblock(t->last_end_oblock) + 1) |
76 | t->nr_seq_samples++; | 76 | t->nr_seq_samples++; |
77 | else { | 77 | else { |
78 | /* | 78 | /* |
@@ -87,7 +87,7 @@ static void iot_update_stats(struct io_tracker *t, struct bio *bio) | |||
87 | t->nr_rand_samples++; | 87 | t->nr_rand_samples++; |
88 | } | 88 | } |
89 | 89 | ||
90 | t->last_end_oblock = to_oblock(bio->bi_sector + bio_sectors(bio) - 1); | 90 | t->last_end_oblock = to_oblock(bio_end_sector(bio) - 1); |
91 | } | 91 | } |
92 | 92 | ||
93 | static void iot_check_for_pattern_switch(struct io_tracker *t) | 93 | static void iot_check_for_pattern_switch(struct io_tracker *t) |
diff --git a/drivers/md/dm-cache-target.c b/drivers/md/dm-cache-target.c
index 09334c275c79..ffd472e015ca 100644
--- a/drivers/md/dm-cache-target.c
+++ b/drivers/md/dm-cache-target.c
@@ -85,6 +85,12 @@ static void dm_unhook_bio(struct dm_hook_info *h, struct bio *bio) | |||
85 | { | 85 | { |
86 | bio->bi_end_io = h->bi_end_io; | 86 | bio->bi_end_io = h->bi_end_io; |
87 | bio->bi_private = h->bi_private; | 87 | bio->bi_private = h->bi_private; |
88 | |||
89 | /* | ||
90 | * Must bump bi_remaining to allow bio to complete with | ||
91 | * restored bi_end_io. | ||
92 | */ | ||
93 | atomic_inc(&bio->bi_remaining); | ||
88 | } | 94 | } |
89 | 95 | ||
90 | /*----------------------------------------------------------------*/ | 96 | /*----------------------------------------------------------------*/ |
@@ -664,15 +670,17 @@ static void remap_to_origin(struct cache *cache, struct bio *bio) | |||
664 | static void remap_to_cache(struct cache *cache, struct bio *bio, | 670 | static void remap_to_cache(struct cache *cache, struct bio *bio, |
665 | dm_cblock_t cblock) | 671 | dm_cblock_t cblock) |
666 | { | 672 | { |
667 | sector_t bi_sector = bio->bi_sector; | 673 | sector_t bi_sector = bio->bi_iter.bi_sector; |
668 | 674 | ||
669 | bio->bi_bdev = cache->cache_dev->bdev; | 675 | bio->bi_bdev = cache->cache_dev->bdev; |
670 | if (!block_size_is_power_of_two(cache)) | 676 | if (!block_size_is_power_of_two(cache)) |
671 | bio->bi_sector = (from_cblock(cblock) * cache->sectors_per_block) + | 677 | bio->bi_iter.bi_sector = |
672 | sector_div(bi_sector, cache->sectors_per_block); | 678 | (from_cblock(cblock) * cache->sectors_per_block) + |
679 | sector_div(bi_sector, cache->sectors_per_block); | ||
673 | else | 680 | else |
674 | bio->bi_sector = (from_cblock(cblock) << cache->sectors_per_block_shift) | | 681 | bio->bi_iter.bi_sector = |
675 | (bi_sector & (cache->sectors_per_block - 1)); | 682 | (from_cblock(cblock) << cache->sectors_per_block_shift) | |
683 | (bi_sector & (cache->sectors_per_block - 1)); | ||
676 | } | 684 | } |
677 | 685 | ||
678 | static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) | 686 | static void check_if_tick_bio_needed(struct cache *cache, struct bio *bio) |
@@ -712,7 +720,7 @@ static void remap_to_cache_dirty(struct cache *cache, struct bio *bio, | |||
712 | 720 | ||
713 | static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) | 721 | static dm_oblock_t get_bio_block(struct cache *cache, struct bio *bio) |
714 | { | 722 | { |
715 | sector_t block_nr = bio->bi_sector; | 723 | sector_t block_nr = bio->bi_iter.bi_sector; |
716 | 724 | ||
717 | if (!block_size_is_power_of_two(cache)) | 725 | if (!block_size_is_power_of_two(cache)) |
718 | (void) sector_div(block_nr, cache->sectors_per_block); | 726 | (void) sector_div(block_nr, cache->sectors_per_block); |
@@ -1027,7 +1035,7 @@ static void issue_overwrite(struct dm_cache_migration *mg, struct bio *bio) | |||
1027 | static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) | 1035 | static bool bio_writes_complete_block(struct cache *cache, struct bio *bio) |
1028 | { | 1036 | { |
1029 | return (bio_data_dir(bio) == WRITE) && | 1037 | return (bio_data_dir(bio) == WRITE) && |
1030 | (bio->bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); | 1038 | (bio->bi_iter.bi_size == (cache->sectors_per_block << SECTOR_SHIFT)); |
1031 | } | 1039 | } |
1032 | 1040 | ||
1033 | static void avoid_copy(struct dm_cache_migration *mg) | 1041 | static void avoid_copy(struct dm_cache_migration *mg) |
@@ -1252,7 +1260,7 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) | |||
1252 | size_t pb_data_size = get_per_bio_data_size(cache); | 1260 | size_t pb_data_size = get_per_bio_data_size(cache); |
1253 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); | 1261 | struct per_bio_data *pb = get_per_bio_data(bio, pb_data_size); |
1254 | 1262 | ||
1255 | BUG_ON(bio->bi_size); | 1263 | BUG_ON(bio->bi_iter.bi_size); |
1256 | if (!pb->req_nr) | 1264 | if (!pb->req_nr) |
1257 | remap_to_origin(cache, bio); | 1265 | remap_to_origin(cache, bio); |
1258 | else | 1266 | else |
@@ -1275,9 +1283,9 @@ static void process_flush_bio(struct cache *cache, struct bio *bio) | |||
1275 | */ | 1283 | */ |
1276 | static void process_discard_bio(struct cache *cache, struct bio *bio) | 1284 | static void process_discard_bio(struct cache *cache, struct bio *bio) |
1277 | { | 1285 | { |
1278 | dm_block_t start_block = dm_sector_div_up(bio->bi_sector, | 1286 | dm_block_t start_block = dm_sector_div_up(bio->bi_iter.bi_sector, |
1279 | cache->discard_block_size); | 1287 | cache->discard_block_size); |
1280 | dm_block_t end_block = bio->bi_sector + bio_sectors(bio); | 1288 | dm_block_t end_block = bio_end_sector(bio); |
1281 | dm_block_t b; | 1289 | dm_block_t b; |
1282 | 1290 | ||
1283 | end_block = block_div(end_block, cache->discard_block_size); | 1291 | end_block = block_div(end_block, cache->discard_block_size); |
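The remap_to_cache() hunks above (and the matching remap() change in dm-thin further down) only move the sector into bio->bi_iter.bi_sector; the arithmetic is unchanged: the cache block supplies the high part of the target sector and the offset within the block is carried over from the incoming bio. A minimal userspace sketch of that arithmetic, with plain 64-bit division standing in for the kernel's sector_div() — the names and numbers below are illustrative only, not kernel code:

#include <stdint.h>
#include <stdio.h>

/* power-of-two block size: shift in the block number, mask in the offset */
static uint64_t remap_pow2(uint64_t cblock, uint64_t bi_sector, unsigned shift)
{
        return (cblock << shift) | (bi_sector & ((1ULL << shift) - 1));
}

/* generic block size: multiply, then add the remainder of the old sector */
static uint64_t remap_generic(uint64_t cblock, uint64_t bi_sector,
                              uint64_t sectors_per_block)
{
        return cblock * sectors_per_block + bi_sector % sectors_per_block;
}

int main(void)
{
        /* 128 sectors per block, cache block 5, offset 37 within the block */
        printf("%llu\n", (unsigned long long)remap_pow2(5, 5 * 128 + 37, 7));
        printf("%llu\n", (unsigned long long)remap_generic(5, 5 * 128 + 37, 128));
        return 0;
}

Both calls print 677: block 5 shifted up by 7 bits (or multiplied by 128) plus the 37-sector offset preserved from the original bi_sector.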
diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 81b0fa660452..784695d22fde 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c | |||
@@ -39,10 +39,8 @@ struct convert_context { | |||
39 | struct completion restart; | 39 | struct completion restart; |
40 | struct bio *bio_in; | 40 | struct bio *bio_in; |
41 | struct bio *bio_out; | 41 | struct bio *bio_out; |
42 | unsigned int offset_in; | 42 | struct bvec_iter iter_in; |
43 | unsigned int offset_out; | 43 | struct bvec_iter iter_out; |
44 | unsigned int idx_in; | ||
45 | unsigned int idx_out; | ||
46 | sector_t cc_sector; | 44 | sector_t cc_sector; |
47 | atomic_t cc_pending; | 45 | atomic_t cc_pending; |
48 | }; | 46 | }; |
@@ -826,10 +824,10 @@ static void crypt_convert_init(struct crypt_config *cc, | |||
826 | { | 824 | { |
827 | ctx->bio_in = bio_in; | 825 | ctx->bio_in = bio_in; |
828 | ctx->bio_out = bio_out; | 826 | ctx->bio_out = bio_out; |
829 | ctx->offset_in = 0; | 827 | if (bio_in) |
830 | ctx->offset_out = 0; | 828 | ctx->iter_in = bio_in->bi_iter; |
831 | ctx->idx_in = bio_in ? bio_in->bi_idx : 0; | 829 | if (bio_out) |
832 | ctx->idx_out = bio_out ? bio_out->bi_idx : 0; | 830 | ctx->iter_out = bio_out->bi_iter; |
833 | ctx->cc_sector = sector + cc->iv_offset; | 831 | ctx->cc_sector = sector + cc->iv_offset; |
834 | init_completion(&ctx->restart); | 832 | init_completion(&ctx->restart); |
835 | } | 833 | } |
@@ -857,8 +855,8 @@ static int crypt_convert_block(struct crypt_config *cc, | |||
857 | struct convert_context *ctx, | 855 | struct convert_context *ctx, |
858 | struct ablkcipher_request *req) | 856 | struct ablkcipher_request *req) |
859 | { | 857 | { |
860 | struct bio_vec *bv_in = bio_iovec_idx(ctx->bio_in, ctx->idx_in); | 858 | struct bio_vec bv_in = bio_iter_iovec(ctx->bio_in, ctx->iter_in); |
861 | struct bio_vec *bv_out = bio_iovec_idx(ctx->bio_out, ctx->idx_out); | 859 | struct bio_vec bv_out = bio_iter_iovec(ctx->bio_out, ctx->iter_out); |
862 | struct dm_crypt_request *dmreq; | 860 | struct dm_crypt_request *dmreq; |
863 | u8 *iv; | 861 | u8 *iv; |
864 | int r; | 862 | int r; |
@@ -869,24 +867,15 @@ static int crypt_convert_block(struct crypt_config *cc, | |||
869 | dmreq->iv_sector = ctx->cc_sector; | 867 | dmreq->iv_sector = ctx->cc_sector; |
870 | dmreq->ctx = ctx; | 868 | dmreq->ctx = ctx; |
871 | sg_init_table(&dmreq->sg_in, 1); | 869 | sg_init_table(&dmreq->sg_in, 1); |
872 | sg_set_page(&dmreq->sg_in, bv_in->bv_page, 1 << SECTOR_SHIFT, | 870 | sg_set_page(&dmreq->sg_in, bv_in.bv_page, 1 << SECTOR_SHIFT, |
873 | bv_in->bv_offset + ctx->offset_in); | 871 | bv_in.bv_offset); |
874 | 872 | ||
875 | sg_init_table(&dmreq->sg_out, 1); | 873 | sg_init_table(&dmreq->sg_out, 1); |
876 | sg_set_page(&dmreq->sg_out, bv_out->bv_page, 1 << SECTOR_SHIFT, | 874 | sg_set_page(&dmreq->sg_out, bv_out.bv_page, 1 << SECTOR_SHIFT, |
877 | bv_out->bv_offset + ctx->offset_out); | 875 | bv_out.bv_offset); |
878 | 876 | ||
879 | ctx->offset_in += 1 << SECTOR_SHIFT; | 877 | bio_advance_iter(ctx->bio_in, &ctx->iter_in, 1 << SECTOR_SHIFT); |
880 | if (ctx->offset_in >= bv_in->bv_len) { | 878 | bio_advance_iter(ctx->bio_out, &ctx->iter_out, 1 << SECTOR_SHIFT); |
881 | ctx->offset_in = 0; | ||
882 | ctx->idx_in++; | ||
883 | } | ||
884 | |||
885 | ctx->offset_out += 1 << SECTOR_SHIFT; | ||
886 | if (ctx->offset_out >= bv_out->bv_len) { | ||
887 | ctx->offset_out = 0; | ||
888 | ctx->idx_out++; | ||
889 | } | ||
890 | 879 | ||
891 | if (cc->iv_gen_ops) { | 880 | if (cc->iv_gen_ops) { |
892 | r = cc->iv_gen_ops->generator(cc, iv, dmreq); | 881 | r = cc->iv_gen_ops->generator(cc, iv, dmreq); |
@@ -937,8 +926,7 @@ static int crypt_convert(struct crypt_config *cc, | |||
937 | 926 | ||
938 | atomic_set(&ctx->cc_pending, 1); | 927 | atomic_set(&ctx->cc_pending, 1); |
939 | 928 | ||
940 | while(ctx->idx_in < ctx->bio_in->bi_vcnt && | 929 | while (ctx->iter_in.bi_size && ctx->iter_out.bi_size) { |
941 | ctx->idx_out < ctx->bio_out->bi_vcnt) { | ||
942 | 930 | ||
943 | crypt_alloc_req(cc, ctx); | 931 | crypt_alloc_req(cc, ctx); |
944 | 932 | ||
@@ -1021,7 +1009,7 @@ static struct bio *crypt_alloc_buffer(struct dm_crypt_io *io, unsigned size, | |||
1021 | size -= len; | 1009 | size -= len; |
1022 | } | 1010 | } |
1023 | 1011 | ||
1024 | if (!clone->bi_size) { | 1012 | if (!clone->bi_iter.bi_size) { |
1025 | bio_put(clone); | 1013 | bio_put(clone); |
1026 | return NULL; | 1014 | return NULL; |
1027 | } | 1015 | } |
@@ -1161,7 +1149,7 @@ static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) | |||
1161 | crypt_inc_pending(io); | 1149 | crypt_inc_pending(io); |
1162 | 1150 | ||
1163 | clone_init(io, clone); | 1151 | clone_init(io, clone); |
1164 | clone->bi_sector = cc->start + io->sector; | 1152 | clone->bi_iter.bi_sector = cc->start + io->sector; |
1165 | 1153 | ||
1166 | generic_make_request(clone); | 1154 | generic_make_request(clone); |
1167 | return 0; | 1155 | return 0; |
@@ -1207,9 +1195,9 @@ static void kcryptd_crypt_write_io_submit(struct dm_crypt_io *io, int async) | |||
1207 | } | 1195 | } |
1208 | 1196 | ||
1209 | /* crypt_convert should have filled the clone bio */ | 1197 | /* crypt_convert should have filled the clone bio */ |
1210 | BUG_ON(io->ctx.idx_out < clone->bi_vcnt); | 1198 | BUG_ON(io->ctx.iter_out.bi_size); |
1211 | 1199 | ||
1212 | clone->bi_sector = cc->start + io->sector; | 1200 | clone->bi_iter.bi_sector = cc->start + io->sector; |
1213 | 1201 | ||
1214 | if (async) | 1202 | if (async) |
1215 | kcryptd_queue_io(io); | 1203 | kcryptd_queue_io(io); |
@@ -1224,7 +1212,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
1224 | struct dm_crypt_io *new_io; | 1212 | struct dm_crypt_io *new_io; |
1225 | int crypt_finished; | 1213 | int crypt_finished; |
1226 | unsigned out_of_pages = 0; | 1214 | unsigned out_of_pages = 0; |
1227 | unsigned remaining = io->base_bio->bi_size; | 1215 | unsigned remaining = io->base_bio->bi_iter.bi_size; |
1228 | sector_t sector = io->sector; | 1216 | sector_t sector = io->sector; |
1229 | int r; | 1217 | int r; |
1230 | 1218 | ||
@@ -1246,9 +1234,9 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
1246 | } | 1234 | } |
1247 | 1235 | ||
1248 | io->ctx.bio_out = clone; | 1236 | io->ctx.bio_out = clone; |
1249 | io->ctx.idx_out = 0; | 1237 | io->ctx.iter_out = clone->bi_iter; |
1250 | 1238 | ||
1251 | remaining -= clone->bi_size; | 1239 | remaining -= clone->bi_iter.bi_size; |
1252 | sector += bio_sectors(clone); | 1240 | sector += bio_sectors(clone); |
1253 | 1241 | ||
1254 | crypt_inc_pending(io); | 1242 | crypt_inc_pending(io); |
@@ -1290,8 +1278,7 @@ static void kcryptd_crypt_write_convert(struct dm_crypt_io *io) | |||
1290 | crypt_inc_pending(new_io); | 1278 | crypt_inc_pending(new_io); |
1291 | crypt_convert_init(cc, &new_io->ctx, NULL, | 1279 | crypt_convert_init(cc, &new_io->ctx, NULL, |
1292 | io->base_bio, sector); | 1280 | io->base_bio, sector); |
1293 | new_io->ctx.idx_in = io->ctx.idx_in; | 1281 | new_io->ctx.iter_in = io->ctx.iter_in; |
1294 | new_io->ctx.offset_in = io->ctx.offset_in; | ||
1295 | 1282 | ||
1296 | /* | 1283 | /* |
1297 | * Fragments after the first use the base_io | 1284 | * Fragments after the first use the base_io |
@@ -1869,11 +1856,12 @@ static int crypt_map(struct dm_target *ti, struct bio *bio) | |||
1869 | if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { | 1856 | if (unlikely(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))) { |
1870 | bio->bi_bdev = cc->dev->bdev; | 1857 | bio->bi_bdev = cc->dev->bdev; |
1871 | if (bio_sectors(bio)) | 1858 | if (bio_sectors(bio)) |
1872 | bio->bi_sector = cc->start + dm_target_offset(ti, bio->bi_sector); | 1859 | bio->bi_iter.bi_sector = cc->start + |
1860 | dm_target_offset(ti, bio->bi_iter.bi_sector); | ||
1873 | return DM_MAPIO_REMAPPED; | 1861 | return DM_MAPIO_REMAPPED; |
1874 | } | 1862 | } |
1875 | 1863 | ||
1876 | io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_sector)); | 1864 | io = crypt_io_alloc(cc, bio, dm_target_offset(ti, bio->bi_iter.bi_sector)); |
1877 | 1865 | ||
1878 | if (bio_data_dir(io->base_bio) == READ) { | 1866 | if (bio_data_dir(io->base_bio) == READ) { |
1879 | if (kcryptd_io_read(io, GFP_NOWAIT)) | 1867 | if (kcryptd_io_read(io, GFP_NOWAIT)) |
diff --git a/drivers/md/dm-delay.c b/drivers/md/dm-delay.c index a8a511c053a5..42c3a27a14cc 100644 --- a/drivers/md/dm-delay.c +++ b/drivers/md/dm-delay.c | |||
@@ -277,14 +277,15 @@ static int delay_map(struct dm_target *ti, struct bio *bio) | |||
277 | if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { | 277 | if ((bio_data_dir(bio) == WRITE) && (dc->dev_write)) { |
278 | bio->bi_bdev = dc->dev_write->bdev; | 278 | bio->bi_bdev = dc->dev_write->bdev; |
279 | if (bio_sectors(bio)) | 279 | if (bio_sectors(bio)) |
280 | bio->bi_sector = dc->start_write + | 280 | bio->bi_iter.bi_sector = dc->start_write + |
281 | dm_target_offset(ti, bio->bi_sector); | 281 | dm_target_offset(ti, bio->bi_iter.bi_sector); |
282 | 282 | ||
283 | return delay_bio(dc, dc->write_delay, bio); | 283 | return delay_bio(dc, dc->write_delay, bio); |
284 | } | 284 | } |
285 | 285 | ||
286 | bio->bi_bdev = dc->dev_read->bdev; | 286 | bio->bi_bdev = dc->dev_read->bdev; |
287 | bio->bi_sector = dc->start_read + dm_target_offset(ti, bio->bi_sector); | 287 | bio->bi_iter.bi_sector = dc->start_read + |
288 | dm_target_offset(ti, bio->bi_iter.bi_sector); | ||
288 | 289 | ||
289 | return delay_bio(dc, dc->read_delay, bio); | 290 | return delay_bio(dc, dc->read_delay, bio); |
290 | } | 291 | } |
diff --git a/drivers/md/dm-flakey.c b/drivers/md/dm-flakey.c index c80a0ec5f126..b257e46876d3 100644 --- a/drivers/md/dm-flakey.c +++ b/drivers/md/dm-flakey.c | |||
@@ -248,7 +248,8 @@ static void flakey_map_bio(struct dm_target *ti, struct bio *bio) | |||
248 | 248 | ||
249 | bio->bi_bdev = fc->dev->bdev; | 249 | bio->bi_bdev = fc->dev->bdev; |
250 | if (bio_sectors(bio)) | 250 | if (bio_sectors(bio)) |
251 | bio->bi_sector = flakey_map_sector(ti, bio->bi_sector); | 251 | bio->bi_iter.bi_sector = |
252 | flakey_map_sector(ti, bio->bi_iter.bi_sector); | ||
252 | } | 253 | } |
253 | 254 | ||
254 | static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) | 255 | static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) |
@@ -265,8 +266,8 @@ static void corrupt_bio_data(struct bio *bio, struct flakey_c *fc) | |||
265 | DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " | 266 | DMDEBUG("Corrupting data bio=%p by writing %u to byte %u " |
266 | "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", | 267 | "(rw=%c bi_rw=%lu bi_sector=%llu cur_bytes=%u)\n", |
267 | bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, | 268 | bio, fc->corrupt_bio_value, fc->corrupt_bio_byte, |
268 | (bio_data_dir(bio) == WRITE) ? 'w' : 'r', | 269 | (bio_data_dir(bio) == WRITE) ? 'w' : 'r', bio->bi_rw, |
269 | bio->bi_rw, (unsigned long long)bio->bi_sector, bio_bytes); | 270 | (unsigned long long)bio->bi_iter.bi_sector, bio_bytes); |
270 | } | 271 | } |
271 | } | 272 | } |
272 | 273 | ||
diff --git a/drivers/md/dm-io.c b/drivers/md/dm-io.c index 2a20986a2fec..b2b8a10e8427 100644 --- a/drivers/md/dm-io.c +++ b/drivers/md/dm-io.c | |||
@@ -201,26 +201,29 @@ static void list_dp_init(struct dpages *dp, struct page_list *pl, unsigned offse | |||
201 | /* | 201 | /* |
202 | * Functions for getting the pages from a bvec. | 202 | * Functions for getting the pages from a bvec. |
203 | */ | 203 | */ |
204 | static void bvec_get_page(struct dpages *dp, | 204 | static void bio_get_page(struct dpages *dp, |
205 | struct page **p, unsigned long *len, unsigned *offset) | 205 | struct page **p, unsigned long *len, unsigned *offset) |
206 | { | 206 | { |
207 | struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; | 207 | struct bio *bio = dp->context_ptr; |
208 | *p = bvec->bv_page; | 208 | struct bio_vec bvec = bio_iovec(bio); |
209 | *len = bvec->bv_len; | 209 | *p = bvec.bv_page; |
210 | *offset = bvec->bv_offset; | 210 | *len = bvec.bv_len; |
211 | *offset = bvec.bv_offset; | ||
211 | } | 212 | } |
212 | 213 | ||
213 | static void bvec_next_page(struct dpages *dp) | 214 | static void bio_next_page(struct dpages *dp) |
214 | { | 215 | { |
215 | struct bio_vec *bvec = (struct bio_vec *) dp->context_ptr; | 216 | struct bio *bio = dp->context_ptr; |
216 | dp->context_ptr = bvec + 1; | 217 | struct bio_vec bvec = bio_iovec(bio); |
218 | |||
219 | bio_advance(bio, bvec.bv_len); | ||
217 | } | 220 | } |
218 | 221 | ||
219 | static void bvec_dp_init(struct dpages *dp, struct bio_vec *bvec) | 222 | static void bio_dp_init(struct dpages *dp, struct bio *bio) |
220 | { | 223 | { |
221 | dp->get_page = bvec_get_page; | 224 | dp->get_page = bio_get_page; |
222 | dp->next_page = bvec_next_page; | 225 | dp->next_page = bio_next_page; |
223 | dp->context_ptr = bvec; | 226 | dp->context_ptr = bio; |
224 | } | 227 | } |
225 | 228 | ||
226 | /* | 229 | /* |
@@ -304,14 +307,14 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
304 | dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); | 307 | dm_sector_div_up(remaining, (PAGE_SIZE >> SECTOR_SHIFT))); |
305 | 308 | ||
306 | bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); | 309 | bio = bio_alloc_bioset(GFP_NOIO, num_bvecs, io->client->bios); |
307 | bio->bi_sector = where->sector + (where->count - remaining); | 310 | bio->bi_iter.bi_sector = where->sector + (where->count - remaining); |
308 | bio->bi_bdev = where->bdev; | 311 | bio->bi_bdev = where->bdev; |
309 | bio->bi_end_io = endio; | 312 | bio->bi_end_io = endio; |
310 | store_io_and_region_in_bio(bio, io, region); | 313 | store_io_and_region_in_bio(bio, io, region); |
311 | 314 | ||
312 | if (rw & REQ_DISCARD) { | 315 | if (rw & REQ_DISCARD) { |
313 | num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); | 316 | num_sectors = min_t(sector_t, q->limits.max_discard_sectors, remaining); |
314 | bio->bi_size = num_sectors << SECTOR_SHIFT; | 317 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
315 | remaining -= num_sectors; | 318 | remaining -= num_sectors; |
316 | } else if (rw & REQ_WRITE_SAME) { | 319 | } else if (rw & REQ_WRITE_SAME) { |
317 | /* | 320 | /* |
@@ -320,7 +323,7 @@ static void do_region(int rw, unsigned region, struct dm_io_region *where, | |||
320 | dp->get_page(dp, &page, &len, &offset); | 323 | dp->get_page(dp, &page, &len, &offset); |
321 | bio_add_page(bio, page, logical_block_size, offset); | 324 | bio_add_page(bio, page, logical_block_size, offset); |
322 | num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); | 325 | num_sectors = min_t(sector_t, q->limits.max_write_same_sectors, remaining); |
323 | bio->bi_size = num_sectors << SECTOR_SHIFT; | 326 | bio->bi_iter.bi_size = num_sectors << SECTOR_SHIFT; |
324 | 327 | ||
325 | offset = 0; | 328 | offset = 0; |
326 | remaining -= num_sectors; | 329 | remaining -= num_sectors; |
@@ -457,8 +460,8 @@ static int dp_init(struct dm_io_request *io_req, struct dpages *dp, | |||
457 | list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); | 460 | list_dp_init(dp, io_req->mem.ptr.pl, io_req->mem.offset); |
458 | break; | 461 | break; |
459 | 462 | ||
460 | case DM_IO_BVEC: | 463 | case DM_IO_BIO: |
461 | bvec_dp_init(dp, io_req->mem.ptr.bvec); | 464 | bio_dp_init(dp, io_req->mem.ptr.bio); |
462 | break; | 465 | break; |
463 | 466 | ||
464 | case DM_IO_VMA: | 467 | case DM_IO_VMA: |
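The dm-io change swaps the DM_IO_BVEC dpages flavour for DM_IO_BIO: the context pointer is now the bio itself, bio_get_page() fetches the current vector with bio_iovec(), and bio_next_page() steps forward with bio_advance(). The callback contract is unchanged; a stripped-down sketch of that get_page/next_page shape, with an array cursor standing in for the bio (every name below is illustrative):

#include <stdio.h>

struct dpages {
        void (*get_page)(struct dpages *dp, const char **p, unsigned *len);
        void (*next_page)(struct dpages *dp);
        void *context_ptr;
        unsigned context_u;
};

static const char *pages[] = { "first", "second", "third" };

static void array_get_page(struct dpages *dp, const char **p, unsigned *len)
{
        *p = pages[dp->context_u];
        *len = 4096;            /* a full page in the real code; arbitrary here */
}

static void array_next_page(struct dpages *dp)
{
        dp->context_u++;        /* the real bio_next_page() calls bio_advance() */
}

int main(void)
{
        struct dpages dp = { array_get_page, array_next_page, NULL, 0 };
        const char *p;
        unsigned len;

        for (int i = 0; i < 3; i++) {
                dp.get_page(&dp, &p, &len);
                printf("%s (%u bytes)\n", p, len);
                dp.next_page(&dp);
        }
        return 0;
}

dm-raid1 below switches its read and write dm_io_request descriptors over to the same DM_IO_BIO form.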
diff --git a/drivers/md/dm-linear.c b/drivers/md/dm-linear.c index 4f99d267340c..53e848c10939 100644 --- a/drivers/md/dm-linear.c +++ b/drivers/md/dm-linear.c | |||
@@ -85,7 +85,8 @@ static void linear_map_bio(struct dm_target *ti, struct bio *bio) | |||
85 | 85 | ||
86 | bio->bi_bdev = lc->dev->bdev; | 86 | bio->bi_bdev = lc->dev->bdev; |
87 | if (bio_sectors(bio)) | 87 | if (bio_sectors(bio)) |
88 | bio->bi_sector = linear_map_sector(ti, bio->bi_sector); | 88 | bio->bi_iter.bi_sector = |
89 | linear_map_sector(ti, bio->bi_iter.bi_sector); | ||
89 | } | 90 | } |
90 | 91 | ||
91 | static int linear_map(struct dm_target *ti, struct bio *bio) | 92 | static int linear_map(struct dm_target *ti, struct bio *bio) |
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c index 9584443c5614..f284e0bfb25f 100644 --- a/drivers/md/dm-raid1.c +++ b/drivers/md/dm-raid1.c | |||
@@ -432,7 +432,7 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) | |||
432 | region_t region = dm_rh_bio_to_region(ms->rh, bio); | 432 | region_t region = dm_rh_bio_to_region(ms->rh, bio); |
433 | 433 | ||
434 | if (log->type->in_sync(log, region, 0)) | 434 | if (log->type->in_sync(log, region, 0)) |
435 | return choose_mirror(ms, bio->bi_sector) ? 1 : 0; | 435 | return choose_mirror(ms, bio->bi_iter.bi_sector) ? 1 : 0; |
436 | 436 | ||
437 | return 0; | 437 | return 0; |
438 | } | 438 | } |
@@ -442,15 +442,15 @@ static int mirror_available(struct mirror_set *ms, struct bio *bio) | |||
442 | */ | 442 | */ |
443 | static sector_t map_sector(struct mirror *m, struct bio *bio) | 443 | static sector_t map_sector(struct mirror *m, struct bio *bio) |
444 | { | 444 | { |
445 | if (unlikely(!bio->bi_size)) | 445 | if (unlikely(!bio->bi_iter.bi_size)) |
446 | return 0; | 446 | return 0; |
447 | return m->offset + dm_target_offset(m->ms->ti, bio->bi_sector); | 447 | return m->offset + dm_target_offset(m->ms->ti, bio->bi_iter.bi_sector); |
448 | } | 448 | } |
449 | 449 | ||
450 | static void map_bio(struct mirror *m, struct bio *bio) | 450 | static void map_bio(struct mirror *m, struct bio *bio) |
451 | { | 451 | { |
452 | bio->bi_bdev = m->dev->bdev; | 452 | bio->bi_bdev = m->dev->bdev; |
453 | bio->bi_sector = map_sector(m, bio); | 453 | bio->bi_iter.bi_sector = map_sector(m, bio); |
454 | } | 454 | } |
455 | 455 | ||
456 | static void map_region(struct dm_io_region *io, struct mirror *m, | 456 | static void map_region(struct dm_io_region *io, struct mirror *m, |
@@ -526,8 +526,8 @@ static void read_async_bio(struct mirror *m, struct bio *bio) | |||
526 | struct dm_io_region io; | 526 | struct dm_io_region io; |
527 | struct dm_io_request io_req = { | 527 | struct dm_io_request io_req = { |
528 | .bi_rw = READ, | 528 | .bi_rw = READ, |
529 | .mem.type = DM_IO_BVEC, | 529 | .mem.type = DM_IO_BIO, |
530 | .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, | 530 | .mem.ptr.bio = bio, |
531 | .notify.fn = read_callback, | 531 | .notify.fn = read_callback, |
532 | .notify.context = bio, | 532 | .notify.context = bio, |
533 | .client = m->ms->io_client, | 533 | .client = m->ms->io_client, |
@@ -559,7 +559,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads) | |||
559 | * We can only read balance if the region is in sync. | 559 | * We can only read balance if the region is in sync. |
560 | */ | 560 | */ |
561 | if (likely(region_in_sync(ms, region, 1))) | 561 | if (likely(region_in_sync(ms, region, 1))) |
562 | m = choose_mirror(ms, bio->bi_sector); | 562 | m = choose_mirror(ms, bio->bi_iter.bi_sector); |
563 | else if (m && atomic_read(&m->error_count)) | 563 | else if (m && atomic_read(&m->error_count)) |
564 | m = NULL; | 564 | m = NULL; |
565 | 565 | ||
@@ -629,8 +629,8 @@ static void do_write(struct mirror_set *ms, struct bio *bio) | |||
629 | struct mirror *m; | 629 | struct mirror *m; |
630 | struct dm_io_request io_req = { | 630 | struct dm_io_request io_req = { |
631 | .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), | 631 | .bi_rw = WRITE | (bio->bi_rw & WRITE_FLUSH_FUA), |
632 | .mem.type = DM_IO_BVEC, | 632 | .mem.type = DM_IO_BIO, |
633 | .mem.ptr.bvec = bio->bi_io_vec + bio->bi_idx, | 633 | .mem.ptr.bio = bio, |
634 | .notify.fn = write_callback, | 634 | .notify.fn = write_callback, |
635 | .notify.context = bio, | 635 | .notify.context = bio, |
636 | .client = ms->io_client, | 636 | .client = ms->io_client, |
@@ -1181,7 +1181,7 @@ static int mirror_map(struct dm_target *ti, struct bio *bio) | |||
1181 | * The region is in-sync and we can perform reads directly. | 1181 | * The region is in-sync and we can perform reads directly. |
1182 | * Store enough information so we can retry if it fails. | 1182 | * Store enough information so we can retry if it fails. |
1183 | */ | 1183 | */ |
1184 | m = choose_mirror(ms, bio->bi_sector); | 1184 | m = choose_mirror(ms, bio->bi_iter.bi_sector); |
1185 | if (unlikely(!m)) | 1185 | if (unlikely(!m)) |
1186 | return -EIO; | 1186 | return -EIO; |
1187 | 1187 | ||
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c index 69732e03eb34..b929fd5f4984 100644 --- a/drivers/md/dm-region-hash.c +++ b/drivers/md/dm-region-hash.c | |||
@@ -126,7 +126,8 @@ EXPORT_SYMBOL_GPL(dm_rh_region_to_sector); | |||
126 | 126 | ||
127 | region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) | 127 | region_t dm_rh_bio_to_region(struct dm_region_hash *rh, struct bio *bio) |
128 | { | 128 | { |
129 | return dm_rh_sector_to_region(rh, bio->bi_sector - rh->target_begin); | 129 | return dm_rh_sector_to_region(rh, bio->bi_iter.bi_sector - |
130 | rh->target_begin); | ||
130 | } | 131 | } |
131 | EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); | 132 | EXPORT_SYMBOL_GPL(dm_rh_bio_to_region); |
132 | 133 | ||
diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 717718558bd9..ebddef5237e4 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c | |||
@@ -1438,6 +1438,7 @@ out: | |||
1438 | if (full_bio) { | 1438 | if (full_bio) { |
1439 | full_bio->bi_end_io = pe->full_bio_end_io; | 1439 | full_bio->bi_end_io = pe->full_bio_end_io; |
1440 | full_bio->bi_private = pe->full_bio_private; | 1440 | full_bio->bi_private = pe->full_bio_private; |
1441 | atomic_inc(&full_bio->bi_remaining); | ||
1441 | } | 1442 | } |
1442 | free_pending_exception(pe); | 1443 | free_pending_exception(pe); |
1443 | 1444 | ||
@@ -1619,11 +1620,10 @@ static void remap_exception(struct dm_snapshot *s, struct dm_exception *e, | |||
1619 | struct bio *bio, chunk_t chunk) | 1620 | struct bio *bio, chunk_t chunk) |
1620 | { | 1621 | { |
1621 | bio->bi_bdev = s->cow->bdev; | 1622 | bio->bi_bdev = s->cow->bdev; |
1622 | bio->bi_sector = chunk_to_sector(s->store, | 1623 | bio->bi_iter.bi_sector = |
1623 | dm_chunk_number(e->new_chunk) + | 1624 | chunk_to_sector(s->store, dm_chunk_number(e->new_chunk) + |
1624 | (chunk - e->old_chunk)) + | 1625 | (chunk - e->old_chunk)) + |
1625 | (bio->bi_sector & | 1626 | (bio->bi_iter.bi_sector & s->store->chunk_mask); |
1626 | s->store->chunk_mask); | ||
1627 | } | 1627 | } |
1628 | 1628 | ||
1629 | static int snapshot_map(struct dm_target *ti, struct bio *bio) | 1629 | static int snapshot_map(struct dm_target *ti, struct bio *bio) |
@@ -1641,7 +1641,7 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) | |||
1641 | return DM_MAPIO_REMAPPED; | 1641 | return DM_MAPIO_REMAPPED; |
1642 | } | 1642 | } |
1643 | 1643 | ||
1644 | chunk = sector_to_chunk(s->store, bio->bi_sector); | 1644 | chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); |
1645 | 1645 | ||
1646 | /* Full snapshots are not usable */ | 1646 | /* Full snapshots are not usable */ |
1647 | /* To get here the table must be live so s->active is always set. */ | 1647 | /* To get here the table must be live so s->active is always set. */ |
@@ -1702,7 +1702,8 @@ static int snapshot_map(struct dm_target *ti, struct bio *bio) | |||
1702 | r = DM_MAPIO_SUBMITTED; | 1702 | r = DM_MAPIO_SUBMITTED; |
1703 | 1703 | ||
1704 | if (!pe->started && | 1704 | if (!pe->started && |
1705 | bio->bi_size == (s->store->chunk_size << SECTOR_SHIFT)) { | 1705 | bio->bi_iter.bi_size == |
1706 | (s->store->chunk_size << SECTOR_SHIFT)) { | ||
1706 | pe->started = 1; | 1707 | pe->started = 1; |
1707 | up_write(&s->lock); | 1708 | up_write(&s->lock); |
1708 | start_full_bio(pe, bio); | 1709 | start_full_bio(pe, bio); |
@@ -1758,7 +1759,7 @@ static int snapshot_merge_map(struct dm_target *ti, struct bio *bio) | |||
1758 | return DM_MAPIO_REMAPPED; | 1759 | return DM_MAPIO_REMAPPED; |
1759 | } | 1760 | } |
1760 | 1761 | ||
1761 | chunk = sector_to_chunk(s->store, bio->bi_sector); | 1762 | chunk = sector_to_chunk(s->store, bio->bi_iter.bi_sector); |
1762 | 1763 | ||
1763 | down_write(&s->lock); | 1764 | down_write(&s->lock); |
1764 | 1765 | ||
@@ -2095,7 +2096,7 @@ static int do_origin(struct dm_dev *origin, struct bio *bio) | |||
2095 | down_read(&_origins_lock); | 2096 | down_read(&_origins_lock); |
2096 | o = __lookup_origin(origin->bdev); | 2097 | o = __lookup_origin(origin->bdev); |
2097 | if (o) | 2098 | if (o) |
2098 | r = __origin_write(&o->snapshots, bio->bi_sector, bio); | 2099 | r = __origin_write(&o->snapshots, bio->bi_iter.bi_sector, bio); |
2099 | up_read(&_origins_lock); | 2100 | up_read(&_origins_lock); |
2100 | 2101 | ||
2101 | return r; | 2102 | return r; |
diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c index 73c1712dad96..d1600d2aa2e2 100644 --- a/drivers/md/dm-stripe.c +++ b/drivers/md/dm-stripe.c | |||
@@ -259,13 +259,15 @@ static int stripe_map_range(struct stripe_c *sc, struct bio *bio, | |||
259 | { | 259 | { |
260 | sector_t begin, end; | 260 | sector_t begin, end; |
261 | 261 | ||
262 | stripe_map_range_sector(sc, bio->bi_sector, target_stripe, &begin); | 262 | stripe_map_range_sector(sc, bio->bi_iter.bi_sector, |
263 | target_stripe, &begin); | ||
263 | stripe_map_range_sector(sc, bio_end_sector(bio), | 264 | stripe_map_range_sector(sc, bio_end_sector(bio), |
264 | target_stripe, &end); | 265 | target_stripe, &end); |
265 | if (begin < end) { | 266 | if (begin < end) { |
266 | bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; | 267 | bio->bi_bdev = sc->stripe[target_stripe].dev->bdev; |
267 | bio->bi_sector = begin + sc->stripe[target_stripe].physical_start; | 268 | bio->bi_iter.bi_sector = begin + |
268 | bio->bi_size = to_bytes(end - begin); | 269 | sc->stripe[target_stripe].physical_start; |
270 | bio->bi_iter.bi_size = to_bytes(end - begin); | ||
269 | return DM_MAPIO_REMAPPED; | 271 | return DM_MAPIO_REMAPPED; |
270 | } else { | 272 | } else { |
271 | /* The range doesn't map to the target stripe */ | 273 | /* The range doesn't map to the target stripe */ |
@@ -293,9 +295,10 @@ static int stripe_map(struct dm_target *ti, struct bio *bio) | |||
293 | return stripe_map_range(sc, bio, target_bio_nr); | 295 | return stripe_map_range(sc, bio, target_bio_nr); |
294 | } | 296 | } |
295 | 297 | ||
296 | stripe_map_sector(sc, bio->bi_sector, &stripe, &bio->bi_sector); | 298 | stripe_map_sector(sc, bio->bi_iter.bi_sector, |
299 | &stripe, &bio->bi_iter.bi_sector); | ||
297 | 300 | ||
298 | bio->bi_sector += sc->stripe[stripe].physical_start; | 301 | bio->bi_iter.bi_sector += sc->stripe[stripe].physical_start; |
299 | bio->bi_bdev = sc->stripe[stripe].dev->bdev; | 302 | bio->bi_bdev = sc->stripe[stripe].dev->bdev; |
300 | 303 | ||
301 | return DM_MAPIO_REMAPPED; | 304 | return DM_MAPIO_REMAPPED; |
diff --git a/drivers/md/dm-switch.c b/drivers/md/dm-switch.c index ff9ac4be4721..09a688b3d48c 100644 --- a/drivers/md/dm-switch.c +++ b/drivers/md/dm-switch.c | |||
@@ -311,11 +311,11 @@ error: | |||
311 | static int switch_map(struct dm_target *ti, struct bio *bio) | 311 | static int switch_map(struct dm_target *ti, struct bio *bio) |
312 | { | 312 | { |
313 | struct switch_ctx *sctx = ti->private; | 313 | struct switch_ctx *sctx = ti->private; |
314 | sector_t offset = dm_target_offset(ti, bio->bi_sector); | 314 | sector_t offset = dm_target_offset(ti, bio->bi_iter.bi_sector); |
315 | unsigned path_nr = switch_get_path_nr(sctx, offset); | 315 | unsigned path_nr = switch_get_path_nr(sctx, offset); |
316 | 316 | ||
317 | bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; | 317 | bio->bi_bdev = sctx->path_list[path_nr].dmdev->bdev; |
318 | bio->bi_sector = sctx->path_list[path_nr].start + offset; | 318 | bio->bi_iter.bi_sector = sctx->path_list[path_nr].start + offset; |
319 | 319 | ||
320 | return DM_MAPIO_REMAPPED; | 320 | return DM_MAPIO_REMAPPED; |
321 | } | 321 | } |
diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c index 726228b33a01..faaf944597ab 100644 --- a/drivers/md/dm-thin.c +++ b/drivers/md/dm-thin.c | |||
@@ -414,7 +414,7 @@ static bool block_size_is_power_of_two(struct pool *pool) | |||
414 | static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) | 414 | static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) |
415 | { | 415 | { |
416 | struct pool *pool = tc->pool; | 416 | struct pool *pool = tc->pool; |
417 | sector_t block_nr = bio->bi_sector; | 417 | sector_t block_nr = bio->bi_iter.bi_sector; |
418 | 418 | ||
419 | if (block_size_is_power_of_two(pool)) | 419 | if (block_size_is_power_of_two(pool)) |
420 | block_nr >>= pool->sectors_per_block_shift; | 420 | block_nr >>= pool->sectors_per_block_shift; |
@@ -427,14 +427,15 @@ static dm_block_t get_bio_block(struct thin_c *tc, struct bio *bio) | |||
427 | static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) | 427 | static void remap(struct thin_c *tc, struct bio *bio, dm_block_t block) |
428 | { | 428 | { |
429 | struct pool *pool = tc->pool; | 429 | struct pool *pool = tc->pool; |
430 | sector_t bi_sector = bio->bi_sector; | 430 | sector_t bi_sector = bio->bi_iter.bi_sector; |
431 | 431 | ||
432 | bio->bi_bdev = tc->pool_dev->bdev; | 432 | bio->bi_bdev = tc->pool_dev->bdev; |
433 | if (block_size_is_power_of_two(pool)) | 433 | if (block_size_is_power_of_two(pool)) |
434 | bio->bi_sector = (block << pool->sectors_per_block_shift) | | 434 | bio->bi_iter.bi_sector = |
435 | (bi_sector & (pool->sectors_per_block - 1)); | 435 | (block << pool->sectors_per_block_shift) | |
436 | (bi_sector & (pool->sectors_per_block - 1)); | ||
436 | else | 437 | else |
437 | bio->bi_sector = (block * pool->sectors_per_block) + | 438 | bio->bi_iter.bi_sector = (block * pool->sectors_per_block) + |
438 | sector_div(bi_sector, pool->sectors_per_block); | 439 | sector_div(bi_sector, pool->sectors_per_block); |
439 | } | 440 | } |
440 | 441 | ||
@@ -612,8 +613,10 @@ static void cell_defer_no_holder(struct thin_c *tc, struct dm_bio_prison_cell *c | |||
612 | 613 | ||
613 | static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) | 614 | static void process_prepared_mapping_fail(struct dm_thin_new_mapping *m) |
614 | { | 615 | { |
615 | if (m->bio) | 616 | if (m->bio) { |
616 | m->bio->bi_end_io = m->saved_bi_end_io; | 617 | m->bio->bi_end_io = m->saved_bi_end_io; |
618 | atomic_inc(&m->bio->bi_remaining); | ||
619 | } | ||
617 | cell_error(m->tc->pool, m->cell); | 620 | cell_error(m->tc->pool, m->cell); |
618 | list_del(&m->list); | 621 | list_del(&m->list); |
619 | mempool_free(m, m->tc->pool->mapping_pool); | 622 | mempool_free(m, m->tc->pool->mapping_pool); |
@@ -627,8 +630,10 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m) | |||
627 | int r; | 630 | int r; |
628 | 631 | ||
629 | bio = m->bio; | 632 | bio = m->bio; |
630 | if (bio) | 633 | if (bio) { |
631 | bio->bi_end_io = m->saved_bi_end_io; | 634 | bio->bi_end_io = m->saved_bi_end_io; |
635 | atomic_inc(&bio->bi_remaining); | ||
636 | } | ||
632 | 637 | ||
633 | if (m->err) { | 638 | if (m->err) { |
634 | cell_error(pool, m->cell); | 639 | cell_error(pool, m->cell); |
@@ -731,7 +736,8 @@ static void process_prepared(struct pool *pool, struct list_head *head, | |||
731 | */ | 736 | */ |
732 | static int io_overlaps_block(struct pool *pool, struct bio *bio) | 737 | static int io_overlaps_block(struct pool *pool, struct bio *bio) |
733 | { | 738 | { |
734 | return bio->bi_size == (pool->sectors_per_block << SECTOR_SHIFT); | 739 | return bio->bi_iter.bi_size == |
740 | (pool->sectors_per_block << SECTOR_SHIFT); | ||
735 | } | 741 | } |
736 | 742 | ||
737 | static int io_overwrites_block(struct pool *pool, struct bio *bio) | 743 | static int io_overwrites_block(struct pool *pool, struct bio *bio) |
@@ -1136,7 +1142,7 @@ static void process_shared_bio(struct thin_c *tc, struct bio *bio, | |||
1136 | if (bio_detain(pool, &key, bio, &cell)) | 1142 | if (bio_detain(pool, &key, bio, &cell)) |
1137 | return; | 1143 | return; |
1138 | 1144 | ||
1139 | if (bio_data_dir(bio) == WRITE && bio->bi_size) | 1145 | if (bio_data_dir(bio) == WRITE && bio->bi_iter.bi_size) |
1140 | break_sharing(tc, bio, block, &key, lookup_result, cell); | 1146 | break_sharing(tc, bio, block, &key, lookup_result, cell); |
1141 | else { | 1147 | else { |
1142 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); | 1148 | struct dm_thin_endio_hook *h = dm_per_bio_data(bio, sizeof(struct dm_thin_endio_hook)); |
@@ -1159,7 +1165,7 @@ static void provision_block(struct thin_c *tc, struct bio *bio, dm_block_t block | |||
1159 | /* | 1165 | /* |
1160 | * Remap empty bios (flushes) immediately, without provisioning. | 1166 | * Remap empty bios (flushes) immediately, without provisioning. |
1161 | */ | 1167 | */ |
1162 | if (!bio->bi_size) { | 1168 | if (!bio->bi_iter.bi_size) { |
1163 | inc_all_io_entry(pool, bio); | 1169 | inc_all_io_entry(pool, bio); |
1164 | cell_defer_no_holder(tc, cell); | 1170 | cell_defer_no_holder(tc, cell); |
1165 | 1171 | ||
@@ -1258,7 +1264,7 @@ static void process_bio_read_only(struct thin_c *tc, struct bio *bio) | |||
1258 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); | 1264 | r = dm_thin_find_block(tc->td, block, 1, &lookup_result); |
1259 | switch (r) { | 1265 | switch (r) { |
1260 | case 0: | 1266 | case 0: |
1261 | if (lookup_result.shared && (rw == WRITE) && bio->bi_size) | 1267 | if (lookup_result.shared && (rw == WRITE) && bio->bi_iter.bi_size) |
1262 | handle_unserviceable_bio(tc->pool, bio); | 1268 | handle_unserviceable_bio(tc->pool, bio); |
1263 | else { | 1269 | else { |
1264 | inc_all_io_entry(tc->pool, bio); | 1270 | inc_all_io_entry(tc->pool, bio); |
@@ -2939,7 +2945,7 @@ out_unlock: | |||
2939 | 2945 | ||
2940 | static int thin_map(struct dm_target *ti, struct bio *bio) | 2946 | static int thin_map(struct dm_target *ti, struct bio *bio) |
2941 | { | 2947 | { |
2942 | bio->bi_sector = dm_target_offset(ti, bio->bi_sector); | 2948 | bio->bi_iter.bi_sector = dm_target_offset(ti, bio->bi_iter.bi_sector); |
2943 | 2949 | ||
2944 | return thin_bio_map(ti, bio); | 2950 | return thin_bio_map(ti, bio); |
2945 | } | 2951 | } |
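dm-cache, dm-snap and dm-thin all gain an atomic_inc(&bio->bi_remaining) wherever a saved bi_end_io is restored. With the generic bio chaining added by this series, bio_endio() drops bi_remaining and the completion hook only runs once the count reaches zero, so a target whose private endio already consumed that reference has to take it back before the restored hook can complete the bio. A toy model of the counting, with a plain int standing in for the kernel's atomic_t:

#include <stdio.h>

struct fake_bio {
        int remaining;                          /* models bio->bi_remaining */
        void (*end_io)(struct fake_bio *bio);
};

static void real_completion(struct fake_bio *bio)
{
        printf("I/O complete\n");
}

/* models bio_endio(): only the last drop runs the completion hook */
static void fake_bio_endio(struct fake_bio *bio)
{
        if (--bio->remaining == 0)
                bio->end_io(bio);
}

int main(void)
{
        struct fake_bio bio = { .remaining = 1, .end_io = real_completion };

        /* the target's private endio already ran, consuming the reference */
        bio.remaining--;

        /* restoring the saved end_io therefore bumps the count again, as the
         * hunks above do, so the next endio reaches zero exactly once */
        bio.remaining++;
        fake_bio_endio(&bio);                   /* 1 -> 0: completion runs */
        return 0;
}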
diff --git a/drivers/md/dm-verity.c b/drivers/md/dm-verity.c index 4b7941db3aff..796007a5e0e1 100644 --- a/drivers/md/dm-verity.c +++ b/drivers/md/dm-verity.c | |||
@@ -73,15 +73,10 @@ struct dm_verity_io { | |||
73 | sector_t block; | 73 | sector_t block; |
74 | unsigned n_blocks; | 74 | unsigned n_blocks; |
75 | 75 | ||
76 | /* saved bio vector */ | 76 | struct bvec_iter iter; |
77 | struct bio_vec *io_vec; | ||
78 | unsigned io_vec_size; | ||
79 | 77 | ||
80 | struct work_struct work; | 78 | struct work_struct work; |
81 | 79 | ||
82 | /* A space for short vectors; longer vectors are allocated separately. */ | ||
83 | struct bio_vec io_vec_inline[DM_VERITY_IO_VEC_INLINE]; | ||
84 | |||
85 | /* | 80 | /* |
86 | * Three variably-size fields follow this struct: | 81 | * Three variably-size fields follow this struct: |
87 | * | 82 | * |
@@ -284,9 +279,10 @@ release_ret_r: | |||
284 | static int verity_verify_io(struct dm_verity_io *io) | 279 | static int verity_verify_io(struct dm_verity_io *io) |
285 | { | 280 | { |
286 | struct dm_verity *v = io->v; | 281 | struct dm_verity *v = io->v; |
282 | struct bio *bio = dm_bio_from_per_bio_data(io, | ||
283 | v->ti->per_bio_data_size); | ||
287 | unsigned b; | 284 | unsigned b; |
288 | int i; | 285 | int i; |
289 | unsigned vector = 0, offset = 0; | ||
290 | 286 | ||
291 | for (b = 0; b < io->n_blocks; b++) { | 287 | for (b = 0; b < io->n_blocks; b++) { |
292 | struct shash_desc *desc; | 288 | struct shash_desc *desc; |
@@ -336,31 +332,22 @@ test_block_hash: | |||
336 | } | 332 | } |
337 | 333 | ||
338 | todo = 1 << v->data_dev_block_bits; | 334 | todo = 1 << v->data_dev_block_bits; |
339 | do { | 335 | while (io->iter.bi_size) { |
340 | struct bio_vec *bv; | ||
341 | u8 *page; | 336 | u8 *page; |
342 | unsigned len; | 337 | struct bio_vec bv = bio_iter_iovec(bio, io->iter); |
343 | 338 | ||
344 | BUG_ON(vector >= io->io_vec_size); | 339 | page = kmap_atomic(bv.bv_page); |
345 | bv = &io->io_vec[vector]; | 340 | r = crypto_shash_update(desc, page + bv.bv_offset, |
346 | page = kmap_atomic(bv->bv_page); | 341 | bv.bv_len); |
347 | len = bv->bv_len - offset; | ||
348 | if (likely(len >= todo)) | ||
349 | len = todo; | ||
350 | r = crypto_shash_update(desc, | ||
351 | page + bv->bv_offset + offset, len); | ||
352 | kunmap_atomic(page); | 342 | kunmap_atomic(page); |
343 | |||
353 | if (r < 0) { | 344 | if (r < 0) { |
354 | DMERR("crypto_shash_update failed: %d", r); | 345 | DMERR("crypto_shash_update failed: %d", r); |
355 | return r; | 346 | return r; |
356 | } | 347 | } |
357 | offset += len; | 348 | |
358 | if (likely(offset == bv->bv_len)) { | 349 | bio_advance_iter(bio, &io->iter, bv.bv_len); |
359 | offset = 0; | 350 | } |
360 | vector++; | ||
361 | } | ||
362 | todo -= len; | ||
363 | } while (todo); | ||
364 | 351 | ||
365 | if (!v->version) { | 352 | if (!v->version) { |
366 | r = crypto_shash_update(desc, v->salt, v->salt_size); | 353 | r = crypto_shash_update(desc, v->salt, v->salt_size); |
@@ -383,8 +370,6 @@ test_block_hash: | |||
383 | return -EIO; | 370 | return -EIO; |
384 | } | 371 | } |
385 | } | 372 | } |
386 | BUG_ON(vector != io->io_vec_size); | ||
387 | BUG_ON(offset); | ||
388 | 373 | ||
389 | return 0; | 374 | return 0; |
390 | } | 375 | } |
@@ -400,10 +385,7 @@ static void verity_finish_io(struct dm_verity_io *io, int error) | |||
400 | bio->bi_end_io = io->orig_bi_end_io; | 385 | bio->bi_end_io = io->orig_bi_end_io; |
401 | bio->bi_private = io->orig_bi_private; | 386 | bio->bi_private = io->orig_bi_private; |
402 | 387 | ||
403 | if (io->io_vec != io->io_vec_inline) | 388 | bio_endio_nodec(bio, error); |
404 | mempool_free(io->io_vec, v->vec_mempool); | ||
405 | |||
406 | bio_endio(bio, error); | ||
407 | } | 389 | } |
408 | 390 | ||
409 | static void verity_work(struct work_struct *w) | 391 | static void verity_work(struct work_struct *w) |
@@ -493,9 +475,9 @@ static int verity_map(struct dm_target *ti, struct bio *bio) | |||
493 | struct dm_verity_io *io; | 475 | struct dm_verity_io *io; |
494 | 476 | ||
495 | bio->bi_bdev = v->data_dev->bdev; | 477 | bio->bi_bdev = v->data_dev->bdev; |
496 | bio->bi_sector = verity_map_sector(v, bio->bi_sector); | 478 | bio->bi_iter.bi_sector = verity_map_sector(v, bio->bi_iter.bi_sector); |
497 | 479 | ||
498 | if (((unsigned)bio->bi_sector | bio_sectors(bio)) & | 480 | if (((unsigned)bio->bi_iter.bi_sector | bio_sectors(bio)) & |
499 | ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) { | 481 | ((1 << (v->data_dev_block_bits - SECTOR_SHIFT)) - 1)) { |
500 | DMERR_LIMIT("unaligned io"); | 482 | DMERR_LIMIT("unaligned io"); |
501 | return -EIO; | 483 | return -EIO; |
@@ -514,18 +496,12 @@ static int verity_map(struct dm_target *ti, struct bio *bio) | |||
514 | io->v = v; | 496 | io->v = v; |
515 | io->orig_bi_end_io = bio->bi_end_io; | 497 | io->orig_bi_end_io = bio->bi_end_io; |
516 | io->orig_bi_private = bio->bi_private; | 498 | io->orig_bi_private = bio->bi_private; |
517 | io->block = bio->bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); | 499 | io->block = bio->bi_iter.bi_sector >> (v->data_dev_block_bits - SECTOR_SHIFT); |
518 | io->n_blocks = bio->bi_size >> v->data_dev_block_bits; | 500 | io->n_blocks = bio->bi_iter.bi_size >> v->data_dev_block_bits; |
519 | 501 | ||
520 | bio->bi_end_io = verity_end_io; | 502 | bio->bi_end_io = verity_end_io; |
521 | bio->bi_private = io; | 503 | bio->bi_private = io; |
522 | io->io_vec_size = bio_segments(bio); | 504 | io->iter = bio->bi_iter; |
523 | if (io->io_vec_size < DM_VERITY_IO_VEC_INLINE) | ||
524 | io->io_vec = io->io_vec_inline; | ||
525 | else | ||
526 | io->io_vec = mempool_alloc(v->vec_mempool, GFP_NOIO); | ||
527 | memcpy(io->io_vec, bio_iovec(bio), | ||
528 | io->io_vec_size * sizeof(struct bio_vec)); | ||
529 | 505 | ||
530 | verity_submit_prefetch(v, io); | 506 | verity_submit_prefetch(v, io); |
531 | 507 | ||
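In dm-verity the io_vec copy and the vector/offset cursor disappear: io->iter is simply a snapshot of bio->bi_iter, the hash loop walks it with bio_iter_iovec() and bio_advance_iter(), and io->block / io->n_blocks are still derived from the starting sector and byte count by shifting. A quick numeric check of that conversion, assuming 4 KiB verity data blocks (data_dev_block_bits == 12) and the usual 512-byte sectors:

#include <stdio.h>

#define SECTOR_SHIFT 9

int main(void)
{
        unsigned data_dev_block_bits = 12;      /* 4 KiB verity blocks */
        unsigned long long bi_sector = 2048;    /* start, in 512-byte units */
        unsigned bi_size = 32768;               /* bytes carried by the bio */

        unsigned long long block = bi_sector >> (data_dev_block_bits - SECTOR_SHIFT);
        unsigned n_blocks = bi_size >> data_dev_block_bits;

        printf("first block %llu, %u blocks\n", block, n_blocks);
        return 0;
}

A 32 KiB bio starting at sector 2048 therefore covers verity blocks 256 through 263.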
diff --git a/drivers/md/dm.c b/drivers/md/dm.c index b49c76284241..8c53b09b9a2c 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c | |||
@@ -575,7 +575,7 @@ static void start_io_acct(struct dm_io *io) | |||
575 | atomic_inc_return(&md->pending[rw])); | 575 | atomic_inc_return(&md->pending[rw])); |
576 | 576 | ||
577 | if (unlikely(dm_stats_used(&md->stats))) | 577 | if (unlikely(dm_stats_used(&md->stats))) |
578 | dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, | 578 | dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, |
579 | bio_sectors(bio), false, 0, &io->stats_aux); | 579 | bio_sectors(bio), false, 0, &io->stats_aux); |
580 | } | 580 | } |
581 | 581 | ||
@@ -593,7 +593,7 @@ static void end_io_acct(struct dm_io *io) | |||
593 | part_stat_unlock(); | 593 | part_stat_unlock(); |
594 | 594 | ||
595 | if (unlikely(dm_stats_used(&md->stats))) | 595 | if (unlikely(dm_stats_used(&md->stats))) |
596 | dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_sector, | 596 | dm_stats_account_io(&md->stats, bio->bi_rw, bio->bi_iter.bi_sector, |
597 | bio_sectors(bio), true, duration, &io->stats_aux); | 597 | bio_sectors(bio), true, duration, &io->stats_aux); |
598 | 598 | ||
599 | /* | 599 | /* |
@@ -742,7 +742,7 @@ static void dec_pending(struct dm_io *io, int error) | |||
742 | if (io_error == DM_ENDIO_REQUEUE) | 742 | if (io_error == DM_ENDIO_REQUEUE) |
743 | return; | 743 | return; |
744 | 744 | ||
745 | if ((bio->bi_rw & REQ_FLUSH) && bio->bi_size) { | 745 | if ((bio->bi_rw & REQ_FLUSH) && bio->bi_iter.bi_size) { |
746 | /* | 746 | /* |
747 | * Preflush done for flush with data, reissue | 747 | * Preflush done for flush with data, reissue |
748 | * without REQ_FLUSH. | 748 | * without REQ_FLUSH. |
@@ -797,7 +797,7 @@ static void end_clone_bio(struct bio *clone, int error) | |||
797 | struct dm_rq_clone_bio_info *info = clone->bi_private; | 797 | struct dm_rq_clone_bio_info *info = clone->bi_private; |
798 | struct dm_rq_target_io *tio = info->tio; | 798 | struct dm_rq_target_io *tio = info->tio; |
799 | struct bio *bio = info->orig; | 799 | struct bio *bio = info->orig; |
800 | unsigned int nr_bytes = info->orig->bi_size; | 800 | unsigned int nr_bytes = info->orig->bi_iter.bi_size; |
801 | 801 | ||
802 | bio_put(clone); | 802 | bio_put(clone); |
803 | 803 | ||
@@ -1128,7 +1128,7 @@ static void __map_bio(struct dm_target_io *tio) | |||
1128 | * this io. | 1128 | * this io. |
1129 | */ | 1129 | */ |
1130 | atomic_inc(&tio->io->io_count); | 1130 | atomic_inc(&tio->io->io_count); |
1131 | sector = clone->bi_sector; | 1131 | sector = clone->bi_iter.bi_sector; |
1132 | r = ti->type->map(ti, clone); | 1132 | r = ti->type->map(ti, clone); |
1133 | if (r == DM_MAPIO_REMAPPED) { | 1133 | if (r == DM_MAPIO_REMAPPED) { |
1134 | /* the bio has been remapped so dispatch it */ | 1134 | /* the bio has been remapped so dispatch it */ |
@@ -1155,76 +1155,32 @@ struct clone_info { | |||
1155 | struct dm_io *io; | 1155 | struct dm_io *io; |
1156 | sector_t sector; | 1156 | sector_t sector; |
1157 | sector_t sector_count; | 1157 | sector_t sector_count; |
1158 | unsigned short idx; | ||
1159 | }; | 1158 | }; |
1160 | 1159 | ||
1161 | static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) | 1160 | static void bio_setup_sector(struct bio *bio, sector_t sector, sector_t len) |
1162 | { | 1161 | { |
1163 | bio->bi_sector = sector; | 1162 | bio->bi_iter.bi_sector = sector; |
1164 | bio->bi_size = to_bytes(len); | 1163 | bio->bi_iter.bi_size = to_bytes(len); |
1165 | } | ||
1166 | |||
1167 | static void bio_setup_bv(struct bio *bio, unsigned short idx, unsigned short bv_count) | ||
1168 | { | ||
1169 | bio->bi_idx = idx; | ||
1170 | bio->bi_vcnt = idx + bv_count; | ||
1171 | bio->bi_flags &= ~(1 << BIO_SEG_VALID); | ||
1172 | } | ||
1173 | |||
1174 | static void clone_bio_integrity(struct bio *bio, struct bio *clone, | ||
1175 | unsigned short idx, unsigned len, unsigned offset, | ||
1176 | unsigned trim) | ||
1177 | { | ||
1178 | if (!bio_integrity(bio)) | ||
1179 | return; | ||
1180 | |||
1181 | bio_integrity_clone(clone, bio, GFP_NOIO); | ||
1182 | |||
1183 | if (trim) | ||
1184 | bio_integrity_trim(clone, bio_sector_offset(bio, idx, offset), len); | ||
1185 | } | ||
1186 | |||
1187 | /* | ||
1188 | * Creates a little bio that just does part of a bvec. | ||
1189 | */ | ||
1190 | static void clone_split_bio(struct dm_target_io *tio, struct bio *bio, | ||
1191 | sector_t sector, unsigned short idx, | ||
1192 | unsigned offset, unsigned len) | ||
1193 | { | ||
1194 | struct bio *clone = &tio->clone; | ||
1195 | struct bio_vec *bv = bio->bi_io_vec + idx; | ||
1196 | |||
1197 | *clone->bi_io_vec = *bv; | ||
1198 | |||
1199 | bio_setup_sector(clone, sector, len); | ||
1200 | |||
1201 | clone->bi_bdev = bio->bi_bdev; | ||
1202 | clone->bi_rw = bio->bi_rw; | ||
1203 | clone->bi_vcnt = 1; | ||
1204 | clone->bi_io_vec->bv_offset = offset; | ||
1205 | clone->bi_io_vec->bv_len = clone->bi_size; | ||
1206 | clone->bi_flags |= 1 << BIO_CLONED; | ||
1207 | |||
1208 | clone_bio_integrity(bio, clone, idx, len, offset, 1); | ||
1209 | } | 1164 | } |
1210 | 1165 | ||
1211 | /* | 1166 | /* |
1212 | * Creates a bio that consists of range of complete bvecs. | 1167 | * Creates a bio that consists of range of complete bvecs. |
1213 | */ | 1168 | */ |
1214 | static void clone_bio(struct dm_target_io *tio, struct bio *bio, | 1169 | static void clone_bio(struct dm_target_io *tio, struct bio *bio, |
1215 | sector_t sector, unsigned short idx, | 1170 | sector_t sector, unsigned len) |
1216 | unsigned short bv_count, unsigned len) | ||
1217 | { | 1171 | { |
1218 | struct bio *clone = &tio->clone; | 1172 | struct bio *clone = &tio->clone; |
1219 | unsigned trim = 0; | ||
1220 | 1173 | ||
1221 | __bio_clone(clone, bio); | 1174 | __bio_clone_fast(clone, bio); |
1222 | bio_setup_sector(clone, sector, len); | 1175 | |
1223 | bio_setup_bv(clone, idx, bv_count); | 1176 | if (bio_integrity(bio)) |
1177 | bio_integrity_clone(clone, bio, GFP_NOIO); | ||
1178 | |||
1179 | bio_advance(clone, to_bytes(sector - clone->bi_iter.bi_sector)); | ||
1180 | clone->bi_iter.bi_size = to_bytes(len); | ||
1224 | 1181 | ||
1225 | if (idx != bio->bi_idx || clone->bi_size < bio->bi_size) | 1182 | if (bio_integrity(bio)) |
1226 | trim = 1; | 1183 | bio_integrity_trim(clone, 0, len); |
1227 | clone_bio_integrity(bio, clone, idx, len, 0, trim); | ||
1228 | } | 1184 | } |
1229 | 1185 | ||
1230 | static struct dm_target_io *alloc_tio(struct clone_info *ci, | 1186 | static struct dm_target_io *alloc_tio(struct clone_info *ci, |
@@ -1257,7 +1213,7 @@ static void __clone_and_map_simple_bio(struct clone_info *ci, | |||
1257 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush | 1213 | * ci->bio->bi_max_vecs is BIO_INLINE_VECS anyway, for both flush |
1258 | * and discard, so no need for concern about wasted bvec allocations. | 1214 | * and discard, so no need for concern about wasted bvec allocations. |
1259 | */ | 1215 | */ |
1260 | __bio_clone(clone, ci->bio); | 1216 | __bio_clone_fast(clone, ci->bio); |
1261 | if (len) | 1217 | if (len) |
1262 | bio_setup_sector(clone, ci->sector, len); | 1218 | bio_setup_sector(clone, ci->sector, len); |
1263 | 1219 | ||
@@ -1286,10 +1242,7 @@ static int __send_empty_flush(struct clone_info *ci) | |||
1286 | } | 1242 | } |
1287 | 1243 | ||
1288 | static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, | 1244 | static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti, |
1289 | sector_t sector, int nr_iovecs, | 1245 | sector_t sector, unsigned len) |
1290 | unsigned short idx, unsigned short bv_count, | ||
1291 | unsigned offset, unsigned len, | ||
1292 | unsigned split_bvec) | ||
1293 | { | 1246 | { |
1294 | struct bio *bio = ci->bio; | 1247 | struct bio *bio = ci->bio; |
1295 | struct dm_target_io *tio; | 1248 | struct dm_target_io *tio; |
@@ -1303,11 +1256,8 @@ static void __clone_and_map_data_bio(struct clone_info *ci, struct dm_target *ti | |||
1303 | num_target_bios = ti->num_write_bios(ti, bio); | 1256 | num_target_bios = ti->num_write_bios(ti, bio); |
1304 | 1257 | ||
1305 | for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { | 1258 | for (target_bio_nr = 0; target_bio_nr < num_target_bios; target_bio_nr++) { |
1306 | tio = alloc_tio(ci, ti, nr_iovecs, target_bio_nr); | 1259 | tio = alloc_tio(ci, ti, 0, target_bio_nr); |
1307 | if (split_bvec) | 1260 | clone_bio(tio, bio, sector, len); |
1308 | clone_split_bio(tio, bio, sector, idx, offset, len); | ||
1309 | else | ||
1310 | clone_bio(tio, bio, sector, idx, bv_count, len); | ||
1311 | __map_bio(tio); | 1261 | __map_bio(tio); |
1312 | } | 1262 | } |
1313 | } | 1263 | } |
@@ -1379,68 +1329,13 @@ static int __send_write_same(struct clone_info *ci) | |||
1379 | } | 1329 | } |
1380 | 1330 | ||
1381 | /* | 1331 | /* |
1382 | * Find maximum number of sectors / bvecs we can process with a single bio. | ||
1383 | */ | ||
1384 | static sector_t __len_within_target(struct clone_info *ci, sector_t max, int *idx) | ||
1385 | { | ||
1386 | struct bio *bio = ci->bio; | ||
1387 | sector_t bv_len, total_len = 0; | ||
1388 | |||
1389 | for (*idx = ci->idx; max && (*idx < bio->bi_vcnt); (*idx)++) { | ||
1390 | bv_len = to_sector(bio->bi_io_vec[*idx].bv_len); | ||
1391 | |||
1392 | if (bv_len > max) | ||
1393 | break; | ||
1394 | |||
1395 | max -= bv_len; | ||
1396 | total_len += bv_len; | ||
1397 | } | ||
1398 | |||
1399 | return total_len; | ||
1400 | } | ||
1401 | |||
1402 | static int __split_bvec_across_targets(struct clone_info *ci, | ||
1403 | struct dm_target *ti, sector_t max) | ||
1404 | { | ||
1405 | struct bio *bio = ci->bio; | ||
1406 | struct bio_vec *bv = bio->bi_io_vec + ci->idx; | ||
1407 | sector_t remaining = to_sector(bv->bv_len); | ||
1408 | unsigned offset = 0; | ||
1409 | sector_t len; | ||
1410 | |||
1411 | do { | ||
1412 | if (offset) { | ||
1413 | ti = dm_table_find_target(ci->map, ci->sector); | ||
1414 | if (!dm_target_is_valid(ti)) | ||
1415 | return -EIO; | ||
1416 | |||
1417 | max = max_io_len(ci->sector, ti); | ||
1418 | } | ||
1419 | |||
1420 | len = min(remaining, max); | ||
1421 | |||
1422 | __clone_and_map_data_bio(ci, ti, ci->sector, 1, ci->idx, 0, | ||
1423 | bv->bv_offset + offset, len, 1); | ||
1424 | |||
1425 | ci->sector += len; | ||
1426 | ci->sector_count -= len; | ||
1427 | offset += to_bytes(len); | ||
1428 | } while (remaining -= len); | ||
1429 | |||
1430 | ci->idx++; | ||
1431 | |||
1432 | return 0; | ||
1433 | } | ||
1434 | |||
1435 | /* | ||
1436 | * Select the correct strategy for processing a non-flush bio. | 1332 | * Select the correct strategy for processing a non-flush bio. |
1437 | */ | 1333 | */ |
1438 | static int __split_and_process_non_flush(struct clone_info *ci) | 1334 | static int __split_and_process_non_flush(struct clone_info *ci) |
1439 | { | 1335 | { |
1440 | struct bio *bio = ci->bio; | 1336 | struct bio *bio = ci->bio; |
1441 | struct dm_target *ti; | 1337 | struct dm_target *ti; |
1442 | sector_t len, max; | 1338 | unsigned len; |
1443 | int idx; | ||
1444 | 1339 | ||
1445 | if (unlikely(bio->bi_rw & REQ_DISCARD)) | 1340 | if (unlikely(bio->bi_rw & REQ_DISCARD)) |
1446 | return __send_discard(ci); | 1341 | return __send_discard(ci); |
@@ -1451,41 +1346,14 @@ static int __split_and_process_non_flush(struct clone_info *ci) | |||
1451 | if (!dm_target_is_valid(ti)) | 1346 | if (!dm_target_is_valid(ti)) |
1452 | return -EIO; | 1347 | return -EIO; |
1453 | 1348 | ||
1454 | max = max_io_len(ci->sector, ti); | 1349 | len = min_t(sector_t, max_io_len(ci->sector, ti), ci->sector_count); |
1455 | |||
1456 | /* | ||
1457 | * Optimise for the simple case where we can do all of | ||
1458 | * the remaining io with a single clone. | ||
1459 | */ | ||
1460 | if (ci->sector_count <= max) { | ||
1461 | __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs, | ||
1462 | ci->idx, bio->bi_vcnt - ci->idx, 0, | ||
1463 | ci->sector_count, 0); | ||
1464 | ci->sector_count = 0; | ||
1465 | return 0; | ||
1466 | } | ||
1467 | |||
1468 | /* | ||
1469 | * There are some bvecs that don't span targets. | ||
1470 | * Do as many of these as possible. | ||
1471 | */ | ||
1472 | if (to_sector(bio->bi_io_vec[ci->idx].bv_len) <= max) { | ||
1473 | len = __len_within_target(ci, max, &idx); | ||
1474 | |||
1475 | __clone_and_map_data_bio(ci, ti, ci->sector, bio->bi_max_vecs, | ||
1476 | ci->idx, idx - ci->idx, 0, len, 0); | ||
1477 | 1350 | ||
1478 | ci->sector += len; | 1351 | __clone_and_map_data_bio(ci, ti, ci->sector, len); |
1479 | ci->sector_count -= len; | ||
1480 | ci->idx = idx; | ||
1481 | 1352 | ||
1482 | return 0; | 1353 | ci->sector += len; |
1483 | } | 1354 | ci->sector_count -= len; |
1484 | 1355 | ||
1485 | /* | 1356 | return 0; |
1486 | * Handle a bvec that must be split between two or more targets. | ||
1487 | */ | ||
1488 | return __split_bvec_across_targets(ci, ti, max); | ||
1489 | } | 1357 | } |
1490 | 1358 | ||
1491 | /* | 1359 | /* |
@@ -1510,8 +1378,7 @@ static void __split_and_process_bio(struct mapped_device *md, | |||
1510 | ci.io->bio = bio; | 1378 | ci.io->bio = bio; |
1511 | ci.io->md = md; | 1379 | ci.io->md = md; |
1512 | spin_lock_init(&ci.io->endio_lock); | 1380 | spin_lock_init(&ci.io->endio_lock); |
1513 | ci.sector = bio->bi_sector; | 1381 | ci.sector = bio->bi_iter.bi_sector; |
1514 | ci.idx = bio->bi_idx; | ||
1515 | 1382 | ||
1516 | start_io_acct(ci.io); | 1383 | start_io_acct(ci.io); |
1517 | 1384 | ||
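The largest simplification is in dm.c: clone_split_bio(), __len_within_target() and __split_bvec_across_targets() go away because a clone is now described purely by a starting sector and a length, produced with __bio_clone_fast() plus bio_advance() and a trimmed bi_iter.bi_size. The splitting loop reduces to "take min(max_io_len, remaining), clone, advance". A self-contained sketch of that loop, assuming for illustration that every target spans 256 sectors:

#include <stdio.h>

typedef unsigned long long sector_t;

/* hypothetical stand-in for max_io_len(): distance to the next 256-sector
 * target boundary */
static sector_t max_io_len(sector_t sector)
{
        return 256 - (sector & 255);
}

int main(void)
{
        sector_t sector = 100, count = 700;     /* models ci->sector/sector_count */

        while (count) {
                sector_t max = max_io_len(sector);
                sector_t len = count < max ? count : max;

                printf("clone: sector %llu len %llu\n", sector, len);
                sector += len;
                count -= len;
        }
        return 0;
}

For a 700-sector bio starting at sector 100 this yields clones of 156, 256, 256 and 32 sectors, each ending on one of the hypothetical target boundaries.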
diff --git a/drivers/md/faulty.c b/drivers/md/faulty.c index 3193aefe982b..e8b4574956c7 100644 --- a/drivers/md/faulty.c +++ b/drivers/md/faulty.c | |||
@@ -74,8 +74,8 @@ static void faulty_fail(struct bio *bio, int error) | |||
74 | { | 74 | { |
75 | struct bio *b = bio->bi_private; | 75 | struct bio *b = bio->bi_private; |
76 | 76 | ||
77 | b->bi_size = bio->bi_size; | 77 | b->bi_iter.bi_size = bio->bi_iter.bi_size; |
78 | b->bi_sector = bio->bi_sector; | 78 | b->bi_iter.bi_sector = bio->bi_iter.bi_sector; |
79 | 79 | ||
80 | bio_put(bio); | 80 | bio_put(bio); |
81 | 81 | ||
@@ -185,26 +185,31 @@ static void make_request(struct mddev *mddev, struct bio *bio) | |||
185 | return; | 185 | return; |
186 | } | 186 | } |
187 | 187 | ||
188 | if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), WRITE)) | 188 | if (check_sector(conf, bio->bi_iter.bi_sector, |
189 | bio_end_sector(bio), WRITE)) | ||
189 | failit = 1; | 190 | failit = 1; |
190 | if (check_mode(conf, WritePersistent)) { | 191 | if (check_mode(conf, WritePersistent)) { |
191 | add_sector(conf, bio->bi_sector, WritePersistent); | 192 | add_sector(conf, bio->bi_iter.bi_sector, |
193 | WritePersistent); | ||
192 | failit = 1; | 194 | failit = 1; |
193 | } | 195 | } |
194 | if (check_mode(conf, WriteTransient)) | 196 | if (check_mode(conf, WriteTransient)) |
195 | failit = 1; | 197 | failit = 1; |
196 | } else { | 198 | } else { |
197 | /* read request */ | 199 | /* read request */ |
198 | if (check_sector(conf, bio->bi_sector, bio_end_sector(bio), READ)) | 200 | if (check_sector(conf, bio->bi_iter.bi_sector, |
201 | bio_end_sector(bio), READ)) | ||
199 | failit = 1; | 202 | failit = 1; |
200 | if (check_mode(conf, ReadTransient)) | 203 | if (check_mode(conf, ReadTransient)) |
201 | failit = 1; | 204 | failit = 1; |
202 | if (check_mode(conf, ReadPersistent)) { | 205 | if (check_mode(conf, ReadPersistent)) { |
203 | add_sector(conf, bio->bi_sector, ReadPersistent); | 206 | add_sector(conf, bio->bi_iter.bi_sector, |
207 | ReadPersistent); | ||
204 | failit = 1; | 208 | failit = 1; |
205 | } | 209 | } |
206 | if (check_mode(conf, ReadFixable)) { | 210 | if (check_mode(conf, ReadFixable)) { |
207 | add_sector(conf, bio->bi_sector, ReadFixable); | 211 | add_sector(conf, bio->bi_iter.bi_sector, |
212 | ReadFixable); | ||
208 | failit = 1; | 213 | failit = 1; |
209 | } | 214 | } |
210 | } | 215 | } |
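The faulty.c hunk is the mechanical half of the immutable-biovec conversion: a bio's current position and remaining size now live in the embedded iterator, so bio->bi_sector and bio->bi_size become bio->bi_iter.bi_sector and bio->bi_iter.bi_size. As a rough sketch only (the helper name and the pr_debug format are made up for illustration, not taken from this commit), reading those fields looks like:

#include <linux/bio.h>
#include <linux/kernel.h>

/* Sketch: report the range a bio currently covers via its iterator. */
static void log_bio_range(struct bio *bio)
{
        /* bi_iter.bi_sector/bi_iter.bi_size replace the old bi_sector/bi_size */
        pr_debug("bio at sector %llu: %u bytes (%u sectors)\n",
                 (unsigned long long)bio->bi_iter.bi_sector,
                 bio->bi_iter.bi_size, bio_sectors(bio));
}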
diff --git a/drivers/md/linear.c b/drivers/md/linear.c index f03fabd2b37b..56f534b4a2d2 100644 --- a/drivers/md/linear.c +++ b/drivers/md/linear.c | |||
@@ -288,65 +288,65 @@ static int linear_stop (struct mddev *mddev) | |||
288 | 288 | ||
289 | static void linear_make_request(struct mddev *mddev, struct bio *bio) | 289 | static void linear_make_request(struct mddev *mddev, struct bio *bio) |
290 | { | 290 | { |
291 | char b[BDEVNAME_SIZE]; | ||
291 | struct dev_info *tmp_dev; | 292 | struct dev_info *tmp_dev; |
292 | sector_t start_sector; | 293 | struct bio *split; |
294 | sector_t start_sector, end_sector, data_offset; | ||
293 | 295 | ||
294 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 296 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
295 | md_flush_request(mddev, bio); | 297 | md_flush_request(mddev, bio); |
296 | return; | 298 | return; |
297 | } | 299 | } |
298 | 300 | ||
299 | rcu_read_lock(); | 301 | do { |
300 | tmp_dev = which_dev(mddev, bio->bi_sector); | 302 | rcu_read_lock(); |
301 | start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; | ||
302 | |||
303 | |||
304 | if (unlikely(bio->bi_sector >= (tmp_dev->end_sector) | ||
305 | || (bio->bi_sector < start_sector))) { | ||
306 | char b[BDEVNAME_SIZE]; | ||
307 | |||
308 | printk(KERN_ERR | ||
309 | "md/linear:%s: make_request: Sector %llu out of bounds on " | ||
310 | "dev %s: %llu sectors, offset %llu\n", | ||
311 | mdname(mddev), | ||
312 | (unsigned long long)bio->bi_sector, | ||
313 | bdevname(tmp_dev->rdev->bdev, b), | ||
314 | (unsigned long long)tmp_dev->rdev->sectors, | ||
315 | (unsigned long long)start_sector); | ||
316 | rcu_read_unlock(); | ||
317 | bio_io_error(bio); | ||
318 | return; | ||
319 | } | ||
320 | if (unlikely(bio_end_sector(bio) > tmp_dev->end_sector)) { | ||
321 | /* This bio crosses a device boundary, so we have to | ||
322 | * split it. | ||
323 | */ | ||
324 | struct bio_pair *bp; | ||
325 | sector_t end_sector = tmp_dev->end_sector; | ||
326 | 303 | ||
327 | rcu_read_unlock(); | 304 | tmp_dev = which_dev(mddev, bio->bi_iter.bi_sector); |
328 | 305 | start_sector = tmp_dev->end_sector - tmp_dev->rdev->sectors; | |
329 | bp = bio_split(bio, end_sector - bio->bi_sector); | 306 | end_sector = tmp_dev->end_sector; |
307 | data_offset = tmp_dev->rdev->data_offset; | ||
308 | bio->bi_bdev = tmp_dev->rdev->bdev; | ||
330 | 309 | ||
331 | linear_make_request(mddev, &bp->bio1); | 310 | rcu_read_unlock(); |
332 | linear_make_request(mddev, &bp->bio2); | ||
333 | bio_pair_release(bp); | ||
334 | return; | ||
335 | } | ||
336 | |||
337 | bio->bi_bdev = tmp_dev->rdev->bdev; | ||
338 | bio->bi_sector = bio->bi_sector - start_sector | ||
339 | + tmp_dev->rdev->data_offset; | ||
340 | rcu_read_unlock(); | ||
341 | 311 | ||
342 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | 312 | if (unlikely(bio->bi_iter.bi_sector >= end_sector || |
343 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { | 313 | bio->bi_iter.bi_sector < start_sector)) |
344 | /* Just ignore it */ | 314 | goto out_of_bounds; |
345 | bio_endio(bio, 0); | 315 | |
346 | return; | 316 | if (unlikely(bio_end_sector(bio) > end_sector)) { |
347 | } | 317 | /* This bio crosses a device boundary, so we have to |
318 | * split it. | ||
319 | */ | ||
320 | split = bio_split(bio, end_sector - | ||
321 | bio->bi_iter.bi_sector, | ||
322 | GFP_NOIO, fs_bio_set); | ||
323 | bio_chain(split, bio); | ||
324 | } else { | ||
325 | split = bio; | ||
326 | } | ||
348 | 327 | ||
349 | generic_make_request(bio); | 328 | split->bi_iter.bi_sector = split->bi_iter.bi_sector - |
329 | start_sector + data_offset; | ||
330 | |||
331 | if (unlikely((split->bi_rw & REQ_DISCARD) && | ||
332 | !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { | ||
333 | /* Just ignore it */ | ||
334 | bio_endio(split, 0); | ||
335 | } else | ||
336 | generic_make_request(split); | ||
337 | } while (split != bio); | ||
338 | return; | ||
339 | |||
340 | out_of_bounds: | ||
341 | printk(KERN_ERR | ||
342 | "md/linear:%s: make_request: Sector %llu out of bounds on " | ||
343 | "dev %s: %llu sectors, offset %llu\n", | ||
344 | mdname(mddev), | ||
345 | (unsigned long long)bio->bi_iter.bi_sector, | ||
346 | bdevname(tmp_dev->rdev->bdev, b), | ||
347 | (unsigned long long)tmp_dev->rdev->sectors, | ||
348 | (unsigned long long)start_sector); | ||
349 | bio_io_error(bio); | ||
350 | } | 350 | } |
351 | 351 | ||
352 | static void linear_status (struct seq_file *seq, struct mddev *mddev) | 352 | static void linear_status (struct seq_file *seq, struct mddev *mddev) |
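The more interesting change in linear.c is structural: instead of bio_pair_split(), which could only cut a single-page bio once, the driver now loops, carving off the part of the bio that fits before the next device boundary with the new bio_split(), chaining each fragment to the parent with bio_chain() so the parent completes only after every fragment, and resubmitting until the parent itself is issued. The same loop shape reappears in raid0.c and raid10.c below. A simplified, self-contained sketch of the pattern, assuming power-of-two boundaries and leaving the driver-specific remapping as a comment:

#include <linux/bio.h>
#include <linux/blkdev.h>

static void split_and_submit(struct bio *bio, unsigned int boundary_sects)
{
        struct bio *split;

        do {
                /* sectors left before the boundary the bio currently starts in */
                unsigned int sectors = boundary_sects -
                        (bio->bi_iter.bi_sector & (boundary_sects - 1));

                if (sectors < bio_sectors(bio)) {
                        split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
                        bio_chain(split, bio);  /* parent completes last */
                } else {
                        split = bio;            /* last (or only) fragment */
                }

                /* a real driver remaps split->bi_iter.bi_sector and
                 * split->bi_bdev here, as linear/raid0 do above */
                generic_make_request(split);
        } while (split != bio);
}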
diff --git a/drivers/md/md.c b/drivers/md/md.c index 40c531359a15..4ad5cc4e63e8 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c | |||
@@ -393,7 +393,7 @@ static void md_submit_flush_data(struct work_struct *ws) | |||
393 | struct mddev *mddev = container_of(ws, struct mddev, flush_work); | 393 | struct mddev *mddev = container_of(ws, struct mddev, flush_work); |
394 | struct bio *bio = mddev->flush_bio; | 394 | struct bio *bio = mddev->flush_bio; |
395 | 395 | ||
396 | if (bio->bi_size == 0) | 396 | if (bio->bi_iter.bi_size == 0) |
397 | /* an empty barrier - all done */ | 397 | /* an empty barrier - all done */ |
398 | bio_endio(bio, 0); | 398 | bio_endio(bio, 0); |
399 | else { | 399 | else { |
@@ -754,7 +754,7 @@ void md_super_write(struct mddev *mddev, struct md_rdev *rdev, | |||
754 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); | 754 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, mddev); |
755 | 755 | ||
756 | bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; | 756 | bio->bi_bdev = rdev->meta_bdev ? rdev->meta_bdev : rdev->bdev; |
757 | bio->bi_sector = sector; | 757 | bio->bi_iter.bi_sector = sector; |
758 | bio_add_page(bio, page, size, 0); | 758 | bio_add_page(bio, page, size, 0); |
759 | bio->bi_private = rdev; | 759 | bio->bi_private = rdev; |
760 | bio->bi_end_io = super_written; | 760 | bio->bi_end_io = super_written; |
@@ -782,18 +782,16 @@ int sync_page_io(struct md_rdev *rdev, sector_t sector, int size, | |||
782 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); | 782 | struct bio *bio = bio_alloc_mddev(GFP_NOIO, 1, rdev->mddev); |
783 | int ret; | 783 | int ret; |
784 | 784 | ||
785 | rw |= REQ_SYNC; | ||
786 | |||
787 | bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? | 785 | bio->bi_bdev = (metadata_op && rdev->meta_bdev) ? |
788 | rdev->meta_bdev : rdev->bdev; | 786 | rdev->meta_bdev : rdev->bdev; |
789 | if (metadata_op) | 787 | if (metadata_op) |
790 | bio->bi_sector = sector + rdev->sb_start; | 788 | bio->bi_iter.bi_sector = sector + rdev->sb_start; |
791 | else if (rdev->mddev->reshape_position != MaxSector && | 789 | else if (rdev->mddev->reshape_position != MaxSector && |
792 | (rdev->mddev->reshape_backwards == | 790 | (rdev->mddev->reshape_backwards == |
793 | (sector >= rdev->mddev->reshape_position))) | 791 | (sector >= rdev->mddev->reshape_position))) |
794 | bio->bi_sector = sector + rdev->new_data_offset; | 792 | bio->bi_iter.bi_sector = sector + rdev->new_data_offset; |
795 | else | 793 | else |
796 | bio->bi_sector = sector + rdev->data_offset; | 794 | bio->bi_iter.bi_sector = sector + rdev->data_offset; |
797 | bio_add_page(bio, page, size, 0); | 795 | bio_add_page(bio, page, size, 0); |
798 | submit_bio_wait(rw, bio); | 796 | submit_bio_wait(rw, bio); |
799 | 797 | ||
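In md.c, sync_page_io() no longer ORs REQ_SYNC into the request flags because submit_bio_wait() already does that itself and supplies its own completion. A minimal synchronous read built on the same helper might look like the sketch below; the wrapper name and the bare -ENOMEM handling are illustrative, not code from this commit:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/fs.h>

static int read_page_sync(struct block_device *bdev, sector_t sector,
                          struct page *page, unsigned int size)
{
        struct bio *bio = bio_alloc(GFP_NOIO, 1);
        int ret;

        if (!bio)
                return -ENOMEM;
        bio->bi_bdev = bdev;
        bio->bi_iter.bi_sector = sector;        /* position via the iterator */
        bio_add_page(bio, page, size, 0);

        /* submit_bio_wait() marks the bio REQ_SYNC and blocks until done */
        ret = submit_bio_wait(READ, bio);
        bio_put(bio);
        return ret;
}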
diff --git a/drivers/md/multipath.c b/drivers/md/multipath.c index 1642eae75a33..849ad39f547b 100644 --- a/drivers/md/multipath.c +++ b/drivers/md/multipath.c | |||
@@ -100,7 +100,7 @@ static void multipath_end_request(struct bio *bio, int error) | |||
100 | md_error (mp_bh->mddev, rdev); | 100 | md_error (mp_bh->mddev, rdev); |
101 | printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", | 101 | printk(KERN_ERR "multipath: %s: rescheduling sector %llu\n", |
102 | bdevname(rdev->bdev,b), | 102 | bdevname(rdev->bdev,b), |
103 | (unsigned long long)bio->bi_sector); | 103 | (unsigned long long)bio->bi_iter.bi_sector); |
104 | multipath_reschedule_retry(mp_bh); | 104 | multipath_reschedule_retry(mp_bh); |
105 | } else | 105 | } else |
106 | multipath_end_bh_io(mp_bh, error); | 106 | multipath_end_bh_io(mp_bh, error); |
@@ -132,7 +132,7 @@ static void multipath_make_request(struct mddev *mddev, struct bio * bio) | |||
132 | multipath = conf->multipaths + mp_bh->path; | 132 | multipath = conf->multipaths + mp_bh->path; |
133 | 133 | ||
134 | mp_bh->bio = *bio; | 134 | mp_bh->bio = *bio; |
135 | mp_bh->bio.bi_sector += multipath->rdev->data_offset; | 135 | mp_bh->bio.bi_iter.bi_sector += multipath->rdev->data_offset; |
136 | mp_bh->bio.bi_bdev = multipath->rdev->bdev; | 136 | mp_bh->bio.bi_bdev = multipath->rdev->bdev; |
137 | mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; | 137 | mp_bh->bio.bi_rw |= REQ_FAILFAST_TRANSPORT; |
138 | mp_bh->bio.bi_end_io = multipath_end_request; | 138 | mp_bh->bio.bi_end_io = multipath_end_request; |
@@ -355,21 +355,22 @@ static void multipathd(struct md_thread *thread) | |||
355 | spin_unlock_irqrestore(&conf->device_lock, flags); | 355 | spin_unlock_irqrestore(&conf->device_lock, flags); |
356 | 356 | ||
357 | bio = &mp_bh->bio; | 357 | bio = &mp_bh->bio; |
358 | bio->bi_sector = mp_bh->master_bio->bi_sector; | 358 | bio->bi_iter.bi_sector = mp_bh->master_bio->bi_iter.bi_sector; |
359 | 359 | ||
360 | if ((mp_bh->path = multipath_map (conf))<0) { | 360 | if ((mp_bh->path = multipath_map (conf))<0) { |
361 | printk(KERN_ALERT "multipath: %s: unrecoverable IO read" | 361 | printk(KERN_ALERT "multipath: %s: unrecoverable IO read" |
362 | " error for block %llu\n", | 362 | " error for block %llu\n", |
363 | bdevname(bio->bi_bdev,b), | 363 | bdevname(bio->bi_bdev,b), |
364 | (unsigned long long)bio->bi_sector); | 364 | (unsigned long long)bio->bi_iter.bi_sector); |
365 | multipath_end_bh_io(mp_bh, -EIO); | 365 | multipath_end_bh_io(mp_bh, -EIO); |
366 | } else { | 366 | } else { |
367 | printk(KERN_ERR "multipath: %s: redirecting sector %llu" | 367 | printk(KERN_ERR "multipath: %s: redirecting sector %llu" |
368 | " to another IO path\n", | 368 | " to another IO path\n", |
369 | bdevname(bio->bi_bdev,b), | 369 | bdevname(bio->bi_bdev,b), |
370 | (unsigned long long)bio->bi_sector); | 370 | (unsigned long long)bio->bi_iter.bi_sector); |
371 | *bio = *(mp_bh->master_bio); | 371 | *bio = *(mp_bh->master_bio); |
372 | bio->bi_sector += conf->multipaths[mp_bh->path].rdev->data_offset; | 372 | bio->bi_iter.bi_sector += |
373 | conf->multipaths[mp_bh->path].rdev->data_offset; | ||
373 | bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; | 374 | bio->bi_bdev = conf->multipaths[mp_bh->path].rdev->bdev; |
374 | bio->bi_rw |= REQ_FAILFAST_TRANSPORT; | 375 | bio->bi_rw |= REQ_FAILFAST_TRANSPORT; |
375 | bio->bi_end_io = multipath_end_request; | 376 | bio->bi_end_io = multipath_end_request; |
diff --git a/drivers/md/raid0.c b/drivers/md/raid0.c index c4d420b7d2f4..407a99e46f69 100644 --- a/drivers/md/raid0.c +++ b/drivers/md/raid0.c | |||
@@ -501,10 +501,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev, | |||
501 | unsigned int chunk_sects, struct bio *bio) | 501 | unsigned int chunk_sects, struct bio *bio) |
502 | { | 502 | { |
503 | if (likely(is_power_of_2(chunk_sects))) { | 503 | if (likely(is_power_of_2(chunk_sects))) { |
504 | return chunk_sects >= ((bio->bi_sector & (chunk_sects-1)) | 504 | return chunk_sects >= |
505 | ((bio->bi_iter.bi_sector & (chunk_sects-1)) | ||
505 | + bio_sectors(bio)); | 506 | + bio_sectors(bio)); |
506 | } else{ | 507 | } else{ |
507 | sector_t sector = bio->bi_sector; | 508 | sector_t sector = bio->bi_iter.bi_sector; |
508 | return chunk_sects >= (sector_div(sector, chunk_sects) | 509 | return chunk_sects >= (sector_div(sector, chunk_sects) |
509 | + bio_sectors(bio)); | 510 | + bio_sectors(bio)); |
510 | } | 511 | } |
@@ -512,64 +513,44 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev, | |||
512 | 513 | ||
513 | static void raid0_make_request(struct mddev *mddev, struct bio *bio) | 514 | static void raid0_make_request(struct mddev *mddev, struct bio *bio) |
514 | { | 515 | { |
515 | unsigned int chunk_sects; | ||
516 | sector_t sector_offset; | ||
517 | struct strip_zone *zone; | 516 | struct strip_zone *zone; |
518 | struct md_rdev *tmp_dev; | 517 | struct md_rdev *tmp_dev; |
518 | struct bio *split; | ||
519 | 519 | ||
520 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | 520 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { |
521 | md_flush_request(mddev, bio); | 521 | md_flush_request(mddev, bio); |
522 | return; | 522 | return; |
523 | } | 523 | } |
524 | 524 | ||
525 | chunk_sects = mddev->chunk_sectors; | 525 | do { |
526 | if (unlikely(!is_io_in_chunk_boundary(mddev, chunk_sects, bio))) { | 526 | sector_t sector = bio->bi_iter.bi_sector; |
527 | sector_t sector = bio->bi_sector; | 527 | unsigned chunk_sects = mddev->chunk_sectors; |
528 | struct bio_pair *bp; | ||
529 | /* Sanity check -- queue functions should prevent this happening */ | ||
530 | if (bio_segments(bio) > 1) | ||
531 | goto bad_map; | ||
532 | /* This is a one page bio that upper layers | ||
533 | * refuse to split for us, so we need to split it. | ||
534 | */ | ||
535 | if (likely(is_power_of_2(chunk_sects))) | ||
536 | bp = bio_split(bio, chunk_sects - (sector & | ||
537 | (chunk_sects-1))); | ||
538 | else | ||
539 | bp = bio_split(bio, chunk_sects - | ||
540 | sector_div(sector, chunk_sects)); | ||
541 | raid0_make_request(mddev, &bp->bio1); | ||
542 | raid0_make_request(mddev, &bp->bio2); | ||
543 | bio_pair_release(bp); | ||
544 | return; | ||
545 | } | ||
546 | 528 | ||
547 | sector_offset = bio->bi_sector; | 529 | unsigned sectors = chunk_sects - |
548 | zone = find_zone(mddev->private, §or_offset); | 530 | (likely(is_power_of_2(chunk_sects)) |
549 | tmp_dev = map_sector(mddev, zone, bio->bi_sector, | 531 | ? (sector & (chunk_sects-1)) |
550 | §or_offset); | 532 | : sector_div(sector, chunk_sects)); |
551 | bio->bi_bdev = tmp_dev->bdev; | ||
552 | bio->bi_sector = sector_offset + zone->dev_start + | ||
553 | tmp_dev->data_offset; | ||
554 | |||
555 | if (unlikely((bio->bi_rw & REQ_DISCARD) && | ||
556 | !blk_queue_discard(bdev_get_queue(bio->bi_bdev)))) { | ||
557 | /* Just ignore it */ | ||
558 | bio_endio(bio, 0); | ||
559 | return; | ||
560 | } | ||
561 | 533 | ||
562 | generic_make_request(bio); | 534 | if (sectors < bio_sectors(bio)) { |
563 | return; | 535 | split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set); |
564 | 536 | bio_chain(split, bio); | |
565 | bad_map: | 537 | } else { |
566 | printk("md/raid0:%s: make_request bug: can't convert block across chunks" | 538 | split = bio; |
567 | " or bigger than %dk %llu %d\n", | 539 | } |
568 | mdname(mddev), chunk_sects / 2, | ||
569 | (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); | ||
570 | 540 | ||
571 | bio_io_error(bio); | 541 | zone = find_zone(mddev->private, §or); |
572 | return; | 542 | tmp_dev = map_sector(mddev, zone, sector, §or); |
543 | split->bi_bdev = tmp_dev->bdev; | ||
544 | split->bi_iter.bi_sector = sector + zone->dev_start + | ||
545 | tmp_dev->data_offset; | ||
546 | |||
547 | if (unlikely((split->bi_rw & REQ_DISCARD) && | ||
548 | !blk_queue_discard(bdev_get_queue(split->bi_bdev)))) { | ||
549 | /* Just ignore it */ | ||
550 | bio_endio(split, 0); | ||
551 | } else | ||
552 | generic_make_request(split); | ||
553 | } while (split != bio); | ||
573 | } | 554 | } |
574 | 555 | ||
575 | static void raid0_status(struct seq_file *seq, struct mddev *mddev) | 556 | static void raid0_status(struct seq_file *seq, struct mddev *mddev) |
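raid0 adopts the same split-and-chain loop, but must cope with chunk sizes that are not a power of two, so the "sectors to the next boundary" computation keeps both branches. Pulled out on its own, purely as an illustration of what raid0 computes inline above:

#include <linux/bio.h>
#include <linux/kernel.h>
#include <linux/log2.h>

/* Sectors remaining before the chunk boundary the bio currently starts in.
 * sector_div() divides in place and returns the remainder. */
static unsigned int sectors_to_chunk_end(struct bio *bio,
                                         unsigned int chunk_sects)
{
        sector_t sector = bio->bi_iter.bi_sector;

        if (likely(is_power_of_2(chunk_sects)))
                return chunk_sects - (sector & (chunk_sects - 1));

        return chunk_sects - sector_div(sector, chunk_sects);
}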
diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index a49cfcc7a343..fd3a2a14b587 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c | |||
@@ -229,7 +229,7 @@ static void call_bio_endio(struct r1bio *r1_bio) | |||
229 | int done; | 229 | int done; |
230 | struct r1conf *conf = r1_bio->mddev->private; | 230 | struct r1conf *conf = r1_bio->mddev->private; |
231 | sector_t start_next_window = r1_bio->start_next_window; | 231 | sector_t start_next_window = r1_bio->start_next_window; |
232 | sector_t bi_sector = bio->bi_sector; | 232 | sector_t bi_sector = bio->bi_iter.bi_sector; |
233 | 233 | ||
234 | if (bio->bi_phys_segments) { | 234 | if (bio->bi_phys_segments) { |
235 | unsigned long flags; | 235 | unsigned long flags; |
@@ -265,9 +265,8 @@ static void raid_end_bio_io(struct r1bio *r1_bio) | |||
265 | if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { | 265 | if (!test_and_set_bit(R1BIO_Returned, &r1_bio->state)) { |
266 | pr_debug("raid1: sync end %s on sectors %llu-%llu\n", | 266 | pr_debug("raid1: sync end %s on sectors %llu-%llu\n", |
267 | (bio_data_dir(bio) == WRITE) ? "write" : "read", | 267 | (bio_data_dir(bio) == WRITE) ? "write" : "read", |
268 | (unsigned long long) bio->bi_sector, | 268 | (unsigned long long) bio->bi_iter.bi_sector, |
269 | (unsigned long long) bio->bi_sector + | 269 | (unsigned long long) bio_end_sector(bio) - 1); |
270 | bio_sectors(bio) - 1); | ||
271 | 270 | ||
272 | call_bio_endio(r1_bio); | 271 | call_bio_endio(r1_bio); |
273 | } | 272 | } |
@@ -466,9 +465,8 @@ static void raid1_end_write_request(struct bio *bio, int error) | |||
466 | struct bio *mbio = r1_bio->master_bio; | 465 | struct bio *mbio = r1_bio->master_bio; |
467 | pr_debug("raid1: behind end write sectors" | 466 | pr_debug("raid1: behind end write sectors" |
468 | " %llu-%llu\n", | 467 | " %llu-%llu\n", |
469 | (unsigned long long) mbio->bi_sector, | 468 | (unsigned long long) mbio->bi_iter.bi_sector, |
470 | (unsigned long long) mbio->bi_sector + | 469 | (unsigned long long) bio_end_sector(mbio) - 1); |
471 | bio_sectors(mbio) - 1); | ||
472 | call_bio_endio(r1_bio); | 470 | call_bio_endio(r1_bio); |
473 | } | 471 | } |
474 | } | 472 | } |
@@ -875,7 +873,7 @@ static bool need_to_wait_for_sync(struct r1conf *conf, struct bio *bio) | |||
875 | else if ((conf->next_resync - RESYNC_WINDOW_SECTORS | 873 | else if ((conf->next_resync - RESYNC_WINDOW_SECTORS |
876 | >= bio_end_sector(bio)) || | 874 | >= bio_end_sector(bio)) || |
877 | (conf->next_resync + NEXT_NORMALIO_DISTANCE | 875 | (conf->next_resync + NEXT_NORMALIO_DISTANCE |
878 | <= bio->bi_sector)) | 876 | <= bio->bi_iter.bi_sector)) |
879 | wait = false; | 877 | wait = false; |
880 | else | 878 | else |
881 | wait = true; | 879 | wait = true; |
@@ -913,14 +911,14 @@ static sector_t wait_barrier(struct r1conf *conf, struct bio *bio) | |||
913 | 911 | ||
914 | if (bio && bio_data_dir(bio) == WRITE) { | 912 | if (bio && bio_data_dir(bio) == WRITE) { |
915 | if (conf->next_resync + NEXT_NORMALIO_DISTANCE | 913 | if (conf->next_resync + NEXT_NORMALIO_DISTANCE |
916 | <= bio->bi_sector) { | 914 | <= bio->bi_iter.bi_sector) { |
917 | if (conf->start_next_window == MaxSector) | 915 | if (conf->start_next_window == MaxSector) |
918 | conf->start_next_window = | 916 | conf->start_next_window = |
919 | conf->next_resync + | 917 | conf->next_resync + |
920 | NEXT_NORMALIO_DISTANCE; | 918 | NEXT_NORMALIO_DISTANCE; |
921 | 919 | ||
922 | if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) | 920 | if ((conf->start_next_window + NEXT_NORMALIO_DISTANCE) |
923 | <= bio->bi_sector) | 921 | <= bio->bi_iter.bi_sector) |
924 | conf->next_window_requests++; | 922 | conf->next_window_requests++; |
925 | else | 923 | else |
926 | conf->current_window_requests++; | 924 | conf->current_window_requests++; |
@@ -1027,7 +1025,8 @@ do_sync_io: | |||
1027 | if (bvecs[i].bv_page) | 1025 | if (bvecs[i].bv_page) |
1028 | put_page(bvecs[i].bv_page); | 1026 | put_page(bvecs[i].bv_page); |
1029 | kfree(bvecs); | 1027 | kfree(bvecs); |
1030 | pr_debug("%dB behind alloc failed, doing sync I/O\n", bio->bi_size); | 1028 | pr_debug("%dB behind alloc failed, doing sync I/O\n", |
1029 | bio->bi_iter.bi_size); | ||
1031 | } | 1030 | } |
1032 | 1031 | ||
1033 | struct raid1_plug_cb { | 1032 | struct raid1_plug_cb { |
@@ -1107,7 +1106,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1107 | 1106 | ||
1108 | if (bio_data_dir(bio) == WRITE && | 1107 | if (bio_data_dir(bio) == WRITE && |
1109 | bio_end_sector(bio) > mddev->suspend_lo && | 1108 | bio_end_sector(bio) > mddev->suspend_lo && |
1110 | bio->bi_sector < mddev->suspend_hi) { | 1109 | bio->bi_iter.bi_sector < mddev->suspend_hi) { |
1111 | /* As the suspend_* range is controlled by | 1110 | /* As the suspend_* range is controlled by |
1112 | * userspace, we want an interruptible | 1111 | * userspace, we want an interruptible |
1113 | * wait. | 1112 | * wait. |
@@ -1118,7 +1117,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1118 | prepare_to_wait(&conf->wait_barrier, | 1117 | prepare_to_wait(&conf->wait_barrier, |
1119 | &w, TASK_INTERRUPTIBLE); | 1118 | &w, TASK_INTERRUPTIBLE); |
1120 | if (bio_end_sector(bio) <= mddev->suspend_lo || | 1119 | if (bio_end_sector(bio) <= mddev->suspend_lo || |
1121 | bio->bi_sector >= mddev->suspend_hi) | 1120 | bio->bi_iter.bi_sector >= mddev->suspend_hi) |
1122 | break; | 1121 | break; |
1123 | schedule(); | 1122 | schedule(); |
1124 | } | 1123 | } |
@@ -1140,7 +1139,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1140 | r1_bio->sectors = bio_sectors(bio); | 1139 | r1_bio->sectors = bio_sectors(bio); |
1141 | r1_bio->state = 0; | 1140 | r1_bio->state = 0; |
1142 | r1_bio->mddev = mddev; | 1141 | r1_bio->mddev = mddev; |
1143 | r1_bio->sector = bio->bi_sector; | 1142 | r1_bio->sector = bio->bi_iter.bi_sector; |
1144 | 1143 | ||
1145 | /* We might need to issue multiple reads to different | 1144 | /* We might need to issue multiple reads to different |
1146 | * devices if there are bad blocks around, so we keep | 1145 | * devices if there are bad blocks around, so we keep |
@@ -1180,12 +1179,13 @@ read_again: | |||
1180 | r1_bio->read_disk = rdisk; | 1179 | r1_bio->read_disk = rdisk; |
1181 | 1180 | ||
1182 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1181 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
1183 | bio_trim(read_bio, r1_bio->sector - bio->bi_sector, | 1182 | bio_trim(read_bio, r1_bio->sector - bio->bi_iter.bi_sector, |
1184 | max_sectors); | 1183 | max_sectors); |
1185 | 1184 | ||
1186 | r1_bio->bios[rdisk] = read_bio; | 1185 | r1_bio->bios[rdisk] = read_bio; |
1187 | 1186 | ||
1188 | read_bio->bi_sector = r1_bio->sector + mirror->rdev->data_offset; | 1187 | read_bio->bi_iter.bi_sector = r1_bio->sector + |
1188 | mirror->rdev->data_offset; | ||
1189 | read_bio->bi_bdev = mirror->rdev->bdev; | 1189 | read_bio->bi_bdev = mirror->rdev->bdev; |
1190 | read_bio->bi_end_io = raid1_end_read_request; | 1190 | read_bio->bi_end_io = raid1_end_read_request; |
1191 | read_bio->bi_rw = READ | do_sync; | 1191 | read_bio->bi_rw = READ | do_sync; |
@@ -1197,7 +1197,7 @@ read_again: | |||
1197 | */ | 1197 | */ |
1198 | 1198 | ||
1199 | sectors_handled = (r1_bio->sector + max_sectors | 1199 | sectors_handled = (r1_bio->sector + max_sectors |
1200 | - bio->bi_sector); | 1200 | - bio->bi_iter.bi_sector); |
1201 | r1_bio->sectors = max_sectors; | 1201 | r1_bio->sectors = max_sectors; |
1202 | spin_lock_irq(&conf->device_lock); | 1202 | spin_lock_irq(&conf->device_lock); |
1203 | if (bio->bi_phys_segments == 0) | 1203 | if (bio->bi_phys_segments == 0) |
@@ -1218,7 +1218,8 @@ read_again: | |||
1218 | r1_bio->sectors = bio_sectors(bio) - sectors_handled; | 1218 | r1_bio->sectors = bio_sectors(bio) - sectors_handled; |
1219 | r1_bio->state = 0; | 1219 | r1_bio->state = 0; |
1220 | r1_bio->mddev = mddev; | 1220 | r1_bio->mddev = mddev; |
1221 | r1_bio->sector = bio->bi_sector + sectors_handled; | 1221 | r1_bio->sector = bio->bi_iter.bi_sector + |
1222 | sectors_handled; | ||
1222 | goto read_again; | 1223 | goto read_again; |
1223 | } else | 1224 | } else |
1224 | generic_make_request(read_bio); | 1225 | generic_make_request(read_bio); |
@@ -1321,7 +1322,7 @@ read_again: | |||
1321 | if (r1_bio->bios[j]) | 1322 | if (r1_bio->bios[j]) |
1322 | rdev_dec_pending(conf->mirrors[j].rdev, mddev); | 1323 | rdev_dec_pending(conf->mirrors[j].rdev, mddev); |
1323 | r1_bio->state = 0; | 1324 | r1_bio->state = 0; |
1324 | allow_barrier(conf, start_next_window, bio->bi_sector); | 1325 | allow_barrier(conf, start_next_window, bio->bi_iter.bi_sector); |
1325 | md_wait_for_blocked_rdev(blocked_rdev, mddev); | 1326 | md_wait_for_blocked_rdev(blocked_rdev, mddev); |
1326 | start_next_window = wait_barrier(conf, bio); | 1327 | start_next_window = wait_barrier(conf, bio); |
1327 | /* | 1328 | /* |
@@ -1348,7 +1349,7 @@ read_again: | |||
1348 | bio->bi_phys_segments++; | 1349 | bio->bi_phys_segments++; |
1349 | spin_unlock_irq(&conf->device_lock); | 1350 | spin_unlock_irq(&conf->device_lock); |
1350 | } | 1351 | } |
1351 | sectors_handled = r1_bio->sector + max_sectors - bio->bi_sector; | 1352 | sectors_handled = r1_bio->sector + max_sectors - bio->bi_iter.bi_sector; |
1352 | 1353 | ||
1353 | atomic_set(&r1_bio->remaining, 1); | 1354 | atomic_set(&r1_bio->remaining, 1); |
1354 | atomic_set(&r1_bio->behind_remaining, 0); | 1355 | atomic_set(&r1_bio->behind_remaining, 0); |
@@ -1360,7 +1361,7 @@ read_again: | |||
1360 | continue; | 1361 | continue; |
1361 | 1362 | ||
1362 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1363 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
1363 | bio_trim(mbio, r1_bio->sector - bio->bi_sector, max_sectors); | 1364 | bio_trim(mbio, r1_bio->sector - bio->bi_iter.bi_sector, max_sectors); |
1364 | 1365 | ||
1365 | if (first_clone) { | 1366 | if (first_clone) { |
1366 | /* do behind I/O ? | 1367 | /* do behind I/O ? |
@@ -1394,7 +1395,7 @@ read_again: | |||
1394 | 1395 | ||
1395 | r1_bio->bios[i] = mbio; | 1396 | r1_bio->bios[i] = mbio; |
1396 | 1397 | ||
1397 | mbio->bi_sector = (r1_bio->sector + | 1398 | mbio->bi_iter.bi_sector = (r1_bio->sector + |
1398 | conf->mirrors[i].rdev->data_offset); | 1399 | conf->mirrors[i].rdev->data_offset); |
1399 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; | 1400 | mbio->bi_bdev = conf->mirrors[i].rdev->bdev; |
1400 | mbio->bi_end_io = raid1_end_write_request; | 1401 | mbio->bi_end_io = raid1_end_write_request; |
@@ -1434,7 +1435,7 @@ read_again: | |||
1434 | r1_bio->sectors = bio_sectors(bio) - sectors_handled; | 1435 | r1_bio->sectors = bio_sectors(bio) - sectors_handled; |
1435 | r1_bio->state = 0; | 1436 | r1_bio->state = 0; |
1436 | r1_bio->mddev = mddev; | 1437 | r1_bio->mddev = mddev; |
1437 | r1_bio->sector = bio->bi_sector + sectors_handled; | 1438 | r1_bio->sector = bio->bi_iter.bi_sector + sectors_handled; |
1438 | goto retry_write; | 1439 | goto retry_write; |
1439 | } | 1440 | } |
1440 | 1441 | ||
@@ -1958,14 +1959,14 @@ static int process_checks(struct r1bio *r1_bio) | |||
1958 | /* fixup the bio for reuse */ | 1959 | /* fixup the bio for reuse */ |
1959 | bio_reset(b); | 1960 | bio_reset(b); |
1960 | b->bi_vcnt = vcnt; | 1961 | b->bi_vcnt = vcnt; |
1961 | b->bi_size = r1_bio->sectors << 9; | 1962 | b->bi_iter.bi_size = r1_bio->sectors << 9; |
1962 | b->bi_sector = r1_bio->sector + | 1963 | b->bi_iter.bi_sector = r1_bio->sector + |
1963 | conf->mirrors[i].rdev->data_offset; | 1964 | conf->mirrors[i].rdev->data_offset; |
1964 | b->bi_bdev = conf->mirrors[i].rdev->bdev; | 1965 | b->bi_bdev = conf->mirrors[i].rdev->bdev; |
1965 | b->bi_end_io = end_sync_read; | 1966 | b->bi_end_io = end_sync_read; |
1966 | b->bi_private = r1_bio; | 1967 | b->bi_private = r1_bio; |
1967 | 1968 | ||
1968 | size = b->bi_size; | 1969 | size = b->bi_iter.bi_size; |
1969 | for (j = 0; j < vcnt ; j++) { | 1970 | for (j = 0; j < vcnt ; j++) { |
1970 | struct bio_vec *bi; | 1971 | struct bio_vec *bi; |
1971 | bi = &b->bi_io_vec[j]; | 1972 | bi = &b->bi_io_vec[j]; |
@@ -2220,11 +2221,11 @@ static int narrow_write_error(struct r1bio *r1_bio, int i) | |||
2220 | } | 2221 | } |
2221 | 2222 | ||
2222 | wbio->bi_rw = WRITE; | 2223 | wbio->bi_rw = WRITE; |
2223 | wbio->bi_sector = r1_bio->sector; | 2224 | wbio->bi_iter.bi_sector = r1_bio->sector; |
2224 | wbio->bi_size = r1_bio->sectors << 9; | 2225 | wbio->bi_iter.bi_size = r1_bio->sectors << 9; |
2225 | 2226 | ||
2226 | bio_trim(wbio, sector - r1_bio->sector, sectors); | 2227 | bio_trim(wbio, sector - r1_bio->sector, sectors); |
2227 | wbio->bi_sector += rdev->data_offset; | 2228 | wbio->bi_iter.bi_sector += rdev->data_offset; |
2228 | wbio->bi_bdev = rdev->bdev; | 2229 | wbio->bi_bdev = rdev->bdev; |
2229 | if (submit_bio_wait(WRITE, wbio) == 0) | 2230 | if (submit_bio_wait(WRITE, wbio) == 0) |
2230 | /* failure! */ | 2231 | /* failure! */ |
@@ -2338,7 +2339,8 @@ read_more: | |||
2338 | } | 2339 | } |
2339 | r1_bio->read_disk = disk; | 2340 | r1_bio->read_disk = disk; |
2340 | bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); | 2341 | bio = bio_clone_mddev(r1_bio->master_bio, GFP_NOIO, mddev); |
2341 | bio_trim(bio, r1_bio->sector - bio->bi_sector, max_sectors); | 2342 | bio_trim(bio, r1_bio->sector - bio->bi_iter.bi_sector, |
2343 | max_sectors); | ||
2342 | r1_bio->bios[r1_bio->read_disk] = bio; | 2344 | r1_bio->bios[r1_bio->read_disk] = bio; |
2343 | rdev = conf->mirrors[disk].rdev; | 2345 | rdev = conf->mirrors[disk].rdev; |
2344 | printk_ratelimited(KERN_ERR | 2346 | printk_ratelimited(KERN_ERR |
@@ -2347,7 +2349,7 @@ read_more: | |||
2347 | mdname(mddev), | 2349 | mdname(mddev), |
2348 | (unsigned long long)r1_bio->sector, | 2350 | (unsigned long long)r1_bio->sector, |
2349 | bdevname(rdev->bdev, b)); | 2351 | bdevname(rdev->bdev, b)); |
2350 | bio->bi_sector = r1_bio->sector + rdev->data_offset; | 2352 | bio->bi_iter.bi_sector = r1_bio->sector + rdev->data_offset; |
2351 | bio->bi_bdev = rdev->bdev; | 2353 | bio->bi_bdev = rdev->bdev; |
2352 | bio->bi_end_io = raid1_end_read_request; | 2354 | bio->bi_end_io = raid1_end_read_request; |
2353 | bio->bi_rw = READ | do_sync; | 2355 | bio->bi_rw = READ | do_sync; |
@@ -2356,7 +2358,7 @@ read_more: | |||
2356 | /* Drat - have to split this up more */ | 2358 | /* Drat - have to split this up more */ |
2357 | struct bio *mbio = r1_bio->master_bio; | 2359 | struct bio *mbio = r1_bio->master_bio; |
2358 | int sectors_handled = (r1_bio->sector + max_sectors | 2360 | int sectors_handled = (r1_bio->sector + max_sectors |
2359 | - mbio->bi_sector); | 2361 | - mbio->bi_iter.bi_sector); |
2360 | r1_bio->sectors = max_sectors; | 2362 | r1_bio->sectors = max_sectors; |
2361 | spin_lock_irq(&conf->device_lock); | 2363 | spin_lock_irq(&conf->device_lock); |
2362 | if (mbio->bi_phys_segments == 0) | 2364 | if (mbio->bi_phys_segments == 0) |
@@ -2374,7 +2376,8 @@ read_more: | |||
2374 | r1_bio->state = 0; | 2376 | r1_bio->state = 0; |
2375 | set_bit(R1BIO_ReadError, &r1_bio->state); | 2377 | set_bit(R1BIO_ReadError, &r1_bio->state); |
2376 | r1_bio->mddev = mddev; | 2378 | r1_bio->mddev = mddev; |
2377 | r1_bio->sector = mbio->bi_sector + sectors_handled; | 2379 | r1_bio->sector = mbio->bi_iter.bi_sector + |
2380 | sectors_handled; | ||
2378 | 2381 | ||
2379 | goto read_more; | 2382 | goto read_more; |
2380 | } else | 2383 | } else |
@@ -2598,7 +2601,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp | |||
2598 | } | 2601 | } |
2599 | if (bio->bi_end_io) { | 2602 | if (bio->bi_end_io) { |
2600 | atomic_inc(&rdev->nr_pending); | 2603 | atomic_inc(&rdev->nr_pending); |
2601 | bio->bi_sector = sector_nr + rdev->data_offset; | 2604 | bio->bi_iter.bi_sector = sector_nr + rdev->data_offset; |
2602 | bio->bi_bdev = rdev->bdev; | 2605 | bio->bi_bdev = rdev->bdev; |
2603 | bio->bi_private = r1_bio; | 2606 | bio->bi_private = r1_bio; |
2604 | } | 2607 | } |
@@ -2698,7 +2701,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp | |||
2698 | continue; | 2701 | continue; |
2699 | /* remove last page from this bio */ | 2702 | /* remove last page from this bio */ |
2700 | bio->bi_vcnt--; | 2703 | bio->bi_vcnt--; |
2701 | bio->bi_size -= len; | 2704 | bio->bi_iter.bi_size -= len; |
2702 | bio->bi_flags &= ~(1<< BIO_SEG_VALID); | 2705 | bio->bi_flags &= ~(1<< BIO_SEG_VALID); |
2703 | } | 2706 | } |
2704 | goto bio_full; | 2707 | goto bio_full; |
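In raid1 (and raid10 below) the read and write paths clone the incoming bio and then cut the clone down to the range one r1_bio/r10_bio covers with bio_trim(), whose offset and size arguments are in sectors. A generic sketch of that step, using the stock bio_clone_bioset() rather than md's bio_clone_mddev() wrapper; the function name clone_head() is invented for this example:

#include <linux/bio.h>

/* Clone 'bio' and trim the clone to 'max_sectors' starting at 'start'. */
static struct bio *clone_head(struct bio *bio, sector_t start,
                              unsigned int max_sectors)
{
        struct bio *clone = bio_clone_bioset(bio, GFP_NOIO, fs_bio_set);

        if (!clone)
                return NULL;
        bio_trim(clone, start - bio->bi_iter.bi_sector, max_sectors);
        return clone;
}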
diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c index 8d39d63281b9..33fc408e5eac 100644 --- a/drivers/md/raid10.c +++ b/drivers/md/raid10.c | |||
@@ -1152,14 +1152,12 @@ static void raid10_unplug(struct blk_plug_cb *cb, bool from_schedule) | |||
1152 | kfree(plug); | 1152 | kfree(plug); |
1153 | } | 1153 | } |
1154 | 1154 | ||
1155 | static void make_request(struct mddev *mddev, struct bio * bio) | 1155 | static void __make_request(struct mddev *mddev, struct bio *bio) |
1156 | { | 1156 | { |
1157 | struct r10conf *conf = mddev->private; | 1157 | struct r10conf *conf = mddev->private; |
1158 | struct r10bio *r10_bio; | 1158 | struct r10bio *r10_bio; |
1159 | struct bio *read_bio; | 1159 | struct bio *read_bio; |
1160 | int i; | 1160 | int i; |
1161 | sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); | ||
1162 | int chunk_sects = chunk_mask + 1; | ||
1163 | const int rw = bio_data_dir(bio); | 1161 | const int rw = bio_data_dir(bio); |
1164 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); | 1162 | const unsigned long do_sync = (bio->bi_rw & REQ_SYNC); |
1165 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); | 1163 | const unsigned long do_fua = (bio->bi_rw & REQ_FUA); |
@@ -1174,88 +1172,27 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1174 | int max_sectors; | 1172 | int max_sectors; |
1175 | int sectors; | 1173 | int sectors; |
1176 | 1174 | ||
1177 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | ||
1178 | md_flush_request(mddev, bio); | ||
1179 | return; | ||
1180 | } | ||
1181 | |||
1182 | /* If this request crosses a chunk boundary, we need to | ||
1183 | * split it. This will only happen for 1 PAGE (or less) requests. | ||
1184 | */ | ||
1185 | if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio) | ||
1186 | > chunk_sects | ||
1187 | && (conf->geo.near_copies < conf->geo.raid_disks | ||
1188 | || conf->prev.near_copies < conf->prev.raid_disks))) { | ||
1189 | struct bio_pair *bp; | ||
1190 | /* Sanity check -- queue functions should prevent this happening */ | ||
1191 | if (bio_segments(bio) > 1) | ||
1192 | goto bad_map; | ||
1193 | /* This is a one page bio that upper layers | ||
1194 | * refuse to split for us, so we need to split it. | ||
1195 | */ | ||
1196 | bp = bio_split(bio, | ||
1197 | chunk_sects - (bio->bi_sector & (chunk_sects - 1)) ); | ||
1198 | |||
1199 | /* Each of these 'make_request' calls will call 'wait_barrier'. | ||
1200 | * If the first succeeds but the second blocks due to the resync | ||
1201 | * thread raising the barrier, we will deadlock because the | ||
1202 | * IO to the underlying device will be queued in generic_make_request | ||
1203 | * and will never complete, so will never reduce nr_pending. | ||
1204 | * So increment nr_waiting here so no new raise_barriers will | ||
1205 | * succeed, and so the second wait_barrier cannot block. | ||
1206 | */ | ||
1207 | spin_lock_irq(&conf->resync_lock); | ||
1208 | conf->nr_waiting++; | ||
1209 | spin_unlock_irq(&conf->resync_lock); | ||
1210 | |||
1211 | make_request(mddev, &bp->bio1); | ||
1212 | make_request(mddev, &bp->bio2); | ||
1213 | |||
1214 | spin_lock_irq(&conf->resync_lock); | ||
1215 | conf->nr_waiting--; | ||
1216 | wake_up(&conf->wait_barrier); | ||
1217 | spin_unlock_irq(&conf->resync_lock); | ||
1218 | |||
1219 | bio_pair_release(bp); | ||
1220 | return; | ||
1221 | bad_map: | ||
1222 | printk("md/raid10:%s: make_request bug: can't convert block across chunks" | ||
1223 | " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2, | ||
1224 | (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2); | ||
1225 | |||
1226 | bio_io_error(bio); | ||
1227 | return; | ||
1228 | } | ||
1229 | |||
1230 | md_write_start(mddev, bio); | ||
1231 | |||
1232 | /* | ||
1233 | * Register the new request and wait if the reconstruction | ||
1234 | * thread has put up a bar for new requests. | ||
1235 | * Continue immediately if no resync is active currently. | ||
1236 | */ | ||
1237 | wait_barrier(conf); | ||
1238 | |||
1239 | sectors = bio_sectors(bio); | 1175 | sectors = bio_sectors(bio); |
1240 | while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | 1176 | while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && |
1241 | bio->bi_sector < conf->reshape_progress && | 1177 | bio->bi_iter.bi_sector < conf->reshape_progress && |
1242 | bio->bi_sector + sectors > conf->reshape_progress) { | 1178 | bio->bi_iter.bi_sector + sectors > conf->reshape_progress) { |
1243 | /* IO spans the reshape position. Need to wait for | 1179 | /* IO spans the reshape position. Need to wait for |
1244 | * reshape to pass | 1180 | * reshape to pass |
1245 | */ | 1181 | */ |
1246 | allow_barrier(conf); | 1182 | allow_barrier(conf); |
1247 | wait_event(conf->wait_barrier, | 1183 | wait_event(conf->wait_barrier, |
1248 | conf->reshape_progress <= bio->bi_sector || | 1184 | conf->reshape_progress <= bio->bi_iter.bi_sector || |
1249 | conf->reshape_progress >= bio->bi_sector + sectors); | 1185 | conf->reshape_progress >= bio->bi_iter.bi_sector + |
1186 | sectors); | ||
1250 | wait_barrier(conf); | 1187 | wait_barrier(conf); |
1251 | } | 1188 | } |
1252 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && | 1189 | if (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) && |
1253 | bio_data_dir(bio) == WRITE && | 1190 | bio_data_dir(bio) == WRITE && |
1254 | (mddev->reshape_backwards | 1191 | (mddev->reshape_backwards |
1255 | ? (bio->bi_sector < conf->reshape_safe && | 1192 | ? (bio->bi_iter.bi_sector < conf->reshape_safe && |
1256 | bio->bi_sector + sectors > conf->reshape_progress) | 1193 | bio->bi_iter.bi_sector + sectors > conf->reshape_progress) |
1257 | : (bio->bi_sector + sectors > conf->reshape_safe && | 1194 | : (bio->bi_iter.bi_sector + sectors > conf->reshape_safe && |
1258 | bio->bi_sector < conf->reshape_progress))) { | 1195 | bio->bi_iter.bi_sector < conf->reshape_progress))) { |
1259 | /* Need to update reshape_position in metadata */ | 1196 | /* Need to update reshape_position in metadata */ |
1260 | mddev->reshape_position = conf->reshape_progress; | 1197 | mddev->reshape_position = conf->reshape_progress; |
1261 | set_bit(MD_CHANGE_DEVS, &mddev->flags); | 1198 | set_bit(MD_CHANGE_DEVS, &mddev->flags); |
@@ -1273,7 +1210,7 @@ static void make_request(struct mddev *mddev, struct bio * bio) | |||
1273 | r10_bio->sectors = sectors; | 1210 | r10_bio->sectors = sectors; |
1274 | 1211 | ||
1275 | r10_bio->mddev = mddev; | 1212 | r10_bio->mddev = mddev; |
1276 | r10_bio->sector = bio->bi_sector; | 1213 | r10_bio->sector = bio->bi_iter.bi_sector; |
1277 | r10_bio->state = 0; | 1214 | r10_bio->state = 0; |
1278 | 1215 | ||
1279 | /* We might need to issue multiple reads to different | 1216 | /* We might need to issue multiple reads to different |
@@ -1302,13 +1239,13 @@ read_again: | |||
1302 | slot = r10_bio->read_slot; | 1239 | slot = r10_bio->read_slot; |
1303 | 1240 | ||
1304 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1241 | read_bio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
1305 | bio_trim(read_bio, r10_bio->sector - bio->bi_sector, | 1242 | bio_trim(read_bio, r10_bio->sector - bio->bi_iter.bi_sector, |
1306 | max_sectors); | 1243 | max_sectors); |
1307 | 1244 | ||
1308 | r10_bio->devs[slot].bio = read_bio; | 1245 | r10_bio->devs[slot].bio = read_bio; |
1309 | r10_bio->devs[slot].rdev = rdev; | 1246 | r10_bio->devs[slot].rdev = rdev; |
1310 | 1247 | ||
1311 | read_bio->bi_sector = r10_bio->devs[slot].addr + | 1248 | read_bio->bi_iter.bi_sector = r10_bio->devs[slot].addr + |
1312 | choose_data_offset(r10_bio, rdev); | 1249 | choose_data_offset(r10_bio, rdev); |
1313 | read_bio->bi_bdev = rdev->bdev; | 1250 | read_bio->bi_bdev = rdev->bdev; |
1314 | read_bio->bi_end_io = raid10_end_read_request; | 1251 | read_bio->bi_end_io = raid10_end_read_request; |
@@ -1320,7 +1257,7 @@ read_again: | |||
1320 | * need another r10_bio. | 1257 | * need another r10_bio. |
1321 | */ | 1258 | */ |
1322 | sectors_handled = (r10_bio->sector + max_sectors | 1259 | sectors_handled = (r10_bio->sector + max_sectors |
1323 | - bio->bi_sector); | 1260 | - bio->bi_iter.bi_sector); |
1324 | r10_bio->sectors = max_sectors; | 1261 | r10_bio->sectors = max_sectors; |
1325 | spin_lock_irq(&conf->device_lock); | 1262 | spin_lock_irq(&conf->device_lock); |
1326 | if (bio->bi_phys_segments == 0) | 1263 | if (bio->bi_phys_segments == 0) |
@@ -1341,7 +1278,8 @@ read_again: | |||
1341 | r10_bio->sectors = bio_sectors(bio) - sectors_handled; | 1278 | r10_bio->sectors = bio_sectors(bio) - sectors_handled; |
1342 | r10_bio->state = 0; | 1279 | r10_bio->state = 0; |
1343 | r10_bio->mddev = mddev; | 1280 | r10_bio->mddev = mddev; |
1344 | r10_bio->sector = bio->bi_sector + sectors_handled; | 1281 | r10_bio->sector = bio->bi_iter.bi_sector + |
1282 | sectors_handled; | ||
1345 | goto read_again; | 1283 | goto read_again; |
1346 | } else | 1284 | } else |
1347 | generic_make_request(read_bio); | 1285 | generic_make_request(read_bio); |
@@ -1499,7 +1437,8 @@ retry_write: | |||
1499 | bio->bi_phys_segments++; | 1437 | bio->bi_phys_segments++; |
1500 | spin_unlock_irq(&conf->device_lock); | 1438 | spin_unlock_irq(&conf->device_lock); |
1501 | } | 1439 | } |
1502 | sectors_handled = r10_bio->sector + max_sectors - bio->bi_sector; | 1440 | sectors_handled = r10_bio->sector + max_sectors - |
1441 | bio->bi_iter.bi_sector; | ||
1503 | 1442 | ||
1504 | atomic_set(&r10_bio->remaining, 1); | 1443 | atomic_set(&r10_bio->remaining, 1); |
1505 | bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); | 1444 | bitmap_startwrite(mddev->bitmap, r10_bio->sector, r10_bio->sectors, 0); |
@@ -1510,11 +1449,11 @@ retry_write: | |||
1510 | if (r10_bio->devs[i].bio) { | 1449 | if (r10_bio->devs[i].bio) { |
1511 | struct md_rdev *rdev = conf->mirrors[d].rdev; | 1450 | struct md_rdev *rdev = conf->mirrors[d].rdev; |
1512 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1451 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
1513 | bio_trim(mbio, r10_bio->sector - bio->bi_sector, | 1452 | bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, |
1514 | max_sectors); | 1453 | max_sectors); |
1515 | r10_bio->devs[i].bio = mbio; | 1454 | r10_bio->devs[i].bio = mbio; |
1516 | 1455 | ||
1517 | mbio->bi_sector = (r10_bio->devs[i].addr+ | 1456 | mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ |
1518 | choose_data_offset(r10_bio, | 1457 | choose_data_offset(r10_bio, |
1519 | rdev)); | 1458 | rdev)); |
1520 | mbio->bi_bdev = rdev->bdev; | 1459 | mbio->bi_bdev = rdev->bdev; |
@@ -1553,11 +1492,11 @@ retry_write: | |||
1553 | rdev = conf->mirrors[d].rdev; | 1492 | rdev = conf->mirrors[d].rdev; |
1554 | } | 1493 | } |
1555 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 1494 | mbio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
1556 | bio_trim(mbio, r10_bio->sector - bio->bi_sector, | 1495 | bio_trim(mbio, r10_bio->sector - bio->bi_iter.bi_sector, |
1557 | max_sectors); | 1496 | max_sectors); |
1558 | r10_bio->devs[i].repl_bio = mbio; | 1497 | r10_bio->devs[i].repl_bio = mbio; |
1559 | 1498 | ||
1560 | mbio->bi_sector = (r10_bio->devs[i].addr + | 1499 | mbio->bi_iter.bi_sector = (r10_bio->devs[i].addr + |
1561 | choose_data_offset( | 1500 | choose_data_offset( |
1562 | r10_bio, rdev)); | 1501 | r10_bio, rdev)); |
1563 | mbio->bi_bdev = rdev->bdev; | 1502 | mbio->bi_bdev = rdev->bdev; |
@@ -1591,11 +1530,57 @@ retry_write: | |||
1591 | r10_bio->sectors = bio_sectors(bio) - sectors_handled; | 1530 | r10_bio->sectors = bio_sectors(bio) - sectors_handled; |
1592 | 1531 | ||
1593 | r10_bio->mddev = mddev; | 1532 | r10_bio->mddev = mddev; |
1594 | r10_bio->sector = bio->bi_sector + sectors_handled; | 1533 | r10_bio->sector = bio->bi_iter.bi_sector + sectors_handled; |
1595 | r10_bio->state = 0; | 1534 | r10_bio->state = 0; |
1596 | goto retry_write; | 1535 | goto retry_write; |
1597 | } | 1536 | } |
1598 | one_write_done(r10_bio); | 1537 | one_write_done(r10_bio); |
1538 | } | ||
1539 | |||
1540 | static void make_request(struct mddev *mddev, struct bio *bio) | ||
1541 | { | ||
1542 | struct r10conf *conf = mddev->private; | ||
1543 | sector_t chunk_mask = (conf->geo.chunk_mask & conf->prev.chunk_mask); | ||
1544 | int chunk_sects = chunk_mask + 1; | ||
1545 | |||
1546 | struct bio *split; | ||
1547 | |||
1548 | if (unlikely(bio->bi_rw & REQ_FLUSH)) { | ||
1549 | md_flush_request(mddev, bio); | ||
1550 | return; | ||
1551 | } | ||
1552 | |||
1553 | md_write_start(mddev, bio); | ||
1554 | |||
1555 | /* | ||
1556 | * Register the new request and wait if the reconstruction | ||
1557 | * thread has put up a bar for new requests. | ||
1558 | * Continue immediately if no resync is active currently. | ||
1559 | */ | ||
1560 | wait_barrier(conf); | ||
1561 | |||
1562 | do { | ||
1563 | |||
1564 | /* | ||
1565 | * If this request crosses a chunk boundary, we need to split | ||
1566 | * it. | ||
1567 | */ | ||
1568 | if (unlikely((bio->bi_iter.bi_sector & chunk_mask) + | ||
1569 | bio_sectors(bio) > chunk_sects | ||
1570 | && (conf->geo.near_copies < conf->geo.raid_disks | ||
1571 | || conf->prev.near_copies < | ||
1572 | conf->prev.raid_disks))) { | ||
1573 | split = bio_split(bio, chunk_sects - | ||
1574 | (bio->bi_iter.bi_sector & | ||
1575 | (chunk_sects - 1)), | ||
1576 | GFP_NOIO, fs_bio_set); | ||
1577 | bio_chain(split, bio); | ||
1578 | } else { | ||
1579 | split = bio; | ||
1580 | } | ||
1581 | |||
1582 | __make_request(mddev, split); | ||
1583 | } while (split != bio); | ||
1599 | 1584 | ||
1600 | /* In case raid10d snuck in to freeze_array */ | 1585 | /* In case raid10d snuck in to freeze_array */ |
1601 | wake_up(&conf->wait_barrier); | 1586 | wake_up(&conf->wait_barrier); |
@@ -2124,10 +2109,10 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
2124 | bio_reset(tbio); | 2109 | bio_reset(tbio); |
2125 | 2110 | ||
2126 | tbio->bi_vcnt = vcnt; | 2111 | tbio->bi_vcnt = vcnt; |
2127 | tbio->bi_size = r10_bio->sectors << 9; | 2112 | tbio->bi_iter.bi_size = r10_bio->sectors << 9; |
2128 | tbio->bi_rw = WRITE; | 2113 | tbio->bi_rw = WRITE; |
2129 | tbio->bi_private = r10_bio; | 2114 | tbio->bi_private = r10_bio; |
2130 | tbio->bi_sector = r10_bio->devs[i].addr; | 2115 | tbio->bi_iter.bi_sector = r10_bio->devs[i].addr; |
2131 | 2116 | ||
2132 | for (j=0; j < vcnt ; j++) { | 2117 | for (j=0; j < vcnt ; j++) { |
2133 | tbio->bi_io_vec[j].bv_offset = 0; | 2118 | tbio->bi_io_vec[j].bv_offset = 0; |
@@ -2144,7 +2129,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio) | |||
2144 | atomic_inc(&r10_bio->remaining); | 2129 | atomic_inc(&r10_bio->remaining); |
2145 | md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); | 2130 | md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio)); |
2146 | 2131 | ||
2147 | tbio->bi_sector += conf->mirrors[d].rdev->data_offset; | 2132 | tbio->bi_iter.bi_sector += conf->mirrors[d].rdev->data_offset; |
2148 | tbio->bi_bdev = conf->mirrors[d].rdev->bdev; | 2133 | tbio->bi_bdev = conf->mirrors[d].rdev->bdev; |
2149 | generic_make_request(tbio); | 2134 | generic_make_request(tbio); |
2150 | } | 2135 | } |
@@ -2614,8 +2599,8 @@ static int narrow_write_error(struct r10bio *r10_bio, int i) | |||
2614 | sectors = sect_to_write; | 2599 | sectors = sect_to_write; |
2615 | /* Write at 'sector' for 'sectors' */ | 2600 | /* Write at 'sector' for 'sectors' */ |
2616 | wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); | 2601 | wbio = bio_clone_mddev(bio, GFP_NOIO, mddev); |
2617 | bio_trim(wbio, sector - bio->bi_sector, sectors); | 2602 | bio_trim(wbio, sector - bio->bi_iter.bi_sector, sectors); |
2618 | wbio->bi_sector = (r10_bio->devs[i].addr+ | 2603 | wbio->bi_iter.bi_sector = (r10_bio->devs[i].addr+ |
2619 | choose_data_offset(r10_bio, rdev) + | 2604 | choose_data_offset(r10_bio, rdev) + |
2620 | (sector - r10_bio->sector)); | 2605 | (sector - r10_bio->sector)); |
2621 | wbio->bi_bdev = rdev->bdev; | 2606 | wbio->bi_bdev = rdev->bdev; |
@@ -2687,10 +2672,10 @@ read_more: | |||
2687 | (unsigned long long)r10_bio->sector); | 2672 | (unsigned long long)r10_bio->sector); |
2688 | bio = bio_clone_mddev(r10_bio->master_bio, | 2673 | bio = bio_clone_mddev(r10_bio->master_bio, |
2689 | GFP_NOIO, mddev); | 2674 | GFP_NOIO, mddev); |
2690 | bio_trim(bio, r10_bio->sector - bio->bi_sector, max_sectors); | 2675 | bio_trim(bio, r10_bio->sector - bio->bi_iter.bi_sector, max_sectors); |
2691 | r10_bio->devs[slot].bio = bio; | 2676 | r10_bio->devs[slot].bio = bio; |
2692 | r10_bio->devs[slot].rdev = rdev; | 2677 | r10_bio->devs[slot].rdev = rdev; |
2693 | bio->bi_sector = r10_bio->devs[slot].addr | 2678 | bio->bi_iter.bi_sector = r10_bio->devs[slot].addr |
2694 | + choose_data_offset(r10_bio, rdev); | 2679 | + choose_data_offset(r10_bio, rdev); |
2695 | bio->bi_bdev = rdev->bdev; | 2680 | bio->bi_bdev = rdev->bdev; |
2696 | bio->bi_rw = READ | do_sync; | 2681 | bio->bi_rw = READ | do_sync; |
@@ -2701,7 +2686,7 @@ read_more: | |||
2701 | struct bio *mbio = r10_bio->master_bio; | 2686 | struct bio *mbio = r10_bio->master_bio; |
2702 | int sectors_handled = | 2687 | int sectors_handled = |
2703 | r10_bio->sector + max_sectors | 2688 | r10_bio->sector + max_sectors |
2704 | - mbio->bi_sector; | 2689 | - mbio->bi_iter.bi_sector; |
2705 | r10_bio->sectors = max_sectors; | 2690 | r10_bio->sectors = max_sectors; |
2706 | spin_lock_irq(&conf->device_lock); | 2691 | spin_lock_irq(&conf->device_lock); |
2707 | if (mbio->bi_phys_segments == 0) | 2692 | if (mbio->bi_phys_segments == 0) |
@@ -2719,7 +2704,7 @@ read_more: | |||
2719 | set_bit(R10BIO_ReadError, | 2704 | set_bit(R10BIO_ReadError, |
2720 | &r10_bio->state); | 2705 | &r10_bio->state); |
2721 | r10_bio->mddev = mddev; | 2706 | r10_bio->mddev = mddev; |
2722 | r10_bio->sector = mbio->bi_sector | 2707 | r10_bio->sector = mbio->bi_iter.bi_sector |
2723 | + sectors_handled; | 2708 | + sectors_handled; |
2724 | 2709 | ||
2725 | goto read_more; | 2710 | goto read_more; |
@@ -3157,7 +3142,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3157 | bio->bi_end_io = end_sync_read; | 3142 | bio->bi_end_io = end_sync_read; |
3158 | bio->bi_rw = READ; | 3143 | bio->bi_rw = READ; |
3159 | from_addr = r10_bio->devs[j].addr; | 3144 | from_addr = r10_bio->devs[j].addr; |
3160 | bio->bi_sector = from_addr + rdev->data_offset; | 3145 | bio->bi_iter.bi_sector = from_addr + |
3146 | rdev->data_offset; | ||
3161 | bio->bi_bdev = rdev->bdev; | 3147 | bio->bi_bdev = rdev->bdev; |
3162 | atomic_inc(&rdev->nr_pending); | 3148 | atomic_inc(&rdev->nr_pending); |
3163 | /* and we write to 'i' (if not in_sync) */ | 3149 | /* and we write to 'i' (if not in_sync) */ |
@@ -3181,7 +3167,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3181 | bio->bi_private = r10_bio; | 3167 | bio->bi_private = r10_bio; |
3182 | bio->bi_end_io = end_sync_write; | 3168 | bio->bi_end_io = end_sync_write; |
3183 | bio->bi_rw = WRITE; | 3169 | bio->bi_rw = WRITE; |
3184 | bio->bi_sector = to_addr | 3170 | bio->bi_iter.bi_sector = to_addr |
3185 | + rdev->data_offset; | 3171 | + rdev->data_offset; |
3186 | bio->bi_bdev = rdev->bdev; | 3172 | bio->bi_bdev = rdev->bdev; |
3187 | atomic_inc(&r10_bio->remaining); | 3173 | atomic_inc(&r10_bio->remaining); |
@@ -3210,7 +3196,8 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3210 | bio->bi_private = r10_bio; | 3196 | bio->bi_private = r10_bio; |
3211 | bio->bi_end_io = end_sync_write; | 3197 | bio->bi_end_io = end_sync_write; |
3212 | bio->bi_rw = WRITE; | 3198 | bio->bi_rw = WRITE; |
3213 | bio->bi_sector = to_addr + rdev->data_offset; | 3199 | bio->bi_iter.bi_sector = to_addr + |
3200 | rdev->data_offset; | ||
3214 | bio->bi_bdev = rdev->bdev; | 3201 | bio->bi_bdev = rdev->bdev; |
3215 | atomic_inc(&r10_bio->remaining); | 3202 | atomic_inc(&r10_bio->remaining); |
3216 | break; | 3203 | break; |
@@ -3328,7 +3315,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3328 | bio->bi_private = r10_bio; | 3315 | bio->bi_private = r10_bio; |
3329 | bio->bi_end_io = end_sync_read; | 3316 | bio->bi_end_io = end_sync_read; |
3330 | bio->bi_rw = READ; | 3317 | bio->bi_rw = READ; |
3331 | bio->bi_sector = sector + | 3318 | bio->bi_iter.bi_sector = sector + |
3332 | conf->mirrors[d].rdev->data_offset; | 3319 | conf->mirrors[d].rdev->data_offset; |
3333 | bio->bi_bdev = conf->mirrors[d].rdev->bdev; | 3320 | bio->bi_bdev = conf->mirrors[d].rdev->bdev; |
3334 | count++; | 3321 | count++; |
@@ -3350,7 +3337,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3350 | bio->bi_private = r10_bio; | 3337 | bio->bi_private = r10_bio; |
3351 | bio->bi_end_io = end_sync_write; | 3338 | bio->bi_end_io = end_sync_write; |
3352 | bio->bi_rw = WRITE; | 3339 | bio->bi_rw = WRITE; |
3353 | bio->bi_sector = sector + | 3340 | bio->bi_iter.bi_sector = sector + |
3354 | conf->mirrors[d].replacement->data_offset; | 3341 | conf->mirrors[d].replacement->data_offset; |
3355 | bio->bi_bdev = conf->mirrors[d].replacement->bdev; | 3342 | bio->bi_bdev = conf->mirrors[d].replacement->bdev; |
3356 | count++; | 3343 | count++; |
@@ -3397,7 +3384,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, | |||
3397 | bio2 = bio2->bi_next) { | 3384 | bio2 = bio2->bi_next) { |
3398 | /* remove last page from this bio */ | 3385 | /* remove last page from this bio */ |
3399 | bio2->bi_vcnt--; | 3386 | bio2->bi_vcnt--; |
3400 | bio2->bi_size -= len; | 3387 | bio2->bi_iter.bi_size -= len; |
3401 | bio2->bi_flags &= ~(1<< BIO_SEG_VALID); | 3388 | bio2->bi_flags &= ~(1<< BIO_SEG_VALID); |
3402 | } | 3389 | } |
3403 | goto bio_full; | 3390 | goto bio_full; |
@@ -4418,7 +4405,7 @@ read_more: | |||
4418 | read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); | 4405 | read_bio = bio_alloc_mddev(GFP_KERNEL, RESYNC_PAGES, mddev); |
4419 | 4406 | ||
4420 | read_bio->bi_bdev = rdev->bdev; | 4407 | read_bio->bi_bdev = rdev->bdev; |
4421 | read_bio->bi_sector = (r10_bio->devs[r10_bio->read_slot].addr | 4408 | read_bio->bi_iter.bi_sector = (r10_bio->devs[r10_bio->read_slot].addr |
4422 | + rdev->data_offset); | 4409 | + rdev->data_offset); |
4423 | read_bio->bi_private = r10_bio; | 4410 | read_bio->bi_private = r10_bio; |
4424 | read_bio->bi_end_io = end_sync_read; | 4411 | read_bio->bi_end_io = end_sync_read; |
@@ -4426,7 +4413,7 @@ read_more: | |||
4426 | read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); | 4413 | read_bio->bi_flags &= ~(BIO_POOL_MASK - 1); |
4427 | read_bio->bi_flags |= 1 << BIO_UPTODATE; | 4414 | read_bio->bi_flags |= 1 << BIO_UPTODATE; |
4428 | read_bio->bi_vcnt = 0; | 4415 | read_bio->bi_vcnt = 0; |
4429 | read_bio->bi_size = 0; | 4416 | read_bio->bi_iter.bi_size = 0; |
4430 | r10_bio->master_bio = read_bio; | 4417 | r10_bio->master_bio = read_bio; |
4431 | r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; | 4418 | r10_bio->read_slot = r10_bio->devs[r10_bio->read_slot].devnum; |
4432 | 4419 | ||
@@ -4452,7 +4439,8 @@ read_more: | |||
4452 | 4439 | ||
4453 | bio_reset(b); | 4440 | bio_reset(b); |
4454 | b->bi_bdev = rdev2->bdev; | 4441 | b->bi_bdev = rdev2->bdev; |
4455 | b->bi_sector = r10_bio->devs[s/2].addr + rdev2->new_data_offset; | 4442 | b->bi_iter.bi_sector = r10_bio->devs[s/2].addr + |
4443 | rdev2->new_data_offset; | ||
4456 | b->bi_private = r10_bio; | 4444 | b->bi_private = r10_bio; |
4457 | b->bi_end_io = end_reshape_write; | 4445 | b->bi_end_io = end_reshape_write; |
4458 | b->bi_rw = WRITE; | 4446 | b->bi_rw = WRITE; |
@@ -4479,7 +4467,7 @@ read_more: | |||
4479 | bio2 = bio2->bi_next) { | 4467 | bio2 = bio2->bi_next) { |
4480 | /* Remove last page from this bio */ | 4468 | /* Remove last page from this bio */ |
4481 | bio2->bi_vcnt--; | 4469 | bio2->bi_vcnt--; |
4482 | bio2->bi_size -= len; | 4470 | bio2->bi_iter.bi_size -= len; |
4483 | bio2->bi_flags &= ~(1<<BIO_SEG_VALID); | 4471 | bio2->bi_flags &= ~(1<<BIO_SEG_VALID); |
4484 | } | 4472 | } |
4485 | goto bio_full; | 4473 | goto bio_full; |
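The raid10 resync and reshape hunks also show the other recurring idiom in this conversion: a bio that is being reused is wiped with bio_reset() and then repositioned through bi_iter before resubmission. A compact sketch of that re-initialisation; the function and parameter names are placeholders, not taken from the commit:

#include <linux/bio.h>
#include <linux/fs.h>

static void reinit_sync_write_bio(struct bio *tbio, struct block_device *bdev,
                                  sector_t dev_addr, sector_t data_offset,
                                  unsigned int nr_sectors,
                                  bio_end_io_t *end_io, void *private)
{
        bio_reset(tbio);                        /* clears flags, iter, end_io */
        tbio->bi_iter.bi_sector = dev_addr + data_offset;
        tbio->bi_iter.bi_size   = nr_sectors << 9;
        tbio->bi_bdev    = bdev;
        tbio->bi_rw      = WRITE;
        tbio->bi_end_io  = end_io;
        tbio->bi_private = private;
}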
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 03f82ab87d9e..67ca9c3d2939 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c | |||
@@ -133,7 +133,7 @@ static inline void unlock_all_device_hash_locks_irq(struct r5conf *conf) | |||
133 | static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) | 133 | static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector) |
134 | { | 134 | { |
135 | int sectors = bio_sectors(bio); | 135 | int sectors = bio_sectors(bio); |
136 | if (bio->bi_sector + sectors < sector + STRIPE_SECTORS) | 136 | if (bio->bi_iter.bi_sector + sectors < sector + STRIPE_SECTORS) |
137 | return bio->bi_next; | 137 | return bio->bi_next; |
138 | else | 138 | else |
139 | return NULL; | 139 | return NULL; |
@@ -225,7 +225,7 @@ static void return_io(struct bio *return_bi) | |||
225 | 225 | ||
226 | return_bi = bi->bi_next; | 226 | return_bi = bi->bi_next; |
227 | bi->bi_next = NULL; | 227 | bi->bi_next = NULL; |
228 | bi->bi_size = 0; | 228 | bi->bi_iter.bi_size = 0; |
229 | trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), | 229 | trace_block_bio_complete(bdev_get_queue(bi->bi_bdev), |
230 | bi, 0); | 230 | bi, 0); |
231 | bio_endio(bi, 0); | 231 | bio_endio(bi, 0); |
@@ -852,10 +852,10 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
852 | bi->bi_rw, i); | 852 | bi->bi_rw, i); |
853 | atomic_inc(&sh->count); | 853 | atomic_inc(&sh->count); |
854 | if (use_new_offset(conf, sh)) | 854 | if (use_new_offset(conf, sh)) |
855 | bi->bi_sector = (sh->sector | 855 | bi->bi_iter.bi_sector = (sh->sector |
856 | + rdev->new_data_offset); | 856 | + rdev->new_data_offset); |
857 | else | 857 | else |
858 | bi->bi_sector = (sh->sector | 858 | bi->bi_iter.bi_sector = (sh->sector |
859 | + rdev->data_offset); | 859 | + rdev->data_offset); |
860 | if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) | 860 | if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags)) |
861 | bi->bi_rw |= REQ_NOMERGE; | 861 | bi->bi_rw |= REQ_NOMERGE; |
@@ -863,7 +863,7 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
863 | bi->bi_vcnt = 1; | 863 | bi->bi_vcnt = 1; |
864 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; | 864 | bi->bi_io_vec[0].bv_len = STRIPE_SIZE; |
865 | bi->bi_io_vec[0].bv_offset = 0; | 865 | bi->bi_io_vec[0].bv_offset = 0; |
866 | bi->bi_size = STRIPE_SIZE; | 866 | bi->bi_iter.bi_size = STRIPE_SIZE; |
867 | /* | 867 | /* |
868 | * If this is discard request, set bi_vcnt 0. We don't | 868 | * If this is discard request, set bi_vcnt 0. We don't |
869 | * want to confuse SCSI because SCSI will replace payload | 869 | * want to confuse SCSI because SCSI will replace payload |
@@ -899,15 +899,15 @@ static void ops_run_io(struct stripe_head *sh, struct stripe_head_state *s) | |||
899 | rbi->bi_rw, i); | 899 | rbi->bi_rw, i); |
900 | atomic_inc(&sh->count); | 900 | atomic_inc(&sh->count); |
901 | if (use_new_offset(conf, sh)) | 901 | if (use_new_offset(conf, sh)) |
902 | rbi->bi_sector = (sh->sector | 902 | rbi->bi_iter.bi_sector = (sh->sector |
903 | + rrdev->new_data_offset); | 903 | + rrdev->new_data_offset); |
904 | else | 904 | else |
905 | rbi->bi_sector = (sh->sector | 905 | rbi->bi_iter.bi_sector = (sh->sector |
906 | + rrdev->data_offset); | 906 | + rrdev->data_offset); |
907 | rbi->bi_vcnt = 1; | 907 | rbi->bi_vcnt = 1; |
908 | rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; | 908 | rbi->bi_io_vec[0].bv_len = STRIPE_SIZE; |
909 | rbi->bi_io_vec[0].bv_offset = 0; | 909 | rbi->bi_io_vec[0].bv_offset = 0; |
910 | rbi->bi_size = STRIPE_SIZE; | 910 | rbi->bi_iter.bi_size = STRIPE_SIZE; |
911 | /* | 911 | /* |
912 | * If this is discard request, set bi_vcnt 0. We don't | 912 | * If this is discard request, set bi_vcnt 0. We don't |
913 | * want to confuse SCSI because SCSI will replace payload | 913 | * want to confuse SCSI because SCSI will replace payload |
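Both branches of ops_run_io now write the device sector into bi_iter.bi_sector, choosing between rdev->data_offset and rdev->new_data_offset depending on whether the stripe should use the post-reshape layout (the use_new_offset() test in the real code). A standalone sketch of that mapping follows; the rdev_model type, the plain flag standing in for use_new_offset(conf, sh), and the numbers are all illustrative:

/* Toy model of the offset choice in ops_run_io; simplified stand-ins, not md code. */
#include <stdio.h>

typedef unsigned long long sector_t;

struct rdev_model { sector_t data_offset, new_data_offset; };

static sector_t map_stripe_sector(sector_t stripe_sector,
				  const struct rdev_model *rdev,
				  int use_new_offset)
{
	/* old code wrote bi->bi_sector; the patch writes bi->bi_iter.bi_sector */
	return stripe_sector + (use_new_offset ? rdev->new_data_offset
					       : rdev->data_offset);
}

int main(void)
{
	struct rdev_model rdev = { .data_offset = 2048, .new_data_offset = 4096 };

	printf("%llu\n", map_stripe_sector(856, &rdev, 0));  /* 2904 */
	printf("%llu\n", map_stripe_sector(856, &rdev, 1));  /* 4952 */
	return 0;
}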
@@ -935,24 +935,24 @@ static struct dma_async_tx_descriptor * | |||
935 | async_copy_data(int frombio, struct bio *bio, struct page *page, | 935 | async_copy_data(int frombio, struct bio *bio, struct page *page, |
936 | sector_t sector, struct dma_async_tx_descriptor *tx) | 936 | sector_t sector, struct dma_async_tx_descriptor *tx) |
937 | { | 937 | { |
938 | struct bio_vec *bvl; | 938 | struct bio_vec bvl; |
939 | struct bvec_iter iter; | ||
939 | struct page *bio_page; | 940 | struct page *bio_page; |
940 | int i; | ||
941 | int page_offset; | 941 | int page_offset; |
942 | struct async_submit_ctl submit; | 942 | struct async_submit_ctl submit; |
943 | enum async_tx_flags flags = 0; | 943 | enum async_tx_flags flags = 0; |
944 | 944 | ||
945 | if (bio->bi_sector >= sector) | 945 | if (bio->bi_iter.bi_sector >= sector) |
946 | page_offset = (signed)(bio->bi_sector - sector) * 512; | 946 | page_offset = (signed)(bio->bi_iter.bi_sector - sector) * 512; |
947 | else | 947 | else |
948 | page_offset = (signed)(sector - bio->bi_sector) * -512; | 948 | page_offset = (signed)(sector - bio->bi_iter.bi_sector) * -512; |
949 | 949 | ||
950 | if (frombio) | 950 | if (frombio) |
951 | flags |= ASYNC_TX_FENCE; | 951 | flags |= ASYNC_TX_FENCE; |
952 | init_async_submit(&submit, flags, tx, NULL, NULL, NULL); | 952 | init_async_submit(&submit, flags, tx, NULL, NULL, NULL); |
953 | 953 | ||
954 | bio_for_each_segment(bvl, bio, i) { | 954 | bio_for_each_segment(bvl, bio, iter) { |
955 | int len = bvl->bv_len; | 955 | int len = bvl.bv_len; |
956 | int clen; | 956 | int clen; |
957 | int b_offset = 0; | 957 | int b_offset = 0; |
958 | 958 | ||
@@ -968,8 +968,8 @@ async_copy_data(int frombio, struct bio *bio, struct page *page, | |||
968 | clen = len; | 968 | clen = len; |
969 | 969 | ||
970 | if (clen > 0) { | 970 | if (clen > 0) { |
971 | b_offset += bvl->bv_offset; | 971 | b_offset += bvl.bv_offset; |
972 | bio_page = bvl->bv_page; | 972 | bio_page = bvl.bv_page; |
973 | if (frombio) | 973 | if (frombio) |
974 | tx = async_memcpy(page, bio_page, page_offset, | 974 | tx = async_memcpy(page, bio_page, page_offset, |
975 | b_offset, clen, &submit); | 975 | b_offset, clen, &submit); |
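The async_copy_data hunks are the behavioural half of the conversion: bio_for_each_segment() now yields a struct bio_vec by value and is driven by a struct bvec_iter rather than an integer index into bi_io_vec, which is what lets a bio be split or advanced without editing its vector. A standalone model of that iteration style is sketched below; the types, the model_for_each_segment macro, and the sample segments are stand-ins, and the sketch ignores the partially-completed-segment handling the real iterator supports:

/* Simplified, self-contained model of by-value segment iteration. */
#include <stdio.h>

typedef unsigned long long sector_t;

struct bio_vec   { const char *bv_page; unsigned int bv_len, bv_offset; };
struct bvec_iter { sector_t bi_sector; unsigned int bi_size, bi_idx; };

struct bio_model {
	struct bio_vec  *bi_io_vec;
	struct bvec_iter bi_iter;
};

/* Model of bio_for_each_segment(): copy out the current vec, then advance. */
#define model_for_each_segment(bvl, bio, it)				\
	for ((it) = (bio)->bi_iter;					\
	     (it).bi_size &&						\
		((bvl) = (bio)->bi_io_vec[(it).bi_idx], 1);		\
	     (it).bi_sector += (bvl).bv_len >> 9,			\
	     (it).bi_size   -= (bvl).bv_len,				\
	     (it).bi_idx++)

int main(void)
{
	struct bio_vec vecs[] = {
		{ "pageA", 4096, 0 },
		{ "pageB", 2048, 0 },
	};
	struct bio_model bio = {
		.bi_io_vec = vecs,
		.bi_iter   = { .bi_sector = 936, .bi_size = 6144, .bi_idx = 0 },
	};
	struct bio_vec bvl;	/* by value, as in the patched code   */
	struct bvec_iter iter;	/* replaces the old integer index 'i' */

	model_for_each_segment(bvl, &bio, iter)
		printf("%s len=%u at sector %llu\n",
		       bvl.bv_page, bvl.bv_len, iter.bi_sector);
	return 0;
}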
@@ -1012,7 +1012,7 @@ static void ops_complete_biofill(void *stripe_head_ref) | |||
1012 | BUG_ON(!dev->read); | 1012 | BUG_ON(!dev->read); |
1013 | rbi = dev->read; | 1013 | rbi = dev->read; |
1014 | dev->read = NULL; | 1014 | dev->read = NULL; |
1015 | while (rbi && rbi->bi_sector < | 1015 | while (rbi && rbi->bi_iter.bi_sector < |
1016 | dev->sector + STRIPE_SECTORS) { | 1016 | dev->sector + STRIPE_SECTORS) { |
1017 | rbi2 = r5_next_bio(rbi, dev->sector); | 1017 | rbi2 = r5_next_bio(rbi, dev->sector); |
1018 | if (!raid5_dec_bi_active_stripes(rbi)) { | 1018 | if (!raid5_dec_bi_active_stripes(rbi)) { |
@@ -1048,7 +1048,7 @@ static void ops_run_biofill(struct stripe_head *sh) | |||
1048 | dev->read = rbi = dev->toread; | 1048 | dev->read = rbi = dev->toread; |
1049 | dev->toread = NULL; | 1049 | dev->toread = NULL; |
1050 | spin_unlock_irq(&sh->stripe_lock); | 1050 | spin_unlock_irq(&sh->stripe_lock); |
1051 | while (rbi && rbi->bi_sector < | 1051 | while (rbi && rbi->bi_iter.bi_sector < |
1052 | dev->sector + STRIPE_SECTORS) { | 1052 | dev->sector + STRIPE_SECTORS) { |
1053 | tx = async_copy_data(0, rbi, dev->page, | 1053 | tx = async_copy_data(0, rbi, dev->page, |
1054 | dev->sector, tx); | 1054 | dev->sector, tx); |
@@ -1390,7 +1390,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) | |||
1390 | wbi = dev->written = chosen; | 1390 | wbi = dev->written = chosen; |
1391 | spin_unlock_irq(&sh->stripe_lock); | 1391 | spin_unlock_irq(&sh->stripe_lock); |
1392 | 1392 | ||
1393 | while (wbi && wbi->bi_sector < | 1393 | while (wbi && wbi->bi_iter.bi_sector < |
1394 | dev->sector + STRIPE_SECTORS) { | 1394 | dev->sector + STRIPE_SECTORS) { |
1395 | if (wbi->bi_rw & REQ_FUA) | 1395 | if (wbi->bi_rw & REQ_FUA) |
1396 | set_bit(R5_WantFUA, &dev->flags); | 1396 | set_bit(R5_WantFUA, &dev->flags); |
@@ -2615,7 +2615,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in | |||
2615 | int firstwrite=0; | 2615 | int firstwrite=0; |
2616 | 2616 | ||
2617 | pr_debug("adding bi b#%llu to stripe s#%llu\n", | 2617 | pr_debug("adding bi b#%llu to stripe s#%llu\n", |
2618 | (unsigned long long)bi->bi_sector, | 2618 | (unsigned long long)bi->bi_iter.bi_sector, |
2619 | (unsigned long long)sh->sector); | 2619 | (unsigned long long)sh->sector); |
2620 | 2620 | ||
2621 | /* | 2621 | /* |
@@ -2633,12 +2633,12 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in | |||
2633 | firstwrite = 1; | 2633 | firstwrite = 1; |
2634 | } else | 2634 | } else |
2635 | bip = &sh->dev[dd_idx].toread; | 2635 | bip = &sh->dev[dd_idx].toread; |
2636 | while (*bip && (*bip)->bi_sector < bi->bi_sector) { | 2636 | while (*bip && (*bip)->bi_iter.bi_sector < bi->bi_iter.bi_sector) { |
2637 | if (bio_end_sector(*bip) > bi->bi_sector) | 2637 | if (bio_end_sector(*bip) > bi->bi_iter.bi_sector) |
2638 | goto overlap; | 2638 | goto overlap; |
2639 | bip = & (*bip)->bi_next; | 2639 | bip = & (*bip)->bi_next; |
2640 | } | 2640 | } |
2641 | if (*bip && (*bip)->bi_sector < bio_end_sector(bi)) | 2641 | if (*bip && (*bip)->bi_iter.bi_sector < bio_end_sector(bi)) |
2642 | goto overlap; | 2642 | goto overlap; |
2643 | 2643 | ||
2644 | BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); | 2644 | BUG_ON(*bip && bi->bi_next && (*bip) != bi->bi_next); |
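add_stripe_bio keeps each stripe device's toread/towrite list sorted by start sector and refuses overlapping bios; after the conversion the start is bi_iter.bi_sector and the end comes from bio_end_sector(), i.e. start plus bi_iter.bi_size expressed in 512-byte sectors. The sketch below condenses that interval logic into a symmetric overlap test over stand-in types; model_bio_sectors/model_bio_end_sector mirror the kernel helpers and the values are made up:

/* Interval overlap test in the style of add_stripe_bio, over stand-in types. */
#include <assert.h>

typedef unsigned long long sector_t;

struct bvec_iter { sector_t bi_sector; unsigned int bi_size; };
struct bio_model { struct bvec_iter bi_iter; };

/* Models bio_sectors()/bio_end_sector(): size is in bytes, sectors are 512 B. */
static sector_t model_bio_sectors(const struct bio_model *b)
{
	return b->bi_iter.bi_size >> 9;
}

static sector_t model_bio_end_sector(const struct bio_model *b)
{
	return b->bi_iter.bi_sector + model_bio_sectors(b);
}

static int overlaps(const struct bio_model *a, const struct bio_model *b)
{
	/* symmetric form of the sorted-insert checks in add_stripe_bio */
	return a->bi_iter.bi_sector < model_bio_end_sector(b) &&
	       b->bi_iter.bi_sector < model_bio_end_sector(a);
}

int main(void)
{
	struct bio_model queued = { { 2636, 8 * 512 } };  /* sectors 2636..2643   */
	struct bio_model new1   = { { 2644, 8 * 512 } };  /* starts where it ends */
	struct bio_model new2   = { { 2640, 8 * 512 } };  /* overlaps the tail    */

	assert(!overlaps(&queued, &new1));
	assert(overlaps(&queued, &new2));
	return 0;
}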
@@ -2652,7 +2652,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in | |||
2652 | sector_t sector = sh->dev[dd_idx].sector; | 2652 | sector_t sector = sh->dev[dd_idx].sector; |
2653 | for (bi=sh->dev[dd_idx].towrite; | 2653 | for (bi=sh->dev[dd_idx].towrite; |
2654 | sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && | 2654 | sector < sh->dev[dd_idx].sector + STRIPE_SECTORS && |
2655 | bi && bi->bi_sector <= sector; | 2655 | bi && bi->bi_iter.bi_sector <= sector; |
2656 | bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { | 2656 | bi = r5_next_bio(bi, sh->dev[dd_idx].sector)) { |
2657 | if (bio_end_sector(bi) >= sector) | 2657 | if (bio_end_sector(bi) >= sector) |
2658 | sector = bio_end_sector(bi); | 2658 | sector = bio_end_sector(bi); |
@@ -2662,7 +2662,7 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx, in | |||
2662 | } | 2662 | } |
2663 | 2663 | ||
2664 | pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", | 2664 | pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n", |
2665 | (unsigned long long)(*bip)->bi_sector, | 2665 | (unsigned long long)(*bip)->bi_iter.bi_sector, |
2666 | (unsigned long long)sh->sector, dd_idx); | 2666 | (unsigned long long)sh->sector, dd_idx); |
2667 | spin_unlock_irq(&sh->stripe_lock); | 2667 | spin_unlock_irq(&sh->stripe_lock); |
2668 | 2668 | ||
@@ -2737,7 +2737,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
2737 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | 2737 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
2738 | wake_up(&conf->wait_for_overlap); | 2738 | wake_up(&conf->wait_for_overlap); |
2739 | 2739 | ||
2740 | while (bi && bi->bi_sector < | 2740 | while (bi && bi->bi_iter.bi_sector < |
2741 | sh->dev[i].sector + STRIPE_SECTORS) { | 2741 | sh->dev[i].sector + STRIPE_SECTORS) { |
2742 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); | 2742 | struct bio *nextbi = r5_next_bio(bi, sh->dev[i].sector); |
2743 | clear_bit(BIO_UPTODATE, &bi->bi_flags); | 2743 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
@@ -2756,7 +2756,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
2756 | bi = sh->dev[i].written; | 2756 | bi = sh->dev[i].written; |
2757 | sh->dev[i].written = NULL; | 2757 | sh->dev[i].written = NULL; |
2758 | if (bi) bitmap_end = 1; | 2758 | if (bi) bitmap_end = 1; |
2759 | while (bi && bi->bi_sector < | 2759 | while (bi && bi->bi_iter.bi_sector < |
2760 | sh->dev[i].sector + STRIPE_SECTORS) { | 2760 | sh->dev[i].sector + STRIPE_SECTORS) { |
2761 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); | 2761 | struct bio *bi2 = r5_next_bio(bi, sh->dev[i].sector); |
2762 | clear_bit(BIO_UPTODATE, &bi->bi_flags); | 2762 | clear_bit(BIO_UPTODATE, &bi->bi_flags); |
@@ -2780,7 +2780,7 @@ handle_failed_stripe(struct r5conf *conf, struct stripe_head *sh, | |||
2780 | spin_unlock_irq(&sh->stripe_lock); | 2780 | spin_unlock_irq(&sh->stripe_lock); |
2781 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) | 2781 | if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags)) |
2782 | wake_up(&conf->wait_for_overlap); | 2782 | wake_up(&conf->wait_for_overlap); |
2783 | while (bi && bi->bi_sector < | 2783 | while (bi && bi->bi_iter.bi_sector < |
2784 | sh->dev[i].sector + STRIPE_SECTORS) { | 2784 | sh->dev[i].sector + STRIPE_SECTORS) { |
2785 | struct bio *nextbi = | 2785 | struct bio *nextbi = |
2786 | r5_next_bio(bi, sh->dev[i].sector); | 2786 | r5_next_bio(bi, sh->dev[i].sector); |
@@ -3004,7 +3004,7 @@ static void handle_stripe_clean_event(struct r5conf *conf, | |||
3004 | clear_bit(R5_UPTODATE, &dev->flags); | 3004 | clear_bit(R5_UPTODATE, &dev->flags); |
3005 | wbi = dev->written; | 3005 | wbi = dev->written; |
3006 | dev->written = NULL; | 3006 | dev->written = NULL; |
3007 | while (wbi && wbi->bi_sector < | 3007 | while (wbi && wbi->bi_iter.bi_sector < |
3008 | dev->sector + STRIPE_SECTORS) { | 3008 | dev->sector + STRIPE_SECTORS) { |
3009 | wbi2 = r5_next_bio(wbi, dev->sector); | 3009 | wbi2 = r5_next_bio(wbi, dev->sector); |
3010 | if (!raid5_dec_bi_active_stripes(wbi)) { | 3010 | if (!raid5_dec_bi_active_stripes(wbi)) { |
@@ -4096,7 +4096,7 @@ static int raid5_mergeable_bvec(struct request_queue *q, | |||
4096 | 4096 | ||
4097 | static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) | 4097 | static int in_chunk_boundary(struct mddev *mddev, struct bio *bio) |
4098 | { | 4098 | { |
4099 | sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev); | 4099 | sector_t sector = bio->bi_iter.bi_sector + get_start_sect(bio->bi_bdev); |
4100 | unsigned int chunk_sectors = mddev->chunk_sectors; | 4100 | unsigned int chunk_sectors = mddev->chunk_sectors; |
4101 | unsigned int bio_sectors = bio_sectors(bio); | 4101 | unsigned int bio_sectors = bio_sectors(bio); |
4102 | 4102 | ||
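in_chunk_boundary() now builds the absolute device sector from bi_iter.bi_sector plus the partition start and checks that the bio stays inside one chunk: the offset within the chunk, taken with a power-of-two mask, plus the bio's sector count must not exceed the chunk size. A standalone arithmetic sketch with made-up values (chunk_sectors is assumed to be a power of two, which is what the bitmask in the kernel code relies on):

/* Chunk-boundary check in the style of in_chunk_boundary(); sample values. */
#include <stdio.h>

typedef unsigned long long sector_t;

static int in_chunk_boundary(sector_t start_sect, sector_t bi_sector,
			     unsigned int chunk_sectors, unsigned int bio_sectors)
{
	sector_t sector = bi_sector + start_sect;   /* absolute device sector */

	return chunk_sectors >= ((sector & (chunk_sectors - 1)) + bio_sectors);
}

int main(void)
{
	/* 512-sector (256 KiB) chunks, 8-sector bio, partition starting at 2048 */
	printf("%d\n", in_chunk_boundary(2048, 4099, 512, 8));  /* 1: fits      */
	printf("%d\n", in_chunk_boundary(2048, 4601, 512, 8));  /* 0: straddles */
	return 0;
}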
@@ -4233,9 +4233,9 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) | |||
4233 | /* | 4233 | /* |
4234 | * compute position | 4234 | * compute position |
4235 | */ | 4235 | */ |
4236 | align_bi->bi_sector = raid5_compute_sector(conf, raid_bio->bi_sector, | 4236 | align_bi->bi_iter.bi_sector = |
4237 | 0, | 4237 | raid5_compute_sector(conf, raid_bio->bi_iter.bi_sector, |
4238 | &dd_idx, NULL); | 4238 | 0, &dd_idx, NULL); |
4239 | 4239 | ||
4240 | end_sector = bio_end_sector(align_bi); | 4240 | end_sector = bio_end_sector(align_bi); |
4241 | rcu_read_lock(); | 4241 | rcu_read_lock(); |
@@ -4260,7 +4260,8 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) | |||
4260 | align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); | 4260 | align_bi->bi_flags &= ~(1 << BIO_SEG_VALID); |
4261 | 4261 | ||
4262 | if (!bio_fits_rdev(align_bi) || | 4262 | if (!bio_fits_rdev(align_bi) || |
4263 | is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi), | 4263 | is_badblock(rdev, align_bi->bi_iter.bi_sector, |
4264 | bio_sectors(align_bi), | ||
4264 | &first_bad, &bad_sectors)) { | 4265 | &first_bad, &bad_sectors)) { |
4265 | /* too big in some way, or has a known bad block */ | 4266 | /* too big in some way, or has a known bad block */ |
4266 | bio_put(align_bi); | 4267 | bio_put(align_bi); |
@@ -4269,7 +4270,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) | |||
4269 | } | 4270 | } |
4270 | 4271 | ||
4271 | /* No reshape active, so we can trust rdev->data_offset */ | 4272 | /* No reshape active, so we can trust rdev->data_offset */ |
4272 | align_bi->bi_sector += rdev->data_offset; | 4273 | align_bi->bi_iter.bi_sector += rdev->data_offset; |
4273 | 4274 | ||
4274 | spin_lock_irq(&conf->device_lock); | 4275 | spin_lock_irq(&conf->device_lock); |
4275 | wait_event_lock_irq(conf->wait_for_stripe, | 4276 | wait_event_lock_irq(conf->wait_for_stripe, |
@@ -4281,7 +4282,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio) | |||
4281 | if (mddev->gendisk) | 4282 | if (mddev->gendisk) |
4282 | trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), | 4283 | trace_block_bio_remap(bdev_get_queue(align_bi->bi_bdev), |
4283 | align_bi, disk_devt(mddev->gendisk), | 4284 | align_bi, disk_devt(mddev->gendisk), |
4284 | raid_bio->bi_sector); | 4285 | raid_bio->bi_iter.bi_sector); |
4285 | generic_make_request(align_bi); | 4286 | generic_make_request(align_bi); |
4286 | return 1; | 4287 | return 1; |
4287 | } else { | 4288 | } else { |
@@ -4464,8 +4465,8 @@ static void make_discard_request(struct mddev *mddev, struct bio *bi) | |||
4464 | /* Skip discard while reshape is happening */ | 4465 | /* Skip discard while reshape is happening */ |
4465 | return; | 4466 | return; |
4466 | 4467 | ||
4467 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); | 4468 | logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
4468 | last_sector = bi->bi_sector + (bi->bi_size>>9); | 4469 | last_sector = bi->bi_iter.bi_sector + (bi->bi_iter.bi_size>>9); |
4469 | 4470 | ||
4470 | bi->bi_next = NULL; | 4471 | bi->bi_next = NULL; |
4471 | bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ | 4472 | bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ |
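make_discard_request() likewise derives its working range from the iterator: the start is bi_iter.bi_sector rounded down to a STRIPE_SECTORS boundary, and the end is bi_iter.bi_sector plus bi_iter.bi_size converted from bytes to sectors. A quick standalone sketch of that arithmetic (STRIPE_SECTORS of 8 assumes the usual 4 KiB stripe page; the sample values are made up):

/* Stripe-aligned range computation as in make_discard_request(); toy values. */
#include <stdio.h>

typedef unsigned long long sector_t;

#define STRIPE_SECTORS 8ULL

int main(void)
{
	sector_t bi_sector   = 4469;        /* bio->bi_iter.bi_sector         */
	unsigned int bi_size = 1 << 20;     /* bio->bi_iter.bi_size, in bytes */

	sector_t logical_sector = bi_sector & ~(STRIPE_SECTORS - 1);
	sector_t last_sector    = bi_sector + (bi_size >> 9);

	/* 4469 rounds down to 4464; 1 MiB is 2048 sectors, so last is 6517. */
	printf("logical=%llu last=%llu\n", logical_sector, last_sector);
	return 0;
}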
@@ -4569,7 +4570,7 @@ static void make_request(struct mddev *mddev, struct bio * bi) | |||
4569 | return; | 4570 | return; |
4570 | } | 4571 | } |
4571 | 4572 | ||
4572 | logical_sector = bi->bi_sector & ~((sector_t)STRIPE_SECTORS-1); | 4573 | logical_sector = bi->bi_iter.bi_sector & ~((sector_t)STRIPE_SECTORS-1); |
4573 | last_sector = bio_end_sector(bi); | 4574 | last_sector = bio_end_sector(bi); |
4574 | bi->bi_next = NULL; | 4575 | bi->bi_next = NULL; |
4575 | bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ | 4576 | bi->bi_phys_segments = 1; /* over-loaded to count active stripes */ |
@@ -5053,7 +5054,8 @@ static int retry_aligned_read(struct r5conf *conf, struct bio *raid_bio) | |||
5053 | int remaining; | 5054 | int remaining; |
5054 | int handled = 0; | 5055 | int handled = 0; |
5055 | 5056 | ||
5056 | logical_sector = raid_bio->bi_sector & ~((sector_t)STRIPE_SECTORS-1); | 5057 | logical_sector = raid_bio->bi_iter.bi_sector & |
5058 | ~((sector_t)STRIPE_SECTORS-1); | ||
5057 | sector = raid5_compute_sector(conf, logical_sector, | 5059 | sector = raid5_compute_sector(conf, logical_sector, |
5058 | 0, &dd_idx, NULL); | 5060 | 0, &dd_idx, NULL); |
5059 | last_sector = bio_end_sector(raid_bio); | 5061 | last_sector = bio_end_sector(raid_bio); |