Diffstat (limited to 'drivers/md/bcache/io.c')

 drivers/md/bcache/io.c | 68 ++++++++++++++++++++++++++++++--------------------------------------------------------------------
 1 file changed, 26 insertions(+), 42 deletions(-)
diff --git a/drivers/md/bcache/io.c b/drivers/md/bcache/io.c
index 48efd4dea645..9056632995b1 100644
--- a/drivers/md/bcache/io.c
+++ b/drivers/md/bcache/io.c
@@ -9,6 +9,8 @@
 #include "bset.h"
 #include "debug.h"
 
+#include <linux/blkdev.h>
+
 static void bch_bi_idx_hack_endio(struct bio *bio, int error)
 {
 	struct bio *p = bio->bi_private;
@@ -66,13 +68,6 @@ static void bch_generic_make_request_hack(struct bio *bio)
  * The newly allocated bio will point to @bio's bi_io_vec, if the split was on a
  * bvec boundry; it is the caller's responsibility to ensure that @bio is not
  * freed before the split.
- *
- * If bch_bio_split() is running under generic_make_request(), it's not safe to
- * allocate more than one bio from the same bio set. Therefore, if it is running
- * under generic_make_request() it masks out __GFP_WAIT when doing the
- * allocation. The caller must check for failure if there's any possibility of
- * it being called from under generic_make_request(); it is then the caller's
- * responsibility to retry from a safe context (by e.g. punting to workqueue).
  */
 struct bio *bch_bio_split(struct bio *bio, int sectors,
 			  gfp_t gfp, struct bio_set *bs)
@@ -83,20 +78,13 @@ struct bio *bch_bio_split(struct bio *bio, int sectors,
 
 	BUG_ON(sectors <= 0);
 
-	/*
-	 * If we're being called from underneath generic_make_request() and we
-	 * already allocated any bios from this bio set, we risk deadlock if we
-	 * use the mempool. So instead, we possibly fail and let the caller punt
-	 * to workqueue or somesuch and retry in a safe context.
-	 */
-	if (current->bio_list)
-		gfp &= ~__GFP_WAIT;
-
 	if (sectors >= bio_sectors(bio))
 		return bio;
 
 	if (bio->bi_rw & REQ_DISCARD) {
 		ret = bio_alloc_bioset(gfp, 1, bs);
+		if (!ret)
+			return NULL;
 		idx = 0;
 		goto out;
 	}
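The two hunks above change bch_bio_split()'s allocation contract: the function no longer strips __GFP_WAIT itself when running under generic_make_request(), so failure handling moves to the caller, and the discard path now checks bio_alloc_bioset() for NULL explicitly. A minimal sketch of a call site that keeps the old failable behavior by doing the masking itself (illustrative only, not part of this patch; punt_to_workqueue() is a hypothetical helper):

	/*
	 * Illustrative: a caller running under generic_make_request()
	 * masks out __GFP_WAIT itself and retries from a safe context
	 * (e.g. a workqueue) if the split allocation fails.
	 */
	if (current->bio_list)
		gfp &= ~__GFP_WAIT;

	n = bch_bio_split(bio, sectors, gfp, bs);
	if (!n)
		return punt_to_workqueue(bio);	/* hypothetical helper */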
@@ -160,17 +148,18 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
 	struct request_queue *q = bdev_get_queue(bio->bi_bdev);
 	unsigned max_segments = min_t(unsigned, BIO_MAX_PAGES,
 				      queue_max_segments(q));
-	struct bio_vec *bv, *end = bio_iovec(bio) +
-		min_t(int, bio_segments(bio), max_segments);
 
 	if (bio->bi_rw & REQ_DISCARD)
 		return min(ret, q->limits.max_discard_sectors);
 
 	if (bio_segments(bio) > max_segments ||
 	    q->merge_bvec_fn) {
+		struct bio_vec *bv;
+		int i, seg = 0;
+
 		ret = 0;
 
-		for (bv = bio_iovec(bio); bv < end; bv++) {
+		bio_for_each_segment(bv, bio, i) {
 			struct bvec_merge_data bvm = {
 				.bi_bdev	= bio->bi_bdev,
 				.bi_sector	= bio->bi_sector,
@@ -178,10 +167,14 @@ static unsigned bch_bio_max_sectors(struct bio *bio)
 				.bi_rw		= bio->bi_rw,
 			};
 
+			if (seg == max_segments)
+				break;
+
 			if (q->merge_bvec_fn &&
 			    q->merge_bvec_fn(q, &bvm, bv) < (int) bv->bv_len)
 				break;
 
+			seg++;
 			ret += bv->bv_len >> 9;
 		}
 	}
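The bch_bio_max_sectors() hunks swap the open-coded bio_vec pointer walk, bounded by a precomputed end pointer, for the kernel's standard bio_for_each_segment() iterator plus an explicit segment counter. The resulting pattern in isolation (a sketch; the loop body here is a placeholder):

	struct bio_vec *bv;
	int i, seg = 0;

	/*
	 * bio_for_each_segment() walks the bio_vecs starting at
	 * bio->bi_idx; seg enforces the max_segments cap inside the
	 * loop instead of via a precomputed end pointer.
	 */
	bio_for_each_segment(bv, bio, i) {
		if (seg == max_segments)
			break;
		seg++;
		/* examine bv->bv_page / bv->bv_len here */
	}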
@@ -218,30 +211,10 @@ static void bch_bio_submit_split_endio(struct bio *bio, int error)
 	closure_put(cl);
 }
 
-static void __bch_bio_submit_split(struct closure *cl)
-{
-	struct bio_split_hook *s = container_of(cl, struct bio_split_hook, cl);
-	struct bio *bio = s->bio, *n;
-
-	do {
-		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
-				  GFP_NOIO, s->p->bio_split);
-		if (!n)
-			continue_at(cl, __bch_bio_submit_split, system_wq);
-
-		n->bi_end_io = bch_bio_submit_split_endio;
-		n->bi_private = cl;
-
-		closure_get(cl);
-		bch_generic_make_request_hack(n);
-	} while (n != bio);
-
-	continue_at(cl, bch_bio_submit_split_done, NULL);
-}
-
 void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 {
 	struct bio_split_hook *s;
+	struct bio *n;
 
 	if (!bio_has_data(bio) && !(bio->bi_rw & REQ_DISCARD))
 		goto submit;
@@ -250,6 +223,7 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 		goto submit;
 
 	s = mempool_alloc(p->bio_split_hook, GFP_NOIO);
+	closure_init(&s->cl, NULL);
 
 	s->bio = bio;
 	s->p = p;
@@ -257,8 +231,18 @@ void bch_generic_make_request(struct bio *bio, struct bio_split_pool *p)
 	s->bi_private = bio->bi_private;
 	bio_get(bio);
 
-	closure_call(&s->cl, __bch_bio_submit_split, NULL, NULL);
-	return;
+	do {
+		n = bch_bio_split(bio, bch_bio_max_sectors(bio),
+				  GFP_NOIO, s->p->bio_split);
+
+		n->bi_end_io = bch_bio_submit_split_endio;
+		n->bi_private = &s->cl;
+
+		closure_get(&s->cl);
+		bch_generic_make_request_hack(n);
+	} while (n != bio);
+
+	continue_at(&s->cl, bch_bio_submit_split_done, NULL);
 submit:
 	bch_generic_make_request_hack(bio);
 }
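The last three hunks retire __bch_bio_submit_split(): with the __GFP_WAIT masking gone, a GFP_NOIO split allocation cannot fail on this path, so the continue_at(cl, __bch_bio_submit_split, system_wq) retry is unnecessary and the split loop runs inline in bch_generic_make_request(). Each split takes a closure reference before submission; the completion side drops it, and once every reference is released the continue_at() at the end of the loop lets bch_bio_submit_split_done() run. A sketch of that completion side, mirroring bch_bio_submit_split_endio() whose closure_put() tail appears in the context above (the error handling shown is an assumption, not quoted from this file):

	/* Sketch of the completion side of the refcount pattern above. */
	static void example_split_endio(struct bio *n, int error)
	{
		struct closure *cl = n->bi_private;

		if (error)			/* assumed error path */
			clear_bit(BIO_UPTODATE, &n->bi_flags);

		bio_put(n);		/* free the split */
		closure_put(cl);	/* last put lets continue_at() run
					 * bch_bio_submit_split_done() */
	}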