diff options
author:    Ming Lei <tom.leiming@gmail.com>  2014-05-31 12:43:37 -0400
committer: Jens Axboe <axboe@fb.com>         2014-06-03 23:04:39 -0400
commit:    cb96a42cc1f50ba1c7b1e9b2343bec80b926107f (patch)
tree:      c69f953fcd7d04288903abe73a0f92f155882bfd /block
parent:    1aecfe4887713838c79bc52f774609a57db4f988 (diff)
blk-mq: fix schedule from atomic context
blk_mq_put_ctx() has to be called before io_schedule() in
bt_get().
This patch fixes the problem by taking an approach similar to the one
used in percpu_ida allocation for this situation.
Signed-off-by: Ming Lei <tom.leiming@gmail.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block')
-rw-r--r--  block/blk-mq-tag.c  48
-rw-r--r--  block/blk-mq-tag.h   2
-rw-r--r--  block/blk-mq.c      36
-rw-r--r--  block/blk-mq.h      23
4 files changed, 78 insertions, 31 deletions
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index d90c4aeb7dd3..1aab39f71d95 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
@@ -221,8 +221,10 @@ static struct bt_wait_state *bt_wait_ptr(struct blk_mq_bitmap_tags *bt, | |||
221 | return bs; | 221 | return bs; |
222 | } | 222 | } |
223 | 223 | ||
224 | static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, | 224 | static int bt_get(struct blk_mq_alloc_data *data, |
225 | unsigned int *last_tag, gfp_t gfp) | 225 | struct blk_mq_bitmap_tags *bt, |
226 | struct blk_mq_hw_ctx *hctx, | ||
227 | unsigned int *last_tag) | ||
226 | { | 228 | { |
227 | struct bt_wait_state *bs; | 229 | struct bt_wait_state *bs; |
228 | DEFINE_WAIT(wait); | 230 | DEFINE_WAIT(wait); |
@@ -232,7 +234,7 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, | |||
232 | if (tag != -1) | 234 | if (tag != -1) |
233 | return tag; | 235 | return tag; |
234 | 236 | ||
235 | if (!(gfp & __GFP_WAIT)) | 237 | if (!(data->gfp & __GFP_WAIT)) |
236 | return -1; | 238 | return -1; |
237 | 239 | ||
238 | bs = bt_wait_ptr(bt, hctx); | 240 | bs = bt_wait_ptr(bt, hctx); |
@@ -249,50 +251,62 @@ static int bt_get(struct blk_mq_bitmap_tags *bt, struct blk_mq_hw_ctx *hctx, | |||
249 | if (was_empty) | 251 | if (was_empty) |
250 | atomic_set(&bs->wait_cnt, bt->wake_cnt); | 252 | atomic_set(&bs->wait_cnt, bt->wake_cnt); |
251 | 253 | ||
254 | blk_mq_put_ctx(data->ctx); | ||
255 | |||
252 | io_schedule(); | 256 | io_schedule(); |
257 | |||
258 | data->ctx = blk_mq_get_ctx(data->q); | ||
259 | data->hctx = data->q->mq_ops->map_queue(data->q, | ||
260 | data->ctx->cpu); | ||
261 | if (data->reserved) { | ||
262 | bt = &data->hctx->tags->breserved_tags; | ||
263 | } else { | ||
264 | last_tag = &data->ctx->last_tag; | ||
265 | hctx = data->hctx; | ||
266 | bt = &hctx->tags->bitmap_tags; | ||
267 | } | ||
268 | finish_wait(&bs->wait, &wait); | ||
269 | bs = bt_wait_ptr(bt, hctx); | ||
253 | } while (1); | 270 | } while (1); |
254 | 271 | ||
255 | finish_wait(&bs->wait, &wait); | 272 | finish_wait(&bs->wait, &wait); |
256 | return tag; | 273 | return tag; |
257 | } | 274 | } |
258 | 275 | ||
259 | static unsigned int __blk_mq_get_tag(struct blk_mq_tags *tags, | 276 | static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data) |
260 | struct blk_mq_hw_ctx *hctx, | ||
261 | unsigned int *last_tag, gfp_t gfp) | ||
262 | { | 277 | { |
263 | int tag; | 278 | int tag; |
264 | 279 | ||
265 | tag = bt_get(&tags->bitmap_tags, hctx, last_tag, gfp); | 280 | tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx, |
281 | &data->ctx->last_tag); | ||
266 | if (tag >= 0) | 282 | if (tag >= 0) |
267 | return tag + tags->nr_reserved_tags; | 283 | return tag + data->hctx->tags->nr_reserved_tags; |
268 | 284 | ||
269 | return BLK_MQ_TAG_FAIL; | 285 | return BLK_MQ_TAG_FAIL; |
270 | } | 286 | } |
271 | 287 | ||
272 | static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_tags *tags, | 288 | static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data) |
273 | gfp_t gfp) | ||
274 | { | 289 | { |
275 | int tag, zero = 0; | 290 | int tag, zero = 0; |
276 | 291 | ||
277 | if (unlikely(!tags->nr_reserved_tags)) { | 292 | if (unlikely(!data->hctx->tags->nr_reserved_tags)) { |
278 | WARN_ON_ONCE(1); | 293 | WARN_ON_ONCE(1); |
279 | return BLK_MQ_TAG_FAIL; | 294 | return BLK_MQ_TAG_FAIL; |
280 | } | 295 | } |
281 | 296 | ||
282 | tag = bt_get(&tags->breserved_tags, NULL, &zero, gfp); | 297 | tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, &zero); |
283 | if (tag < 0) | 298 | if (tag < 0) |
284 | return BLK_MQ_TAG_FAIL; | 299 | return BLK_MQ_TAG_FAIL; |
285 | 300 | ||
286 | return tag; | 301 | return tag; |
287 | } | 302 | } |
288 | 303 | ||
289 | unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, | 304 | unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) |
290 | gfp_t gfp, bool reserved) | ||
291 | { | 305 | { |
292 | if (!reserved) | 306 | if (!data->reserved) |
293 | return __blk_mq_get_tag(hctx->tags, hctx, last_tag, gfp); | 307 | return __blk_mq_get_tag(data); |
294 | 308 | ||
295 | return __blk_mq_get_reserved_tag(hctx->tags, gfp); | 309 | return __blk_mq_get_reserved_tag(data); |
296 | } | 310 | } |
297 | 311 | ||
298 | static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt) | 312 | static struct bt_wait_state *bt_wake_ptr(struct blk_mq_bitmap_tags *bt) |
diff --git a/block/blk-mq-tag.h b/block/blk-mq-tag.h index c959de58d2a5..98696a65d4d4 100644 --- a/block/blk-mq-tag.h +++ b/block/blk-mq-tag.h | |||
@@ -48,7 +48,7 @@ struct blk_mq_tags { | |||
48 | extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); | 48 | extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int reserved_tags, int node); |
49 | extern void blk_mq_free_tags(struct blk_mq_tags *tags); | 49 | extern void blk_mq_free_tags(struct blk_mq_tags *tags); |
50 | 50 | ||
51 | extern unsigned int blk_mq_get_tag(struct blk_mq_hw_ctx *hctx, unsigned int *last_tag, gfp_t gfp, bool reserved); | 51 | extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data); |
52 | extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag); | 52 | extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, unsigned int tag, unsigned int *last_tag); |
53 | extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); | 53 | extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags); |
54 | extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); | 54 | extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page); |
diff --git a/block/blk-mq.c b/block/blk-mq.c index b9230c522c6b..43eb3156e110 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -210,24 +210,23 @@ static void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx, | |||
210 | } | 210 | } |
211 | 211 | ||
212 | static struct request * | 212 | static struct request * |
213 | __blk_mq_alloc_request(struct request_queue *q, struct blk_mq_hw_ctx *hctx, | 213 | __blk_mq_alloc_request(struct blk_mq_alloc_data *data, int rw) |
214 | struct blk_mq_ctx *ctx, int rw, gfp_t gfp, bool reserved) | ||
215 | { | 214 | { |
216 | struct request *rq; | 215 | struct request *rq; |
217 | unsigned int tag; | 216 | unsigned int tag; |
218 | 217 | ||
219 | tag = blk_mq_get_tag(hctx, &ctx->last_tag, gfp, reserved); | 218 | tag = blk_mq_get_tag(data); |
220 | if (tag != BLK_MQ_TAG_FAIL) { | 219 | if (tag != BLK_MQ_TAG_FAIL) { |
221 | rq = hctx->tags->rqs[tag]; | 220 | rq = data->hctx->tags->rqs[tag]; |
222 | 221 | ||
223 | rq->cmd_flags = 0; | 222 | rq->cmd_flags = 0; |
224 | if (blk_mq_tag_busy(hctx)) { | 223 | if (blk_mq_tag_busy(data->hctx)) { |
225 | rq->cmd_flags = REQ_MQ_INFLIGHT; | 224 | rq->cmd_flags = REQ_MQ_INFLIGHT; |
226 | atomic_inc(&hctx->nr_active); | 225 | atomic_inc(&data->hctx->nr_active); |
227 | } | 226 | } |
228 | 227 | ||
229 | rq->tag = tag; | 228 | rq->tag = tag; |
230 | blk_mq_rq_ctx_init(q, ctx, rq, rw); | 229 | blk_mq_rq_ctx_init(data->q, data->ctx, rq, rw); |
231 | return rq; | 230 | return rq; |
232 | } | 231 | } |
233 | 232 | ||
@@ -240,22 +239,27 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw, gfp_t gfp, | |||
240 | struct blk_mq_ctx *ctx; | 239 | struct blk_mq_ctx *ctx; |
241 | struct blk_mq_hw_ctx *hctx; | 240 | struct blk_mq_hw_ctx *hctx; |
242 | struct request *rq; | 241 | struct request *rq; |
242 | struct blk_mq_alloc_data alloc_data; | ||
243 | 243 | ||
244 | if (blk_mq_queue_enter(q)) | 244 | if (blk_mq_queue_enter(q)) |
245 | return NULL; | 245 | return NULL; |
246 | 246 | ||
247 | ctx = blk_mq_get_ctx(q); | 247 | ctx = blk_mq_get_ctx(q); |
248 | hctx = q->mq_ops->map_queue(q, ctx->cpu); | 248 | hctx = q->mq_ops->map_queue(q, ctx->cpu); |
249 | blk_mq_set_alloc_data(&alloc_data, q, gfp & ~__GFP_WAIT, | ||
250 | reserved, ctx, hctx); | ||
249 | 251 | ||
250 | rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp & ~__GFP_WAIT, | 252 | rq = __blk_mq_alloc_request(&alloc_data, rw); |
251 | reserved); | ||
252 | if (!rq && (gfp & __GFP_WAIT)) { | 253 | if (!rq && (gfp & __GFP_WAIT)) { |
253 | __blk_mq_run_hw_queue(hctx); | 254 | __blk_mq_run_hw_queue(hctx); |
254 | blk_mq_put_ctx(ctx); | 255 | blk_mq_put_ctx(ctx); |
255 | 256 | ||
256 | ctx = blk_mq_get_ctx(q); | 257 | ctx = blk_mq_get_ctx(q); |
257 | hctx = q->mq_ops->map_queue(q, ctx->cpu); | 258 | hctx = q->mq_ops->map_queue(q, ctx->cpu); |
258 | rq = __blk_mq_alloc_request(q, hctx, ctx, rw, gfp, reserved); | 259 | blk_mq_set_alloc_data(&alloc_data, q, gfp, reserved, ctx, |
260 | hctx); | ||
261 | rq = __blk_mq_alloc_request(&alloc_data, rw); | ||
262 | ctx = alloc_data.ctx; | ||
259 | } | 263 | } |
260 | blk_mq_put_ctx(ctx); | 264 | blk_mq_put_ctx(ctx); |
261 | return rq; | 265 | return rq; |
@@ -1136,6 +1140,7 @@ static struct request *blk_mq_map_request(struct request_queue *q, | |||
1136 | struct blk_mq_ctx *ctx; | 1140 | struct blk_mq_ctx *ctx; |
1137 | struct request *rq; | 1141 | struct request *rq; |
1138 | int rw = bio_data_dir(bio); | 1142 | int rw = bio_data_dir(bio); |
1143 | struct blk_mq_alloc_data alloc_data; | ||
1139 | 1144 | ||
1140 | if (unlikely(blk_mq_queue_enter(q))) { | 1145 | if (unlikely(blk_mq_queue_enter(q))) { |
1141 | bio_endio(bio, -EIO); | 1146 | bio_endio(bio, -EIO); |
@@ -1149,7 +1154,9 @@ static struct request *blk_mq_map_request(struct request_queue *q, | |||
1149 | rw |= REQ_SYNC; | 1154 | rw |= REQ_SYNC; |
1150 | 1155 | ||
1151 | trace_block_getrq(q, bio, rw); | 1156 | trace_block_getrq(q, bio, rw); |
1152 | rq = __blk_mq_alloc_request(q, hctx, ctx, rw, GFP_ATOMIC, false); | 1157 | blk_mq_set_alloc_data(&alloc_data, q, GFP_ATOMIC, false, ctx, |
1158 | hctx); | ||
1159 | rq = __blk_mq_alloc_request(&alloc_data, rw); | ||
1153 | if (unlikely(!rq)) { | 1160 | if (unlikely(!rq)) { |
1154 | __blk_mq_run_hw_queue(hctx); | 1161 | __blk_mq_run_hw_queue(hctx); |
1155 | blk_mq_put_ctx(ctx); | 1162 | blk_mq_put_ctx(ctx); |
@@ -1157,8 +1164,11 @@ static struct request *blk_mq_map_request(struct request_queue *q, | |||
1157 | 1164 | ||
1158 | ctx = blk_mq_get_ctx(q); | 1165 | ctx = blk_mq_get_ctx(q); |
1159 | hctx = q->mq_ops->map_queue(q, ctx->cpu); | 1166 | hctx = q->mq_ops->map_queue(q, ctx->cpu); |
1160 | rq = __blk_mq_alloc_request(q, hctx, ctx, rw, | 1167 | blk_mq_set_alloc_data(&alloc_data, q, |
1161 | __GFP_WAIT|GFP_ATOMIC, false); | 1168 | __GFP_WAIT|GFP_ATOMIC, false, ctx, hctx); |
1169 | rq = __blk_mq_alloc_request(&alloc_data, rw); | ||
1170 | ctx = alloc_data.ctx; | ||
1171 | hctx = alloc_data.hctx; | ||
1162 | } | 1172 | } |
1163 | 1173 | ||
1164 | hctx->queued++; | 1174 | hctx->queued++; |
diff --git a/block/blk-mq.h b/block/blk-mq.h index 57a7968e47b3..26460884c6cd 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
@@ -91,4 +91,27 @@ static inline void blk_mq_put_ctx(struct blk_mq_ctx *ctx) | |||
91 | put_cpu(); | 91 | put_cpu(); |
92 | } | 92 | } |
93 | 93 | ||
94 | struct blk_mq_alloc_data { | ||
95 | /* input parameter */ | ||
96 | struct request_queue *q; | ||
97 | gfp_t gfp; | ||
98 | bool reserved; | ||
99 | |||
100 | /* input & output parameter */ | ||
101 | struct blk_mq_ctx *ctx; | ||
102 | struct blk_mq_hw_ctx *hctx; | ||
103 | }; | ||
104 | |||
105 | static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data, | ||
106 | struct request_queue *q, gfp_t gfp, bool reserved, | ||
107 | struct blk_mq_ctx *ctx, | ||
108 | struct blk_mq_hw_ctx *hctx) | ||
109 | { | ||
110 | data->q = q; | ||
111 | data->gfp = gfp; | ||
112 | data->reserved = reserved; | ||
113 | data->ctx = ctx; | ||
114 | data->hctx = hctx; | ||
115 | } | ||
116 | |||
94 | #endif | 117 | #endif |