author     Jens Axboe <axboe@fb.com>    2016-09-21 12:08:43 -0400
committer  Jens Axboe <axboe@fb.com>    2016-09-22 11:39:53 -0400
commit     841bac2c87fc21c3ecf3bc3354855921735aeec1 (patch)
tree       950195b86586bf2aee0fc21229d083a5b139b1b6 /block
parent     491221f88d00651e449c9caf7415b6453c8a77b7 (diff)
blk-mq: get rid of manual run of queue with __blk_mq_run_hw_queue()
Two cases:
1) blk_mq_alloc_request() needlessly re-runs the queue, after
calling into the tag allocation without NOWAIT set. We don't
need to do that.
2) blk_mq_map_request() should just use blk_mq_run_hw_queue() with
the async flag set to false.
Signed-off-by: Jens Axboe <axboe@fb.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
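
Case 1 above amounts to deleting the retry branch in blk_mq_alloc_request(): when BLK_MQ_REQ_NOWAIT is not set, the blocking tag allocation waits for a free tag itself, so the manual __blk_mq_run_hw_queue() call and the second allocation attempt at this call site add nothing (that, at least, is my reading of the "needless re-run"). As a reader aid, the following editor's sketch shows how the affected part of blk_mq_alloc_request() reads with the patch applied; it is assembled from the new (right-hand) side of the second hunk below, with local declarations and the surrounding setup and teardown elided, and the comment added by me rather than present in the kernel source.

        ctx = blk_mq_get_ctx(q);
        hctx = q->mq_ops->map_queue(q, ctx->cpu);
        blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
        /*
         * Single allocation attempt: with BLK_MQ_REQ_NOWAIT clear, the
         * tag allocator itself waits for a tag, so no manual queue run
         * or retry is needed here (that is the branch this patch removes).
         */
        rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
        blk_mq_put_ctx(ctx);

        if (!rq) {
                blk_queue_exit(q);
                return ERR_PTR(-EWOULDBLOCK);
        }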
Diffstat (limited to 'block')
-rw-r--r--   block/blk-mq.c   16
1 file changed, 2 insertions, 14 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index e0a69daddbd8..c29700010b5c 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -34,8 +34,6 @@
 static DEFINE_MUTEX(all_q_mutex);
 static LIST_HEAD(all_q_list);
 
-static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx);
-
 /*
  * Check if any of the ctx's have pending work in this hardware queue
  */
@@ -228,19 +226,9 @@ struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
         ctx = blk_mq_get_ctx(q);
         hctx = q->mq_ops->map_queue(q, ctx->cpu);
         blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-
         rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
-        if (!rq && !(flags & BLK_MQ_REQ_NOWAIT)) {
-                __blk_mq_run_hw_queue(hctx);
-                blk_mq_put_ctx(ctx);
-
-                ctx = blk_mq_get_ctx(q);
-                hctx = q->mq_ops->map_queue(q, ctx->cpu);
-                blk_mq_set_alloc_data(&alloc_data, q, flags, ctx, hctx);
-                rq = __blk_mq_alloc_request(&alloc_data, rw, 0);
-                ctx = alloc_data.ctx;
-        }
         blk_mq_put_ctx(ctx);
+
         if (!rq) {
                 blk_queue_exit(q);
                 return ERR_PTR(-EWOULDBLOCK);
@@ -1225,7 +1213,7 @@ static struct request *blk_mq_map_request(struct request_queue *q,
         blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
         rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
         if (unlikely(!rq)) {
-                __blk_mq_run_hw_queue(hctx);
+                blk_mq_run_hw_queue(hctx, false);
                 blk_mq_put_ctx(ctx);
                 trace_block_sleeprq(q, bio, op);
 
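
Case 2 is the one-line substitution in the last hunk: the slow path of blk_mq_map_request() now kicks the queue through the exported blk_mq_run_hw_queue() with the async argument set to false (a synchronous run) instead of calling the internal __blk_mq_run_hw_queue() directly. With no caller left ahead of that function's definition, the forward declaration removed in the first hunk is presumably no longer needed. For context, a sketch of the resulting slow path, taken from the new side of the hunk with the surrounding code elided and the comments added by me:

        blk_mq_set_alloc_data(&alloc_data, q, BLK_MQ_REQ_NOWAIT, ctx, hctx);
        rq = __blk_mq_alloc_request(&alloc_data, op, op_flags);
        if (unlikely(!rq)) {
                /*
                 * The NOWAIT attempt found no free tag: kick the queue via
                 * the public helper (async == false) rather than the
                 * internal __blk_mq_run_hw_queue(), then drop the ctx and
                 * emit the sleeprq trace point.
                 */
                blk_mq_run_hw_queue(hctx, false);
                blk_mq_put_ctx(ctx);
                trace_block_sleeprq(q, bio, op);

                /* ... remainder of the slow path, outside this hunk ... */
        }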