diff options
author | Jianchao Wang <jianchao.w.wang@oracle.com> | 2019-01-24 05:25:32 -0500 |
---|---|---|
committer | Jens Axboe <axboe@kernel.dk> | 2019-02-01 10:33:04 -0500 |
commit | 8ccdf4a3775229314c8bd365ac88c2cbdf36be13 (patch) | |
tree | 8b6c6835912bb3f3076d6694ba264072ae5921b6 /block | |
parent | 058fdecc6de7cdecbf4c59b851e80eb2d6c5295f (diff) |
blk-mq: save queue mapping result into ctx directly
Currently, the queue mapping result is saved in a two-dimensional
array. In the hot path, to get an hctx, we need to do the following:
q->queue_hw_ctx[q->tag_set->map[type].mq_map[cpu]]
This isn't very efficient. Instead, we can save the queue mapping
result directly in the ctx, with one entry per hctx type:
ctx->hctxs[type]
Signed-off-by: Jianchao Wang <jianchao.w.wang@oracle.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'block')
-rw-r--r-- | block/blk-mq-sched.c | 2 | ||||
-rw-r--r-- | block/blk-mq-tag.c | 2 | ||||
-rw-r--r-- | block/blk-mq.c | 4 | ||||
-rw-r--r-- | block/blk-mq.h | 7 | ||||
-rw-r--r-- | block/blk.h | 2 |
5 files changed, 9 insertions, 8 deletions
diff --git a/block/blk-mq-sched.c b/block/blk-mq-sched.c index 140933e4a7d1..40905539afed 100644 --- a/block/blk-mq-sched.c +++ b/block/blk-mq-sched.c | |||
@@ -321,7 +321,7 @@ bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio) | |||
321 | { | 321 | { |
322 | struct elevator_queue *e = q->elevator; | 322 | struct elevator_queue *e = q->elevator; |
323 | struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); | 323 | struct blk_mq_ctx *ctx = blk_mq_get_ctx(q); |
324 | struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx->cpu); | 324 | struct blk_mq_hw_ctx *hctx = blk_mq_map_queue(q, bio->bi_opf, ctx); |
325 | bool ret = false; | 325 | bool ret = false; |
326 | enum hctx_type type; | 326 | enum hctx_type type; |
327 | 327 | ||
diff --git a/block/blk-mq-tag.c b/block/blk-mq-tag.c index 2089c6c62f44..a4931fc7be8a 100644 --- a/block/blk-mq-tag.c +++ b/block/blk-mq-tag.c | |||
@@ -170,7 +170,7 @@ unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) | |||
170 | 170 | ||
171 | data->ctx = blk_mq_get_ctx(data->q); | 171 | data->ctx = blk_mq_get_ctx(data->q); |
172 | data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, | 172 | data->hctx = blk_mq_map_queue(data->q, data->cmd_flags, |
173 | data->ctx->cpu); | 173 | data->ctx); |
174 | tags = blk_mq_tags_from_data(data); | 174 | tags = blk_mq_tags_from_data(data); |
175 | if (data->flags & BLK_MQ_REQ_RESERVED) | 175 | if (data->flags & BLK_MQ_REQ_RESERVED) |
176 | bt = &tags->breserved_tags; | 176 | bt = &tags->breserved_tags; |
diff --git a/block/blk-mq.c b/block/blk-mq.c index 8f5b533764ca..445d0a2642ae 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c | |||
@@ -364,7 +364,7 @@ static struct request *blk_mq_get_request(struct request_queue *q, | |||
364 | } | 364 | } |
365 | if (likely(!data->hctx)) | 365 | if (likely(!data->hctx)) |
366 | data->hctx = blk_mq_map_queue(q, data->cmd_flags, | 366 | data->hctx = blk_mq_map_queue(q, data->cmd_flags, |
367 | data->ctx->cpu); | 367 | data->ctx); |
368 | if (data->cmd_flags & REQ_NOWAIT) | 368 | if (data->cmd_flags & REQ_NOWAIT) |
369 | data->flags |= BLK_MQ_REQ_NOWAIT; | 369 | data->flags |= BLK_MQ_REQ_NOWAIT; |
370 | 370 | ||
@@ -2435,7 +2435,7 @@ static void blk_mq_map_swqueue(struct request_queue *q) | |||
2435 | continue; | 2435 | continue; |
2436 | 2436 | ||
2437 | hctx = blk_mq_map_queue_type(q, j, i); | 2437 | hctx = blk_mq_map_queue_type(q, j, i); |
2438 | 2438 | ctx->hctxs[j] = hctx; | |
2439 | /* | 2439 | /* |
2440 | * If the CPU is already set in the mask, then we've | 2440 | * If the CPU is already set in the mask, then we've |
2441 | * mapped this one already. This can happen if | 2441 | * mapped this one already. This can happen if |
diff --git a/block/blk-mq.h b/block/blk-mq.h index d943d46b0785..9fb06261518e 100644 --- a/block/blk-mq.h +++ b/block/blk-mq.h | |||
@@ -23,6 +23,7 @@ struct blk_mq_ctx { | |||
23 | 23 | ||
24 | unsigned int cpu; | 24 | unsigned int cpu; |
25 | unsigned short index_hw[HCTX_MAX_TYPES]; | 25 | unsigned short index_hw[HCTX_MAX_TYPES]; |
26 | struct blk_mq_hw_ctx *hctxs[HCTX_MAX_TYPES]; | ||
26 | 27 | ||
27 | /* incremented at dispatch time */ | 28 | /* incremented at dispatch time */ |
28 | unsigned long rq_dispatched[2]; | 29 | unsigned long rq_dispatched[2]; |
@@ -97,11 +98,11 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue_type(struct request_queue * | |||
97 | * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue | 98 | * blk_mq_map_queue() - map (cmd_flags,type) to hardware queue |
98 | * @q: request queue | 99 | * @q: request queue |
99 | * @flags: request command flags | 100 | * @flags: request command flags |
100 | * @cpu: CPU | 101 | * @cpu: cpu ctx |
101 | */ | 102 | */ |
102 | static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, | 103 | static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, |
103 | unsigned int flags, | 104 | unsigned int flags, |
104 | unsigned int cpu) | 105 | struct blk_mq_ctx *ctx) |
105 | { | 106 | { |
106 | enum hctx_type type = HCTX_TYPE_DEFAULT; | 107 | enum hctx_type type = HCTX_TYPE_DEFAULT; |
107 | 108 | ||
@@ -116,7 +117,7 @@ static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, | |||
116 | q->tag_set->map[HCTX_TYPE_READ].nr_queues) | 117 | q->tag_set->map[HCTX_TYPE_READ].nr_queues) |
117 | type = HCTX_TYPE_READ; | 118 | type = HCTX_TYPE_READ; |
118 | 119 | ||
119 | return blk_mq_map_queue_type(q, type, cpu); | 120 | return ctx->hctxs[type]; |
120 | } | 121 | } |
121 | 122 | ||
122 | /* | 123 | /* |
diff --git a/block/blk.h b/block/blk.h index 848278c52030..5d636ee41663 100644 --- a/block/blk.h +++ b/block/blk.h | |||
@@ -38,7 +38,7 @@ extern struct ida blk_queue_ida; | |||
38 | static inline struct blk_flush_queue * | 38 | static inline struct blk_flush_queue * |
39 | blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx) | 39 | blk_get_flush_queue(struct request_queue *q, struct blk_mq_ctx *ctx) |
40 | { | 40 | { |
41 | return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx->cpu)->fq; | 41 | return blk_mq_map_queue(q, REQ_OP_FLUSH, ctx)->fq; |
42 | } | 42 | } |
43 | 43 | ||
44 | static inline void __blk_get_queue(struct request_queue *q) | 44 | static inline void __blk_get_queue(struct request_queue *q) |