author	Ming Lei <ming.lei@redhat.com>	2018-04-08 05:48:14 -0400
committer	Jens Axboe <axboe@kernel.dk>	2018-04-10 10:38:46 -0400
commit	37c7c6c76d431dd7ef9c29d95f6052bd425f004c (patch)
tree	f67363b026b429b79416d9f171d7df9ecca529d4
parent	127276c6ce5a30fcc806b7fe53015f4f89b62956 (diff)
blk-mq: remove code for dealing with remapping queue
Firstly, since commit 4b855ad37194 ("blk-mq: Create hctx for each present CPU"), blk-mq no longer remaps queues after the CPU topology changes.

Secondly, set->nr_hw_queues can't be bigger than nr_cpu_ids, and we now map all possible CPUs to hw queues, so at least one CPU is mapped to each hctx.

The queue mapping has therefore become static and fixed, just like a percpu variable, and we no longer need to handle queue remapping.

Cc: Stefan Haberland <sth@linux.vnet.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Sagi Grimberg <sagi@grimberg.me>
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
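[Editor's note: to illustrate why every hctx now ends up with at least one software queue mapped to it, here is a minimal user-space C sketch of a static possible-CPU-to-hctx map. It is not the kernel code; NR_CPUS, NR_QUEUES and the round-robin spread are illustrative assumptions standing in for nr_cpu_ids, set->nr_hw_queues and the default blk-mq mapping. Because the queue count never exceeds the CPU count, every hctx index in [0, NR_QUEUES) receives at least one CPU, which is exactly what the new WARN_ON(!hctx->nr_ctx) relies on.]

/* illustrative sketch, not kernel code */
#include <stdio.h>

#define NR_CPUS   8   /* stand-in for nr_cpu_ids */
#define NR_QUEUES 3   /* stand-in for set->nr_hw_queues, always <= NR_CPUS */

int main(void)
{
	unsigned int mq_map[NR_CPUS];   /* stand-in for q->mq_map[] */
	unsigned int cpu;

	/* assign every possible CPU an hctx index up front (round-robin spread) */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		mq_map[cpu] = cpu % NR_QUEUES;

	/* each hctx index in [0, NR_QUEUES) is hit at least once */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu %u -> hctx %u\n", cpu, mq_map[cpu]);

	return 0;
}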
-rw-r--r--	block/blk-mq.c	34
1 file changed, 3 insertions(+), 31 deletions(-)
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 0ee9d8e964b3..0dc9e341c2a7 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2329,7 +2329,7 @@ static void blk_mq_free_map_and_requests(struct blk_mq_tag_set *set,
 
 static void blk_mq_map_swqueue(struct request_queue *q)
 {
-	unsigned int i, hctx_idx;
+	unsigned int i;
 	struct blk_mq_hw_ctx *hctx;
 	struct blk_mq_ctx *ctx;
 	struct blk_mq_tag_set *set = q->tag_set;
@@ -2346,23 +2346,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 
 	/*
 	 * Map software to hardware queues.
-	 *
-	 * If the cpu isn't present, the cpu is mapped to first hctx.
 	 */
 	for_each_possible_cpu(i) {
-		hctx_idx = q->mq_map[i];
-		/* unmapped hw queue can be remapped after CPU topo changed */
-		if (!set->tags[hctx_idx] &&
-		    !__blk_mq_alloc_rq_map(set, hctx_idx)) {
-			/*
-			 * If tags initialization fail for some hctx,
-			 * that hctx won't be brought online. In this
-			 * case, remap the current ctx to hctx[0] which
-			 * is guaranteed to always have tags allocated
-			 */
-			q->mq_map[i] = 0;
-		}
-
 		ctx = per_cpu_ptr(q->queue_ctx, i);
 		hctx = blk_mq_map_queue(q, i);
 
@@ -2374,21 +2359,8 @@ static void blk_mq_map_swqueue(struct request_queue *q)
 	mutex_unlock(&q->sysfs_lock);
 
 	queue_for_each_hw_ctx(q, hctx, i) {
-		/*
-		 * If no software queues are mapped to this hardware queue,
-		 * disable it and free the request entries.
-		 */
-		if (!hctx->nr_ctx) {
-			/* Never unmap queue 0. We need it as a
-			 * fallback in case of a new remap fails
-			 * allocation
-			 */
-			if (i && set->tags[i])
-				blk_mq_free_map_and_requests(set, i);
-
-			hctx->tags = NULL;
-			continue;
-		}
+		/* every hctx should get mapped by at least one CPU */
+		WARN_ON(!hctx->nr_ctx);
 
 		hctx->tags = set->tags[i];
 		WARN_ON(!hctx->tags);