about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorChristoph Hellwig <hch@lst.de>2016-09-14 10:18:53 -0400
committerJens Axboe <axboe@fb.com>2016-09-15 10:42:03 -0400
commitbdd17e75cd97c5c39feee409890a91d0396640fe (patch)
treea7b6b01aa305d466e83255caa08670391c294b33
parent4e68a011428af3211facd932b4003b3fa3ef4faa (diff)
blk-mq: only allocate a single mq_map per tag_set
The mapping is identical for all queues in a tag_set, so stop wasting memory for building multiple. Note that for now I've kept the mq_map pointer in the request_queue, but we'll need to investigate if we can remove it without suffering too much from the additional pointer chasing. The same would apply to the mq_ops pointer as well.

Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
-rw-r--r--block/blk-mq.c22
-rw-r--r--include/linux/blk-mq.h1
2 files changed, 15 insertions, 8 deletions
diff --git a/block/blk-mq.c b/block/blk-mq.c
index ffc9d4a3dbbd..c9499f118ef6 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -1973,7 +1973,6 @@ void blk_mq_release(struct request_queue *q)
1973 kfree(hctx); 1973 kfree(hctx);
1974 } 1974 }
1975 1975
1976 kfree(q->mq_map);
1977 q->mq_map = NULL; 1976 q->mq_map = NULL;
1978 1977
1979 kfree(q->queue_hw_ctx); 1978 kfree(q->queue_hw_ctx);
@@ -2072,9 +2071,7 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2072 if (!q->queue_hw_ctx) 2071 if (!q->queue_hw_ctx)
2073 goto err_percpu; 2072 goto err_percpu;
2074 2073
2075 q->mq_map = blk_mq_make_queue_map(set); 2074 q->mq_map = set->mq_map;
2076 if (!q->mq_map)
2077 goto err_map;
2078 2075
2079 blk_mq_realloc_hw_ctxs(set, q); 2076 blk_mq_realloc_hw_ctxs(set, q);
2080 if (!q->nr_hw_queues) 2077 if (!q->nr_hw_queues)
@@ -2124,8 +2121,6 @@ struct request_queue *blk_mq_init_allocated_queue(struct blk_mq_tag_set *set,
2124 return q; 2121 return q;
2125 2122
2126err_hctxs: 2123err_hctxs:
2127 kfree(q->mq_map);
2128err_map:
2129 kfree(q->queue_hw_ctx); 2124 kfree(q->queue_hw_ctx);
2130err_percpu: 2125err_percpu:
2131 free_percpu(q->queue_ctx); 2126 free_percpu(q->queue_ctx);
@@ -2347,14 +2342,22 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2347 if (!set->tags) 2342 if (!set->tags)
2348 return -ENOMEM; 2343 return -ENOMEM;
2349 2344
2345 set->mq_map = blk_mq_make_queue_map(set);
2346 if (!set->mq_map)
2347 goto out_free_tags;
2348
2350 if (blk_mq_alloc_rq_maps(set)) 2349 if (blk_mq_alloc_rq_maps(set))
2351 goto enomem; 2350 goto out_free_mq_map;
2352 2351
2353 mutex_init(&set->tag_list_lock); 2352 mutex_init(&set->tag_list_lock);
2354 INIT_LIST_HEAD(&set->tag_list); 2353 INIT_LIST_HEAD(&set->tag_list);
2355 2354
2356 return 0; 2355 return 0;
2357enomem: 2356
2357out_free_mq_map:
2358 kfree(set->mq_map);
2359 set->mq_map = NULL;
2360out_free_tags:
2358 kfree(set->tags); 2361 kfree(set->tags);
2359 set->tags = NULL; 2362 set->tags = NULL;
2360 return -ENOMEM; 2363 return -ENOMEM;
@@ -2370,6 +2373,9 @@ void blk_mq_free_tag_set(struct blk_mq_tag_set *set)
2370 blk_mq_free_rq_map(set, set->tags[i], i); 2373 blk_mq_free_rq_map(set, set->tags[i], i);
2371 } 2374 }
2372 2375
2376 kfree(set->mq_map);
2377 set->mq_map = NULL;
2378
2373 kfree(set->tags); 2379 kfree(set->tags);
2374 set->tags = NULL; 2380 set->tags = NULL;
2375} 2381}
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 60ef14cbcd2d..deda16a9bde4 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -67,6 +67,7 @@ struct blk_mq_hw_ctx {
67}; 67};
68 68
69struct blk_mq_tag_set { 69struct blk_mq_tag_set {
70 unsigned int *mq_map;
70 struct blk_mq_ops *ops; 71 struct blk_mq_ops *ops;
71 unsigned int nr_hw_queues; 72 unsigned int nr_hw_queues;
72 unsigned int queue_depth; /* max hw supported */ 73 unsigned int queue_depth; /* max hw supported */