summaryrefslogtreecommitdiffstats
path: root/block
diff options
context:
space:
mode:
Diffstat (limited to 'block')
-rw-r--r--block/blk-mq-cpumap.c25
-rw-r--r--block/blk-mq.c18
-rw-r--r--block/blk-mq.h4
3 files changed, 21 insertions(+), 26 deletions(-)
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c
index d0634bcf322f..19b1d9c5f07e 100644
--- a/block/blk-mq-cpumap.c
+++ b/block/blk-mq-cpumap.c
@@ -31,14 +31,16 @@ static int get_first_sibling(unsigned int cpu)
31 return cpu; 31 return cpu;
32} 32}
33 33
34int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, 34int blk_mq_map_queues(struct blk_mq_tag_set *set)
35 const struct cpumask *online_mask)
36{ 35{
36 unsigned int *map = set->mq_map;
37 unsigned int nr_queues = set->nr_hw_queues;
38 const struct cpumask *online_mask = cpu_online_mask;
37 unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; 39 unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling;
38 cpumask_var_t cpus; 40 cpumask_var_t cpus;
39 41
40 if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) 42 if (!alloc_cpumask_var(&cpus, GFP_ATOMIC))
41 return 1; 43 return -ENOMEM;
42 44
43 cpumask_clear(cpus); 45 cpumask_clear(cpus);
44 nr_cpus = nr_uniq_cpus = 0; 46 nr_cpus = nr_uniq_cpus = 0;
@@ -86,23 +88,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
86 return 0; 88 return 0;
87} 89}
88 90
89unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set)
90{
91 unsigned int *map;
92
93 /* If cpus are offline, map them to first hctx */
94 map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL,
95 set->numa_node);
96 if (!map)
97 return NULL;
98
99 if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask))
100 return map;
101
102 kfree(map);
103 return NULL;
104}
105
106/* 91/*
107 * We have no quick way of doing reverse lookups. This is only used at 92 * We have no quick way of doing reverse lookups. This is only used at
108 * queue init time, so runtime isn't important. 93 * queue init time, so runtime isn't important.
diff --git a/block/blk-mq.c b/block/blk-mq.c
index 6e077a9d61a8..a3060078a8da 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -2286,6 +2286,8 @@ EXPORT_SYMBOL_GPL(blk_mq_tags_cpumask);
2286 */ 2286 */
2287int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set) 2287int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2288{ 2288{
2289 int ret;
2290
2289 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS); 2291 BUILD_BUG_ON(BLK_MQ_MAX_DEPTH > 1 << BLK_MQ_UNIQUE_TAG_BITS);
2290 2292
2291 if (!set->nr_hw_queues) 2293 if (!set->nr_hw_queues)
@@ -2324,11 +2326,21 @@ int blk_mq_alloc_tag_set(struct blk_mq_tag_set *set)
2324 if (!set->tags) 2326 if (!set->tags)
2325 return -ENOMEM; 2327 return -ENOMEM;
2326 2328
2327 set->mq_map = blk_mq_make_queue_map(set); 2329 ret = -ENOMEM;
2330 set->mq_map = kzalloc_node(sizeof(*set->mq_map) * nr_cpu_ids,
2331 GFP_KERNEL, set->numa_node);
2328 if (!set->mq_map) 2332 if (!set->mq_map)
2329 goto out_free_tags; 2333 goto out_free_tags;
2330 2334
2331 if (blk_mq_alloc_rq_maps(set)) 2335 if (set->ops->map_queues)
2336 ret = set->ops->map_queues(set);
2337 else
2338 ret = blk_mq_map_queues(set);
2339 if (ret)
2340 goto out_free_mq_map;
2341
2342 ret = blk_mq_alloc_rq_maps(set);
2343 if (ret)
2332 goto out_free_mq_map; 2344 goto out_free_mq_map;
2333 2345
2334 mutex_init(&set->tag_list_lock); 2346 mutex_init(&set->tag_list_lock);
@@ -2342,7 +2354,7 @@ out_free_mq_map:
2342out_free_tags: 2354out_free_tags:
2343 kfree(set->tags); 2355 kfree(set->tags);
2344 set->tags = NULL; 2356 set->tags = NULL;
2345 return -ENOMEM; 2357 return ret;
2346} 2358}
2347EXPORT_SYMBOL(blk_mq_alloc_tag_set); 2359EXPORT_SYMBOL(blk_mq_alloc_tag_set);
2348 2360
diff --git a/block/blk-mq.h b/block/blk-mq.h
index ec774bf4aea2..c92bb7debf85 100644
--- a/block/blk-mq.h
+++ b/block/blk-mq.h
@@ -47,9 +47,7 @@ void blk_mq_disable_hotplug(void);
47/* 47/*
48 * CPU -> queue mappings 48 * CPU -> queue mappings
49 */ 49 */
50extern unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set); 50int blk_mq_map_queues(struct blk_mq_tag_set *set);
51extern int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues,
52 const struct cpumask *online_mask);
53extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int); 51extern int blk_mq_hw_queue_to_node(unsigned int *map, unsigned int);
54 52
55static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q, 53static inline struct blk_mq_hw_ctx *blk_mq_map_queue(struct request_queue *q,