diff options
author | Christoph Hellwig <hch@lst.de> | 2016-09-14 10:18:55 -0400 |
---|---|---|
committer | Jens Axboe <axboe@fb.com> | 2016-09-15 10:42:03 -0400 |
commit | da695ba236b993f07a540d35c17f271ef08c89f3 (patch) | |
tree | bcf5c0624f3e3889b8c1284f1965cf90b265b78e /block/blk-mq-cpumap.c | |
parent | 7d7e0f90b70f6c5367c2d1c9a7e87dd228bd0816 (diff) |
blk-mq: allow the driver to pass in a queue mapping
This allows drivers to specify their own queue mapping by overriding the
setup-time function that builds the mq_map. This can be used for
example to build the map based on the MSI-X vector mapping provided
by the core interrupt layer for PCI devices.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Keith Busch <keith.busch@intel.com>
Signed-off-by: Jens Axboe <axboe@fb.com>
Diffstat (limited to 'block/blk-mq-cpumap.c')
-rw-r--r-- | block/blk-mq-cpumap.c | 25 |
1 file changed, 5 insertions, 20 deletions
diff --git a/block/blk-mq-cpumap.c b/block/blk-mq-cpumap.c index d0634bcf322f..19b1d9c5f07e 100644 --- a/block/blk-mq-cpumap.c +++ b/block/blk-mq-cpumap.c | |||
@@ -31,14 +31,16 @@ static int get_first_sibling(unsigned int cpu) | |||
31 | return cpu; | 31 | return cpu; |
32 | } | 32 | } |
33 | 33 | ||
34 | int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, | 34 | int blk_mq_map_queues(struct blk_mq_tag_set *set) |
35 | const struct cpumask *online_mask) | ||
36 | { | 35 | { |
36 | unsigned int *map = set->mq_map; | ||
37 | unsigned int nr_queues = set->nr_hw_queues; | ||
38 | const struct cpumask *online_mask = cpu_online_mask; | ||
37 | unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; | 39 | unsigned int i, nr_cpus, nr_uniq_cpus, queue, first_sibling; |
38 | cpumask_var_t cpus; | 40 | cpumask_var_t cpus; |
39 | 41 | ||
40 | if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) | 42 | if (!alloc_cpumask_var(&cpus, GFP_ATOMIC)) |
41 | return 1; | 43 | return -ENOMEM; |
42 | 44 | ||
43 | cpumask_clear(cpus); | 45 | cpumask_clear(cpus); |
44 | nr_cpus = nr_uniq_cpus = 0; | 46 | nr_cpus = nr_uniq_cpus = 0; |
@@ -86,23 +88,6 @@ int blk_mq_update_queue_map(unsigned int *map, unsigned int nr_queues, | |||
86 | return 0; | 88 | return 0; |
87 | } | 89 | } |
88 | 90 | ||
89 | unsigned int *blk_mq_make_queue_map(struct blk_mq_tag_set *set) | ||
90 | { | ||
91 | unsigned int *map; | ||
92 | |||
93 | /* If cpus are offline, map them to first hctx */ | ||
94 | map = kzalloc_node(sizeof(*map) * nr_cpu_ids, GFP_KERNEL, | ||
95 | set->numa_node); | ||
96 | if (!map) | ||
97 | return NULL; | ||
98 | |||
99 | if (!blk_mq_update_queue_map(map, set->nr_hw_queues, cpu_online_mask)) | ||
100 | return map; | ||
101 | |||
102 | kfree(map); | ||
103 | return NULL; | ||
104 | } | ||
105 | |||
106 | /* | 91 | /* |
107 | * We have no quick way of doing reverse lookups. This is only used at | 92 | * We have no quick way of doing reverse lookups. This is only used at |
108 | * queue init time, so runtime isn't important. | 93 | * queue init time, so runtime isn't important. |