path: root/kernel/irq/affinity.c
author	Christoph Hellwig <hch@lst.de>	2018-01-11 21:53:05 -0500
committer	Jens Axboe <axboe@kernel.dk>	2018-01-12 13:01:38 -0500
commit	84676c1f21e8ff54befe985f4f14dc1edc10046b (patch)
tree	40fe75ec8bab0c1d48604c1d2483b963a925729d /kernel/irq/affinity.c
parent	c27d53fb445f2d93a1918c3dd7344770b0cd865b (diff)
genirq/affinity: assign vectors to all possible CPUs
Currently we assign managed interrupt vectors to all present CPUs. This
works fine for systems where we only online/offline CPUs. But in the case
of systems that support physical CPU hotplug (or its virtualized
equivalent), the additional CPUs described in the ACPI tables or on the
command line are not covered. To fix this we would either need to
introduce new hotplug CPU states just for this case, or we can start
assigning vectors to possible but not present CPUs.

Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Stefan Haberland <sth@linux.vnet.ibm.com>
Fixes: 4b855ad37194 ("blk-mq: Create hctx for each present CPU")
Cc: linux-kernel@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
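For context, here is a minimal sketch (not part of the patch) of the iterator change described above: for_each_present_cpu() walks only CPUs that are plugged in right now, while for_each_possible_cpu() also walks CPUs that the ACPI tables or the command line allow to be hotplugged later, so the per-node masks already cover them. The helper name below is illustrative; the cpumask iterators, cpumask_set_cpu() and cpu_to_node() are existing kernel APIs.

#include <linux/cpumask.h>
#include <linux/topology.h>

/* Illustrative only: seed one cpumask per NUMA node.  The patch switches
 * the iterator from present CPUs to possible CPUs so that spread vectors
 * already target CPUs that may be hotplugged after probe time.
 */
static void example_build_node_to_cpumask(cpumask_var_t *masks)
{
	int cpu;

	for_each_possible_cpu(cpu)	/* was: for_each_present_cpu(cpu) */
		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
}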
Diffstat (limited to 'kernel/irq/affinity.c')
-rw-r--r--	kernel/irq/affinity.c	30
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/kernel/irq/affinity.c b/kernel/irq/affinity.c
index e12d35108225..a37a3b4b6342 100644
--- a/kernel/irq/affinity.c
+++ b/kernel/irq/affinity.c
@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 	}
 }
 
-static cpumask_var_t *alloc_node_to_present_cpumask(void)
+static cpumask_var_t *alloc_node_to_possible_cpumask(void)
 {
 	cpumask_var_t *masks;
 	int node;
@@ -62,7 +62,7 @@ out_unwind:
 	return NULL;
 }
 
-static void free_node_to_present_cpumask(cpumask_var_t *masks)
+static void free_node_to_possible_cpumask(cpumask_var_t *masks)
 {
 	int node;
 
@@ -71,22 +71,22 @@ static void free_node_to_present_cpumask(cpumask_var_t *masks)
 	kfree(masks);
 }
 
-static void build_node_to_present_cpumask(cpumask_var_t *masks)
+static void build_node_to_possible_cpumask(cpumask_var_t *masks)
 {
 	int cpu;
 
-	for_each_present_cpu(cpu)
+	for_each_possible_cpu(cpu)
 		cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
 }
 
-static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask,
+static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
 				const struct cpumask *mask, nodemask_t *nodemsk)
 {
 	int n, nodes = 0;
 
 	/* Calculate the number of nodes in the supplied affinity mask */
 	for_each_node(n) {
-		if (cpumask_intersects(mask, node_to_present_cpumask[n])) {
+		if (cpumask_intersects(mask, node_to_possible_cpumask[n])) {
 			node_set(n, *nodemsk);
 			nodes++;
 		}
@@ -109,7 +109,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	int last_affv = affv + affd->pre_vectors;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 	struct cpumask *masks;
-	cpumask_var_t nmsk, *node_to_present_cpumask;
+	cpumask_var_t nmsk, *node_to_possible_cpumask;
 
 	/*
 	 * If there aren't any vectors left after applying the pre/post
@@ -125,8 +125,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (!masks)
 		goto out;
 
-	node_to_present_cpumask = alloc_node_to_present_cpumask();
-	if (!node_to_present_cpumask)
+	node_to_possible_cpumask = alloc_node_to_possible_cpumask();
+	if (!node_to_possible_cpumask)
 		goto out;
 
 	/* Fill out vectors at the beginning that don't need affinity */
@@ -135,8 +135,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 
 	/* Stabilize the cpumasks */
 	get_online_cpus();
-	build_node_to_present_cpumask(node_to_present_cpumask);
-	nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask,
+	build_node_to_possible_cpumask(node_to_possible_cpumask);
+	nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask,
 				     &nodemsk);
 
 	/*
@@ -146,7 +146,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 	if (affv <= nodes) {
 		for_each_node_mask(n, nodemsk) {
 			cpumask_copy(masks + curvec,
-				     node_to_present_cpumask[n]);
+				     node_to_possible_cpumask[n]);
 			if (++curvec == last_affv)
 				break;
 		}
@@ -160,7 +160,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
 		vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
-		cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]);
+		cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]);
 
 		/* Calculate the number of cpus per vector */
 		ncpus = cpumask_weight(nmsk);
@@ -192,7 +192,7 @@ done:
 	/* Fill out vectors at the end that don't need affinity */
 	for (; curvec < nvecs; curvec++)
 		cpumask_copy(masks + curvec, irq_default_affinity);
-	free_node_to_present_cpumask(node_to_present_cpumask);
+	free_node_to_possible_cpumask(node_to_possible_cpumask);
 out:
 	free_cpumask_var(nmsk);
 	return masks;
@@ -214,7 +214,7 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
 		return 0;
 
 	get_online_cpus();
-	ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv;
+	ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
 	put_online_cpus();
 	return ret;
 }