authorDou Liyang <douly.fnst@cn.fujitsu.com>2018-09-08 13:58:38 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-09-18 12:27:24 -0400
commit76f99ae5b54d48430d1f0c5512a84da0ff9761e0 (patch)
tree1ecbfa8208bdafa2db9d44ec240bc4523496ab91
parent8ffe4e61c06a48324cfd97f1199bb9838acce2f2 (diff)
irq/matrix: Spread managed interrupts on allocation
Linux spreads out the non-managed interrupts across the possible target CPUs to avoid vector space exhaustion.

Managed interrupts are treated differently, as for them the vectors are reserved (with guarantee) when the interrupt descriptors are initialized.

When the interrupt is requested a real vector is assigned. The assignment logic uses the first CPU in the affinity mask for assignment. If the interrupt has more than one CPU in the affinity mask, which happens when a multi-queue device has fewer queues than CPUs, then doing the same search as for non-managed interrupts makes sense as it puts the interrupt on the least interrupt-plagued CPU. For single-CPU affine vectors that's obviously a NOOP.

Restructure the matrix allocation code so it does the 'best CPU' search, add the sanity check for an empty affinity mask and adapt the call site in the x86 vector management code.

[ tglx: Added the empty mask check to the core and improved change log ]

Signed-off-by: Dou Liyang <douly.fnst@cn.fujitsu.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: hpa@zytor.com
Link: https://lkml.kernel.org/r/20180908175838.14450-2-dou_liyang@163.com
-rw-r--r--arch/x86/kernel/apic/vector.c9
-rw-r--r--include/linux/irq.h3
-rw-r--r--kernel/irq/matrix.c17
3 files changed, 20 insertions, 9 deletions
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7654febd5102..652e7ffa9b9d 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
 	struct apic_chip_data *apicd = apic_chip_data(irqd);
 	int vector, cpu;
 
-	cpumask_and(vector_searchmask, vector_searchmask, affmsk);
-	cpu = cpumask_first(vector_searchmask);
-	if (cpu >= nr_cpu_ids)
-		return -EINVAL;
+	cpumask_and(vector_searchmask, dest, affmsk);
+
 	/* set_affinity might call here for nothing */
 	if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
 		return 0;
-	vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+	vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
+					  &cpu);
 	trace_vector_alloc_managed(irqd->irq, vector, vector);
 	if (vector < 0)
 		return vector;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12a9957..c9bffda04a45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+			     unsigned int *mapped_cpu);
 void irq_matrix_reserve(struct irq_matrix *m);
 void irq_matrix_remove_reserved(struct irq_matrix *m);
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 67768bbe736e..6e6d467f3dec 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -260,11 +260,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
  * @m:		Matrix pointer
  * @cpu:	On which CPU the interrupt should be allocated
  */
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+			     unsigned int *mapped_cpu)
 {
-	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-	unsigned int bit, end = m->alloc_end;
+	unsigned int bit, cpu, end = m->alloc_end;
+	struct cpumap *cm;
+
+	if (cpumask_empty(msk))
+		return -EINVAL;
 
+	cpu = matrix_find_best_cpu(m, msk);
+	if (cpu == UINT_MAX)
+		return -ENOSPC;
+
+	cm = per_cpu_ptr(m->maps, cpu);
+	end = m->alloc_end;
 	/* Get managed bit which are not allocated */
 	bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
 	bit = find_first_bit(m->scratch_map, end);
@@ -273,6 +283,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
 	set_bit(bit, cm->alloc_map);
 	cm->allocated++;
 	m->total_allocated++;
+	*mapped_cpu = cpu;
 	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
 	return bit;
 }
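
For context (not part of this patch): the 'best CPU' search that irq_matrix_alloc_managed() now performs is done by matrix_find_best_cpu(), which the parent commit (8ffe4e61c06a) split out of the non-managed allocation path. The sketch below is taken from the surrounding kernel/irq/matrix.c source of that era, not from this diff, so treat it as illustrative: it walks the supplied mask and returns the online CPU with the most available vectors, or UINT_MAX if no CPU in the mask qualifies.

/* Sketch of the pre-existing helper called above; see kernel/irq/matrix.c. */
static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
					 const struct cpumask *msk)
{
	unsigned int cpu, best_cpu, maxavl = 0;
	struct cpumap *cm;

	best_cpu = UINT_MAX;

	for_each_cpu(cpu, msk) {
		cm = per_cpu_ptr(m->maps, cpu);

		/* Skip offline CPUs and CPUs with no more free vectors */
		if (!cm->online || cm->available <= maxavl)
			continue;

		best_cpu = cpu;
		maxavl = cm->available;
	}
	return best_cpu;
}

This is why the single UINT_MAX check followed by -ENOSPC in the hunk above suffices: any CPU the helper returns is online and still has room in its vector space, so the subsequent per_cpu_ptr() lookup and bitmap search operate on a usable map.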