about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorDou Liyang <douly.fnst@cn.fujitsu.com>2018-09-08 13:58:37 -0400
committerThomas Gleixner <tglx@linutronix.de>2018-09-18 12:27:24 -0400
commit8ffe4e61c06a48324cfd97f1199bb9838acce2f2 (patch)
tree81572b0099e375518189f08c092b46f6f9da2d5f
parent7876320f88802b22d4e2daf7eb027dd14175a0f8 (diff)
irq/matrix: Split out the CPU selection code into a helper
Linux finds the CPU which has the lowest vector allocation count to spread out the non-managed interrupts across the possible target CPUs, but does not do so for managed interrupts.

Split out the CPU selection code into a helper function for reuse. No functional change.

Signed-off-by: Dou Liyang <douly.fnst@cn.fujitsu.com> Signed-off-by: Thomas Gleixner <tglx@linutronix.de> Cc: hpa@zytor.com Link: https://lkml.kernel.org/r/20180908175838.14450-1-dou_liyang@163.com
-rw-r--r--kernel/irq/matrix.c65
1 file changed, 38 insertions, 27 deletions
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5092494bf261..67768bbe736e 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
124 return area; 124 return area;
125} 125}
126 126
127/* Find the best CPU which has the lowest vector allocation count */
128static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
129 const struct cpumask *msk)
130{
131 unsigned int cpu, best_cpu, maxavl = 0;
132 struct cpumap *cm;
133
134 best_cpu = UINT_MAX;
135
136 for_each_cpu(cpu, msk) {
137 cm = per_cpu_ptr(m->maps, cpu);
138
139 if (!cm->online || cm->available <= maxavl)
140 continue;
141
142 best_cpu = cpu;
143 maxavl = cm->available;
144 }
145 return best_cpu;
146}
147
127/** 148/**
128 * irq_matrix_assign_system - Assign system wide entry in the matrix 149 * irq_matrix_assign_system - Assign system wide entry in the matrix
129 * @m: Matrix pointer 150 * @m: Matrix pointer
@@ -322,37 +343,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
322int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, 343int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
323 bool reserved, unsigned int *mapped_cpu) 344 bool reserved, unsigned int *mapped_cpu)
324{ 345{
325 unsigned int cpu, best_cpu, maxavl = 0; 346 unsigned int cpu, bit;
326 struct cpumap *cm; 347 struct cpumap *cm;
327 unsigned int bit;
328 348
329 best_cpu = UINT_MAX; 349 cpu = matrix_find_best_cpu(m, msk);
330 for_each_cpu(cpu, msk) { 350 if (cpu == UINT_MAX)
331 cm = per_cpu_ptr(m->maps, cpu); 351 return -ENOSPC;
332
333 if (!cm->online || cm->available <= maxavl)
334 continue;
335 352
336 best_cpu = cpu; 353 cm = per_cpu_ptr(m->maps, cpu);
337 maxavl = cm->available; 354 bit = matrix_alloc_area(m, cm, 1, false);
338 } 355 if (bit >= m->alloc_end)
356 return -ENOSPC;
357 cm->allocated++;
358 cm->available--;
359 m->total_allocated++;
360 m->global_available--;
361 if (reserved)
362 m->global_reserved--;
363 *mapped_cpu = cpu;
364 trace_irq_matrix_alloc(bit, cpu, m, cm);
365 return bit;
339 366
340 if (maxavl) {
341 cm = per_cpu_ptr(m->maps, best_cpu);
342 bit = matrix_alloc_area(m, cm, 1, false);
343 if (bit < m->alloc_end) {
344 cm->allocated++;
345 cm->available--;
346 m->total_allocated++;
347 m->global_available--;
348 if (reserved)
349 m->global_reserved--;
350 *mapped_cpu = best_cpu;
351 trace_irq_matrix_alloc(bit, best_cpu, m, cm);
352 return bit;
353 }
354 }
355 return -ENOSPC;
356} 367}
357 368
358/** 369/**