Diffstat (limited to 'kernel/irq/matrix.c')
 kernel/irq/matrix.c | 34 ++++++++++++++++++++++++++++++----
 1 file changed, 30 insertions(+), 4 deletions(-)
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 1f0985adf193..30cc217b8631 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -14,6 +14,7 @@ struct cpumap {
 	unsigned int		available;
 	unsigned int		allocated;
 	unsigned int		managed;
+	unsigned int		managed_allocated;
 	bool			initialized;
 	bool			online;
 	unsigned long		alloc_map[IRQ_MATRIX_SIZE];
@@ -145,6 +146,27 @@ static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
 	return best_cpu;
 }
 
+/* Find the best CPU which has the lowest number of managed IRQs allocated */
+static unsigned int matrix_find_best_cpu_managed(struct irq_matrix *m,
+						const struct cpumask *msk)
+{
+	unsigned int cpu, best_cpu, allocated = UINT_MAX;
+	struct cpumap *cm;
+
+	best_cpu = UINT_MAX;
+
+	for_each_cpu(cpu, msk) {
+		cm = per_cpu_ptr(m->maps, cpu);
+
+		if (!cm->online || cm->managed_allocated > allocated)
+			continue;
+
+		best_cpu = cpu;
+		allocated = cm->managed_allocated;
+	}
+	return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:		Matrix pointer
@@ -269,7 +291,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
 	if (cpumask_empty(msk))
 		return -EINVAL;
 
-	cpu = matrix_find_best_cpu(m, msk);
+	cpu = matrix_find_best_cpu_managed(m, msk);
 	if (cpu == UINT_MAX)
 		return -ENOSPC;
 
@@ -282,6 +304,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
 		return -ENOSPC;
 	set_bit(bit, cm->alloc_map);
 	cm->allocated++;
+	cm->managed_allocated++;
 	m->total_allocated++;
 	*mapped_cpu = cpu;
 	trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
@@ -395,6 +418,8 @@ void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
 
 	clear_bit(bit, cm->alloc_map);
 	cm->allocated--;
+	if(managed)
+		cm->managed_allocated--;
 
 	if (cm->online)
 		m->total_allocated--;
@@ -464,13 +489,14 @@ void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
 	seq_printf(sf, "Total allocated:  %6u\n", m->total_allocated);
 	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
 		   m->system_map);
-	seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
+	seq_printf(sf, "%*s| CPU | avl | man | mac | act | vectors\n", ind, " ");
 	cpus_read_lock();
 	for_each_online_cpu(cpu) {
 		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
 
-		seq_printf(sf, "%*s %4d  %4u  %4u  %4u  %*pbl\n", ind, " ",
-			   cpu, cm->available, cm->managed, cm->allocated,
+		seq_printf(sf, "%*s %4d  %4u  %4u  %4u  %4u  %*pbl\n", ind, " ",
+			   cpu, cm->available, cm->managed,
+			   cm->managed_allocated, cm->allocated,
 			   m->matrix_bits, cm->alloc_map);
 	}
 	cpus_read_unlock();
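
To make the new selection policy concrete, here is a minimal, self-contained userspace sketch of the same scan the patch adds: among the CPUs permitted by the mask, the online CPU with the fewest managed vectors already allocated wins. The names fake_cpumap and find_best_cpu_managed(), the plain bool mask, and the example counts are hypothetical stand-ins for illustration only, not the kernel's irq_matrix API.

/*
 * Userspace sketch of the "fewest managed_allocated wins" scan.
 * Build with: cc -o best_cpu best_cpu.c && ./best_cpu
 */
#include <limits.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_cpumap {
	unsigned int managed_allocated;	/* managed vectors already handed out */
	bool online;
};

static unsigned int find_best_cpu_managed(const struct fake_cpumap *maps,
					  const bool *msk, unsigned int ncpus)
{
	unsigned int cpu, best_cpu = UINT_MAX, allocated = UINT_MAX;

	for (cpu = 0; cpu < ncpus; cpu++) {
		/* Skip CPUs outside the mask or not online */
		if (!msk[cpu] || !maps[cpu].online)
			continue;
		/*
		 * Skip only if strictly more loaded than the current best;
		 * a tie replaces best_cpu, just like the kernel loop.
		 */
		if (maps[cpu].managed_allocated > allocated)
			continue;
		best_cpu = cpu;
		allocated = maps[cpu].managed_allocated;
	}
	return best_cpu;	/* UINT_MAX if no usable CPU was found */
}

int main(void)
{
	struct fake_cpumap maps[] = {
		{ .managed_allocated = 3, .online = true  },
		{ .managed_allocated = 1, .online = true  },
		{ .managed_allocated = 0, .online = false },	/* offline, ignored */
		{ .managed_allocated = 2, .online = true  },
	};
	bool msk[] = { true, true, true, true };

	/* Expected output: "best cpu: 1", the least loaded online CPU. */
	printf("best cpu: %u\n", find_best_cpu_managed(maps, msk, 4));
	return 0;
}

As the hunks above show, irq_matrix_alloc_managed() now routes through this policy via the per-CPU managed_allocated counter instead of the generic matrix_find_best_cpu() helper, and irq_matrix_free() decrements the counter for managed entries so the balance stays accurate; the debugfs output gains a matching "mac" column.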