author     Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 10:15:20 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2018-10-23 10:15:20 -0400
commit     cbbfb0ae2ca979222297062647ced653682a6cc7
tree       15744f08287f776eb53c3fd1c97817f0e320875a
parent     42f52e1c59bdb78cad945b2dd34fa1f892239a39
parent     76f99ae5b54d48430d1f0c5512a84da0ff9761e0
Merge branch 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 apic updates from Ingo Molnar:
"Improve the spreading of managed IRQs at allocation time"
* 'x86-apic-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
irq/matrix: Spread managed interrupts on allocation
irq/matrix: Split out the CPU selection code into a helper
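
The substance of this merge is a change in how the target CPU for a managed interrupt is picked. Before, assign_managed_vector() simply took the first CPU of the search mask; now irq_matrix_alloc_managed() is handed the whole cpumask plus a *mapped_cpu out-parameter and chooses the online CPU with the most available vectors, using the new matrix_find_best_cpu() helper that irq_matrix_alloc() shares. Below is a minimal userspace sketch of that selection policy only, not kernel code; the types, the sample counts and the find_best_cpu() name are invented for illustration.

#include <limits.h>
#include <stdio.h>

#define NR_CPUS 4

struct cpumap {
        int online;
        unsigned int available;         /* free vectors on this CPU */
        unsigned int allocated;
};

/* Same policy as matrix_find_best_cpu(): the CPU with the most available vectors wins */
static unsigned int find_best_cpu(const struct cpumap *maps, unsigned int nr)
{
        unsigned int cpu, best_cpu = UINT_MAX, maxavl = 0;

        for (cpu = 0; cpu < nr; cpu++) {
                if (!maps[cpu].online || maps[cpu].available <= maxavl)
                        continue;
                best_cpu = cpu;
                maxavl = maps[cpu].available;
        }
        return best_cpu;
}

int main(void)
{
        struct cpumap maps[NR_CPUS] = {
                { 1, 200, 0 }, { 1, 202, 0 }, { 1, 201, 0 }, { 0, 0, 0 },
        };
        unsigned int i, cpu;

        /* Successive managed allocations spread over CPUs 0-2 instead of piling up on CPU 0 */
        for (i = 0; i < 6; i++) {
                cpu = find_best_cpu(maps, NR_CPUS);
                if (cpu == UINT_MAX)
                        break;          /* no online CPU with free vectors: -ENOSPC in the kernel */
                maps[cpu].available--;
                maps[cpu].allocated++;
                printf("allocation %u -> cpu %u\n", i, cpu);
        }
        return 0;
}

With the sample counts above the six allocations land on CPUs 1, 1, 2, 0, 1 and 2; under the pre-patch policy all of them would have targeted the first CPU of the mask.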
 arch/x86/kernel/apic/vector.c |  9
 include/linux/irq.h           |  3
 kernel/irq/matrix.c           | 82
 3 files changed, 58 insertions(+), 36 deletions(-)
diff --git a/arch/x86/kernel/apic/vector.c b/arch/x86/kernel/apic/vector.c
index 7654febd5102..652e7ffa9b9d 100644
--- a/arch/x86/kernel/apic/vector.c
+++ b/arch/x86/kernel/apic/vector.c
@@ -313,14 +313,13 @@ assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest)
         struct apic_chip_data *apicd = apic_chip_data(irqd);
         int vector, cpu;
 
-        cpumask_and(vector_searchmask, vector_searchmask, affmsk);
-        cpu = cpumask_first(vector_searchmask);
-        if (cpu >= nr_cpu_ids)
-                return -EINVAL;
+        cpumask_and(vector_searchmask, dest, affmsk);
+
         /* set_affinity might call here for nothing */
         if (apicd->vector && cpumask_test_cpu(apicd->cpu, vector_searchmask))
                 return 0;
-        vector = irq_matrix_alloc_managed(vector_matrix, cpu);
+        vector = irq_matrix_alloc_managed(vector_matrix, vector_searchmask,
+                                          &cpu);
         trace_vector_alloc_managed(irqd->irq, vector, vector);
         if (vector < 0)
                 return vector;
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 201de12a9957..c9bffda04a45 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -1151,7 +1151,8 @@ void irq_matrix_offline(struct irq_matrix *m);
 void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace);
 int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk);
 void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk);
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu);
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+                             unsigned int *mapped_cpu);
 void irq_matrix_reserve(struct irq_matrix *m);
 void irq_matrix_remove_reserved(struct irq_matrix *m);
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
index 5092494bf261..6e6d467f3dec 100644
--- a/kernel/irq/matrix.c
+++ b/kernel/irq/matrix.c
@@ -124,6 +124,27 @@ static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
         return area;
 }
 
+/* Find the best CPU which has the lowest vector allocation count */
+static unsigned int matrix_find_best_cpu(struct irq_matrix *m,
+                                         const struct cpumask *msk)
+{
+        unsigned int cpu, best_cpu, maxavl = 0;
+        struct cpumap *cm;
+
+        best_cpu = UINT_MAX;
+
+        for_each_cpu(cpu, msk) {
+                cm = per_cpu_ptr(m->maps, cpu);
+
+                if (!cm->online || cm->available <= maxavl)
+                        continue;
+
+                best_cpu = cpu;
+                maxavl = cm->available;
+        }
+        return best_cpu;
+}
+
 /**
  * irq_matrix_assign_system - Assign system wide entry in the matrix
  * @m:          Matrix pointer
@@ -239,11 +260,21 @@ void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
  * @m:          Matrix pointer
  * @cpu:        On which CPU the interrupt should be allocated
  */
-int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
+int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk,
+                             unsigned int *mapped_cpu)
 {
-        struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
-        unsigned int bit, end = m->alloc_end;
+        unsigned int bit, cpu, end = m->alloc_end;
+        struct cpumap *cm;
+
+        if (cpumask_empty(msk))
+                return -EINVAL;
+
+        cpu = matrix_find_best_cpu(m, msk);
+        if (cpu == UINT_MAX)
+                return -ENOSPC;
 
+        cm = per_cpu_ptr(m->maps, cpu);
+        end = m->alloc_end;
         /* Get managed bit which are not allocated */
         bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
         bit = find_first_bit(m->scratch_map, end);
@@ -252,6 +283,7 @@ int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
         set_bit(bit, cm->alloc_map);
         cm->allocated++;
         m->total_allocated++;
+        *mapped_cpu = cpu;
         trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
         return bit;
 }
@@ -322,37 +354,27 @@ void irq_matrix_remove_reserved(struct irq_matrix *m)
 int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
                      bool reserved, unsigned int *mapped_cpu)
 {
-        unsigned int cpu, best_cpu, maxavl = 0;
+        unsigned int cpu, bit;
         struct cpumap *cm;
-        unsigned int bit;
-
-        best_cpu = UINT_MAX;
-        for_each_cpu(cpu, msk) {
-                cm = per_cpu_ptr(m->maps, cpu);
 
-                if (!cm->online || cm->available <= maxavl)
-                        continue;
+        cpu = matrix_find_best_cpu(m, msk);
+        if (cpu == UINT_MAX)
+                return -ENOSPC;
 
-                best_cpu = cpu;
-                maxavl = cm->available;
-        }
+        cm = per_cpu_ptr(m->maps, cpu);
+        bit = matrix_alloc_area(m, cm, 1, false);
+        if (bit >= m->alloc_end)
+                return -ENOSPC;
+        cm->allocated++;
+        cm->available--;
+        m->total_allocated++;
+        m->global_available--;
+        if (reserved)
+                m->global_reserved--;
+        *mapped_cpu = cpu;
+        trace_irq_matrix_alloc(bit, cpu, m, cm);
+        return bit;
 
-        if (maxavl) {
-                cm = per_cpu_ptr(m->maps, best_cpu);
-                bit = matrix_alloc_area(m, cm, 1, false);
-                if (bit < m->alloc_end) {
-                        cm->allocated++;
-                        cm->available--;
-                        m->total_allocated++;
-                        m->global_available--;
-                        if (reserved)
-                                m->global_reserved--;
-                        *mapped_cpu = best_cpu;
-                        trace_irq_matrix_alloc(bit, best_cpu, m, cm);
-                        return bit;
-                }
-        }
-        return -ENOSPC;
 }
 
 /**