summaryrefslogtreecommitdiffstats
path: root/kernel/irq/matrix.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2017-11-13 20:33:11 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2017-11-13 20:33:11 -0500
commit670310dfbae0eefe7318ff6a61e29e67a7a7bbce (patch)
treeeb3ce3aa3e6786a64fec93d410bb6f0b9a56be77 /kernel/irq/matrix.c
parent43ff2f4db9d0f76452b77cfa645f02b471143b24 (diff)
parentffc661c99f621152d5fdcf53f9df0d48c326318b (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq core updates from Thomas Gleixner: "A rather large update for the interrupt core code and the irq chip drivers: - Add a new bitmap matrix allocator and supporting changes, which is used to replace the x86 vector allocator which comes with separate pull request. This allows to replace the convoluted nested loop allocation function in x86 with a facility which supports the recently added property of managed interrupts proper and allows to switch to a best effort vector reservation scheme, which addresses problems with vector exhaustion. - A large update to the ARM GIC-V3-ITS driver adding support for range selectors. - New interrupt controllers: - Meson and Meson8 GPIO - BCM7271 L2 - Socionext EXIU If you expected that this will stop at some point, I have to disappoint you. There are new ones posted already. Sigh! - STM32 interrupt controller support for new platforms. - A pile of fixes, cleanups and updates to the MIPS GIC driver - The usual small fixes, cleanups and updates all over the place. 
Most visible one is to move the irq chip drivers Kconfig switches into a separate Kconfig menu" * 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (70 commits) genirq: Fix type of shifting literal 1 in __setup_irq() irqdomain: Drop pointless NULL check in virq_debug_show_one genirq/proc: Return proper error code when irq_set_affinity() fails irq/work: Use llist_for_each_entry_safe irqchip: mips-gic: Print warning if inherited GIC base is used irqchip/mips-gic: Add pr_fmt and reword pr_* messages irqchip/stm32: Move the wakeup on interrupt mask irqchip/stm32: Fix initial values irqchip/stm32: Add stm32h7 support dt-bindings/interrupt-controllers: Add compatible string for stm32h7 irqchip/stm32: Add multi-bank management irqchip/stm32: Select GENERIC_IRQ_CHIP irqchip/exiu: Add support for Socionext Synquacer EXIU controller dt-bindings: Add description of Socionext EXIU interrupt controller irqchip/gic-v3-its: Fix VPE activate callback return value irqchip: mips-gic: Make IPI bitmaps static irqchip: mips-gic: Share register writes in gic_set_type() irqchip: mips-gic: Remove gic_vpes variable irqchip: mips-gic: Use num_possible_cpus() to reserve IPIs irqchip: mips-gic: Configure EIC when CPUs come online ...
Diffstat (limited to 'kernel/irq/matrix.c')
-rw-r--r--kernel/irq/matrix.c443
1 files changed, 443 insertions, 0 deletions
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
new file mode 100644
index 000000000000..a3cbbc8191c5
--- /dev/null
+++ b/kernel/irq/matrix.c
@@ -0,0 +1,443 @@
1/*
2 * Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de>
3 *
4 * SPDX-License-Identifier: GPL-2.0
5 */
6#include <linux/spinlock.h>
7#include <linux/seq_file.h>
8#include <linux/bitmap.h>
9#include <linux/percpu.h>
10#include <linux/cpu.h>
11#include <linux/irq.h>
12
/*
 * Number of unsigned longs needed to store IRQ_MATRIX_BITS bits. This is an
 * array dimension in longs, not a size in bytes: the previous definition
 * multiplied the long count by sizeof(unsigned long), making every bitmap
 * array sizeof(unsigned long) times larger than required.
 */
#define IRQ_MATRIX_SIZE	(BITS_TO_LONGS(IRQ_MATRIX_BITS))
14
/* Per-CPU view of the interrupt matrix */
struct cpumap {
	/* Number of bits still allocatable on this CPU */
	unsigned int available;
	/* Number of bits currently allocated on this CPU */
	unsigned int allocated;
	/* Number of bits reserved for managed interrupts on this CPU */
	unsigned int managed;
	/* True between irq_matrix_online() and irq_matrix_offline() */
	bool online;
	/* Bitmap of allocated bits */
	unsigned long alloc_map[IRQ_MATRIX_SIZE];
	/* Bitmap of bits reserved for managed interrupts */
	unsigned long managed_map[IRQ_MATRIX_SIZE];
};
23
/* Global state of the matrix allocator */
struct irq_matrix {
	/* Number of usable bits, must be <= IRQ_MATRIX_BITS */
	unsigned int matrix_bits;
	/* First bit of the allocatable region */
	unsigned int alloc_start;
	/* First invalid bit, i.e. end of the allocatable region */
	unsigned int alloc_end;
	/* alloc_end - alloc_start */
	unsigned int alloc_size;
	/* Sum of available bits over all online CPUs */
	unsigned int global_available;
	/* Number of bits reserved but not yet allocated */
	unsigned int global_reserved;
	/* Number of system bits inside the allocatable region */
	unsigned int systembits_inalloc;
	/* Total allocations over all online CPUs */
	unsigned int total_allocated;
	/* Number of CPU maps currently online */
	unsigned int online_maps;
	/* Per-CPU maps */
	struct cpumap __percpu *maps;
	/* Work area for the search functions */
	unsigned long scratch_map[IRQ_MATRIX_SIZE];
	/* Bitmap of system (non-allocatable) bits */
	unsigned long system_map[IRQ_MATRIX_SIZE];
};
38
39#define CREATE_TRACE_POINTS
40#include <trace/events/irq_matrix.h>
41
42/**
43 * irq_alloc_matrix - Allocate a irq_matrix structure and initialize it
44 * @matrix_bits: Number of matrix bits must be <= IRQ_MATRIX_BITS
45 * @alloc_start: From which bit the allocation search starts
46 * @alloc_end: At which bit the allocation search ends, i.e first
47 * invalid bit
48 */
49__init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits,
50 unsigned int alloc_start,
51 unsigned int alloc_end)
52{
53 struct irq_matrix *m;
54
55 if (matrix_bits > IRQ_MATRIX_BITS)
56 return NULL;
57
58 m = kzalloc(sizeof(*m), GFP_KERNEL);
59 if (!m)
60 return NULL;
61
62 m->matrix_bits = matrix_bits;
63 m->alloc_start = alloc_start;
64 m->alloc_end = alloc_end;
65 m->alloc_size = alloc_end - alloc_start;
66 m->maps = alloc_percpu(*m->maps);
67 if (!m->maps) {
68 kfree(m);
69 return NULL;
70 }
71 return m;
72}
73
/**
 * irq_matrix_online - Bring the local CPU matrix online
 * @m:		Matrix pointer
 *
 * Resets the local allocation map and publishes the CPU's available bits
 * in the global accounting.
 */
void irq_matrix_online(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* Going online twice without an offline in between is a bug */
	BUG_ON(cm->online);

	bitmap_zero(cm->alloc_map, m->matrix_bits);
	/* Managed reservations and in-range system bits are not allocatable */
	cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc);
	cm->allocated = 0;
	m->global_available += cm->available;
	cm->online = true;
	m->online_maps++;
	trace_irq_matrix_online(m);
}
92
/**
 * irq_matrix_offline - Bring the local CPU matrix offline
 * @m:		Matrix pointer
 *
 * Removes the CPU's available bits from the global accounting. The local
 * allocation map is left intact; it is reset on the next online.
 */
void irq_matrix_offline(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* Update the global available size */
	m->global_available -= cm->available;
	cm->online = false;
	m->online_maps--;
	trace_irq_matrix_offline(m);
}
107
108static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm,
109 unsigned int num, bool managed)
110{
111 unsigned int area, start = m->alloc_start;
112 unsigned int end = m->alloc_end;
113
114 bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end);
115 bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end);
116 area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0);
117 if (area >= end)
118 return area;
119 if (managed)
120 bitmap_set(cm->managed_map, area, num);
121 else
122 bitmap_set(cm->alloc_map, area, num);
123 return area;
124}
125
/**
 * irq_matrix_assign_system - Assign system wide entry in the matrix
 * @m:		Matrix pointer
 * @bit:	Which bit to reserve
 * @replace:	Replace an already allocated vector with a system
 *		vector at the same bit position.
 *
 * The BUG_ON()s below are on purpose. If this goes wrong in the
 * early boot process, then the chance to survive is about zero.
 * If this happens when the system is live, it's not much better.
 */
void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit,
			      bool replace)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/*
	 * NOTE(review): '>' permits bit == matrix_bits, which looks like an
	 * off-by-one. The backing bitmaps are IRQ_MATRIX_BITS wide and
	 * matrix_bits <= IRQ_MATRIX_BITS, so no memory is corrupted, but
	 * confirm whether '>=' was intended.
	 */
	BUG_ON(bit > m->matrix_bits);
	/* Only valid before secondary CPUs come up, unless replacing */
	BUG_ON(m->online_maps > 1 || (m->online_maps && !replace));

	set_bit(bit, m->system_map);
	if (replace) {
		/* The bit must already be allocated on this CPU */
		BUG_ON(!test_and_clear_bit(bit, cm->alloc_map));
		cm->allocated--;
		m->total_allocated--;
	}
	/* System bits inside the allocatable region shrink it permanently */
	if (bit >= m->alloc_start && bit < m->alloc_end)
		m->systembits_inalloc++;

	trace_irq_matrix_assign_system(bit, m);
}
156
/**
 * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be reserved.
 *
 * Can be called for offline CPUs. Note, this will only reserve one bit
 * on all CPUs in @msk, but it's not guaranteed that the bits are at the
 * same offset on all CPUs
 *
 * Returns 0 on success, -ENOSPC when a bit could not be reserved on one
 * of the CPUs; in that case all reservations made by this call are
 * rolled back before returning.
 */
int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu, failed_cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		bit = matrix_alloc_area(m, cm, 1, true);
		if (bit >= m->alloc_end)
			goto cleanup;
		cm->managed++;
		/* Offline CPUs account the reservation when they come online */
		if (cm->online) {
			cm->available--;
			m->global_available--;
		}
		trace_irq_matrix_reserve_managed(bit, cpu, m, cm);
	}
	return 0;
cleanup:
	/* Undo the reservations made on the CPUs preceding the failed one */
	failed_cpu = cpu;
	for_each_cpu(cpu, msk) {
		if (cpu == failed_cpu)
			break;
		irq_matrix_remove_managed(m, cpumask_of(cpu));
	}
	return -ENOSPC;
}
194
/**
 * irq_matrix_remove_managed - Remove managed interrupts in a CPU map
 * @m:		Matrix pointer
 * @msk:	On which CPUs the bits should be removed
 *
 * Can be called for offline CPUs
 *
 * This removes not allocated managed interrupts from the map. It does
 * not matter which one because the managed interrupts free their
 * allocation when they shut down. If not, the accounting is screwed,
 * but all what can be done at this point is warn about it.
 */
void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit, end = m->alloc_end;

		/* Nothing reserved here - accounting bug of the caller */
		if (WARN_ON_ONCE(!cm->managed))
			continue;

		/* Get managed bits which are not allocated */
		bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);

		bit = find_first_bit(m->scratch_map, end);
		/* All managed bits are allocated - cannot remove any */
		if (WARN_ON_ONCE(bit >= end))
			continue;

		clear_bit(bit, cm->managed_map);

		cm->managed--;
		/* Online CPUs get the bit back as a regular allocatable bit */
		if (cm->online) {
			cm->available++;
			m->global_available++;
		}
		trace_irq_matrix_remove_managed(bit, cpu, m, cm);
	}
}
235
236/**
237 * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map
238 * @m: Matrix pointer
239 * @cpu: On which CPU the interrupt should be allocated
240 */
241int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu)
242{
243 struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
244 unsigned int bit, end = m->alloc_end;
245
246 /* Get managed bit which are not allocated */
247 bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end);
248 bit = find_first_bit(m->scratch_map, end);
249 if (bit >= end)
250 return -ENOSPC;
251 set_bit(bit, cm->alloc_map);
252 cm->allocated++;
253 m->total_allocated++;
254 trace_irq_matrix_alloc_managed(bit, cpu, m, cm);
255 return bit;
256}
257
/**
 * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map
 * @m:		Matrix pointer
 * @bit:	Which bit to mark
 *
 * This should only be used to mark preallocated vectors
 */
void irq_matrix_assign(struct irq_matrix *m, unsigned int bit)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	/* The bit must lie inside the allocatable region */
	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;
	/* Refuse double assignment of the same bit */
	if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map)))
		return;
	cm->allocated++;
	m->total_allocated++;
	cm->available--;
	m->global_available--;
	trace_irq_matrix_assign(bit, smp_processor_id(), m, cm);
}
279
/**
 * irq_matrix_reserve - Reserve interrupts
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It increments the number of globally
 * reserved interrupt bits w/o actually allocating them. This allows to
 * setup interrupt descriptors w/o assigning low level resources to it.
 * The actual allocation happens when the interrupt gets activated.
 */
void irq_matrix_reserve(struct irq_matrix *m)
{
	/* Warn once the reservation count reaches the available count */
	if (m->global_reserved <= m->global_available &&
	    m->global_reserved + 1 > m->global_available)
		pr_warn("Interrupt reservation exceeds available resources\n");

	m->global_reserved++;
	trace_irq_matrix_reserve(m);
}
298
/**
 * irq_matrix_remove_reserved - Remove interrupt reservation
 * @m:		Matrix pointer
 *
 * This is merely a book keeping call. It decrements the number of globally
 * reserved interrupt bits. This is used to undo irq_matrix_reserve() when the
 * interrupt was never in use and a real vector allocated, which undid the
 * reservation.
 */
void irq_matrix_remove_reserved(struct irq_matrix *m)
{
	m->global_reserved--;
	trace_irq_matrix_remove_reserved(m);
}
313
/**
 * irq_matrix_alloc - Allocate a regular interrupt in a CPU map
 * @m:		Matrix pointer
 * @msk:	Which CPUs to search in
 * @reserved:	Allocate previously reserved interrupts
 * @mapped_cpu:	Pointer to store the CPU for which the irq was allocated
 *
 * Returns the allocated bit number on success, -ENOSPC when no bit is
 * available on any online CPU in @msk. @mapped_cpu is only written on
 * success.
 */
int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk,
		     bool reserved, unsigned int *mapped_cpu)
{
	unsigned int cpu;

	for_each_cpu(cpu, msk) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);
		unsigned int bit;

		/* Offline CPUs cannot serve allocations */
		if (!cm->online)
			continue;

		bit = matrix_alloc_area(m, cm, 1, false);
		if (bit < m->alloc_end) {
			cm->allocated++;
			cm->available--;
			m->total_allocated++;
			m->global_available--;
			/* The allocation consumes a prior reservation */
			if (reserved)
				m->global_reserved--;
			*mapped_cpu = cpu;
			trace_irq_matrix_alloc(bit, cpu, m, cm);
			return bit;
		}
	}
	return -ENOSPC;
}
348
/**
 * irq_matrix_free - Free allocated interrupt in the matrix
 * @m:		Matrix pointer
 * @cpu:	Which CPU map needs be updated
 * @bit:	The bit to remove
 * @managed:	If true, the interrupt is managed and not accounted
 *		as available.
 */
void irq_matrix_free(struct irq_matrix *m, unsigned int cpu,
		     unsigned int bit, bool managed)
{
	struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

	/* Out-of-range bits were never handed out by this allocator */
	if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end))
		return;

	/*
	 * NOTE(review): the accounting is skipped for offline CPUs;
	 * presumably because irq_matrix_offline() already removed their
	 * available count from the global sum - confirm against callers.
	 */
	if (cm->online) {
		clear_bit(bit, cm->alloc_map);
		cm->allocated--;
		m->total_allocated--;
		/* Managed bits stay reserved, so they do not become available */
		if (!managed) {
			cm->available++;
			m->global_available++;
		}
	}
	trace_irq_matrix_free(bit, cpu, m, cm);
}
376
377/**
378 * irq_matrix_available - Get the number of globally available irqs
379 * @m: Pointer to the matrix to query
380 * @cpudown: If true, the local CPU is about to go down, adjust
381 * the number of available irqs accordingly
382 */
383unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown)
384{
385 struct cpumap *cm = this_cpu_ptr(m->maps);
386
387 return m->global_available - cpudown ? cm->available : 0;
388}
389
/**
 * irq_matrix_reserved - Get the number of globally reserved irqs
 * @m:		Pointer to the matrix to query
 */
unsigned int irq_matrix_reserved(struct irq_matrix *m)
{
	return m->global_reserved;
}
398
/**
 * irq_matrix_allocated - Get the number of allocated irqs on the local cpu
 * @m:		Pointer to the matrix to search
 *
 * This returns number of allocated irqs
 */
unsigned int irq_matrix_allocated(struct irq_matrix *m)
{
	struct cpumap *cm = this_cpu_ptr(m->maps);

	return cm->allocated;
}
411
#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
/**
 * irq_matrix_debug_show - Show detailed allocation information
 * @sf:		Pointer to the seq_file to print to
 * @m:		Pointer to the matrix allocator
 * @ind:	Indentation for the print format
 *
 * Note, this is a lockless snapshot.
 */
void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind)
{
	/* Number of system-wide (non-allocatable) bits */
	unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits);
	int cpu;

	seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps);
	seq_printf(sf, "Global available: %6u\n", m->global_available);
	seq_printf(sf, "Global reserved: %6u\n", m->global_reserved);
	seq_printf(sf, "Total allocated: %6u\n", m->total_allocated);
	seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits,
		   m->system_map);
	seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " ");
	/* Hold off hotplug so the per-CPU walk stays consistent */
	cpus_read_lock();
	for_each_online_cpu(cpu) {
		struct cpumap *cm = per_cpu_ptr(m->maps, cpu);

		seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ",
			   cpu, cm->available, cm->managed, cm->allocated,
			   m->matrix_bits, cm->alloc_map);
	}
	cpus_read_unlock();
}
#endif