Diffstat (limited to 'kernel/irq/matrix.c')
-rw-r--r-- | kernel/irq/matrix.c | 428
1 file changed, 428 insertions, 0 deletions
diff --git a/kernel/irq/matrix.c b/kernel/irq/matrix.c
new file mode 100644
index 000000000000..7b2b4fbde1e2
--- /dev/null
+++ b/kernel/irq/matrix.c
@@ -0,0 +1,428 @@
1 | /* | ||
2 | * Copyright (C) 2017 Thomas Gleixner <tglx@linutronix.de> | ||
3 | * | ||
4 | * SPDX-License-Identifier: GPL-2.0 | ||
5 | */ | ||
6 | #include <linux/spinlock.h> | ||
7 | #include <linux/seq_file.h> | ||
8 | #include <linux/bitmap.h> | ||
9 | #include <linux/percpu.h> | ||
10 | #include <linux/cpu.h> | ||
11 | #include <linux/irq.h> | ||
12 | |||
13 | #define IRQ_MATRIX_SIZE (BITS_TO_LONGS(IRQ_MATRIX_BITS)) | ||
14 | |||
15 | struct cpumap { | ||
16 | unsigned int available; | ||
17 | unsigned int allocated; | ||
18 | unsigned int managed; | ||
19 | bool online; | ||
20 | unsigned long alloc_map[IRQ_MATRIX_SIZE]; | ||
21 | unsigned long managed_map[IRQ_MATRIX_SIZE]; | ||
22 | }; | ||
23 | |||
24 | struct irq_matrix { | ||
25 | unsigned int matrix_bits; | ||
26 | unsigned int alloc_start; | ||
27 | unsigned int alloc_end; | ||
28 | unsigned int alloc_size; | ||
29 | unsigned int global_available; | ||
30 | unsigned int global_reserved; | ||
31 | unsigned int systembits_inalloc; | ||
32 | unsigned int total_allocated; | ||
33 | unsigned int online_maps; | ||
34 | struct cpumap __percpu *maps; | ||
35 | unsigned long scratch_map[IRQ_MATRIX_SIZE]; | ||
36 | unsigned long system_map[IRQ_MATRIX_SIZE]; | ||
37 | }; | ||
38 | |||
39 | /** | ||
40 | * irq_alloc_matrix - Allocate an irq_matrix structure and initialize it | ||
41 | * @matrix_bits: Number of matrix bits, must be <= IRQ_MATRIX_BITS | ||
42 | * @alloc_start: From which bit the allocation search starts | ||
43 | * @alloc_end: At which bit the allocation search ends, i.e. the first | ||
44 | * invalid bit | ||
45 | */ | ||
46 | __init struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, | ||
47 | unsigned int alloc_start, | ||
48 | unsigned int alloc_end) | ||
49 | { | ||
50 | struct irq_matrix *m; | ||
51 | |||
52 | if (matrix_bits > IRQ_MATRIX_BITS) | ||
53 | return NULL; | ||
54 | |||
55 | m = kzalloc(sizeof(*m), GFP_KERNEL); | ||
56 | if (!m) | ||
57 | return NULL; | ||
58 | |||
59 | m->matrix_bits = matrix_bits; | ||
60 | m->alloc_start = alloc_start; | ||
61 | m->alloc_end = alloc_end; | ||
62 | m->alloc_size = alloc_end - alloc_start; | ||
63 | m->maps = alloc_percpu(*m->maps); | ||
64 | if (!m->maps) { | ||
65 | kfree(m); | ||
66 | return NULL; | ||
67 | } | ||
68 | return m; | ||
69 | } | ||
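A minimal sketch of how an architecture might create the matrix at init time. MY_NR_VECTORS, MY_FIRST_DYN_VECTOR, MY_FIRST_SYSTEM_VECTOR and the init function below are hypothetical placeholders, not part of this patch; only irq_alloc_matrix() itself is.

/* Hypothetical arch init code; the constants stand in for the real vector layout. */
static struct irq_matrix *vector_matrix;

void __init my_arch_init_irq_matrix(void)
{
	/* Bits [MY_FIRST_DYN_VECTOR, MY_FIRST_SYSTEM_VECTOR) become allocatable. */
	vector_matrix = irq_alloc_matrix(MY_NR_VECTORS, MY_FIRST_DYN_VECTOR,
					 MY_FIRST_SYSTEM_VECTOR);
	if (!vector_matrix)
		panic("Failed to allocate irq_matrix\n");
}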
70 | |||
71 | /** | ||
72 | * irq_matrix_online - Bring the local CPU matrix online | ||
73 | * @m: Matrix pointer | ||
74 | */ | ||
75 | void irq_matrix_online(struct irq_matrix *m) | ||
76 | { | ||
77 | struct cpumap *cm = this_cpu_ptr(m->maps); | ||
78 | |||
79 | BUG_ON(cm->online); | ||
80 | |||
81 | bitmap_zero(cm->alloc_map, m->matrix_bits); | ||
82 | cm->available = m->alloc_size - (cm->managed + m->systembits_inalloc); | ||
83 | cm->allocated = 0; | ||
84 | m->global_available += cm->available; | ||
85 | cm->online = true; | ||
86 | m->online_maps++; | ||
87 | } | ||
88 | |||
89 | /** | ||
90 | * irq_matrix_offline - Bring the local CPU matrix offline | ||
91 | * @m: Matrix pointer | ||
92 | */ | ||
93 | void irq_matrix_offline(struct irq_matrix *m) | ||
94 | { | ||
95 | struct cpumap *cm = this_cpu_ptr(m->maps); | ||
96 | |||
97 | /* Update the global available size */ | ||
98 | m->global_available -= cm->available; | ||
99 | cm->online = false; | ||
100 | m->online_maps--; | ||
101 | } | ||
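The online/offline pair is expected to run on the CPU that is coming up or going down, typically from a CPU hotplug callback. A hedged sketch, reusing the hypothetical vector_matrix pointer from the sketch above; the callback names are placeholders:

/* Hypothetical hotplug callbacks, e.g. registered as an AP hotplug state. */
static int my_arch_cpu_online(unsigned int cpu)
{
	/* Runs on the incoming CPU: publish its free vector space. */
	irq_matrix_online(vector_matrix);
	return 0;
}

static int my_arch_cpu_offline(unsigned int cpu)
{
	/* Runs on the outgoing CPU: withdraw its share from the global pool. */
	irq_matrix_offline(vector_matrix);
	return 0;
}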
102 | |||
103 | static unsigned int matrix_alloc_area(struct irq_matrix *m, struct cpumap *cm, | ||
104 | unsigned int num, bool managed) | ||
105 | { | ||
106 | unsigned int area, start = m->alloc_start; | ||
107 | unsigned int end = m->alloc_end; | ||
108 | |||
109 | bitmap_or(m->scratch_map, cm->managed_map, m->system_map, end); | ||
110 | bitmap_or(m->scratch_map, m->scratch_map, cm->alloc_map, end); | ||
111 | area = bitmap_find_next_zero_area(m->scratch_map, end, start, num, 0); | ||
112 | if (area >= end) | ||
113 | return area; | ||
114 | if (managed) | ||
115 | bitmap_set(cm->managed_map, area, num); | ||
116 | else | ||
117 | bitmap_set(cm->alloc_map, area, num); | ||
118 | return area; | ||
119 | } | ||
120 | |||
121 | /** | ||
122 | * irq_matrix_assign_system - Assign system wide entry in the matrix | ||
123 | * @m: Matrix pointer | ||
124 | * @bit: Which bit to reserve | ||
125 | * @replace: Replace an already allocated vector with a system | ||
126 | * vector at the same bit position. | ||
127 | * | ||
128 | * The BUG_ON()s below are intentional. If this goes wrong in the | ||
129 | * early boot process, then the chance to survive is about zero. | ||
130 | * If this happens when the system is live, it's not much better. | ||
131 | */ | ||
132 | void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, | ||
133 | bool replace) | ||
134 | { | ||
135 | struct cpumap *cm = this_cpu_ptr(m->maps); | ||
136 | |||
137 | BUG_ON(bit > m->matrix_bits); | ||
138 | BUG_ON(m->online_maps > 1 || (m->online_maps && !replace)); | ||
139 | |||
140 | set_bit(bit, m->system_map); | ||
141 | if (replace) { | ||
142 | BUG_ON(!test_and_clear_bit(bit, cm->alloc_map)); | ||
143 | cm->allocated--; | ||
144 | m->total_allocated--; | ||
145 | } | ||
146 | if (bit >= m->alloc_start && bit < m->alloc_end) | ||
147 | m->systembits_inalloc++; | ||
148 | } | ||
149 | |||
150 | /** | ||
151 | * irq_matrix_reserve_managed - Reserve a managed interrupt in a CPU map | ||
152 | * @m: Matrix pointer | ||
153 | * @msk: On which CPUs the bits should be reserved. | ||
154 | * | ||
155 | * Can be called for offline CPUs. Note, this reserves one bit on each | ||
156 | * CPU in @msk, but it is not guaranteed that the bits are at the same | ||
157 | * offset on all CPUs. | ||
158 | */ | ||
159 | int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk) | ||
160 | { | ||
161 | unsigned int cpu, failed_cpu; | ||
162 | |||
163 | for_each_cpu(cpu, msk) { | ||
164 | struct cpumap *cm = per_cpu_ptr(m->maps, cpu); | ||
165 | unsigned int bit; | ||
166 | |||
167 | bit = matrix_alloc_area(m, cm, 1, true); | ||
168 | if (bit >= m->alloc_end) | ||
169 | goto cleanup; | ||
170 | cm->managed++; | ||
171 | if (cm->online) { | ||
172 | cm->available--; | ||
173 | m->global_available--; | ||
174 | } | ||
175 | } | ||
176 | return 0; | ||
177 | cleanup: | ||
178 | failed_cpu = cpu; | ||
179 | for_each_cpu(cpu, msk) { | ||
180 | if (cpu == failed_cpu) | ||
181 | break; | ||
182 | irq_matrix_remove_managed(m, cpumask_of(cpu)); | ||
183 | } | ||
184 | return -ENOSPC; | ||
185 | } | ||
186 | |||
187 | /** | ||
188 | * irq_matrix_remove_managed - Remove managed interrupts in a CPU map | ||
189 | * @m: Matrix pointer | ||
190 | * @msk: On which CPUs the bits should be removed | ||
191 | * | ||
192 | * Can be called for offline CPUs | ||
193 | * | ||
194 | * This removes non-allocated managed interrupts from the map. It does | ||
195 | * not matter which one, because managed interrupts free their | ||
196 | * allocation when they shut down. If not, the accounting is broken, | ||
197 | * but all that can be done at this point is to warn about it. | ||
198 | */ | ||
199 | void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk) | ||
200 | { | ||
201 | unsigned int cpu; | ||
202 | |||
203 | for_each_cpu(cpu, msk) { | ||
204 | struct cpumap *cm = per_cpu_ptr(m->maps, cpu); | ||
205 | unsigned int bit, end = m->alloc_end; | ||
206 | |||
207 | if (WARN_ON_ONCE(!cm->managed)) | ||
208 | continue; | ||
209 | |||
210 | /* Get managed bits which are not allocated */ | ||
211 | bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end); | ||
212 | |||
213 | bit = find_first_bit(m->scratch_map, end); | ||
214 | if (WARN_ON_ONCE(bit >= end)) | ||
215 | continue; | ||
216 | |||
217 | clear_bit(bit, cm->managed_map); | ||
218 | |||
219 | cm->managed--; | ||
220 | if (cm->online) { | ||
221 | cm->available++; | ||
222 | m->global_available++; | ||
223 | } | ||
224 | } | ||
225 | } | ||
226 | |||
227 | /** | ||
228 | * irq_matrix_alloc_managed - Allocate a managed interrupt in a CPU map | ||
229 | * @m: Matrix pointer | ||
230 | * @cpu: On which CPU the interrupt should be allocated | ||
231 | */ | ||
232 | int irq_matrix_alloc_managed(struct irq_matrix *m, unsigned int cpu) | ||
233 | { | ||
234 | struct cpumap *cm = per_cpu_ptr(m->maps, cpu); | ||
235 | unsigned int bit, end = m->alloc_end; | ||
236 | |||
237 | /* Get managed bits which are not allocated */ | ||
238 | bitmap_andnot(m->scratch_map, cm->managed_map, cm->alloc_map, end); | ||
239 | bit = find_first_bit(m->scratch_map, end); | ||
240 | if (bit >= end) | ||
241 | return -ENOSPC; | ||
242 | set_bit(bit, cm->alloc_map); | ||
243 | cm->allocated++; | ||
244 | m->total_allocated++; | ||
245 | return bit; | ||
246 | } | ||
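Taken together, the managed-interrupt path looks like: reserve one bit per CPU in the affinity mask when the interrupt is set up, claim one of those bits on the target CPU at activation, free it with managed=true on deactivation (the reservation stays), and drop the reservations when the descriptor is destroyed. A hedged sketch with hypothetical wrapper names, again using the vector_matrix pointer from the first sketch:

/* Hypothetical managed lifecycle, built on the functions above and on
 * irq_matrix_free() further down; the wrapper names are placeholders. */
static int my_managed_setup(const struct cpumask *affinity)
{
	return irq_matrix_reserve_managed(vector_matrix, affinity);
}

static int my_managed_activate(unsigned int cpu)
{
	/* Returns the claimed bit on @cpu, or -ENOSPC. */
	return irq_matrix_alloc_managed(vector_matrix, cpu);
}

static void my_managed_deactivate(unsigned int cpu, unsigned int bit)
{
	/* managed == true: the bit remains reserved for this interrupt. */
	irq_matrix_free(vector_matrix, cpu, bit, true);
}

static void my_managed_teardown(const struct cpumask *affinity)
{
	irq_matrix_remove_managed(vector_matrix, affinity);
}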
247 | |||
248 | /** | ||
249 | * irq_matrix_assign - Assign a preallocated interrupt in the local CPU map | ||
250 | * @m: Matrix pointer | ||
251 | * @bit: Which bit to mark | ||
252 | * | ||
253 | * This should only be used to mark preallocated vectors | ||
254 | */ | ||
255 | void irq_matrix_assign(struct irq_matrix *m, unsigned int bit) | ||
256 | { | ||
257 | struct cpumap *cm = this_cpu_ptr(m->maps); | ||
258 | |||
259 | if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end)) | ||
260 | return; | ||
261 | if (WARN_ON_ONCE(test_and_set_bit(bit, cm->alloc_map))) | ||
262 | return; | ||
263 | cm->allocated++; | ||
264 | m->total_allocated++; | ||
265 | cm->available--; | ||
266 | m->global_available--; | ||
267 | } | ||
268 | |||
269 | /** | ||
270 | * irq_matrix_reserve - Reserve interrupts | ||
271 | * @m: Matrix pointer | ||
272 | * | ||
273 | * This is merely a bookkeeping call. It increments the number of globally | ||
274 | * reserved interrupt bits without actually allocating them. This allows | ||
275 | * interrupt descriptors to be set up without assigning low level resources | ||
276 | * to them. The actual allocation happens when the interrupt gets activated. | ||
277 | */ | ||
278 | void irq_matrix_reserve(struct irq_matrix *m) | ||
279 | { | ||
280 | if (m->global_reserved <= m->global_available && | ||
281 | m->global_reserved + 1 > m->global_available) | ||
282 | pr_warn("Interrupt reservation exceeds available resources\n"); | ||
283 | |||
284 | m->global_reserved++; | ||
285 | } | ||
286 | |||
287 | /** | ||
288 | * irq_matrix_remove_reserved - Remove interrupt reservation | ||
289 | * @m: Matrix pointer | ||
290 | * | ||
291 | * This is merely a bookkeeping call. It decrements the number of globally | ||
292 | * reserved interrupt bits. This is used to undo irq_matrix_reserve() when | ||
293 | * the interrupt was never in use, i.e. no real vector was ever allocated | ||
294 | * (which would have undone the reservation). | ||
295 | */ | ||
296 | void irq_matrix_remove_reserved(struct irq_matrix *m) | ||
297 | { | ||
298 | m->global_reserved--; | ||
299 | } | ||
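The reserve/remove_reserved pair above is pure bookkeeping; no per-CPU map is touched until the interrupt is activated. A hedged sketch of the expected flow, with hypothetical names and the vector_matrix pointer from the first sketch:

/* Hypothetical reservation-mode flow for non-managed interrupts. */
static void my_reserve_at_alloc(void)
{
	/* Descriptor created, no vector assigned yet. */
	irq_matrix_reserve(vector_matrix);
}

static int my_activate_reserved(const struct cpumask *dest, unsigned int *cpu)
{
	/* reserved == true turns the reservation into a real allocation. */
	return irq_matrix_alloc(vector_matrix, dest, true, cpu);
}

static void my_teardown_never_used(void)
{
	/* Activation never happened: just drop the reservation. */
	irq_matrix_remove_reserved(vector_matrix);
}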
300 | |||
301 | /** | ||
302 | * irq_matrix_alloc - Allocate a regular interrupt in a CPU map | ||
303 | * @m: Matrix pointer | ||
304 | * @msk: Which CPUs to search in | ||
305 | * @reserved: Allocate previously reserved interrupts | ||
306 | * @mapped_cpu: Pointer to store the CPU for which the irq was allocated | ||
307 | */ | ||
308 | int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, | ||
309 | bool reserved, unsigned int *mapped_cpu) | ||
310 | { | ||
311 | unsigned int cpu; | ||
312 | |||
313 | for_each_cpu(cpu, msk) { | ||
314 | struct cpumap *cm = per_cpu_ptr(m->maps, cpu); | ||
315 | unsigned int bit; | ||
316 | |||
317 | if (!cm->online) | ||
318 | continue; | ||
319 | |||
320 | bit = matrix_alloc_area(m, cm, 1, false); | ||
321 | if (bit < m->alloc_end) { | ||
322 | cm->allocated++; | ||
323 | cm->available--; | ||
324 | m->total_allocated++; | ||
325 | m->global_available--; | ||
326 | if (reserved) | ||
327 | m->global_reserved--; | ||
328 | *mapped_cpu = cpu; | ||
329 | return bit; | ||
330 | } | ||
331 | } | ||
332 | return -ENOSPC; | ||
333 | } | ||
334 | |||
335 | /** | ||
336 | * irq_matrix_free - Free allocated interrupt in the matrix | ||
337 | * @m: Matrix pointer | ||
338 | * @cpu: Which CPU map needs to be updated | ||
339 | * @bit: The bit to remove | ||
340 | * @managed: If true, the interrupt is managed and not accounted | ||
341 | * as available. | ||
342 | */ | ||
343 | void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, | ||
344 | unsigned int bit, bool managed) | ||
345 | { | ||
346 | struct cpumap *cm = per_cpu_ptr(m->maps, cpu); | ||
347 | |||
348 | if (WARN_ON_ONCE(bit < m->alloc_start || bit >= m->alloc_end)) | ||
349 | return; | ||
350 | |||
351 | if (cm->online) { | ||
352 | clear_bit(bit, cm->alloc_map); | ||
353 | cm->allocated--; | ||
354 | m->total_allocated--; | ||
355 | if (!managed) { | ||
356 | cm->available++; | ||
357 | m->global_available++; | ||
358 | } | ||
359 | } | ||
360 | } | ||
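For the regular (non-managed, non-reserved) case, irq_matrix_alloc() and irq_matrix_free() are expected to be paired on the same CPU and bit. A hedged sketch with hypothetical caller names:

/* Hypothetical vector assignment path; names are placeholders. */
static int my_assign_vector(const struct cpumask *dest, unsigned int *target_cpu)
{
	int bit;

	bit = irq_matrix_alloc(vector_matrix, dest, false, target_cpu);
	if (bit < 0)
		return bit;	/* -ENOSPC: no free bit on any online CPU in dest */

	/* ... program the hardware for (*target_cpu, bit) ... */
	return bit;
}

static void my_release_vector(unsigned int cpu, unsigned int bit)
{
	/* Must mirror the allocation: same CPU, same bit, managed == false. */
	irq_matrix_free(vector_matrix, cpu, bit, false);
}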
361 | |||
362 | /** | ||
363 | * irq_matrix_available - Get the number of globally available irqs | ||
364 | * @m: Pointer to the matrix to query | ||
365 | * @cpudown: If true, the local CPU is about to go down, adjust | ||
366 | * the number of available irqs accordingly | ||
367 | */ | ||
368 | unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown) | ||
369 | { | ||
370 | struct cpumap *cm = this_cpu_ptr(m->maps); | ||
371 | |||
372 | return m->global_available - (cpudown ? cm->available : 0); | ||
373 | } | ||
374 | |||
375 | /** | ||
376 | * irq_matrix_reserved - Get the number of globally reserved irqs | ||
377 | * @m: Pointer to the matrix to query | ||
378 | */ | ||
379 | unsigned int irq_matrix_reserved(struct irq_matrix *m) | ||
380 | { | ||
381 | return m->global_reserved; | ||
382 | } | ||
383 | |||
384 | /** | ||
385 | * irq_matrix_allocated - Get the number of allocated irqs on the local cpu | ||
386 | * @m: Pointer to the matrix to search | ||
387 | * | ||
388 | * This returns the number of allocated irqs on the local CPU | ||
389 | */ | ||
390 | unsigned int irq_matrix_allocated(struct irq_matrix *m) | ||
391 | { | ||
392 | struct cpumap *cm = this_cpu_ptr(m->maps); | ||
393 | |||
394 | return cm->allocated; | ||
395 | } | ||
396 | |||
397 | #ifdef CONFIG_GENERIC_IRQ_DEBUGFS | ||
398 | /** | ||
399 | * irq_matrix_debug_show - Show detailed allocation information | ||
400 | * @sf: Pointer to the seq_file to print to | ||
401 | * @m: Pointer to the matrix allocator | ||
402 | * @ind: Indentation for the print format | ||
403 | * | ||
404 | * Note, this is a lockless snapshot. | ||
405 | */ | ||
406 | void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind) | ||
407 | { | ||
408 | unsigned int nsys = bitmap_weight(m->system_map, m->matrix_bits); | ||
409 | int cpu; | ||
410 | |||
411 | seq_printf(sf, "Online bitmaps: %6u\n", m->online_maps); | ||
412 | seq_printf(sf, "Global available: %6u\n", m->global_available); | ||
413 | seq_printf(sf, "Global reserved: %6u\n", m->global_reserved); | ||
414 | seq_printf(sf, "Total allocated: %6u\n", m->total_allocated); | ||
415 | seq_printf(sf, "System: %u: %*pbl\n", nsys, m->matrix_bits, | ||
416 | m->system_map); | ||
417 | seq_printf(sf, "%*s| CPU | avl | man | act | vectors\n", ind, " "); | ||
418 | cpus_read_lock(); | ||
419 | for_each_online_cpu(cpu) { | ||
420 | struct cpumap *cm = per_cpu_ptr(m->maps, cpu); | ||
421 | |||
422 | seq_printf(sf, "%*s %4d %4u %4u %4u %*pbl\n", ind, " ", | ||
423 | cpu, cm->available, cm->managed, cm->allocated, | ||
424 | m->matrix_bits, cm->alloc_map); | ||
425 | } | ||
426 | cpus_read_unlock(); | ||
427 | } | ||
428 | #endif | ||