author     Linus Torvalds <torvalds@linux-foundation.org>  2017-07-03 19:50:31 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-07-03 19:50:31 -0400
commit     03ffbcdd7898c0b5299efeb9f18de927487ec1cf (patch)
tree       0569222e4dc9db22049d7d8d15920cc085a194f6 /kernel/irq/internals.h
parent     1b044f1cfc65a7d90b209dfabd57e16d98b58c5b (diff)
parent     f9632de40ee0161e864bea8c1b017d957fd7312c (diff)
Merge branch 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull irq updates from Thomas Gleixner:
"The irq department delivers:
- Expand the generic infrastructure handling the irq migration on CPU
hotplug and convert X86 over to it. (Thomas Gleixner)
Aside from consolidating code, this is a preparatory change for:
- Finalizing the affinity management for multi-queue devices. The
main change here is to shut down interrupts which are affine to an
outgoing CPU and re-enable them when the CPU comes online again.
That avoids pointlessly moving interrupts around and breaking and
re-establishing affinities for no value. (Christoph Hellwig)
Note: This also contains the block-mq and NVMe changes which depend
on the rework of the irq core infrastructure. Jens acked them and
agreed that they should go with the irq changes.
- Consolidation of irq domain code (Marc Zyngier)
- State tracking consolidation in the core code (Jeffy Chen)
- Add debug infrastructure for hierarchical irq domains (Thomas
Gleixner)
- Infrastructure enhancement for managing generic interrupt chips via
devmem (Bartosz Golaszewski)
- Constification work all over the place (Tobias Klauser)
- Two new interrupt controller drivers for MVEBU (Thomas Petazzoni)
- The usual set of fixes, updates and enhancements all over the
place"
* 'irq-core-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip: (112 commits)
irqchip/or1k-pic: Fix interrupt acknowledgement
irqchip/irq-mvebu-gicp: Allocate enough memory for spi_bitmap
irqchip/gic-v3: Fix out-of-bound access in gic_set_affinity
nvme: Allocate queues for all possible CPUs
blk-mq: Create hctx for each present CPU
blk-mq: Include all present CPUs in the default queue mapping
genirq: Avoid unnecessary low level irq function calls
genirq: Set irq masked state when initializing irq_desc
genirq/timings: Add infrastructure for estimating the next interrupt arrival time
genirq/timings: Add infrastructure to track the interrupt timings
genirq/debugfs: Remove pointless NULL pointer check
irqchip/gic-v3-its: Don't assume GICv3 hardware supports 16bit INTID
irqchip/gic-v3-its: Add ACPI NUMA node mapping
irqchip/gic-v3-its-platform-msi: Make of_device_ids const
irqchip/gic-v3-its: Make of_device_ids const
irqchip/irq-mvebu-icu: Add new driver for Marvell ICU
irqchip/irq-mvebu-gicp: Add new driver for Marvell GICP
dt-bindings/interrupt-controller: Add DT binding for the Marvell ICU
genirq/irqdomain: Remove auto-recursive hierarchy support
irqchip/MSI: Use irq_domain_update_bus_token instead of an open coded access
...
Diffstat (limited to 'kernel/irq/internals.h')
-rw-r--r--   kernel/irq/internals.h   225
1 file changed, 223 insertions(+), 2 deletions(-)
diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h
index bc226e783bd2..9da14d125df4 100644
--- a/kernel/irq/internals.h
+++ b/kernel/irq/internals.h
@@ -8,6 +8,7 @@
 #include <linux/irqdesc.h>
 #include <linux/kernel_stat.h>
 #include <linux/pm_runtime.h>
+#include <linux/sched/clock.h>
 
 #ifdef CONFIG_SPARSE_IRQ
 # define IRQ_BITMAP_BITS        (NR_IRQS + 8196)
@@ -57,6 +58,7 @@ enum {
         IRQS_WAITING            = 0x00000080,
         IRQS_PENDING            = 0x00000200,
         IRQS_SUSPENDED          = 0x00000800,
+        IRQS_TIMINGS            = 0x00001000,
 };
 
 #include "debug.h"
@@ -66,7 +68,14 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned long flags);
 extern void __disable_irq(struct irq_desc *desc);
 extern void __enable_irq(struct irq_desc *desc);
 
-extern int irq_startup(struct irq_desc *desc, bool resend);
+#define IRQ_RESEND      true
+#define IRQ_NORESEND    false
+
+#define IRQ_START_FORCE true
+#define IRQ_START_COND  false
+
+extern int irq_startup(struct irq_desc *desc, bool resend, bool force);
+
 extern void irq_shutdown(struct irq_desc *desc);
 extern void irq_enable(struct irq_desc *desc);
 extern void irq_disable(struct irq_desc *desc);
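
The hunk above replaces irq_startup()'s bare boolean with two named parameters. A minimal userspace sketch (struct irq_desc and the irq_startup() body here are stubs, purely illustrative, not kernel code) of why the named constants keep call sites readable:

    /* Illustrative sketch only: stand-in types, not kernel code. */
    #include <stdbool.h>
    #include <stdio.h>

    #define IRQ_RESEND      true
    #define IRQ_NORESEND    false
    #define IRQ_START_FORCE true
    #define IRQ_START_COND  false

    struct irq_desc { int irq; };       /* stub for the example */

    static int irq_startup(struct irq_desc *desc, bool resend, bool force)
    {
        printf("irq %d: startup resend=%d force=%d\n",
               desc->irq, resend, force);
        return 0;
    }

    int main(void)
    {
        struct irq_desc desc = { .irq = 42 };

        /* Conditional startup, replaying a pending interrupt if needed. */
        irq_startup(&desc, IRQ_RESEND, IRQ_START_COND);
        /* Forced startup, without resending. */
        irq_startup(&desc, IRQ_NORESEND, IRQ_START_FORCE);
        return 0;
    }

The real callers in kernel/irq/ are expected to pass these constants rather than bare true/false, which is the whole point of defining them.
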
@@ -109,13 +118,19 @@ static inline void unregister_handler_proc(unsigned int irq,
 
 extern bool irq_can_set_affinity_usr(unsigned int irq);
 
-extern int irq_select_affinity_usr(unsigned int irq, struct cpumask *mask);
+extern int irq_select_affinity_usr(unsigned int irq);
 
 extern void irq_set_thread_affinity(struct irq_desc *desc);
 
 extern int irq_do_set_affinity(struct irq_data *data,
                                const struct cpumask *dest, bool force);
 
+#ifdef CONFIG_SMP
+extern int irq_setup_affinity(struct irq_desc *desc);
+#else
+static inline int irq_setup_affinity(struct irq_desc *desc) { return 0; }
+#endif
+
 /* Inline functions for support of irq chips on slow busses */
 static inline void chip_bus_lock(struct irq_desc *desc)
 {
@@ -169,6 +184,11 @@ irq_put_desc_unlock(struct irq_desc *desc, unsigned long flags)
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
 
+static inline unsigned int irqd_get(struct irq_data *d)
+{
+        return __irqd_to_state(d);
+}
+
 /*
  * Manipulation functions for irq_data.state
  */
@@ -182,6 +202,16 @@ static inline void irqd_clr_move_pending(struct irq_data *d)
         __irqd_to_state(d) &= ~IRQD_SETAFFINITY_PENDING;
 }
 
+static inline void irqd_set_managed_shutdown(struct irq_data *d)
+{
+        __irqd_to_state(d) |= IRQD_MANAGED_SHUTDOWN;
+}
+
+static inline void irqd_clr_managed_shutdown(struct irq_data *d)
+{
+        __irqd_to_state(d) &= ~IRQD_MANAGED_SHUTDOWN;
+}
+
 static inline void irqd_clear(struct irq_data *d, unsigned int mask)
 {
         __irqd_to_state(d) &= ~mask;
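
The managed-shutdown helpers above just set and clear one bit in the per-irq state word; that bit is what lets the hotplug path mark a managed interrupt as shut down when its last CPU goes offline and restart it when the CPU returns. A userspace model of the pattern (a plain unsigned int stands in for __irqd_to_state(), and the flag value is a placeholder, not the kernel's real IRQD_* encoding):

    /* Userspace model of the state-bit helpers; values are placeholders. */
    #include <stdio.h>

    #define IRQD_MANAGED_SHUTDOWN (1u << 0)   /* placeholder bit for the demo */

    struct irq_data { unsigned int state; };

    static unsigned int irqd_get(struct irq_data *d)
    {
        return d->state;                      /* snapshot, as irqd_get() does */
    }

    static void irqd_set_managed_shutdown(struct irq_data *d)
    {
        d->state |= IRQD_MANAGED_SHUTDOWN;
    }

    static void irqd_clr_managed_shutdown(struct irq_data *d)
    {
        d->state &= ~IRQD_MANAGED_SHUTDOWN;
    }

    int main(void)
    {
        struct irq_data d = { 0 };

        irqd_set_managed_shutdown(&d);        /* last CPU of the mask went offline */
        printf("shutdown? %d\n", !!(irqd_get(&d) & IRQD_MANAGED_SHUTDOWN));
        irqd_clr_managed_shutdown(&d);        /* CPU came back online */
        printf("shutdown? %d\n", !!(irqd_get(&d) & IRQD_MANAGED_SHUTDOWN));
        return 0;
    }

In the real header the bit lives in the state word reached through __irqd_to_state(), alongside the other IRQD_* flags.
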
@@ -226,3 +256,194 @@ irq_pm_install_action(struct irq_desc *desc, struct irqaction *action) { }
 static inline void
 irq_pm_remove_action(struct irq_desc *desc, struct irqaction *action) { }
 #endif
+
+#ifdef CONFIG_IRQ_TIMINGS
+
+#define IRQ_TIMINGS_SHIFT       5
+#define IRQ_TIMINGS_SIZE        (1 << IRQ_TIMINGS_SHIFT)
+#define IRQ_TIMINGS_MASK        (IRQ_TIMINGS_SIZE - 1)
+
+/**
+ * struct irq_timings - irq timings storing structure
+ * @values:     a circular buffer of u64 encoded <timestamp,irq> values
+ * @count:      the number of elements in the array
+ */
+struct irq_timings {
+        u64     values[IRQ_TIMINGS_SIZE];
+        int     count;
+};
+
+DECLARE_PER_CPU(struct irq_timings, irq_timings);
+
+extern void irq_timings_free(int irq);
+extern int irq_timings_alloc(int irq);
+
+static inline void irq_remove_timings(struct irq_desc *desc)
+{
+        desc->istate &= ~IRQS_TIMINGS;
+
+        irq_timings_free(irq_desc_get_irq(desc));
+}
+
+static inline void irq_setup_timings(struct irq_desc *desc, struct irqaction *act)
+{
+        int irq = irq_desc_get_irq(desc);
+        int ret;
+
+        /*
+         * We don't need the measurement because the idle code already
+         * knows the next expiry event.
+         */
+        if (act->flags & __IRQF_TIMER)
+                return;
+
+        /*
+         * In case the timing allocation fails, we just want to warn,
+         * not fail, so letting the system boot anyway.
+         */
+        ret = irq_timings_alloc(irq);
+        if (ret) {
+                pr_warn("Failed to allocate irq timing stats for irq%d (%d)",
+                        irq, ret);
+                return;
+        }
+
+        desc->istate |= IRQS_TIMINGS;
+}
+
+extern void irq_timings_enable(void);
+extern void irq_timings_disable(void);
+
+DECLARE_STATIC_KEY_FALSE(irq_timing_enabled);
+
+/*
+ * The interrupt number and the timestamp are encoded into a single
+ * u64 variable to optimize the size.
+ * 48 bit time stamp and 16 bit IRQ number is way sufficient.
+ * Who cares an IRQ after 78 hours of idle time?
+ */
+static inline u64 irq_timing_encode(u64 timestamp, int irq)
+{
+        return (timestamp << 16) | irq;
+}
+
+static inline int irq_timing_decode(u64 value, u64 *timestamp)
+{
+        *timestamp = value >> 16;
+        return value & U16_MAX;
+}
+
+/*
+ * The function record_irq_time is only called in one place in the
+ * interrupts handler. We want this function always inline so the code
+ * inside is embedded in the function and the static key branching
+ * code can act at the higher level. Without the explicit
+ * __always_inline we can end up with a function call and a small
+ * overhead in the hotpath for nothing.
+ */
+static __always_inline void record_irq_time(struct irq_desc *desc)
+{
+        if (!static_branch_likely(&irq_timing_enabled))
+                return;
+
+        if (desc->istate & IRQS_TIMINGS) {
+                struct irq_timings *timings = this_cpu_ptr(&irq_timings);
+
+                timings->values[timings->count & IRQ_TIMINGS_MASK] =
+                        irq_timing_encode(local_clock(),
+                                          irq_desc_get_irq(desc));
+
+                timings->count++;
+        }
+}
+#else
+static inline void irq_remove_timings(struct irq_desc *desc) {}
+static inline void irq_setup_timings(struct irq_desc *desc,
+                                     struct irqaction *act) {};
+static inline void record_irq_time(struct irq_desc *desc) {}
+#endif /* CONFIG_IRQ_TIMINGS */
+
+
+#ifdef CONFIG_GENERIC_IRQ_CHIP
+void irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
+                           int num_ct, unsigned int irq_base,
+                           void __iomem *reg_base, irq_flow_handler_t handler);
+#else
+static inline void
+irq_init_generic_chip(struct irq_chip_generic *gc, const char *name,
+                      int num_ct, unsigned int irq_base,
+                      void __iomem *reg_base, irq_flow_handler_t handler) { }
+#endif /* CONFIG_GENERIC_IRQ_CHIP */
+
+#ifdef CONFIG_GENERIC_PENDING_IRQ
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
+{
+        return irqd_can_move_in_process_context(data);
+}
+static inline bool irq_move_pending(struct irq_data *data)
+{
+        return irqd_is_setaffinity_pending(data);
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+        cpumask_copy(desc->pending_mask, mask);
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+        cpumask_copy(mask, desc->pending_mask);
+}
+static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
+{
+        return desc->pending_mask;
+}
+bool irq_fixup_move_pending(struct irq_desc *desc, bool force_clear);
+#else /* CONFIG_GENERIC_PENDING_IRQ */
+static inline bool irq_can_move_pcntxt(struct irq_data *data)
+{
+        return true;
+}
+static inline bool irq_move_pending(struct irq_data *data)
+{
+        return false;
+}
+static inline void
+irq_copy_pending(struct irq_desc *desc, const struct cpumask *mask)
+{
+}
+static inline void
+irq_get_pending(struct cpumask *mask, struct irq_desc *desc)
+{
+}
+static inline struct cpumask *irq_desc_get_pending_mask(struct irq_desc *desc)
+{
+        return NULL;
+}
+static inline bool irq_fixup_move_pending(struct irq_desc *desc, bool fclear)
+{
+        return false;
+}
+#endif /* !CONFIG_GENERIC_PENDING_IRQ */
+
+#ifdef CONFIG_GENERIC_IRQ_DEBUGFS
+#include <linux/debugfs.h>
+
+void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *desc);
+static inline void irq_remove_debugfs_entry(struct irq_desc *desc)
+{
+        debugfs_remove(desc->debugfs_file);
+}
+# ifdef CONFIG_IRQ_DOMAIN
+void irq_domain_debugfs_init(struct dentry *root);
+# else
+static inline void irq_domain_debugfs_init(struct dentry *root);
+# endif
+#else /* CONFIG_GENERIC_IRQ_DEBUGFS */
+static inline void irq_add_debugfs_entry(unsigned int irq, struct irq_desc *d)
+{
+}
+static inline void irq_remove_debugfs_entry(struct irq_desc *d)
+{
+}
+#endif /* CONFIG_GENERIC_IRQ_DEBUGFS */
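
For reference, the timings infrastructure added in the last hunk boils down to a per-CPU, power-of-two ring buffer plus a 48/16-bit packing of timestamp and irq number into one u64. A self-contained userspace sketch of that data structure (the constants and the encode/decode logic mirror the hunk; the timestamps, the irq number, and everything else are illustrative stand-ins, e.g. a counter in place of local_clock()):

    /* Userspace sketch of the irq timings ring buffer; values are fake. */
    #include <stdint.h>
    #include <stdio.h>

    #define IRQ_TIMINGS_SHIFT   5
    #define IRQ_TIMINGS_SIZE    (1 << IRQ_TIMINGS_SHIFT)
    #define IRQ_TIMINGS_MASK    (IRQ_TIMINGS_SIZE - 1)

    struct irq_timings {
        uint64_t values[IRQ_TIMINGS_SIZE];    /* circular buffer */
        int count;                            /* monotonically increasing */
    };

    static uint64_t irq_timing_encode(uint64_t timestamp, int irq)
    {
        return (timestamp << 16) | irq;       /* 48-bit time, 16-bit irq */
    }

    static int irq_timing_decode(uint64_t value, uint64_t *timestamp)
    {
        *timestamp = value >> 16;
        return value & 0xffff;
    }

    int main(void)
    {
        struct irq_timings timings = { .count = 0 };
        uint64_t ts;
        int i, irq;

        /* Record 40 fake events: the ring keeps only the last 32. */
        for (i = 0; i < 40; i++) {
            uint64_t now = 1000 + i;          /* stand-in for local_clock() */

            timings.values[timings.count & IRQ_TIMINGS_MASK] =
                irq_timing_encode(now, 42);
            timings.count++;
        }

        irq = irq_timing_decode(timings.values[timings.count & IRQ_TIMINGS_MASK], &ts);
        printf("oldest retained slot: irq=%d timestamp=%llu\n",
               irq, (unsigned long long)ts);
        return 0;
    }

Because count only ever increments, count & IRQ_TIMINGS_MASK always indexes the slot that will be overwritten next, i.e. the oldest retained sample, so a consumer can walk the samples oldest-first from that point.
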