| author | Thomas Gleixner <tglx@linutronix.de> | 2013-11-06 06:30:07 -0500 |
|---|---|---|
| committer | Thomas Gleixner <tglx@linutronix.de> | 2013-11-13 10:03:02 -0500 |
| commit | b39898cd4077f4b6ec706e717c938751c34e1dc4 | |
| tree | ff5abed20e1f0199124d83844e714a3eff2b895b /kernel/irq/spurious.c | |
| parent | 9b66bfb28049594fe2bb2b91607ba302f511ce8b | |
genirq: Prevent spurious detection for unconditionally polled interrupts
On a 68k platform a couple of interrupts are demultiplexed and
"polled" from a top level interrupt. Unfortunately there is no way to
determine which of the sub interrupts raised the top level interrupt,
so all of the demultiplexed interrupt handlers need to be
invoked. Given a high enough frequency this can trigger the spurious
interrupt detection mechanism if one of the demultiplexed interrupts
returns IRQ_NONE continuously. But this is a false positive: the
behaviour is caused by the polling, not by buggy hardware/software.
Introduce IRQ_IS_POLLED, which can be set at interrupt chip setup time via
irq_set_status_flags(). The flag excludes the interrupt from the
spurious detector and from all core polling activities.
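
As a rough illustration (not part of this commit), a demultiplexing driver like the m68k one described above might use the new flag as sketched below. The names DEMUX_IRQ_BASE, DEMUX_NR_IRQS, PARENT_IRQ, demux_parent_handler and demux_init are invented for the sketch; irq_set_status_flags(), generic_handle_irq(), request_irq(), dummy_irq_chip and handle_simple_irq are existing genirq/driver APIs.

```c
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>

#define PARENT_IRQ	13	/* hypothetical top level IRQ line */
#define DEMUX_IRQ_BASE	64	/* hypothetical base of the demuxed IRQ range */
#define DEMUX_NR_IRQS	8	/* hypothetical number of sub interrupts */

/*
 * Top level handler: the hardware exposes no status register, so every
 * demultiplexed handler must be invoked on each parent interrupt; most
 * of them will return IRQ_NONE every time.
 */
static irqreturn_t demux_parent_handler(int irq, void *dev_id)
{
	unsigned int i;

	for (i = 0; i < DEMUX_NR_IRQS; i++)
		generic_handle_irq(DEMUX_IRQ_BASE + i);

	return IRQ_HANDLED;
}

static int __init demux_init(void)
{
	unsigned int i;

	for (i = 0; i < DEMUX_NR_IRQS; i++) {
		irq_set_chip_and_handler(DEMUX_IRQ_BASE + i,
					 &dummy_irq_chip, handle_simple_irq);
		/*
		 * Exclude the sub interrupt from the spurious detector
		 * and from core side polling: IRQ_NONE returns are
		 * expected here and do not indicate broken hardware.
		 */
		irq_set_status_flags(DEMUX_IRQ_BASE + i, IRQ_IS_POLLED);
	}

	return request_irq(PARENT_IRQ, demux_parent_handler, 0,
			   "demux-parent", NULL);
}
```

Without the IRQ_IS_POLLED marking, the sub interrupts in a setup like this would eventually trip note_interrupt()'s 99,900-out-of-100,000 unhandled threshold and be disabled.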
Reported-and-tested-by: Michael Schmitz <schmitzmic@gmail.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: linux-m68k@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1311061149250.23353@ionos.tec.linutronix.de
Diffstat (limited to 'kernel/irq/spurious.c')

```
 -rw-r--r--  kernel/irq/spurious.c | 12 +++++++++---
 1 file changed, 9 insertions(+), 3 deletions(-)
```
```diff
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 7b5f012bde9d..a1d8cc63b56e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -67,8 +67,13 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 
 	raw_spin_lock(&desc->lock);
 
-	/* PER_CPU and nested thread interrupts are never polled */
-	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
+	/*
+	 * PER_CPU, nested thread interrupts and interrupts explicitly
+	 * marked polled are excluded from polling.
+	 */
+	if (irq_settings_is_per_cpu(desc) ||
+	    irq_settings_is_nested_thread(desc) ||
+	    irq_settings_is_polled(desc))
 		goto out;
 
 	/*
@@ -268,7 +273,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		    irqreturn_t action_ret)
 {
-	if (desc->istate & IRQS_POLL_INPROGRESS)
+	if (desc->istate & IRQS_POLL_INPROGRESS ||
+	    irq_settings_is_polled(desc))
 		return;
 
 	/* we get here again via the threaded handler */
```
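
For context, the irq_settings_is_polled() test used in both hunks follows the pattern of the other irq_settings_* accessors in kernel/irq/settings.h. That file is outside this diffstat, so the sketch below is a reconstruction along the lines of the existing helpers, not a quote from the commit:

```c
/*
 * kernel/irq/settings.h (sketch): _IRQ_IS_POLLED mirrors the public
 * IRQ_IS_POLLED status flag into the per-descriptor settings space,
 * like _IRQ_PER_CPU and friends.
 */
static inline bool irq_settings_is_polled(struct irq_desc *desc)
{
	return desc->status_use_accessors & _IRQ_IS_POLLED;
}
```

Because the check sits at the very top of both try_one_irq() and note_interrupt(), a polled descriptor never contributes to the unhandled-interrupt statistics and is never selected by the misrouted-IRQ poller.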