author		Thomas Gleixner <tglx@linutronix.de>	2013-11-06 06:30:07 -0500
committer	Thomas Gleixner <tglx@linutronix.de>	2013-11-13 10:03:02 -0500
commit		b39898cd4077f4b6ec706e717c938751c34e1dc4
tree		ff5abed20e1f0199124d83844e714a3eff2b895b	/kernel/irq
parent		9b66bfb28049594fe2bb2b91607ba302f511ce8b
genirq: Prevent spurious detection for unconditionally polled interrupts
On a 68k platform a couple of interrupts are demultiplexed and "polled" from a top level interrupt. Unfortunately there is no way to determine which of the sub interrupts raised the top level interrupt, so all of the demultiplexed interrupt handlers need to be invoked. Given a high enough frequency this can trigger the spurious interrupt detection mechanism if one of the demultiplexed interrupts returns IRQ_NONE continuously. But this is a false positive, as the polling causes this behaviour and not buggy hardware/software.

Introduce IRQ_IS_POLLED, which can be set at interrupt chip setup time via irq_set_status_flags(). The flag excludes the interrupt from the spurious detector and from all core polling activities.

Reported-and-tested-by: Michael Schmitz <schmitzmic@gmail.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: linux-m68k@vger.kernel.org
Link: http://lkml.kernel.org/r/alpine.DEB.2.02.1311061149250.23353@ionos.tec.linutronix.de
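As an illustration only (not part of this commit), a platform with demultiplexed sub interrupts might mark them at chip setup time roughly as below. The IRQ range, the setup function and the use of dummy_irq_chip with handle_simple_irq are assumptions for the sketch; only irq_set_status_flags() and the new IRQ_IS_POLLED flag come from this change.

#include <linux/init.h>
#include <linux/irq.h>

/* Hypothetical range of demultiplexed sub interrupts polled from one parent. */
#define DEMUX_IRQ_FIRST	64
#define DEMUX_IRQ_COUNT	4

static void __init example_demux_irq_setup(void)
{
	unsigned int irq;

	for (irq = DEMUX_IRQ_FIRST; irq < DEMUX_IRQ_FIRST + DEMUX_IRQ_COUNT; irq++) {
		irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq);
		/*
		 * Exclude the unconditionally polled sub interrupt from the
		 * spurious detector and from the core polling activities.
		 */
		irq_set_status_flags(irq, IRQ_IS_POLLED);
	}
}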
Diffstat (limited to 'kernel/irq')
-rw-r--r--	kernel/irq/settings.h	7
-rw-r--r--	kernel/irq/spurious.c	12
2 files changed, 16 insertions(+), 3 deletions(-)
diff --git a/kernel/irq/settings.h b/kernel/irq/settings.h
index 1162f1030f18..3320b84cc60f 100644
--- a/kernel/irq/settings.h
+++ b/kernel/irq/settings.h
@@ -14,6 +14,7 @@ enum {
 	_IRQ_NO_BALANCING	= IRQ_NO_BALANCING,
 	_IRQ_NESTED_THREAD	= IRQ_NESTED_THREAD,
 	_IRQ_PER_CPU_DEVID	= IRQ_PER_CPU_DEVID,
+	_IRQ_IS_POLLED		= IRQ_IS_POLLED,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
@@ -26,6 +27,7 @@ enum {
 #define IRQ_NOAUTOEN		GOT_YOU_MORON
 #define IRQ_NESTED_THREAD	GOT_YOU_MORON
 #define IRQ_PER_CPU_DEVID	GOT_YOU_MORON
+#define IRQ_IS_POLLED		GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
@@ -147,3 +149,8 @@ static inline bool irq_settings_is_nested_thread(struct irq_desc *desc)
 {
 	return desc->status_use_accessors & _IRQ_NESTED_THREAD;
 }
+
+static inline bool irq_settings_is_polled(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_IS_POLLED;
+}
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 7b5f012bde9d..a1d8cc63b56e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -67,8 +67,13 @@ static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 
 	raw_spin_lock(&desc->lock);
 
-	/* PER_CPU and nested thread interrupts are never polled */
-	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
+	/*
+	 * PER_CPU, nested thread interrupts and interrupts explicitely
+	 * marked polled are excluded from polling.
+	 */
+	if (irq_settings_is_per_cpu(desc) ||
+	    irq_settings_is_nested_thread(desc) ||
+	    irq_settings_is_polled(desc))
 		goto out;
 
 	/*
@@ -268,7 +273,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		    irqreturn_t action_ret)
 {
-	if (desc->istate & IRQS_POLL_INPROGRESS)
+	if (desc->istate & IRQS_POLL_INPROGRESS ||
+	    irq_settings_is_polled(desc))
 		return;
 
 	/* we get here again via the threaded handler */
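For context, a sketch (again not from this patch) of the kind of top level demultiplex handler the changelog describes, reusing the hypothetical DEMUX_IRQ_* range from the sketch above. Because the parent cannot tell which sub interrupt fired, every sub handler is invoked and idle sub devices keep returning IRQ_NONE; with IRQ_IS_POLLED set on those lines, note_interrupt() now returns early instead of counting the IRQ_NONE results toward the spurious threshold.

#include <linux/interrupt.h>
#include <linux/irqdesc.h>

static irqreturn_t example_top_level_handler(int irq, void *dev_id)
{
	unsigned int sub;

	/* No way to tell which sub interrupt fired, so invoke them all. */
	for (sub = DEMUX_IRQ_FIRST; sub < DEMUX_IRQ_FIRST + DEMUX_IRQ_COUNT; sub++)
		generic_handle_irq(sub);

	return IRQ_HANDLED;
}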