path: root/kernel/irq/spurious.c
author: Thomas Gleixner <tglx@linutronix.de>	2011-02-07 03:52:27 -0500
committer: Thomas Gleixner <tglx@linutronix.de>	2011-02-19 06:58:09 -0500
commit: c7259cd7af757ddcd65701c37099dcddae2054f0 (patch)
tree: 42f957d4c516563e01870199b9ffc6e4e00cdf4a /kernel/irq/spurious.c
parent: fa27271bc8d230355c1f24ddea103824fdc12de6 (diff)
genirq: Do not poll disabled, percpu and timer interrupts
There is no point in polling disabled lines.

Polling per_cpu interrupts makes no sense either, because we only poll on the CPU we are currently running on. It is also racy as hell: the handler runs without locking, so we might get a huge surprise.

If the timer interrupt needs polling, we won't get there anyway.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
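The shape of the change, condensed: try_one_irq() gains a force argument and bails out early for lines that must never be polled. Below is a minimal sketch of the new entry checks only, condensed from the diff further down; the actual polling and IRQ_INPROGRESS handling between the checks and the out label is elided here and unchanged by this patch.

static int try_one_irq(int irq, struct irq_desc *desc, bool force)
{
	struct irqaction *action;
	int ok = 0;

	raw_spin_lock(&desc->lock);

	/* PER_CPU and nested thread interrupts are never polled */
	if (desc->status & (IRQ_PER_CPU | IRQ_NESTED_THREAD))
		goto out;

	/* Poll a disabled line only when the spurious-disable poller forces it */
	if ((desc->status & IRQ_DISABLED) && !force)
		goto out;

	/* Never poll timer interrupts; only shared action chains qualify */
	action = desc->action;
	if (!action || !(action->flags & IRQF_SHARED) ||
	    (action->flags & __IRQF_TIMER) || !action->next)
		goto out;

	/* ... polling of the action chain happens here, see the diff below ... */
out:
	raw_spin_unlock(&desc->lock);
	return ok;
}

The two callers differ only in the new flag: misrouted_irq() passes force = false, while poll_spurious_irqs(), which runs for lines already disabled as spurious, passes force = true.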
Diffstat (limited to 'kernel/irq/spurious.c')
-rw-r--r--	kernel/irq/spurious.c	40
1 file changed, 26 insertions(+), 14 deletions(-)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 0af9e59c82eb..bd0e42d3e0ba 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -25,30 +25,42 @@ static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
 /*
  * Recovery handler for misrouted interrupts.
  */
-static int try_one_irq(int irq, struct irq_desc *desc)
+static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 {
 	struct irqaction *action;
 	int ok = 0, work = 0;
 
 	raw_spin_lock(&desc->lock);
+
+	/* PER_CPU and nested thread interrupts are never polled */
+	if (desc->status & (IRQ_PER_CPU | IRQ_NESTED_THREAD))
+		goto out;
+
+	/*
+	 * Do not poll disabled interrupts unless the spurious
+	 * disabled poller asks explicitly.
+	 */
+	if ((desc->status & IRQ_DISABLED) && !force)
+		goto out;
+
+	/*
+	 * All handlers must agree on IRQF_SHARED, so we test just the
+	 * first. Check for action->next as well.
+	 */
+	action = desc->action;
+	if (!action || !(action->flags & IRQF_SHARED) ||
+	    (action->flags & __IRQF_TIMER) || !action->next)
+		goto out;
+
 	/* Already running on another processor */
 	if (desc->status & IRQ_INPROGRESS) {
 		/*
 		 * Already running: If it is shared get the other
 		 * CPU to go looking for our mystery interrupt too
 		 */
-		if (desc->action && (desc->action->flags & IRQF_SHARED))
-			desc->status |= IRQ_PENDING;
-		raw_spin_unlock(&desc->lock);
-		return ok;
-	}
-	/*
-	 * All handlers must agree on IRQF_SHARED, so we test just the
-	 * first. Check for action->next as well.
-	 */
-	action = desc->action;
-	if (!action || !(action->flags & IRQF_SHARED) || !action->next)
+		desc->status |= IRQ_PENDING;
 		goto out;
+	}
 
 	/* Honour the normal IRQ locking */
 	desc->status |= IRQ_INPROGRESS;
@@ -87,7 +99,7 @@ static int misrouted_irq(int irq)
 		if (i == irq)	/* Already tried */
 			continue;
 
-		if (try_one_irq(i, desc))
+		if (try_one_irq(i, desc, false))
 			ok = 1;
 	}
 	/* So the caller can adjust the irq error counts */
@@ -112,7 +124,7 @@ static void poll_spurious_irqs(unsigned long dummy)
 			continue;
 
 		local_irq_disable();
-		try_one_irq(i, desc);
+		try_one_irq(i, desc, true);
 		local_irq_enable();
 	}
 