author     Eric W. Biederman <ebiederm@xmission.com>   2008-07-10 17:48:54 -0400
committer  Ingo Molnar <mingo@elte.hu>                 2008-07-18 13:21:13 -0400
commit     f84dbb912f344270f31d5cce974f12908a47798d
tree       0eb8543dc2f9455decd0c69f07d40f03e1868932 /kernel/irq
parent     5b664cb235e97afbf34db9c4d77f08ebd725335e
genirq: enable polling for disabled screaming irqs
When we disable a screaming irq, we never see it again. If the irq
line is shared, or if the driver half works, this is a real pain. So
periodically poll the handlers of screaming interrupts.
I use a timer instead of the classic irq-poll technique of working off
the timer interrupt, because when we use the local APIC timers
note_interrupt() is never called (bug?). Further, on a system with
dynamic ticks the timer interrupt might not fire at all unless a
pending timer tells it that it needs to.
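In code terms, the mechanism is small. The sketch below is condensed from the diff that follows, with explanatory comments added; it leans on genirq internals that the patch defines or already has available (irq_desc[], NR_IRQS, IRQ_SPURIOUS_DISABLED and the new try_one_irq() helper), and the locking plus the deliberately racy status snapshot are omitted here, so see the full diff for those details.

#include <linux/timer.h>

#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)

static void poll_spurious_irqs(unsigned long dummy);
/* One global timer; it is only armed once an irq has been disabled as spurious. */
static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);

static void poll_spurious_irqs(unsigned long dummy)
{
	int i;

	for (i = 1; i < NR_IRQS; i++) {
		struct irq_desc *desc = irq_desc + i;

		/* Only lines that were shut down as screaming/spurious are polled. */
		if (!(desc->status & IRQ_SPURIOUS_DISABLED))
			continue;

		/* Run the shared handlers once, as if the interrupt had fired. */
		try_one_irq(i, desc);
	}

	/* Re-arm, so disabled lines keep being serviced every HZ/10. */
	mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
}

note_interrupt() arms this timer at the moment it disables a screaming line, so the polling costs nothing until a line has actually been shut down.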
I forced this case on my test system with an e1000 NIC, and my ssh
session remained responsive even though the interrupt handler was only
being called every tenth of a second.
Signed-off-by: Eric W. Biederman <ebiederm@xmission.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/irq')
-rw-r--r--  kernel/irq/spurious.c | 146
1 file changed, 91 insertions(+), 55 deletions(-)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index c66d3f10e853..19fe9d6ebfe8 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -12,83 +12,117 @@
 #include <linux/kallsyms.h>
 #include <linux/interrupt.h>
 #include <linux/moduleparam.h>
+#include <linux/timer.h>
 
 static int irqfixup __read_mostly;
 
+#define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
+static void poll_spurious_irqs(unsigned long dummy);
+static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+
 /*
  * Recovery handler for misrouted interrupts.
  */
-static int misrouted_irq(int irq)
+static int try_one_irq(int irq, struct irq_desc *desc)
 {
-	int i;
+	struct irqaction *action;
 	int ok = 0;
 	int work = 0;	/* Did we do work for a real IRQ */
 
-	for (i = 1; i < NR_IRQS; i++) {
-		struct irq_desc *desc = irq_desc + i;
-		struct irqaction *action;
-
-		if (i == irq)	/* Already tried */
-			continue;
-
-		spin_lock(&desc->lock);
-		/* Already running on another processor */
-		if (desc->status & IRQ_INPROGRESS) {
-			/*
-			 * Already running: If it is shared get the other
-			 * CPU to go looking for our mystery interrupt too
-			 */
-			if (desc->action && (desc->action->flags & IRQF_SHARED))
-				desc->status |= IRQ_PENDING;
-			spin_unlock(&desc->lock);
-			continue;
-		}
-		/* Honour the normal IRQ locking */
-		desc->status |= IRQ_INPROGRESS;
-		action = desc->action;
+	spin_lock(&desc->lock);
+	/* Already running on another processor */
+	if (desc->status & IRQ_INPROGRESS) {
+		/*
+		 * Already running: If it is shared get the other
+		 * CPU to go looking for our mystery interrupt too
+		 */
+		if (desc->action && (desc->action->flags & IRQF_SHARED))
+			desc->status |= IRQ_PENDING;
 		spin_unlock(&desc->lock);
+		return ok;
+	}
+	/* Honour the normal IRQ locking */
+	desc->status |= IRQ_INPROGRESS;
+	action = desc->action;
+	spin_unlock(&desc->lock);
 
-		while (action) {
-			/* Only shared IRQ handlers are safe to call */
-			if (action->flags & IRQF_SHARED) {
-				if (action->handler(i, action->dev_id) ==
-						IRQ_HANDLED)
-					ok = 1;
-			}
-			action = action->next;
+	while (action) {
+		/* Only shared IRQ handlers are safe to call */
+		if (action->flags & IRQF_SHARED) {
+			if (action->handler(irq, action->dev_id) ==
+				IRQ_HANDLED)
+				ok = 1;
 		}
-		local_irq_disable();
-		/* Now clean up the flags */
-		spin_lock(&desc->lock);
-		action = desc->action;
+		action = action->next;
+	}
+	local_irq_disable();
+	/* Now clean up the flags */
+	spin_lock(&desc->lock);
+	action = desc->action;
 
+	/*
+	 * While we were looking for a fixup someone queued a real
+	 * IRQ clashing with our walk:
+	 */
+	while ((desc->status & IRQ_PENDING) && action) {
 		/*
-		 * While we were looking for a fixup someone queued a real
-		 * IRQ clashing with our walk:
-		 */
-		while ((desc->status & IRQ_PENDING) && action) {
-			/*
-			 * Perform real IRQ processing for the IRQ we deferred
-			 */
-			work = 1;
-			spin_unlock(&desc->lock);
-			handle_IRQ_event(i, action);
-			spin_lock(&desc->lock);
-			desc->status &= ~IRQ_PENDING;
-		}
-		desc->status &= ~IRQ_INPROGRESS;
-		/*
-		 * If we did actual work for the real IRQ line we must let the
-		 * IRQ controller clean up too
+		 * Perform real IRQ processing for the IRQ we deferred
 		 */
-		if (work && desc->chip && desc->chip->end)
-			desc->chip->end(i);
+		work = 1;
 		spin_unlock(&desc->lock);
+		handle_IRQ_event(irq, action);
+		spin_lock(&desc->lock);
+		desc->status &= ~IRQ_PENDING;
+	}
+	desc->status &= ~IRQ_INPROGRESS;
+	/*
+	 * If we did actual work for the real IRQ line we must let the
+	 * IRQ controller clean up too
+	 */
+	if (work && desc->chip && desc->chip->end)
+		desc->chip->end(irq);
+	spin_unlock(&desc->lock);
+
+	return ok;
+}
+
+static int misrouted_irq(int irq)
+{
+	int i;
+	int ok = 0;
+
+	for (i = 1; i < NR_IRQS; i++) {
+		struct irq_desc *desc = irq_desc + i;
+
+		if (i == irq)	/* Already tried */
+			continue;
+
+		if (try_one_irq(i, desc))
+			ok = 1;
 	}
 	/* So the caller can adjust the irq error counts */
 	return ok;
 }
 
+static void poll_spurious_irqs(unsigned long dummy)
+{
+	int i;
+	for (i = 1; i < NR_IRQS; i++) {
+		struct irq_desc *desc = irq_desc + i;
+		unsigned int status;
+
+		/* Racy but it doesn't matter */
+		status = desc->status;
+		barrier();
+		if (!(status & IRQ_SPURIOUS_DISABLED))
+			continue;
+
+		try_one_irq(i, desc);
+	}
+
+	mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -212,6 +246,8 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
 		desc->depth++;
 		desc->chip->disable(irq);
+
+		mod_timer(&poll_spurious_irq_timer, jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 	}
 	desc->irqs_unhandled = 0;
 }