path: root/kernel/irq/spurious.c
Diffstat (limited to 'kernel/irq/spurious.c')
-rw-r--r--   kernel/irq/spurious.c   195
1 files changed, 128 insertions, 67 deletions
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 89fb90ae534f..aa57d5da18c1 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -14,75 +14,100 @@
 #include <linux/moduleparam.h>
 #include <linux/timer.h>
 
+#include "internals.h"
+
 static int irqfixup __read_mostly;
 
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
 static void poll_spurious_irqs(unsigned long dummy);
 static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+static int irq_poll_cpu;
+static atomic_t irq_poll_active;
+
+/*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck disabled and
+ * action (about to be disabled). Only if it's still active, we return
+ * true and let the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+		      "irq poll in progress on cpu %d for irq %d\n",
+		      smp_processor_id(), desc->irq_data.irq))
+		return false;
+
+#ifdef CONFIG_SMP
+	do {
+		raw_spin_unlock(&desc->lock);
+		while (irqd_irq_inprogress(&desc->irq_data))
+			cpu_relax();
+		raw_spin_lock(&desc->lock);
+	} while (irqd_irq_inprogress(&desc->irq_data));
+	/* Might have been disabled in meantime */
+	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
+#else
+	return false;
+#endif
+}
+
 
 /*
  * Recovery handler for misrouted interrupts.
  */
-static int try_one_irq(int irq, struct irq_desc *desc)
+static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 {
+	irqreturn_t ret = IRQ_NONE;
 	struct irqaction *action;
-	int ok = 0, work = 0;
 
 	raw_spin_lock(&desc->lock);
-	/* Already running on another processor */
-	if (desc->status & IRQ_INPROGRESS) {
-		/*
-		 * Already running: If it is shared get the other
-		 * CPU to go looking for our mystery interrupt too
-		 */
-		if (desc->action && (desc->action->flags & IRQF_SHARED))
-			desc->status |= IRQ_PENDING;
-		raw_spin_unlock(&desc->lock);
-		return ok;
-	}
-	/* Honour the normal IRQ locking */
-	desc->status |= IRQ_INPROGRESS;
-	action = desc->action;
-	raw_spin_unlock(&desc->lock);
 
-	while (action) {
-		/* Only shared IRQ handlers are safe to call */
-		if (action->flags & IRQF_SHARED) {
-			if (action->handler(irq, action->dev_id) ==
-				IRQ_HANDLED)
-				ok = 1;
-		}
-		action = action->next;
-	}
-	local_irq_disable();
-	/* Now clean up the flags */
-	raw_spin_lock(&desc->lock);
-	action = desc->action;
+	/* PER_CPU and nested thread interrupts are never polled */
+	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
+		goto out;
 
 	/*
-	 * While we were looking for a fixup someone queued a real
-	 * IRQ clashing with our walk:
+	 * Do not poll disabled interrupts unless the spurious
+	 * disabled poller asks explicitely.
 	 */
-	while ((desc->status & IRQ_PENDING) && action) {
+	if (irqd_irq_disabled(&desc->irq_data) && !force)
+		goto out;
+
+	/*
+	 * All handlers must agree on IRQF_SHARED, so we test just the
+	 * first. Check for action->next as well.
+	 */
+	action = desc->action;
+	if (!action || !(action->flags & IRQF_SHARED) ||
+	    (action->flags & __IRQF_TIMER) || !action->next)
+		goto out;
+
+	/* Already running on another processor */
+	if (irqd_irq_inprogress(&desc->irq_data)) {
 		/*
-		 * Perform real IRQ processing for the IRQ we deferred
+		 * Already running: If it is shared get the other
+		 * CPU to go looking for our mystery interrupt too
 		 */
-		work = 1;
-		raw_spin_unlock(&desc->lock);
-		handle_IRQ_event(irq, action);
-		raw_spin_lock(&desc->lock);
-		desc->status &= ~IRQ_PENDING;
+		desc->istate |= IRQS_PENDING;
+		goto out;
 	}
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * If we did actual work for the real IRQ line we must let the
-	 * IRQ controller clean up too
-	 */
-	if (work && desc->chip && desc->chip->end)
-		desc->chip->end(irq);
-	raw_spin_unlock(&desc->lock);
 
-	return ok;
+	/* Mark it poll in progress */
+	desc->istate |= IRQS_POLL_INPROGRESS;
+	do {
+		if (handle_irq_event(desc) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+		action = desc->action;
+	} while ((desc->istate & IRQS_PENDING) && action);
+	desc->istate &= ~IRQS_POLL_INPROGRESS;
+out:
+	raw_spin_unlock(&desc->lock);
+	return ret == IRQ_HANDLED;
 }
 
 static int misrouted_irq(int irq)
@@ -90,6 +115,11 @@ static int misrouted_irq(int irq)
 	struct irq_desc *desc;
 	int i, ok = 0;
 
+	if (atomic_inc_return(&irq_poll_active) != 1)
+		goto out;
+
+	irq_poll_cpu = smp_processor_id();
+
 	for_each_irq_desc(i, desc) {
 		if (!i)
 			continue;
@@ -97,9 +127,11 @@ static int misrouted_irq(int irq)
 		if (i == irq)	/* Already tried */
 			continue;
 
-		if (try_one_irq(i, desc))
+		if (try_one_irq(i, desc, false))
 			ok = 1;
 	}
+out:
+	atomic_dec(&irq_poll_active);
 	/* So the caller can adjust the irq error counts */
 	return ok;
 }
@@ -109,27 +141,39 @@ static void poll_spurious_irqs(unsigned long dummy)
 	struct irq_desc *desc;
 	int i;
 
+	if (atomic_inc_return(&irq_poll_active) != 1)
+		goto out;
+	irq_poll_cpu = smp_processor_id();
+
 	for_each_irq_desc(i, desc) {
-		unsigned int status;
+		unsigned int state;
 
 		if (!i)
 			continue;
 
 		/* Racy but it doesn't matter */
-		status = desc->status;
+		state = desc->istate;
 		barrier();
-		if (!(status & IRQ_SPURIOUS_DISABLED))
+		if (!(state & IRQS_SPURIOUS_DISABLED))
 			continue;
 
 		local_irq_disable();
-		try_one_irq(i, desc);
+		try_one_irq(i, desc, true);
 		local_irq_enable();
 	}
-
+out:
+	atomic_dec(&irq_poll_active);
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+	if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+		return 0;
+	return 1;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -137,17 +181,15 @@ static void poll_spurious_irqs(unsigned long dummy)
  *
  * (The other 100-of-100,000 interrupts may have been a correctly
  * functioning device sharing an IRQ with the failing one)
- *
- * Called under desc->lock
  */
-
 static void
 __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 		irqreturn_t action_ret)
 {
 	struct irqaction *action;
+	unsigned long flags;
 
-	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+	if (bad_action_ret(action_ret)) {
 		printk(KERN_ERR "irq event %d: bogus return value %x\n",
 				irq, action_ret);
 	} else {
@@ -157,14 +199,23 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 	dump_stack();
 	printk(KERN_ERR "handlers:\n");
 
+	/*
+	 * We need to take desc->lock here. note_interrupt() is called
+	 * w/o desc->lock held, but IRQ_PROGRESS set. We might race
+	 * with something else removing an action. It's ok to take
+	 * desc->lock here. See synchronize_irq().
+	 */
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	while (action) {
-		printk(KERN_ERR "[<%p>]", action->handler);
-		print_symbol(" (%s)",
-			(unsigned long)action->handler);
-		printk("\n");
+		printk(KERN_ERR "[<%p>] %pf", action->handler, action->handler);
+		if (action->thread_fn)
+			printk(KERN_CONT " threaded [<%p>] %pf",
+					action->thread_fn, action->thread_fn);
+		printk(KERN_CONT "\n");
 		action = action->next;
 	}
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 static void
@@ -216,7 +267,19 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		   irqreturn_t action_ret)
 {
-	if (unlikely(action_ret != IRQ_HANDLED)) {
+	if (desc->istate & IRQS_POLL_INPROGRESS)
+		return;
+
+	/* we get here again via the threaded handler */
+	if (action_ret == IRQ_WAKE_THREAD)
+		return;
+
+	if (bad_action_ret(action_ret)) {
+		report_bad_irq(irq, desc, action_ret);
+		return;
+	}
+
+	if (unlikely(action_ret == IRQ_NONE)) {
 		/*
 		 * If we are seeing only the odd spurious IRQ caused by
 		 * bus asynchronicity then don't eventually trigger an error,
@@ -228,8 +291,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		else
 			desc->irqs_unhandled++;
 		desc->last_unhandled = jiffies;
-		if (unlikely(action_ret != IRQ_NONE))
-			report_bad_irq(irq, desc, action_ret);
 	}
 
 	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
@@ -252,9 +313,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		 * Now kill the IRQ
 		 */
 		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
-		desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+		desc->istate |= IRQS_SPURIOUS_DISABLED;
 		desc->depth++;
-		desc->chip->disable(irq);
+		irq_disable(desc);
 
 		mod_timer(&poll_spurious_irq_timer,
 			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
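
A note on how the new poll machinery is consumed: try_one_irq() marks the descriptor with IRQS_POLL_INPROGRESS while it invokes the handlers, and irq_wait_for_poll() is what a real hardware interrupt uses to park itself until that poll has finished. The caller lives in kernel/irq/chip.c and is not part of the diff above; the sketch below is reconstructed from memory of this series, so treat its exact shape as an assumption.

/*
 * Sketch, not part of this diff: the flow-handler side of the poller
 * handshake, roughly what irq_check_poll() in kernel/irq/chip.c does.
 * If the "in progress" state belongs to the poller rather than to a
 * real handler, wait for the poller instead of dropping the interrupt.
 */
static bool irq_check_poll(struct irq_desc *desc)
{
	if (!(desc->istate & IRQS_POLL_INPROGRESS))
		return false;
	return irq_wait_for_poll(desc);
}

/*
 * A flow handler that finds irqd_irq_inprogress() set would then do
 * something like: if (!irq_check_poll(desc)) goto out_unlock;
 */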
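Both misrouted_irq() and poll_spurious_irqs() now serialize against each other through the irq_poll_active counter: whoever bumps it from 0 to 1 walks the descriptors and records irq_poll_cpu (so irq_wait_for_poll() can catch a poll recursing onto its own CPU), while every other concurrent caller simply backs off and lets the timer retry later. Below is a standalone illustration of that inc-and-test pattern using C11 atomics and pthreads instead of the kernel's atomic_t; all names in it are made up for the demo.

#include <pthread.h>
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static atomic_int poll_active;		/* plays the role of irq_poll_active */

static void *poller(void *arg)
{
	long id = (long)(intptr_t)arg;

	/*
	 * atomic_fetch_add() returns the old value, so "+ 1" gives the
	 * same answer as the kernel's atomic_inc_return().
	 */
	if (atomic_fetch_add(&poll_active, 1) + 1 != 1) {
		printf("poller %ld: someone else is polling, backing off\n", id);
		goto out;
	}
	printf("poller %ld: walking the irq descriptors\n", id);
out:
	atomic_fetch_sub(&poll_active, 1);
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (long i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, poller, (void *)(intptr_t)i);
	for (long i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}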
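The new bad_action_ret() helper relies on the numeric values behind irqreturn_t: IRQ_NONE is 0, IRQ_HANDLED is bit 0 and IRQ_WAKE_THREAD is bit 1 (include/linux/irqreturn.h), so every legitimate return, including IRQ_HANDLED | IRQ_WAKE_THREAD, is at most 3 and anything above that came from a broken handler. A small standalone check of that reasoning (userspace, not kernel code):

#include <stdio.h>

/* Mirrors the values in include/linux/irqreturn.h */
enum irqreturn { IRQ_NONE = 0, IRQ_HANDLED = 1, IRQ_WAKE_THREAD = 2 };

/* Same test as the helper added in the patch above */
static int bad_action_ret(unsigned int action_ret)
{
	if (action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD))
		return 0;
	return 1;
}

int main(void)
{
	unsigned int v[] = { IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD,
			     IRQ_HANDLED | IRQ_WAKE_THREAD, 0xdead };

	for (unsigned int i = 0; i < sizeof(v) / sizeof(v[0]); i++)
		printf("action_ret=%#x -> %s\n", v[i],
		       bad_action_ret(v[i]) ? "bogus" : "valid");
	return 0;
}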
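Finally, the disable path at the end still uses the accounting described in the comment above: note_interrupt() looks at windows of 100,000 interrupts and only gives up on the line when more than 99,900 of a window went unhandled; once IRQS_SPURIOUS_DISABLED is set, poll_spurious_irqs() keeps calling try_one_irq(..., true) from the timer so a wedged but shared line can still make progress. A standalone sketch of that windowed accounting follows; the struct fields and function names are made up for the demo and are not kernel API.

#include <stdbool.h>
#include <stdio.h>

struct fake_desc {
	unsigned int irq_count;		/* interrupts seen in this window */
	unsigned int irqs_unhandled;	/* of which nobody claimed */
	bool disabled;
};

static void fake_note_interrupt(struct fake_desc *desc, bool handled)
{
	if (!handled)
		desc->irqs_unhandled++;
	if (++desc->irq_count < 100000)
		return;
	/* Window full: decide, then start a fresh window */
	if (desc->irqs_unhandled > 99900)
		desc->disabled = true;	/* IRQS_SPURIOUS_DISABLED + irq_disable() */
	desc->irq_count = desc->irqs_unhandled = 0;
}

int main(void)
{
	struct fake_desc d = { 0, 0, false };

	/* Only one in 2000 interrupts is handled: the line gets killed. */
	for (int i = 0; i < 100000 && !d.disabled; i++)
		fake_note_interrupt(&d, i % 2000 == 0);
	printf("line %s\n", d.disabled ? "disabled" : "still enabled");
	return 0;
}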