Diffstat (limited to 'kernel/irq/spurious.c')

 kernel/irq/spurious.c | 162 ++++++++++++++++++++++++++++++++-----------------
 1 file changed, 103 insertions(+), 59 deletions(-)

diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 3089d3b9d5f3..dfbd550401b2 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -21,70 +21,93 @@ static int irqfixup __read_mostly;
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
 static void poll_spurious_irqs(unsigned long dummy);
 static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+static int irq_poll_cpu;
+static atomic_t irq_poll_active;
+
+/*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck disabled and
+ * action (about to be disabled). Only if it's still active, we return
+ * true and let the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+		      "irq poll in progress on cpu %d for irq %d\n",
+		      smp_processor_id(), desc->irq_data.irq))
+		return false;
+
+#ifdef CONFIG_SMP
+	do {
+		raw_spin_unlock(&desc->lock);
+		while (irqd_irq_inprogress(&desc->irq_data))
+			cpu_relax();
+		raw_spin_lock(&desc->lock);
+	} while (irqd_irq_inprogress(&desc->irq_data));
+	/* Might have been disabled in meantime */
+	return !irqd_irq_disabled(&desc->irq_data) && desc->action;
+#else
+	return false;
+#endif
+}
+
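For context, the flow handlers consult this poll state before deciding
whether to invoke the real handler. A minimal sketch of such a gate,
modeled on the irq_check_poll() helper that the same series adds to
kernel/irq/chip.c (the helper name and placement are assumptions; it is
not part of this file's diff):

	/* Bail out fast unless a poll is actually in flight. */
	static bool irq_check_poll(struct irq_desc *desc)
	{
		if (!(desc->istate & IRQS_POLL_INPROGRESS))
			return false;
		/* Wait for the poller; may drop and retake desc->lock. */
		return irq_wait_for_poll(desc);
	}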
 
 /*
  * Recovery handler for misrouted interrupts.
  */
-static int try_one_irq(int irq, struct irq_desc *desc)
+static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 {
+	irqreturn_t ret = IRQ_NONE;
 	struct irqaction *action;
-	int ok = 0, work = 0;
 
 	raw_spin_lock(&desc->lock);
-	/* Already running on another processor */
-	if (desc->status & IRQ_INPROGRESS) {
-		/*
-		 * Already running: If it is shared get the other
-		 * CPU to go looking for our mystery interrupt too
-		 */
-		if (desc->action && (desc->action->flags & IRQF_SHARED))
-			desc->status |= IRQ_PENDING;
-		raw_spin_unlock(&desc->lock);
-		return ok;
-	}
-	/* Honour the normal IRQ locking */
-	desc->status |= IRQ_INPROGRESS;
-	action = desc->action;
-	raw_spin_unlock(&desc->lock);
 
-	while (action) {
-		/* Only shared IRQ handlers are safe to call */
-		if (action->flags & IRQF_SHARED) {
-			if (action->handler(irq, action->dev_id) ==
-				IRQ_HANDLED)
-				ok = 1;
-		}
-		action = action->next;
-	}
-	local_irq_disable();
-	/* Now clean up the flags */
-	raw_spin_lock(&desc->lock);
-	action = desc->action;
+	/* PER_CPU and nested thread interrupts are never polled */
+	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
+		goto out;
 
 	/*
-	 * While we were looking for a fixup someone queued a real
-	 * IRQ clashing with our walk:
+	 * Do not poll disabled interrupts unless the spurious
+	 * disabled poller asks explicitly.
 	 */
-	while ((desc->status & IRQ_PENDING) && action) {
+	if (irqd_irq_disabled(&desc->irq_data) && !force)
+		goto out;
+
+	/*
+	 * All handlers must agree on IRQF_SHARED, so we test just the
+	 * first. Check for action->next as well.
+	 */
+	action = desc->action;
+	if (!action || !(action->flags & IRQF_SHARED) ||
+	    (action->flags & __IRQF_TIMER) || !action->next)
+		goto out;
+
+	/* Already running on another processor */
+	if (irqd_irq_inprogress(&desc->irq_data)) {
 		/*
-		 * Perform real IRQ processing for the IRQ we deferred
+		 * Already running: If it is shared get the other
+		 * CPU to go looking for our mystery interrupt too
 		 */
-		work = 1;
-		raw_spin_unlock(&desc->lock);
-		handle_IRQ_event(irq, action);
-		raw_spin_lock(&desc->lock);
-		desc->status &= ~IRQ_PENDING;
+		desc->istate |= IRQS_PENDING;
+		goto out;
 	}
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * If we did actual work for the real IRQ line we must let the
-	 * IRQ controller clean up too
-	 */
-	if (work)
-		irq_end(irq, desc);
-	raw_spin_unlock(&desc->lock);
 
-	return ok;
+	/* Mark it poll in progress */
+	desc->istate |= IRQS_POLL_INPROGRESS;
+	do {
+		if (handle_irq_event(desc) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+		action = desc->action;
+	} while ((desc->istate & IRQS_PENDING) && action);
+	desc->istate &= ~IRQS_POLL_INPROGRESS;
+out:
+	raw_spin_unlock(&desc->lock);
+	return ret == IRQ_HANDLED;
 }
 
 static int misrouted_irq(int irq)
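Note that try_one_irq() now only polls lines whose handlers all carry
IRQF_SHARED. A handler on such a line must tolerate being called when
its device raised nothing, which is exactly what polling exploits. A
hedged sketch of a poll-safe shared handler (the foo_* names and the
status check are hypothetical, not from this patch):

	/* May be invoked by the spurious poller even when the device
	 * is idle, so it must check its own hardware first. */
	static irqreturn_t foo_irq_handler(int irq, void *dev_id)
	{
		struct foo_device *foo = dev_id;

		if (!foo_irq_pending(foo))	/* hypothetical register read */
			return IRQ_NONE;

		foo_ack_irq(foo);		/* hypothetical ack */
		return IRQ_HANDLED;
	}

	/* Registration requests sharing so the line stays pollable: */
	err = request_irq(irq, foo_irq_handler, IRQF_SHARED, "foo", foo);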
@@ -92,6 +115,11 @@ static int misrouted_irq(int irq)
 	struct irq_desc *desc;
 	int i, ok = 0;
 
+	if (atomic_inc_return(&irq_poll_active) != 1)
+		goto out;
+
+	irq_poll_cpu = smp_processor_id();
+
 	for_each_irq_desc(i, desc) {
 		if (!i)
 			continue;
@@ -99,9 +127,11 @@ static int misrouted_irq(int irq)
 		if (i == irq)	/* Already tried */
 			continue;
 
-		if (try_one_irq(i, desc))
+		if (try_one_irq(i, desc, false))
 			ok = 1;
 	}
+out:
+	atomic_dec(&irq_poll_active);
 	/* So the caller can adjust the irq error counts */
 	return ok;
 }
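The atomic_inc_return() guard serializes the pollers without taking a
lock: only the caller that brings the count from 0 to 1 proceeds, and
every caller, winner or loser, decrements on the way out. The same
pattern in portable C11 atomics, as an illustrative sketch (these names
are not from the kernel):

	#include <stdatomic.h>
	#include <stdbool.h>

	static atomic_int poll_active;

	/* Returns true if this caller won the right to poll. */
	static bool try_enter_poller(void)
	{
		/* fetch_add returns the previous value; a previous value
		 * of 0 means we are the first and only active poller. */
		return atomic_fetch_add(&poll_active, 1) == 0;
	}

	/* Every caller, winner or not, must balance the increment. */
	static void leave_poller(void)
	{
		atomic_fetch_sub(&poll_active, 1);
	}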
@@ -111,23 +141,28 @@ static void poll_spurious_irqs(unsigned long dummy)
 	struct irq_desc *desc;
 	int i;
 
+	if (atomic_inc_return(&irq_poll_active) != 1)
+		goto out;
+	irq_poll_cpu = smp_processor_id();
+
 	for_each_irq_desc(i, desc) {
-		unsigned int status;
+		unsigned int state;
 
 		if (!i)
 			continue;
 
 		/* Racy but it doesn't matter */
-		status = desc->status;
+		state = desc->istate;
 		barrier();
-		if (!(status & IRQ_SPURIOUS_DISABLED))
+		if (!(state & IRQS_SPURIOUS_DISABLED))
 			continue;
 
 		local_irq_disable();
-		try_one_irq(i, desc);
+		try_one_irq(i, desc, true);
 		local_irq_enable();
 	}
-
+out:
+	atomic_dec(&irq_poll_active);
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
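poll_spurious_irqs() keeps itself alive by re-arming the timer at the
end of each run; nothing else fires it periodically. A minimal sketch of
this self-rearming pattern, using the same pre-4.15 DEFINE_TIMER
signature the file uses (poll_fn and poll_timer are illustrative names):

	#include <linux/timer.h>
	#include <linux/jiffies.h>

	#define POLL_INTERVAL (HZ / 10)

	static void poll_fn(unsigned long dummy);
	static DEFINE_TIMER(poll_timer, poll_fn, 0, 0);

	static void poll_fn(unsigned long dummy)
	{
		/* ... periodic work ... */

		/* The timer stays dead unless re-armed, so the callback
		 * schedules its own next invocation. */
		mod_timer(&poll_timer, jiffies + POLL_INTERVAL);
	}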
@@ -139,15 +174,13 @@ static void poll_spurious_irqs(unsigned long dummy)
  *
  * (The other 100-of-100,000 interrupts may have been a correctly
  * functioning device sharing an IRQ with the failing one)
- *
- * Called under desc->lock
  */
-
 static void
 __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 		 irqreturn_t action_ret)
 {
 	struct irqaction *action;
+	unsigned long flags;
 
 	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
 		printk(KERN_ERR "irq event %d: bogus return value %x\n",
@@ -159,6 +192,13 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 	dump_stack();
 	printk(KERN_ERR "handlers:\n");
 
+	/*
+	 * We need to take desc->lock here. note_interrupt() is called
+	 * w/o desc->lock held, but IRQ_INPROGRESS set. We might race
+	 * with something else removing an action. It's ok to take
+	 * desc->lock here. See synchronize_irq().
+	 */
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	while (action) {
 		printk(KERN_ERR "[<%p>]", action->handler);
@@ -167,6 +207,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 		printk("\n");
 		action = action->next;
 	}
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 static void
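__report_bad_irq() must now take desc->lock itself because it is no
longer called with the lock held, and the action list can change
underneath a lockless walk. The general shape of such a protected
traversal, as a generic sketch (the chain/node types are illustrative,
not kernel structures):

	#include <linux/spinlock.h>

	struct node {
		struct node *next;
		void (*print)(struct node *);
	};

	struct chain {
		raw_spinlock_t lock;
		struct node *head;
	};

	static void dump_chain(struct chain *c)
	{
		struct node *n;
		unsigned long flags;

		/* irqsave variant: the list may also be reached from
		 * hard interrupt context. */
		raw_spin_lock_irqsave(&c->lock, flags);
		for (n = c->head; n; n = n->next)
			n->print(n);
		raw_spin_unlock_irqrestore(&c->lock, flags);
	}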
@@ -218,6 +259,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		    irqreturn_t action_ret)
 {
+	if (desc->istate & IRQS_POLL_INPROGRESS)
+		return;
+
 	if (unlikely(action_ret != IRQ_HANDLED)) {
 		/*
 		 * If we are seeing only the odd spurious IRQ caused by
@@ -254,9 +298,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 	 * Now kill the IRQ
 	 */
 	printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
-	desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+	desc->istate |= IRQS_SPURIOUS_DISABLED;
 	desc->depth++;
-	desc->irq_data.chip->irq_disable(&desc->irq_data);
+	irq_disable(desc);
 
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
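The "kill the IRQ" branch above is reached through the counting
heuristic note_interrupt() has long used: every interrupt bumps
desc->irq_count, unhandled ones bump desc->irqs_unhandled, and the line
is only disabled when roughly 99,900 of the last 100,000 events went
unhandled. An abridged sketch of that logic (condensed from the same
function, not verbatim):

	if (unlikely(action_ret == IRQ_NONE))
		desc->irqs_unhandled++;

	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/* The interrupt is stuck: report it, then run the
		 * disable sequence shown in the hunk above. */
		__report_bad_irq(irq, desc, action_ret);
		/* ... "Now kill the IRQ" block ... */
	}
	desc->irqs_unhandled = 0;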