Diffstat (limited to 'kernel/irq/spurious.c')
 -rw-r--r--  kernel/irq/spurious.c | 163
 1 file changed, 104 insertions(+), 59 deletions(-)
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index 3089d3b9d5f..dd586ebf9c8 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -21,70 +21,94 @@ static int irqfixup __read_mostly;
 #define POLL_SPURIOUS_IRQ_INTERVAL (HZ/10)
 static void poll_spurious_irqs(unsigned long dummy);
 static DEFINE_TIMER(poll_spurious_irq_timer, poll_spurious_irqs, 0, 0);
+static int irq_poll_cpu;
+static atomic_t irq_poll_active;
+
+/*
+ * We wait here for a poller to finish.
+ *
+ * If the poll runs on this CPU, then we yell loudly and return
+ * false. That will leave the interrupt line disabled in the worst
+ * case, but it should never happen.
+ *
+ * We wait until the poller is done and then recheck disabled and
+ * action (about to be disabled). Only if it's still active, we return
+ * true and let the handler run.
+ */
+bool irq_wait_for_poll(struct irq_desc *desc)
+{
+	if (WARN_ONCE(irq_poll_cpu == smp_processor_id(),
+		      "irq poll in progress on cpu %d for irq %d\n",
+		      smp_processor_id(), desc->irq_data.irq))
+		return false;
+
+#ifdef CONFIG_SMP
+	do {
+		raw_spin_unlock(&desc->lock);
+		while (desc->istate & IRQS_INPROGRESS)
+			cpu_relax();
+		raw_spin_lock(&desc->lock);
+	} while (desc->istate & IRQS_INPROGRESS);
+	/* Might have been disabled in meantime */
+	return !(desc->istate & IRQS_DISABLED) && desc->action;
+#else
+	return false;
+#endif
+}
+
 
 /*
  * Recovery handler for misrouted interrupts.
  */
-static int try_one_irq(int irq, struct irq_desc *desc)
+static int try_one_irq(int irq, struct irq_desc *desc, bool force)
 {
+	irqreturn_t ret = IRQ_NONE;
 	struct irqaction *action;
-	int ok = 0, work = 0;
 
 	raw_spin_lock(&desc->lock);
-	/* Already running on another processor */
-	if (desc->status & IRQ_INPROGRESS) {
-		/*
-		 * Already running: If it is shared get the other
-		 * CPU to go looking for our mystery interrupt too
-		 */
-		if (desc->action && (desc->action->flags & IRQF_SHARED))
-			desc->status |= IRQ_PENDING;
-		raw_spin_unlock(&desc->lock);
-		return ok;
-	}
-	/* Honour the normal IRQ locking */
-	desc->status |= IRQ_INPROGRESS;
-	action = desc->action;
-	raw_spin_unlock(&desc->lock);
 
-	while (action) {
-		/* Only shared IRQ handlers are safe to call */
-		if (action->flags & IRQF_SHARED) {
-			if (action->handler(irq, action->dev_id) ==
-			    IRQ_HANDLED)
-				ok = 1;
-		}
-		action = action->next;
-	}
-	local_irq_disable();
-	/* Now clean up the flags */
-	raw_spin_lock(&desc->lock);
-	action = desc->action;
+	/* PER_CPU and nested thread interrupts are never polled */
+	if (irq_settings_is_per_cpu(desc) || irq_settings_is_nested_thread(desc))
+		goto out;
 
 	/*
-	 * While we were looking for a fixup someone queued a real
-	 * IRQ clashing with our walk:
+	 * Do not poll disabled interrupts unless the spurious
+	 * disabled poller asks explicitly.
 	 */
-	while ((desc->status & IRQ_PENDING) && action) {
+	if ((desc->istate & IRQS_DISABLED) && !force)
+		goto out;
+
+	/*
+	 * All handlers must agree on IRQF_SHARED, so we test just the
+	 * first. Check for action->next as well.
+	 */
+	action = desc->action;
+	if (!action || !(action->flags & IRQF_SHARED) ||
+	    (action->flags & __IRQF_TIMER) || !action->next)
+		goto out;
+
+	/* Already running on another processor */
+	if (desc->istate & IRQS_INPROGRESS) {
 		/*
-		 * Perform real IRQ processing for the IRQ we deferred
+		 * Already running: If it is shared get the other
+		 * CPU to go looking for our mystery interrupt too
 		 */
-		work = 1;
-		raw_spin_unlock(&desc->lock);
-		handle_IRQ_event(irq, action);
-		raw_spin_lock(&desc->lock);
-		desc->status &= ~IRQ_PENDING;
+		irq_compat_set_pending(desc);
+		desc->istate |= IRQS_PENDING;
+		goto out;
 	}
-	desc->status &= ~IRQ_INPROGRESS;
-	/*
-	 * If we did actual work for the real IRQ line we must let the
-	 * IRQ controller clean up too
-	 */
-	if (work)
-		irq_end(irq, desc);
-	raw_spin_unlock(&desc->lock);
 
-	return ok;
+	/* Mark it poll in progress */
+	desc->istate |= IRQS_POLL_INPROGRESS;
+	do {
+		if (handle_irq_event(desc) == IRQ_HANDLED)
+			ret = IRQ_HANDLED;
+		action = desc->action;
+	} while ((desc->istate & IRQS_PENDING) && action);
+	desc->istate &= ~IRQS_POLL_INPROGRESS;
+out:
+	raw_spin_unlock(&desc->lock);
+	return ret == IRQ_HANDLED;
 }
 
 static int misrouted_irq(int irq)
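At the core of the new irq_wait_for_poll() is a lock-drop/spin/relock handshake: the waiter must release desc->lock while the poller runs (the poller takes that lock itself), and it must re-test the state after retaking the lock, because a new poll can start in the window between the unlocked observation and the lock acquisition. A minimal userspace sketch of the same pattern, assuming pthreads and entirely hypothetical names (fake_desc, wait_for_poll); this is not kernel code:

/*
 * Sketch only: mirrors the unlock/spin/relock loop of
 * irq_wait_for_poll(). Caller holds d->lock on entry and on exit.
 */
#include <pthread.h>
#include <sched.h>
#include <stdbool.h>

struct fake_desc {
	pthread_mutex_t lock;
	bool in_progress;	/* a poller currently owns the line */
	bool disabled;		/* line disabled while we waited? */
	bool has_action;	/* handler still installed? */
};

static bool wait_for_poll(struct fake_desc *d)
{
	do {
		pthread_mutex_unlock(&d->lock);
		while (__atomic_load_n(&d->in_progress, __ATOMIC_ACQUIRE))
			sched_yield();	/* stand-in for cpu_relax() */
		pthread_mutex_lock(&d->lock);
		/*
		 * A new poll may have started before we got the lock
		 * back, so the flag must be re-tested under the lock.
		 */
	} while (d->in_progress);

	/* Might have been disabled in the meantime. */
	return !d->disabled && d->has_action;
}

The WARN_ONCE() in the kernel version guards the one case this loop cannot handle: if the polling CPU itself ended up waiting, it would spin on a flag only it can clear, so it bails out and reports instead.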
@@ -92,6 +116,11 @@ static int misrouted_irq(int irq)
 	struct irq_desc *desc;
 	int i, ok = 0;
 
+	if (atomic_inc_return(&irq_poll_active) != 1)
+		goto out;
+
+	irq_poll_cpu = smp_processor_id();
+
 	for_each_irq_desc(i, desc) {
 		if (!i)
 			continue;
@@ -99,9 +128,11 @@ static int misrouted_irq(int irq)
 		if (i == irq)	/* Already tried */
 			continue;
 
-		if (try_one_irq(i, desc))
+		if (try_one_irq(i, desc, false))
 			ok = 1;
 	}
+out:
+	atomic_dec(&irq_poll_active);
 	/* So the caller can adjust the irq error counts */
 	return ok;
 }
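misrouted_irq() and poll_spurious_irqs() serialize against each other through irq_poll_active: only the caller that brings the counter from 0 to 1 performs the descriptor walk, everyone else backs off, and the decrement runs on every path so the early exit cannot leak a count. A compilable C11 rendering of that gate, with made-up names (poll_active, poll_all_lines), not the kernel's atomic_t API:

#include <stdatomic.h>
#include <stdbool.h>

static atomic_int poll_active;

static bool poll_all_lines(void)
{
	bool ok = false;

	/*
	 * atomic_fetch_add() returns the old value, so old + 1 gives
	 * the kernel's atomic_inc_return() semantics.
	 */
	if (atomic_fetch_add(&poll_active, 1) + 1 != 1)
		goto out;	/* another poller is already running */

	/* ... walk all interrupt lines and try each handler here ... */
	ok = true;
out:
	atomic_fetch_sub(&poll_active, 1);	/* unconditional, like atomic_dec() */
	return ok;
}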
@@ -111,23 +142,28 @@ static void poll_spurious_irqs(unsigned long dummy)
 	struct irq_desc *desc;
 	int i;
 
+	if (atomic_inc_return(&irq_poll_active) != 1)
+		goto out;
+	irq_poll_cpu = smp_processor_id();
+
 	for_each_irq_desc(i, desc) {
-		unsigned int status;
+		unsigned int state;
 
 		if (!i)
 			continue;
 
 		/* Racy but it doesn't matter */
-		status = desc->status;
+		state = desc->istate;
 		barrier();
-		if (!(status & IRQ_SPURIOUS_DISABLED))
+		if (!(state & IRQS_SPURIOUS_DISABLED))
 			continue;
 
 		local_irq_disable();
-		try_one_irq(i, desc);
+		try_one_irq(i, desc, true);
 		local_irq_enable();
 	}
-
+out:
+	atomic_dec(&irq_poll_active);
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
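The "Racy but it doesn't matter" snapshot in this loop is deliberate: reading desc->istate without the lock can only yield a stale value, which at worst wastes or delays one poll pass, and barrier() stops the compiler from re-reading the field after the test. A small stand-alone illustration of the idiom; the barrier macro is the usual GCC compiler barrier, and the flag value is illustrative, not the kernel's:

#define barrier()	__asm__ __volatile__("" ::: "memory")

#define SPURIOUS_DISABLED	0x02	/* illustrative bit */

static int line_needs_poll(const volatile unsigned int *istate)
{
	unsigned int state = *istate;	/* unlocked, possibly stale snapshot */

	barrier();	/* pin the read; test the snapshot, not a re-read */
	return !!(state & SPURIOUS_DISABLED);
}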
@@ -139,15 +175,13 @@ static void poll_spurious_irqs(unsigned long dummy)
  *
  * (The other 100-of-100,000 interrupts may have been a correctly
  * functioning device sharing an IRQ with the failing one)
- *
- * Called under desc->lock
  */
-
 static void
 __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 		 irqreturn_t action_ret)
 {
 	struct irqaction *action;
+	unsigned long flags;
 
 	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
 		printk(KERN_ERR "irq event %d: bogus return value %x\n",
@@ -159,6 +193,13 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 	dump_stack();
 	printk(KERN_ERR "handlers:\n");
 
+	/*
+	 * We need to take desc->lock here. note_interrupt() is called
+	 * w/o desc->lock held, but IRQS_INPROGRESS set. We might race
+	 * with something else removing an action. It's ok to take
+	 * desc->lock here. See synchronize_irq().
+	 */
+	raw_spin_lock_irqsave(&desc->lock, flags);
 	action = desc->action;
 	while (action) {
 		printk(KERN_ERR "[<%p>]", action->handler);
@@ -167,6 +208,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 		printk("\n");
 		action = action->next;
 	}
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
 }
 
 static void
@@ -218,6 +260,9 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		    irqreturn_t action_ret)
 {
+	if (desc->istate & IRQS_POLL_INPROGRESS)
+		return;
+
 	if (unlikely(action_ret != IRQ_HANDLED)) {
 		/*
 		 * If we are seeing only the odd spurious IRQ caused by
@@ -254,9 +299,9 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 	 * Now kill the IRQ
 	 */
 	printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
-	desc->status |= IRQ_DISABLED | IRQ_SPURIOUS_DISABLED;
+	desc->istate |= IRQS_SPURIOUS_DISABLED;
 	desc->depth++;
-	desc->irq_data.chip->irq_disable(&desc->irq_data);
+	irq_disable(desc);
 
 	mod_timer(&poll_spurious_irq_timer,
 		  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
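For context, the "Now kill the IRQ" path above is driven by note_interrupt()'s long-standing bookkeeping, which is not shown in these hunks: interrupts on each line are counted in windows of 100,000, and the line is shut down when more than 99,900 of them went unhandled, i.e. when fewer than the 100-in-100,000 genuine interrupts mentioned in __report_bad_irq()'s comment remain. A simplified sketch of that counting, with a hypothetical struct in place of struct irq_desc:

/*
 * Simplified; the real note_interrupt() also restarts the unhandled
 * streak when unhandled events arrive more than ~100 ms apart.
 */
#include <stdbool.h>

struct fake_irq_stats {
	unsigned int irq_count;		/* interrupts seen this window */
	unsigned int irqs_unhandled;	/* of those, nobody claimed them */
	bool spurious_disabled;
};

static void account_interrupt(struct fake_irq_stats *s, bool handled)
{
	s->irq_count++;
	if (!handled)
		s->irqs_unhandled++;

	if (s->irq_count < 100000)
		return;

	if (s->irqs_unhandled > 99900)
		s->spurious_disabled = true;	/* mimics IRQS_SPURIOUS_DISABLED */

	/* start a fresh observation window either way */
	s->irq_count = 0;
	s->irqs_unhandled = 0;
}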