Diffstat (limited to 'kernel/irq')
 -rw-r--r--  kernel/irq/manage.c   |   4
 -rw-r--r--  kernel/irq/spurious.c | 106
 2 files changed, 104 insertions, 6 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index d34131ca372b..3dc6a61bf06a 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -886,8 +886,8 @@ static int irq_thread(void *data)
 		irq_thread_check_affinity(desc, action);
 
 		action_ret = handler_fn(desc, action);
-		if (!noirqdebug)
-			note_interrupt(action->irq, desc, action_ret);
+		if (action_ret == IRQ_HANDLED)
+			atomic_inc(&desc->threads_handled);
 
 		wake_threads_waitq(desc);
 	}
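
Taken on its own, this hunk just moves the bookkeeping out of the thread: instead of calling note_interrupt() directly (which would race with the next hardware interrupt), the irq thread only counts the invocations it actually handled, and the hardirq path consumes that count later. Below is a minimal user-space sketch of the producer side, assuming C11 atomics; struct desc_model and thread_iteration() are invented names for illustration, while in the kernel the counter is the atomic_t threads_handled field added to struct irq_desc:

#include <stdatomic.h>
#include <stdbool.h>

/* Invented stand-in for the one struct irq_desc field this hunk touches. */
struct desc_model {
	atomic_int threads_handled;	/* successful thread handler runs */
};

/*
 * Models one iteration of the irq_thread() loop after the change: count
 * the successes, let a later hardware interrupt judge them.
 */
static void thread_iteration(struct desc_model *d, bool action_handled)
{
	if (action_handled)		/* i.e. action_ret == IRQ_HANDLED */
		atomic_fetch_add_explicit(&d->threads_handled, 1,
					  memory_order_relaxed);
}

The consumer side, which compares successive snapshots of this counter, is sketched after the spurious.c hunk below.
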
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index a1d8cc63b56e..e2514b0e439e 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -270,6 +270,8 @@ try_misrouted_irq(unsigned int irq, struct irq_desc *desc,
 	return action && (action->flags & IRQF_IRQPOLL);
 }
 
+#define SPURIOUS_DEFERRED	0x80000000
+
 void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		    irqreturn_t action_ret)
 {
@@ -277,15 +279,111 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 	    irq_settings_is_polled(desc))
 		return;
 
-	/* we get here again via the threaded handler */
-	if (action_ret == IRQ_WAKE_THREAD)
-		return;
-
 	if (bad_action_ret(action_ret)) {
 		report_bad_irq(irq, desc, action_ret);
 		return;
 	}
 
+	/*
+	 * We cannot call note_interrupt from the threaded handler
+	 * because we need to look at the compound result of all
+	 * handlers (primary and threaded). Apart from that, in the
+	 * threaded shared case we have no serialization against an
+	 * incoming hardware interrupt while we are dealing with a
+	 * threaded result.
+	 *
+	 * So in case a thread is woken, we just note the fact and
+	 * defer the analysis to the next hardware interrupt.
+	 *
+	 * The threaded handlers store whether they successfully
+	 * handled an interrupt and we check whether that number
+	 * changed versus the last invocation.
+	 *
+	 * We could handle all interrupts with the delayed-by-one
+	 * mechanism, but for the non-forced threaded case we'd just
+	 * add pointless overhead to the straight hardirq interrupts
+	 * for the sake of a few lines less code.
+	 */
+	if (action_ret & IRQ_WAKE_THREAD) {
+		/*
+		 * A thread was woken. Check whether one of the
+		 * shared primary handlers returned IRQ_HANDLED. If
+		 * not, we defer the spurious detection to the next
+		 * interrupt.
+		 */
+		if (action_ret == IRQ_WAKE_THREAD) {
+			int handled;
+			/*
+			 * We use bit 31 of threads_handled_last to
+			 * denote that the deferred spurious detection
+			 * is active. No locking necessary as
+			 * threads_handled_last is only accessed here
+			 * and we have the guarantee that hard
+			 * interrupts are not reentrant.
+			 */
+			if (!(desc->threads_handled_last & SPURIOUS_DEFERRED)) {
+				desc->threads_handled_last |= SPURIOUS_DEFERRED;
+				return;
+			}
+			/*
+			 * Check whether one of the threaded handlers
+			 * returned IRQ_HANDLED since the last
+			 * interrupt happened.
+			 *
+			 * For simplicity we just set bit 31, as it is
+			 * set in threads_handled_last as well. So we
+			 * avoid extra masking. And we really do not
+			 * care about the high bits of the handled
+			 * count. We just care about the count being
+			 * different from the one we saw before.
+			 */
+			handled = atomic_read(&desc->threads_handled);
+			handled |= SPURIOUS_DEFERRED;
+			if (handled != desc->threads_handled_last) {
+				action_ret = IRQ_HANDLED;
+				/*
+				 * Note: We keep the SPURIOUS_DEFERRED
+				 * bit set. We are handling the
+				 * previous invocation right now.
+				 * Keep it for the current one, so the
+				 * next hardware interrupt will
+				 * account for it.
+				 */
+				desc->threads_handled_last = handled;
+			} else {
+				/*
+				 * None of the threaded handlers felt
+				 * responsible for the last interrupt.
+				 *
+				 * We keep the SPURIOUS_DEFERRED bit
+				 * set in threads_handled_last as we
+				 * need to account for the current
+				 * interrupt as well.
+				 */
+				action_ret = IRQ_NONE;
+			}
+		} else {
+			/*
+			 * One of the primary handlers returned
+			 * IRQ_HANDLED. So we don't care about the
+			 * threaded handlers on the same line. Clear
+			 * the deferred detection bit.
+			 *
+			 * In theory we could/should check whether the
+			 * deferred bit is set and take the result of
+			 * the previous run into account here as
+			 * well. But it's really not worth the
+			 * trouble. If every other interrupt is
+			 * handled we never trigger the spurious
+			 * detector. And if this is just the one out
+			 * of 100k unhandled ones which is handled,
+			 * then we merely delay the spurious detection
+			 * by one hard interrupt. Not a real problem.
+			 */
+			desc->threads_handled_last &= ~SPURIOUS_DEFERRED;
+		}
+	}
+
 	if (unlikely(action_ret == IRQ_NONE)) {
 		/*
 		 * If we are seeing only the odd spurious IRQ caused by
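
The delayed-by-one detection above is compact but subtle. Here is a hedged user-space model of just that state machine, again assuming C11 atomics; the names are invented, the real code is the action_ret == IRQ_WAKE_THREAD branch of note_interrupt() shown above, and it relies on hard interrupts being non-reentrant rather than on locking. Unsigned types are used here to keep the bit-31 arithmetic well defined:

#include <stdatomic.h>
#include <stdbool.h>

#define SPURIOUS_DEFERRED	0x80000000U

/* Invented model of the two struct irq_desc fields the patch uses. */
struct desc_model {
	atomic_uint threads_handled;	   /* bumped by the irq thread */
	unsigned int threads_handled_last; /* last snapshot, plus bit 31 flag */
};

/*
 * Models the branch taken when the primary handler returned exactly
 * IRQ_WAKE_THREAD: called from the (non-reentrant) hardirq path, it
 * returns the verdict on the *previous* interrupt -- true for handled,
 * false for spurious.
 */
static bool judge_previous_irq(struct desc_model *d)
{
	unsigned int handled;

	/* First wakeup: arm the deferred detection, no verdict yet. */
	if (!(d->threads_handled_last & SPURIOUS_DEFERRED)) {
		d->threads_handled_last |= SPURIOUS_DEFERRED;
		return true;
	}

	/* Bit 31 is set on both sides, so a plain compare suffices. */
	handled = atomic_load(&d->threads_handled) | SPURIOUS_DEFERRED;
	if (handled == d->threads_handled_last)
		return false;	/* counter did not move: nobody handled it */

	d->threads_handled_last = handled;	/* keep bit 31 set */
	return true;
}

A false verdict here corresponds to action_ret being rewritten to IRQ_NONE, which then feeds the existing irqs_unhandled accounting further down in note_interrupt().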