author | Alan Cox <alan@lxorguk.ukuu.org.uk> | 2007-07-16 02:40:55 -0400
committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-07-16 12:05:46 -0400
commit | 4f27c00bf80f122513d3a5be16ed851573164534 (patch)
tree | 2abad49c1e5c93d2d8698c558eb490b99bd35b87
parent | f3dc8c189a20dc5d115b8f0d07ac620e69eff05c (diff)
Improve behaviour of spurious IRQ detect
Currently we handle spurious IRQ activity based upon seeing a lot of invalid
(unhandled) interrupts, and we only clear things back on the basis of lots of
valid (handled) interrupts.
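For context, this is roughly what that existing detection looks like: a sketch of the
counter logic in note_interrupt() (kernel/irq/spurious.c) as it stands before this patch.
The 100000-interrupt sample window and the 99900 unhandled threshold are quoted from
memory of that file, so treat the exact numbers as illustrative:

	/* inside note_interrupt(): count unhandled returns from the handler */
	if (unlikely(action_ret != IRQ_HANDLED)) {
		desc->irqs_unhandled++;
		if (unlikely(action_ret != IRQ_NONE))
			report_bad_irq(irq, desc, action_ret);
	}

	/* every 100000 interrupts, check how many of them went unhandled */
	desc->irq_count++;
	if (likely(desc->irq_count < 100000))
		return;

	desc->irq_count = 0;
	if (unlikely(desc->irqs_unhandled > 99900)) {
		/* the line looks stuck: report it and shut it down */
		__report_bad_irq(irq, desc, action_ret);
		printk(KERN_EMERG "Disabling IRQ #%d\n", irq);
		desc->status |= IRQ_DISABLED;
		desc->chip->disable(irq);
	}
	desc->irqs_unhandled = 0;

The line is only saved by handled interrupts diluting the ratio before the window closes;
a port that sees almost nothing but harmless spurious hits will eventually trip the check,
which is the slow time bomb described next.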
Unfortunately, in some cases you get legitimate unhandled interrupts caused by
timing asynchronicity between the PCI bus and the APIC bus when disabling
interrupts and pulling other tricks. In that case the spurious IRQs themselves
are not a problem, but the unhandled counters never clear, so they act as a
slow-running time bomb. (This is effectively the same issue that showed up as
the serial port/tty problem, which was fixed by clearing the counters when a
handler is registered.)
It's easy enough to add a second parameter: time. If we see a regular stream of
harmless spurious interrupts that are not hurting processing, we don't go off
and do something stupid like disabling the IRQ after a month of running. On the
other hand, genuine lockups and performance killers show up at a lot more than
10 interrupts per second.
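To see what the time parameter buys, here is a small stand-alone user-space sketch
(not kernel code; HZ, the tick rate, and the simulated jiffies counter are assumptions
chosen purely for illustration) of the ageing rule this patch adds. A slow drip of
spurious interrupts keeps resetting the counter, while a real storm still accumulates:

	#include <stdio.h>

	#define HZ 1000	/* assume a 1000 Hz tick purely for this sketch */

	static unsigned long jiffies;		/* simulated tick counter */
	static unsigned long last_unhandled;	/* ageing timestamp, as in the patch */
	static unsigned int irqs_unhandled;	/* counter under test */

	/* the rule added by this patch, applied to one unhandled interrupt */
	static void note_unhandled(void)
	{
		if (jiffies - last_unhandled > HZ/10)
			irqs_unhandled = 1;	/* older than 100 ms: restart the count */
		else
			irqs_unhandled++;	/* bursting: keep counting */
		last_unhandled = jiffies;
	}

	int main(void)
	{
		int i;

		/* harmless case: one spurious IRQ every 200 ms for an hour */
		for (i = 0; i < 3600 * 5; i++) {
			jiffies += HZ / 5;
			note_unhandled();
		}
		printf("slow drip: irqs_unhandled = %u\n", irqs_unhandled);

		/* storm case: 1000 spurious IRQs within a single tick */
		for (i = 0; i < 1000; i++)
			note_unhandled();
		printf("storm:     irqs_unhandled = %u\n", irqs_unhandled);

		return 0;
	}

The slow-drip pass ends with the counter at 1, while the storm pass climbs past 1000, so
only sustained misbehaviour can ever approach the disable threshold.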
[akpm@linux-foundation.org: cleanup]
Signed-off-by: Alan Cox <alan@redhat.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r-- | include/linux/irq.h | 1
-rw-r--r-- | kernel/irq/spurious.c | 12
2 files changed, 12 insertions(+), 1 deletion(-)
diff --git a/include/linux/irq.h b/include/linux/irq.h
index 1695054e8c63..44657197fcb0 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -161,6 +161,7 @@ struct irq_desc {
 	unsigned int wake_depth;	/* nested wake enables */
 	unsigned int irq_count;		/* For detecting broken IRQs */
 	unsigned int irqs_unhandled;
+	unsigned long last_unhandled;	/* Aging timer for unhandled count */
 	spinlock_t lock;
 #ifdef CONFIG_SMP
 	cpumask_t affinity;
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index bd9e272d55e9..32b161972fad 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -172,7 +172,17 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		    irqreturn_t action_ret)
 {
 	if (unlikely(action_ret != IRQ_HANDLED)) {
-		desc->irqs_unhandled++;
+		/*
+		 * If we are seeing only the odd spurious IRQ caused by
+		 * bus asynchronicity then don't eventually trigger an error,
+		 * otherwise the counter becomes a doomsday timer for otherwise
+		 * working systems
+		 */
+		if (jiffies - desc->last_unhandled > HZ/10)
+			desc->irqs_unhandled = 1;
+		else
+			desc->irqs_unhandled++;
+		desc->last_unhandled = jiffies;
 		if (unlikely(action_ret != IRQ_NONE))
 			report_bad_irq(irq, desc, action_ret);
 	}