authorSebastian Andrzej Siewior <sebastian@breakpoint.cc>2011-05-31 02:56:11 -0400
committerThomas Gleixner <tglx@linutronix.de>2011-06-03 08:53:15 -0400
commit3a43e05f4d0600e906fa09f4a65d749288c44592 (patch)
treeb99732c8f459e70078c7dd90f62a5174376e3c62 /kernel
parentef26f20cd117eb3c185038ed7cbf7b235575751d (diff)
irq: Handle spurious irq detection for threaded irqs

The detection of spurious interrupts is currently limited to first-level
handlers. In force-threaded mode we never notice if the threaded handler
does not feel responsible for the interrupt.

This patch catches the return value of the threaded handler and forwards
it to the spurious detector. If the primary handler returns only
IRQ_WAKE_THREAD, the spurious detector ignores it because it gets called
again from the threaded handler.

[ tglx: Report the erroneous return value early and bail out ]

Signed-off-by: Sebastian Andrzej Siewior <sebastian@breakpoint.cc>
Link: http://lkml.kernel.org/r/1306824972-27067-2-git-send-email-sebastian@breakpoint.cc
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
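To make the mechanism concrete, here is a minimal sketch of the split-handler pattern this patch targets. The foo_* names and helpers are hypothetical; only request_threaded_irq(), irqreturn_t and its values are real kernel API. The primary handler returns IRQ_WAKE_THREAD, which the spurious detector now skips, and the threaded handler's own return value is what ends up in note_interrupt():

#include <linux/interrupt.h>

struct foo_device;                                  /* hypothetical driver state */
extern bool foo_irq_pending(struct foo_device *);   /* hypothetical register check */
extern bool foo_handle_events(struct foo_device *); /* hypothetical event processing */

/* Primary handler, hard-irq context: check the device and defer. */
static irqreturn_t foo_hardirq(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        if (!foo_irq_pending(foo))
                return IRQ_NONE;        /* not our interrupt */
        return IRQ_WAKE_THREAD;         /* skipped by the spurious detector */
}

/* Threaded handler: after this patch its return value reaches
 * note_interrupt(), so an IRQ_NONE here is no longer lost. */
static irqreturn_t foo_irq_thread(int irq, void *dev_id)
{
        struct foo_device *foo = dev_id;

        return foo_handle_events(foo) ? IRQ_HANDLED : IRQ_NONE;
}

/* Registered in probe() with something like:
 *      ret = request_threaded_irq(irq, foo_hardirq, foo_irq_thread,
 *                                 IRQF_ONESHOT, "foo", foo);
 */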
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/irq/handle.c    |  6
-rw-r--r--  kernel/irq/manage.c    | 24
-rw-r--r--  kernel/irq/spurious.c  | 22
3 files changed, 36 insertions(+), 16 deletions(-)
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 90cb55f6d7eb..470d08c82bbe 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,12 +133,6 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
                 switch (res) {
                 case IRQ_WAKE_THREAD:
                         /*
-                         * Set result to handled so the spurious check
-                         * does not trigger.
-                         */
-                        res = IRQ_HANDLED;
-
-                        /*
                          * Catch drivers which return WAKE_THREAD but
                          * did not set up a thread function
                          */
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f7ce0021e1c4..d64bafb1afd0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -723,13 +723,16 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  * context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
  */
-static void
+static irqreturn_t
 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 {
+        irqreturn_t ret;
+
         local_bh_disable();
-        action->thread_fn(action->irq, action->dev_id);
+        ret = action->thread_fn(action->irq, action->dev_id);
         irq_finalize_oneshot(desc, action, false);
         local_bh_enable();
+        return ret;
 }

 /*
@@ -737,10 +740,14 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
  * preemtible - many of them need to sleep and wait for slow busses to
  * complete.
  */
-static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+                struct irqaction *action)
 {
-        action->thread_fn(action->irq, action->dev_id);
+        irqreturn_t ret;
+
+        ret = action->thread_fn(action->irq, action->dev_id);
         irq_finalize_oneshot(desc, action, false);
+        return ret;
 }

 /*
@@ -753,7 +760,8 @@ static int irq_thread(void *data)
         };
         struct irqaction *action = data;
         struct irq_desc *desc = irq_to_desc(action->irq);
-        void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+        irqreturn_t (*handler_fn)(struct irq_desc *desc,
+                        struct irqaction *action);
         int wake;

         if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
@@ -783,8 +791,12 @@ static int irq_thread(void *data)
                         desc->istate |= IRQS_PENDING;
                         raw_spin_unlock_irq(&desc->lock);
                 } else {
+                        irqreturn_t action_ret;
+
                         raw_spin_unlock_irq(&desc->lock);
-                        handler_fn(desc, action);
+                        action_ret = handler_fn(desc, action);
+                        if (!noirqdebug)
+                                note_interrupt(action->irq, desc, action_ret);
                 }

                 wake = atomic_dec_and_test(&desc->threads_active);
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index c9a78ba30b6f..aa57d5da18c1 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -167,6 +167,13 @@ out:
                           jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }

+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+        if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+                return 0;
+        return 1;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
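As an aside, the new bad_action_ret() helper leans on the layout of irqreturn_t in include/linux/irqreturn.h: IRQ_NONE is 0, IRQ_HANDLED is (1 << 0) and IRQ_WAKE_THREAD is (1 << 1), so every legitimate return value, including the OR of the two flags, is at most 3 and anything larger is a driver bug. A standalone sketch of the same check (the name irqreturn_is_bogus is illustrative, not kernel API):

#include <linux/irqreturn.h>
#include <linux/types.h>

/* Mirrors bad_action_ret() above: valid values are IRQ_NONE (0),
 * IRQ_HANDLED (1), IRQ_WAKE_THREAD (2) and their bitwise OR (3). */
static inline bool irqreturn_is_bogus(irqreturn_t ret)
{
        return ret > (IRQ_HANDLED | IRQ_WAKE_THREAD);
}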
@@ -182,7 +189,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
         struct irqaction *action;
         unsigned long flags;

-        if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+        if (bad_action_ret(action_ret)) {
                 printk(KERN_ERR "irq event %d: bogus return value %x\n",
                                 irq, action_ret);
         } else {
@@ -263,7 +270,16 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
         if (desc->istate & IRQS_POLL_INPROGRESS)
                 return;

-        if (unlikely(action_ret != IRQ_HANDLED)) {
+        /* we get here again via the threaded handler */
+        if (action_ret == IRQ_WAKE_THREAD)
+                return;
+
+        if (bad_action_ret(action_ret)) {
+                report_bad_irq(irq, desc, action_ret);
+                return;
+        }
+
+        if (unlikely(action_ret == IRQ_NONE)) {
                 /*
                  * If we are seeing only the odd spurious IRQ caused by
                  * bus asynchronicity then don't eventually trigger an error,
@@ -275,8 +291,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
                 else
                         desc->irqs_unhandled++;
                 desc->last_unhandled = jiffies;
-                if (unlikely(action_ret != IRQ_NONE))
-                        report_bad_irq(irq, desc, action_ret);
         }

         if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {