 include/linux/irqreturn.h |  6 +++---
 kernel/irq/handle.c       |  6 ------
 kernel/irq/manage.c       | 24 ++++++++++++++++++------
 kernel/irq/spurious.c     | 22 ++++++++++++++++++----
 4 files changed, 39 insertions(+), 19 deletions(-)
diff --git a/include/linux/irqreturn.h b/include/linux/irqreturn.h
index 819acaaac3f5..714ba08dc092 100644
--- a/include/linux/irqreturn.h
+++ b/include/linux/irqreturn.h
@@ -8,9 +8,9 @@
  * @IRQ_WAKE_THREAD	handler requests to wake the handler thread
  */
 enum irqreturn {
-	IRQ_NONE,
-	IRQ_HANDLED,
-	IRQ_WAKE_THREAD,
+	IRQ_NONE		= (0 << 0),
+	IRQ_HANDLED		= (1 << 0),
+	IRQ_WAKE_THREAD		= (1 << 1),
 };
 
 typedef enum irqreturn irqreturn_t;
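
With distinct bit values, handler return codes can be combined and validated by the core instead of being treated as plain ordinals. For context, a minimal sketch of how a driver is expected to use them with request_threaded_irq(); this is purely illustrative and not part of the patch, and the names my_dev, my_quick_check(), my_slow_work() and my_irq_setup() are invented:

/* assumes <linux/interrupt.h> and a driver-private struct my_dev */
static irqreturn_t my_hardirq_handler(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	if (!my_quick_check(dev))
		return IRQ_NONE;	/* not our interrupt */

	return IRQ_WAKE_THREAD;		/* defer the work to the thread */
}

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	my_slow_work(dev_id);		/* may sleep */
	return IRQ_HANDLED;
}

static int my_irq_setup(struct my_dev *dev, unsigned int irq)
{
	return request_threaded_irq(irq, my_hardirq_handler, my_thread_fn,
				    IRQF_ONESHOT, "my_dev", dev);
}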
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 90cb55f6d7eb..470d08c82bbe 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -133,12 +133,6 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
 		switch (res) {
 		case IRQ_WAKE_THREAD:
 			/*
-			 * Set result to handled so the spurious check
-			 * does not trigger.
-			 */
-			res = IRQ_HANDLED;
-
-			/*
 			 * Catch drivers which return WAKE_THREAD but
 			 * did not set up a thread function
 			 */
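
Dropping the coercion above means IRQ_WAKE_THREAD is no longer rewritten to IRQ_HANDLED before the result is accumulated, so the primary handler's value reaches note_interrupt() unchanged; the spurious.c change below simply ignores IRQ_WAKE_THREAD there, since the same interrupt is accounted again when the thread runs. The bit-valued enum is what makes the accumulation work: roughly, handle_irq_event_percpu() ORs the per-action results together, along the lines of this paraphrase (not the exact kernel loop):

static irqreturn_t accumulate_results_sketch(unsigned int irq,
					     struct irqaction *action)
{
	irqreturn_t retval = IRQ_NONE;

	do {
		/* each handler may return IRQ_NONE, IRQ_HANDLED or
		 * IRQ_WAKE_THREAD; OR-ing keeps every bit that was set */
		retval |= action->handler(irq, action->dev_id);
		action = action->next;
	} while (action);

	return retval;	/* this is what eventually reaches note_interrupt() */
}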
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f7ce0021e1c4..d64bafb1afd0 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -723,13 +723,16 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
  * context. So we need to disable bh here to avoid deadlocks and other
  * side effects.
  */
-static void
+static irqreturn_t
 irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
 {
+	irqreturn_t ret;
+
 	local_bh_disable();
-	action->thread_fn(action->irq, action->dev_id);
+	ret = action->thread_fn(action->irq, action->dev_id);
 	irq_finalize_oneshot(desc, action, false);
 	local_bh_enable();
+	return ret;
 }
 
 /*
@@ -737,10 +740,14 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
  * preemtible - many of them need to sleep and wait for slow busses to
  * complete.
  */
-static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action)
+static irqreturn_t irq_thread_fn(struct irq_desc *desc,
+		struct irqaction *action)
 {
-	action->thread_fn(action->irq, action->dev_id);
+	irqreturn_t ret;
+
+	ret = action->thread_fn(action->irq, action->dev_id);
 	irq_finalize_oneshot(desc, action, false);
+	return ret;
 }
 
 /*
@@ -753,7 +760,8 @@ static int irq_thread(void *data)
 	};
 	struct irqaction *action = data;
 	struct irq_desc *desc = irq_to_desc(action->irq);
-	void (*handler_fn)(struct irq_desc *desc, struct irqaction *action);
+	irqreturn_t (*handler_fn)(struct irq_desc *desc,
+			struct irqaction *action);
 	int wake;
 
 	if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
@@ -783,8 +791,12 @@ static int irq_thread(void *data)
 			desc->istate |= IRQS_PENDING;
 			raw_spin_unlock_irq(&desc->lock);
 		} else {
+			irqreturn_t action_ret;
+
 			raw_spin_unlock_irq(&desc->lock);
-			handler_fn(desc, action);
+			action_ret = handler_fn(desc, action);
+			if (!noirqdebug)
+				note_interrupt(action->irq, desc, action_ret);
 		}
 
 		wake = atomic_dec_and_test(&desc->threads_active);
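
Because irq_forced_thread_fn() and irq_thread_fn() now return the driver's result and the IRQ thread feeds it into note_interrupt() (unless booted with noirqdebug), a threaded handler's return value finally participates in spurious-interrupt detection. A refined version of the hypothetical thread function above shows the consequence; my_pending() is again an invented name:

static irqreturn_t my_thread_fn(int irq, void *dev_id)
{
	struct my_dev *dev = dev_id;

	if (!my_pending(dev))
		return IRQ_NONE;	/* now counted as unhandled */

	my_slow_work(dev);		/* may sleep */
	return IRQ_HANDLED;
}

A thread function that keeps returning IRQ_NONE now pushes desc->irqs_unhandled up just like an unhandled hard interrupt would, so a stuck or misrouted line is detected even when all the work is done in the thread.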
diff --git a/kernel/irq/spurious.c b/kernel/irq/spurious.c
index c9a78ba30b6f..aa57d5da18c1 100644
--- a/kernel/irq/spurious.c
+++ b/kernel/irq/spurious.c
@@ -167,6 +167,13 @@ out:
 			  jiffies + POLL_SPURIOUS_IRQ_INTERVAL);
 }
 
+static inline int bad_action_ret(irqreturn_t action_ret)
+{
+	if (likely(action_ret <= (IRQ_HANDLED | IRQ_WAKE_THREAD)))
+		return 0;
+	return 1;
+}
+
 /*
  * If 99,900 of the previous 100,000 interrupts have not been handled
  * then assume that the IRQ is stuck in some manner. Drop a diagnostic
@@ -182,7 +189,7 @@ __report_bad_irq(unsigned int irq, struct irq_desc *desc,
 	struct irqaction *action;
 	unsigned long flags;
 
-	if (action_ret != IRQ_HANDLED && action_ret != IRQ_NONE) {
+	if (bad_action_ret(action_ret)) {
 		printk(KERN_ERR "irq event %d: bogus return value %x\n",
 				irq, action_ret);
 	} else {
@@ -263,7 +270,16 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 	if (desc->istate & IRQS_POLL_INPROGRESS)
 		return;
 
-	if (unlikely(action_ret != IRQ_HANDLED)) {
+	/* we get here again via the threaded handler */
+	if (action_ret == IRQ_WAKE_THREAD)
+		return;
+
+	if (bad_action_ret(action_ret)) {
+		report_bad_irq(irq, desc, action_ret);
+		return;
+	}
+
+	if (unlikely(action_ret == IRQ_NONE)) {
 		/*
 		 * If we are seeing only the odd spurious IRQ caused by
 		 * bus asynchronicity then don't eventually trigger an error,
@@ -275,8 +291,6 @@ void note_interrupt(unsigned int irq, struct irq_desc *desc,
 		else
 			desc->irqs_unhandled++;
 		desc->last_unhandled = jiffies;
-		if (unlikely(action_ret != IRQ_NONE))
-			report_bad_irq(irq, desc, action_ret);
 	}
 
 	if (unlikely(try_misrouted_irq(irq, desc, action_ret))) {
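
Taken together, bad_action_ret() treats IRQ_NONE, IRQ_HANDLED, IRQ_WAKE_THREAD and the combination IRQ_HANDLED | IRQ_WAKE_THREAD as legitimate and flags everything above that range as bogus, and thanks to the note_interrupt() call added in manage.c this check now covers threaded handlers too. A small stand-alone program (ordinary userspace C, not kernel code) that mirrors the new check:

#include <stdio.h>

/* same bit values as the patched include/linux/irqreturn.h */
enum irqreturn {
	IRQ_NONE	= (0 << 0),
	IRQ_HANDLED	= (1 << 0),
	IRQ_WAKE_THREAD	= (1 << 1),
};

/* mirrors the kernel's new bad_action_ret(): 0..3 are valid */
static int bad_action_ret(int action_ret)
{
	return action_ret > (IRQ_HANDLED | IRQ_WAKE_THREAD);
}

int main(void)
{
	int v;

	for (v = 0; v <= 4; v++)
		printf("return value %d -> %s\n", v,
		       bad_action_ret(v) ? "bogus" : "valid");
	return 0;
}

Running it prints "valid" for 0 through 3 and "bogus" for 4, matching what __report_bad_irq() now logs as a bogus return value.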