about summary refs log tree commit diff stats
path: root/kernel/irq/manage.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--  kernel/irq/manage.c  27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index f7ce0021e1c4..0a7840aeb0fb 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -491,6 +491,9 @@ int irq_set_irq_wake(unsigned int irq, unsigned int on)
491 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags); 491 struct irq_desc *desc = irq_get_desc_buslock(irq, &flags);
492 int ret = 0; 492 int ret = 0;
493 493
494 if (!desc)
495 return -EINVAL;
496
494 /* wakeup-capable irqs can be shared between drivers that 497 /* wakeup-capable irqs can be shared between drivers that
495 * don't need to have the same sleep mode behaviors. 498 * don't need to have the same sleep mode behaviors.
496 */ 499 */
@@ -723,13 +726,16 @@ irq_thread_check_affinity(struct irq_desc *desc, struct irqaction *action) { }
723 * context. So we need to disable bh here to avoid deadlocks and other 726 * context. So we need to disable bh here to avoid deadlocks and other
724 * side effects. 727 * side effects.
725 */ 728 */
726static void 729static irqreturn_t
727irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action) 730irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
728{ 731{
732 irqreturn_t ret;
733
729 local_bh_disable(); 734 local_bh_disable();
730 action->thread_fn(action->irq, action->dev_id); 735 ret = action->thread_fn(action->irq, action->dev_id);
731 irq_finalize_oneshot(desc, action, false); 736 irq_finalize_oneshot(desc, action, false);
732 local_bh_enable(); 737 local_bh_enable();
738 return ret;
733} 739}
734 740
735/* 741/*
@@ -737,10 +743,14 @@ irq_forced_thread_fn(struct irq_desc *desc, struct irqaction *action)
737 * preemtible - many of them need to sleep and wait for slow busses to 743 * preemtible - many of them need to sleep and wait for slow busses to
738 * complete. 744 * complete.
739 */ 745 */
740static void irq_thread_fn(struct irq_desc *desc, struct irqaction *action) 746static irqreturn_t irq_thread_fn(struct irq_desc *desc,
747 struct irqaction *action)
741{ 748{
742 action->thread_fn(action->irq, action->dev_id); 749 irqreturn_t ret;
750
751 ret = action->thread_fn(action->irq, action->dev_id);
743 irq_finalize_oneshot(desc, action, false); 752 irq_finalize_oneshot(desc, action, false);
753 return ret;
744} 754}
745 755
746/* 756/*
@@ -753,7 +763,8 @@ static int irq_thread(void *data)
753 }; 763 };
754 struct irqaction *action = data; 764 struct irqaction *action = data;
755 struct irq_desc *desc = irq_to_desc(action->irq); 765 struct irq_desc *desc = irq_to_desc(action->irq);
756 void (*handler_fn)(struct irq_desc *desc, struct irqaction *action); 766 irqreturn_t (*handler_fn)(struct irq_desc *desc,
767 struct irqaction *action);
757 int wake; 768 int wake;
758 769
759 if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD, 770 if (force_irqthreads & test_bit(IRQTF_FORCED_THREAD,
@@ -783,8 +794,12 @@ static int irq_thread(void *data)
783 desc->istate |= IRQS_PENDING; 794 desc->istate |= IRQS_PENDING;
784 raw_spin_unlock_irq(&desc->lock); 795 raw_spin_unlock_irq(&desc->lock);
785 } else { 796 } else {
797 irqreturn_t action_ret;
798
786 raw_spin_unlock_irq(&desc->lock); 799 raw_spin_unlock_irq(&desc->lock);
787 handler_fn(desc, action); 800 action_ret = handler_fn(desc, action);
801 if (!noirqdebug)
802 note_interrupt(action->irq, desc, action_ret);
788 } 803 }
789 804
790 wake = atomic_dec_and_test(&desc->threads_active); 805 wake = atomic_dec_and_test(&desc->threads_active);