author     Ido Yariv <ido@wizery.com>              2011-12-02 11:24:12 -0500
committer  Thomas Gleixner <tglx@linutronix.de>    2012-03-14 06:56:20 -0400
commit     7140ea1980f2fae9c7aaeac5f6b35317e1389ee6 (patch)
tree       7b1021c74dee94b0171db25bc1965555e0e9a9e5 /kernel/irq/manage.c
parent     df8d291f28aa1e8437c8f7816328a6516379c71b (diff)
genirq: Flush the irq thread on synchronization
The current implementation does not always flush the threaded handler
when disabling the irq. In case the irq handler was called, but the
threaded handler hasn't started running yet, the interrupt will be
flagged as pending, and the handler will not run. This implementation
has some issues:

First, if the interrupt is a wake source and flagged as pending, the
system will not be able to suspend.

Second, when quickly disabling and re-enabling the irq, the threaded
handler might continue to run after the irq is re-enabled without the
irq handler being called first. This might be an unexpected behavior.

In addition, it might be counter-intuitive that the threaded handler
will not be called even though the irq handler was called and returned
IRQ_WAKE_THREAD.

Fix this by always waiting for the threaded handler to complete in
synchronize_irq().

[ tglx: Massaged comments, added WARN_ONs and the missing
  IRQTF_RUNTHREAD check in exit_irq_thread() ]

Signed-off-by: Ido Yariv <ido@wizery.com>
Link: http://lkml.kernel.org/r/1322843052-7166-1-git-send-email-ido@wizery.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
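For context, the flush relies on the wait side that already exists in synchronize_irq(): waiters sleep on desc->wait_for_threads until desc->threads_active drops to zero. The following is only a simplified sketch of that path (the spin-wait for in-progress hard irq handlers and other details are omitted; it is not part of this patch):

void synchronize_irq(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);

	if (desc) {
		/* ... first spin until no hard irq handler is in progress ... */

		/*
		 * Then wait until every threaded handler that was woken
		 * for this irq has completed, i.e. threads_active is 0.
		 * wake_threads_waitq() in the patch below is what wakes
		 * this waiter up.
		 */
		wait_event(desc->wait_for_threads,
			   !atomic_read(&desc->threads_active));
	}
}

With the change below, the irq thread always runs the handler once it has been woken, so a waiter in synchronize_irq() is guaranteed to see the threaded handler finish instead of having the work silently deferred via IRQS_PENDING.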
Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--   kernel/irq/manage.c   60
1 file changed, 32 insertions, 28 deletions
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 1786cf7dac54..453feedbb390 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -759,6 +759,13 @@ static irqreturn_t irq_thread_fn(struct irq_desc *desc,
 	return ret;
 }
 
+static void wake_threads_waitq(struct irq_desc *desc)
+{
+	if (atomic_dec_and_test(&desc->threads_active) &&
+	    waitqueue_active(&desc->wait_for_threads))
+		wake_up(&desc->wait_for_threads);
+}
+
 /*
  * Interrupt handler thread
  */
@@ -771,7 +778,6 @@ static int irq_thread(void *data)
 	struct irq_desc *desc = irq_to_desc(action->irq);
 	irqreturn_t (*handler_fn)(struct irq_desc *desc,
 			struct irqaction *action);
-	int wake;
 
 	if (force_irqthreads && test_bit(IRQTF_FORCED_THREAD,
 					&action->thread_flags))
@@ -783,39 +789,30 @@ static int irq_thread(void *data)
 	current->irq_thread = 1;
 
 	while (!irq_wait_for_interrupt(action)) {
+		irqreturn_t action_ret;
 
 		irq_thread_check_affinity(desc, action);
 
-		atomic_inc(&desc->threads_active);
+		action_ret = handler_fn(desc, action);
+		if (!noirqdebug)
+			note_interrupt(action->irq, desc, action_ret);
 
-		raw_spin_lock_irq(&desc->lock);
-		if (unlikely(irqd_irq_disabled(&desc->irq_data))) {
-			/*
-			 * CHECKME: We might need a dedicated
-			 * IRQ_THREAD_PENDING flag here, which
-			 * retriggers the thread in check_irq_resend()
-			 * but AFAICT IRQS_PENDING should be fine as it
-			 * retriggers the interrupt itself --- tglx
-			 */
-			desc->istate |= IRQS_PENDING;
-			raw_spin_unlock_irq(&desc->lock);
-		} else {
-			irqreturn_t action_ret;
-
-			raw_spin_unlock_irq(&desc->lock);
-			action_ret = handler_fn(desc, action);
-			if (!noirqdebug)
-				note_interrupt(action->irq, desc, action_ret);
-		}
-
-		wake = atomic_dec_and_test(&desc->threads_active);
-
-		if (wake && waitqueue_active(&desc->wait_for_threads))
-			wake_up(&desc->wait_for_threads);
+		wake_threads_waitq(desc);
 	}
 
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action, true);
+	/*
+	 * This is the regular exit path. __free_irq() is stopping the
+	 * thread via kthread_stop() after calling
+	 * synchronize_irq(). So neither IRQTF_RUNTHREAD nor the
+	 * oneshot mask bit should be set.
+	 *
+	 * Verify that this is true.
+	 */
+	if (WARN_ON(test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags)))
+		wake_threads_waitq(desc);
+
+	if (WARN_ON(desc->threads_oneshot & action->thread_mask))
+		irq_finalize_oneshot(desc, action, true);
 
 	/*
 	 * Clear irq_thread. Otherwise exit_irq_thread() would make
@@ -845,6 +842,13 @@ void exit_irq_thread(void)
 
 	desc = irq_to_desc(action->irq);
 
+	/*
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
+	 */
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
+
 	/* Prevent a stale desc->threads_oneshot */
 	irq_finalize_oneshot(desc, action, true);
 }
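The WARN_ON()s in the thread's exit path above depend on a simple counting protocol: whoever wakes the thread sets IRQTF_RUNTHREAD and increments desc->threads_active, and the thread drops both again once the handler has run, or in the exit paths shown above. The waker side lives in kernel/irq/handle.c and is outside this diffstat-limited view; the sketch below only illustrates that protocol, and the helper name wake_irq_thread() is made up here:

/*
 * Illustrative sketch of the waker side of the protocol, not the
 * actual kernel/irq/handle.c code. The name wake_irq_thread() is
 * hypothetical.
 */
static void wake_irq_thread(struct irq_desc *desc, struct irqaction *action)
{
	/* Already flagged to run: the thread will get to it, nothing to do. */
	if (test_and_set_bit(IRQTF_RUNTHREAD, &action->thread_flags))
		return;

	/*
	 * Account for the woken thread. synchronize_irq() waits for this
	 * counter to reach zero; wake_threads_waitq() decrements it once
	 * the threaded handler has run, or in exit_irq_thread() if the
	 * thread dies before it gets to run.
	 */
	atomic_inc(&desc->threads_active);
	wake_up_process(action->thread);
}

Pairing the increment with IRQTF_RUNTHREAD is what makes the two WARN_ON()s valid: on the regular exit path __free_irq() has already run synchronize_irq(), so neither the flag nor the oneshot mask bit can still be set when the thread stops.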