Diffstat (limited to 'kernel/irq/manage.c')
-rw-r--r--	kernel/irq/manage.c | 68
1 file changed, 33 insertions(+), 35 deletions(-)
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7c475cd3f6e6..ea0c6c2ae6f7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -775,11 +776,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
 		wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+	struct task_struct *tsk = current;
+	struct irq_desc *desc;
+	struct irqaction *action;
+
+	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+		return;
+
+	action = kthread_data(tsk);
+
+	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+	desc = irq_to_desc(action->irq);
+	/*
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
+	 */
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
+
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+	struct task_work on_exit_work;
 	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
@@ -795,7 +824,9 @@ static int irq_thread(void *data)
 		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irq_thread = 1;
+
+	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+	task_work_add(current, &on_exit_work, false);
 
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
@@ -817,44 +848,11 @@ static int irq_thread(void *data)
 	 * cannot touch the oneshot mask at this point anymore as
 	 * __setup_irq() might have given out currents thread_mask
 	 * again.
-	 *
-	 * Clear irq_thread. Otherwise exit_irq_thread() would make
-	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irq_thread = 0;
+	task_work_cancel(current, irq_thread_dtor);
 	return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-	struct task_struct *tsk = current;
-	struct irq_desc *desc;
-	struct irqaction *action;
-
-	if (!tsk->irq_thread)
-		return;
-
-	action = kthread_data(tsk);
-
-	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-	desc = irq_to_desc(action->irq);
-
-	/*
-	 * If IRQTF_RUNTHREAD is set, we need to decrement
-	 * desc->threads_active and wake possible waiters.
-	 */
-	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-		wake_threads_waitq(desc);
-
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)
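For context on the mechanism the patch switches to: instead of a dedicated current->irq_thread flag checked by an exit_irq_thread() hook in do_exit(), the cleanup is now registered as a generic task work item that the exit path runs automatically. The sketch below isolates that register/cancel pattern using only the calls visible in the diff (the three-argument init_task_work(), task_work_add(), task_work_cancel() and a struct task_work * callback, i.e. the task_work API of this kernel version). The thread function, callback name, and loop body are illustrative stand-ins rather than code from the patch, and it is a sketch, not a buildable module, since task_work is kernel-internal.

#include <linux/kthread.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/task_work.h>

/* Hypothetical destructor: runs from the exit path only if the thread
 * dies without reaching its normal shutdown code. */
static void demo_thread_dtor(struct task_work *unused)
{
	pr_err("thread \"%s\" (%d) exited without cleaning up\n",
	       current->comm, current->pid);
	/* ... undo per-thread state here, as irq_thread_dtor() does ... */
}

static int demo_thread_fn(void *data)
{
	struct task_work on_exit_work;

	/* Arm the destructor on our own task; 'false' queues the work
	 * without notifying the task, as in the patch. */
	init_task_work(&on_exit_work, demo_thread_dtor, NULL);
	task_work_add(current, &on_exit_work, false);

	while (!kthread_should_stop()) {
		/* ... wait for and handle work ... */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	__set_current_state(TASK_RUNNING);

	/* Normal shutdown: disarm the destructor so it never runs. */
	task_work_cancel(current, demo_thread_dtor);
	return 0;
}

This is the same shape as the change above: irq_thread() arms irq_thread_dtor() right after setting its scheduling policy and cancels it on the normal return path, so the pr_err() warning and the threads_active/oneshot cleanup only happen when the IRQ thread is killed unexpectedly.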