author    Oleg Nesterov <oleg@redhat.com>      2012-05-10 20:59:08 -0400
committer Al Viro <viro@zeniv.linux.org.uk>    2012-05-23 22:11:12 -0400
commit    4d1d61a6b203d957777d73fcebf19d90b038b5b2 (patch)
tree      8b597fa00168ee59e3abf5ec8cf4e92f8fd28d8a /kernel
parent    e73f8959af0439d114847eab5a8a5ce48f1217c4 (diff)
genirq: reimplement exit_irq_thread() hook via task_work_add()
exit_irq_thread() and task->irq_thread are needed to handle the unexpected
(and unlikely) exit of irq-thread. We can use task_work instead and make this
all private to kernel/irq/manage.c, cleanup plus micro-optimization.

1. rename exit_irq_thread() to irq_thread_dtor(), make it static, and move
   it up before irq_thread().

2. change irq_thread() to do task_work_add(irq_thread_dtor) at the start
   and task_work_cancel() before return.

   tracehook_notify_resume() can never play with kthreads, only
   do_exit()->exit_task_work() can call the callback and this is what we
   want.

3. remove task_struct->irq_thread and the special hook in do_exit().

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Cc: David Howells <dhowells@redhat.com>
Cc: Richard Kuo <rkuo@codeaurora.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Alexander Gordeev <agordeev@redhat.com>
Cc: Chris Zankel <chris@zankel.net>
Cc: David Smith <dsmith@redhat.com>
Cc: "Frank Ch. Eigler" <fche@redhat.com>
Cc: Geert Uytterhoeven <geert@linux-m68k.org>
Cc: Larry Woodman <lwoodman@redhat.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Tejun Heo <tj@kernel.org>
Cc: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
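[Editor's note] A minimal, hypothetical sketch of the pattern this patch adopts, written against the task_work API of this era (three-argument init_task_work(), a callback taking struct task_work *, and task_work_cancel() keyed by the callback): the kthread arms a destructor on entry and disarms it on its normal return path, so the callback can only run from do_exit()->exit_task_work() if the thread dies unexpectedly. The names my_kthread and my_thread_dtor are illustrative, not taken from the kernel.

/*
 * Sketch only (assumed names, circa-2012 task_work API): arm a destructor
 * when the kthread starts, cancel it on the normal-return path.
 */
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/task_work.h>

static void my_thread_dtor(struct task_work *unused)
{
	/* Reached only via do_exit()->exit_task_work() on an unexpected exit. */
	pr_err("kthread \"%s\" (%d) exited with its destructor still armed\n",
	       current->comm, current->pid);
}

static int my_kthread(void *data)
{
	struct task_work on_exit_work;

	/* Old three-argument form: work, callback, data. */
	init_task_work(&on_exit_work, my_thread_dtor, NULL);
	task_work_add(current, &on_exit_work, false);

	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);

	/* Normal exit: make sure the destructor never fires. */
	task_work_cancel(current, my_thread_dtor);
	return 0;
}

Note the 'false' notify argument to task_work_add(): no TIF_NOTIFY_RESUME kick is wanted here, because (as the message above says) a kthread never passes through tracehook_notify_resume(); only the exit path should ever run the work.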
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c        |  2
-rw-r--r--  kernel/irq/manage.c  | 68
2 files changed, 33 insertions, 37 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 3d93325e0b1a..3ecd096e5d4d 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -954,8 +954,6 @@ void do_exit(long code)
 
 	exit_task_work(tsk);
 
-	exit_irq_thread();
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 			current->comm, task_pid_nr(current),
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index bb32326afe87..4d1f8f897414 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -14,6 +14,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -773,11 +774,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
 		wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+	struct task_struct *tsk = current;
+	struct irq_desc *desc;
+	struct irqaction *action;
+
+	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+		return;
+
+	action = kthread_data(tsk);
+
+	pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+	desc = irq_to_desc(action->irq);
+	/*
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
+	 */
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
+
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+	struct task_work on_exit_work;
 	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
@@ -793,7 +822,9 @@ static int irq_thread(void *data)
 		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irq_thread = 1;
+
+	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+	task_work_add(current, &on_exit_work, false);
 
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
@@ -815,44 +846,11 @@ static int irq_thread(void *data)
 	 * cannot touch the oneshot mask at this point anymore as
 	 * __setup_irq() might have given out currents thread_mask
 	 * again.
-	 *
-	 * Clear irq_thread. Otherwise exit_irq_thread() would make
-	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irq_thread = 0;
+	task_work_cancel(current, irq_thread_dtor);
 	return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-	struct task_struct *tsk = current;
-	struct irq_desc *desc;
-	struct irqaction *action;
-
-	if (!tsk->irq_thread)
-		return;
-
-	action = kthread_data(tsk);
-
-	pr_err("genirq: exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-	desc = irq_to_desc(action->irq);
-
-	/*
-	 * If IRQTF_RUNTHREAD is set, we need to decrement
-	 * desc->threads_active and wake possible waiters.
-	 */
-	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-		wake_threads_waitq(desc);
-
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)