Diffstat (limited to 'kernel')

 -rw-r--r--  kernel/Makefile     |  2
 -rw-r--r--  kernel/cred.c       |  9
 -rw-r--r--  kernel/exit.c       |  5
 -rw-r--r--  kernel/fork.c       |  1
 -rw-r--r--  kernel/irq/manage.c | 68
 -rw-r--r--  kernel/task_work.c  | 84

 6 files changed, 122 insertions(+), 47 deletions(-)
diff --git a/kernel/Makefile b/kernel/Makefile
index 80be6ca0cc75..6f3d0ae044b2 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -5,7 +5,7 @@
 obj-y     = fork.o exec_domain.o panic.o printk.o \
	    cpu.o exit.o itimer.o time.o softirq.o resource.o \
	    sysctl.o sysctl_binary.o capability.o ptrace.o timer.o user.o \
-	    signal.o sys.o kmod.o workqueue.o pid.o \
+	    signal.o sys.o kmod.o workqueue.o pid.o task_work.o \
	    rcupdate.o extable.o params.o posix-timers.o \
	    kthread.o wait.o kfifo.o sys_ni.o posix-cpu-timers.o mutex.o \
	    hrtimer.o rwsem.o nsproxy.o srcu.o semaphore.o \
diff --git a/kernel/cred.c b/kernel/cred.c
index 430557ea488f..de728ac50d82 100644
--- a/kernel/cred.c
+++ b/kernel/cred.c
@@ -207,13 +207,6 @@ void exit_creds(struct task_struct *tsk)
 	validate_creds(cred);
 	alter_cred_subscribers(cred, -1);
 	put_cred(cred);
-
-	cred = (struct cred *) tsk->replacement_session_keyring;
-	if (cred) {
-		tsk->replacement_session_keyring = NULL;
-		validate_creds(cred);
-		put_cred(cred);
-	}
 }
 
 /**
@@ -396,8 +389,6 @@ int copy_creds(struct task_struct *p, unsigned long clone_flags)
 	struct cred *new;
 	int ret;
 
-	p->replacement_session_keyring = NULL;
-
 	if (
 #ifdef CONFIG_KEYS
 		!p->cred->thread_keyring &&
diff --git a/kernel/exit.c b/kernel/exit.c
index 6d85655353e9..34867cc5b42a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -946,12 +946,13 @@ void do_exit(long code)
 	exit_signals(tsk);  /* sets PF_EXITING */
 	/*
 	 * tsk->flags are checked in the futex code to protect against
-	 * an exiting task cleaning up the robust pi futexes.
+	 * an exiting task cleaning up the robust pi futexes, and in
+	 * task_work_add() to avoid the race with exit_task_work().
 	 */
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
-	exit_irq_thread();
+	exit_task_work(tsk);
 
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
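exit_task_work() itself is not visible here: it lives in include/linux/task_work.h, which falls outside the 'kernel' diffstat filter above. Judging from how task_work_run() (in kernel/task_work.c below) drains the list, it is presumably a thin inline wrapper along these lines; treat this as an inferred sketch, not the verbatim header:

/*
 * Inferred sketch of exit_task_work() from include/linux/task_work.h;
 * that header is outside this diffstat-limited view, so the exact body
 * is an assumption. It drains whatever was queued before do_exit()
 * set PF_EXITING and passed the pi_lock barrier above.
 */
static inline void exit_task_work(struct task_struct *task)
{
	if (unlikely(!hlist_empty(&task->task_works)))
		task_work_run();
}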
diff --git a/kernel/fork.c b/kernel/fork.c
index c55b61ab6d64..ab5211b9e622 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1415,6 +1415,7 @@ static struct task_struct *copy_process(unsigned long clone_flags,
 	 */
 	p->group_leader = p;
 	INIT_LIST_HEAD(&p->thread_group);
+	INIT_HLIST_HEAD(&p->task_works);
 
 	/* Now that the task is set up, run cgroup callbacks if
 	 * necessary. We need to run them before the task is visible
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index 7c475cd3f6e6..ea0c6c2ae6f7 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -16,6 +16,7 @@
 #include <linux/interrupt.h>
 #include <linux/slab.h>
 #include <linux/sched.h>
+#include <linux/task_work.h>
 
 #include "internals.h"
 
@@ -775,11 +776,39 @@ static void wake_threads_waitq(struct irq_desc *desc)
 		wake_up(&desc->wait_for_threads);
 }
 
+static void irq_thread_dtor(struct task_work *unused)
+{
+	struct task_struct *tsk = current;
+	struct irq_desc *desc;
+	struct irqaction *action;
+
+	if (WARN_ON_ONCE(!(current->flags & PF_EXITING)))
+		return;
+
+	action = kthread_data(tsk);
+
+	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
+	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
+
+
+	desc = irq_to_desc(action->irq);
+	/*
+	 * If IRQTF_RUNTHREAD is set, we need to decrement
+	 * desc->threads_active and wake possible waiters.
+	 */
+	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
+		wake_threads_waitq(desc);
+
+	/* Prevent a stale desc->threads_oneshot */
+	irq_finalize_oneshot(desc, action);
+}
+
 /*
  * Interrupt handler thread
  */
 static int irq_thread(void *data)
 {
+	struct task_work on_exit_work;
 	static const struct sched_param param = {
 		.sched_priority = MAX_USER_RT_PRIO/2,
 	};
@@ -795,7 +824,9 @@ static int irq_thread(void *data)
 		handler_fn = irq_thread_fn;
 
 	sched_setscheduler(current, SCHED_FIFO, &param);
-	current->irq_thread = 1;
+
+	init_task_work(&on_exit_work, irq_thread_dtor, NULL);
+	task_work_add(current, &on_exit_work, false);
 
 	while (!irq_wait_for_interrupt(action)) {
 		irqreturn_t action_ret;
@@ -817,44 +848,11 @@ static int irq_thread(void *data)
 	 * cannot touch the oneshot mask at this point anymore as
 	 * __setup_irq() might have given out currents thread_mask
 	 * again.
-	 *
-	 * Clear irq_thread. Otherwise exit_irq_thread() would make
-	 * fuzz about an active irq thread going into nirvana.
 	 */
-	current->irq_thread = 0;
+	task_work_cancel(current, irq_thread_dtor);
 	return 0;
 }
 
-/*
- * Called from do_exit()
- */
-void exit_irq_thread(void)
-{
-	struct task_struct *tsk = current;
-	struct irq_desc *desc;
-	struct irqaction *action;
-
-	if (!tsk->irq_thread)
-		return;
-
-	action = kthread_data(tsk);
-
-	pr_err("exiting task \"%s\" (%d) is an active IRQ thread (irq %d)\n",
-	       tsk->comm ? tsk->comm : "", tsk->pid, action->irq);
-
-	desc = irq_to_desc(action->irq);
-
-	/*
-	 * If IRQTF_RUNTHREAD is set, we need to decrement
-	 * desc->threads_active and wake possible waiters.
-	 */
-	if (test_and_clear_bit(IRQTF_RUNTHREAD, &action->thread_flags))
-		wake_threads_waitq(desc);
-
-	/* Prevent a stale desc->threads_oneshot */
-	irq_finalize_oneshot(desc, action);
-}
-
 static void irq_setup_forced_threading(struct irqaction *new)
 {
 	if (!force_irqthreads)
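The task_work type and helpers used above also come from include/linux/task_work.h, omitted by the 'kernel' diffstat filter. Reconstructed from the call sites in this patch (init_task_work(&on_exit_work, irq_thread_dtor, NULL), twork->func in kernel/task_work.c, and the container_of() on the hlist member), the declarations are presumably along these lines; again an inferred sketch rather than the header itself:

/* Inferred from this patch's call sites; not the verbatim header. */
struct task_work;
typedef void (*task_work_func_t)(struct task_work *);

struct task_work {
	struct hlist_node hlist;	/* linked into task->task_works */
	task_work_func_t func;		/* called from task_work_run() */
	void *data;			/* opaque, for the callback's use */
};

static inline void
init_task_work(struct task_work *twork, task_work_func_t func, void *data)
{
	twork->func = func;
	twork->data = data;
}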
diff --git a/kernel/task_work.c b/kernel/task_work.c
new file mode 100644
index 000000000000..82d1c794066d
--- /dev/null
+++ b/kernel/task_work.c
@@ -0,0 +1,84 @@
+#include <linux/spinlock.h>
+#include <linux/task_work.h>
+#include <linux/tracehook.h>
+
+int
+task_work_add(struct task_struct *task, struct task_work *twork, bool notify)
+{
+	unsigned long flags;
+	int err = -ESRCH;
+
+#ifndef TIF_NOTIFY_RESUME
+	if (notify)
+		return -ENOTSUPP;
+#endif
+	/*
+	 * We must not insert the new work if the task has already passed
+	 * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
+	 * and check PF_EXITING under pi_lock.
+	 */
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	if (likely(!(task->flags & PF_EXITING))) {
+		hlist_add_head(&twork->hlist, &task->task_works);
+		err = 0;
+	}
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
+	if (likely(!err) && notify)
+		set_notify_resume(task);
+	return err;
+}
+
+struct task_work *
+task_work_cancel(struct task_struct *task, task_work_func_t func)
+{
+	unsigned long flags;
+	struct task_work *twork;
+	struct hlist_node *pos;
+
+	raw_spin_lock_irqsave(&task->pi_lock, flags);
+	hlist_for_each_entry(twork, pos, &task->task_works, hlist) {
+		if (twork->func == func) {
+			hlist_del(&twork->hlist);
+			goto found;
+		}
+	}
+	twork = NULL;
+ found:
+	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
+
+	return twork;
+}
+
+void task_work_run(void)
+{
+	struct task_struct *task = current;
+	struct hlist_head task_works;
+	struct hlist_node *pos;
+
+	raw_spin_lock_irq(&task->pi_lock);
+	hlist_move_list(&task->task_works, &task_works);
+	raw_spin_unlock_irq(&task->pi_lock);
+
+	if (unlikely(hlist_empty(&task_works)))
+		return;
+	/*
+	 * We use hlist to save the space in task_struct, but we want fifo.
+	 * Find the last entry, the list should be short, then process them
+	 * in reverse order.
+	 */
+	for (pos = task_works.first; pos->next; pos = pos->next)
+		;
+
+	for (;;) {
+		struct hlist_node **pprev = pos->pprev;
+		struct task_work *twork = container_of(pos, struct task_work,
+							hlist);
+		twork->func(twork);
+
+		if (pprev == &task_works.first)
+			break;
+		pos = container_of(pprev, struct hlist_node, next);
+	}
+}
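Taken together, the intended usage pattern mirrors the irq_thread() conversion above: queue a work on a task, let it fire on exit (or on return to user mode with notify == true), and cancel it by function pointer if the normal path wins. A minimal hypothetical client follows; my_work, my_func, and the helpers are made-up names for illustration, and the pr_info() message is not from this patch:

#include <linux/sched.h>
#include <linux/task_work.h>

/* Hypothetical client: my_work/my_func are illustration names only. */
static struct task_work my_work;

static void my_func(struct task_work *twork)
{
	/*
	 * Runs in the target task's own context: either from
	 * task_work_run() on the way back to user mode (notify == true)
	 * or from exit_task_work() in do_exit().
	 */
	pr_info("task %d: deferred work runs\n", current->pid);
}

static int my_queue_on(struct task_struct *task)
{
	init_task_work(&my_work, my_func, NULL);
	/*
	 * notify == false: no TIF_NOTIFY_RESUME is requested, so the
	 * callback may not run until the task exits. Returns -ESRCH
	 * if the task has already passed exit_task_work().
	 */
	return task_work_add(task, &my_work, false);
}

static void my_cancel_on(struct task_struct *task)
{
	/* Dequeue by function pointer if the work has not yet run. */
	task_work_cancel(task, my_func);
}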