author    Al Viro <viro@zeniv.linux.org.uk>  2012-06-27 03:31:24 -0400
committer Al Viro <viro@zeniv.linux.org.uk>  2012-07-22 15:57:57 -0400
commit    ed3e694d78cc75fa79bf29698631b146fd27aa35 (patch)
tree      97dad6768546b489a3d09c42cef0738b57f4822e /kernel
parent    67d1214551e800f9fe7dc7c47a346d2df0fafed5 (diff)
move exit_task_work() past exit_files() et.al.
... and get rid of PF_EXITING check in task_work_add().

Signed-off-by: Al Viro <viro@zeniv.linux.org.uk>
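With the PF_EXITING check removed, it is now up to callers of task_work_add() to guarantee that the target task has not yet passed exit_task_work(). A minimal caller-side sketch under that contract follows; my_ctx, my_cleanup and queue_cleanup are invented names for illustration, only struct callback_head and task_work_add() come from this tree:

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/task_work.h>

struct my_ctx {				/* hypothetical payload */
	struct callback_head cb;
	int cookie;
};

static void my_cleanup(struct callback_head *head)
{
	/* invoked via task_work_run() in the target task's context */
	kfree(container_of(head, struct my_ctx, cb));
}

static int queue_cleanup(struct task_struct *task, struct my_ctx *ctx)
{
	ctx->cb.func = my_cleanup;	/* set the deferred callback */
	/* caller must ensure @task has not passed exit_task_work() */
	return task_work_add(task, &ctx->cb, true);	/* true: kick TIF_NOTIFY_RESUME */
}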
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/exit.c       6
-rw-r--r--  kernel/task_work.c  30
2 files changed, 13 insertions, 23 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 2f59cc334516..d17f6c4ddfa9 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -953,14 +953,11 @@ void do_exit(long code)
 	exit_signals(tsk);  /* sets PF_EXITING */
 	/*
 	 * tsk->flags are checked in the futex code to protect against
-	 * an exiting task cleaning up the robust pi futexes, and in
-	 * task_work_add() to avoid the race with exit_task_work().
+	 * an exiting task cleaning up the robust pi futexes.
 	 */
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
-	exit_task_work(tsk);
-
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
 				current->comm, task_pid_nr(current),
@@ -995,6 +992,7 @@ void do_exit(long code)
 	exit_shm(tsk);
 	exit_files(tsk);
 	exit_fs(tsk);
+	exit_task_work(tsk);
 	check_stack_usage();
 	exit_thread();
 
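The net effect on do_exit() is that the work queue is drained after the file and fs teardown instead of before it, so work queued during those steps still runs. Condensed, the relevant tail of do_exit() now reads roughly:

	exit_shm(tsk);
	exit_files(tsk);
	exit_fs(tsk);
	exit_task_work(tsk);	/* moved here: runs after files/fs are torn down */
	check_stack_usage();
	exit_thread();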
diff --git a/kernel/task_work.c b/kernel/task_work.c
index 76266fb665dc..fb396089f66a 100644
--- a/kernel/task_work.c
+++ b/kernel/task_work.c
@@ -5,34 +5,26 @@
 int
 task_work_add(struct task_struct *task, struct callback_head *twork, bool notify)
 {
+	struct callback_head *last, *first;
 	unsigned long flags;
-	int err = -ESRCH;
 
-#ifndef TIF_NOTIFY_RESUME
-	if (notify)
-		return -ENOTSUPP;
-#endif
 	/*
-	 * We must not insert the new work if the task has already passed
-	 * exit_task_work(). We rely on do_exit()->raw_spin_unlock_wait()
-	 * and check PF_EXITING under pi_lock.
+	 * Not inserting the new work if the task has already passed
+	 * exit_task_work() is the responsibility of callers.
 	 */
 	raw_spin_lock_irqsave(&task->pi_lock, flags);
-	if (likely(!(task->flags & PF_EXITING))) {
-		struct callback_head *last = task->task_works;
-		struct callback_head *first = last ? last->next : twork;
-		twork->next = first;
-		if (last)
-			last->next = twork;
-		task->task_works = twork;
-		err = 0;
-	}
+	last = task->task_works;
+	first = last ? last->next : twork;
+	twork->next = first;
+	if (last)
+		last->next = twork;
+	task->task_works = twork;
 	raw_spin_unlock_irqrestore(&task->pi_lock, flags);
 
 	/* test_and_set_bit() implies mb(), see tracehook_notify_resume(). */
-	if (likely(!err) && notify)
+	if (notify)
 		set_notify_resume(task);
-	return err;
+	return 0;
 }
 
 struct callback_head *
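The rewritten insertion relies on the list discipline of task->task_works: it points at the most recently added callback, whose ->next closes the circle back to the oldest one, so both ends of the queue are reachable from a single pointer. A standalone userspace sketch of the same insertion (locking elided; names mirror the kernel's, but this is an illustration, not kernel code):

#include <stdio.h>

struct callback_head {
	struct callback_head *next;
	void (*func)(struct callback_head *);
};

/* stand-in for task_struct::task_works: points at the LAST entry */
static struct callback_head *task_works;

static void add(struct callback_head *twork)
{
	struct callback_head *last = task_works;
	struct callback_head *first = last ? last->next : twork;

	twork->next = first;		/* new tail links back to the head */
	if (last)
		last->next = twork;	/* old tail now points at the new one */
	task_works = twork;		/* the new entry becomes the tail */
}

int main(void)
{
	struct callback_head a, b, c;
	struct callback_head *head, *p;

	add(&a);
	add(&b);
	add(&c);

	/* walk once around the circle, oldest first: prints a b c */
	head = task_works->next;
	p = head;
	do {
		printf("%c\n", p == &a ? 'a' : p == &b ? 'b' : 'c');
		p = p->next;
	} while (p != head);
	return 0;
}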