Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c | 40 +++++++++++++++-------------------------
1 file changed, 15 insertions(+), 25 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 019a2843bf95..ceffc67b564a 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -58,11 +58,11 @@
 
 static void exit_mm(struct task_struct * tsk);
 
-static void __unhash_process(struct task_struct *p)
+static void __unhash_process(struct task_struct *p, bool group_dead)
 {
 	nr_threads--;
 	detach_pid(p, PIDTYPE_PID);
-	if (thread_group_leader(p)) {
+	if (group_dead) {
 		detach_pid(p, PIDTYPE_PGID);
 		detach_pid(p, PIDTYPE_SID);
 
@@ -79,10 +79,9 @@ static void __unhash_process(struct task_struct *p)
 static void __exit_signal(struct task_struct *tsk)
 {
 	struct signal_struct *sig = tsk->signal;
+	bool group_dead = thread_group_leader(tsk);
 	struct sighand_struct *sighand;
-
-	BUG_ON(!sig);
-	BUG_ON(!atomic_read(&sig->count));
+	struct tty_struct *uninitialized_var(tty);
 
 	sighand = rcu_dereference_check(tsk->sighand,
 					rcu_read_lock_held() ||
@@ -90,14 +89,16 @@ static void __exit_signal(struct task_struct *tsk)
 	spin_lock(&sighand->siglock);
 
 	posix_cpu_timers_exit(tsk);
-	if (atomic_dec_and_test(&sig->count))
+	if (group_dead) {
 		posix_cpu_timers_exit_group(tsk);
-	else {
+		tty = sig->tty;
+		sig->tty = NULL;
+	} else {
 		/*
 		 * If there is any task waiting for the group exit
 		 * then notify it:
 		 */
-		if (sig->group_exit_task && atomic_read(&sig->count) == sig->notify_count)
+		if (sig->notify_count > 0 && !--sig->notify_count)
 			wake_up_process(sig->group_exit_task);
 
 		if (tsk == sig->curr_target)
@@ -123,32 +124,24 @@ static void __exit_signal(struct task_struct *tsk)
 		sig->oublock += task_io_get_oublock(tsk);
 		task_io_accounting_add(&sig->ioac, &tsk->ioac);
 		sig->sum_sched_runtime += tsk->se.sum_exec_runtime;
-		sig = NULL; /* Marker for below. */
 	}
 
-	__unhash_process(tsk);
+	sig->nr_threads--;
+	__unhash_process(tsk, group_dead);
 
 	/*
 	 * Do this under ->siglock, we can race with another thread
 	 * doing sigqueue_free() if we have SIGQUEUE_PREALLOC signals.
 	 */
 	flush_sigqueue(&tsk->pending);
-
-	tsk->signal = NULL;
 	tsk->sighand = NULL;
 	spin_unlock(&sighand->siglock);
 
 	__cleanup_sighand(sighand);
 	clear_tsk_thread_flag(tsk,TIF_SIGPENDING);
-	if (sig) {
+	if (group_dead) {
 		flush_sigqueue(&sig->shared_pending);
-		taskstats_tgid_free(sig);
-		/*
-		 * Make sure ->signal can't go away under rq->lock,
-		 * see account_group_exec_runtime().
-		 */
-		task_rq_unlock_wait(tsk);
-		__cleanup_signal(sig);
+		tty_kref_put(tty);
 	}
 }
 
@@ -856,12 +849,9 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 
 	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
 
-	/* mt-exec, de_thread() is waiting for us */
-	if (thread_group_leader(tsk) &&
-	    tsk->signal->group_exit_task &&
-	    tsk->signal->notify_count < 0)
+	/* mt-exec, de_thread() is waiting for group leader */
+	if (unlikely(tsk->signal->notify_count < 0))
 		wake_up_process(tsk->signal->group_exit_task);
-
 	write_unlock_irq(&tasklist_lock);
 
 	tracehook_report_death(tsk, signal, cookie, group_dead);
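
The recurring change in __exit_signal() and exit_notify() above is to stop deciding "last thread in the group" via the atomic sig->count reference counter and instead drive the group-exit wakeup from group_dead and a plain sig->notify_count countdown held under ->siglock: the waiter records how many threads it still has to wait for, each exiting thread decrements the count, and the thread that performs the final decrement wakes sig->group_exit_task. The following is only a rough userspace analogue of that countdown-and-wake pattern, not kernel code: the pthread primitives stand in for ->siglock and wake_up_process(), and the names notify_count, thread_exit_notify and worker are illustrative assumptions, not kernel API.

/*
 * Illustrative userspace sketch of the countdown-and-wake pattern used in
 * the patch above. Build with: cc -pthread sketch.c
 * NOT the kernel implementation; all names here are hypothetical.
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER; /* plays the role of ->siglock */
static pthread_cond_t  done = PTHREAD_COND_INITIALIZER;
static int notify_count;                                  /* threads the waiter still waits for */

/* Called by each exiting thread; analogous to the !--sig->notify_count test. */
static void thread_exit_notify(void)
{
	pthread_mutex_lock(&lock);
	if (notify_count > 0 && --notify_count == 0)
		pthread_cond_signal(&done);               /* "wake_up_process(group_exit_task)" */
	pthread_mutex_unlock(&lock);
}

static void *worker(void *arg)
{
	(void)arg;
	thread_exit_notify();
	return NULL;
}

int main(void)
{
	pthread_t tid[3];
	int i;

	notify_count = 3;                                 /* waiter records how many threads to wait for */
	for (i = 0; i < 3; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	pthread_mutex_lock(&lock);
	while (notify_count > 0)                          /* sleep until the last decrement */
		pthread_cond_wait(&done, &lock);
	pthread_mutex_unlock(&lock);

	for (i = 0; i < 3; i++)
		pthread_join(tid[i], NULL);
	printf("all threads have signalled exit\n");
	return 0;
}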