Diffstat (limited to 'kernel/exit.c')
-rw-r--r--	kernel/exit.c	124
1 files changed, 55 insertions, 69 deletions
diff --git a/kernel/exit.c b/kernel/exit.c
index 20a406471525..9ee58bb9e60f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -85,7 +85,6 @@ static void __exit_signal(struct task_struct *tsk)
 	struct tty_struct *uninitialized_var(tty);
 
 	sighand = rcu_dereference_check(tsk->sighand,
-					rcu_read_lock_held() ||
 					lockdep_tasklist_lock_is_held());
 	spin_lock(&sighand->siglock);
 
@@ -169,7 +168,6 @@ void release_task(struct task_struct * p)
 	struct task_struct *leader;
 	int zap_leader;
 repeat:
-	tracehook_prepare_release_task(p);
 	/* don't need to get the RCU readlock here - the process is dead and
 	 * can't be modifying its own credentials. But shut RCU-lockdep up */
 	rcu_read_lock();
@@ -179,7 +177,7 @@ repeat:
 	proc_flush_task(p);
 
 	write_lock_irq(&tasklist_lock);
-	tracehook_finish_release_task(p);
+	ptrace_release_task(p);
 	__exit_signal(p);
 
 	/*
@@ -190,22 +188,12 @@ repeat:
 	zap_leader = 0;
 	leader = p->group_leader;
 	if (leader != p && thread_group_empty(leader) && leader->exit_state == EXIT_ZOMBIE) {
-		BUG_ON(task_detached(leader));
-		do_notify_parent(leader, leader->exit_signal);
 		/*
 		 * If we were the last child thread and the leader has
 		 * exited already, and the leader's parent ignores SIGCHLD,
 		 * then we are the one who should release the leader.
-		 *
-		 * do_notify_parent() will have marked it self-reaping in
-		 * that case.
-		 */
-		zap_leader = task_detached(leader);
-
-		/*
-		 * This maintains the invariant that release_task()
-		 * only runs on a task in EXIT_DEAD, just for sanity.
 		 */
+		zap_leader = do_notify_parent(leader, leader->exit_signal);
 		if (zap_leader)
 			leader->exit_state = EXIT_DEAD;
 	}
@@ -277,18 +265,16 @@ int is_current_pgrp_orphaned(void)
 	return retval;
 }
 
-static int has_stopped_jobs(struct pid *pgrp)
+static bool has_stopped_jobs(struct pid *pgrp)
 {
-	int retval = 0;
 	struct task_struct *p;
 
 	do_each_pid_task(pgrp, PIDTYPE_PGID, p) {
-		if (!task_is_stopped(p))
-			continue;
-		retval = 1;
-		break;
+		if (p->signal->flags & SIGNAL_STOP_STOPPED)
+			return true;
 	} while_each_pid_task(pgrp, PIDTYPE_PGID, p);
-	return retval;
+
+	return false;
 }
 
 /*
@@ -561,29 +547,28 @@ void exit_files(struct task_struct *tsk)
 
 #ifdef CONFIG_MM_OWNER
 /*
- * Task p is exiting and it owned mm, lets find a new owner for it
+ * A task is exiting.  If it owned this mm, find a new owner for the mm.
  */
-static inline int
-mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
-{
-	/*
-	 * If there are other users of the mm and the owner (us) is exiting
-	 * we need to find a new owner to take on the responsibility.
-	 */
-	if (atomic_read(&mm->mm_users) <= 1)
-		return 0;
-	if (mm->owner != p)
-		return 0;
-	return 1;
-}
-
 void mm_update_next_owner(struct mm_struct *mm)
 {
 	struct task_struct *c, *g, *p = current;
 
 retry:
-	if (!mm_need_new_owner(mm, p))
+	/*
+	 * If the exiting or execing task is not the owner, it's
+	 * someone else's problem.
+	 */
+	if (mm->owner != p)
 		return;
+	/*
+	 * The current owner is exiting/execing and there are no other
+	 * candidates.  Do not leave the mm pointing to a possibly
+	 * freed task structure.
+	 */
+	if (atomic_read(&mm->mm_users) <= 1) {
+		mm->owner = NULL;
+		return;
+	}
 
 	read_lock(&tasklist_lock);
 	/*
@@ -752,7 +737,7 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
 {
 	list_move_tail(&p->sibling, &p->real_parent->children);
 
-	if (task_detached(p))
+	if (p->exit_state == EXIT_DEAD)
 		return;
 	/*
 	 * If this is a threaded reparent there is no need to
@@ -765,10 +750,9 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
 		p->exit_signal = SIGCHLD;
 
 	/* If it has exited notify the new parent about this child's death. */
-	if (!task_ptrace(p) &&
+	if (!p->ptrace &&
 	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
-		do_notify_parent(p, p->exit_signal);
-		if (task_detached(p)) {
+		if (do_notify_parent(p, p->exit_signal)) {
 			p->exit_state = EXIT_DEAD;
 			list_move_tail(&p->sibling, dead);
 		}
@@ -795,7 +779,7 @@ static void forget_original_parent(struct task_struct *father)
 		do {
 			t->real_parent = reaper;
 			if (t->parent == father) {
-				BUG_ON(task_ptrace(t));
+				BUG_ON(t->ptrace);
 				t->parent = t->real_parent;
 			}
 			if (t->pdeath_signal)
@@ -820,8 +804,7 @@ static void forget_original_parent(struct task_struct *father)
  */
 static void exit_notify(struct task_struct *tsk, int group_dead)
 {
-	int signal;
-	void *cookie;
+	bool autoreap;
 
 	/*
 	 * This does two things:
@@ -852,26 +835,33 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 	 * we have changed execution domain as these two values started
 	 * the same after a fork.
 	 */
-	if (tsk->exit_signal != SIGCHLD && !task_detached(tsk) &&
+	if (thread_group_leader(tsk) && tsk->exit_signal != SIGCHLD &&
 	    (tsk->parent_exec_id != tsk->real_parent->self_exec_id ||
 	     tsk->self_exec_id != tsk->parent_exec_id))
 		tsk->exit_signal = SIGCHLD;
 
-	signal = tracehook_notify_death(tsk, &cookie, group_dead);
-	if (signal >= 0)
-		signal = do_notify_parent(tsk, signal);
+	if (unlikely(tsk->ptrace)) {
+		int sig = thread_group_leader(tsk) &&
+				thread_group_empty(tsk) &&
+				!ptrace_reparented(tsk) ?
+			tsk->exit_signal : SIGCHLD;
+		autoreap = do_notify_parent(tsk, sig);
+	} else if (thread_group_leader(tsk)) {
+		autoreap = thread_group_empty(tsk) &&
+			do_notify_parent(tsk, tsk->exit_signal);
+	} else {
+		autoreap = true;
+	}
 
-	tsk->exit_state = signal == DEATH_REAP ? EXIT_DEAD : EXIT_ZOMBIE;
+	tsk->exit_state = autoreap ? EXIT_DEAD : EXIT_ZOMBIE;
 
 	/* mt-exec, de_thread() is waiting for group leader */
 	if (unlikely(tsk->signal->notify_count < 0))
 		wake_up_process(tsk->signal->group_exit_task);
 	write_unlock_irq(&tasklist_lock);
 
-	tracehook_report_death(tsk, signal, cookie, group_dead);
-
 	/* If the process is dead, release it - nobody will wait for it */
-	if (signal == DEATH_REAP)
+	if (autoreap)
 		release_task(tsk);
 }
 
@@ -907,7 +897,6 @@ NORET_TYPE void do_exit(long code)
 
 	profile_task_exit(tsk);
 
-	WARN_ON(atomic_read(&tsk->fs_excl));
 	WARN_ON(blk_needs_flush_plug(tsk));
 
 	if (unlikely(in_interrupt()))
@@ -924,7 +913,7 @@ NORET_TYPE void do_exit(long code)
 	 */
 	set_fs(USER_DS);
 
-	tracehook_report_exit(&code);
+	ptrace_event(PTRACE_EVENT_EXIT, code);
 
 	validate_creds_for_do_exit(tsk);
 
@@ -1236,9 +1225,9 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 	traced = ptrace_reparented(p);
 	/*
 	 * It can be ptraced but not reparented, check
-	 * !task_detached() to filter out sub-threads.
+	 * thread_group_leader() to filter out sub-threads.
 	 */
-	if (likely(!traced) && likely(!task_detached(p))) {
+	if (likely(!traced) && thread_group_leader(p)) {
 		struct signal_struct *psig;
 		struct signal_struct *sig;
 		unsigned long maxrss;
@@ -1346,16 +1335,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 		/* We dropped tasklist, ptracer could die and untrace */
 		ptrace_unlink(p);
 		/*
-		 * If this is not a detached task, notify the parent.
-		 * If it's still not detached after that, don't release
-		 * it now.
+		 * If this is not a sub-thread, notify the parent.
+		 * If parent wants a zombie, don't release it now.
 		 */
-		if (!task_detached(p)) {
-			do_notify_parent(p, p->exit_signal);
-			if (!task_detached(p)) {
-				p->exit_state = EXIT_ZOMBIE;
-				p = NULL;
-			}
+		if (thread_group_leader(p) &&
+		    !do_notify_parent(p, p->exit_signal)) {
+			p->exit_state = EXIT_ZOMBIE;
+			p = NULL;
 		}
 		write_unlock_irq(&tasklist_lock);
 	}
@@ -1368,7 +1354,8 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 static int *task_stopped_code(struct task_struct *p, bool ptrace)
 {
 	if (ptrace) {
-		if (task_is_stopped_or_traced(p))
+		if (task_is_stopped_or_traced(p) &&
+		    !(p->jobctl & JOBCTL_LISTENING))
 			return &p->exit_code;
 	} else {
 		if (p->signal->flags & SIGNAL_STOP_STOPPED)
@@ -1564,7 +1551,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 		 * Notification and reaping will be cascaded to the real
 		 * parent when the ptracer detaches.
 		 */
-		if (likely(!ptrace) && unlikely(task_ptrace(p))) {
+		if (likely(!ptrace) && unlikely(p->ptrace)) {
 			/* it will become visible, clear notask_error */
 			wo->notask_error = 0;
 			return 0;
@@ -1607,8 +1594,7 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
 	 * own children, it should create a separate process which
 	 * takes the role of real parent.
 	 */
-	if (likely(!ptrace) && task_ptrace(p) &&
-	    same_thread_group(p->parent, p->real_parent))
+	if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
 		return 0;
 
 	/*