Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c | 110
1 file changed, 52 insertions(+), 58 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 6480d1c85d7a..6ed6a1d552b5 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -570,7 +570,7 @@ static void reparent_leader(struct task_struct *father, struct task_struct *p,
         if (same_thread_group(p->real_parent, father))
                 return;
 
         /* We don't want people slaying init. */
         p->exit_signal = SIGCHLD;
 
         /* If it has exited notify the new parent about this child's death. */
@@ -784,9 +784,10 @@ void do_exit(long code)
         exit_shm(tsk);
         exit_files(tsk);
         exit_fs(tsk);
+        if (group_dead)
+                disassociate_ctty(1);
         exit_task_namespaces(tsk);
         exit_task_work(tsk);
-        check_stack_usage();
         exit_thread();
 
         /*
@@ -799,19 +800,15 @@ void do_exit(long code)
 
         cgroup_exit(tsk);
 
-        if (group_dead)
-                disassociate_ctty(1);
-
         module_put(task_thread_info(tsk)->exec_domain->module);
 
-        proc_exit_connector(tsk);
-
         /*
          * FIXME: do that only when needed, using sched_exit tracepoint
          */
         flush_ptrace_hw_breakpoint(tsk);
 
         exit_notify(tsk, group_dead);
+        proc_exit_connector(tsk);
 #ifdef CONFIG_NUMA
         task_lock(tsk);
         mpol_put(tsk->mempolicy);
@@ -844,6 +841,7 @@ void do_exit(long code)
 
         validate_creds_for_do_exit(tsk);
 
+        check_stack_usage();
         preempt_disable();
         if (tsk->nr_dirtied)
                 __this_cpu_add(dirty_throttle_leaks, tsk->nr_dirtied);
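The three do_exit() hunks above only reorder existing teardown calls: disassociate_ctty() moves up ahead of exit_task_namespaces(), proc_exit_connector() moves after exit_notify(), and check_stack_usage() moves down next to preempt_disable(). Purely as an illustration (not part of the diff), the user-space stub below prints the resulting call order; every name is a stand-in for the kernel function of the same name.

/* Illustration only: call order in do_exit() after the hunks above.
 * The stubs just print the names of the kernel functions they mimic;
 * unchanged code between the hunks is elided with comments.
 */
#include <stdio.h>

static void call(const char *fn) { printf("%s\n", fn); }

int main(void)
{
        int group_dead = 1;     /* assume the last thread in the group */

        call("exit_shm");
        call("exit_files");
        call("exit_fs");
        if (group_dead)
                call("disassociate_ctty");      /* now before exit_task_namespaces() */
        call("exit_task_namespaces");
        call("exit_task_work");
        call("exit_thread");
        /* ... unchanged code elided ... */
        call("exit_notify");
        call("proc_exit_connector");            /* now after exit_notify() */
        /* ... unchanged code elided ... */
        call("check_stack_usage");              /* now just before preempt_disable() */
        call("preempt_disable");
        return 0;
}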
@@ -1038,17 +1036,13 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
                 return wait_noreap_copyout(wo, p, pid, uid, why, status);
         }
 
+        traced = ptrace_reparented(p);
         /*
-         * Try to move the task's state to DEAD
-         * only one thread is allowed to do this:
+         * Move the task's state to DEAD/TRACE, only one thread can do this.
          */
-        state = xchg(&p->exit_state, EXIT_DEAD);
-        if (state != EXIT_ZOMBIE) {
-                BUG_ON(state != EXIT_DEAD);
+        state = traced && thread_group_leader(p) ? EXIT_TRACE : EXIT_DEAD;
+        if (cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE)
                 return 0;
-        }
-
-        traced = ptrace_reparented(p);
         /*
          * It can be ptraced but not reparented, check
          * thread_group_leader() to filter out sub-threads.
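Not part of the diff: a small user-space sketch of the claim pattern this hunk introduces. Several waiters can race for the same zombie, but the compare-and-swap lets exactly one of them move exit_state away from EXIT_ZOMBIE (the kernel picks EXIT_TRACE for a ptrace-reparented group leader, EXIT_DEAD otherwise); everyone else sees the exchange fail and returns 0. The enum values, thread count, and names below are invented for the demo, and C11 atomics stand in for the kernel's cmpxchg().

/* Demo of the one-winner compare-and-swap claim; build with: cc -pthread demo.c */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

enum { EXIT_ZOMBIE = 1, EXIT_DEAD = 2, EXIT_TRACE = 3 }; /* demo values only */

static _Atomic int exit_state = EXIT_ZOMBIE;

static void *reaper(void *arg)
{
        int expected = EXIT_ZOMBIE;

        /* Mirrors: cmpxchg(&p->exit_state, EXIT_ZOMBIE, state) != EXIT_ZOMBIE */
        if (atomic_compare_exchange_strong(&exit_state, &expected, EXIT_DEAD))
                printf("waiter %ld claimed the zombie\n", (long)arg);
        else
                printf("waiter %ld lost the race, returns 0\n", (long)arg);
        return NULL;
}

int main(void)
{
        pthread_t t[4];

        for (long i = 0; i < 4; i++)
                pthread_create(&t[i], NULL, reaper, (void *)i);
        for (int i = 0; i < 4; i++)
                pthread_join(t[i], NULL);
        return 0;       /* exactly one "claimed" line is printed */
}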
@@ -1109,7 +1103,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 
         /*
          * Now we are sure this task is interesting, and no other
-         * thread can reap it because we set its state to EXIT_DEAD.
+         * thread can reap it because we its state == DEAD/TRACE.
          */
         read_unlock(&tasklist_lock);
 
@@ -1146,22 +1140,19 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
         if (!retval)
                 retval = pid;
 
-        if (traced) {
+        if (state == EXIT_TRACE) {
                 write_lock_irq(&tasklist_lock);
                 /* We dropped tasklist, ptracer could die and untrace */
                 ptrace_unlink(p);
-                /*
-                 * If this is not a sub-thread, notify the parent.
-                 * If parent wants a zombie, don't release it now.
-                 */
-                if (thread_group_leader(p) &&
-                    !do_notify_parent(p, p->exit_signal)) {
-                        p->exit_state = EXIT_ZOMBIE;
-                        p = NULL;
-                }
+
+                /* If parent wants a zombie, don't release it now */
+                state = EXIT_ZOMBIE;
+                if (do_notify_parent(p, p->exit_signal))
+                        state = EXIT_DEAD;
+                p->exit_state = state;
                 write_unlock_irq(&tasklist_lock);
         }
-        if (p != NULL)
+        if (state == EXIT_DEAD)
                 release_task(p);
 
         return retval;
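Not part of the diff: a stub sketch of the EXIT_TRACE branch above. Once the tracer has collected the status and ptrace_unlink() has run, the task goes back to EXIT_ZOMBIE for the real parent, unless do_notify_parent() reports that the parent does not want a zombie, in which case the state becomes EXIT_DEAD and release_task() frees it. do_notify_parent_stub() and the enum values are invented stand-ins.

#include <stdbool.h>
#include <stdio.h>

enum exit_state { EXIT_ZOMBIE = 1, EXIT_DEAD = 2, EXIT_TRACE = 3 }; /* demo values */

/* Stand-in for do_notify_parent(): true means the parent does not want
 * a zombie, so the child can be released right away. */
static bool do_notify_parent_stub(bool parent_wants_zombie)
{
        return !parent_wants_zombie;
}

static enum exit_state settle_traced_zombie(bool parent_wants_zombie)
{
        enum exit_state state = EXIT_ZOMBIE;    /* hand it back to the real parent */

        if (do_notify_parent_stub(parent_wants_zombie))
                state = EXIT_DEAD;              /* nobody will wait: release_task() */
        return state;
}

int main(void)
{
        printf("parent reaps it later: state=%d\n", settle_traced_zombie(true));
        printf("parent auto-reaps:     state=%d\n", settle_traced_zombie(false));
        return 0;
}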
@@ -1338,7 +1329,12 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 static int wait_consider_task(struct wait_opts *wo, int ptrace,
                                 struct task_struct *p)
 {
-        int ret = eligible_child(wo, p);
+        int ret;
+
+        if (unlikely(p->exit_state == EXIT_DEAD))
+                return 0;
+
+        ret = eligible_child(wo, p);
         if (!ret)
                 return ret;
 
@@ -1356,33 +1352,44 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
                 return 0;
         }
 
-        /* dead body doesn't have much to contribute */
-        if (unlikely(p->exit_state == EXIT_DEAD)) {
+        if (unlikely(p->exit_state == EXIT_TRACE)) {
                 /*
-                 * But do not ignore this task until the tracer does
-                 * wait_task_zombie()->do_notify_parent().
+                 * ptrace == 0 means we are the natural parent. In this case
+                 * we should clear notask_error, debugger will notify us.
                  */
-                if (likely(!ptrace) && unlikely(ptrace_reparented(p)))
+                if (likely(!ptrace))
                         wo->notask_error = 0;
                 return 0;
         }
 
-        /* slay zombie? */
-        if (p->exit_state == EXIT_ZOMBIE) {
+        if (likely(!ptrace) && unlikely(p->ptrace)) {
                 /*
-                 * A zombie ptracee is only visible to its ptracer.
-                 * Notification and reaping will be cascaded to the real
-                 * parent when the ptracer detaches.
+                 * If it is traced by its real parent's group, just pretend
+                 * the caller is ptrace_do_wait() and reap this child if it
+                 * is zombie.
+                 *
+                 * This also hides group stop state from real parent; otherwise
+                 * a single stop can be reported twice as group and ptrace stop.
+                 * If a ptracer wants to distinguish these two events for its
+                 * own children it should create a separate process which takes
+                 * the role of real parent.
                  */
-                if (likely(!ptrace) && unlikely(p->ptrace)) {
-                        /* it will become visible, clear notask_error */
-                        wo->notask_error = 0;
-                        return 0;
-                }
+                if (!ptrace_reparented(p))
+                        ptrace = 1;
+        }
 
+        /* slay zombie? */
+        if (p->exit_state == EXIT_ZOMBIE) {
                 /* we don't reap group leaders with subthreads */
-                if (!delay_group_leader(p))
-                        return wait_task_zombie(wo, p);
+                if (!delay_group_leader(p)) {
+                        /*
+                         * A zombie ptracee is only visible to its ptracer.
+                         * Notification and reaping will be cascaded to the
+                         * real parent when the ptracer detaches.
+                         */
+                        if (unlikely(ptrace) || likely(!p->ptrace))
+                                return wait_task_zombie(wo, p);
+                }
 
                 /*
                  * Allow access to stopped/continued state via zombie by
@@ -1408,19 +1415,6 @@ static int wait_consider_task(struct wait_opts *wo, int ptrace,
                 wo->notask_error = 0;
         } else {
                 /*
-                 * If @p is ptraced by a task in its real parent's group,
-                 * hide group stop/continued state when looking at @p as
-                 * the real parent; otherwise, a single stop can be
-                 * reported twice as group and ptrace stops.
-                 *
-                 * If a ptracer wants to distinguish the two events for its
-                 * own children, it should create a separate process which
-                 * takes the role of real parent.
-                 */
-                if (likely(!ptrace) && p->ptrace && !ptrace_reparented(p))
-                        return 0;
-
-                /*
                  * @p is alive and it's gonna stop, continue or exit, so
                  * there always is something to wait for.
                  */
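Not part of the diff: a compilable user-space sketch of the early decisions the rewritten wait_consider_task() makes after these hunks. struct demo_task and its fields are invented stand-ins for the real task_struct checks, and everything past the zombie check (stopped/continued handling) is left out.

#include <stdbool.h>
#include <stdio.h>

enum exit_state { EXIT_ALIVE = 0, EXIT_ZOMBIE, EXIT_DEAD, EXIT_TRACE }; /* demo values */

struct demo_task {
        enum exit_state exit_state;
        bool            traced;                 /* p->ptrace != 0 */
        bool            traced_by_parent_group; /* !ptrace_reparented(p) */
        bool            delay_group_leader;     /* group leader with live subthreads */
};

/* Returns true when the caller would go on to wait_task_zombie(). */
static bool consider(struct demo_task *p, int ptrace, int *notask_error)
{
        if (p->exit_state == EXIT_DEAD)
                return false;                   /* already claimed, ignore it */

        if (p->exit_state == EXIT_TRACE) {
                if (!ptrace)
                        *notask_error = 0;      /* natural parent: debugger will notify us */
                return false;
        }

        /* Natural parent of a child traced within its own group: act as the
         * ptracer and reap the zombie directly. */
        if (!ptrace && p->traced && p->traced_by_parent_group)
                ptrace = 1;

        if (p->exit_state == EXIT_ZOMBIE && !p->delay_group_leader)
                return ptrace || !p->traced;    /* zombie ptracee visible only to ptracer */

        return false;                           /* stopped/continued handling elided */
}

int main(void)
{
        struct demo_task p = { .exit_state = EXIT_ZOMBIE };
        int notask_error = 1;

        printf("plain zombie reaped: %d\n", consider(&p, 0, &notask_error));
        p.exit_state = EXIT_TRACE;
        printf("EXIT_TRACE skipped:  %d (notask_error=%d)\n",
               consider(&p, 0, &notask_error), notask_error);
        return 0;
}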