Diffstat (limited to 'kernel/exit.c')
-rw-r--r--	kernel/exit.c	213
1 file changed, 67 insertions, 146 deletions

diff --git a/kernel/exit.c b/kernel/exit.c
index 167e1e3ad7c6..3bec141c82f6 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -61,11 +61,6 @@ DEFINE_TRACE(sched_process_wait);
 
 static void exit_mm(struct task_struct * tsk);
 
-static inline int task_detached(struct task_struct *p)
-{
-	return p->exit_signal == -1;
-}
-
 static void __unhash_process(struct task_struct *p)
 {
 	nr_threads--;
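
task_detached() does not die here, it just leaves this file: the ptrace-exit
logic that still needs it moves out of kernel/exit.c (see the exit_ptrace()
call introduced below), so the helper presumably moves to a shared header such
as include/linux/sched.h. For reference, the deleted helper in full:

	/* A "detached" task has exit_signal == -1: nobody is notified of
	 * its death and it is expected to reap itself. */
	static inline int task_detached(struct task_struct *p)
	{
		return p->exit_signal == -1;
	}
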
@@ -362,16 +357,12 @@ static void reparent_to_kthreadd(void)
 void __set_special_pids(struct pid *pid)
 {
 	struct task_struct *curr = current->group_leader;
-	pid_t nr = pid_nr(pid);
 
-	if (task_session(curr) != pid) {
+	if (task_session(curr) != pid)
 		change_pid(curr, PIDTYPE_SID, pid);
-		set_task_session(curr, nr);
-	}
-	if (task_pgrp(curr) != pid) {
+
+	if (task_pgrp(curr) != pid)
 		change_pid(curr, PIDTYPE_PGID, pid);
-		set_task_pgrp(curr, nr);
-	}
 }
 
 static void set_special_pids(struct pid *pid)
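
With the cached numeric copies gone (set_task_session()/set_task_pgrp() had
kept pid_t shadows of the ids), the session and process-group numbers are
re-derived on demand from the struct pid links that change_pid() maintains.
A minimal sketch of that derivation; task_session() and pid_nr() are existing
accessors, the helper name itself is made up for illustration:

	/* Hypothetical helper: read the numeric session id straight from
	 * the struct pid link instead of a cached pid_t field. */
	static inline pid_t sketch_session_nr(struct task_struct *tsk)
	{
		return pid_nr(task_session(tsk));	/* id in the init pid ns */
	}

pid_vnr() in place of pid_nr() would give the id as seen from the caller's
own pid namespace.
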
@@ -732,119 +723,6 @@ static void exit_mm(struct task_struct * tsk)
 }
 
 /*
- * Return nonzero if @parent's children should reap themselves.
- *
- * Called with write_lock_irq(&tasklist_lock) held.
- */
-static int ignoring_children(struct task_struct *parent)
-{
-	int ret;
-	struct sighand_struct *psig = parent->sighand;
-	unsigned long flags;
-	spin_lock_irqsave(&psig->siglock, flags);
-	ret = (psig->action[SIGCHLD-1].sa.sa_handler == SIG_IGN ||
-	       (psig->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT));
-	spin_unlock_irqrestore(&psig->siglock, flags);
-	return ret;
-}
-
-/*
- * Detach all tasks we were using ptrace on.
- * Any that need to be release_task'd are put on the @dead list.
- *
- * Called with write_lock(&tasklist_lock) held.
- */
-static void ptrace_exit(struct task_struct *parent, struct list_head *dead)
-{
-	struct task_struct *p, *n;
-	int ign = -1;
-
-	list_for_each_entry_safe(p, n, &parent->ptraced, ptrace_entry) {
-		__ptrace_unlink(p);
-
-		if (p->exit_state != EXIT_ZOMBIE)
-			continue;
-
-		/*
-		 * If it's a zombie, our attachedness prevented normal
-		 * parent notification or self-reaping.  Do notification
-		 * now if it would have happened earlier.  If it should
-		 * reap itself, add it to the @dead list.  We can't call
-		 * release_task() here because we already hold tasklist_lock.
-		 *
-		 * If it's our own child, there is no notification to do.
-		 * But if our normal children self-reap, then this child
-		 * was prevented by ptrace and we must reap it now.
-		 */
-		if (!task_detached(p) && thread_group_empty(p)) {
-			if (!same_thread_group(p->real_parent, parent))
-				do_notify_parent(p, p->exit_signal);
-			else {
-				if (ign < 0)
-					ign = ignoring_children(parent);
-				if (ign)
-					p->exit_signal = -1;
-			}
-		}
-
-		if (task_detached(p)) {
-			/*
-			 * Mark it as in the process of being reaped.
-			 */
-			p->exit_state = EXIT_DEAD;
-			list_add(&p->ptrace_entry, dead);
-		}
-	}
-}
-
-/*
- * Finish up exit-time ptrace cleanup.
- *
- * Called without locks.
- */
-static void ptrace_exit_finish(struct task_struct *parent,
-			       struct list_head *dead)
-{
-	struct task_struct *p, *n;
-
-	BUG_ON(!list_empty(&parent->ptraced));
-
-	list_for_each_entry_safe(p, n, dead, ptrace_entry) {
-		list_del_init(&p->ptrace_entry);
-		release_task(p);
-	}
-}
-
-static void reparent_thread(struct task_struct *p, struct task_struct *father)
-{
-	if (p->pdeath_signal)
-		/* We already hold the tasklist_lock here. */
-		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
-
-	list_move_tail(&p->sibling, &p->real_parent->children);
-
-	/* If this is a threaded reparent there is no need to
-	 * notify anyone anything has happened.
-	 */
-	if (same_thread_group(p->real_parent, father))
-		return;
-
-	/* We don't want people slaying init. */
-	if (!task_detached(p))
-		p->exit_signal = SIGCHLD;
-
-	/* If we'd notified the old parent about this child's death,
-	 * also notify the new parent.
-	 */
-	if (!ptrace_reparented(p) &&
-	    p->exit_state == EXIT_ZOMBIE &&
-	    !task_detached(p) && thread_group_empty(p))
-		do_notify_parent(p, p->exit_signal);
-
-	kill_orphaned_pgrp(p, father);
-}
-
-/*
  * When we die, we re-parent all our children.
  * Try to give them to another thread in our thread
  * group, and if no such member exists, give it to
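
Nothing in the block deleted above is lost outright. The ptrace half
(ignoring_children(), ptrace_exit(), ptrace_exit_finish()) reappears as
exit_ptrace(), called from forget_original_parent() below and presumably
living with the rest of the ptrace code in kernel/ptrace.c; the reparenting
half returns as the rewritten reparent_thread() in the next hunk. The moved
code still needs the "should children reap themselves?" test, so it is worth
restating; a sketch assuming it keeps essentially the deleted form (taking a
sighand_struct directly is a guess):

	/* Children reap themselves when the parent either ignores SIGCHLD
	 * outright or sets SA_NOCLDWAIT; POSIX specifies both behaviours. */
	static int ignoring_children(struct sighand_struct *sigh)
	{
		int ret;

		spin_lock(&sigh->siglock);
		ret = (sigh->action[SIGCHLD-1].sa.sa_handler == SIG_IGN) ||
		      (sigh->action[SIGCHLD-1].sa.sa_flags & SA_NOCLDWAIT);
		spin_unlock(&sigh->siglock);
		return ret;
	}
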
@@ -883,17 +761,51 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
 	return pid_ns->child_reaper;
 }
 
+/*
+ * Any that need to be release_task'd are put on the @dead list.
+ */
+static void reparent_thread(struct task_struct *father, struct task_struct *p,
+				struct list_head *dead)
+{
+	if (p->pdeath_signal)
+		group_send_sig_info(p->pdeath_signal, SEND_SIG_NOINFO, p);
+
+	list_move_tail(&p->sibling, &p->real_parent->children);
+
+	if (task_detached(p))
+		return;
+	/*
+	 * If this is a threaded reparent there is no need to
+	 * notify anyone anything has happened.
+	 */
+	if (same_thread_group(p->real_parent, father))
+		return;
+
+	/* We don't want people slaying init. */
+	p->exit_signal = SIGCHLD;
+
+	/* If it has exited notify the new parent about this child's death. */
+	if (!p->ptrace &&
+	    p->exit_state == EXIT_ZOMBIE && thread_group_empty(p)) {
+		do_notify_parent(p, p->exit_signal);
+		if (task_detached(p)) {
+			p->exit_state = EXIT_DEAD;
+			list_move_tail(&p->sibling, dead);
+		}
+	}
+
+	kill_orphaned_pgrp(p, father);
+}
+
 static void forget_original_parent(struct task_struct *father)
 {
 	struct task_struct *p, *n, *reaper;
-	LIST_HEAD(ptrace_dead);
+	LIST_HEAD(dead_children);
+
+	exit_ptrace(father);
 
 	write_lock_irq(&tasklist_lock);
 	reaper = find_new_reaper(father);
-	/*
-	 * First clean up ptrace if we were using it.
-	 */
-	ptrace_exit(father, &ptrace_dead);
 
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
 		p->real_parent = reaper;
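
Note the ordering in the new forget_original_parent(): exit_ptrace(father) is
called before write_lock_irq(&tasklist_lock) because, unlike the deleted
ptrace_exit(), it takes and drops tasklist_lock on its own and reaps
self-detaching tracees only after unlocking (release_task() re-acquires
tasklist_lock, so reaping under the lock would deadlock). A sketch of the
shape it presumably has, reconstructed from the code deleted above;
__ptrace_detach() returning "must be reaped" is an assumption:

	void exit_ptrace(struct task_struct *tracer)
	{
		struct task_struct *p, *n;
		LIST_HEAD(ptrace_dead);

		write_lock_irq(&tasklist_lock);
		list_for_each_entry_safe(p, n, &tracer->ptraced, ptrace_entry) {
			if (__ptrace_detach(tracer, p))	/* needs reaping? */
				list_add(&p->ptrace_entry, &ptrace_dead);
		}
		write_unlock_irq(&tasklist_lock);

		/* Reap with the lock dropped: release_task() retakes it. */
		list_for_each_entry_safe(p, n, &ptrace_dead, ptrace_entry) {
			list_del_init(&p->ptrace_entry);
			release_task(p);
		}
	}
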
@@ -901,13 +813,16 @@ static void forget_original_parent(struct task_struct *father)
 			BUG_ON(p->ptrace);
 			p->parent = p->real_parent;
 		}
-		reparent_thread(p, father);
+		reparent_thread(father, p, &dead_children);
 	}
-
 	write_unlock_irq(&tasklist_lock);
+
 	BUG_ON(!list_empty(&father->children));
 
-	ptrace_exit_finish(father, &ptrace_dead);
+	list_for_each_entry_safe(p, n, &dead_children, sibling) {
+		list_del_init(&p->sibling);
+		release_task(p);
+	}
 }
 
 /*
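
Two details above are easy to miss. First, reparent_thread() rechecks
task_detached(p) right after do_notify_parent(): in this code base the
notification path itself flips the child to self-reaping (exit_signal = -1)
when the new parent ignores SIGCHLD, and the caller then owns putting it on
@dead. Second, forget_original_parent() now uses the same deferred-release
idiom the old ptrace path used, keyed on p->sibling instead of a ptrace list.
In isolation the idiom looks like this (a sketch of the pattern, not new API):

	LIST_HEAD(dead);

	write_lock_irq(&tasklist_lock);
	/* ... decide who dies: list_move_tail(&p->sibling, &dead) ... */
	write_unlock_irq(&tasklist_lock);

	/* Tear down only after unlocking: release_task() re-acquires
	 * tasklist_lock and would deadlock if called with it held. */
	list_for_each_entry_safe(p, n, &dead, sibling) {
		list_del_init(&p->sibling);
		release_task(p);
	}
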
@@ -1417,6 +1332,18 @@ static int wait_task_zombie(struct task_struct *p, int options,
 	return retval;
 }
 
+static int *task_stopped_code(struct task_struct *p, bool ptrace)
+{
+	if (ptrace) {
+		if (task_is_stopped_or_traced(p))
+			return &p->exit_code;
+	} else {
+		if (p->signal->flags & SIGNAL_STOP_STOPPED)
+			return &p->signal->group_exit_code;
+	}
+	return NULL;
+}
+
 /*
  * Handle sys_wait4 work for one task in state TASK_STOPPED.  We hold
  * read_lock(&tasklist_lock) on entry.  If we return zero, we still hold
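
task_stopped_code() folds two different stop predicates into one helper: a
ptrace wait reports the per-task p->exit_code, while an ordinary wait reports
the per-group p->signal->group_exit_code, gated by SIGNAL_STOP_STOPPED.
Returning a pointer (or NULL for "nothing stopped") gives callers a single
location to test, read, and clear. A condensed usage sketch, with p, ptrace
and options as in wait_task_stopped() below:

	int *p_code = task_stopped_code(p, ptrace);

	if (p_code && *p_code) {		/* a stop report is pending */
		int exit_code = *p_code;
		if (!(options & WNOWAIT))
			*p_code = 0;	/* consume the report */
		/* ... hand exit_code to the waiter ... */
	}
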
@@ -1427,7 +1354,7 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
 			   int options, struct siginfo __user *infop,
 			   int __user *stat_addr, struct rusage __user *ru)
 {
-	int retval, exit_code, why;
+	int retval, exit_code, *p_code, why;
 	uid_t uid = 0; /* unneeded, required by compiler */
 	pid_t pid;
 
@@ -1437,22 +1364,16 @@ static int wait_task_stopped(int ptrace, struct task_struct *p,
 	exit_code = 0;
 	spin_lock_irq(&p->sighand->siglock);
 
-	if (unlikely(!task_is_stopped_or_traced(p)))
-		goto unlock_sig;
-
-	if (!ptrace && p->signal->group_stop_count > 0)
-		/*
-		 * A group stop is in progress and this is the group leader.
-		 * We won't report until all threads have stopped.
-		 */
+	p_code = task_stopped_code(p, ptrace);
+	if (unlikely(!p_code))
 		goto unlock_sig;
 
-	exit_code = p->exit_code;
+	exit_code = *p_code;
 	if (!exit_code)
 		goto unlock_sig;
 
 	if (!unlikely(options & WNOWAIT))
-		p->exit_code = 0;
+		*p_code = 0;
 
 	/* don't need the RCU readlock here as we're holding a spinlock */
 	uid = __task_cred(p)->uid;
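
The deleted group_stop_count test (and its comment about the group leader
reporting too early) is subsumed rather than moved: in the !ptrace case the
helper checks SIGNAL_STOP_STOPPED, which the stop path sets only once every
thread in the group has stopped, so "don't report a group stop in progress"
now falls out of the flag itself. Reading through the pointer also lets an
ordinary wait report a group stop even when the leader is already a zombie,
which the old task_is_stopped_or_traced(p) check refused. Spelled out, the
!ptrace arm is equivalent to:

	if (!(p->signal->flags & SIGNAL_STOP_STOPPED))
		goto unlock_sig;		/* group stop not complete */
	exit_code = p->signal->group_exit_code;
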
@@ -1608,7 +1529,7 @@ static int wait_consider_task(struct task_struct *parent, int ptrace,
 	 */
 	*notask_error = 0;
 
-	if (task_is_stopped_or_traced(p))
+	if (task_stopped_code(p, ptrace))
 		return wait_task_stopped(ptrace, p, options,
 					 infop, stat_addr, ru);
 
@@ -1812,7 +1733,7 @@ SYSCALL_DEFINE4(wait4, pid_t, upid, int __user *, stat_addr,
 		pid = find_get_pid(-upid);
 	} else if (upid == 0) {
 		type = PIDTYPE_PGID;
-		pid = get_pid(task_pgrp(current));
+		pid = get_task_pid(current, PIDTYPE_PGID);
 	} else /* upid > 0 */ {
 		type = PIDTYPE_PID;
 		pid = find_get_pid(upid);
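
The wait4 change looks cosmetic but is a safety fix: get_pid(task_pgrp(current))
dereferences the pgrp link with no protection, so a concurrent setpgid() could
swap and free the struct pid between the load and the reference bump.
get_task_pid() does the same lookup with the reference taken under RCU,
presumably along these lines (the task->pids[] layout is this era's):

	struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
	{
		struct pid *pid;

		rcu_read_lock();
		if (type != PIDTYPE_PID)
			task = task->group_leader;	/* pgid/sid live on the leader */
		pid = get_pid(task->pids[type].pid);
		rcu_read_unlock();
		return pid;
	}
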