Diffstat (limited to 'kernel/exit.c')

 -rw-r--r--  kernel/exit.c | 100
 1 file changed, 49 insertions(+), 51 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 38ec40630149..85a83c831856 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -112,9 +112,9 @@ static void __exit_signal(struct task_struct *tsk)
 		 * We won't ever get here for the group leader, since it
 		 * will have been the last reference on the signal_struct.
 		 */
-		sig->utime = cputime_add(sig->utime, tsk->utime);
-		sig->stime = cputime_add(sig->stime, tsk->stime);
-		sig->gtime = cputime_add(sig->gtime, tsk->gtime);
+		sig->utime = cputime_add(sig->utime, task_utime(tsk));
+		sig->stime = cputime_add(sig->stime, task_stime(tsk));
+		sig->gtime = cputime_add(sig->gtime, task_gtime(tsk));
 		sig->min_flt += tsk->min_flt;
 		sig->maj_flt += tsk->maj_flt;
 		sig->nvcsw += tsk->nvcsw;
@@ -583,8 +583,6 @@ mm_need_new_owner(struct mm_struct *mm, struct task_struct *p)
 	 * If there are other users of the mm and the owner (us) is exiting
 	 * we need to find a new owner to take on the responsibility.
 	 */
-	if (!mm)
-		return 0;
 	if (atomic_read(&mm->mm_users) <= 1)
 		return 0;
 	if (mm->owner != p)
@@ -627,6 +625,16 @@ retry:
 	} while_each_thread(g, c);
 
 	read_unlock(&tasklist_lock);
+	/*
+	 * We found no owner yet mm_users > 1: this implies that we are
+	 * most likely racing with swapoff (try_to_unuse()) or /proc or
+	 * ptrace or page migration (get_task_mm()).  Mark owner as NULL,
+	 * so that subsystems can understand the callback and take action.
+	 */
+	down_write(&mm->mmap_sem);
+	cgroup_mm_owner_callbacks(mm->owner, NULL);
+	mm->owner = NULL;
+	up_write(&mm->mmap_sem);
 	return;
 
 assign_new_owner:
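
Note: the block added above publishes the ownership change under mmap_sem held for writing and passes NULL as the new owner to cgroup_mm_owner_callbacks(), so subsystems watching mm ownership see an explicit "no owner" transition instead of a stale mm->owner. A minimal userspace sketch of that publish-under-write-lock pattern follows; struct mm_sim, owner_callback() and drop_owner() are illustrative stand-ins, not kernel APIs.

#include <pthread.h>
#include <stdio.h>

struct task_sim { const char *name; };

struct mm_sim {
	pthread_rwlock_t lock;   /* stands in for mm->mmap_sem */
	struct task_sim *owner;  /* stands in for mm->owner */
};

/* Observers learn about every ownership change; new == NULL means "none". */
static void owner_callback(struct task_sim *old, struct task_sim *new)
{
	printf("mm owner: %s -> %s\n",
	       old ? old->name : "(none)",
	       new ? new->name : "(none)");
}

/* Called when no remaining user of the mm is fit to own it. */
static void drop_owner(struct mm_sim *mm)
{
	pthread_rwlock_wrlock(&mm->lock);
	owner_callback(mm->owner, NULL);  /* NULL = ownership has lapsed */
	mm->owner = NULL;
	pthread_rwlock_unlock(&mm->lock);
}

int main(void)
{
	struct task_sim exiting_owner = { "exiting-task" };
	struct mm_sim mm;

	pthread_rwlock_init(&mm.lock, NULL);
	mm.owner = &exiting_owner;

	drop_owner(&mm);

	pthread_rwlock_destroy(&mm.lock);
	return 0;
}

Doing the notification and the store inside the same write-side critical section mirrors the patch: lock holders never observe the callback and the owner pointer out of step.
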
@@ -831,26 +839,50 @@ static void reparent_thread(struct task_struct *p, struct task_struct *father)
  * the child reaper process (ie "init") in our pid
  * space.
  */
+static struct task_struct *find_new_reaper(struct task_struct *father)
+{
+	struct pid_namespace *pid_ns = task_active_pid_ns(father);
+	struct task_struct *thread;
+
+	thread = father;
+	while_each_thread(father, thread) {
+		if (thread->flags & PF_EXITING)
+			continue;
+		if (unlikely(pid_ns->child_reaper == father))
+			pid_ns->child_reaper = thread;
+		return thread;
+	}
+
+	if (unlikely(pid_ns->child_reaper == father)) {
+		write_unlock_irq(&tasklist_lock);
+		if (unlikely(pid_ns == &init_pid_ns))
+			panic("Attempted to kill init!");
+
+		zap_pid_ns_processes(pid_ns);
+		write_lock_irq(&tasklist_lock);
+		/*
+		 * We can not clear ->child_reaper or leave it alone.
+		 * There may by stealth EXIT_DEAD tasks on ->children,
+		 * forget_original_parent() must move them somewhere.
+		 */
+		pid_ns->child_reaper = init_pid_ns.child_reaper;
+	}
+
+	return pid_ns->child_reaper;
+}
+
 static void forget_original_parent(struct task_struct *father)
 {
-	struct task_struct *p, *n, *reaper = father;
+	struct task_struct *p, *n, *reaper;
 	LIST_HEAD(ptrace_dead);
 
 	write_lock_irq(&tasklist_lock);
-
+	reaper = find_new_reaper(father);
 	/*
 	 * First clean up ptrace if we were using it.
 	 */
 	ptrace_exit(father, &ptrace_dead);
 
-	do {
-		reaper = next_thread(reaper);
-		if (reaper == father) {
-			reaper = task_child_reaper(father);
-			break;
-		}
-	} while (reaper->flags & PF_EXITING);
-
 	list_for_each_entry_safe(p, n, &father->children, sibling) {
 		p->real_parent = reaper;
 		if (p->parent == father) {
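
Note: the new find_new_reaper() walks the dying parent's thread group, skips PF_EXITING threads, and only falls back to the pid namespace's child reaper (zapping the namespace first when the parent was its init). A simplified, self-contained model of that selection order is sketched below; it leaves out the namespace-death path and all locking, and the struct thread_sim/ns_sim types and pick_reaper() helper are invented for illustration.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct thread_sim {
	const char *name;
	bool exiting;              /* stands in for PF_EXITING */
};

struct ns_sim {
	struct thread_sim *child_reaper;
};

/* Prefer a live sibling thread; otherwise fall back to the namespace reaper. */
static struct thread_sim *pick_reaper(struct thread_sim *group, size_t nr,
				      struct ns_sim *ns)
{
	for (size_t i = 0; i < nr; i++)
		if (!group[i].exiting)
			return &group[i];
	return ns->child_reaper;
}

int main(void)
{
	struct thread_sim ns_init = { "ns-init", false };
	struct ns_sim ns = { &ns_init };
	struct thread_sim group[] = {
		{ "leader", true },    /* exiting group leader */
		{ "worker", false },   /* still alive: becomes the reaper */
	};

	printf("new reaper: %s\n", pick_reaper(group, 2, &ns)->name);
	return 0;
}

With the choice factored out this way, forget_original_parent() can simply assign the returned task as ->real_parent for every child, which is why the old do/while scan over sibling threads is deleted in the hunk above.
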
@@ -918,8 +950,8 @@ static void exit_notify(struct task_struct *tsk, int group_dead)
 
 	/* mt-exec, de_thread() is waiting for us */
 	if (thread_group_leader(tsk) &&
-	    tsk->signal->notify_count < 0 &&
-	    tsk->signal->group_exit_task)
+	    tsk->signal->group_exit_task &&
+	    tsk->signal->notify_count < 0)
 		wake_up_process(tsk->signal->group_exit_task);
 
 	write_unlock_irq(&tasklist_lock);
@@ -959,39 +991,6 @@ static void check_stack_usage(void)
 static inline void check_stack_usage(void) {}
 #endif
 
-static inline void exit_child_reaper(struct task_struct *tsk)
-{
-	if (likely(tsk->group_leader != task_child_reaper(tsk)))
-		return;
-
-	if (tsk->nsproxy->pid_ns == &init_pid_ns)
-		panic("Attempted to kill init!");
-
-	/*
-	 * @tsk is the last thread in the 'cgroup-init' and is exiting.
-	 * Terminate all remaining processes in the namespace and reap them
-	 * before exiting @tsk.
-	 *
-	 * Note that @tsk (last thread of cgroup-init) may not necessarily
-	 * be the child-reaper (i.e main thread of cgroup-init) of the
-	 * namespace i.e the child_reaper may have already exited.
-	 *
-	 * Even after a child_reaper exits, we let it inherit orphaned children,
-	 * because, pid_ns->child_reaper remains valid as long as there is
-	 * at least one living sub-thread in the cgroup init.
-
-	 * This living sub-thread of the cgroup-init will be notified when
-	 * a child inherited by the 'child-reaper' exits (do_notify_parent()
-	 * uses __group_send_sig_info()). Further, when reaping child processes,
-	 * do_wait() iterates over children of all living sub threads.
-
-	 * i.e even though 'child_reaper' thread is listed as the parent of the
-	 * orphaned children, any living sub-thread in the cgroup-init can
-	 * perform the role of the child_reaper.
-	 */
-	zap_pid_ns_processes(tsk->nsproxy->pid_ns);
-}
-
 NORET_TYPE void do_exit(long code)
 {
 	struct task_struct *tsk = current;
@@ -1051,7 +1050,6 @@ NORET_TYPE void do_exit(long code)
 	}
 	group_dead = atomic_dec_and_test(&tsk->signal->live);
 	if (group_dead) {
-		exit_child_reaper(tsk);
 		hrtimer_cancel(&tsk->signal->real_timer);
 		exit_itimers(tsk->signal);
 	}
