Diffstat (limited to 'kernel/exit.c')

 kernel/exit.c | 36 ++++++++++++++++++++++++--------------
 1 file changed, 22 insertions(+), 14 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index d8bd3b425fa7..2f59cc334516 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -72,6 +72,18 @@ static void __unhash_process(struct task_struct *p, bool group_dead)
 		list_del_rcu(&p->tasks);
 		list_del_init(&p->sibling);
 		__this_cpu_dec(process_counts);
+		/*
+		 * If we are the last child process in a pid namespace to be
+		 * reaped, notify the reaper sleeping zap_pid_ns_processes().
+		 */
+		if (IS_ENABLED(CONFIG_PID_NS)) {
+			struct task_struct *parent = p->real_parent;
+
+			if ((task_active_pid_ns(parent)->child_reaper == parent) &&
+			    list_empty(&parent->children) &&
+			    (parent->flags & PF_EXITING))
+				wake_up_process(parent);
+		}
 	}
 	list_del_rcu(&p->thread_group);
 }
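The block added to __unhash_process() wakes a pid-namespace init task (the child_reaper) that, per the new comment, is expected to be sleeping in zap_pid_ns_processes() until its last child has been reaped. As a rough, hedged sketch of the sleep side of that handshake (the function name is made up, locking is elided, and this is not the actual zap_pid_ns_processes() body):

#include <linux/sched.h>

static void wait_until_children_are_reaped(void)
{
	/*
	 * Classic sleep/wake pattern: set the task state before testing
	 * the condition, so a wake_up_process() issued from
	 * __unhash_process() cannot be lost between the check and
	 * schedule().
	 */
	for (;;) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (list_empty(&current->children))
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);
}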
@@ -643,6 +655,7 @@ static void exit_mm(struct task_struct * tsk)
 	mm_release(tsk, mm);
 	if (!mm)
 		return;
+	sync_mm_rss(mm);
 	/*
 	 * Serialize with any possible pending coredump.
 	 * We must hold mmap_sem around checking core_state
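The new sync_mm_rss(mm) call flushes the exiting task's batched per-task RSS deltas into the shared mm-wide counters before the address space is torn down, so the final accounting is not silently dropped. A minimal sketch of that split-counter idea, using hypothetical names (task_rss_delta, mm_rss_counter) rather than the kernel's actual structures:

#include <linux/atomic.h>

#define NR_RSS_COUNTERS	3

struct mm_rss_counter {
	atomic_long_t count[NR_RSS_COUNTERS];	/* shared, mm-wide totals */
};

struct task_rss_delta {
	long count[NR_RSS_COUNTERS];		/* private, batched per task */
};

/* Fold the per-task deltas into the shared counters, e.g. at exit. */
static void flush_task_rss(struct task_rss_delta *delta,
			   struct mm_rss_counter *mm_rss)
{
	int i;

	for (i = 0; i < NR_RSS_COUNTERS; i++) {
		if (delta->count[i]) {
			atomic_long_add(delta->count[i], &mm_rss->count[i]);
			delta->count[i] = 0;
		}
	}
}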
@@ -719,12 +732,6 @@ static struct task_struct *find_new_reaper(struct task_struct *father)
 
 		zap_pid_ns_processes(pid_ns);
 		write_lock_irq(&tasklist_lock);
-		/*
-		 * We can not clear ->child_reaper or leave it alone.
-		 * There may by stealth EXIT_DEAD tasks on ->children,
-		 * forget_original_parent() must move them somewhere.
-		 */
-		pid_ns->child_reaper = init_pid_ns.child_reaper;
 	} else if (father->signal->has_child_subreaper) {
 		struct task_struct *reaper;
 
@@ -884,9 +891,9 @@ static void check_stack_usage(void)
 
 	spin_lock(&low_water_lock);
 	if (free < lowest_to_date) {
-		printk(KERN_WARNING "%s used greatest stack depth: %lu bytes "
-				"left\n",
-				current->comm, free);
+		printk(KERN_WARNING "%s (%d) used greatest stack depth: "
+				"%lu bytes left\n",
+				current->comm, task_pid_nr(current), free);
 		lowest_to_date = free;
 	}
 	spin_unlock(&low_water_lock);
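With task_pid_nr(current) added to the format arguments, the stack low-water warning now identifies which task set the record rather than only its comm. With hypothetical values, the message changes roughly from

	bash used greatest stack depth: 2904 bytes left

to

	bash (1432) used greatest stack depth: 2904 bytes left

where the number in parentheses is the task's pid as seen in the initial pid namespace.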
@@ -946,12 +953,13 @@ void do_exit(long code)
 	exit_signals(tsk);	/* sets PF_EXITING */
 	/*
 	 * tsk->flags are checked in the futex code to protect against
-	 * an exiting task cleaning up the robust pi futexes.
+	 * an exiting task cleaning up the robust pi futexes, and in
+	 * task_work_add() to avoid the race with exit_task_work().
 	 */
 	smp_mb();
 	raw_spin_unlock_wait(&tsk->pi_lock);
 
-	exit_irq_thread();
+	exit_task_work(tsk);
 
 	if (unlikely(in_atomic()))
 		printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
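The reworded comment ties the existing PF_EXITING/smp_mb() ordering to the new task-work machinery: once a task has set PF_EXITING and run exit_task_work(), nothing may queue further work on it, and task_work_add() is expected to check the flag to avoid that race. A hedged sketch of that kind of guard, assuming queueing is serialized by pi_lock as the surrounding raw_spin_unlock_wait(&tsk->pi_lock) suggests (the function below is illustrative, not the real task_work_add()):

#include <linux/errno.h>
#include <linux/sched.h>

static int queue_work_on_task(struct task_struct *task, void *work)
{
	unsigned long flags;
	int err = -ESRCH;

	raw_spin_lock_irqsave(&task->pi_lock, flags);
	if (likely(!(task->flags & PF_EXITING))) {
		/* hypothetical queueing of @work; real list handling differs */
		err = 0;
	}
	raw_spin_unlock_irqrestore(&task->pi_lock, flags);

	return err;	/* -ESRCH: the target task is already exiting */
}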
@@ -1214,7 +1222,7 @@ static int wait_task_zombie(struct wait_opts *wo, struct task_struct *p)
 	unsigned long state;
 	int retval, status, traced;
 	pid_t pid = task_pid_vnr(p);
-	uid_t uid = __task_cred(p)->uid;
+	uid_t uid = from_kuid_munged(current_user_ns(), task_uid(p));
 	struct siginfo __user *infop;
 
 	if (!likely(wo->wo_flags & WEXITED))
@@ -1427,7 +1435,7 @@ static int wait_task_stopped(struct wait_opts *wo,
 	if (!unlikely(wo->wo_flags & WNOWAIT))
 		*p_code = 0;
 
-	uid = task_uid(p);
+	uid = from_kuid_munged(current_user_ns(), task_uid(p));
 unlock_sig:
 	spin_unlock_irq(&p->sighand->siglock);
 	if (!exit_code)
@@ -1500,7 +1508,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 	}
 	if (!unlikely(wo->wo_flags & WNOWAIT))
 		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
-	uid = task_uid(p);
+	uid = from_kuid_munged(current_user_ns(), task_uid(p));
 	spin_unlock_irq(&p->sighand->siglock);
 
 	pid = task_pid_vnr(p);
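All three wait_task_*() hunks report the uid to the waiter via from_kuid_munged(current_user_ns(), task_uid(p)): the task's kernel-internal kuid is translated into the caller's user namespace, and ids with no mapping there degrade to that namespace's overflow uid. A hedged sketch of the same conversion wrapped in a helper (the helper name is made up; the patch open-codes the call at each site):

#include <linux/cred.h>
#include <linux/sched.h>
#include <linux/uidgid.h>
#include <linux/user_namespace.h>

/* Report @p's uid as the current task would see it in its own user ns. */
static uid_t uid_as_seen_by_current(struct task_struct *p)
{
	return from_kuid_munged(current_user_ns(), task_uid(p));
}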