about summary refs log tree commit diff stats
path: root/kernel/exit.c
diff options
context:
space:
mode:
Diffstat (limited to 'kernel/exit.c')
-rw-r--r--  kernel/exit.c  38
1 file changed, 30 insertions(+), 8 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index ceffc67b564a..676149a4ac5f 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -50,6 +50,7 @@
50#include <linux/perf_event.h> 50#include <linux/perf_event.h>
51#include <trace/events/sched.h> 51#include <trace/events/sched.h>
52#include <linux/hw_breakpoint.h> 52#include <linux/hw_breakpoint.h>
53#include <linux/oom.h>
53 54
54#include <asm/uaccess.h> 55#include <asm/uaccess.h>
55#include <asm/unistd.h> 56#include <asm/unistd.h>
@@ -95,6 +96,14 @@ static void __exit_signal(struct task_struct *tsk)
95 sig->tty = NULL; 96 sig->tty = NULL;
96 } else { 97 } else {
97 /* 98 /*
99 * This can only happen if the caller is de_thread().
100 * FIXME: this is the temporary hack, we should teach
101 * posix-cpu-timers to handle this case correctly.
102 */
103 if (unlikely(has_group_leader_pid(tsk)))
104 posix_cpu_timers_exit_group(tsk);
105
106 /*
98 * If there is any task waiting for the group exit 107 * If there is any task waiting for the group exit
99 * then notify it: 108 * then notify it:
100 */ 109 */
@@ -149,9 +158,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
149{ 158{
150 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu); 159 struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
151 160
152#ifdef CONFIG_PERF_EVENTS 161 perf_event_delayed_put(tsk);
153 WARN_ON_ONCE(tsk->perf_event_ctxp);
154#endif
155 trace_sched_process_free(tsk); 162 trace_sched_process_free(tsk);
156 put_task_struct(tsk); 163 put_task_struct(tsk);
157} 164}
@@ -689,6 +696,8 @@ static void exit_mm(struct task_struct * tsk)
689 enter_lazy_tlb(mm, current); 696 enter_lazy_tlb(mm, current);
690 /* We don't want this task to be frozen prematurely */ 697 /* We don't want this task to be frozen prematurely */
691 clear_freeze_flag(tsk); 698 clear_freeze_flag(tsk);
699 if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
700 atomic_dec(&mm->oom_disable_count);
692 task_unlock(tsk); 701 task_unlock(tsk);
693 mm_update_next_owner(mm); 702 mm_update_next_owner(mm);
694 mmput(mm); 703 mmput(mm);
@@ -702,6 +711,8 @@ static void exit_mm(struct task_struct * tsk)
702 * space. 711 * space.
703 */ 712 */
704static struct task_struct *find_new_reaper(struct task_struct *father) 713static struct task_struct *find_new_reaper(struct task_struct *father)
714 __releases(&tasklist_lock)
715 __acquires(&tasklist_lock)
705{ 716{
706 struct pid_namespace *pid_ns = task_active_pid_ns(father); 717 struct pid_namespace *pid_ns = task_active_pid_ns(father);
707 struct task_struct *thread; 718 struct task_struct *thread;
@@ -771,9 +782,12 @@ static void forget_original_parent(struct task_struct *father)
771 struct task_struct *p, *n, *reaper; 782 struct task_struct *p, *n, *reaper;
772 LIST_HEAD(dead_children); 783 LIST_HEAD(dead_children);
773 784
774 exit_ptrace(father);
775
776 write_lock_irq(&tasklist_lock); 785 write_lock_irq(&tasklist_lock);
786 /*
787 * Note that exit_ptrace() and find_new_reaper() might
788 * drop tasklist_lock and reacquire it.
789 */
790 exit_ptrace(father);
777 reaper = find_new_reaper(father); 791 reaper = find_new_reaper(father);
778 792
779 list_for_each_entry_safe(p, n, &father->children, sibling) { 793 list_for_each_entry_safe(p, n, &father->children, sibling) {
@@ -900,6 +914,15 @@ NORET_TYPE void do_exit(long code)
900 if (unlikely(!tsk->pid)) 914 if (unlikely(!tsk->pid))
901 panic("Attempted to kill the idle task!"); 915 panic("Attempted to kill the idle task!");
902 916
917 /*
918 * If do_exit is called because this processes oopsed, it's possible
919 * that get_fs() was left as KERNEL_DS, so reset it to USER_DS before
920 * continuing. Amongst other possible reasons, this is to prevent
921 * mm_release()->clear_child_tid() from writing to a user-controlled
922 * kernel address.
923 */
924 set_fs(USER_DS);
925
903 tracehook_report_exit(&code); 926 tracehook_report_exit(&code);
904 927
905 validate_creds_for_do_exit(tsk); 928 validate_creds_for_do_exit(tsk);
@@ -1383,8 +1406,7 @@ static int wait_task_stopped(struct wait_opts *wo,
1383 if (!unlikely(wo->wo_flags & WNOWAIT)) 1406 if (!unlikely(wo->wo_flags & WNOWAIT))
1384 *p_code = 0; 1407 *p_code = 0;
1385 1408
1386 /* don't need the RCU readlock here as we're holding a spinlock */ 1409 uid = task_uid(p);
1387 uid = __task_cred(p)->uid;
1388unlock_sig: 1410unlock_sig:
1389 spin_unlock_irq(&p->sighand->siglock); 1411 spin_unlock_irq(&p->sighand->siglock);
1390 if (!exit_code) 1412 if (!exit_code)
@@ -1457,7 +1479,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
1457 } 1479 }
1458 if (!unlikely(wo->wo_flags & WNOWAIT)) 1480 if (!unlikely(wo->wo_flags & WNOWAIT))
1459 p->signal->flags &= ~SIGNAL_STOP_CONTINUED; 1481 p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
1460 uid = __task_cred(p)->uid; 1482 uid = task_uid(p);
1461 spin_unlock_irq(&p->sighand->siglock); 1483 spin_unlock_irq(&p->sighand->siglock);
1462 1484
1463 pid = task_pid_vnr(p); 1485 pid = task_pid_vnr(p);