Diffstat (limited to 'kernel/exit.c')
 -rw-r--r--  kernel/exit.c | 14 ++++++++------
 1 file changed, 8 insertions(+), 6 deletions(-)
diff --git a/kernel/exit.c b/kernel/exit.c
index 671ed56e0a49..b194febf5799 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -50,6 +50,7 @@
 #include <linux/perf_event.h>
 #include <trace/events/sched.h>
 #include <linux/hw_breakpoint.h>
+#include <linux/oom.h>
 
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
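Note: the new <linux/oom.h> include pulls in OOM_SCORE_ADJ_MIN, which the exit_mm() hunk below relies on. For context only (values recalled from this era of the tree, not part of the diff), the relevant constants look roughly like:

/* include/linux/oom.h (for reference; not part of this change) */
#define OOM_SCORE_ADJ_MIN	(-1000)
#define OOM_SCORE_ADJ_MAX	1000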
@@ -149,9 +150,7 @@ static void delayed_put_task_struct(struct rcu_head *rhp)
 {
 	struct task_struct *tsk = container_of(rhp, struct task_struct, rcu);
 
-#ifdef CONFIG_PERF_EVENTS
-	WARN_ON_ONCE(tsk->perf_event_ctxp);
-#endif
+	perf_event_delayed_put(tsk);
 	trace_sched_process_free(tsk);
 	put_task_struct(tsk);
 }
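Note: this hunk replaces the open-coded CONFIG_PERF_EVENTS ifdef with the perf_event_delayed_put() helper, keeping exit.c free of perf internals while preserving the sanity check. A rough sketch of what the helper is expected to do (the per-context loop reflects the multi-context perf layout and is an assumption of this note, not text from the diff):

/* sketch of perf_event_delayed_put(); the real definition lives in the perf core */
void perf_event_delayed_put(struct task_struct *task)
{
	int ctxn;

	/* no perf context should still be attached by the time the delayed put runs */
	for_each_task_context_nr(ctxn)
		WARN_ON_ONCE(task->perf_event_ctxp[ctxn]);
}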
@@ -689,6 +688,8 @@ static void exit_mm(struct task_struct * tsk)
 	enter_lazy_tlb(mm, current);
 	/* We don't want this task to be frozen prematurely */
 	clear_freeze_flag(tsk);
+	if (tsk->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
+		atomic_dec(&mm->oom_disable_count);
 	task_unlock(tsk);
 	mm_update_next_owner(mm);
 	mmput(mm);
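Note: exit_mm() now decrements mm->oom_disable_count when an OOM-immune task (oom_score_adj == OOM_SCORE_ADJ_MIN) releases its mm, keeping the counter balanced against the increments taken when such tasks attach to the mm. The counter lets the OOM killer recognize an mm that still has unkillable users; an illustrative helper (name and use are assumptions, not part of this diff):

/* illustrative only: true if some task attached to @mm is marked unkillable */
static inline bool mm_has_oom_disabled_user(struct mm_struct *mm)
{
	return atomic_read(&mm->oom_disable_count) != 0;
}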
@@ -702,6 +703,8 @@ static void exit_mm(struct task_struct * tsk)
  * space.
  */
 static struct task_struct *find_new_reaper(struct task_struct *father)
+	__releases(&tasklist_lock)
+	__acquires(&tasklist_lock)
 {
 	struct pid_namespace *pid_ns = task_active_pid_ns(father);
 	struct task_struct *thread;
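Note: __releases()/__acquires() are sparse lock-context annotations documenting that find_new_reaper() drops and re-takes tasklist_lock internally; without them sparse reports a lock context imbalance. They expand to nothing in a normal build. Their definitions are roughly (see <linux/compiler.h>):

#ifdef __CHECKER__
# define __releases(x)	__attribute__((context(x, 1, 0)))
# define __acquires(x)	__attribute__((context(x, 0, 1)))
#else
# define __releases(x)
# define __acquires(x)
#endif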
@@ -1386,8 +1389,7 @@ static int wait_task_stopped(struct wait_opts *wo,
 	if (!unlikely(wo->wo_flags & WNOWAIT))
 		*p_code = 0;
 
-	/* don't need the RCU readlock here as we're holding a spinlock */
-	uid = __task_cred(p)->uid;
+	uid = task_uid(p);
 unlock_sig:
 	spin_unlock_irq(&p->sighand->siglock);
 	if (!exit_code)
@@ -1460,7 +1462,7 @@ static int wait_task_continued(struct wait_opts *wo, struct task_struct *p)
 	}
 	if (!unlikely(wo->wo_flags & WNOWAIT))
 		p->signal->flags &= ~SIGNAL_STOP_CONTINUED;
-	uid = __task_cred(p)->uid;
+	uid = task_uid(p);
 	spin_unlock_irq(&p->sighand->siglock);
 
 	pid = task_pid_vnr(p);
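Note: both wait_task_stopped() and wait_task_continued() switch from dereferencing __task_cred(p)->uid, which needs rcu_read_lock() or an equivalent guarantee (hence the deleted comment about holding a spinlock), to the task_uid() accessor, which takes the RCU read lock itself. Its <linux/cred.h> definition is roughly:

/* roughly the accessor used above; see <linux/cred.h> */
#define task_cred_xxx(task, xxx)				\
({								\
	__typeof__(((struct cred *)NULL)->xxx) ___val;		\
	rcu_read_lock();					\
	___val = __task_cred(task)->xxx;			\
	rcu_read_unlock();					\
	___val;							\
})

#define task_uid(task)	(task_cred_xxx((task), uid))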