Diffstat (limited to 'mm/oom_kill.c')
 mm/oom_kill.c | 33 +++++++++++++++++++++++++++++----
 1 file changed, 29 insertions(+), 4 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 4029583a1024..7dcca55ede7c 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -162,10 +162,11 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
 		return 0;
 
 	/*
-	 * Shortcut check for OOM_SCORE_ADJ_MIN so the entire heuristic doesn't
-	 * need to be executed for something that cannot be killed.
+	 * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
+	 * so the entire heuristic doesn't need to be executed for something
+	 * that cannot be killed.
 	 */
-	if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN) {
+	if (atomic_read(&p->mm->oom_disable_count)) {
 		task_unlock(p);
 		return 0;
 	}
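
With this change, oom_badness() bails out when any thread sharing p->mm is OOM-disabled, not only when p itself has oom_score_adj set to OOM_SCORE_ADJ_MIN. The counter is maintained outside this file, in the /proc oom_score_adj write handler and the fork/exec/exit paths; the sketch below shows the expected bookkeeping, using the hypothetical helper name account_oom_disable() and assuming it runs under task_lock() so task->mm cannot go away:

/*
 * Sketch only (not from this patch): keep mm->oom_disable_count in
 * step with how many threads using this mm are OOM-disabled.  Assumed
 * to be called under task_lock(task) so task->mm stays stable.
 */
static void account_oom_disable(struct task_struct *task,
				int old_adj, int new_adj)
{
	if (!task->mm)
		return;	/* kernel threads have no mm to account against */

	if (new_adj == OOM_SCORE_ADJ_MIN && old_adj != OOM_SCORE_ADJ_MIN)
		atomic_inc(&task->mm->oom_disable_count);
	else if (old_adj == OOM_SCORE_ADJ_MIN && new_adj != OOM_SCORE_ADJ_MIN)
		atomic_dec(&task->mm->oom_disable_count);
}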
@@ -403,16 +404,40 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 #define K(x) ((x) << (PAGE_SHIFT-10))
 static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 {
+	struct task_struct *q;
+	struct mm_struct *mm;
+
 	p = find_lock_task_mm(p);
 	if (!p)
 		return 1;
 
+	/* mm cannot be safely dereferenced after task_unlock(p) */
+	mm = p->mm;
+
 	pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
 		task_pid_nr(p), p->comm, K(p->mm->total_vm),
 		K(get_mm_counter(p->mm, MM_ANONPAGES)),
 		K(get_mm_counter(p->mm, MM_FILEPAGES)));
 	task_unlock(p);
 
+	/*
+	 * Kill all processes sharing p->mm in other thread groups, if any.
+	 * They don't get access to memory reserves or a higher scheduler
+	 * priority, though, to avoid depletion of all memory or task
+	 * starvation.  This prevents mm->mmap_sem livelock when an oom killed
+	 * task cannot exit because it requires the semaphore and it's contended
+	 * by another thread trying to allocate memory itself.  That thread will
+	 * now get access to memory reserves since it has a pending fatal
+	 * signal.
+	 */
+	for_each_process(q)
+		if (q->mm == mm && !same_thread_group(q, p)) {
+			task_lock(q);	/* Protect ->comm from prctl() */
+			pr_err("Kill process %d (%s) sharing same memory\n",
+				task_pid_nr(q), q->comm);
+			task_unlock(q);
+			force_sig(SIGKILL, q);
+		}
 
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 	force_sig(SIGKILL, p);
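
The new loop targets processes in other thread groups that share the victim's mm; the classic way such a pair arises is clone(2) with CLONE_VM but without CLONE_THREAD. A small userspace illustration (not part of the patch) that sets up exactly the situation the loop handles:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

static char child_stack[64 * 1024];

static int child_fn(void *arg)
{
	pause();	/* idle while sharing the parent's address space */
	return 0;
}

int main(void)
{
	/* CLONE_VM without CLONE_THREAD: same mm, separate thread group,
	 * so an oom kill of either process must also kill the other. */
	pid_t pid = clone(child_fn, child_stack + sizeof(child_stack),
			  CLONE_VM | SIGCHLD, NULL);
	if (pid < 0) {
		perror("clone");
		return EXIT_FAILURE;
	}
	printf("child %d shares mm with parent %d\n", pid, getpid());
	return 0;
}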
@@ -680,7 +705,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	read_lock(&tasklist_lock);
 	if (sysctl_oom_kill_allocating_task &&
 	    !oom_unkillable_task(current, NULL, nodemask) &&
-	    (current->signal->oom_adj != OOM_DISABLE)) {
+	    current->mm && !atomic_read(&current->mm->oom_disable_count)) {
 		/*
 		 * oom_kill_process() needs tasklist_lock held. If it returns
 		 * non-zero, current could not be killed so we must fallback to
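
Note that the rewritten condition dereferences current->mm, so the added current->mm test is load-bearing: kernel threads and tasks that have already passed exit_mm() have no mm, whereas the old current->signal->oom_adj check was safe for them. A hypothetical helper (illustration only, not in the patch) that captures the new rule:

/*
 * Hypothetical helper mirroring the new test in out_of_memory():
 * "current" may be sacrificed under sysctl_oom_kill_allocating_task
 * only if it is killable, has an mm, and no thread sharing that mm
 * is OOM-disabled.  The task->mm check guards the dereference.
 */
static bool may_kill_allocating_task(struct task_struct *task,
				     const nodemask_t *nodemask)
{
	return sysctl_oom_kill_allocating_task &&
	       !oom_unkillable_task(task, NULL, nodemask) &&
	       task->mm &&
	       !atomic_read(&task->mm->oom_disable_count);
}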