aboutsummaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorDavid Rientjes <rientjes@google.com>2012-03-21 19:33:46 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2012-03-21 20:54:55 -0400
commit647f2bdf4a00dbcaa8964286501d68e7d2e6da93 (patch)
treea9efa94d4add977b4629b137de7f4d002eec56dd /mm
parent2a1c9b1fc0a0ea2e30cdeb69062647c5c5ae661f (diff)
mm, oom: fold oom_kill_task() into oom_kill_process()
oom_kill_task() has a single caller, so fold it into its parent function, oom_kill_process(). Slightly reduces the number of lines in the oom killer. Acked-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com> Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com> Signed-off-by: David Rientjes <rientjes@google.com> Acked-by: Michal Hocko <mhocko@suse.cz> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/oom_kill.c85
1 file changed, 38 insertions, 47 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index a26695fe8833..d402b2c1040e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -434,52 +434,6 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
434} 434}
435 435
436#define K(x) ((x) << (PAGE_SHIFT-10)) 436#define K(x) ((x) << (PAGE_SHIFT-10))
437static void oom_kill_task(struct task_struct *p)
438{
439 struct task_struct *q;
440 struct mm_struct *mm;
441
442 p = find_lock_task_mm(p);
443 if (!p)
444 return;
445
446 /* mm cannot be safely dereferenced after task_unlock(p) */
447 mm = p->mm;
448
449 pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
450 task_pid_nr(p), p->comm, K(p->mm->total_vm),
451 K(get_mm_counter(p->mm, MM_ANONPAGES)),
452 K(get_mm_counter(p->mm, MM_FILEPAGES)));
453 task_unlock(p);
454
455 /*
456 * Kill all user processes sharing p->mm in other thread groups, if any.
457 * They don't get access to memory reserves or a higher scheduler
458 * priority, though, to avoid depletion of all memory or task
459 * starvation. This prevents mm->mmap_sem livelock when an oom killed
460	 * task cannot exit because it requires the semaphore and it is contended
461 * by another thread trying to allocate memory itself. That thread will
462 * now get access to memory reserves since it has a pending fatal
463 * signal.
464 */
465 for_each_process(q)
466 if (q->mm == mm && !same_thread_group(q, p) &&
467 !(q->flags & PF_KTHREAD)) {
468 if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
469 continue;
470
471 task_lock(q); /* Protect ->comm from prctl() */
472 pr_err("Kill process %d (%s) sharing same memory\n",
473 task_pid_nr(q), q->comm);
474 task_unlock(q);
475 force_sig(SIGKILL, q);
476 }
477
478 set_tsk_thread_flag(p, TIF_MEMDIE);
479 force_sig(SIGKILL, p);
480}
481#undef K
482
483static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order, 437static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
484 unsigned int points, unsigned long totalpages, 438 unsigned int points, unsigned long totalpages,
485 struct mem_cgroup *memcg, nodemask_t *nodemask, 439 struct mem_cgroup *memcg, nodemask_t *nodemask,
@@ -488,6 +442,7 @@ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
488 struct task_struct *victim = p; 442 struct task_struct *victim = p;
489 struct task_struct *child; 443 struct task_struct *child;
490 struct task_struct *t = p; 444 struct task_struct *t = p;
445 struct mm_struct *mm;
491 unsigned int victim_points = 0; 446 unsigned int victim_points = 0;
492 447
493 if (printk_ratelimit()) 448 if (printk_ratelimit())
@@ -531,8 +486,44 @@ static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
531 } 486 }
532 } while_each_thread(p, t); 487 } while_each_thread(p, t);
533 488
534 oom_kill_task(victim); 489 victim = find_lock_task_mm(victim);
490 if (!victim)
491 return;
492
493 /* mm cannot safely be dereferenced after task_unlock(victim) */
494 mm = victim->mm;
495 pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n",
496 task_pid_nr(victim), victim->comm, K(victim->mm->total_vm),
497 K(get_mm_counter(victim->mm, MM_ANONPAGES)),
498 K(get_mm_counter(victim->mm, MM_FILEPAGES)));
499 task_unlock(victim);
500
501 /*
502 * Kill all user processes sharing victim->mm in other thread groups, if
503 * any. They don't get access to memory reserves, though, to avoid
504 * depletion of all memory. This prevents mm->mmap_sem livelock when an
505 * oom killed thread cannot exit because it requires the semaphore and
506	 * it is contended by another thread trying to allocate memory itself.
507 * That thread will now get access to memory reserves since it has a
508 * pending fatal signal.
509 */
510 for_each_process(p)
511 if (p->mm == mm && !same_thread_group(p, victim) &&
512 !(p->flags & PF_KTHREAD)) {
513 if (p->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
514 continue;
515
516 task_lock(p); /* Protect ->comm from prctl() */
517 pr_err("Kill process %d (%s) sharing same memory\n",
518 task_pid_nr(p), p->comm);
519 task_unlock(p);
520 force_sig(SIGKILL, p);
521 }
522
523 set_tsk_thread_flag(victim, TIF_MEMDIE);
524 force_sig(SIGKILL, victim);
535} 525}
526#undef K
536 527
537/* 528/*
538 * Determines whether the kernel must panic because of the panic_on_oom sysctl. 529 * Determines whether the kernel must panic because of the panic_on_oom sysctl.