author		David Rientjes <rientjes@google.com>	2010-09-22 16:05:10 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-09-22 20:22:39 -0400
commit		e85bfd3aa7a34fa963bb268a676b41694e6dcf96 (patch)
tree		6d92d25d390668879fe952e6b6068d774d0471b4 /mm/oom_kill.c
parent		fd02db9de73faebc51240619c7c7f99bee9f65c7 (diff)
oom: filter unkillable tasks from tasklist dump
/proc/sys/vm/oom_dump_tasks is enabled by default, so the amount of information it emits should be kept to a minimum.  The tasklist dump should be filtered to only those tasks that are eligible for oom kill.  This is already done for memcg ooms; this patch extends the filtering to cpuset and mempolicy ooms, and to init.

In addition to suppressing irrelevant information, this also reduces confusion: users currently cannot tell which tasks in the tasklist are ineligible for kill (such as those attached to cpusets or bound to mempolicies with a disjoint set of mems or nodes, respectively), because that information is not shown.

Signed-off-by: David Rientjes <rientjes@google.com>
Reviewed-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
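For context, the shared eligibility check that dump_tasks() now reuses is oom_unkillable_task(), whose updated signature appears in the first hunk below. What follows is a minimal sketch of what such a filter covers, assembled from the checks named in this changelog and diff; helper names beyond task_in_mem_cgroup() (notably has_intersects_mems_allowed()) are assumptions about the surrounding tree rather than part of this patch:

/*
 * Hedged sketch of the victim-eligibility filter; the authoritative body
 * lives in mm/oom_kill.c and may differ in detail.
 */
static bool oom_unkillable_task(struct task_struct *p,
		const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	if (is_global_init(p))		/* never consider init */
		return true;
	if (p->flags & PF_KTHREAD)	/* kernel threads have no killable mm */
		return true;

	/* memcg ooms: skip tasks outside the constrained memory controller */
	if (mem && !task_in_mem_cgroup(p, mem))
		return true;

	/*
	 * cpuset/mempolicy ooms: skip tasks whose allowed mems do not
	 * intersect the constrained nodemask (assumed helper name).
	 */
	if (!has_intersects_mems_allowed(p, nodemask))
		return true;

	return false;
}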
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--	mm/oom_kill.c	40
1 file changed, 19 insertions, 21 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 859250c7dc06..4029583a1024 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -121,8 +121,8 @@ struct task_struct *find_lock_task_mm(struct task_struct *p)
 }
 
 /* return true if the task is not adequate as candidate victim task. */
-static bool oom_unkillable_task(struct task_struct *p, struct mem_cgroup *mem,
-		const nodemask_t *nodemask)
+static bool oom_unkillable_task(struct task_struct *p,
+		const struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
 	if (is_global_init(p))
 		return true;
@@ -344,26 +344,24 @@ static struct task_struct *select_bad_process(unsigned int *ppoints,
 /**
  * dump_tasks - dump current memory state of all system tasks
  * @mem: current's memory controller, if constrained
+ * @nodemask: nodemask passed to page allocator for mempolicy ooms
  *
- * Dumps the current memory state of all system tasks, excluding kernel threads.
+ * Dumps the current memory state of all eligible tasks.  Tasks not in the same
+ * memcg, not in the same cpuset, or bound to a disjoint set of mempolicy nodes
+ * are not shown.
  * State information includes task's pid, uid, tgid, vm size, rss, cpu, oom_adj
  * value, oom_score_adj value, and name.
  *
- * If the actual is non-NULL, only tasks that are a member of the mem_cgroup are
- * shown.
- *
 * Call with tasklist_lock read-locked.
 */
-static void dump_tasks(const struct mem_cgroup *mem)
+static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
 	struct task_struct *p;
 	struct task_struct *task;
 
 	pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
 	for_each_process(p) {
-		if (p->flags & PF_KTHREAD)
-			continue;
-		if (mem && !task_in_mem_cgroup(p, mem))
+		if (oom_unkillable_task(p, mem, nodemask))
 			continue;
 
 		task = find_lock_task_mm(p);
@@ -386,7 +384,7 @@ static void dump_tasks(const struct mem_cgroup *mem)
 }
 
 static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
-				struct mem_cgroup *mem)
+				struct mem_cgroup *mem, const nodemask_t *nodemask)
 {
 	task_lock(current);
 	pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, "
@@ -399,7 +397,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 	mem_cgroup_print_oom_info(mem, p);
 	show_mem();
 	if (sysctl_oom_dump_tasks)
-		dump_tasks(mem);
+		dump_tasks(mem, nodemask);
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
@@ -441,7 +439,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	unsigned int victim_points = 0;
 
 	if (printk_ratelimit())
-		dump_header(p, gfp_mask, order, mem);
+		dump_header(p, gfp_mask, order, mem, nodemask);
 
 	/*
 	 * If the task is already exiting, don't alarm the sysadmin or kill
@@ -487,7 +485,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 * Determines whether the kernel must panic because of the panic_on_oom sysctl.
 */
 static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
-				int order)
+				int order, const nodemask_t *nodemask)
 {
 	if (likely(!sysctl_panic_on_oom))
 		return;
@@ -501,7 +499,7 @@ static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask,
 		return;
 	}
 	read_lock(&tasklist_lock);
-	dump_header(NULL, gfp_mask, order, NULL);
+	dump_header(NULL, gfp_mask, order, NULL, nodemask);
 	read_unlock(&tasklist_lock);
 	panic("Out of memory: %s panic_on_oom is enabled\n",
 		sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide");
@@ -514,7 +512,7 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
 	unsigned int points = 0;
 	struct task_struct *p;
 
-	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0);
+	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
 	limit = mem_cgroup_get_limit(mem) >> PAGE_SHIFT;
 	read_lock(&tasklist_lock);
retry:
@@ -646,6 +644,7 @@ static void clear_system_oom(void)
 void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 		int order, nodemask_t *nodemask)
 {
+	const nodemask_t *mpol_mask;
 	struct task_struct *p;
 	unsigned long totalpages;
 	unsigned long freed = 0;
@@ -675,7 +674,8 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	 */
 	constraint = constrained_alloc(zonelist, gfp_mask, nodemask,
 						&totalpages);
-	check_panic_on_oom(constraint, gfp_mask, order);
+	mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL;
+	check_panic_on_oom(constraint, gfp_mask, order, mpol_mask);
 
 	read_lock(&tasklist_lock);
 	if (sysctl_oom_kill_allocating_task &&
@@ -693,15 +693,13 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	}
 
retry:
-	p = select_bad_process(&points, totalpages, NULL,
-			constraint == CONSTRAINT_MEMORY_POLICY ? nodemask :
-								 NULL);
+	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
 	if (PTR_ERR(p) == -1UL)
 		goto out;
 
 	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
-		dump_header(NULL, gfp_mask, order, NULL);
+		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
 		read_unlock(&tasklist_lock);
 		panic("Out of memory and no killable processes...\n");
 	}
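For reference, with this patch applied the head of the dump_tasks() loop reads roughly as below. This is a sketch assembled from the hunks above; everything after find_lock_task_mm() (the per-task pr_info() line and task_unlock()) is elided rather than quoted:

static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask)
{
	struct task_struct *p;
	struct task_struct *task;

	pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n");
	for_each_process(p) {
		/* one shared filter for init, kthreads, memcg, cpuset, mempolicy */
		if (oom_unkillable_task(p, mem, nodemask))
			continue;

		task = find_lock_task_mm(p);
		/*
		 * ... skip tasks with no mm, print one line per eligible task,
		 * then task_unlock(task) ... (elided)
		 */
	}
}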