author    KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>    2011-04-14 18:22:13 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2011-04-14 19:06:56 -0400
commit    341aea2bc48bf652777fb015cc2b3dfa9a451817 (patch)
tree      46846e06674fdf45542ed3101c7a55aa31a577af /mm/oom_kill.c
parent    929bea7c714220fc76ce3f75bef9056477c28e74 (diff)
oom-kill: remove boost_dying_task_prio()
This is an almost-revert of commit 93b43fa ("oom: give the dying task a higher priority").

That commit dramatically improved oom killer behaviour when a fork bomb occurs, but it turns out to have a nasty corner case: the cpu cgroup has a strange default RT runtime of 0, so if a process in a cpu cgroup is promoted to an RT scheduling class, it never runs at all.

If an admin puts a !RT process into a cpu cgroup whose rt_runtime is 0, it usually runs perfectly, because a !RT task is not affected by the rt_runtime knob. But if the task is promoted to RT, whether via an explicit setscheduler() syscall or by the OOM killer, it can't run at all. In short, the oom killer doesn't work at all if admins use the cpu cgroup and don't touch the rt_runtime knob, and the kernel may eventually hang when an OOM kill occurs.

The original author Luis and I agreed to disable this logic.

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: <stable@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
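For illustration only, here is a minimal userspace sketch of the corner case described above; it is not part of the patch. It assumes a cgroup-v1 cpu controller mounted at /sys/fs/cgroup/cpu with a child group "test" whose cpu.rt_runtime_us has been left at its default of 0 under CONFIG_RT_GROUP_SCHED; the paths and group name are hypothetical. A SCHED_OTHER task joins the group without trouble, but promoting it to SCHED_FIFO, which is what the removed boost_dying_task_prio() did from kernel context, runs into the zero RT budget:

/*
 * Hedged repro sketch, not part of the patch.  Assumes a cgroup-v1 cpu
 * controller at /sys/fs/cgroup/cpu and a child group "test" whose
 * cpu.rt_runtime_us is still at its default of 0 (CONFIG_RT_GROUP_SCHED).
 */
#include <fcntl.h>
#include <sched.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 1 };
	char buf[32];
	int fd;

	/* Join the cgroup as a !RT task; rt_runtime_us does not affect us. */
	fd = open("/sys/fs/cgroup/cpu/test/tasks", O_WRONLY);
	if (fd < 0) {
		perror("open tasks file");
		return 1;
	}
	snprintf(buf, sizeof(buf), "%d\n", getpid());
	if (write(fd, buf, strlen(buf)) < 0)
		perror("write tasks file");
	close(fd);

	/*
	 * Try the same promotion the OOM killer used to do.  With the
	 * group's RT runtime at 0 this either fails with EPERM or, when
	 * the check is bypassed as with the in-kernel
	 * sched_setscheduler_nocheck() call, leaves the task with no RT
	 * budget, so it never gets the CPU again.
	 */
	if (sched_setscheduler(0, SCHED_FIFO, &param) < 0)
		perror("sched_setscheduler(SCHED_FIFO)");

	return 0;
}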
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--    mm/oom_kill.c    28
1 file changed, 0 insertions, 28 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6a819d1b2c7d..83fb72c108b7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -84,24 +84,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
 #endif /* CONFIG_NUMA */
 
 /*
- * If this is a system OOM (not a memcg OOM) and the task selected to be
- * killed is not already running at high (RT) priorities, speed up the
- * recovery by boosting the dying task to the lowest FIFO priority.
- * That helps with the recovery and avoids interfering with RT tasks.
- */
-static void boost_dying_task_prio(struct task_struct *p,
-					struct mem_cgroup *mem)
-{
-	struct sched_param param = { .sched_priority = 1 };
-
-	if (mem)
-		return;
-
-	if (!rt_task(p))
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-}
-
-/*
  * The process p may have detached its own ->mm while exiting or through
  * use_mm(), but one or more of its subthreads may still have a valid
  * pointer. Return p, or any of its subthreads with a valid ->mm, with
@@ -452,13 +434,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 	force_sig(SIGKILL, p);
 
-	/*
-	 * We give our sacrificial lamb high priority and access to
-	 * all the memory it needs. That way it should be able to
-	 * exit() and clear out its resources quickly...
-	 */
-	boost_dying_task_prio(p, mem);
-
 	return 0;
 }
 #undef K
@@ -482,7 +457,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	 */
 	if (p->flags & PF_EXITING) {
 		set_tsk_thread_flag(p, TIF_MEMDIE);
-		boost_dying_task_prio(p, mem);
 		return 0;
 	}
 
@@ -556,7 +530,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
 	 */
 	if (fatal_signal_pending(current)) {
 		set_thread_flag(TIF_MEMDIE);
-		boost_dying_task_prio(current, NULL);
 		return;
 	}
 
@@ -712,7 +685,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	 */
 	if (fatal_signal_pending(current)) {
 		set_thread_flag(TIF_MEMDIE);
-		boost_dying_task_prio(current, NULL);
 		return;
 	}
 