aboutsummaryrefslogtreecommitdiffstats
path: root/mm/oom_kill.c
diff options
context:
space:
mode:
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--  mm/oom_kill.c  34
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 942861bf9177..31bd0c344fa7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -82,6 +82,24 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
82#endif /* CONFIG_NUMA */ 82#endif /* CONFIG_NUMA */
83 83
84/* 84/*
85 * If this is a system OOM (not a memcg OOM) and the task selected to be
86 * killed is not already running at high (RT) priorities, speed up the
87 * recovery by boosting the dying task to the lowest FIFO priority.
88 * That helps with the recovery and avoids interfering with RT tasks.
89 */
90static void boost_dying_task_prio(struct task_struct *p,
91 struct mem_cgroup *mem)
92{
93 struct sched_param param = { .sched_priority = 1 };
94
95 if (mem)
96 return;
97
98 if (!rt_task(p))
99 sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
100}
101
102/*
85 * The process p may have detached its own ->mm while exiting or through 103 * The process p may have detached its own ->mm while exiting or through
86 * use_mm(), but one or more of its subthreads may still have a valid 104 * use_mm(), but one or more of its subthreads may still have a valid
87 * pointer. Return p, or any of its subthreads with a valid ->mm, with 105 * pointer. Return p, or any of its subthreads with a valid ->mm, with
@@ -421,7 +439,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
421} 439}
422 440
423#define K(x) ((x) << (PAGE_SHIFT-10)) 441#define K(x) ((x) << (PAGE_SHIFT-10))
424static int oom_kill_task(struct task_struct *p) 442static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
425{ 443{
426 p = find_lock_task_mm(p); 444 p = find_lock_task_mm(p);
427 if (!p) { 445 if (!p) {
@@ -434,9 +452,17 @@ static int oom_kill_task(struct task_struct *p)
434 K(get_mm_counter(p->mm, MM_FILEPAGES))); 452 K(get_mm_counter(p->mm, MM_FILEPAGES)));
435 task_unlock(p); 453 task_unlock(p);
436 454
437 p->rt.time_slice = HZ; 455
438 set_tsk_thread_flag(p, TIF_MEMDIE); 456 set_tsk_thread_flag(p, TIF_MEMDIE);
439 force_sig(SIGKILL, p); 457 force_sig(SIGKILL, p);
458
459 /*
460 * We give our sacrificial lamb high priority and access to
461 * all the memory it needs. That way it should be able to
462 * exit() and clear out its resources quickly...
463 */
464 boost_dying_task_prio(p, mem);
465
440 return 0; 466 return 0;
441} 467}
442#undef K 468#undef K
@@ -460,6 +486,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
460 */ 486 */
461 if (p->flags & PF_EXITING) { 487 if (p->flags & PF_EXITING) {
462 set_tsk_thread_flag(p, TIF_MEMDIE); 488 set_tsk_thread_flag(p, TIF_MEMDIE);
489 boost_dying_task_prio(p, mem);
463 return 0; 490 return 0;
464 } 491 }
465 492
@@ -489,7 +516,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
489 } 516 }
490 } while_each_thread(p, t); 517 } while_each_thread(p, t);
491 518
492 return oom_kill_task(victim); 519 return oom_kill_task(victim, mem);
493} 520}
494 521
495/* 522/*
@@ -670,6 +697,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
670 */ 697 */
671 if (fatal_signal_pending(current)) { 698 if (fatal_signal_pending(current)) {
672 set_thread_flag(TIF_MEMDIE); 699 set_thread_flag(TIF_MEMDIE);
700 boost_dying_task_prio(current, NULL);
673 return; 701 return;
674 } 702 }
675 703