Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--  mm/oom_kill.c  28
1 file changed, 0 insertions, 28 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 6a819d1b2c7d..83fb72c108b7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -84,24 +84,6 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
 #endif /* CONFIG_NUMA */
 
 /*
- * If this is a system OOM (not a memcg OOM) and the task selected to be
- * killed is not already running at high (RT) priorities, speed up the
- * recovery by boosting the dying task to the lowest FIFO priority.
- * That helps with the recovery and avoids interfering with RT tasks.
- */
-static void boost_dying_task_prio(struct task_struct *p,
-				  struct mem_cgroup *mem)
-{
-	struct sched_param param = { .sched_priority = 1 };
-
-	if (mem)
-		return;
-
-	if (!rt_task(p))
-		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
-}
-
-/*
  * The process p may have detached its own ->mm while exiting or through
  * use_mm(), but one or more of its subthreads may still have a valid
  * pointer. Return p, or any of its subthreads with a valid ->mm, with
@@ -452,13 +434,6 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 	force_sig(SIGKILL, p);
 
-	/*
-	 * We give our sacrificial lamb high priority and access to
-	 * all the memory it needs. That way it should be able to
-	 * exit() and clear out its resources quickly...
-	 */
-	boost_dying_task_prio(p, mem);
-
 	return 0;
 }
 #undef K
@@ -482,7 +457,6 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	 */
 	if (p->flags & PF_EXITING) {
 		set_tsk_thread_flag(p, TIF_MEMDIE);
-		boost_dying_task_prio(p, mem);
 		return 0;
 	}
 
@@ -556,7 +530,6 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *mem, gfp_t gfp_mask)
 	 */
 	if (fatal_signal_pending(current)) {
 		set_thread_flag(TIF_MEMDIE);
-		boost_dying_task_prio(current, NULL);
 		return;
 	}
 
@@ -712,7 +685,6 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	 */
 	if (fatal_signal_pending(current)) {
 		set_thread_flag(TIF_MEMDIE);
-		boost_dying_task_prio(current, NULL);
 		return;
 	}
 
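Note: the helper removed above boosted the OOM victim to the lowest SCHED_FIFO priority through the in-kernel sched_setscheduler_nocheck(). As a purely illustrative sketch (not part of this patch, and only a userspace analogue of the in-kernel call), the program below mimics the removed !rt_task()/priority-1 logic for the calling process via sched_setscheduler(2); it needs CAP_SYS_NICE or root to succeed.

/*
 * Illustrative userspace analogue of the removed boost_dying_task_prio()
 * logic (assumption: for explanation only, not part of the patch): if the
 * calling process is not already a realtime task, move it to the lowest
 * SCHED_FIFO priority (1). Requires CAP_SYS_NICE (or root).
 */
#include <sched.h>
#include <stdio.h>

int main(void)
{
	struct sched_param param = { .sched_priority = 1 };
	int policy = sched_getscheduler(0);	/* 0 == calling process */

	if (policy == SCHED_FIFO || policy == SCHED_RR) {
		/* Mirrors the !rt_task(p) check: leave RT tasks alone. */
		printf("already a realtime task, not boosting\n");
		return 0;
	}

	if (sched_setscheduler(0, SCHED_FIFO, &param) != 0) {
		perror("sched_setscheduler");
		return 1;
	}

	printf("boosted to SCHED_FIFO priority 1\n");
	return 0;
}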