author     Luis Claudio R. Goncalves <lclaudio@uudg.org>   2010-08-09 20:19:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-08-09 23:45:02 -0400
commit     93b43fa55088fe977503a156d1097cc2055449a2 (patch)
tree       32c688e20c3ac9b30edd9c240c98f2f779b20e67 /mm
parent     19b4586cd9c8ed642798902e55c6f61ed576ad93 (diff)
oom: give the dying task a higher priority
In a system under heavy load it was observed that, even after the
oom-killer selects a task to die, that task may take a long time to
actually die.

Right after sending a SIGKILL to the task selected by the oom-killer,
the task has its priority increased so that it can exit() soon and free
its memory. That is accomplished by:
	/*
	 * We give our sacrificial lamb high priority and access to
	 * all the memory it needs. That way it should be able to
	 * exit() and clear out its resources quickly...
	 */
	p->rt.time_slice = HZ;
	set_tsk_thread_flag(p, TIF_MEMDIE);
It sounds plausible to give the dying task an even higher priority, to
be sure it will be scheduled sooner and free the desired memory. It was
suggested on LKML to use SCHED_FIFO:1, the lowest RT priority, so that
this task won't interfere with any running RT task.
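For illustration only (not part of this patch): SCHED_FIFO:1 is the same
policy/priority pair that userspace reaches through the POSIX scheduler
API; the in-kernel equivalent, sched_setscheduler_nocheck(), appears in
the diff below. A minimal userspace sketch, assuming root or
CAP_SYS_NICE:

	/* Hedged illustration: put the calling process at SCHED_FIFO
	 * priority 1, the lowest RT priority. It preempts all normal
	 * (SCHED_OTHER) tasks but yields to any higher-priority RT task.
	 * Needs root or CAP_SYS_NICE; pid 0 means "the calling process".
	 */
	#include <sched.h>
	#include <stdio.h>

	int main(void)
	{
		struct sched_param param = { .sched_priority = 1 };

		if (sched_setscheduler(0, SCHED_FIFO, &param) != 0) {
			perror("sched_setscheduler");
			return 1;
		}
		printf("now running at SCHED_FIFO:1\n");
		return 0;
	}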
If the dying task is already an RT task, it is left untouched. Another
good suggestion, implemented here, was to avoid boosting the dying
task's priority in the case of a mem_cgroup OOM.
Signed-off-by: Luis Claudio R. Goncalves <lclaudio@uudg.org>
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Reviewed-by: Minchan Kim <minchan.kim@gmail.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Oleg Nesterov <oleg@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--	mm/oom_kill.c | 34
1 file changed, 31 insertions(+), 3 deletions(-)
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 942861bf9177..31bd0c344fa7 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -82,6 +82,24 @@ static bool has_intersects_mems_allowed(struct task_struct *tsk,
 #endif /* CONFIG_NUMA */
 
 /*
+ * If this is a system OOM (not a memcg OOM) and the task selected to be
+ * killed is not already running at high (RT) priorities, speed up the
+ * recovery by boosting the dying task to the lowest FIFO priority.
+ * That helps with the recovery and avoids interfering with RT tasks.
+ */
+static void boost_dying_task_prio(struct task_struct *p,
+				  struct mem_cgroup *mem)
+{
+	struct sched_param param = { .sched_priority = 1 };
+
+	if (mem)
+		return;
+
+	if (!rt_task(p))
+		sched_setscheduler_nocheck(p, SCHED_FIFO, &param);
+}
+
+/*
  * The process p may have detached its own ->mm while exiting or through
  * use_mm(), but one or more of its subthreads may still have a valid
  * pointer. Return p, or any of its subthreads with a valid ->mm, with
@@ -421,7 +439,7 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
-static int oom_kill_task(struct task_struct *p)
+static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
 {
 	p = find_lock_task_mm(p);
 	if (!p) {
@@ -434,9 +452,17 @@ static int oom_kill_task(struct task_struct *p)
 		K(get_mm_counter(p->mm, MM_FILEPAGES)));
 	task_unlock(p);
 
-	p->rt.time_slice = HZ;
+
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 	force_sig(SIGKILL, p);
+
+	/*
+	 * We give our sacrificial lamb high priority and access to
+	 * all the memory it needs. That way it should be able to
+	 * exit() and clear out its resources quickly...
+	 */
+	boost_dying_task_prio(p, mem);
+
 	return 0;
 }
 #undef K
@@ -460,6 +486,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	 */
 	if (p->flags & PF_EXITING) {
 		set_tsk_thread_flag(p, TIF_MEMDIE);
+		boost_dying_task_prio(p, mem);
 		return 0;
 	}
 
@@ -489,7 +516,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 		}
 	} while_each_thread(p, t);
 
-	return oom_kill_task(victim);
+	return oom_kill_task(victim, mem);
 }
 
 /*
@@ -670,6 +697,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	 */
 	if (fatal_signal_pending(current)) {
 		set_thread_flag(TIF_MEMDIE);
+		boost_dying_task_prio(current, NULL);
 		return;
 	}
 
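As a reader aid (not part of the commit): the effect of the boost can be
observed from userspace by querying the victim's scheduling policy while
it exits. A minimal sketch using the standard POSIX query calls,
assuming the victim's pid is passed as argv[1]:

	/* Hedged sketch: report the scheduling policy and RT priority
	 * of a pid. With this patch applied, the victim of a
	 * system-wide (non-memcg) OOM should briefly report SCHED_FIFO
	 * with priority 1 while it is exiting.
	 */
	#include <sched.h>
	#include <stdio.h>
	#include <stdlib.h>

	int main(int argc, char **argv)
	{
		struct sched_param param;
		pid_t pid;
		int policy;

		if (argc < 2)
			return 1;
		pid = (pid_t)atoi(argv[1]);
		policy = sched_getscheduler(pid);
		if (policy < 0 || sched_getparam(pid, &param) != 0) {
			perror("sched_getscheduler/sched_getparam");
			return 1;
		}
		printf("pid %d: policy=%s rt_priority=%d\n", (int)pid,
		       policy == SCHED_FIFO ? "SCHED_FIFO" : "other",
		       param.sched_priority);
		return 0;
	}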