about summary refs log tree commit diff stats
path: root/mm/oom_kill.c
diff options
context:
space:
mode:
authorDavid Rientjes <rientjes@google.com>2011-10-31 20:07:15 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2011-10-31 20:30:45 -0400
commitc9f01245b6a7d77d17deaa71af10f6aca14fa24e (patch)
tree13ffde591a5bcefba39cb6393f09b27f1ebc1a30 /mm/oom_kill.c
parent7b0d44fa49b1dcfdcf4897f12ddd12ddeab1a9d7 (diff)
oom: remove oom_disable_count
This removes mm->oom_disable_count entirely since it's unnecessary and currently buggy. The counter was intended to be per-process but it's currently decremented in the exit path for each thread that exits, causing it to underflow.

The count was originally intended to prevent oom killing threads that share memory with threads that cannot be killed since it doesn't lead to future memory freeing. The counter could be fixed to represent all threads sharing the same mm, but it's better to remove the count since:

 - it is possible that the OOM_DISABLE thread sharing memory with the victim is waiting on that thread to exit and will actually cause future memory freeing, and

 - there is no guarantee that a thread is disabled from oom killing just because another thread sharing its mm is oom disabled.

Signed-off-by: David Rientjes <rientjes@google.com>
Reported-by: Oleg Nesterov <oleg@redhat.com>
Reviewed-by: Oleg Nesterov <oleg@redhat.com>
Cc: Ying Han <yinghan@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--mm/oom_kill.c23
1 files changed, 5 insertions, 18 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index b0d8943bc9fd..2b97e8f04607 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -54,13 +54,7 @@ int test_set_oom_score_adj(int new_val)
54 54
55 spin_lock_irq(&sighand->siglock); 55 spin_lock_irq(&sighand->siglock);
56 old_val = current->signal->oom_score_adj; 56 old_val = current->signal->oom_score_adj;
57 if (new_val != old_val) { 57 current->signal->oom_score_adj = new_val;
58 if (new_val == OOM_SCORE_ADJ_MIN)
59 atomic_inc(&current->mm->oom_disable_count);
60 else if (old_val == OOM_SCORE_ADJ_MIN)
61 atomic_dec(&current->mm->oom_disable_count);
62 current->signal->oom_score_adj = new_val;
63 }
64 spin_unlock_irq(&sighand->siglock); 58 spin_unlock_irq(&sighand->siglock);
65 59
66 return old_val; 60 return old_val;
@@ -173,16 +167,6 @@ unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem,
173 return 0; 167 return 0;
174 168
175 /* 169 /*
176 * Shortcut check for a thread sharing p->mm that is OOM_SCORE_ADJ_MIN
177 * so the entire heuristic doesn't need to be executed for something
178 * that cannot be killed.
179 */
180 if (atomic_read(&p->mm->oom_disable_count)) {
181 task_unlock(p);
182 return 0;
183 }
184
185 /*
186 * The memory controller may have a limit of 0 bytes, so avoid a divide 170 * The memory controller may have a limit of 0 bytes, so avoid a divide
187 * by zero, if necessary. 171 * by zero, if necessary.
188 */ 172 */
@@ -451,6 +435,9 @@ static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem)
451 for_each_process(q) 435 for_each_process(q)
452 if (q->mm == mm && !same_thread_group(q, p) && 436 if (q->mm == mm && !same_thread_group(q, p) &&
453 !(q->flags & PF_KTHREAD)) { 437 !(q->flags & PF_KTHREAD)) {
438 if (q->signal->oom_score_adj == OOM_SCORE_ADJ_MIN)
439 continue;
440
454 task_lock(q); /* Protect ->comm from prctl() */ 441 task_lock(q); /* Protect ->comm from prctl() */
455 pr_err("Kill process %d (%s) sharing same memory\n", 442 pr_err("Kill process %d (%s) sharing same memory\n",
456 task_pid_nr(q), q->comm); 443 task_pid_nr(q), q->comm);
@@ -727,7 +714,7 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
727 read_lock(&tasklist_lock); 714 read_lock(&tasklist_lock);
728 if (sysctl_oom_kill_allocating_task && 715 if (sysctl_oom_kill_allocating_task &&
729 !oom_unkillable_task(current, NULL, nodemask) && 716 !oom_unkillable_task(current, NULL, nodemask) &&
730 current->mm && !atomic_read(&current->mm->oom_disable_count)) { 717 current->mm) {
731 /* 718 /*
732 * oom_kill_process() needs tasklist_lock held. If it returns 719 * oom_kill_process() needs tasklist_lock held. If it returns
733 * non-zero, current could not be killed so we must fallback to 720 * non-zero, current could not be killed so we must fallback to