author     David Rientjes <rientjes@google.com>              2012-03-21 19:33:46 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>    2012-03-21 20:54:55 -0400
commit     2a1c9b1fc0a0ea2e30cdeb69062647c5c5ae661f (patch)
tree       a123d061a5d95984e0446a350189f4a310d06b53 /mm/oom_kill.c
parent     ce24d8a14207c2036df86d2bd3d14b4393eb51e3 (diff)
mm, oom: avoid looping when chosen thread detaches its mm
oom_kill_task() returns non-zero iff the chosen process does not have any
threads with an attached ->mm.
In such a case, it's better to just return to the page allocator and retry
the allocation because memory could have been freed in the interim and the
oom condition may no longer exist. It's unnecessary to loop in the oom
killer and find another thread to kill.
This allows both oom_kill_task() and oom_kill_process() to be converted to
void functions. If the oom condition persists, the oom killer will be
recalled.
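
For illustration only, the control-flow change can be modelled in plain userspace C. The sketch below is not kernel code: try_allocate() and oom_kill_once() are invented stand-ins for the page allocator slowpath and oom_kill_process(). The point it shows is that the retry loop belongs to the allocator, while the OOM path acts on at most one victim and returns.

/*
 * Minimal stand-alone sketch of the new control flow; NOT kernel code.
 * try_allocate() and oom_kill_once() are invented stand-ins for the page
 * allocator slowpath and oom_kill_process() respectively.
 */
#include <stdbool.h>
#include <stdio.h>

static int attempts;

/* Pretend the allocation starts succeeding once memory has been freed. */
static bool try_allocate(void)
{
	return ++attempts > 2;
}

/* New behaviour: void, no internal "pick another victim" loop. */
static void oom_kill_once(void)
{
	/*
	 * If the chosen victim has already detached its ->mm, just return:
	 * the exiting task may have freed memory in the interim.
	 */
	printf("attempt %d: killed (or skipped) one victim\n", attempts);
}

int main(void)
{
	/*
	 * The allocator is the retry loop; the OOM killer is re-invoked
	 * only if the OOM condition persists.
	 */
	while (!try_allocate())
		oom_kill_once();

	printf("allocation succeeded after %d attempts\n", attempts);
	return 0;
}

This mirrors the commit message: if the OOM condition persists after a victim exits, the allocator simply invokes the OOM killer again rather than having the killer hunt for a second victim itself.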
Acked-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Michal Hocko <mhocko@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/oom_kill.c')
-rw-r--r--  mm/oom_kill.c | 56
1 file changed, 20 insertions, 36 deletions
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 2958fd8e7c9a..a26695fe8833 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -434,14 +434,14 @@ static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order,
 }
 
 #define K(x) ((x) << (PAGE_SHIFT-10))
-static int oom_kill_task(struct task_struct *p)
+static void oom_kill_task(struct task_struct *p)
 {
 	struct task_struct *q;
 	struct mm_struct *mm;
 
 	p = find_lock_task_mm(p);
 	if (!p)
-		return 1;
+		return;
 
 	/* mm cannot be safely dereferenced after task_unlock(p) */
 	mm = p->mm;
@@ -477,15 +477,13 @@ static int oom_kill_task(struct task_struct *p)
 
 	set_tsk_thread_flag(p, TIF_MEMDIE);
 	force_sig(SIGKILL, p);
-
-	return 0;
 }
 #undef K
 
-static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
+static void oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 			    unsigned int points, unsigned long totalpages,
 			    struct mem_cgroup *memcg, nodemask_t *nodemask,
 			    const char *message)
 {
 	struct task_struct *victim = p;
 	struct task_struct *child;
@@ -501,7 +499,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 	 */
 	if (p->flags & PF_EXITING) {
 		set_tsk_thread_flag(p, TIF_MEMDIE);
-		return 0;
+		return;
 	}
 
 	task_lock(p);
@@ -533,7 +531,7 @@ static int oom_kill_process(struct task_struct *p, gfp_t gfp_mask, int order,
 		}
 	} while_each_thread(p, t);
 
-	return oom_kill_task(victim);
+	oom_kill_task(victim);
 }
 
 /*
@@ -580,15 +578,10 @@ void mem_cgroup_out_of_memory(struct mem_cgroup *memcg, gfp_t gfp_mask)
 	check_panic_on_oom(CONSTRAINT_MEMCG, gfp_mask, 0, NULL);
 	limit = mem_cgroup_get_limit(memcg) >> PAGE_SHIFT;
 	read_lock(&tasklist_lock);
-retry:
 	p = select_bad_process(&points, limit, memcg, NULL);
-	if (!p || PTR_ERR(p) == -1UL)
-		goto out;
-
-	if (oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
-			     "Memory cgroup out of memory"))
-		goto retry;
-out:
+	if (p && PTR_ERR(p) != -1UL)
+		oom_kill_process(p, gfp_mask, 0, points, limit, memcg, NULL,
+				 "Memory cgroup out of memory");
 	read_unlock(&tasklist_lock);
 }
 #endif
@@ -745,33 +738,24 @@ void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask,
 	if (sysctl_oom_kill_allocating_task &&
 	    !oom_unkillable_task(current, NULL, nodemask) &&
 	    current->mm) {
-		/*
-		 * oom_kill_process() needs tasklist_lock held. If it returns
-		 * non-zero, current could not be killed so we must fallback to
-		 * the tasklist scan.
-		 */
-		if (!oom_kill_process(current, gfp_mask, order, 0, totalpages,
-				      NULL, nodemask,
-				      "Out of memory (oom_kill_allocating_task)"))
-			goto out;
+		oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL,
+				 nodemask,
+				 "Out of memory (oom_kill_allocating_task)");
+		goto out;
 	}
 
-retry:
 	p = select_bad_process(&points, totalpages, NULL, mpol_mask);
-	if (PTR_ERR(p) == -1UL)
-		goto out;
-
 	/* Found nothing?!?! Either we hang forever, or we panic. */
 	if (!p) {
 		dump_header(NULL, gfp_mask, order, NULL, mpol_mask);
 		read_unlock(&tasklist_lock);
 		panic("Out of memory and no killable processes...\n");
 	}
-
-	if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
-			     nodemask, "Out of memory"))
-		goto retry;
-	killed = 1;
+	if (PTR_ERR(p) != -1UL) {
+		oom_kill_process(p, gfp_mask, order, points, totalpages, NULL,
+				 nodemask, "Out of memory");
+		killed = 1;
+	}
 out:
 	read_unlock(&tasklist_lock);
 