author     David Rientjes <rientjes@google.com>  2012-05-29 18:06:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2012-05-29 19:22:19 -0400
commit     edad9d2c337d43278a9d5aeb0ed531c2e838f8a6 (patch)
tree       9cd9aba0647d5d1eedd602451cd34f2d514fd30d /mm
parent     aa2e878efa7949c8502c9760f92835222714f090 (diff)
mm, thp: allow fallback when pte_alloc_one() fails for huge pmd
The transparent hugepages feature is careful not to invoke the oom killer when a hugepage cannot be allocated.

pte_alloc_one() failing in __do_huge_pmd_anonymous_page(), however, currently results in VM_FAULT_OOM, which invokes the pagefault oom killer to kill a memory-hogging task. This is unnecessary since it's possible to drop the reference to the hugepage and fall back to allocating a small page.

Signed-off-by: David Rientjes <rientjes@google.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
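For context, the fallback path the patch relies on is the out: label in do_huge_pmd_anonymous_page(): whenever the huge-pmd path cannot complete, the fault is retried with a regular small page via the normal pte fault path. Below is a condensed, illustrative sketch of that function as it looks with this patch applied; it is not verbatim kernel source, and the surrounding details (anon_vma/khugepaged setup, THP vm-event counters, the pmd_trans_huge recheck, and locking) are elided or reconstructed from the kernel of that era.

/*
 * Condensed sketch of mm/huge_memory.c:do_huge_pmd_anonymous_page()
 * with this patch applied -- illustrative only, not verbatim source.
 * A failure while installing the huge pmd now releases the hugepage
 * and falls through to the small-page path instead of returning
 * VM_FAULT_OOM.
 */
int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                               unsigned long address, pmd_t *pmd,
                               unsigned int flags)
{
        unsigned long haddr = address & HPAGE_PMD_MASK;
        struct page *page;
        pte_t *pte;

        if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
                /* anon_vma / khugepaged setup elided */
                page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
                                          vma, haddr, numa_node_id(), 0);
                if (unlikely(!page))
                        goto out;       /* no hugepage available: fall back */
                if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
                        put_page(page);
                        goto out;       /* charge failed: fall back */
                }
                if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
                                                          page))) {
                        /* e.g. pte_alloc_one() failed inside the helper:
                         * drop the hugepage and retry with a small page
                         * rather than invoking the pagefault oom killer. */
                        mem_cgroup_uncharge_page(page);
                        put_page(page);
                        goto out;
                }
                return 0;
        }
out:
        /* fall back: allocate a page table and fault in a normal page */
        if (unlikely(__pte_alloc(mm, vma, pmd, address)))
                return VM_FAULT_OOM;
        pte = pte_offset_map(pmd, address);
        return handle_pte_fault(mm, vma, address, pte, pmd, flags);
}

The point of the patch is visible in the third if-block: the helper's failure is now handled by the caller with a goto out (after undoing the memcg charge and page reference), so VM_FAULT_OOM is reserved for the case where even the small-page fallback cannot allocate a page table.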
Diffstat (limited to 'mm')
-rw-r--r--  mm/huge_memory.c  13
1 file changed, 8 insertions(+), 5 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 8ab2d24faae5..d7d7165156ca 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -640,11 +640,8 @@ static int __do_huge_pmd_anonymous_page(struct mm_struct *mm,
 
         VM_BUG_ON(!PageCompound(page));
         pgtable = pte_alloc_one(mm, haddr);
-        if (unlikely(!pgtable)) {
-                mem_cgroup_uncharge_page(page);
-                put_page(page);
+        if (unlikely(!pgtable))
                 return VM_FAULT_OOM;
-        }
 
         clear_huge_page(page, haddr, HPAGE_PMD_NR);
         __SetPageUptodate(page);
@@ -723,8 +720,14 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
                         put_page(page);
                         goto out;
                 }
+                if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
+                                                          page))) {
+                        mem_cgroup_uncharge_page(page);
+                        put_page(page);
+                        goto out;
+                }
 
-                return __do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page);
+                return 0;
         }
 out:
         /*