author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2012-12-12 16:51:02 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2012-12-12 20:38:31 -0500
commit	80371957f09814d25c38733d2d08de47f59a13c2
tree	2ebdedfa3a10af22941e44729f1c4c2623e8da74 /mm/huge_memory.c
parent	c5a647d09fe9fc3e0241c89845cf8e6220b916f5
thp: setup huge zero page on non-write page fault
All code paths seem covered. Now we can map the huge zero page on read page fault. We set it up in do_huge_pmd_anonymous_page() if the area around the fault address is suitable for THP and we've got a read page fault. If we fail to set up the huge zero page (ENOMEM) we fall back to handle_pte_fault() as we normally do in THP.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: "H. Peter Anvin" <hpa@linux.intel.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
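For illustration, a minimal userspace sketch of the behaviour this patch enables, assuming an x86_64 machine with THP set to "always"; the mapping size, the 4096-byte stride and the RSS check are illustrative choices, not part of the patch:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

#define SZ (16UL * 2 * 1024 * 1024)	/* 16 huge-page-sized (2MB) chunks */

int main(void)
{
	unsigned char *p = mmap(NULL, SZ, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	unsigned long i, sum = 0;

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	/*
	 * Read-only faults: with this patch (and THP enabled) these can be
	 * served by the shared huge zero page, so VmRSS in /proc/self/status
	 * should stay small until the region is actually written.
	 */
	for (i = 0; i < SZ; i += 4096)
		sum += p[i];
	printf("sum=%lu (expected 0)\n", sum);
	munmap(p, SZ);
	return 0;
}

Before this change, each read fault in the loop would allocate and zero a real huge page (or fall back to small pages); with the huge zero page the read-only part of the workload should cost almost no resident memory.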
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--	mm/huge_memory.c | 10 ++++++++++
1 file changed, 10 insertions(+), 0 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index ea0e23fd6967..e1b6f4e13b91 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -733,6 +733,16 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma)))
 		return VM_FAULT_OOM;
+	if (!(flags & FAULT_FLAG_WRITE)) {
+		pgtable_t pgtable;
+		pgtable = pte_alloc_one(mm, haddr);
+		if (unlikely(!pgtable))
+			return VM_FAULT_OOM;
+		spin_lock(&mm->page_table_lock);
+		set_huge_zero_page(pgtable, mm, vma, haddr, pmd);
+		spin_unlock(&mm->page_table_lock);
+		return 0;
+	}
 	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
 				  vma, haddr, numa_node_id(), 0);
 	if (unlikely(!page)) {
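The new branch relies on set_huge_zero_page(), introduced earlier in this patch series; its body is not part of this diff. A rough sketch of what such a helper looks like, reconstructed from the calls above (huge_zero_pfn and the exact deposit call are assumptions, not text copied from the tree):

/* Sketch only: approximate shape of the helper called by the hunk above. */
static void set_huge_zero_page(pgtable_t pgtable, struct mm_struct *mm,
		struct vm_area_struct *vma, unsigned long haddr, pmd_t *pmd)
{
	pmd_t entry;

	/* Build a write-protected huge PMD pointing at the shared zero page. */
	entry = pfn_pmd(huge_zero_pfn, vma->vm_page_prot);
	entry = pmd_wrprotect(entry);
	entry = pmd_mkhuge(entry);
	set_pmd_at(mm, haddr, pmd, entry);
	/* Stash the preallocated page table for a later PMD split. */
	pgtable_trans_huge_deposit(mm, pgtable);
	mm->nr_ptes++;
}

This is why the caller preallocates a page table with pte_alloc_one() before taking page_table_lock: the deposited table means a later split of the zero-page PMD (for example on a write fault) does not have to allocate memory.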