author	Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2013-09-12 18:14:05 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2013-09-12 18:38:03 -0400
commit	c02925540ca7019465a43c00f8a3c0186ddace2b (patch)
tree	3097ece86eedd0a01cf5dbc0a8f6c28fcbd1f4f7
parent	128ec037bafe5905b2e6f2796f426a1d247d0066 (diff)
thp: consolidate code between handle_mm_fault() and do_huge_pmd_anonymous_page()
do_huge_pmd_anonymous_page() has a copy-pasted piece of handle_mm_fault()
to handle its fallback path.  Let's consolidate the code back by
introducing a VM_FAULT_FALLBACK return code.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
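[Editor's note] Before this change, do_huge_pmd_anonymous_page() jumped to a
local out: label that re-implemented the small-page fault path; after it,
every fallback site simply returns VM_FAULT_FALLBACK and the single caller in
mm/memory.c owns the fallback. The standalone C sketch below models that
contract. It is not kernel code: the function names and all FAULT_* values
except 0x0800 (which mirrors VM_FAULT_FALLBACK) are hypothetical stand-ins.

/* Minimal userspace model of the consolidated fallback contract. */
#include <stdio.h>

#define FAULT_OK	0x0000
#define FAULT_FALLBACK	0x0800	/* mirrors VM_FAULT_FALLBACK */

/* Stand-in for do_huge_pmd_anonymous_page(): declines instead of
 * duplicating the small-page path. */
static int huge_fault(int can_map_huge)
{
	if (!can_map_huge)
		return FAULT_FALLBACK;	/* was: goto out + copy-pasted code */
	return FAULT_OK;
}

/* Stand-in for handle_pte_fault(), the small-page path. */
static int small_fault(void)
{
	return FAULT_OK;
}

/* Stand-in for the caller in mm/memory.c: the one place that falls back. */
static int handle_fault(int can_map_huge)
{
	int ret = huge_fault(can_map_huge);

	if (!(ret & FAULT_FALLBACK))
		return ret;		/* huge path fully handled the fault */
	return small_fault();		/* consolidated fallback */
}

int main(void)
{
	printf("huge ok:  %#x\n", handle_fault(1));
	printf("fallback: %#x\n", handle_fault(0));
	return 0;
}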
 include/linux/huge_mm.h |  3 ---
 include/linux/mm.h      |  3 ++-
 mm/huge_memory.c        | 31 +++++--------------------------
 mm/memory.c             |  9 ++++++---
 4 files changed, 13 insertions(+), 33 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index b60de92e2edc..3935428c57cf 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -96,9 +96,6 @@ extern int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm,
 			pmd_t *dst_pmd, pmd_t *src_pmd,
 			struct vm_area_struct *vma,
 			unsigned long addr, unsigned long end);
-extern int handle_pte_fault(struct mm_struct *mm,
-			struct vm_area_struct *vma, unsigned long address,
-			pte_t *pte, pmd_t *pmd, unsigned int flags);
 extern int split_huge_page_to_list(struct page *page, struct list_head *list);
 static inline int split_huge_page(struct page *page)
 {
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 928df792c005..8b6e55ee8855 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -877,11 +877,12 @@ static inline int page_mapped(struct page *page)
 #define VM_FAULT_NOPAGE	0x0100	/* ->fault installed the pte, not return page */
 #define VM_FAULT_LOCKED	0x0200	/* ->fault locked the returned page */
 #define VM_FAULT_RETRY	0x0400	/* ->fault blocked, must retry */
+#define VM_FAULT_FALLBACK 0x0800	/* huge page fault failed, fall back to small */
 
 #define VM_FAULT_HWPOISON_LARGE_MASK 0xf000 /* encodes hpage index for large hwpoison */
 
 #define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
-			 VM_FAULT_HWPOISON_LARGE)
+			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)
 
 /* Encode hstate index for a hwpoisoned large page */
 #define VM_FAULT_SET_HINDEX(x) ((x) << 12)
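[Editor's note] A quick way to sanity-check the new bit: it must not collide
with any existing VM_FAULT_* value, and since it is OR-ed into VM_FAULT_ERROR,
a caller that only tests the error mask still notices an unhandled fallback.
A small userspace demo follows; the flag values not visible in the hunk above
are assumed from the same era's include/linux/mm.h.

/* Userspace assertion demo of the bitmask arithmetic. */
#include <assert.h>

#define VM_FAULT_OOM		0x0001
#define VM_FAULT_SIGBUS		0x0002
#define VM_FAULT_HWPOISON	0x0010
#define VM_FAULT_HWPOISON_LARGE	0x0020
#define VM_FAULT_NOPAGE		0x0100
#define VM_FAULT_LOCKED		0x0200
#define VM_FAULT_RETRY		0x0400
#define VM_FAULT_FALLBACK	0x0800	/* new in this patch */

#define VM_FAULT_ERROR	(VM_FAULT_OOM | VM_FAULT_SIGBUS | VM_FAULT_HWPOISON | \
			 VM_FAULT_FALLBACK | VM_FAULT_HWPOISON_LARGE)

int main(void)
{
	/* The new bit is disjoint from every pre-existing flag. */
	assert(!(VM_FAULT_FALLBACK & (VM_FAULT_OOM | VM_FAULT_SIGBUS |
				      VM_FAULT_HWPOISON |
				      VM_FAULT_HWPOISON_LARGE |
				      VM_FAULT_NOPAGE | VM_FAULT_LOCKED |
				      VM_FAULT_RETRY)));
	/* And the error mask catches it, so a fallback that leaks past
	 * handle_mm_fault() cannot be mistaken for success. */
	assert(VM_FAULT_ERROR & VM_FAULT_FALLBACK);
	return 0;
}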
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 6551dd06dd64..243f4cc75777 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -783,10 +783,9 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 {
 	struct page *page;
 	unsigned long haddr = address & HPAGE_PMD_MASK;
-	pte_t *pte;
 
 	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
-		goto out;
+		return VM_FAULT_FALLBACK;
 	if (unlikely(anon_vma_prepare(vma)))
 		return VM_FAULT_OOM;
 	if (unlikely(khugepaged_enter(vma)))
@@ -803,7 +802,7 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (unlikely(!zero_page)) {
 		pte_free(mm, pgtable);
 		count_vm_event(THP_FAULT_FALLBACK);
-		goto out;
+		return VM_FAULT_FALLBACK;
 	}
 	spin_lock(&mm->page_table_lock);
 	set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
@@ -819,40 +818,20 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 					  vma, haddr, numa_node_id(), 0);
 	if (unlikely(!page)) {
 		count_vm_event(THP_FAULT_FALLBACK);
-		goto out;
+		return VM_FAULT_FALLBACK;
 	}
 	count_vm_event(THP_FAULT_ALLOC);
 	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
 		put_page(page);
-		goto out;
+		return VM_FAULT_FALLBACK;
 	}
 	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
 		mem_cgroup_uncharge_page(page);
 		put_page(page);
-		goto out;
+		return VM_FAULT_FALLBACK;
 	}
 
 	return 0;
-out:
-	/*
-	 * Use __pte_alloc instead of pte_alloc_map, because we can't
-	 * run pte_offset_map on the pmd, if an huge pmd could
-	 * materialize from under us from a different thread.
-	 */
-	if (unlikely(pmd_none(*pmd)) &&
-	    unlikely(__pte_alloc(mm, vma, pmd, address)))
-		return VM_FAULT_OOM;
-	/* if an huge pmd materialized from under us just retry later */
-	if (unlikely(pmd_trans_huge(*pmd)))
-		return 0;
-	/*
-	 * A regular pmd is established and it can't morph into a huge pmd
-	 * from under us anymore at this point because we hold the mmap_sem
-	 * read mode and khugepaged takes it in write mode. So now it's
-	 * safe to run pte_offset_map().
-	 */
-	pte = pte_offset_map(pmd, address);
-	return handle_pte_fault(mm, vma, address, pte, pmd, flags);
 }
 
 int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
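[Editor's note] The twenty lines deleted at the old out: label are not lost:
allocating a pte table with __pte_alloc(), retrying when a huge pmd
materializes from under us, and calling handle_pte_fault() are what the
generic fault path in mm/memory.c already does once
do_huge_pmd_anonymous_page() returns VM_FAULT_FALLBACK. A simplified
userspace model of that duty follows; all names are assumed stand-ins, and
locking and allocation failure are deliberately left out.

/* Simplified model of the fallback duty now owned by mm/memory.c alone. */
enum pmd_state { PMD_NONE, PMD_TABLE, PMD_HUGE };

#define FAULT_OK	0x0000
#define FAULT_OOM	0x0001

/* Models __pte_alloc(): install a pte table only if the pmd is still
 * empty; returns nonzero on allocation failure (never, in this model). */
static int pte_alloc_model(enum pmd_state *pmd)
{
	if (*pmd == PMD_NONE)
		*pmd = PMD_TABLE;
	return 0;
}

/* Models handle_pte_fault(), the small-page path. */
static int small_fault_model(void)
{
	return FAULT_OK;
}

static int generic_fallback(enum pmd_state *pmd)
{
	if (*pmd == PMD_NONE && pte_alloc_model(pmd))
		return FAULT_OOM;
	if (*pmd == PMD_HUGE)	/* a huge pmd raced in from another thread */
		return FAULT_OK;/* just return; the fault will be retried */
	return small_fault_model();	/* pmd now points to a pte table */
}

int main(void)
{
	enum pmd_state pmd = PMD_NONE;
	return generic_fallback(&pmd);	/* allocates a table, small fault */
}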
diff --git a/mm/memory.c b/mm/memory.c
index 5ec6f199e685..ca0003947115 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -3695,7 +3695,7 @@ static int do_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
  * but allow concurrent faults), and pte mapped but not yet locked.
  * We return with mmap_sem still held, but pte unmapped and unlocked.
  */
-int handle_pte_fault(struct mm_struct *mm,
+static int handle_pte_fault(struct mm_struct *mm,
 		     struct vm_area_struct *vma, unsigned long address,
 		     pte_t *pte, pmd_t *pmd, unsigned int flags)
 {
@@ -3774,9 +3774,12 @@ retry:
 	if (!pmd)
 		return VM_FAULT_OOM;
 	if (pmd_none(*pmd) && transparent_hugepage_enabled(vma)) {
+		int ret = VM_FAULT_FALLBACK;
 		if (!vma->vm_ops)
-			return do_huge_pmd_anonymous_page(mm, vma, address,
-					pmd, flags);
+			ret = do_huge_pmd_anonymous_page(mm, vma, address,
+					pmd, flags);
+		if (!(ret & VM_FAULT_FALLBACK))
+			return ret;
 	} else {
 		pmd_t orig_pmd = *pmd;
 		int ret;
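[Editor's note] Note the idiom in the new caller code: ret starts out as
VM_FAULT_FALLBACK, so a VMA with vm_ops set (a file mapping, which the
anonymous-THP path cannot serve) skips the huge attempt entirely and still
funnels into the small-page path through the same test as a declined attempt.
A hedged model of just that idiom, with stand-in names rather than the kernel
function:

/* Userspace model of the initialize-to-FALLBACK idiom. */
#include <stdio.h>

#define FAULT_FALLBACK	0x0800

/* Stand-in for do_huge_pmd_anonymous_page(). */
static int try_huge(int huge_ok)
{
	return huge_ok ? 0 : FAULT_FALLBACK;
}

static int fault(int pmd_none_and_thp, int anonymous, int huge_ok)
{
	if (pmd_none_and_thp) {
		int ret = FAULT_FALLBACK;	/* default: fall back */

		if (anonymous)			/* models !vma->vm_ops */
			ret = try_huge(huge_ok);
		if (!(ret & FAULT_FALLBACK))
			return ret;		/* huge attempt handled it */
	}
	return 0;	/* small-page path: "skipped" and "declined"
			   converge on one exit */
}

int main(void)
{
	printf("%#x\n", fault(1, 1, 1));	/* huge page installed */
	printf("%#x\n", fault(1, 1, 0));	/* declined -> small path */
	printf("%#x\n", fault(1, 0, 0));	/* file VMA -> small path */
	return 0;
}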