author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>  2013-09-12 18:14:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>        2013-09-12 18:38:03 -0400
commit     128ec037bafe5905b2e6f2796f426a1d247d0066 (patch)
tree       d4e04fe53d227686ed17e18b3f6016cbd5236149 /mm/huge_memory.c
parent     3122359a64829afd231bad6ed899b557f46280e9 (diff)
thp: do_huge_pmd_anonymous_page() cleanup
Minor cleanup: unindent most code of the function by inverting one condition. It's preparation for the next patch. No functional changes.

Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Acked-by: Hillf Danton <dhillf@gmail.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Al Viro <viro@zeniv.linux.org.uk>
Cc: Hugh Dickins <hughd@google.com>
Cc: Wu Fengguang <fengguang.wu@intel.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Matthew Wilcox <willy@linux.intel.com>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
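Editor's note: the change is the standard guard-clause inversion. Rather than wrapping the whole body in "if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) { ... }", the patch tests the negated range check (by De Morgan: haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end) and jumps straight to the fallback label, so everything that follows loses one indentation level. Below is a minimal standalone sketch of the pattern; the helpers do_work() and fallback() and the CHUNK constant are made up for illustration and are not kernel code.

#include <stdio.h>

#define CHUNK 16

static int do_work(int addr)  { return addr; }   /* stands in for the huge-page fast path */
static int fallback(int addr) { (void)addr; return -1; }  /* stands in for the "out:" path */

/* Before: the whole body nests inside the range check. */
static int frob_nested(int addr, int start, int end)
{
	if (addr >= start && addr + CHUNK <= end) {
		/* ...many lines, all one level deeper... */
		return do_work(addr);
	}
	return fallback(addr);
}

/* After: invert the check and bail out early; the body unindents. */
static int frob_flat(int addr, int start, int end)
{
	if (addr < start || addr + CHUNK > end)
		return fallback(addr);
	/* ...the same lines, now at the top level... */
	return do_work(addr);
}

int main(void)
{
	/* Both variants return the same results for in-range and out-of-range inputs. */
	printf("%d %d\n", frob_nested(32, 0, 64), frob_flat(32, 0, 64));
	printf("%d %d\n", frob_nested(60, 0, 64), frob_flat(60, 0, 64));
	return 0;
}

The two forms behave identically; the flat one simply keeps the common path at the left margin, which is exactly what the diff below does to do_huge_pmd_anonymous_page().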
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  83
1 file changed, 41 insertions(+), 42 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 60836870c6f7..6551dd06dd64 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -785,55 +785,54 @@ int do_huge_pmd_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	unsigned long haddr = address & HPAGE_PMD_MASK;
 	pte_t *pte;
 
-	if (haddr >= vma->vm_start && haddr + HPAGE_PMD_SIZE <= vma->vm_end) {
-		if (unlikely(anon_vma_prepare(vma)))
-			return VM_FAULT_OOM;
-		if (unlikely(khugepaged_enter(vma)))
+	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
+		goto out;
+	if (unlikely(anon_vma_prepare(vma)))
+		return VM_FAULT_OOM;
+	if (unlikely(khugepaged_enter(vma)))
+		return VM_FAULT_OOM;
+	if (!(flags & FAULT_FLAG_WRITE) &&
+			transparent_hugepage_use_zero_page()) {
+		pgtable_t pgtable;
+		struct page *zero_page;
+		bool set;
+		pgtable = pte_alloc_one(mm, haddr);
+		if (unlikely(!pgtable))
 			return VM_FAULT_OOM;
-		if (!(flags & FAULT_FLAG_WRITE) &&
-				transparent_hugepage_use_zero_page()) {
-			pgtable_t pgtable;
-			struct page *zero_page;
-			bool set;
-			pgtable = pte_alloc_one(mm, haddr);
-			if (unlikely(!pgtable))
-				return VM_FAULT_OOM;
-			zero_page = get_huge_zero_page();
-			if (unlikely(!zero_page)) {
-				pte_free(mm, pgtable);
-				count_vm_event(THP_FAULT_FALLBACK);
-				goto out;
-			}
-			spin_lock(&mm->page_table_lock);
-			set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
-					zero_page);
-			spin_unlock(&mm->page_table_lock);
-			if (!set) {
-				pte_free(mm, pgtable);
-				put_huge_zero_page();
-			}
-			return 0;
-		}
-		page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
-					  vma, haddr, numa_node_id(), 0);
-		if (unlikely(!page)) {
+		zero_page = get_huge_zero_page();
+		if (unlikely(!zero_page)) {
+			pte_free(mm, pgtable);
 			count_vm_event(THP_FAULT_FALLBACK);
 			goto out;
 		}
-		count_vm_event(THP_FAULT_ALLOC);
-		if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
-			put_page(page);
-			goto out;
-		}
-		if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd,
-							  page))) {
-			mem_cgroup_uncharge_page(page);
-			put_page(page);
-			goto out;
+		spin_lock(&mm->page_table_lock);
+		set = set_huge_zero_page(pgtable, mm, vma, haddr, pmd,
+				zero_page);
+		spin_unlock(&mm->page_table_lock);
+		if (!set) {
+			pte_free(mm, pgtable);
+			put_huge_zero_page();
 		}
-
 		return 0;
 	}
+	page = alloc_hugepage_vma(transparent_hugepage_defrag(vma),
+			vma, haddr, numa_node_id(), 0);
+	if (unlikely(!page)) {
+		count_vm_event(THP_FAULT_FALLBACK);
+		goto out;
+	}
+	count_vm_event(THP_FAULT_ALLOC);
+	if (unlikely(mem_cgroup_newpage_charge(page, mm, GFP_KERNEL))) {
+		put_page(page);
+		goto out;
+	}
+	if (unlikely(__do_huge_pmd_anonymous_page(mm, vma, haddr, pmd, page))) {
+		mem_cgroup_uncharge_page(page);
+		put_page(page);
+		goto out;
+	}
+
+	return 0;
 out:
 	/*
 	 * Use __pte_alloc instead of pte_alloc_map, because we can't