author     Kirill A. Shutemov <kirill.shutemov@linux.intel.com>    2014-04-03 17:48:17 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>          2014-04-03 19:21:04 -0400
commit     e9b71ca91aedb295097bd47066a06542751ecca8
tree       e029409b61a0cbf6b4566d8f2f987bd0c39fc94b    /mm/huge_memory.c
parent     3bb977946998ae0d756279c5a108435d04636e2b
mm, thp: drop do_huge_pmd_wp_zero_page_fallback()
I've realized that there's no need for do_huge_pmd_wp_zero_page_fallback().
We can just split the zero page with split_huge_page_pmd() and return
VM_FAULT_FALLBACK; handle_pte_fault() will then handle the write-protection
fault for us.
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
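
To illustrate the new fallback path described above (a minimal sketch only, not part of
the patch itself; the helper name below is hypothetical, and the real change is made
inline in do_huge_pmd_wp_page(), as the diff shows):

/* Hypothetical helper, for illustration only: the fallback taken when a
 * write fault hits the huge zero page and no new huge page can be allocated. */
static int huge_zero_wp_fallback(struct vm_area_struct *vma,
                                 unsigned long address, pmd_t *pmd)
{
        /* Split the zero-page PMD mapping into ordinary PAGE_SIZE ptes. */
        split_huge_page_pmd(vma, address, pmd);
        /*
         * Report that the fault was not handled at PMD level.  The generic
         * fault path retries at PTE level, and handle_pte_fault() performs
         * the usual copy-on-write of the (now pte-mapped) zero page.
         */
        return VM_FAULT_FALLBACK;
}

Compared with the dropped helper, this avoids allocating and charging a small page and
filling in all the ptes under the PMD lock just to replace one zero-page pte: the normal
PTE fault path already knows how to do that.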
Diffstat (limited to 'mm/huge_memory.c')
-rw-r--r--  mm/huge_memory.c  79
1 file changed, 2 insertions, 77 deletions
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 1546655a2d78..6ac89e9f82ef 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -941,81 +941,6 @@ unlock:
 	spin_unlock(ptl);
 }
 
-static int do_huge_pmd_wp_zero_page_fallback(struct mm_struct *mm,
-		struct vm_area_struct *vma, unsigned long address,
-		pmd_t *pmd, pmd_t orig_pmd, unsigned long haddr)
-{
-	spinlock_t *ptl;
-	pgtable_t pgtable;
-	pmd_t _pmd;
-	struct page *page;
-	int i, ret = 0;
-	unsigned long mmun_start;	/* For mmu_notifiers */
-	unsigned long mmun_end;		/* For mmu_notifiers */
-
-	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, address);
-	if (!page) {
-		ret |= VM_FAULT_OOM;
-		goto out;
-	}
-
-	if (mem_cgroup_newpage_charge(page, mm, GFP_KERNEL)) {
-		put_page(page);
-		ret |= VM_FAULT_OOM;
-		goto out;
-	}
-
-	clear_user_highpage(page, address);
-	__SetPageUptodate(page);
-
-	mmun_start = haddr;
-	mmun_end   = haddr + HPAGE_PMD_SIZE;
-	mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
-
-	ptl = pmd_lock(mm, pmd);
-	if (unlikely(!pmd_same(*pmd, orig_pmd)))
-		goto out_free_page;
-
-	pmdp_clear_flush(vma, haddr, pmd);
-	/* leave pmd empty until pte is filled */
-
-	pgtable = pgtable_trans_huge_withdraw(mm, pmd);
-	pmd_populate(mm, &_pmd, pgtable);
-
-	for (i = 0; i < HPAGE_PMD_NR; i++, haddr += PAGE_SIZE) {
-		pte_t *pte, entry;
-		if (haddr == (address & PAGE_MASK)) {
-			entry = mk_pte(page, vma->vm_page_prot);
-			entry = maybe_mkwrite(pte_mkdirty(entry), vma);
-			page_add_new_anon_rmap(page, vma, haddr);
-		} else {
-			entry = pfn_pte(my_zero_pfn(haddr), vma->vm_page_prot);
-			entry = pte_mkspecial(entry);
-		}
-		pte = pte_offset_map(&_pmd, haddr);
-		VM_BUG_ON(!pte_none(*pte));
-		set_pte_at(mm, haddr, pte, entry);
-		pte_unmap(pte);
-	}
-	smp_wmb(); /* make pte visible before pmd */
-	pmd_populate(mm, pmd, pgtable);
-	spin_unlock(ptl);
-	put_huge_zero_page();
-	inc_mm_counter(mm, MM_ANONPAGES);
-
-	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-
-	ret |= VM_FAULT_WRITE;
-out:
-	return ret;
-out_free_page:
-	spin_unlock(ptl);
-	mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-	mem_cgroup_uncharge_page(page);
-	put_page(page);
-	goto out;
-}
-
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long address,
@@ -1161,8 +1086,8 @@ alloc:
 
 	if (unlikely(!new_page)) {
 		if (!page) {
-			ret = do_huge_pmd_wp_zero_page_fallback(mm, vma,
-					address, pmd, orig_pmd, haddr);
+			split_huge_page_pmd(vma, address, pmd);
+			ret |= VM_FAULT_FALLBACK;
 		} else {
 			ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 					pmd, orig_pmd, page, haddr);