Diffstat (limited to 'mm/huge_memory.c')

 mm/huge_memory.c | 57 +++++++++++++++++++++++++++++++++++++++++++----------
 1 file changed, 47 insertions(+), 10 deletions(-)
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index e60837dc785c..33514d88fef9 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -941,6 +941,37 @@ unlock:
 	spin_unlock(ptl);
 }
 
+/*
+ * Save CONFIG_DEBUG_PAGEALLOC from faulting falsely on tail pages
+ * during copy_user_huge_page()'s copy_page_rep(): in the case when
+ * the source page gets split and a tail freed before copy completes.
+ * Called under pmd_lock of checked pmd, so safe from splitting itself.
+ */
+static void get_user_huge_page(struct page *page)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+		struct page *endpage = page + HPAGE_PMD_NR;
+
+		atomic_add(HPAGE_PMD_NR, &page->_count);
+		while (++page < endpage)
+			get_huge_page_tail(page);
+	} else {
+		get_page(page);
+	}
+}
+
+static void put_user_huge_page(struct page *page)
+{
+	if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC)) {
+		struct page *endpage = page + HPAGE_PMD_NR;
+
+		while (page < endpage)
+			put_page(page++);
+	} else {
+		put_page(page);
+	}
+}
+
 static int do_huge_pmd_wp_page_fallback(struct mm_struct *mm,
 					struct vm_area_struct *vma,
 					unsigned long address,
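Note: with CONFIG_DEBUG_PAGEALLOC, freed pages are unmapped from the kernel direct map, so copy_page_rep() can fault on a tail page that was freed after the source THP got split mid-copy; a reference on the head page alone does not keep the tails alive. The helpers above therefore pin every subpage. A condensed outline of the intended call pattern, paraphrased from the do_huge_pmd_wp_page() hunks below rather than quoted verbatim:

	get_user_huge_page(page);	/* under pmd_lock: pin head + all tails */
	spin_unlock(ptl);
	/* ... allocate new_page, which may sleep ... */
	copy_user_huge_page(new_page, page, haddr, vma, HPAGE_PMD_NR);
	/* ... */
	put_user_huge_page(page);	/* drop the per-subpage pins again */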
@@ -1074,7 +1105,7 @@ int do_huge_pmd_wp_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		ret |= VM_FAULT_WRITE;
 		goto out_unlock;
 	}
-	get_page(page);
+	get_user_huge_page(page);
 	spin_unlock(ptl);
 alloc:
 	if (transparent_hugepage_enabled(vma) &&
@@ -1095,7 +1126,7 @@ alloc:
 				split_huge_page(page);
 				ret |= VM_FAULT_FALLBACK;
 			}
-			put_page(page);
+			put_user_huge_page(page);
 		}
 		count_vm_event(THP_FAULT_FALLBACK);
 		goto out;
@@ -1105,7 +1136,7 @@ alloc:
 		put_page(new_page);
 		if (page) {
 			split_huge_page(page);
-			put_page(page);
+			put_user_huge_page(page);
 		} else
 			split_huge_page_pmd(vma, address, pmd);
 		ret |= VM_FAULT_FALLBACK;
@@ -1127,7 +1158,7 @@ alloc:
 
 	spin_lock(ptl);
 	if (page)
-		put_page(page);
+		put_user_huge_page(page);
 	if (unlikely(!pmd_same(*pmd, orig_pmd))) {
 		spin_unlock(ptl);
 		mem_cgroup_uncharge_page(new_page);
@@ -2392,8 +2423,6 @@ static void collapse_huge_page(struct mm_struct *mm,
 	pmd = mm_find_pmd(mm, address);
 	if (!pmd)
 		goto out;
-	if (pmd_trans_huge(*pmd))
-		goto out;
 
 	anon_vma_lock_write(vma->anon_vma);
 
@@ -2492,8 +2521,6 @@ static int khugepaged_scan_pmd(struct mm_struct *mm,
 	pmd = mm_find_pmd(mm, address);
 	if (!pmd)
 		goto out;
-	if (pmd_trans_huge(*pmd))
-		goto out;
 
 	memset(khugepaged_node_load, 0, sizeof(khugepaged_node_load));
 	pte = pte_offset_map_lock(mm, pmd, address, &ptl);
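Note: dropping the separate pmd_trans_huge() test here is safe only if mm_find_pmd() itself now rejects huge pmds; presumably a companion change (in mm/rmap.c, outside this diff, which is limited to mm/huge_memory.c) folds both tests into a single read of the pmd, closing the race in which a concurrent THP fault installs a huge pmd between the caller's two checks. An assumed sketch of that tail of mm_find_pmd():

	pmde = ACCESS_ONCE(*pmd);	/* one read, tested both ways */
	if (!pmd_present(pmde) || pmd_trans_huge(pmde))
		pmd = NULL;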
@@ -2846,12 +2873,22 @@ void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
 static void split_huge_page_address(struct mm_struct *mm,
 				    unsigned long address)
 {
+	pgd_t *pgd;
+	pud_t *pud;
 	pmd_t *pmd;
 
 	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-	pmd = mm_find_pmd(mm, address);
-	if (!pmd)
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		return;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		return;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd))
 		return;
 	/*
 	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
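Note: split_huge_page_address() open-codes the pgd/pud/pmd walk instead of calling mm_find_pmd() because this function is looking precisely for a huge pmd in order to split it; if mm_find_pmd() now returns NULL for trans-huge pmds (as assumed above), this caller could no longer use it, while pmd_present() remains true for a mapped huge pmd, so the open-coded walk still reaches it.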