 include/linux/huge_mm.h | 19 ++++++++++++
 mm/huge_memory.c        | 80 ++++++++++++++++++++++++++++++++++++++++++++--
 mm/mmap.c               |  2 ++
 3 files changed, 99 insertions(+), 2 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index c590b08c6fa6..827595228734 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -104,6 +104,19 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
 extern int hugepage_madvise(unsigned long *vm_flags);
+extern void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+				    unsigned long start,
+				    unsigned long end,
+				    long adjust_next);
+static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+					 unsigned long start,
+					 unsigned long end,
+					 long adjust_next)
+{
+	if (!vma->anon_vma || vma->vm_ops || vma->vm_file)
+		return;
+	__vma_adjust_trans_huge(vma, start, end, adjust_next);
+}
 #else /* CONFIG_TRANSPARENT_HUGEPAGE */
 #define HPAGE_PMD_SHIFT ({ BUG(); 0; })
 #define HPAGE_PMD_MASK ({ BUG(); 0; })
@@ -125,6 +138,12 @@ static inline int hugepage_madvise(unsigned long *vm_flags)
 	BUG();
 	return 0;
 }
+static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
+					 unsigned long start,
+					 unsigned long end,
+					 long adjust_next)
+{
+}
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 
 #endif /* _LINUX_HUGE_MM_H */
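
The header change above splits the operation into an inline fast path, vma_adjust_trans_huge(), and an out-of-line slow path, __vma_adjust_trans_huge(): VMAs that cannot contain anonymous hugepages (no anon_vma, or a vm_ops/vm_file backing) are filtered with a few field tests and no function call. Below is a minimal standalone sketch of the same filter pattern; the names (region, region_adjust, __region_adjust) are hypothetical stand-ins, not kernel identifiers.

/*
 * Standalone sketch of the inline-filter pattern used by
 * vma_adjust_trans_huge(). Only the shape of the fast path
 * mirrors the kernel code; all names are made up.
 */
#include <stdio.h>

struct region {
	int has_anon;	/* stands in for vma->anon_vma */
	int has_file;	/* stands in for vma->vm_file  */
};

/* Out-of-line slow path: only reached for anonymous regions. */
static void __region_adjust(struct region *r)
{
	puts("slow path: would split boundary huge pmds here");
}

/* Inline fast path: cheap field tests, no call in the common case. */
static inline void region_adjust(struct region *r)
{
	if (!r->has_anon || r->has_file)
		return;
	__region_adjust(r);
}

int main(void)
{
	struct region file_backed = { .has_anon = 0, .has_file = 1 };
	struct region anon = { .has_anon = 1, .has_file = 0 };

	region_adjust(&file_backed);	/* filtered inline, prints nothing */
	region_adjust(&anon);		/* takes the slow path */
	return 0;
}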
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 30c3cec82023..b6facc35e893 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1075,8 +1075,16 @@ pmd_t *page_check_address_pmd(struct page *page,
 		goto out;
 	if (pmd_page(*pmd) != page)
 		goto out;
-	VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
-		  pmd_trans_splitting(*pmd));
+	/*
+	 * split_vma() may create temporary aliased mappings. There is
+	 * no risk as long as all huge pmds are found and have their
+	 * splitting bit set before __split_huge_page_refcount
+	 * runs. Finding the same huge pmd more than once during the
+	 * same rmap walk is not a problem.
+	 */
+	if (flag == PAGE_CHECK_ADDRESS_PMD_NOTSPLITTING_FLAG &&
+	    pmd_trans_splitting(*pmd))
+		goto out;
 	if (pmd_trans_huge(*pmd)) {
 		VM_BUG_ON(flag == PAGE_CHECK_ADDRESS_PMD_SPLITTING_FLAG &&
 			  !pmd_trans_splitting(*pmd));
@@ -2196,3 +2204,71 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 	put_page(page);
 	BUG_ON(pmd_trans_huge(*pmd));
 }
+
+static void split_huge_page_address(struct mm_struct *mm,
+				    unsigned long address)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+
+	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
+
+	pgd = pgd_offset(mm, address);
+	if (!pgd_present(*pgd))
+		return;
+
+	pud = pud_offset(pgd, address);
+	if (!pud_present(*pud))
+		return;
+
+	pmd = pmd_offset(pud, address);
+	if (!pmd_present(*pmd))
+		return;
+	/*
+	 * Caller holds the mmap_sem in write mode, so a huge pmd cannot
+	 * materialize from under us.
+	 */
+	split_huge_page_pmd(mm, pmd);
+}
+
+void __vma_adjust_trans_huge(struct vm_area_struct *vma,
+			     unsigned long start,
+			     unsigned long end,
+			     long adjust_next)
+{
+	/*
+	 * If the new start address isn't hpage aligned and it could
+	 * previously contain a hugepage: check if we need to split
+	 * a huge pmd.
+	 */
+	if (start & ~HPAGE_PMD_MASK &&
+	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
+	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+		split_huge_page_address(vma->vm_mm, start);
+
+	/*
+	 * If the new end address isn't hpage aligned and it could
+	 * previously contain a hugepage: check if we need to split
+	 * a huge pmd.
+	 */
+	if (end & ~HPAGE_PMD_MASK &&
+	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
+	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
+		split_huge_page_address(vma->vm_mm, end);
+
+	/*
+	 * If we're also updating vma->vm_next->vm_start, and the new
+	 * vm_next->vm_start isn't hpage aligned and it could previously
+	 * contain a hugepage: check if we need to split a huge pmd.
+	 */
+	if (adjust_next > 0) {
+		struct vm_area_struct *next = vma->vm_next;
+		unsigned long nstart = next->vm_start;
+		nstart += adjust_next << PAGE_SHIFT;
+		if (nstart & ~HPAGE_PMD_MASK &&
+		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
+		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
+			split_huge_page_address(next->vm_mm, nstart);
+	}
+}
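
All three checks in __vma_adjust_trans_huge() rest on the same mask arithmetic: addr & ~HPAGE_PMD_MASK is nonzero exactly when addr is not hugepage aligned, addr & HPAGE_PMD_MASK rounds addr down to the containing hugepage boundary, and adjust_next (a page count) is shifted left by PAGE_SHIFT to convert it to bytes. A small self-contained demonstration, assuming a 2MB hugepage over 4k base pages (both sizes are architecture-dependent in the real kernel):

/*
 * Demonstrates the mask arithmetic used in __vma_adjust_trans_huge().
 * HPAGE_PMD_SIZE and PAGE_SHIFT are assumed values for this example.
 */
#include <assert.h>
#include <stdio.h>

#define PAGE_SHIFT	12
#define HPAGE_PMD_SIZE	(2UL * 1024 * 1024)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

int main(void)
{
	unsigned long aligned = 0x40000000UL;		 /* on a 2MB boundary */
	unsigned long unaligned = 0x40000000UL + 0x1000; /* 4k past it */
	long adjust_next = 3;				 /* a page count */

	/* Nonzero low bits mean the address is not hugepage aligned. */
	assert(!(aligned & ~HPAGE_PMD_MASK));
	assert(unaligned & ~HPAGE_PMD_MASK);

	/* Rounding down yields the base of the containing hugepage. */
	printf("base of %#lx is %#lx\n",
	       unaligned, unaligned & HPAGE_PMD_MASK);

	/* adjust_next << PAGE_SHIFT converts pages to bytes. */
	printf("3 pages = %#lx bytes\n",
	       (unsigned long)adjust_next << PAGE_SHIFT);
	return 0;
}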
diff --git a/mm/mmap.c b/mm/mmap.c
index 753f44d17047..73cc648873d6 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -589,6 +589,8 @@ again: remove_next = 1 + (end > next->vm_end);
 		}
 	}
 
+	vma_adjust_trans_huge(vma, start, end, adjust_next);
+
 	/*
 	 * When changing only vma->vm_end, we don't really need anon_vma
 	 * lock. This is a fairly rare case by itself, but the anon_vma
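
The placement of the new call matters: it runs before vma_adjust() takes the anon_vma lock and commits the new vm_start/vm_end values, so any huge pmd straddling the proposed boundaries is split while the old layout is still visible. A minimal sketch of that ordering, with hypothetical names (fake_vma, fixup_trans_huge, adjust) standing in for the kernel structures:

/*
 * Sketch of the ordering the mmap.c hunk establishes: the huge-pmd
 * fixup sees the proposed boundaries while the vma still holds its
 * old layout; only afterwards is the new layout committed.
 */
#include <stdio.h>

struct fake_vma { unsigned long vm_start, vm_end; };

static void fixup_trans_huge(struct fake_vma *v,
			     unsigned long start, unsigned long end)
{
	/* Old layout still in place here, as in vma_adjust(). */
	printf("fixup: old [%#lx, %#lx) -> new [%#lx, %#lx)\n",
	       v->vm_start, v->vm_end, start, end);
}

static void adjust(struct fake_vma *v,
		   unsigned long start, unsigned long end)
{
	fixup_trans_huge(v, start, end);	/* split boundary pmds first */
	v->vm_start = start;			/* then commit the new layout */
	v->vm_end = end;
}

int main(void)
{
	struct fake_vma v = { 0x40000000UL, 0x40400000UL };

	/* Shrink the region so the new end lands mid-hugepage. */
	adjust(&v, 0x40000000UL, 0x40201000UL);
	return 0;
}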