 Documentation/vm/transhuge.txt |  4 ++--
 arch/x86/kernel/vm86_32.c      |  2 +-
 fs/proc/task_mmu.c             |  2 +-
 include/linux/huge_mm.h        | 14 ++++++++++----
 mm/huge_memory.c               | 19 +++++++++++++++++--
 mm/memory.c                    |  4 ++--
 mm/mempolicy.c                 |  2 +-
 mm/mprotect.c                  |  2 +-
 mm/mremap.c                    |  2 +-
 mm/pagewalk.c                  |  2 +-
 10 files changed, 37 insertions(+), 16 deletions(-)
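
In short: split_huge_page_pmd() changes from taking (mm, pmd) to (vma, address, pmd), __split_huge_page_pmd() derives the mm from the vma and gains a bounds check on the huge-page-aligned address range, and a new split_huge_page_pmd_mm(mm, address, pmd) helper covers the two callers (vm86's mark_screen_rdonly() and the generic page walker) that have no vma in scope. All in-tree callers and the transhuge documentation are updated to match.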
diff --git a/Documentation/vm/transhuge.txt b/Documentation/vm/transhuge.txt
index f734bb2a78dc..8f5b41db314c 100644
--- a/Documentation/vm/transhuge.txt
+++ b/Documentation/vm/transhuge.txt
@@ -276,7 +276,7 @@ unaffected. libhugetlbfs will also work fine as usual.
 == Graceful fallback ==
 
 Code walking pagetables but unware about huge pmds can simply call
-split_huge_page_pmd(mm, pmd) where the pmd is the one returned by
+split_huge_page_pmd(vma, addr, pmd) where the pmd is the one returned by
 pmd_offset. It's trivial to make the code transparent hugepage aware
 by just grepping for "pmd_offset" and adding split_huge_page_pmd where
 missing after pmd_offset returns the pmd. Thanks to the graceful
@@ -299,7 +299,7 @@ diff --git a/mm/mremap.c b/mm/mremap.c
 		return NULL;
 
 	pmd = pmd_offset(pud, addr);
-+	split_huge_page_pmd(mm, pmd);
++	split_huge_page_pmd(vma, addr, pmd);
 	if (pmd_none_or_clear_bad(pmd))
 		return NULL;
 
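The documentation hunk above describes the graceful-fallback recipe under the new interface. A minimal sketch of that pattern (illustrative only, not part of this patch; the walker name and the per-pte work are hypothetical):

#include <linux/mm.h>
#include <linux/huge_mm.h>

/*
 * Sketch: a walker that only understands regular ptes splits any huge
 * pmd before descending, per the recipe in transhuge.txt. Note the new
 * calling convention: the vma and address, not the bare mm.
 */
static void my_walk_one_pmd(struct vm_area_struct *vma, unsigned long addr)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	pgd = pgd_offset(vma->vm_mm, addr);
	if (pgd_none_or_clear_bad(pgd))
		return;
	pud = pud_offset(pgd, addr);
	if (pud_none_or_clear_bad(pud))
		return;
	pmd = pmd_offset(pud, addr);
	split_huge_page_pmd(vma, addr, pmd);
	if (pmd_none_or_clear_bad(pmd))
		return;
	/* ... pte_offset_map_lock() and per-pte processing here ... */
}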
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 5c9687b1bde6..1dfe69cc78a8 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -182,7 +182,7 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	if (pud_none_or_clear_bad(pud))
 		goto out;
 	pmd = pmd_offset(pud, 0xA0000);
-	split_huge_page_pmd(mm, pmd);
+	split_huge_page_pmd_mm(mm, 0xA0000, pmd);
 	if (pmd_none_or_clear_bad(pmd))
 		goto out;
 	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 90c63f9392a5..291a0d15a0be 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -643,7 +643,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
 	spinlock_t *ptl;
 	struct page *page;
 
-	split_huge_page_pmd(walk->mm, pmd);
+	split_huge_page_pmd(vma, addr, pmd);
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 1af477552459..3132ea788581 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -95,12 +95,14 @@ extern int handle_pte_fault(struct mm_struct *mm,
 			    struct vm_area_struct *vma, unsigned long address,
 			    pte_t *pte, pmd_t *pmd, unsigned int flags);
 extern int split_huge_page(struct page *page);
-extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd);
-#define split_huge_page_pmd(__mm, __pmd)				\
+extern void __split_huge_page_pmd(struct vm_area_struct *vma,
+		unsigned long address, pmd_t *pmd);
+#define split_huge_page_pmd(__vma, __address, __pmd)			\
 	do {								\
 		pmd_t *____pmd = (__pmd);				\
 		if (unlikely(pmd_trans_huge(*____pmd)))			\
-			__split_huge_page_pmd(__mm, ____pmd);		\
+			__split_huge_page_pmd(__vma, __address,		\
+					____pmd);			\
 	} while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)				\
 	do {								\
@@ -110,6 +112,8 @@ extern void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 		BUG_ON(pmd_trans_splitting(*____pmd) ||			\
 		       pmd_trans_huge(*____pmd));			\
 	} while (0)
+extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+		pmd_t *pmd);
 #if HPAGE_PMD_ORDER > MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
@@ -177,10 +181,12 @@ static inline int split_huge_page(struct page *page)
 {
 	return 0;
 }
-#define split_huge_page_pmd(__mm, __pmd)	\
+#define split_huge_page_pmd(__vma, __address, __pmd)	\
 	do { } while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)	\
 	do { } while (0)
+#define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
+	do { } while (0)
 #define compound_trans_head(page) compound_head(page)
 static inline int hugepage_madvise(struct vm_area_struct *vma,
 				   unsigned long *vm_flags, int advice)
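For reference, the THP-enabled split_huge_page_pmd() macro above behaves like this inline function (an illustrative rewrite, not code from the patch; the header keeps it a macro so the !CONFIG_TRANSPARENT_HUGEPAGE stub can discard its arguments without evaluating them):

/*
 * Illustrative equivalent of the split_huge_page_pmd() macro.
 */
static inline void split_huge_page_pmd_sketch(struct vm_area_struct *vma,
					      unsigned long address, pmd_t *pmd)
{
	/* Fast path: only a genuinely huge pmd takes the slow path. */
	if (unlikely(pmd_trans_huge(*pmd)))
		__split_huge_page_pmd(vma, address, pmd);
}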
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 7742fb36eb4d..de6aa5f3fdd2 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -2475,9 +2475,14 @@ static int khugepaged(void *none)
 	return 0;
 }
 
-void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
+void __split_huge_page_pmd(struct vm_area_struct *vma, unsigned long address,
+		pmd_t *pmd)
 {
 	struct page *page;
+	unsigned long haddr = address & HPAGE_PMD_MASK;
+	struct mm_struct *mm = vma->vm_mm;
+
+	BUG_ON(vma->vm_start > haddr || vma->vm_end < haddr + HPAGE_PMD_SIZE);
 
 	spin_lock(&mm->page_table_lock);
 	if (unlikely(!pmd_trans_huge(*pmd))) {
@@ -2495,6 +2500,16 @@ void __split_huge_page_pmd(struct mm_struct *mm, pmd_t *pmd)
 	BUG_ON(pmd_trans_huge(*pmd));
 }
 
+void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
+		pmd_t *pmd)
+{
+	struct vm_area_struct *vma;
+
+	vma = find_vma(mm, address);
+	BUG_ON(vma == NULL);
+	split_huge_page_pmd(vma, address, pmd);
+}
+
 static void split_huge_page_address(struct mm_struct *mm,
 				    unsigned long address)
 {
@@ -2509,7 +2524,7 @@ static void split_huge_page_address(struct mm_struct *mm,
 	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
 	 * materialize from under us.
 	 */
-	split_huge_page_pmd(mm, pmd);
+	split_huge_page_pmd_mm(mm, address, pmd);
 }
 
 void __vma_adjust_trans_huge(struct vm_area_struct *vma,
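Two details of this hunk are worth noting. First, split_huge_page_pmd_mm() serves callers that hold only an mm: it resolves the vma with find_vma() (valid only under mmap_sem) and then funnels into the vma-based path, where the new bounds check still applies. Second, that check encodes the invariant that a transparent huge page is mapped by a single vma covering the whole aligned range. A worked example, assuming x86-64's 2MB huge pmds (illustrative numbers):

/*
 *	address = 0x7f1234512345
 *	haddr   = address & HPAGE_PMD_MASK = 0x7f1234400000
 *
 * The BUG_ON() then requires
 *
 *	vma->vm_start <= 0x7f1234400000 and
 *	vma->vm_end   >= 0x7f1234600000 (haddr + HPAGE_PMD_SIZE)
 *
 * i.e. the vma covers the entire aligned 2MB range mapped by the pmd.
 */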
diff --git a/mm/memory.c b/mm/memory.c
index 259b34fe1347..f9a4b0cb8623 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1243,7 +1243,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 				BUG();
 			}
 #endif
-			split_huge_page_pmd(vma->vm_mm, pmd);
+			split_huge_page_pmd(vma, addr, pmd);
 		} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 			goto next;
 		/* fall through */
@@ -1512,7 +1512,7 @@ struct page *follow_page(struct vm_area_struct *vma, unsigned long address,
 	}
 	if (pmd_trans_huge(*pmd)) {
 		if (flags & FOLL_SPLIT) {
-			split_huge_page_pmd(mm, pmd);
+			split_huge_page_pmd(vma, address, pmd);
 			goto split_fallthrough;
 		}
 		spin_lock(&mm->page_table_lock);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 05b28361a39b..0719e8dd4945 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -511,7 +511,7 @@ static inline int check_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 	pmd = pmd_offset(pud, addr);
 	do {
 		next = pmd_addr_end(addr, end);
-		split_huge_page_pmd(vma->vm_mm, pmd);
+		split_huge_page_pmd(vma, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			continue;
 		if (check_pte_range(vma, pmd, addr, next, nodes,
diff --git a/mm/mprotect.c b/mm/mprotect.c
index a40992610ab6..e8c3938db6fa 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -90,7 +90,7 @@ static inline void change_pmd_range(struct vm_area_struct *vma, pud_t *pud,
 		next = pmd_addr_end(addr, end);
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
-				split_huge_page_pmd(vma->vm_mm, pmd);
+				split_huge_page_pmd(vma, addr, pmd);
 			else if (change_huge_pmd(vma, pmd, addr, newprot))
 				continue;
 			/* fall through */
diff --git a/mm/mremap.c b/mm/mremap.c
index 1b61c2d3307a..eabb24da6c9e 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -182,7 +182,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				need_flush = true;
 				continue;
 			} else if (!err) {
-				split_huge_page_pmd(vma->vm_mm, old_pmd);
+				split_huge_page_pmd(vma, old_addr, old_pmd);
 			}
 			VM_BUG_ON(pmd_trans_huge(*old_pmd));
 		}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 6c118d012bb5..35aa294656cd 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -58,7 +58,7 @@ again:
 		if (!walk->pte_entry)
 			continue;
 
-		split_huge_page_pmd(walk->mm, pmd);
+		split_huge_page_pmd_mm(walk->mm, addr, pmd);
 		if (pmd_none_or_trans_huge_or_clear_bad(pmd))
 			goto again;
 		err = walk_pte_range(pmd, addr, next, walk);