author		Kirill A. Shutemov <kirill.shutemov@linux.intel.com>	2016-01-15 19:52:42 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>		2016-01-15 20:56:32 -0500
commit		78ddc53473419073ffb2e91178001e87bc513524
tree		5e53e05d563d95cdc3611049fb51ed60daf977b9
parent		b1caa957ae6da3142a73ba8c5c9b2ca821021f0f
thp: rename split_huge_page_pmd() to split_huge_pmd()
We are going to decouple splitting the THP PMD from splitting the underlying
compound page.

This patch renames the split_huge_page_pmd*() functions to split_huge_pmd*()
to reflect that splitting a PMD does not imply splitting the underlying page,
only the PMD itself.
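For illustration, a caller updated by this rename changes roughly as follows
(a hypothetical walker callback sketched for this commit message, not taken
from the patch; note that the new macro also swaps the pmd and address
arguments):

	/* Hypothetical example, not part of this patch. */
	static int example_pmd_entry(pmd_t *pmd, unsigned long addr,
				     unsigned long end, struct mm_walk *walk)
	{
		struct vm_area_struct *vma = walk->vma;

		/* Old call: split_huge_page_pmd(vma, addr, pmd); */
		/* New call: same vma, but the pmd now comes before the address. */
		split_huge_pmd(vma, pmd, addr);
		return 0;
	}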
Signed-off-by: Kirill A. Shutemov <kirill.shutemov@linux.intel.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Tested-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Acked-by: Jerome Marchand <jmarchan@redhat.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Rik van Riel <riel@redhat.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Steve Capper <steve.capper@linaro.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Michal Hocko <mhocko@suse.cz>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	arch/powerpc/mm/subpage-prot.c	 2
-rw-r--r--	arch/x86/kernel/vm86_32.c	 6
-rw-r--r--	include/linux/huge_mm.h		 8
-rw-r--r--	mm/gup.c			 2
-rw-r--r--	mm/huge_memory.c		32
-rw-r--r--	mm/memory.c			 2
-rw-r--r--	mm/mempolicy.c			 2
-rw-r--r--	mm/mprotect.c			 2
-rw-r--r--	mm/mremap.c			 2
-rw-r--r--	mm/pagewalk.c			 2
10 files changed, 25 insertions, 35 deletions
diff --git a/arch/powerpc/mm/subpage-prot.c b/arch/powerpc/mm/subpage-prot.c
index fa9fb5b4c66c..d5543514c1df 100644
--- a/arch/powerpc/mm/subpage-prot.c
+++ b/arch/powerpc/mm/subpage-prot.c
@@ -135,7 +135,7 @@ static int subpage_walk_pmd_entry(pmd_t *pmd, unsigned long addr,
 				  unsigned long end, struct mm_walk *walk)
 {
 	struct vm_area_struct *vma = walk->vma;
-	split_huge_page_pmd(vma, addr, pmd);
+	split_huge_pmd(vma, pmd, addr);
 	return 0;
 }
 
diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
index 483231ebbb0b..e574b8546518 100644
--- a/arch/x86/kernel/vm86_32.c
+++ b/arch/x86/kernel/vm86_32.c
@@ -175,7 +175,11 @@ static void mark_screen_rdonly(struct mm_struct *mm)
 	if (pud_none_or_clear_bad(pud))
 		goto out;
 	pmd = pmd_offset(pud, 0xA0000);
-	split_huge_page_pmd_mm(mm, 0xA0000, pmd);
+
+	if (pmd_trans_huge(*pmd)) {
+		struct vm_area_struct *vma = find_vma(mm, 0xA0000);
+		split_huge_pmd(vma, pmd, 0xA0000);
+	}
 	if (pmd_none_or_clear_bad(pmd))
 		goto out;
 	pte = pte_offset_map_lock(mm, pmd, 0xA0000, &ptl);
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index ecb080d6ff42..805c7ae42280 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -102,7 +102,7 @@ static inline int split_huge_page(struct page *page)
 }
 extern void __split_huge_page_pmd(struct vm_area_struct *vma,
 		unsigned long address, pmd_t *pmd);
-#define split_huge_page_pmd(__vma, __address, __pmd)		\
+#define split_huge_pmd(__vma, __pmd, __address)			\
 	do {							\
 		pmd_t *____pmd = (__pmd);			\
 		if (unlikely(pmd_trans_huge(*____pmd)))		\
@@ -117,8 +117,6 @@ extern void __split_huge_page_pmd(struct vm_area_struct *vma,
 		BUG_ON(pmd_trans_splitting(*____pmd) ||		\
 		       pmd_trans_huge(*____pmd));		\
 	} while (0)
-extern void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd);
 #if HPAGE_PMD_ORDER >= MAX_ORDER
 #error "hugepages can't be allocated by the buddy allocator"
 #endif
@@ -183,11 +181,9 @@ static inline int split_huge_page(struct page *page)
 {
 	return 0;
 }
-#define split_huge_page_pmd(__vma, __address, __pmd)	\
-	do { } while (0)
 #define wait_split_huge_page(__anon_vma, __pmd)	\
 	do { } while (0)
-#define split_huge_page_pmd_mm(__mm, __address, __pmd)	\
+#define split_huge_pmd(__vma, __pmd, __address)	\
 	do { } while (0)
 static inline int hugepage_madvise(struct vm_area_struct *vma,
 		unsigned long *vm_flags, int advice)
diff --git a/mm/gup.c b/mm/gup.c
--- a/mm/gup.c
+++ b/mm/gup.c
@@ -254,7 +254,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 		if (is_huge_zero_page(page)) {
 			spin_unlock(ptl);
 			ret = 0;
-			split_huge_page_pmd(vma, address, pmd);
+			split_huge_pmd(vma, pmd, address);
 		} else {
 			get_page(page);
 			spin_unlock(ptl);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index f4da89cef2cd..0d70ec056ecc 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1233,13 +1233,13 @@ alloc:
 
 	if (unlikely(!new_page)) {
 		if (!page) {
-			split_huge_page_pmd(vma, address, pmd);
+			split_huge_pmd(vma, pmd, address);
 			ret |= VM_FAULT_FALLBACK;
 		} else {
 			ret = do_huge_pmd_wp_page_fallback(mm, vma, address,
 					pmd, orig_pmd, page, haddr);
 			if (ret & VM_FAULT_OOM) {
-				split_huge_page(page);
+				split_huge_pmd(vma, pmd, address);
 				ret |= VM_FAULT_FALLBACK;
 			}
 			put_user_huge_page(page);
@@ -1252,10 +1252,10 @@ alloc:
 					   true))) {
 		put_page(new_page);
 		if (page) {
-			split_huge_page(page);
+			split_huge_pmd(vma, pmd, address);
 			put_user_huge_page(page);
 		} else
-			split_huge_page_pmd(vma, address, pmd);
+			split_huge_pmd(vma, pmd, address);
 		ret |= VM_FAULT_FALLBACK;
 		count_vm_event(THP_FAULT_FALLBACK);
 		goto out;
@@ -3131,17 +3131,7 @@ again:
 		goto again;
 }
 
-void split_huge_page_pmd_mm(struct mm_struct *mm, unsigned long address,
-		pmd_t *pmd)
-{
-	struct vm_area_struct *vma;
-
-	vma = find_vma(mm, address);
-	BUG_ON(vma == NULL);
-	split_huge_page_pmd(vma, address, pmd);
-}
-
-static void split_huge_page_address(struct mm_struct *mm,
+static void split_huge_pmd_address(struct vm_area_struct *vma,
 				    unsigned long address)
 {
 	pgd_t *pgd;
@@ -3150,7 +3140,7 @@ static void split_huge_page_address(struct mm_struct *mm,
 
 	VM_BUG_ON(!(address & ~HPAGE_PMD_MASK));
 
-	pgd = pgd_offset(mm, address);
+	pgd = pgd_offset(vma->vm_mm, address);
 	if (!pgd_present(*pgd))
 		return;
 
@@ -3159,13 +3149,13 @@ static void split_huge_page_address(struct mm_struct *mm,
 		return;
 
 	pmd = pmd_offset(pud, address);
-	if (!pmd_present(*pmd))
+	if (!pmd_present(*pmd) || !pmd_trans_huge(*pmd))
 		return;
 	/*
 	 * Caller holds the mmap_sem write mode, so a huge pmd cannot
 	 * materialize from under us.
 	 */
-	split_huge_page_pmd_mm(mm, address, pmd);
+	__split_huge_page_pmd(vma, address, pmd);
 }
 
 void vma_adjust_trans_huge(struct vm_area_struct *vma,
@@ -3181,7 +3171,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	if (start & ~HPAGE_PMD_MASK &&
 	    (start & HPAGE_PMD_MASK) >= vma->vm_start &&
 	    (start & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
-		split_huge_page_address(vma->vm_mm, start);
+		split_huge_pmd_address(vma, start);
 
 	/*
 	 * If the new end address isn't hpage aligned and it could
@@ -3191,7 +3181,7 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 	if (end & ~HPAGE_PMD_MASK &&
 	    (end & HPAGE_PMD_MASK) >= vma->vm_start &&
 	    (end & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= vma->vm_end)
-		split_huge_page_address(vma->vm_mm, end);
+		split_huge_pmd_address(vma, end);
 
 	/*
 	 * If we're also updating the vma->vm_next->vm_start, if the new
@@ -3205,6 +3195,6 @@ void vma_adjust_trans_huge(struct vm_area_struct *vma,
 		if (nstart & ~HPAGE_PMD_MASK &&
 		    (nstart & HPAGE_PMD_MASK) >= next->vm_start &&
 		    (nstart & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE <= next->vm_end)
-			split_huge_page_address(next->vm_mm, nstart);
+			split_huge_pmd_address(next, nstart);
 	}
 }
diff --git a/mm/memory.c b/mm/memory.c
index eecdd05e9923..561b7ad7f27a 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1193,7 +1193,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
 					BUG();
 				}
 #endif
-				split_huge_page_pmd(vma, addr, pmd);
+				split_huge_pmd(vma, pmd, addr);
 			} else if (zap_huge_pmd(tlb, vma, pmd, addr))
 				goto next;
 			/* fall through */
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index d8caff071a30..5f7f9dace354 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -493,7 +493,7 @@ static int queue_pages_pte_range(pmd_t *pmd, unsigned long addr,
 	pte_t *pte;
 	spinlock_t *ptl;
 
-	split_huge_page_pmd(vma, addr, pmd);
+	split_huge_pmd(vma, pmd, addr);
 	if (pmd_trans_unstable(pmd))
 		return 0;
 
diff --git a/mm/mprotect.c b/mm/mprotect.c
index c764402c464f..6047707085c1 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -160,7 +160,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 
 		if (pmd_trans_huge(*pmd)) {
 			if (next - addr != HPAGE_PMD_SIZE)
-				split_huge_page_pmd(vma, addr, pmd);
+				split_huge_pmd(vma, pmd, addr);
 			else {
 				int nr_ptes = change_huge_pmd(vma, pmd, addr,
 						newprot, prot_numa);
diff --git a/mm/mremap.c b/mm/mremap.c
index e55b157865d5..5969b5093850 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -209,7 +209,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
 				need_flush = true;
 				continue;
 			} else if (!err) {
-				split_huge_page_pmd(vma, old_addr, old_pmd);
+				split_huge_pmd(vma, old_pmd, old_addr);
 			}
 			VM_BUG_ON(pmd_trans_huge(*old_pmd));
 		}
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index 29f2f8b853ae..207244489a68 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -58,7 +58,7 @@ again:
 		if (!walk->pte_entry)
 			continue;
 
-		split_huge_page_pmd_mm(walk->mm, addr, pmd);
+		split_huge_pmd(walk->vma, pmd, addr);
 		if (pmd_trans_unstable(pmd))
 			goto again;
 		err = walk_pte_range(pmd, addr, next, walk);