aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/mm/pgtable_64.c
diff options
context:
space:
mode:
authorAneesh Kumar K.V <aneesh.kumar@linux.ibm.com>2019-05-14 02:03:00 -0400
committerMichael Ellerman <mpe@ellerman.id.au>2019-07-04 10:43:50 -0400
commitd6eacedd1f0ebf00bdf1c77715d194f7c1036fd4 (patch)
treebf977356cbeb533eb3a4e2d73d4923eba6188be3 /arch/powerpc/mm/pgtable_64.c
parent259a948c4ba1829ae4a3c31bb6e40ad458a21254 (diff)
powerpc/book3s: Use config independent helpers for page table walk
Even when we have HugeTLB and THP disabled, the kernel linear map can still be mapped with hugepages. This is only an issue with radix translation, because the hash MMU doesn't map the kernel linear range in the Linux page table, and other kernel map areas are not mapped using hugepages.

Add config-independent helpers, and put a WARN_ON() where we don't expect things to be mapped via hugepages.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Diffstat (limited to 'arch/powerpc/mm/pgtable_64.c')
-rw-r--r--arch/powerpc/mm/pgtable_64.c12
1 file changed, 9 insertions(+), 3 deletions(-)
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 63cd81130643..2892246a6fef 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -309,16 +309,20 @@ EXPORT_SYMBOL(__iounmap_at);
 /* 4 level page table */
 struct page *pgd_page(pgd_t pgd)
 {
-	if (pgd_huge(pgd))
+	if (pgd_is_leaf(pgd)) {
+		VM_WARN_ON(!pgd_huge(pgd));
 		return pte_page(pgd_pte(pgd));
+	}
 	return virt_to_page(pgd_page_vaddr(pgd));
 }
 #endif
 
 struct page *pud_page(pud_t pud)
 {
-	if (pud_huge(pud))
+	if (pud_is_leaf(pud)) {
+		VM_WARN_ON(!pud_huge(pud));
 		return pte_page(pud_pte(pud));
+	}
 	return virt_to_page(pud_page_vaddr(pud));
 }
 
@@ -328,8 +332,10 @@ struct page *pud_page(pud_t pud)
  */
 struct page *pmd_page(pmd_t pmd)
 {
-	if (pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd))
+	if (pmd_is_leaf(pmd)) {
+		VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd) || pmd_devmap(pmd)));
 		return pte_page(pmd_pte(pmd));
+	}
 	return virt_to_page(pmd_page_vaddr(pmd));
 }
 