author    Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>  2019-05-14 02:03:00 -0400
committer Michael Ellerman <mpe@ellerman.id.au>  2019-07-04 10:43:50 -0400
commit    d6eacedd1f0ebf00bdf1c77715d194f7c1036fd4 (patch)
tree      bf977356cbeb533eb3a4e2d73d4923eba6188be3 /arch/powerpc/kvm
parent    259a948c4ba1829ae4a3c31bb6e40ad458a21254 (diff)
powerpc/book3s: Use config independent helpers for page table walk
Even when we have HugeTLB and THP disabled, the kernel linear map can still be mapped with hugepages. This is only an issue with radix translation, because the hash MMU doesn't map the kernel linear range in the Linux page table, and other kernel map areas are not mapped using hugepages.

Add config independent helpers, and WARN_ON() in places where we don't expect things to be mapped via hugepages.

Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
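The diff below removes a local pmd_is_leaf() from book3s_64_mmu_radix.c and switches the pud_huge() callers to pud_is_leaf(); the replacement helpers themselves are added outside this directory, so they don't appear in the diffstat here, which is limited to arch/powerpc/kvm. A minimal sketch of what such config independent helpers look like, assuming the PUD variant mirrors the removed pmd_is_leaf() by testing _PAGE_PTE:

/*
 * Sketch of config independent leaf (hugepage) tests, modeled on the
 * local pmd_is_leaf() removed below. On book3s64 a leaf entry at any
 * page table level has _PAGE_PTE set, so the check works whether or
 * not HugeTLB/THP support is configured in.
 */
static inline int pmd_is_leaf(pmd_t pmd)
{
	return !!(pmd_val(pmd) & _PAGE_PTE);
}

/* Assumed analogue at the PUD level, as used by the hunks below. */
static inline int pud_is_leaf(pud_t pud)
{
	return !!(pud_val(pud) & _PAGE_PTE);
}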
Diffstat (limited to 'arch/powerpc/kvm')
-rw-r--r--  arch/powerpc/kvm/book3s_64_mmu_radix.c  12
1 file changed, 3 insertions, 9 deletions
diff --git a/arch/powerpc/kvm/book3s_64_mmu_radix.c b/arch/powerpc/kvm/book3s_64_mmu_radix.c
index f55ef071883f..91efee7f0329 100644
--- a/arch/powerpc/kvm/book3s_64_mmu_radix.c
+++ b/arch/powerpc/kvm/book3s_64_mmu_radix.c
@@ -363,12 +363,6 @@ static void kvmppc_pte_free(pte_t *ptep)
 	kmem_cache_free(kvm_pte_cache, ptep);
 }
 
-/* Like pmd_huge() and pmd_large(), but works regardless of config options */
-static inline int pmd_is_leaf(pmd_t pmd)
-{
-	return !!(pmd_val(pmd) & _PAGE_PTE);
-}
-
 static pmd_t *kvmppc_pmd_alloc(void)
 {
 	return kmem_cache_alloc(kvm_pmd_cache, GFP_KERNEL);
@@ -489,7 +483,7 @@ static void kvmppc_unmap_free_pud(struct kvm *kvm, pud_t *pud,
 	for (iu = 0; iu < PTRS_PER_PUD; ++iu, ++p) {
 		if (!pud_present(*p))
 			continue;
-		if (pud_huge(*p)) {
+		if (pud_is_leaf(*p)) {
 			pud_clear(p);
 		} else {
 			pmd_t *pmd;
@@ -588,7 +582,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pud = pud_alloc_one(kvm->mm, gpa);
 
 	pmd = NULL;
-	if (pud && pud_present(*pud) && !pud_huge(*pud))
+	if (pud && pud_present(*pud) && !pud_is_leaf(*pud))
 		pmd = pmd_offset(pud, gpa);
 	else if (level <= 1)
 		new_pmd = kvmppc_pmd_alloc();
@@ -611,7 +605,7 @@ int kvmppc_create_pte(struct kvm *kvm, pgd_t *pgtable, pte_t pte,
 		new_pud = NULL;
 	}
 	pud = pud_offset(pgd, gpa);
-	if (pud_huge(*pud)) {
+	if (pud_is_leaf(*pud)) {
 		unsigned long hgpa = gpa & PUD_MASK;
 
 		/* Check if we raced and someone else has set the same thing */