aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/kvm
diff options
context:
space:
mode:
authorSuzuki K Poulose <suzuki.poulose@arm.com>2016-03-01 05:03:06 -0500
committerChristoffer Dall <christoffer.dall@linaro.org>2016-04-21 08:56:44 -0400
commit120f0779c3ed89c25ef1db943feac8ed73a0d7f9 (patch)
tree5e83daf97d8365fde6ee28585643eaeebb50afcd /arch/arm/kvm
parentacd05010400215b281a9197a889bec3e67998654 (diff)
kvm arm: Move fake PGD handling to arch specific files
Rearrange the code for fake pgd handling, which is applicable only for arm64. This will later be removed once we introduce the stage2 page table walker macros. Reviewed-by: Marc Zyngier <marc.zyngier@arm.com> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org> Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--arch/arm/kvm/mmu.c47
1 file changed, 7 insertions, 40 deletions
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index 58dbd5c439df..774d00b8066b 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -684,47 +684,16 @@ int kvm_alloc_stage2_pgd(struct kvm *kvm)
684 if (!hwpgd) 684 if (!hwpgd)
685 return -ENOMEM; 685 return -ENOMEM;
686 686
687 /* When the kernel uses more levels of page tables than the 687 /*
688 * When the kernel uses more levels of page tables than the
688 * guest, we allocate a fake PGD and pre-populate it to point 689 * guest, we allocate a fake PGD and pre-populate it to point
689 * to the next-level page table, which will be the real 690 * to the next-level page table, which will be the real
690 * initial page table pointed to by the VTTBR. 691 * initial page table pointed to by the VTTBR.
691 *
692 * When KVM_PREALLOC_LEVEL==2, we allocate a single page for
693 * the PMD and the kernel will use folded pud.
694 * When KVM_PREALLOC_LEVEL==1, we allocate 2 consecutive PUD
695 * pages.
696 */ 692 */
697 if (KVM_PREALLOC_LEVEL > 0) { 693 pgd = kvm_setup_fake_pgd(hwpgd);
698 int i; 694 if (IS_ERR(pgd)) {
699 695 kvm_free_hwpgd(hwpgd);
700 /* 696 return PTR_ERR(pgd);
701 * Allocate fake pgd for the page table manipulation macros to
702 * work. This is not used by the hardware and we have no
703 * alignment requirement for this allocation.
704 */
705 pgd = kmalloc(PTRS_PER_S2_PGD * sizeof(pgd_t),
706 GFP_KERNEL | __GFP_ZERO);
707
708 if (!pgd) {
709 kvm_free_hwpgd(hwpgd);
710 return -ENOMEM;
711 }
712
713 /* Plug the HW PGD into the fake one. */
714 for (i = 0; i < PTRS_PER_S2_PGD; i++) {
715 if (KVM_PREALLOC_LEVEL == 1)
716 pgd_populate(NULL, pgd + i,
717 (pud_t *)hwpgd + i * PTRS_PER_PUD);
718 else if (KVM_PREALLOC_LEVEL == 2)
719 pud_populate(NULL, pud_offset(pgd, 0) + i,
720 (pmd_t *)hwpgd + i * PTRS_PER_PMD);
721 }
722 } else {
723 /*
724 * Allocate actual first-level Stage-2 page table used by the
725 * hardware for Stage-2 page table walks.
726 */
727 pgd = (pgd_t *)hwpgd;
728 } 697 }
729 698
730 kvm_clean_pgd(pgd); 699 kvm_clean_pgd(pgd);
@@ -831,9 +800,7 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
831 800
832 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE); 801 unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
833 kvm_free_hwpgd(kvm_get_hwpgd(kvm)); 802 kvm_free_hwpgd(kvm_get_hwpgd(kvm));
834 if (KVM_PREALLOC_LEVEL > 0) 803 kvm_free_fake_pgd(kvm->arch.pgd);
835 kfree(kvm->arch.pgd);
836
837 kvm->arch.pgd = NULL; 804 kvm->arch.pgd = NULL;
838} 805}
839 806