author		Suzuki K Poulose <suzuki.poulose@arm.com>	2016-03-22 13:01:21 -0400
committer	Christoffer Dall <christoffer.dall@linaro.org>	2016-04-21 08:58:23 -0400
commit		9163ee23e72333e4712f7edd1a49aef06eae6304 (patch)
tree		4508f4556725aab519634737740b36c6880d5f98 /arch/arm/kvm
parent		da04fa04dc91e7dae79629f28804391cbcf6e604 (diff)
kvm-arm: Cleanup stage2 pgd handling
Now that we don't have any fake page table levels for arm64,
clean up the common code to get rid of the dead code.
Cc: Marc Zyngier <marc.zyngier@arm.com>
Acked-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
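
In outline, the change replaces the old two-step allocation (a real HW table plus a possibly-fake PGD wrapper) with a single direct allocation. Below is a condensed before/after sketch, simplified from the hunks that follow (error handling trimmed, so this is not the literal kernel code):

/* Before: when the host kernel used more page-table levels than the
 * stage-2 translation, a software-only "fake" PGD wrapped the real
 * table that the VTTBR points to. */
static int stage2_alloc_before(struct kvm *kvm)
{
	void *hwpgd = kvm_alloc_hwpgd();	/* real table for the VTTBR */
	pgd_t *pgd = kvm_setup_fake_pgd(hwpgd);	/* wrapper, possibly fake */

	kvm->arch.pgd = pgd;			/* walks start here */
	return 0;
}

/* After: arm64 now sizes its stage-2 tables independently of the host
 * (PTRS_PER_S2_PGD), so the table the VTTBR points to is allocated
 * directly and kvm->arch.pgd is always the real thing. */
static int stage2_alloc_after(struct kvm *kvm)
{
	kvm->arch.pgd = alloc_pages_exact(PTRS_PER_S2_PGD * sizeof(pgd_t),
					  GFP_KERNEL | __GFP_ZERO);
	return kvm->arch.pgd ? 0 : -ENOMEM;
}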
Diffstat (limited to 'arch/arm/kvm')
-rw-r--r--	arch/arm/kvm/arm.c	2
-rw-r--r--	arch/arm/kvm/mmu.c	37
2 files changed, 7 insertions(+), 32 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index dded1b763c16..be4b6394a062 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -448,7 +448,7 @@ static void update_vttbr(struct kvm *kvm)
 	kvm_next_vmid &= (1 << kvm_vmid_bits) - 1;
 
 	/* update vttbr to be used with the new vmid */
-	pgd_phys = virt_to_phys(kvm_get_hwpgd(kvm));
+	pgd_phys = virt_to_phys(kvm->arch.pgd);
 	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK(kvm_vmid_bits);
 	kvm->arch.vttbr = pgd_phys | vmid;
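
The hunk above composes the VTTBR from the stage-2 PGD's physical address and the VMID. As a rough standalone illustration of the packing (and of what the BUG_ON() guards), here is a sketch; the 48-bit VMID shift and 8-bit VMID width are assumptions for illustration, not values taken from this patch:

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed field layout: VMID in the top bits, table base in the low bits. */
#define VMID_SHIFT	48
#define VMID_MASK	(0xffULL << VMID_SHIFT)
#define BADDR_MASK	((1ULL << VMID_SHIFT) - 1)

static uint64_t make_vttbr(uint64_t pgd_phys, uint64_t vmid)
{
	/* Mirrors the BUG_ON() above: the base address must not spill
	 * into the VMID field. (The real VTTBR_BADDR_MASK also enforces
	 * alignment of the base address.) */
	assert((pgd_phys & ~BADDR_MASK) == 0);
	return pgd_phys | ((vmid << VMID_SHIFT) & VMID_MASK);
}

int main(void)
{
	printf("vttbr = %#llx\n",
	       (unsigned long long)make_vttbr(0x8000f000, 5));
	return 0;
}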
diff --git a/arch/arm/kvm/mmu.c b/arch/arm/kvm/mmu.c
index d3fa96e0f709..42eefab3e8e1 100644
--- a/arch/arm/kvm/mmu.c
+++ b/arch/arm/kvm/mmu.c
@@ -43,6 +43,7 @@ static unsigned long hyp_idmap_start;
 static unsigned long hyp_idmap_end;
 static phys_addr_t hyp_idmap_vector;
 
+#define S2_PGD_SIZE	(PTRS_PER_S2_PGD * sizeof(pgd_t))
 #define hyp_pgd_order get_order(PTRS_PER_PGD * sizeof(pgd_t))
 
 #define KVM_S2PTE_FLAG_IS_IOMAP	(1UL << 0)
@@ -736,20 +737,6 @@ int create_hyp_io_mappings(void *from, void *to, phys_addr_t phys_addr)
 				     __phys_to_pfn(phys_addr), PAGE_HYP_DEVICE);
 }
 
-/* Free the HW pgd, one page at a time */
-static void kvm_free_hwpgd(void *hwpgd)
-{
-	free_pages_exact(hwpgd, kvm_get_hwpgd_size());
-}
-
-/* Allocate the HW PGD, making sure that each page gets its own refcount */
-static void *kvm_alloc_hwpgd(void)
-{
-	unsigned int size = kvm_get_hwpgd_size();
-
-	return alloc_pages_exact(size, GFP_KERNEL | __GFP_ZERO);
-}
-
 /**
  * kvm_alloc_stage2_pgd - allocate level-1 table for stage-2 translation.
  * @kvm:	The KVM struct pointer for the VM.
@@ -764,29 +751,17 @@ static void *kvm_alloc_hwpgd(void)
 int kvm_alloc_stage2_pgd(struct kvm *kvm)
 {
 	pgd_t *pgd;
-	void *hwpgd;
 
 	if (kvm->arch.pgd != NULL) {
 		kvm_err("kvm_arch already initialized?\n");
 		return -EINVAL;
 	}
 
-	hwpgd = kvm_alloc_hwpgd();
-	if (!hwpgd)
+	/* Allocate the HW PGD, making sure that each page gets its own refcount */
+	pgd = alloc_pages_exact(S2_PGD_SIZE, GFP_KERNEL | __GFP_ZERO);
+	if (!pgd)
 		return -ENOMEM;
 
-	/*
-	 * When the kernel uses more levels of page tables than the
-	 * guest, we allocate a fake PGD and pre-populate it to point
-	 * to the next-level page table, which will be the real
-	 * initial page table pointed to by the VTTBR.
-	 */
-	pgd = kvm_setup_fake_pgd(hwpgd);
-	if (IS_ERR(pgd)) {
-		kvm_free_hwpgd(hwpgd);
-		return PTR_ERR(pgd);
-	}
-
 	kvm_clean_pgd(pgd);
 	kvm->arch.pgd = pgd;
 	return 0;
@@ -874,8 +849,8 @@ void kvm_free_stage2_pgd(struct kvm *kvm)
 		return;
 
 	unmap_stage2_range(kvm, 0, KVM_PHYS_SIZE);
-	kvm_free_hwpgd(kvm_get_hwpgd(kvm));
-	kvm_free_fake_pgd(kvm->arch.pgd);
+	/* Free the HW pgd, one page at a time */
+	free_pages_exact(kvm->arch.pgd, S2_PGD_SIZE);
 	kvm->arch.pgd = NULL;
 }
 
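
One nuance worth noting: the stage-2 PGD can span several pages, and alloc_pages_exact()/free_pages_exact() hand those back as individually refcounted order-0 pages, which is why the comments speak of "each page gets its own refcount" and freeing "one page at a time". As a rough worked example of why S2_PGD_SIZE exceeds one page, under assumed geometry (40-bit IPA space, top-level entries each mapping 1GB, 8-byte descriptors; these values are assumptions for illustration, not read from this patch):

#include <stdio.h>

int main(void)
{
	/* Assumed geometry: 40-bit IPA, each top-level entry maps
	 * 1GB (30 bits), 8-byte descriptors on a 64-bit host. */
	unsigned long ipa_bits = 40, entry_bits = 30, desc_size = 8;
	unsigned long ptrs_per_s2_pgd = 1UL << (ipa_bits - entry_bits);
	unsigned long s2_pgd_size = ptrs_per_s2_pgd * desc_size;

	/* 1024 entries * 8 bytes = 8192 bytes: two 4K pages, hence
	 * alloc_pages_exact() rather than a single-page allocation. */
	printf("PTRS_PER_S2_PGD = %lu, S2_PGD_SIZE = %lu bytes\n",
	       ptrs_per_s2_pgd, s2_pgd_size);
	return 0;
}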