diff options
author     Joel Schopp <joel.schopp@amd.com>            2014-07-09 12:17:04 -0400
committer  Christoffer Dall <christoffer.dall@linaro.org>  2014-09-26 08:39:13 -0400
commit     dbff124e29fa24aff9705b354b5f4648cd96e0bb (patch)
tree       656027fb37f0866098281c7fb48accba6b47db66
parent     0fea6d7628ed6e25a9ee1b67edf7c859718d39e8 (diff)
arm/arm64: KVM: Fix VTTBR_BADDR_MASK and pgd alloc
The current aarch64 calculation for VTTBR_BADDR_MASK masks only 39 bits
and not all the bits in the PA range. This is clearly a bug that
manifests itself on systems that allocate memory in the higher address
space range.
[ Modified from Joel's original patch to be based on PHYS_MASK_SHIFT
instead of a hard-coded value and to move the alignment check of the
allocation to mmu.c. Also added a comment explaining why we hardcode
the IPA range and changed the stage-2 pgd allocation to be based on
the 40 bit IPA range instead of the maximum possible 48 bit PA range.
- Christoffer ]
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Joel Schopp <joel.schopp@amd.com>
Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
 arch/arm/kvm/arm.c               |  4 ++--
 arch/arm64/include/asm/kvm_arm.h | 13 ++++++++++++-
 arch/arm64/include/asm/kvm_mmu.h |  5 ++---
 3 files changed, 16 insertions(+), 6 deletions(-)
diff --git a/arch/arm/kvm/arm.c b/arch/arm/kvm/arm.c
index 40bc3df6d87b..779605122f32 100644
--- a/arch/arm/kvm/arm.c
+++ b/arch/arm/kvm/arm.c
@@ -410,9 +410,9 @@ static void update_vttbr(struct kvm *kvm)
 
 	/* update vttbr to be used with the new vmid */
 	pgd_phys = virt_to_phys(kvm->arch.pgd);
+	BUG_ON(pgd_phys & ~VTTBR_BADDR_MASK);
 	vmid = ((u64)(kvm->arch.vmid) << VTTBR_VMID_SHIFT) & VTTBR_VMID_MASK;
-	kvm->arch.vttbr = pgd_phys & VTTBR_BADDR_MASK;
-	kvm->arch.vttbr |= vmid;
+	kvm->arch.vttbr = pgd_phys | vmid;
 
 	spin_unlock(&kvm_vmid_lock);
 }
diff --git a/arch/arm64/include/asm/kvm_arm.h b/arch/arm64/include/asm/kvm_arm.h
index cc83520459ed..7fd3e27e3ccc 100644
--- a/arch/arm64/include/asm/kvm_arm.h
+++ b/arch/arm64/include/asm/kvm_arm.h
@@ -122,6 +122,17 @@
 #define VTCR_EL2_T0SZ_MASK	0x3f
 #define VTCR_EL2_T0SZ_40B	24
 
+/*
+ * We configure the Stage-2 page tables to always restrict the IPA space to be
+ * 40 bits wide (T0SZ = 24).  Systems with a PARange smaller than 40 bits are
+ * not known to exist and will break with this configuration.
+ *
+ * Note that when using 4K pages, we concatenate two first level page tables
+ * together.
+ *
+ * The magic numbers used for VTTBR_X in this patch can be found in Tables
+ * D4-23 and D4-25 in ARM DDI 0487A.b.
+ */
 #ifdef CONFIG_ARM64_64K_PAGES
 /*
  * Stage2 translation configuration:
@@ -149,7 +160,7 @@
 #endif
 
 #define VTTBR_BADDR_SHIFT (VTTBR_X - 1)
-#define VTTBR_BADDR_MASK  (((1LLU << (40 - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
+#define VTTBR_BADDR_MASK  (((1LLU << (PHYS_MASK_SHIFT - VTTBR_X)) - 1) << VTTBR_BADDR_SHIFT)
 #define VTTBR_VMID_SHIFT  (48LLU)
 #define VTTBR_VMID_MASK	  (0xffLLU << VTTBR_VMID_SHIFT)
 
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 737da742b293..a030d163840b 100644
--- a/arch/arm64/include/asm/kvm_mmu.h
+++ b/arch/arm64/include/asm/kvm_mmu.h
@@ -59,10 +59,9 @@
 #define KERN_TO_HYP(kva)	((unsigned long)kva - PAGE_OFFSET + HYP_PAGE_OFFSET)
 
 /*
- * Align KVM with the kernel's view of physical memory.  Should be
- * 40bit IPA, with PGD being 8kB aligned in the 4KB page configuration.
+ * We currently only support a 40bit IPA.
  */
-#define KVM_PHYS_SHIFT	PHYS_MASK_SHIFT
+#define KVM_PHYS_SHIFT	(40)
 #define KVM_PHYS_SIZE	(1UL << KVM_PHYS_SHIFT)
 #define KVM_PHYS_MASK	(KVM_PHYS_SIZE - 1UL)
 