author		James Hogan <james.hogan@imgtec.com>	2016-07-08 06:53:25 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-08-01 12:42:24 -0400
commit		0d17aea5c27d7d748b1d8116d275b2b17dc5cad6
tree		39616859bb440c62c4421ff3ad4e56532492ec89
parent		1d756942533b2330d8929dd0ea61a81a5d020196
MIPS: KVM: Use 64-bit CP0_EBase when appropriate
Update the KVM entry point to write CP0_EBase as a 64-bit register when
it is 64 bits wide, and to set the WG (write gate) bit if it exists in
order to write bits 63:30 (or 31:30 on MIPS32).
Prior to MIPS64r6 it was UNDEFINED to perform a 64-bit read or write of
a 32-bit COP0 register. Since this is dynamically generated code,
generate the right type of access depending on whether the kernel is
64-bit and cpu_has_ebase_wg.
Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: "Radim Krčmář" <rkrcmar@redhat.com>
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: kvm@vger.kernel.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
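
The width of the generated EBase access comes from the new helper using the UASM_i_MTC0() wrapper instead of a bare uasm_i_mtc0(). As a minimal sketch of the assumed selection logic (mirroring the UASM_i_LW()/UASM_i_SW() pattern in arch/mips/include/asm/uasm.h; the definition below is an illustration, not part of this patch):

	/* Assumed uasm wrapper: pick the doubleword form only on 64-bit kernels */
	#ifdef CONFIG_64BIT
	# define UASM_i_MTC0(buf, rt, rd...)	uasm_i_dmtc0(buf, rt, rd)
	#else
	# define UASM_i_MTC0(buf, rt, rd...)	uasm_i_mtc0(buf, rt, rd)
	#endif

Under that assumption, the 64-bit dmtc0 form can only be produced by a 64-bit kernel, and the patch further restricts it to the cpu_has_ebase_wg path, which is how the dynamically generated code ends up with the right access size for the register it is writing.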
 arch/mips/kvm/entry.c | 25
 1 file changed, 22 insertions(+), 3 deletions(-)
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
index c824bfc4daa0..6a02b3a3fa65 100644
--- a/arch/mips/kvm/entry.c
+++ b/arch/mips/kvm/entry.c
@@ -153,6 +153,25 @@ static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
 }
 
 /**
+ * build_set_exc_base() - Assemble code to write exception base address.
+ * @p:		Code buffer pointer.
+ * @reg:	Source register (generated code may set WG bit in @reg).
+ *
+ * Assemble code to modify the exception base address in the EBase register,
+ * using the appropriately sized access and setting the WG bit if necessary.
+ */
+static inline void build_set_exc_base(u32 **p, unsigned int reg)
+{
+	if (cpu_has_ebase_wg) {
+		/* Set WG so that all the bits get written */
+		uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
+		UASM_i_MTC0(p, reg, C0_EBASE);
+	} else {
+		uasm_i_mtc0(p, reg, C0_EBASE);
+	}
+}
+
+/**
  * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
  * @addr:	Address to start writing code.
  *
@@ -216,7 +235,7 @@ void *kvm_mips_build_vcpu_run(void *addr)
 
 	/* load up the new EBASE */
 	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
-	uasm_i_mtc0(&p, K0, C0_EBASE);
+	build_set_exc_base(&p, K0);
 
 	/*
 	 * Now that the new EBASE has been loaded, unset BEV, set
@@ -463,7 +482,7 @@ void *kvm_mips_build_exit(void *addr)
 
 	UASM_i_LA_mostly(&p, K0, (long)&ebase);
 	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
-	uasm_i_mtc0(&p, K0, C0_EBASE);
+	build_set_exc_base(&p, K0);
 
 	if (raw_cpu_has_fpu) {
 		/*
@@ -620,7 +639,7 @@ static void *kvm_mips_build_ret_to_guest(void *addr)
 	uasm_i_or(&p, K0, V1, AT);
 	uasm_i_mtc0(&p, K0, C0_STATUS);
 	uasm_i_ehb(&p);
-	uasm_i_mtc0(&p, T0, C0_EBASE);
+	build_set_exc_base(&p, T0);
 
 	/* Setup status register for running guest in UM */
 	uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
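
For illustration, the sequences build_set_exc_base() assembles at runtime look roughly as follows (a sketch only; it assumes C0_EBASE expands to CP0 register 15, select 1, and that MIPS_EBASE_WG is the EBase.WG bit, neither of which is spelled out in this patch):

	/*
	 * cpu_has_ebase_wg, 64-bit kernel:
	 *	ori	reg, reg, MIPS_EBASE_WG	# keep WG set in the written value
	 *	dmtc0	reg, $15, 1		# 64-bit write; bits 63:30 take effect
	 *
	 * cpu_has_ebase_wg, 32-bit kernel:
	 *	ori	reg, reg, MIPS_EBASE_WG
	 *	mtc0	reg, $15, 1		# WG lets bits 31:30 be written
	 *
	 * !cpu_has_ebase_wg:
	 *	mtc0	reg, $15, 1		# legacy 32-bit write
	 */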