aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
author Alex Bennée <alex.bennee@linaro.org> 2017-11-23 07:11:34 -0500
committer Christoffer Dall <christoffer.dall@linaro.org> 2017-11-29 12:17:47 -0500
commite3feebf81744acd8b581e5eb58a93e8fdcf042a5 (patch)
tree2abf6456d4edf1bf02b8cf1b60ea3095ec87aa1d
parente70dce73befcf96607bc6e24c2c8f84229d6721e (diff)
kvm: arm64: handle single-step of hyp emulated mmio instructions
There is a fast-path of MMIO emulation inside hyp mode. The handling of single-step is broadly the same as kvm_arm_handle_step_debug() except we just set up ESR/HSR so handle_exit() does the correct thing as we exit. For the case of an emulated illegal access causing an SError we will exit via the ARM_EXCEPTION_EL1_SERROR path in handle_exit(). We behave as we would during a real SError and clear the DBG_SPSR_SS bit for the emulated instruction. Acked-by: Marc Zyngier <marc.zyngier@arm.com> Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org> Signed-off-by: Alex Bennée <alex.bennee@linaro.org> Signed-off-by: Christoffer Dall <christoffer.dall@linaro.org>
-rw-r--r-- arch/arm64/kvm/hyp/switch.c 37
1 file changed, 30 insertions, 7 deletions
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 525c01f48867..f7c651f3a8c0 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -22,6 +22,7 @@
22#include <asm/kvm_emulate.h> 22#include <asm/kvm_emulate.h>
23#include <asm/kvm_hyp.h> 23#include <asm/kvm_hyp.h>
24#include <asm/fpsimd.h> 24#include <asm/fpsimd.h>
25#include <asm/debug-monitors.h>
25 26
26static bool __hyp_text __fpsimd_enabled_nvhe(void) 27static bool __hyp_text __fpsimd_enabled_nvhe(void)
27{ 28{
@@ -269,7 +270,11 @@ static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
269 return true; 270 return true;
270} 271}
271 272
272static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu) 273/* Skip an instruction which has been emulated. Returns true if
274 * execution can continue or false if we need to exit hyp mode because
275 * single-step was in effect.
276 */
277static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
273{ 278{
274 *vcpu_pc(vcpu) = read_sysreg_el2(elr); 279 *vcpu_pc(vcpu) = read_sysreg_el2(elr);
275 280
@@ -282,6 +287,14 @@ static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
282 } 287 }
283 288
284 write_sysreg_el2(*vcpu_pc(vcpu), elr); 289 write_sysreg_el2(*vcpu_pc(vcpu), elr);
290
291 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
292 vcpu->arch.fault.esr_el2 =
293 (ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
294 return false;
295 } else {
296 return true;
297 }
285} 298}
286 299
287int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu) 300int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
@@ -342,13 +355,21 @@ again:
342 int ret = __vgic_v2_perform_cpuif_access(vcpu); 355 int ret = __vgic_v2_perform_cpuif_access(vcpu);
343 356
344 if (ret == 1) { 357 if (ret == 1) {
345 __skip_instr(vcpu); 358 if (__skip_instr(vcpu))
346 goto again; 359 goto again;
360 else
361 exit_code = ARM_EXCEPTION_TRAP;
347 } 362 }
348 363
349 if (ret == -1) { 364 if (ret == -1) {
350 /* Promote an illegal access to an SError */ 365 /* Promote an illegal access to an
351 __skip_instr(vcpu); 366 * SError. If we would be returning
367 * due to single-step clear the SS
368 * bit so handle_exit knows what to
369 * do after dealing with the error.
370 */
371 if (!__skip_instr(vcpu))
372 *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
352 exit_code = ARM_EXCEPTION_EL1_SERROR; 373 exit_code = ARM_EXCEPTION_EL1_SERROR;
353 } 374 }
354 375
@@ -363,8 +384,10 @@ again:
363 int ret = __vgic_v3_perform_cpuif_access(vcpu); 384 int ret = __vgic_v3_perform_cpuif_access(vcpu);
364 385
365 if (ret == 1) { 386 if (ret == 1) {
366 __skip_instr(vcpu); 387 if (__skip_instr(vcpu))
367 goto again; 388 goto again;
389 else
390 exit_code = ARM_EXCEPTION_TRAP;
368 } 391 }
369 392
370 /* 0 falls through to be handled out of EL2 */ 393 /* 0 falls through to be handled out of EL2 */