author    James Morse <james.morse@arm.com>          2018-01-15 14:39:01 -0500
committer Catalin Marinas <catalin.marinas@arm.com>  2018-01-16 10:08:41 -0500
commit    4715c14bc136687bb79d12e24aafdc0f38786eb7 (patch)
tree      eb35b59c2d79f731c1aacb722cead88cbe67b9c8
parent    4f5abad9e826bd579b0661efa32682d9c9bc3fa8 (diff)
KVM: arm64: Set an impdef ESR for Virtual-SError using VSESR_EL2.
Prior to v8.2's RAS Extensions, the HCR_EL2.VSE 'virtual SError' feature
generated an SError with an implementation defined ESR_EL1.ISS, because we
had no mechanism to specify the ESR value.

On Juno this generates an all-zero ESR; the most significant bit, 'ISV',
is clear, indicating the remainder of the ISS field is invalid.

With the RAS Extensions we have a mechanism to specify this value, and the
most significant bit has a new meaning: 'IDS - Implementation Defined
Syndrome'. An all-zero SError ESR now means 'RAS error: Uncategorized'
instead of 'no valid ISS'.

Add KVM support for the VSESR_EL2 register to specify an ESR value when
HCR_EL2.VSE generates a virtual SError. Change kvm_inject_vabt() to
specify an implementation-defined value.

We only need to restore the VSESR_EL2 value when HCR_EL2.VSE is set; KVM
save/restores this bit during __{,de}activate_traps(), and hardware clears
the bit once the guest has consumed the virtual SError.

Future patches may add an API (or KVM CAP) to pend a virtual SError with
a specified ESR.

Cc: Dongjiu Geng <gengdongjiu@huawei.com>
Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
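To make the syndrome semantics above concrete, here is a minimal stand-alone C sketch (not part of the patch; the decode helper is hypothetical, and only ESR_ELx_ISV mirrors the kernel's asm/esr.h definition) showing how the same two ESR values read before and after the RAS Extensions redefine ISS bit 24:

/*
 * Sketch only: decode_serror_iss() is invented for illustration.
 * ESR_ELx_ISV matches the kernel's (UL(1) << 24) definition.
 */
#include <stdio.h>

#define ESR_ELx_ISV	(1UL << 24)	/* pre-RAS: ISV; with RAS: IDS */

static const char *decode_serror_iss(unsigned long esr, int have_ras)
{
	if (!have_ras)
		return (esr & ESR_ELx_ISV) ? "valid ISS" : "no valid ISS";
	/* With the RAS Extensions, bit 24 set means imp-def syndrome */
	if (esr & ESR_ELx_ISV)
		return "implementation defined syndrome (IDS)";
	return "RAS error: Uncategorized";	/* all-zero ISS */
}

int main(void)
{
	printf("%s\n", decode_serror_iss(0, 1));		/* Uncategorized */
	printf("%s\n", decode_serror_iss(ESR_ELx_ISV, 1));	/* imp-def */
	return 0;
}

This is why kvm_inject_vabt() in the diff below passes ESR_ELx_ISV: with RAS, an ESR of just bit 24 reads as an implementation-defined syndrome rather than 'RAS error: Uncategorized'.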
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h   5
-rw-r--r--  arch/arm64/include/asm/kvm_host.h      3
-rw-r--r--  arch/arm64/include/asm/sysreg.h        1
-rw-r--r--  arch/arm64/kvm/hyp/switch.c            3
-rw-r--r--  arch/arm64/kvm/inject_fault.c         13
5 files changed, 24 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 5f28dfa14cee..6d3614795197 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -64,6 +64,11 @@ static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
 	vcpu->arch.hcr_el2 = hcr;
 }
 
+static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
+{
+	vcpu->arch.vsesr_el2 = vsesr;
+}
+
 static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
 {
 	return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index dcdd08edf5a5..3014b39b8fe2 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -280,6 +280,9 @@ struct kvm_vcpu_arch {
 
 	/* Detect first run of a vcpu */
 	bool has_run_once;
+
+	/* Virtual SError ESR to restore when HCR_EL2.VSE is set */
+	u64 vsesr_el2;
 };
 
 #define vcpu_gp_regs(v)	(&(v)->arch.ctxt.gp_regs)
diff --git a/arch/arm64/include/asm/sysreg.h b/arch/arm64/include/asm/sysreg.h
index b5d543fc677d..52cfdc216bcf 100644
--- a/arch/arm64/include/asm/sysreg.h
+++ b/arch/arm64/include/asm/sysreg.h
@@ -355,6 +355,7 @@
 
 #define SYS_DACR32_EL2		sys_reg(3, 4, 3, 0, 0)
 #define SYS_IFSR32_EL2		sys_reg(3, 4, 5, 0, 1)
+#define SYS_VSESR_EL2		sys_reg(3, 4, 5, 2, 3)
 #define SYS_FPEXC32_EL2		sys_reg(3, 4, 5, 3, 0)
 
 #define __SYS__AP0Rx_EL2(x)	sys_reg(3, 4, 12, 8, x)
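For reference, the new SYS_VSESR_EL2 definition packs the register's encoding, S3_4_C5_C2_3 in Arm ARM notation. A sketch of how the kernel's sys_reg() macro builds that value, assuming the shift constants match those in arch/arm64/include/asm/sysreg.h (worth double-checking against the tree):

/* Shift values as I recall them from asm/sysreg.h; verify before use. */
#define Op0_shift	19
#define Op1_shift	16
#define CRn_shift	12
#define CRm_shift	8
#define Op2_shift	5

#define sys_reg(op0, op1, crn, crm, op2)			\
	(((op0) << Op0_shift) | ((op1) << Op1_shift) |		\
	 ((crn) << CRn_shift) | ((crm) << CRm_shift) |		\
	 ((op2) << Op2_shift))

/* VSESR_EL2 is S3_4_C5_C2_3: op0=3, op1=4, CRn=5, CRm=2, op2=3 */
#define SYS_VSESR_EL2	sys_reg(3, 4, 5, 2, 3)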
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 324f4202cdd5..b425b8aab45b 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -94,6 +94,9 @@ static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
 
 	write_sysreg(val, hcr_el2);
 
+	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (val & HCR_VSE))
+		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);
+
 	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
 	write_sysreg(1 << 15, hstr_el2);
 	/*
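The VSESR_EL2 write above is guarded on HCR_VSE because hardware clears that bit once the guest takes the virtual SError, and the commit message notes KVM saves it back during __{,de}activate_traps(). A hedged sketch of what that save side might look like (the actual code in switch.c may differ):

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * Sketch, assuming the save described in the commit message:
	 * if a virtual SError was pended, re-read HCR_EL2 so the VSE
	 * bit (cleared by hardware once the guest takes the vSError)
	 * is preserved for the next __activate_traps().
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	/* ... remainder of trap deactivation elided ... */
}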
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index 8ecbcb40e317..60666a056944 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -164,14 +164,25 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 		inject_undef64(vcpu);
 }
 
+static void pend_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
+{
+	vcpu_set_vsesr(vcpu, esr);
+	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+}
+
 /**
  * kvm_inject_vabt - inject an async abort / SError into the guest
  * @vcpu: The VCPU to receive the exception
  *
  * It is assumed that this code is called from the VCPU thread and that the
  * VCPU therefore is not currently executing guest code.
+ *
+ * Systems with the RAS Extensions specify an imp-def ESR (ISV/IDS = 1) with
+ * the remaining ISS all-zeros so that this error is not interpreted as an
+ * uncategorized RAS error. Without the RAS Extensions we can't specify an ESR
+ * value, so the CPU generates an imp-def value.
  */
 void kvm_inject_vabt(struct kvm_vcpu *vcpu)
 {
-	vcpu_set_hcr(vcpu, vcpu_get_hcr(vcpu) | HCR_VSE);
+	pend_guest_serror(vcpu, ESR_ELx_ISV);
 }
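The commit message anticipates a later API (or KVM CAP) to pend a virtual SError with a caller-specified ESR. A hypothetical sketch of such a helper, building on pend_guest_serror() above; kvm_set_guest_serror() is invented here for illustration and is not part of this patch or any KVM API:

/* Hypothetical future helper: pend a vSError with a chosen syndrome. */
static int kvm_set_guest_serror(struct kvm_vcpu *vcpu, u64 esr)
{
	/*
	 * Without the RAS Extensions VSESR_EL2 does not exist, so the
	 * guest-visible ESR cannot be chosen; the CPU generates its
	 * own imp-def value.
	 */
	if (!cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
		return -EINVAL;

	pend_guest_serror(vcpu, esr);
	return 0;
}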