author    James Morse <james.morse@arm.com>        2018-01-15 14:39:05 -0500
committer Catalin Marinas <catalin.marinas@arm.com>        2018-01-16 10:09:36 -0500
commit    0067df413bd9d7e9ee3a78ece1e1a93535378862
tree      931defe6420049e2e258667c8f193ba7df864ac9
parent    3368bd809764d3ef0810e16c1e1531fec32e8d8e
KVM: arm64: Handle RAS SErrors from EL2 on guest exit
We expect to have firmware-first handling of RAS SErrors, with errors
notified via an APEI method. For systems without firmware-first, add
some minimal handling to KVM.

There are two ways KVM can take an SError due to a guest, and either may
be a RAS error: we exit the guest due to an SError routed to EL2 by
HCR_EL2.AMO, or we take an SError from EL2 when we unmask PSTATE.A from
__guest_exit.

The current SError-from-EL2 code unmasks SError and tries to fence any
pending SError into a single instruction window. It then leaves SError
unmasked.

With the v8.2 RAS Extensions we may take an SError for a 'corrected'
error, but KVM is only able to handle SErrors from EL2 if they occur
during this single instruction window...

The RAS Extensions give us a new instruction to synchronise and consume
SErrors. The RAS Extensions document (ARM DDI0587), '2.4.1 ESB and
Unrecoverable errors', describes ESB as synchronising SError interrupts
generated by 'instructions, translation table walks, hardware updates to
the translation tables, and instruction fetches on the same PE'. This
makes ESB equivalent to KVM's existing 'dsb, mrs-daifclr, isb' sequence.

Use the alternatives to synchronise and consume any SError using ESB
instead of unmasking and taking the SError. Set ARM_EXIT_WITH_SERROR_BIT
in the exit_code so that we can restart the vcpu if it turns out this
SError has no impact on the vcpu.

Reviewed-by: Marc Zyngier <marc.zyngier@arm.com>
Signed-off-by: James Morse <james.morse@arm.com>
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
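For context, the exit_code convention this builds on is pre-existing and
comes from arch/arm64/include/asm/kvm_asm.h (quoted approximately, not
part of this patch): the exception class lives in the low bits, and bit
31 is reserved as a 'pending SError' flag.

    #define ARM_EXIT_WITH_SERROR_BIT  31
    #define ARM_EXCEPTION_CODE(x)     ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
    #define ARM_SERROR_PENDING(x)     !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

handle_exit_early() below tests ARM_SERROR_PENDING(exception_index) and
strips the bit with ARM_EXCEPTION_CODE() before dispatching on the
remaining exception class.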
 arch/arm64/include/asm/kvm_emulate.h |  5 +++++
 arch/arm64/include/asm/kvm_host.h    |  1 +
 arch/arm64/kernel/asm-offsets.c      |  1 +
 arch/arm64/kvm/handle_exit.c         | 14 +++++++++++++-
 arch/arm64/kvm/hyp/entry.S           | 13 +++++++++++++
 5 files changed, 33 insertions(+), 1 deletion(-)
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index 6d3614795197..e002ab7f919a 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -176,6 +176,11 @@ static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
 	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
 }
 
+static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.fault.disr_el1;
+}
+
 static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
 {
 	return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index abcfd164e690..4485ae8e98de 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -90,6 +90,7 @@ struct kvm_vcpu_fault_info {
 	u32 esr_el2;		/* Hyp Syndrom Register */
 	u64 far_el2;		/* Hyp Fault Address Register */
 	u64 hpfar_el2;		/* Hyp IPA Fault Address Register */
+	u64 disr_el1;		/* Deferred [SError] Status Register */
 };
 
 /*
diff --git a/arch/arm64/kernel/asm-offsets.c b/arch/arm64/kernel/asm-offsets.c
index 1dcc493f5765..1303e04110cd 100644
--- a/arch/arm64/kernel/asm-offsets.c
+++ b/arch/arm64/kernel/asm-offsets.c
@@ -132,6 +132,7 @@ int main(void)
 	BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
 	DEFINE(VCPU_CONTEXT,		offsetof(struct kvm_vcpu, arch.ctxt));
+	DEFINE(VCPU_FAULT_DISR,		offsetof(struct kvm_vcpu, arch.fault.disr_el1));
 	DEFINE(CPU_GP_REGS,		offsetof(struct kvm_cpu_context, gp_regs));
 	DEFINE(CPU_USER_PT_REGS,	offsetof(struct kvm_regs, regs));
 	DEFINE(CPU_FP_REGS,		offsetof(struct kvm_regs, fp_regs));
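DEFINE() here is the kbuild asm-offsets mechanism: asm-offsets.c is
compiled to assembly but never linked, and the build scrapes the markers
it emits into include/generated/asm-offsets.h. That is what lets the
entry.S hunk below use VCPU_FAULT_DISR as an immediate offset. The
helper (from include/linux/kbuild.h, quoted approximately) is:

    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))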
diff --git a/arch/arm64/kvm/handle_exit.c b/arch/arm64/kvm/handle_exit.c
index 6a5a5db4292f..c09fc5a576c7 100644
--- a/arch/arm64/kvm/handle_exit.c
+++ b/arch/arm64/kvm/handle_exit.c
@@ -23,6 +23,7 @@
 #include <linux/kvm_host.h>
 
 #include <asm/esr.h>
+#include <asm/exception.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_coproc.h>
 #include <asm/kvm_emulate.h>
@@ -249,7 +250,6 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 			*vcpu_pc(vcpu) -= adj;
 		}
 
-		kvm_inject_vabt(vcpu);
 		return 1;
 	}
 
@@ -286,6 +286,18 @@ int handle_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
 void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 		       int exception_index)
 {
+	if (ARM_SERROR_PENDING(exception_index)) {
+		if (this_cpu_has_cap(ARM64_HAS_RAS_EXTN)) {
+			u64 disr = kvm_vcpu_get_disr(vcpu);
+
+			kvm_handle_guest_serror(vcpu, disr_to_esr(disr));
+		} else {
+			kvm_inject_vabt(vcpu);
+		}
+
+		return;
+	}
+
 	exception_index = ARM_EXCEPTION_CODE(exception_index);
 
 	if (exception_index == ARM_EXCEPTION_EL1_SERROR)
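disr_to_esr() comes from asm/exception.h, added earlier in this series;
the <asm/exception.h> include above pulls it in. For context, a sketch
of what it does, repackaging the deferred DISR syndrome as a normal
SError ESR so the existing kvm_handle_guest_serror() path can consume
it (exact mask names approximate):

    static inline u32 disr_to_esr(u64 disr)
    {
            unsigned int esr = ESR_ELx_EC_SERROR << ESR_ELx_EC_SHIFT;

            if ((disr & DISR_EL1_IDS) == 0)
                    esr |= (disr & DISR_EL1_ESR_MASK);  /* architected syndrome */
            else
                    esr |= (disr & ESR_ELx_ISS_MASK);   /* impdef syndrome */

            return esr;
    }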
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index fe4678f20a85..fdd1068ee3a5 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -124,6 +124,17 @@ ENTRY(__guest_exit)
 	// Now restore the host regs
 	restore_callee_saved_regs x2
 
+alternative_if ARM64_HAS_RAS_EXTN
+	// If we have the RAS extensions we can consume a pending error
+	// without an unmask-SError and isb.
+	esb
+	mrs_s	x2, SYS_DISR_EL1
+	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
+	cbz	x2, 1f
+	msr_s	SYS_DISR_EL1, xzr
+	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
+1:	ret
+alternative_else
 	// If we have a pending asynchronous abort, now is the
 	// time to find out. From your VAXorcist book, page 666:
 	// "Threaten me not, oh Evil one! For I speak with
@@ -134,7 +145,9 @@ ENTRY(__guest_exit)
 	mov	x5, x0
 
 	dsb	sy		// Synchronize against in-flight ld/st
+	nop
 	msr	daifclr, #4	// Unmask aborts
+alternative_endif
 
 	// This is our single instruction exception window. A pending
 	// SError is guaranteed to occur at the earliest when we unmask
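As a reading aid, the RAS fast path patched in above behaves roughly
like the following C. This is an illustrative sketch only: esb() is a
hypothetical wrapper for the ESB instruction, while read_sysreg_s() and
write_sysreg_s() are the kernel's accessors for system registers
addressed by encoding.

    static u64 guest_exit_consume_serror(struct kvm_vcpu *vcpu, u64 exit_code)
    {
            u64 disr;

            esb();                               /* synchronise/consume any pending SError */
            disr = read_sysreg_s(SYS_DISR_EL1);  /* deferred syndrome, zero if none */
            vcpu->arch.fault.disr_el1 = disr;    /* stashed for handle_exit_early() */
            if (disr) {
                    write_sysreg_s(0, SYS_DISR_EL1);  /* clear the deferred record */
                    exit_code |= 1UL << ARM_EXIT_WITH_SERROR_BIT;
            }
            return exit_code;
    }

On CPUs without the RAS extension, ESB executes as a NOP (it sits in the
hint space), but the DISR_EL1 accesses would not, which is why the
alternative keeps the original dsb/unmask window for that case.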