author    Marc Zyngier <marc.zyngier@arm.com>    2015-10-28 11:06:47 -0400
committer Marc Zyngier <marc.zyngier@arm.com>    2016-02-29 13:34:18 -0500
commit    5f05a72aed023e5824eebb2542b5397cb89188f4 (patch)
tree      71166b529f6e077e7153119e09f30fac8ff2d815 /arch/arm64/kvm
parent    253dcbd39adb00890f3c350230ae310fcfeeb760 (diff)
arm64: KVM: Move most of the fault decoding to C
The fault decoding process (including computing the IPA in the case of a
permission fault) would be much better done in C code, as we have a
reasonable infrastructure to deal with the VHE/non-VHE differences.

Let's move the whole thing to C, including the workaround for erratum
834220, and just patch the odd ESR_EL2 access remaining in hyp-entry.S.

Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
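[Editor's note] The "infrastructure" referred to above is the hyp_alternate_select() helper used further down in switch.c. The sketch below is illustrative only, not the kernel's actual macro: it models how a capability-based selector can hand back one of two callbacks, which is why the patch calls it as __check_arm_834220()() (fetch the selected callback, then invoke it). The cpu_has_erratum_834220() helper here is hypothetical; in the kernel the choice is patched in place by the alternatives framework rather than evaluated at run time.

/*
 * Illustrative sketch only -- not the kernel's actual implementation.
 */
#include <stdbool.h>

static bool __true_value(void)  { return true;  }
static bool __false_value(void) { return false; }

/* Hypothetical stand-in for the CPU-capability check. */
static bool cpu_has_erratum_834220(void) { return false; }

/* Returns a pointer to whichever callback applies on this CPU. */
static bool (*__check_arm_834220(void))(void)
{
	return cpu_has_erratum_834220() ? __true_value : __false_value;
}

/* Usage, mirroring the patch: bool need_workaround = __check_arm_834220()(); */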
Diffstat (limited to 'arch/arm64/kvm')
-rw-r--r--  arch/arm64/kvm/hyp/hyp-entry.S | 69
-rw-r--r--  arch/arm64/kvm/hyp/switch.c    | 85
2 files changed, 90 insertions(+), 64 deletions(-)
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index 1bdeee70833e..3488894397ff 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -19,7 +19,6 @@
 
 #include <asm/alternative.h>
 #include <asm/assembler.h>
-#include <asm/asm-offsets.h>
 #include <asm/cpufeature.h>
 #include <asm/kvm_arm.h>
 #include <asm/kvm_asm.h>
@@ -69,7 +68,11 @@ ENDPROC(__vhe_hyp_call)
 el1_sync:				// Guest trapped into EL2
 	save_x0_to_x3
 
+alternative_if_not ARM64_HAS_VIRT_HOST_EXTN
 	mrs	x1, esr_el2
+alternative_else
+	mrs	x1, esr_el1
+alternative_endif
 	lsr	x2, x1, #ESR_ELx_EC_SHIFT
 
 	cmp	x2, #ESR_ELx_EC_HVC64
@@ -105,72 +108,10 @@ el1_trap:
 	cmp	x2, #ESR_ELx_EC_FP_ASIMD
 	b.eq	__fpsimd_guest_restore
 
-	cmp	x2, #ESR_ELx_EC_DABT_LOW
-	mov	x0, #ESR_ELx_EC_IABT_LOW
-	ccmp	x2, x0, #4, ne
-	b.ne	1f		// Not an abort we care about
-
-	/* This is an abort. Check for permission fault */
-alternative_if_not ARM64_WORKAROUND_834220
-	and	x2, x1, #ESR_ELx_FSC_TYPE
-	cmp	x2, #FSC_PERM
-	b.ne	1f		// Not a permission fault
-alternative_else
-	nop			// Use the permission fault path to
-	nop			// check for a valid S1 translation,
-	nop			// regardless of the ESR value.
-alternative_endif
-
-	/*
-	 * Check for Stage-1 page table walk, which is guaranteed
-	 * to give a valid HPFAR_EL2.
-	 */
-	tbnz	x1, #7, 1f	// S1PTW is set
-
-	/* Preserve PAR_EL1 */
-	mrs	x3, par_el1
-	stp	x3, xzr, [sp, #-16]!
-
-	/*
-	 * Permission fault, HPFAR_EL2 is invalid.
-	 * Resolve the IPA the hard way using the guest VA.
-	 * Stage-1 translation already validated the memory access rights.
-	 * As such, we can use the EL1 translation regime, and don't have
-	 * to distinguish between EL0 and EL1 access.
-	 */
-	mrs	x2, far_el2
-	at	s1e1r, x2
-	isb
-
-	/* Read result */
-	mrs	x3, par_el1
-	ldp	x0, xzr, [sp], #16	// Restore PAR_EL1 from the stack
-	msr	par_el1, x0
-	tbnz	x3, #0, 3f		// Bail out if we failed the translation
-	ubfx	x3, x3, #12, #36	// Extract IPA
-	lsl	x3, x3, #4		// and present it like HPFAR
-	b	2f
-
-1:	mrs	x3, hpfar_el2
-	mrs	x2, far_el2
-
-2:	mrs	x0, tpidr_el2
-	str	w1, [x0, #VCPU_ESR_EL2]
-	str	x2, [x0, #VCPU_FAR_EL2]
-	str	x3, [x0, #VCPU_HPFAR_EL2]
-
+	mrs	x0, tpidr_el2
 	mov	x1, #ARM_EXCEPTION_TRAP
 	b	__guest_exit
 
-	/*
-	 * Translation failed. Just return to the guest and
-	 * let it fault again. Another CPU is probably playing
-	 * behind our back.
-	 */
-3:	restore_x0_to_x3
-
-	eret
-
 el1_irq:
 	save_x0_to_x3
 	mrs	x0, tpidr_el2
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index 731f0a2ffee0..ecf5b05d1e16 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -15,6 +15,7 @@
  * along with this program. If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/types.h>
 #include <asm/kvm_asm.h>
 
 #include "hyp.h"
@@ -149,6 +150,86 @@ static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
 	__vgic_call_restore_state()(vcpu);
 }
 
+static bool __hyp_text __true_value(void)
+{
+	return true;
+}
+
+static bool __hyp_text __false_value(void)
+{
+	return false;
+}
+
+static hyp_alternate_select(__check_arm_834220,
+			    __false_value, __true_value,
+			    ARM64_WORKAROUND_834220);
+
+static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
+{
+	u64 par, tmp;
+
+	/*
+	 * Resolve the IPA the hard way using the guest VA.
+	 *
+	 * Stage-1 translation already validated the memory access
+	 * rights. As such, we can use the EL1 translation regime, and
+	 * don't have to distinguish between EL0 and EL1 access.
+	 *
+	 * We do need to save/restore PAR_EL1 though, as we haven't
+	 * saved the guest context yet, and we may return early...
+	 */
+	par = read_sysreg(par_el1);
+	asm volatile("at s1e1r, %0" : : "r" (far));
+	isb();
+
+	tmp = read_sysreg(par_el1);
+	write_sysreg(par, par_el1);
+
+	if (unlikely(tmp & 1))
+		return false; /* Translation failed, back to guest */
+
+	/* Convert PAR to HPFAR format */
+	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
+	return true;
+}
+
+static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
+{
+	u64 esr = read_sysreg_el2(esr);
+	u8 ec = esr >> ESR_ELx_EC_SHIFT;
+	u64 hpfar, far;
+
+	vcpu->arch.fault.esr_el2 = esr;
+
+	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
+		return true;
+
+	far = read_sysreg_el2(far);
+
+	/*
+	 * The HPFAR can be invalid if the stage 2 fault did not
+	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
+	 * bit is clear) and one of the two following cases are true:
+	 * 1. The fault was due to a permission fault
+	 * 2. The processor carries errata 834220
+	 *
+	 * Therefore, for all non S1PTW faults where we either have a
+	 * permission fault or the errata workaround is enabled, we
+	 * resolve the IPA using the AT instruction.
+	 */
+	if (!(esr & ESR_ELx_S1PTW) &&
+	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
+		if (!__translate_far_to_hpfar(far, &hpfar))
+			return false;
+	} else {
+		hpfar = read_sysreg(hpfar_el2);
+	}
+
+	vcpu->arch.fault.far_el2 = far;
+	vcpu->arch.fault.hpfar_el2 = hpfar;
+	return true;
+}
+
 static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *host_ctxt;
@@ -180,9 +261,13 @@ static int __hyp_text __guest_run(struct kvm_vcpu *vcpu)
 	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
 
 	/* Jump in the fire! */
+again:
 	exit_code = __guest_enter(vcpu, host_ctxt);
 	/* And we're baaack! */
 
+	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
+		goto again;
+
 	fp_enabled = __fpsimd_enabled();
 
 	__sysreg_save_guest_state(guest_ctxt);
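[Editor's note] On the conversion in __translate_far_to_hpfar(): PAR_EL1 reports the output address in bits [47:12], while HPFAR_EL2 carries IPA[47:12] in its FIPA field starting at bit 4, hence the shift right by 12, the 36-bit mask, and the shift left by 4 (the same job the removed ubfx/lsl assembly did). A small standalone sketch, using a made-up PAR value purely for illustration:

#include <stdint.h>
#include <stdio.h>

/* Mimics the PAR_EL1 -> HPFAR_EL2 conversion from the patch:
 * keep address bits [47:12] and present them at bits [39:4]. */
static uint64_t par_to_hpfar(uint64_t par)
{
	return ((par >> 12) & ((1ULL << 36) - 1)) << 4;
}

int main(void)
{
	/* Hypothetical PAR_EL1 value: translation succeeded (bit 0 clear),
	 * output address 0x812345000. */
	uint64_t par = 0x812345000ULL;

	printf("hpfar = 0x%llx\n", (unsigned long long)par_to_hpfar(par));
	/* Prints 0x8123450: address bits [47:12] = 0x812345, shifted into FIPA. */
	return 0;
}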