about summary refs log tree commit diff stats
path: root/virt/kvm/arm/aarch32.c
diff options
context:
space:
mode:
Diffstat (limited to 'virt/kvm/arm/aarch32.c')
-rw-r--r--virt/kvm/arm/aarch32.c97
1 files changed, 92 insertions, 5 deletions
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index 79c7c357804b..8bc479fa37e6 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -25,11 +25,6 @@
25#include <asm/kvm_emulate.h> 25#include <asm/kvm_emulate.h>
26#include <asm/kvm_hyp.h> 26#include <asm/kvm_hyp.h>
27 27
28#ifndef CONFIG_ARM64
29#define COMPAT_PSR_T_BIT PSR_T_BIT
30#define COMPAT_PSR_IT_MASK PSR_IT_MASK
31#endif
32
33/* 28/*
34 * stolen from arch/arm/kernel/opcodes.c 29 * stolen from arch/arm/kernel/opcodes.c
35 * 30 *
@@ -150,3 +145,95 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
150 *vcpu_pc(vcpu) += 4; 145 *vcpu_pc(vcpu) += 4;
151 kvm_adjust_itstate(vcpu); 146 kvm_adjust_itstate(vcpu);
152} 147}
148
149/*
150 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
151 */
/*
 * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
 *
 * Offset added to the faulting PC to form the exception return address
 * (the banked LR), indexed as [vector_offset >> 2][is_thumb]:
 * column 0 is the ARM (A32) offset, column 1 the Thumb (T32) offset.
 * Vectors KVM never injects directly are marked "unused".
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};
162
/*
 * Emulate AArch32 exception entry for a 32-bit guest: build the new
 * CPSR for the target @mode, preserve the old CPSR in the (banked)
 * SPSR, set the banked LR to the return address, and point the PC at
 * the requested exception vector.
 *
 * @vcpu:        the vCPU taking the exception
 * @mode:        target AArch32 mode/state bits (e.g. COMPAT_PSR_MODE_UND)
 * @vect_offset: byte offset of the vector within the vector table
 */
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long cpsr;
	unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
	bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
	/* Return offset depends on both the vector and the Thumb state */
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	/* Exceptions are taken with IRQs masked */
	cpsr = mode | COMPAT_PSR_I_BIT;

	if (sctlr & (1 << 30))		/* SCTLR.TE: exceptions taken in Thumb */
		cpsr |= COMPAT_PSR_T_BIT;
	if (sctlr & (1 << 25))		/* SCTLR.EE: exception endianness */
		cpsr |= COMPAT_PSR_E_BIT;

	/* Mode switch must happen before touching banked SPSR/LR below */
	*vcpu_cpsr(vcpu) = cpsr;

	/* Note: These now point to the banked copies */
	*vcpu_spsr(vcpu) = new_spsr_value;
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))		/* SCTLR.V: Hivecs (high vectors) */
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}
192
/*
 * Inject an Undefined Instruction exception into a 32-bit guest:
 * enter Undefined mode via the Undef vector (offset 4).
 */
void kvm_inject_undef32(struct kvm_vcpu *vcpu)
{
	prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
}
197
198/*
199 * Modelled after TakeDataAbortException() and TakePrefetchAbortException
200 * pseudocode.
201 */
202static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
203 unsigned long addr)
204{
205 u32 vect_offset;
206 u32 *far, *fsr;
207 bool is_lpae;
208
209 if (is_pabt) {
210 vect_offset = 12;
211 far = &vcpu_cp15(vcpu, c6_IFAR);
212 fsr = &vcpu_cp15(vcpu, c5_IFSR);
213 } else { /* !iabt */
214 vect_offset = 16;
215 far = &vcpu_cp15(vcpu, c6_DFAR);
216 fsr = &vcpu_cp15(vcpu, c5_DFSR);
217 }
218
219 prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
220
221 *far = addr;
222
223 /* Give the guest an IMPLEMENTATION DEFINED exception */
224 is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
225 if (is_lpae)
226 *fsr = 1 << 9 | 0x34;
227 else
228 *fsr = 0x14;
229}
230
/*
 * Inject a data abort into a 32-bit guest for the faulting
 * address @addr.
 */
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}
235
/*
 * Inject a prefetch abort into a 32-bit guest for the faulting
 * address @addr.
 */
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}