-rw-r--r--  arch/arm/include/asm/kvm_emulate.h   |  36
-rw-r--r--  arch/arm/kvm/emulate.c               | 139
-rw-r--r--  arch/arm64/include/asm/kvm_emulate.h |   3
-rw-r--r--  arch/arm64/kvm/inject_fault.c        |  74
-rw-r--r--  virt/kvm/arm/aarch32.c               |  97
5 files changed, 131 insertions(+), 218 deletions(-)
diff --git a/arch/arm/include/asm/kvm_emulate.h b/arch/arm/include/asm/kvm_emulate.h
index 98089ffd91bb..dcae3970148d 100644
--- a/arch/arm/include/asm/kvm_emulate.h
+++ b/arch/arm/include/asm/kvm_emulate.h
@@ -25,7 +25,22 @@
 #include <asm/kvm_arm.h>
 #include <asm/cputype.h>
 
+/* arm64 compatibility macros */
+#define COMPAT_PSR_MODE_ABT ABT_MODE
+#define COMPAT_PSR_MODE_UND UND_MODE
+#define COMPAT_PSR_T_BIT PSR_T_BIT
+#define COMPAT_PSR_I_BIT PSR_I_BIT
+#define COMPAT_PSR_A_BIT PSR_A_BIT
+#define COMPAT_PSR_E_BIT PSR_E_BIT
+#define COMPAT_PSR_IT_MASK PSR_IT_MASK
+
 unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num);
+
+static inline unsigned long *vcpu_reg32(struct kvm_vcpu *vcpu, u8 reg_num)
+{
+        return vcpu_reg(vcpu, reg_num);
+}
+
 unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu);
 
 static inline unsigned long vcpu_get_reg(struct kvm_vcpu *vcpu,
@@ -42,10 +57,25 @@ static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
 
 bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
 void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);
-void kvm_inject_undefined(struct kvm_vcpu *vcpu);
+void kvm_inject_undef32(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+
+static inline void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+        kvm_inject_undef32(vcpu);
+}
+
+static inline void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+        kvm_inject_dabt32(vcpu, addr);
+}
+
+static inline void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+        kvm_inject_pabt32(vcpu, addr);
+}
 
 static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index 0064b86a2c87..cdff963f133a 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -165,145 +165,6 @@ unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
  * Inject exceptions into the guest
  */
 
-static u32 exc_vector_base(struct kvm_vcpu *vcpu)
-{
-        u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
-        u32 vbar = vcpu_cp15(vcpu, c12_VBAR);
-
-        if (sctlr & SCTLR_V)
-                return 0xffff0000;
-        else /* always have security exceptions */
-                return vbar;
-}
-
-/*
- * Switch to an exception mode, updating both CPSR and SPSR. Follow
- * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
- */
-static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
-{
-        unsigned long cpsr = *vcpu_cpsr(vcpu);
-        u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
-
-        *vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
-
-        switch (mode) {
-        case FIQ_MODE:
-                *vcpu_cpsr(vcpu) |= PSR_F_BIT;
-                /* Fall through */
-        case ABT_MODE:
-        case IRQ_MODE:
-                *vcpu_cpsr(vcpu) |= PSR_A_BIT;
-                /* Fall through */
-        default:
-                *vcpu_cpsr(vcpu) |= PSR_I_BIT;
-        }
-
-        *vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-        if (sctlr & SCTLR_TE)
-                *vcpu_cpsr(vcpu) |= PSR_T_BIT;
-        if (sctlr & SCTLR_EE)
-                *vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-        /* Note: These now point to the mode banked copies */
-        *vcpu_spsr(vcpu) = cpsr;
-}
-
-/**
- * kvm_inject_undefined - inject an undefined exception into the guest
- * @vcpu: The VCPU to receive the undefined exception
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- *
- * Modelled after TakeUndefInstrException() pseudocode.
- */
-void kvm_inject_undefined(struct kvm_vcpu *vcpu)
-{
-        unsigned long cpsr = *vcpu_cpsr(vcpu);
-        bool is_thumb = (cpsr & PSR_T_BIT);
-        u32 vect_offset = 4;
-        u32 return_offset = (is_thumb) ? 2 : 4;
-
-        kvm_update_psr(vcpu, UND_MODE);
-        *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
-
-        /* Branch to exception vector */
-        *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
-}
-
-/*
- * Modelled after TakeDataAbortException() and TakePrefetchAbortException
- * pseudocode.
- */
-static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
-{
-        unsigned long cpsr = *vcpu_cpsr(vcpu);
-        bool is_thumb = (cpsr & PSR_T_BIT);
-        u32 vect_offset;
-        u32 return_offset = (is_thumb) ? 4 : 0;
-        bool is_lpae;
-
-        kvm_update_psr(vcpu, ABT_MODE);
-        *vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
-
-        if (is_pabt)
-                vect_offset = 12;
-        else
-                vect_offset = 16;
-
-        /* Branch to exception vector */
-        *vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
-
-        if (is_pabt) {
-                /* Set IFAR and IFSR */
-                vcpu_cp15(vcpu, c6_IFAR) = addr;
-                is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-                /* Always give debug fault for now - should give guest a clue */
-                if (is_lpae)
-                        vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22;
-                else
-                        vcpu_cp15(vcpu, c5_IFSR) = 2;
-        } else { /* !iabt */
-                /* Set DFAR and DFSR */
-                vcpu_cp15(vcpu, c6_DFAR) = addr;
-                is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-                /* Always give debug fault for now - should give guest a clue */
-                if (is_lpae)
-                        vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22;
-                else
-                        vcpu_cp15(vcpu, c5_DFSR) = 2;
-        }
-
-}
-
-/**
- * kvm_inject_dabt - inject a data abort into the guest
- * @vcpu: The VCPU to receive the undefined exception
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-        inject_abt(vcpu, false, addr);
-}
-
-/**
- * kvm_inject_pabt - inject a prefetch abort into the guest
- * @vcpu: The VCPU to receive the undefined exception
- * @addr: The address to report in the DFAR
- *
- * It is assumed that this code is called from the VCPU thread and that the
- * VCPU therefore is not currently executing guest code.
- */
-void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
-{
-        inject_abt(vcpu, true, addr);
-}
-
 /**
  * kvm_inject_vabt - inject an async abort / SError into the guest
  * @vcpu: The VCPU to receive the exception
diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h
index e5df3fce0008..bf61da0ef82b 100644
--- a/arch/arm64/include/asm/kvm_emulate.h
+++ b/arch/arm64/include/asm/kvm_emulate.h
@@ -41,6 +41,9 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu);
 void kvm_inject_vabt(struct kvm_vcpu *vcpu);
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_undef32(struct kvm_vcpu *vcpu);
+void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
+void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);
 
 static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
 {
diff --git a/arch/arm64/kvm/inject_fault.c b/arch/arm64/kvm/inject_fault.c
index da6a8cfa54a0..8ecbcb40e317 100644
--- a/arch/arm64/kvm/inject_fault.c
+++ b/arch/arm64/kvm/inject_fault.c
@@ -33,74 +33,6 @@
 #define LOWER_EL_AArch64_VECTOR 0x400
 #define LOWER_EL_AArch32_VECTOR 0x600
 
-static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
-{
-        unsigned long cpsr;
-        unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
-        bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
-        u32 return_offset = (is_thumb) ? 4 : 0;
-        u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
-
-        cpsr = mode | COMPAT_PSR_I_BIT;
-
-        if (sctlr & (1 << 30))
-                cpsr |= COMPAT_PSR_T_BIT;
-        if (sctlr & (1 << 25))
-                cpsr |= COMPAT_PSR_E_BIT;
-
-        *vcpu_cpsr(vcpu) = cpsr;
-
-        /* Note: These now point to the banked copies */
-        *vcpu_spsr(vcpu) = new_spsr_value;
-        *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
-
-        /* Branch to exception vector */
-        if (sctlr & (1 << 13))
-                vect_offset += 0xffff0000;
-        else /* always have security exceptions */
-                vect_offset += vcpu_cp15(vcpu, c12_VBAR);
-
-        *vcpu_pc(vcpu) = vect_offset;
-}
-
-static void inject_undef32(struct kvm_vcpu *vcpu)
-{
-        prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
-}
-
-/*
- * Modelled after TakeDataAbortException() and TakePrefetchAbortException
- * pseudocode.
- */
-static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
-                         unsigned long addr)
-{
-        u32 vect_offset;
-        u32 *far, *fsr;
-        bool is_lpae;
-
-        if (is_pabt) {
-                vect_offset = 12;
-                far = &vcpu_cp15(vcpu, c6_IFAR);
-                fsr = &vcpu_cp15(vcpu, c5_IFSR);
-        } else { /* !iabt */
-                vect_offset = 16;
-                far = &vcpu_cp15(vcpu, c6_DFAR);
-                fsr = &vcpu_cp15(vcpu, c5_DFSR);
-        }
-
-        prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
-
-        *far = addr;
-
-        /* Give the guest an IMPLEMENTATION DEFINED exception */
-        is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
-        if (is_lpae)
-                *fsr = 1 << 9 | 0x34;
-        else
-                *fsr = 0x14;
-}
-
 enum exception_type {
         except_type_sync = 0,
         except_type_irq = 0x80,
@@ -197,7 +129,7 @@ static void inject_undef64(struct kvm_vcpu *vcpu)
 void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
         if (!(vcpu->arch.hcr_el2 & HCR_RW))
-                inject_abt32(vcpu, false, addr);
+                kvm_inject_dabt32(vcpu, addr);
         else
                 inject_abt64(vcpu, false, addr);
 }
@@ -213,7 +145,7 @@ void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
 void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 {
         if (!(vcpu->arch.hcr_el2 & HCR_RW))
-                inject_abt32(vcpu, true, addr);
+                kvm_inject_pabt32(vcpu, addr);
         else
                 inject_abt64(vcpu, true, addr);
 }
@@ -227,7 +159,7 @@ void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
         if (!(vcpu->arch.hcr_el2 & HCR_RW))
-                inject_undef32(vcpu);
+                kvm_inject_undef32(vcpu);
         else
                 inject_undef64(vcpu);
 }
diff --git a/virt/kvm/arm/aarch32.c b/virt/kvm/arm/aarch32.c
index 79c7c357804b..8bc479fa37e6 100644
--- a/virt/kvm/arm/aarch32.c
+++ b/virt/kvm/arm/aarch32.c
@@ -25,11 +25,6 @@
 #include <asm/kvm_emulate.h>
 #include <asm/kvm_hyp.h>
 
-#ifndef CONFIG_ARM64
-#define COMPAT_PSR_T_BIT PSR_T_BIT
-#define COMPAT_PSR_IT_MASK PSR_IT_MASK
-#endif
-
 /*
  * stolen from arch/arm/kernel/opcodes.c
  *
@@ -150,3 +145,95 @@ void __hyp_text kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr)
         *vcpu_pc(vcpu) += 4;
         kvm_adjust_itstate(vcpu);
 }
+
+/*
+ * Table taken from ARMv8 ARM DDI0487B-B, table G1-10.
+ */
+static const u8 return_offsets[8][2] = {
+        [0] = { 0, 0 },         /* Reset, unused */
+        [1] = { 4, 2 },         /* Undefined */
+        [2] = { 0, 0 },         /* SVC, unused */
+        [3] = { 4, 4 },         /* Prefetch abort */
+        [4] = { 8, 8 },         /* Data abort */
+        [5] = { 0, 0 },         /* HVC, unused */
+        [6] = { 4, 4 },         /* IRQ, unused */
+        [7] = { 4, 4 },         /* FIQ, unused */
+};
+
+static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
+{
+        unsigned long cpsr;
+        unsigned long new_spsr_value = *vcpu_cpsr(vcpu);
+        bool is_thumb = (new_spsr_value & COMPAT_PSR_T_BIT);
+        u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
+        u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
+
+        cpsr = mode | COMPAT_PSR_I_BIT;
+
+        if (sctlr & (1 << 30))
+                cpsr |= COMPAT_PSR_T_BIT;
+        if (sctlr & (1 << 25))
+                cpsr |= COMPAT_PSR_E_BIT;
+
+        *vcpu_cpsr(vcpu) = cpsr;
+
+        /* Note: These now point to the banked copies */
+        *vcpu_spsr(vcpu) = new_spsr_value;
+        *vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
+
+        /* Branch to exception vector */
+        if (sctlr & (1 << 13))
+                vect_offset += 0xffff0000;
+        else /* always have security exceptions */
+                vect_offset += vcpu_cp15(vcpu, c12_VBAR);
+
+        *vcpu_pc(vcpu) = vect_offset;
+}
+
+void kvm_inject_undef32(struct kvm_vcpu *vcpu)
+{
+        prepare_fault32(vcpu, COMPAT_PSR_MODE_UND, 4);
+}
+
+/*
+ * Modelled after TakeDataAbortException() and TakePrefetchAbortException
+ * pseudocode.
+ */
+static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
+                         unsigned long addr)
+{
+        u32 vect_offset;
+        u32 *far, *fsr;
+        bool is_lpae;
+
+        if (is_pabt) {
+                vect_offset = 12;
+                far = &vcpu_cp15(vcpu, c6_IFAR);
+                fsr = &vcpu_cp15(vcpu, c5_IFSR);
+        } else { /* !iabt */
+                vect_offset = 16;
+                far = &vcpu_cp15(vcpu, c6_DFAR);
+                fsr = &vcpu_cp15(vcpu, c5_DFSR);
+        }
+
+        prepare_fault32(vcpu, COMPAT_PSR_MODE_ABT | COMPAT_PSR_A_BIT, vect_offset);
+
+        *far = addr;
+
+        /* Give the guest an IMPLEMENTATION DEFINED exception */
+        is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
+        if (is_lpae)
+                *fsr = 1 << 9 | 0x34;
+        else
+                *fsr = 0x14;
+}
+
+void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+        inject_abt32(vcpu, false, addr);
+}
+
+void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
+{
+        inject_abt32(vcpu, true, addr);
+}