author	Marc Zyngier <marc.zyngier@arm.com>	2015-12-14 12:58:33 -0500
committer	Marc Zyngier <marc.zyngier@arm.com>	2015-12-18 05:14:54 -0500
commit	e078ef81514222ffc10bf1767c15df16ca0b84db (patch)
tree	805acb2e91a9d87c6e925341b1988883f522e854
parent	3ffa75cd18134a03f86f9d9b8b6e9128e0eda254 (diff)
ARM: KVM: Cleanup exception injection
David Binderman reported that the exception injection code had a
couple of unused variables lingering around.

Upon examination, it looked like this code could do with an
anticipated spring cleaning, which amounts to deduplicating the
CPSR/SPSR update, and making it look a bit more like the
architecture spec. The spurious variables are removed in the
process.

Reported-by: David Binderman <dcb314@hotmail.com>
Reviewed-by: Christoffer Dall <christoffer.dall@linaro.org>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
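The heart of the new helper is its fall-through switch, which accumulates
exception mask bits the way AArch32.EnterMode() describes: entering FIQ mode
masks FIQs, asynchronous aborts and IRQs; entering ABT or IRQ mode masks
aborts and IRQs; every other target mode (UND included) masks IRQs only.
Below is a minimal standalone sketch of that accumulation, not kernel code:
mask_bits_for() is a name invented for this illustration, and the PSR bit
and mode constants are written out with their usual ARM encodings.

/*
 * Sketch of the cumulative fall-through in kvm_update_psr().
 * Each case ORs in its own mask bit, then falls through to the
 * cases below it, so stricter modes pick up every looser bit too.
 */
#include <stdio.h>

#define PSR_F_BIT 0x00000040u	/* mask FIQs */
#define PSR_I_BIT 0x00000080u	/* mask IRQs */
#define PSR_A_BIT 0x00000100u	/* mask asynchronous aborts */

#define FIQ_MODE 0x11u
#define IRQ_MODE 0x12u
#define ABT_MODE 0x17u
#define UND_MODE 0x1bu

static unsigned int mask_bits_for(unsigned int mode)
{
	unsigned int bits = 0;

	switch (mode) {
	case FIQ_MODE:
		bits |= PSR_F_BIT;	/* FIQ entry also masks FIQs... */
		/* Fall through */
	case ABT_MODE:
	case IRQ_MODE:
		bits |= PSR_A_BIT;	/* ...and asynchronous aborts... */
		/* Fall through */
	default:
		bits |= PSR_I_BIT;	/* ...and every mode masks IRQs */
	}

	return bits;
}

int main(void)
{
	printf("FIQ: %#x  IRQ: %#x  ABT: %#x  UND: %#x\n",
	       mask_bits_for(FIQ_MODE), mask_bits_for(IRQ_MODE),
	       mask_bits_for(ABT_MODE), mask_bits_for(UND_MODE));
	return 0;
}

Built with any stock C compiler, this prints FIQ: 0x1c0, IRQ: 0x180,
ABT: 0x180 and UND: 0x80, which is exactly the cumulative masking the
patch relies on when it replaces the two open-coded sequences below.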
-rw-r--r--	arch/arm/kvm/emulate.c	74
1 file changed, 38 insertions, 36 deletions
diff --git a/arch/arm/kvm/emulate.c b/arch/arm/kvm/emulate.c
index d6c005283678..dc99159857b4 100644
--- a/arch/arm/kvm/emulate.c
+++ b/arch/arm/kvm/emulate.c
@@ -275,6 +275,40 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
 	return vbar;
 }
 
+/*
+ * Switch to an exception mode, updating both CPSR and SPSR. Follow
+ * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
+ */
+static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
+{
+	unsigned long cpsr = *vcpu_cpsr(vcpu);
+	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
+
+	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;
+
+	switch (mode) {
+	case FIQ_MODE:
+		*vcpu_cpsr(vcpu) |= PSR_F_BIT;
+		/* Fall through */
+	case ABT_MODE:
+	case IRQ_MODE:
+		*vcpu_cpsr(vcpu) |= PSR_A_BIT;
+		/* Fall through */
+	default:
+		*vcpu_cpsr(vcpu) |= PSR_I_BIT;
+	}
+
+	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
+
+	if (sctlr & SCTLR_TE)
+		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
+	if (sctlr & SCTLR_EE)
+		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
+
+	/* Note: These now point to the mode banked copies */
+	*vcpu_spsr(vcpu) = cpsr;
+}
+
 /**
  * kvm_inject_undefined - inject an undefined exception into the guest
  * @vcpu: The VCPU to receive the undefined exception
@@ -286,29 +320,13 @@ static u32 exc_vector_base(struct kvm_vcpu *vcpu)
  */
 void kvm_inject_undefined(struct kvm_vcpu *vcpu)
 {
-	unsigned long new_lr_value;
-	unsigned long new_spsr_value;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset = 4;
 	u32 return_offset = (is_thumb) ? 2 : 4;
 
-	new_spsr_value = cpsr;
-	new_lr_value = *vcpu_pc(vcpu) - return_offset;
-
-	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | UND_MODE;
-	*vcpu_cpsr(vcpu) |= PSR_I_BIT;
-	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-	if (sctlr & SCTLR_TE)
-		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
-	if (sctlr & SCTLR_EE)
-		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-	/* Note: These now point to UND banked copies */
-	*vcpu_spsr(vcpu) = cpsr;
-	*vcpu_reg(vcpu, 14) = new_lr_value;
+	kvm_update_psr(vcpu, UND_MODE);
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
 
 	/* Branch to exception vector */
 	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
@@ -320,30 +338,14 @@ void kvm_inject_undefined(struct kvm_vcpu *vcpu)
  */
 static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
 {
-	unsigned long new_lr_value;
-	unsigned long new_spsr_value;
 	unsigned long cpsr = *vcpu_cpsr(vcpu);
-	u32 sctlr = vcpu->arch.cp15[c1_SCTLR];
 	bool is_thumb = (cpsr & PSR_T_BIT);
 	u32 vect_offset;
 	u32 return_offset = (is_thumb) ? 4 : 0;
 	bool is_lpae;
 
-	new_spsr_value = cpsr;
-	new_lr_value = *vcpu_pc(vcpu) + return_offset;
-
-	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | ABT_MODE;
-	*vcpu_cpsr(vcpu) |= PSR_I_BIT | PSR_A_BIT;
-	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);
-
-	if (sctlr & SCTLR_TE)
-		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
-	if (sctlr & SCTLR_EE)
-		*vcpu_cpsr(vcpu) |= PSR_E_BIT;
-
-	/* Note: These now point to ABT banked copies */
-	*vcpu_spsr(vcpu) = cpsr;
-	*vcpu_reg(vcpu, 14) = new_lr_value;
+	kvm_update_psr(vcpu, ABT_MODE);
+	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;
 
 	if (is_pabt)
 		vect_offset = 12;