author	James Hogan <james.hogan@imgtec.com>	2014-05-29 05:16:33 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-05-30 07:01:25 -0400
commit	c73c99b0dfa7cab4100fb2699d0b7362322278a1 (patch)
tree	dd62ccdbbc4621c1488e298f0330b565edfba01b
parent	044f0f03eca0110e1835b2ea038a484b93950328 (diff)
MIPS: KVM: Fix timer race modifying guest CP0_Cause
The hrtimer callback for guest timer timeouts sets the guest's
CP0_Cause.TI bit to indicate to the guest that a timer interrupt is
pending; however, there is no mutual exclusion to prevent this from
occurring while the guest's CP0_Cause register is being
read-modify-written elsewhere. When this happens, the setting of the
CP0_Cause.TI bit is undone, so the guest misses the timer interrupt
and doesn't reprogram the CP0_Compare register for the next timeout.

Currently another timer interrupt will be triggered again in another
10ms anyway due to the way timers are emulated, but after the MIPS
timer emulation is fixed this would result in Linux guest time standing
still and the guest scheduler not being invoked until the guest
CP0_Count has looped around again, which at 100MHz takes just under
43 seconds.

Currently this is the only asynchronous modification of guest
registers, therefore it is fixed by adjusting the implementations of
the kvm_set_c0_guest_cause(), kvm_clear_c0_guest_cause(), and
kvm_change_c0_guest_cause() macros, which are used for modifying the
guest CP0_Cause register, to use ll/sc to ensure atomic modification.
This should work in both UP and SMP cases without requiring interrupts
to be disabled.

Signed-off-by: James Hogan <james.hogan@imgtec.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Gleb Natapov <gleb@kernel.org>
Cc: kvm@vger.kernel.org
Cc: Ralf Baechle <ralf@linux-mips.org>
Cc: linux-mips@linux-mips.org
Cc: Sanjay Lal <sanjayl@kymasys.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
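[Editor's note: to make the race concrete, here is a minimal sketch, not part
of the patch. All names in it (guest_cause, CAUSEF_TI_DEMO, the two helper
functions) are hypothetical stand-ins. It shows how a plain read-modify-write
can lose a bit set concurrently from hardirq context, and how a retry loop
with the same semantics as the patch's ll/sc sequence avoids the lost update,
emulated here with C11 compare-and-swap since ll/sc is MIPS-specific.]

/*
 * Illustrative sketch only -- not kernel code.
 */
#include <stdatomic.h>

#define CAUSEF_TI_DEMO	(1UL << 30)	/* stand-in for the real CAUSEF_TI */

static _Atomic unsigned long guest_cause;

/*
 * Racy version: a separate load and store. If the hrtimer callback sets
 * the TI bit between the two, the store below overwrites it and the
 * guest never sees the timer interrupt.
 */
static void racy_set_bits(unsigned long val)
{
	unsigned long old = atomic_load(&guest_cause);	/* load  */
	/* <-- hardirq may set CAUSEF_TI_DEMO here; next line undoes it */
	atomic_store(&guest_cause, old | val);		/* store */
}

/*
 * Safe version: retry until the update commits with no intervening
 * writer, which is what the MIPS ll/sc pair in the patch guarantees.
 * On failure, compare_exchange reloads 'old' and the loop retries.
 */
static void atomic_set_bits(unsigned long val)
{
	unsigned long old = atomic_load(&guest_cause);
	while (!atomic_compare_exchange_weak(&guest_cause, &old, old | val))
		;	/* another writer got in first; retry */
}

int main(void)
{
	racy_set_bits(1UL << 0);	/* demo only; unsafe if concurrent */
	atomic_set_bits(CAUSEF_TI_DEMO);
	return !(atomic_load(&guest_cause) & CAUSEF_TI_DEMO);
}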
-rw-r--r--	arch/mips/include/asm/kvm_host.h	71
1 file changed, 65 insertions(+), 6 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 6f9338450e24..79410f85a5a7 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -482,15 +482,74 @@ struct kvm_vcpu_arch {
 #define kvm_read_c0_guest_errorepc(cop0)	(cop0->reg[MIPS_CP0_ERROR_PC][0])
 #define kvm_write_c0_guest_errorepc(cop0, val)	(cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
 
+/*
+ * Some of the guest registers may be modified asynchronously (e.g. from a
+ * hrtimer callback in hard irq context) and therefore need stronger atomicity
+ * guarantees than other registers.
+ */
+
+static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
+						unsigned long val)
+{
+	unsigned long temp;
+	do {
+		__asm__ __volatile__(
+		"	.set	mips3				\n"
+		"	" __LL "%0, %1				\n"
+		"	or	%0, %2				\n"
+		"	" __SC	"%0, %1				\n"
+		"	.set	mips0				\n"
+		: "=&r" (temp), "+m" (*reg)
+		: "r" (val));
+	} while (unlikely(!temp));
+}
+
+static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
+						  unsigned long val)
+{
+	unsigned long temp;
+	do {
+		__asm__ __volatile__(
+		"	.set	mips3				\n"
+		"	" __LL "%0, %1				\n"
+		"	and	%0, %2				\n"
+		"	" __SC	"%0, %1				\n"
+		"	.set	mips0				\n"
+		: "=&r" (temp), "+m" (*reg)
+		: "r" (~val));
+	} while (unlikely(!temp));
+}
+
+static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
+						   unsigned long change,
+						   unsigned long val)
+{
+	unsigned long temp;
+	do {
+		__asm__ __volatile__(
+		"	.set	mips3				\n"
+		"	" __LL "%0, %1				\n"
+		"	and	%0, %2				\n"
+		"	or	%0, %3				\n"
+		"	" __SC	"%0, %1				\n"
+		"	.set	mips0				\n"
+		: "=&r" (temp), "+m" (*reg)
+		: "r" (~change), "r" (val & change));
+	} while (unlikely(!temp));
+}
+
 #define kvm_set_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] |= (val))
 #define kvm_clear_c0_guest_status(cop0, val)	(cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))
-#define kvm_set_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] |= (val))
-#define kvm_clear_c0_guest_cause(cop0, val)	(cop0->reg[MIPS_CP0_CAUSE][0] &= ~(val))
+
+/* Cause can be modified asynchronously from hardirq hrtimer callback */
+#define kvm_set_c0_guest_cause(cop0, val)	\
+	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
+#define kvm_clear_c0_guest_cause(cop0, val)	\
+	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
 #define kvm_change_c0_guest_cause(cop0, change, val)	\
-{							\
-	kvm_clear_c0_guest_cause(cop0, change);		\
-	kvm_set_c0_guest_cause(cop0, ((val) & (change)));	\
-}
+	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0],	\
+					change, val)
+
 #define kvm_set_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] |= (val))
 #define kvm_clear_c0_guest_ebase(cop0, val)	(cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
 #define kvm_change_c0_guest_ebase(cop0, change, val)	\
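
[Editor's note: one subtlety worth drawing out of the hunk above. The old
kvm_change_c0_guest_cause() was two separate non-atomic steps (clear, then
set), so a concurrent reader could observe the intermediate value and a
concurrent writer could be lost between the steps; the new
_kvm_atomic_change_c0_guest_reg() folds the and and the or into a single
ll/sc section. The sketch below is a hypothetical caller, not code from the
patch; it assumes the kernel context of this file (struct mips_coproc from
kvm_host.h, CAUSEF_TI and CAUSEF_IP7 from asm/mipsregs.h), and the demo
function names are invented.]

/* Hypothetical usage sketch -- not part of the patch. */
static void demo_assert_timer_irq(struct mips_coproc *cop0)
{
	/*
	 * After this change the hrtimer callback can safely assert
	 * Cause.TI even while vcpu emulation is read-modify-writing
	 * Cause: the macro is a single ll/sc read-modify-write,
	 * retried internally until the sc succeeds.
	 */
	kvm_set_c0_guest_cause(cop0, CAUSEF_TI);
}

static void demo_update_ip7(struct mips_coproc *cop0, unsigned long ip)
{
	/*
	 * Old version: clear then set, leaving a window where the bit
	 * is clear. New version: one atomic and+or, with no
	 * intermediate state visible to other CPUs or to hardirq.
	 */
	kvm_change_c0_guest_cause(cop0, CAUSEF_IP7, ip & CAUSEF_IP7);
}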