aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--arch/mips/include/asm/kvm_host.h71
1 files changed, 65 insertions, 6 deletions
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 6f9338450e24..79410f85a5a7 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -482,15 +482,74 @@ struct kvm_vcpu_arch {
/*
 * Guest ErrorEPC accessors. Plain (non-atomic) read/modify — ErrorEPC is
 * only touched from vcpu context, unlike Cause below.
 */
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
/*
 * Some of the guest registers may be modified asynchronously (e.g. from a
 * hrtimer callback in hard irq context) and therefore need stronger atomicity
 * guarantees than other registers.
 */

/*
 * Atomically OR @val into the guest CP0 register at @reg.
 *
 * Implemented as a load-linked/store-conditional retry loop:
 * __LL/__SC presumably expand to the ll/sc (or 64-bit lld/scd) mnemonics
 * as appropriate for the build — defined elsewhere in the tree; confirm.
 * ".set mips3" is needed so the assembler accepts ll/sc even when building
 * for an ISA level that predates them; ".set mips0" restores the default.
 *
 * Note %0 (temp) is reused: it holds the loaded value for the OR, then the
 * SC success flag (non-zero on success), which the do/while tests. The
 * earlyclobber "=&r" keeps temp out of the input operands' registers, and
 * "+m" (*reg) tells the compiler the memory is both read and written.
 */
static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
						unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	or	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (val));
	} while (unlikely(!temp));	/* retry if SC failed (temp == 0) */
}
506
/*
 * Atomically clear the bits of @val in the guest CP0 register at @reg.
 *
 * Same LL/SC retry pattern as _kvm_atomic_set_c0_guest_reg, but ANDs with
 * the complement of @val (note the inversion happens in C, in the "r" (~val)
 * input operand, not in the asm). temp doubles as the SC success flag;
 * the loop retries until the store-conditional succeeds.
 */
static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
						  unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~val));
	} while (unlikely(!temp));	/* retry if SC failed (temp == 0) */
}
522
/*
 * Atomically replace the bits selected by @change in the guest CP0 register
 * at @reg with the corresponding bits of @val; bits outside @change are
 * preserved. Equivalent to *reg = (*reg & ~change) | (val & change), done
 * as a single LL/SC read-modify-write so it cannot lose a concurrent update.
 * Both maskings are computed in C in the input operands ("r" (~change) and
 * "r" (val & change)); the asm just ANDs then ORs. temp doubles as the SC
 * success flag tested by the retry loop.
 */
static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
						   unsigned long change,
						   unsigned long val)
{
	unsigned long temp;
	do {
		__asm__ __volatile__(
		"	.set	mips3				\n"
		"	" __LL "%0, %1				\n"
		"	and	%0, %2				\n"
		"	or	%0, %3				\n"
		"	" __SC	"%0, %1				\n"
		"	.set	mips0				\n"
		: "=&r" (temp), "+m" (*reg)
		: "r" (~change), "r" (val & change));
	} while (unlikely(!temp));	/* retry if SC failed (temp == 0) */
}
540
/* Status is only modified from vcpu context, so plain RMW is sufficient. */
#define kvm_set_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] |= (val))
#define kvm_clear_c0_guest_status(cop0, val) (cop0->reg[MIPS_CP0_STATUS][0] &= ~(val))

/* Cause can be modified asynchronously from hardirq hrtimer callback */
#define kvm_set_c0_guest_cause(cop0, val) \
	_kvm_atomic_set_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_clear_c0_guest_cause(cop0, val) \
	_kvm_atomic_clear_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], val)
#define kvm_change_c0_guest_cause(cop0, change, val) \
	_kvm_atomic_change_c0_guest_reg(&cop0->reg[MIPS_CP0_CAUSE][0], \
					change, val)
/* Guest EBase lives in reg[MIPS_CP0_PRID][1]; vcpu-context only, plain RMW. */
#define kvm_set_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] |= (val))
#define kvm_clear_c0_guest_ebase(cop0, val) (cop0->reg[MIPS_CP0_PRID][1] &= ~(val))
496#define kvm_change_c0_guest_ebase(cop0, change, val) \ 555#define kvm_change_c0_guest_ebase(cop0, change, val) \