about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--arch/mips/include/asm/kvm_host.h6
-rw-r--r--arch/mips/include/uapi/asm/kvm.h28
-rw-r--r--arch/mips/kvm/kvm_mips.c9
-rw-r--r--arch/mips/kvm/kvm_mips_emul.c132
-rw-r--r--arch/mips/kvm/kvm_trap_emul.c15
5 files changed, 176 insertions(+), 14 deletions(-)
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index 75ed94aeefe7..1deeaecbe73e 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -405,12 +405,16 @@ struct kvm_vcpu_arch {
405 u32 io_gpr; /* GPR used as IO source/target */ 405 u32 io_gpr; /* GPR used as IO source/target */
406 406
407 struct hrtimer comparecount_timer; 407 struct hrtimer comparecount_timer;
408 /* Count timer control KVM register */
409 uint32_t count_ctl;
408 /* Count bias from the raw time */ 410 /* Count bias from the raw time */
409 uint32_t count_bias; 411 uint32_t count_bias;
410 /* Frequency of timer in Hz */ 412 /* Frequency of timer in Hz */
411 uint32_t count_hz; 413 uint32_t count_hz;
412 /* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */ 414 /* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
413 s64 count_dyn_bias; 415 s64 count_dyn_bias;
416 /* Resume time */
417 ktime_t count_resume;
414 /* Period of timer tick in ns */ 418 /* Period of timer tick in ns */
415 u64 count_period; 419 u64 count_period;
416 420
@@ -714,6 +718,8 @@ uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
714void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count); 718void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
715void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare); 719void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
716void kvm_mips_init_count(struct kvm_vcpu *vcpu); 720void kvm_mips_init_count(struct kvm_vcpu *vcpu);
721int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
722int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
717void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu); 723void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
718void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu); 724void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
719enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu); 725enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
diff --git a/arch/mips/include/uapi/asm/kvm.h b/arch/mips/include/uapi/asm/kvm.h
index f09ff5ae2059..f859fbada1f7 100644
--- a/arch/mips/include/uapi/asm/kvm.h
+++ b/arch/mips/include/uapi/asm/kvm.h
@@ -106,6 +106,34 @@ struct kvm_fpu {
106#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33) 106#define KVM_REG_MIPS_LO (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 33)
107#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34) 107#define KVM_REG_MIPS_PC (KVM_REG_MIPS | KVM_REG_SIZE_U64 | 34)
108 108
109/* KVM specific control registers */
110
111/*
112 * CP0_Count control
113 * DC: Set 0: Master disable CP0_Count and set COUNT_RESUME to now
114 * Set 1: Master re-enable CP0_Count with unchanged bias, handling timer
115 * interrupts since COUNT_RESUME
116 * This can be used to freeze the timer to get a consistent snapshot of
117 * the CP0_Count and timer interrupt pending state, while also resuming
118 * safely without losing time or guest timer interrupts.
119 * Other: Reserved, do not change.
120 */
121#define KVM_REG_MIPS_COUNT_CTL (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
122 0x20000 | 0)
123#define KVM_REG_MIPS_COUNT_CTL_DC 0x00000001
124
125/*
126 * CP0_Count resume monotonic nanoseconds
127 * The monotonic nanosecond time of the last set of COUNT_CTL.DC (master
128 * disable). Any reads and writes of Count related registers while
129 * COUNT_CTL.DC=1 will appear to occur at this time. When COUNT_CTL.DC is
130 * cleared again (master enable) any timer interrupts since this time will be
131 * emulated.
132 * Modifications to times in the future are rejected.
133 */
134#define KVM_REG_MIPS_COUNT_RESUME (KVM_REG_MIPS | KVM_REG_SIZE_U64 | \
135 0x20000 | 1)
136
109/* 137/*
110 * KVM MIPS specific structures and definitions 138 * KVM MIPS specific structures and definitions
111 * 139 *
diff --git a/arch/mips/kvm/kvm_mips.c b/arch/mips/kvm/kvm_mips.c
index fc5e44d827fc..a2d5d4243f51 100644
--- a/arch/mips/kvm/kvm_mips.c
+++ b/arch/mips/kvm/kvm_mips.c
@@ -542,7 +542,10 @@ static u64 kvm_mips_get_one_regs[] = {
542 KVM_REG_MIPS_CP0_CONFIG2, 542 KVM_REG_MIPS_CP0_CONFIG2,
543 KVM_REG_MIPS_CP0_CONFIG3, 543 KVM_REG_MIPS_CP0_CONFIG3,
544 KVM_REG_MIPS_CP0_CONFIG7, 544 KVM_REG_MIPS_CP0_CONFIG7,
545 KVM_REG_MIPS_CP0_ERROREPC 545 KVM_REG_MIPS_CP0_ERROREPC,
546
547 KVM_REG_MIPS_COUNT_CTL,
548 KVM_REG_MIPS_COUNT_RESUME,
546}; 549};
547 550
548static int kvm_mips_get_reg(struct kvm_vcpu *vcpu, 551static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
@@ -622,6 +625,8 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
622 break; 625 break;
623 /* registers to be handled specially */ 626 /* registers to be handled specially */
624 case KVM_REG_MIPS_CP0_COUNT: 627 case KVM_REG_MIPS_CP0_COUNT:
628 case KVM_REG_MIPS_COUNT_CTL:
629 case KVM_REG_MIPS_COUNT_RESUME:
625 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v); 630 ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
626 if (ret) 631 if (ret)
627 return ret; 632 return ret;
@@ -717,6 +722,8 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
717 case KVM_REG_MIPS_CP0_COUNT: 722 case KVM_REG_MIPS_CP0_COUNT:
718 case KVM_REG_MIPS_CP0_COMPARE: 723 case KVM_REG_MIPS_CP0_COMPARE:
719 case KVM_REG_MIPS_CP0_CAUSE: 724 case KVM_REG_MIPS_CP0_CAUSE:
725 case KVM_REG_MIPS_COUNT_CTL:
726 case KVM_REG_MIPS_COUNT_RESUME:
720 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v); 727 return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
721 default: 728 default:
722 return -EINVAL; 729 return -EINVAL;
diff --git a/arch/mips/kvm/kvm_mips_emul.c b/arch/mips/kvm/kvm_mips_emul.c
index 088c25d73a11..65c8dea6d1f5 100644
--- a/arch/mips/kvm/kvm_mips_emul.c
+++ b/arch/mips/kvm/kvm_mips_emul.c
@@ -233,14 +233,15 @@ enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
233 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled. 233 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
234 * @vcpu: Virtual CPU. 234 * @vcpu: Virtual CPU.
235 * 235 *
236 * Returns: 1 if the CP0_Count timer is disabled by the guest CP0_Cause.DC 236 * Returns: 1 if the CP0_Count timer is disabled by either the guest
237 * bit. 237 * CP0_Cause.DC bit or the count_ctl.DC bit.
238 * 0 otherwise (in which case CP0_Count timer is running). 238 * 0 otherwise (in which case CP0_Count timer is running).
239 */ 239 */
240static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu) 240static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
241{ 241{
242 struct mips_coproc *cop0 = vcpu->arch.cop0; 242 struct mips_coproc *cop0 = vcpu->arch.cop0;
243 return kvm_read_c0_guest_cause(cop0) & CAUSEF_DC; 243 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
244 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
244} 245}
245 246
246/** 247/**
@@ -280,6 +281,24 @@ static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
280} 281}
281 282
282/** 283/**
284 * kvm_mips_count_time() - Get effective current time.
285 * @vcpu: Virtual CPU.
286 *
287 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
288 * except when the master disable bit is set in count_ctl, in which case it is
289 * count_resume, i.e. the time that the count was disabled.
290 *
291 * Returns: Effective monotonic ktime for CP0_Count.
292 */
293static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
294{
295 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
296 return vcpu->arch.count_resume;
297
298 return ktime_get();
299}
300
301/**
283 * kvm_mips_read_count_running() - Read the current count value as if running. 302 * kvm_mips_read_count_running() - Read the current count value as if running.
284 * @vcpu: Virtual CPU. 303 * @vcpu: Virtual CPU.
285 * @now: Kernel time to read CP0_Count at. 304 * @now: Kernel time to read CP0_Count at.
@@ -448,7 +467,7 @@ void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
448 ktime_t now; 467 ktime_t now;
449 468
450 /* Calculate bias */ 469 /* Calculate bias */
451 now = ktime_get(); 470 now = kvm_mips_count_time(vcpu);
452 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now); 471 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
453 472
454 if (kvm_mips_count_disabled(vcpu)) 473 if (kvm_mips_count_disabled(vcpu))
@@ -508,8 +527,8 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
508 * Disable the CP0_Count timer. A timer interrupt on or before the final stop 527 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
509 * time will be handled but not after. 528 * time will be handled but not after.
510 * 529 *
511 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC has been 530 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
512 * set (count disabled). 531 * count_ctl.DC has been set (count disabled).
513 * 532 *
514 * Returns: The time that the timer was stopped. 533 * Returns: The time that the timer was stopped.
515 */ 534 */
@@ -535,7 +554,8 @@ static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
535 * @vcpu: Virtual CPU. 554 * @vcpu: Virtual CPU.
536 * 555 *
537 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or 556 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
538 * before the final stop time will be handled, but not after. 557 * before the final stop time will be handled if the timer isn't disabled by
558 * count_ctl.DC, but not after.
539 * 559 *
540 * Assumes CP0_Cause.DC is clear (count enabled). 560 * Assumes CP0_Cause.DC is clear (count enabled).
541 */ 561 */
@@ -544,7 +564,8 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
544 struct mips_coproc *cop0 = vcpu->arch.cop0; 564 struct mips_coproc *cop0 = vcpu->arch.cop0;
545 565
546 kvm_set_c0_guest_cause(cop0, CAUSEF_DC); 566 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
547 kvm_mips_count_disable(vcpu); 567 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
568 kvm_mips_count_disable(vcpu);
548} 569}
549 570
550/** 571/**
@@ -552,9 +573,9 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
552 * @vcpu: Virtual CPU. 573 * @vcpu: Virtual CPU.
553 * 574 *
554 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after 575 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
555 * the start time will be handled, potentially before even returning, so the 576 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
556 * caller should be careful with ordering of CP0_Cause modifications so as not 577 * potentially before even returning, so the caller should be careful with
557 * to lose it. 578 * ordering of CP0_Cause modifications so as not to lose it.
558 * 579 *
559 * Assumes CP0_Cause.DC is set (count disabled). 580 * Assumes CP0_Cause.DC is set (count disabled).
560 */ 581 */
@@ -567,13 +588,100 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
567 588
568 /* 589 /*
569 * Set the dynamic count to match the static count. 590 * Set the dynamic count to match the static count.
570 * This starts the hrtimer. 591 * This starts the hrtimer if count_ctl.DC allows it.
592 * Otherwise it conveniently updates the biases.
571 */ 593 */
572 count = kvm_read_c0_guest_count(cop0); 594 count = kvm_read_c0_guest_count(cop0);
573 kvm_mips_write_count(vcpu, count); 595 kvm_mips_write_count(vcpu, count);
574} 596}
575 597
576/** 598/**
599 * kvm_mips_set_count_ctl() - Update the count control KVM register.
600 * @vcpu: Virtual CPU.
601 * @count_ctl: Count control register new value.
602 *
603 * Set the count control KVM register. The timer is updated accordingly.
604 *
605 * Returns: -EINVAL if reserved bits are set.
606 * 0 on success.
607 */
608int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
609{
610 struct mips_coproc *cop0 = vcpu->arch.cop0;
611 s64 changed = count_ctl ^ vcpu->arch.count_ctl;
612 s64 delta;
613 ktime_t expire, now;
614 uint32_t count, compare;
615
616 /* Only allow defined bits to be changed */
617 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
618 return -EINVAL;
619
620 /* Apply new value */
621 vcpu->arch.count_ctl = count_ctl;
622
623 /* Master CP0_Count disable */
624 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
625 /* Is CP0_Cause.DC already disabling CP0_Count? */
626 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
627 if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
628 /* Just record the current time */
629 vcpu->arch.count_resume = ktime_get();
630 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
631 /* disable timer and record current time */
632 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
633 } else {
634 /*
635 * Calculate timeout relative to static count at resume
636 * time (wrap 0 to 2^32).
637 */
638 count = kvm_read_c0_guest_count(cop0);
639 compare = kvm_read_c0_guest_compare(cop0);
640 delta = (u64)(uint32_t)(compare - count - 1) + 1;
641 delta = div_u64(delta * NSEC_PER_SEC,
642 vcpu->arch.count_hz);
643 expire = ktime_add_ns(vcpu->arch.count_resume, delta);
644
645 /* Handle pending interrupt */
646 now = ktime_get();
647 if (ktime_compare(now, expire) >= 0)
648 /* Nothing should be waiting on the timeout */
649 kvm_mips_callbacks->queue_timer_int(vcpu);
650
651 /* Resume hrtimer without changing bias */
652 count = kvm_mips_read_count_running(vcpu, now);
653 kvm_mips_resume_hrtimer(vcpu, now, count);
654 }
655 }
656
657 return 0;
658}
659
660/**
661 * kvm_mips_set_count_resume() - Update the count resume KVM register.
662 * @vcpu: Virtual CPU.
663 * @count_resume: Count resume register new value.
664 *
665 * Set the count resume KVM register.
666 *
667 * Returns: -EINVAL if out of valid range (0..now).
668 * 0 on success.
669 */
670int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
671{
672 /*
673 * It doesn't make sense for the resume time to be in the future, as it
674 * would be possible for the next interrupt to be more than a full
675 * period in the future.
676 */
677 if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
678 return -EINVAL;
679
680 vcpu->arch.count_resume = ns_to_ktime(count_resume);
681 return 0;
682}
683
684/**
577 * kvm_mips_count_timeout() - Push timer forward on timeout. 685 * kvm_mips_count_timeout() - Push timer forward on timeout.
578 * @vcpu: Virtual CPU. 686 * @vcpu: Virtual CPU.
579 * 687 *
diff --git a/arch/mips/kvm/kvm_trap_emul.c b/arch/mips/kvm/kvm_trap_emul.c
index 9908f2b0ff46..854502bcc749 100644
--- a/arch/mips/kvm/kvm_trap_emul.c
+++ b/arch/mips/kvm/kvm_trap_emul.c
@@ -409,6 +409,12 @@ static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
409 case KVM_REG_MIPS_CP0_COUNT: 409 case KVM_REG_MIPS_CP0_COUNT:
410 *v = kvm_mips_read_count(vcpu); 410 *v = kvm_mips_read_count(vcpu);
411 break; 411 break;
412 case KVM_REG_MIPS_COUNT_CTL:
413 *v = vcpu->arch.count_ctl;
414 break;
415 case KVM_REG_MIPS_COUNT_RESUME:
416 *v = ktime_to_ns(vcpu->arch.count_resume);
417 break;
412 default: 418 default:
413 return -EINVAL; 419 return -EINVAL;
414 } 420 }
@@ -420,6 +426,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
420 s64 v) 426 s64 v)
421{ 427{
422 struct mips_coproc *cop0 = vcpu->arch.cop0; 428 struct mips_coproc *cop0 = vcpu->arch.cop0;
429 int ret = 0;
423 430
424 switch (reg->id) { 431 switch (reg->id) {
425 case KVM_REG_MIPS_CP0_COUNT: 432 case KVM_REG_MIPS_CP0_COUNT:
@@ -448,10 +455,16 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
448 kvm_write_c0_guest_cause(cop0, v); 455 kvm_write_c0_guest_cause(cop0, v);
449 } 456 }
450 break; 457 break;
458 case KVM_REG_MIPS_COUNT_CTL:
459 ret = kvm_mips_set_count_ctl(vcpu, v);
460 break;
461 case KVM_REG_MIPS_COUNT_RESUME:
462 ret = kvm_mips_set_count_resume(vcpu, v);
463 break;
451 default: 464 default:
452 return -EINVAL; 465 return -EINVAL;
453 } 466 }
454 return 0; 467 return ret;
455} 468}
456 469
457static struct kvm_mips_callbacks kvm_trap_emul_callbacks = { 470static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {