 arch/x86/include/asm/kvm_host.h |   2
 arch/x86/kvm/kvm_timer.h        |   2
 arch/x86/kvm/lapic.c            | 142
 arch/x86/kvm/lapic.h            |   3
 arch/x86/kvm/x86.c              |  21
 5 files changed, 140 insertions(+), 30 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ab62711ccb78..b4973f4dab98 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -674,6 +674,8 @@ u8 kvm_get_guest_memory_type(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 extern bool tdp_enabled;
 
+u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu);
+
 /* control of guest tsc rate supported? */
 extern bool kvm_has_tsc_control;
 /* minimum supported tsc_khz for guests */
diff --git a/arch/x86/kvm/kvm_timer.h b/arch/x86/kvm/kvm_timer.h
index 64bc6ea78d90..497dbaa366d4 100644
--- a/arch/x86/kvm/kvm_timer.h
+++ b/arch/x86/kvm/kvm_timer.h
@@ -2,6 +2,8 @@
 struct kvm_timer {
 	struct hrtimer timer;
 	s64 period;			/* unit: ns */
+	u32 timer_mode_mask;
+	u64 tscdeadline;
 	atomic_t pending;		/* accumulated triggered timers */
 	bool reinject;
 	struct kvm_timer_ops *t_ops;
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 2fb20caae5d8..54abb40199d6 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -138,9 +138,23 @@ static inline int apic_lvt_vector(struct kvm_lapic *apic, int lvt_type)
 	return apic_get_reg(apic, lvt_type) & APIC_VECTOR_MASK;
 }
 
+static inline int apic_lvtt_oneshot(struct kvm_lapic *apic)
+{
+	return ((apic_get_reg(apic, APIC_LVTT) &
+		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_ONESHOT);
+}
+
 static inline int apic_lvtt_period(struct kvm_lapic *apic)
 {
-	return apic_get_reg(apic, APIC_LVTT) & APIC_LVT_TIMER_PERIODIC;
+	return ((apic_get_reg(apic, APIC_LVTT) &
+		apic->lapic_timer.timer_mode_mask) == APIC_LVT_TIMER_PERIODIC);
+}
+
+static inline int apic_lvtt_tscdeadline(struct kvm_lapic *apic)
+{
+	return ((apic_get_reg(apic, APIC_LVTT) &
+		apic->lapic_timer.timer_mode_mask) ==
+			APIC_LVT_TIMER_TSCDEADLINE);
 }
 
 static inline int apic_lvt_nmi_mode(u32 lvt_val)
@@ -169,7 +183,7 @@ static inline int apic_x2apic_mode(struct kvm_lapic *apic)
 }
 
 static unsigned int apic_lvt_mask[APIC_LVT_NUM] = {
-	LVT_MASK | APIC_LVT_TIMER_PERIODIC,	/* LVTT */
+	LVT_MASK ,	/* part LVTT mask, timer mode mask added at runtime */
 	LVT_MASK | APIC_MODE_MASK,	/* LVTTHMR */
 	LVT_MASK | APIC_MODE_MASK,	/* LVTPC */
 	LINT_MASK, LINT_MASK,	/* LVT0-1 */
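
The apic_lvtt_*() helpers above classify the timer by comparing the LVT Timer register against timer_mode_mask instead of testing a single bit, because the mode field becomes two bits wide once the TSC deadline timer is exposed; the LVTT entry in apic_lvt_mask correspondingly loses its hard-coded periodic bit, and the mode bits are folded back in when the register is written. A minimal stand-alone decode sketch, not part of the patch (lvtt_mode_name() is a hypothetical helper and the encodings assume the usual apicdef.h values):

#include <stdint.h>

/* Assumed apicdef.h encodings for LVT Timer bits 18:17. */
#define APIC_LVT_TIMER_ONESHOT		(0u << 17)
#define APIC_LVT_TIMER_PERIODIC		(1u << 17)
#define APIC_LVT_TIMER_TSCDEADLINE	(2u << 17)

static const char *lvtt_mode_name(uint32_t lvtt, uint32_t timer_mode_mask)
{
	switch (lvtt & timer_mode_mask) {
	case APIC_LVT_TIMER_ONESHOT:		return "one-shot";
	case APIC_LVT_TIMER_PERIODIC:		return "periodic";
	case APIC_LVT_TIMER_TSCDEADLINE:	return "tsc-deadline";
	default:				return "reserved";
	}
}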
@@ -572,6 +586,9 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 		break;
 
 	case APIC_TMCCT:	/* Timer CCR */
+		if (apic_lvtt_tscdeadline(apic))
+			return 0;
+
 		val = apic_get_tmcct(apic);
 		break;
 
@@ -666,37 +683,40 @@ static void update_divide_count(struct kvm_lapic *apic)
 
 static void start_apic_timer(struct kvm_lapic *apic)
 {
-	ktime_t now = apic->lapic_timer.timer.base->get_time();
-
-	apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT) *
-		    APIC_BUS_CYCLE_NS * apic->divide_count;
+	ktime_t now;
 	atomic_set(&apic->lapic_timer.pending, 0);
 
-	if (!apic->lapic_timer.period)
-		return;
-	/*
-	 * Do not allow the guest to program periodic timers with small
-	 * interval, since the hrtimers are not throttled by the host
-	 * scheduler.
-	 */
-	if (apic_lvtt_period(apic)) {
-		s64 min_period = min_timer_period_us * 1000LL;
-
-		if (apic->lapic_timer.period < min_period) {
-			pr_info_ratelimited(
-				"kvm: vcpu %i: requested %lld ns "
-				"lapic timer period limited to %lld ns\n",
-				apic->vcpu->vcpu_id, apic->lapic_timer.period,
-				min_period);
-			apic->lapic_timer.period = min_period;
+	if (apic_lvtt_period(apic) || apic_lvtt_oneshot(apic)) {
+		/* lapic timer in oneshot or periodic mode */
+		now = apic->lapic_timer.timer.base->get_time();
+		apic->lapic_timer.period = (u64)apic_get_reg(apic, APIC_TMICT)
+			* APIC_BUS_CYCLE_NS * apic->divide_count;
+
+		if (!apic->lapic_timer.period)
+			return;
+		/*
+		 * Do not allow the guest to program periodic timers with small
+		 * interval, since the hrtimers are not throttled by the host
+		 * scheduler.
+		 */
+		if (apic_lvtt_period(apic)) {
+			s64 min_period = min_timer_period_us * 1000LL;
+
+			if (apic->lapic_timer.period < min_period) {
+				pr_info_ratelimited(
+					"kvm: vcpu %i: requested %lld ns "
+					"lapic timer period limited to %lld ns\n",
+					apic->vcpu->vcpu_id,
+					apic->lapic_timer.period, min_period);
+				apic->lapic_timer.period = min_period;
+			}
 		}
-	}
 
-	hrtimer_start(&apic->lapic_timer.timer,
-		      ktime_add_ns(now, apic->lapic_timer.period),
-		      HRTIMER_MODE_ABS);
+		hrtimer_start(&apic->lapic_timer.timer,
+			      ktime_add_ns(now, apic->lapic_timer.period),
+			      HRTIMER_MODE_ABS);
 
-	apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
-		   PRIx64 ", "
-		   "timer initial count 0x%x, period %lldns, "
-		   "expire @ 0x%016" PRIx64 ".\n", __func__,
+		apic_debug("%s: bus cycle is %" PRId64 "ns, now 0x%016"
+			   PRIx64 ", "
+			   "timer initial count 0x%x, period %lldns, "
+			   "expire @ 0x%016" PRIx64 ".\n", __func__,
@@ -705,6 +725,30 @@ static void start_apic_timer(struct kvm_lapic *apic)
 		   apic->lapic_timer.period,
 		   ktime_to_ns(ktime_add_ns(now,
 				apic->lapic_timer.period)));
+	} else if (apic_lvtt_tscdeadline(apic)) {
+		/* lapic timer in tsc deadline mode */
+		u64 guest_tsc, tscdeadline = apic->lapic_timer.tscdeadline;
+		u64 ns = 0;
+		struct kvm_vcpu *vcpu = apic->vcpu;
+		unsigned long this_tsc_khz = vcpu_tsc_khz(vcpu);
+		unsigned long flags;
+
+		if (unlikely(!tscdeadline || !this_tsc_khz))
+			return;
+
+		local_irq_save(flags);
+
+		now = apic->lapic_timer.timer.base->get_time();
+		guest_tsc = kvm_x86_ops->read_l1_tsc(vcpu);
+		if (likely(tscdeadline > guest_tsc)) {
+			ns = (tscdeadline - guest_tsc) * 1000000ULL;
+			do_div(ns, this_tsc_khz);
+		}
+		hrtimer_start(&apic->lapic_timer.timer,
+			ktime_add_ns(now, ns), HRTIMER_MODE_ABS);
+
+		local_irq_restore(flags);
+	}
 }
 
 static void apic_manage_nmi_watchdog(struct kvm_lapic *apic, u32 lvt0_val)
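
In TSC deadline mode the guest programs an absolute TSC value, so the new branch above derives the hrtimer interval from the distance between that deadline and the current guest TSC, scaled by the guest TSC frequency: ns = (tscdeadline - guest_tsc) * 1,000,000 / this_tsc_khz, and a deadline that has already passed arms the timer for immediate expiry. A stand-alone sketch of the same arithmetic, outside KVM (deadline_to_ns() is a hypothetical helper; the numbers in the comment are illustrative):

#include <stdint.h>

/* Example: with a 2 GHz guest TSC (tsc_khz = 2000000) and a deadline
 * 4000000 cycles in the future,
 *   ns = 4000000 * 1000000 / 2000000 = 2000000 ns = 2 ms. */
static uint64_t deadline_to_ns(uint64_t tscdeadline, uint64_t guest_tsc,
			       uint64_t tsc_khz)
{
	if (!tsc_khz || tscdeadline <= guest_tsc)
		return 0;	/* no TSC rate (KVM bails out earlier) or already expired */
	return (tscdeadline - guest_tsc) * 1000000ULL / tsc_khz;
}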
@@ -792,7 +836,6 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
 	case APIC_LVT0:
 		apic_manage_nmi_watchdog(apic, val);
-	case APIC_LVTT:
 	case APIC_LVTTHMR:
 	case APIC_LVTPC:
 	case APIC_LVT1:
@@ -806,7 +849,22 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
 		break;
 
+	case APIC_LVTT:
+		if ((apic_get_reg(apic, APIC_LVTT) &
+		    apic->lapic_timer.timer_mode_mask) !=
+		   (val & apic->lapic_timer.timer_mode_mask))
+			hrtimer_cancel(&apic->lapic_timer.timer);
+
+		if (!apic_sw_enabled(apic))
+			val |= APIC_LVT_MASKED;
+		val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
+		apic_set_reg(apic, APIC_LVTT, val);
+		break;
+
 	case APIC_TMICT:
+		if (apic_lvtt_tscdeadline(apic))
+			break;
+
 		hrtimer_cancel(&apic->lapic_timer.timer);
 		apic_set_reg(apic, APIC_TMICT, val);
 		start_apic_timer(apic);
@@ -902,6 +960,32 @@ void kvm_free_lapic(struct kvm_vcpu *vcpu)
  *----------------------------------------------------------------------
  */
 
+u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	if (!apic)
+		return 0;
+
+	if (apic_lvtt_oneshot(apic) || apic_lvtt_period(apic))
+		return 0;
+
+	return apic->lapic_timer.tscdeadline;
+}
+
+void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data)
+{
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	if (!apic)
+		return;
+
+	if (apic_lvtt_oneshot(apic) || apic_lvtt_period(apic))
+		return;
+
+	hrtimer_cancel(&apic->lapic_timer.timer);
+	apic->lapic_timer.tscdeadline = data;
+	start_apic_timer(apic);
+}
+
 void kvm_lapic_set_tpr(struct kvm_vcpu *vcpu, unsigned long cr8)
 {
 	struct kvm_lapic *apic = vcpu->arch.apic;
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 82872432d475..138e8cc6fea6 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -42,6 +42,9 @@ int kvm_lapic_enabled(struct kvm_vcpu *vcpu);
 bool kvm_apic_present(struct kvm_vcpu *vcpu);
 int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu);
 
+u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu);
+void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data);
+
 void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr);
 void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu);
 void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index d51e40733fcb..cf269096eadf 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -600,6 +600,8 @@ static bool guest_cpuid_has_fsgsbase(struct kvm_vcpu *vcpu)
 static void update_cpuid(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
+	struct kvm_lapic *apic = vcpu->arch.apic;
+	u32 timer_mode_mask;
 
 	best = kvm_find_cpuid_entry(vcpu, 1, 0);
 	if (!best)
@@ -611,6 +613,16 @@ static void update_cpuid(struct kvm_vcpu *vcpu)
 		if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE))
 			best->ecx |= bit(X86_FEATURE_OSXSAVE);
 	}
+
+	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+		best->function == 0x1) {
+		best->ecx |= bit(X86_FEATURE_TSC_DEADLINE_TIMER);
+		timer_mode_mask = 3 << 17;
+	} else
+		timer_mode_mask = 1 << 17;
+
+	if (apic)
+		apic->lapic_timer.timer_mode_mask = timer_mode_mask;
 }
 
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
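
update_cpuid() now sizes the writable LVT timer mode field from CPUID: on Intel hosts the deadline timer is advertised to the guest via CPUID.01H:ECX bit 24 (X86_FEATURE_TSC_DEADLINE_TIMER) and the mode field spans bits 18:17 (timer_mode_mask = 3 << 17), while other vendors keep only the legacy periodic bit (1 << 17). A guest-side probe of that CPUID bit might look like the following sketch (has_tsc_deadline_timer() is a hypothetical helper using GCC's <cpuid.h>):

#include <cpuid.h>
#include <stdbool.h>

/* True if CPUID.01H:ECX reports the TSC deadline timer (bit 24). */
static bool has_tsc_deadline_timer(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(1, &eax, &ebx, &ecx, &edx))
		return false;
	return ecx & (1u << 24);
}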
@@ -826,6 +838,7 @@ static u32 msrs_to_save[] = {
 static unsigned num_msrs_to_save;
 
 static u32 emulated_msrs[] = {
+	MSR_IA32_TSCDEADLINE,
 	MSR_IA32_MISC_ENABLE,
 	MSR_IA32_MCG_STATUS,
 	MSR_IA32_MCG_CTL,
@@ -1001,7 +1014,7 @@ static inline int kvm_tsc_changes_freq(void)
 	return ret;
 }
 
-static u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
+u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
 {
 	if (vcpu->arch.virtual_tsc_khz)
 		return vcpu->arch.virtual_tsc_khz;
@@ -1565,6 +1578,9 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data)
 		break;
 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
 		return kvm_x2apic_msr_write(vcpu, msr, data);
+	case MSR_IA32_TSCDEADLINE:
+		kvm_set_lapic_tscdeadline_msr(vcpu, data);
+		break;
 	case MSR_IA32_MISC_ENABLE:
 		vcpu->arch.ia32_misc_enable_msr = data;
 		break;
@@ -1894,6 +1910,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case APIC_BASE_MSR ... APIC_BASE_MSR + 0x3ff:
 		return kvm_x2apic_msr_read(vcpu, msr, pdata);
 		break;
+	case MSR_IA32_TSCDEADLINE:
+		data = kvm_get_lapic_tscdeadline_msr(vcpu);
+		break;
 	case MSR_IA32_MISC_ENABLE:
 		data = vcpu->arch.ia32_misc_enable_msr;
 		break;
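
With the MSR plumbing above in place, the guest-visible programming model is: select the deadline mode in the LVT Timer register, then write an absolute TSC value to IA32_TSC_DEADLINE (MSR 0x6e0, the kernel's MSR_IA32_TSCDEADLINE), which KVM forwards to kvm_set_lapic_tscdeadline_msr(). A bare-metal style sketch of that sequence from inside a guest (arm_deadline_timer() and wrmsr64() are hypothetical helpers; the xAPIC base and register offset are the architectural defaults, and interrupt/vector setup is omitted):

#define XAPIC_BASE		0xfee00000UL	/* default xAPIC MMIO base */
#define APIC_REG_LVTT		0x320		/* LVT Timer register */
#define MSR_IA32_TSC_DEADLINE	0x6e0

static inline void wrmsr64(unsigned int msr, unsigned long long val)
{
	asm volatile("wrmsr"
		     :: "c"(msr), "a"((unsigned int)val),
			"d"((unsigned int)(val >> 32)));
}

/* Deliver 'vector' once the guest TSC reaches the absolute value 'deadline'. */
static void arm_deadline_timer(unsigned char vector, unsigned long long deadline)
{
	volatile unsigned int *lvtt =
		(volatile unsigned int *)(XAPIC_BASE + APIC_REG_LVTT);

	*lvtt = (2u << 17) | vector;	/* timer mode 10b = TSC deadline */
	wrmsr64(MSR_IA32_TSC_DEADLINE, deadline);
}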