Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/cpufeature.h |  1 +
-rw-r--r--  arch/x86/include/asm/kvm_host.h   |  2 ++
-rw-r--r--  arch/x86/include/asm/msr-index.h  |  1 +
-rw-r--r--  arch/x86/kvm/cpuid.c              |  2 ++
-rw-r--r--  arch/x86/kvm/cpuid.h              |  8 ++++++++
-rw-r--r--  arch/x86/kvm/svm.c                |  8 ++++++++
-rw-r--r--  arch/x86/kvm/vmx.c                |  9 +++++++++
-rw-r--r--  arch/x86/kvm/x86.c                | 22 ++++++++++++++++++++++
8 files changed, 53 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
index 8c297aa53eef..602c4764614d 100644
--- a/arch/x86/include/asm/cpufeature.h
+++ b/arch/x86/include/asm/cpufeature.h
@@ -202,6 +202,7 @@
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ebx), word 9 */
 #define X86_FEATURE_FSGSBASE	(9*32+ 0) /* {RD/WR}{FS/GS}BASE instructions*/
+#define X86_FEATURE_TSC_ADJUST	(9*32+ 1) /* TSC adjustment MSR 0x3b */
 #define X86_FEATURE_BMI1	(9*32+ 3) /* 1st group bit manipulation extensions */
 #define X86_FEATURE_HLE		(9*32+ 4) /* Hardware Lock Elision */
 #define X86_FEATURE_AVX2	(9*32+ 5) /* AVX2 instructions */
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 56c5dca9d78d..dc87b65e9c3a 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -444,6 +444,7 @@ struct kvm_vcpu_arch {
 	s8 virtual_tsc_shift;
 	u32 virtual_tsc_mult;
 	u32 virtual_tsc_khz;
+	s64 ia32_tsc_adjust_msr;
 
 	atomic_t nmi_queued;  /* unprocessed asynchronous NMIs */
 	unsigned nmi_pending; /* NMI queued after currently running handler */
@@ -711,6 +712,7 @@ struct kvm_x86_ops {
 	bool (*has_wbinvd_exit)(void);
 
 	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale);
+	u64 (*read_tsc_offset)(struct kvm_vcpu *vcpu);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
 	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
index 7f0edceb7563..c2dea36dd7ac 100644
--- a/arch/x86/include/asm/msr-index.h
+++ b/arch/x86/include/asm/msr-index.h
@@ -236,6 +236,7 @@
 #define MSR_IA32_EBL_CR_POWERON		0x0000002a
 #define MSR_EBC_FREQUENCY_ID		0x0000002c
 #define MSR_IA32_FEATURE_CONTROL	0x0000003a
+#define MSR_IA32_TSC_ADJUST		0x0000003b
 
 #define FEATURE_CONTROL_LOCKED		(1<<0)
 #define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX	(1<<1)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index ec79e773342e..52f6166ef92c 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -320,6 +320,8 @@ static int do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 		if (index == 0) {
 			entry->ebx &= kvm_supported_word9_x86_features;
 			cpuid_mask(&entry->ebx, 9);
+			/* TSC_ADJUST is emulated */
+			entry->ebx |= F(TSC_ADJUST);
 		} else
 			entry->ebx = 0;
 		entry->eax = 0;
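
Because the MSR is emulated in software, the hunk above forces the feature bit on even when the host CPU lacks it. For context, a guest discovers the bit in CPUID.(EAX=07H,ECX=0):EBX[1]; the following user-space probe is a minimal sketch of that check, using GCC/Clang's cpuid.h helpers (illustrative only, not part of this patch):

#include <stdbool.h>
#include <cpuid.h>      /* GCC/Clang CPUID intrinsics */

/* Probe CPUID.(EAX=7,ECX=0):EBX bit 1, the TSC_ADJUST flag that
 * do_cpuid_ent() above now always advertises to the guest. */
static bool cpu_has_tsc_adjust(void)
{
        unsigned int eax, ebx, ecx, edx;

        if (!__get_cpuid_count(7, 0, &eax, &ebx, &ecx, &edx))
                return false;   /* CPUID leaf 7 not supported */
        return ebx & (1u << 1);
}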
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index a10e46016851..3a8b50474477 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -28,6 +28,14 @@ static inline bool guest_cpuid_has_xsave(struct kvm_vcpu *vcpu)
 	return best && (best->ecx & bit(X86_FEATURE_XSAVE));
 }
 
+static inline bool guest_cpuid_has_tsc_adjust(struct kvm_vcpu *vcpu)
+{
+	struct kvm_cpuid_entry2 *best;
+
+	best = kvm_find_cpuid_entry(vcpu, 7, 0);
+	return best && (best->ebx & bit(X86_FEATURE_TSC_ADJUST));
+}
+
 static inline bool guest_cpuid_has_smep(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index fc22e58d23b7..dcb79527e7aa 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1009,6 +1009,13 @@ static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 	svm->tsc_ratio = ratio;
 }
 
+static u64 svm_read_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	return svm->vmcb->control.tsc_offset;
+}
+
 static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4304,6 +4311,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
 	.set_tsc_khz = svm_set_tsc_khz,
+	.read_tsc_offset = svm_read_tsc_offset,
 	.write_tsc_offset = svm_write_tsc_offset,
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
 	.compute_tsc_offset = svm_compute_tsc_offset,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 45ffa32352f1..2fd2046dc94c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1884,6 +1884,11 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 		WARN(1, "user requested TSC rate below hardware speed\n");
 }
 
+static u64 vmx_read_tsc_offset(struct kvm_vcpu *vcpu)
+{
+	return vmcs_read64(TSC_OFFSET);
+}
+
 /*
  * writes 'offset' into guest's timestamp counter offset register
  */
@@ -2266,6 +2271,9 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		}
 		ret = kvm_set_msr_common(vcpu, msr_info);
 		break;
+	case MSR_IA32_TSC_ADJUST:
+		ret = kvm_set_msr_common(vcpu, msr_info);
+		break;
 	case MSR_TSC_AUX:
 		if (!vmx->rdtscp_enabled)
 			return 1;
@@ -7345,6 +7353,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
 	.set_tsc_khz = vmx_set_tsc_khz,
+	.read_tsc_offset = vmx_read_tsc_offset,
 	.write_tsc_offset = vmx_write_tsc_offset,
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
 	.compute_tsc_offset = vmx_compute_tsc_offset,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 95f66136f2d5..b0b8abe688ce 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -831,6 +831,7 @@ static u32 msrs_to_save[] = {
 static unsigned num_msrs_to_save;
 
 static const u32 emulated_msrs[] = {
+	MSR_IA32_TSC_ADJUST,
 	MSR_IA32_TSCDEADLINE,
 	MSR_IA32_MISC_ENABLE,
 	MSR_IA32_MCG_STATUS,
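
Listing MSR_IA32_TSC_ADJUST in emulated_msrs[] exposes it through KVM_GET_MSR_INDEX_LIST, so userspace can add it to the MSR state it saves and restores across migration. A minimal sketch of fetching it with the KVM_GET_MSRS vCPU ioctl (hypothetical helper, error handling omitted; not part of this patch):

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* 0x3b, mirroring the MSR_IA32_TSC_ADJUST constant added above */
#define TSC_ADJUST_MSR_INDEX 0x0000003b

/* Read the emulated IA32_TSC_ADJUST value of one vCPU.
 * vcpu_fd is assumed to be an open KVM vCPU file descriptor. */
static uint64_t read_tsc_adjust(int vcpu_fd)
{
        struct {
                struct kvm_msrs hdr;      /* header: number of entries */
                struct kvm_msr_entry entry; /* storage for the one entry */
        } req;

        memset(&req, 0, sizeof(req));
        req.hdr.nmsrs = 1;
        req.entry.index = TSC_ADJUST_MSR_INDEX;

        ioctl(vcpu_fd, KVM_GET_MSRS, &req); /* returns the number of MSRs read */
        return req.entry.data;
}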
@@ -1135,6 +1136,12 @@ void kvm_track_tsc_matching(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
+{
+	u64 curr_offset = kvm_x86_ops->read_tsc_offset(vcpu);
+	vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
+}
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
 	struct kvm *kvm = vcpu->kvm;
@@ -1222,6 +1229,8 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
 	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
 
+	if (guest_cpuid_has_tsc_adjust(vcpu) && !msr->host_initiated)
+		update_ia32_tsc_adjust_msr(vcpu, offset);
 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
@@ -1918,6 +1927,15 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 	case MSR_IA32_TSCDEADLINE:
 		kvm_set_lapic_tscdeadline_msr(vcpu, data);
 		break;
+	case MSR_IA32_TSC_ADJUST:
+		if (guest_cpuid_has_tsc_adjust(vcpu)) {
+			if (!msr_info->host_initiated) {
+				u64 adj = data - vcpu->arch.ia32_tsc_adjust_msr;
+				kvm_x86_ops->adjust_tsc_offset(vcpu, adj, true);
+			}
+			vcpu->arch.ia32_tsc_adjust_msr = data;
+		}
+		break;
 	case MSR_IA32_MISC_ENABLE:
 		vcpu->arch.ia32_misc_enable_msr = data;
 		break;
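
Together with the update_ia32_tsc_adjust_msr() hook in kvm_write_tsc() above, this case preserves the architectural coupling between the TSC and IA32_TSC_ADJUST: a guest WRMSR to the adjust MSR moves the guest TSC by the same delta, and a direct guest TSC write is reflected back into the MSR. A toy model of that invariant (hypothetical names, not KVM code; host-initiated accesses, which deliberately bypass the adjustment, are omitted):

#include <stdint.h>

/* Minimal model of the IA32_TSC_ADJUST semantics being emulated. */
struct tsc_model {
        uint64_t host_tsc;      /* free-running hardware counter */
        int64_t  tsc_offset;    /* VMCS/VMCB TSC offset */
        int64_t  tsc_adjust;    /* emulated IA32_TSC_ADJUST */
};

/* Guest RDTSC observes the hardware TSC plus the offset. */
static uint64_t model_rdtsc(const struct tsc_model *m)
{
        return m->host_tsc + m->tsc_offset;
}

/* WRMSR to IA32_TSC_ADJUST: the TSC shifts by (new - old),
 * mirroring the MSR_IA32_TSC_ADJUST case in kvm_set_msr_common(). */
static void model_wrmsr_tsc_adjust(struct tsc_model *m, int64_t val)
{
        m->tsc_offset += val - m->tsc_adjust;
        m->tsc_adjust  = val;
}

/* Direct guest TSC write: IA32_TSC_ADJUST absorbs the offset change,
 * mirroring update_ia32_tsc_adjust_msr() called from kvm_write_tsc(). */
static void model_write_tsc(struct tsc_model *m, uint64_t target)
{
        int64_t new_offset = (int64_t)(target - m->host_tsc);

        m->tsc_adjust += new_offset - m->tsc_offset;
        m->tsc_offset  = new_offset;
}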
@@ -2277,6 +2295,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 *pdata)
 	case MSR_IA32_TSCDEADLINE:
 		data = kvm_get_lapic_tscdeadline_msr(vcpu);
 		break;
+	case MSR_IA32_TSC_ADJUST:
+		data = (u64)vcpu->arch.ia32_tsc_adjust_msr;
+		break;
 	case MSR_IA32_MISC_ENABLE:
 		data = vcpu->arch.ia32_misc_enable_msr;
 		break;
@@ -6607,6 +6628,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL))
 		goto fail_free_mce_banks;
 
+	vcpu->arch.ia32_tsc_adjust_msr = 0x0;
 	kvm_async_pf_hash_reset(vcpu);
 	kvm_pmu_init(vcpu);
 