author	Joerg Roedel <joerg.roedel@amd.com>	2011-03-25 04:44:50 -0400
committer	Avi Kivity <avi@redhat.com>	2011-05-11 07:57:05 -0400
commit	857e40999e35906baa367a79137019912cfb5434 (patch)
tree	eacae3e4ccffd38b3e6a55b9bc6afdc7ae2c5e9a
parent	4051b18801f5b47bb0369feefdc80e57819d0ddf (diff)
KVM: X86: Delegate tsc-offset calculation to architecture code
With TSC scaling in SVM the tsc-offset needs to be calculated
differently. This patch propagates this calculation into the
architecture specific modules so that this complexity can be
handled there.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
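In other words, with scaling active the guest observes guest_tsc =
scale(host_tsc) + tsc_offset, so the offset that makes the guest read a
given target value must be taken against the scaled host TSC rather than
the raw one. A minimal standalone sketch of that arithmetic follows;
scale_tsc() is a hypothetical stand-in for SVM's svm_scale_tsc(), and the
frequency parameters are illustrative assumptions, not the kernel's API:

/*
 * Illustrative sketch, not kernel code: why compute_tsc_offset() must be
 * per-architecture.  With TSC scaling the guest observes
 *
 *     guest_tsc = scale(host_tsc) + tsc_offset
 *
 * so tsc_offset = target_tsc - scale(host_tsc).  Without scaling,
 * scale() is the identity and the old "target_tsc - host_tsc" holds.
 */
#include <stdint.h>

/* Hypothetical stand-in for svm_scale_tsc(): host cycles scaled by the
 * guest/host TSC frequency ratio (64-bit overflow ignored for brevity). */
static uint64_t scale_tsc(uint64_t host_tsc, uint32_t guest_khz,
			  uint32_t host_khz)
{
	return host_tsc * guest_khz / host_khz;
}

static uint64_t compute_tsc_offset(uint64_t target_tsc, uint64_t host_tsc,
				   uint32_t guest_khz, uint32_t host_khz)
{
	/* Offset in guest-TSC units, as in svm_compute_tsc_offset() below. */
	return target_tsc - scale_tsc(host_tsc, guest_khz, host_khz);
}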
-rw-r--r--	arch/x86/include/asm/kvm_host.h	2
-rw-r--r--	arch/x86/kvm/svm.c	10
-rw-r--r--	arch/x86/kvm/vmx.c	6
-rw-r--r--	arch/x86/kvm/x86.c	10
4 files changed, 23 insertions(+), 5 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f3a7116f802..da0a8ce3a13 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -609,6 +609,8 @@ struct kvm_x86_ops {
 	void (*set_tsc_khz)(struct kvm_vcpu *vcpu, u32 user_tsc_khz);
 	void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
+	u64 (*compute_tsc_offset)(struct kvm_vcpu *vcpu, u64 target_tsc);
+
 	void (*get_exit_info)(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2);
 
 	int (*check_intercept)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a39fde4f5fe..8c4549bef4e 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -943,6 +943,15 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
+static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+	u64 tsc;
+
+	tsc = svm_scale_tsc(vcpu, native_read_tsc());
+
+	return target_tsc - tsc;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -4194,6 +4203,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_tsc_khz = svm_set_tsc_khz,
 	.write_tsc_offset = svm_write_tsc_offset,
 	.adjust_tsc_offset = svm_adjust_tsc_offset,
+	.compute_tsc_offset = svm_compute_tsc_offset,
 
 	.set_tdp_cr3 = set_tdp_cr3,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e19c7a5473d..aabe3334d06 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1184,6 +1184,11 @@ static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
 	vmcs_write64(TSC_OFFSET, offset + adjustment);
 }
 
+static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
+{
+	return target_tsc - native_read_tsc();
+}
+
 /*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
@@ -4510,6 +4515,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.set_tsc_khz = vmx_set_tsc_khz,
 	.write_tsc_offset = vmx_write_tsc_offset,
 	.adjust_tsc_offset = vmx_adjust_tsc_offset,
+	.compute_tsc_offset = vmx_compute_tsc_offset,
 
 	.set_tdp_cr3 = vmx_set_cr3,
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fcce29b7b6f..579ce34e790 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -977,7 +977,7 @@ static u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
 	return __this_cpu_read(cpu_tsc_khz);
 }
 
-static inline u64 nsec_to_cycles(u64 nsec)
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 {
 	u64 ret;
 
@@ -985,7 +985,7 @@ static inline u64 nsec_to_cycles(u64 nsec)
 	if (kvm_tsc_changes_freq())
 		printk_once(KERN_WARNING
 		"kvm: unreliable cycle conversion on adjustable rate TSC\n");
-	ret = nsec * __this_cpu_read(cpu_tsc_khz);
+	ret = nsec * vcpu_tsc_khz(vcpu);
 	do_div(ret, USEC_PER_SEC);
 	return ret;
 }
@@ -1015,7 +1015,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	s64 sdiff;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-	offset = data - native_read_tsc();
+	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
 	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 	sdiff = data - kvm->arch.last_tsc_write;
@@ -1031,13 +1031,13 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	 * In that case, for a reliable TSC, we can match TSC offsets,
 	 * or make a best guest using elapsed value.
 	 */
-	if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
+	if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
 	    elapsed < 5ULL * NSEC_PER_SEC) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.last_tsc_offset;
 			pr_debug("kvm: matched tsc offset for %llu\n", data);
 		} else {
-			u64 delta = nsec_to_cycles(elapsed);
+			u64 delta = nsec_to_cycles(vcpu, elapsed);
 			offset += delta;
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
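For context on the nsec_to_cycles() change above: the helper converts
nanoseconds to TSC cycles as cycles = nsec * tsc_khz / USEC_PER_SEC, and
the patch switches it from the host's cpu_tsc_khz to the per-vcpu rate so
the 5-second match window is measured in guest cycles. A small standalone
check of that arithmetic (the 2 GHz figure is an assumed example, not
anything taken from the patch):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* nsec_to_cycles(): cycles = nsec * tsc_khz / USEC_PER_SEC */
	uint64_t nsec    = 5000000000ULL; /* the 5 s window in kvm_write_tsc() */
	uint64_t tsc_khz = 2000000;       /* assumed 2 GHz guest TSC */
	uint64_t cycles  = nsec * tsc_khz / 1000000;

	/* 5 s at 2 GHz -> 10000000000 cycles */
	printf("%llu cycles\n", (unsigned long long)cycles);
	return 0;
}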