author     Joerg Roedel <joerg.roedel@amd.com>    2011-03-25 04:44:50 -0400
committer  Avi Kivity <avi@redhat.com>            2011-05-11 07:57:05 -0400
commit     857e40999e35906baa367a79137019912cfb5434 (patch)
tree       eacae3e4ccffd38b3e6a55b9bc6afdc7ae2c5e9a /arch/x86/kvm/x86.c
parent     4051b18801f5b47bb0369feefdc80e57819d0ddf (diff)
KVM: X86: Delegate tsc-offset calculation to architecture code
With TSC scaling in SVM the tsc-offset needs to be calculated differently. This patch propagates this calculation into the architecture specific modules so that this complexity can be handled there.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
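The hunks below cover only the common x86.c side, where the open-coded "data - native_read_tsc()" is replaced by a call to the new kvm_x86_ops->compute_tsc_offset() hook. As a rough, self-contained userspace sketch of why the calculation has to live in the vendor modules: with TSC scaling the guest-visible TSC is derived from a scaled host TSC, so a plain difference is no longer sufficient. The struct and function names and the fixed 2x scaling ratio here are illustrative only, not the actual KVM code.

/* Illustrative model of delegating the tsc-offset calculation, not kernel code. */
#include <stdint.h>
#include <stdio.h>

struct tsc_ops {
	/* models the compute_tsc_offset() hook this patch introduces */
	uint64_t (*compute_tsc_offset)(uint64_t host_tsc, uint64_t target_tsc);
};

/* No scaling: guest TSC = host TSC + offset, so the offset is a plain difference. */
static uint64_t plain_compute_tsc_offset(uint64_t host_tsc, uint64_t target_tsc)
{
	return target_tsc - host_tsc;
}

/* With TSC scaling: guest TSC = scale(host TSC) + offset, so the host TSC must
 * be scaled first. A fixed 2x ratio stands in for the hardware scaling ratio. */
static uint64_t scaled_compute_tsc_offset(uint64_t host_tsc, uint64_t target_tsc)
{
	uint64_t scaled = host_tsc * 2;
	return target_tsc - scaled;
}

int main(void)
{
	uint64_t host_tsc = 1000000, target = 5000000;
	struct tsc_ops no_scaling = { plain_compute_tsc_offset };
	struct tsc_ops with_scaling = { scaled_compute_tsc_offset };

	printf("plain offset:  %llu\n",
	       (unsigned long long)no_scaling.compute_tsc_offset(host_tsc, target));
	printf("scaled offset: %llu\n",
	       (unsigned long long)with_scaling.compute_tsc_offset(host_tsc, target));
	return 0;
}

Generic code cannot know which of these the hardware expects, which is why the offset computation is pushed behind a per-architecture callback.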
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c  |  10 +++++-----
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index fcce29b7b6f..579ce34e790 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -977,7 +977,7 @@ static u64 vcpu_tsc_khz(struct kvm_vcpu *vcpu)
 	return __this_cpu_read(cpu_tsc_khz);
 }
 
-static inline u64 nsec_to_cycles(u64 nsec)
+static inline u64 nsec_to_cycles(struct kvm_vcpu *vcpu, u64 nsec)
 {
 	u64 ret;
 
@@ -985,7 +985,7 @@ static inline u64 nsec_to_cycles(u64 nsec)
 	if (kvm_tsc_changes_freq())
 		printk_once(KERN_WARNING
 		 "kvm: unreliable cycle conversion on adjustable rate TSC\n");
-	ret = nsec * __this_cpu_read(cpu_tsc_khz);
+	ret = nsec * vcpu_tsc_khz(vcpu);
 	do_div(ret, USEC_PER_SEC);
 	return ret;
 }
@@ -1015,7 +1015,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	s64 sdiff;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
-	offset = data - native_read_tsc();
+	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
 	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 	sdiff = data - kvm->arch.last_tsc_write;
@@ -1031,13 +1031,13 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	 * In that case, for a reliable TSC, we can match TSC offsets,
 	 * or make a best guest using elapsed value.
 	 */
-	if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
+	if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
 	    elapsed < 5ULL * NSEC_PER_SEC) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.last_tsc_offset;
 			pr_debug("kvm: matched tsc offset for %llu\n", data);
 		} else {
-			u64 delta = nsec_to_cycles(elapsed);
+			u64 delta = nsec_to_cycles(vcpu, elapsed);
 			offset += delta;
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
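For reference, the per-vCPU conversion nsec_to_cycles() now performs is cycles = nsec * tsc_khz / USEC_PER_SEC. A minimal userspace sketch of that arithmetic, applied to the 5-second window used in kvm_write_tsc() above; the 2.5 GHz rate is an assumed example value, not taken from the patch:

/* Standalone check of the nsec_to_cycles() arithmetic; not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define USEC_PER_SEC 1000000ULL

static uint64_t nsec_to_cycles(uint64_t tsc_khz, uint64_t nsec)
{
	/* mirrors ret = nsec * tsc_khz; do_div(ret, USEC_PER_SEC); */
	return nsec * tsc_khz / USEC_PER_SEC;
}

int main(void)
{
	/* 5 seconds at an assumed 2,500,000 kHz (2.5 GHz) vCPU TSC rate */
	uint64_t cycles = nsec_to_cycles(2500000, 5ULL * NSEC_PER_SEC);
	printf("5s at 2.5 GHz = %llu cycles\n", (unsigned long long)cycles); /* 12500000000 */
	return 0;
}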