path: root/arch/x86/kvm/x86.c
author		Zachary Amsden <zamsden@redhat.com>	2010-08-20 04:07:20 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:51:22 -0400
commit		f38e098ff3a315bb74abbb4a35cba11bbea8e2fa
tree		22b605d3984c20456cfdc7c4fe22883450da2141 /arch/x86/kvm/x86.c
parent		99e3e30aee1a326a98bf3a5f47b8622219c685f3
KVM: x86: TSC reset compensation
Attempt to synchronize TSCs which are reset to the same value.  In the
case of a reliable hardware TSC, we can just re-use the same offset, but
on non-reliable hardware, we can get closer by adjusting the offset to
match the elapsed time.

Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
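[Editor's note] The adjustment on unstable-TSC hardware is a unit conversion
from elapsed nanoseconds to TSC ticks at the CPU's rate in kHz. Below is a
minimal user-space C sketch of that arithmetic: the constants and the formula
mirror the patch further down, while the tsc_delta() helper, the harness, and
the sample numbers are hypothetical illustrations, not kernel code.

	#include <stdio.h>
	#include <stdint.h>

	#define NSEC_PER_SEC	1000000000ULL	/* matches the kernel constant */
	#define USEC_PER_SEC	1000000ULL	/* matches the kernel constant */

	/*
	 * Convert elapsed wall-clock nanoseconds into elapsed TSC ticks for
	 * a TSC running at tsc_khz kilocycles per second (the unstable-TSC
	 * branch in the patch below):
	 *   ticks = ns * tsc_khz * 1000 / NSEC_PER_SEC
	 *         = ns * tsc_khz / USEC_PER_SEC
	 */
	static uint64_t tsc_delta(uint64_t elapsed_ns, uint64_t tsc_khz)
	{
		return elapsed_ns * tsc_khz / USEC_PER_SEC;
	}

	int main(void)
	{
		uint64_t elapsed = 3 * NSEC_PER_SEC;	/* hypothetical: writes 3 s apart */
		uint64_t khz = 2800000;			/* hypothetical: 2.8 GHz TSC */

		/* 3 s at 2.8 GHz -> 8,400,000,000 ticks added to the offset */
		printf("tsc_delta = %llu ticks\n",
		       (unsigned long long)tsc_delta(elapsed, khz));
		return 0;
	}

The product stays well inside u64 range here, since the patch caps elapsed at
5 seconds before this path is taken.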
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--	arch/x86/kvm/x86.c	31
1 file changed, 30 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 886132b6ef14..e7da14c317e6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -898,11 +898,40 @@ static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 {
 	struct kvm *kvm = vcpu->kvm;
-	u64 offset;
+	u64 offset, ns, elapsed;
 	unsigned long flags;
+	struct timespec ts;
 
 	spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = data - native_read_tsc();
+	ktime_get_ts(&ts);
+	monotonic_to_bootbased(&ts);
+	ns = timespec_to_ns(&ts);
+	elapsed = ns - kvm->arch.last_tsc_nsec;
+
+	/*
+	 * Special case: an identical write to the TSC within 5 seconds of
+	 * another CPU is interpreted as an attempt to synchronize
+	 * (the 5 seconds is to accommodate host load / swapping).
+	 *
+	 * In that case, for a reliable TSC, we can match TSC offsets,
+	 * or make a best guess using the kernel_ns value.
+	 */
+	if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
+		if (!check_tsc_unstable()) {
+			offset = kvm->arch.last_tsc_offset;
+			pr_debug("kvm: matched tsc offset for %llu\n", data);
+		} else {
+			u64 tsc_delta = elapsed * __get_cpu_var(cpu_tsc_khz);
+			tsc_delta = tsc_delta / USEC_PER_SEC;
+			offset += tsc_delta;
+			pr_debug("kvm: adjusted tsc offset by %llu\n", tsc_delta);
+		}
+		ns = kvm->arch.last_tsc_nsec;
+	}
+	kvm->arch.last_tsc_nsec = ns;
+	kvm->arch.last_tsc_write = data;
+	kvm->arch.last_tsc_offset = offset;
 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
 	spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
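[Editor's note] A worked example with hypothetical numbers: if a second vCPU
writes the same TSC value 2 seconds after the first on an unstable 3,000,000
kHz (3 GHz) TSC, the offset is advanced by 2,000,000,000 ns * 3,000,000 kHz /
USEC_PER_SEC = 6,000,000,000 ticks, i.e. exactly 2 seconds of guest TSC time.
On a reliable TSC the earlier last_tsc_offset is reused verbatim instead, so
both vCPUs observe an identical guest TSC.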