-rw-r--r--  arch/x86/include/asm/kvm_host.h |  3 +++
-rw-r--r--  arch/x86/kvm/x86.c              | 31 ++++++++++++++++++++++++++++++-
2 files changed, 33 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index a215153f1ff..57b4394491e 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -396,6 +396,9 @@ struct kvm_arch {
 	unsigned long irq_sources_bitmap;
 	s64 kvmclock_offset;
 	spinlock_t tsc_write_lock;
+	u64 last_tsc_nsec;
+	u64 last_tsc_offset;
+	u64 last_tsc_write;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 886132b6ef1..e7da14c317e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -898,11 +898,40 @@ static DEFINE_PER_CPU(unsigned long, cpu_tsc_khz);
 void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 {
 	struct kvm *kvm = vcpu->kvm;
-	u64 offset;
+	u64 offset, ns, elapsed;
 	unsigned long flags;
+	struct timespec ts;
 
 	spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = data - native_read_tsc();
+	ktime_get_ts(&ts);
+	monotonic_to_bootbased(&ts);
+	ns = timespec_to_ns(&ts);
+	elapsed = ns - kvm->arch.last_tsc_nsec;
+
+	/*
+	 * Special case: an identical write to the TSC within 5 seconds of
+	 * another CPU is interpreted as an attempt to synchronize
+	 * (the 5 seconds is to accommodate host load / swapping).
+	 *
+	 * In that case, for a reliable TSC, we can match TSC offsets,
+	 * or make a best guess using the kernel_ns value.
+	 */
+	if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
+		if (!check_tsc_unstable()) {
+			offset = kvm->arch.last_tsc_offset;
+			pr_debug("kvm: matched tsc offset for %llu\n", data);
+		} else {
+			u64 tsc_delta = elapsed * __get_cpu_var(cpu_tsc_khz);
+			tsc_delta = tsc_delta / USEC_PER_SEC;
+			offset += tsc_delta;
+			pr_debug("kvm: adjusted tsc offset by %llu\n", tsc_delta);
+		}
+		ns = kvm->arch.last_tsc_nsec;
+	}
+	kvm->arch.last_tsc_nsec = ns;
+	kvm->arch.last_tsc_write = data;
+	kvm->arch.last_tsc_offset = offset;
 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
 	spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
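Note (not part of the patch): in the unstable-TSC branch above, the elapsed boot-based time is converted into TSC cycles as tsc_delta = elapsed_ns * tsc_khz / USEC_PER_SEC, and that delta is added to the previously recorded offset. Below is a minimal, self-contained userspace sketch of that arithmetic; elapsed_ns_to_tsc_cycles, tsc_khz_example, and the sample values are hypothetical names chosen for illustration and do not appear in the patch.

/* Standalone sketch of the ns-to-TSC-cycle conversion used above. */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC	1000000000ULL
#define USEC_PER_SEC	1000000ULL

static uint64_t elapsed_ns_to_tsc_cycles(uint64_t elapsed_ns, uint64_t tsc_khz)
{
	/*
	 * cycles = elapsed_seconds * tsc_hz
	 *        = (elapsed_ns / 1e9) * (tsc_khz * 1e3)
	 *        = elapsed_ns * tsc_khz / USEC_PER_SEC
	 */
	return elapsed_ns * tsc_khz / USEC_PER_SEC;
}

int main(void)
{
	uint64_t tsc_khz_example = 2400000;		/* hypothetical 2.4 GHz host TSC */
	uint64_t elapsed_ns = 3ULL * NSEC_PER_SEC;	/* 3 seconds since the last TSC write */

	/* ~7.2e9 cycles: the amount by which the previous offset would be advanced */
	printf("tsc_delta = %llu cycles\n",
	       (unsigned long long)elapsed_ns_to_tsc_cycles(elapsed_ns, tsc_khz_example));
	return 0;
}

The division by USEC_PER_SEC folds the ns-to-seconds (1e-9) and kHz-to-Hz (1e3) conversions into a single step; with elapsed time capped by the 5-second window and typical TSC frequencies, the intermediate product stays comfortably within 64 bits.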