 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/x86.c              | 44 +++++++++++++++++++++++++++++---------------
 2 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ddebbe01fff9..8a34fca6c572 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -513,6 +513,7 @@ struct kvm_arch {
 	u64 last_tsc_nsec;
 	u64 last_tsc_offset;
 	u64 last_tsc_write;
+	u32 last_tsc_khz;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
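Note: the new last_tsc_khz field caches the virtual TSC frequency at which the last guest TSC write was recorded; the matching logic in x86.c below only reuses an old offset when that frequency is unchanged, since the same cycle delta corresponds to a different amount of real time at a different frequency. A minimal standalone sketch of the unit arithmetic (userspace C, names hypothetical):

    #include <stdint.h>

    /* A TSC frequency in kHz means khz * 1000 cycles per second, so a
     * cycle delta converts to nanoseconds as cycles * 1000000 / khz.
     * The same 10^9-cycle delta is 1 s at 1 GHz (khz = 1000000) but
     * 0.5 s at 2 GHz, which is why a matched write must also match
     * the frequency. Assumes cycles * 10^6 fits in 64 bits. */
    static int64_t cycles_to_ns(int64_t cycles, uint32_t khz)
    {
    	return cycles * 1000000 / (int64_t)khz;
    }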
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 41bb90acb238..4390f42b371f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1025,33 +1025,46 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
-	s64 sdiff;
+	s64 nsdiff;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
 	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
-	sdiff = data - kvm->arch.last_tsc_write;
-	if (sdiff < 0)
-		sdiff = -sdiff;
+
+	/* n.b - signed multiplication and division required */
+	nsdiff = data - kvm->arch.last_tsc_write;
+#ifdef CONFIG_X86_64
+	nsdiff = (nsdiff * 1000) / vcpu->arch.virtual_tsc_khz;
+#else
+	/* do_div() only does unsigned */
+	asm("idivl %2; xor %%edx, %%edx"
+	    : "=A"(nsdiff)
+	    : "A"(nsdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+#endif
+	nsdiff -= elapsed;
+	if (nsdiff < 0)
+		nsdiff = -nsdiff;
 
 	/*
-	 * Special case: close write to TSC within 5 seconds of
-	 * another CPU is interpreted as an attempt to synchronize
-	 * The 5 seconds is to accommodate host load / swapping as
-	 * well as any reset of TSC during the boot process.
-	 *
-	 * In that case, for a reliable TSC, we can match TSC offsets,
-	 * or make a best guest using elapsed value.
-	 */
-	if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
-	    elapsed < 5ULL * NSEC_PER_SEC) {
+	 * Special case: TSC write with a small delta (1 second) of virtual
+	 * cycle time against real time is interpreted as an attempt to
+	 * synchronize the CPU.
+	 *
+	 * For a reliable TSC, we can match TSC offsets, and for an unstable
+	 * TSC, we add elapsed time in this computation.  We could let the
+	 * compensation code attempt to catch up if we fall behind, but
+	 * it's better to try to match offsets from the beginning.
+	 */
+	if (nsdiff < NSEC_PER_SEC &&
+	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.last_tsc_offset;
 			pr_debug("kvm: matched tsc offset for %llu\n", data);
 		} else {
 			u64 delta = nsec_to_cycles(vcpu, elapsed);
-			offset += delta;
+			data += delta;
+			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
 		ns = kvm->arch.last_tsc_nsec;
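Note on the #ifdef above: nsdiff may be negative, and the kernel's do_div() helper only performs unsigned 64-by-32 division, so the 32-bit x86 path issues the signed idivl instruction directly (the "=A"/"A" constraints bind the edx:eax register pair). A rough portable sketch of the signed division those lines perform, with the sign handled manually around an unsigned divide (illustrative only, not the kernel's helper):

    #include <stdint.h>

    static int64_t sdiv64_by_u32(int64_t dividend, uint32_t divisor)
    {
    	/* Strip the sign, divide unsigned (the operation a
    	 * do_div()-style helper can do), then restore the sign
    	 * on the quotient. */
    	uint64_t mag = dividend < 0 ? -(uint64_t)dividend : (uint64_t)dividend;
    	uint64_t q = mag / divisor;
    	return dividend < 0 ? -(int64_t)q : (int64_t)q;
    }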
@@ -1059,6 +1072,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	kvm->arch.last_tsc_nsec = ns;
 	kvm->arch.last_tsc_write = data;
 	kvm->arch.last_tsc_offset = offset;
+	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
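Taken together, the heuristic is: a guest TSC write is treated as an attempt to synchronize with the previous one when converting the cycle delta to time and comparing it against the wall-clock time elapsed since that write leaves under a second of skew, and the virtual frequency is unchanged. A compact, self-contained sketch of that decision (hypothetical names; the sketch converts cycles to nanoseconds with a factor of 10^6/khz so that the NSEC_PER_SEC bound is compared in consistent units):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdlib.h>

    #define NSEC_PER_SEC 1000000000LL

    struct last_tsc {		/* mirrors the kvm_arch bookkeeping */
    	int64_t  nsec;		/* kernel time at the last write */
    	uint64_t write;		/* guest TSC value last written */
    	uint32_t khz;		/* virtual TSC frequency at that write */
    };

    /* Should a new write (value data, at wall-clock now_ns, at
     * frequency khz) be treated as a synchronization attempt? */
    static bool tsc_write_matches(const struct last_tsc *last, uint64_t data,
    			      uint32_t khz, int64_t now_ns)
    {
    	int64_t cycles = (int64_t)(data - last->write);
    	int64_t ns = cycles * 1000000 / (int64_t)khz;	/* cycles -> ns */
    	int64_t skew = llabs(ns - (now_ns - last->nsec));

    	return skew < NSEC_PER_SEC && khz == last->khz;
    }

On a match, a stable host TSC simply reuses last_tsc_offset; an unstable one advances data by nsec_to_cycles(vcpu, elapsed) and recomputes the offset from there, so guests writing nearby TSC values end up on one continuous timeline instead of drifting apart.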