author    Marcelo Tosatti <mtosatti@redhat.com>  2012-03-08 16:46:57 -0500
committer Avi Kivity <avi@redhat.com>            2012-03-20 06:40:36 -0400
commit    02626b6af5d2bc62db3bb85fc2891b2725535d44 (patch)
tree      8f309c29c021118b72cd7f1323420120a27a676a /arch/x86/kvm
parent    b74f05d61b73af584d0c39121980171389ecfaaa (diff)
KVM: x86: fix kvm_write_tsc() TSC matching thinko
kvm_write_tsc() converts from guest TSC to microseconds, not nanoseconds
as intended. The result is that the window for matching is 1000 seconds,
not 1 second.

Microsecond precision is enough for checking whether the TSC write delta
is within the heuristic values, so use it instead of nanoseconds.

Noted by Avi Kivity.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
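A minimal user-space sketch of the unit mix-up, not part of the patch: the
2.5 GHz TSC frequency and the 10-second write delta below are made-up values
for illustration. (cycles * 1000) / tsc_khz yields microseconds, so comparing
that value against NSEC_PER_SEC, as the old code did, accepts deltas of up to
1000 seconds.

    /* Illustrative sketch only; values are assumptions for the example. */
    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint64_t tsc_khz = 2500000;                   /* assumed 2.5 GHz guest TSC */
        uint64_t cycles  = 10 * tsc_khz * 1000;       /* a 10-second TSC write delta */
        uint64_t usdiff  = (cycles * 1000) / tsc_khz; /* = 10,000,000 (microseconds) */

        /* old check: 10,000,000 < 1,000,000,000 (NSEC_PER_SEC) -> wrongly matches */
        printf("old window: %s\n", usdiff < 1000000000ULL ? "match" : "no match");
        /* new check: 10,000,000 < 1,000,000 (USEC_PER_SEC) -> correctly rejected */
        printf("new window: %s\n", usdiff < 1000000ULL ? "match" : "no match");
        return 0;
    }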
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/x86.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 32096cf6c6c..7287812eeb7 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1025,7 +1025,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
-	s64 nsdiff;
+	s64 usdiff;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
@@ -1033,18 +1033,19 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	elapsed = ns - kvm->arch.last_tsc_nsec;
 
 	/* n.b - signed multiplication and division required */
-	nsdiff = data - kvm->arch.last_tsc_write;
+	usdiff = data - kvm->arch.last_tsc_write;
 #ifdef CONFIG_X86_64
-	nsdiff = (nsdiff * 1000) / vcpu->arch.virtual_tsc_khz;
+	usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
 #else
 	/* do_div() only does unsigned */
 	asm("idivl %2; xor %%edx, %%edx"
-	    : "=A"(nsdiff)
-	    : "A"(nsdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+	    : "=A"(usdiff)
+	    : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
 #endif
-	nsdiff -= elapsed;
-	if (nsdiff < 0)
-		nsdiff = -nsdiff;
+	do_div(elapsed, 1000);
+	usdiff -= elapsed;
+	if (usdiff < 0)
+		usdiff = -usdiff;
 
 	/*
 	 * Special case: TSC write with a small delta (1 second) of virtual
@@ -1056,7 +1057,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	 * compensation code attempt to catch up if we fall behind, but
 	 * it's better to try to match offsets from the beginning.
 	 */
-	if (nsdiff < NSEC_PER_SEC &&
+	if (usdiff < USEC_PER_SEC &&
 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.cur_tsc_offset;