author	Zachary Amsden <zamsden@redhat.com>	2010-08-20 04:07:26 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:51:23 -0400
commit	46543ba45fc4b64ca32655efdc8d9c599b4164e2 (patch)
tree	a66d258a99fd7654f1fba049573d0a200ece85e6 /arch
parent	759379dd68c2885d1fafa433083d4487e710a685 (diff)
KVM: x86: Robust TSC compensation
Make the match of TSC find TSC writes that are close to each other
instead of perfectly identical; this allows the compensator to also
work in migration / suspend scenarios.

Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
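In other words, instead of requiring a bit-for-bit identical TSC write from another VCPU, any write that lands within 5 seconds' worth of TSC cycles of the previous write, and arrives within 5 seconds of wall-clock time, is treated as a synchronization attempt and gets the matched offset. Below is a minimal userspace sketch of that matching rule only; the fixed 2.5 GHz frequency and the tsc_write_matches() helper are illustrative assumptions, while nsec_to_cycles(), sdiff, and the 5-second window come from the patch itself.

/*
 * Sketch of the "close enough" TSC write match. The 2.5 GHz TSC
 * frequency and the tsc_write_matches() helper are assumptions for
 * illustration; in the patch the conversion uses nsec_to_cycles()
 * against the host's actual TSC rate.
 */
#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000ULL
#define TSC_KHZ      2500000ULL   /* assumed 2.5 GHz host TSC */

/* Nanoseconds -> TSC cycles at the assumed frequency (khz * ns / 1e6). */
static uint64_t nsec_to_cycles(uint64_t nsec)
{
	return nsec * TSC_KHZ / 1000000ULL;
}

/*
 * Old rule: only an identical write within 5 s of wall-clock time matched.
 * New rule: a write within 5 s worth of cycles of the last write, arriving
 * within 5 s of wall-clock time, is also treated as a sync attempt.
 */
static int tsc_write_matches(uint64_t data, uint64_t last_tsc_write,
			     uint64_t elapsed_ns)
{
	int64_t sdiff = data - last_tsc_write;

	if (sdiff < 0)
		sdiff = -sdiff;

	return sdiff < (int64_t)nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
	       elapsed_ns < 5ULL * NSEC_PER_SEC;
}

int main(void)
{
	/* A second VCPU writing a slightly different TSC 1 ms later still matches. */
	printf("%d\n", tsc_write_matches(1000500, 1000000, 1000000));
	/* A write arriving 10 s later does not. */
	printf("%d\n", tsc_write_matches(1000500, 1000000, 10ULL * NSEC_PER_SEC));
	return 0;
}

The sketch only covers the match decision; what the patch does with a match when the TSC is reliable (reusing kvm->arch.last_tsc_offset) is in the hunk below.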
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/x86.c | 14 ++++++++++----
1 file changed, 10 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4bcb120cc76a..4ff0c271f125 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -928,21 +928,27 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
+	s64 sdiff;
 
 	spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = data - native_read_tsc();
 	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
+	sdiff = data - kvm->arch.last_tsc_write;
+	if (sdiff < 0)
+		sdiff = -sdiff;
 
 	/*
-	 * Special case: identical write to TSC within 5 seconds of
+	 * Special case: close write to TSC within 5 seconds of
 	 * another CPU is interpreted as an attempt to synchronize
-	 * (the 5 seconds is to accomodate host load / swapping).
+	 * The 5 seconds is to accomodate host load / swapping as
+	 * well as any reset of TSC during the boot process.
 	 *
 	 * In that case, for a reliable TSC, we can match TSC offsets,
-	 * or make a best guest using kernel_ns value.
+	 * or make a best guest using elapsed value.
 	 */
-	if (data == kvm->arch.last_tsc_write && elapsed < 5ULL * NSEC_PER_SEC) {
+	if (sdiff < nsec_to_cycles(5ULL * NSEC_PER_SEC) &&
+	    elapsed < 5ULL * NSEC_PER_SEC) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.last_tsc_offset;
 			pr_debug("kvm: matched tsc offset for %llu\n", data);