author     Zachary Amsden <zamsden@gmail.com>      2012-02-03 12:43:51 -0500
committer  Avi Kivity <avi@redhat.com>             2012-03-08 07:10:03 -0500
commit     5d3cb0f6a8e3af018a522ae8d36f8f7d2511b5d8 (patch)
tree       4299c77a00d893615ded5f31c6112578edfb3dc3 /arch
parent     cc578287e3224d0da196cc1d226bdae6b068faa7 (diff)
KVM: Improve TSC offset matching
There are a few improvements that can be made to the TSC offset matching code. First, we don't need to call the 128-bit multiply (especially on a constant number), the code works much nicer to do computation in nanosecond units.

Second, the way everything is setup with software TSC rate scaling, we currently have per-cpu rates. Obviously this isn't too desirable to use in practice, but if for some reason we do change the rate of all VCPUs at runtime, then reset the TSCs, we will only want to match offsets for VCPUs running at the same rate.

Finally, for the case where we have an unstable host TSC, but rate scaling is being done in hardware, we should call the platform code to compute the TSC offset, so the math is reorganized to recompute the base instead, then transform the base into an offset using the existing API.

[avi: fix 64-bit division on i386]

Signed-off-by: Zachary Amsden <zamsden@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>

KVM: Fix 64-bit division in kvm_write_tsc()

Breaks i386 build.

Signed-off-by: Avi Kivity <avi@redhat.com>
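To make the new heuristic concrete, here is a minimal standalone sketch in userspace C (not kernel code): the helper names tsc_delta_to_ns and tsc_write_matches are made up for this example, the cycle-to-nanosecond conversion is written out directly rather than copied from the patch, and the one-second window plus same-rate check mirror the condition added to kvm_write_tsc().

#include <stdint.h>
#include <stdio.h>

#define NSEC_PER_SEC 1000000000LL

/*
 * Convert a signed guest TSC cycle delta to nanoseconds, given the
 * virtual TSC rate in kHz (as vcpu->arch.virtual_tsc_khz stores it).
 */
static int64_t tsc_delta_to_ns(int64_t cycles, uint32_t tsc_khz)
{
	return cycles * 1000000 / tsc_khz;
}

/*
 * Decide whether a TSC write should be matched against the previous one:
 * the written value must land within one second of virtual cycle time of
 * the last write once elapsed real time is subtracted, and both writes
 * must have been made at the same virtual TSC rate.
 */
static int tsc_write_matches(uint64_t data, uint64_t last_write,
			     int64_t elapsed_ns,
			     uint32_t tsc_khz, uint32_t last_tsc_khz)
{
	int64_t nsdiff = tsc_delta_to_ns((int64_t)(data - last_write), tsc_khz);

	nsdiff -= elapsed_ns;
	if (nsdiff < 0)
		nsdiff = -nsdiff;

	return nsdiff < NSEC_PER_SEC && tsc_khz == last_tsc_khz;
}

int main(void)
{
	/* 2.5 GHz virtual TSC; second write arrives ~10 ms of real time later. */
	uint32_t khz = 2500000;
	uint64_t last = 1000000000ULL;
	uint64_t data = last + 25000000ULL;	/* ~10 ms worth of cycles */

	printf("matches: %d\n",
	       tsc_write_matches(data, last, 10000000LL, khz, khz));
	return 0;
}

The point of comparing in time units is that a single threshold works regardless of the guest's TSC frequency, which is what removes the need for the 128-bit nsec_to_cycles() multiply in the comparison path.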
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  1
-rw-r--r--  arch/x86/kvm/x86.c              | 44
2 files changed, 30 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ddebbe01fff9..8a34fca6c572 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -513,6 +513,7 @@ struct kvm_arch {
 	u64 last_tsc_nsec;
 	u64 last_tsc_offset;
 	u64 last_tsc_write;
+	u32 last_tsc_khz;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 41bb90acb238..4390f42b371f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1025,33 +1025,46 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	struct kvm *kvm = vcpu->kvm;
 	u64 offset, ns, elapsed;
 	unsigned long flags;
-	s64 sdiff;
+	s64 nsdiff;
 
 	raw_spin_lock_irqsave(&kvm->arch.tsc_write_lock, flags);
 	offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
 	ns = get_kernel_ns();
 	elapsed = ns - kvm->arch.last_tsc_nsec;
-	sdiff = data - kvm->arch.last_tsc_write;
-	if (sdiff < 0)
-		sdiff = -sdiff;
+
+	/* n.b - signed multiplication and division required */
+	nsdiff = data - kvm->arch.last_tsc_write;
+#ifdef CONFIG_X86_64
+	nsdiff = (nsdiff * 1000) / vcpu->arch.virtual_tsc_khz;
+#else
+	/* do_div() only does unsigned */
+	asm("idivl %2; xor %%edx, %%edx"
+	    : "=A"(nsdiff)
+	    : "A"(nsdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+#endif
+	nsdiff -= elapsed;
+	if (nsdiff < 0)
+		nsdiff = -nsdiff;
 
 	/*
-	 * Special case: close write to TSC within 5 seconds of
-	 * another CPU is interpreted as an attempt to synchronize
-	 * The 5 seconds is to accommodate host load / swapping as
-	 * well as any reset of TSC during the boot process.
-	 *
-	 * In that case, for a reliable TSC, we can match TSC offsets,
-	 * or make a best guest using elapsed value.
-	 */
-	if (sdiff < nsec_to_cycles(vcpu, 5ULL * NSEC_PER_SEC) &&
-	    elapsed < 5ULL * NSEC_PER_SEC) {
+	 * Special case: TSC write with a small delta (1 second) of virtual
+	 * cycle time against real time is interpreted as an attempt to
+	 * synchronize the CPU.
+	 *
+	 * For a reliable TSC, we can match TSC offsets, and for an unstable
+	 * TSC, we add elapsed time in this computation.  We could let the
+	 * compensation code attempt to catch up if we fall behind, but
+	 * it's better to try to match offsets from the beginning.
+	 */
+	if (nsdiff < NSEC_PER_SEC &&
+	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
 		if (!check_tsc_unstable()) {
 			offset = kvm->arch.last_tsc_offset;
 			pr_debug("kvm: matched tsc offset for %llu\n", data);
 		} else {
 			u64 delta = nsec_to_cycles(vcpu, elapsed);
-			offset += delta;
+			data += delta;
+			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
 		ns = kvm->arch.last_tsc_nsec;
@@ -1059,6 +1072,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	kvm->arch.last_tsc_nsec = ns;
 	kvm->arch.last_tsc_write = data;
 	kvm->arch.last_tsc_offset = offset;
+	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
 	kvm_x86_ops->write_tsc_offset(vcpu, offset);
 	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
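One non-obvious detail in the i386 branch above: the kernel's do_div() helper only performs unsigned 64-by-32 division, and an open-coded s64 / u32 on 32-bit x86 would pull in a libgcc division helper the kernel does not provide, which is why the patch drops to an idivl sequence (and why the i386 build fix from Avi was folded in). Below is a rough userspace model of that scaled signed division, assuming the quotient fits in 32 bits; the name scaled_signed_div is made up for this sketch.

#include <stdint.h>
#include <stdio.h>

/*
 * Rough model of the division done by the i386 branch of the patch:
 * a signed 64-bit dividend divided by a 32-bit rate.  idivl produces a
 * 32-bit quotient, so this model also assumes the result fits in 32 bits
 * (reasonable here, since the deltas of interest are about a second).
 * Note: the kernel asm clears %edx after the divide; this model simply
 * returns the signed quotient.
 */
static int64_t scaled_signed_div(int64_t delta, uint32_t tsc_khz)
{
	int32_t quotient = (int32_t)((delta * 1000) / (int32_t)tsc_khz);

	return quotient;
}

int main(void)
{
	/* 25,000,000 cycles at a 2,500,000 kHz rate scale to 10,000 units. */
	printf("%lld\n", (long long)scaled_signed_div(25000000, 2500000));
	printf("%lld\n", (long long)scaled_signed_div(-25000000, 2500000));
	return 0;
}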