author    Zachary Amsden <zamsden@gmail.com>    2012-02-03 12:43:57 -0500
committer Avi Kivity <avi@redhat.com>           2012-03-08 07:10:09 -0500
commit    e26101b116a6235bcd80b3a4c38c9fe91286cd79 (patch)
tree      82c133ffb2c8877f675af519a2ffd4c55820c7e9 /arch
parent    0dd6a6edb0124e6c71931ff575b18e15ed6e8603 (diff)
KVM: Track TSC synchronization in generations
This allows us to track the original nanosecond and counter values at each
phase of TSC writing by the guest.  This gets us perfect offset matching for
stable TSC systems, and perfect software-computed TSC matching for machines
with an unstable TSC.

Signed-off-by: Zachary Amsden <zamsden@gmail.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
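The matching rule the patch builds on can be read from the kvm_write_tsc()
hunks below: a write extends the current generation only if it arrives at the
same virtual TSC frequency and lands, to within one second, where the guest's
counter should have advanced. The following is a minimal standalone sketch of
that rule, not the kernel code itself: struct tsc_gen and tsc_write_matches()
are hypothetical names, and overflow handling is elided for clarity.

#include <stdbool.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000LL

/* Hypothetical record of the first write in the open generation. */
struct tsc_gen {
	uint64_t nsec;   /* host nanosecond timestamp of that write */
	uint64_t write;  /* guest TSC value written */
	uint32_t khz;    /* virtual TSC frequency at the time */
};

/*
 * A later write (ns, data) at frequency khz matches the generation if
 * the cycle delta, converted to nanoseconds at that frequency, agrees
 * with the elapsed host time to within NSEC_PER_SEC.
 */
static bool tsc_write_matches(const struct tsc_gen *g, uint32_t khz,
			      uint64_t ns, uint64_t data)
{
	int64_t elapsed = (int64_t)(ns - g->nsec);
	int64_t nsdiff = (int64_t)(data - g->write) * 1000000 / khz;

	nsdiff -= elapsed;
	if (nsdiff < 0)
		nsdiff = -nsdiff;
	return khz == g->khz && nsdiff < NSEC_PER_SEC;
}

On a match, a stable-TSC host reuses the generation's recorded offset
verbatim; an unstable host recomputes the offset but keeps the generation's
(nsec, write) base for compute_guest_tsc(). An unmatched write bumps
cur_tsc_generation and records a new base, as the diff below shows.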
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/include/asm/kvm_host.h | 10
-rw-r--r--   arch/x86/kvm/x86.c              | 41
2 files changed, 40 insertions(+), 11 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 4fbeb84b181..c24125cd0c6 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -420,10 +420,11 @@ struct kvm_vcpu_arch {
 
 	u64 last_guest_tsc;
 	u64 last_kernel_ns;
-	u64 last_tsc_nsec;
-	u64 last_tsc_write;
 	u64 last_host_tsc;
 	u64 tsc_offset_adjustment;
+	u64 this_tsc_nsec;
+	u64 this_tsc_write;
+	u8  this_tsc_generation;
 	bool tsc_catchup;
 	bool tsc_always_catchup;
 	s8 virtual_tsc_shift;
@@ -513,9 +514,12 @@ struct kvm_arch {
 	s64 kvmclock_offset;
 	raw_spinlock_t tsc_write_lock;
 	u64 last_tsc_nsec;
-	u64 last_tsc_offset;
 	u64 last_tsc_write;
 	u32 last_tsc_khz;
+	u64 cur_tsc_nsec;
+	u64 cur_tsc_write;
+	u64 cur_tsc_offset;
+	u8  cur_tsc_generation;
 
 	struct kvm_xen_hvm_config xen_hvm_config;
 
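For reference, the TSC-tracking fields after this patch, reconstructed from
the two hunks above with the surrounding fields elided:

/* arch/x86/include/asm/kvm_host.h (excerpt, post-patch) */
struct kvm_vcpu_arch {
	/* ... */
	u64 this_tsc_nsec;       /* ns base of the generation this vCPU synced to */
	u64 this_tsc_write;      /* guest TSC base of that generation */
	u8  this_tsc_generation; /* id of the VM-wide generation copied */
	/* ... */
};

struct kvm_arch {
	/* ... */
	u64 cur_tsc_nsec;        /* host ns of the first write in the open generation */
	u64 cur_tsc_write;       /* guest TSC value of that first write */
	u64 cur_tsc_offset;      /* hardware TSC offset computed for it */
	u8  cur_tsc_generation;  /* bumped whenever an unmatched write arrives */
	/* ... */
};

The split matters because the VM-wide cur_* record can move on (another vCPU
may open a new generation) while each vCPU keeps the this_* base it actually
synchronized to, so its software-computed TSC stays consistent.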
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4e9bd23d522..e86f9b22eac 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1013,10 +1013,10 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
 
 static u64 compute_guest_tsc(struct kvm_vcpu *vcpu, s64 kernel_ns)
 {
-	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.last_tsc_nsec,
+	u64 tsc = pvclock_scale_delta(kernel_ns-vcpu->arch.this_tsc_nsec,
				      vcpu->arch.virtual_tsc_mult,
				      vcpu->arch.virtual_tsc_shift);
-	tsc += vcpu->arch.last_tsc_write;
+	tsc += vcpu->arch.this_tsc_write;
 	return tsc;
 }
 
@@ -1059,7 +1059,7 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 	if (nsdiff < NSEC_PER_SEC &&
 	    vcpu->arch.virtual_tsc_khz == kvm->arch.last_tsc_khz) {
 		if (!check_tsc_unstable()) {
-			offset = kvm->arch.last_tsc_offset;
+			offset = kvm->arch.cur_tsc_offset;
 			pr_debug("kvm: matched tsc offset for %llu\n", data);
 		} else {
 			u64 delta = nsec_to_cycles(vcpu, elapsed);
@@ -1067,20 +1067,45 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, u64 data)
 			offset = kvm_x86_ops->compute_tsc_offset(vcpu, data);
 			pr_debug("kvm: adjusted tsc offset by %llu\n", delta);
 		}
+	} else {
+		/*
+		 * We split periods of matched TSC writes into generations.
+		 * For each generation, we track the original measured
+		 * nanosecond time, offset, and write, so if TSCs are in
+		 * sync, we can match exact offset, and if not, we can match
+		 * exact software computation in compute_guest_tsc().
+		 *
+		 * These values are tracked in kvm->arch.cur_xxx variables.
+		 */
+		kvm->arch.cur_tsc_generation++;
+		kvm->arch.cur_tsc_nsec = ns;
+		kvm->arch.cur_tsc_write = data;
+		kvm->arch.cur_tsc_offset = offset;
+		pr_debug("kvm: new tsc generation %u, clock %llu\n",
+			 kvm->arch.cur_tsc_generation, data);
 	}
+
+	/*
+	 * We also track the most recent recorded KHZ, write and time to
+	 * allow the matching interval to be extended at each write.
+	 */
 	kvm->arch.last_tsc_nsec = ns;
 	kvm->arch.last_tsc_write = data;
-	kvm->arch.last_tsc_offset = offset;
 	kvm->arch.last_tsc_khz = vcpu->arch.virtual_tsc_khz;
-	kvm_x86_ops->write_tsc_offset(vcpu, offset);
-	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 
 	/* Reset of TSC must disable overshoot protection below */
 	vcpu->arch.hv_clock.tsc_timestamp = 0;
-	vcpu->arch.last_tsc_write = data;
-	vcpu->arch.last_tsc_nsec = ns;
 	vcpu->arch.last_guest_tsc = data;
+
+	/* Keep track of which generation this VCPU has synchronized to */
+	vcpu->arch.this_tsc_generation = kvm->arch.cur_tsc_generation;
+	vcpu->arch.this_tsc_nsec = kvm->arch.cur_tsc_nsec;
+	vcpu->arch.this_tsc_write = kvm->arch.cur_tsc_write;
+
+	kvm_x86_ops->write_tsc_offset(vcpu, offset);
+	raw_spin_unlock_irqrestore(&kvm->arch.tsc_write_lock, flags);
 }
+
 EXPORT_SYMBOL_GPL(kvm_write_tsc);
 
 static int kvm_guest_time_update(struct kvm_vcpu *v)
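
On unstable-TSC hosts the matched path relies on the software computation in
compute_guest_tsc(), changed in the first x86.c hunk above. Below is a
minimal standalone model of that arithmetic, assuming the pvclock-style
scaling semantics of pvclock_scale_delta() (a power-of-two pre-shift, then a
32.32 fixed-point multiply); scale_delta() and software_guest_tsc() are
illustrative stand-ins, and the in-kernel version avoids __int128 on 32-bit
builds.

#include <stdint.h>

/* Shift delta by a power of two, then apply a 32.32 fixed-point
 * multiplier -- the semantics of pvclock_scale_delta(). */
static uint64_t scale_delta(uint64_t delta, uint32_t mul_frac, int shift)
{
	if (shift < 0)
		delta >>= -shift;
	else
		delta <<= shift;
	return (uint64_t)(((unsigned __int128)delta * mul_frac) >> 32);
}

/* Software-computed guest TSC: the generation's base value plus the
 * host nanoseconds since that base, converted to guest cycles at the
 * virtual TSC rate.  virtual_tsc_mult/virtual_tsc_shift are the
 * precomputed scaling factors for virtual_tsc_khz. */
static uint64_t software_guest_tsc(uint64_t kernel_ns,
				   uint64_t this_tsc_nsec,
				   uint64_t this_tsc_write,
				   uint32_t virtual_tsc_mult,
				   int virtual_tsc_shift)
{
	return this_tsc_write + scale_delta(kernel_ns - this_tsc_nsec,
					    virtual_tsc_mult,
					    virtual_tsc_shift);
}

Because every vCPU in a generation computes from the same (this_tsc_nsec,
this_tsc_write) base, two vCPUs reading at the same kernel_ns get the same
answer -- the "perfect software computed TSC matching" the commit message
claims for machines with an unstable TSC.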