path: root/arch/x86/kvm/x86.c
author    Zachary Amsden <zamsden@redhat.com>    2010-08-20 04:07:23 -0400
committer Avi Kivity <avi@redhat.com>            2010-10-24 04:51:23 -0400
commit    e48672fa25e879f7ae21785c7efd187738139593 (patch)
tree      bec27bad04ba6e933e72439cc565d2c752a31928 /arch/x86/kvm/x86.c
parent    6755bae8e69093b2994b6f29cd3eaecdf610374e (diff)
KVM: x86: Unify TSC logic
Move the TSC control logic from the vendor backends into x86.c by adding
adjust_tsc_offset to x86 ops. Now all TSC decisions can be done in one place.

Signed-off-by: Zachary Amsden <zamsden@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
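Note: the new hook is invoked as kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta) in the
kvm_arch_vcpu_load() hunk below, which implies a callback roughly like the one sketched here.
The vendor-side bodies are not shown on this page (the diff is limited to x86.c), so the
SVM-style implementation below is only an illustration of how a backend might apply the
adjustment to the TSC offset it already programs; the exact field accesses are assumptions,
not taken from this diff.

/* Sketch, not part of this diff: shape of the per-vendor hook implied by
 * the call site in kvm_arch_vcpu_load() below.
 */
struct kvm_x86_ops {
        /* ... existing callbacks ... */
        void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
};

/* Hypothetical SVM-style backend: fold the delta into the TSC offset the
 * backend already keeps in its control block.
 */
static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.tsc_offset += adjustment;
}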
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c | 17
1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a8dee58e8716..468fafaed1ae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -973,9 +973,9 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
                 return 1;
         }
 
-        if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
+        if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
                 kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
-                vcpu->hv_clock_tsc_khz = this_tsc_khz;
+                vcpu->hw_tsc_khz = this_tsc_khz;
         }
 
         /* With all the info we got, fill in the values */
@@ -1866,13 +1866,24 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
         }
 
         kvm_x86_ops->vcpu_load(vcpu, cpu);
-        kvm_request_guest_time_update(vcpu);
+        if (unlikely(vcpu->cpu != cpu)) {
+                /* Make sure TSC doesn't go backwards */
+                s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
+                                native_read_tsc() - vcpu->arch.last_host_tsc;
+                if (tsc_delta < 0)
+                        mark_tsc_unstable("KVM discovered backwards TSC");
+                if (check_tsc_unstable())
+                        kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
+                kvm_migrate_timers(vcpu);
+                vcpu->cpu = cpu;
+        }
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
         kvm_x86_ops->vcpu_put(vcpu);
         kvm_put_guest_fpu(vcpu);
+        vcpu->arch.last_host_tsc = native_read_tsc();
 }
 
 static int is_efer_nx(void)
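For reference, the effect of the kvm_arch_vcpu_load()/kvm_arch_vcpu_put() hunks above can be
modelled in a small user-space sketch. This is only an illustration of the arithmetic under the
usual guest_tsc = host_tsc + tsc_offset relation; the toy_* names are made up and nothing below
is kernel code. Adjusting the offset by -tsc_delta pins the guest-visible TSC to the value it
had when the vcpu was last put, so it cannot appear to run backwards after migrating to a CPU
whose TSC is behind.

#include <stdint.h>
#include <stdio.h>

/* Toy model of the compensation: guest TSC = host TSC + offset. */
struct toy_vcpu {
        int64_t  tsc_offset;    /* guest_tsc = host_tsc + tsc_offset */
        uint64_t last_host_tsc; /* host TSC sampled at vcpu_put time */
};

static uint64_t toy_guest_tsc(const struct toy_vcpu *v, uint64_t host_tsc)
{
        return host_tsc + (uint64_t)v->tsc_offset;
}

static void toy_vcpu_put(struct toy_vcpu *v, uint64_t host_tsc)
{
        v->last_host_tsc = host_tsc;
}

static void toy_vcpu_load(struct toy_vcpu *v, uint64_t new_host_tsc)
{
        int64_t tsc_delta = (int64_t)(new_host_tsc - v->last_host_tsc);

        /* offset -= delta, so new_host_tsc + offset == last_host_tsc + old offset */
        v->tsc_offset += -tsc_delta;
}

int main(void)
{
        struct toy_vcpu v = { .tsc_offset = 1000 };

        toy_vcpu_put(&v, 50000);   /* leave CPU 0 at TSC 50000 */
        toy_vcpu_load(&v, 20000);  /* arrive on CPU 1 whose TSC reads 20000 */

        /* Prints 51000 == 50000 + 1000: the guest TSC did not go backwards. */
        printf("guest tsc after migration: %llu\n",
               (unsigned long long)toy_guest_tsc(&v, 20000));
        return 0;
}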