Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  5
-rw-r--r--  arch/x86/kvm/svm.c              | 26
-rw-r--r--  arch/x86/kvm/vmx.c              | 22
-rw-r--r--  arch/x86/kvm/x86.c              | 17
4 files changed, 35 insertions(+), 35 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 57b4394491ec..5ab1c3fb34ef 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -255,7 +255,6 @@ struct kvm_mmu {
 };
 
 struct kvm_vcpu_arch {
-	u64 host_tsc;
 	/*
 	 * rip and regs accesses must go through
 	 * kvm_{register,rip}_{read,write} functions.
@@ -336,9 +335,10 @@ struct kvm_vcpu_arch {
 
 	gpa_t time;
 	struct pvclock_vcpu_time_info hv_clock;
-	unsigned int hv_clock_tsc_khz;
+	unsigned int hw_tsc_khz;
 	unsigned int time_offset;
 	struct page *time_page;
+	u64 last_host_tsc;
 
 	bool nmi_pending;
 	bool nmi_injected;
@@ -520,6 +520,7 @@ struct kvm_x86_ops {
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
+	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
 
 	void (*set_supported_cpuid)(u32 func, struct kvm_cpuid_entry2 *entry);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ea41c551fa44..ff28f6521065 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -715,6 +715,15 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 }
 
+static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	svm->vmcb->control.tsc_offset += adjustment;
+	if (is_nested(svm))
+		svm->nested.hsave->control.tsc_offset += adjustment;
+}
+
 static void init_vmcb(struct vcpu_svm *svm)
 {
 	struct vmcb_control_area *control = &svm->vmcb->control;
@@ -961,20 +970,6 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	int i;
 
 	if (unlikely(cpu != vcpu->cpu)) {
-		u64 delta;
-
-		if (check_tsc_unstable()) {
-			/*
-			 * Make sure that the guest sees a monotonically
-			 * increasing TSC.
-			 */
-			delta = vcpu->arch.host_tsc - native_read_tsc();
-			svm->vmcb->control.tsc_offset += delta;
-			if (is_nested(svm))
-				svm->nested.hsave->control.tsc_offset += delta;
-		}
-		vcpu->cpu = cpu;
-		kvm_migrate_timers(vcpu);
 		svm->asid_generation = 0;
 	}
 
@@ -990,8 +985,6 @@ static void svm_vcpu_put(struct kvm_vcpu *vcpu)
 	++vcpu->stat.host_state_reload;
 	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
 		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
-
-	vcpu->arch.host_tsc = native_read_tsc();
 }
 
 static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
@@ -3553,6 +3546,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.has_wbinvd_exit = svm_has_wbinvd_exit,
 
 	.write_tsc_offset = svm_write_tsc_offset,
+	.adjust_tsc_offset = svm_adjust_tsc_offset,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 138746d3afe9..275a81d571cf 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -505,7 +505,6 @@ static void __vcpu_clear(void *arg)
 	vmcs_clear(vmx->vmcs);
 	if (per_cpu(current_vmcs, cpu) == vmx->vmcs)
 		per_cpu(current_vmcs, cpu) = NULL;
-	rdtscll(vmx->vcpu.arch.host_tsc);
 	list_del(&vmx->local_vcpus_link);
 	vmx->vcpu.cpu = -1;
 	vmx->launched = 0;
@@ -881,7 +880,6 @@ static void vmx_load_host_state(struct vcpu_vmx *vmx)
 static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u64 tsc_this, delta, new_offset;
 	u64 phys_addr = __pa(per_cpu(vmxarea, cpu));
 
 	if (!vmm_exclusive)
@@ -898,14 +896,12 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		struct desc_ptr *gdt = &__get_cpu_var(host_gdt);
 		unsigned long sysenter_esp;
 
-		kvm_migrate_timers(vcpu);
 		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 		local_irq_disable();
 		list_add(&vmx->local_vcpus_link,
 			 &per_cpu(vcpus_on_cpu, cpu));
 		local_irq_enable();
 
-		vcpu->cpu = cpu;
 		/*
 		 * Linux uses per-cpu TSS and GDT, so set these when switching
 		 * processors.
@@ -915,16 +911,6 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 
 		rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
 		vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
-
-		/*
-		 * Make sure the time stamp counter is monotonous.
-		 */
-		rdtscll(tsc_this);
-		if (tsc_this < vcpu->arch.host_tsc) {
-			delta = vcpu->arch.host_tsc - tsc_this;
-			new_offset = vmcs_read64(TSC_OFFSET) + delta;
-			vmcs_write64(TSC_OFFSET, new_offset);
-		}
 	}
 }
930 916
@@ -1153,6 +1139,12 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	vmcs_write64(TSC_OFFSET, offset);
 }
 
+static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+	u64 offset = vmcs_read64(TSC_OFFSET);
+	vmcs_write64(TSC_OFFSET, offset + adjustment);
+}
+
 /*
  * Reads an msr value (of 'msr_index') into 'pdata'.
  * Returns 0 on success, non-0 otherwise.
@@ -4108,6 +4100,7 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 
 	cpu = get_cpu();
 	vmx_vcpu_load(&vmx->vcpu, cpu);
+	vmx->vcpu.cpu = cpu;
 	err = vmx_vcpu_setup(vmx);
 	vmx_vcpu_put(&vmx->vcpu);
 	put_cpu();
@@ -4347,6 +4340,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.has_wbinvd_exit = cpu_has_vmx_wbinvd_exit,
 
 	.write_tsc_offset = vmx_write_tsc_offset,
+	.adjust_tsc_offset = vmx_adjust_tsc_offset,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a8dee58e8716..468fafaed1ae 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -973,9 +973,9 @@ static int kvm_write_guest_time(struct kvm_vcpu *v)
 		return 1;
 	}
 
-	if (unlikely(vcpu->hv_clock_tsc_khz != this_tsc_khz)) {
+	if (unlikely(vcpu->hw_tsc_khz != this_tsc_khz)) {
 		kvm_set_time_scale(this_tsc_khz, &vcpu->hv_clock);
-		vcpu->hv_clock_tsc_khz = this_tsc_khz;
+		vcpu->hw_tsc_khz = this_tsc_khz;
 	}
 
 	/* With all the info we got, fill in the values */
@@ -1866,13 +1866,24 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	}
 
 	kvm_x86_ops->vcpu_load(vcpu, cpu);
-	kvm_request_guest_time_update(vcpu);
+	if (unlikely(vcpu->cpu != cpu)) {
+		/* Make sure TSC doesn't go backwards */
+		s64 tsc_delta = !vcpu->arch.last_host_tsc ? 0 :
+				native_read_tsc() - vcpu->arch.last_host_tsc;
+		if (tsc_delta < 0)
+			mark_tsc_unstable("KVM discovered backwards TSC");
+		if (check_tsc_unstable())
+			kvm_x86_ops->adjust_tsc_offset(vcpu, -tsc_delta);
+		kvm_migrate_timers(vcpu);
+		vcpu->cpu = cpu;
+	}
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
 	kvm_x86_ops->vcpu_put(vcpu);
 	kvm_put_guest_fpu(vcpu);
+	vcpu->arch.last_host_tsc = native_read_tsc();
 }
 
 static int is_efer_nx(void)