about summary refs log tree commit diff stats
diff options
context:
space:
mode:
-rw-r--r--	arch/x86/include/asm/kvm_host.h	13
-rw-r--r--	arch/x86/kvm/svm.c	6
-rw-r--r--	arch/x86/kvm/vmx.c	2
-rw-r--r--	arch/x86/kvm/x86.c	2
4 files changed, 19 insertions, 4 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b23682900f41..dd439f13df84 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -646,7 +646,7 @@ struct kvm_x86_ops {
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
 	int (*get_lpage_level)(void);
 	bool (*rdtscp_supported)(void);
-	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment);
+	void (*adjust_tsc_offset)(struct kvm_vcpu *vcpu, s64 adjustment, bool host);
 
 	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
 
@@ -676,6 +676,17 @@ struct kvm_arch_async_pf {
 
 extern struct kvm_x86_ops *kvm_x86_ops;
 
+static inline void adjust_tsc_offset_guest(struct kvm_vcpu *vcpu,
+					   s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, false);
+}
+
+static inline void adjust_tsc_offset_host(struct kvm_vcpu *vcpu, s64 adjustment)
+{
+	kvm_x86_ops->adjust_tsc_offset(vcpu, adjustment, true);
+}
+
 int kvm_mmu_module_init(void);
 void kvm_mmu_module_exit(void);
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e12026e5244e..0b7690ee20bd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1016,10 +1016,14 @@ static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
 }
 
-static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	WARN_ON(adjustment < 0);
+	if (host)
+		adjustment = svm_scale_tsc(vcpu, adjustment);
+
 	svm->vmcb->control.tsc_offset += adjustment;
 	if (is_guest_mode(vcpu))
 		svm->nested.hsave->control.tsc_offset += adjustment;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e6bf61fa1c03..575fb742a6fc 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1856,7 +1856,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 	}
 }
 
-static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
+static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool host)
 {
 	u64 offset = vmcs_read64(TSC_OFFSET);
 	vmcs_write64(TSC_OFFSET, offset + adjustment);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 39a57dac884a..3b931302fa55 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1116,7 +1116,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 	if (vcpu->tsc_catchup) {
 		u64 tsc = compute_guest_tsc(v, kernel_ns);
 		if (tsc > tsc_timestamp) {
-			kvm_x86_ops->adjust_tsc_offset(v, tsc - tsc_timestamp);
+			adjust_tsc_offset_guest(v, tsc - tsc_timestamp);
 			tsc_timestamp = tsc;
 		}
 	}