Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 12
1 files changed, 12 insertions, 0 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 10b22619e7ec..afc1f06907ce 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1766,12 +1766,24 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz)
 static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
 	vmcs_write64(TSC_OFFSET, offset);
+	if (is_guest_mode(vcpu))
+		/*
+		 * We're here if L1 chose not to trap the TSC MSR. Since
+		 * prepare_vmcs12() does not copy tsc_offset, we need to also
+		 * set the vmcs12 field here.
+		 */
+		get_vmcs12(vcpu)->tsc_offset = offset -
+			to_vmx(vcpu)->nested.vmcs01_tsc_offset;
 }
 
 static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment)
 {
 	u64 offset = vmcs_read64(TSC_OFFSET);
 	vmcs_write64(TSC_OFFSET, offset + adjustment);
+	if (is_guest_mode(vcpu)) {
+		/* Even when running L2, the adjustment needs to apply to L1 */
+		to_vmx(vcpu)->nested.vmcs01_tsc_offset += adjustment;
+	}
 }
 
 static u64 vmx_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
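The two hunks above rely on one invariant: while L2 is running, the TSC offset in the active VMCS is the sum of vmcs01_tsc_offset (the offset L0 programmed for L1) and vmcs12->tsc_offset (the offset L1 requested for L2). The standalone sketch below replays that arithmetic outside of KVM; the struct and function names (tsc_state, write_tsc_offset_guest_mode, adjust_tsc_offset_guest_mode) are made up for illustration and assume the guest-mode path only.

/*
 * Standalone sketch (not KVM code) of the offset bookkeeping the patch
 * performs. Only the relationship between the three offsets mirrors the
 * diff above; everything else is hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

struct tsc_state {
	uint64_t vmcs01_tsc_offset;  /* offset L0 chose for L1 */
	uint64_t vmcs12_tsc_offset;  /* offset L1 chose for L2 */
	uint64_t hw_tsc_offset;      /* what ends up in the active VMCS */
};

/* While L2 runs, a new total offset must also be reflected in vmcs12,
 * as vmx_write_tsc_offset() does in the first hunk. */
static void write_tsc_offset_guest_mode(struct tsc_state *s, uint64_t offset)
{
	s->hw_tsc_offset = offset;
	s->vmcs12_tsc_offset = offset - s->vmcs01_tsc_offset;
}

/* An adjustment made while L2 runs must also reach L1's saved offset,
 * as vmx_adjust_tsc_offset() does in the second hunk. */
static void adjust_tsc_offset_guest_mode(struct tsc_state *s, int64_t adj)
{
	s->hw_tsc_offset += adj;
	s->vmcs01_tsc_offset += adj;
}

int main(void)
{
	struct tsc_state s = { .vmcs01_tsc_offset = 1000 };

	write_tsc_offset_guest_mode(&s, 1500);
	assert(s.vmcs12_tsc_offset == 500);   /* L2's offset on top of L1's */

	adjust_tsc_offset_guest_mode(&s, 25);
	assert(s.vmcs01_tsc_offset == 1025);  /* L1 keeps the adjustment */
	assert(s.hw_tsc_offset == 1525);

	printf("vmcs01=%llu vmcs12=%llu hw=%llu\n",
	       (unsigned long long)s.vmcs01_tsc_offset,
	       (unsigned long long)s.vmcs12_tsc_offset,
	       (unsigned long long)s.hw_tsc_offset);
	return 0;
}

The second helper mirrors the reasoning of the comment in vmx_adjust_tsc_offset(): because the adjustment lands in the hardware offset while L2 runs, it has to be folded into the saved vmcs01_tsc_offset as well, or L1 would lose it on the next nested VM-exit.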