diff options
author | Nadav Har'El <nyh@il.ibm.com> | 2011-08-02 08:54:52 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2011-09-25 12:18:02 -0400 |
commit | 27fc51b21cea3386a6672699631975d1097f9d39 (patch) | |
tree | d2e6f68925ccf067acdb69b6f218006344c1bf0a | |
parent | d5c1785d2f3aabe284d91bc7fc8f0abc58525dc9 (diff) |
KVM: nVMX: Fix nested VMX TSC emulation
This patch fixes two corner cases in nested (L2) handling of TSC-related
issues:
1. Somewhat surprisingly, according to the Intel spec, if L1 allows WRMSR to
the TSC MSR without an exit, then this should set L1's TSC value itself - not
offset by vmcs12.TSC_OFFSET (like was wrongly done in the previous code).
2. Allow L1 to disable the TSC_OFFSETING control, and then correctly ignore
the vmcs12.TSC_OFFSET.
Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r-- | arch/x86/kvm/vmx.c | 31 |
1 files changed, 21 insertions, 10 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 97b64543d4ed..5e8d411b0a81 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -1777,15 +1777,23 @@ static void vmx_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz) | |||
1777 | */ | 1777 | */ |
1778 | static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) | 1778 | static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset) |
1779 | { | 1779 | { |
1780 | vmcs_write64(TSC_OFFSET, offset); | 1780 | if (is_guest_mode(vcpu)) { |
1781 | if (is_guest_mode(vcpu)) | ||
1782 | /* | 1781 | /* |
1783 | * We're here if L1 chose not to trap the TSC MSR. Since | 1782 | * We're here if L1 chose not to trap WRMSR to TSC. According |
1784 | * prepare_vmcs12() does not copy tsc_offset, we need to also | 1783 | * to the spec, this should set L1's TSC; The offset that L1 |
1785 | * set the vmcs12 field here. | 1784 | * set for L2 remains unchanged, and still needs to be added |
1785 | * to the newly set TSC to get L2's TSC. | ||
1786 | */ | 1786 | */ |
1787 | get_vmcs12(vcpu)->tsc_offset = offset - | 1787 | struct vmcs12 *vmcs12; |
1788 | to_vmx(vcpu)->nested.vmcs01_tsc_offset; | 1788 | to_vmx(vcpu)->nested.vmcs01_tsc_offset = offset; |
1789 | /* recalculate vmcs02.TSC_OFFSET: */ | ||
1790 | vmcs12 = get_vmcs12(vcpu); | ||
1791 | vmcs_write64(TSC_OFFSET, offset + | ||
1792 | (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ? | ||
1793 | vmcs12->tsc_offset : 0)); | ||
1794 | } else { | ||
1795 | vmcs_write64(TSC_OFFSET, offset); | ||
1796 | } | ||
1789 | } | 1797 | } |
1790 | 1798 | ||
1791 | static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment) | 1799 | static void vmx_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment) |
@@ -6485,8 +6493,11 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) | |||
6485 | 6493 | ||
6486 | set_cr4_guest_host_mask(vmx); | 6494 | set_cr4_guest_host_mask(vmx); |
6487 | 6495 | ||
6488 | vmcs_write64(TSC_OFFSET, | 6496 | if (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING) |
6489 | vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); | 6497 | vmcs_write64(TSC_OFFSET, |
6498 | vmx->nested.vmcs01_tsc_offset + vmcs12->tsc_offset); | ||
6499 | else | ||
6500 | vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); | ||
6490 | 6501 | ||
6491 | if (enable_vpid) { | 6502 | if (enable_vpid) { |
6492 | /* | 6503 | /* |
@@ -6893,7 +6904,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu) | |||
6893 | 6904 | ||
6894 | load_vmcs12_host_state(vcpu, vmcs12); | 6905 | load_vmcs12_host_state(vcpu, vmcs12); |
6895 | 6906 | ||
6896 | /* Update TSC_OFFSET if vmx_adjust_tsc_offset() was used while L2 ran */ | 6907 | /* Update TSC_OFFSET if TSC was changed while L2 ran */ |
6897 | vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); | 6908 | vmcs_write64(TSC_OFFSET, vmx->nested.vmcs01_tsc_offset); |
6898 | 6909 | ||
6899 | /* This is needed for same reason as it was needed in prepare_vmcs02 */ | 6910 | /* This is needed for same reason as it was needed in prepare_vmcs02 */ |