aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLiran Alon <liran.alon@oracle.com>2019-03-25 15:09:17 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2019-04-16 09:37:34 -0400
commite51bfdb68725dc052d16241ace40ea3140f938aa (patch)
tree3413e07454f91e271d62cc6a71af4fe9ffc6f577
parent672ff6cff80ca43bf3258410d2b887036969df5f (diff)
KVM: nVMX: Expose RDPMC-exiting only when guest supports PMU
Issue was discovered when running kvm-unit-tests on KVM running as L1 on top of Hyper-V. When the vmx_instruction_intercept unit-test attempts to run RDPMC to test RDPMC-exiting, it is intercepted by L1 KVM, whose EXIT_REASON_RDPMC handler raises #GP because the vCPU exposed by Hyper-V doesn't support PMU — instead of the exit being reflected to the unit-test as EXIT_REASON_RDPMC, as it expects. The reason the vmx_instruction_intercept unit-test attempts to run RDPMC even though Hyper-V doesn't support PMU is that L1 exposes RDPMC-exiting support to L2 — support which it is reasonable to assume exists only when the CPU supports PMU to begin with. The above issue can easily be simulated by modifying the vmx_instruction_intercept config in x86/unittests.cfg to run QEMU with "-cpu host,+vmx,-pmu" and running the unit-test. To handle the issue, change KVM to expose RDPMC-exiting only when the guest supports PMU. Reported-by: Saar Amar <saaramar@microsoft.com> Reviewed-by: Mihai Carabas <mihai.carabas@oracle.com> Reviewed-by: Jim Mattson <jmattson@google.com> Signed-off-by: Liran Alon <liran.alon@oracle.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/vmx/vmx.c25
1 files changed, 25 insertions, 0 deletions
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 88060a621db2..5866e9e9f1e0 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -6856,6 +6856,30 @@ static void nested_vmx_entry_exit_ctls_update(struct kvm_vcpu *vcpu)
6856 } 6856 }
6857} 6857}
6858 6858
6859static bool guest_cpuid_has_pmu(struct kvm_vcpu *vcpu)
6860{
6861 struct kvm_cpuid_entry2 *entry;
6862 union cpuid10_eax eax;
6863
6864 entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
6865 if (!entry)
6866 return false;
6867
6868 eax.full = entry->eax;
6869 return (eax.split.version_id > 0);
6870}
6871
6872static void nested_vmx_procbased_ctls_update(struct kvm_vcpu *vcpu)
6873{
6874 struct vcpu_vmx *vmx = to_vmx(vcpu);
6875 bool pmu_enabled = guest_cpuid_has_pmu(vcpu);
6876
6877 if (pmu_enabled)
6878 vmx->nested.msrs.procbased_ctls_high |= CPU_BASED_RDPMC_EXITING;
6879 else
6880 vmx->nested.msrs.procbased_ctls_high &= ~CPU_BASED_RDPMC_EXITING;
6881}
6882
6859static void update_intel_pt_cfg(struct kvm_vcpu *vcpu) 6883static void update_intel_pt_cfg(struct kvm_vcpu *vcpu)
6860{ 6884{
6861 struct vcpu_vmx *vmx = to_vmx(vcpu); 6885 struct vcpu_vmx *vmx = to_vmx(vcpu);
@@ -6944,6 +6968,7 @@ static void vmx_cpuid_update(struct kvm_vcpu *vcpu)
6944 if (nested_vmx_allowed(vcpu)) { 6968 if (nested_vmx_allowed(vcpu)) {
6945 nested_vmx_cr_fixed1_bits_update(vcpu); 6969 nested_vmx_cr_fixed1_bits_update(vcpu);
6946 nested_vmx_entry_exit_ctls_update(vcpu); 6970 nested_vmx_entry_exit_ctls_update(vcpu);
6971 nested_vmx_procbased_ctls_update(vcpu);
6947 } 6972 }
6948 6973
6949 if (boot_cpu_has(X86_FEATURE_INTEL_PT) && 6974 if (boot_cpu_has(X86_FEATURE_INTEL_PT) &&