about summary refs log tree commit diff stats
diff options
context:
space:
mode:
author    Paolo Bonzini <pbonzini@redhat.com>  2016-03-08 03:52:13 -0500
committer Paolo Bonzini <pbonzini@redhat.com>  2016-03-09 08:04:36 -0500
commit    a87036add09283e6c4f4103a15c596c67b86ab86 (patch)
tree      42ca5c20c06ae8a8f988192e44f3d3263fbabdf3
parent    ab92f30875a7ec3e84644a5494febd8901e66742 (diff)
KVM: x86: disable MPX if host did not enable MPX XSAVE features
When eager FPU is disabled, KVM will still see the MPX bit in CPUID and presumably the MPX vmentry and vmexit controls. However, it will not be able to expose the MPX XSAVE features to the guest, because the guest's accessible XSAVE features are always a subset of host_xcr0.

In this case, we should disable the MPX CPUID bit, the BNDCFGS MSR, and the MPX vmentry and vmexit controls for nested virtualization. It is then unnecessary to enable guest eager FPU if the guest has the MPX CPUID bit set.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--  arch/x86/kvm/cpuid.c | 13
-rw-r--r--  arch/x86/kvm/cpuid.h |  9
-rw-r--r--  arch/x86/kvm/vmx.c   | 13
3 files changed, 17 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 6525e926f566..fa241d4fda98 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -46,11 +46,18 @@ static u32 xstate_required_size(u64 xstate_bv, bool compacted)
46 return ret; 46 return ret;
47} 47}
48 48
49bool kvm_mpx_supported(void)
50{
51 return ((host_xcr0 & (XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR))
52 && kvm_x86_ops->mpx_supported());
53}
54EXPORT_SYMBOL_GPL(kvm_mpx_supported);
55
49u64 kvm_supported_xcr0(void) 56u64 kvm_supported_xcr0(void)
50{ 57{
51 u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0; 58 u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
52 59
53 if (!kvm_x86_ops->mpx_supported()) 60 if (!kvm_mpx_supported())
54 xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR); 61 xcr0 &= ~(XFEATURE_MASK_BNDREGS | XFEATURE_MASK_BNDCSR);
55 62
56 return xcr0; 63 return xcr0;
@@ -97,7 +104,7 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
97 if (best && (best->eax & (F(XSAVES) | F(XSAVEC)))) 104 if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
98 best->ebx = xstate_required_size(vcpu->arch.xcr0, true); 105 best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
99 106
100 vcpu->arch.eager_fpu = use_eager_fpu() || guest_cpuid_has_mpx(vcpu); 107 vcpu->arch.eager_fpu = use_eager_fpu();
101 if (vcpu->arch.eager_fpu) 108 if (vcpu->arch.eager_fpu)
102 kvm_x86_ops->fpu_activate(vcpu); 109 kvm_x86_ops->fpu_activate(vcpu);
103 110
@@ -295,7 +302,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
295#endif 302#endif
296 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0; 303 unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
297 unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0; 304 unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
298 unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0; 305 unsigned f_mpx = kvm_mpx_supported() ? F(MPX) : 0;
299 unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0; 306 unsigned f_xsaves = kvm_x86_ops->xsaves_supported() ? F(XSAVES) : 0;
300 307
301 /* cpuid 1.edx */ 308 /* cpuid 1.edx */
diff --git a/arch/x86/kvm/cpuid.h b/arch/x86/kvm/cpuid.h
index c8eda1498121..66a6581724ad 100644
--- a/arch/x86/kvm/cpuid.h
+++ b/arch/x86/kvm/cpuid.h
@@ -5,6 +5,7 @@
5#include <asm/cpu.h> 5#include <asm/cpu.h>
6 6
7int kvm_update_cpuid(struct kvm_vcpu *vcpu); 7int kvm_update_cpuid(struct kvm_vcpu *vcpu);
8bool kvm_mpx_supported(void);
8struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu, 9struct kvm_cpuid_entry2 *kvm_find_cpuid_entry(struct kvm_vcpu *vcpu,
9 u32 function, u32 index); 10 u32 function, u32 index);
10int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid, 11int kvm_dev_ioctl_get_cpuid(struct kvm_cpuid2 *cpuid,
@@ -135,14 +136,6 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
135 return best && (best->ebx & bit(X86_FEATURE_RTM)); 136 return best && (best->ebx & bit(X86_FEATURE_RTM));
136} 137}
137 138
138static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
139{
140 struct kvm_cpuid_entry2 *best;
141
142 best = kvm_find_cpuid_entry(vcpu, 7, 0);
143 return best && (best->ebx & bit(X86_FEATURE_MPX));
144}
145
146static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu) 139static inline bool guest_cpuid_has_pcommit(struct kvm_vcpu *vcpu)
147{ 140{
148 struct kvm_cpuid_entry2 *best; 141 struct kvm_cpuid_entry2 *best;
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 46154dac71e6..e512aa7ed874 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -861,7 +861,6 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
861static u64 construct_eptp(unsigned long root_hpa); 861static u64 construct_eptp(unsigned long root_hpa);
862static void kvm_cpu_vmxon(u64 addr); 862static void kvm_cpu_vmxon(u64 addr);
863static void kvm_cpu_vmxoff(void); 863static void kvm_cpu_vmxoff(void);
864static bool vmx_mpx_supported(void);
865static bool vmx_xsaves_supported(void); 864static bool vmx_xsaves_supported(void);
866static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr); 865static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
867static void vmx_set_segment(struct kvm_vcpu *vcpu, 866static void vmx_set_segment(struct kvm_vcpu *vcpu,
@@ -2595,7 +2594,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2595 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER | 2594 VM_EXIT_LOAD_IA32_EFER | VM_EXIT_SAVE_IA32_EFER |
2596 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT; 2595 VM_EXIT_SAVE_VMX_PREEMPTION_TIMER | VM_EXIT_ACK_INTR_ON_EXIT;
2597 2596
2598 if (vmx_mpx_supported()) 2597 if (kvm_mpx_supported())
2599 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS; 2598 vmx->nested.nested_vmx_exit_ctls_high |= VM_EXIT_CLEAR_BNDCFGS;
2600 2599
2601 /* We support free control of debug control saving. */ 2600 /* We support free control of debug control saving. */
@@ -2616,7 +2615,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
2616 VM_ENTRY_LOAD_IA32_PAT; 2615 VM_ENTRY_LOAD_IA32_PAT;
2617 vmx->nested.nested_vmx_entry_ctls_high |= 2616 vmx->nested.nested_vmx_entry_ctls_high |=
2618 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER); 2617 (VM_ENTRY_ALWAYSON_WITHOUT_TRUE_MSR | VM_ENTRY_LOAD_IA32_EFER);
2619 if (vmx_mpx_supported()) 2618 if (kvm_mpx_supported())
2620 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS; 2619 vmx->nested.nested_vmx_entry_ctls_high |= VM_ENTRY_LOAD_BNDCFGS;
2621 2620
2622 /* We support free control of debug control loading. */ 2621 /* We support free control of debug control loading. */
@@ -2860,7 +2859,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2860 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP); 2859 msr_info->data = vmcs_readl(GUEST_SYSENTER_ESP);
2861 break; 2860 break;
2862 case MSR_IA32_BNDCFGS: 2861 case MSR_IA32_BNDCFGS:
2863 if (!vmx_mpx_supported()) 2862 if (!kvm_mpx_supported())
2864 return 1; 2863 return 1;
2865 msr_info->data = vmcs_read64(GUEST_BNDCFGS); 2864 msr_info->data = vmcs_read64(GUEST_BNDCFGS);
2866 break; 2865 break;
@@ -2937,7 +2936,7 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
2937 vmcs_writel(GUEST_SYSENTER_ESP, data); 2936 vmcs_writel(GUEST_SYSENTER_ESP, data);
2938 break; 2937 break;
2939 case MSR_IA32_BNDCFGS: 2938 case MSR_IA32_BNDCFGS:
2940 if (!vmx_mpx_supported()) 2939 if (!kvm_mpx_supported())
2941 return 1; 2940 return 1;
2942 vmcs_write64(GUEST_BNDCFGS, data); 2941 vmcs_write64(GUEST_BNDCFGS, data);
2943 break; 2942 break;
@@ -3410,7 +3409,7 @@ static void init_vmcs_shadow_fields(void)
3410 for (i = j = 0; i < max_shadow_read_write_fields; i++) { 3409 for (i = j = 0; i < max_shadow_read_write_fields; i++) {
3411 switch (shadow_read_write_fields[i]) { 3410 switch (shadow_read_write_fields[i]) {
3412 case GUEST_BNDCFGS: 3411 case GUEST_BNDCFGS:
3413 if (!vmx_mpx_supported()) 3412 if (!kvm_mpx_supported())
3414 continue; 3413 continue;
3415 break; 3414 break;
3416 default: 3415 default:
@@ -10265,7 +10264,7 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
10265 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS); 10264 vmcs12->guest_sysenter_cs = vmcs_read32(GUEST_SYSENTER_CS);
10266 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP); 10265 vmcs12->guest_sysenter_esp = vmcs_readl(GUEST_SYSENTER_ESP);
10267 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP); 10266 vmcs12->guest_sysenter_eip = vmcs_readl(GUEST_SYSENTER_EIP);
10268 if (vmx_mpx_supported()) 10267 if (kvm_mpx_supported())
10269 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS); 10268 vmcs12->guest_bndcfgs = vmcs_read64(GUEST_BNDCFGS);
10270 if (nested_cpu_has_xsaves(vmcs12)) 10269 if (nested_cpu_has_xsaves(vmcs12))
10271 vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP); 10270 vmcs12->xss_exit_bitmap = vmcs_read64(XSS_EXIT_BITMAP);