author     Paolo Bonzini <pbonzini@redhat.com>    2014-03-05 17:19:52 -0500
committer  Paolo Bonzini <pbonzini@redhat.com>    2014-03-17 07:21:39 -0400
commit     93c4adc7afedf9b0ec190066d45b6d67db5270da
tree       8b941c7e959fab7a677df0d6d7c4052ec468342d
parent     36be0b9deb23161e9eba962c215aece551113a15
KVM: x86: handle missing MPX in nested virtualization
When doing nested virtualization, we may be able to read BNDCFGS but
still not be allowed to write to GUEST_BNDCFGS in the VMCS.  Guard
writes to the field with vmx_mpx_supported(), and similarly hide the
MSR from userspace if the processor does not support the field.

We could work around this with the generic MSR save/load machinery,
but there is only a limited number of MSR save/load slots and it is
not really worthwhile to waste one for a scenario that should not
happen except in the nested virtualization case.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/kvm/cpuid.c |  5 ++---
 arch/x86/kvm/svm.c   |  6 ++++++
 arch/x86/kvm/vmx.c   |  5 +++++
 arch/x86/kvm/x86.c   | 17 +++++++++++++++++
 4 files changed, 30 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 18aefb4d0927..64fae65730f3 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -47,7 +47,7 @@ u64 kvm_supported_xcr0(void)
 {
 	u64 xcr0 = KVM_SUPPORTED_XCR0 & host_xcr0;
 
-	if (!kvm_x86_ops->mpx_supported || !kvm_x86_ops->mpx_supported())
+	if (!kvm_x86_ops->mpx_supported())
 		xcr0 &= ~(XSTATE_BNDREGS | XSTATE_BNDCSR);
 
 	return xcr0;
@@ -259,8 +259,7 @@ static inline int __do_cpuid_ent(struct kvm_cpuid_entry2 *entry, u32 function,
 #endif
 	unsigned f_rdtscp = kvm_x86_ops->rdtscp_supported() ? F(RDTSCP) : 0;
 	unsigned f_invpcid = kvm_x86_ops->invpcid_supported() ? F(INVPCID) : 0;
-	unsigned f_mpx = kvm_x86_ops->mpx_supported ?
-		(kvm_x86_ops->mpx_supported() ? F(MPX) : 0) : 0;
+	unsigned f_mpx = kvm_x86_ops->mpx_supported() ? F(MPX) : 0;
 
 	/* cpuid 1.edx */
 	const u32 kvm_supported_word0_x86_features =
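
The kvm_supported_xcr0() hunk above masks the MPX state components out of
the XCR0 value KVM advertises whenever the backend reports no MPX support.
Below is a minimal, standalone sketch of that mask arithmetic; it is not
part of the patch, and the constants are written out here only for the
illustration (they mirror the architectural XSAVE component numbering,
bit 3 for the bound registers and bit 4 for BNDCSR):

#include <stdint.h>
#include <stdio.h>

/* Illustrative values only: XSAVE state-component bits. */
#define XSTATE_BNDREGS	(1ULL << 3)	/* MPX bound registers BND0-BND3 */
#define XSTATE_BNDCSR	(1ULL << 4)	/* MPX BNDCFGU + BNDSTATUS */

int main(void)
{
	uint64_t xcr0 = 0x1f;	/* x87 + SSE + AVX + both MPX components */
	int mpx_supported = 0;	/* pretend the backend reported no MPX */

	if (!mpx_supported)
		xcr0 &= ~(XSTATE_BNDREGS | XSTATE_BNDCSR);

	/* prints 0x7: the MPX bits are no longer advertised */
	printf("advertised XCR0: %#llx\n", (unsigned long long)xcr0);
	return 0;
}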
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a449c3d76cba..2136cb6ab132 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -4089,6 +4089,11 @@ static bool svm_invpcid_supported(void)
 	return false;
 }
 
+static bool svm_mpx_supported(void)
+{
+	return false;
+}
+
 static bool svm_has_wbinvd_exit(void)
 {
 	return true;
@@ -4371,6 +4376,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 
 	.rdtscp_supported = svm_rdtscp_supported,
 	.invpcid_supported = svm_invpcid_supported,
+	.mpx_supported = svm_mpx_supported,
 
 	.set_supported_cpuid = svm_set_supported_cpuid,
 
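
Because the NULL check on kvm_x86_ops->mpx_supported is dropped in cpuid.c,
every backend must now fill in the callback, so SVM gains a stub that simply
reports false.  A self-contained sketch of this "always populate the ops
table, never NULL-check the hook" pattern follows; the names are invented
for the example and only mimic the kvm_x86_ops style:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical ops table in the style of kvm_x86_ops. */
struct demo_ops {
	bool (*mpx_supported)(void);
};

static bool backend_mpx_supported(void)
{
	return false;	/* this backend never exposes MPX */
}

/* Every backend provides the hook, so callers can invoke it unconditionally. */
static const struct demo_ops ops = {
	.mpx_supported = backend_mpx_supported,
};

int main(void)
{
	/* No NULL check needed: the table is guaranteed to be complete. */
	printf("MPX advertised: %d\n", ops.mpx_supported());
	return 0;
}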
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c95bea17fc1e..1320e0f8e611 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -729,6 +729,7 @@ static unsigned long nested_ept_get_cr3(struct kvm_vcpu *vcpu);
 static u64 construct_eptp(unsigned long root_hpa);
 static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
+static bool vmx_mpx_supported(void);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
@@ -2501,6 +2502,8 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 		data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	case MSR_IA32_BNDCFGS:
+		if (!vmx_mpx_supported())
+			return 1;
 		data = vmcs_read64(GUEST_BNDCFGS);
 		break;
 	case MSR_IA32_FEATURE_CONTROL:
@@ -2572,6 +2575,8 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
 		vmcs_writel(GUEST_SYSENTER_ESP, data);
 		break;
 	case MSR_IA32_BNDCFGS:
+		if (!vmx_mpx_supported())
+			return 1;
 		vmcs_write64(GUEST_BNDCFGS, data);
 		break;
 	case MSR_IA32_TSC:
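
Returning 1 from these handlers makes KVM treat the BNDCFGS access as
invalid instead of touching a VMCS field the hardware does not have.  The
vmx_mpx_supported() helper itself is not part of this patch; it was added
with the original MPX enabling and, roughly, reports true only when the
VM-entry "load IA32_BNDCFGS" and VM-exit "clear IA32_BNDCFGS" controls are
available.  A standalone mock-up of that check, assuming the SDM bit
positions (entry control bit 16, exit control bit 23) and a stand-in for
KVM's vmcs_config:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed values for the sketch; not copied from kernel headers. */
#define VM_ENTRY_LOAD_BNDCFGS	0x00010000u
#define VM_EXIT_CLEAR_BNDCFGS	0x00800000u

static struct mock_vmcs_config {
	uint32_t vmentry_ctrl;
	uint32_t vmexit_ctrl;
} vmcs_config;

static bool vmx_mpx_supported(void)
{
	return (vmcs_config.vmexit_ctrl & VM_EXIT_CLEAR_BNDCFGS) &&
	       (vmcs_config.vmentry_ctrl & VM_ENTRY_LOAD_BNDCFGS);
}

int main(void)
{
	/* Hardware without the BNDCFGS controls: accesses would be refused. */
	printf("MPX supported: %d\n", vmx_mpx_supported());	/* 0 */

	vmcs_config.vmentry_ctrl = VM_ENTRY_LOAD_BNDCFGS;
	vmcs_config.vmexit_ctrl  = VM_EXIT_CLEAR_BNDCFGS;
	printf("MPX supported: %d\n", vmx_mpx_supported());	/* 1 */
	return 0;
}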
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3f5fb4535f9c..aa986959f237 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3937,6 +3937,23 @@ static void kvm_init_msr_list(void)
 	for (i = j = KVM_SAVE_MSRS_BEGIN; i < ARRAY_SIZE(msrs_to_save); i++) {
 		if (rdmsr_safe(msrs_to_save[i], &dummy[0], &dummy[1]) < 0)
 			continue;
+
+		/*
+		 * Even MSRs that are valid in the host may not be exposed
+		 * to the guests in some cases.  We could work around this
+		 * in VMX with the generic MSR save/load machinery, but it
+		 * is not really worthwhile since it will really only
+		 * happen with nested virtualization.
+		 */
+		switch (msrs_to_save[i]) {
+		case MSR_IA32_BNDCFGS:
+			if (!kvm_x86_ops->mpx_supported())
+				continue;
+			break;
+		default:
+			break;
+		}
+
 		if (j < i)
 			msrs_to_save[j] = msrs_to_save[i];
 		j++;
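
kvm_init_msr_list() builds the list that userspace retrieves with the
KVM_GET_MSR_INDEX_LIST ioctl, so filtering MSR_IA32_BNDCFGS here is what
actually hides the MSR from a VMM during register save/restore.  A rough
userspace sketch of how that list is consumed (error handling trimmed;
the MSR number 0x0d90 is the architectural IA32_BNDCFGS address):

#include <fcntl.h>
#include <linux/kvm.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ioctl.h>

#define MSR_IA32_BNDCFGS 0x00000d90

int main(void)
{
	int kvm = open("/dev/kvm", O_RDWR);
	struct kvm_msr_list probe = { .nmsrs = 0 };
	struct kvm_msr_list *list;
	unsigned int i;

	/* First call is expected to fail with E2BIG; it fills in the count. */
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, &probe);

	list = malloc(sizeof(*list) + probe.nmsrs * sizeof(list->indices[0]));
	list->nmsrs = probe.nmsrs;
	ioctl(kvm, KVM_GET_MSR_INDEX_LIST, list);

	/* With this patch, BNDCFGS only shows up when MPX is really usable. */
	for (i = 0; i < list->nmsrs; i++)
		if (list->indices[i] == MSR_IA32_BNDCFGS)
			printf("BNDCFGS is exposed for save/restore\n");

	free(list);
	return 0;
}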