author	David Matlack <dmatlack@google.com>	2016-11-29 21:14:08 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2016-12-08 09:31:07 -0500
commit	3899152ccbf42d7e3d3c7830b1fae75a575a1ed6 (patch)
tree	7a9df8cb460e4ab2638a378028b84958a4fca73c
parent	62cc6b9dc61eeedd96c862daa7adc08ff1b2b23b (diff)
KVM: nVMX: fix checks on CR{0,4} during virtual VMX operation
KVM emulates MSR_IA32_VMX_CR{0,4}_FIXED1 with the value -1ULL, meaning
all CR0 and CR4 bits are allowed to be 1 during VMX operation. This
does not match real hardware, which disallows the high 32 bits of CR0
to be 1, and disallows reserved bits of CR4 to be 1 (including bits
which are defined in the SDM but missing according to CPUID). A guest
can induce a VM-entry failure by setting these bits in GUEST_CR0 and
GUEST_CR4, despite MSR_IA32_VMX_CR{0,4}_FIXED1 indicating they are
valid.

Since KVM has allowed all bits to be 1 in CR0 and CR4, the existing
checks on these registers do not verify must-be-0 bits. Fix these
checks to identify must-be-0 bits according to
MSR_IA32_VMX_CR{0,4}_FIXED1.

This patch should introduce no change in behavior in KVM, since these
MSRs are still -1ULL.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Radim Krčmář <rkrcmar@redhat.com>
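To make the FIXED0/FIXED1 semantics above concrete, here is a minimal,
self-contained sketch of the predicate the patch introduces as
fixed_bits_valid(). The MSR values below are invented for illustration;
real values come from the MSR_IA32_VMX_CR{0,4}_FIXED{0,1} MSRs.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Same check the patch adds to vmx.c: every bit set in fixed0 must be
 * 1 in val, and every bit clear in fixed1 must be 0 in val.
 */
static inline bool fixed_bits_valid(uint64_t val, uint64_t fixed0, uint64_t fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}

int main(void)
{
	/* Invented example values, not real MSR contents. */
	uint64_t fixed0 = 0x0000000000000021ULL; /* bits 0 and 5 must be 1 */
	uint64_t fixed1 = 0x00000000ffffffffULL; /* high 32 bits must be 0 */

	printf("%d\n", fixed_bits_valid(0x000000021ULL, fixed0, fixed1)); /* 1: valid */
	printf("%d\n", fixed_bits_valid(0x000000020ULL, fixed0, fixed1)); /* 0: must-be-1 bit 0 is clear */
	printf("%d\n", fixed_bits_valid(0x100000021ULL, fixed0, fixed1)); /* 0: must-be-0 bit 32 is set */
	return 0;
}

The old VMXON_CR{0,4}_ALWAYSON checks verified only the must-be-1 half
(fixed0); the additional mask against fixed1 is what supplies the
missing must-be-0 check.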
-rw-r--r--	arch/x86/kvm/vmx.c	77
1 file changed, 53 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index e0397c60e986..2b8d4f5ac246 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2892,12 +2892,18 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	vmx->nested.nested_vmx_vmcs_enum = 0x2e;
 }
 
+/*
+ * if fixed0[i] == 1: val[i] must be 1
+ * if fixed1[i] == 0: val[i] must be 0
+ */
+static inline bool fixed_bits_valid(u64 val, u64 fixed0, u64 fixed1)
+{
+	return ((val & fixed1) | fixed0) == val;
+}
+
 static inline bool vmx_control_verify(u32 control, u32 low, u32 high)
 {
-	/*
-	 * Bits 0 in high must be 0, and bits 1 in low must be 1.
-	 */
-	return ((control & high) | low) == control;
+	return fixed_bits_valid(control, low, high);
 }
 
 static inline u64 vmx_control_msr(u32 low, u32 high)
@@ -4132,6 +4138,40 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu)
 		  (unsigned long *)&vcpu->arch.regs_dirty);
 }
 
+static bool nested_guest_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+	u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+
+	if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
+		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
+	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
+		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);
+
+	return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_host_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed0;
+	u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr0_fixed1;
+
+	return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+static bool nested_cr4_valid(struct kvm_vcpu *vcpu, unsigned long val)
+{
+	u64 fixed0 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed0;
+	u64 fixed1 = to_vmx(vcpu)->nested.nested_vmx_cr4_fixed1;
+
+	return fixed_bits_valid(val, fixed0, fixed1);
+}
+
+/* No difference in the restrictions on guest and host CR4 in VMX operation. */
+#define nested_guest_cr4_valid	nested_cr4_valid
+#define nested_host_cr4_valid	nested_cr4_valid
+
 static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4);
 
 static void ept_update_paging_mode_cr0(unsigned long *hw_cr0,
@@ -4260,8 +4300,8 @@ static int vmx_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 		if (!nested_vmx_allowed(vcpu))
 			return 1;
 	}
-	if (to_vmx(vcpu)->nested.vmxon &&
-	    ((cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON))
+
+	if (to_vmx(vcpu)->nested.vmxon && !nested_cr4_valid(vcpu, cr4))
 		return 1;
 
 	vcpu->arch.cr4 = cr4;
@@ -5826,18 +5866,6 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 	hypercall[2] = 0xc1;
 }
 
-static bool nested_cr0_valid(struct kvm_vcpu *vcpu, unsigned long val)
-{
-	unsigned long always_on = VMXON_CR0_ALWAYSON;
-	struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-
-	if (to_vmx(vcpu)->nested.nested_vmx_secondary_ctls_high &
-		SECONDARY_EXEC_UNRESTRICTED_GUEST &&
-	    nested_cpu_has2(vmcs12, SECONDARY_EXEC_UNRESTRICTED_GUEST))
-		always_on &= ~(X86_CR0_PE | X86_CR0_PG);
-	return (val & always_on) == always_on;
-}
-
 /* called to set cr0 as appropriate for a mov-to-cr0 exit. */
 static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 {
@@ -5856,7 +5884,7 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 		val = (val & ~vmcs12->cr0_guest_host_mask) |
 			(vmcs12->guest_cr0 & vmcs12->cr0_guest_host_mask);
 
-		if (!nested_cr0_valid(vcpu, val))
+		if (!nested_guest_cr0_valid(vcpu, val))
 			return 1;
 
 		if (kvm_set_cr0(vcpu, val))
@@ -5865,8 +5893,9 @@ static int handle_set_cr0(struct kvm_vcpu *vcpu, unsigned long val)
 		return 0;
 	} else {
 		if (to_vmx(vcpu)->nested.vmxon &&
-		    ((val & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON))
+		    !nested_host_cr0_valid(vcpu, val))
 			return 1;
+
 		return kvm_set_cr0(vcpu, val);
 	}
 }
@@ -10325,15 +10354,15 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		goto out;
 	}
 
-	if (((vmcs12->host_cr0 & VMXON_CR0_ALWAYSON) != VMXON_CR0_ALWAYSON) ||
-	    ((vmcs12->host_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+	if (!nested_host_cr0_valid(vcpu, vmcs12->host_cr0) ||
+	    !nested_host_cr4_valid(vcpu, vmcs12->host_cr4)) {
 		nested_vmx_failValid(vcpu,
 				     VMXERR_ENTRY_INVALID_HOST_STATE_FIELD);
 		goto out;
 	}
 
-	if (!nested_cr0_valid(vcpu, vmcs12->guest_cr0) ||
-	    ((vmcs12->guest_cr4 & VMXON_CR4_ALWAYSON) != VMXON_CR4_ALWAYSON)) {
+	if (!nested_guest_cr0_valid(vcpu, vmcs12->guest_cr0) ||
+	    !nested_guest_cr4_valid(vcpu, vmcs12->guest_cr4)) {
 		nested_vmx_entry_failure(vcpu, vmcs12,
 			EXIT_REASON_INVALID_STATE, ENTRY_FAIL_DEFAULT);
 		goto out;
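
For reference, a hedged sketch of how the unrestricted-guest relaxation
in nested_guest_cr0_valid() composes with the same predicate, as a
standalone illustration. X86_CR0_PE and X86_CR0_PG are the architectural
CR0 bit masks; the helper name guest_cr0_valid() and the calling
convention here are invented for the example.

#include <stdbool.h>
#include <stdint.h>

#define X86_CR0_PE 0x00000001ULL /* Protection Enable (bit 0) */
#define X86_CR0_PG 0x80000000ULL /* Paging (bit 31) */

static inline bool fixed_bits_valid(uint64_t val, uint64_t fixed0, uint64_t fixed1)
{
	return ((val & fixed1) | fixed0) == val;
}

/*
 * Illustrative stand-in for nested_guest_cr0_valid(): when L1 runs L2
 * with "unrestricted guest" enabled, CR0.PE and CR0.PG stop being
 * must-be-1 bits, so they are cleared from fixed0 before the check.
 * Host CR0 gets no such relaxation, which is why the patch keeps a
 * separate nested_host_cr0_valid().
 */
static bool guest_cr0_valid(uint64_t cr0, uint64_t fixed0, uint64_t fixed1,
			    bool unrestricted_guest)
{
	if (unrestricted_guest)
		fixed0 &= ~(X86_CR0_PE | X86_CR0_PG);

	return fixed_bits_valid(cr0, fixed0, fixed1);
}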