author		Felix Wilhelm <fwilhelm@google.com>	2018-06-11 03:43:44 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2018-06-12 09:06:06 -0400
commit		727ba748e110b4de50d142edca9d6a9b7e6111d8 (patch)
tree		b7573effd1110a11aac720cc38407bfdb45f651f
parent		f4160e459c57646122beaea3f163b798179ea446 (diff)
kvm: nVMX: Enforce cpl=0 for VMX instructions
VMX instructions executed inside an L1 VM will always trigger a VM exit
even when executed with cpl 3. This means we must perform the
privilege check in software.

Fixes: 70f3aac964ae ("kvm: nVMX: Remove superfluous VMX instruction fault checks")
Cc: stable@vger.kernel.org
Signed-off-by: Felix Wilhelm <fwilhelm@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/kvm/vmx.c	15
1 file changed, 13 insertions(+), 2 deletions(-)
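Before the diff itself, a brief sketch of where the new check sits. The caller shape below is illustrative only and assumes each emulated VMX instruction handler bails out early when the permission check fails; handle_vmread, nested_vmx_check_permission, vmx_get_cpl and kvm_queue_exception are identifiers that appear in the hunks that follow, everything else here is a placeholder.

/*
 * Illustrative sketch, not part of the patch: the VM exit for a VMX
 * instruction is delivered to L0 before the CPU's own CPL check would
 * fault inside L1, so the emulation path has to reject CPL > 0 itself.
 */
static int handle_vmread(struct kvm_vcpu *vcpu)
{
	/* Assumed caller shape: refuse to emulate unless the guest ran at CPL 0. */
	if (!nested_vmx_check_permission(vcpu))
		return 1;

	/* ... decode operands and copy the requested VMCS field to the guest ... */
	return kvm_skip_emulated_instruction(vcpu);
}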
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 709de996f063..4bf1f9de9332 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7905,6 +7905,12 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		return 1;
 	}
 
+	/* CPL=0 must be checked manually. */
+	if (vmx_get_cpl(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 1;
+	}
+
 	if (vmx->nested.vmxon) {
 		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
 		return kvm_skip_emulated_instruction(vcpu);
@@ -7964,6 +7970,11 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
  */
 static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
 {
+	if (vmx_get_cpl(vcpu)) {
+		kvm_queue_exception(vcpu, UD_VECTOR);
+		return 0;
+	}
+
 	if (!to_vmx(vcpu)->nested.vmxon) {
 		kvm_queue_exception(vcpu, UD_VECTOR);
 		return 0;
@@ -8283,7 +8294,7 @@ static int handle_vmread(struct kvm_vcpu *vcpu)
 		if (get_vmx_mem_address(vcpu, exit_qualification,
 				vmx_instruction_info, true, &gva))
 			return 1;
-		/* _system ok, as hardware has verified cpl=0 */
+		/* _system ok, nested_vmx_check_permission has verified cpl=0 */
 		kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, gva,
 			     &field_value, (is_long_mode(vcpu) ? 8 : 4), NULL);
 	}
@@ -8448,7 +8459,7 @@ static int handle_vmptrst(struct kvm_vcpu *vcpu)
 	if (get_vmx_mem_address(vcpu, exit_qualification,
 			vmx_instruction_info, true, &vmcs_gva))
 		return 1;
-	/* ok to use *_system, as hardware has verified cpl=0 */
+	/* *_system ok, nested_vmx_check_permission has verified cpl=0 */
 	if (kvm_write_guest_virt_system(&vcpu->arch.emulate_ctxt, vmcs_gva,
 				 (void *)&to_vmx(vcpu)->nested.current_vmptr,
 				 sizeof(u64), &e)) {