aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorSean Christopherson <sean.j.christopherson@intel.com>2018-08-14 12:33:34 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2018-08-22 10:48:35 -0400
commit0b665d30402819830caa21aa23942f70f36a7e72 (patch)
tree14324efc3c1487e98c2104f78d433363f7c9b9fa
parent802ec461670773b433b678b27145050262df5994 (diff)
KVM: vmx: Inject #UD for SGX ENCLS instruction in guest
Virtualization of Intel SGX depends on Enclave Page Cache (EPC) management that is not yet available in the kernel, i.e. KVM support for exposing SGX to a guest cannot be added until basic support for SGX is upstreamed, which is a WIP[1]. Until SGX is properly supported in KVM, ensure a guest sees expected behavior for ENCLS, i.e. all ENCLS #UD. Because SGX does not have a true software enable bit, e.g. there is no CR4.SGXE bit, the ENCLS instruction can be executed[2] by the guest if SGX is supported by the system. Intercept all ENCLS leafs (via the ENCLS- exiting control and field) and unconditionally inject #UD. [1] https://www.spinics.net/lists/kvm/msg171333.html or https://lkml.org/lkml/2018/7/3/879 [2] A guest can execute ENCLS in the sense that ENCLS will not take an immediate #UD, but no ENCLS will ever succeed in a guest without explicit support from KVM (map EPC memory into the guest), unless KVM has a *very* egregious bug, e.g. accidentally mapped EPC memory into the guest SPTEs. In other words this patch is needed only to prevent the guest from seeing inconsistent behavior, e.g. #GP (SGX not enabled in Feature Control MSR) or #PF (leaf operand(s) does not point at EPC memory) instead of #UD on ENCLS. Intercepting ENCLS is not required to prevent the guest from truly utilizing SGX. Signed-off-by: Sean Christopherson <sean.j.christopherson@intel.com> Message-Id: <20180814163334.25724-3-sean.j.christopherson@intel.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/vmx.c30
1 file changed, 29 insertions, 1 deletion
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 31e90e83fdd6..c76ca8c4befa 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1684,6 +1684,12 @@ static inline bool cpu_has_vmx_virtual_intr_delivery(void)
1684 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY; 1684 SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
1685} 1685}
1686 1686
1687static inline bool cpu_has_vmx_encls_vmexit(void)
1688{
1689 return vmcs_config.cpu_based_2nd_exec_ctrl &
1690 SECONDARY_EXEC_ENCLS_EXITING;
1691}
1692
1687/* 1693/*
1688 * Comment's format: document - errata name - stepping - processor name. 1694 * Comment's format: document - errata name - stepping - processor name.
1689 * Refer from 1695 * Refer from
@@ -4551,7 +4557,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
4551 SECONDARY_EXEC_RDRAND_EXITING | 4557 SECONDARY_EXEC_RDRAND_EXITING |
4552 SECONDARY_EXEC_ENABLE_PML | 4558 SECONDARY_EXEC_ENABLE_PML |
4553 SECONDARY_EXEC_TSC_SCALING | 4559 SECONDARY_EXEC_TSC_SCALING |
4554 SECONDARY_EXEC_ENABLE_VMFUNC; 4560 SECONDARY_EXEC_ENABLE_VMFUNC |
4561 SECONDARY_EXEC_ENCLS_EXITING;
4555 if (adjust_vmx_controls(min2, opt2, 4562 if (adjust_vmx_controls(min2, opt2,
4556 MSR_IA32_VMX_PROCBASED_CTLS2, 4563 MSR_IA32_VMX_PROCBASED_CTLS2,
4557 &_cpu_based_2nd_exec_control) < 0) 4564 &_cpu_based_2nd_exec_control) < 0)
@@ -6648,6 +6655,9 @@ static void vmx_vcpu_setup(struct vcpu_vmx *vmx)
6648 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg)); 6655 vmcs_write64(PML_ADDRESS, page_to_phys(vmx->pml_pg));
6649 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1); 6656 vmcs_write16(GUEST_PML_INDEX, PML_ENTITY_NUM - 1);
6650 } 6657 }
6658
6659 if (cpu_has_vmx_encls_vmexit())
6660 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
6651} 6661}
6652 6662
6653static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) 6663static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
@@ -9314,6 +9324,17 @@ fail:
9314 return 1; 9324 return 1;
9315} 9325}
9316 9326
9327static int handle_encls(struct kvm_vcpu *vcpu)
9328{
9329 /*
9330 * SGX virtualization is not yet supported. There is no software
9331 * enable bit for SGX, so we have to trap ENCLS and inject a #UD
9332 * to prevent the guest from executing ENCLS.
9333 */
9334 kvm_queue_exception(vcpu, UD_VECTOR);
9335 return 1;
9336}
9337
9317/* 9338/*
9318 * The exit handlers return 1 if the exit was handled fully and guest execution 9339 * The exit handlers return 1 if the exit was handled fully and guest execution
9319 * may resume. Otherwise they set the kvm_run parameter to indicate what needs 9340 * may resume. Otherwise they set the kvm_run parameter to indicate what needs
@@ -9371,6 +9392,7 @@ static int (*const kvm_vmx_exit_handlers[])(struct kvm_vcpu *vcpu) = {
9371 [EXIT_REASON_INVPCID] = handle_invpcid, 9392 [EXIT_REASON_INVPCID] = handle_invpcid,
9372 [EXIT_REASON_VMFUNC] = handle_vmfunc, 9393 [EXIT_REASON_VMFUNC] = handle_vmfunc,
9373 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer, 9394 [EXIT_REASON_PREEMPTION_TIMER] = handle_preemption_timer,
9395 [EXIT_REASON_ENCLS] = handle_encls,
9374}; 9396};
9375 9397
9376static const int kvm_vmx_max_exit_handlers = 9398static const int kvm_vmx_max_exit_handlers =
@@ -9741,6 +9763,9 @@ static bool nested_vmx_exit_reflected(struct kvm_vcpu *vcpu, u32 exit_reason)
9741 case EXIT_REASON_VMFUNC: 9763 case EXIT_REASON_VMFUNC:
9742 /* VM functions are emulated through L2->L0 vmexits. */ 9764 /* VM functions are emulated through L2->L0 vmexits. */
9743 return false; 9765 return false;
9766 case EXIT_REASON_ENCLS:
9767 /* SGX is never exposed to L1 */
9768 return false;
9744 default: 9769 default:
9745 return true; 9770 return true;
9746 } 9771 }
@@ -12101,6 +12126,9 @@ static int prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
12101 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) 12126 if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)
12102 vmcs_write64(APIC_ACCESS_ADDR, -1ull); 12127 vmcs_write64(APIC_ACCESS_ADDR, -1ull);
12103 12128
12129 if (exec_control & SECONDARY_EXEC_ENCLS_EXITING)
12130 vmcs_write64(ENCLS_EXITING_BITMAP, -1ull);
12131
12104 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control); 12132 vmcs_write32(SECONDARY_VM_EXEC_CONTROL, exec_control);
12105 } 12133 }
12106 12134