author     Bandan Das <bsd@redhat.com>            2014-05-06 02:19:16 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2014-05-06 13:00:27 -0400
commit     3573e22cfecaac83f82ef4f6847d90e466fc8e10 (patch)
tree       ad68136dbdd69476b8bfcced376aa392a4b89b76 /arch/x86/kvm
parent     19677e32fe7d6913e07ce80f6f3dc7663ac7fe67 (diff)
KVM: nVMX: additional checks on vmxon region
Currently, the vmxon region isn't used in the nested case. However, according
to the spec, the vmxon instruction performs additional sanity checks on this
region and the associated pointer. Modify emulated vmxon to better adhere to
the spec requirements.

Signed-off-by: Bandan Das <bsd@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
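For context, the spec requirements referenced above boil down to three checks on the VMXON pointer and its region: 4KB alignment, no bits set above the CPUID-reported physical address width, and a matching revision identifier in the region's first 32 bits. The stand-alone C sketch below only restates those conditions for illustration; the helper name, its parameters, and the page-size macro are invented here and are not part of the patch, which implements the equivalent logic with IS_ALIGNED(), cpuid_maxphyaddr() and VMCS12_REVISION inside the new nested_vmx_check_vmptr() shown in the diff.

#include <stdbool.h>
#include <stdint.h>

#define SKETCH_PAGE_SIZE 4096u  /* assumed 4KB alignment of the VMXON region */

/*
 * Illustrative helper (hypothetical, not kernel code): returns true when a
 * guest-supplied VMXON pointer satisfies the checks the patch enforces:
 *  - the pointer is 4KB aligned,
 *  - no bits above the reported physical address width are set,
 *  - the first 32 bits of the region hold the supported revision identifier
 *    (KVM compares against VMCS12_REVISION).
 */
static bool vmxon_ptr_valid(uint64_t vmptr, unsigned int maxphyaddr,
                            uint32_t first_dword, uint32_t supported_revision)
{
        if (vmptr & (SKETCH_PAGE_SIZE - 1))     /* misaligned region */
                return false;
        if (vmptr >> maxphyaddr)                /* bits beyond physical address width */
                return false;
        return first_dword == supported_revision;
}

In the patch itself a failed check does not simply return false: it raises VMfailInvalid via nested_vmx_failInvalid() and skips the emulated instruction.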
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/cpuid.c |  1
-rw-r--r--  arch/x86/kvm/vmx.c   | 67
2 files changed, 68 insertions, 0 deletions
diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
index 333b88db22fe..17b42fabc842 100644
--- a/arch/x86/kvm/cpuid.c
+++ b/arch/x86/kvm/cpuid.c
@@ -732,6 +732,7 @@ int cpuid_maxphyaddr(struct kvm_vcpu *vcpu)
 not_found:
 	return 36;
 }
+EXPORT_SYMBOL_GPL(cpuid_maxphyaddr);
 
 /*
  * If no match is found, check whether we exceed the vCPU's limit
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 917a15efc45b..0f7934767a2f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -354,6 +354,7 @@ struct vmcs02_list {
 struct nested_vmx {
 	/* Has the level1 guest done vmxon? */
 	bool vmxon;
+	gpa_t vmxon_ptr;
 
 	/* The guest-physical address of the current VMCS L1 keeps for L2 */
 	gpa_t current_vmptr;
@@ -5845,6 +5846,68 @@ static int get_vmx_mem_address(struct kvm_vcpu *vcpu,
 }
 
 /*
+ * This function performs the various checks including
+ * - if it's 4KB aligned
+ * - No bits beyond the physical address width are set
+ * - Returns 0 on success or else 1
+ */
+static int nested_vmx_check_vmptr(struct kvm_vcpu *vcpu, int exit_reason)
+{
+	gva_t gva;
+	gpa_t vmptr;
+	struct x86_exception e;
+	struct page *page;
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int maxphyaddr = cpuid_maxphyaddr(vcpu);
+
+	if (get_vmx_mem_address(vcpu, vmcs_readl(EXIT_QUALIFICATION),
+			vmcs_read32(VMX_INSTRUCTION_INFO), &gva))
+		return 1;
+
+	if (kvm_read_guest_virt(&vcpu->arch.emulate_ctxt, gva, &vmptr,
+				sizeof(vmptr), &e)) {
+		kvm_inject_page_fault(vcpu, &e);
+		return 1;
+	}
+
+	switch (exit_reason) {
+	case EXIT_REASON_VMON:
+		/*
+		 * SDM 3: 24.11.5
+		 * The first 4 bytes of VMXON region contain the supported
+		 * VMCS revision identifier
+		 *
+		 * Note - IA32_VMX_BASIC[48] will never be 1
+		 * for the nested case;
+		 * which replaces physical address width with 32
+		 *
+		 */
+		if (!IS_ALIGNED(vmptr, PAGE_SIZE) || (vmptr >> maxphyaddr)) {
+			nested_vmx_failInvalid(vcpu);
+			skip_emulated_instruction(vcpu);
+			return 1;
+		}
+
+		page = nested_get_page(vcpu, vmptr);
+		if (page == NULL ||
+		    *(u32 *)kmap(page) != VMCS12_REVISION) {
+			nested_vmx_failInvalid(vcpu);
+			kunmap(page);
+			skip_emulated_instruction(vcpu);
+			return 1;
+		}
+		kunmap(page);
+		vmx->nested.vmxon_ptr = vmptr;
+		break;
+
+	default:
+		return 1; /* shouldn't happen */
+	}
+
+	return 0;
+}
+
+/*
  * Emulate the VMXON instruction.
  * Currently, we just remember that VMX is active, and do not save or even
  * inspect the argument to VMXON (the so-called "VMXON pointer") because we
@@ -5882,6 +5945,10 @@ static int handle_vmon(struct kvm_vcpu *vcpu)
 		kvm_inject_gp(vcpu, 0);
 		return 1;
 	}
+
+	if (nested_vmx_check_vmptr(vcpu, EXIT_REASON_VMON))
+		return 1;
+
 	if (vmx->nested.vmxon) {
 		nested_vmx_failValid(vcpu, VMXERR_VMXON_IN_VMX_ROOT_OPERATION);
 		skip_emulated_instruction(vcpu);