author	Wanpeng Li <wanpeng.li@linux.intel.com>	2014-08-21 07:46:49 -0400
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-08-29 08:02:48 -0400
commit	a2bcba5035bb3d7fb3099e1893026316365f4b5d (patch)
tree	8de8c1f7957e730a0835b60bd5bb7de4bb09cf0f
parent	44b5ce73c99c389817be71b9161bceb197d40ecb (diff)
KVM: nVMX: introduce nested_get_vmcs12_pages
Introduce the function nested_get_vmcs12_pages() to check the validity of the nested APIC access page and virtual APIC page earlier.

Signed-off-by: Wanpeng Li <wanpeng.li@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/kvm/vmx.c	37
1 file changed, 25 insertions(+), 12 deletions(-)
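The new helper consolidates the alignment check that nested_vmx_run() previously did inline. For readers unfamiliar with the PAGE_ALIGNED() test, the following user-space sketch (not kernel code; SKETCH_PAGE_SIZE, SKETCH_PAGE_ALIGNED and sketch_get_apic_access_page are illustrative stand-ins, and a 4 KiB page size is assumed) shows the same validate-then-pin pattern: reject a guest-physical APIC access address that is not page aligned before any attempt to translate and pin the page. In the patch itself, a failure propagates to nested_vmx_run(), which fails the VM entry with VMXERR_ENTRY_INVALID_CONTROL_FIELD.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define SKETCH_PAGE_SIZE 4096ULL	/* assumption: 4 KiB pages */
#define SKETCH_PAGE_ALIGNED(addr) (((addr) & (SKETCH_PAGE_SIZE - 1)) == 0)

/* Same shape as the new helper: validate first, only then translate/pin. */
static bool sketch_get_apic_access_page(uint64_t apic_access_addr)
{
	if (!SKETCH_PAGE_ALIGNED(apic_access_addr))
		return false;	/* caller fails the nested VM entry */

	/* Translation and pinning of the page would happen here. */
	return true;
}

int main(void)
{
	/* 0xfee00000 is page aligned, 0xfee00010 is not. */
	printf("%d\n", sketch_get_apic_access_page(0xfee00000ULL));	/* prints 1 */
	printf("%d\n", sketch_get_apic_access_page(0xfee00010ULL));	/* prints 0 */
	return 0;
}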
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 661abc2f7049..70516e11f051 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7927,6 +7927,30 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
 	kvm_inject_page_fault(vcpu, fault);
 }
 
+static bool nested_get_vmcs12_pages(struct kvm_vcpu *vcpu,
+					struct vmcs12 *vmcs12)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES)) {
+		if (!PAGE_ALIGNED(vmcs12->apic_access_addr))
+			/*TODO: Also verify bits beyond physical address width are 0*/
+			return false;
+
+		/*
+		 * Translate L1 physical address to host physical
+		 * address for vmcs02. Keep the page pinned, so this
+		 * physical address remains valid. We keep a reference
+		 * to it so we can release it later.
+		 */
+		if (vmx->nested.apic_access_page) /* shouldn't happen */
+			nested_release_page(vmx->nested.apic_access_page);
+		vmx->nested.apic_access_page =
+			nested_get_page(vcpu, vmcs12->apic_access_addr);
+	}
+	return true;
+}
+
 static void vmx_start_preemption_timer(struct kvm_vcpu *vcpu)
 {
 	u64 preemption_timeout = get_vmcs12(vcpu)->vmx_preemption_timer_value;
@@ -8073,16 +8097,6 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
 
 	if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
 		/*
-		 * Translate L1 physical address to host physical
-		 * address for vmcs02. Keep the page pinned, so this
-		 * physical address remains valid. We keep a reference
-		 * to it so we can release it later.
-		 */
-		if (vmx->nested.apic_access_page) /* shouldn't happen */
-			nested_release_page(vmx->nested.apic_access_page);
-		vmx->nested.apic_access_page =
-			nested_get_page(vcpu, vmcs12->apic_access_addr);
-		/*
 		 * If translation failed, no matter: This feature asks
 		 * to exit when accessing the given address, and if it
 		 * can never be accessed, this feature won't do
@@ -8288,8 +8302,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 		return 1;
 	}
 
-	if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
-		!PAGE_ALIGNED(vmcs12->apic_access_addr)) {
+	if (!nested_get_vmcs12_pages(vcpu, vmcs12)) {
 		/*TODO: Also verify bits beyond physical address width are 0*/
 		nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
 		return 1;