aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2013-09-25 05:51:36 -0400
committerPaolo Bonzini <pbonzini@redhat.com>2013-09-30 03:14:25 -0400
commitfeaf0c7dc473fefa1f263d88788f57e39b4b007e (patch)
tree39cd73b295de323467da374f2b06e443e0f89f7d /arch/x86
parente011c663b9c786d115c0f45e5b0bfae0c39428d4 (diff)
KVM: nVMX: Do not generate #DF if #PF happens during exception delivery into L2
If #PF happens during delivery of an exception into L2 and L1 also do not have the page mapped in its shadow page table then L0 needs to generate vmexit to L2 with original event in IDT_VECTORING_INFO, but current code combines both exception and generates #DF instead. Fix that by providing nVMX specific function to handle page faults during page table walk that handles this case correctly. Signed-off-by: Gleb Natapov <gleb@redhat.com> Reviewed-by: Paolo Bonzini <pbonzini@redhat.com> Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kvm/vmx.c20
1 file changed, 20 insertions, 0 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ac9ded4d4257..be7fd0e1ad42 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -7518,6 +7518,20 @@ static void nested_ept_uninit_mmu_context(struct kvm_vcpu *vcpu)
7518 vcpu->arch.walk_mmu = &vcpu->arch.mmu; 7518 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
7519} 7519}
7520 7520
7521static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
7522 struct x86_exception *fault)
7523{
7524 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7525
7526 WARN_ON(!is_guest_mode(vcpu));
7527
7528 /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
7529 if (vmcs12->exception_bitmap & (1u << PF_VECTOR))
7530 nested_vmx_vmexit(vcpu);
7531 else
7532 kvm_inject_page_fault(vcpu, fault);
7533}
7534
7521/* 7535/*
7522 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested 7536 * prepare_vmcs02 is called when the L1 guest hypervisor runs its nested
7523 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it 7537 * L2 guest. L1 has a vmcs for L2 (vmcs12), and this function "merges" it
@@ -7771,6 +7785,9 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
7771 kvm_set_cr3(vcpu, vmcs12->guest_cr3); 7785 kvm_set_cr3(vcpu, vmcs12->guest_cr3);
7772 kvm_mmu_reset_context(vcpu); 7786 kvm_mmu_reset_context(vcpu);
7773 7787
7788 if (!enable_ept)
7789 vcpu->arch.walk_mmu->inject_page_fault = vmx_inject_page_fault_nested;
7790
7774 /* 7791 /*
7775 * L1 may access the L2's PDPTR, so save them to construct vmcs12 7792 * L1 may access the L2's PDPTR, so save them to construct vmcs12
7776 */ 7793 */
@@ -8230,6 +8247,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
8230 kvm_set_cr3(vcpu, vmcs12->host_cr3); 8247 kvm_set_cr3(vcpu, vmcs12->host_cr3);
8231 kvm_mmu_reset_context(vcpu); 8248 kvm_mmu_reset_context(vcpu);
8232 8249
8250 if (!enable_ept)
8251 vcpu->arch.walk_mmu->inject_page_fault = kvm_inject_page_fault;
8252
8233 if (enable_vpid) { 8253 if (enable_vpid) {
8234 /* 8254 /*
8235 * Trivially support vpid by letting L2s share their parent 8255 * Trivially support vpid by letting L2s share their parent