diff options

author:    Paolo Bonzini <pbonzini@redhat.com>  2016-11-28 08:39:58 -0500
committer: Paolo Bonzini <pbonzini@redhat.com>  2017-08-10 10:44:04 -0400
commit:    eebed2438923f8df465c27f8fa41303771fdb2e8 (patch)
tree:      a767edc12e331314c03493ef5172248334350f71
parent:    64531a3b70b17c8d3e77f2e49e5e1bb70f571266 (diff)
kvm: nVMX: Add support for fast unprotection of nested guest page tables
This is the same as commit 147277540bbc ("kvm: svm: Add support for
additional SVM NPF error codes", 2016-11-23), but for Intel processors.
In this case, the exit qualification field's bit 8 says whether the
EPT violation occurred while translating the guest's final physical
address or rather while translating the guest page tables.
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/include/asm/kvm_host.h | 1 -
 arch/x86/kvm/mmu.c              | 5 +----
 arch/x86/kvm/vmx.c              | 5 ++++-
 3 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 1679aabcabe5..9e4862e0e978 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -204,7 +204,6 @@ enum { | |||
204 | #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT) | 204 | #define PFERR_GUEST_PAGE_MASK (1ULL << PFERR_GUEST_PAGE_BIT) |
205 | 205 | ||
206 | #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \ | 206 | #define PFERR_NESTED_GUEST_PAGE (PFERR_GUEST_PAGE_MASK | \ |
207 | PFERR_USER_MASK | \ | ||
208 | PFERR_WRITE_MASK | \ | 207 | PFERR_WRITE_MASK | \ |
209 | PFERR_PRESENT_MASK) | 208 | PFERR_PRESENT_MASK) |
210 | 209 | ||
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 454d81dc8913..7ee21c087c83 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -4836,12 +4836,9 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u64 error_code, | |||
4836 | * This can occur when using nested virtualization with nested | 4836 | * This can occur when using nested virtualization with nested |
4837 | * paging in both guests. If true, we simply unprotect the page | 4837 | * paging in both guests. If true, we simply unprotect the page |
4838 | * and resume the guest. | 4838 | * and resume the guest. |
4839 | * | ||
4840 | * Note: AMD only (since it supports the PFERR_GUEST_PAGE_MASK used | ||
4841 | * in PFERR_NEXT_GUEST_PAGE) | ||
4842 | */ | 4839 | */ |
4843 | if (vcpu->arch.mmu.direct_map && | 4840 | if (vcpu->arch.mmu.direct_map && |
4844 | error_code == PFERR_NESTED_GUEST_PAGE) { | 4841 | (error_code & PFERR_NESTED_GUEST_PAGE) == PFERR_NESTED_GUEST_PAGE) { |
4845 | kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2)); | 4842 | kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2)); |
4846 | return 1; | 4843 | return 1; |
4847 | } | 4844 | } |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c7cf5b11994a..ed1074e98b8e 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -6358,7 +6358,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) | |||
6358 | { | 6358 | { |
6359 | unsigned long exit_qualification; | 6359 | unsigned long exit_qualification; |
6360 | gpa_t gpa; | 6360 | gpa_t gpa; |
6361 | u32 error_code; | 6361 | u64 error_code; |
6362 | 6362 | ||
6363 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 6363 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
6364 | 6364 | ||
@@ -6390,6 +6390,9 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) | |||
6390 | EPT_VIOLATION_EXECUTABLE)) | 6390 | EPT_VIOLATION_EXECUTABLE)) |
6391 | ? PFERR_PRESENT_MASK : 0; | 6391 | ? PFERR_PRESENT_MASK : 0; |
6392 | 6392 | ||
6393 | error_code |= (exit_qualification & 0x100) != 0 ? | ||
6394 | PFERR_GUEST_FINAL_MASK : PFERR_GUEST_PAGE_MASK; | ||
6395 | |||
6393 | vcpu->arch.gpa_available = true; | 6396 | vcpu->arch.gpa_available = true; |
6394 | vcpu->arch.exit_qualification = exit_qualification; | 6397 | vcpu->arch.exit_qualification = exit_qualification; |
6395 | 6398 | ||