author     Joerg Roedel <joerg.roedel@amd.com>    2010-09-10 11:30:50 -0400
committer  Avi Kivity <avi@redhat.com>            2010-10-24 04:52:36 -0400
commit     6539e738f65a8f1fc7806295d5d701fba4008343 (patch)
tree       7b66b9d2c28eae8a332cdb1da0e602357b0cbeda
parent     14dfe855f978181cd611ec018e5ceba860a98545 (diff)
KVM: MMU: Implement nested gva_to_gpa functions
This patch adds the functions to do a nested l2_gva to
l1_gpa page table walk.
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  arch/x86/include/asm/kvm_host.h  10
-rw-r--r--  arch/x86/kvm/mmu.c                8
-rw-r--r--  arch/x86/kvm/paging_tmpl.h       31
-rw-r--r--  arch/x86/kvm/x86.h                5
4 files changed, 54 insertions, 0 deletions
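For context, a minimal sketch (not part of this commit) of how the new nested walker is meant to be consumed. It assumes the wiring done by later patches in this series, namely that vcpu->arch.walk_mmu is pointed at &vcpu->arch.nested_mmu and one of the FNAME(gva_to_gpa_nested) variants added below is installed as nested_mmu.gva_to_gpa; the helper name example_read_translation() is hypothetical.

/*
 * Illustrative sketch only, assuming the wiring from later patches
 * in this series.
 */
static gpa_t example_read_translation(struct kvm_vcpu *vcpu, gva_t l2_gva,
				      u32 *error)
{
	/*
	 * With the nested walker installed, this single callback walks the
	 * L2 guest's page tables (l2_gva -> l2_gpa) and translates each
	 * intermediate l2_gpa through the L1 nested page tables, so the
	 * result is an l1_gpa that the regular memslot accessors can use.
	 */
	return vcpu->arch.walk_mmu->gva_to_gpa(vcpu, l2_gva, 0, error);
}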
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1b3eb8a0a1bc..8ec3547c433d 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -296,6 +296,16 @@ struct kvm_vcpu_arch {
 	struct kvm_mmu mmu;
 
 	/*
+	 * Paging state of an L2 guest (used for nested npt)
+	 *
+	 * This context will save all necessary information to walk page tables
+	 * of the an L2 guest. This context is only initialized for page table
+	 * walking and not for faulting since we never handle l2 page faults on
+	 * the host.
+	 */
+	struct kvm_mmu nested_mmu;
+
+	/*
 	 * Pointer to the mmu context currently used for
 	 * gva_to_gpa translations.
 	 */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cb06adac92b1..1e215e8b9377 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2466,6 +2466,14 @@ static gpa_t nonpaging_gva_to_gpa(struct kvm_vcpu *vcpu, gva_t vaddr,
 	return vaddr;
 }
 
+static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
+					 u32 access, u32 *error)
+{
+	if (error)
+		*error = 0;
+	return vcpu->arch.nested_mmu.translate_gpa(vcpu, vaddr, access);
+}
+
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 				u32 error_code)
 {
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a704a8130e44..eefe363156b9 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -276,6 +276,16 @@ static int FNAME(walk_addr)(struct guest_walker *walker,
 			    write_fault, user_fault, fetch_fault);
 }
 
+static int FNAME(walk_addr_nested)(struct guest_walker *walker,
+				   struct kvm_vcpu *vcpu, gva_t addr,
+				   int write_fault, int user_fault,
+				   int fetch_fault)
+{
+	return FNAME(walk_addr_generic)(walker, vcpu, &vcpu->arch.nested_mmu,
+					addr, write_fault, user_fault,
+					fetch_fault);
+}
+
 static void FNAME(update_pte)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 			      u64 *spte, const void *pte)
 {
@@ -660,6 +670,27 @@ static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,
 	return gpa;
 }
 
+static gpa_t FNAME(gva_to_gpa_nested)(struct kvm_vcpu *vcpu, gva_t vaddr,
+				      u32 access, u32 *error)
+{
+	struct guest_walker walker;
+	gpa_t gpa = UNMAPPED_GVA;
+	int r;
+
+	r = FNAME(walk_addr_nested)(&walker, vcpu, vaddr,
+				    access & PFERR_WRITE_MASK,
+				    access & PFERR_USER_MASK,
+				    access & PFERR_FETCH_MASK);
+
+	if (r) {
+		gpa = gfn_to_gpa(walker.gfn);
+		gpa |= vaddr & ~PAGE_MASK;
+	} else if (error)
+		*error = walker.error_code;
+
+	return gpa;
+}
+
 static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
 				 struct kvm_mmu_page *sp)
 {
diff --git a/arch/x86/kvm/x86.h b/arch/x86/kvm/x86.h
index 2d6385e44ccf..bf4dc2f40d7f 100644
--- a/arch/x86/kvm/x86.h
+++ b/arch/x86/kvm/x86.h
@@ -50,6 +50,11 @@ static inline int is_long_mode(struct kvm_vcpu *vcpu)
 #endif
 }
 
+static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
+{
+	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
+}
+
 static inline int is_pae(struct kvm_vcpu *vcpu)
 {
 	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);