diff options
author | Joerg Roedel <joerg.roedel@amd.com> | 2009-07-27 10:30:46 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-09-10 01:33:19 -0400 |
commit | 7e4e4056f72da51c5dede48515df0ecd20eaf8ca (patch) | |
tree | 74ed536181a241c959d1bf5366c624f376e6784a /arch/x86/kvm/mmu.c | |
parent | e04da980c35d75fa050ba4009ad99025432d8d7d (diff) |
KVM: MMU: shadow support for 1gb pages
This patch adds support for shadow paging to the 1gb page table code in KVM.
With this code the guest can use 1gb pages even if the host does not support
them.
[ Marcelo: fix shadow page collision on pmd level if a guest 1gb page is mapped
with 4kb ptes on host level ]
Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r-- | arch/x86/kvm/mmu.c | 14 |
1 file changed, 2 insertions, 12 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c index 09ab6433bf1d..1249c12e1d5c 100644 --- a/arch/x86/kvm/mmu.c +++ b/arch/x86/kvm/mmu.c | |||
@@ -2478,11 +2478,8 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu, | |||
2478 | const void *new) | 2478 | const void *new) |
2479 | { | 2479 | { |
2480 | if (sp->role.level != PT_PAGE_TABLE_LEVEL) { | 2480 | if (sp->role.level != PT_PAGE_TABLE_LEVEL) { |
2481 | if (vcpu->arch.update_pte.level == PT_PAGE_TABLE_LEVEL || | 2481 | ++vcpu->kvm->stat.mmu_pde_zapped; |
2482 | sp->role.glevels == PT32_ROOT_LEVEL) { | 2482 | return; |
2483 | ++vcpu->kvm->stat.mmu_pde_zapped; | ||
2484 | return; | ||
2485 | } | ||
2486 | } | 2483 | } |
2487 | 2484 | ||
2488 | ++vcpu->kvm->stat.mmu_pte_updated; | 2485 | ++vcpu->kvm->stat.mmu_pte_updated; |
@@ -2528,8 +2525,6 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
2528 | u64 gpte = 0; | 2525 | u64 gpte = 0; |
2529 | pfn_t pfn; | 2526 | pfn_t pfn; |
2530 | 2527 | ||
2531 | vcpu->arch.update_pte.level = PT_PAGE_TABLE_LEVEL; | ||
2532 | |||
2533 | if (bytes != 4 && bytes != 8) | 2528 | if (bytes != 4 && bytes != 8) |
2534 | return; | 2529 | return; |
2535 | 2530 | ||
@@ -2557,11 +2552,6 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, | |||
2557 | return; | 2552 | return; |
2558 | gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; | 2553 | gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT; |
2559 | 2554 | ||
2560 | if (is_large_pte(gpte) && | ||
2561 | (mapping_level(vcpu, gfn) == PT_DIRECTORY_LEVEL)) { | ||
2562 | gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1); | ||
2563 | vcpu->arch.update_pte.level = PT_DIRECTORY_LEVEL; | ||
2564 | } | ||
2565 | vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq; | 2555 | vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq; |
2566 | smp_rmb(); | 2556 | smp_rmb(); |
2567 | pfn = gfn_to_pfn(vcpu->kvm, gfn); | 2557 | pfn = gfn_to_pfn(vcpu->kvm, gfn); |