Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	30
1 file changed, 16 insertions, 14 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 12974de88aa..b67585c1ef0 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -394,9 +394,9 @@ static int *slot_largepage_idx(gfn_t gfn, struct kvm_memory_slot *slot)
 {
 	unsigned long idx;
 
-	idx = (gfn / KVM_PAGES_PER_HPAGE) -
-	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
-	return &slot->lpage_info[idx].write_count;
+	idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
+	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
+	return &slot->lpage_info[0][idx].write_count;
 }
 
 static void account_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -485,10 +485,10 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
 	if (!lpage)
 		return &slot->rmap[gfn - slot->base_gfn];
 
-	idx = (gfn / KVM_PAGES_PER_HPAGE) -
-	      (slot->base_gfn / KVM_PAGES_PER_HPAGE);
+	idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
+	      (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
 
-	return &slot->lpage_info[idx].rmap_pde;
+	return &slot->lpage_info[0][idx].rmap_pde;
 }
 
 /*
@@ -731,11 +731,11 @@ static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
 		end = start + (memslot->npages << PAGE_SHIFT);
 		if (hva >= start && hva < end) {
 			gfn_t gfn_offset = (hva - start) >> PAGE_SHIFT;
+			int idx = gfn_offset /
+				  KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL);
 			retval |= handler(kvm, &memslot->rmap[gfn_offset]);
 			retval |= handler(kvm,
-					  &memslot->lpage_info[
-					  gfn_offset /
-					  KVM_PAGES_PER_HPAGE].rmap_pde);
+				&memslot->lpage_info[0][idx].rmap_pde);
 		}
 	}
 
@@ -1876,8 +1876,9 @@ static int nonpaging_map(struct kvm_vcpu *vcpu, gva_t v, int write, gfn_t gfn)
 	pfn_t pfn;
 	unsigned long mmu_seq;
 
-	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
-		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+	if (is_largepage_backed(vcpu, gfn &
+			~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		largepage = 1;
 	}
 
@@ -2082,8 +2083,9 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	if (r)
 		return r;
 
-	if (is_largepage_backed(vcpu, gfn & ~(KVM_PAGES_PER_HPAGE-1))) {
-		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+	if (is_largepage_backed(vcpu, gfn &
+			~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1))) {
+		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		largepage = 1;
 	}
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
@@ -2485,7 +2487,7 @@ static void mmu_guess_page_from_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	gfn = (gpte & PT64_BASE_ADDR_MASK) >> PAGE_SHIFT;
 
 	if (is_large_pte(gpte) && is_largepage_backed(vcpu, gfn)) {
-		gfn &= ~(KVM_PAGES_PER_HPAGE-1);
+		gfn &= ~(KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) - 1);
 		vcpu->arch.update_pte.largepage = 1;
 	}
 	vcpu->arch.update_pte.mmu_seq = vcpu->kvm->mmu_notifier_seq;
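
For reference, a minimal standalone sketch of the index arithmetic this patch switches to. The macro bodies below are assumptions modelled on the KVM_PAGES_PER_HPAGE()/KVM_HPAGE_SIZE() definitions in arch/x86/include/asm/kvm_host.h of this era (they are not part of this diff), and the example gfn values are hypothetical. With 4K base pages and PT_DIRECTORY_LEVEL == 2, KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) works out to 512 base pages per 2M huge page, which is the divisor used by slot_largepage_idx() and gfn_to_rmap() in the hunks above.

/* Illustrative sketch only: macro bodies assumed, not taken from this diff. */
#include <stdio.h>

#define PAGE_SHIFT		12
#define PT_DIRECTORY_LEVEL	2

/* Assumed per-level definitions: level 2 -> shift 21 -> 2M huge pages. */
#define KVM_HPAGE_SHIFT(x)	(PAGE_SHIFT + (((x) - 1) * 9))
#define KVM_HPAGE_SIZE(x)	(1UL << KVM_HPAGE_SHIFT(x))
#define KVM_PAGES_PER_HPAGE(x)	(KVM_HPAGE_SIZE(x) >> PAGE_SHIFT)

int main(void)
{
	unsigned long base_gfn = 0x100000;	/* hypothetical memslot base gfn */
	unsigned long gfn      = 0x100400;	/* hypothetical faulting gfn */

	/* Same computation as slot_largepage_idx()/gfn_to_rmap() above. */
	unsigned long idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
			    (base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));

	printf("pages per hpage: %lu, lpage_info index: %lu\n",
	       KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL), idx);
	return 0;
}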