author     Joerg Roedel <joerg.roedel@amd.com>  2009-07-27 10:30:42 -0400
committer  Avi Kivity <avi@redhat.com>  2009-09-10 01:33:18 -0400
commit     44ad9944f151390363fc6edaba466de8dfef050f (patch)
tree       3870f0e2b5fcc265b688681329ecb1baa9f7c8b2 /arch/x86/kvm/mmu.c
parent     aed665f7bbacf09121370bc7e21b4cc7d55fc5ef (diff)
KVM: MMU: make rmap code aware of mapping levels
This patch removes the largepage parameter from the rmap_add function. Together with rmap_remove, this function now uses the role.level field to determine whether the page is a huge page.

Signed-off-by: Joerg Roedel <joerg.roedel@amd.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  53
1 file changed, 28 insertions(+), 25 deletions(-)
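For readers following the arithmetic: the reworked gfn_to_rmap() picks the rmap for a gfn at a given mapping level by dividing both the gfn and the slot's base gfn by the number of 4K pages a huge page covers at that level, and it indexes lpage_info[] with level - 2 because level 1 mappings keep using the per-gfn slot->rmap array. Below is a minimal userspace sketch of that calculation, not the kernel code itself; pages_per_hpage() and lpage_idx() are hypothetical stand-ins for KVM_PAGES_PER_HPAGE() and the in-kernel lookup, assuming x86's 512-entry page tables.

#include <stdio.h>

/* Hypothetical stand-in for x86's KVM_PAGES_PER_HPAGE(level): each level up
 * covers 512 times as many 4K pages (512 at level 2, 512*512 at level 3). */
static unsigned long pages_per_hpage(int level)
{
        unsigned long pages = 1;

        while (--level > 0)
                pages *= 512;
        return pages;
}

/* Slot-relative index that gfn_to_rmap() computes for huge-page levels;
 * lpage_info[] is indexed with level - 2 since level 1 (4K) uses slot->rmap. */
static unsigned long lpage_idx(unsigned long gfn, unsigned long base_gfn, int level)
{
        return gfn / pages_per_hpage(level) - base_gfn / pages_per_hpage(level);
}

int main(void)
{
        unsigned long base_gfn = 0x100000;   /* memslot starts at gfn 0x100000 */
        unsigned long gfn = base_gfn + 1000; /* 1000 small pages into the slot */

        printf("2M rmap_pde idx: %lu\n", lpage_idx(gfn, base_gfn, 2)); /* -> 1 */
        printf("1G rmap_pde idx: %lu\n", lpage_idx(gfn, base_gfn, 3)); /* -> 0 */
        return 0;
}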
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 86c2551fe136..b93ad2c79c15 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -479,19 +479,19 @@ static int is_largepage_backed(struct kvm_vcpu *vcpu, gfn_t large_gfn)
  * Note: gfn must be unaliased before this function get called
  */
 
-static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
+static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int level)
 {
         struct kvm_memory_slot *slot;
         unsigned long idx;
 
         slot = gfn_to_memslot(kvm, gfn);
-        if (!lpage)
+        if (likely(level == PT_PAGE_TABLE_LEVEL))
                 return &slot->rmap[gfn - slot->base_gfn];
 
-        idx = (gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL)) -
-              (slot->base_gfn / KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL));
+        idx = (gfn / KVM_PAGES_PER_HPAGE(level)) -
+              (slot->base_gfn / KVM_PAGES_PER_HPAGE(level));
 
-        return &slot->lpage_info[0][idx].rmap_pde;
+        return &slot->lpage_info[level - 2][idx].rmap_pde;
 }
 
 /*
@@ -507,7 +507,7 @@ static unsigned long *gfn_to_rmap(struct kvm *kvm, gfn_t gfn, int lpage)
  * the spte was not added.
  *
  */
-static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
+static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn)
 {
         struct kvm_mmu_page *sp;
         struct kvm_rmap_desc *desc;
@@ -519,7 +519,7 @@ static int rmap_add(struct kvm_vcpu *vcpu, u64 *spte, gfn_t gfn, int lpage)
         gfn = unalias_gfn(vcpu->kvm, gfn);
         sp = page_header(__pa(spte));
         sp->gfns[spte - sp->spt] = gfn;
-        rmapp = gfn_to_rmap(vcpu->kvm, gfn, lpage);
+        rmapp = gfn_to_rmap(vcpu->kvm, gfn, sp->role.level);
         if (!*rmapp) {
                 rmap_printk("rmap_add: %p %llx 0->1\n", spte, *spte);
                 *rmapp = (unsigned long)spte;
@@ -589,7 +589,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
                 kvm_release_pfn_dirty(pfn);
         else
                 kvm_release_pfn_clean(pfn);
-        rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], is_large_pte(*spte));
+        rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
         if (!*rmapp) {
                 printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
                 BUG();
@@ -652,10 +652,10 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
         unsigned long *rmapp;
         u64 *spte;
-        int write_protected = 0;
+        int i, write_protected = 0;
 
         gfn = unalias_gfn(kvm, gfn);
-        rmapp = gfn_to_rmap(kvm, gfn, 0);
+        rmapp = gfn_to_rmap(kvm, gfn, PT_PAGE_TABLE_LEVEL);
 
         spte = rmap_next(kvm, rmapp, NULL);
         while (spte) {
@@ -677,21 +677,24 @@ static int rmap_write_protect(struct kvm *kvm, u64 gfn)
         }
 
         /* check for huge page mappings */
-        rmapp = gfn_to_rmap(kvm, gfn, 1);
-        spte = rmap_next(kvm, rmapp, NULL);
-        while (spte) {
-                BUG_ON(!spte);
-                BUG_ON(!(*spte & PT_PRESENT_MASK));
-                BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
-                pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
-                if (is_writeble_pte(*spte)) {
-                        rmap_remove(kvm, spte);
-                        --kvm->stat.lpages;
-                        __set_spte(spte, shadow_trap_nonpresent_pte);
-                        spte = NULL;
-                        write_protected = 1;
+        for (i = PT_DIRECTORY_LEVEL;
+             i < PT_PAGE_TABLE_LEVEL + KVM_NR_PAGE_SIZES; ++i) {
+                rmapp = gfn_to_rmap(kvm, gfn, i);
+                spte = rmap_next(kvm, rmapp, NULL);
+                while (spte) {
+                        BUG_ON(!spte);
+                        BUG_ON(!(*spte & PT_PRESENT_MASK));
+                        BUG_ON((*spte & (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK)) != (PT_PAGE_SIZE_MASK|PT_PRESENT_MASK));
+                        pgprintk("rmap_write_protect(large): spte %p %llx %lld\n", spte, *spte, gfn);
+                        if (is_writeble_pte(*spte)) {
+                                rmap_remove(kvm, spte);
+                                --kvm->stat.lpages;
+                                __set_spte(spte, shadow_trap_nonpresent_pte);
+                                spte = NULL;
+                                write_protected = 1;
+                        }
+                        spte = rmap_next(kvm, rmapp, spte);
                 }
-                spte = rmap_next(kvm, rmapp, spte);
         }
 
         return write_protected;
@@ -1815,7 +1818,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 
         page_header_update_slot(vcpu->kvm, sptep, gfn);
         if (!was_rmapped) {
-                rmap_count = rmap_add(vcpu, sptep, gfn, largepage);
+                rmap_count = rmap_add(vcpu, sptep, gfn);
                 if (!is_rmap_spte(*sptep))
                         kvm_release_pfn_clean(pfn);
                 if (rmap_count > RMAP_RECYCLE_THRESHOLD)