author		Izik Eidus <ieidus@redhat.com>	2009-09-23 14:47:16 -0400
committer	Avi Kivity <avi@redhat.com>	2009-10-04 11:04:48 -0400
commit		acb66dd051d0834c8b36d147ff83a8d39da0fe0b
tree		20c5fb817e34ddca6dd4ec20ce70613eba5b10d1
parent		6a54435560efdab1a08f429a954df4d6c740bddf
KVM: MMU: don't hold pagecount reference for pages mapped by sptes
When using mmu notifiers, we are allowed to remove the page count reference taken by get_user_pages() on a page that is mapped inside the shadow page tables. This is needed so that pagecount-versus-mapcount checks balance. (Right now kvm increases the pagecount but does not increase the mapcount when mapping a page into a shadow page table entry, so comparing pagecount against mapcount gives no reliable result.)

Signed-off-by: Izik Eidus <ieidus@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
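For context, here is a minimal sketch (illustrative, not from this patch) of the kind of pagecount/mapcount comparison that the extra get_user_pages() reference defeats; page_count() and page_mapcount() are the stock mm helpers, while page_refs_balanced() is a hypothetical name:

	#include <linux/mm.h>

	/*
	 * A page whose every reference comes from a mapping (plus the
	 * caller's own reference) is a candidate for e.g. merging by a
	 * page-sharing scanner. An unmatched pin taken by
	 * get_user_pages() for a shadow pte inflates page_count()
	 * without touching page_mapcount(), so this test would wrongly
	 * report a merely KVM-mapped page as busy.
	 */
	static bool page_refs_balanced(struct page *page)
	{
		return page_count(page) == page_mapcount(page) + 1;
	}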
-rw-r--r--	arch/x86/kvm/mmu.c	7
1 file changed, 2 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index eca41ae9f453..6c67b230e958 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -634,9 +634,7 @@ static void rmap_remove(struct kvm *kvm, u64 *spte)
 	if (*spte & shadow_accessed_mask)
 		kvm_set_pfn_accessed(pfn);
 	if (is_writeble_pte(*spte))
-		kvm_release_pfn_dirty(pfn);
-	else
-		kvm_release_pfn_clean(pfn);
+		kvm_set_pfn_dirty(pfn);
 	rmapp = gfn_to_rmap(kvm, sp->gfns[spte - sp->spt], sp->role.level);
 	if (!*rmapp) {
 		printk(KERN_ERR "rmap_remove: %p %llx 0->BUG\n", spte, *spte);
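The first hunk keeps the dirty-bit propagation but stops dropping a reference that this path no longer owns. For reference, kvm_release_pfn_dirty() in this era's virt/kvm/kvm_main.c amounts to the following (simplified sketch):

	void kvm_release_pfn_dirty(pfn_t pfn)
	{
		kvm_set_pfn_dirty(pfn);		/* mark the backing page dirty */
		kvm_release_pfn_clean(pfn);	/* drop the page reference */
	}

After this patch rmap_remove() performs only the first half; the reference itself is released as soon as the spte is instantiated in mmu_set_spte() (second hunk below).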
@@ -1877,8 +1875,7 @@ static void mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
 	page_header_update_slot(vcpu->kvm, sptep, gfn);
 	if (!was_rmapped) {
 		rmap_count = rmap_add(vcpu, sptep, gfn);
-		if (!is_rmap_spte(*sptep))
-			kvm_release_pfn_clean(pfn);
+		kvm_release_pfn_clean(pfn);
 		if (rmap_count > RMAP_RECYCLE_THRESHOLD)
 			rmap_recycle(vcpu, sptep, gfn);
 	} else {
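The second hunk drops the get_user_pages() reference unconditionally once the spte is tracked in the rmap, instead of keeping it for mapped sptes. This is safe only because mmu notifiers guarantee KVM a teardown callback before the page can go away; roughly (illustrative call chain, function names from this era's tree):

	mmu_notifier_invalidate_page()
	  -> kvm_mmu_notifier_invalidate_page()	/* virt/kvm/kvm_main.c */
	    -> kvm_unmap_hva()
	      -> kvm_handle_hva()		/* arch/x86/kvm/mmu.c */
	        -> kvm_unmap_rmapp() -> rmap_remove()

so the spte is torn down, and its dirty state transferred, before the core MM frees or migrates the page.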