path: root/arch/x86/kvm/mmu.c
author		Dave Hansen <dave@linux.vnet.ibm.com>	2010-08-19 21:11:28 -0400
committer	Avi Kivity <avi@redhat.com>		2010-10-24 04:51:18 -0400
commit		49d5ca26636cb8feb05aff92fc4dba3e494ec683 (patch)
tree		180d5d2926e1282654c2c9438612aa567d9eb68f /arch/x86/kvm/mmu.c
parent		39de71ec5397f374aed95e99509372d605e1407c (diff)
KVM: replace x86 kvm n_free_mmu_pages with n_used_mmu_pages
Doing this makes the code much more readable. That's borne out by the fact that this patch removes code. "used" also happens to be the number that we need to return back to the slab code when our shrinker gets called. Keeping this value, as opposed to "free", makes the next patch simpler.

So, 'struct kvm' is kzalloc()'d. 'struct kvm_arch' is a structure member (and not a pointer) of 'struct kvm'. That means they start out zeroed. I _think_ they get initialized properly by kvm_mmu_change_mmu_pages(). But, that only happens via kvm ioctls.

Another benefit of storing 'used' instead of 'free' is that the values are consistent from the moment the structure is allocated: no negative "used" value.

Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com>
Signed-off-by: Tim Pepper <lnxninja@linux.vnet.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
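The shrinker argument above is easier to see with the counter in hand. The sketch below is illustrative only and is not part of this patch: assuming a simplified callback shape, it shows how a per-VM "used" count can be handed straight back to the slab shrinker, and how an "available" figure can still be derived on demand. example_mmu_shrink_count() and example_mmu_available_pages() are hypothetical names; n_used_mmu_pages and n_max_mmu_pages are the real fields touched by this diff, and both are zero from kzalloc() time onward.

	/* Illustrative sketch only; assumes KVM's mmu.c context. */
	static unsigned long example_mmu_shrink_count(struct kvm *kvm)
	{
		/* Valid from allocation time: kzalloc() leaves the field at 0. */
		return kvm->arch.n_used_mmu_pages;
	}

	static unsigned int example_mmu_available_pages(struct kvm *kvm)
	{
		/* Derive "free" on demand rather than tracking it separately. */
		if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
			return kvm->arch.n_max_mmu_pages - kvm->arch.n_used_mmu_pages;
		return 0;
	}

Both helpers reduce to a plain field read or a single subtraction, which is the simplification the commit message says the next patch relies on.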
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	27
1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 6979e7d1464e..ff39b85d7a4d 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -980,7 +980,7 @@ static void kvm_mmu_free_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (!sp->role.direct)
 		__free_page(virt_to_page(sp->gfns));
 	kmem_cache_free(mmu_page_header_cache, sp);
-	++kvm->arch.n_free_mmu_pages;
+	--kvm->arch.n_used_mmu_pages;
 }
 
 static unsigned kvm_page_table_hashfn(gfn_t gfn)
@@ -1003,7 +1003,7 @@ static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
 	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
 	sp->multimapped = 0;
 	sp->parent_pte = parent_pte;
-	--vcpu->kvm->arch.n_free_mmu_pages;
+	++vcpu->kvm->arch.n_used_mmu_pages;
 	return sp;
 }
 
@@ -1689,41 +1689,32 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
 /*
  * Changing the number of mmu pages allocated to the vm
- * Note: if kvm_nr_mmu_pages is too small, you will get dead lock
+ * Note: if goal_nr_mmu_pages is too small, you will get dead lock
  */
-void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
+void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
-	int used_pages;
 	LIST_HEAD(invalid_list);
-
-	used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
-	used_pages = max(0, used_pages);
-
 	/*
 	 * If we set the number of mmu pages to be smaller be than the
 	 * number of actived pages , we must to free some mmu pages before we
 	 * change the value
 	 */
 
-	if (used_pages > kvm_nr_mmu_pages) {
-		while (used_pages > kvm_nr_mmu_pages &&
+	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
+		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
 			!list_empty(&kvm->arch.active_mmu_pages)) {
 			struct kvm_mmu_page *page;
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
-			used_pages -= kvm_mmu_prepare_zap_page(kvm, page,
+			kvm_mmu_prepare_zap_page(kvm, page,
 							       &invalid_list);
 		}
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
-		kvm_nr_mmu_pages = used_pages;
-		kvm->arch.n_free_mmu_pages = 0;
+		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
-	else
-		kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
-			 - kvm->arch.n_max_mmu_pages;
 
-	kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
+	kvm->arch.n_max_mmu_pages = goal_nr_mmu_pages;
 }
 
 static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)