aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
authorDave Hansen <dave@linux.vnet.ibm.com>2010-08-19 21:11:14 -0400
committerAvi Kivity <avi@redhat.com>2010-10-24 04:51:18 -0400
commit39de71ec5397f374aed95e99509372d605e1407c (patch)
treea2da0845195322d50eb1d5c5bff067dd8f952fef /arch/x86/kvm/mmu.c
parente0df7b9f6cee43c01d6f4a8491bccfd410cb86e1 (diff)
KVM: rename x86 kvm->arch.n_alloc_mmu_pages
arch.n_alloc_mmu_pages is a poor choice of name. This value truly means, "the number of pages which _may_ be allocated". But, reading the name, "n_alloc_mmu_pages" implies "the number of allocated mmu pages", which is dead wrong. It's really the high watermark, so let's give it a name to match: n_max_mmu_pages. This change will make the next few patches much more obvious and easy to read. Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com> Signed-off-by: Tim Pepper <lnxninja@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--arch/x86/kvm/mmu.c8
1 file changed, 4 insertions, 4 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 625b17894661..6979e7d1464e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1696,7 +1696,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1696 int used_pages; 1696 int used_pages;
1697 LIST_HEAD(invalid_list); 1697 LIST_HEAD(invalid_list);
1698 1698
1699 used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm); 1699 used_pages = kvm->arch.n_max_mmu_pages - kvm_mmu_available_pages(kvm);
1700 used_pages = max(0, used_pages); 1700 used_pages = max(0, used_pages);
1701 1701
1702 /* 1702 /*
@@ -1721,9 +1721,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
1721 } 1721 }
1722 else 1722 else
1723 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages 1723 kvm->arch.n_free_mmu_pages += kvm_nr_mmu_pages
1724 - kvm->arch.n_alloc_mmu_pages; 1724 - kvm->arch.n_max_mmu_pages;
1725 1725
1726 kvm->arch.n_alloc_mmu_pages = kvm_nr_mmu_pages; 1726 kvm->arch.n_max_mmu_pages = kvm_nr_mmu_pages;
1727} 1727}
1728 1728
1729static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn) 1729static int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn)
@@ -3141,7 +3141,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
3141 3141
3142 idx = srcu_read_lock(&kvm->srcu); 3142 idx = srcu_read_lock(&kvm->srcu);
3143 spin_lock(&kvm->mmu_lock); 3143 spin_lock(&kvm->mmu_lock);
3144 npages = kvm->arch.n_alloc_mmu_pages - 3144 npages = kvm->arch.n_max_mmu_pages -
3145 kvm_mmu_available_pages(kvm); 3145 kvm_mmu_available_pages(kvm);
3146 cache_count += npages; 3146 cache_count += npages;
3147 if (!kvm_freed && nr_to_scan > 0 && npages > 0) { 3147 if (!kvm_freed && nr_to_scan > 0 && npages > 0) {