author	Dave Hansen <dave@linux.vnet.ibm.com>	2010-08-19 21:11:05 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:51:17 -0400
commit	e0df7b9f6cee43c01d6f4a8491bccfd410cb86e1 (patch)
tree	4b3eefa6a3ebea76e429baa304d50ba4501c02ff /arch/x86/kvm/mmu.c
parent	61429142802b068609ffd8ef48d891e05eeea0b9 (diff)
KVM: abstract kvm x86 mmu->n_free_mmu_pages
"free" is a poor name for this value. In this context, it means, "the number of mmu pages which this kvm instance should be able to allocate." But "free" implies much more that the objects are there and ready for use. "available" is a much better description, especially when you see how it is calculated. In this patch, we abstract its use into a function. We'll soon replace the function's contents by calculating the value in a different way. All of the reads of n_free_mmu_pages are taken care of in this patch. The modification sites will be handled in a patch later in the series. Signed-off-by: Dave Hansen <dave@linux.vnet.ibm.com> Signed-off-by: Tim Pepper <lnxninja@linux.vnet.ibm.com> Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	11
1 file changed, 4 insertions, 7 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ff95d418750d..625b17894661 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1696,7 +1696,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 	int used_pages;
 	LIST_HEAD(invalid_list);
 
-	used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
+	used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
 	used_pages = max(0, used_pages);
 
 	/*
@@ -2959,18 +2959,15 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	int free_pages;
 	LIST_HEAD(invalid_list);
 
-	free_pages = vcpu->kvm->arch.n_free_mmu_pages;
-	while (free_pages < KVM_REFILL_PAGES &&
+	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
 	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
 		struct kvm_mmu_page *sp;
 
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
-		free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-						       &invalid_list);
+		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3145,7 +3142,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
 	npages = kvm->arch.n_alloc_mmu_pages -
-		 kvm->arch.n_free_mmu_pages;
+		 kvm_mmu_available_pages(kvm);
 	cache_count += npages;
 	if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
 		freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,