 arch/x86/kvm/mmu.c | 11 ++++-------
 arch/x86/kvm/mmu.h |  7 ++++++-
 2 files changed, 10 insertions(+), 8 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ff95d418750d..625b17894661 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1696,7 +1696,7 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 	int used_pages;
 	LIST_HEAD(invalid_list);
 
-	used_pages = kvm->arch.n_alloc_mmu_pages - kvm->arch.n_free_mmu_pages;
+	used_pages = kvm->arch.n_alloc_mmu_pages - kvm_mmu_available_pages(kvm);
 	used_pages = max(0, used_pages);
 
 	/*
@@ -2959,18 +2959,15 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	int free_pages;
 	LIST_HEAD(invalid_list);
 
-	free_pages = vcpu->kvm->arch.n_free_mmu_pages;
-	while (free_pages < KVM_REFILL_PAGES &&
+	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
 	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
 		struct kvm_mmu_page *sp;
 
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
-		free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
-						       &invalid_list);
+		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -3145,7 +3142,7 @@ static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
 		npages = kvm->arch.n_alloc_mmu_pages -
-			 kvm->arch.n_free_mmu_pages;
+			 kvm_mmu_available_pages(kvm);
 		cache_count += npages;
 		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
 			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm,
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index be66759321a5..c3a689ae7df0 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -50,9 +50,14 @@
 
 int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4]);
 
+static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
+{
+	return kvm->arch.n_free_mmu_pages;
+}
+
 static inline void kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	if (unlikely(vcpu->kvm->arch.n_free_mmu_pages < KVM_MIN_FREE_MMU_PAGES))
+	if (unlikely(kvm_mmu_available_pages(vcpu->kvm) < KVM_MIN_FREE_MMU_PAGES))
 		__kvm_mmu_free_some_pages(vcpu);
 }
 
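For reference, a minimal userspace sketch of the accessor pattern this patch introduces. The structs below are simplified stand-ins, not the kernel's real definitions; only kvm_mmu_available_pages() itself mirrors the patch. The value of the helper is that every "how many shadow pages are free" check now goes through one choke point, so a later change could redefine the underlying bookkeeping (for example, tracking used pages instead of free pages) without touching any caller.

/*
 * Illustrative sketch only, not part of the patch: toy stand-ins
 * for struct kvm / struct kvm_arch showing how callers compute
 * "used" pages through the new accessor rather than by reading
 * kvm->arch.n_free_mmu_pages directly.
 */
#include <stdio.h>

struct kvm_arch {
	unsigned int n_alloc_mmu_pages;
	unsigned int n_free_mmu_pages;
};

struct kvm {
	struct kvm_arch arch;
};

static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
{
	/* Trivial today; the single choke point is what matters. */
	return kvm->arch.n_free_mmu_pages;
}

int main(void)
{
	struct kvm vm = { .arch = { .n_alloc_mmu_pages = 64,
				    .n_free_mmu_pages = 24 } };

	/* Same computation kvm_mmu_change_mmu_pages() and
	 * mmu_shrink() perform after this patch. */
	unsigned int used = vm.arch.n_alloc_mmu_pages -
			    kvm_mmu_available_pages(&vm);

	printf("used shadow pages: %u\n", used);	/* prints 40 */
	return 0;
}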