author    Gleb Natapov <gleb@redhat.com>  2012-06-04 07:53:23 -0400
committer Avi Kivity <avi@redhat.com>     2012-06-05 10:46:43 -0400
commit    1952639665e92481c34c34c3e2a71bf3e66ba362 (patch)
tree      0f6a1904bffb0d59e9b74a46fb175207106436de
parent    a6bb7929677aacfce3f864c3cdacaa7d527945d5 (diff)
KVM: MMU: do not iterate over all VMs in mmu_shrink()
mmu_shrink() needlessly iterates over all VMs even though it will not
attempt to free mmu pages from more than one of them. Fix that, and also
check the used mmu pages count outside of the VM lock to skip inactive
VMs faster.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  arch/x86/kvm/mmu.c  27
1 file changed, 17 insertions(+), 10 deletions(-)
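
For context, mmu_shrink() is KVM's callback into the kernel's memory-shrinker
framework: the VM core invokes it with sc->nr_to_scan == 0 purely to query how
many pages are reclaimable, and with a positive count to request actual
freeing. Below is a minimal sketch of how such a shrinker is wired up under
the old single-callback API used by kernels of this era; the seeks value and
the init-hook name are illustrative assumptions, not lines quoted from this
tree.

/*
 * Sketch of the pre-3.12 single-callback shrinker API. Illustrative
 * only: the .seeks value and the init hook are assumptions, not code
 * quoted from this commit.
 */
#include <linux/mm.h>           /* struct shrinker, struct shrink_control */

static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc);

static struct shrinker mmu_shrinker = {
        .shrink = mmu_shrink,   /* sc->nr_to_scan == 0: report count only */
        .seeks  = DEFAULT_SEEKS * 10,
};

static int __init mmu_shrinker_setup(void)
{
        register_shrinker(&mmu_shrinker);
        return 0;
}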
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d07e436b7a42..1ca7164a74f1 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -3944,7 +3944,6 @@ static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
         struct kvm *kvm;
-        struct kvm *kvm_freed = NULL;
         int nr_to_scan = sc->nr_to_scan;
 
         if (nr_to_scan == 0)
@@ -3956,22 +3955,30 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
         int idx;
         LIST_HEAD(invalid_list);
 
+        /*
+         * n_used_mmu_pages is accessed without holding kvm->mmu_lock
+         * here. We may skip a VM instance errorneosly, but we do not
+         * want to shrink a VM that only started to populate its MMU
+         * anyway.
+         */
+        if (kvm->arch.n_used_mmu_pages > 0) {
+                if (!nr_to_scan--)
+                        break;
+                continue;
+        }
+
         idx = srcu_read_lock(&kvm->srcu);
         spin_lock(&kvm->mmu_lock);
-        if (!kvm_freed && nr_to_scan > 0 &&
-            kvm->arch.n_used_mmu_pages > 0) {
-                kvm_mmu_remove_some_alloc_mmu_pages(kvm,
-                                                    &invalid_list);
-                kvm_freed = kvm;
-        }
-        nr_to_scan--;
 
+        kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
         kvm_mmu_commit_zap_page(kvm, &invalid_list);
+
         spin_unlock(&kvm->mmu_lock);
         srcu_read_unlock(&kvm->srcu, idx);
+
+        list_move_tail(&kvm->vm_list, &vm_list);
+        break;
 }
-        if (kvm_freed)
-                list_move_tail(&kvm_freed->vm_list, &vm_list);
 
         raw_spin_unlock(&kvm_lock);
 
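
Applied in full, the hunks above leave mmu_shrink() reading roughly as
follows. This consolidated view is assembled from the diff itself; the few
lines the hunks do not show (the goto target, the kvm_lock acquisition, the
list walk, and the return value) are reconstructed from the surrounding
function as it existed in trees of this vintage and should be read as an
approximation, not an authoritative quote.

static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
{
        struct kvm *kvm;
        int nr_to_scan = sc->nr_to_scan;

        if (nr_to_scan == 0)
                goto out;       /* query only: skip the list walk */

        raw_spin_lock(&kvm_lock);

        list_for_each_entry(kvm, &vm_list, vm_list) {
                int idx;
                LIST_HEAD(invalid_list);

                /*
                 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
                 * here. We may skip a VM instance errorneosly, but we do not
                 * want to shrink a VM that only started to populate its MMU
                 * anyway.
                 */
                if (kvm->arch.n_used_mmu_pages > 0) {
                        if (!nr_to_scan--)
                                break;
                        continue;
                }

                idx = srcu_read_lock(&kvm->srcu);
                spin_lock(&kvm->mmu_lock);

                kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
                kvm_mmu_commit_zap_page(kvm, &invalid_list);

                spin_unlock(&kvm->mmu_lock);
                srcu_read_unlock(&kvm->srcu, idx);

                /* shrink exactly one VM, then rotate it to the tail so the
                 * next shrink pass starts with a different VM */
                list_move_tail(&kvm->vm_list, &vm_list);
                break;
        }

        raw_spin_unlock(&kvm_lock);

out:
        return percpu_counter_read_positive(&kvm_total_used_mmu_pages);
}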