Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c | 13
1 file changed, 9 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 8e312a2e1412..399c177212b5 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4155,16 +4155,21 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		LIST_HEAD(invalid_list);
 
 		/*
+		 * Never scan more than sc->nr_to_scan VM instances.
+		 * Will not hit this condition practically since we do not try
+		 * to shrink more than one VM and it is very unlikely to see
+		 * !n_used_mmu_pages so many times.
+		 */
+		if (!nr_to_scan--)
+			break;
+		/*
 		 * n_used_mmu_pages is accessed without holding kvm->mmu_lock
 		 * here. We may skip a VM instance erroneously, but we do not
 		 * want to shrink a VM that only started to populate its MMU
 		 * anyway.
 		 */
-		if (kvm->arch.n_used_mmu_pages > 0) {
-			if (!nr_to_scan--)
-				break;
+		if (!kvm->arch.n_used_mmu_pages)
 			continue;
-		}
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
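
The hunk above fixes an inverted check: in the old code, a VM whose MMU was populated consumed one unit of the scan budget and was then skipped, so only VMs with nothing to free ever reached the freeing path below. After the patch, the budget is charged once per VM scanned, empty VMs are merely skipped, and the first VM that actually has used MMU pages is shrunk. The following standalone userspace sketch contrasts the two behaviours; the struct, sample values, and pick_victim_* helpers are hypothetical illustrations, and only the two if-sequences mirror the patched code:

/* Hypothetical userspace model of the scan-budget logic, not kernel code. */
#include <stdio.h>

struct vm {
	const char *name;
	unsigned long n_used_mmu_pages;
};

/* Old logic: a VM with used pages consumed the budget and was then
 * skipped, so only VMs with an empty MMU reached the freeing path. */
static const struct vm *pick_victim_old(const struct vm *vms, int n,
					int nr_to_scan)
{
	for (int i = 0; i < n; i++) {
		if (vms[i].n_used_mmu_pages > 0) {
			if (!nr_to_scan--)
				break;
			continue;
		}
		return &vms[i];	/* "shrinks" an empty VM: nothing to free */
	}
	return NULL;
}

/* New logic: spend one unit of budget per VM scanned, skip empty VMs,
 * and shrink the first VM that actually has pages to free. */
static const struct vm *pick_victim_new(const struct vm *vms, int n,
					int nr_to_scan)
{
	for (int i = 0; i < n; i++) {
		if (!nr_to_scan--)
			break;
		if (!vms[i].n_used_mmu_pages)
			continue;
		return &vms[i];	/* this VM has pages that can be freed */
	}
	return NULL;
}

int main(void)
{
	const struct vm vms[] = {
		{ "vm0", 512 },	/* populated MMU */
		{ "vm1", 0 },	/* empty MMU */
		{ "vm2", 64 },	/* populated MMU */
	};
	int n = sizeof(vms) / sizeof(vms[0]);
	const struct vm *old = pick_victim_old(vms, n, 128);
	const struct vm *new = pick_victim_new(vms, n, 128);

	printf("old logic would shrink: %s\n", old ? old->name : "(none)");
	printf("new logic would shrink: %s\n", new ? new->name : "(none)");
	return 0;
}

With the sample values above, the old logic selects vm1, whose MMU is empty, so the shrinker frees nothing; the new logic selects vm0, the first VM with pages that can actually be reclaimed.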