author     Gui Jianfeng <guijianfeng@cn.fujitsu.com>    2010-04-26 22:39:49 -0400
committer  Avi Kivity <avi@redhat.com>                  2010-05-19 04:36:23 -0400
commit     d35b8dd9355805f17225fdbfee4bc704d7bf7547 (patch)
tree       7e287a657cf320b82cdba21d3db860be532c556d     /arch/x86/kvm/mmu.c
parent     5a7388c2d2faa2cc70c2d4717c8d7836d55459e0 (diff)
KVM: Fix mmu shrinker error
kvm_mmu_remove_one_alloc_mmu_page() assumes that kvm_mmu_zap_page() reclaims only one sp, but that's not the case. This causes the mmu shrinker to return a wrong number. This patch fixes the counting error.

Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
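For readers unfamiliar with the accounting, here is a minimal user-space C sketch (hypothetical names and types, not the kernel code) of why the shrinker must subtract the value returned by kvm_mmu_zap_page() plus one for the page itself, rather than a constant 1:

#include <stdio.h>

/* Hypothetical stand-in for a shadow page; it only tracks how many
 * extra pages get reclaimed when it is zapped (e.g. unsync children). */
struct shadow_page {
	int extra_reclaimed;
};

/* Models kvm_mmu_zap_page(): returns the number of additional shadow
 * pages reclaimed besides the page passed in. */
static int zap_page(const struct shadow_page *sp)
{
	return sp->extra_reclaimed;
}

int main(void)
{
	struct shadow_page sp = { .extra_reclaimed = 3 };
	int cache_count = 10;

	int old_count = cache_count - 1;                   /* old code: 9, undercounts  */
	int new_count = cache_count - (zap_page(&sp) + 1); /* fixed: 6, page + children */

	printf("old accounting: %d, fixed accounting: %d\n", old_count, new_count);
	return 0;
}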
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--   arch/x86/kvm/mmu.c   10
1 file changed, 5 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9696d654b01f..18d2f584945b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2902,13 +2902,13 @@ restart:
 	kvm_flush_remote_tlbs(kvm);
 }
 
-static void kvm_mmu_remove_one_alloc_mmu_page(struct kvm *kvm)
+static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *page;
 
 	page = container_of(kvm->arch.active_mmu_pages.prev,
 			    struct kvm_mmu_page, link);
-	kvm_mmu_zap_page(kvm, page);
+	return kvm_mmu_zap_page(kvm, page) + 1;
 }
 
 static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
@@ -2920,7 +2920,7 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 	spin_lock(&kvm_lock);
 
 	list_for_each_entry(kvm, &vm_list, vm_list) {
-		int npages, idx;
+		int npages, idx, freed_pages;
 
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
@@ -2928,8 +2928,8 @@ static int mmu_shrink(int nr_to_scan, gfp_t gfp_mask)
 			  kvm->arch.n_free_mmu_pages;
 		cache_count += npages;
 		if (!kvm_freed && nr_to_scan > 0 && npages > 0) {
-			kvm_mmu_remove_one_alloc_mmu_page(kvm);
-			cache_count--;
+			freed_pages = kvm_mmu_remove_some_alloc_mmu_pages(kvm);
+			cache_count -= freed_pages;
 			kvm_freed = kvm;
 		}
 		nr_to_scan--;