author    Gui Jianfeng <guijianfeng@cn.fujitsu.com>  2010-05-04 21:03:49 -0400
committer Avi Kivity <avi@redhat.com>                2010-08-01 03:35:39 -0400
commit    54a4f0239f2e98bc0842818f611a4cf73bb7dd35 (patch)
tree      f89db0a11d45d00244b4e789d032a2e83e765f27 /arch/x86/kvm/mmu.c
parent    518c5a05e89a79e498c95c3e29f29bd236b3c972 (diff)
KVM: MMU: make kvm_mmu_zap_page() return the number of pages it actually freed
Currently, kvm_mmu_zap_page() returns the number of freed child shadow pages (sp). This might confuse the caller, because the caller doesn't know how many pages were actually freed. Let's make kvm_mmu_zap_page() return the number of pages it actually freed.

Signed-off-by: Gui Jianfeng <guijianfeng@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
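For illustration only, not part of the patch: a minimal userspace C sketch of the accounting contract this change establishes. Here struct mmu_page and zap_page() are simplified stand-ins for the kernel's struct kvm_mmu_page and kvm_mmu_zap_page(); the "children" field models child shadow pages freed as a side effect of zapping.

#include <stdio.h>

/* Simplified stand-in for struct kvm_mmu_page. */
struct mmu_page {
	int root_count;   /* non-zero while the page backs an active root */
	int children;     /* child pages freed as a side effect of zapping */
};

/*
 * New contract sketched by the patch: return the total number of pages
 * freed, counting the page itself when it is actually released.
 */
static int zap_page(struct mmu_page *sp)
{
	int ret = sp->children;   /* children freed while unlinking */

	if (!sp->root_count)
		ret++;            /* count self, as the patch adds */

	return ret;
}

int main(void)
{
	struct mmu_page leaf = { .root_count = 0, .children = 2 };
	int used_pages = 10;

	/* The caller now simply subtracts the return value. */
	used_pages -= zap_page(&leaf);
	printf("used_pages = %d\n", used_pages);   /* prints 7 */
	return 0;
}

Under the old contract the function returned only the number of freed children, so callers had to compensate with an extra "used_pages--" or "+ 1", which is exactly what the removed lines in the hunks below take out.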
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  5
1 file changed, 3 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index b666d8d106a9..be981b1f1881 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1504,6 +1504,8 @@ static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 	if (sp->unsync)
 		kvm_unlink_unsync_page(kvm, sp);
 	if (!sp->root_count) {
+		/* Count self */
+		ret++;
 		hlist_del(&sp->hash_link);
 		kvm_mmu_free_page(kvm, sp);
 	} else {
@@ -1540,7 +1542,6 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int kvm_nr_mmu_pages)
 		page = container_of(kvm->arch.active_mmu_pages.prev,
 				    struct kvm_mmu_page, link);
 		used_pages -= kvm_mmu_zap_page(kvm, page);
-		used_pages--;
 	}
 	kvm_nr_mmu_pages = used_pages;
 	kvm->arch.n_free_mmu_pages = 0;
@@ -2941,7 +2942,7 @@ static int kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm)
 
 	page = container_of(kvm->arch.active_mmu_pages.prev,
 			    struct kvm_mmu_page, link);
-	return kvm_mmu_zap_page(kvm, page) + 1;
+	return kvm_mmu_zap_page(kvm, page);
 }
 
 static int mmu_shrink(struct shrinker *shrink, int nr_to_scan, gfp_t gfp_mask)