author    Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>    2010-06-04 09:54:38 -0400
committer Avi Kivity <avi@redhat.com>    2010-08-01 03:39:27 -0400
commit    103ad25a86a6ec5418b3dca6a0d2bf2ba01a8318 (patch)
tree      36ec9c5bad8446d57a974c80d5147e1db3e18d65
parent    7775834a233478ec855b97e30727248f12eafe76 (diff)
KVM: MMU: don't get free page number in the loop
In a later patch, the way sps are zapped will be changed to the following:

	kvm_mmu_prepare_zap_page A
	kvm_mmu_prepare_zap_page B
	kvm_mmu_prepare_zap_page C
	....
	kvm_mmu_commit_zap_page

[ zapping multiple sps then needs only one kvm_mmu_commit_zap_page call ]

In __kvm_mmu_free_some_pages(), the free page count is re-read from
'vcpu->kvm->arch.n_free_mmu_pages' on every loop iteration, which gets in
the way of switching to kvm_mmu_prepare_zap_page() and
kvm_mmu_commit_zap_page(), since kvm_mmu_prepare_zap_page() does not
actually free the sp.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  arch/x86/kvm/mmu.c  7
1 file changed, 5 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9b849a70742d..1aad8e713f78 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2863,13 +2863,16 @@ EXPORT_SYMBOL_GPL(kvm_mmu_unprotect_page_virt);
 
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
-	while (vcpu->kvm->arch.n_free_mmu_pages < KVM_REFILL_PAGES &&
+	int free_pages;
+
+	free_pages = vcpu->kvm->arch.n_free_mmu_pages;
+	while (free_pages < KVM_REFILL_PAGES &&
 	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
 		struct kvm_mmu_page *sp;
 
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
-		kvm_mmu_zap_page(vcpu->kvm, sp);
+		free_pages += kvm_mmu_zap_page(vcpu->kvm, sp);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 }
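
For context, tracking free_pages locally and advancing it by the zap routine's return value is what later allows the loop to switch to the batched prepare/commit scheme described in the commit message. A rough sketch of that eventual shape follows; the invalid_list parameter and the exact prototypes of kvm_mmu_prepare_zap_page() / kvm_mmu_commit_zap_page() are assumptions for illustration, not part of this patch.

	/*
	 * Sketch only: how __kvm_mmu_free_some_pages() could look once
	 * zapping is split into prepare/commit.  The invalid_list batching
	 * parameter is an assumption, not taken from this patch.
	 */
	void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
	{
		int free_pages;
		LIST_HEAD(invalid_list);

		/* Read the counter once; prepare-only zapping does not update it. */
		free_pages = vcpu->kvm->arch.n_free_mmu_pages;
		while (free_pages < KVM_REFILL_PAGES &&
		       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
			struct kvm_mmu_page *sp;

			sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
					  struct kvm_mmu_page, link);
			/* Queues sp for zapping and returns how many sps were covered. */
			free_pages += kvm_mmu_prepare_zap_page(vcpu->kvm, sp,
							       &invalid_list);
			++vcpu->kvm->stat.mmu_recycled;
		}
		/* A single commit frees everything queued above. */
		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
	}

Because free_pages is advanced locally, the loop still makes progress even though vcpu->kvm->arch.n_free_mmu_pages does not change until the commit step, which is exactly why this patch stops re-reading the global counter in the loop condition.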