author	Xiaotian Feng <dfeng@redhat.com>	2010-08-23 22:31:07 -0400
committer	Avi Kivity <avi@redhat.com>	2010-10-24 04:51:21 -0400
commit	80b63faf028fba79e630d3643b0e615bddf4067b (patch)
tree	0100096362eac40c2da099ee5dc879daa2504d20 /arch/x86/kvm/mmu.c
parent	e4abac67b756680c63af369f053d11991616aeb4 (diff)
KVM: MMU: fix regression from rework mmu_shrink() code
The latest mmu_shrink() rework makes the kernel update kvm->arch.n_used_mmu_pages/kvm->arch.n_max_mmu_pages in kvm_mmu_free_page()/kvm_mmu_alloc_page(), which are called from kvm_mmu_commit_zap_page(). As a result, kvm->arch.n_used_mmu_pages (and thus kvm_mmu_available_pages(vcpu->kvm)) is unchanged after kvm_mmu_prepare_zap_page() alone, so the while loops in kvm_mmu_change_mmu_pages() and __kvm_mmu_free_some_pages() never terminate. Moving kvm_mmu_commit_zap_page() inside the loops makes them terminate as expected.

Reported-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Xiaotian Feng <dfeng@redhat.com>
Tested-by: Avi Kivity <avi@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Tim Pepper <lnxninja@linux.vnet.ibm.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
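For context, here is a condensed sketch of the loop in kvm_mmu_change_mmu_pages() as it looks with this patch applied (simplified from the first hunk below; declarations and the surrounding if omitted). The point is that n_used_mmu_pages is only decremented when the page is actually freed from kvm_mmu_commit_zap_page(), so the commit must run inside the loop for the loop condition to ever become false.

	while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
	       !list_empty(&kvm->arch.active_mmu_pages)) {
		page = container_of(kvm->arch.active_mmu_pages.prev,
				    struct kvm_mmu_page, link);
		/* queues the page on invalid_list; n_used_mmu_pages is untouched */
		kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
		/* frees the page via kvm_mmu_free_page(), dropping n_used_mmu_pages */
		kvm_mmu_commit_zap_page(kvm, &invalid_list);
	}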
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	7
1 file changed, 3 insertions(+), 4 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 33d7af50cf8e..c2ac7004441a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1720,10 +1720,9 @@ void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 
 			page = container_of(kvm->arch.active_mmu_pages.prev,
 					    struct kvm_mmu_page, link);
-			kvm_mmu_prepare_zap_page(kvm, page,
-						 &invalid_list);
+			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
+			kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		}
-		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
 
@@ -2972,9 +2971,9 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
 				  struct kvm_mmu_page, link);
 		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
+		kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
-	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
 }
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)