author	Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>	2013-03-06 02:06:58 -0500
committer	Marcelo Tosatti <mtosatti@redhat.com>	2013-03-07 15:26:27 -0500
commit	5da596078f915a62e39a20e582308eab91b88c9a (patch)
tree	32f91ce7582ae5c912863d1f781fc674eb7a167f /arch/x86/kvm/mmu.c
parent	945315b9dbbe102bb3393a34ea4a10fb2a5ff303 (diff)
KVM: MMU: Introduce a helper function for FIFO zapping
Make the code for zapping the oldest mmu page, placed at the tail of the
active list, a separate function.

Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@lab.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	55
1 file changed, 23 insertions(+), 32 deletions(-)
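
Note: the helper introduced by the diff below captures a simple FIFO idea: the oldest shadow page sits at the tail of the age-ordered active list, so zapping it amounts to "take the tail entry, zap it, report whether anything was there". The following is a rough userspace sketch of that pattern only, not kernel code; struct page, struct vm and the function names are made up for illustration.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy stand-ins for the kernel's age-ordered active list of shadow pages. */
	struct page {
		int id;
		struct page *next;	/* newest first, oldest at the tail */
	};

	struct vm {
		struct page *active_head;
		int n_used_pages;
	};

	/* Zap the oldest page (the list tail); return false if nothing is left. */
	static bool prepare_zap_oldest_page(struct vm *vm)
	{
		struct page **pp = &vm->active_head;

		if (!*pp)
			return false;

		while ((*pp)->next)	/* walk to the tail entry */
			pp = &(*pp)->next;

		printf("zapping page %d\n", (*pp)->id);
		*pp = NULL;		/* unlink the oldest entry */
		vm->n_used_pages--;
		return true;
	}

	/* Callers just loop on the helper until they hit their goal or run dry. */
	static void shrink_to(struct vm *vm, int goal)
	{
		while (vm->n_used_pages > goal)
			if (!prepare_zap_oldest_page(vm))
				break;
	}

	int main(void)
	{
		struct page p3 = { 3, NULL }, p2 = { 2, &p3 }, p1 = { 1, &p2 };
		struct vm vm = { &p1, 3 };

		shrink_to(&vm, 1);	/* zaps pages 3 and 2, oldest first */
		return 0;
	}

With the empty-list check and the tail lookup folded into one bool-returning helper, the three call sites in the patch below reduce to the same loop-and-break shape.
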
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0f42645a063c..fdacabba6f62 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2110,6 +2110,21 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 	}
 }
 
+static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
+					struct list_head *invalid_list)
+{
+	struct kvm_mmu_page *sp;
+
+	if (list_empty(&kvm->arch.active_mmu_pages))
+		return false;
+
+	sp = list_entry(kvm->arch.active_mmu_pages.prev,
+			struct kvm_mmu_page, link);
+	kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+
+	return true;
+}
+
 /*
  * Changing the number of mmu pages allocated to the vm
  * Note: if goal_nr_mmu_pages is too small, you will get dead lock
@@ -2117,23 +2132,15 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 void kvm_mmu_change_mmu_pages(struct kvm *kvm, unsigned int goal_nr_mmu_pages)
 {
 	LIST_HEAD(invalid_list);
-	/*
-	 * If we set the number of mmu pages to be smaller be than the
-	 * number of actived pages , we must to free some mmu pages before we
-	 * change the value
-	 */
 
 	spin_lock(&kvm->mmu_lock);
 
 	if (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages) {
-		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages &&
-			!list_empty(&kvm->arch.active_mmu_pages)) {
-			struct kvm_mmu_page *page;
+		/* Need to free some mmu pages to achieve the goal. */
+		while (kvm->arch.n_used_mmu_pages > goal_nr_mmu_pages)
+			if (!prepare_zap_oldest_mmu_page(kvm, &invalid_list))
+				break;
 
-			page = container_of(kvm->arch.active_mmu_pages.prev,
-					    struct kvm_mmu_page, link);
-			kvm_mmu_prepare_zap_page(kvm, page, &invalid_list);
-		}
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 		goal_nr_mmu_pages = kvm->arch.n_used_mmu_pages;
 	}
@@ -4007,13 +4014,10 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
 {
 	LIST_HEAD(invalid_list);
 
-	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES &&
-	       !list_empty(&vcpu->kvm->arch.active_mmu_pages)) {
-		struct kvm_mmu_page *sp;
+	while (kvm_mmu_available_pages(vcpu->kvm) < KVM_REFILL_PAGES) {
+		if (!prepare_zap_oldest_mmu_page(vcpu->kvm, &invalid_list))
+			break;
 
-		sp = container_of(vcpu->kvm->arch.active_mmu_pages.prev,
-				  struct kvm_mmu_page, link);
-		kvm_mmu_prepare_zap_page(vcpu->kvm, sp, &invalid_list);
 		++vcpu->kvm->stat.mmu_recycled;
 	}
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
@@ -4182,19 +4186,6 @@ restart:
 	spin_unlock(&kvm->mmu_lock);
 }
 
-static void kvm_mmu_remove_some_alloc_mmu_pages(struct kvm *kvm,
-						struct list_head *invalid_list)
-{
-	struct kvm_mmu_page *page;
-
-	if (list_empty(&kvm->arch.active_mmu_pages))
-		return;
-
-	page = container_of(kvm->arch.active_mmu_pages.prev,
-			    struct kvm_mmu_page, link);
-	kvm_mmu_prepare_zap_page(kvm, page, invalid_list);
-}
-
 static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 {
 	struct kvm *kvm;
@@ -4229,7 +4220,7 @@ static int mmu_shrink(struct shrinker *shrink, struct shrink_control *sc)
 		idx = srcu_read_lock(&kvm->srcu);
 		spin_lock(&kvm->mmu_lock);
 
-		kvm_mmu_remove_some_alloc_mmu_pages(kvm, &invalid_list);
+		prepare_zap_oldest_mmu_page(kvm, &invalid_list);
 		kvm_mmu_commit_zap_page(kvm, &invalid_list);
 
 		spin_unlock(&kvm->mmu_lock);