author     Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>   2013-05-30 20:36:27 -0400
committer  Gleb Natapov <gleb@redhat.com>                      2013-06-05 05:33:10 -0400
commit     e7d11c7a894986a13817c1c001e1e7668c5c4eb4
tree       73ef9501ae601f51c361ed71c1a419e84bc28c27 /arch/x86
parent     7f52af7412275c0d23becfc325331ec8b5ff2458
KVM: MMU: zap pages in batch
Zap at least 10 pages before releasing mmu-lock to reduce the overhead
caused by repeatedly reacquiring the lock.

After the patch, kvm_zap_obsolete_pages can always make forward
progress, so update the comments accordingly.

[ It improves the case of doing a kernel build while reading the
  PCI ROM by 0.6% ~ 1%. ]

Note: I am not sure that "10" is the best value; it is just a guess
that "10" keeps a vcpu from spending too long in
kvm_zap_obsolete_pages without leaving mmu-lock waiters too hungry.
Signed-off-by: Xiao Guangrong <xiaoguangrong@linux.vnet.ibm.com>
Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
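
The batching idea above is a general lock-fairness pattern: do at least a
fixed quantum of work per lock hold before offering the lock to waiters, so
that contention can slow the walk down but never stall it. The following is
a minimal userspace sketch of that pattern, not KVM code: the pthread mutex
stands in for mmu-lock, the waiter_pending flag for spin_needbreak(), and
BATCH mirrors BATCH_ZAP_PAGES from the patch.

    /*
     * Minimal userspace analogue of the batched lock-break pattern in
     * kvm_zap_obsolete_pages(). The items array, the "zap" work, the
     * mutex and the waiter_pending flag are illustrative stand-ins.
     *
     * Build: cc -std=c11 -pthread batch.c
     */
    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdio.h>

    #define BATCH 10                     /* mirrors BATCH_ZAP_PAGES */
    #define NITEMS 100

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
    static atomic_bool waiter_pending;   /* stand-in for spin_needbreak() */

    static void zap_all(int *items, int n)
    {
            int batch = 0;

            pthread_mutex_lock(&lock);
            for (int i = 0; i < n; i++) {
                    /*
                     * Only consider dropping the lock after a full batch,
                     * so contention delays the walk but cannot stall it.
                     */
                    if (batch >= BATCH && atomic_load(&waiter_pending)) {
                            batch = 0;
                            pthread_mutex_unlock(&lock); /* let a waiter run */
                            pthread_mutex_lock(&lock);
                    }
                    items[i] = 0;                        /* the "zap" */
                    batch++;
            }
            pthread_mutex_unlock(&lock);
    }

    int main(void)
    {
            int items[NITEMS];

            for (int i = 0; i < NITEMS; i++)
                    items[i] = 1;
            zap_all(items, NITEMS);
            printf("zapped %d items in batches of at least %d\n",
                   NITEMS, BATCH);
            return 0;
    }

Unlike this plain array walk, the real loop must goto restart from the list
tail after every lock release, since other mmu-lock holders may have
reshaped active_mmu_pages in the meantime.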
Diffstat (limited to 'arch/x86')
 arch/x86/kvm/mmu.c | 35 +++++++++++------------------------
 1 file changed, 11 insertions(+), 24 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 0880b9b425d7..fe9d6f10e7a9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -4197,14 +4197,18 @@ void kvm_mmu_slot_remove_write_access(struct kvm *kvm, int slot)
 	spin_unlock(&kvm->mmu_lock);
 }
 
+#define BATCH_ZAP_PAGES	10
 static void kvm_zap_obsolete_pages(struct kvm *kvm)
 {
 	struct kvm_mmu_page *sp, *node;
 	LIST_HEAD(invalid_list);
+	int batch = 0;
 
 restart:
 	list_for_each_entry_safe_reverse(sp, node,
 	      &kvm->arch.active_mmu_pages, link) {
+		int ret;
+
 		/*
 		 * No obsolete page exists before new created page since
 		 * active_mmu_pages is the FIFO list.
@@ -4213,28 +4217,6 @@ restart:
 			break;
 
 		/*
-		 * Do not repeatedly zap a root page to avoid unnecessary
-		 * KVM_REQ_MMU_RELOAD, otherwise we may not be able to
-		 * progress:
-		 *    vcpu 0                        vcpu 1
-		 *                         call vcpu_enter_guest():
-		 *                            1): handle KVM_REQ_MMU_RELOAD
-		 *                                and require mmu-lock to
-		 *                                load mmu
-		 * repeat:
-		 *    1): zap root page and
-		 *        send KVM_REQ_MMU_RELOAD
-		 *
-		 *    2): if (cond_resched_lock(mmu-lock))
-		 *
-		 *                            2): hold mmu-lock and load mmu
-		 *
-		 *                            3): see KVM_REQ_MMU_RELOAD bit
-		 *                                on vcpu->requests is set
-		 *                                then return 1 to call
-		 *                                vcpu_enter_guest() again.
-		 *            goto repeat;
-		 *
 		 * Since we are reversely walking the list and the invalid
 		 * list will be moved to the head, skip the invalid page
 		 * can help us to avoid the infinity list walking.
@@ -4242,13 +4224,18 @@ restart:
 		if (sp->role.invalid)
 			continue;
 
-		if (need_resched() || spin_needbreak(&kvm->mmu_lock)) {
+		if (batch >= BATCH_ZAP_PAGES &&
+		      (need_resched() || spin_needbreak(&kvm->mmu_lock))) {
+			batch = 0;
 			kvm_mmu_commit_zap_page(kvm, &invalid_list);
 			cond_resched_lock(&kvm->mmu_lock);
 			goto restart;
 		}
 
-		if (kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list))
+		ret = kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);
+		batch += ret;
+
+		if (ret)
 			goto restart;
 	}
 
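
To see what the batch >= BATCH_ZAP_PAGES guard buys, here is a toy model
(all names invented, not KVM code) that counts lock handoffs in the worst
case where contention is signalled on every iteration: the batched walker
yields at most once per BATCH_ZAP_PAGES zapped pages, whereas the
pre-patch loop could yield on every page.

    /*
     * Toy model of the batching guarantee: zapping n pages under
     * constant contention costs at most n / BATCH_ZAP_PAGES lock
     * handoffs instead of up to n of them.
     */
    #include <assert.h>
    #include <stdio.h>

    #define BATCH_ZAP_PAGES 10

    /* Simulate zapping n pages with contention on every iteration. */
    static int count_yields(int n)
    {
            int batch = 0, yields = 0;

            for (int zapped = 0; zapped < n; zapped++) {
                    int contended = 1;  /* worst case: always contended */

                    if (batch >= BATCH_ZAP_PAGES && contended) {
                            batch = 0;
                            yields++;   /* would cond_resched_lock() here */
                    }
                    batch++;            /* one page prepared for zapping */
            }
            return yields;
    }

    int main(void)
    {
            int n = 1000;
            int yields = count_yields(n);

            assert(yields <= n / BATCH_ZAP_PAGES);
            printf("%d pages zapped with %d lock handoffs "
                   "(pre-patch worst case: %d)\n", n, yields, n);
            return 0;
    }

Note also that the patched loop advances batch by the return value of
kvm_mmu_prepare_zap_page() rather than by one, so every page a single call
manages to prepare for zapping counts toward the quantum.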