author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-05-15 11:20:27 -0400
committer  Avi Kivity <avi@redhat.com>	2011-07-12 04:45:02 -0400
commit     332b207d65c1d7982489dbb83e5071c95e19eb75
tree       5a9917fc3b03b6f1c9d9e1436c89c3bc5f45b636
parent     96304217a783a65c0923a26f54141cfe7a2a71b5
KVM: MMU: optimize pte write path if don't have protected sp
Simply return from kvm_mmu_pte_write() if no shadow page is
write-protected, so that in the common case we avoid walking all
shadow pages and taking mmu_lock.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
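The change is a textbook unlocked fast-path check: a counter of write-protected (indirect) shadow pages is maintained while mmu_lock is held, and read locklessly at the top of the write-emulation path so the common case skips the lock entirely. Below is a minimal user-space sketch of the same pattern, not the kernel code: the names are hypothetical, and C11 atomics plus a pthread mutex stand in for the kernel's plain increments under mmu_lock and its ACCESS_ONCE() read.

#include <pthread.h>
#include <stdatomic.h>

/* Hypothetical stand-in for the relevant part of struct kvm_arch. */
struct mmu_state {
	pthread_mutex_t lock;               /* plays the role of mmu_lock */
	atomic_uint indirect_shadow_pages;  /* write-protected page count */
};

/* Writers run with 'lock' held, mirroring account_shadowed() and
 * unaccount_shadowed() in the patch below. */
static void account_shadowed(struct mmu_state *s)
{
	atomic_fetch_add_explicit(&s->indirect_shadow_pages, 1,
				  memory_order_relaxed);
}

static void unaccount_shadowed(struct mmu_state *s)
{
	atomic_fetch_sub_explicit(&s->indirect_shadow_pages, 1,
				  memory_order_relaxed);
}

/* The emulated-write path: skip taking the lock when the counter
 * reads zero.  The unlocked read is racy by design; the patch's
 * correctness argument is that a page which was not write-protected
 * at the time of the guest write needs no fixup here. */
static void pte_write(struct mmu_state *s)
{
	if (atomic_load_explicit(&s->indirect_shadow_pages,
				 memory_order_relaxed) == 0)
		return;  /* fast path: no shadow page to update */

	pthread_mutex_lock(&s->lock);
	/* ... walk shadow pages, update or zap mappings ... */
	pthread_mutex_unlock(&s->lock);
}

In the kernel patch itself the counter updates stay plain ++/-- because mmu_lock already serializes the writers; only the lockless reader needs ACCESS_ONCE().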
-rw-r--r--  arch/x86/include/asm/kvm_host.h | 1 +
-rw-r--r--  arch/x86/kvm/mmu.c              | 9 +++++++++
2 files changed, 10 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index db4b6543b830..387780eb97bb 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -441,6 +441,7 @@ struct kvm_arch {
 	unsigned int n_used_mmu_pages;
 	unsigned int n_requested_mmu_pages;
 	unsigned int n_max_mmu_pages;
+	unsigned int indirect_shadow_pages;
 	atomic_t invlpg_counter;
 	struct hlist_head mmu_page_hash[KVM_NUM_MMU_PAGES];
 	/*
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index aee38623b768..b4ae7afa6b3b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -498,6 +498,7 @@ static void account_shadowed(struct kvm *kvm, gfn_t gfn)
 		linfo = lpage_info_slot(gfn, slot, i);
 		linfo->write_count += 1;
 	}
+	kvm->arch.indirect_shadow_pages++;
 }
 
 static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
@@ -513,6 +514,7 @@ static void unaccount_shadowed(struct kvm *kvm, gfn_t gfn)
 		linfo->write_count -= 1;
 		WARN_ON(linfo->write_count < 0);
 	}
+	kvm->arch.indirect_shadow_pages--;
 }
 
 static int has_wrprotected_page(struct kvm *kvm,
@@ -3233,6 +3235,13 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	int level, npte, invlpg_counter, r, flooded = 0;
 	bool remote_flush, local_flush, zap_page;
 
+	/*
+	 * If we don't have indirect shadow pages, it means no page is
+	 * write-protected, so we can exit simply.
+	 */
+	if (!ACCESS_ONCE(vcpu->kvm->arch.indirect_shadow_pages))
+		return;
+
 	zap_page = remote_flush = local_flush = false;
 	offset = offset_in_page(gpa);
 
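A note on the unlocked read: ACCESS_ONCE() (later superseded by READ_ONCE() in mainline) only keeps the compiler from caching or refetching the load (and, for an aligned word, from tearing it); it imposes no memory ordering. The check is therefore racy by design, and the race looks benign: if the counter is observed as zero, no shadow page was write-protecting the written gfn when the guest modified it, so kvm_mmu_pte_write() has nothing to fix up, and any shadow page created afterwards is built from the already-updated guest pte.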