author     Marcelo Tosatti <mtosatti@redhat.com>  2008-12-01 19:32:03 -0500
committer  Avi Kivity <avi@redhat.com>            2008-12-31 09:55:43 -0500
commit     b1a368218ad5b6e62380c8f206f16e6f18bf154c (patch)
tree       4c13c3bd259032d19255637dcff4001e970de6ae /arch/x86/kvm/mmu.c
parent     60c8aec6e2c9923492dabbd6b67e34692bd26c20 (diff)
KVM: MMU: collapse remote TLB flushes on root sync
Collapse remote TLB flushes on root sync.

kernbench is 2.7% faster on 4-way guest. Improvements have been seen
with other loads such as AIM7.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--  arch/x86/kvm/mmu.c  19
1 file changed, 14 insertions(+), 5 deletions(-)
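The pattern the patch applies is easy to model outside the kernel: rmap_write_protect() stops flushing the remote TLBs itself and instead tells the caller whether it actually write-protected anything, so a caller that walks many pages can OR the results together and issue a single flush for the whole batch. The standalone C sketch below illustrates that batching idea with made-up names (write_protect_one(), flush_remote_tlbs(), NR_PAGES); it is a toy model of the pattern, not KVM code.

/*
 * Toy model of the batching pattern from this commit: the helper reports
 * whether it changed anything, and the caller flushes once per batch
 * instead of once per page.  All names here are illustrative, not KVM's.
 */
#include <stdbool.h>
#include <stdio.h>

#define NR_PAGES 4

static int flush_count;		/* counts simulated remote TLB flushes */

static void flush_remote_tlbs(void)
{
	flush_count++;
	printf("remote TLB flush #%d\n", flush_count);
}

/* Returns nonzero if the page's mapping was actually write-protected. */
static int write_protect_one(bool *writable)
{
	if (!*writable)
		return 0;
	*writable = false;
	return 1;
}

int main(void)
{
	bool writable[NR_PAGES] = { true, false, true, true };
	int protected = 0;
	int i;

	/* Batch: accumulate whether any page needed write protection ... */
	for (i = 0; i < NR_PAGES; i++)
		protected |= write_protect_one(&writable[i]);

	/* ... and flush remote TLBs once, rather than inside the helper. */
	if (protected)
		flush_remote_tlbs();

	return 0;
}

With one flush per batch rather than one per protected page, callers such as mmu_sync_children() avoid a separate remote flush for every shadow page they touch, which is the saving behind the kernbench number quoted above.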
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7ce92f78f337..58c35dead321 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -621,7 +621,7 @@ static u64 *rmap_next(struct kvm *kvm, unsigned long *rmapp, u64 *spte)
 	return NULL;
 }
 
-static void rmap_write_protect(struct kvm *kvm, u64 gfn)
+static int rmap_write_protect(struct kvm *kvm, u64 gfn)
 {
 	unsigned long *rmapp;
 	u64 *spte;
@@ -667,8 +667,7 @@ static void rmap_write_protect(struct kvm *kvm, u64 gfn)
 		spte = rmap_next(kvm, rmapp, spte);
 	}
 
-	if (write_protected)
-		kvm_flush_remote_tlbs(kvm);
+	return write_protected;
 }
 
 static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp)
@@ -1083,7 +1082,8 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		return 1;
 	}
 
-	rmap_write_protect(vcpu->kvm, sp->gfn);
+	if (rmap_write_protect(vcpu->kvm, sp->gfn))
+		kvm_flush_remote_tlbs(vcpu->kvm);
 	kvm_unlink_unsync_page(vcpu->kvm, sp);
 	if (vcpu->arch.mmu.sync_page(vcpu, sp)) {
 		kvm_mmu_zap_page(vcpu->kvm, sp);
@@ -1162,6 +1162,14 @@ static void mmu_sync_children(struct kvm_vcpu *vcpu,
 
 	kvm_mmu_pages_init(parent, &parents, &pages);
 	while (mmu_unsync_walk(parent, &pages)) {
+		int protected = 0;
+
+		for_each_sp(pages, sp, parents, i)
+			protected |= rmap_write_protect(vcpu->kvm, sp->gfn);
+
+		if (protected)
+			kvm_flush_remote_tlbs(vcpu->kvm);
+
 		for_each_sp(pages, sp, parents, i) {
 			kvm_sync_page(vcpu, sp);
 			mmu_pages_clear_parents(&parents);
@@ -1226,7 +1234,8 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	sp->role = role;
 	hlist_add_head(&sp->hash_link, bucket);
 	if (!metaphysical) {
-		rmap_write_protect(vcpu->kvm, gfn);
+		if (rmap_write_protect(vcpu->kvm, gfn))
+			kvm_flush_remote_tlbs(vcpu->kvm);
 		account_shadowed(vcpu->kvm, gfn);
 	}
 	if (shadow_trap_nonpresent_pte != shadow_notrap_nonpresent_pte)