author		Marcelo Tosatti <mtosatti@redhat.com>	2008-09-23 12:18:40 -0400
committer	Avi Kivity <avi@redhat.com>		2008-10-15 08:25:26 -0400
commit		0074ff63ebc195701062ca46e0d82fcea0fa3a0a
tree		fb9c40fc914a17de448d0d5ab822129a5e179b25 /arch
parent		4731d4c7a07769cf2926c327177b97bb8c68cafc
KVM: MMU: speed up mmu_unsync_walk
Cache the unsynced children information in a per-page bitmap.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
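For context: the per-page bitmap used below, sp->unsync_child_bitmap, carries
one bit per shadow page table entry (512 per page, matching PT64_ENT_PER_PAGE),
and sp->unsync_children caches whether any bit is set. The field declaration
itself is outside this diffstat; a hedged sketch of what it is assumed to look
like:

	/* Sketch only; not part of this patch. The field names are taken
	 * from the diff below, the surrounding struct layout is assumed. */
	struct kvm_mmu_page {
		/* ... existing fields ... */
		u64 *spt;	/* the page's 512 shadow PTEs */
		DECLARE_BITMAP(unsync_child_bitmap, 512);
	};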
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/mmu.c	72
1 file changed, 60 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d88659ae7778..cb391d629af2 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -891,6 +891,52 @@ static void mmu_parent_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
 	}
 }
 
+static void kvm_mmu_update_unsync_bitmap(u64 *spte)
+{
+	unsigned int index;
+	struct kvm_mmu_page *sp = page_header(__pa(spte));
+
+	index = spte - sp->spt;
+	__set_bit(index, sp->unsync_child_bitmap);
+	sp->unsync_children = 1;
+}
+
+static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
+{
+	struct kvm_pte_chain *pte_chain;
+	struct hlist_node *node;
+	int i;
+
+	if (!sp->parent_pte)
+		return;
+
+	if (!sp->multimapped) {
+		kvm_mmu_update_unsync_bitmap(sp->parent_pte);
+		return;
+	}
+
+	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
+		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
+			if (!pte_chain->parent_ptes[i])
+				break;
+			kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
+		}
+}
+
+static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+{
+	sp->unsync_children = 1;
+	kvm_mmu_update_parents_unsync(sp);
+	return 1;
+}
+
+static void kvm_mmu_mark_parents_unsync(struct kvm_vcpu *vcpu,
+					struct kvm_mmu_page *sp)
+{
+	mmu_parent_walk(vcpu, sp, unsync_walk_fn);
+	kvm_mmu_update_parents_unsync(sp);
+}
+
 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp)
 {
@@ -910,6 +956,11 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
 }
 
+#define for_each_unsync_children(bitmap, idx)		\
+	for (idx = find_first_bit(bitmap, 512);		\
+	     idx < 512;					\
+	     idx = find_next_bit(bitmap, 512, idx+1))
+
 static int mmu_unsync_walk(struct kvm_mmu_page *sp,
 			   struct kvm_unsync_walk *walker)
 {
@@ -918,7 +969,7 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
 	if (!sp->unsync_children)
 		return 0;
 
-	for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+	for_each_unsync_children(sp->unsync_child_bitmap, i) {
 		u64 ent = sp->spt[i];
 
 		if (is_shadow_present_pte(ent)) {
@@ -929,17 +980,19 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
 				ret = mmu_unsync_walk(child, walker);
 				if (ret)
 					return ret;
+				__clear_bit(i, sp->unsync_child_bitmap);
 			}
 
 			if (child->unsync) {
 				ret = walker->entry(child, walker);
+				__clear_bit(i, sp->unsync_child_bitmap);
 				if (ret)
 					return ret;
 			}
 		}
 	}
 
-	if (i == PT64_ENT_PER_PAGE)
+	if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
 		sp->unsync_children = 0;
 
 	return 0;
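Why the two __clear_bit() placements above differ: in the recursive case the
bit is cleared only after the child's subtree walk succeeds, so a nonzero
return leaves the bit set and a later walk resumes at the unfinished child;
in the child->unsync case walker->entry() has already processed the child, so
the bit is dropped before the return value is checked. Once no bits remain,
the find_first_bit() test clears the cached unsync_children flag. A hedged
sketch of the resume-on-failure pattern, using a hypothetical visit() helper:

	for_each_unsync_children(sp->unsync_child_bitmap, i) {
		int ret = visit(sp, i);	/* hypothetical */
		if (ret)
			return ret;	/* bit i stays set; next walk resumes here */
		__clear_bit(i, sp->unsync_child_bitmap);	/* fully handled */
	}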
@@ -1056,10 +1109,11 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		if (sp->role.word != role.word)
 			continue;
 
-		if (sp->unsync_children)
-			set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
-
 		mmu_page_add_parent_pte(vcpu, sp, parent_pte);
+		if (sp->unsync_children) {
+			set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
+			kvm_mmu_mark_parents_unsync(vcpu, sp);
+		}
 		pgprintk("%s: found\n", __func__);
 		return sp;
 	}
@@ -1336,12 +1390,6 @@ struct page *gva_to_page(struct kvm_vcpu *vcpu, gva_t gva)
 	return page;
 }
 
-static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
-{
-	sp->unsync_children = 1;
-	return 1;
-}
-
 static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
 	unsigned index;
@@ -1358,7 +1406,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		if (s->role.word != sp->role.word)
 			return 1;
 	}
-	mmu_parent_walk(vcpu, sp, unsync_walk_fn);
+	kvm_mmu_mark_parents_unsync(vcpu, sp);
 	++vcpu->kvm->stat.mmu_unsync;
 	sp->unsync = 1;
 	mmu_convert_notrap(sp);
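Taken together, kvm_mmu_mark_parents_unsync() records the unsync state
bottom-up: mmu_parent_walk() applies unsync_walk_fn() to each ancestor, which
marks that ancestor in its own parents' bitmaps, and the trailing
kvm_mmu_update_parents_unsync(sp) marks the page itself in its immediate
parents. An illustrative sketch for a single-mapped chain root -> mid -> leaf,
with a hypothetical idx_of() giving the parent PTE slot:

	kvm_mmu_mark_parents_unsync(vcpu, leaf);
	/* afterwards:
	 *   test_bit(idx_of(leaf, mid), mid->unsync_child_bitmap) == 1
	 *   test_bit(idx_of(mid, root), root->unsync_child_bitmap) == 1
	 * so mmu_unsync_walk() descends only through marked slots.
	 */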