author     Marcelo Tosatti <mtosatti@redhat.com>     2008-12-01 19:32:02 -0500
committer  Avi Kivity <avi@redhat.com>               2008-12-31 09:55:43 -0500
commit     60c8aec6e2c9923492dabbd6b67e34692bd26c20
tree       fa2d3b7f90f8dff8669a6fb6c85a41b9afb0f4c5
parent     fbce554e940a983d005e29849636d0ef54b3eb18
KVM: MMU: use page array in unsync walk
Instead of invoking the handler directly, collect pages into
an array so the caller can work with it.

Simplifies TLB flush collapsing.
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--  arch/x86/include/asm/kvm_host.h |   2
-rw-r--r--  arch/x86/kvm/mmu.c              | 195
2 files changed, 141 insertions, 56 deletions
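For orientation before the diff itself: the patch replaces the per-page callback (struct kvm_unsync_walk and its entry handler) with a small fixed-size page array that the walk fills and the caller drains in batches. Below is a minimal, self-contained sketch of that pattern, not kernel code: the names struct page_vec and collect_unsync() are made up to stand in for the patch's kvm_mmu_pages and mmu_unsync_walk(), and a linked list stands in for the real recursive bitmap walk over shadow page tables.

	#include <stdio.h>

	#define PAGE_ARRAY_NR 16	/* mirrors KVM_PAGE_ARRAY_NR in the patch */

	/* Stand-in for struct kvm_mmu_page: just a chain of "unsynced" pages. */
	struct page {
		int id;
		struct page *next;
	};

	struct page_vec {
		struct page *page[PAGE_ARRAY_NR];
		unsigned int nr;
	};

	/*
	 * Stand-in for mmu_unsync_walk(): gather up to PAGE_ARRAY_NR pages into
	 * the array and return how many were collected.
	 */
	static unsigned int collect_unsync(struct page **cursor, struct page_vec *pvec)
	{
		pvec->nr = 0;
		while (*cursor && pvec->nr < PAGE_ARRAY_NR) {
			pvec->page[pvec->nr++] = *cursor;
			*cursor = (*cursor)->next;
		}
		return pvec->nr;
	}

	int main(void)
	{
		struct page pages[40];
		struct page *head = &pages[0];
		struct page_vec pvec;

		for (int i = 0; i < 40; i++) {
			pages[i].id = i;
			pages[i].next = (i < 39) ? &pages[i + 1] : NULL;
		}

		/*
		 * The caller now owns the loop: it handles a whole batch at a
		 * time, so expensive global work can be done once per batch
		 * rather than once per page.
		 */
		while (collect_unsync(&head, &pvec)) {
			for (unsigned int i = 0; i < pvec.nr; i++)
				printf("sync page %d\n", pvec.page[i]->id);
			printf("batch of %u done, flush once here\n", pvec.nr);
		}
		return 0;
	}

Because the caller sees a whole batch at once, TLB flushing and lock breaking can be collapsed to one operation per batch, which appears to be what the commit message means by "simplifies TLB flush collapsing".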
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f58f7ebdea81..93d0aed35880 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -200,7 +200,7 @@ struct kvm_mmu_page {
 	int multimapped;         /* More than one parent_pte? */
 	int root_count;          /* Currently serving as active root */
 	bool unsync;
-	bool unsync_children;
+	unsigned int unsync_children;
 	union {
 		u64 *parent_pte;               /* !multimapped */
 		struct hlist_head parent_ptes; /* multimapped, kvm_pte_chain */
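As the mmu.c hunks below show, unsync_children stops being a simple flag and becomes a count of the bits set in unsync_child_bitmap: it is incremented only when __test_and_set_bit() reports a 0 to 1 transition, and decremented as bits are cleared in mmu_pages_clear_parents(). A small stand-alone sketch of that counting idiom (plain C, with a bool array standing in for the kernel bitmap helpers; the symmetric check in clear_unsync() is a simplification for the sketch, the patch clears and decrements separately):

	#include <stdbool.h>
	#include <stdio.h>

	#define NCHILDREN 512		/* one bit per shadow-page entry */

	struct child_state {
		bool bitmap[NCHILDREN];
		unsigned int unsync_children;	/* count of set bits */
	};

	static void mark_unsync(struct child_state *s, unsigned int idx)
	{
		/* Count only 0 -> 1 transitions, so marking the same child
		 * twice does not inflate the counter. */
		if (!s->bitmap[idx]) {
			s->bitmap[idx] = true;
			s->unsync_children++;
		}
	}

	static void clear_unsync(struct child_state *s, unsigned int idx)
	{
		if (s->bitmap[idx]) {
			s->bitmap[idx] = false;
			s->unsync_children--;
		}
	}

	int main(void)
	{
		struct child_state s = { .unsync_children = 0 };

		mark_unsync(&s, 3);
		mark_unsync(&s, 3);	/* double mark: counted once */
		mark_unsync(&s, 7);
		clear_unsync(&s, 3);
		printf("unsync children: %u\n", s.unsync_children);	/* prints 1 */
		return 0;
	}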
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index dd20b199a7c0..7ce92f78f337 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -908,8 +908,9 @@ static void kvm_mmu_update_unsync_bitmap(u64 *spte)
 	struct kvm_mmu_page *sp = page_header(__pa(spte));
 
 	index = spte - sp->spt;
-	__set_bit(index, sp->unsync_child_bitmap);
-	sp->unsync_children = 1;
+	if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
+		sp->unsync_children++;
+	WARN_ON(!sp->unsync_children);
 }
 
 static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
@@ -936,7 +937,6 @@ static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
 
 static int unsync_walk_fn(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
-	sp->unsync_children = 1;
 	kvm_mmu_update_parents_unsync(sp);
 	return 1;
 }
@@ -967,18 +967,41 @@ static void nonpaging_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
 }
 
+#define KVM_PAGE_ARRAY_NR 16
+
+struct kvm_mmu_pages {
+	struct mmu_page_and_offset {
+		struct kvm_mmu_page *sp;
+		unsigned int idx;
+	} page[KVM_PAGE_ARRAY_NR];
+	unsigned int nr;
+};
+
 #define for_each_unsync_children(bitmap, idx)		\
 	for (idx = find_first_bit(bitmap, 512);		\
 	     idx < 512;					\
 	     idx = find_next_bit(bitmap, 512, idx+1))
 
-static int mmu_unsync_walk(struct kvm_mmu_page *sp,
-			   struct kvm_unsync_walk *walker)
+int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
+		  int idx)
 {
-	int i, ret;
+	int i;
 
-	if (!sp->unsync_children)
-		return 0;
+	if (sp->unsync)
+		for (i=0; i < pvec->nr; i++)
+			if (pvec->page[i].sp == sp)
+				return 0;
+
+	pvec->page[pvec->nr].sp = sp;
+	pvec->page[pvec->nr].idx = idx;
+	pvec->nr++;
+	return (pvec->nr == KVM_PAGE_ARRAY_NR);
+}
+
+static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
+			     struct kvm_mmu_pages *pvec)
+{
+	int i, ret, nr_unsync_leaf = 0;
 
 	for_each_unsync_children(sp->unsync_child_bitmap, i) {
 		u64 ent = sp->spt[i];
@@ -988,17 +1011,22 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
 		child = page_header(ent & PT64_BASE_ADDR_MASK);
 
 		if (child->unsync_children) {
-			ret = mmu_unsync_walk(child, walker);
-			if (ret)
+			if (mmu_pages_add(pvec, child, i))
+				return -ENOSPC;
+
+			ret = __mmu_unsync_walk(child, pvec);
+			if (!ret)
+				__clear_bit(i, sp->unsync_child_bitmap);
+			else if (ret > 0)
+				nr_unsync_leaf += ret;
+			else
 				return ret;
-			__clear_bit(i, sp->unsync_child_bitmap);
 		}
 
 		if (child->unsync) {
-			ret = walker->entry(child, walker);
-			__clear_bit(i, sp->unsync_child_bitmap);
-			if (ret)
-				return ret;
+			nr_unsync_leaf++;
+			if (mmu_pages_add(pvec, child, i))
+				return -ENOSPC;
 		}
 	}
 }
@@ -1006,7 +1034,17 @@ static int mmu_unsync_walk(struct kvm_mmu_page *sp,
 	if (find_first_bit(sp->unsync_child_bitmap, 512) == 512)
 		sp->unsync_children = 0;
 
-	return 0;
+	return nr_unsync_leaf;
+}
+
+static int mmu_unsync_walk(struct kvm_mmu_page *sp,
+			   struct kvm_mmu_pages *pvec)
+{
+	if (!sp->unsync_children)
+		return 0;
+
+	mmu_pages_add(pvec, sp, 0);
+	return __mmu_unsync_walk(sp, pvec);
 }
 
 static struct kvm_mmu_page *kvm_mmu_lookup_page(struct kvm *kvm, gfn_t gfn)
@@ -1056,30 +1094,81 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 	return 0;
 }
 
-struct sync_walker {
-	struct kvm_vcpu *vcpu;
-	struct kvm_unsync_walk walker;
+struct mmu_page_path {
+	struct kvm_mmu_page *parent[PT64_ROOT_LEVEL-1];
+	unsigned int idx[PT64_ROOT_LEVEL-1];
 };
 
-static int mmu_sync_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk)
+#define for_each_sp(pvec, sp, parents, i)			\
+		for (i = mmu_pages_next(&pvec, &parents, -1),	\
+			sp = pvec.page[i].sp;			\
+			i < pvec.nr && ({ sp = pvec.page[i].sp; 1;});	\
+			i = mmu_pages_next(&pvec, &parents, i))
+
+int mmu_pages_next(struct kvm_mmu_pages *pvec, struct mmu_page_path *parents,
+		   int i)
 {
-	struct sync_walker *sync_walk = container_of(walk, struct sync_walker,
-						     walker);
-	struct kvm_vcpu *vcpu = sync_walk->vcpu;
+	int n;
+
+	for (n = i+1; n < pvec->nr; n++) {
+		struct kvm_mmu_page *sp = pvec->page[n].sp;
+
+		if (sp->role.level == PT_PAGE_TABLE_LEVEL) {
+			parents->idx[0] = pvec->page[n].idx;
+			return n;
+		}
 
-	kvm_sync_page(vcpu, sp);
-	return (need_resched() || spin_needbreak(&vcpu->kvm->mmu_lock));
+		parents->parent[sp->role.level-2] = sp;
+		parents->idx[sp->role.level-1] = pvec->page[n].idx;
+	}
+
+	return n;
 }
 
-static void mmu_sync_children(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
+void mmu_pages_clear_parents(struct mmu_page_path *parents)
 {
-	struct sync_walker walker = {
-		.walker = { .entry = mmu_sync_fn, },
-		.vcpu = vcpu,
-	};
+	struct kvm_mmu_page *sp;
+	unsigned int level = 0;
+
+	do {
+		unsigned int idx = parents->idx[level];
+
+		sp = parents->parent[level];
+		if (!sp)
+			return;
+
+		--sp->unsync_children;
+		WARN_ON((int)sp->unsync_children < 0);
+		__clear_bit(idx, sp->unsync_child_bitmap);
+		level++;
+	} while (level < PT64_ROOT_LEVEL-1 && !sp->unsync_children);
+}
+
+static void kvm_mmu_pages_init(struct kvm_mmu_page *parent,
+			       struct mmu_page_path *parents,
+			       struct kvm_mmu_pages *pvec)
+{
+	parents->parent[parent->role.level-1] = NULL;
+	pvec->nr = 0;
+}
 
-	while (mmu_unsync_walk(sp, &walker.walker))
+static void mmu_sync_children(struct kvm_vcpu *vcpu,
+			      struct kvm_mmu_page *parent)
+{
+	int i;
+	struct kvm_mmu_page *sp;
+	struct mmu_page_path parents;
+	struct kvm_mmu_pages pages;
+
+	kvm_mmu_pages_init(parent, &parents, &pages);
+	while (mmu_unsync_walk(parent, &pages)) {
+		for_each_sp(pages, sp, parents, i) {
+			kvm_sync_page(vcpu, sp);
+			mmu_pages_clear_parents(&parents);
+		}
 		cond_resched_lock(&vcpu->kvm->mmu_lock);
+		kvm_mmu_pages_init(parent, &parents, &pages);
+	}
 }
 
 static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
@@ -1245,33 +1334,29 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 	}
 }
 
-struct zap_walker {
-	struct kvm_unsync_walk walker;
-	struct kvm *kvm;
-	int zapped;
-};
-
-static int mmu_zap_fn(struct kvm_mmu_page *sp, struct kvm_unsync_walk *walk)
-{
-	struct zap_walker *zap_walk = container_of(walk, struct zap_walker,
-						   walker);
-	kvm_mmu_zap_page(zap_walk->kvm, sp);
-	zap_walk->zapped = 1;
-	return 0;
-}
-
-static int mmu_zap_unsync_children(struct kvm *kvm, struct kvm_mmu_page *sp)
+static int mmu_zap_unsync_children(struct kvm *kvm,
+				   struct kvm_mmu_page *parent)
 {
-	struct zap_walker walker = {
-		.walker = { .entry = mmu_zap_fn, },
-		.kvm = kvm,
-		.zapped = 0,
-	};
+	int i, zapped = 0;
+	struct mmu_page_path parents;
+	struct kvm_mmu_pages pages;
 
-	if (sp->role.level == PT_PAGE_TABLE_LEVEL)
+	if (parent->role.level == PT_PAGE_TABLE_LEVEL)
 		return 0;
-	mmu_unsync_walk(sp, &walker.walker);
-	return walker.zapped;
+
+	kvm_mmu_pages_init(parent, &parents, &pages);
+	while (mmu_unsync_walk(parent, &pages)) {
+		struct kvm_mmu_page *sp;
+
+		for_each_sp(pages, sp, parents, i) {
+			kvm_mmu_zap_page(kvm, sp);
+			mmu_pages_clear_parents(&parents);
+		}
+		zapped += pages.nr;
+		kvm_mmu_pages_init(parent, &parents, &pages);
+	}
+
+	return zapped;
 }
 
 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)