author      Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>    2011-05-15 11:27:08 -0400
committer   Avi Kivity <avi@redhat.com>                      2011-07-12 04:45:07 -0400
commit      67052b3508f09956427d6476fd35e8fddde6c618 (patch)
tree        b3b9585977d0fdbb746c3cf7dc41a0e24b87411e /arch/x86/kvm/mmu.c
parent      53c07b18787d564a105e1aa678795d67eeb27447 (diff)
KVM: MMU: remove the arithmetic of parent pte rmap
Parent pte rmap and page rmap are very similar, so use the same code for both.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
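For context: this patch builds on its parent commit (53c07b18787d), which introduced the generic pte_list helpers (pte_list_add, pte_list_remove, pte_list_next) for page rmap. The list head is a single unsigned long whose low bit serves as a tag, so a one-entry list costs no allocation at all. A sketch of the representation, reconstructed from the parent commit rather than shown in this diff (treat PTE_LIST_EXT = 4 and the exact layout as assumptions):

/*
 * Head word encoding (an unsigned long):
 *   0        - empty list
 *   spte     - low bit clear: exactly one entry, the spte pointer itself
 *   desc | 1 - low bit set: points to a chain of descriptors
 */
#define PTE_LIST_EXT 4                  /* spte slots per descriptor */

struct pte_list_desc {
        u64 *sptes[PTE_LIST_EXT];       /* packed left to right, NULL-terminated */
        struct pte_list_desc *more;     /* overflow chain */
};

The patch's point is that struct kvm_mmu_page can keep its parent ptes in exactly this structure (a new unsigned long parent_ptes field replacing parent_pte/multimapped), retiring the bespoke kvm_pte_chain code removed below.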
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--   arch/x86/kvm/mmu.c   189
 1 file changed, 45 insertions(+), 144 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index a6811cbdbf0d..9eaca1c739a6 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -182,9 +182,6 @@ struct kvm_shadow_walk_iterator {
 		shadow_walk_okay(&(_walker));		\
 		shadow_walk_next(&(_walker)))
 
-typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
-
-static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *pte_list_desc_cache;
 static struct kmem_cache *mmu_page_header_cache;
 static struct percpu_counter kvm_total_used_mmu_pages;
@@ -397,12 +394,8 @@ static int mmu_topup_memory_caches(struct kvm_vcpu *vcpu)
 {
 	int r;
 
-	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
-				   pte_chain_cache, 4);
-	if (r)
-		goto out;
 	r = mmu_topup_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
-				   pte_list_desc_cache, 4 + PTE_PREFETCH_NUM);
+				   pte_list_desc_cache, 8 + PTE_PREFETCH_NUM);
 	if (r)
 		goto out;
 	r = mmu_topup_memory_cache_page(&vcpu->arch.mmu_page_cache, 8);
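A note on the sizing change above: previously each fault path topped up 4 kvm_pte_chain objects plus 4 + PTE_PREFETCH_NUM pte_list_desc objects. With parent ptes now drawing descriptors from the same pte_list_desc cache, that headroom is folded into a single top-up of 8 + PTE_PREFETCH_NUM. This reading is inferred from the diff; the commit message does not spell it out.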
@@ -416,8 +409,6 @@ out:
 
 static void mmu_free_memory_caches(struct kvm_vcpu *vcpu)
 {
-	mmu_free_memory_cache(&vcpu->arch.mmu_pte_chain_cache,
-				pte_chain_cache);
 	mmu_free_memory_cache(&vcpu->arch.mmu_pte_list_desc_cache,
 				pte_list_desc_cache);
 	mmu_free_memory_cache_page(&vcpu->arch.mmu_page_cache);
@@ -435,17 +426,6 @@ static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc,
 	return p;
 }
 
-static struct kvm_pte_chain *mmu_alloc_pte_chain(struct kvm_vcpu *vcpu)
-{
-	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_chain_cache,
-				      sizeof(struct kvm_pte_chain));
-}
-
-static void mmu_free_pte_chain(struct kvm_pte_chain *pc)
-{
-	kmem_cache_free(pte_chain_cache, pc);
-}
-
 static struct pte_list_desc *mmu_alloc_pte_list_desc(struct kvm_vcpu *vcpu)
 {
 	return mmu_memory_cache_alloc(&vcpu->arch.mmu_pte_list_desc_cache,
@@ -721,6 +701,26 @@ static void pte_list_remove(u64 *spte, unsigned long *pte_list)
 	}
 }
 
+typedef void (*pte_list_walk_fn) (u64 *spte);
+static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
+{
+	struct pte_list_desc *desc;
+	int i;
+
+	if (!*pte_list)
+		return;
+
+	if (!(*pte_list & 1))
+		return fn((u64 *)*pte_list);
+
+	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
+	while (desc) {
+		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
+			fn(desc->sptes[i]);
+		desc = desc->more;
+	}
+}
+
 /*
  * Take gfn and return the reverse mapping to it.
  */
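The pte_list_walk added above completes the pte_list API this patch relies on. The following standalone userspace sketch mirrors its tagged-pointer traversal, together with a condensed pte_list_add, so the encoding can be exercised outside the kernel. malloc stands in for the kmem caches and PTE_LIST_EXT = 4 is assumed from the parent commit, so this is illustrative rather than the kernel's exact code:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define PTE_LIST_EXT 4

struct pte_list_desc {
	uint64_t *sptes[PTE_LIST_EXT];
	struct pte_list_desc *more;
};

typedef void (*pte_list_walk_fn)(uint64_t *spte);

/* Condensed add: empty -> inline entry; second entry -> first descriptor;
 * afterwards append into the first free slot, chaining descriptors as needed. */
static void pte_list_add(uint64_t *spte, unsigned long *pte_list)
{
	struct pte_list_desc *desc;
	int i;

	if (!*pte_list) {
		*pte_list = (unsigned long)spte;
		return;
	}
	if (!(*pte_list & 1)) {
		desc = calloc(1, sizeof(*desc));
		desc->sptes[0] = (uint64_t *)*pte_list;
		desc->sptes[1] = spte;
		*pte_list = (unsigned long)desc | 1;
		return;
	}
	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
	while (desc->sptes[PTE_LIST_EXT - 1] && desc->more)
		desc = desc->more;
	if (desc->sptes[PTE_LIST_EXT - 1]) {
		desc->more = calloc(1, sizeof(*desc));
		desc = desc->more;
	}
	for (i = 0; desc->sptes[i]; ++i)
		;
	desc->sptes[i] = spte;
}

/* Same traversal as the kernel's pte_list_walk above. */
static void pte_list_walk(unsigned long *pte_list, pte_list_walk_fn fn)
{
	struct pte_list_desc *desc;
	int i;

	if (!*pte_list)
		return;
	if (!(*pte_list & 1)) {
		fn((uint64_t *)*pte_list);
		return;
	}
	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
	while (desc) {
		for (i = 0; i < PTE_LIST_EXT && desc->sptes[i]; ++i)
			fn(desc->sptes[i]);
		desc = desc->more;
	}
}

static void print_spte(uint64_t *spte)
{
	printf("spte %p = %llu\n", (void *)spte, (unsigned long long)*spte);
}

int main(void)
{
	unsigned long pte_list = 0;
	uint64_t sptes[6] = { 10, 11, 12, 13, 14, 15 };
	int i;

	for (i = 0; i < 6; ++i)
		pte_list_add(&sptes[i], &pte_list);
	pte_list_walk(&pte_list, print_spte);	/* visits all six entries */
	return 0;
}

The low tag bit is safe to borrow because calloc (like the kernel's slab allocators) returns pointers with at least word alignment, so a real descriptor address never has bit 0 set.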
@@ -1069,134 +1069,52 @@ static unsigned kvm_page_table_hashfn(gfn_t gfn)
 	return gfn & ((1 << KVM_MMU_HASH_SHIFT) - 1);
 }
 
-static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
-					       u64 *parent_pte, int direct)
-{
-	struct kvm_mmu_page *sp;
-
-	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache, sizeof *sp);
-	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
-	if (!direct)
-		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
-						  PAGE_SIZE);
-	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
-	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
-	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
-	sp->multimapped = 0;
-	sp->parent_pte = parent_pte;
-	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
-	return sp;
-}
-
 static void mmu_page_add_parent_pte(struct kvm_vcpu *vcpu,
 				    struct kvm_mmu_page *sp, u64 *parent_pte)
 {
-	struct kvm_pte_chain *pte_chain;
-	struct hlist_node *node;
-	int i;
-
 	if (!parent_pte)
 		return;
-	if (!sp->multimapped) {
-		u64 *old = sp->parent_pte;
 
-		if (!old) {
-			sp->parent_pte = parent_pte;
-			return;
-		}
-		sp->multimapped = 1;
-		pte_chain = mmu_alloc_pte_chain(vcpu);
-		INIT_HLIST_HEAD(&sp->parent_ptes);
-		hlist_add_head(&pte_chain->link, &sp->parent_ptes);
-		pte_chain->parent_ptes[0] = old;
-	}
-	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link) {
-		if (pte_chain->parent_ptes[NR_PTE_CHAIN_ENTRIES-1])
-			continue;
-		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i)
-			if (!pte_chain->parent_ptes[i]) {
-				pte_chain->parent_ptes[i] = parent_pte;
-				return;
-			}
-	}
-	pte_chain = mmu_alloc_pte_chain(vcpu);
-	BUG_ON(!pte_chain);
-	hlist_add_head(&pte_chain->link, &sp->parent_ptes);
-	pte_chain->parent_ptes[0] = parent_pte;
+	pte_list_add(vcpu, parent_pte, &sp->parent_ptes);
 }
 
 static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 				       u64 *parent_pte)
 {
-	struct kvm_pte_chain *pte_chain;
-	struct hlist_node *node;
-	int i;
-
-	if (!sp->multimapped) {
-		BUG_ON(sp->parent_pte != parent_pte);
-		sp->parent_pte = NULL;
-		return;
-	}
-	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
-		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
-			if (!pte_chain->parent_ptes[i])
-				break;
-			if (pte_chain->parent_ptes[i] != parent_pte)
-				continue;
-			while (i + 1 < NR_PTE_CHAIN_ENTRIES
-				&& pte_chain->parent_ptes[i + 1]) {
-				pte_chain->parent_ptes[i]
-					= pte_chain->parent_ptes[i + 1];
-				++i;
-			}
-			pte_chain->parent_ptes[i] = NULL;
-			if (i == 0) {
-				hlist_del(&pte_chain->link);
-				mmu_free_pte_chain(pte_chain);
-				if (hlist_empty(&sp->parent_ptes)) {
-					sp->multimapped = 0;
-					sp->parent_pte = NULL;
-				}
-			}
-			return;
-		}
-	BUG();
+	pte_list_remove(parent_pte, &sp->parent_ptes);
 }
 
-static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
+static struct kvm_mmu_page *kvm_mmu_alloc_page(struct kvm_vcpu *vcpu,
+					       u64 *parent_pte, int direct)
 {
-	struct kvm_pte_chain *pte_chain;
-	struct hlist_node *node;
-	struct kvm_mmu_page *parent_sp;
-	int i;
-
-	if (!sp->multimapped && sp->parent_pte) {
-		parent_sp = page_header(__pa(sp->parent_pte));
-		fn(parent_sp, sp->parent_pte);
-		return;
-	}
-
-	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
-		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
-			u64 *spte = pte_chain->parent_ptes[i];
-
-			if (!spte)
-				break;
-			parent_sp = page_header(__pa(spte));
-			fn(parent_sp, spte);
-		}
+	struct kvm_mmu_page *sp;
+	sp = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_header_cache,
+				    sizeof *sp);
+	sp->spt = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache, PAGE_SIZE);
+	if (!direct)
+		sp->gfns = mmu_memory_cache_alloc(&vcpu->arch.mmu_page_cache,
+						  PAGE_SIZE);
+	set_page_private(virt_to_page(sp->spt), (unsigned long)sp);
+	list_add(&sp->link, &vcpu->kvm->arch.active_mmu_pages);
+	bitmap_zero(sp->slot_bitmap, KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS);
+	sp->parent_ptes = 0;
+	mmu_page_add_parent_pte(vcpu, sp, parent_pte);
+	kvm_mod_used_mmu_pages(vcpu->kvm, +1);
+	return sp;
 }
 
-static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
+static void mark_unsync(u64 *spte);
 static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
 {
-	mmu_parent_walk(sp, mark_unsync);
+	pte_list_walk(&sp->parent_ptes, mark_unsync);
 }
 
-static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
+static void mark_unsync(u64 *spte)
 {
+	struct kvm_mmu_page *sp;
 	unsigned int index;
 
+	sp = page_header(__pa(spte));
 	index = spte - sp->spt;
 	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
 		return;
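One subtlety in the hunk above: mark_unsync() loses its sp argument because pte_list_walk() hands back only sptes, so it recovers the shadow page with page_header(__pa(spte)). That lookup works because kvm_mmu_alloc_page() stores the kvm_mmu_page pointer in the struct page behind sp->spt via set_page_private(), as shown in the same hunk. For reference, the helper as it exists elsewhere in mmu.c of this era looks roughly like this (not part of this diff, so treat it as an approximation):

static struct kvm_mmu_page *page_header(hpa_t shadow_page_addr)
{
	struct page *page = pfn_to_page(shadow_page_addr >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}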
@@ -1694,17 +1612,7 @@ static void kvm_mmu_unlink_parents(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
 	u64 *parent_pte;
 
-	while (sp->multimapped || sp->parent_pte) {
-		if (!sp->multimapped)
-			parent_pte = sp->parent_pte;
-		else {
-			struct kvm_pte_chain *chain;
-
-			chain = container_of(sp->parent_ptes.first,
-					     struct kvm_pte_chain, link);
-			parent_pte = chain->parent_ptes[0];
-		}
-		BUG_ON(!parent_pte);
+	while ((parent_pte = pte_list_next(&sp->parent_ptes, NULL))) {
 		kvm_mmu_put_page(sp, parent_pte);
 		__set_spte(parent_pte, shadow_trap_nonpresent_pte);
 	}
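pte_list_next() in the loop above also comes from the parent commit; with a NULL cursor it yields the first spte still on the list, and NULL once the list is empty. Since kvm_mmu_put_page() reaches mmu_page_remove_parent_pte(), each iteration unlinks the entry it just fetched, so the loop drains the list. In terms of the head-word encoding, the NULL-cursor case looks roughly like this (pte_list_first is a hypothetical name for illustration):

/* Hypothetical helper: the cursor-is-NULL case of pte_list_next(). */
static u64 *pte_list_first(unsigned long *pte_list)
{
	struct pte_list_desc *desc;

	if (!*pte_list)			/* empty list */
		return NULL;
	if (!(*pte_list & 1))		/* single entry stored inline */
		return (u64 *)*pte_list;
	desc = (struct pte_list_desc *)(*pte_list & ~1ul);
	return desc->sptes[0];		/* first slot of the first descriptor */
}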
@@ -3617,8 +3525,6 @@ static struct shrinker mmu_shrinker = {
 
 static void mmu_destroy_caches(void)
 {
-	if (pte_chain_cache)
-		kmem_cache_destroy(pte_chain_cache);
 	if (pte_list_desc_cache)
 		kmem_cache_destroy(pte_list_desc_cache);
 	if (mmu_page_header_cache)
@@ -3627,11 +3533,6 @@ static void mmu_destroy_caches(void)
 
 int kvm_mmu_module_init(void)
 {
-	pte_chain_cache = kmem_cache_create("kvm_pte_chain",
-					    sizeof(struct kvm_pte_chain),
-					    0, 0, NULL);
-	if (!pte_chain_cache)
-		goto nomem;
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					    sizeof(struct pte_list_desc),
 					    0, 0, NULL);