author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>   2010-06-11 09:35:15 -0400
committer  Avi Kivity <avi@redhat.com>                     2010-08-01 03:46:45 -0400
commit     1047df1fb682a41eb9885d6b3f2d04d6c8fd3756 (patch)
tree       0bb56adeae33459616149940d3c65246a78e61c9 /arch
parent     7a8f1a74e4193d21e55b35928197486f2c047efb (diff)
KVM: MMU: don't walk every parent page while marking unsync
When we set a bit in a parent's unsync_child_bitmap and that parent is
already unsynced, there is no need to walk its parents in turn; cutting
the walk short avoids unnecessary work. (A simplified model of the idea
follows the diff below.)
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch')
 arch/x86/kvm/mmu.c | 61 +++++++++++++++++--------------------------------------------------------
 1 file changed, 17 insertions(+), 44 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index ab12be4eb105..8c2f580956d9 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -175,7 +175,7 @@ struct kvm_shadow_walk_iterator {
 	     shadow_walk_okay(&(_walker));			\
 	     shadow_walk_next(&(_walker)))
 
-typedef int (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp);
+typedef void (*mmu_parent_walk_fn) (struct kvm_mmu_page *sp, u64 *spte);
 
 static struct kmem_cache *pte_chain_cache;
 static struct kmem_cache *rmap_desc_cache;
@@ -1024,7 +1024,6 @@ static void mmu_page_remove_parent_pte(struct kvm_mmu_page *sp,
 	BUG();
 }
 
-
 static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
 {
 	struct kvm_pte_chain *pte_chain;
@@ -1034,63 +1033,37 @@ static void mmu_parent_walk(struct kvm_mmu_page *sp, mmu_parent_walk_fn fn)
 
 	if (!sp->multimapped && sp->parent_pte) {
 		parent_sp = page_header(__pa(sp->parent_pte));
-		fn(parent_sp);
-		mmu_parent_walk(parent_sp, fn);
+		fn(parent_sp, sp->parent_pte);
 		return;
 	}
+
 	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
 		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
-			if (!pte_chain->parent_ptes[i])
+			u64 *spte = pte_chain->parent_ptes[i];
+
+			if (!spte)
 				break;
-			parent_sp = page_header(__pa(pte_chain->parent_ptes[i]));
-			fn(parent_sp);
-			mmu_parent_walk(parent_sp, fn);
+			parent_sp = page_header(__pa(spte));
+			fn(parent_sp, spte);
 		}
 }
 
-static void kvm_mmu_update_unsync_bitmap(u64 *spte)
+static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte);
+static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
 {
-	unsigned int index;
-	struct kvm_mmu_page *sp = page_header(__pa(spte));
-
-	index = spte - sp->spt;
-	if (!__test_and_set_bit(index, sp->unsync_child_bitmap))
-		sp->unsync_children++;
-	WARN_ON(!sp->unsync_children);
+	mmu_parent_walk(sp, mark_unsync);
 }
 
-static void kvm_mmu_update_parents_unsync(struct kvm_mmu_page *sp)
+static void mark_unsync(struct kvm_mmu_page *sp, u64 *spte)
 {
-	struct kvm_pte_chain *pte_chain;
-	struct hlist_node *node;
-	int i;
+	unsigned int index;
 
-	if (!sp->parent_pte)
+	index = spte - sp->spt;
+	if (__test_and_set_bit(index, sp->unsync_child_bitmap))
 		return;
-
-	if (!sp->multimapped) {
-		kvm_mmu_update_unsync_bitmap(sp->parent_pte);
+	if (sp->unsync_children++)
 		return;
-	}
-
-	hlist_for_each_entry(pte_chain, node, &sp->parent_ptes, link)
-		for (i = 0; i < NR_PTE_CHAIN_ENTRIES; ++i) {
-			if (!pte_chain->parent_ptes[i])
-				break;
-			kvm_mmu_update_unsync_bitmap(pte_chain->parent_ptes[i]);
-		}
-}
-
-static int unsync_walk_fn(struct kvm_mmu_page *sp)
-{
-	kvm_mmu_update_parents_unsync(sp);
-	return 1;
-}
-
-static void kvm_mmu_mark_parents_unsync(struct kvm_mmu_page *sp)
-{
-	mmu_parent_walk(sp, unsync_walk_fn);
-	kvm_mmu_update_parents_unsync(sp);
-}
+	kvm_mmu_mark_parents_unsync(sp);
 }
 
 static void nonpaging_prefetch_page(struct kvm_vcpu *vcpu,
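
To make the early-return logic concrete, here is a minimal, self-contained
sketch of the new propagation scheme. The types and fields below (mmu_page,
parent, parent_index) are hypothetical stand-ins, not KVM's structures, and
the single parent link glosses over the multimapped/pte-chain case that the
real mmu_parent_walk() handles:

/* Toy model of the patched mark_unsync() path; not KVM code. */
#include <stdbool.h>
#include <stdio.h>

#define NR_ENTRIES 512

struct mmu_page {
	struct mmu_page *parent;	/* one parent link; KVM also handles multimapped pages */
	int parent_index;		/* slot in the parent that maps this page */
	bool unsync_child_bitmap[NR_ENTRIES];
	unsigned int unsync_children;
};

/* Mirrors the new mark_unsync(): set our bit in sp and keep climbing
 * only on a 0 -> 1 transition of the bit and of unsync_children. */
static void mark_unsync(struct mmu_page *sp, int index)
{
	if (sp->unsync_child_bitmap[index])
		return;			/* bit already set: ancestors were marked earlier */
	sp->unsync_child_bitmap[index] = true;
	if (sp->unsync_children++)
		return;			/* sp's parents already know sp has unsync children */
	if (sp->parent)
		mark_unsync(sp->parent, sp->parent_index);
}

int main(void)
{
	struct mmu_page root = {0}, mid = {0}, leaf1 = {0}, leaf2 = {0};

	mid.parent = &root;   mid.parent_index = 3;
	leaf1.parent = &mid;  leaf1.parent_index = 7;
	leaf2.parent = &mid;  leaf2.parent_index = 9;

	mark_unsync(leaf1.parent, leaf1.parent_index);	/* climbs all the way to root */
	mark_unsync(leaf2.parent, leaf2.parent_index);	/* stops at mid: already unsynced */

	/* Prints "root: 1, mid: 2" */
	printf("root: %u, mid: %u\n", root.unsync_children, mid.unsync_children);
	return 0;
}

The second call returns as soon as it sees that mid's unsync_children count
is already nonzero; that skipped climb to root is exactly the redundant
parent walk the patch eliminates.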