author		David Matlack <dmatlack@google.com>	2016-12-20 18:25:57 -0500
committer	Radim Krčmář <rkrcmar@redhat.com>	2017-01-09 08:46:03 -0500
commit		f3414bc77419463c0d81eaa2cea7ee4ccb447c7d (patch)
tree		153185d45ad2105c030648945e650687fe3adfec
parent		826da32140dada1467f4216410525511393317e8 (diff)
kvm: x86: export maximum number of mmu_page_hash collisions
Report the maximum number of mmu_page_hash collisions as a per-VM stat.
This will make it easy to identify problems with the mmu_page_hash in
the future.

Signed-off-by: David Matlack <dmatlack@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/include/asm/kvm_host.h	1
-rw-r--r--	arch/x86/kvm/mmu.c	25
-rw-r--r--	arch/x86/kvm/x86.c	2
3 files changed, 20 insertions, 8 deletions
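
For context before the diff: on each call to kvm_mmu_get_page(), the patch counts how many shadow pages sit in the requested gfn's mmu_page_hash bucket without actually matching that gfn, and records the largest count ever observed in the new max_mmu_page_hash_collisions stat. The stand-alone C sketch below reproduces only that counting pattern against a made-up toy hash table; the names (buckets, entry, max_hash_collisions) are invented for illustration and this is not the kernel code.

/*
 * Toy illustration of the pattern the patch applies: while walking one
 * hash bucket, count entries whose key does not match the lookup key,
 * and keep a running maximum as a statistic.
 */
#include <stdio.h>
#include <stdlib.h>

#define NBUCKETS 4	/* deliberately tiny so collisions are easy to provoke */

struct entry {
	unsigned long gfn;	/* key, analogous to sp->gfn */
	struct entry *next;	/* hash chain, analogous to hash_link */
};

static struct entry *buckets[NBUCKETS];
static unsigned long max_hash_collisions;	/* analogue of the per-VM stat */

static unsigned int hashfn(unsigned long gfn)
{
	return gfn % NBUCKETS;
}

static void insert(unsigned long gfn)
{
	struct entry *e = malloc(sizeof(*e));

	e->gfn = gfn;
	e->next = buckets[hashfn(gfn)];
	buckets[hashfn(gfn)] = e;
}

static struct entry *lookup(unsigned long gfn)
{
	unsigned long collisions = 0;
	struct entry *e;

	for (e = buckets[hashfn(gfn)]; e; e = e->next) {
		if (e->gfn != gfn) {
			collisions++;	/* same bucket, different key */
			continue;
		}
		break;
	}
	if (collisions > max_hash_collisions)
		max_hash_collisions = collisions;
	return e;
}

int main(void)
{
	/* gfns 1, 5 and 9 all hash to bucket 1, so looking up 1 walks past two collisions */
	insert(1);
	insert(5);
	insert(9);
	lookup(1);
	printf("max_hash_collisions = %lu\n", max_hash_collisions);
	return 0;
}

Like the other VM_STAT entries registered in x86.c below, the new counter should then be readable through KVM's debugfs statistics.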
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index fc03ab1f6110..1bb1ffc0024c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -821,6 +821,7 @@ struct kvm_vm_stat {
 	ulong mmu_unsync;
 	ulong remote_tlb_flush;
 	ulong lpages;
+	ulong max_mmu_page_hash_collisions;
 };
 
 struct kvm_vcpu_stat {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 7012de4a1fed..45ee7ae88239 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1904,17 +1904,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
  * since it has been deleted from active_mmu_pages but still can be found
  * at hast list.
  *
- * for_each_gfn_valid_sp() has skipped that kind of pages.
+ * for_each_valid_sp() has skipped that kind of pages.
  */
-#define for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
+#define for_each_valid_sp(_kvm, _sp, _gfn)				\
 	hlist_for_each_entry(_sp,					\
 	  &(_kvm)->arch.mmu_page_hash[kvm_page_table_hashfn(_gfn)], hash_link) \
-		if ((_sp)->gfn != (_gfn) || is_obsolete_sp((_kvm), (_sp)) \
-			|| (_sp)->role.invalid) {} else
+		if (is_obsolete_sp((_kvm), (_sp)) || (_sp)->role.invalid) {	\
+		} else
 
 #define for_each_gfn_indirect_valid_sp(_kvm, _sp, _gfn)		\
-	for_each_gfn_valid_sp(_kvm, _sp, _gfn)				\
-		if ((_sp)->role.direct) {} else
+	for_each_valid_sp(_kvm, _sp, _gfn)				\
+		if ((_sp)->gfn != (_gfn) || (_sp)->role.direct) {} else
 
 /* @sp->gfn should be write-protected at the call site */
 static bool __kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
@@ -2116,6 +2116,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	struct kvm_mmu_page *sp;
 	bool need_sync = false;
 	bool flush = false;
+	int collisions = 0;
 	LIST_HEAD(invalid_list);
 
 	role = vcpu->arch.mmu.base_role;
@@ -2130,7 +2131,12 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 		quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
 		role.quadrant = quadrant;
 	}
-	for_each_gfn_valid_sp(vcpu->kvm, sp, gfn) {
+	for_each_valid_sp(vcpu->kvm, sp, gfn) {
+		if (sp->gfn != gfn) {
+			collisions++;
+			continue;
+		}
+
 		if (!need_sync && sp->unsync)
 			need_sync = true;
 
@@ -2153,7 +2159,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 
 		__clear_sp_write_flooding_count(sp);
 		trace_kvm_mmu_get_page(sp, false);
-		return sp;
+		goto out;
 	}
 
 	++vcpu->kvm->stat.mmu_cache_miss;
@@ -2183,6 +2189,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
 	trace_kvm_mmu_get_page(sp, true);
 
 	kvm_mmu_flush_or_zap(vcpu, &invalid_list, false, flush);
+out:
+	if (collisions > vcpu->kvm->stat.max_mmu_page_hash_collisions)
+		vcpu->kvm->stat.max_mmu_page_hash_collisions = collisions;
 	return sp;
 }
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a356d8e12c2f..4aece8b0a4aa 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -190,6 +190,8 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
 	{ "mmu_unsync", VM_STAT(mmu_unsync) },
 	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
 	{ "largepages", VM_STAT(lpages) },
+	{ "max_mmu_page_hash_collisions",
+		VM_STAT(max_mmu_page_hash_collisions) },
 	{ NULL }
 };
 