aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/kvm/mmu.c
diff options
context:
space:
mode:
authorAvi Kivity <avi@qumranet.com>2007-11-18 09:37:07 -0500
committerAvi Kivity <avi@qumranet.com>2008-01-30 10:53:04 -0500
commit4cee576493b6abc95cc7447a65f1b9d2b40b8f20 (patch)
tree64235c288aaee838e7cebea4d129157935a26e75 /drivers/kvm/mmu.c
parentba1389b7a04de07e6231693b7ebb34f5b5d1a3e6 (diff)
KVM: MMU: Add some mmu statistics
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'drivers/kvm/mmu.c')
-rw-r--r--drivers/kvm/mmu.c9
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/drivers/kvm/mmu.c b/drivers/kvm/mmu.c
index 9be54a5e858e..87d8e70fe502 100644
--- a/drivers/kvm/mmu.c
+++ b/drivers/kvm/mmu.c
@@ -755,6 +755,7 @@ static void kvm_mmu_zap_page(struct kvm *kvm,
755{ 755{
756 u64 *parent_pte; 756 u64 *parent_pte;
757 757
758 ++kvm->stat.mmu_shadow_zapped;
758 while (page->multimapped || page->parent_pte) { 759 while (page->multimapped || page->parent_pte) {
759 if (!page->multimapped) 760 if (!page->multimapped)
760 parent_pte = page->parent_pte; 761 parent_pte = page->parent_pte;
@@ -1226,9 +1227,12 @@ static void mmu_pte_write_new_pte(struct kvm_vcpu *vcpu,
1226 const void *new, int bytes, 1227 const void *new, int bytes,
1227 int offset_in_pte) 1228 int offset_in_pte)
1228{ 1229{
1229 if (page->role.level != PT_PAGE_TABLE_LEVEL) 1230 if (page->role.level != PT_PAGE_TABLE_LEVEL) {
1231 ++vcpu->kvm->stat.mmu_pde_zapped;
1230 return; 1232 return;
1233 }
1231 1234
1235 ++vcpu->kvm->stat.mmu_pte_updated;
1232 if (page->role.glevels == PT32_ROOT_LEVEL) 1236 if (page->role.glevels == PT32_ROOT_LEVEL)
1233 paging32_update_pte(vcpu, page, spte, new, bytes, 1237 paging32_update_pte(vcpu, page, spte, new, bytes,
1234 offset_in_pte); 1238 offset_in_pte);
@@ -1263,6 +1267,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1263 int npte; 1267 int npte;
1264 1268
1265 pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes); 1269 pgprintk("%s: gpa %llx bytes %d\n", __FUNCTION__, gpa, bytes);
1270 ++vcpu->kvm->stat.mmu_pte_write;
1266 kvm_mmu_audit(vcpu, "pre pte write"); 1271 kvm_mmu_audit(vcpu, "pre pte write");
1267 if (gfn == vcpu->last_pt_write_gfn 1272 if (gfn == vcpu->last_pt_write_gfn
1268 && !last_updated_pte_accessed(vcpu)) { 1273 && !last_updated_pte_accessed(vcpu)) {
@@ -1296,6 +1301,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
1296 pgprintk("misaligned: gpa %llx bytes %d role %x\n", 1301 pgprintk("misaligned: gpa %llx bytes %d role %x\n",
1297 gpa, bytes, page->role.word); 1302 gpa, bytes, page->role.word);
1298 kvm_mmu_zap_page(vcpu->kvm, page); 1303 kvm_mmu_zap_page(vcpu->kvm, page);
1304 ++vcpu->kvm->stat.mmu_flooded;
1299 continue; 1305 continue;
1300 } 1306 }
1301 page_offset = offset; 1307 page_offset = offset;
@@ -1344,6 +1350,7 @@ void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu)
1344 page = container_of(vcpu->kvm->active_mmu_pages.prev, 1350 page = container_of(vcpu->kvm->active_mmu_pages.prev,
1345 struct kvm_mmu_page, link); 1351 struct kvm_mmu_page, link);
1346 kvm_mmu_zap_page(vcpu->kvm, page); 1352 kvm_mmu_zap_page(vcpu->kvm, page);
1353 ++vcpu->kvm->stat.mmu_recycled;
1347 } 1354 }
1348} 1355}
1349 1356