author     Avi Kivity <avi@redhat.com>    2009-07-06 08:58:14 -0400
committer  Avi Kivity <avi@redhat.com>    2009-09-10 01:33:10 -0400
commit     f691fe1da7e2715137d21ae5a80bec64db4625db (patch)
tree       831a24e7094543cc327cffe7a6fbecb4b58eb82d /arch/x86/kvm/mmu.c
parent     9c1b96e34717d001873b603d85434aa78e730282 (diff)
KVM: Trace shadow page lifecycle
Create, sync, unsync, zap.
Signed-off-by: Avi Kivity <avi@redhat.com>
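
[Note: the trace_kvm_mmu_* calls added below are kernel tracepoints; their definitions are not visible here because the diffstat is limited to arch/x86/kvm/mmu.c. As background, the sketch below shows how one such tracepoint could be declared with the kernel's TRACE_EVENT() machinery. It is only an illustrative assumption: the field names, header layout, and printk format are not taken from this series.]

/*
 * Illustrative sketch only -- not the actual kvmmmu trace header.
 * Shows the general shape of a TRACE_EVENT() declaration matching the
 * trace_kvm_mmu_get_page(sp, created) call sites added in this patch.
 */
#undef TRACE_SYSTEM
#define TRACE_SYSTEM kvmmmu

#include <linux/tracepoint.h>

TRACE_EVENT(kvm_mmu_get_page,
	TP_PROTO(struct kvm_mmu_page *sp, bool created),
	TP_ARGS(sp, created),

	TP_STRUCT__entry(
		__field(__u64, gfn)		/* guest frame number of the shadowed page */
		__field(__u32, role)		/* packed kvm_mmu_page_role word */
		__field(bool,  created)		/* true = freshly allocated, false = cache hit */
	),

	TP_fast_assign(
		__entry->gfn     = sp->gfn;
		__entry->role    = sp->role.word;
		__entry->created = created;
	),

	TP_printk("gfn %llx role %x %s",
		  __entry->gfn, __entry->role,
		  __entry->created ? "new" : "existing")
);

[Once such events are compiled in, they can typically be enabled at runtime through the tracing filesystem, e.g. under events/kvmmmu/ in debugfs/tracefs; the exact event names and paths here are assumptions.]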
Diffstat (limited to 'arch/x86/kvm/mmu.c')
 -rw-r--r--  arch/x86/kvm/mmu.c  10
 1 files changed, 6 insertions, 4 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c0dda6447b9f..ac121b39a5bc 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1122,6 +1122,7 @@ static int kvm_sync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
                 return 1;
         }
 
+        trace_kvm_mmu_sync_page(sp);
         if (rmap_write_protect(vcpu->kvm, sp->gfn))
                 kvm_flush_remote_tlbs(vcpu->kvm);
         kvm_unlink_unsync_page(vcpu->kvm, sp);
@@ -1244,8 +1245,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                 quadrant &= (1 << ((PT32_PT_BITS - PT64_PT_BITS) * level)) - 1;
                 role.quadrant = quadrant;
         }
-        pgprintk("%s: looking gfn %lx role %x\n", __func__,
-                 gfn, role.word);
         index = kvm_page_table_hashfn(gfn);
         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
         hlist_for_each_entry_safe(sp, node, tmp, bucket, hash_link)
@@ -1262,14 +1261,13 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                                 set_bit(KVM_REQ_MMU_SYNC, &vcpu->requests);
                                 kvm_mmu_mark_parents_unsync(vcpu, sp);
                         }
-                        pgprintk("%s: found\n", __func__);
+                        trace_kvm_mmu_get_page(sp, false);
                         return sp;
                 }
         ++vcpu->kvm->stat.mmu_cache_miss;
         sp = kvm_mmu_alloc_page(vcpu, parent_pte);
         if (!sp)
                 return sp;
-        pgprintk("%s: adding gfn %lx role %x\n", __func__, gfn, role.word);
         sp->gfn = gfn;
         sp->role = role;
         hlist_add_head(&sp->hash_link, bucket);
@@ -1282,6 +1280,7 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
                 vcpu->arch.mmu.prefetch_page(vcpu, sp);
         else
                 nonpaging_prefetch_page(vcpu, sp);
+        trace_kvm_mmu_get_page(sp, true);
         return sp;
 }
 
@@ -1410,6 +1409,8 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
 static int kvm_mmu_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp)
 {
         int ret;
+
+        trace_kvm_mmu_zap_page(sp);
         ++kvm->stat.mmu_shadow_zapped;
         ret = mmu_zap_unsync_children(kvm, sp);
         kvm_mmu_page_unlink_children(kvm, sp);
@@ -1656,6 +1657,7 @@ static int kvm_unsync_page(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
         struct kvm_mmu_page *s;
         struct hlist_node *node, *n;
 
+        trace_kvm_mmu_unsync_page(sp);
         index = kvm_page_table_hashfn(sp->gfn);
         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
         /* don't unsync if pagetable is shadowed with multiple roles */