about | summary | refs | log | tree | commit | diff | stats
diff options
context:
space:
mode:
-rw-r--r--  arch/x86/kvm/mmu.c          16
-rw-r--r--  arch/x86/kvm/mmu_audit.c    28
-rw-r--r--  arch/x86/kvm/mmutrace.h     19
-rw-r--r--  arch/x86/kvm/paging_tmpl.h   4
4 files changed, 26 insertions, 41 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d737443cdfdb..62f69dbf6b52 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -68,6 +68,12 @@ char *audit_point_name[] = {
 	"post sync"
 };
 
+#ifdef CONFIG_KVM_MMU_AUDIT
+static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point);
+#else
+static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point) { }
+#endif
+
 #undef MMU_DEBUG
 
 #ifdef MMU_DEBUG
@@ -2852,12 +2858,12 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 		return;
 
 	vcpu_clear_mmio_info(vcpu, ~0ul);
-	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
+	kvm_mmu_audit(vcpu, AUDIT_PRE_SYNC);
 	if (vcpu->arch.mmu.root_level == PT64_ROOT_LEVEL) {
 		hpa_t root = vcpu->arch.mmu.root_hpa;
 		sp = page_header(root);
 		mmu_sync_children(vcpu, sp);
-		trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
+		kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
 		return;
 	}
 	for (i = 0; i < 4; ++i) {
@@ -2869,7 +2875,7 @@ static void mmu_sync_roots(struct kvm_vcpu *vcpu)
 			mmu_sync_children(vcpu, sp);
 		}
 	}
-	trace_kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
+	kvm_mmu_audit(vcpu, AUDIT_POST_SYNC);
 }
 
 void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
@@ -3667,7 +3673,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 
 	spin_lock(&vcpu->kvm->mmu_lock);
 	++vcpu->kvm->stat.mmu_pte_write;
-	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
+	kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
 	mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
 	for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn, node) {
@@ -3700,7 +3706,7 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 	}
 	mmu_pte_write_flush_tlb(vcpu, zap_page, remote_flush, local_flush);
 	kvm_mmu_commit_zap_page(vcpu->kvm, &invalid_list);
-	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
+	kvm_mmu_audit(vcpu, AUDIT_POST_PTE_WRITE);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 }
 
diff --git a/arch/x86/kvm/mmu_audit.c b/arch/x86/kvm/mmu_audit.c
index 746ec259d024..5df6736a5afb 100644
--- a/arch/x86/kvm/mmu_audit.c
+++ b/arch/x86/kvm/mmu_audit.c
@@ -224,30 +224,29 @@ static void audit_vcpu_spte(struct kvm_vcpu *vcpu)
 	mmu_spte_walk(vcpu, audit_spte);
 }
 
-static void kvm_mmu_audit(void *ignore, struct kvm_vcpu *vcpu, int point)
+static bool mmu_audit;
+static struct jump_label_key mmu_audit_key;
+
+static void kvm_mmu_audit(struct kvm_vcpu *vcpu, int point)
 {
 	static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
 
-	if (!__ratelimit(&ratelimit_state))
-		return;
+	if (static_branch((&mmu_audit_key))) {
+		if (!__ratelimit(&ratelimit_state))
+			return;
 
-	vcpu->kvm->arch.audit_point = point;
-	audit_all_active_sps(vcpu->kvm);
-	audit_vcpu_spte(vcpu);
+		vcpu->kvm->arch.audit_point = point;
+		audit_all_active_sps(vcpu->kvm);
+		audit_vcpu_spte(vcpu);
+	}
 }
 
-static bool mmu_audit;
-
 static void mmu_audit_enable(void)
 {
-	int ret;
-
 	if (mmu_audit)
 		return;
 
-	ret = register_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-	WARN_ON(ret);
-
+	jump_label_inc(&mmu_audit_key);
 	mmu_audit = true;
 }
 
@@ -256,8 +255,7 @@ static void mmu_audit_disable(void)
 	if (!mmu_audit)
 		return;
 
-	unregister_trace_kvm_mmu_audit(kvm_mmu_audit, NULL);
-	tracepoint_synchronize_unregister();
+	jump_label_dec(&mmu_audit_key);
 	mmu_audit = false;
 }
 
diff --git a/arch/x86/kvm/mmutrace.h b/arch/x86/kvm/mmutrace.h
index eed67f34146d..89fb0e81322a 100644
--- a/arch/x86/kvm/mmutrace.h
+++ b/arch/x86/kvm/mmutrace.h
@@ -243,25 +243,6 @@ TRACE_EVENT(
 	TP_printk("addr:%llx gfn %llx access %x", __entry->addr, __entry->gfn,
 		  __entry->access)
 );
-
-TRACE_EVENT(
-	kvm_mmu_audit,
-	TP_PROTO(struct kvm_vcpu *vcpu, int audit_point),
-	TP_ARGS(vcpu, audit_point),
-
-	TP_STRUCT__entry(
-		__field(struct kvm_vcpu *, vcpu)
-		__field(int, audit_point)
-	),
-
-	TP_fast_assign(
-		__entry->vcpu = vcpu;
-		__entry->audit_point = audit_point;
-	),
-
-	TP_printk("vcpu:%d %s", __entry->vcpu->cpu,
-		  audit_point_name[__entry->audit_point])
-);
 #endif /* _TRACE_KVMMMU_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 52e9d58cec2b..15610285ebb6 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -632,7 +632,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	if (mmu_notifier_retry(vcpu, mmu_seq))
 		goto out_unlock;
 
-	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
+	kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	kvm_mmu_free_some_pages(vcpu);
 	if (!force_pt_level)
 		transparent_hugepage_adjust(vcpu, &walker.gfn, &pfn, &level);
@@ -643,7 +643,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 		 sptep, *sptep, emulate);
 
 	++vcpu->stat.pf_fixed;
-	trace_kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
+	kvm_mmu_audit(vcpu, AUDIT_POST_PAGE_FAULT);
 	spin_unlock(&vcpu->kvm->mmu_lock);
 
 	return emulate;