Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |   2
-rw-r--r--  arch/x86/kvm/Makefile           |   4
-rw-r--r--  arch/x86/kvm/lapic.c            |   7
-rw-r--r--  arch/x86/kvm/svm.c              |  84
-rw-r--r--  arch/x86/kvm/trace.h            | 260
-rw-r--r--  arch/x86/kvm/vmx.c              |  78
-rw-r--r--  arch/x86/kvm/x86.c              |  48
7 files changed, 397 insertions, 86 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index c7b0cc2b7020..19027ab20412 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -14,6 +14,7 @@
 #include <linux/types.h>
 #include <linux/mm.h>
 #include <linux/mmu_notifier.h>
+#include <linux/tracepoint.h>
 
 #include <linux/kvm.h>
 #include <linux/kvm_para.h>
@@ -527,6 +528,7 @@ struct kvm_x86_ops {
         int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
         int (*get_tdp_level)(void);
         u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
+        const struct trace_print_flags *exit_reasons_str;
 };
 
 extern struct kvm_x86_ops *kvm_x86_ops;
diff --git a/arch/x86/kvm/Makefile b/arch/x86/kvm/Makefile
index 01e3c61f749a..7c56850b82cb 100644
--- a/arch/x86/kvm/Makefile
+++ b/arch/x86/kvm/Makefile
@@ -1,6 +1,10 @@
 
 EXTRA_CFLAGS += -Ivirt/kvm -Iarch/x86/kvm
 
+CFLAGS_x86.o := -I.
+CFLAGS_svm.o := -I.
+CFLAGS_vmx.o := -I.
+
 kvm-y += $(addprefix ../../../virt/kvm/, kvm_main.o ioapic.o \
         coalesced_mmio.o irq_comm.o eventfd.o)
 kvm-$(CONFIG_KVM_TRACE) += $(addprefix ../../../virt/kvm/, kvm_trace.o)
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 3bde43c3789e..2e0286596387 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -34,6 +34,7 @@
 #include <asm/atomic.h>
 #include "kvm_cache_regs.h"
 #include "irq.h"
+#include "trace.h"
 
 #ifndef CONFIG_X86_64
 #define mod_64(x, y) ((x) - (y) * div64_u64(x, y))
@@ -515,8 +516,6 @@ static u32 __apic_read(struct kvm_lapic *apic, unsigned int offset)
 {
         u32 val = 0;
 
-        KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
-
         if (offset >= LAPIC_MMIO_LENGTH)
                 return 0;
 
@@ -562,6 +561,8 @@ static void apic_mmio_read(struct kvm_io_device *this,
         }
         result = __apic_read(apic, offset & ~0xf);
 
+        trace_kvm_apic_read(offset, result);
+
         switch (len) {
         case 1:
         case 2:
@@ -657,7 +658,7 @@ static void apic_mmio_write(struct kvm_io_device *this,
 
         offset &= 0xff0;
 
-        KVMTRACE_1D(APIC_ACCESS, apic->vcpu, (u32)offset, handler);
+        trace_kvm_apic_write(offset, val);
 
         switch (offset) {
         case APIC_ID:   /* Local APIC ID */
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 456666183770..b1c446208867 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -25,10 +25,12 @@
 #include <linux/vmalloc.h>
 #include <linux/highmem.h>
 #include <linux/sched.h>
+#include <linux/ftrace_event.h>
 
 #include <asm/desc.h>
 
 #include <asm/virtext.h>
+#include "trace.h"
 
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
@@ -1096,7 +1098,6 @@ static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
                 val = 0;
         }
 
-        KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
         return val;
 }
 
@@ -1105,8 +1106,6 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 {
         struct vcpu_svm *svm = to_svm(vcpu);
 
-        KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);
-
         *exception = 0;
 
         switch (dr) {
@@ -1154,14 +1153,7 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
         fault_address = svm->vmcb->control.exit_info_2;
         error_code = svm->vmcb->control.exit_info_1;
 
-        if (!npt_enabled)
-                KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
-                            (u32)fault_address, (u32)(fault_address >> 32),
-                            handler);
-        else
-                KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
-                            (u32)fault_address, (u32)(fault_address >> 32),
-                            handler);
+        trace_kvm_page_fault(fault_address, error_code);
         /*
          * FIXME: Tis shouldn't be necessary here, but there is a flush
          * missing in the MMU code. Until we find this bug, flush the
@@ -1288,14 +1280,12 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 
 static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
-        KVMTRACE_0D(NMI, &svm->vcpu, handler);
         return 1;
 }
 
 static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
         ++svm->vcpu.stat.irq_exits;
-        KVMTRACE_0D(INTR, &svm->vcpu, handler);
         return 1;
 }
 
@@ -2077,8 +2067,7 @@ static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
         if (svm_get_msr(&svm->vcpu, ecx, &data))
                 kvm_inject_gp(&svm->vcpu, 0);
         else {
-                KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
-                            (u32)(data >> 32), handler);
+                trace_kvm_msr_read(ecx, data);
 
                 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
                 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
@@ -2163,8 +2152,7 @@ static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
         u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
                 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-        KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
-                    handler);
+        trace_kvm_msr_write(ecx, data);
 
         svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
         if (svm_set_msr(&svm->vcpu, ecx, data))
@@ -2185,8 +2173,6 @@ static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int interrupt_window_interception(struct vcpu_svm *svm,
                                    struct kvm_run *kvm_run)
 {
-        KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
-
         svm_clear_vintr(svm);
         svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
         /*
@@ -2265,8 +2251,7 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         struct vcpu_svm *svm = to_svm(vcpu);
         u32 exit_code = svm->vmcb->control.exit_code;
 
-        KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
-                    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
+        trace_kvm_exit(exit_code, svm->vmcb->save.rip);
 
         if (is_nested(svm)) {
                 nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
@@ -2354,7 +2339,7 @@ static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
 {
         struct vmcb_control_area *control;
 
-        KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
+        trace_kvm_inj_virq(irq);
 
         ++svm->vcpu.stat.irq_injections;
         control = &svm->vmcb->control;
@@ -2717,6 +2702,59 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
         return 0;
 }
 
+static const struct trace_print_flags svm_exit_reasons_str[] = {
+        { SVM_EXIT_READ_CR0, "read_cr0" },
+        { SVM_EXIT_READ_CR3, "read_cr3" },
+        { SVM_EXIT_READ_CR4, "read_cr4" },
+        { SVM_EXIT_READ_CR8, "read_cr8" },
+        { SVM_EXIT_WRITE_CR0, "write_cr0" },
+        { SVM_EXIT_WRITE_CR3, "write_cr3" },
+        { SVM_EXIT_WRITE_CR4, "write_cr4" },
+        { SVM_EXIT_WRITE_CR8, "write_cr8" },
+        { SVM_EXIT_READ_DR0, "read_dr0" },
+        { SVM_EXIT_READ_DR1, "read_dr1" },
+        { SVM_EXIT_READ_DR2, "read_dr2" },
+        { SVM_EXIT_READ_DR3, "read_dr3" },
+        { SVM_EXIT_WRITE_DR0, "write_dr0" },
+        { SVM_EXIT_WRITE_DR1, "write_dr1" },
+        { SVM_EXIT_WRITE_DR2, "write_dr2" },
+        { SVM_EXIT_WRITE_DR3, "write_dr3" },
+        { SVM_EXIT_WRITE_DR5, "write_dr5" },
+        { SVM_EXIT_WRITE_DR7, "write_dr7" },
+        { SVM_EXIT_EXCP_BASE + DB_VECTOR, "DB excp" },
+        { SVM_EXIT_EXCP_BASE + BP_VECTOR, "BP excp" },
+        { SVM_EXIT_EXCP_BASE + UD_VECTOR, "UD excp" },
+        { SVM_EXIT_EXCP_BASE + PF_VECTOR, "PF excp" },
+        { SVM_EXIT_EXCP_BASE + NM_VECTOR, "NM excp" },
+        { SVM_EXIT_EXCP_BASE + MC_VECTOR, "MC excp" },
+        { SVM_EXIT_INTR, "interrupt" },
+        { SVM_EXIT_NMI, "nmi" },
+        { SVM_EXIT_SMI, "smi" },
+        { SVM_EXIT_INIT, "init" },
+        { SVM_EXIT_VINTR, "vintr" },
+        { SVM_EXIT_CPUID, "cpuid" },
+        { SVM_EXIT_INVD, "invd" },
+        { SVM_EXIT_HLT, "hlt" },
+        { SVM_EXIT_INVLPG, "invlpg" },
+        { SVM_EXIT_INVLPGA, "invlpga" },
+        { SVM_EXIT_IOIO, "io" },
+        { SVM_EXIT_MSR, "msr" },
+        { SVM_EXIT_TASK_SWITCH, "task_switch" },
+        { SVM_EXIT_SHUTDOWN, "shutdown" },
+        { SVM_EXIT_VMRUN, "vmrun" },
+        { SVM_EXIT_VMMCALL, "hypercall" },
+        { SVM_EXIT_VMLOAD, "vmload" },
+        { SVM_EXIT_VMSAVE, "vmsave" },
+        { SVM_EXIT_STGI, "stgi" },
+        { SVM_EXIT_CLGI, "clgi" },
+        { SVM_EXIT_SKINIT, "skinit" },
+        { SVM_EXIT_WBINVD, "wbinvd" },
+        { SVM_EXIT_MONITOR, "monitor" },
+        { SVM_EXIT_MWAIT, "mwait" },
+        { SVM_EXIT_NPF, "npf" },
+        { -1, NULL }
+};
+
 static struct kvm_x86_ops svm_x86_ops = {
         .cpu_has_kvm_support = has_svm,
         .disabled_by_bios = is_disabled,
@@ -2778,6 +2816,8 @@ static struct kvm_x86_ops svm_x86_ops = {
         .set_tss_addr = svm_set_tss_addr,
         .get_tdp_level = get_npt_level,
         .get_mt_mask = svm_get_mt_mask,
+
+        .exit_reasons_str = svm_exit_reasons_str,
 };
 
 static int __init svm_init(void)
diff --git a/arch/x86/kvm/trace.h b/arch/x86/kvm/trace.h
new file mode 100644
index 000000000000..cd8c90db41a5
--- /dev/null
+++ b/arch/x86/kvm/trace.h
@@ -0,0 +1,260 @@
+#if !defined(_TRACE_KVM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_KVM_H
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM kvm
+#define TRACE_INCLUDE_PATH arch/x86/kvm
+#define TRACE_INCLUDE_FILE trace
+
+/*
+ * Tracepoint for guest mode entry.
+ */
+TRACE_EVENT(kvm_entry,
+        TP_PROTO(unsigned int vcpu_id),
+        TP_ARGS(vcpu_id),
+
+        TP_STRUCT__entry(
+                __field( unsigned int, vcpu_id )
+        ),
+
+        TP_fast_assign(
+                __entry->vcpu_id = vcpu_id;
+        ),
+
+        TP_printk("vcpu %u", __entry->vcpu_id)
+);
+
+/*
+ * Tracepoint for hypercall.
+ */
+TRACE_EVENT(kvm_hypercall,
+        TP_PROTO(unsigned long nr, unsigned long a0, unsigned long a1,
+                 unsigned long a2, unsigned long a3),
+        TP_ARGS(nr, a0, a1, a2, a3),
+
+        TP_STRUCT__entry(
+                __field( unsigned long, nr )
+                __field( unsigned long, a0 )
+                __field( unsigned long, a1 )
+                __field( unsigned long, a2 )
+                __field( unsigned long, a3 )
+        ),
+
+        TP_fast_assign(
+                __entry->nr = nr;
+                __entry->a0 = a0;
+                __entry->a1 = a1;
+                __entry->a2 = a2;
+                __entry->a3 = a3;
+        ),
+
+        TP_printk("nr 0x%lx a0 0x%lx a1 0x%lx a2 0x%lx a3 0x%lx",
+                  __entry->nr, __entry->a0, __entry->a1, __entry->a2,
+                  __entry->a3)
+);
+
+/*
+ * Tracepoint for PIO.
+ */
+TRACE_EVENT(kvm_pio,
+        TP_PROTO(unsigned int rw, unsigned int port, unsigned int size,
+                 unsigned int count),
+        TP_ARGS(rw, port, size, count),
+
+        TP_STRUCT__entry(
+                __field( unsigned int, rw )
+                __field( unsigned int, port )
+                __field( unsigned int, size )
+                __field( unsigned int, count )
+        ),
+
+        TP_fast_assign(
+                __entry->rw = rw;
+                __entry->port = port;
+                __entry->size = size;
+                __entry->count = count;
+        ),
+
+        TP_printk("pio_%s at 0x%x size %d count %d",
+                  __entry->rw ? "write" : "read",
+                  __entry->port, __entry->size, __entry->count)
+);
+
+/*
+ * Tracepoint for cpuid.
+ */
+TRACE_EVENT(kvm_cpuid,
+        TP_PROTO(unsigned int function, unsigned long rax, unsigned long rbx,
+                 unsigned long rcx, unsigned long rdx),
+        TP_ARGS(function, rax, rbx, rcx, rdx),
+
+        TP_STRUCT__entry(
+                __field( unsigned int, function )
+                __field( unsigned long, rax )
+                __field( unsigned long, rbx )
+                __field( unsigned long, rcx )
+                __field( unsigned long, rdx )
+        ),
+
+        TP_fast_assign(
+                __entry->function = function;
+                __entry->rax = rax;
+                __entry->rbx = rbx;
+                __entry->rcx = rcx;
+                __entry->rdx = rdx;
+        ),
+
+        TP_printk("func %x rax %lx rbx %lx rcx %lx rdx %lx",
+                  __entry->function, __entry->rax,
+                  __entry->rbx, __entry->rcx, __entry->rdx)
+);
+
+/*
+ * Tracepoint for apic access.
+ */
+TRACE_EVENT(kvm_apic,
+        TP_PROTO(unsigned int rw, unsigned int reg, unsigned int val),
+        TP_ARGS(rw, reg, val),
+
+        TP_STRUCT__entry(
+                __field( unsigned int, rw )
+                __field( unsigned int, reg )
+                __field( unsigned int, val )
+        ),
+
+        TP_fast_assign(
+                __entry->rw = rw;
+                __entry->reg = reg;
+                __entry->val = val;
+        ),
+
+        TP_printk("apic_%s 0x%x = 0x%x",
+                  __entry->rw ? "write" : "read",
+                  __entry->reg, __entry->val)
+);
+
+#define trace_kvm_apic_read(reg, val) trace_kvm_apic(0, reg, val)
+#define trace_kvm_apic_write(reg, val) trace_kvm_apic(1, reg, val)
+
+/*
+ * Tracepoint for kvm guest exit:
+ */
+TRACE_EVENT(kvm_exit,
+        TP_PROTO(unsigned int exit_reason, unsigned long guest_rip),
+        TP_ARGS(exit_reason, guest_rip),
+
+        TP_STRUCT__entry(
+                __field( unsigned int, exit_reason )
+                __field( unsigned long, guest_rip )
+        ),
+
+        TP_fast_assign(
+                __entry->exit_reason = exit_reason;
+                __entry->guest_rip = guest_rip;
+        ),
+
+        TP_printk("reason %s rip 0x%lx",
+                  ftrace_print_symbols_seq(p, __entry->exit_reason,
+                                           kvm_x86_ops->exit_reasons_str),
+                  __entry->guest_rip)
+);
+
+/*
+ * Tracepoint for kvm interrupt injection:
+ */
+TRACE_EVENT(kvm_inj_virq,
+        TP_PROTO(unsigned int irq),
+        TP_ARGS(irq),
+
+        TP_STRUCT__entry(
+                __field( unsigned int, irq )
+        ),
+
+        TP_fast_assign(
+                __entry->irq = irq;
+        ),
+
+        TP_printk("irq %u", __entry->irq)
+);
+
+/*
+ * Tracepoint for page fault.
+ */
+TRACE_EVENT(kvm_page_fault,
+        TP_PROTO(unsigned long fault_address, unsigned int error_code),
+        TP_ARGS(fault_address, error_code),
+
+        TP_STRUCT__entry(
+                __field( unsigned long, fault_address )
+                __field( unsigned int, error_code )
+        ),
+
+        TP_fast_assign(
+                __entry->fault_address = fault_address;
+                __entry->error_code = error_code;
+        ),
+
+        TP_printk("address %lx error_code %x",
+                  __entry->fault_address, __entry->error_code)
+);
+
+/*
+ * Tracepoint for guest MSR access.
+ */
+TRACE_EVENT(kvm_msr,
+        TP_PROTO(unsigned int rw, unsigned int ecx, unsigned long data),
+        TP_ARGS(rw, ecx, data),
+
+        TP_STRUCT__entry(
+                __field( unsigned int, rw )
+                __field( unsigned int, ecx )
+                __field( unsigned long, data )
+        ),
+
+        TP_fast_assign(
+                __entry->rw = rw;
+                __entry->ecx = ecx;
+                __entry->data = data;
+        ),
+
+        TP_printk("msr_%s %x = 0x%lx",
+                  __entry->rw ? "write" : "read",
+                  __entry->ecx, __entry->data)
+);
+
+#define trace_kvm_msr_read(ecx, data) trace_kvm_msr(0, ecx, data)
+#define trace_kvm_msr_write(ecx, data) trace_kvm_msr(1, ecx, data)
+
+/*
+ * Tracepoint for guest CR access.
+ */
+TRACE_EVENT(kvm_cr,
+        TP_PROTO(unsigned int rw, unsigned int cr, unsigned long val),
+        TP_ARGS(rw, cr, val),
+
+        TP_STRUCT__entry(
+                __field( unsigned int, rw )
+                __field( unsigned int, cr )
+                __field( unsigned long, val )
+        ),
+
+        TP_fast_assign(
+                __entry->rw = rw;
+                __entry->cr = cr;
+                __entry->val = val;
+        ),
+
+        TP_printk("cr_%s %x = 0x%lx",
+                  __entry->rw ? "write" : "read",
+                  __entry->cr, __entry->val)
+);
+
+#define trace_kvm_cr_read(cr, val) trace_kvm_cr(0, cr, val)
+#define trace_kvm_cr_write(cr, val) trace_kvm_cr(1, cr, val)
+
+#endif /* _TRACE_KVM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1a84ca191cd1..c6256b98f078 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -25,6 +25,7 @@
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
+#include <linux/ftrace_event.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
 
@@ -34,6 +35,8 @@
 #include <asm/virtext.h>
 #include <asm/mce.h>
 
+#include "trace.h"
+
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
 MODULE_AUTHOR("Qumranet");
@@ -2550,7 +2553,7 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
         uint32_t intr;
         int irq = vcpu->arch.interrupt.nr;
 
-        KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
+        trace_kvm_inj_virq(irq);
 
         ++vcpu->stat.irq_injections;
         if (vmx->rmode.vm86_active) {
@@ -2751,8 +2754,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 if (enable_ept)
                         BUG();
                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
-                KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
-                            (u32)((u64)cr2 >> 32), handler);
+                trace_kvm_page_fault(cr2, error_code);
+
                 if (kvm_event_needs_reinjection(vcpu))
                         kvm_mmu_unprotect_page_virt(vcpu, cr2);
                 return kvm_mmu_page_fault(vcpu, cr2, error_code);
@@ -2799,7 +2802,6 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
                                      struct kvm_run *kvm_run)
 {
         ++vcpu->stat.irq_exits;
-        KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
         return 1;
 }
 
@@ -2847,7 +2849,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 
 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-        unsigned long exit_qualification;
+        unsigned long exit_qualification, val;
         int cr;
         int reg;
 
@@ -2856,21 +2858,19 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         reg = (exit_qualification >> 8) & 15;
         switch ((exit_qualification >> 4) & 3) {
         case 0: /* mov to cr */
-                KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
-                            (u32)kvm_register_read(vcpu, reg),
-                            (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
-                            handler);
+                val = kvm_register_read(vcpu, reg);
+                trace_kvm_cr_write(cr, val);
                 switch (cr) {
                 case 0:
-                        kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
+                        kvm_set_cr0(vcpu, val);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 3:
-                        kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
+                        kvm_set_cr3(vcpu, val);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 4:
-                        kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
+                        kvm_set_cr4(vcpu, val);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 8: {
@@ -2892,23 +2892,19 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 vcpu->arch.cr0 &= ~X86_CR0_TS;
                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
                 vmx_fpu_activate(vcpu);
-                KVMTRACE_0D(CLTS, vcpu, handler);
                 skip_emulated_instruction(vcpu);
                 return 1;
         case 1: /*mov from cr*/
                 switch (cr) {
                 case 3:
                         kvm_register_write(vcpu, reg, vcpu->arch.cr3);
-                        KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
-                                    (u32)kvm_register_read(vcpu, reg),
-                                    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
-                                    handler);
+                        trace_kvm_cr_read(cr, vcpu->arch.cr3);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 8:
-                        kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
-                        KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
-                                    (u32)kvm_register_read(vcpu, reg), handler);
+                        val = kvm_get_cr8(vcpu);
+                        kvm_register_write(vcpu, reg, val);
+                        trace_kvm_cr_read(cr, val);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 }
@@ -2976,7 +2972,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                         val = 0;
                 }
                 kvm_register_write(vcpu, reg, val);
-                KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
         } else {
                 val = vcpu->arch.regs[reg];
                 switch (dr) {
@@ -3009,7 +3004,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                         }
                         break;
                 }
-                KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler);
         }
         skip_emulated_instruction(vcpu);
         return 1;
@@ -3031,8 +3025,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 return 1;
         }
 
-        KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
-                    handler);
+        trace_kvm_msr_read(ecx, data);
 
         /* FIXME: handling of bits 32:63 of rax, rdx */
         vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
@@ -3047,8 +3040,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-        KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
-                    handler);
+        trace_kvm_msr_write(ecx, data);
 
         if (vmx_set_msr(vcpu, ecx, data) != 0) {
                 kvm_inject_gp(vcpu, 0);
@@ -3075,7 +3067,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 
-        KVMTRACE_0D(PEND_INTR, vcpu, handler);
         ++vcpu->stat.irq_window_exits;
 
         /*
@@ -3227,6 +3218,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         }
 
         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+        trace_kvm_page_fault(gpa, exit_qualification);
         return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
 }
 
@@ -3410,8 +3402,7 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         u32 exit_reason = vmx->exit_reason;
         u32 vectoring_info = vmx->idt_vectoring_info;
 
-        KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
-                    (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
+        trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
 
         /* If we need to emulate an MMIO from handle_invalid_guest_state
          * we just return 0 */
@@ -3500,10 +3491,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 
         /* We need to handle NMIs before interrupts are enabled */
         if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-            (exit_intr_info & INTR_INFO_VALID_MASK)) {
-                KVMTRACE_0D(NMI, &vmx->vcpu, handler);
+            (exit_intr_info & INTR_INFO_VALID_MASK))
                 asm("int $2");
-        }
 
         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
@@ -3891,6 +3880,29 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
         return ret;
 }
 
+static const struct trace_print_flags vmx_exit_reasons_str[] = {
+        { EXIT_REASON_EXCEPTION_NMI, "exception" },
+        { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" },
+        { EXIT_REASON_TRIPLE_FAULT, "triple_fault" },
+        { EXIT_REASON_NMI_WINDOW, "nmi_window" },
+        { EXIT_REASON_IO_INSTRUCTION, "io_instruction" },
+        { EXIT_REASON_CR_ACCESS, "cr_access" },
+        { EXIT_REASON_DR_ACCESS, "dr_access" },
+        { EXIT_REASON_CPUID, "cpuid" },
+        { EXIT_REASON_MSR_READ, "rdmsr" },
+        { EXIT_REASON_MSR_WRITE, "wrmsr" },
+        { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" },
+        { EXIT_REASON_HLT, "halt" },
+        { EXIT_REASON_INVLPG, "invlpg" },
+        { EXIT_REASON_VMCALL, "hypercall" },
+        { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" },
+        { EXIT_REASON_APIC_ACCESS, "apic_access" },
+        { EXIT_REASON_WBINVD, "wbinvd" },
+        { EXIT_REASON_TASK_SWITCH, "task_switch" },
+        { EXIT_REASON_EPT_VIOLATION, "ept_violation" },
+        { -1, NULL }
+};
+
 static struct kvm_x86_ops vmx_x86_ops = {
         .cpu_has_kvm_support = cpu_has_kvm_support,
         .disabled_by_bios = vmx_disabled_by_bios,
@@ -3950,6 +3962,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
         .set_tss_addr = vmx_set_tss_addr,
         .get_tdp_level = get_ept_level,
         .get_mt_mask = vmx_get_mt_mask,
+
+        .exit_reasons_str = vmx_exit_reasons_str,
 };
 
 static int __init vmx_init(void)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a066876f1373..892a7a60c815 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -37,6 +37,8 @@
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/cpufreq.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
 
 #include <asm/uaccess.h>
 #include <asm/msr.h>
@@ -347,9 +349,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
         kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
-        KVMTRACE_1D(LMSW, vcpu,
-                    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
-                    handler);
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
@@ -2568,7 +2567,6 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-        KVMTRACE_0D(CLTS, vcpu, handler);
         kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
         return X86EMUL_CONTINUE;
 }
@@ -2851,12 +2849,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
         vcpu->arch.pio.down = 0;
         vcpu->arch.pio.rep = 0;
 
-        if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
-                KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
-                            handler);
-        else
-                KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
-                            handler);
+        trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+                      size, 1);
 
         val = kvm_register_read(vcpu, VCPU_REGS_RAX);
         memcpy(vcpu->arch.pio_data, &val, 4);
@@ -2892,12 +2886,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
         vcpu->arch.pio.down = down;
         vcpu->arch.pio.rep = rep;
 
-        if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
-                KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
-                            handler);
-        else
-                KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
-                            handler);
+        trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+                      size, count);
 
         if (!count) {
                 kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -3075,7 +3065,6 @@ void kvm_arch_exit(void)
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
         ++vcpu->stat.halt_exits;
-        KVMTRACE_0D(HLT, vcpu, handler);
         if (irqchip_in_kernel(vcpu->kvm)) {
                 vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
                 return 1;
@@ -3106,7 +3095,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
         a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
         a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
 
-        KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
+        trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
         if (!is_long_mode(vcpu)) {
                 nr &= 0xFFFFFFFF;
@@ -3206,8 +3195,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
                 vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
                 return 0;
         }
-        KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
-                    (u32)((u64)value >> 32), handler);
 
         return value;
 }
@@ -3215,9 +3202,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
                      unsigned long *rflags)
 {
-        KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
-                    (u32)((u64)val >> 32), handler);
-
         switch (cr) {
         case 0:
                 kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
@@ -3327,11 +3311,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
                 kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
         }
         kvm_x86_ops->skip_emulated_instruction(vcpu);
-        KVMTRACE_5D(CPUID, vcpu, function,
-                    (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
-                    (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
-                    (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
-                    (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
+        trace_kvm_cpuid(function,
+                        kvm_register_read(vcpu, VCPU_REGS_RAX),
+                        kvm_register_read(vcpu, VCPU_REGS_RBX),
+                        kvm_register_read(vcpu, VCPU_REGS_RCX),
+                        kvm_register_read(vcpu, VCPU_REGS_RDX));
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -3527,7 +3511,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 set_debugreg(vcpu->arch.eff_db[3], 3);
         }
 
-        KVMTRACE_0D(VMENTRY, vcpu, entryexit);
+        trace_kvm_entry(vcpu->vcpu_id);
         kvm_x86_ops->run(vcpu, kvm_run);
 
         if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -4842,3 +4826,9 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
         return kvm_x86_ops->interrupt_allowed(vcpu);
 }
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);