author     Marcelo Tosatti <mtosatti@redhat.com>  2009-06-17 08:22:14 -0400
committer  Avi Kivity <avi@redhat.com>            2009-09-10 01:32:59 -0400
commit     229456fc34b1c9031b04f7581e7b755d1cebfe9c (patch)
tree       85fc0b54e9403d6ea059b8f7f78cea49594aaace /arch/x86/kvm/vmx.c
parent     219b65dcf6c0bad83d51bfa12e25891c02de2414 (diff)
KVM: convert custom marker based tracing to event traces
This allows use of the powerful ftrace infrastructure.
See Documentation/trace/ for usage information.
[avi, stephen: various build fixes]
[sheng: fix control register breakage]
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
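
Each trace_kvm_*() call used in the diff below is backed by a TRACE_EVENT definition in the new arch/x86/kvm/trace.h. As a rough, illustrative sketch only (the field layout and format string here are assumptions; the authoritative definitions live in trace.h), an event such as kvm_inj_virq is declared along these lines:

#include <linux/tracepoint.h>

/* Illustrative sketch: record the injected interrupt vector and print it. */
TRACE_EVENT(kvm_inj_virq,
        TP_PROTO(unsigned int irq),
        TP_ARGS(irq),

        TP_STRUCT__entry(
                __field(unsigned int, irq)
        ),

        TP_fast_assign(
                __entry->irq = irq;
        ),

        TP_printk("irq %u", __entry->irq)
);

(The usual TRACE_SYSTEM and define_trace.h boilerplate is omitted.) Once built in, the events are controlled through the standard ftrace interface, e.g. the kvm group under /sys/kernel/debug/tracing/events/, as described in Documentation/trace/.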
Diffstat (limited to 'arch/x86/kvm/vmx.c')
 -rw-r--r--  arch/x86/kvm/vmx.c  78
 1 file changed, 46 insertions, 32 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1a84ca191cd1..c6256b98f078 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -25,6 +25,7 @@
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/moduleparam.h>
+#include <linux/ftrace_event.h>
 #include "kvm_cache_regs.h"
 #include "x86.h"
 
@@ -34,6 +35,8 @@
 #include <asm/virtext.h>
 #include <asm/mce.h>
 
+#include "trace.h"
+
 #define __ex(x) __kvm_handle_fault_on_reboot(x)
 
 MODULE_AUTHOR("Qumranet");
@@ -2550,7 +2553,7 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu)
         uint32_t intr;
         int irq = vcpu->arch.interrupt.nr;
 
-        KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler);
+        trace_kvm_inj_virq(irq);
 
         ++vcpu->stat.irq_injections;
         if (vmx->rmode.vm86_active) {
@@ -2751,8 +2754,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 if (enable_ept)
                         BUG();
                 cr2 = vmcs_readl(EXIT_QUALIFICATION);
-                KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2,
-                            (u32)((u64)cr2 >> 32), handler);
+                trace_kvm_page_fault(cr2, error_code);
+
                 if (kvm_event_needs_reinjection(vcpu))
                         kvm_mmu_unprotect_page_virt(vcpu, cr2);
                 return kvm_mmu_page_fault(vcpu, cr2, error_code);
@@ -2799,7 +2802,6 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu,
                                      struct kvm_run *kvm_run)
 {
         ++vcpu->stat.irq_exits;
-        KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler);
         return 1;
 }
 
@@ -2847,7 +2849,7 @@ vmx_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
 
 static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
-        unsigned long exit_qualification;
+        unsigned long exit_qualification, val;
         int cr;
         int reg;
 
@@ -2856,21 +2858,19 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         reg = (exit_qualification >> 8) & 15;
         switch ((exit_qualification >> 4) & 3) {
         case 0: /* mov to cr */
-                KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr,
-                            (u32)kvm_register_read(vcpu, reg),
-                            (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
-                            handler);
+                val = kvm_register_read(vcpu, reg);
+                trace_kvm_cr_write(cr, val);
                 switch (cr) {
                 case 0:
-                        kvm_set_cr0(vcpu, kvm_register_read(vcpu, reg));
+                        kvm_set_cr0(vcpu, val);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 3:
-                        kvm_set_cr3(vcpu, kvm_register_read(vcpu, reg));
+                        kvm_set_cr3(vcpu, val);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 4:
-                        kvm_set_cr4(vcpu, kvm_register_read(vcpu, reg));
+                        kvm_set_cr4(vcpu, val);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 8: {
@@ -2892,23 +2892,19 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 vcpu->arch.cr0 &= ~X86_CR0_TS;
                 vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0);
                 vmx_fpu_activate(vcpu);
-                KVMTRACE_0D(CLTS, vcpu, handler);
                 skip_emulated_instruction(vcpu);
                 return 1;
         case 1: /*mov from cr*/
                 switch (cr) {
                 case 3:
                         kvm_register_write(vcpu, reg, vcpu->arch.cr3);
-                        KVMTRACE_3D(CR_READ, vcpu, (u32)cr,
-                                    (u32)kvm_register_read(vcpu, reg),
-                                    (u32)((u64)kvm_register_read(vcpu, reg) >> 32),
-                                    handler);
+                        trace_kvm_cr_read(cr, vcpu->arch.cr3);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 case 8:
-                        kvm_register_write(vcpu, reg, kvm_get_cr8(vcpu));
-                        KVMTRACE_2D(CR_READ, vcpu, (u32)cr,
-                                    (u32)kvm_register_read(vcpu, reg), handler);
+                        val = kvm_get_cr8(vcpu);
+                        kvm_register_write(vcpu, reg, val);
+                        trace_kvm_cr_read(cr, val);
                         skip_emulated_instruction(vcpu);
                         return 1;
                 }
@@ -2976,7 +2972,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                         val = 0;
                 }
                 kvm_register_write(vcpu, reg, val);
-                KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
         } else {
                 val = vcpu->arch.regs[reg];
                 switch (dr) {
@@ -3009,7 +3004,6 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                         }
                         break;
                 }
-                KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler);
         }
         skip_emulated_instruction(vcpu);
         return 1;
@@ -3031,8 +3025,7 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
                 return 1;
         }
 
-        KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32),
-                    handler);
+        trace_kvm_msr_read(ecx, data);
 
         /* FIXME: handling of bits 32:63 of rax, rdx */
         vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u;
@@ -3047,8 +3040,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u)
                 | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-        KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32),
-                    handler);
+        trace_kvm_msr_write(ecx, data);
 
         if (vmx_set_msr(vcpu, ecx, data) != 0) {
                 kvm_inject_gp(vcpu, 0);
@@ -3075,7 +3067,6 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu,
         cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
         vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 
-        KVMTRACE_0D(PEND_INTR, vcpu, handler);
         ++vcpu->stat.irq_window_exits;
 
         /*
@@ -3227,6 +3218,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
         }
 
         gpa = vmcs_read64(GUEST_PHYSICAL_ADDRESS);
+        trace_kvm_page_fault(gpa, exit_qualification);
         return kvm_mmu_page_fault(vcpu, gpa & PAGE_MASK, 0);
 }
 
@@ -3410,8 +3402,7 @@ static int vmx_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
         u32 exit_reason = vmx->exit_reason;
         u32 vectoring_info = vmx->idt_vectoring_info;
 
-        KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)kvm_rip_read(vcpu),
-                    (u32)((u64)kvm_rip_read(vcpu) >> 32), entryexit);
+        trace_kvm_exit(exit_reason, kvm_rip_read(vcpu));
 
         /* If we need to emulate an MMIO from handle_invalid_guest_state
          * we just return 0 */
@@ -3500,10 +3491,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 
         /* We need to handle NMIs before interrupts are enabled */
         if ((exit_intr_info & INTR_INFO_INTR_TYPE_MASK) == INTR_TYPE_NMI_INTR &&
-            (exit_intr_info & INTR_INFO_VALID_MASK)) {
-                KVMTRACE_0D(NMI, &vmx->vcpu, handler);
+            (exit_intr_info & INTR_INFO_VALID_MASK))
                 asm("int $2");
-        }
 
         idtv_info_valid = idt_vectoring_info & VECTORING_INFO_VALID_MASK;
 
@@ -3891,6 +3880,29 @@ static u64 vmx_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
         return ret;
 }
 
+static const struct trace_print_flags vmx_exit_reasons_str[] = {
+        { EXIT_REASON_EXCEPTION_NMI, "exception" },
+        { EXIT_REASON_EXTERNAL_INTERRUPT, "ext_irq" },
+        { EXIT_REASON_TRIPLE_FAULT, "triple_fault" },
+        { EXIT_REASON_NMI_WINDOW, "nmi_window" },
+        { EXIT_REASON_IO_INSTRUCTION, "io_instruction" },
+        { EXIT_REASON_CR_ACCESS, "cr_access" },
+        { EXIT_REASON_DR_ACCESS, "dr_access" },
+        { EXIT_REASON_CPUID, "cpuid" },
+        { EXIT_REASON_MSR_READ, "rdmsr" },
+        { EXIT_REASON_MSR_WRITE, "wrmsr" },
+        { EXIT_REASON_PENDING_INTERRUPT, "interrupt_window" },
+        { EXIT_REASON_HLT, "halt" },
+        { EXIT_REASON_INVLPG, "invlpg" },
+        { EXIT_REASON_VMCALL, "hypercall" },
+        { EXIT_REASON_TPR_BELOW_THRESHOLD, "tpr_below_thres" },
+        { EXIT_REASON_APIC_ACCESS, "apic_access" },
+        { EXIT_REASON_WBINVD, "wbinvd" },
+        { EXIT_REASON_TASK_SWITCH, "task_switch" },
+        { EXIT_REASON_EPT_VIOLATION, "ept_violation" },
+        { -1, NULL }
+};
+
 static struct kvm_x86_ops vmx_x86_ops = {
         .cpu_has_kvm_support = cpu_has_kvm_support,
         .disabled_by_bios = vmx_disabled_by_bios,
@@ -3950,6 +3962,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
         .set_tss_addr = vmx_set_tss_addr,
         .get_tdp_level = get_ept_level,
         .get_mt_mask = vmx_get_mt_mask,
+
+        .exit_reasons_str = vmx_exit_reasons_str,
 };
 
 static int __init vmx_init(void)
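
The vmx_exit_reasons_str[] table added above is exported through the new .exit_reasons_str hook so that the common tracing code can translate raw VMX exit-reason numbers into readable names in the trace output. As an illustration only (this helper is not part of the patch; the actual consumer lives in the generic KVM/ftrace code), a sentinel-terminated struct trace_print_flags table can be scanned like this:

#include <linux/ftrace_event.h>  /* struct trace_print_flags { mask, name } */

/* Hypothetical helper: look up the name for an exit reason in a table
 * terminated by the { -1, NULL } sentinel used above. */
static const char *exit_reason_name(const struct trace_print_flags *tbl,
                                    unsigned long reason)
{
        for (; tbl->name; tbl++)
                if (tbl->mask == reason)
                        return tbl->name;
        return "unknown";
}

Stopping when tbl->name is NULL matches the { -1, NULL } sentinel, so an unknown exit reason falls through to "unknown" instead of walking past the end of the array.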