about summary refs log tree commit diff stats
path: root/arch/x86/kvm/x86.c
diff options
context:
space:
mode:
authorMarcelo Tosatti <mtosatti@redhat.com>2009-06-17 08:22:14 -0400
committerAvi Kivity <avi@redhat.com>2009-09-10 01:32:59 -0400
commit229456fc34b1c9031b04f7581e7b755d1cebfe9c (patch)
tree85fc0b54e9403d6ea059b8f7f78cea49594aaace /arch/x86/kvm/x86.c
parent219b65dcf6c0bad83d51bfa12e25891c02de2414 (diff)
KVM: convert custom marker based tracing to event traces
This allows use of the powerful ftrace infrastructure.

See Documentation/trace/ for usage information.

[avi, stephen: various build fixes]
[sheng: fix control register breakage]

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Sheng Yang <sheng@linux.intel.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--arch/x86/kvm/x86.c48
1 file changed, 19 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a066876f1373..892a7a60c815 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -37,6 +37,8 @@
 #include <linux/iommu.h>
 #include <linux/intel-iommu.h>
 #include <linux/cpufreq.h>
+#define CREATE_TRACE_POINTS
+#include "trace.h"
 
 #include <asm/uaccess.h>
 #include <asm/msr.h>
@@ -347,9 +349,6 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0);
 void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw)
 {
 	kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f));
-	KVMTRACE_1D(LMSW, vcpu,
-		    (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)),
-		    handler);
 }
 EXPORT_SYMBOL_GPL(kvm_lmsw);
 
@@ -2568,7 +2567,6 @@ int emulate_invlpg(struct kvm_vcpu *vcpu, gva_t address)
 
 int emulate_clts(struct kvm_vcpu *vcpu)
 {
-	KVMTRACE_0D(CLTS, vcpu, handler);
 	kvm_x86_ops->set_cr0(vcpu, vcpu->arch.cr0 & ~X86_CR0_TS);
 	return X86EMUL_CONTINUE;
 }
@@ -2851,12 +2849,8 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	vcpu->arch.pio.down = 0;
 	vcpu->arch.pio.rep = 0;
 
-	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
-		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
-			    handler);
-	else
-		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
-			    handler);
+	trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+		      size, 1);
 
 	val = kvm_register_read(vcpu, VCPU_REGS_RAX);
 	memcpy(vcpu->arch.pio_data, &val, 4);
@@ -2892,12 +2886,8 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in,
 	vcpu->arch.pio.down = down;
 	vcpu->arch.pio.rep = rep;
 
-	if (vcpu->run->io.direction == KVM_EXIT_IO_IN)
-		KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size,
-			    handler);
-	else
-		KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size,
-			    handler);
+	trace_kvm_pio(vcpu->run->io.direction == KVM_EXIT_IO_OUT, port,
+		      size, count);
 
 	if (!count) {
 		kvm_x86_ops->skip_emulated_instruction(vcpu);
@@ -3075,7 +3065,6 @@ void kvm_arch_exit(void)
 int kvm_emulate_halt(struct kvm_vcpu *vcpu)
 {
 	++vcpu->stat.halt_exits;
-	KVMTRACE_0D(HLT, vcpu, handler);
 	if (irqchip_in_kernel(vcpu->kvm)) {
 		vcpu->arch.mp_state = KVM_MP_STATE_HALTED;
 		return 1;
@@ -3106,7 +3095,7 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 	a2 = kvm_register_read(vcpu, VCPU_REGS_RDX);
 	a3 = kvm_register_read(vcpu, VCPU_REGS_RSI);
 
-	KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler);
+	trace_kvm_hypercall(nr, a0, a1, a2, a3);
 
 	if (!is_long_mode(vcpu)) {
 		nr &= 0xFFFFFFFF;
@@ -3206,8 +3195,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 		vcpu_printf(vcpu, "%s: unexpected cr %u\n", __func__, cr);
 		return 0;
 	}
-	KVMTRACE_3D(CR_READ, vcpu, (u32)cr, (u32)value,
-		    (u32)((u64)value >> 32), handler);
 
 	return value;
 }
@@ -3215,9 +3202,6 @@ unsigned long realmode_get_cr(struct kvm_vcpu *vcpu, int cr)
 void realmode_set_cr(struct kvm_vcpu *vcpu, int cr, unsigned long val,
 		     unsigned long *rflags)
 {
-	KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)val,
-		    (u32)((u64)val >> 32), handler);
-
 	switch (cr) {
 	case 0:
 		kvm_set_cr0(vcpu, mk_cr_64(vcpu->arch.cr0, val));
@@ -3327,11 +3311,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu)
 		kvm_register_write(vcpu, VCPU_REGS_RDX, best->edx);
 	}
 	kvm_x86_ops->skip_emulated_instruction(vcpu);
-	KVMTRACE_5D(CPUID, vcpu, function,
-		    (u32)kvm_register_read(vcpu, VCPU_REGS_RAX),
-		    (u32)kvm_register_read(vcpu, VCPU_REGS_RBX),
-		    (u32)kvm_register_read(vcpu, VCPU_REGS_RCX),
-		    (u32)kvm_register_read(vcpu, VCPU_REGS_RDX), handler);
+	trace_kvm_cpuid(function,
+			kvm_register_read(vcpu, VCPU_REGS_RAX),
+			kvm_register_read(vcpu, VCPU_REGS_RBX),
+			kvm_register_read(vcpu, VCPU_REGS_RCX),
+			kvm_register_read(vcpu, VCPU_REGS_RDX));
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_cpuid);
 
@@ -3527,7 +3511,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		set_debugreg(vcpu->arch.eff_db[3], 3);
 	}
 
-	KVMTRACE_0D(VMENTRY, vcpu, entryexit);
+	trace_kvm_entry(vcpu->vcpu_id);
 	kvm_x86_ops->run(vcpu, kvm_run);
 
 	if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -4842,3 +4826,9 @@ int kvm_arch_interrupt_allowed(struct kvm_vcpu *vcpu)
 {
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
+
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_exit);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_inj_virq);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_page_fault);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_msr);
+EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_cr);