diff options
author | Feng (Eric) Liu <eric.e.liu@intel.com> | 2008-04-10 15:31:10 -0400 |
---|---|---|
committer | Avi Kivity <avi@qumranet.com> | 2008-04-27 05:01:19 -0400 |
commit | 2714d1d3d6be882b97cd0125140fccf9976a460a (patch) | |
tree | 57b654cafff076ae95b62b7763113b1ef8511eb5 /arch/x86/kvm | |
parent | 53371b5098543ab09dcb0c7ce31da887dbe58c62 (diff) |
KVM: Add trace markers
Trace markers allow userspace to trace execution of a virtual machine
in order to monitor its performance.
Signed-off-by: Feng (Eric) Liu <eric.e.liu@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r-- | arch/x86/kvm/vmx.c | 35 | ||||
-rw-r--r-- | arch/x86/kvm/x86.c | 26 |
2 files changed, 60 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 6249810b2155..8e5d6645b90d 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -1843,6 +1843,8 @@ static void vmx_inject_irq(struct kvm_vcpu *vcpu, int irq) | |||
1843 | { | 1843 | { |
1844 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 1844 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
1845 | 1845 | ||
1846 | KVMTRACE_1D(INJ_VIRQ, vcpu, (u32)irq, handler); | ||
1847 | |||
1846 | if (vcpu->arch.rmode.active) { | 1848 | if (vcpu->arch.rmode.active) { |
1847 | vmx->rmode.irq.pending = true; | 1849 | vmx->rmode.irq.pending = true; |
1848 | vmx->rmode.irq.vector = irq; | 1850 | vmx->rmode.irq.vector = irq; |
@@ -1993,6 +1995,8 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
1993 | error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); | 1995 | error_code = vmcs_read32(VM_EXIT_INTR_ERROR_CODE); |
1994 | if (is_page_fault(intr_info)) { | 1996 | if (is_page_fault(intr_info)) { |
1995 | cr2 = vmcs_readl(EXIT_QUALIFICATION); | 1997 | cr2 = vmcs_readl(EXIT_QUALIFICATION); |
1998 | KVMTRACE_3D(PAGE_FAULT, vcpu, error_code, (u32)cr2, | ||
1999 | (u32)((u64)cr2 >> 32), handler); | ||
1996 | return kvm_mmu_page_fault(vcpu, cr2, error_code); | 2000 | return kvm_mmu_page_fault(vcpu, cr2, error_code); |
1997 | } | 2001 | } |
1998 | 2002 | ||
@@ -2021,6 +2025,7 @@ static int handle_external_interrupt(struct kvm_vcpu *vcpu, | |||
2021 | struct kvm_run *kvm_run) | 2025 | struct kvm_run *kvm_run) |
2022 | { | 2026 | { |
2023 | ++vcpu->stat.irq_exits; | 2027 | ++vcpu->stat.irq_exits; |
2028 | KVMTRACE_1D(INTR, vcpu, vmcs_read32(VM_EXIT_INTR_INFO), handler); | ||
2024 | return 1; | 2029 | return 1; |
2025 | } | 2030 | } |
2026 | 2031 | ||
@@ -2078,6 +2083,8 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2078 | reg = (exit_qualification >> 8) & 15; | 2083 | reg = (exit_qualification >> 8) & 15; |
2079 | switch ((exit_qualification >> 4) & 3) { | 2084 | switch ((exit_qualification >> 4) & 3) { |
2080 | case 0: /* mov to cr */ | 2085 | case 0: /* mov to cr */ |
2086 | KVMTRACE_3D(CR_WRITE, vcpu, (u32)cr, (u32)vcpu->arch.regs[reg], | ||
2087 | (u32)((u64)vcpu->arch.regs[reg] >> 32), handler); | ||
2081 | switch (cr) { | 2088 | switch (cr) { |
2082 | case 0: | 2089 | case 0: |
2083 | vcpu_load_rsp_rip(vcpu); | 2090 | vcpu_load_rsp_rip(vcpu); |
@@ -2110,6 +2117,7 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2110 | vcpu->arch.cr0 &= ~X86_CR0_TS; | 2117 | vcpu->arch.cr0 &= ~X86_CR0_TS; |
2111 | vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); | 2118 | vmcs_writel(CR0_READ_SHADOW, vcpu->arch.cr0); |
2112 | vmx_fpu_activate(vcpu); | 2119 | vmx_fpu_activate(vcpu); |
2120 | KVMTRACE_0D(CLTS, vcpu, handler); | ||
2113 | skip_emulated_instruction(vcpu); | 2121 | skip_emulated_instruction(vcpu); |
2114 | return 1; | 2122 | return 1; |
2115 | case 1: /*mov from cr*/ | 2123 | case 1: /*mov from cr*/ |
@@ -2118,12 +2126,18 @@ static int handle_cr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2118 | vcpu_load_rsp_rip(vcpu); | 2126 | vcpu_load_rsp_rip(vcpu); |
2119 | vcpu->arch.regs[reg] = vcpu->arch.cr3; | 2127 | vcpu->arch.regs[reg] = vcpu->arch.cr3; |
2120 | vcpu_put_rsp_rip(vcpu); | 2128 | vcpu_put_rsp_rip(vcpu); |
2129 | KVMTRACE_3D(CR_READ, vcpu, (u32)cr, | ||
2130 | (u32)vcpu->arch.regs[reg], | ||
2131 | (u32)((u64)vcpu->arch.regs[reg] >> 32), | ||
2132 | handler); | ||
2121 | skip_emulated_instruction(vcpu); | 2133 | skip_emulated_instruction(vcpu); |
2122 | return 1; | 2134 | return 1; |
2123 | case 8: | 2135 | case 8: |
2124 | vcpu_load_rsp_rip(vcpu); | 2136 | vcpu_load_rsp_rip(vcpu); |
2125 | vcpu->arch.regs[reg] = kvm_get_cr8(vcpu); | 2137 | vcpu->arch.regs[reg] = kvm_get_cr8(vcpu); |
2126 | vcpu_put_rsp_rip(vcpu); | 2138 | vcpu_put_rsp_rip(vcpu); |
2139 | KVMTRACE_2D(CR_READ, vcpu, (u32)cr, | ||
2140 | (u32)vcpu->arch.regs[reg], handler); | ||
2127 | skip_emulated_instruction(vcpu); | 2141 | skip_emulated_instruction(vcpu); |
2128 | return 1; | 2142 | return 1; |
2129 | } | 2143 | } |
@@ -2169,6 +2183,7 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2169 | val = 0; | 2183 | val = 0; |
2170 | } | 2184 | } |
2171 | vcpu->arch.regs[reg] = val; | 2185 | vcpu->arch.regs[reg] = val; |
2186 | KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); | ||
2172 | } else { | 2187 | } else { |
2173 | /* mov to dr */ | 2188 | /* mov to dr */ |
2174 | } | 2189 | } |
@@ -2193,6 +2208,9 @@ static int handle_rdmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2193 | return 1; | 2208 | return 1; |
2194 | } | 2209 | } |
2195 | 2210 | ||
2211 | KVMTRACE_3D(MSR_READ, vcpu, ecx, (u32)data, (u32)(data >> 32), | ||
2212 | handler); | ||
2213 | |||
2196 | /* FIXME: handling of bits 32:63 of rax, rdx */ | 2214 | /* FIXME: handling of bits 32:63 of rax, rdx */ |
2197 | vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; | 2215 | vcpu->arch.regs[VCPU_REGS_RAX] = data & -1u; |
2198 | vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; | 2216 | vcpu->arch.regs[VCPU_REGS_RDX] = (data >> 32) & -1u; |
@@ -2206,6 +2224,9 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2206 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) | 2224 | u64 data = (vcpu->arch.regs[VCPU_REGS_RAX] & -1u) |
2207 | | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); | 2225 | | ((u64)(vcpu->arch.regs[VCPU_REGS_RDX] & -1u) << 32); |
2208 | 2226 | ||
2227 | KVMTRACE_3D(MSR_WRITE, vcpu, ecx, (u32)data, (u32)(data >> 32), | ||
2228 | handler); | ||
2229 | |||
2209 | if (vmx_set_msr(vcpu, ecx, data) != 0) { | 2230 | if (vmx_set_msr(vcpu, ecx, data) != 0) { |
2210 | kvm_inject_gp(vcpu, 0); | 2231 | kvm_inject_gp(vcpu, 0); |
2211 | return 1; | 2232 | return 1; |
@@ -2230,6 +2251,9 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu, | |||
2230 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); | 2251 | cpu_based_vm_exec_control = vmcs_read32(CPU_BASED_VM_EXEC_CONTROL); |
2231 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; | 2252 | cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING; |
2232 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); | 2253 | vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control); |
2254 | |||
2255 | KVMTRACE_0D(PEND_INTR, vcpu, handler); | ||
2256 | |||
2233 | /* | 2257 | /* |
2234 | * If the user space waits to inject interrupts, exit as soon as | 2258 | * If the user space waits to inject interrupts, exit as soon as |
2235 | * possible | 2259 | * possible |
@@ -2272,6 +2296,8 @@ static int handle_apic_access(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2272 | exit_qualification = vmcs_read64(EXIT_QUALIFICATION); | 2296 | exit_qualification = vmcs_read64(EXIT_QUALIFICATION); |
2273 | offset = exit_qualification & 0xffful; | 2297 | offset = exit_qualification & 0xffful; |
2274 | 2298 | ||
2299 | KVMTRACE_1D(APIC_ACCESS, vcpu, (u32)offset, handler); | ||
2300 | |||
2275 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); | 2301 | er = emulate_instruction(vcpu, kvm_run, 0, 0, 0); |
2276 | 2302 | ||
2277 | if (er != EMULATE_DONE) { | 2303 | if (er != EMULATE_DONE) { |
@@ -2335,6 +2361,9 @@ static int kvm_handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) | |||
2335 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2361 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2336 | u32 vectoring_info = vmx->idt_vectoring_info; | 2362 | u32 vectoring_info = vmx->idt_vectoring_info; |
2337 | 2363 | ||
2364 | KVMTRACE_3D(VMEXIT, vcpu, exit_reason, (u32)vmcs_readl(GUEST_RIP), | ||
2365 | (u32)((u64)vmcs_readl(GUEST_RIP) >> 32), entryexit); | ||
2366 | |||
2338 | if (unlikely(vmx->fail)) { | 2367 | if (unlikely(vmx->fail)) { |
2339 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; | 2368 | kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
2340 | kvm_run->fail_entry.hardware_entry_failure_reason | 2369 | kvm_run->fail_entry.hardware_entry_failure_reason |
@@ -2416,6 +2445,8 @@ static void vmx_intr_assist(struct kvm_vcpu *vcpu) | |||
2416 | return; | 2445 | return; |
2417 | } | 2446 | } |
2418 | 2447 | ||
2448 | KVMTRACE_1D(REDELIVER_EVT, vcpu, idtv_info_field, handler); | ||
2449 | |||
2419 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field); | 2450 | vmcs_write32(VM_ENTRY_INTR_INFO_FIELD, idtv_info_field); |
2420 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, | 2451 | vmcs_write32(VM_ENTRY_INSTRUCTION_LEN, |
2421 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); | 2452 | vmcs_read32(VM_EXIT_INSTRUCTION_LEN)); |
@@ -2601,8 +2632,10 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2601 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); | 2632 | intr_info = vmcs_read32(VM_EXIT_INTR_INFO); |
2602 | 2633 | ||
2603 | /* We need to handle NMIs before interrupts are enabled */ | 2634 | /* We need to handle NMIs before interrupts are enabled */ |
2604 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) /* nmi */ | 2635 | if ((intr_info & INTR_INFO_INTR_TYPE_MASK) == 0x200) { /* nmi */ |
2636 | KVMTRACE_0D(NMI, vcpu, handler); | ||
2605 | asm("int $2"); | 2637 | asm("int $2"); |
2638 | } | ||
2606 | } | 2639 | } |
2607 | 2640 | ||
2608 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) | 2641 | static void vmx_free_vmcs(struct kvm_vcpu *vcpu) |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index c7ad2352227a..f070f0a9adee 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -303,6 +303,9 @@ EXPORT_SYMBOL_GPL(kvm_set_cr0); | |||
303 | void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) | 303 | void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw) |
304 | { | 304 | { |
305 | kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)); | 305 | kvm_set_cr0(vcpu, (vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)); |
306 | KVMTRACE_1D(LMSW, vcpu, | ||
307 | (u32)((vcpu->arch.cr0 & ~0x0ful) | (msw & 0x0f)), | ||
308 | handler); | ||
306 | } | 309 | } |
307 | EXPORT_SYMBOL_GPL(kvm_lmsw); | 310 | EXPORT_SYMBOL_GPL(kvm_lmsw); |
308 | 311 | ||
@@ -2269,6 +2272,13 @@ int kvm_emulate_pio(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | |||
2269 | vcpu->arch.pio.guest_page_offset = 0; | 2272 | vcpu->arch.pio.guest_page_offset = 0; |
2270 | vcpu->arch.pio.rep = 0; | 2273 | vcpu->arch.pio.rep = 0; |
2271 | 2274 | ||
2275 | if (vcpu->run->io.direction == KVM_EXIT_IO_IN) | ||
2276 | KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size, | ||
2277 | handler); | ||
2278 | else | ||
2279 | KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size, | ||
2280 | handler); | ||
2281 | |||
2272 | kvm_x86_ops->cache_regs(vcpu); | 2282 | kvm_x86_ops->cache_regs(vcpu); |
2273 | memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4); | 2283 | memcpy(vcpu->arch.pio_data, &vcpu->arch.regs[VCPU_REGS_RAX], 4); |
2274 | kvm_x86_ops->decache_regs(vcpu); | 2284 | kvm_x86_ops->decache_regs(vcpu); |
@@ -2307,6 +2317,13 @@ int kvm_emulate_pio_string(struct kvm_vcpu *vcpu, struct kvm_run *run, int in, | |||
2307 | vcpu->arch.pio.guest_page_offset = offset_in_page(address); | 2317 | vcpu->arch.pio.guest_page_offset = offset_in_page(address); |
2308 | vcpu->arch.pio.rep = rep; | 2318 | vcpu->arch.pio.rep = rep; |
2309 | 2319 | ||
2320 | if (vcpu->run->io.direction == KVM_EXIT_IO_IN) | ||
2321 | KVMTRACE_2D(IO_READ, vcpu, vcpu->run->io.port, (u32)size, | ||
2322 | handler); | ||
2323 | else | ||
2324 | KVMTRACE_2D(IO_WRITE, vcpu, vcpu->run->io.port, (u32)size, | ||
2325 | handler); | ||
2326 | |||
2310 | if (!count) { | 2327 | if (!count) { |
2311 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 2328 | kvm_x86_ops->skip_emulated_instruction(vcpu); |
2312 | return 1; | 2329 | return 1; |
@@ -2414,6 +2431,7 @@ void kvm_arch_exit(void) | |||
2414 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) | 2431 | int kvm_emulate_halt(struct kvm_vcpu *vcpu) |
2415 | { | 2432 | { |
2416 | ++vcpu->stat.halt_exits; | 2433 | ++vcpu->stat.halt_exits; |
2434 | KVMTRACE_0D(HLT, vcpu, handler); | ||
2417 | if (irqchip_in_kernel(vcpu->kvm)) { | 2435 | if (irqchip_in_kernel(vcpu->kvm)) { |
2418 | vcpu->arch.mp_state = VCPU_MP_STATE_HALTED; | 2436 | vcpu->arch.mp_state = VCPU_MP_STATE_HALTED; |
2419 | up_read(&vcpu->kvm->slots_lock); | 2437 | up_read(&vcpu->kvm->slots_lock); |
@@ -2451,6 +2469,8 @@ int kvm_emulate_hypercall(struct kvm_vcpu *vcpu) | |||
2451 | a2 = vcpu->arch.regs[VCPU_REGS_RDX]; | 2469 | a2 = vcpu->arch.regs[VCPU_REGS_RDX]; |
2452 | a3 = vcpu->arch.regs[VCPU_REGS_RSI]; | 2470 | a3 = vcpu->arch.regs[VCPU_REGS_RSI]; |
2453 | 2471 | ||
2472 | KVMTRACE_1D(VMMCALL, vcpu, (u32)nr, handler); | ||
2473 | |||
2454 | if (!is_long_mode(vcpu)) { | 2474 | if (!is_long_mode(vcpu)) { |
2455 | nr &= 0xFFFFFFFF; | 2475 | nr &= 0xFFFFFFFF; |
2456 | a0 &= 0xFFFFFFFF; | 2476 | a0 &= 0xFFFFFFFF; |
@@ -2639,6 +2659,11 @@ void kvm_emulate_cpuid(struct kvm_vcpu *vcpu) | |||
2639 | } | 2659 | } |
2640 | kvm_x86_ops->decache_regs(vcpu); | 2660 | kvm_x86_ops->decache_regs(vcpu); |
2641 | kvm_x86_ops->skip_emulated_instruction(vcpu); | 2661 | kvm_x86_ops->skip_emulated_instruction(vcpu); |
2662 | KVMTRACE_5D(CPUID, vcpu, function, | ||
2663 | (u32)vcpu->arch.regs[VCPU_REGS_RAX], | ||
2664 | (u32)vcpu->arch.regs[VCPU_REGS_RBX], | ||
2665 | (u32)vcpu->arch.regs[VCPU_REGS_RCX], | ||
2666 | (u32)vcpu->arch.regs[VCPU_REGS_RDX], handler); | ||
2642 | } | 2667 | } |
2643 | EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); | 2668 | EXPORT_SYMBOL_GPL(kvm_emulate_cpuid); |
2644 | 2669 | ||
@@ -2794,6 +2819,7 @@ again: | |||
2794 | if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) | 2819 | if (test_and_clear_bit(KVM_REQ_TLB_FLUSH, &vcpu->requests)) |
2795 | kvm_x86_ops->tlb_flush(vcpu); | 2820 | kvm_x86_ops->tlb_flush(vcpu); |
2796 | 2821 | ||
2822 | KVMTRACE_0D(VMENTRY, vcpu, entryexit); | ||
2797 | kvm_x86_ops->run(vcpu, kvm_run); | 2823 | kvm_x86_ops->run(vcpu, kvm_run); |
2798 | 2824 | ||
2799 | vcpu->guest_mode = 0; | 2825 | vcpu->guest_mode = 0; |