author	Jan Kiszka <jan.kiszka@siemens.com>	2008-12-15 07:52:10 -0500
committer	Avi Kivity <avi@redhat.com>	2009-03-24 05:02:49 -0400
commit	d0bfb940ecabf0b44fb1fd80d8d60594e569e5ec (patch)
tree	b5927d44937a54ec23d2a28d59db06a0262c0412 /arch/x86/kvm/vmx.c
parent	8ab2d2e231062814bd89bba2d6d92563190aa2bb (diff)
KVM: New guest debug interface
This rips out the support for KVM_DEBUG_GUEST and introduces a new IOCTL instead: KVM_SET_GUEST_DEBUG. The IOCTL payload consists of a generic part, controlling the "main switch" and the single-step feature, and an arch-specific part. For x86, the arch part adds an interface for intercepting both types of debug exceptions separately and re-injecting them when the host was not interested. Moreover, the foundation for guest debugging via debug registers is laid.

To signal breakpoint events properly back to userland, an arch-specific data block is now returned along with KVM_EXIT_DEBUG. For x86, the arch block contains the PC, the debug exception, and relevant debug registers to tell debug events apart properly.

The availability of this new interface is signaled by KVM_CAP_SET_GUEST_DEBUG. Empty stubs are provided for archs that do not support it yet.

Note that both SVM and VT-x are supported, but only the latter has been tested so far. Based on the experience with all those VT-x corner cases, I would be fairly surprised if SVM worked out of the box.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
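[Editor's note: a minimal userspace sketch of the new interface, not part of this patch. It assumes the uapi definitions introduced here (struct kvm_guest_debug and the KVM_GUESTDBG_* flags), an already-open vCPU fd, and a host kernel that advertises KVM_CAP_SET_GUEST_DEBUG; the helper name is hypothetical.]

	#include <string.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Hypothetical helper: turn on guest single-stepping for one vCPU
	 * via the generic part of the payload (main switch + single-step). */
	static int enable_singlestep(int vcpu_fd)
	{
		struct kvm_guest_debug dbg;

		memset(&dbg, 0, sizeof(dbg));
		dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP;

		return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
	}

Clearing KVM_GUESTDBG_ENABLE in control disables all debugging features again, which is what the new set_guest_debug() below implements.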
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	93
1 file changed, 36 insertions(+), 57 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d974c1eaa7d..f55690ddb3ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -480,8 +480,13 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
 	if (!vcpu->fpu_active)
 		eb |= 1u << NM_VECTOR;
-	if (vcpu->guest_debug.enabled)
-		eb |= 1u << DB_VECTOR;
+	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+		if (vcpu->guest_debug &
+		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+			eb |= 1u << DB_VECTOR;
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+			eb |= 1u << BP_VECTOR;
+	}
 	if (vcpu->arch.rmode.active)
 		eb = ~0;
 	if (vm_need_ept())
@@ -1003,40 +1008,23 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	}
 }
 
-static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
+static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
 {
-	unsigned long dr7 = 0x400;
-	int old_singlestep;
-
-	old_singlestep = vcpu->guest_debug.singlestep;
-
-	vcpu->guest_debug.enabled = dbg->enabled;
-	if (vcpu->guest_debug.enabled) {
-		int i;
-
-		dr7 |= 0x200;  /* exact */
-		for (i = 0; i < 4; ++i) {
-			if (!dbg->breakpoints[i].enabled)
-				continue;
-			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
-			dr7 |= 2 << (i*2);    /* global enable */
-			dr7 |= 0 << (i*4+16); /* execution breakpoint */
-		}
-
-		vcpu->guest_debug.singlestep = dbg->singlestep;
-	} else
-		vcpu->guest_debug.singlestep = 0;
-
-	if (old_singlestep && !vcpu->guest_debug.singlestep) {
-		unsigned long flags;
-
-		flags = vmcs_readl(GUEST_RFLAGS);
-		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
-		vmcs_writel(GUEST_RFLAGS, flags);
-	}
+	int old_debug = vcpu->guest_debug;
+	unsigned long flags;
+
+	vcpu->guest_debug = dbg->control;
+	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
+		vcpu->guest_debug = 0;
+
+	flags = vmcs_readl(GUEST_RFLAGS);
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+	else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
+		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+	vmcs_writel(GUEST_RFLAGS, flags);
 
 	update_exception_bitmap(vcpu);
-	vmcs_writel(GUEST_DR7, dr7);
 
 	return 0;
 }
@@ -2540,24 +2528,6 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	return 0;
 }
 
-static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
-{
-	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
-
-	set_debugreg(dbg->bp[0], 0);
-	set_debugreg(dbg->bp[1], 1);
-	set_debugreg(dbg->bp[2], 2);
-	set_debugreg(dbg->bp[3], 3);
-
-	if (dbg->singlestep) {
-		unsigned long flags;
-
-		flags = vmcs_readl(GUEST_RFLAGS);
-		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
-		vmcs_writel(GUEST_RFLAGS, flags);
-	}
-}
-
 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 				  int vec, u32 err_code)
 {
@@ -2574,9 +2544,17 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 	 * the required debugging infrastructure rework.
 	 */
 	switch (vec) {
-	case DE_VECTOR:
 	case DB_VECTOR:
+		if (vcpu->guest_debug &
+		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+			return 0;
+		kvm_queue_exception(vcpu, vec);
+		return 1;
 	case BP_VECTOR:
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+			return 0;
+		/* fall through */
+	case DE_VECTOR:
 	case OF_VECTOR:
 	case BR_VECTOR:
 	case UD_VECTOR:
@@ -2593,7 +2571,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 intr_info, error_code;
+	u32 intr_info, ex_no, error_code;
 	unsigned long cr2, rip;
 	u32 vect_info;
 	enum emulation_result er;
@@ -2653,14 +2631,16 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return 1;
 	}
 
-	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
-	    (INTR_TYPE_HARD_EXCEPTION | 1)) {
+	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
+	if (ex_no == DB_VECTOR || ex_no == BP_VECTOR) {
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
-		return 0;
+		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+		kvm_run->debug.arch.exception = ex_no;
+	} else {
+		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
+		kvm_run->ex.exception = ex_no;
+		kvm_run->ex.error_code = error_code;
 	}
-	kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
-	kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
-	kvm_run->ex.error_code = error_code;
 	return 0;
 }
 
@@ -3600,7 +3580,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.vcpu_put = vmx_vcpu_put,
 
 	.set_guest_debug = set_guest_debug,
-	.guest_debug_pre = kvm_guest_debug_pre,
 	.get_msr = vmx_get_msr,
 	.set_msr = vmx_set_msr,
 	.get_segment_base = vmx_get_segment_base,
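[Editor's note: with the reworked handle_exception() above, a debug or breakpoint event that userspace asked for now surfaces as KVM_EXIT_DEBUG with the x86 arch block filled in. A hedged sketch of consuming it, not part of this patch: 'run' is the vCPU's mmap'ed struct kvm_run, vcpu_fd is assumed open, and error handling is omitted.]

	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <linux/kvm.h>

	/* Sketch only: run the vCPU once and report a debug exit using the
	 * arch block (PC and debug exception vector) added by this patch. */
	static void run_once(int vcpu_fd, struct kvm_run *run)
	{
		ioctl(vcpu_fd, KVM_RUN, 0);

		switch (run->exit_reason) {
		case KVM_EXIT_DEBUG:
			printf("debug exit at pc=0x%llx, vector %u\n",
			       (unsigned long long)run->debug.arch.pc,
			       run->debug.arch.exception);
			break;
		default:
			/* Other exit reasons handled elsewhere. */
			break;
		}
	}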