diff options
author | Jan Kiszka <jan.kiszka@siemens.com> | 2008-12-15 07:52:10 -0500 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2009-03-24 05:02:49 -0400 |
commit | 42dbaa5a057736bf8b5c22aa42dbe975bf1080e5 (patch) | |
tree | a7e625373c1ff7477e8f6f3cd835f633f161689f /arch/x86/kvm/vmx.c | |
parent | 55934c0bd3bb232a9cf902820dd63ad18ed65e49 (diff) |
KVM: x86: Virtualize debug registers
So far, KVM had only basic x86 debug register support, once introduced to
realize guest debugging that way. The guest itself was not able to use
those registers.
This patch now adds (almost) full support for guest self-debugging via
hardware registers. It refactors the code, moving generic parts out of
SVM (VMX was already cleaned up by the KVM_SET_GUEST_DEBUG patches), and
it ensures that the registers are properly switched between host and
guest.
This patch also prepares debug register usage by the host. The latter
will (once wired-up by the following patch) allow for hardware
breakpoints/watchpoints in guest code. If this is enabled, the guest
will only see faked debug registers without functionality, but with
content reflecting the guest's modifications.
Tested on Intel only; SVM /should/ work as well, but who knows...
Known limitations: Trapping on tss switch won't work - most probably on
Intel.
Credits also go to Joerg Roedel - I used his once posted debugging
series as platform for this patch.
Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r-- | arch/x86/kvm/vmx.c | 114 |
1 files changed, 98 insertions, 16 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index c776868ffe41..0989776ee7b0 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -2311,7 +2311,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu) | |||
2311 | kvm_rip_write(vcpu, 0); | 2311 | kvm_rip_write(vcpu, 0); |
2312 | kvm_register_write(vcpu, VCPU_REGS_RSP, 0); | 2312 | kvm_register_write(vcpu, VCPU_REGS_RSP, 0); |
2313 | 2313 | ||
2314 | /* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */ | ||
2315 | vmcs_writel(GUEST_DR7, 0x400); | 2314 | vmcs_writel(GUEST_DR7, 0x400); |
2316 | 2315 | ||
2317 | vmcs_writel(GUEST_GDTR_BASE, 0); | 2316 | vmcs_writel(GUEST_GDTR_BASE, 0); |
@@ -2577,7 +2576,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2577 | { | 2576 | { |
2578 | struct vcpu_vmx *vmx = to_vmx(vcpu); | 2577 | struct vcpu_vmx *vmx = to_vmx(vcpu); |
2579 | u32 intr_info, ex_no, error_code; | 2578 | u32 intr_info, ex_no, error_code; |
2580 | unsigned long cr2, rip; | 2579 | unsigned long cr2, rip, dr6; |
2581 | u32 vect_info; | 2580 | u32 vect_info; |
2582 | enum emulation_result er; | 2581 | enum emulation_result er; |
2583 | 2582 | ||
@@ -2637,14 +2636,28 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2637 | } | 2636 | } |
2638 | 2637 | ||
2639 | ex_no = intr_info & INTR_INFO_VECTOR_MASK; | 2638 | ex_no = intr_info & INTR_INFO_VECTOR_MASK; |
2640 | if (ex_no == DB_VECTOR || ex_no == BP_VECTOR) { | 2639 | switch (ex_no) { |
2640 | case DB_VECTOR: | ||
2641 | dr6 = vmcs_readl(EXIT_QUALIFICATION); | ||
2642 | if (!(vcpu->guest_debug & | ||
2643 | (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) { | ||
2644 | vcpu->arch.dr6 = dr6 | DR6_FIXED_1; | ||
2645 | kvm_queue_exception(vcpu, DB_VECTOR); | ||
2646 | return 1; | ||
2647 | } | ||
2648 | kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1; | ||
2649 | kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7); | ||
2650 | /* fall through */ | ||
2651 | case BP_VECTOR: | ||
2641 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | 2652 | kvm_run->exit_reason = KVM_EXIT_DEBUG; |
2642 | kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; | 2653 | kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip; |
2643 | kvm_run->debug.arch.exception = ex_no; | 2654 | kvm_run->debug.arch.exception = ex_no; |
2644 | } else { | 2655 | break; |
2656 | default: | ||
2645 | kvm_run->exit_reason = KVM_EXIT_EXCEPTION; | 2657 | kvm_run->exit_reason = KVM_EXIT_EXCEPTION; |
2646 | kvm_run->ex.exception = ex_no; | 2658 | kvm_run->ex.exception = ex_no; |
2647 | kvm_run->ex.error_code = error_code; | 2659 | kvm_run->ex.error_code = error_code; |
2660 | break; | ||
2648 | } | 2661 | } |
2649 | return 0; | 2662 | return 0; |
2650 | } | 2663 | } |
@@ -2784,21 +2797,44 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2784 | unsigned long val; | 2797 | unsigned long val; |
2785 | int dr, reg; | 2798 | int dr, reg; |
2786 | 2799 | ||
2787 | /* | 2800 | dr = vmcs_readl(GUEST_DR7); |
2788 | * FIXME: this code assumes the host is debugging the guest. | 2801 | if (dr & DR7_GD) { |
2789 | * need to deal with guest debugging itself too. | 2802 | /* |
2790 | */ | 2803 | * As the vm-exit takes precedence over the debug trap, we |
2804 | * need to emulate the latter, either for the host or the | ||
2805 | * guest debugging itself. | ||
2806 | */ | ||
2807 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) { | ||
2808 | kvm_run->debug.arch.dr6 = vcpu->arch.dr6; | ||
2809 | kvm_run->debug.arch.dr7 = dr; | ||
2810 | kvm_run->debug.arch.pc = | ||
2811 | vmcs_readl(GUEST_CS_BASE) + | ||
2812 | vmcs_readl(GUEST_RIP); | ||
2813 | kvm_run->debug.arch.exception = DB_VECTOR; | ||
2814 | kvm_run->exit_reason = KVM_EXIT_DEBUG; | ||
2815 | return 0; | ||
2816 | } else { | ||
2817 | vcpu->arch.dr7 &= ~DR7_GD; | ||
2818 | vcpu->arch.dr6 |= DR6_BD; | ||
2819 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); | ||
2820 | kvm_queue_exception(vcpu, DB_VECTOR); | ||
2821 | return 1; | ||
2822 | } | ||
2823 | } | ||
2824 | |||
2791 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); | 2825 | exit_qualification = vmcs_readl(EXIT_QUALIFICATION); |
2792 | dr = exit_qualification & 7; | 2826 | dr = exit_qualification & DEBUG_REG_ACCESS_NUM; |
2793 | reg = (exit_qualification >> 8) & 15; | 2827 | reg = DEBUG_REG_ACCESS_REG(exit_qualification); |
2794 | if (exit_qualification & 16) { | 2828 | if (exit_qualification & TYPE_MOV_FROM_DR) { |
2795 | /* mov from dr */ | ||
2796 | switch (dr) { | 2829 | switch (dr) { |
2830 | case 0 ... 3: | ||
2831 | val = vcpu->arch.db[dr]; | ||
2832 | break; | ||
2797 | case 6: | 2833 | case 6: |
2798 | val = 0xffff0ff0; | 2834 | val = vcpu->arch.dr6; |
2799 | break; | 2835 | break; |
2800 | case 7: | 2836 | case 7: |
2801 | val = 0x400; | 2837 | val = vcpu->arch.dr7; |
2802 | break; | 2838 | break; |
2803 | default: | 2839 | default: |
2804 | val = 0; | 2840 | val = 0; |
@@ -2806,7 +2842,38 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2806 | kvm_register_write(vcpu, reg, val); | 2842 | kvm_register_write(vcpu, reg, val); |
2807 | KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); | 2843 | KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler); |
2808 | } else { | 2844 | } else { |
2809 | /* mov to dr */ | 2845 | val = vcpu->arch.regs[reg]; |
2846 | switch (dr) { | ||
2847 | case 0 ... 3: | ||
2848 | vcpu->arch.db[dr] = val; | ||
2849 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) | ||
2850 | vcpu->arch.eff_db[dr] = val; | ||
2851 | break; | ||
2852 | case 4 ... 5: | ||
2853 | if (vcpu->arch.cr4 & X86_CR4_DE) | ||
2854 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
2855 | break; | ||
2856 | case 6: | ||
2857 | if (val & 0xffffffff00000000ULL) { | ||
2858 | kvm_queue_exception(vcpu, GP_VECTOR); | ||
2859 | break; | ||
2860 | } | ||
2861 | vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; | ||
2862 | break; | ||
2863 | case 7: | ||
2864 | if (val & 0xffffffff00000000ULL) { | ||
2865 | kvm_queue_exception(vcpu, GP_VECTOR); | ||
2866 | break; | ||
2867 | } | ||
2868 | vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; | ||
2869 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { | ||
2870 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); | ||
2871 | vcpu->arch.switch_db_regs = | ||
2872 | (val & DR7_BP_EN_MASK); | ||
2873 | } | ||
2874 | break; | ||
2875 | } | ||
2876 | KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler); | ||
2810 | } | 2877 | } |
2811 | skip_emulated_instruction(vcpu); | 2878 | skip_emulated_instruction(vcpu); |
2812 | return 1; | 2879 | return 1; |
@@ -2957,7 +3024,18 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
2957 | } | 3024 | } |
2958 | tss_selector = exit_qualification; | 3025 | tss_selector = exit_qualification; |
2959 | 3026 | ||
2960 | return kvm_task_switch(vcpu, tss_selector, reason); | 3027 | if (!kvm_task_switch(vcpu, tss_selector, reason)) |
3028 | return 0; | ||
3029 | |||
3030 | /* clear all local breakpoint enable flags */ | ||
3031 | vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55); | ||
3032 | |||
3033 | /* | ||
3034 | * TODO: What about debug traps on tss switch? | ||
3035 | * Are we supposed to inject them and update dr6? | ||
3036 | */ | ||
3037 | |||
3038 | return 1; | ||
2961 | } | 3039 | } |
2962 | 3040 | ||
2963 | static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | 3041 | static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) |
@@ -3342,6 +3420,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3342 | */ | 3420 | */ |
3343 | vmcs_writel(HOST_CR0, read_cr0()); | 3421 | vmcs_writel(HOST_CR0, read_cr0()); |
3344 | 3422 | ||
3423 | set_debugreg(vcpu->arch.dr6, 6); | ||
3424 | |||
3345 | asm( | 3425 | asm( |
3346 | /* Store host registers */ | 3426 | /* Store host registers */ |
3347 | "push %%"R"dx; push %%"R"bp;" | 3427 | "push %%"R"dx; push %%"R"bp;" |
@@ -3436,6 +3516,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run) | |||
3436 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); | 3516 | vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)); |
3437 | vcpu->arch.regs_dirty = 0; | 3517 | vcpu->arch.regs_dirty = 0; |
3438 | 3518 | ||
3519 | get_debugreg(vcpu->arch.dr6, 6); | ||
3520 | |||
3439 | vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); | 3521 | vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD); |
3440 | if (vmx->rmode.irq.pending) | 3522 | if (vmx->rmode.irq.pending) |
3441 | fixup_rmode_irq(vmx); | 3523 | fixup_rmode_irq(vmx); |