Diffstat (limited to 'arch/x86')
 arch/x86/include/asm/kvm.h      | 18
 arch/x86/include/asm/kvm_host.h |  9
 arch/x86/kvm/svm.c              | 50
 arch/x86/kvm/vmx.c              | 93
 arch/x86/kvm/x86.c              | 14
 5 files changed, 111 insertions(+), 73 deletions(-)
diff --git a/arch/x86/include/asm/kvm.h b/arch/x86/include/asm/kvm.h
index 886c9402ec45..32eb96c7ca27 100644
--- a/arch/x86/include/asm/kvm.h
+++ b/arch/x86/include/asm/kvm.h
@@ -212,6 +212,24 @@ struct kvm_pit_channel_state {
 	__s64 count_load_time;
 };
 
+struct kvm_debug_exit_arch {
+	__u32 exception;
+	__u32 pad;
+	__u64 pc;
+	__u64 dr6;
+	__u64 dr7;
+};
+
+#define KVM_GUESTDBG_USE_SW_BP		0x00010000
+#define KVM_GUESTDBG_USE_HW_BP		0x00020000
+#define KVM_GUESTDBG_INJECT_DB		0x00040000
+#define KVM_GUESTDBG_INJECT_BP		0x00080000
+
+/* for KVM_SET_GUEST_DEBUG */
+struct kvm_guest_debug_arch {
+	__u64 debugreg[8];
+};
+
 struct kvm_pit_state {
 	struct kvm_pit_channel_state channels[3];
 };
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 53779309514a..c430cd580ee2 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -135,12 +135,6 @@ enum {
 
 #define KVM_NR_MEM_OBJS 40
 
-struct kvm_guest_debug {
-	int enabled;
-	unsigned long bp[4];
-	int singlestep;
-};
-
 /*
  * We don't want allocation failures within the mmu code, so we preallocate
  * enough memory for a single page fault in a cache.
@@ -448,8 +442,7 @@ struct kvm_x86_ops {
 	void (*vcpu_put)(struct kvm_vcpu *vcpu);
 
 	int (*set_guest_debug)(struct kvm_vcpu *vcpu,
-			       struct kvm_debug_guest *dbg);
-	void (*guest_debug_pre)(struct kvm_vcpu *vcpu);
+			       struct kvm_guest_debug *dbg);
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
 	u64 (*get_segment_base)(struct kvm_vcpu *vcpu, int seg);
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0fbbde54ecae..88d9062f4545 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -968,9 +968,32 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 
 }
 
-static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
+static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
 {
-	return -EOPNOTSUPP;
+	int old_debug = vcpu->guest_debug;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	vcpu->guest_debug = dbg->control;
+
+	svm->vmcb->control.intercept_exceptions &=
+		~((1 << DB_VECTOR) | (1 << BP_VECTOR));
+	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+		if (vcpu->guest_debug &
+		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+			svm->vmcb->control.intercept_exceptions |=
+				1 << DB_VECTOR;
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+			svm->vmcb->control.intercept_exceptions |=
+				1 << BP_VECTOR;
+	} else
+		vcpu->guest_debug = 0;
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+		svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+	else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
+		svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+
+	return 0;
 }
 
 static int svm_get_irq(struct kvm_vcpu *vcpu)
@@ -1094,6 +1117,27 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
+static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	if (!(svm->vcpu.guest_debug &
+	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
+		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
+		return 1;
+	}
+	kvm_run->exit_reason = KVM_EXIT_DEBUG;
+	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
+	kvm_run->debug.arch.exception = DB_VECTOR;
+	return 0;
+}
+
+static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	kvm_run->exit_reason = KVM_EXIT_DEBUG;
+	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
+	kvm_run->debug.arch.exception = BP_VECTOR;
+	return 0;
+}
+
 static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	int er;
@@ -2050,6 +2094,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
+	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
+	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
 	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
 	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
 	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1d974c1eaa7d..f55690ddb3ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -480,8 +480,13 @@ static void update_exception_bitmap(struct kvm_vcpu *vcpu)
 	eb = (1u << PF_VECTOR) | (1u << UD_VECTOR);
 	if (!vcpu->fpu_active)
 		eb |= 1u << NM_VECTOR;
-	if (vcpu->guest_debug.enabled)
-		eb |= 1u << DB_VECTOR;
+	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+		if (vcpu->guest_debug &
+		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+			eb |= 1u << DB_VECTOR;
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+			eb |= 1u << BP_VECTOR;
+	}
 	if (vcpu->arch.rmode.active)
 		eb = ~0;
 	if (vm_need_ept())
@@ -1003,40 +1008,23 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	}
 }
 
-static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
+static int set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
 {
-	unsigned long dr7 = 0x400;
-	int old_singlestep;
-
-	old_singlestep = vcpu->guest_debug.singlestep;
-
-	vcpu->guest_debug.enabled = dbg->enabled;
-	if (vcpu->guest_debug.enabled) {
-		int i;
-
-		dr7 |= 0x200; /* exact */
-		for (i = 0; i < 4; ++i) {
-			if (!dbg->breakpoints[i].enabled)
-				continue;
-			vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
-			dr7 |= 2 << (i*2); /* global enable */
-			dr7 |= 0 << (i*4+16); /* execution breakpoint */
-		}
-
-		vcpu->guest_debug.singlestep = dbg->singlestep;
-	} else
-		vcpu->guest_debug.singlestep = 0;
+	int old_debug = vcpu->guest_debug;
+	unsigned long flags;
 
-	if (old_singlestep && !vcpu->guest_debug.singlestep) {
-		unsigned long flags;
+	vcpu->guest_debug = dbg->control;
+	if (!(vcpu->guest_debug & KVM_GUESTDBG_ENABLE))
+		vcpu->guest_debug = 0;
 
-		flags = vmcs_readl(GUEST_RFLAGS);
+	flags = vmcs_readl(GUEST_RFLAGS);
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+	else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
 		flags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
-		vmcs_writel(GUEST_RFLAGS, flags);
-	}
+	vmcs_writel(GUEST_RFLAGS, flags);
 
 	update_exception_bitmap(vcpu);
-	vmcs_writel(GUEST_DR7, dr7);
 
 	return 0;
 }
@@ -2540,24 +2528,6 @@ static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	return 0;
 }
 
-static void kvm_guest_debug_pre(struct kvm_vcpu *vcpu)
-{
-	struct kvm_guest_debug *dbg = &vcpu->guest_debug;
-
-	set_debugreg(dbg->bp[0], 0);
-	set_debugreg(dbg->bp[1], 1);
-	set_debugreg(dbg->bp[2], 2);
-	set_debugreg(dbg->bp[3], 3);
-
-	if (dbg->singlestep) {
-		unsigned long flags;
-
-		flags = vmcs_readl(GUEST_RFLAGS);
-		flags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
-		vmcs_writel(GUEST_RFLAGS, flags);
-	}
-}
-
 static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 				  int vec, u32 err_code)
 {
@@ -2574,9 +2544,17 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 	 * the required debugging infrastructure rework.
 	 */
 	switch (vec) {
-	case DE_VECTOR:
 	case DB_VECTOR:
+		if (vcpu->guest_debug &
+		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+			return 0;
+		kvm_queue_exception(vcpu, vec);
+		return 1;
 	case BP_VECTOR:
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+			return 0;
+		/* fall through */
+	case DE_VECTOR:
 	case OF_VECTOR:
 	case BR_VECTOR:
 	case UD_VECTOR:
@@ -2593,7 +2571,7 @@ static int handle_rmode_exception(struct kvm_vcpu *vcpu,
 static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	u32 intr_info, error_code;
+	u32 intr_info, ex_no, error_code;
 	unsigned long cr2, rip;
 	u32 vect_info;
 	enum emulation_result er;
@@ -2653,14 +2631,16 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		return 1;
 	}
 
-	if ((intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK)) ==
-	    (INTR_TYPE_HARD_EXCEPTION | 1)) {
+	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
+	if (ex_no == DB_VECTOR || ex_no == BP_VECTOR) {
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
-		return 0;
+		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
+		kvm_run->debug.arch.exception = ex_no;
+	} else {
+		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
+		kvm_run->ex.exception = ex_no;
+		kvm_run->ex.error_code = error_code;
 	}
-	kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
-	kvm_run->ex.exception = intr_info & INTR_INFO_VECTOR_MASK;
-	kvm_run->ex.error_code = error_code;
 	return 0;
 }
 
@@ -3600,7 +3580,6 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.vcpu_put = vmx_vcpu_put,
 
 	.set_guest_debug = set_guest_debug,
-	.guest_debug_pre = kvm_guest_debug_pre,
 	.get_msr = vmx_get_msr,
 	.set_msr = vmx_set_msr,
 	.get_segment_base = vmx_get_segment_base,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index b5e9932e0f62..e990d164b56d 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3005,9 +3005,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		goto out;
 	}
 
-	if (vcpu->guest_debug.enabled)
-		kvm_x86_ops->guest_debug_pre(vcpu);
-
 	vcpu->guest_mode = 1;
 	/*
 	 * Make sure that guest_mode assignment won't happen after
@@ -3218,7 +3215,7 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 	/*
 	 * Don't leak debug flags in case they were set for guest debugging
 	 */
-	if (vcpu->guest_debug.enabled && vcpu->guest_debug.singlestep)
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		regs->rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
 
 	vcpu_put(vcpu);
@@ -3837,8 +3834,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
-int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
-				    struct kvm_debug_guest *dbg)
+int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
+					struct kvm_guest_debug *dbg)
 {
 	int r;
 
@@ -3846,6 +3843,11 @@ int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
 
 	r = kvm_x86_ops->set_guest_debug(vcpu, dbg);
 
+	if (dbg->control & KVM_GUESTDBG_INJECT_DB)
+		kvm_queue_exception(vcpu, DB_VECTOR);
+	else if (dbg->control & KVM_GUESTDBG_INJECT_BP)
+		kvm_queue_exception(vcpu, BP_VECTOR);
+
 	vcpu_put(vcpu);
 
 	return r;
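
Usage note (not part of the patch): the interface above is driven from userspace through the KVM_SET_GUEST_DEBUG vcpu ioctl, whose struct kvm_guest_debug argument carries the KVM_GUESTDBG_* flags in its control field and the x86 debug registers in arch.debugreg. A minimal, hypothetical sketch, assuming vcpu_fd was obtained via KVM_CREATE_VCPU and with error handling omitted:

/*
 * Hypothetical userspace sketch: ask KVM to intercept guest INT3
 * breakpoints so they exit to userspace as KVM_EXIT_DEBUG instead of
 * being delivered to the guest. vcpu_fd is assumed to come from
 * KVM_CREATE_VCPU elsewhere in the program.
 */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int enable_sw_breakpoints(int vcpu_fd)
{
	struct kvm_guest_debug dbg;

	memset(&dbg, 0, sizeof(dbg));
	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;

	return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
}

With this set, a #BP raised by the guest returns from KVM_RUN with exit_reason == KVM_EXIT_DEBUG and debug.arch.exception set to 3 (#BP), matching the bp_interception and handle_exception paths in the diff above.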