Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/kvm_svm.h |   6
-rw-r--r--  arch/x86/kvm/svm.c     | 116
-rw-r--r--  arch/x86/kvm/vmx.c     | 114
-rw-r--r--  arch/x86/kvm/x86.c     |  29
4 files changed, 170 insertions(+), 95 deletions(-)
diff --git a/arch/x86/kvm/kvm_svm.h b/arch/x86/kvm/kvm_svm.h
index 91673413d8f7..ed66e4c078dc 100644
--- a/arch/x86/kvm/kvm_svm.h
+++ b/arch/x86/kvm/kvm_svm.h
@@ -18,7 +18,6 @@ static const u32 host_save_user_msrs[] = {
 };
 
 #define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)
-#define NUM_DB_REGS 4
 
 struct kvm_vcpu;
 
@@ -29,16 +28,11 @@ struct vcpu_svm {
 	struct svm_cpu_data *svm_data;
 	uint64_t asid_generation;
 
-	unsigned long db_regs[NUM_DB_REGS];
-
 	u64 next_rip;
 
 	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
 	u64 host_gs_base;
 	unsigned long host_cr2;
-	unsigned long host_db_regs[NUM_DB_REGS];
-	unsigned long host_dr6;
-	unsigned long host_dr7;
 
 	u32 *msrpm;
 	struct vmcb *hsave;
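Note that the debug state removed from struct vcpu_svm above does not simply disappear: the series moves it into the generic struct kvm_vcpu_arch so SVM and VMX share one implementation. A sketch of the fields the rest of this diff relies on, reconstructed purely from how they are used below (the actual declarations land in asm/kvm_host.h, which is outside this diffstat, so the names and types here are assumptions):

struct kvm_vcpu_arch {
	/* ... existing fields ... */
	unsigned long db[4];       /* guest-visible DR0..DR3 */
	unsigned long eff_db[4];   /* values really loaded while in guest mode */
	unsigned long dr6;         /* guest-visible DR6 shadow */
	unsigned long dr7;         /* guest-visible DR7 shadow */
	int switch_db_regs;        /* nonzero if guest DR7 arms a breakpoint */
	unsigned long host_db[4];  /* host DR0..DR3, saved across guest entry */
	unsigned long host_dr6;
	unsigned long host_dr7;
	/* ... */
};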
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 88d9062f4545..815f50e425ac 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -38,9 +38,6 @@ MODULE_LICENSE("GPL");
 #define IOPM_ALLOC_ORDER 2
 #define MSRPM_ALLOC_ORDER 1
 
-#define DR7_GD_MASK (1 << 13)
-#define DR6_BD_MASK (1 << 13)
-
 #define SEG_TYPE_LDT 2
 #define SEG_TYPE_BUSY_TSS16 3
 
@@ -181,32 +178,6 @@ static inline void kvm_write_cr2(unsigned long val)
 	asm volatile ("mov %0, %%cr2" :: "r" (val));
 }
 
-static inline unsigned long read_dr6(void)
-{
-	unsigned long dr6;
-
-	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
-	return dr6;
-}
-
-static inline void write_dr6(unsigned long val)
-{
-	asm volatile ("mov %0, %%dr6" :: "r" (val));
-}
-
-static inline unsigned long read_dr7(void)
-{
-	unsigned long dr7;
-
-	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
-	return dr7;
-}
-
-static inline void write_dr7(unsigned long val)
-{
-	asm volatile ("mov %0, %%dr7" :: "r" (val));
-}
-
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
 {
 	to_svm(vcpu)->asid_generation--;
@@ -695,7 +666,6 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	clear_page(svm->vmcb);
 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	svm->asid_generation = 0;
-	memset(svm->db_regs, 0, sizeof(svm->db_regs));
 	init_vmcb(svm);
 
 	fx_init(&svm->vcpu);
@@ -1035,7 +1005,29 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-	unsigned long val = to_svm(vcpu)->db_regs[dr];
+	struct vcpu_svm *svm = to_svm(vcpu);
+	unsigned long val;
+
+	switch (dr) {
+	case 0 ... 3:
+		val = vcpu->arch.db[dr];
+		break;
+	case 6:
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+			val = vcpu->arch.dr6;
+		else
+			val = svm->vmcb->save.dr6;
+		break;
+	case 7:
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+			val = vcpu->arch.dr7;
+		else
+			val = svm->vmcb->save.dr7;
+		break;
+	default:
+		val = 0;
+	}
+
 	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
 	return val;
 }
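The rewritten svm_get_dr() above establishes the shadowing policy used throughout the series: DR0..DR3 always read from the software copy in vcpu->arch.db[], while DR6/DR7 read from vcpu->arch only when user space owns the hardware breakpoints (KVM_GUESTDBG_USE_HW_BP); otherwise the VMCB copy remains authoritative. A compact, standalone restatement of that selection (an illustrative helper, not kernel code):

/* Illustrative restatement of the svm_get_dr() policy above. */
static unsigned long debug_reg_read(int dr, int user_owns_hw_bp,
				    const unsigned long db[4],
				    unsigned long shadow_val,
				    unsigned long vmcb_val)
{
	if (dr >= 0 && dr <= 3)
		return db[dr];		/* always the software copy */
	/* DR6 or DR7: the shadow wins while user space debugs the guest */
	return user_owns_hw_bp ? shadow_val : vmcb_val;
}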
@@ -1045,33 +1037,40 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	*exception = 0;
+	KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);
 
-	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
-		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
-		svm->vmcb->save.dr6 |= DR6_BD_MASK;
-		*exception = DB_VECTOR;
-		return;
-	}
+	*exception = 0;
 
 	switch (dr) {
 	case 0 ... 3:
-		svm->db_regs[dr] = value;
+		vcpu->arch.db[dr] = value;
+		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+			vcpu->arch.eff_db[dr] = value;
 		return;
 	case 4 ... 5:
-		if (vcpu->arch.cr4 & X86_CR4_DE) {
+		if (vcpu->arch.cr4 & X86_CR4_DE)
 			*exception = UD_VECTOR;
+		return;
+	case 6:
+		if (value & 0xffffffff00000000ULL) {
+			*exception = GP_VECTOR;
 			return;
 		}
-	case 7: {
-		if (value & ~((1ULL << 32) - 1)) {
+		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
+		return;
+	case 7:
+		if (value & 0xffffffff00000000ULL) {
 			*exception = GP_VECTOR;
 			return;
 		}
-		svm->vmcb->save.dr7 = value;
+		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
+		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+			svm->vmcb->save.dr7 = vcpu->arch.dr7;
+			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
+		}
 		return;
-	}
 	default:
+		/* FIXME: Possible case? */
 		printk(KERN_DEBUG "%s: unexpected dr %u\n",
 		       __func__, dr);
 		*exception = UD_VECTOR;
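Both the SVM path above and the VMX path later in this patch enforce the same MOV-to-DR write rules: in 64-bit mode the upper 32 bits written to DR6/DR7 must be zero (otherwise #GP), and the reserved bits are forced to their architectural values. A minimal standalone sketch of those checks; the mask values mirror the DR6_FIXED_1/DR6_VOLATILE/DR7_FIXED_1/DR7_VOLATILE constants this series uses, but the exact definitions are assumptions here since the header hunk is not part of this diff:

#include <stdint.h>

#define DR6_FIXED_1  0xffff0ff0ULL	/* DR6 bits that always read as 1 */
#define DR6_VOLATILE 0x0000e00fULL	/* writable: B0-B3, BD, BS, BT */
#define DR7_FIXED_1  0x00000400ULL	/* DR7 bit 10 always reads as 1 */
#define DR7_VOLATILE 0xffff23ffULL	/* writable DR7 bits */
#define GP_VECTOR    13

/* Returns the exception vector to inject, or 0 if the write is legal. */
static int emulate_dr6_write(uint64_t *dr6, uint64_t value)
{
	if (value & 0xffffffff00000000ULL)	/* upper half must be clear */
		return GP_VECTOR;
	*dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
	return 0;
}

static int emulate_dr7_write(uint64_t *dr7, uint64_t value)
{
	if (value & 0xffffffff00000000ULL)
		return GP_VECTOR;
	*dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
	return 0;
}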
@@ -2365,22 +2364,6 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	return 0;
 }
 
-static void save_db_regs(unsigned long *db_regs)
-{
-	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
-	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
-	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
-	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
-}
-
-static void load_db_regs(unsigned long *db_regs)
-{
-	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
-	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
-	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
-	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
-}
-
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	force_new_asid(vcpu);
@@ -2439,20 +2422,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	gs_selector = kvm_read_gs();
 	ldt_selector = kvm_read_ldt();
 	svm->host_cr2 = kvm_read_cr2();
-	svm->host_dr6 = read_dr6();
-	svm->host_dr7 = read_dr7();
 	if (!is_nested(svm))
 		svm->vmcb->save.cr2 = vcpu->arch.cr2;
 	/* required for live migration with NPT */
 	if (npt_enabled)
 		svm->vmcb->save.cr3 = vcpu->arch.cr3;
 
-	if (svm->vmcb->save.dr7 & 0xff) {
-		write_dr7(0);
-		save_db_regs(svm->host_db_regs);
-		load_db_regs(svm->db_regs);
-	}
-
 	clgi();
 
 	local_irq_enable();
@@ -2528,16 +2503,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
 	);
 
-	if ((svm->vmcb->save.dr7 & 0xff))
-		load_db_regs(svm->host_db_regs);
-
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
 	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-	write_dr6(svm->host_dr6);
-	write_dr7(svm->host_dr7);
 	kvm_write_cr2(svm->host_cr2);
 
 	kvm_load_fs(fs_selector);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c776868ffe41..0989776ee7b0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2311,7 +2311,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_rip_write(vcpu, 0);
 	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
 
-	/* todo: dr0 = dr1 = dr2 = dr3 = 0; dr6 = 0xffff0ff0 */
 	vmcs_writel(GUEST_DR7, 0x400);
 
 	vmcs_writel(GUEST_GDTR_BASE, 0);
@@ -2577,7 +2576,7 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	u32 intr_info, ex_no, error_code;
-	unsigned long cr2, rip;
+	unsigned long cr2, rip, dr6;
 	u32 vect_info;
 	enum emulation_result er;
 
@@ -2637,14 +2636,28 @@ static int handle_exception(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 
 	ex_no = intr_info & INTR_INFO_VECTOR_MASK;
-	if (ex_no == DB_VECTOR || ex_no == BP_VECTOR) {
+	switch (ex_no) {
+	case DB_VECTOR:
+		dr6 = vmcs_readl(EXIT_QUALIFICATION);
+		if (!(vcpu->guest_debug &
+		      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
+			vcpu->arch.dr6 = dr6 | DR6_FIXED_1;
+			kvm_queue_exception(vcpu, DB_VECTOR);
+			return 1;
+		}
+		kvm_run->debug.arch.dr6 = dr6 | DR6_FIXED_1;
+		kvm_run->debug.arch.dr7 = vmcs_readl(GUEST_DR7);
+		/* fall through */
+	case BP_VECTOR:
 		kvm_run->exit_reason = KVM_EXIT_DEBUG;
 		kvm_run->debug.arch.pc = vmcs_readl(GUEST_CS_BASE) + rip;
 		kvm_run->debug.arch.exception = ex_no;
-	} else {
+		break;
+	default:
 		kvm_run->exit_reason = KVM_EXIT_EXCEPTION;
 		kvm_run->ex.exception = ex_no;
 		kvm_run->ex.error_code = error_code;
+		break;
 	}
 	return 0;
 }
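The new DB_VECTOR case encodes the routing policy for hardware debug traps: if user space requested single-stepping or hardware breakpoints, the trap becomes a KVM_EXIT_DEBUG with DR6/DR7 snapshots; otherwise the exit qualification is folded into the guest's DR6 shadow and the #DB is reinjected. A self-contained sketch of just that decision (the flag values are copied from the guest-debug ABI this series targets; treat them as assumptions):

/* Guest-debug flags, assumed here for a self-contained example. */
#define KVM_GUESTDBG_SINGLESTEP 0x00000002
#define KVM_GUESTDBG_USE_HW_BP  0x00020000

enum db_route { ROUTE_TO_GUEST, ROUTE_TO_USERSPACE };

/* Mirrors the DB_VECTOR case above: user space owns the trap only if
 * it asked for single-step or hardware-breakpoint debugging. */
static enum db_route route_debug_trap(unsigned int guest_debug)
{
	if (guest_debug & (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
		return ROUTE_TO_USERSPACE;
	return ROUTE_TO_GUEST;
}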
@@ -2784,21 +2797,44 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	unsigned long val;
 	int dr, reg;
 
-	/*
-	 * FIXME: this code assumes the host is debugging the guest.
-	 * need to deal with guest debugging itself too.
-	 */
+	dr = vmcs_readl(GUEST_DR7);
+	if (dr & DR7_GD) {
+		/*
+		 * As the vm-exit takes precedence over the debug trap, we
+		 * need to emulate the latter, either for the host or the
+		 * guest debugging itself.
+		 */
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
+			kvm_run->debug.arch.dr6 = vcpu->arch.dr6;
+			kvm_run->debug.arch.dr7 = dr;
+			kvm_run->debug.arch.pc =
+				vmcs_readl(GUEST_CS_BASE) +
+				vmcs_readl(GUEST_RIP);
+			kvm_run->debug.arch.exception = DB_VECTOR;
+			kvm_run->exit_reason = KVM_EXIT_DEBUG;
+			return 0;
+		} else {
+			vcpu->arch.dr7 &= ~DR7_GD;
+			vcpu->arch.dr6 |= DR6_BD;
+			vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
+			kvm_queue_exception(vcpu, DB_VECTOR);
+			return 1;
+		}
+	}
+
 	exit_qualification = vmcs_readl(EXIT_QUALIFICATION);
-	dr = exit_qualification & 7;
-	reg = (exit_qualification >> 8) & 15;
-	if (exit_qualification & 16) {
-		/* mov from dr */
+	dr = exit_qualification & DEBUG_REG_ACCESS_NUM;
+	reg = DEBUG_REG_ACCESS_REG(exit_qualification);
+	if (exit_qualification & TYPE_MOV_FROM_DR) {
 		switch (dr) {
+		case 0 ... 3:
+			val = vcpu->arch.db[dr];
+			break;
 		case 6:
-			val = 0xffff0ff0;
+			val = vcpu->arch.dr6;
 			break;
 		case 7:
-			val = 0x400;
+			val = vcpu->arch.dr7;
 			break;
 		default:
 			val = 0;
@@ -2806,7 +2842,38 @@ static int handle_dr(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 		kvm_register_write(vcpu, reg, val);
 		KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
 	} else {
-		/* mov to dr */
+		val = vcpu->arch.regs[reg];
+		switch (dr) {
+		case 0 ... 3:
+			vcpu->arch.db[dr] = val;
+			if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+				vcpu->arch.eff_db[dr] = val;
+			break;
+		case 4 ... 5:
+			if (vcpu->arch.cr4 & X86_CR4_DE)
+				kvm_queue_exception(vcpu, UD_VECTOR);
+			break;
+		case 6:
+			if (val & 0xffffffff00000000ULL) {
+				kvm_queue_exception(vcpu, GP_VECTOR);
+				break;
+			}
+			vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1;
+			break;
+		case 7:
+			if (val & 0xffffffff00000000ULL) {
+				kvm_queue_exception(vcpu, GP_VECTOR);
+				break;
+			}
+			vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
+			if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+				vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
+				vcpu->arch.switch_db_regs =
+					(val & DR7_BP_EN_MASK);
+			}
+			break;
+		}
+		KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)val, handler);
 	}
 	skip_emulated_instruction(vcpu);
 	return 1;
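handle_dr() now decodes the exit qualification through named macros instead of the bare 7/15/16 literals. Per the VMX exit-qualification layout for debug-register accesses, bits 2:0 hold the DR number, bit 4 the access direction, and bits 11:8 the GPR operand. The definitions are presumably added alongside handle_dr(); since the hunk that defines them is not part of this diff, the bodies below are assumptions:

/* Assumed definitions matching the VMX exit-qualification layout. */
#define DEBUG_REG_ACCESS_NUM	0x7	/* bits 2:0: debug register number */
#define DEBUG_REG_ACCESS_TYPE	0x10	/* bit 4: direction of access */
#define TYPE_MOV_TO_DR		(0 << 4)
#define TYPE_MOV_FROM_DR	(1 << 4)
#define DEBUG_REG_ACCESS_REG(q)	(((q) >> 8) & 0xf) /* bits 11:8: GPR */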
@@ -2957,7 +3024,18 @@ static int handle_task_switch(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	}
 	tss_selector = exit_qualification;
 
-	return kvm_task_switch(vcpu, tss_selector, reason);
+	if (!kvm_task_switch(vcpu, tss_selector, reason))
+		return 0;
+
+	/* clear all local breakpoint enable flags */
+	vmcs_writel(GUEST_DR7, vmcs_readl(GUEST_DR7) & ~55);
+
+	/*
+	 * TODO: What about debug traps on tss switch?
+	 *       Are we supposed to inject them and update dr6?
+	 */
+
+	return 1;
 }
 
 static int handle_ept_violation(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
@@ -3342,6 +3420,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	 */
 	vmcs_writel(HOST_CR0, read_cr0());
 
+	set_debugreg(vcpu->arch.dr6, 6);
+
 	asm(
 		/* Store host registers */
 		"push %%"R"dx; push %%"R"bp;"
@@ -3436,6 +3516,8 @@ static void vmx_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP));
 	vcpu->arch.regs_dirty = 0;
 
+	get_debugreg(vcpu->arch.dr6, 6);
+
 	vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 	if (vmx->rmode.irq.pending)
 		fixup_rmode_irq(vmx);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e990d164b56d..300bc4d42abc 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3025,10 +3025,34 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
 	kvm_guest_enter();
 
+	get_debugreg(vcpu->arch.host_dr6, 6);
+	get_debugreg(vcpu->arch.host_dr7, 7);
+	if (unlikely(vcpu->arch.switch_db_regs)) {
+		get_debugreg(vcpu->arch.host_db[0], 0);
+		get_debugreg(vcpu->arch.host_db[1], 1);
+		get_debugreg(vcpu->arch.host_db[2], 2);
+		get_debugreg(vcpu->arch.host_db[3], 3);
+
+		set_debugreg(0, 7);
+		set_debugreg(vcpu->arch.eff_db[0], 0);
+		set_debugreg(vcpu->arch.eff_db[1], 1);
+		set_debugreg(vcpu->arch.eff_db[2], 2);
+		set_debugreg(vcpu->arch.eff_db[3], 3);
+	}
 
 	KVMTRACE_0D(VMENTRY, vcpu, entryexit);
 	kvm_x86_ops->run(vcpu, kvm_run);
 
+	if (unlikely(vcpu->arch.switch_db_regs)) {
+		set_debugreg(0, 7);
+		set_debugreg(vcpu->arch.host_db[0], 0);
+		set_debugreg(vcpu->arch.host_db[1], 1);
+		set_debugreg(vcpu->arch.host_db[2], 2);
+		set_debugreg(vcpu->arch.host_db[3], 3);
+	}
+	set_debugreg(vcpu->arch.host_dr6, 6);
+	set_debugreg(vcpu->arch.host_dr7, 7);
+
 	vcpu->guest_mode = 0;
 	local_irq_enable();
 
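This hunk is the heart of the lazy switching scheme: DR6/DR7 are always saved and restored around guest entry, but the costlier DR0..DR3 swap happens only when switch_db_regs indicates the guest actually armed a breakpoint, and DR7 is zeroed before DR0..DR3 are reloaded so no half-updated breakpoint can fire in between. A standalone sketch of that sequence (get_debugreg/set_debugreg are modeled by stubs here; the kernel's macros of the same names access the real registers):

/* Stand-ins for the kernel's get_debugreg()/set_debugreg() macros. */
static unsigned long hw_dr[8];			/* simulated DR0..DR7 */
#define get_debugreg(var, n)	((var) = hw_dr[n])
#define set_debugreg(val, n)	(hw_dr[n] = (val))

struct vcpu_debug {
	int switch_db_regs;
	unsigned long eff_db[4], host_db[4];
	unsigned long host_dr6, host_dr7;
};

/* Mirrors the vcpu_enter_guest() entry sequence above. */
static void debugreg_enter_guest(struct vcpu_debug *v)
{
	int i;

	get_debugreg(v->host_dr6, 6);
	get_debugreg(v->host_dr7, 7);
	if (v->switch_db_regs) {
		for (i = 0; i < 4; i++)
			get_debugreg(v->host_db[i], i);
		set_debugreg(0, 7);	/* disarm before touching DR0..DR3 */
		for (i = 0; i < 4; i++)
			set_debugreg(v->eff_db[i], i);
	}
}

/* Mirrors the post-run restore sequence above. */
static void debugreg_exit_guest(struct vcpu_debug *v)
{
	int i;

	if (v->switch_db_regs) {
		set_debugreg(0, 7);
		for (i = 0; i < 4; i++)
			set_debugreg(v->host_db[i], i);
	}
	set_debugreg(v->host_dr6, 6);
	set_debugreg(v->host_dr7, 7);
}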
@@ -4035,6 +4059,11 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.nmi_pending = false;
 	vcpu->arch.nmi_injected = false;
 
+	vcpu->arch.switch_db_regs = 0;
+	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
+	vcpu->arch.dr6 = DR6_FIXED_1;
+	vcpu->arch.dr7 = DR7_FIXED_1;
+
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
 