diff options
-rw-r--r-- | arch/x86/include/asm/kvm_host.h | 5 | ||||
-rw-r--r-- | arch/x86/kvm/svm.c | 66 | ||||
-rw-r--r-- | arch/x86/kvm/vmx.c | 78 | ||||
-rw-r--r-- | arch/x86/kvm/x86.c | 78 |
4 files changed, 93 insertions(+), 134 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index 0c49c888be6b..5d5e0a9afcf2 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -496,8 +496,7 @@ struct kvm_x86_ops { | |||
496 | void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); | 496 | void (*set_idt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); |
497 | void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); | 497 | void (*get_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); |
498 | void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); | 498 | void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt); |
499 | int (*get_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long *dest); | 499 | void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value); |
500 | int (*set_dr)(struct kvm_vcpu *vcpu, int dr, unsigned long value); | ||
501 | void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); | 500 | void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg); |
502 | unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); | 501 | unsigned long (*get_rflags)(struct kvm_vcpu *vcpu); |
503 | void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); | 502 | void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags); |
@@ -602,6 +601,8 @@ void kvm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0); | |||
602 | void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); | 601 | void kvm_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3); |
603 | void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); | 602 | void kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4); |
604 | void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); | 603 | void kvm_set_cr8(struct kvm_vcpu *vcpu, unsigned long cr8); |
604 | int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val); | ||
605 | int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val); | ||
605 | unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); | 606 | unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu); |
606 | void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); | 607 | void kvm_lmsw(struct kvm_vcpu *vcpu, unsigned long msw); |
607 | void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); | 608 | void kvm_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l); |
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c index 64b7f60dc5b8..87b36fbbfec8 100644 --- a/arch/x86/kvm/svm.c +++ b/arch/x86/kvm/svm.c | |||
@@ -1307,70 +1307,11 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd) | |||
1307 | svm->vmcb->control.asid = sd->next_asid++; | 1307 | svm->vmcb->control.asid = sd->next_asid++; |
1308 | } | 1308 | } |
1309 | 1309 | ||
1310 | static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest) | 1310 | static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value) |
1311 | { | 1311 | { |
1312 | struct vcpu_svm *svm = to_svm(vcpu); | 1312 | struct vcpu_svm *svm = to_svm(vcpu); |
1313 | 1313 | ||
1314 | switch (dr) { | 1314 | svm->vmcb->save.dr7 = value; |
1315 | case 0 ... 3: | ||
1316 | *dest = vcpu->arch.db[dr]; | ||
1317 | break; | ||
1318 | case 4: | ||
1319 | if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) | ||
1320 | return EMULATE_FAIL; /* will re-inject UD */ | ||
1321 | /* fall through */ | ||
1322 | case 6: | ||
1323 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) | ||
1324 | *dest = vcpu->arch.dr6; | ||
1325 | else | ||
1326 | *dest = svm->vmcb->save.dr6; | ||
1327 | break; | ||
1328 | case 5: | ||
1329 | if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) | ||
1330 | return EMULATE_FAIL; /* will re-inject UD */ | ||
1331 | /* fall through */ | ||
1332 | case 7: | ||
1333 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) | ||
1334 | *dest = vcpu->arch.dr7; | ||
1335 | else | ||
1336 | *dest = svm->vmcb->save.dr7; | ||
1337 | break; | ||
1338 | } | ||
1339 | |||
1340 | return EMULATE_DONE; | ||
1341 | } | ||
1342 | |||
1343 | static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value) | ||
1344 | { | ||
1345 | struct vcpu_svm *svm = to_svm(vcpu); | ||
1346 | |||
1347 | switch (dr) { | ||
1348 | case 0 ... 3: | ||
1349 | vcpu->arch.db[dr] = value; | ||
1350 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) | ||
1351 | vcpu->arch.eff_db[dr] = value; | ||
1352 | break; | ||
1353 | case 4: | ||
1354 | if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) | ||
1355 | return EMULATE_FAIL; /* will re-inject UD */ | ||
1356 | /* fall through */ | ||
1357 | case 6: | ||
1358 | vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1; | ||
1359 | break; | ||
1360 | case 5: | ||
1361 | if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) | ||
1362 | return EMULATE_FAIL; /* will re-inject UD */ | ||
1363 | /* fall through */ | ||
1364 | case 7: | ||
1365 | vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1; | ||
1366 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { | ||
1367 | svm->vmcb->save.dr7 = vcpu->arch.dr7; | ||
1368 | vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK); | ||
1369 | } | ||
1370 | break; | ||
1371 | } | ||
1372 | |||
1373 | return EMULATE_DONE; | ||
1374 | } | 1315 | } |
1375 | 1316 | ||
1376 | static int pf_interception(struct vcpu_svm *svm) | 1317 | static int pf_interception(struct vcpu_svm *svm) |
@@ -3302,8 +3243,7 @@ static struct kvm_x86_ops svm_x86_ops = { | |||
3302 | .set_idt = svm_set_idt, | 3243 | .set_idt = svm_set_idt, |
3303 | .get_gdt = svm_get_gdt, | 3244 | .get_gdt = svm_get_gdt, |
3304 | .set_gdt = svm_set_gdt, | 3245 | .set_gdt = svm_set_gdt, |
3305 | .get_dr = svm_get_dr, | 3246 | .set_dr7 = svm_set_dr7, |
3306 | .set_dr = svm_set_dr, | ||
3307 | .cache_reg = svm_cache_reg, | 3247 | .cache_reg = svm_cache_reg, |
3308 | .get_rflags = svm_get_rflags, | 3248 | .get_rflags = svm_get_rflags, |
3309 | .set_rflags = svm_set_rflags, | 3249 | .set_rflags = svm_set_rflags, |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 1cceca1c59be..fb4a8869bb99 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -3089,19 +3089,9 @@ static int handle_cr(struct kvm_vcpu *vcpu) | |||
3089 | return 0; | 3089 | return 0; |
3090 | } | 3090 | } |
3091 | 3091 | ||
3092 | static int check_dr_alias(struct kvm_vcpu *vcpu) | ||
3093 | { | ||
3094 | if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) { | ||
3095 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
3096 | return -1; | ||
3097 | } | ||
3098 | return 0; | ||
3099 | } | ||
3100 | |||
3101 | static int handle_dr(struct kvm_vcpu *vcpu) | 3092 | static int handle_dr(struct kvm_vcpu *vcpu) |
3102 | { | 3093 | { |
3103 | unsigned long exit_qualification; | 3094 | unsigned long exit_qualification; |
3104 | unsigned long val; | ||
3105 | int dr, reg; | 3095 | int dr, reg; |
3106 | 3096 | ||
3107 | /* Do not handle if the CPL > 0, will trigger GP on re-entry */ | 3097 | /* Do not handle if the CPL > 0, will trigger GP on re-entry */ |
@@ -3136,67 +3126,20 @@ static int handle_dr(struct kvm_vcpu *vcpu) | |||
3136 | dr = exit_qualification & DEBUG_REG_ACCESS_NUM; | 3126 | dr = exit_qualification & DEBUG_REG_ACCESS_NUM; |
3137 | reg = DEBUG_REG_ACCESS_REG(exit_qualification); | 3127 | reg = DEBUG_REG_ACCESS_REG(exit_qualification); |
3138 | if (exit_qualification & TYPE_MOV_FROM_DR) { | 3128 | if (exit_qualification & TYPE_MOV_FROM_DR) { |
3139 | switch (dr) { | 3129 | unsigned long val; |
3140 | case 0 ... 3: | 3130 | if (!kvm_get_dr(vcpu, dr, &val)) |
3141 | val = vcpu->arch.db[dr]; | 3131 | kvm_register_write(vcpu, reg, val); |
3142 | break; | 3132 | } else |
3143 | case 4: | 3133 | kvm_set_dr(vcpu, dr, vcpu->arch.regs[reg]); |
3144 | if (check_dr_alias(vcpu) < 0) | ||
3145 | return 1; | ||
3146 | /* fall through */ | ||
3147 | case 6: | ||
3148 | val = vcpu->arch.dr6; | ||
3149 | break; | ||
3150 | case 5: | ||
3151 | if (check_dr_alias(vcpu) < 0) | ||
3152 | return 1; | ||
3153 | /* fall through */ | ||
3154 | default: /* 7 */ | ||
3155 | val = vcpu->arch.dr7; | ||
3156 | break; | ||
3157 | } | ||
3158 | kvm_register_write(vcpu, reg, val); | ||
3159 | } else { | ||
3160 | val = vcpu->arch.regs[reg]; | ||
3161 | switch (dr) { | ||
3162 | case 0 ... 3: | ||
3163 | vcpu->arch.db[dr] = val; | ||
3164 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) | ||
3165 | vcpu->arch.eff_db[dr] = val; | ||
3166 | break; | ||
3167 | case 4: | ||
3168 | if (check_dr_alias(vcpu) < 0) | ||
3169 | return 1; | ||
3170 | /* fall through */ | ||
3171 | case 6: | ||
3172 | if (val & 0xffffffff00000000ULL) { | ||
3173 | kvm_inject_gp(vcpu, 0); | ||
3174 | return 1; | ||
3175 | } | ||
3176 | vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; | ||
3177 | break; | ||
3178 | case 5: | ||
3179 | if (check_dr_alias(vcpu) < 0) | ||
3180 | return 1; | ||
3181 | /* fall through */ | ||
3182 | default: /* 7 */ | ||
3183 | if (val & 0xffffffff00000000ULL) { | ||
3184 | kvm_inject_gp(vcpu, 0); | ||
3185 | return 1; | ||
3186 | } | ||
3187 | vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; | ||
3188 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { | ||
3189 | vmcs_writel(GUEST_DR7, vcpu->arch.dr7); | ||
3190 | vcpu->arch.switch_db_regs = | ||
3191 | (val & DR7_BP_EN_MASK); | ||
3192 | } | ||
3193 | break; | ||
3194 | } | ||
3195 | } | ||
3196 | skip_emulated_instruction(vcpu); | 3134 | skip_emulated_instruction(vcpu); |
3197 | return 1; | 3135 | return 1; |
3198 | } | 3136 | } |
3199 | 3137 | ||
3138 | static void vmx_set_dr7(struct kvm_vcpu *vcpu, unsigned long val) | ||
3139 | { | ||
3140 | vmcs_writel(GUEST_DR7, val); | ||
3141 | } | ||
3142 | |||
3200 | static int handle_cpuid(struct kvm_vcpu *vcpu) | 3143 | static int handle_cpuid(struct kvm_vcpu *vcpu) |
3201 | { | 3144 | { |
3202 | kvm_emulate_cpuid(vcpu); | 3145 | kvm_emulate_cpuid(vcpu); |
@@ -4187,6 +4130,7 @@ static struct kvm_x86_ops vmx_x86_ops = { | |||
4187 | .set_idt = vmx_set_idt, | 4130 | .set_idt = vmx_set_idt, |
4188 | .get_gdt = vmx_get_gdt, | 4131 | .get_gdt = vmx_get_gdt, |
4189 | .set_gdt = vmx_set_gdt, | 4132 | .set_gdt = vmx_set_gdt, |
4133 | .set_dr7 = vmx_set_dr7, | ||
4190 | .cache_reg = vmx_cache_reg, | 4134 | .cache_reg = vmx_cache_reg, |
4191 | .get_rflags = vmx_get_rflags, | 4135 | .get_rflags = vmx_get_rflags, |
4192 | .set_rflags = vmx_set_rflags, | 4136 | .set_rflags = vmx_set_rflags, |
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index d65e481c5fa4..09dccac2df7e 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -562,6 +562,80 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu) | |||
562 | } | 562 | } |
563 | EXPORT_SYMBOL_GPL(kvm_get_cr8); | 563 | EXPORT_SYMBOL_GPL(kvm_get_cr8); |
564 | 564 | ||
565 | int kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val) | ||
566 | { | ||
567 | switch (dr) { | ||
568 | case 0 ... 3: | ||
569 | vcpu->arch.db[dr] = val; | ||
570 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) | ||
571 | vcpu->arch.eff_db[dr] = val; | ||
572 | break; | ||
573 | case 4: | ||
574 | if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) { | ||
575 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
576 | return 1; | ||
577 | } | ||
578 | /* fall through */ | ||
579 | case 6: | ||
580 | if (val & 0xffffffff00000000ULL) { | ||
581 | kvm_inject_gp(vcpu, 0); | ||
582 | return 1; | ||
583 | } | ||
584 | vcpu->arch.dr6 = (val & DR6_VOLATILE) | DR6_FIXED_1; | ||
585 | break; | ||
586 | case 5: | ||
587 | if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) { | ||
588 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
589 | return 1; | ||
590 | } | ||
591 | /* fall through */ | ||
592 | default: /* 7 */ | ||
593 | if (val & 0xffffffff00000000ULL) { | ||
594 | kvm_inject_gp(vcpu, 0); | ||
595 | return 1; | ||
596 | } | ||
597 | vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1; | ||
598 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) { | ||
599 | kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7); | ||
600 | vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK); | ||
601 | } | ||
602 | break; | ||
603 | } | ||
604 | |||
605 | return 0; | ||
606 | } | ||
607 | EXPORT_SYMBOL_GPL(kvm_set_dr); | ||
608 | |||
609 | int kvm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *val) | ||
610 | { | ||
611 | switch (dr) { | ||
612 | case 0 ... 3: | ||
613 | *val = vcpu->arch.db[dr]; | ||
614 | break; | ||
615 | case 4: | ||
616 | if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) { | ||
617 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
618 | return 1; | ||
619 | } | ||
620 | /* fall through */ | ||
621 | case 6: | ||
622 | *val = vcpu->arch.dr6; | ||
623 | break; | ||
624 | case 5: | ||
625 | if (kvm_read_cr4_bits(vcpu, X86_CR4_DE)) { | ||
626 | kvm_queue_exception(vcpu, UD_VECTOR); | ||
627 | return 1; | ||
628 | } | ||
629 | /* fall through */ | ||
630 | default: /* 7 */ | ||
631 | *val = vcpu->arch.dr7; | ||
632 | break; | ||
633 | } | ||
634 | |||
635 | return 0; | ||
636 | } | ||
637 | EXPORT_SYMBOL_GPL(kvm_get_dr); | ||
638 | |||
565 | static inline u32 bit(int bitno) | 639 | static inline u32 bit(int bitno) |
566 | { | 640 | { |
567 | return 1 << (bitno & 31); | 641 | return 1 << (bitno & 31); |
@@ -3483,14 +3557,14 @@ int emulate_clts(struct kvm_vcpu *vcpu) | |||
3483 | 3557 | ||
3484 | int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) | 3558 | int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest) |
3485 | { | 3559 | { |
3486 | return kvm_x86_ops->get_dr(ctxt->vcpu, dr, dest); | 3560 | return kvm_get_dr(ctxt->vcpu, dr, dest); |
3487 | } | 3561 | } |
3488 | 3562 | ||
3489 | int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) | 3563 | int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value) |
3490 | { | 3564 | { |
3491 | unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U; | 3565 | unsigned long mask = (ctxt->mode == X86EMUL_MODE_PROT64) ? ~0ULL : ~0U; |
3492 | 3566 | ||
3493 | return kvm_x86_ops->set_dr(ctxt->vcpu, dr, value & mask); | 3567 | return kvm_set_dr(ctxt->vcpu, dr, value & mask); |
3494 | } | 3568 | } |
3495 | 3569 | ||
3496 | void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) | 3570 | void kvm_report_emulation_failure(struct kvm_vcpu *vcpu, const char *context) |