path: root/arch/x86/kvm
author    Jan Kiszka <jan.kiszka@siemens.com>    2012-09-20 23:42:55 -0400
committer Avi Kivity <avi@redhat.com>            2012-09-23 09:00:07 -0400
commit    c863901075a42d50678616d8ee4b96ef13080498 (patch)
tree      947ffa1bd109f15b9cb483c8257fb028d0ed7df3 /arch/x86/kvm
parent    7a84428af7ca6a847f058c9ff244a18a2664fd1b (diff)
KVM: x86: Fix guest debug across vcpu INIT reset
If we reset a vcpu on INIT, we so far overwrote dr7 as provided by KVM_SET_GUEST_DEBUG, and we also cleared switch_db_regs unconditionally.

Fix this by saving the dr7 used for guest debugging and calculating the effective register value as well as switch_db_regs on any potential change. This changes the focus of the set_guest_debug vendor op to update_db_bp_intercept.

Found while trying to stop on start_secondary.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm'):
 -rw-r--r--  arch/x86/kvm/svm.c | 23
 -rw-r--r--  arch/x86/kvm/vmx.c | 14
 -rw-r--r--  arch/x86/kvm/x86.c | 26
 3 files changed, 22 insertions(+), 41 deletions(-)
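Before the diff itself, a minimal standalone sketch of the DR7 selection logic that the patch centralizes in the new kvm_update_dr7() helper. The struct and the constant values below are simplified stand-ins chosen for illustration, not the kernel's definitions:

/* Toy model of kvm_update_dr7(): pick the debugger-supplied dr7 when
 * hardware breakpoints are in use, otherwise the guest's own dr7, and
 * derive switch_db_regs from whichever value won. */
#include <stdio.h>

#define TOY_GUESTDBG_USE_HW_BP  0x1UL   /* stand-in for KVM_GUESTDBG_USE_HW_BP */
#define TOY_DR7_BP_EN_MASK      0xffUL  /* stand-in for DR7_BP_EN_MASK */
#define TOY_DR7_FIXED_1         0x400UL /* dr7 reset value, as in the patch */

struct toy_vcpu {
	unsigned long guest_debug;      /* debug flags set by userspace */
	unsigned long guest_debug_dr7;  /* dr7 supplied via KVM_SET_GUEST_DEBUG */
	unsigned long dr7;              /* dr7 as written by the guest */
	unsigned long effective_dr7;    /* value the vendor op would program */
	int switch_db_regs;
};

static void toy_update_dr7(struct toy_vcpu *vcpu)
{
	unsigned long dr7;

	if (vcpu->guest_debug & TOY_GUESTDBG_USE_HW_BP)
		dr7 = vcpu->guest_debug_dr7;
	else
		dr7 = vcpu->dr7;
	vcpu->effective_dr7 = dr7;
	vcpu->switch_db_regs = (dr7 & TOY_DR7_BP_EN_MASK) != 0;
}

int main(void)
{
	struct toy_vcpu vcpu = {
		.guest_debug = TOY_GUESTDBG_USE_HW_BP,
		.guest_debug_dr7 = 0x401,     /* host debugger armed breakpoint 0 */
		.dr7 = TOY_DR7_FIXED_1,       /* guest value after INIT reset */
	};

	toy_update_dr7(&vcpu);
	printf("effective dr7 = %#lx, switch_db_regs = %d\n",
	       vcpu.effective_dr7, vcpu.switch_db_regs);
	return 0;
}

With KVM_GUESTDBG_USE_HW_BP set, the debugger's dr7 wins and switch_db_regs follows it, so an INIT reset (which now only writes vcpu->arch.dr7 = DR7_FIXED_1 and calls kvm_update_dr7()) no longer clobbers host-side hardware breakpoints.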
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 818fceb3091e..d017df3899ef 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1146,7 +1146,6 @@ static void init_vmcb(struct vcpu_svm *svm)
 
 	svm_set_efer(&svm->vcpu, 0);
 	save->dr6 = 0xffff0ff0;
-	save->dr7 = 0x400;
 	kvm_set_rflags(&svm->vcpu, 2);
 	save->rip = 0x0000fff0;
 	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
@@ -1643,7 +1642,7 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 	mark_dirty(svm->vmcb, VMCB_SEG);
 }
 
-static void update_db_intercept(struct kvm_vcpu *vcpu)
+static void update_db_bp_intercept(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
@@ -1663,20 +1662,6 @@ static void update_db_intercept(struct kvm_vcpu *vcpu)
 		vcpu->guest_debug = 0;
 }
 
-static void svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
-{
-	struct vcpu_svm *svm = to_svm(vcpu);
-
-	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-		svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
-	else
-		svm->vmcb->save.dr7 = vcpu->arch.dr7;
-
-	mark_dirty(svm->vmcb, VMCB_DR);
-
-	update_db_intercept(vcpu);
-}
-
 static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 {
 	if (sd->next_asid > sd->max_asid) {
1682 if (sd->next_asid > sd->max_asid) { 1667 if (sd->next_asid > sd->max_asid) {
@@ -1748,7 +1733,7 @@ static int db_interception(struct vcpu_svm *svm)
 		if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP))
 			svm->vmcb->save.rflags &=
 				~(X86_EFLAGS_TF | X86_EFLAGS_RF);
-		update_db_intercept(&svm->vcpu);
+		update_db_bp_intercept(&svm->vcpu);
 	}
 
 	if (svm->vcpu.guest_debug &
@@ -3659,7 +3644,7 @@ static void enable_nmi_window(struct kvm_vcpu *vcpu)
 	 */
 	svm->nmi_singlestep = true;
 	svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
-	update_db_intercept(vcpu);
+	update_db_bp_intercept(vcpu);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -4253,7 +4238,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.vcpu_load = svm_vcpu_load,
 	.vcpu_put = svm_vcpu_put,
 
-	.set_guest_debug = svm_guest_debug,
+	.update_db_bp_intercept = update_db_bp_intercept,
 	.get_msr = svm_get_msr,
 	.set_msr = svm_set_msr,
 	.get_segment_base = svm_get_segment_base,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 30bcb953afee..5d46c905e06f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2288,16 +2288,6 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 	}
 }
 
-static void set_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
-{
-	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-		vmcs_writel(GUEST_DR7, dbg->arch.debugreg[7]);
-	else
-		vmcs_writel(GUEST_DR7, vcpu->arch.dr7);
-
-	update_exception_bitmap(vcpu);
-}
-
 static __init int cpu_has_kvm_support(void)
 {
 	return cpu_has_vmx();
@@ -3960,8 +3950,6 @@ static int vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 	kvm_rip_write(vcpu, 0);
 	kvm_register_write(vcpu, VCPU_REGS_RSP, 0);
 
-	vmcs_writel(GUEST_DR7, 0x400);
-
 	vmcs_writel(GUEST_GDTR_BASE, 0);
 	vmcs_write32(GUEST_GDTR_LIMIT, 0xffff);
 
@@ -7237,7 +7225,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.vcpu_load = vmx_vcpu_load,
 	.vcpu_put = vmx_vcpu_put,
 
-	.set_guest_debug = set_guest_debug,
+	.update_db_bp_intercept = update_exception_bitmap,
 	.get_msr = vmx_get_msr,
 	.set_msr = vmx_set_msr,
 	.get_segment_base = vmx_get_segment_base,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 7d44204c6041..b16d4a5bfa41 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -692,6 +692,18 @@ unsigned long kvm_get_cr8(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_cr8);
 
+static void kvm_update_dr7(struct kvm_vcpu *vcpu)
+{
+	unsigned long dr7;
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+		dr7 = vcpu->arch.guest_debug_dr7;
+	else
+		dr7 = vcpu->arch.dr7;
+	kvm_x86_ops->set_dr7(vcpu, dr7);
+	vcpu->arch.switch_db_regs = (dr7 & DR7_BP_EN_MASK);
+}
+
 static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 {
 	switch (dr) {
@@ -717,10 +729,7 @@ static int __kvm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long val)
 		if (val & 0xffffffff00000000ULL)
 			return -1; /* #GP */
 		vcpu->arch.dr7 = (val & DR7_VOLATILE) | DR7_FIXED_1;
-		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
-			kvm_x86_ops->set_dr7(vcpu, vcpu->arch.dr7);
-			vcpu->arch.switch_db_regs = (val & DR7_BP_EN_MASK);
-		}
+		kvm_update_dr7(vcpu);
 		break;
 	}
 
@@ -5851,13 +5860,12 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP) {
 		for (i = 0; i < KVM_NR_DB_REGS; ++i)
 			vcpu->arch.eff_db[i] = dbg->arch.debugreg[i];
-		vcpu->arch.switch_db_regs =
-			(dbg->arch.debugreg[7] & DR7_BP_EN_MASK);
+		vcpu->arch.guest_debug_dr7 = dbg->arch.debugreg[7];
 	} else {
 		for (i = 0; i < KVM_NR_DB_REGS; i++)
 			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
-		vcpu->arch.switch_db_regs = (vcpu->arch.dr7 & DR7_BP_EN_MASK);
 	}
+	kvm_update_dr7(vcpu);
 
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
 		vcpu->arch.singlestep_rip = kvm_rip_read(vcpu) +
@@ -5869,7 +5877,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
 	 */
 	kvm_set_rflags(vcpu, rflags);
 
-	kvm_x86_ops->set_guest_debug(vcpu, dbg);
+	kvm_x86_ops->update_db_bp_intercept(vcpu);
 
 	r = 0;
 
@@ -6045,10 +6053,10 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.nmi_pending = 0;
 	vcpu->arch.nmi_injected = false;
 
-	vcpu->arch.switch_db_regs = 0;
 	memset(vcpu->arch.db, 0, sizeof(vcpu->arch.db));
 	vcpu->arch.dr6 = DR6_FIXED_1;
 	vcpu->arch.dr7 = DR7_FIXED_1;
+	kvm_update_dr7(vcpu);
 
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	vcpu->arch.apf.msr_val = 0;