author		Paolo Bonzini <pbonzini@redhat.com>	2014-02-21 04:17:24 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2014-03-11 05:46:02 -0400
commit		c77fb5fe6f031bee9403397ae7b94ea22ea19aa7 (patch)
tree		d730c16f4b6de15f2e80a0b1c77605379bb14d27
parent		360b948d88bf30ef4b10b693adf497f51fb46a08 (diff)
KVM: x86: Allow the guest to run with dirty debug registers
When not running in guest-debug mode, the guest controls the debug registers and having to take an exit for each DR access is a waste of time. If the guest gets into a state where each context switch causes DR to be saved and restored, this can take away as much as 40% of the execution time from the guest.

After this patch, VMX- and SVM-specific code can set a flag in switch_db_regs, telling vcpu_enter_guest that on the next exit the debug registers might be dirty and need to be reloaded (syncing will be taken care of by a new callback in kvm_x86_ops). This flag can be set on the first access to a debug register, so that multiple accesses to the debug registers only cause one vmexit.

Note that since the guest will be able to read debug registers and enable breakpoints in DR7, we need to ensure that they are synchronized on entry to the guest, including DR6, which was not synced before.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--	arch/x86/include/asm/kvm_host.h	2
-rw-r--r--	arch/x86/kvm/x86.c	16
2 files changed, 18 insertions, 0 deletions
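The VMX- and SVM-specific side of this is not part of this commit; it arrives in follow-up patches. As a rough sketch of the intended flow (the vendor_* function names and the *_dr_intercepts() helpers are illustrative assumptions, not code from this series), a backend would drop its DR intercepts and set KVM_DEBUGREG_WONT_EXIT on the first DR-access vmexit, and its sync_dirty_debug_regs callback would later copy the live hardware values back into vcpu->arch.db[] so that vcpu_enter_guest can refresh eff_db[]:

/* Illustrative sketch only: the vendor_* names and the *_dr_intercepts()
 * helpers are placeholders for whatever the backend actually uses. */
static int vendor_handle_dr_access_exit(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug == 0) {
		/* Stop intercepting DR accesses; from now on the guest
		 * reads and writes the hardware registers directly. */
		vendor_clear_dr_intercepts(vcpu);
		vcpu->arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
		return 1;	/* resume the guest */
	}
	/* guest-debug mode: keep emulating DR accesses as before */
	return kvm_emulate_dr_access(vcpu);	/* hypothetical fallback */
}

/* kvm_x86_ops->sync_dirty_debug_regs: called from vcpu_enter_guest right
 * after the vmexit, while the guest's values are still in the hardware. */
static void vendor_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
{
	get_debugreg(vcpu->arch.db[0], 0);
	get_debugreg(vcpu->arch.db[1], 1);
	get_debugreg(vcpu->arch.db[2], 2);
	get_debugreg(vcpu->arch.db[3], 3);
	/* DR6/DR7 recovery is vendor-specific and omitted here. */

	vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
	vendor_set_dr_intercepts(vcpu);	/* exit on the next DR access again */
}

With something like this in place, a guest that touches DR0-DR3 on every context switch pays for a single vmexit instead of one per access, while vcpu_enter_guest keeps eff_db[] and DR6 consistent on the next entry.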
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 35f538bda3a9..fcaf9c961265 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -339,6 +339,7 @@ struct kvm_pmu {
 
 enum {
 	KVM_DEBUGREG_BP_ENABLED = 1,
+	KVM_DEBUGREG_WONT_EXIT = 2,
 };
 
 struct kvm_vcpu_arch {
@@ -707,6 +708,7 @@ struct kvm_x86_ops {
 	void (*set_gdt)(struct kvm_vcpu *vcpu, struct desc_ptr *dt);
 	u64 (*get_dr6)(struct kvm_vcpu *vcpu);
 	void (*set_dr6)(struct kvm_vcpu *vcpu, unsigned long value);
+	void (*sync_dirty_debug_regs)(struct kvm_vcpu *vcpu);
 	void (*set_dr7)(struct kvm_vcpu *vcpu, unsigned long value);
 	void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
 	unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 85c74e7df2df..d906391a0a3f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6040,12 +6040,28 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		set_debugreg(vcpu->arch.eff_db[1], 1);
 		set_debugreg(vcpu->arch.eff_db[2], 2);
 		set_debugreg(vcpu->arch.eff_db[3], 3);
+		set_debugreg(vcpu->arch.dr6, 6);
 	}
 
 	trace_kvm_entry(vcpu->vcpu_id);
 	kvm_x86_ops->run(vcpu);
 
 	/*
+	 * Do this here before restoring debug registers on the host. And
+	 * since we do this before handling the vmexit, a DR access vmexit
+	 * can (a) read the correct value of the debug registers, (b) set
+	 * KVM_DEBUGREG_WONT_EXIT again.
+	 */
+	if (unlikely(vcpu->arch.switch_db_regs & KVM_DEBUGREG_WONT_EXIT)) {
+		int i;
+
+		WARN_ON(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP);
+		kvm_x86_ops->sync_dirty_debug_regs(vcpu);
+		for (i = 0; i < KVM_NR_DB_REGS; i++)
+			vcpu->arch.eff_db[i] = vcpu->arch.db[i];
+	}
+
+	/*
 	 * If the guest has used debug registers, at least dr7
 	 * will be disabled while returning to the host.
 	 * If we don't have active breakpoints in the host, we don't