author     Paolo Bonzini <pbonzini@redhat.com>    2014-03-27 06:29:28 -0400
committer  Paolo Bonzini <pbonzini@redhat.com>    2014-07-11 03:13:57 -0400
commit     6addfc42992be4b073c39137ecfdf4b2aa2d487f (patch)
tree       2251d34fd0234b6195f8e164983a95a30ae031b2 /arch/x86/kvm
parent     37ccdcbe0757196ec98c0dcf9754bec8423807a5 (diff)
KVM: x86: avoid useless set of KVM_REQ_EVENT after emulation
Despite the provisions to emulate up to 130 consecutive instructions, in
practice KVM will emulate just one before exiting handle_invalid_guest_state,
because x86_emulate_instruction always sets KVM_REQ_EVENT.

However, we only need to do this if an interrupt could be injected, which
happens a) if an interrupt shadow bit (STI or MOV SS) has gone away; b) if
the interrupt flag has just been set (instructions other than STI can set
it without enabling an interrupt shadow).

This cuts another 700-900 cycles from the cost of emulating an instruction
(measured on a Sandy Bridge Xeon: 1650-2600 cycles before the patch on
kvm-unit-tests, 925-1700 afterwards).

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
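To make the two conditions concrete, here is a minimal standalone sketch in plain C (not kernel code; the function name and the shadow encoding are illustrative assumptions) of the check the patch distributes across toggle_interruptibility and the emulation writeback path:

#include <stdio.h>
#include <stdbool.h>

#define X86_EFLAGS_IF (1UL << 9)

/*
 * Illustrative only: returns true when an interrupt could newly be
 * injected after emulating one instruction, i.e. when KVM_REQ_EVENT
 * is actually needed.
 *
 * Condition (a): an interrupt shadow (STI or MOV SS) was in effect
 * before the instruction and is gone afterwards.
 * Condition (b): EFLAGS.IF went from 0 to 1 (e.g. via POPF), which
 * does not raise an interrupt shadow.
 */
static bool need_event_check(unsigned int old_shadow, unsigned int new_shadow,
                             unsigned long old_rflags, unsigned long new_rflags)
{
        bool shadow_gone  = old_shadow && !new_shadow;                  /* (a) */
        bool if_just_set  = (new_rflags & ~old_rflags) & X86_EFLAGS_IF; /* (b) */

        return shadow_gone || if_just_set;
}

int main(void)
{
        /* POPF restoring IF=1 while it was 0: event check needed. */
        printf("%d\n", need_event_check(0, 0, 0x2, 0x2 | X86_EFLAGS_IF));
        /* Plain MOV with IF unchanged and no shadow: nothing to do. */
        printf("%d\n", need_event_check(0, 0, 0x202, 0x202));
        return 0;
}

Every other emulated instruction skips the request entirely, which is where the saved cycles come from.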
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/x86.c  36
1 file changed, 26 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a56126e6bd75..cd9316786dca 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -87,6 +87,7 @@ static u64 __read_mostly efer_reserved_bits = ~((u64)EFER_SCE);
 
 static void update_cr8_intercept(struct kvm_vcpu *vcpu);
 static void process_nmi(struct kvm_vcpu *vcpu);
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags);
 
 struct kvm_x86_ops *kvm_x86_ops;
 EXPORT_SYMBOL_GPL(kvm_x86_ops);
@@ -4868,8 +4869,11 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 	 */
 	if (int_shadow & mask)
 		mask = 0;
-	if (unlikely(int_shadow || mask))
+	if (unlikely(int_shadow || mask)) {
 		kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
+		if (!mask)
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
+	}
 }
 
 static void inject_emulated_exception(struct kvm_vcpu *vcpu)
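The subtle part of this hunk is that an existing shadow is consumed by the emulated instruction, so writing the same mask back would wrongly re-arm it for one more instruction. A small model of the logic, with an assumed stand-in for the vcpu's shadow state (this is a sketch, not the kernel function itself):

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for kvm_x86_ops->get/set_interrupt_shadow(). */
static uint32_t vcpu_shadow;

static void toggle_interruptibility_model(uint32_t mask)
{
        uint32_t int_shadow = vcpu_shadow;

        /*
         * An instruction has just been emulated, so a pending shadow
         * has been consumed; keeping the same bits set would re-arm
         * it, hence drop the mask entirely.
         */
        if (int_shadow & mask)
                mask = 0;

        if (int_shadow || mask) {
                vcpu_shadow = mask;
                /*
                 * The shadow just expired: this is condition (a) from
                 * the commit message, the one case where the shadow
                 * logic itself must request KVM_REQ_EVENT.
                 */
                if (!mask)
                        printf("kvm_make_request(KVM_REQ_EVENT)\n");
        }
}

int main(void)
{
        vcpu_shadow = 1;                  /* STI shadow pending */
        toggle_interruptibility_model(1); /* consumed -> event requested */
        return 0;
}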
@@ -5095,20 +5099,18 @@ static int kvm_vcpu_check_hw_bp(unsigned long addr, u32 type, u32 dr7,
 	return dr6;
 }
 
-static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, int *r)
+static void kvm_vcpu_check_singlestep(struct kvm_vcpu *vcpu, unsigned long rflags, int *r)
 {
 	struct kvm_run *kvm_run = vcpu->run;
 
 	/*
-	 * Use the "raw" value to see if TF was passed to the processor.
-	 * Note that the new value of the flags has not been saved yet.
+	 * rflags is the old, "raw" value of the flags.  The new value has
+	 * not been saved yet.
 	 *
 	 * This is correct even for TF set by the guest, because "the
 	 * processor will not generate this exception after the instruction
 	 * that sets the TF flag".
 	 */
-	unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
-
 	if (unlikely(rflags & X86_EFLAGS_TF)) {
 		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
 			kvm_run->debug.arch.dr6 = DR6_BS | DR6_FIXED_1;
@@ -5275,13 +5277,22 @@ restart:
 		r = EMULATE_DONE;
 
 	if (writeback) {
+		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);
 		toggle_interruptibility(vcpu, ctxt->interruptibility);
-		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 		kvm_rip_write(vcpu, ctxt->eip);
 		if (r == EMULATE_DONE)
-			kvm_vcpu_check_singlestep(vcpu, &r);
-		kvm_set_rflags(vcpu, ctxt->eflags);
+			kvm_vcpu_check_singlestep(vcpu, rflags, &r);
+		__kvm_set_rflags(vcpu, ctxt->eflags);
+
+		/*
+		 * For STI, interrupts are shadowed; so KVM_REQ_EVENT will
+		 * do nothing, and it will be requested again as soon as
+		 * the shadow expires.  But we still need to check here,
+		 * because POPF has no interrupt shadow.
+		 */
+		if (unlikely((ctxt->eflags & ~rflags) & X86_EFLAGS_IF))
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 	} else
 		vcpu->arch.emulate_regs_need_sync_to_vcpu = true;
 
@@ -7406,12 +7417,17 @@ unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_get_rflags);
 
-void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+static void __kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
 	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
 		rflags |= X86_EFLAGS_TF;
 	kvm_x86_ops->set_rflags(vcpu, rflags);
+}
+
+void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
+{
+	__kvm_set_rflags(vcpu, rflags);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
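The last hunk is the enabling refactor: __kvm_set_rflags() performs only the flags write (still forcing TF during guest single-stepping), while the exported kvm_set_rflags() keeps the old write-then-request contract for callers outside the emulator. As a design pattern this is simply "split the side effect out of the setter"; a minimal sketch with hypothetical names, not the kernel symbols:

#include <stdio.h>

static unsigned long flags_state;

/* Inner helper: performs only the state write. The emulation path
 * calls this and decides separately whether an event check is needed. */
static void set_flags_raw(unsigned long flags)
{
        flags_state = flags;
}

/* Public entry point: preserves the old unconditional contract of
 * "write the flags, then request an event check". */
void set_flags(unsigned long flags)
{
        set_flags_raw(flags);
        printf("kvm_make_request(KVM_REQ_EVENT)\n");
}

int main(void)
{
        set_flags_raw(0x202);   /* emulator path: no request */
        set_flags(0x202);       /* external callers: request preserved */
        return 0;
}

Keeping the exported function's behavior unchanged means no caller outside x86.c needs to be audited by this patch.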