author     Avi Kivity <avi@redhat.com>  2010-07-27 05:30:24 -0400
committer  Avi Kivity <avi@redhat.com>  2010-10-24 04:52:50 -0400
commit     3842d135ff246b6543f1df77f5600e12094a6845 (patch)
tree       7b65456a0527fc3ea753a49c528643fd3b52a7d6
parent     b0bc3ee2b54fcea0df42cc9aa05103b1ccd89db0 (diff)
KVM: Check for pending events before attempting injection
Instead of blindly attempting to inject an event before each guest entry,
check for a possible event first in vcpu->requests. Sites that can trigger
event injection are modified to set KVM_REQ_EVENT:
- interrupt, nmi window opening
- ppr updates
- i8259 output changes
- local apic irr changes
- rflags updates
- gif flag set
- event set on exit
This improves non-injecting entry performance, and sets the stage for
non-atomic injection.
Signed-off-by: Avi Kivity <avi@redhat.com>
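
The pattern the patch relies on is small: every site that might make an event injectable sets the KVM_REQ_EVENT bit in vcpu->requests and kicks the vcpu, and vcpu_enter_guest() consumes that bit with a test-and-clear before deciding whether to run inject_pending_event() at all. The fragment below is a minimal user-space sketch of that producer/consumer pattern, not kernel code: the toy_* names are invented for illustration and merely stand in for kvm_make_request()/kvm_check_request() operating on KVM_REQ_EVENT.

/* Simplified model of the KVM_REQ_EVENT pattern: producers set a request
 * bit, the entry path atomically tests and clears it, and the injection
 * logic runs only when the bit (or an explicit interrupt-window request)
 * says there may be work to do. Illustrative sketch only. */
#include <stdbool.h>
#include <stdio.h>

#define TOY_REQ_EVENT 11	/* mirrors KVM_REQ_EVENT = 11 */

struct toy_vcpu {
	unsigned long requests;	/* one bit per pending request */
	bool nmi_pending;
	bool irq_pending;
};

/* Producer side: any event source (PIC output change, PPR update, ...) */
static void toy_make_request(int req, struct toy_vcpu *vcpu)
{
	__atomic_fetch_or(&vcpu->requests, 1UL << req, __ATOMIC_SEQ_CST);
}

/* Consumer side: clear the bit and report whether it was set,
 * roughly what test_and_clear_bit() provides in the kernel. */
static bool toy_check_request(int req, struct toy_vcpu *vcpu)
{
	unsigned long bit = 1UL << req;

	return __atomic_fetch_and(&vcpu->requests, ~bit, __ATOMIC_SEQ_CST) & bit;
}

static void toy_enter_guest(struct toy_vcpu *vcpu, bool req_int_win)
{
	bool req_event = toy_check_request(TOY_REQ_EVENT, vcpu);

	if (req_event || req_int_win)
		printf("inject_pending_event(): nmi=%d irq=%d\n",
		       vcpu->nmi_pending, vcpu->irq_pending);
	else
		printf("fast path: no injection work\n");
}

int main(void)
{
	struct toy_vcpu vcpu = { 0 };

	toy_enter_guest(&vcpu, false);		/* fast path */

	vcpu.irq_pending = true;
	toy_make_request(TOY_REQ_EVENT, &vcpu);	/* e.g. i8259 output change */
	toy_enter_guest(&vcpu, false);		/* injection path */
	return 0;
}

In the patch itself the test-and-clear happens after local_irq_disable(), and the bit is re-set if the entry is abandoned (signal pending, reschedule needed, or another request arrived), so a request raised while preparing to enter the guest is never lost.
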
-rw-r--r--  arch/x86/kvm/i8259.c      |  1
-rw-r--r--  arch/x86/kvm/lapic.c      | 13
-rw-r--r--  arch/x86/kvm/svm.c        |  8
-rw-r--r--  arch/x86/kvm/vmx.c        |  6
-rw-r--r--  arch/x86/kvm/x86.c        | 41
-rw-r--r--  include/linux/kvm_host.h  |  1
6 files changed, 58 insertions, 12 deletions
diff --git a/arch/x86/kvm/i8259.c b/arch/x86/kvm/i8259.c
index 6e77471951e8..ab1bb8ff9a8d 100644
--- a/arch/x86/kvm/i8259.c
+++ b/arch/x86/kvm/i8259.c
@@ -67,6 +67,7 @@ static void pic_unlock(struct kvm_pic *s)
 		if (!found)
 			return;
 
+		kvm_make_request(KVM_REQ_EVENT, found);
 		kvm_vcpu_kick(found);
 	}
 }
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 77d8c0f4817d..c6f2f159384a 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -259,9 +259,10 @@ static inline int apic_find_highest_isr(struct kvm_lapic *apic)
 
 static void apic_update_ppr(struct kvm_lapic *apic)
 {
-	u32 tpr, isrv, ppr;
+	u32 tpr, isrv, ppr, old_ppr;
 	int isr;
 
+	old_ppr = apic_get_reg(apic, APIC_PROCPRI);
 	tpr = apic_get_reg(apic, APIC_TASKPRI);
 	isr = apic_find_highest_isr(apic);
 	isrv = (isr != -1) ? isr : 0;
@@ -274,7 +275,10 @@ static void apic_update_ppr(struct kvm_lapic *apic)
 	apic_debug("vlapic %p, ppr 0x%x, isr 0x%x, isrv 0x%x",
 		   apic, ppr, isr, isrv);
 
-	apic_set_reg(apic, APIC_PROCPRI, ppr);
+	if (old_ppr != ppr) {
+		apic_set_reg(apic, APIC_PROCPRI, ppr);
+		kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
+	}
 }
 
 static void apic_set_tpr(struct kvm_lapic *apic, u32 tpr)
@@ -391,6 +395,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			break;
 		}
 
+		kvm_make_request(KVM_REQ_EVENT, vcpu);
 		kvm_vcpu_kick(vcpu);
 		break;
 
@@ -416,6 +421,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 				       "INIT on a runnable vcpu %d\n",
 				       vcpu->vcpu_id);
 			vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
 		} else {
 			apic_debug("Ignoring de-assert INIT to vcpu %d\n",
@@ -430,6 +436,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			result = 1;
 			vcpu->arch.sipi_vector = vector;
 			vcpu->arch.mp_state = KVM_MP_STATE_SIPI_RECEIVED;
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 			kvm_vcpu_kick(vcpu);
 		}
 		break;
@@ -475,6 +482,7 @@ static void apic_set_eoi(struct kvm_lapic *apic)
 		trigger_mode = IOAPIC_EDGE_TRIG;
 	if (!(apic_get_reg(apic, APIC_SPIV) & APIC_SPIV_DIRECTED_EOI))
 		kvm_ioapic_update_eoi(apic->vcpu->kvm, vector, trigger_mode);
+	kvm_make_request(KVM_REQ_EVENT, apic->vcpu);
 }
 
 static void apic_send_ipi(struct kvm_lapic *apic)
@@ -1152,6 +1160,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu)
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 
 void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index e0f4da07f987..1d2ea65d3537 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -2371,6 +2371,7 @@ static int stgi_interception(struct vcpu_svm *svm)
 
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
 	skip_emulated_instruction(&svm->vcpu);
+	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 
 	enable_gif(svm);
 
@@ -2763,6 +2764,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
 
+	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
 	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	/*
@@ -3209,8 +3211,10 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 
 	svm->int3_injected = 0;
 
-	if (svm->vcpu.arch.hflags & HF_IRET_MASK)
+	if (svm->vcpu.arch.hflags & HF_IRET_MASK) {
 		svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
+		kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+	}
 
 	svm->vcpu.arch.nmi_injected = false;
 	kvm_clear_exception_queue(&svm->vcpu);
@@ -3219,6 +3223,8 @@ static void svm_complete_interrupts(struct vcpu_svm *svm)
 	if (!(exitintinfo & SVM_EXITINTINFO_VALID))
 		return;
 
+	kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
+
 	vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
 	type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 1a7691a87178..2ce2e0b13edb 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3327,6 +3327,7 @@ static int handle_wrmsr(struct kvm_vcpu *vcpu)
 
 static int handle_tpr_below_threshold(struct kvm_vcpu *vcpu)
 {
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 1;
 }
 
@@ -3339,6 +3340,8 @@ static int handle_interrupt_window(struct kvm_vcpu *vcpu)
 	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_INTR_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	++vcpu->stat.irq_window_exits;
 
 	/*
@@ -3595,6 +3598,7 @@ static int handle_nmi_window(struct kvm_vcpu *vcpu)
 	cpu_based_vm_exec_control &= ~CPU_BASED_VIRTUAL_NMI_PENDING;
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, cpu_based_vm_exec_control);
 	++vcpu->stat.nmi_window_exits;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	return 1;
 }
@@ -3828,6 +3832,8 @@ static void vmx_complete_interrupts(struct vcpu_vmx *vmx)
 	if (!idtv_info_valid)
 		return;
 
+	kvm_make_request(KVM_REQ_EVENT, &vmx->vcpu);
+
 	vector = idt_vectoring_info & VECTORING_INFO_VECTOR_MASK;
 	type = idt_vectoring_info & VECTORING_INFO_TYPE_MASK;
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3ff0a8ff275c..e7198036db61 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -284,6 +284,8 @@ static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
 	u32 prev_nr;
 	int class1, class2;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	if (!vcpu->arch.exception.pending) {
 	queue:
 		vcpu->arch.exception.pending = true;
@@ -356,6 +358,7 @@ void kvm_propagate_fault(struct kvm_vcpu *vcpu)
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu)
 {
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	vcpu->arch.nmi_pending = 1;
 }
 EXPORT_SYMBOL_GPL(kvm_inject_nmi);
@@ -2418,6 +2421,7 @@ static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
 		return -ENXIO;
 
 	kvm_queue_interrupt(vcpu, irq->irq, false);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 
 	return 0;
 }
@@ -2571,6 +2575,8 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 	if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
 		vcpu->arch.sipi_vector = events->sipi_vector;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return 0;
 }
 
@@ -4329,6 +4335,7 @@ done:
 
 	toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
 	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
 	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
 
@@ -4998,6 +5005,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
+	bool req_event;
 
 	if (vcpu->requests) {
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5045,8 +5053,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	local_irq_disable();
 
+	req_event = kvm_check_request(KVM_REQ_EVENT, vcpu);
+
 	if (!atomic_read(&vcpu->guest_mode) || vcpu->requests
 	    || need_resched() || signal_pending(current)) {
+		if (req_event)
+			kvm_make_request(KVM_REQ_EVENT, vcpu);
 		atomic_set(&vcpu->guest_mode, 0);
 		smp_wmb();
 		local_irq_enable();
@@ -5055,17 +5067,19 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		goto out;
 	}
 
-	inject_pending_event(vcpu);
+	if (req_event || req_int_win) {
+		inject_pending_event(vcpu);
 
-	/* enable NMI/IRQ window open exits if needed */
-	if (vcpu->arch.nmi_pending)
-		kvm_x86_ops->enable_nmi_window(vcpu);
-	else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
-		kvm_x86_ops->enable_irq_window(vcpu);
+		/* enable NMI/IRQ window open exits if needed */
+		if (vcpu->arch.nmi_pending)
+			kvm_x86_ops->enable_nmi_window(vcpu);
+		else if (kvm_cpu_has_interrupt(vcpu) || req_int_win)
+			kvm_x86_ops->enable_irq_window(vcpu);
 
-	if (kvm_lapic_enabled(vcpu)) {
-		update_cr8_intercept(vcpu);
-		kvm_lapic_sync_to_vapic(vcpu);
+		if (kvm_lapic_enabled(vcpu)) {
+			update_cr8_intercept(vcpu);
+			kvm_lapic_sync_to_vapic(vcpu);
+		}
 	}
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -5305,6 +5319,8 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
 	vcpu->arch.exception.pending = false;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return 0;
 }
 
@@ -5368,6 +5384,7 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
 				    struct kvm_mp_state *mp_state)
 {
 	vcpu->arch.mp_state = mp_state->mp_state;
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return 0;
 }
 
@@ -5389,6 +5406,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
 	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
 	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return EMULATE_DONE;
 }
 EXPORT_SYMBOL_GPL(kvm_task_switch);
@@ -5459,6 +5477,8 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 	    !is_protmode(vcpu))
 		vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return 0;
 }
 
@@ -5691,6 +5711,8 @@ int kvm_arch_vcpu_reset(struct kvm_vcpu *vcpu)
 	vcpu->arch.dr6 = DR6_FIXED_1;
 	vcpu->arch.dr7 = DR7_FIXED_1;
 
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
+
 	return kvm_x86_ops->vcpu_reset(vcpu);
 }
 
@@ -6001,6 +6023,7 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
 		rflags |= X86_EFLAGS_TF;
 	kvm_x86_ops->set_rflags(vcpu, rflags);
+	kvm_make_request(KVM_REQ_EVENT, vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 917e68ff5ed2..6022da1490e4 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -39,6 +39,7 @@
 #define KVM_REQ_KVMCLOCK_UPDATE 8
 #define KVM_REQ_KICK 9
 #define KVM_REQ_DEACTIVATE_FPU 10
+#define KVM_REQ_EVENT 11
 
 #define KVM_USERSPACE_IRQ_SOURCE_ID 0
 