author    Nadav Har'El <nyh@il.ibm.com>  2011-09-22 06:52:56 -0400
committer Avi Kivity <avi@redhat.com>   2011-12-27 04:16:43 -0500
commit    d6185f20a0efbf175e12831d0de330e4f21725aa
tree      5105132710e9ad6a1d73e2cf6895765e67dfb5ee /arch/x86/kvm/x86.c
parent    371de6e4e0042adf4f9b54c414154f57414ddd37
KVM: nVMX: Add KVM_REQ_IMMEDIATE_EXIT
This patch adds a new vcpu->requests bit, KVM_REQ_IMMEDIATE_EXIT. This bit
requests that when next entering the guest, we should run it only for as
little as possible, and exit again.

We use this new option in nested VMX: When L1 launches L2, but L0 wishes L1
to continue running so it can inject an event to it, we unfortunately cannot
just pretend to have run L2 for a little while - we must really launch L2,
otherwise certain one-off vmcs12 parameters (namely, L1 injection into L2)
will be lost. So the existing code runs L2 in this case. But L2 could
potentially run for a long time until it exits, and the injection into L1
will be delayed. The new KVM_REQ_IMMEDIATE_EXIT allows us to request that L2
will be entered, as necessary, but will exit as soon as possible after entry.

Our implementation of this request uses smp_send_reschedule() to send a
self-IPI, with interrupts disabled. The interrupts remain disabled until the
guest is entered, and then, after the entry is complete (often including
processing an injection and jumping to the relevant handler), the physical
interrupt is noticed and causes an exit.

On recent Intel processors, we could have achieved the same goal by using
MTF instead of a self-IPI. Another technique worth considering in the future
is to use VM_EXIT_ACK_INTR_ON_EXIT and a highest-priority vector IPI - to
slightly improve performance by avoiding the useless interrupt handler which
ends up being called when smp_send_reschedule() is used.

Signed-off-by: Nadav Har'El <nyh@il.ibm.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
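A minimal illustrative sketch of the requesting side (this page is limited
to x86.c; the helper below is hypothetical and stands in for the nVMX code
described above, but kvm_make_request() is the standard way to set a
vcpu->requests bit):

/*
 * Sketch only, not part of this diff: how nested VMX could raise the
 * new request. If L1 has just launched L2 but L0 wants to inject an
 * event into L1, we cannot skip entering L2 (one-off vmcs12 state such
 * as L1's injection into L2 would be lost), so we enter L2 and ask to
 * exit again as soon as possible after entry.
 */
static bool nested_request_immediate_exit(struct kvm_vcpu *vcpu)
{
	if (is_guest_mode(vcpu) && to_vmx(vcpu)->nested.nested_run_pending) {
		kvm_make_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
		return true;
	}
	return false;
}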
Diffstat (limited to 'arch/x86/kvm/x86.c')
 arch/x86/kvm/x86.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 4c938da2ba0..e24edbc7f2e 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5648,6 +5648,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	int r;
 	bool req_int_win = !irqchip_in_kernel(vcpu->kvm) &&
 		vcpu->run->request_interrupt_window;
+	bool req_immediate_exit = 0;
 
 	if (vcpu->requests) {
 		if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu))
@@ -5687,7 +5688,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 			record_steal_time(vcpu);
 		if (kvm_check_request(KVM_REQ_NMI, vcpu))
 			process_nmi(vcpu);
-
+		req_immediate_exit =
+			kvm_check_request(KVM_REQ_IMMEDIATE_EXIT, vcpu);
 	}
 
 	r = kvm_mmu_reload(vcpu);
@@ -5738,6 +5740,9 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
 
+	if (req_immediate_exit)
+		smp_send_reschedule(vcpu->cpu);
+
 	kvm_guest_enter();
 
 	if (unlikely(vcpu->arch.switch_db_regs)) {
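For context, the request bit itself is defined outside this file, next to
the other KVM_REQ_* bits. A sketch of the definition (the numeric value
shown is an assumption for illustration):

/* include/linux/kvm_host.h -- sketch; the value 15 is an assumption */
#define KVM_REQ_NMI             14	/* existing: drives process_nmi() above */
#define KVM_REQ_IMMEDIATE_EXIT  15	/* new: exit as soon as possible after next entry */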