diff options
author | Liran Alon <liran.alon@oracle.com> | 2018-09-04 03:56:52 -0400 |
---|---|---|
committer | Paolo Bonzini <pbonzini@redhat.com> | 2018-09-19 18:51:44 -0400 |
commit | e6c67d8cf1173b229f0c4343d1cc7925eca11c11 (patch) | |
tree | ac7d0b8e88cf0c9820b78440803bfab121bde538 | |
parent | 5bea5123cbf08f990a1aee8f08c643a272e06a0f (diff) |
KVM: nVMX: Wake blocked vCPU in guest-mode if pending interrupt in virtual APICv
In case L1 does not intercept L2 HLT or enters L2 in HLT activity-state,
it is possible for a vCPU to be blocked while it is in guest-mode.
According to Intel SDM 26.6.5 Interrupt-Window Exiting and
Virtual-Interrupt Delivery: "These events wake the logical processor
if it just entered the HLT state because of a VM entry".
Therefore, if L1 enters L2 in HLT activity-state and L2 has a pending
deliverable interrupt in vmcs12->guest_intr_status.RVI, then the vCPU
should be woken from the HLT state and injected with the interrupt.
In addition, if while the vCPU is blocked (while it is in guest-mode),
it receives a nested posted-interrupt, then the vCPU should also be
woken and injected with the posted interrupt.
To handle these cases, this patch enhances kvm_vcpu_has_events() to also
check if there is a pending interrupt in L2 virtual APICv provided by
L1. That is, it evaluates if there is a pending virtual interrupt for L2
by checking RVI[7:4] > VPPR[7:4] as specified in Intel SDM 29.2.1
Evaluation of Pending Interrupts.
Note that this also handles the case of a nested posted-interrupt by the
fact that RVI is updated in vmx_complete_nested_posted_interrupt() which is
called from kvm_vcpu_check_block() -> kvm_arch_vcpu_runnable() ->
kvm_vcpu_running() -> vmx_check_nested_events() ->
vmx_complete_nested_posted_interrupt().
Reviewed-by: Nikita Leshenko <nikita.leshchenko@oracle.com>
Reviewed-by: Darren Kenny <darren.kenny@oracle.com>
Signed-off-by: Liran Alon <liran.alon@oracle.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r-- | arch/x86/include/asm/kvm_host.h | 1 | ||||
-rw-r--r-- | arch/x86/kvm/vmx.c | 22 | ||||
-rw-r--r-- | arch/x86/kvm/x86.c | 10 |
3 files changed, 32 insertions, 1 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index bffb25b50425..af63c2ca1616 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h | |||
@@ -1022,6 +1022,7 @@ struct kvm_x86_ops { | |||
1022 | void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); | 1022 | void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu); |
1023 | void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); | 1023 | void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr); |
1024 | void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); | 1024 | void (*hwapic_isr_update)(struct kvm_vcpu *vcpu, int isr); |
1025 | bool (*guest_apic_has_interrupt)(struct kvm_vcpu *vcpu); | ||
1025 | void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); | 1026 | void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap); |
1026 | void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); | 1027 | void (*set_virtual_apic_mode)(struct kvm_vcpu *vcpu); |
1027 | void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); | 1028 | void (*set_apic_access_page_addr)(struct kvm_vcpu *vcpu, hpa_t hpa); |
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 16e63a92992f..98b1203e8823 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c | |||
@@ -6189,6 +6189,27 @@ static void vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu) | |||
6189 | nested_mark_vmcs12_pages_dirty(vcpu); | 6189 | nested_mark_vmcs12_pages_dirty(vcpu); |
6190 | } | 6190 | } |
6191 | 6191 | ||
6192 | static bool vmx_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) | ||
6193 | { | ||
6194 | struct vcpu_vmx *vmx = to_vmx(vcpu); | ||
6195 | void *vapic_page; | ||
6196 | u32 vppr; | ||
6197 | int rvi; | ||
6198 | |||
6199 | if (WARN_ON_ONCE(!is_guest_mode(vcpu)) || | ||
6200 | !nested_cpu_has_vid(get_vmcs12(vcpu)) || | ||
6201 | WARN_ON_ONCE(!vmx->nested.virtual_apic_page)) | ||
6202 | return false; | ||
6203 | |||
6204 | rvi = vmcs_read16(GUEST_INTR_STATUS) & 0xff; | ||
6205 | |||
6206 | vapic_page = kmap(vmx->nested.virtual_apic_page); | ||
6207 | vppr = *((u32 *)(vapic_page + APIC_PROCPRI)); | ||
6208 | kunmap(vmx->nested.virtual_apic_page); | ||
6209 | |||
6210 | return ((rvi & 0xf0) > (vppr & 0xf0)); | ||
6211 | } | ||
6212 | |||
6192 | static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, | 6213 | static inline bool kvm_vcpu_trigger_posted_interrupt(struct kvm_vcpu *vcpu, |
6193 | bool nested) | 6214 | bool nested) |
6194 | { | 6215 | { |
@@ -14129,6 +14150,7 @@ static struct kvm_x86_ops vmx_x86_ops __ro_after_init = { | |||
14129 | .apicv_post_state_restore = vmx_apicv_post_state_restore, | 14150 | .apicv_post_state_restore = vmx_apicv_post_state_restore, |
14130 | .hwapic_irr_update = vmx_hwapic_irr_update, | 14151 | .hwapic_irr_update = vmx_hwapic_irr_update, |
14131 | .hwapic_isr_update = vmx_hwapic_isr_update, | 14152 | .hwapic_isr_update = vmx_hwapic_isr_update, |
14153 | .guest_apic_has_interrupt = vmx_guest_apic_has_interrupt, | ||
14132 | .sync_pir_to_irr = vmx_sync_pir_to_irr, | 14154 | .sync_pir_to_irr = vmx_sync_pir_to_irr, |
14133 | .deliver_posted_interrupt = vmx_deliver_posted_interrupt, | 14155 | .deliver_posted_interrupt = vmx_deliver_posted_interrupt, |
14134 | 14156 | ||
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 6f4789398876..5fea53cdc583 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c | |||
@@ -9206,6 +9206,13 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm, | |||
9206 | kvm_page_track_flush_slot(kvm, slot); | 9206 | kvm_page_track_flush_slot(kvm, slot); |
9207 | } | 9207 | } |
9208 | 9208 | ||
9209 | static inline bool kvm_guest_apic_has_interrupt(struct kvm_vcpu *vcpu) | ||
9210 | { | ||
9211 | return (is_guest_mode(vcpu) && | ||
9212 | kvm_x86_ops->guest_apic_has_interrupt && | ||
9213 | kvm_x86_ops->guest_apic_has_interrupt(vcpu)); | ||
9214 | } | ||
9215 | |||
9209 | static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) | 9216 | static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) |
9210 | { | 9217 | { |
9211 | if (!list_empty_careful(&vcpu->async_pf.done)) | 9218 | if (!list_empty_careful(&vcpu->async_pf.done)) |
@@ -9230,7 +9237,8 @@ static inline bool kvm_vcpu_has_events(struct kvm_vcpu *vcpu) | |||
9230 | return true; | 9237 | return true; |
9231 | 9238 | ||
9232 | if (kvm_arch_interrupt_allowed(vcpu) && | 9239 | if (kvm_arch_interrupt_allowed(vcpu) && |
9233 | kvm_cpu_has_interrupt(vcpu)) | 9240 | (kvm_cpu_has_interrupt(vcpu) || |
9241 | kvm_guest_apic_has_interrupt(vcpu))) | ||
9234 | return true; | 9242 | return true; |
9235 | 9243 | ||
9236 | if (kvm_hv_has_stimer_pending(vcpu)) | 9244 | if (kvm_hv_has_stimer_pending(vcpu)) |