author	Andrey Smetanin <asmetanin@virtuozzo.com>	2015-11-10 07:36:33 -0500
committer	Paolo Bonzini <pbonzini@redhat.com>	2015-11-25 11:24:21 -0500
commit	d62caabb41f33d96333f9ef15e09cd26e1c12760 (patch)
tree	44e92d54641d6f6c04fc683175bd39ce563ce6fa
parent	6308630bd3dbb6a8a883c4c571ce5e5a759a8a0e (diff)
kvm/x86: per-vcpu apicv deactivation support
The decision on whether to use hardware APIC virtualization used to be
taken globally, based on the availability of the feature in the CPU and
the value of a module parameter.

However, under certain circumstances we want to control it on a
per-vcpu basis. In particular, when the userspace activates the HyperV
synthetic interrupt controller (SynIC), APICv has to be disabled as
it's incompatible with SynIC auto-EOI behavior.

To achieve that, introduce an 'apicv_active' flag on struct
kvm_vcpu_arch, and a kvm_vcpu_deactivate_apicv() function to turn
APICv off. The flag is initialized based on the module parameter and
CPU capability, and consulted whenever an APICv-specific action is
performed.

Signed-off-by: Andrey Smetanin <asmetanin@virtuozzo.com>
Reviewed-by: Roman Kagan <rkagan@virtuozzo.com>
Signed-off-by: Denis V. Lunev <den@openvz.org>
CC: Gleb Natapov <gleb@kernel.org>
CC: Paolo Bonzini <pbonzini@redhat.com>
CC: Roman Kagan <rkagan@virtuozzo.com>
CC: Denis V. Lunev <den@openvz.org>
CC: qemu-devel@nongnu.org
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
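[Note: a minimal sketch of how a consumer of this new interface might
look. The kvm_hv_activate_synic() function below is hypothetical — the
actual SynIC wiring arrives in a later patch of this series — and only
vcpu->arch.apicv_active, kvm_vcpu_deactivate_apicv() and the
get_enable_apicv/refresh_apicv_exec_ctrl hooks are introduced here.]

/*
 * Hypothetical illustration, not part of this patch: a SynIC
 * activation path consuming the per-vcpu APICv interface.
 */
static int kvm_hv_activate_synic(struct kvm_vcpu *vcpu)
{
	/*
	 * SynIC auto-EOI is incompatible with hardware APIC
	 * virtualization, so turn APICv off for this vcpu only:
	 * kvm_vcpu_deactivate_apicv() clears vcpu->arch.apicv_active
	 * and calls kvm_x86_ops->refresh_apicv_exec_ctrl(), which on
	 * VMX rewrites the pin-based VM-execution controls so posted
	 * interrupts are no longer used.  Other vcpus keep APICv.
	 */
	kvm_vcpu_deactivate_apicv(vcpu);
	return 0;
}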
 arch/x86/include/asm/kvm_host.h |  6
 arch/x86/kvm/irq.c              |  2
 arch/x86/kvm/lapic.c            | 23
 arch/x86/kvm/lapic.h            |  4
 arch/x86/kvm/svm.c              | 11
 arch/x86/kvm/vmx.c              | 45
 arch/x86/kvm/x86.c              | 19
 7 files changed, 63 insertions(+), 47 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f6d8894f25b4..bac0d540f49c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -400,6 +400,7 @@ struct kvm_vcpu_arch {
 	u64 efer;
 	u64 apic_base;
 	struct kvm_lapic *apic;    /* kernel irqchip context */
+	bool apicv_active;
 	DECLARE_BITMAP(ioapic_handled_vectors, 256);
 	unsigned long apic_attention;
 	int32_t apic_arb_prio;
@@ -831,7 +832,8 @@ struct kvm_x86_ops {
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
-	int (*cpu_uses_apicv)(struct kvm_vcpu *vcpu);
+	bool (*get_enable_apicv)(void);
+	void (*refresh_apicv_exec_ctrl)(struct kvm_vcpu *vcpu);
 	void (*hwapic_irr_update)(struct kvm_vcpu *vcpu, int max_irr);
 	void (*hwapic_isr_update)(struct kvm *kvm, int isr);
 	void (*load_eoi_exitmap)(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap);
@@ -1086,6 +1088,8 @@ gpa_t kvm_mmu_gva_to_gpa_write(struct kvm_vcpu *vcpu, gva_t gva,
 gpa_t kvm_mmu_gva_to_gpa_system(struct kvm_vcpu *vcpu, gva_t gva,
 				struct x86_exception *exception);
 
+void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu);
+
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu);
 
 int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t gva, u32 error_code,
diff --git a/arch/x86/kvm/irq.c b/arch/x86/kvm/irq.c
index 097060e33bd6..3982b479bb5f 100644
--- a/arch/x86/kvm/irq.c
+++ b/arch/x86/kvm/irq.c
@@ -76,7 +76,7 @@ int kvm_cpu_has_injectable_intr(struct kvm_vcpu *v)
 	if (kvm_cpu_has_extint(v))
 		return 1;
 
-	if (kvm_vcpu_apic_vid_enabled(v))
+	if (kvm_vcpu_apicv_active(v))
 		return 0;
 
 	return kvm_apic_has_interrupt(v) != -1;	/* LAPIC */
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 9469d453abc8..618a20d5ca99 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -379,7 +379,8 @@ static inline int apic_find_highest_irr(struct kvm_lapic *apic)
 	if (!apic->irr_pending)
 		return -1;
 
-	kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
+	if (apic->vcpu->arch.apicv_active)
+		kvm_x86_ops->sync_pir_to_irr(apic->vcpu);
 	result = apic_search_irr(apic);
 	ASSERT(result == -1 || result >= 16);
 
@@ -392,7 +393,7 @@ static inline void apic_clear_irr(int vec, struct kvm_lapic *apic)
 
 	vcpu = apic->vcpu;
 
-	if (unlikely(kvm_vcpu_apic_vid_enabled(vcpu))) {
+	if (unlikely(vcpu->arch.apicv_active)) {
 		/* try to update RVI */
 		apic_clear_vector(vec, apic->regs + APIC_IRR);
 		kvm_make_request(KVM_REQ_EVENT, vcpu);
@@ -418,7 +419,7 @@ static inline void apic_set_isr(int vec, struct kvm_lapic *apic)
 	 * because the processor can modify ISR under the hood. Instead
 	 * just set SVI.
 	 */
-	if (unlikely(kvm_x86_ops->hwapic_isr_update))
+	if (unlikely(vcpu->arch.apicv_active))
 		kvm_x86_ops->hwapic_isr_update(vcpu->kvm, vec);
 	else {
 		++apic->isr_count;
@@ -466,7 +467,7 @@ static inline void apic_clear_isr(int vec, struct kvm_lapic *apic)
 	 * on the other hand isr_count and highest_isr_cache are unused
 	 * and must be left alone.
 	 */
-	if (unlikely(kvm_x86_ops->hwapic_isr_update))
+	if (unlikely(vcpu->arch.apicv_active))
 		kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
 					       apic_find_highest_isr(apic));
 	else {
@@ -852,7 +853,7 @@ static int __apic_accept_irq(struct kvm_lapic *apic, int delivery_mode,
 			apic_clear_vector(vector, apic->regs + APIC_TMR);
 		}
 
-		if (kvm_x86_ops->deliver_posted_interrupt)
+		if (vcpu->arch.apicv_active)
 			kvm_x86_ops->deliver_posted_interrupt(vcpu, vector);
 		else {
 			apic_set_irr(vector, apic);
@@ -1225,7 +1226,7 @@ static bool lapic_timer_int_injected(struct kvm_vcpu *vcpu)
 		int vec = reg & APIC_VECTOR_MASK;
 		void *bitmap = apic->regs + APIC_ISR;
 
-		if (kvm_x86_ops->deliver_posted_interrupt)
+		if (vcpu->arch.apicv_active)
 			bitmap = apic->regs + APIC_IRR;
 
 		if (apic_test_vector(vec, bitmap))
@@ -1693,8 +1694,8 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu, bool init_event)
 		apic_set_reg(apic, APIC_ISR + 0x10 * i, 0);
 		apic_set_reg(apic, APIC_TMR + 0x10 * i, 0);
 	}
-	apic->irr_pending = kvm_vcpu_apic_vid_enabled(vcpu);
-	apic->isr_count = kvm_x86_ops->hwapic_isr_update ? 1 : 0;
+	apic->irr_pending = vcpu->arch.apicv_active;
+	apic->isr_count = vcpu->arch.apicv_active ? 1 : 0;
 	apic->highest_isr_cache = -1;
 	update_divide_count(apic);
 	atomic_set(&apic->lapic_timer.pending, 0);
@@ -1906,15 +1907,15 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 	update_divide_count(apic);
 	start_apic_timer(apic);
 	apic->irr_pending = true;
-	apic->isr_count = kvm_x86_ops->hwapic_isr_update ?
+	apic->isr_count = vcpu->arch.apicv_active ?
 		1 : count_vectors(apic->regs + APIC_ISR);
 	apic->highest_isr_cache = -1;
-	if (kvm_x86_ops->hwapic_irr_update)
+	if (vcpu->arch.apicv_active) {
 		kvm_x86_ops->hwapic_irr_update(vcpu,
 				apic_find_highest_irr(apic));
-	if (unlikely(kvm_x86_ops->hwapic_isr_update))
 		kvm_x86_ops->hwapic_isr_update(vcpu->kvm,
 				apic_find_highest_isr(apic));
+	}
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	if (ioapic_in_kernel(vcpu->kvm))
 		kvm_rtc_eoi_tracking_restore_one(vcpu);
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index fde8e35d5850..5fc60e4bb4e2 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -143,9 +143,9 @@ static inline int apic_x2apic_mode(struct kvm_lapic *apic)
 	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
 }
 
-static inline bool kvm_vcpu_apic_vid_enabled(struct kvm_vcpu *vcpu)
+static inline bool kvm_vcpu_apicv_active(struct kvm_vcpu *vcpu)
 {
-	return kvm_x86_ops->cpu_uses_apicv(vcpu);
+	return vcpu->arch.apic && vcpu->arch.apicv_active;
 }
 
 static inline bool kvm_apic_has_events(struct kvm_vcpu *vcpu)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index ebb76e8a91e1..2401fc88905b 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3559,9 +3559,13 @@ static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	return;
 }
 
-static int svm_cpu_uses_apicv(struct kvm_vcpu *vcpu)
+static bool svm_get_enable_apicv(void)
+{
+	return false;
+}
+
+static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
 {
-	return 0;
 }
 
 static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
@@ -4328,7 +4332,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
 	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
-	.cpu_uses_apicv = svm_cpu_uses_apicv,
+	.get_enable_apicv = svm_get_enable_apicv,
+	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
 	.load_eoi_exitmap = svm_load_eoi_exitmap,
 	.sync_pir_to_irr = svm_sync_pir_to_irr,
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index c8a87c94dc81..1a8bfaab89c7 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -19,6 +19,7 @@
 #include "irq.h"
 #include "mmu.h"
 #include "cpuid.h"
+#include "lapic.h"
 
 #include <linux/kvm_host.h>
 #include <linux/module.h>
@@ -862,7 +863,6 @@ static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
 static bool vmx_mpx_supported(void);
 static bool vmx_xsaves_supported(void);
-static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
 static void vmx_set_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
@@ -870,7 +870,6 @@ static void vmx_get_segment(struct kvm_vcpu *vcpu,
 			    struct kvm_segment *var, int seg);
 static bool guest_state_valid(struct kvm_vcpu *vcpu);
 static u32 vmx_segment_access_rights(struct kvm_segment *var);
-static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu);
 static void copy_vmcs12_to_shadow(struct vcpu_vmx *vmx);
 static void copy_shadow_to_vmcs12(struct vcpu_vmx *vmx);
 static int alloc_identity_pagetable(struct kvm *kvm);
@@ -2498,7 +2497,7 @@ static void nested_vmx_setup_ctls_msrs(struct vcpu_vmx *vmx)
 	vmx->nested.nested_vmx_pinbased_ctls_high |=
 		PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR |
 		PIN_BASED_VMX_PREEMPTION_TIMER;
-	if (vmx_cpu_uses_apicv(&vmx->vcpu))
+	if (kvm_vcpu_apicv_active(&vmx->vcpu))
 		vmx->nested.nested_vmx_pinbased_ctls_high |=
 			PIN_BASED_POSTED_INTR;
 
@@ -4462,9 +4461,9 @@ static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
 					   msr, MSR_TYPE_W);
 }
 
-static int vmx_cpu_uses_apicv(struct kvm_vcpu *vcpu)
+static bool vmx_get_enable_apicv(void)
 {
-	return enable_apicv && lapic_in_kernel(vcpu);
+	return enable_apicv;
 }
 
 static int vmx_complete_nested_posted_interrupt(struct kvm_vcpu *vcpu)
@@ -4586,11 +4585,6 @@ static void vmx_sync_pir_to_irr(struct kvm_vcpu *vcpu)
 	kvm_apic_update_irr(vcpu, vmx->pi_desc.pir);
 }
 
-static void vmx_sync_pir_to_irr_dummy(struct kvm_vcpu *vcpu)
-{
-	return;
-}
-
 /*
  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
  * will not change in the lifetime of the guest.
@@ -4660,11 +4654,18 @@ static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
 {
 	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
 
-	if (!vmx_cpu_uses_apicv(&vmx->vcpu))
+	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
 		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
 	return pin_based_exec_ctrl;
 }
 
+static void vmx_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
+	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
+}
+
 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 {
 	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
@@ -4703,7 +4704,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 		exec_control &= ~SECONDARY_EXEC_UNRESTRICTED_GUEST;
 	if (!ple_gap)
 		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
-	if (!vmx_cpu_uses_apicv(&vmx->vcpu))
+	if (!kvm_vcpu_apicv_active(&vmx->vcpu))
 		exec_control &= ~(SECONDARY_EXEC_APIC_REGISTER_VIRT |
 				  SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY);
 	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
@@ -4767,7 +4768,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write32(SECONDARY_VM_EXEC_CONTROL,
 		     vmx_secondary_exec_control(vmx));
 
-	if (vmx_cpu_uses_apicv(&vmx->vcpu)) {
+	if (kvm_vcpu_apicv_active(&vmx->vcpu)) {
 		vmcs_write64(EOI_EXIT_BITMAP0, 0);
 		vmcs_write64(EOI_EXIT_BITMAP1, 0);
 		vmcs_write64(EOI_EXIT_BITMAP2, 0);
@@ -4919,7 +4920,7 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
 
 	kvm_make_request(KVM_REQ_APIC_PAGE_RELOAD, vcpu);
 
-	if (vmx_cpu_uses_apicv(vcpu))
+	if (kvm_vcpu_apicv_active(vcpu))
 		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
 
 	if (vmx->vpid != 0)
@@ -6203,15 +6204,6 @@ static __init int hardware_setup(void)
 		kvm_tsc_scaling_ratio_frac_bits = 48;
 	}
 
-	if (enable_apicv)
-		kvm_x86_ops->update_cr8_intercept = NULL;
-	else {
-		kvm_x86_ops->hwapic_irr_update = NULL;
-		kvm_x86_ops->hwapic_isr_update = NULL;
-		kvm_x86_ops->deliver_posted_interrupt = NULL;
-		kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
-	}
-
 	vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
 	vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
 	vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
@@ -8152,7 +8144,7 @@ static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
 	 * apicv
 	 */
 	if (!cpu_has_vmx_virtualize_x2apic_mode() ||
-	    !vmx_cpu_uses_apicv(vcpu))
+	    !kvm_vcpu_apicv_active(vcpu))
 		return;
 
 	if (!cpu_need_tpr_shadow(vcpu))
@@ -8259,7 +8251,7 @@ static void vmx_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
 
 static void vmx_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
 {
-	if (!vmx_cpu_uses_apicv(vcpu))
+	if (!kvm_vcpu_apicv_active(vcpu))
 		return;
 
 	vmcs_write64(EOI_EXIT_BITMAP0, eoi_exit_bitmap[0]);
@@ -10803,7 +10795,8 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.update_cr8_intercept = update_cr8_intercept,
 	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
 	.set_apic_access_page_addr = vmx_set_apic_access_page_addr,
-	.cpu_uses_apicv = vmx_cpu_uses_apicv,
+	.get_enable_apicv = vmx_get_enable_apicv,
+	.refresh_apicv_exec_ctrl = vmx_refresh_apicv_exec_ctrl,
 	.load_eoi_exitmap = vmx_load_eoi_exitmap,
 	.hwapic_irr_update = vmx_hwapic_irr_update,
 	.hwapic_isr_update = vmx_hwapic_isr_update,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 9c69337a3d61..f0250a092ef3 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2748,7 +2748,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
 {
-	kvm_x86_ops->sync_pir_to_irr(vcpu);
+	if (vcpu->arch.apicv_active)
+		kvm_x86_ops->sync_pir_to_irr(vcpu);
+
 	memcpy(s->regs, vcpu->arch.apic->regs, sizeof *s);
 
 	return 0;
@@ -5867,6 +5869,12 @@ static void kvm_pv_kick_cpu_op(struct kvm *kvm, unsigned long flags, int apicid)
 	kvm_irq_delivery_to_apic(kvm, NULL, &lapic_irq, NULL);
 }
 
+void kvm_vcpu_deactivate_apicv(struct kvm_vcpu *vcpu)
+{
+	vcpu->arch.apicv_active = false;
+	kvm_x86_ops->refresh_apicv_exec_ctrl(vcpu);
+}
+
 int kvm_emulate_hypercall(struct kvm_vcpu *vcpu)
 {
 	unsigned long nr, a0, a1, a2, a3, ret;
@@ -5960,6 +5968,9 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 	if (!vcpu->arch.apic)
 		return;
 
+	if (vcpu->arch.apicv_active)
+		return;
+
 	if (!vcpu->arch.apic->vapic_addr)
 		max_irr = kvm_lapic_find_highest_irr(vcpu);
 	else
@@ -6306,7 +6317,8 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
 	if (irqchip_split(vcpu->kvm))
 		kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
 	else {
-		kvm_x86_ops->sync_pir_to_irr(vcpu);
+		if (vcpu->arch.apicv_active)
+			kvm_x86_ops->sync_pir_to_irr(vcpu);
 		kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
 	}
 	kvm_x86_ops->load_eoi_exitmap(vcpu,
@@ -6453,7 +6465,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		 * Update architecture specific hints for APIC
 		 * virtual interrupt delivery.
 		 */
-		if (kvm_x86_ops->hwapic_irr_update)
+		if (vcpu->arch.apicv_active)
 			kvm_x86_ops->hwapic_irr_update(vcpu,
 				kvm_lapic_find_highest_irr(vcpu));
 	}
@@ -7524,6 +7536,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 	BUG_ON(vcpu->kvm == NULL);
 	kvm = vcpu->kvm;
 
+	vcpu->arch.apicv_active = kvm_x86_ops->get_enable_apicv();
 	vcpu->arch.pv.pv_unhalted = false;
 	vcpu->arch.emulate_ctxt.ops = &emulate_ops;
 	if (!irqchip_in_kernel(kvm) || kvm_vcpu_is_reset_bsp(vcpu))