author     Yang Zhang <yang.z.zhang@Intel.com>     2013-04-11 07:25:12 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>   2013-04-16 15:32:40 -0400
commit     01e439be7753c163932538276f04f95cb1b66697 (patch)
tree       9636b625d251cbc06cb43207b6e0aeee89039e08 /arch/x86
parent     d78f2664832f8d70e36422af9a10e44276dced48 (diff)
KVM: VMX: Check the posted interrupt capability
Detect the posted interrupt feature. If it exists, then set it in vmcs_config.

Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Reviewed-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
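Context note (not part of this commit): the pi_desc structure introduced below is the hardware-defined posted-interrupt descriptor, a 64-byte, 64-byte-aligned block whose pir[] words hold one bit per interrupt vector and whose control word uses bit 0 as the outstanding-notification ("ON") bit. As a rough sketch of how a sender would later mark a vector pending in such a descriptor, assuming only the POSTED_INTR_ON and struct pi_desc definitions from this patch (the helper names here are illustrative, not taken from this commit):

/* Illustrative sketch only; these helpers are not part of this patch. */
static void pi_set_pir(struct pi_desc *pi_desc, int vector)
{
	/* One bit per vector in the 256-bit posted-interrupt request field. */
	set_bit(vector, (unsigned long *)pi_desc->pir);
}

static int pi_test_and_set_on(struct pi_desc *pi_desc)
{
	/* Bit 0 of control is the outstanding-notification bit. */
	return test_and_set_bit(POSTED_INTR_ON,
				(unsigned long *)&pi_desc->control);
}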
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/vmx.h |  4
-rw-r--r--  arch/x86/kvm/vmx.c         | 82
2 files changed, 66 insertions(+), 20 deletions(-)
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index fc1c3134473b..6f07f1999138 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -71,6 +71,7 @@
 #define PIN_BASED_NMI_EXITING                   0x00000008
 #define PIN_BASED_VIRTUAL_NMIS                  0x00000020
 #define PIN_BASED_VMX_PREEMPTION_TIMER          0x00000040
+#define PIN_BASED_POSTED_INTR                   0x00000080
 
 #define PIN_BASED_ALWAYSON_WITHOUT_TRUE_MSR     0x00000016
 
@@ -102,6 +103,7 @@
 /* VMCS Encodings */
 enum vmcs_field {
 	VIRTUAL_PROCESSOR_ID            = 0x00000000,
+	POSTED_INTR_NV                  = 0x00000002,
 	GUEST_ES_SELECTOR               = 0x00000800,
 	GUEST_CS_SELECTOR               = 0x00000802,
 	GUEST_SS_SELECTOR               = 0x00000804,
@@ -136,6 +138,8 @@ enum vmcs_field {
 	VIRTUAL_APIC_PAGE_ADDR_HIGH     = 0x00002013,
 	APIC_ACCESS_ADDR                = 0x00002014,
 	APIC_ACCESS_ADDR_HIGH           = 0x00002015,
+	POSTED_INTR_DESC_ADDR           = 0x00002016,
+	POSTED_INTR_DESC_ADDR_HIGH      = 0x00002017,
 	EPT_POINTER                     = 0x0000201a,
 	EPT_POINTER_HIGH                = 0x0000201b,
 	EOI_EXIT_BITMAP0                = 0x0000201c,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 7a7605f0444b..7607c6c09c74 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -84,7 +84,8 @@ module_param(vmm_exclusive, bool, S_IRUGO);
 static bool __read_mostly fasteoi = 1;
 module_param(fasteoi, bool, S_IRUGO);
 
-static bool __read_mostly enable_apicv_reg_vid;
+static bool __read_mostly enable_apicv;
+module_param(enable_apicv, bool, S_IRUGO);
 
 /*
  * If nested=1, nested virtualization is supported, i.e., guests may use
@@ -366,6 +367,14 @@ struct nested_vmx {
 	struct page *apic_access_page;
 };
 
+#define POSTED_INTR_ON  0
+/* Posted-Interrupt Descriptor */
+struct pi_desc {
+	u32 pir[8];     /* Posted interrupt requested */
+	u32 control;    /* bit 0 of control is outstanding notification bit */
+	u32 rsvd[7];
+} __aligned(64);
+
 struct vcpu_vmx {
 	struct kvm_vcpu vcpu;
 	unsigned long host_rsp;
@@ -430,6 +439,9 @@ struct vcpu_vmx {
 
 	bool rdtscp_enabled;
 
+	/* Posted interrupt descriptor */
+	struct pi_desc pi_desc;
+
 	/* Support for a guest hypervisor (nested VMX) */
 	struct nested_vmx nested;
 };
@@ -785,6 +797,18 @@ static inline bool cpu_has_vmx_virtual_intr_delivery(void)
 		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY;
 }
 
+static inline bool cpu_has_vmx_posted_intr(void)
+{
+	return vmcs_config.pin_based_exec_ctrl & PIN_BASED_POSTED_INTR;
+}
+
+static inline bool cpu_has_vmx_apicv(void)
+{
+	return cpu_has_vmx_apic_register_virt() &&
+		cpu_has_vmx_virtual_intr_delivery() &&
+		cpu_has_vmx_posted_intr();
+}
+
 static inline bool cpu_has_vmx_flexpriority(void)
 {
 	return cpu_has_vmx_tpr_shadow() &&
@@ -2552,12 +2576,6 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	u32 _vmexit_control = 0;
 	u32 _vmentry_control = 0;
 
-	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
-	opt = PIN_BASED_VIRTUAL_NMIS;
-	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
-				&_pin_based_exec_control) < 0)
-		return -EIO;
-
 	min = CPU_BASED_HLT_EXITING |
 #ifdef CONFIG_X86_64
 	      CPU_BASED_CR8_LOAD_EXITING |
@@ -2634,6 +2652,17 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 				&_vmexit_control) < 0)
 		return -EIO;
 
+	min = PIN_BASED_EXT_INTR_MASK | PIN_BASED_NMI_EXITING;
+	opt = PIN_BASED_VIRTUAL_NMIS | PIN_BASED_POSTED_INTR;
+	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PINBASED_CTLS,
+				&_pin_based_exec_control) < 0)
+		return -EIO;
+
+	if (!(_cpu_based_2nd_exec_control &
+		SECONDARY_EXEC_VIRTUAL_INTR_DELIVERY) ||
+		!(_vmexit_control & VM_EXIT_ACK_INTR_ON_EXIT))
+		_pin_based_exec_control &= ~PIN_BASED_POSTED_INTR;
+
 	min = 0;
 	opt = VM_ENTRY_LOAD_IA32_PAT;
 	if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_ENTRY_CTLS,
@@ -2812,11 +2841,10 @@ static __init int hardware_setup(void)
 	if (!cpu_has_vmx_ple())
 		ple_gap = 0;
 
-	if (!cpu_has_vmx_apic_register_virt() ||
-				!cpu_has_vmx_virtual_intr_delivery())
-		enable_apicv_reg_vid = 0;
+	if (!cpu_has_vmx_apicv())
+		enable_apicv = 0;
 
-	if (enable_apicv_reg_vid)
+	if (enable_apicv)
 		kvm_x86_ops->update_cr8_intercept = NULL;
 	else
 		kvm_x86_ops->hwapic_irr_update = NULL;
@@ -3875,6 +3903,11 @@ static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
 			msr, MSR_TYPE_W);
 }
 
+static int vmx_vm_has_apicv(struct kvm *kvm)
+{
+	return enable_apicv && irqchip_in_kernel(kvm);
+}
+
 /*
  * Set up the vmcs's constant host-state fields, i.e., host-state fields that
  * will not change in the lifetime of the guest.
@@ -3935,6 +3968,15 @@ static void set_cr4_guest_host_mask(struct vcpu_vmx *vmx)
 	vmcs_writel(CR4_GUEST_HOST_MASK, ~vmx->vcpu.arch.cr4_guest_owned_bits);
 }
 
+static u32 vmx_pin_based_exec_ctrl(struct vcpu_vmx *vmx)
+{
+	u32 pin_based_exec_ctrl = vmcs_config.pin_based_exec_ctrl;
+
+	if (!vmx_vm_has_apicv(vmx->vcpu.kvm))
+		pin_based_exec_ctrl &= ~PIN_BASED_POSTED_INTR;
+	return pin_based_exec_ctrl;
+}
+
 static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 {
 	u32 exec_control = vmcs_config.cpu_based_exec_ctrl;
@@ -3952,11 +3994,6 @@ static u32 vmx_exec_control(struct vcpu_vmx *vmx)
 	return exec_control;
 }
 
-static int vmx_vm_has_apicv(struct kvm *kvm)
-{
-	return enable_apicv_reg_vid && irqchip_in_kernel(kvm);
-}
-
 static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 {
 	u32 exec_control = vmcs_config.cpu_based_2nd_exec_ctrl;
@@ -4012,8 +4049,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 	vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
 
 	/* Control */
-	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL,
-		vmcs_config.pin_based_exec_ctrl);
+	vmcs_write32(PIN_BASED_VM_EXEC_CONTROL, vmx_pin_based_exec_ctrl(vmx));
 
 	vmcs_write32(CPU_BASED_VM_EXEC_CONTROL, vmx_exec_control(vmx));
 
@@ -4022,13 +4058,16 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 				vmx_secondary_exec_control(vmx));
 	}
 
-	if (enable_apicv_reg_vid) {
+	if (vmx_vm_has_apicv(vmx->vcpu.kvm)) {
 		vmcs_write64(EOI_EXIT_BITMAP0, 0);
 		vmcs_write64(EOI_EXIT_BITMAP1, 0);
 		vmcs_write64(EOI_EXIT_BITMAP2, 0);
 		vmcs_write64(EOI_EXIT_BITMAP3, 0);
 
 		vmcs_write16(GUEST_INTR_STATUS, 0);
+
+		vmcs_write64(POSTED_INTR_NV, POSTED_INTR_VECTOR);
+		vmcs_write64(POSTED_INTR_DESC_ADDR, __pa((&vmx->pi_desc)));
 	}
 
 	if (ple_gap) {
@@ -4170,6 +4209,9 @@ static void vmx_vcpu_reset(struct kvm_vcpu *vcpu)
 		vmcs_write64(APIC_ACCESS_ADDR,
 			     page_to_phys(vmx->vcpu.kvm->arch.apic_access_page));
 
+	if (vmx_vm_has_apicv(vcpu->kvm))
+		memset(&vmx->pi_desc, 0, sizeof(struct pi_desc));
+
 	if (vmx->vpid != 0)
 		vmcs_write16(VIRTUAL_PROCESSOR_ID, vmx->vpid);
 
@@ -7842,7 +7884,7 @@ static int __init vmx_init(void)
 	memcpy(vmx_msr_bitmap_longmode_x2apic,
 			vmx_msr_bitmap_longmode, PAGE_SIZE);
 
-	if (enable_apicv_reg_vid) {
+	if (enable_apicv) {
 		for (msr = 0x800; msr <= 0x8ff; msr++)
 			vmx_disable_intercept_msr_read_x2apic(msr);
 