author		Yang Zhang <yang.z.zhang@Intel.com>	2013-01-24 21:18:50 -0500
committer	Gleb Natapov <gleb@redhat.com>	2013-01-29 03:48:06 -0500
commit		8d14695f9542e9e0195d6e41ddaa52c32322adf5 (patch)
tree		30d723f19e2c13881f073280d6a3476549d15f83 /arch
parent		83d4c286931c9d28c5be21bac3c73a2332cab681 (diff)
x86, apicv: add virtual x2apic support
Basically, to benefit from apicv we need to enable virtualized x2apic mode. Currently, we only enable it when the guest is really using x2apic.

Also, clear the MSR bitmap for the corresponding x2apic MSRs when the guest enables x2apic:
    0x800 - 0x8ff: no read intercept for apicv register virtualization,
    except APIC ID and TMCCT, which need software assistance to get the
    right value.

Reviewed-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Kevin Tian <kevin.tian@intel.com>
Signed-off-by: Yang Zhang <yang.z.zhang@Intel.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
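For readers mapping these MSR numbers back to APIC registers: in x2APIC mode, the legacy xAPIC register at MMIO offset off is architecturally exposed as MSR 0x800 + (off >> 4), which is why the 0x800 - 0x8ff range covers the entire APIC register space. A minimal illustrative helper (not part of the patch):

	/* Illustrative only: x2APIC MSR index for a legacy xAPIC MMIO offset. */
	static inline u32 x2apic_msr(u32 mmio_offset)
	{
		return 0x800 + (mmio_offset >> 4);
	}
	/*
	 * x2apic_msr(0x20)  == 0x802 (APIC ID)
	 * x2apic_msr(0x80)  == 0x808 (TPR)
	 * x2apic_msr(0x390) == 0x839 (TMCCT)
	 */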
Diffstat (limited to 'arch')
 -rw-r--r--  arch/x86/include/asm/kvm_host.h |   1
 -rw-r--r--  arch/x86/include/asm/vmx.h      |   1
 -rw-r--r--  arch/x86/kvm/lapic.c            |  19
 -rw-r--r--  arch/x86/kvm/lapic.h            |   5
 -rw-r--r--  arch/x86/kvm/svm.c              |   6
 -rw-r--r--  arch/x86/kvm/vmx.c              | 198
 6 files changed, 201 insertions(+), 29 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 77d56a4ba89c..d42c2839be98 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -699,6 +699,7 @@ struct kvm_x86_ops {
 	void (*enable_nmi_window)(struct kvm_vcpu *vcpu);
 	void (*enable_irq_window)(struct kvm_vcpu *vcpu);
 	void (*update_cr8_intercept)(struct kvm_vcpu *vcpu, int tpr, int irr);
+	void (*set_virtual_x2apic_mode)(struct kvm_vcpu *vcpu, bool set);
 	int (*set_tss_addr)(struct kvm *kvm, unsigned int addr);
 	int (*get_tdp_level)(void);
 	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
diff --git a/arch/x86/include/asm/vmx.h b/arch/x86/include/asm/vmx.h
index 44c3f7eb4532..0a54df0b36fc 100644
--- a/arch/x86/include/asm/vmx.h
+++ b/arch/x86/include/asm/vmx.h
@@ -139,6 +139,7 @@
 #define SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES 0x00000001
 #define SECONDARY_EXEC_ENABLE_EPT               0x00000002
 #define SECONDARY_EXEC_RDTSCP                   0x00000008
+#define SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE   0x00000010
 #define SECONDARY_EXEC_ENABLE_VPID              0x00000020
 #define SECONDARY_EXEC_WBINVD_EXITING           0x00000040
 #define SECONDARY_EXEC_UNRESTRICTED_GUEST       0x00000080
diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
index 0664c138e860..f69fc5077a89 100644
--- a/arch/x86/kvm/lapic.c
+++ b/arch/x86/kvm/lapic.c
@@ -140,11 +140,6 @@ static inline int apic_enabled(struct kvm_lapic *apic)
 	(LVT_MASK | APIC_MODE_MASK | APIC_INPUT_POLARITY | \
 	 APIC_LVT_REMOTE_IRR | APIC_LVT_LEVEL_TRIGGER)
 
-static inline int apic_x2apic_mode(struct kvm_lapic *apic)
-{
-	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
-}
-
 static inline int kvm_apic_id(struct kvm_lapic *apic)
 {
 	return (kvm_apic_get_reg(apic, APIC_ID) >> 24) & 0xff;
@@ -1303,6 +1298,7 @@ u64 kvm_lapic_get_cr8(struct kvm_vcpu *vcpu)
 
 void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 {
+	u64 old_value = vcpu->arch.apic_base;
 	struct kvm_lapic *apic = vcpu->arch.apic;
 
 	if (!apic) {
@@ -1324,11 +1320,16 @@ void kvm_lapic_set_base(struct kvm_vcpu *vcpu, u64 value)
 		value &= ~MSR_IA32_APICBASE_BSP;
 
 	vcpu->arch.apic_base = value;
-	if (apic_x2apic_mode(apic)) {
-		u32 id = kvm_apic_id(apic);
-		u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
-		kvm_apic_set_ldr(apic, ldr);
+	if ((old_value ^ value) & X2APIC_ENABLE) {
+		if (value & X2APIC_ENABLE) {
+			u32 id = kvm_apic_id(apic);
+			u32 ldr = ((id >> 4) << 16) | (1 << (id & 0xf));
+			kvm_apic_set_ldr(apic, ldr);
+			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, true);
+		} else
+			kvm_x86_ops->set_virtual_x2apic_mode(vcpu, false);
 	}
+
 	apic->base_address = apic->vcpu->arch.apic_base &
 			     MSR_IA32_APICBASE_BASE;
 
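Note how the rewritten kvm_lapic_set_base() acts only on a transition of the x2apic enable bit, not on every APIC base write: the XOR of the old and new base values isolates the bits that actually flipped. A sketch of the idiom, using the names from the patch:

	u64 changed = old_value ^ value;	/* bits that differ old vs. new */
	if (changed & X2APIC_ENABLE) {		/* x2apic mode toggled...       */
		if (value & X2APIC_ENABLE)	/* ...and is now on             */
			/* enter virtual x2apic mode */;
		else				/* ...and is now off            */
			/* leave virtual x2apic mode */;
	}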
diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h
index 9a8ee22bc7a3..22a5397b638c 100644
--- a/arch/x86/kvm/lapic.h
+++ b/arch/x86/kvm/lapic.h
@@ -126,4 +126,9 @@ static inline int kvm_lapic_enabled(struct kvm_vcpu *vcpu)
 	return kvm_apic_present(vcpu) && kvm_apic_sw_enabled(vcpu->arch.apic);
 }
 
+static inline int apic_x2apic_mode(struct kvm_lapic *apic)
+{
+	return apic->vcpu->arch.apic_base & X2APIC_ENABLE;
+}
+
 #endif
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index d29d3cd1c156..38407e9fd1bd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3571,6 +3571,11 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 		set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
 }
 
+static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
+{
+	return;
+}
+
 static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
@@ -4290,6 +4295,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.enable_nmi_window = enable_nmi_window,
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
+	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
 
 	.set_tss_addr = svm_set_tss_addr,
 	.get_tdp_level = get_npt_level,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5ad7c8531083..3ce8a1629330 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -643,6 +643,8 @@ static unsigned long *vmx_io_bitmap_a;
 static unsigned long *vmx_io_bitmap_b;
 static unsigned long *vmx_msr_bitmap_legacy;
 static unsigned long *vmx_msr_bitmap_longmode;
+static unsigned long *vmx_msr_bitmap_legacy_x2apic;
+static unsigned long *vmx_msr_bitmap_longmode_x2apic;
 
 static bool cpu_has_load_ia32_efer;
 static bool cpu_has_load_perf_global_ctrl;
@@ -767,6 +769,12 @@ static inline bool cpu_has_vmx_virtualize_apic_accesses(void)
 		SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
 }
 
+static inline bool cpu_has_vmx_virtualize_x2apic_mode(void)
+{
+	return vmcs_config.cpu_based_2nd_exec_ctrl &
+		SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+}
+
 static inline bool cpu_has_vmx_apic_register_virt(void)
 {
 	return vmcs_config.cpu_based_2nd_exec_ctrl &
@@ -1830,6 +1838,25 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 	vmx->guest_msrs[from] = tmp;
 }
 
+static void vmx_set_msr_bitmap(struct kvm_vcpu *vcpu)
+{
+	unsigned long *msr_bitmap;
+
+	if (irqchip_in_kernel(vcpu->kvm) && apic_x2apic_mode(vcpu->arch.apic)) {
+		if (is_long_mode(vcpu))
+			msr_bitmap = vmx_msr_bitmap_longmode_x2apic;
+		else
+			msr_bitmap = vmx_msr_bitmap_legacy_x2apic;
+	} else {
+		if (is_long_mode(vcpu))
+			msr_bitmap = vmx_msr_bitmap_longmode;
+		else
+			msr_bitmap = vmx_msr_bitmap_legacy;
+	}
+
+	vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
+}
+
 /*
  * Set up the vmcs to automatically save and restore system
  * msrs. Don't touch the 64-bit msrs if the guest is in legacy
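vmx_set_msr_bitmap() above selects one of four preallocated bitmap pages; the choice is a simple two-by-two matrix of guest CPU mode against APIC mode:

	                xapic (or no in-kernel irqchip)  x2apic
	long mode       vmx_msr_bitmap_longmode          vmx_msr_bitmap_longmode_x2apic
	legacy mode     vmx_msr_bitmap_legacy            vmx_msr_bitmap_legacy_x2apic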
@@ -1838,7 +1865,6 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
 	int save_nmsrs, index;
-	unsigned long *msr_bitmap;
 
 	save_nmsrs = 0;
 #ifdef CONFIG_X86_64
@@ -1870,14 +1896,8 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 
 	vmx->save_nmsrs = save_nmsrs;
 
-	if (cpu_has_vmx_msr_bitmap()) {
-		if (is_long_mode(&vmx->vcpu))
-			msr_bitmap = vmx_msr_bitmap_longmode;
-		else
-			msr_bitmap = vmx_msr_bitmap_legacy;
-
-		vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
-	}
+	if (cpu_has_vmx_msr_bitmap())
+		vmx_set_msr_bitmap(&vmx->vcpu);
 }
 
 /*
@@ -2543,6 +2563,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 	if (_cpu_based_exec_control & CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) {
 		min2 = 0;
 		opt2 = SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES |
+			SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE |
 			SECONDARY_EXEC_WBINVD_EXITING |
 			SECONDARY_EXEC_ENABLE_VPID |
 			SECONDARY_EXEC_ENABLE_EPT |
@@ -2564,7 +2585,8 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
 
 	if (!(_cpu_based_exec_control & CPU_BASED_TPR_SHADOW))
 		_cpu_based_2nd_exec_control &= ~(
-				SECONDARY_EXEC_APIC_REGISTER_VIRT);
+				SECONDARY_EXEC_APIC_REGISTER_VIRT |
+				SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE);
 
 	if (_cpu_based_2nd_exec_control & SECONDARY_EXEC_ENABLE_EPT) {
 		/* CR3 accesses and invlpg don't need to cause VM Exits when EPT
@@ -3725,7 +3747,10 @@ static void free_vpid(struct vcpu_vmx *vmx)
 	spin_unlock(&vmx_vpid_lock);
 }
 
-static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
+#define MSR_TYPE_R	1
+#define MSR_TYPE_W	2
+static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
+						u32 msr, int type)
 {
 	int f = sizeof(unsigned long);
 
@@ -3738,20 +3763,93 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
 	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
 	 */
 	if (msr <= 0x1fff) {
-		__clear_bit(msr, msr_bitmap + 0x000 / f); /* read-low */
-		__clear_bit(msr, msr_bitmap + 0x800 / f); /* write-low */
+		if (type & MSR_TYPE_R)
+			/* read-low */
+			__clear_bit(msr, msr_bitmap + 0x000 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-low */
+			__clear_bit(msr, msr_bitmap + 0x800 / f);
+
 	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
 		msr &= 0x1fff;
-		__clear_bit(msr, msr_bitmap + 0x400 / f); /* read-high */
-		__clear_bit(msr, msr_bitmap + 0xc00 / f); /* write-high */
+		if (type & MSR_TYPE_R)
+			/* read-high */
+			__clear_bit(msr, msr_bitmap + 0x400 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-high */
+			__clear_bit(msr, msr_bitmap + 0xc00 / f);
+
+	}
+}
+
+static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+						u32 msr, int type)
+{
+	int f = sizeof(unsigned long);
+
+	if (!cpu_has_vmx_msr_bitmap())
+		return;
+
+	/*
+	 * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+	 * have the write-low and read-high bitmap offsets the wrong way round.
+	 * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+	 */
+	if (msr <= 0x1fff) {
+		if (type & MSR_TYPE_R)
+			/* read-low */
+			__set_bit(msr, msr_bitmap + 0x000 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-low */
+			__set_bit(msr, msr_bitmap + 0x800 / f);
+
+	} else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+		msr &= 0x1fff;
+		if (type & MSR_TYPE_R)
+			/* read-high */
+			__set_bit(msr, msr_bitmap + 0x400 / f);
+
+		if (type & MSR_TYPE_W)
+			/* write-high */
+			__set_bit(msr, msr_bitmap + 0xc00 / f);
+
 	}
 }
 
 static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
 {
 	if (!longmode_only)
-		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
-	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
+		__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy,
+						msr, MSR_TYPE_R | MSR_TYPE_W);
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode,
+						msr, MSR_TYPE_R | MSR_TYPE_W);
+}
+
+static void vmx_enable_intercept_msr_read_x2apic(u32 msr)
+{
+	__vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+			msr, MSR_TYPE_R);
+	__vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+			msr, MSR_TYPE_R);
+}
+
+static void vmx_disable_intercept_msr_read_x2apic(u32 msr)
+{
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+			msr, MSR_TYPE_R);
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+			msr, MSR_TYPE_R);
+}
+
+static void vmx_disable_intercept_msr_write_x2apic(u32 msr)
+{
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy_x2apic,
+			msr, MSR_TYPE_W);
+	__vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode_x2apic,
+			msr, MSR_TYPE_W);
 }
 
 /*
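For reference, the offsets used by these helpers follow the VMX MSR-bitmap layout (a single 4 KiB page, per the SDM section cited in the code). A clear bit means the access is not intercepted; a set bit forces a VM exit:

	0x000 - 0x3ff	read bitmap,	MSRs 0x00000000 - 0x00001fff	("read-low")
	0x400 - 0x7ff	read bitmap,	MSRs 0xc0000000 - 0xc0001fff	("read-high")
	0x800 - 0xbff	write bitmap,	MSRs 0x00000000 - 0x00001fff	("write-low")
	0xc00 - 0xfff	write bitmap,	MSRs 0xc0000000 - 0xc0001fff	("write-high")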
@@ -3849,6 +3947,7 @@ static u32 vmx_secondary_exec_control(struct vcpu_vmx *vmx)
 		exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;
 	if (!enable_apicv_reg || !irqchip_in_kernel(vmx->vcpu.kvm))
 		exec_control &= ~SECONDARY_EXEC_APIC_REGISTER_VIRT;
+	exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
 	return exec_control;
 }
 
@@ -6101,6 +6200,34 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
 	vmcs_write32(TPR_THRESHOLD, irr);
 }
 
+static void vmx_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
+{
+	u32 sec_exec_control;
+
+	/*
+	 * There is no point in enabling virtualized x2apic mode
+	 * without enabling apicv.
+	 */
+	if (!cpu_has_vmx_virtualize_x2apic_mode() || !enable_apicv_reg)
+		return;
+
+	if (!vm_need_tpr_shadow(vcpu->kvm))
+		return;
+
+	sec_exec_control = vmcs_read32(SECONDARY_VM_EXEC_CONTROL);
+
+	if (set) {
+		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+	} else {
+		sec_exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_X2APIC_MODE;
+		sec_exec_control |= SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
+	}
+	vmcs_write32(SECONDARY_VM_EXEC_CONTROL, sec_exec_control);
+
+	vmx_set_msr_bitmap(vcpu);
+}
+
 static void vmx_complete_atomic_exit(struct vcpu_vmx *vmx)
 {
 	u32 exit_intr_info;
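Two details of vmx_set_virtual_x2apic_mode() are worth spelling out. First, per the SDM's VM-entry checks, "virtualize x2APIC mode" requires the "use TPR shadow" control, hence the vm_need_tpr_shadow() early return. Second, "virtualize x2APIC mode" and "virtualize APIC accesses" must not be set at the same time, which is why the function always toggles the two bits as a pair:

	set == true :	VIRTUALIZE_X2APIC_MODE on,  VIRTUALIZE_APIC_ACCESSES off
	set == false:	VIRTUALIZE_X2APIC_MODE off, VIRTUALIZE_APIC_ACCESSES on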
@@ -7364,6 +7491,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 	.enable_nmi_window = enable_nmi_window,
 	.enable_irq_window = enable_irq_window,
 	.update_cr8_intercept = update_cr8_intercept,
+	.set_virtual_x2apic_mode = vmx_set_virtual_x2apic_mode,
 
 	.set_tss_addr = vmx_set_tss_addr,
 	.get_tdp_level = get_ept_level,
@@ -7396,7 +7524,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 static int __init vmx_init(void)
 {
-	int r, i;
+	int r, i, msr;
 
 	rdmsrl_safe(MSR_EFER, &host_efer);
 
@@ -7417,11 +7545,19 @@ static int __init vmx_init(void)
 	if (!vmx_msr_bitmap_legacy)
 		goto out1;
 
+	vmx_msr_bitmap_legacy_x2apic =
+		(unsigned long *)__get_free_page(GFP_KERNEL);
+	if (!vmx_msr_bitmap_legacy_x2apic)
+		goto out2;
 
 	vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_msr_bitmap_longmode)
-		goto out2;
+		goto out3;
 
+	vmx_msr_bitmap_longmode_x2apic =
+		(unsigned long *)__get_free_page(GFP_KERNEL);
+	if (!vmx_msr_bitmap_longmode_x2apic)
+		goto out4;
 
 	/*
 	 * Allow direct access to the PC debug port (it is often used for I/O
@@ -7453,6 +7589,24 @@ static int __init vmx_init(void)
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
 	vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
+	memcpy(vmx_msr_bitmap_legacy_x2apic,
+			vmx_msr_bitmap_legacy, PAGE_SIZE);
+	memcpy(vmx_msr_bitmap_longmode_x2apic,
+			vmx_msr_bitmap_longmode, PAGE_SIZE);
+
+	if (enable_apicv_reg) {
+		for (msr = 0x800; msr <= 0x8ff; msr++)
+			vmx_disable_intercept_msr_read_x2apic(msr);
+
+		/* According to the SDM, in x2apic mode the whole ID register
+		 * is used, but KVM uses only the highest eight bits, so reads
+		 * of it must still be intercepted. */
+		vmx_enable_intercept_msr_read_x2apic(0x802);
+		/* TMCCT */
+		vmx_enable_intercept_msr_read_x2apic(0x839);
+		/* TPR */
+		vmx_disable_intercept_msr_write_x2apic(0x808);
+	}
 
 	if (enable_ept) {
 		kvm_mmu_set_mask_ptes(0ull,
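The net effect of the block above when apicv register virtualization is enabled: reads of the whole x2apic MSR range are handled by the hardware without a VM exit, with two exceptions KVM must still emulate, and TPR writes are additionally passed through:

	0x800 - 0x8ff reads	not intercepted (APIC register virtualization)
	0x802 (APIC ID) read	intercepted: KVM stores only the top 8 bits of the ID
	0x839 (TMCCT) read	intercepted: the current-count timer register needs
				software emulation
	0x808 (TPR) write	not intercepted
	all other writes	still intercepted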
@@ -7466,8 +7620,10 @@ static int __init vmx_init(void)
 
 	return 0;
 
-out3:
+out4:
 	free_page((unsigned long)vmx_msr_bitmap_longmode);
+out3:
+	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
 out2:
 	free_page((unsigned long)vmx_msr_bitmap_legacy);
 out1:
@@ -7479,6 +7635,8 @@ out:
 
 static void __exit vmx_exit(void)
 {
+	free_page((unsigned long)vmx_msr_bitmap_legacy_x2apic);
+	free_page((unsigned long)vmx_msr_bitmap_longmode_x2apic);
 	free_page((unsigned long)vmx_msr_bitmap_legacy);
 	free_page((unsigned long)vmx_msr_bitmap_longmode);
 	free_page((unsigned long)vmx_io_bitmap_b);