aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAvi Kivity <avi@redhat.com>2009-02-24 15:26:47 -0500
committerAvi Kivity <avi@redhat.com>2009-06-10 04:48:21 -0400
commit5897297bc228fc3c85fdc421fd5c487f9a99821a (patch)
treebd66cd8b9482b8c3c38e009edc0a7a3796a0c948
parent3e7c73e9b15eab73e9cf72daf3931925da8afcff (diff)
KVM: VMX: Don't intercept MSR_KERNEL_GS_BASE
Windows 2008 accesses this MSR often on context switch intensive workloads; since we run in guest context with the guest MSR value loaded (so swapgs can work correctly), we can simply disable interception of rdmsr/wrmsr for this MSR. A complication occurs since in legacy mode, we run with the host MSR value loaded. In this case we enable interception. This means we need two MSR bitmaps, one for legacy mode and one for long mode. Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--arch/x86/kvm/vmx.c57
1 file changed, 43 insertions(+), 14 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index b20c9e47e925..b5eae7a00aad 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -113,7 +113,8 @@ static DEFINE_PER_CPU(struct list_head, vcpus_on_cpu);
113 113
114static unsigned long *vmx_io_bitmap_a; 114static unsigned long *vmx_io_bitmap_a;
115static unsigned long *vmx_io_bitmap_b; 115static unsigned long *vmx_io_bitmap_b;
116static unsigned long *vmx_msr_bitmap; 116static unsigned long *vmx_msr_bitmap_legacy;
117static unsigned long *vmx_msr_bitmap_longmode;
117 118
118static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS); 119static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
119static DEFINE_SPINLOCK(vmx_vpid_lock); 120static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -812,6 +813,7 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
812static void setup_msrs(struct vcpu_vmx *vmx) 813static void setup_msrs(struct vcpu_vmx *vmx)
813{ 814{
814 int save_nmsrs; 815 int save_nmsrs;
816 unsigned long *msr_bitmap;
815 817
816 vmx_load_host_state(vmx); 818 vmx_load_host_state(vmx);
817 save_nmsrs = 0; 819 save_nmsrs = 0;
@@ -847,6 +849,15 @@ static void setup_msrs(struct vcpu_vmx *vmx)
847 __find_msr_index(vmx, MSR_KERNEL_GS_BASE); 849 __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
848#endif 850#endif
849 vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER); 851 vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
852
853 if (cpu_has_vmx_msr_bitmap()) {
854 if (is_long_mode(&vmx->vcpu))
855 msr_bitmap = vmx_msr_bitmap_longmode;
856 else
857 msr_bitmap = vmx_msr_bitmap_legacy;
858
859 vmcs_write64(MSR_BITMAP, __pa(msr_bitmap));
860 }
850} 861}
851 862
852/* 863/*
@@ -2082,7 +2093,7 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
2082 spin_unlock(&vmx_vpid_lock); 2093 spin_unlock(&vmx_vpid_lock);
2083} 2094}
2084 2095
2085static void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr) 2096static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
2086{ 2097{
2087 int f = sizeof(unsigned long); 2098 int f = sizeof(unsigned long);
2088 2099
@@ -2104,6 +2115,13 @@ static void vmx_disable_intercept_for_msr(unsigned long *msr_bitmap, u32 msr)
2104 } 2115 }
2105} 2116}
2106 2117
2118static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
2119{
2120 if (!longmode_only)
2121 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_legacy, msr);
2122 __vmx_disable_intercept_for_msr(vmx_msr_bitmap_longmode, msr);
2123}
2124
2107/* 2125/*
2108 * Sets up the vmcs for emulated real mode. 2126 * Sets up the vmcs for emulated real mode.
2109 */ 2127 */
@@ -2123,7 +2141,7 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
2123 vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b)); 2141 vmcs_write64(IO_BITMAP_B, __pa(vmx_io_bitmap_b));
2124 2142
2125 if (cpu_has_vmx_msr_bitmap()) 2143 if (cpu_has_vmx_msr_bitmap())
2126 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap)); 2144 vmcs_write64(MSR_BITMAP, __pa(vmx_msr_bitmap_legacy));
2127 2145
2128 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */ 2146 vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
2129 2147
@@ -3705,12 +3723,18 @@ static int __init vmx_init(void)
3705 goto out; 3723 goto out;
3706 } 3724 }
3707 3725
3708 vmx_msr_bitmap = (unsigned long *)__get_free_page(GFP_KERNEL); 3726 vmx_msr_bitmap_legacy = (unsigned long *)__get_free_page(GFP_KERNEL);
3709 if (!vmx_msr_bitmap) { 3727 if (!vmx_msr_bitmap_legacy) {
3710 r = -ENOMEM; 3728 r = -ENOMEM;
3711 goto out1; 3729 goto out1;
3712 } 3730 }
3713 3731
3732 vmx_msr_bitmap_longmode = (unsigned long *)__get_free_page(GFP_KERNEL);
3733 if (!vmx_msr_bitmap_longmode) {
3734 r = -ENOMEM;
3735 goto out2;
3736 }
3737
3714 /* 3738 /*
3715 * Allow direct access to the PC debug port (it is often used for I/O 3739 * Allow direct access to the PC debug port (it is often used for I/O
3716 * delays, but the vmexits simply slow things down). 3740 * delays, but the vmexits simply slow things down).
@@ -3720,19 +3744,21 @@ static int __init vmx_init(void)
3720 3744
3721 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE); 3745 memset(vmx_io_bitmap_b, 0xff, PAGE_SIZE);
3722 3746
3723 memset(vmx_msr_bitmap, 0xff, PAGE_SIZE); 3747 memset(vmx_msr_bitmap_legacy, 0xff, PAGE_SIZE);
3748 memset(vmx_msr_bitmap_longmode, 0xff, PAGE_SIZE);
3724 3749
3725 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */ 3750 set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
3726 3751
3727 r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE); 3752 r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
3728 if (r) 3753 if (r)
3729 goto out2; 3754 goto out3;
3730 3755
3731 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE); 3756 vmx_disable_intercept_for_msr(MSR_FS_BASE, false);
3732 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE); 3757 vmx_disable_intercept_for_msr(MSR_GS_BASE, false);
3733 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS); 3758 vmx_disable_intercept_for_msr(MSR_KERNEL_GS_BASE, true);
3734 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP); 3759 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_CS, false);
3735 vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP); 3760 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_ESP, false);
3761 vmx_disable_intercept_for_msr(MSR_IA32_SYSENTER_EIP, false);
3736 3762
3737 if (vm_need_ept()) { 3763 if (vm_need_ept()) {
3738 bypass_guest_pf = 0; 3764 bypass_guest_pf = 0;
@@ -3752,8 +3778,10 @@ static int __init vmx_init(void)
3752 3778
3753 return 0; 3779 return 0;
3754 3780
3781out3:
3782 free_page((unsigned long)vmx_msr_bitmap_longmode);
3755out2: 3783out2:
3756 free_page((unsigned long)vmx_msr_bitmap); 3784 free_page((unsigned long)vmx_msr_bitmap_legacy);
3757out1: 3785out1:
3758 free_page((unsigned long)vmx_io_bitmap_b); 3786 free_page((unsigned long)vmx_io_bitmap_b);
3759out: 3787out:
@@ -3763,7 +3791,8 @@ out:
3763 3791
3764static void __exit vmx_exit(void) 3792static void __exit vmx_exit(void)
3765{ 3793{
3766 free_page((unsigned long)vmx_msr_bitmap); 3794 free_page((unsigned long)vmx_msr_bitmap_legacy);
3795 free_page((unsigned long)vmx_msr_bitmap_longmode);
3767 free_page((unsigned long)vmx_io_bitmap_b); 3796 free_page((unsigned long)vmx_io_bitmap_b);
3768 free_page((unsigned long)vmx_io_bitmap_a); 3797 free_page((unsigned long)vmx_io_bitmap_a);
3769 3798