author      Sheng Yang <sheng.yang@intel.com>        2008-03-28 01:18:56 -0400
committer   Avi Kivity <avi@qumranet.com>            2008-04-27 05:00:52 -0400
commit      25c5f225beda4fbea878ed8b6203ab4ecc7de2d1
tree        b15dc6e9a311c556a9c5ed1ccbc0ed0830d24b23    /arch/x86/kvm/vmx.c
parent      e976a2b997fc4ad70ccc53acfe62811c4aaec851
KVM: VMX: Enable MSR Bitmap feature
The MSR bitmap controls whether an access to a given MSR causes a VM exit.
Eliminating exits for MSRs that the hardware saves and restores automatically
yields a small performance gain.
Signed-off-by: Sheng Yang <sheng.yang@intel.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
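
For reference, the MSR bitmap used below is a single 4 KB page split into four 1 KB regions: read bits for low MSRs (0x00000000-0x00001fff) at offset 0x000, read bits for high MSRs (0xc0000000-0xc0001fff) at 0x400, write bits for low MSRs at 0x800, and write bits for high MSRs at 0xc00. A set bit makes the corresponding RDMSR/WRMSR exit; the patch initialises the page to all ones and then clears bits for the MSRs it wants to pass through. A minimal sketch of the lookup the hardware performs, assuming the page is already mapped (msr_bitmap_would_exit is a hypothetical helper, not part of this patch):

/* Sketch only: would a RDMSR (write == 0) or WRMSR (write == 1) of 'msr'
 * cause a VM exit, given an MSR-bitmap page laid out as described above? */
static int msr_bitmap_would_exit(void *va, u32 msr, int write)
{
        if (msr <= 0x1fff)              /* low range: read 0x000, write 0x800 */
                return test_bit(msr, va + (write ? 0x800 : 0x000));
        if ((msr >= 0xc0000000) && (msr <= 0xc0001fff))
                return test_bit(msr & 0x1fff, va + (write ? 0xc00 : 0x400));
        return 1;                       /* MSRs outside both ranges always exit */
}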
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c  67
1 file changed, 60 insertions(+), 7 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index cbca46acfac3..87eee7a7f16e 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -91,6 +91,7 @@ static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
 
 static struct page *vmx_io_bitmap_a;
 static struct page *vmx_io_bitmap_b;
+static struct page *vmx_msr_bitmap;
 
 static DECLARE_BITMAP(vmx_vpid_bitmap, VMX_NR_VPIDS);
 static DEFINE_SPINLOCK(vmx_vpid_lock);
@@ -185,6 +186,11 @@ static inline int is_external_interrupt(u32 intr_info)
                 == (INTR_TYPE_EXT_INTR | INTR_INFO_VALID_MASK);
 }
 
+static inline int cpu_has_vmx_msr_bitmap(void)
+{
+        return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_USE_MSR_BITMAPS);
+}
+
 static inline int cpu_has_vmx_tpr_shadow(void)
 {
         return (vmcs_config.cpu_based_exec_ctrl & CPU_BASED_TPR_SHADOW);
@@ -1001,6 +1007,7 @@ static __init int setup_vmcs_config(struct vmcs_config *vmcs_conf)
               CPU_BASED_MOV_DR_EXITING |
               CPU_BASED_USE_TSC_OFFSETING;
         opt = CPU_BASED_TPR_SHADOW |
+              CPU_BASED_USE_MSR_BITMAPS |
               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS;
         if (adjust_vmx_controls(min, opt, MSR_IA32_VMX_PROCBASED_CTLS,
                                 &_cpu_based_exec_control) < 0)
@@ -1575,6 +1582,30 @@ static void allocate_vpid(struct vcpu_vmx *vmx)
         spin_unlock(&vmx_vpid_lock);
 }
 
+void vmx_disable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
+{
+        void *va;
+
+        if (!cpu_has_vmx_msr_bitmap())
+                return;
+
+        /*
+         * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+         * have the write-low and read-high bitmap offsets the wrong way round.
+         * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+         */
+        va = kmap(msr_bitmap);
+        if (msr <= 0x1fff) {
+                __clear_bit(msr, va + 0x000); /* read-low */
+                __clear_bit(msr, va + 0x800); /* write-low */
+        } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+                msr &= 0x1fff;
+                __clear_bit(msr, va + 0x400); /* read-high */
+                __clear_bit(msr, va + 0xc00); /* write-high */
+        }
+        kunmap(msr_bitmap);
+}
+
 /*
  * Sets up the vmcs for emulated real mode.
  */
@@ -1592,6 +1623,9 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
         vmcs_write64(IO_BITMAP_A, page_to_phys(vmx_io_bitmap_a));
         vmcs_write64(IO_BITMAP_B, page_to_phys(vmx_io_bitmap_b));
 
+        if (cpu_has_vmx_msr_bitmap())
+                vmcs_write64(MSR_BITMAP, page_to_phys(vmx_msr_bitmap));
+
         vmcs_write64(VMCS_LINK_POINTER, -1ull); /* 22.3.1.5 */
 
         /* Control */
@@ -2728,7 +2762,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 static int __init vmx_init(void)
 {
-        void *iova;
+        void *va;
         int r;
 
         vmx_io_bitmap_a = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
@@ -2741,30 +2775,48 @@ static int __init vmx_init(void)
                 goto out;
         }
 
+        vmx_msr_bitmap = alloc_page(GFP_KERNEL | __GFP_HIGHMEM);
+        if (!vmx_msr_bitmap) {
+                r = -ENOMEM;
+                goto out1;
+        }
+
         /*
          * Allow direct access to the PC debug port (it is often used for I/O
          * delays, but the vmexits simply slow things down).
          */
-        iova = kmap(vmx_io_bitmap_a);
-        memset(iova, 0xff, PAGE_SIZE);
-        clear_bit(0x80, iova);
+        va = kmap(vmx_io_bitmap_a);
+        memset(va, 0xff, PAGE_SIZE);
+        clear_bit(0x80, va);
         kunmap(vmx_io_bitmap_a);
 
-        iova = kmap(vmx_io_bitmap_b);
-        memset(iova, 0xff, PAGE_SIZE);
+        va = kmap(vmx_io_bitmap_b);
+        memset(va, 0xff, PAGE_SIZE);
         kunmap(vmx_io_bitmap_b);
 
+        va = kmap(vmx_msr_bitmap);
+        memset(va, 0xff, PAGE_SIZE);
+        kunmap(vmx_msr_bitmap);
+
         set_bit(0, vmx_vpid_bitmap); /* 0 is reserved for host */
 
         r = kvm_init(&vmx_x86_ops, sizeof(struct vcpu_vmx), THIS_MODULE);
         if (r)
-                goto out1;
+                goto out2;
+
+        vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_FS_BASE);
+        vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_GS_BASE);
+        vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_CS);
+        vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_ESP);
+        vmx_disable_intercept_for_msr(vmx_msr_bitmap, MSR_IA32_SYSENTER_EIP);
 
         if (bypass_guest_pf)
                 kvm_mmu_set_nonpresent_ptes(~0xffeull, 0ull);
 
         return 0;
 
+out2:
+        __free_page(vmx_msr_bitmap);
 out1:
         __free_page(vmx_io_bitmap_b);
 out:
@@ -2774,6 +2826,7 @@ out:
 
 static void __exit vmx_exit(void)
 {
+        __free_page(vmx_msr_bitmap);
         __free_page(vmx_io_bitmap_b);
         __free_page(vmx_io_bitmap_a);
 
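
Note that the patch only ever clears bits, since vmx_init() fills the whole page with 0xff before any MSR is made pass-through. If interception later had to be restored for an MSR, a symmetric helper could set the same four bits back; a hypothetical sketch (no such function exists in this commit, the name vmx_enable_intercept_for_msr is assumed):

void vmx_enable_intercept_for_msr(struct page *msr_bitmap, u32 msr)
{
        void *va;

        if (!cpu_has_vmx_msr_bitmap())
                return;

        /* Mirror of vmx_disable_intercept_for_msr(): re-arm the VM exits. */
        va = kmap(msr_bitmap);
        if (msr <= 0x1fff) {
                __set_bit(msr, va + 0x000); /* read-low */
                __set_bit(msr, va + 0x800); /* write-low */
        } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
                msr &= 0x1fff;
                __set_bit(msr, va + 0x400); /* read-high */
                __set_bit(msr, va + 0xc00); /* write-high */
        }
        kunmap(msr_bitmap);
}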