Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kvm/vmx.c | 112
1 file changed, 42 insertions(+), 70 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 32512519e1ac..bf46253149c3 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -86,6 +86,11 @@ struct vmcs {
 	char data[0];
 };
 
+struct shared_msr_entry {
+	unsigned index;
+	u64 data;
+};
+
 struct vcpu_vmx {
 	struct kvm_vcpu vcpu;
 	struct list_head local_vcpus_link;
@@ -93,8 +98,7 @@ struct vcpu_vmx {
 	int launched;
 	u8 fail;
 	u32 idt_vectoring_info;
-	struct kvm_msr_entry *guest_msrs;
-	struct kvm_msr_entry *host_msrs;
+	struct shared_msr_entry *guest_msrs;
 	int nmsrs;
 	int save_nmsrs;
 	int msr_offset_efer;
@@ -108,7 +112,6 @@ struct vcpu_vmx {
 		u16 fs_sel, gs_sel, ldt_sel;
 		int gs_ldt_reload_needed;
 		int fs_reload_needed;
-		int guest_efer_loaded;
 	} host_state;
 	struct {
 		int vm86_active;
@@ -195,6 +198,8 @@ static struct kvm_vmx_segment_field {
 	VMX_SEGMENT_FIELD(LDTR),
 };
 
+static u64 host_efer;
+
 static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
 
 /*
@@ -209,22 +214,6 @@ static const u32 vmx_msr_index[] = {
 };
 #define NR_VMX_MSR ARRAY_SIZE(vmx_msr_index)
 
-static void load_msrs(struct kvm_msr_entry *e, int n)
-{
-	int i;
-
-	for (i = 0; i < n; ++i)
-		wrmsrl(e[i].index, e[i].data);
-}
-
-static void save_msrs(struct kvm_msr_entry *e, int n)
-{
-	int i;
-
-	for (i = 0; i < n; ++i)
-		rdmsrl(e[i].index, e[i].data);
-}
-
 static inline int is_page_fault(u32 intr_info)
 {
 	return (intr_info & (INTR_INFO_INTR_TYPE_MASK | INTR_INFO_VECTOR_MASK |
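The open-coded load_msrs()/save_msrs() loops are superseded by the generic shared-MSR helpers kvm_define_shared_msr() and kvm_set_shared_msr(), which are not defined in this file and presumably come from the common x86 code introduced alongside this patch. A minimal sketch of that contract under that assumption, not the actual implementation: each slot is registered once with its host value, guest values are written lazily, and redundant wrmsrs are skipped.

	/* Sketch only; the real helpers live outside this file. */
	struct shared_msr {
		u32 msr;	/* MSR number, e.g. MSR_K6_STAR */
		u64 host;	/* host value, captured at registration */
		u64 curr;	/* value currently loaded in the MSR */
	};

	static struct shared_msr shared_msrs[16];	/* size illustrative */

	void kvm_define_shared_msr(unsigned slot, u32 msr)
	{
		shared_msrs[slot].msr = msr;
		rdmsrl_safe(msr, &shared_msrs[slot].host);
		shared_msrs[slot].curr = shared_msrs[slot].host;
	}

	void kvm_set_shared_msr(unsigned slot, u64 value)
	{
		if (value == shared_msrs[slot].curr)
			return;		/* skip a redundant wrmsr */
		shared_msrs[slot].curr = value;
		wrmsrl(shared_msrs[slot].msr, value);
	}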
@@ -373,7 +362,7 @@ static int __find_msr_index(struct vcpu_vmx *vmx, u32 msr)
 	int i;
 
 	for (i = 0; i < vmx->nmsrs; ++i)
-		if (vmx->guest_msrs[i].index == msr)
+		if (vmx_msr_index[vmx->guest_msrs[i].index] == msr)
 			return i;
 	return -1;
 }
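guest_msrs[i].index no longer holds a raw MSR number; it is a slot into the global vmx_msr_index[] table, which is also the slot registered with the shared-MSR code, hence the extra indirection in the comparison. An illustrative example (table contents hypothetical; the real table is config-dependent):

	static const u32 example_msr_index[] =
		{ MSR_SYSCALL_MASK, MSR_LSTAR, MSR_EFER };

	static bool example_lookup(struct vcpu_vmx *vmx)
	{
		/* Suppose setup recorded EFER in vcpu slot 0 by *table slot*: */
		vmx->guest_msrs[0].index = 2;	/* example_msr_index[2] */

		/* The lookup resolves the slot back through the table: */
		return example_msr_index[vmx->guest_msrs[0].index] == MSR_EFER;
	}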
@@ -404,7 +393,7 @@ static inline void __invept(int ext, u64 eptp, gpa_t gpa)
 		: : "a" (&operand), "c" (ext) : "cc", "memory");
 }
 
-static struct kvm_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
+static struct shared_msr_entry *find_msr_entry(struct vcpu_vmx *vmx, u32 msr)
 {
 	int i;
 
@@ -595,17 +584,15 @@ static void reload_tss(void)
 	load_TR_desc();
 }
 
-static void load_transition_efer(struct vcpu_vmx *vmx)
+static bool update_transition_efer(struct vcpu_vmx *vmx)
 {
 	int efer_offset = vmx->msr_offset_efer;
-	u64 host_efer;
 	u64 guest_efer;
 	u64 ignore_bits;
 
 	if (efer_offset < 0)
-		return;
-	host_efer = vmx->host_msrs[efer_offset].data;
-	guest_efer = vmx->guest_msrs[efer_offset].data;
+		return false;
+	guest_efer = vmx->vcpu.arch.shadow_efer;
 
 	/*
 	 * NX is emulated; LMA and LME handled by hardware; SCE meaningless
@@ -619,26 +606,18 @@ static void load_transition_efer(struct vcpu_vmx *vmx)
 	ignore_bits &= ~(u64)EFER_SCE;
 #endif
 	if ((guest_efer & ~ignore_bits) == (host_efer & ~ignore_bits))
-		return;
+		return false;
 
-	vmx->host_state.guest_efer_loaded = 1;
 	guest_efer &= ~ignore_bits;
 	guest_efer |= host_efer & ignore_bits;
-	wrmsrl(MSR_EFER, guest_efer);
-	vmx->vcpu.stat.efer_reload++;
-}
-
-static void reload_host_efer(struct vcpu_vmx *vmx)
-{
-	if (vmx->host_state.guest_efer_loaded) {
-		vmx->host_state.guest_efer_loaded = 0;
-		load_msrs(vmx->host_msrs + vmx->msr_offset_efer, 1);
-	}
+	vmx->guest_msrs[efer_offset].data = guest_efer;
+	return true;
 }
 
 static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	int i;
 
 	if (vmx->host_state.loaded)
 		return;
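update_transition_efer() now only computes the EFER value the guest should run under and stashes it in guest_msrs[]; the wrmsr itself is deferred to the shared-MSR path. A worked example of the ignore_bits logic (bit values illustrative):

	/*
	 * On a 64-bit host, suppose
	 *   host_efer  = EFER_LME | EFER_LMA | EFER_SCE | EFER_NX
	 *   guest_efer = EFER_LME | EFER_LMA              (no SCE)
	 *
	 * NX is emulated and LMA/LME are handled by hardware, so they
	 * stay in ignore_bits; SCE is dropped from ignore_bits because
	 * the guest is in long mode, where SCE matters.  Masking both
	 * sides with ~ignore_bits leaves a difference in EFER_SCE, so
	 * the function returns true after recording
	 *
	 *   guest_msrs[efer_offset].data = (guest_efer & ~ignore_bits)
	 *                                | (host_efer  &  ignore_bits);
	 *
	 * i.e. the guest's SCE setting with the host's NX/LMA/LME
	 * preserved, leaving one deferred wrmsr to the shared-MSR code.
	 */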
@@ -680,8 +659,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
 	}
 #endif
-	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-	load_transition_efer(vmx);
+	for (i = 0; i < vmx->save_nmsrs; ++i)
+		kvm_set_shared_msr(vmx->guest_msrs[i].index,
+				   vmx->guest_msrs[i].data);
 }
 
 static void __vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -709,9 +689,6 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 		local_irq_restore(flags);
 	}
 	reload_tss();
-	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
-	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
-	reload_host_efer(vmx);
 #ifdef CONFIG_X86_64
 	if (is_long_mode(&vmx->vcpu)) {
 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
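__vmx_load_host_state() loses its save_msrs()/load_msrs()/reload_host_efer() sequence: guest accesses to these MSRs trap through vmx_set_msr(), so there is nothing to read back, and host values are restored by the shared-MSR layer when the thread returns to userspace rather than on every vcpu_put(). Continuing the sketch above, and assuming the kernel's user-return-notifier facility is what backs it:

	static void restore_host_msrs(struct user_return_notifier *urn)
	{
		int slot;

		for (slot = 0; slot < ARRAY_SIZE(shared_msrs); ++slot)
			if (shared_msrs[slot].curr != shared_msrs[slot].host) {
				wrmsrl(shared_msrs[slot].msr,
				       shared_msrs[slot].host);
				shared_msrs[slot].curr = shared_msrs[slot].host;
			}
	}

	/* registered via user_return_notifier_register() by the first
	 * kvm_set_shared_msr() call that actually changes a value */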
@@ -908,19 +885,14 @@ static void vmx_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 /*
  * Swap MSR entry in host/guest MSR entry array.
  */
-#ifdef CONFIG_X86_64
 static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
 {
-	struct kvm_msr_entry tmp;
+	struct shared_msr_entry tmp;
 
 	tmp = vmx->guest_msrs[to];
 	vmx->guest_msrs[to] = vmx->guest_msrs[from];
 	vmx->guest_msrs[from] = tmp;
-	tmp = vmx->host_msrs[to];
-	vmx->host_msrs[to] = vmx->host_msrs[from];
-	vmx->host_msrs[from] = tmp;
 }
-#endif
 
 /*
  * Set up the vmcs to automatically save and restore system
@@ -929,15 +901,13 @@ static void move_msr_up(struct vcpu_vmx *vmx, int from, int to)
  */
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
-	int save_nmsrs;
+	int save_nmsrs, index;
 	unsigned long *msr_bitmap;
 
 	vmx_load_host_state(vmx);
 	save_nmsrs = 0;
 #ifdef CONFIG_X86_64
 	if (is_long_mode(&vmx->vcpu)) {
-		int index;
-
 		index = __find_msr_index(vmx, MSR_SYSCALL_MASK);
 		if (index >= 0)
 			move_msr_up(vmx, index, save_nmsrs++);
@@ -956,9 +926,11 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 			move_msr_up(vmx, index, save_nmsrs++);
 	}
 #endif
-	vmx->save_nmsrs = save_nmsrs;
+	vmx->msr_offset_efer = index = __find_msr_index(vmx, MSR_EFER);
+	if (index >= 0 && update_transition_efer(vmx))
+		move_msr_up(vmx, index, save_nmsrs++);
 
-	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
+	vmx->save_nmsrs = save_nmsrs;
 
 	if (cpu_has_vmx_msr_bitmap()) {
 		if (is_long_mode(&vmx->vcpu))
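Note the reordering in setup_msrs(): EFER joins the save list only when update_transition_efer() finds a real difference, and save_nmsrs is published after that decision. In the common case the EFER switch disappears entirely:

	/*
	 * Example: a 64-bit guest on a 64-bit host, both with NX and
	 * SCE set.  guest_efer and host_efer agree on every bit outside
	 * ignore_bits, update_transition_efer() returns false, EFER is
	 * never moved into [0, save_nmsrs), and vcpu load performs no
	 * EFER wrmsr at all.
	 */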
@@ -1000,7 +972,7 @@ static void guest_write_tsc(u64 guest_tsc, u64 host_tsc)
 static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 {
 	u64 data;
-	struct kvm_msr_entry *msr;
+	struct shared_msr_entry *msr;
 
 	if (!pdata) {
 		printk(KERN_ERR "BUG: get_msr called with NULL pdata\n");
@@ -1019,9 +991,9 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 		vmx_load_host_state(to_vmx(vcpu));
 		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
 		break;
+#endif
 	case MSR_EFER:
 		return kvm_get_msr_common(vcpu, msr_index, pdata);
-#endif
 	case MSR_IA32_TSC:
 		data = guest_read_tsc();
 		break;
@@ -1035,6 +1007,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 		data = vmcs_readl(GUEST_SYSENTER_ESP);
 		break;
 	default:
+		vmx_load_host_state(to_vmx(vcpu));
 		msr = find_msr_entry(to_vmx(vcpu), msr_index);
 		if (msr) {
 			vmx_load_host_state(to_vmx(vcpu));
@@ -1056,7 +1029,7 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct kvm_msr_entry *msr;
+	struct shared_msr_entry *msr;
 	u64 host_tsc;
 	int ret = 0;
 
@@ -1565,7 +1538,10 @@ continue_rmode:
 static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 {
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
-	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+	struct shared_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
+
+	if (!msr)
+		return;
 
 	/*
 	 * Force kernel_gs_base reloading before EFER changes, as control
@@ -2417,10 +2393,8 @@ static int vmx_vcpu_setup(struct vcpu_vmx *vmx)
 		if (wrmsr_safe(index, data_low, data_high) < 0)
 			continue;
 		data = data_low | ((u64)data_high << 32);
-		vmx->host_msrs[j].index = index;
-		vmx->host_msrs[j].reserved = 0;
-		vmx->host_msrs[j].data = data;
-		vmx->guest_msrs[j] = vmx->host_msrs[j];
+		vmx->guest_msrs[j].index = i;
+		vmx->guest_msrs[j].data = 0;
 		++vmx->nmsrs;
 	}
 
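vmx_vcpu_setup() keeps the wrmsr_safe()/rdmsr() probe (MSRs that fault are skipped), but the entry now records the table slot i and starts the guest value at zero; per-vcpu host copies are gone, since the shared-MSR layer captured host values once at module init. (The data variable is still computed here but no longer consumed.) A guest wrmsr later fills the slot through the default case of vmx_set_msr(); a sketch of that existing flow:

	msr = find_msr_entry(vmx, msr_index);
	if (msr) {
		vmx_load_host_state(vmx);	/* sync host state first */
		msr->data = data;	/* pushed on the next vcpu load */
	}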
@@ -3821,7 +3795,6 @@ static void vmx_free_vcpu(struct kvm_vcpu *vcpu)
 		__clear_bit(vmx->vpid, vmx_vpid_bitmap);
 	spin_unlock(&vmx_vpid_lock);
 	vmx_free_vmcs(vcpu);
-	kfree(vmx->host_msrs);
 	kfree(vmx->guest_msrs);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, vmx);
@@ -3848,10 +3821,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 		goto uninit_vcpu;
 	}
 
-	vmx->host_msrs = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!vmx->host_msrs)
-		goto free_guest_msrs;
-
 	vmx->vmcs = alloc_vmcs();
 	if (!vmx->vmcs)
 		goto free_msrs;
@@ -3882,8 +3851,6 @@ static struct kvm_vcpu *vmx_create_vcpu(struct kvm *kvm, unsigned int id)
 free_vmcs:
 	free_vmcs(vmx->vmcs);
 free_msrs:
-	kfree(vmx->host_msrs);
-free_guest_msrs:
 	kfree(vmx->guest_msrs);
 uninit_vcpu:
 	kvm_vcpu_uninit(&vmx->vcpu);
@@ -4033,7 +4000,12 @@ static struct kvm_x86_ops vmx_x86_ops = {
 
 static int __init vmx_init(void)
 {
-	int r;
+	int r, i;
+
+	rdmsrl_safe(MSR_EFER, &host_efer);
+
+	for (i = 0; i < NR_VMX_MSR; ++i)
+		kvm_define_shared_msr(i, vmx_msr_index[i]);
 
 	vmx_io_bitmap_a = (unsigned long *)__get_free_page(GFP_KERNEL);
 	if (!vmx_io_bitmap_a)
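vmx_init() snapshots host EFER once (rdmsrl_safe() tolerates CPUs lacking EFER) and registers every vmx_msr_index slot with the shared-MSR layer before any vcpu can run. The slot numbering ties the whole patch together; condensed, using only names from this diff plus the notifier assumption noted above:

	/* module init: slot i <-> vmx_msr_index[i], host value captured */
	kvm_define_shared_msr(i, vmx_msr_index[i]);

	/* vcpu setup: the per-vcpu entry remembers its slot */
	vmx->guest_msrs[j].index = i;

	/* vcpu load: push guest values; no-op when already current */
	kvm_set_shared_msr(vmx->guest_msrs[i].index, vmx->guest_msrs[i].data);

	/* return to userspace: the notifier restores host values */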