author		Avi Kivity <avi@redhat.com>	2009-09-06 08:55:37 -0400
committer	Avi Kivity <avi@redhat.com>	2009-12-03 02:32:21 -0500
commit		44ea2b1758d88ad822e65b1c4c21ca6164494e27 (patch)
tree		8fd83b5184e9e472f96f25186695543de531b2ba /arch/x86/kvm/vmx.c
parent		3ce672d48400e0112fec7a3cb6bb2120493c6e11 (diff)
KVM: VMX: Move MSR_KERNEL_GS_BASE out of the vmx autoload msr area
Currently MSR_KERNEL_GS_BASE is saved and restored as part of the
guest/host msr reloading.  Since we wish to lazy-restore all the other
msrs, save and reload MSR_KERNEL_GS_BASE explicitly instead of using
the common code.

Signed-off-by: Avi Kivity <avi@redhat.com>
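[Editor's note] In effect, the MSR is now context-switched by hand around guest entry and exit instead of riding in the save_msrs()/load_msrs() autoload arrays. A minimal sketch of the swap, assuming CONFIG_X86_64 and a long-mode vcpu (the helper names here are hypothetical; the fields and the rdmsrl()/wrmsrl() calls are the ones introduced in the diff below):

	/* Guest entry: stash the host value, install the guest value. */
	static void enter_guest_kernel_gs_base(struct vcpu_vmx *vmx)
	{
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
	}

	/* Back to host: stash the guest value, restore the host value. */
	static void exit_guest_kernel_gs_base(struct vcpu_vmx *vmx)
	{
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	}

The read-back on exit matters: a long-mode guest can change MSR_KERNEL_GS_BASE without a VM exit (via swapgs), so the cached guest value must be refreshed from the hardware MSR.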
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	39
1 file changed, 26 insertions(+), 13 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 17730175aa08..32512519e1ac 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -99,7 +99,8 @@ struct vcpu_vmx {
 	int save_nmsrs;
 	int msr_offset_efer;
 #ifdef CONFIG_X86_64
-	int msr_offset_kernel_gs_base;
+	u64 msr_host_kernel_gs_base;
+	u64 msr_guest_kernel_gs_base;
 #endif
 	struct vmcs *vmcs;
 	struct {
@@ -202,7 +203,7 @@ static void ept_save_pdptrs(struct kvm_vcpu *vcpu);
  */
 static const u32 vmx_msr_index[] = {
 #ifdef CONFIG_X86_64
-	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR, MSR_KERNEL_GS_BASE,
+	MSR_SYSCALL_MASK, MSR_LSTAR, MSR_CSTAR,
 #endif
 	MSR_EFER, MSR_K6_STAR,
 };
@@ -674,10 +675,10 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu))
-		save_msrs(vmx->host_msrs +
-			  vmx->msr_offset_kernel_gs_base, 1);
-
+	if (is_long_mode(&vmx->vcpu)) {
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+	}
 #endif
 	load_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_transition_efer(vmx);
@@ -711,6 +712,12 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 	save_msrs(vmx->guest_msrs, vmx->save_nmsrs);
 	load_msrs(vmx->host_msrs, vmx->save_nmsrs);
 	reload_host_efer(vmx);
+#ifdef CONFIG_X86_64
+	if (is_long_mode(&vmx->vcpu)) {
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	}
+#endif
 }
 
 static void vmx_load_host_state(struct vcpu_vmx *vmx)
@@ -940,9 +947,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 	index = __find_msr_index(vmx, MSR_CSTAR);
 	if (index >= 0)
 		move_msr_up(vmx, index, save_nmsrs++);
-	index = __find_msr_index(vmx, MSR_KERNEL_GS_BASE);
-	if (index >= 0)
-		move_msr_up(vmx, index, save_nmsrs++);
 	/*
 	 * MSR_K6_STAR is only needed on long mode guests, and only
 	 * if efer.sce is enabled.
@@ -954,10 +958,6 @@ static void setup_msrs(struct vcpu_vmx *vmx)
 #endif
 	vmx->save_nmsrs = save_nmsrs;
 
-#ifdef CONFIG_X86_64
-	vmx->msr_offset_kernel_gs_base =
-		__find_msr_index(vmx, MSR_KERNEL_GS_BASE);
-#endif
 	vmx->msr_offset_efer = __find_msr_index(vmx, MSR_EFER);
 
 	if (cpu_has_vmx_msr_bitmap()) {
@@ -1015,6 +1015,10 @@ static int vmx_get_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata)
 	case MSR_GS_BASE:
 		data = vmcs_readl(GUEST_GS_BASE);
 		break;
+	case MSR_KERNEL_GS_BASE:
+		vmx_load_host_state(to_vmx(vcpu));
+		data = to_vmx(vcpu)->msr_guest_kernel_gs_base;
+		break;
 	case MSR_EFER:
 		return kvm_get_msr_common(vcpu, msr_index, pdata);
 #endif
@@ -1068,6 +1072,10 @@ static int vmx_set_msr(struct kvm_vcpu *vcpu, u32 msr_index, u64 data)
 	case MSR_GS_BASE:
 		vmcs_writel(GUEST_GS_BASE, data);
 		break;
+	case MSR_KERNEL_GS_BASE:
+		vmx_load_host_state(vmx);
+		vmx->msr_guest_kernel_gs_base = data;
+		break;
 #endif
 	case MSR_IA32_SYSENTER_CS:
 		vmcs_write32(GUEST_SYSENTER_CS, data);
@@ -1559,6 +1567,11 @@ static void vmx_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
 	struct kvm_msr_entry *msr = find_msr_entry(vmx, MSR_EFER);
 
+	/*
+	 * Force kernel_gs_base reloading before EFER changes, as control
+	 * of this msr depends on is_long_mode().
+	 */
+	vmx_load_host_state(to_vmx(vcpu));
 	vcpu->arch.shadow_efer = efer;
 	if (!msr)
 		return;
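[Editor's note] On the vmx_load_host_state() calls added in the vmx_get_msr(), vmx_set_msr() and vmx_set_efer() hunks: while guest MSR state is loaded, the hardware MSR holds the guest value and the cached msr_guest_kernel_gs_base field may be stale, so the cache has to be synchronized before it is read or overwritten. A hypothetical accessor making that rule explicit (the patch itself open-codes it in the switch statements above):

	static u64 guest_kernel_gs_base(struct vcpu_vmx *vmx)
	{
		/* Refresh the cached copy from the live MSR before using it. */
		vmx_load_host_state(vmx);
		return vmx->msr_guest_kernel_gs_base;
	}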