-rw-r--r--	arch/x86/kvm/svm.c	 2
-rw-r--r--	arch/x86/kvm/vmx.c	19
2 files changed, 10 insertions, 11 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 82e144a4e514..1ca12298ffc7 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3395,6 +3395,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
 	load_host_msrs(vcpu);
+	kvm_load_ldt(ldt_selector);
 	loadsegment(fs, fs_selector);
 #ifdef CONFIG_X86_64
 	load_gs_index(gs_selector);
@@ -3402,7 +3403,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #else
 	loadsegment(gs, gs_selector);
 #endif
-	kvm_load_ldt(ldt_selector);
 
 	reload_tss(vcpu);
 
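Read as one change, the two svm.c hunks move the kvm_load_ldt() call ahead of the fs/gs selector reloads in svm_vcpu_run(). A sketch of the resulting host-state restore sequence, reconstructed only from the context lines above (the rest of the function, including the line between the two hunks, is elided):

	/* svm_vcpu_run() host-state restore after this patch (sketch) */
	load_host_msrs(vcpu);
	kvm_load_ldt(ldt_selector);	/* LDT is reloaded first ... */
	loadsegment(fs, fs_selector);	/* ... then the fs/gs selectors */
#ifdef CONFIG_X86_64
	load_gs_index(gs_selector);
	/* ... code between the two hunks elided ... */
#else
	loadsegment(gs, gs_selector);
#endif

	reload_tss(vcpu);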
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8da0e45ff7c9..ff21fdda0c53 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -821,10 +821,9 @@ static void vmx_save_host_state(struct kvm_vcpu *vcpu)
 #endif
 
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu)) {
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
+	if (is_long_mode(&vmx->vcpu))
 		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-	}
 #endif
 	for (i = 0; i < vmx->save_nmsrs; ++i)
 		kvm_set_shared_msr(vmx->guest_msrs[i].index,
@@ -839,23 +838,23 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
 
 	++vmx->vcpu.stat.host_state_reload;
 	vmx->host_state.loaded = 0;
-	if (vmx->host_state.fs_reload_needed)
-		loadsegment(fs, vmx->host_state.fs_sel);
+#ifdef CONFIG_X86_64
+	if (is_long_mode(&vmx->vcpu))
+		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
+#endif
 	if (vmx->host_state.gs_ldt_reload_needed) {
 		kvm_load_ldt(vmx->host_state.ldt_sel);
 #ifdef CONFIG_X86_64
 		load_gs_index(vmx->host_state.gs_sel);
-		wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gs);
 #else
 		loadsegment(gs, vmx->host_state.gs_sel);
 #endif
 	}
+	if (vmx->host_state.fs_reload_needed)
+		loadsegment(fs, vmx->host_state.fs_sel);
 	reload_tss();
 #ifdef CONFIG_X86_64
-	if (is_long_mode(&vmx->vcpu)) {
-		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
-	}
+	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
 #endif
 	if (current_thread_info()->status & TS_USEDFPU)
 		clts();
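The vmx.c side mirrors this ordering change: vmx_save_host_state() now saves the host MSR_KERNEL_GS_BASE unconditionally and switches to the guest value only in long mode, while __vmx_load_host_state() reads the guest value back before touching any selectors, reloads the LDT and gs before fs, and only then restores the host MSR_KERNEL_GS_BASE. A sketch of the paired save/restore ordering after the patch, reconstructed from the hunks above (fragments only, surrounding code elided):

	/* vmx_save_host_state(): save the host value up front, install the
	 * guest value only when the guest can use it (long mode). */
#ifdef CONFIG_X86_64
	rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
	if (is_long_mode(&vmx->vcpu))
		wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif

	/* __vmx_load_host_state(): capture the guest value, reload LDT and gs,
	 * then fs, and finally put the host MSR_KERNEL_GS_BASE back. */
#ifdef CONFIG_X86_64
	if (is_long_mode(&vmx->vcpu))
		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
#endif
	if (vmx->host_state.gs_ldt_reload_needed) {
		kvm_load_ldt(vmx->host_state.ldt_sel);
#ifdef CONFIG_X86_64
		load_gs_index(vmx->host_state.gs_sel);
#else
		loadsegment(gs, vmx->host_state.gs_sel);
#endif
	}
	if (vmx->host_state.fs_reload_needed)
		loadsegment(fs, vmx->host_state.fs_sel);
	reload_tss();
#ifdef CONFIG_X86_64
	wrmsrl(MSR_KERNEL_GS_BASE, vmx->msr_host_kernel_gs_base);
#endif

As the hunks show, only the guest-value transfer stays behind the is_long_mode() check; the host value is now saved and restored unconditionally, so the restore path no longer depends on the guest's mode at reload time.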
