author	Orit Wasserman <owasserm@redhat.com>	2012-05-31 07:49:22 -0400
committer	Avi Kivity <avi@redhat.com>	2012-06-05 10:51:46 -0400
commit	b246dd5df139501b974bd6b28f7815e53b3a792f (patch)
tree	f6e687822a232612d703c061a4d2cf332660c4f4 /arch/x86/kvm/vmx.c
parent	1952639665e92481c34c34c3e2a71bf3e66ba362 (diff)
KVM: VMX: Fix KVM_SET_SREGS with big real mode segments
For example, a guest migrated between Westmere and Nehalem hosts can be caught in big real mode. The code that fixes up the segments for a real mode guest is moved from enter_rmode to vmx_set_segment; enter_rmode now calls vmx_set_segment for each segment.

Signed-off-by: Orit Wasserman <owasserm@rehdat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
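To illustrate the idea behind the change: instead of open-coding the real mode fixups in enter_rmode, every segment is read back and immediately written again, so the fixup logic that now lives in vmx_set_segment runs on both the enter_rmode path and the KVM_SET_SREGS path. Below is a minimal, self-contained C sketch of that get/set round-trip pattern; it is not part of the patch, and the names used (seg_state, get_segment, set_segment, rmode_active, enter_rmode_sketch) are hypothetical stand-ins, not KVM code.

/* Hypothetical, simplified model of the patch's get/set round-trip idea. */
#include <stdio.h>

enum seg_id { SEG_SS, SEG_CS, SEG_ES, SEG_DS, SEG_GS, SEG_FS, NR_SEGS };

struct seg_state {
	unsigned int selector;
	unsigned int base;
	unsigned int limit;
	unsigned int ar;		/* access rights */
};

static struct seg_state vmcs[NR_SEGS];	/* stand-in for the VMCS segment fields */
static int rmode_active = 1;		/* stand-in for vmx->rmode.vm86_active */

static void get_segment(struct seg_state *var, enum seg_id seg)
{
	*var = vmcs[seg];
}

/* All real mode fixups live here, mirroring the move into vmx_set_segment(). */
static void set_segment(const struct seg_state *var, enum seg_id seg)
{
	vmcs[seg] = *var;
	if (rmode_active) {
		vmcs[seg].limit = 0xffff;
		vmcs[seg].ar = 0xf3;
		vmcs[seg].selector = vmcs[seg].base >> 4;
	}
}

/* enter_rmode() now just round-trips every segment through get/set. */
static void enter_rmode_sketch(void)
{
	struct seg_state var;
	enum seg_id seg;

	for (seg = SEG_SS; seg < NR_SEGS; seg++) {
		get_segment(&var, seg);
		set_segment(&var, seg);
	}
}

int main(void)
{
	vmcs[SEG_CS].base = 0xf0000;
	enter_rmode_sketch();
	printf("CS selector after fixup: 0x%x\n", vmcs[SEG_CS].selector);
	return 0;
}

The design point is the shared code path: because KVM_SET_SREGS also ends up in vmx_set_segment, big real mode segment state written by userspace (for example during migration) receives the same fixups as segments programmed by enter_rmode.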
Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--	arch/x86/kvm/vmx.c	70
1 file changed, 58 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 396148ab089b..f78662ec8677 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -618,6 +618,10 @@ static void kvm_cpu_vmxon(u64 addr);
 static void kvm_cpu_vmxoff(void);
 static void vmx_set_cr3(struct kvm_vcpu *vcpu, unsigned long cr3);
 static int vmx_set_tss_addr(struct kvm *kvm, unsigned int addr);
+static void vmx_set_segment(struct kvm_vcpu *vcpu,
+			    struct kvm_segment *var, int seg);
+static void vmx_get_segment(struct kvm_vcpu *vcpu,
+			    struct kvm_segment *var, int seg);
 
 static DEFINE_PER_CPU(struct vmcs *, vmxarea);
 static DEFINE_PER_CPU(struct vmcs *, current_vmcs);
@@ -2782,6 +2786,7 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 {
 	unsigned long flags;
 	struct vcpu_vmx *vmx = to_vmx(vcpu);
+	struct kvm_segment var;
 
 	if (enable_unrestricted_guest)
 		return;
@@ -2825,20 +2830,23 @@ static void enter_rmode(struct kvm_vcpu *vcpu)
 	if (emulate_invalid_guest_state)
 		goto continue_rmode;
 
-	vmcs_write16(GUEST_SS_SELECTOR, vmcs_readl(GUEST_SS_BASE) >> 4);
-	vmcs_write32(GUEST_SS_LIMIT, 0xffff);
-	vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
+	vmx_get_segment(vcpu, &var, VCPU_SREG_SS);
+	vmx_set_segment(vcpu, &var, VCPU_SREG_SS);
+
+	vmx_get_segment(vcpu, &var, VCPU_SREG_CS);
+	vmx_set_segment(vcpu, &var, VCPU_SREG_CS);
+
+	vmx_get_segment(vcpu, &var, VCPU_SREG_ES);
+	vmx_set_segment(vcpu, &var, VCPU_SREG_ES);
 
-	vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
-	vmcs_write32(GUEST_CS_LIMIT, 0xffff);
-	if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
-		vmcs_writel(GUEST_CS_BASE, 0xf0000);
-	vmcs_write16(GUEST_CS_SELECTOR, vmcs_readl(GUEST_CS_BASE) >> 4);
+	vmx_get_segment(vcpu, &var, VCPU_SREG_DS);
+	vmx_set_segment(vcpu, &var, VCPU_SREG_DS);
 
-	fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
-	fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
-	fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
-	fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
+	vmx_get_segment(vcpu, &var, VCPU_SREG_GS);
+	vmx_set_segment(vcpu, &var, VCPU_SREG_GS);
+
+	vmx_get_segment(vcpu, &var, VCPU_SREG_FS);
+	vmx_set_segment(vcpu, &var, VCPU_SREG_FS);
 
 continue_rmode:
 	kvm_mmu_reset_context(vcpu);
@@ -3243,6 +3251,44 @@ static void vmx_set_segment(struct kvm_vcpu *vcpu,
 
 	vmcs_write32(sf->ar_bytes, ar);
 	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
+
+	/*
+	 * Fix segments for real mode guest in hosts that don't have
+	 * "unrestricted_mode" or it was disabled.
+	 * This is done to allow migration of the guests from hosts with
+	 * unrestricted guest like Westmere to older host that don't have
+	 * unrestricted guest like Nehelem.
+	 */
+	if (!enable_unrestricted_guest && vmx->rmode.vm86_active) {
+		switch (seg) {
+		case VCPU_SREG_CS:
+			vmcs_write32(GUEST_CS_AR_BYTES, 0xf3);
+			vmcs_write32(GUEST_CS_LIMIT, 0xffff);
+			if (vmcs_readl(GUEST_CS_BASE) == 0xffff0000)
+				vmcs_writel(GUEST_CS_BASE, 0xf0000);
+			vmcs_write16(GUEST_CS_SELECTOR,
+				     vmcs_readl(GUEST_CS_BASE) >> 4);
+			break;
+		case VCPU_SREG_ES:
+			fix_rmode_seg(VCPU_SREG_ES, &vmx->rmode.es);
+			break;
+		case VCPU_SREG_DS:
+			fix_rmode_seg(VCPU_SREG_DS, &vmx->rmode.ds);
+			break;
+		case VCPU_SREG_GS:
+			fix_rmode_seg(VCPU_SREG_GS, &vmx->rmode.gs);
+			break;
+		case VCPU_SREG_FS:
+			fix_rmode_seg(VCPU_SREG_FS, &vmx->rmode.fs);
+			break;
+		case VCPU_SREG_SS:
+			vmcs_write16(GUEST_SS_SELECTOR,
+				     vmcs_readl(GUEST_SS_BASE) >> 4);
+			vmcs_write32(GUEST_SS_LIMIT, 0xffff);
+			vmcs_write32(GUEST_SS_AR_BYTES, 0xf3);
+			break;
+		}
+	}
 }
 
 static void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)