Diffstat (limited to 'arch/x86/kvm/vmx.c')
-rw-r--r--  arch/x86/kvm/vmx.c  15
1 file changed, 4 insertions(+), 11 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 55dfc375f1ab..dd2a85c1c6f0 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1696,7 +1696,6 @@ static unsigned long vmx_get_rflags(struct kvm_vcpu *vcpu)
 static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 {
 	__set_bit(VCPU_EXREG_RFLAGS, (ulong *)&vcpu->arch.regs_avail);
-	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
 	to_vmx(vcpu)->rflags = rflags;
 	if (to_vmx(vcpu)->rmode.vm86_active) {
 		to_vmx(vcpu)->rmode.save_rflags = rflags;
@@ -3110,7 +3109,6 @@ static void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 	vmcs_writel(CR0_READ_SHADOW, cr0);
 	vmcs_writel(GUEST_CR0, hw_cr0);
 	vcpu->arch.cr0 = cr0;
-	__clear_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
 }
 
 static u64 construct_eptp(unsigned long root_hpa)
@@ -3220,8 +3218,10 @@ static u64 vmx_get_segment_base(struct kvm_vcpu *vcpu, int seg)
 	return vmx_read_guest_seg_base(to_vmx(vcpu), seg);
 }
 
-static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
+static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_vmx *vmx = to_vmx(vcpu);
+
 	if (!is_protmode(vcpu))
 		return 0;
 
@@ -3229,13 +3229,6 @@ static int __vmx_get_cpl(struct kvm_vcpu *vcpu)
 	    && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM)) /* if virtual 8086 */
 		return 3;
 
-	return vmx_read_guest_seg_selector(to_vmx(vcpu), VCPU_SREG_CS) & 3;
-}
-
-static int vmx_get_cpl(struct kvm_vcpu *vcpu)
-{
-	struct vcpu_vmx *vmx = to_vmx(vcpu);
-
 	/*
 	 * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations
 	 * fail; use the cache instead.
@@ -3246,7 +3239,7 @@ static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 
 	if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
 		__set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
-		vmx->cpl = __vmx_get_cpl(vcpu);
+		vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
 	}
 
 	return vmx->cpl;
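
For reference, below is a sketch of how vmx_get_cpl() reads once the hunks above are applied, with __vmx_get_cpl() folded into its only remaining caller. The two guards that appear only as unshown context in the hunks (the long-mode half of the virtual-8086 test and the real-mode early return) are reconstructed here as assumptions, marked in the comments, not quoted from the patch.

/*
 * Sketch: vmx_get_cpl() with this patch applied. Lines marked "assumed"
 * are context not visible in the hunks above and are reconstructed.
 */
static int vmx_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vcpu_vmx *vmx = to_vmx(vcpu);

	if (!is_protmode(vcpu))
		return 0;

	if (!is_long_mode(vcpu)				/* assumed: elided first half of the condition */
	    && (kvm_get_rflags(vcpu) & X86_EFLAGS_VM))	/* if virtual 8086 */
		return 3;

	/*
	 * If we enter real mode with cs.sel & 3 != 0, the normal CPL calculations
	 * fail; use the cache instead.
	 */
	if (unlikely(vmx->rmode.vm86_active))		/* assumed: guard referenced by the comment above */
		return 0;

	/* Compute the CPL once per availability window and cache it in vmx->cpl. */
	if (!test_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail)) {
		__set_bit(VCPU_EXREG_CPL, (ulong *)&vcpu->arch.regs_avail);
		vmx->cpl = vmx_read_guest_seg_selector(vmx, VCPU_SREG_CS) & 3;
	}

	return vmx->cpl;
}

With __vmx_get_cpl() gone, the selector read happens directly in the cached path, and the explicit __clear_bit(VCPU_EXREG_CPL, ...) invalidations removed from vmx_set_rflags() and vmx_set_cr0() are no longer needed to keep that helper's result coherent.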