 arch/x86/kvm/svm.c | 14 +++++++-------
 arch/x86/kvm/vmx.c |  2 +-
 arch/x86/kvm/x86.c |  8 ++++----
 3 files changed, 12 insertions(+), 12 deletions(-)
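Note: every read and write of the guest EFLAGS below is routed through the kvm_get_rflags()/kvm_set_rflags() wrappers instead of the raw vmcb fields or kvm_x86_ops accessors, so the single-step trap-flag bookkeeping in x86.c is applied consistently. The following is only a rough sketch of what those wrappers add on top of the raw accessors, written from memory of the x86.c helpers of this era; the exact bodies live in arch/x86/kvm/x86.c and may differ.

	/* Sketch only -- approximates the x86.c wrappers, not copied from the tree. */
	unsigned long kvm_get_rflags(struct kvm_vcpu *vcpu)
	{
		unsigned long rflags = kvm_x86_ops->get_rflags(vcpu);

		/* Hide the trap flag injected on behalf of a single-stepping host debugger. */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
			rflags &= ~X86_EFLAGS_TF;
		return rflags;
	}

	void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
	{
		/* Re-arm single-step if the host debugger still wants it at this rip. */
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
		    kvm_is_linear_rip(vcpu, vcpu->arch.singlestep_rip))
			rflags |= X86_EFLAGS_TF;
		kvm_x86_ops->set_rflags(vcpu, rflags);
	}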
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 6bb15d583e47..2a193222c987 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -975,7 +975,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	svm_set_efer(&svm->vcpu, 0);
 	save->dr6 = 0xffff0ff0;
 	save->dr7 = 0x400;
-	save->rflags = 2;
+	kvm_set_rflags(&svm->vcpu, 2);
 	save->rip = 0x0000fff0;
 	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
 
@@ -2127,7 +2127,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
 	nested_vmcb->save.cr2 = vmcb->save.cr2;
 	nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
-	nested_vmcb->save.rflags = vmcb->save.rflags;
+	nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
 	nested_vmcb->save.rip = vmcb->save.rip;
 	nested_vmcb->save.rsp = vmcb->save.rsp;
 	nested_vmcb->save.rax = vmcb->save.rax;
@@ -2184,7 +2184,7 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	svm->vmcb->save.ds = hsave->save.ds;
 	svm->vmcb->save.gdtr = hsave->save.gdtr;
 	svm->vmcb->save.idtr = hsave->save.idtr;
-	svm->vmcb->save.rflags = hsave->save.rflags;
+	kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
 	svm_set_efer(&svm->vcpu, hsave->save.efer);
 	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
 	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
@@ -2312,7 +2312,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	hsave->save.efer = svm->vcpu.arch.efer;
 	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
 	hsave->save.cr4 = svm->vcpu.arch.cr4;
-	hsave->save.rflags = vmcb->save.rflags;
+	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
 	hsave->save.rip = kvm_rip_read(&svm->vcpu);
 	hsave->save.rsp = vmcb->save.rsp;
 	hsave->save.rax = vmcb->save.rax;
@@ -2323,7 +2323,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 
 	copy_vmcb_control_area(hsave, vmcb);
 
-	if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
+	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
 		svm->vcpu.arch.hflags |= HF_HIF_MASK;
 	else
 		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
@@ -2341,7 +2341,7 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	svm->vmcb->save.ds = nested_vmcb->save.ds;
 	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
 	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
-	svm->vmcb->save.rflags = nested_vmcb->save.rflags;
+	kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
 	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
 	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
 	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
@@ -3384,7 +3384,7 @@ static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
 	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
 		return 0;
 
-	ret = !!(vmcb->save.rflags & X86_EFLAGS_IF);
+	ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
 
 	if (is_guest_mode(vcpu))
 		return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 5b4cdcbd154c..d09833e45f6f 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -2113,7 +2113,7 @@ static int vmx_get_cpl(struct kvm_vcpu *vcpu)
 	if (!is_protmode(vcpu))
 		return 0;
 
-	if (vmx_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
+	if (kvm_get_rflags(vcpu) & X86_EFLAGS_VM) /* if virtual 8086 */
 		return 3;
 
 	return vmcs_read16(GUEST_CS_SELECTOR) & 3;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 934b4c6b0bf9..3a557eefd2fb 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4310,7 +4310,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 	kvm_x86_ops->get_cs_db_l_bits(vcpu, &cs_db, &cs_l);
 
 	vcpu->arch.emulate_ctxt.vcpu = vcpu;
-	vcpu->arch.emulate_ctxt.eflags = kvm_x86_ops->get_rflags(vcpu);
+	vcpu->arch.emulate_ctxt.eflags = kvm_get_rflags(vcpu);
 	vcpu->arch.emulate_ctxt.eip = kvm_rip_read(vcpu);
 	vcpu->arch.emulate_ctxt.mode =
 		(!is_protmode(vcpu)) ? X86EMUL_MODE_REAL :
@@ -4340,7 +4340,7 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq)
 	vcpu->arch.emulate_ctxt.eip = c->eip;
 	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
 	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
-	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+	kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
 
 	if (irq == NMI_VECTOR)
 		vcpu->arch.nmi_pending = false;
@@ -4473,7 +4473,7 @@ restart:
 	r = EMULATE_DONE;
 
 	toggle_interruptibility(vcpu, vcpu->arch.emulate_ctxt.interruptibility);
-	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+	kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
 	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
@@ -5592,7 +5592,7 @@ int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason,
 
 	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
 	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
-	kvm_x86_ops->set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
+	kvm_set_rflags(vcpu, vcpu->arch.emulate_ctxt.eflags);
 	kvm_make_request(KVM_REQ_EVENT, vcpu);
 	return EMULATE_DONE;
 }