author	Avi Kivity <avi@redhat.com>	2011-04-20 06:37:53 -0400
committer	Avi Kivity <avi@redhat.com>	2011-05-22 08:39:03 -0400
commit	717746e382e58f075642403eaac26bce0640b2c5 (patch)
tree	aa942f278b9382de50df2e9e15614c99eff7330e
parent	4bff1e86ad286d4b3a54902540abeeaf95e64db3 (diff)
KVM: x86 emulator: drop vcpu argument from cr/dr/cpl/msr callbacks
Making the emulator caller agnostic.

Signed-off-by: Avi Kivity <avi@redhat.com>
-rw-r--r--	arch/x86/include/asm/kvm_emulate.h	14
-rw-r--r--	arch/x86/kvm/emulate.c	84
-rw-r--r--	arch/x86/kvm/x86.c	34
3 files changed, 73 insertions(+), 59 deletions(-)
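The pattern throughout this patch: each x86_emulate_ops callback now takes the x86_emulate_ctxt rather than a kvm_vcpu, and the x86.c backend recovers its vcpu via emul_to_vcpu(), a container_of() lookup on the emulator context embedded in the vcpu. Below is a minimal standalone C sketch of that decoupling, not kernel code; the struct layouts and the cpl field are illustrative stand-ins.

/*
 * Minimal userspace sketch (not kernel code): emulator callbacks take
 * only the x86_emulate_ctxt, and the backend recovers its vcpu from
 * the embedded context via container_of().
 */
#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct x86_emulate_ctxt;

struct x86_emulate_ops {
	int (*cpl)(struct x86_emulate_ctxt *ctxt); /* no vcpu in the signature */
};

struct x86_emulate_ctxt {
	const struct x86_emulate_ops *ops;
};

/* stand-in for struct kvm_vcpu, which embeds the emulator context */
struct kvm_vcpu {
	int cpl;
	struct x86_emulate_ctxt emulate_ctxt;
};

#define emul_to_vcpu(ctxt) \
	container_of(ctxt, struct kvm_vcpu, emulate_ctxt)

/* backend callback: the vcpu lookup is now private to the backend */
static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
{
	return emul_to_vcpu(ctxt)->cpl;
}

static const struct x86_emulate_ops emulate_ops = {
	.cpl = emulator_get_cpl,
};

int main(void)
{
	struct kvm_vcpu vcpu = {
		.cpl = 3,
		.emulate_ctxt = { .ops = &emulate_ops },
	};
	struct x86_emulate_ctxt *ctxt = &vcpu.emulate_ctxt;

	/* emulator-side call site: no ctxt->vcpu dereference needed */
	printf("cpl = %d\n", ctxt->ops->cpl(ctxt));
	return 0;
}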
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 656046a1bd51..2c02e753ab82 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -176,13 +176,13 @@ struct x86_emulate_ops {
 				     int seg);
 	void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
 	void (*get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
-	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
-	int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
-	int (*cpl)(struct kvm_vcpu *vcpu);
-	int (*get_dr)(int dr, unsigned long *dest, struct kvm_vcpu *vcpu);
-	int (*set_dr)(int dr, unsigned long value, struct kvm_vcpu *vcpu);
-	int (*set_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 data);
-	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
+	ulong (*get_cr)(struct x86_emulate_ctxt *ctxt, int cr);
+	int (*set_cr)(struct x86_emulate_ctxt *ctxt, int cr, ulong val);
+	int (*cpl)(struct x86_emulate_ctxt *ctxt);
+	int (*get_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong *dest);
+	int (*set_dr)(struct x86_emulate_ctxt *ctxt, int dr, ulong value);
+	int (*set_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 data);
+	int (*get_msr)(struct x86_emulate_ctxt *ctxt, u32 msr_index, u64 *pdata);
 	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
 	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
 	int (*intercept)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 9602889f0f7f..33ad16b7db2b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -596,7 +596,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 		if (addr.ea > lim || (u32)(addr.ea + size - 1) > lim)
 			goto bad;
 	}
-	cpl = ctxt->ops->cpl(ctxt->vcpu);
+	cpl = ctxt->ops->cpl(ctxt);
 	rpl = ctxt->ops->get_segment_selector(ctxt, addr.seg) & 3;
 	cpl = max(cpl, rpl);
 	if (!(desc.type & 8)) {
@@ -1248,7 +1248,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 	rpl = selector & 3;
 	dpl = seg_desc.dpl;
-	cpl = ops->cpl(ctxt->vcpu);
+	cpl = ops->cpl(ctxt);
 
 	switch (seg) {
 	case VCPU_SREG_SS:
@@ -1407,7 +1407,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
 	int rc;
 	unsigned long val, change_mask;
 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-	int cpl = ops->cpl(ctxt->vcpu);
+	int cpl = ops->cpl(ctxt);
 
 	rc = emulate_pop(ctxt, ops, &val, len);
 	if (rc != X86EMUL_CONTINUE)
@@ -1852,7 +1852,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
-	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+	ops->get_msr(ctxt, MSR_STAR, &msr_data);
 	msr_data >>= 32;
 	cs_sel = (u16)(msr_data & 0xfffc);
 	ss_sel = (u16)(msr_data + 8);
@@ -1871,17 +1871,17 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 #ifdef CONFIG_X86_64
 		c->regs[VCPU_REGS_R11] = ctxt->eflags & ~EFLG_RF;
 
-		ops->get_msr(ctxt->vcpu,
+		ops->get_msr(ctxt,
 			     ctxt->mode == X86EMUL_MODE_PROT64 ?
 			     MSR_LSTAR : MSR_CSTAR, &msr_data);
 		c->eip = msr_data;
 
-		ops->get_msr(ctxt->vcpu, MSR_SYSCALL_MASK, &msr_data);
+		ops->get_msr(ctxt, MSR_SYSCALL_MASK, &msr_data);
 		ctxt->eflags &= ~(msr_data | EFLG_RF);
 #endif
 	} else {
 		/* legacy mode */
-		ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
+		ops->get_msr(ctxt, MSR_STAR, &msr_data);
 		c->eip = (u32)msr_data;
 
 		ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
@@ -1910,7 +1910,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
-	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT32:
 		if ((msr_data & 0xfffc) == 0x0)
@@ -1938,10 +1938,10 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
 	ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
 
-	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
+	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
 	c->eip = msr_data;
 
-	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_ESP, &msr_data);
+	ops->get_msr(ctxt, MSR_IA32_SYSENTER_ESP, &msr_data);
 	c->regs[VCPU_REGS_RSP] = msr_data;
 
 	return X86EMUL_CONTINUE;
@@ -1970,7 +1970,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 
 	cs.dpl = 3;
 	ss.dpl = 3;
-	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
+	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (usermode) {
 	case X86EMUL_MODE_PROT32:
 		cs_sel = (u16)(msr_data + 16);
@@ -2010,7 +2010,7 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
 	if (ctxt->mode == X86EMUL_MODE_VM86)
 		return true;
 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-	return ops->cpl(ctxt->vcpu) > iopl;
+	return ops->cpl(ctxt) > iopl;
 }
 
 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
@@ -2187,7 +2187,7 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
 {
 	struct decode_cache *c = &ctxt->decode;
 
-	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
+	tss->cr3 = ops->get_cr(ctxt, 3);
 	tss->eip = c->eip;
 	tss->eflags = ctxt->eflags;
 	tss->eax = c->regs[VCPU_REGS_RAX];
@@ -2215,7 +2215,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	struct decode_cache *c = &ctxt->decode;
 	int ret;
 
-	if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
+	if (ops->set_cr(ctxt, 3, tss->cr3))
 		return emulate_gp(ctxt, 0);
 	c->eip = tss->eip;
 	ctxt->eflags = tss->eflags | 2;
@@ -2338,7 +2338,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 
 	if (reason != TASK_SWITCH_IRET) {
 		if ((tss_selector & 3) > next_tss_desc.dpl ||
-		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
+		    ops->cpl(ctxt) > next_tss_desc.dpl)
 			return emulate_gp(ctxt, 0);
 	}
 
@@ -2382,7 +2382,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 				 &next_tss_desc);
 	}
 
-	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
+	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
 	ops->set_cached_descriptor(ctxt, &next_tss_desc, 0, VCPU_SREG_TR);
 	ops->set_segment_selector(ctxt, tss_selector, VCPU_SREG_TR);
 
@@ -2542,7 +2542,7 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
 	struct decode_cache *c = &ctxt->decode;
 	u64 tsc = 0;
 
-	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
+	ctxt->ops->get_msr(ctxt, MSR_IA32_TSC, &tsc);
 	c->regs[VCPU_REGS_RAX] = (u32)tsc;
 	c->regs[VCPU_REGS_RDX] = tsc >> 32;
 	return X86EMUL_CONTINUE;
@@ -2625,8 +2625,8 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
 		    ((new_val & X86_CR0_NW) && !(new_val & X86_CR0_CD)))
 			return emulate_gp(ctxt, 0);
 
-		cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
-		ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 
 		if ((new_val & X86_CR0_PG) && (efer & EFER_LME) &&
 		    !(cr4 & X86_CR4_PAE))
@@ -2652,8 +2652,8 @@ static int check_cr_write(struct x86_emulate_ctxt *ctxt)
 	case 4: {
 		u64 cr4, efer;
 
-		cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
-		ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
+		cr4 = ctxt->ops->get_cr(ctxt, 4);
+		ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 
 		if ((efer & EFER_LMA) && !(new_val & X86_CR4_PAE))
 			return emulate_gp(ctxt, 0);
@@ -2669,7 +2669,7 @@ static int check_dr7_gd(struct x86_emulate_ctxt *ctxt)
 {
 	unsigned long dr7;
 
-	ctxt->ops->get_dr(7, &dr7, ctxt->vcpu);
+	ctxt->ops->get_dr(ctxt, 7, &dr7);
 
 	/* Check if DR7.Global_Enable is set */
 	return dr7 & (1 << 13);
@@ -2684,7 +2684,7 @@ static int check_dr_read(struct x86_emulate_ctxt *ctxt)
 	if (dr > 7)
 		return emulate_ud(ctxt);
 
-	cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
+	cr4 = ctxt->ops->get_cr(ctxt, 4);
 	if ((cr4 & X86_CR4_DE) && (dr == 4 || dr == 5))
 		return emulate_ud(ctxt);
 
@@ -2710,7 +2710,7 @@ static int check_svme(struct x86_emulate_ctxt *ctxt)
 {
 	u64 efer;
 
-	ctxt->ops->get_msr(ctxt->vcpu, MSR_EFER, &efer);
+	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
 
 	if (!(efer & EFER_SVME))
 		return emulate_ud(ctxt);
@@ -2731,9 +2731,9 @@ static int check_svme_pa(struct x86_emulate_ctxt *ctxt)
 
 static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
 {
-	u64 cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
+	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
 
-	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt->vcpu))
+	if (cr4 & X86_CR4_TSD && ctxt->ops->cpl(ctxt))
 		return emulate_ud(ctxt);
 
 	return X86EMUL_CONTINUE;
@@ -2741,10 +2741,10 @@ static int check_rdtsc(struct x86_emulate_ctxt *ctxt)
 
 static int check_rdpmc(struct x86_emulate_ctxt *ctxt)
 {
-	u64 cr4 = ctxt->ops->get_cr(4, ctxt->vcpu);
+	u64 cr4 = ctxt->ops->get_cr(ctxt, 4);
 	u64 rcx = kvm_register_read(ctxt->vcpu, VCPU_REGS_RCX);
 
-	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt->vcpu)) ||
+	if ((!(cr4 & X86_CR4_PCE) && ctxt->ops->cpl(ctxt)) ||
 	    (rcx > 3))
 		return emulate_gp(ctxt, 0);
 
@@ -3514,13 +3514,13 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	if ((c->d & Sse)
-	    && ((ops->get_cr(0, ctxt->vcpu) & X86_CR0_EM)
-		|| !(ops->get_cr(4, ctxt->vcpu) & X86_CR4_OSFXSR))) {
+	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
+		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
 		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
-	if ((c->d & Sse) && (ops->get_cr(0, ctxt->vcpu) & X86_CR0_TS)) {
+	if ((c->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
 		rc = emulate_nm(ctxt);
 		goto done;
 	}
@@ -3533,7 +3533,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	}
 
 	/* Privileged instruction can be executed only in CPL=0 */
-	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
+	if ((c->d & Priv) && ops->cpl(ctxt)) {
 		rc = emulate_gp(ctxt, 0);
 		goto done;
 	}
@@ -4052,11 +4052,11 @@ twobyte_insn:
 		break;
 	case 4: /* smsw */
 		c->dst.bytes = 2;
-		c->dst.val = ops->get_cr(0, ctxt->vcpu);
+		c->dst.val = ops->get_cr(ctxt, 0);
 		break;
 	case 6: /* lmsw */
-		ops->set_cr(0, (ops->get_cr(0, ctxt->vcpu) & ~0x0eul) |
-			    (c->src.val & 0x0f), ctxt->vcpu);
+		ops->set_cr(ctxt, 0, (ops->get_cr(ctxt, 0) & ~0x0eul) |
+			    (c->src.val & 0x0f));
 		c->dst.type = OP_NONE;
 		break;
 	case 5: /* not defined */
@@ -4084,13 +4084,13 @@ twobyte_insn:
 	case 0x18: /* Grp16 (prefetch/nop) */
 		break;
 	case 0x20: /* mov cr, reg */
-		c->dst.val = ops->get_cr(c->modrm_reg, ctxt->vcpu);
+		c->dst.val = ops->get_cr(ctxt, c->modrm_reg);
 		break;
 	case 0x21: /* mov from dr to reg */
-		ops->get_dr(c->modrm_reg, &c->dst.val, ctxt->vcpu);
+		ops->get_dr(ctxt, c->modrm_reg, &c->dst.val);
 		break;
 	case 0x22: /* mov reg, cr */
-		if (ops->set_cr(c->modrm_reg, c->src.val, ctxt->vcpu)) {
+		if (ops->set_cr(ctxt, c->modrm_reg, c->src.val)) {
 			emulate_gp(ctxt, 0);
 			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
@@ -4098,9 +4098,9 @@ twobyte_insn:
 		c->dst.type = OP_NONE;
 		break;
 	case 0x23: /* mov from reg to dr */
-		if (ops->set_dr(c->modrm_reg, c->src.val &
+		if (ops->set_dr(ctxt, c->modrm_reg, c->src.val &
 				((ctxt->mode == X86EMUL_MODE_PROT64) ?
-				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
+				 ~0ULL : ~0U)) < 0) {
 			/* #UD condition is already handled by the code above */
 			emulate_gp(ctxt, 0);
 			rc = X86EMUL_PROPAGATE_FAULT;
@@ -4113,7 +4113,7 @@ twobyte_insn:
 		/* wrmsr */
 		msr_data = (u32)c->regs[VCPU_REGS_RAX]
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
-		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
+		if (ops->set_msr(ctxt, c->regs[VCPU_REGS_RCX], msr_data)) {
 			emulate_gp(ctxt, 0);
 			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
@@ -4122,7 +4122,7 @@ twobyte_insn:
 		break;
 	case 0x32:
 		/* rdmsr */
-		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
+		if (ops->get_msr(ctxt, c->regs[VCPU_REGS_RCX], &msr_data)) {
 			emulate_gp(ctxt, 0);
 			rc = X86EMUL_PROPAGATE_FAULT;
 			goto done;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 6a7fbf671b26..16373a5bfd01 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4160,15 +4160,15 @@ int emulate_clts(struct kvm_vcpu *vcpu)
 	return X86EMUL_CONTINUE;
 }
 
-int emulator_get_dr(int dr, unsigned long *dest, struct kvm_vcpu *vcpu)
+int emulator_get_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long *dest)
 {
-	return _kvm_get_dr(vcpu, dr, dest);
+	return _kvm_get_dr(emul_to_vcpu(ctxt), dr, dest);
 }
 
-int emulator_set_dr(int dr, unsigned long value, struct kvm_vcpu *vcpu)
+int emulator_set_dr(struct x86_emulate_ctxt *ctxt, int dr, unsigned long value)
 {
 
-	return __kvm_set_dr(vcpu, dr, value);
+	return __kvm_set_dr(emul_to_vcpu(ctxt), dr, value);
 }
 
 static u64 mk_cr_64(u64 curr_cr, u32 new_val)
4174static u64 mk_cr_64(u64 curr_cr, u32 new_val) 4174static u64 mk_cr_64(u64 curr_cr, u32 new_val)
@@ -4176,8 +4176,9 @@ static u64 mk_cr_64(u64 curr_cr, u32 new_val)
 	return (curr_cr & ~((1ULL << 32) - 1)) | new_val;
 }
 
-static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
+static unsigned long emulator_get_cr(struct x86_emulate_ctxt *ctxt, int cr)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	unsigned long value;
 
 	switch (cr) {
@@ -4204,8 +4205,9 @@ static unsigned long emulator_get_cr(int cr, struct kvm_vcpu *vcpu)
 	return value;
 }
 
-static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
+static int emulator_set_cr(struct x86_emulate_ctxt *ctxt, int cr, ulong val)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	int res = 0;
 
 	switch (cr) {
@@ -4232,9 +4234,9 @@ static int emulator_set_cr(int cr, unsigned long val, struct kvm_vcpu *vcpu)
 	return res;
 }
 
-static int emulator_get_cpl(struct kvm_vcpu *vcpu)
+static int emulator_get_cpl(struct x86_emulate_ctxt *ctxt)
 {
-	return kvm_x86_ops->get_cpl(vcpu);
+	return kvm_x86_ops->get_cpl(emul_to_vcpu(ctxt));
 }
 
 static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
@@ -4335,6 +4337,18 @@ static void emulator_set_segment_selector(struct x86_emulate_ctxt *ctxt,
 	kvm_set_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
 }
 
+static int emulator_get_msr(struct x86_emulate_ctxt *ctxt,
+			    u32 msr_index, u64 *pdata)
+{
+	return kvm_get_msr(emul_to_vcpu(ctxt), msr_index, pdata);
+}
+
+static int emulator_set_msr(struct x86_emulate_ctxt *ctxt,
+			    u32 msr_index, u64 data)
+{
+	return kvm_set_msr(emul_to_vcpu(ctxt), msr_index, data);
+}
+
 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
 {
 	preempt_disable();
@@ -4379,8 +4393,8 @@ static struct x86_emulate_ops emulate_ops = {
 	.cpl = emulator_get_cpl,
 	.get_dr = emulator_get_dr,
 	.set_dr = emulator_set_dr,
-	.set_msr = kvm_set_msr,
-	.get_msr = kvm_get_msr,
+	.set_msr = emulator_set_msr,
+	.get_msr = emulator_get_msr,
 	.get_fpu = emulator_get_fpu,
 	.put_fpu = emulator_put_fpu,
 	.intercept = emulator_intercept,