author    Avi Kivity <avi@redhat.com>	2011-04-20 06:37:53 -0400
committer Avi Kivity <avi@redhat.com>	2011-05-22 08:35:20 -0400
commit    4bff1e86ad286d4b3a54902540abeeaf95e64db3 (patch)
tree      b2c55a41ec05a43b02da404c784d968276dbc0c6
parent    ca1d4a9e772bde0a0b8cda61ee9fdca29f80f361 (diff)
KVM: x86 emulator: drop vcpu argument from segment/gdt/idt callbacks
Making the emulator caller agnostic.

[Takuya Yoshikawa: fix typo leading to LDT failures]

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
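The conversion is mechanical: each segment/GDT/IDT callback in struct x86_emulate_ops gains a struct x86_emulate_ctxt * as its first parameter and drops the trailing struct kvm_vcpu *, so the emulator core no longer refers to the vcpu at these call sites. On the KVM side the converted callbacks recover the vcpu through emul_to_vcpu(); its definition is not part of this diff, but it is presumably a container_of()-style lookup along these lines (field name assumed):

	/* Assumed shape of emul_to_vcpu() (not shown in this patch): the
	 * emulation context is embedded in the vcpu, so the backend can map
	 * a ctxt pointer back to its vcpu without help from the emulator. */
	#define emul_to_vcpu(ctxt) \
		container_of(ctxt, struct kvm_vcpu, arch.emulate_ctxt)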
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h |  22
-rw-r--r--  arch/x86/kvm/emulate.c             | 112
-rw-r--r--  arch/x86/kvm/x86.c                 |  39
3 files changed, 90 insertions(+), 83 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 1348bdf14a43..656046a1bd51 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -163,15 +163,19 @@ struct x86_emulate_ops {
 			int size, unsigned short port, const void *val,
 			unsigned int count);
 
-	bool (*get_cached_descriptor)(struct desc_struct *desc, u32 *base3,
-				      int seg, struct kvm_vcpu *vcpu);
-	void (*set_cached_descriptor)(struct desc_struct *desc, u32 base3,
-				      int seg, struct kvm_vcpu *vcpu);
-	u16 (*get_segment_selector)(int seg, struct kvm_vcpu *vcpu);
-	void (*set_segment_selector)(u16 sel, int seg, struct kvm_vcpu *vcpu);
-	unsigned long (*get_cached_segment_base)(int seg, struct kvm_vcpu *vcpu);
-	void (*get_gdt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
-	void (*get_idt)(struct desc_ptr *dt, struct kvm_vcpu *vcpu);
+	bool (*get_cached_descriptor)(struct x86_emulate_ctxt *ctxt,
+				      struct desc_struct *desc, u32 *base3,
+				      int seg);
+	void (*set_cached_descriptor)(struct x86_emulate_ctxt *ctxt,
+				      struct desc_struct *desc, u32 base3,
+				      int seg);
+	u16 (*get_segment_selector)(struct x86_emulate_ctxt *ctxt, int seg);
+	void (*set_segment_selector)(struct x86_emulate_ctxt *ctxt,
+				     u16 sel, int seg);
+	unsigned long (*get_cached_segment_base)(struct x86_emulate_ctxt *ctxt,
+						 int seg);
+	void (*get_gdt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
+	void (*get_idt)(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt);
 	ulong (*get_cr)(int cr, struct kvm_vcpu *vcpu);
 	int (*set_cr)(int cr, ulong val, struct kvm_vcpu *vcpu);
 	int (*cpl)(struct kvm_vcpu *vcpu);
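With the vcpu argument gone from these hooks, any host that can populate struct x86_emulate_ops can drive the emulator. A minimal sketch of what that enables, using hypothetical names (a test harness would embed the context in its own state, just as x86.c does with the vcpu):

	/* Hypothetical non-KVM backend for illustration: the harness embeds
	 * the emulator context and recovers its own state with container_of(),
	 * mirroring what the KVM callbacks below do via emul_to_vcpu(). */
	struct test_cpu {
		struct x86_emulate_ctxt ctxt;
		u16 selectors[8];	/* indexed by the VCPU_SREG_* constants */
	};

	static u16 test_get_segment_selector(struct x86_emulate_ctxt *ctxt, int seg)
	{
		struct test_cpu *tc = container_of(ctxt, struct test_cpu, ctxt);

		return tc->selectors[seg];
	}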
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8af08a16f4dd..9602889f0f7f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -495,7 +495,7 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 		return 0;
 
-	return ops->get_cached_segment_base(seg, ctxt->vcpu);
+	return ops->get_cached_segment_base(ctxt, seg);
 }
 
 static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
@@ -573,8 +573,8 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 			return emulate_gp(ctxt, 0);
 		break;
 	default:
-		usable = ctxt->ops->get_cached_descriptor(&desc, NULL, addr.seg,
-							  ctxt->vcpu);
+		usable = ctxt->ops->get_cached_descriptor(ctxt, &desc, NULL,
+							  addr.seg);
 		if (!usable)
 			goto bad;
 		/* code segment or read-only data segment */
@@ -597,7 +597,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 			goto bad;
 	}
 	cpl = ctxt->ops->cpl(ctxt->vcpu);
-	rpl = ctxt->ops->get_segment_selector(addr.seg, ctxt->vcpu) & 3;
+	rpl = ctxt->ops->get_segment_selector(ctxt, addr.seg) & 3;
 	cpl = max(cpl, rpl);
 	if (!(desc.type & 8)) {
 		/* data segment */
@@ -1142,14 +1142,14 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 	if (selector & 1 << 2) {
 		struct desc_struct desc;
 		memset (dt, 0, sizeof *dt);
-		if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR,
-						ctxt->vcpu))
+		if (!ops->get_cached_descriptor(ctxt, &desc, NULL,
+						VCPU_SREG_LDTR))
 			return;
 
 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
 		dt->address = get_desc_base(&desc);
 	} else
-		ops->get_gdt(dt, ctxt->vcpu);
+		ops->get_gdt(ctxt, dt);
 }
 
 /* allowed just for 8 bytes segments */
@@ -1304,8 +1304,8 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		return ret;
 	}
 load:
-	ops->set_segment_selector(selector, seg, ctxt->vcpu);
-	ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu);
+	ops->set_segment_selector(ctxt, selector, seg);
+	ops->set_cached_descriptor(ctxt, &seg_desc, 0, seg);
 	return X86EMUL_CONTINUE;
 exception:
 	emulate_exception(ctxt, err_vec, err_code, true);
@@ -1446,7 +1446,7 @@ static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
 {
 	struct decode_cache *c = &ctxt->decode;
 
-	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
+	c->src.val = ops->get_segment_selector(ctxt, seg);
 
 	return em_push(ctxt);
 }
@@ -1527,7 +1527,7 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
 
 	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
 
-	c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+	c->src.val = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
 	rc = em_push(ctxt);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -1537,7 +1537,7 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	ops->get_idt(&dt, ctxt->vcpu);
+	ops->get_idt(ctxt, &dt);
 
 	eip_addr = dt.address + (irq << 2);
 	cs_addr = dt.address + (irq << 2) + 2;
@@ -1814,7 +1814,7 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 			 struct desc_struct *ss)
 {
 	memset(cs, 0, sizeof(struct desc_struct));
-	ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu);
+	ops->get_cached_descriptor(ctxt, cs, NULL, VCPU_SREG_CS);
 	memset(ss, 0, sizeof(struct desc_struct));
 
 	cs->l = 0; /* will be adjusted later */
@@ -1861,10 +1861,10 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		cs.d = 0;
 		cs.l = 1;
 	}
-	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
-	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
+	ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
+	ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
+	ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
 
 	c->regs[VCPU_REGS_RCX] = c->eip;
 	if (is_long_mode(ctxt->vcpu)) {
@@ -1933,10 +1933,10 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		cs.l = 1;
 	}
 
-	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
-	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
+	ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
+	ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
+	ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
 
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
 	c->eip = msr_data;
@@ -1990,10 +1990,10 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	cs_sel |= SELECTOR_RPL_MASK;
 	ss_sel |= SELECTOR_RPL_MASK;
 
-	ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu);
-	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
+	ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
+	ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
+	ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
 
 	c->eip = c->regs[VCPU_REGS_RDX];
 	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
@@ -2024,7 +2024,7 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 	unsigned mask = (1 << len) - 1;
 	unsigned long base;
 
-	ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu);
+	ops->get_cached_descriptor(ctxt, &tr_seg, &base3, VCPU_SREG_TR);
 	if (!tr_seg.p)
 		return false;
 	if (desc_limit_scaled(&tr_seg) < 103)
@@ -2079,11 +2079,11 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
 	tss->si = c->regs[VCPU_REGS_RSI];
 	tss->di = c->regs[VCPU_REGS_RDI];
 
-	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
-	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
-	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
-	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
-	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+	tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
+	tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
+	tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
+	tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
+	tss->ldt = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
 }
 
 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
@@ -2108,11 +2108,11 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	 * SDM says that segment selectors are loaded before segment
 	 * descriptors
 	 */
-	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
-	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
-	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
-	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+	ops->set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
+	ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
+	ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
+	ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
+	ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
 
 	/*
 	 * Now load segment descriptors. If fault happenes at this stage
@@ -2199,13 +2199,13 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
 	tss->esi = c->regs[VCPU_REGS_RSI];
 	tss->edi = c->regs[VCPU_REGS_RDI];
 
-	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
-	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
-	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
-	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
-	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
-	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
-	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+	tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
+	tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
+	tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
+	tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
+	tss->fs = ops->get_segment_selector(ctxt, VCPU_SREG_FS);
+	tss->gs = ops->get_segment_selector(ctxt, VCPU_SREG_GS);
+	tss->ldt_selector = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
 }
 
 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
@@ -2232,13 +2232,13 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	 * SDM says that segment selectors are loaded before segment
 	 * descriptors
 	 */
-	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
-	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
-	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
-	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
-	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
-	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
-	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
+	ops->set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+	ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
+	ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
+	ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
+	ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
+	ops->set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
+	ops->set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
 
 	/*
 	 * Now load segment descriptors. If fault happenes at this stage
@@ -2320,9 +2320,9 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 {
 	struct desc_struct curr_tss_desc, next_tss_desc;
 	int ret;
-	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
+	u16 old_tss_sel = ops->get_segment_selector(ctxt, VCPU_SREG_TR);
 	ulong old_tss_base =
-		ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu);
+		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
 	u32 desc_limit;
 
 	/* FIXME: old_tss_base == ~0 ? */
@@ -2383,8 +2383,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	}
 
 	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
-	ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu);
-	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
+	ops->set_cached_descriptor(ctxt, &next_tss_desc, 0, VCPU_SREG_TR);
+	ops->set_segment_selector(ctxt, tss_selector, VCPU_SREG_TR);
 
 	if (has_error_code) {
 		struct decode_cache *c = &ctxt->decode;
@@ -2475,7 +2475,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	ulong old_eip;
 	int rc;
 
-	old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+	old_cs = ctxt->ops->get_segment_selector(ctxt, VCPU_SREG_CS);
 	old_eip = c->eip;
 
 	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
@@ -3743,7 +3743,7 @@ special_insn:
 			rc = emulate_ud(ctxt);
 			goto done;
 		}
-		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
+		c->dst.val = ops->get_segment_selector(ctxt, c->modrm_reg);
 		break;
 	case 0x8d: /* lea r16/r32, m */
 		c->dst.val = c->src.addr.mem.ea;
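Note that the context lines above still reach through ctxt->vcpu for the unconverted hooks (cpl(), get_cr()/set_cr(), get_msr()) and for is_long_mode(); this patch converts only the segment, GDT, and IDT callbacks, and the remaining ones presumably receive the same treatment in follow-up patches.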
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e9040a9b25c6..6a7fbf671b26 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4237,28 +4237,29 @@ static int emulator_get_cpl(struct kvm_vcpu *vcpu)
 	return kvm_x86_ops->get_cpl(vcpu);
 }
 
-static void emulator_get_gdt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
+static void emulator_get_gdt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
 {
-	kvm_x86_ops->get_gdt(vcpu, dt);
+	kvm_x86_ops->get_gdt(emul_to_vcpu(ctxt), dt);
 }
 
-static void emulator_get_idt(struct desc_ptr *dt, struct kvm_vcpu *vcpu)
+static void emulator_get_idt(struct x86_emulate_ctxt *ctxt, struct desc_ptr *dt)
 {
-	kvm_x86_ops->get_idt(vcpu, dt);
+	kvm_x86_ops->get_idt(emul_to_vcpu(ctxt), dt);
 }
 
-static unsigned long emulator_get_cached_segment_base(int seg,
-						      struct kvm_vcpu *vcpu)
+static unsigned long emulator_get_cached_segment_base(
+	struct x86_emulate_ctxt *ctxt, int seg)
 {
-	return get_segment_base(vcpu, seg);
+	return get_segment_base(emul_to_vcpu(ctxt), seg);
 }
 
-static bool emulator_get_cached_descriptor(struct desc_struct *desc, u32 *base3,
-					   int seg, struct kvm_vcpu *vcpu)
+static bool emulator_get_cached_descriptor(struct x86_emulate_ctxt *ctxt,
+					   struct desc_struct *desc, u32 *base3,
+					   int seg)
 {
 	struct kvm_segment var;
 
-	kvm_get_segment(vcpu, &var, seg);
+	kvm_get_segment(emul_to_vcpu(ctxt), &var, seg);
 
 	if (var.unusable)
 		return false;
@@ -4283,9 +4284,11 @@ static bool emulator_get_cached_descriptor(struct desc_struct *desc, u32 *base3,
 	return true;
 }
 
-static void emulator_set_cached_descriptor(struct desc_struct *desc, u32 base3,
-					   int seg, struct kvm_vcpu *vcpu)
+static void emulator_set_cached_descriptor(struct x86_emulate_ctxt *ctxt,
+					   struct desc_struct *desc, u32 base3,
+					   int seg)
 {
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
 	struct kvm_segment var;
 
 	/* needed to preserve selector */
@@ -4314,22 +4317,22 @@ static void emulator_set_cached_descriptor(struct desc_struct *desc, u32 base3,
 	return;
 }
 
-static u16 emulator_get_segment_selector(int seg, struct kvm_vcpu *vcpu)
+static u16 emulator_get_segment_selector(struct x86_emulate_ctxt *ctxt, int seg)
 {
 	struct kvm_segment kvm_seg;
 
-	kvm_get_segment(vcpu, &kvm_seg, seg);
+	kvm_get_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
 	return kvm_seg.selector;
 }
 
-static void emulator_set_segment_selector(u16 sel, int seg,
-					  struct kvm_vcpu *vcpu)
+static void emulator_set_segment_selector(struct x86_emulate_ctxt *ctxt,
+					  u16 sel, int seg)
 {
 	struct kvm_segment kvm_seg;
 
-	kvm_get_segment(vcpu, &kvm_seg, seg);
+	kvm_get_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
 	kvm_seg.selector = sel;
-	kvm_set_segment(vcpu, &kvm_seg, seg);
+	kvm_set_segment(emul_to_vcpu(ctxt), &kvm_seg, seg);
 }
 
 static void emulator_get_fpu(struct x86_emulate_ctxt *ctxt)
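For reference, these emulator_* helpers are installed through the emulator ops table defined elsewhere in x86.c; after this patch the relevant entries would look roughly like this (the table itself is outside the hunks shown above):

	static struct x86_emulate_ops emulate_ops = {
		/* ... */
		.get_cached_descriptor   = emulator_get_cached_descriptor,
		.set_cached_descriptor   = emulator_set_cached_descriptor,
		.get_segment_selector    = emulator_get_segment_selector,
		.set_segment_selector    = emulator_set_segment_selector,
		.get_cached_segment_base = emulator_get_cached_segment_base,
		.get_gdt                 = emulator_get_gdt,
		.get_idt                 = emulator_get_idt,
		/* ... */
	};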