diff options
author | Avi Kivity <avi@redhat.com> | 2011-04-20 06:37:53 -0400 |
---|---|---|
committer | Avi Kivity <avi@redhat.com> | 2011-05-22 08:35:20 -0400 |
commit | 4bff1e86ad286d4b3a54902540abeeaf95e64db3 (patch) | |
tree | b2c55a41ec05a43b02da404c784d968276dbc0c6 /arch/x86/kvm/emulate.c | |
parent | ca1d4a9e772bde0a0b8cda61ee9fdca29f80f361 (diff) |
KVM: x86 emulator: drop vcpu argument from segment/gdt/idt callbacks
Making the emulator caller-agnostic.
[Takuya Yoshikawa: fix typo leading to LDT failures]
Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r-- | arch/x86/kvm/emulate.c | 112 |
1 file changed, 56 insertions, 56 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c index 8af08a16f4dd..9602889f0f7f 100644 --- a/arch/x86/kvm/emulate.c +++ b/arch/x86/kvm/emulate.c | |||
@@ -495,7 +495,7 @@ static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, | |||
495 | if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) | 495 | if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS) |
496 | return 0; | 496 | return 0; |
497 | 497 | ||
498 | return ops->get_cached_segment_base(seg, ctxt->vcpu); | 498 | return ops->get_cached_segment_base(ctxt, seg); |
499 | } | 499 | } |
500 | 500 | ||
501 | static unsigned seg_override(struct x86_emulate_ctxt *ctxt, | 501 | static unsigned seg_override(struct x86_emulate_ctxt *ctxt, |
@@ -573,8 +573,8 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, | |||
573 | return emulate_gp(ctxt, 0); | 573 | return emulate_gp(ctxt, 0); |
574 | break; | 574 | break; |
575 | default: | 575 | default: |
576 | usable = ctxt->ops->get_cached_descriptor(&desc, NULL, addr.seg, | 576 | usable = ctxt->ops->get_cached_descriptor(ctxt, &desc, NULL, |
577 | ctxt->vcpu); | 577 | addr.seg); |
578 | if (!usable) | 578 | if (!usable) |
579 | goto bad; | 579 | goto bad; |
580 | /* code segment or read-only data segment */ | 580 | /* code segment or read-only data segment */ |
@@ -597,7 +597,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt, | |||
597 | goto bad; | 597 | goto bad; |
598 | } | 598 | } |
599 | cpl = ctxt->ops->cpl(ctxt->vcpu); | 599 | cpl = ctxt->ops->cpl(ctxt->vcpu); |
600 | rpl = ctxt->ops->get_segment_selector(addr.seg, ctxt->vcpu) & 3; | 600 | rpl = ctxt->ops->get_segment_selector(ctxt, addr.seg) & 3; |
601 | cpl = max(cpl, rpl); | 601 | cpl = max(cpl, rpl); |
602 | if (!(desc.type & 8)) { | 602 | if (!(desc.type & 8)) { |
603 | /* data segment */ | 603 | /* data segment */ |
@@ -1142,14 +1142,14 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt, | |||
1142 | if (selector & 1 << 2) { | 1142 | if (selector & 1 << 2) { |
1143 | struct desc_struct desc; | 1143 | struct desc_struct desc; |
1144 | memset (dt, 0, sizeof *dt); | 1144 | memset (dt, 0, sizeof *dt); |
1145 | if (!ops->get_cached_descriptor(&desc, NULL, VCPU_SREG_LDTR, | 1145 | if (!ops->get_cached_descriptor(ctxt, &desc, NULL, |
1146 | ctxt->vcpu)) | 1146 | VCPU_SREG_LDTR)) |
1147 | return; | 1147 | return; |
1148 | 1148 | ||
1149 | dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ | 1149 | dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */ |
1150 | dt->address = get_desc_base(&desc); | 1150 | dt->address = get_desc_base(&desc); |
1151 | } else | 1151 | } else |
1152 | ops->get_gdt(dt, ctxt->vcpu); | 1152 | ops->get_gdt(ctxt, dt); |
1153 | } | 1153 | } |
1154 | 1154 | ||
1155 | /* allowed just for 8 bytes segments */ | 1155 | /* allowed just for 8 bytes segments */ |
@@ -1304,8 +1304,8 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt, | |||
1304 | return ret; | 1304 | return ret; |
1305 | } | 1305 | } |
1306 | load: | 1306 | load: |
1307 | ops->set_segment_selector(selector, seg, ctxt->vcpu); | 1307 | ops->set_segment_selector(ctxt, selector, seg); |
1308 | ops->set_cached_descriptor(&seg_desc, 0, seg, ctxt->vcpu); | 1308 | ops->set_cached_descriptor(ctxt, &seg_desc, 0, seg); |
1309 | return X86EMUL_CONTINUE; | 1309 | return X86EMUL_CONTINUE; |
1310 | exception: | 1310 | exception: |
1311 | emulate_exception(ctxt, err_vec, err_code, true); | 1311 | emulate_exception(ctxt, err_vec, err_code, true); |
@@ -1446,7 +1446,7 @@ static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, | |||
1446 | { | 1446 | { |
1447 | struct decode_cache *c = &ctxt->decode; | 1447 | struct decode_cache *c = &ctxt->decode; |
1448 | 1448 | ||
1449 | c->src.val = ops->get_segment_selector(seg, ctxt->vcpu); | 1449 | c->src.val = ops->get_segment_selector(ctxt, seg); |
1450 | 1450 | ||
1451 | return em_push(ctxt); | 1451 | return em_push(ctxt); |
1452 | } | 1452 | } |
@@ -1527,7 +1527,7 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, | |||
1527 | 1527 | ||
1528 | ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); | 1528 | ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC); |
1529 | 1529 | ||
1530 | c->src.val = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu); | 1530 | c->src.val = ops->get_segment_selector(ctxt, VCPU_SREG_CS); |
1531 | rc = em_push(ctxt); | 1531 | rc = em_push(ctxt); |
1532 | if (rc != X86EMUL_CONTINUE) | 1532 | if (rc != X86EMUL_CONTINUE) |
1533 | return rc; | 1533 | return rc; |
@@ -1537,7 +1537,7 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt, | |||
1537 | if (rc != X86EMUL_CONTINUE) | 1537 | if (rc != X86EMUL_CONTINUE) |
1538 | return rc; | 1538 | return rc; |
1539 | 1539 | ||
1540 | ops->get_idt(&dt, ctxt->vcpu); | 1540 | ops->get_idt(ctxt, &dt); |
1541 | 1541 | ||
1542 | eip_addr = dt.address + (irq << 2); | 1542 | eip_addr = dt.address + (irq << 2); |
1543 | cs_addr = dt.address + (irq << 2) + 2; | 1543 | cs_addr = dt.address + (irq << 2) + 2; |
@@ -1814,7 +1814,7 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt, | |||
1814 | struct desc_struct *ss) | 1814 | struct desc_struct *ss) |
1815 | { | 1815 | { |
1816 | memset(cs, 0, sizeof(struct desc_struct)); | 1816 | memset(cs, 0, sizeof(struct desc_struct)); |
1817 | ops->get_cached_descriptor(cs, NULL, VCPU_SREG_CS, ctxt->vcpu); | 1817 | ops->get_cached_descriptor(ctxt, cs, NULL, VCPU_SREG_CS); |
1818 | memset(ss, 0, sizeof(struct desc_struct)); | 1818 | memset(ss, 0, sizeof(struct desc_struct)); |
1819 | 1819 | ||
1820 | cs->l = 0; /* will be adjusted later */ | 1820 | cs->l = 0; /* will be adjusted later */ |
@@ -1861,10 +1861,10 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
1861 | cs.d = 0; | 1861 | cs.d = 0; |
1862 | cs.l = 1; | 1862 | cs.l = 1; |
1863 | } | 1863 | } |
1864 | ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu); | 1864 | ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS); |
1865 | ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu); | 1865 | ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS); |
1866 | ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu); | 1866 | ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS); |
1867 | ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu); | 1867 | ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS); |
1868 | 1868 | ||
1869 | c->regs[VCPU_REGS_RCX] = c->eip; | 1869 | c->regs[VCPU_REGS_RCX] = c->eip; |
1870 | if (is_long_mode(ctxt->vcpu)) { | 1870 | if (is_long_mode(ctxt->vcpu)) { |
@@ -1933,10 +1933,10 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
1933 | cs.l = 1; | 1933 | cs.l = 1; |
1934 | } | 1934 | } |
1935 | 1935 | ||
1936 | ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu); | 1936 | ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS); |
1937 | ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu); | 1937 | ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS); |
1938 | ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu); | 1938 | ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS); |
1939 | ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu); | 1939 | ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS); |
1940 | 1940 | ||
1941 | ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data); | 1941 | ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data); |
1942 | c->eip = msr_data; | 1942 | c->eip = msr_data; |
@@ -1990,10 +1990,10 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops) | |||
1990 | cs_sel |= SELECTOR_RPL_MASK; | 1990 | cs_sel |= SELECTOR_RPL_MASK; |
1991 | ss_sel |= SELECTOR_RPL_MASK; | 1991 | ss_sel |= SELECTOR_RPL_MASK; |
1992 | 1992 | ||
1993 | ops->set_cached_descriptor(&cs, 0, VCPU_SREG_CS, ctxt->vcpu); | 1993 | ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS); |
1994 | ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu); | 1994 | ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS); |
1995 | ops->set_cached_descriptor(&ss, 0, VCPU_SREG_SS, ctxt->vcpu); | 1995 | ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS); |
1996 | ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu); | 1996 | ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS); |
1997 | 1997 | ||
1998 | c->eip = c->regs[VCPU_REGS_RDX]; | 1998 | c->eip = c->regs[VCPU_REGS_RDX]; |
1999 | c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX]; | 1999 | c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX]; |
@@ -2024,7 +2024,7 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt, | |||
2024 | unsigned mask = (1 << len) - 1; | 2024 | unsigned mask = (1 << len) - 1; |
2025 | unsigned long base; | 2025 | unsigned long base; |
2026 | 2026 | ||
2027 | ops->get_cached_descriptor(&tr_seg, &base3, VCPU_SREG_TR, ctxt->vcpu); | 2027 | ops->get_cached_descriptor(ctxt, &tr_seg, &base3, VCPU_SREG_TR); |
2028 | if (!tr_seg.p) | 2028 | if (!tr_seg.p) |
2029 | return false; | 2029 | return false; |
2030 | if (desc_limit_scaled(&tr_seg) < 103) | 2030 | if (desc_limit_scaled(&tr_seg) < 103) |
@@ -2079,11 +2079,11 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt, | |||
2079 | tss->si = c->regs[VCPU_REGS_RSI]; | 2079 | tss->si = c->regs[VCPU_REGS_RSI]; |
2080 | tss->di = c->regs[VCPU_REGS_RDI]; | 2080 | tss->di = c->regs[VCPU_REGS_RDI]; |
2081 | 2081 | ||
2082 | tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu); | 2082 | tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES); |
2083 | tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu); | 2083 | tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS); |
2084 | tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu); | 2084 | tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS); |
2085 | tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu); | 2085 | tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS); |
2086 | tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu); | 2086 | tss->ldt = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR); |
2087 | } | 2087 | } |
2088 | 2088 | ||
2089 | static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, | 2089 | static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, |
@@ -2108,11 +2108,11 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt, | |||
2108 | * SDM says that segment selectors are loaded before segment | 2108 | * SDM says that segment selectors are loaded before segment |
2109 | * descriptors | 2109 | * descriptors |
2110 | */ | 2110 | */ |
2111 | ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu); | 2111 | ops->set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR); |
2112 | ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu); | 2112 | ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); |
2113 | ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu); | 2113 | ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); |
2114 | ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu); | 2114 | ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); |
2115 | ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu); | 2115 | ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); |
2116 | 2116 | ||
2117 | /* | 2117 | /* |
2118 | * Now load segment descriptors. If fault happenes at this stage | 2118 | * Now load segment descriptors. If fault happenes at this stage |
@@ -2199,13 +2199,13 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt, | |||
2199 | tss->esi = c->regs[VCPU_REGS_RSI]; | 2199 | tss->esi = c->regs[VCPU_REGS_RSI]; |
2200 | tss->edi = c->regs[VCPU_REGS_RDI]; | 2200 | tss->edi = c->regs[VCPU_REGS_RDI]; |
2201 | 2201 | ||
2202 | tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu); | 2202 | tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES); |
2203 | tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu); | 2203 | tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS); |
2204 | tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu); | 2204 | tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS); |
2205 | tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu); | 2205 | tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS); |
2206 | tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu); | 2206 | tss->fs = ops->get_segment_selector(ctxt, VCPU_SREG_FS); |
2207 | tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu); | 2207 | tss->gs = ops->get_segment_selector(ctxt, VCPU_SREG_GS); |
2208 | tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu); | 2208 | tss->ldt_selector = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR); |
2209 | } | 2209 | } |
2210 | 2210 | ||
2211 | static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, | 2211 | static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, |
@@ -2232,13 +2232,13 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt, | |||
2232 | * SDM says that segment selectors are loaded before segment | 2232 | * SDM says that segment selectors are loaded before segment |
2233 | * descriptors | 2233 | * descriptors |
2234 | */ | 2234 | */ |
2235 | ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu); | 2235 | ops->set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR); |
2236 | ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu); | 2236 | ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES); |
2237 | ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu); | 2237 | ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS); |
2238 | ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu); | 2238 | ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS); |
2239 | ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu); | 2239 | ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS); |
2240 | ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu); | 2240 | ops->set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS); |
2241 | ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu); | 2241 | ops->set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS); |
2242 | 2242 | ||
2243 | /* | 2243 | /* |
2244 | * Now load segment descriptors. If fault happenes at this stage | 2244 | * Now load segment descriptors. If fault happenes at this stage |
@@ -2320,9 +2320,9 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2320 | { | 2320 | { |
2321 | struct desc_struct curr_tss_desc, next_tss_desc; | 2321 | struct desc_struct curr_tss_desc, next_tss_desc; |
2322 | int ret; | 2322 | int ret; |
2323 | u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu); | 2323 | u16 old_tss_sel = ops->get_segment_selector(ctxt, VCPU_SREG_TR); |
2324 | ulong old_tss_base = | 2324 | ulong old_tss_base = |
2325 | ops->get_cached_segment_base(VCPU_SREG_TR, ctxt->vcpu); | 2325 | ops->get_cached_segment_base(ctxt, VCPU_SREG_TR); |
2326 | u32 desc_limit; | 2326 | u32 desc_limit; |
2327 | 2327 | ||
2328 | /* FIXME: old_tss_base == ~0 ? */ | 2328 | /* FIXME: old_tss_base == ~0 ? */ |
@@ -2383,8 +2383,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt, | |||
2383 | } | 2383 | } |
2384 | 2384 | ||
2385 | ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu); | 2385 | ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu); |
2386 | ops->set_cached_descriptor(&next_tss_desc, 0, VCPU_SREG_TR, ctxt->vcpu); | 2386 | ops->set_cached_descriptor(ctxt, &next_tss_desc, 0, VCPU_SREG_TR); |
2387 | ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu); | 2387 | ops->set_segment_selector(ctxt, tss_selector, VCPU_SREG_TR); |
2388 | 2388 | ||
2389 | if (has_error_code) { | 2389 | if (has_error_code) { |
2390 | struct decode_cache *c = &ctxt->decode; | 2390 | struct decode_cache *c = &ctxt->decode; |
@@ -2475,7 +2475,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt) | |||
2475 | ulong old_eip; | 2475 | ulong old_eip; |
2476 | int rc; | 2476 | int rc; |
2477 | 2477 | ||
2478 | old_cs = ctxt->ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu); | 2478 | old_cs = ctxt->ops->get_segment_selector(ctxt, VCPU_SREG_CS); |
2479 | old_eip = c->eip; | 2479 | old_eip = c->eip; |
2480 | 2480 | ||
2481 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); | 2481 | memcpy(&sel, c->src.valptr + c->op_bytes, 2); |
@@ -3743,7 +3743,7 @@ special_insn: | |||
3743 | rc = emulate_ud(ctxt); | 3743 | rc = emulate_ud(ctxt); |
3744 | goto done; | 3744 | goto done; |
3745 | } | 3745 | } |
3746 | c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu); | 3746 | c->dst.val = ops->get_segment_selector(ctxt, c->modrm_reg); |
3747 | break; | 3747 | break; |
3748 | case 0x8d: /* lea r16/r32, m */ | 3748 | case 0x8d: /* lea r16/r32, m */ |
3749 | c->dst.val = c->src.addr.mem.ea; | 3749 | c->dst.val = c->src.addr.mem.ea; |