author		Avi Kivity <avi@redhat.com>	2011-04-27 06:20:30 -0400
committer	Avi Kivity <avi@redhat.com>	2011-05-22 08:47:39 -0400
commit		1aa366163b8b69f660cf94fd5062fa44859e4318 (patch)
tree		29d6843cbe1c66a04c11dad24169ec5e1318e53d /arch/x86/kvm/emulate.c
parent		0a434bb2bf094f463ca3ca71ac42cea9e423048f (diff)

KVM: x86 emulator: consolidate segment accessors

Instead of separate accessors for the segment selector and cached
descriptor, use one accessor for both.  This simplifies the code
somewhat.

Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r--  arch/x86/kvm/emulate.c  122
1 file changed, 69 insertions(+), 53 deletions(-)
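The patch folds the four per-segment callbacks (get_cached_descriptor /
set_cached_descriptor and get_segment_selector / set_segment_selector)
into a single get_segment / set_segment pair, since a segment's selector
and its cached descriptor always travel together.  Below is a minimal,
self-contained sketch of that consolidated-accessor pattern; the types,
storage, and signatures are illustrative stand-ins, not the kernel's
x86_emulate_ops interface:

/*
 * sketch.c - illustrative only; "struct desc" and the segs[] backing
 * store are stand-ins, NOT kernel definitions.
 */
#include <stdio.h>

struct desc { unsigned type; };            /* stand-in for desc_struct */

struct seg_state {
	unsigned short sel;                /* segment selector */
	struct desc d;                     /* cached descriptor */
	unsigned base3;                    /* upper base bits (64-bit TSS/LDT) */
};

static struct seg_state segs[8];           /* one slot per segment register */

/* One accessor returns selector and descriptor together... */
static int get_segment(unsigned short *sel, struct desc *d,
		       unsigned *base3, unsigned seg)
{
	*sel = segs[seg].sel;
	*d = segs[seg].d;
	if (base3)
		*base3 = segs[seg].base3;
	return 1;                          /* "usable" */
}

/* ...and one accessor sets both at once. */
static void set_segment(unsigned short sel, struct desc *d,
			unsigned base3, unsigned seg)
{
	segs[seg].sel = sel;
	segs[seg].d = *d;
	segs[seg].base3 = base3;
}

/*
 * Callers that only need the selector use a small wrapper, as the
 * patch's static get_segment_selector() does: fetch both, keep one.
 */
static unsigned short get_segment_selector(unsigned seg)
{
	unsigned short sel;
	struct desc d;

	get_segment(&sel, &d, NULL, seg);
	return sel;
}

int main(void)
{
	struct desc d = { .type = 8 };     /* a code segment, say */

	set_segment(0x10, &d, 0, 1);
	printf("sel=%#x\n", (unsigned)get_segment_selector(1));
	return 0;
}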
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 3624f202b44..59992484f5f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -553,6 +553,26 @@ static int emulate_nm(struct x86_emulate_ctxt *ctxt)
 	return emulate_exception(ctxt, NM_VECTOR, 0, false);
 }
 
+static u16 get_segment_selector(struct x86_emulate_ctxt *ctxt, unsigned seg)
+{
+	u16 selector;
+	struct desc_struct desc;
+
+	ctxt->ops->get_segment(ctxt, &selector, &desc, NULL, seg);
+	return selector;
+}
+
+static void set_segment_selector(struct x86_emulate_ctxt *ctxt, u16 selector,
+				 unsigned seg)
+{
+	u16 dummy;
+	u32 base3;
+	struct desc_struct desc;
+
+	ctxt->ops->get_segment(ctxt, &dummy, &desc, &base3, seg);
+	ctxt->ops->set_segment(ctxt, selector, &desc, base3, seg);
+}
+
 static int __linearize(struct x86_emulate_ctxt *ctxt,
 		     struct segmented_address addr,
 		     unsigned size, bool write, bool fetch,
@@ -563,6 +583,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	bool usable;
 	ulong la;
 	u32 lim;
+	u16 sel;
 	unsigned cpl, rpl;
 
 	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
@@ -574,8 +595,8 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 			return emulate_gp(ctxt, 0);
 		break;
 	default:
-		usable = ctxt->ops->get_cached_descriptor(ctxt, &desc, NULL,
-						addr.seg);
+		usable = ctxt->ops->get_segment(ctxt, &sel, &desc, NULL,
+						addr.seg);
 		if (!usable)
 			goto bad;
 		/* code segment or read-only data segment */
@@ -598,7 +619,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 			goto bad;
 	}
 	cpl = ctxt->ops->cpl(ctxt);
-	rpl = ctxt->ops->get_segment_selector(ctxt, addr.seg) & 3;
+	rpl = sel & 3;
 	cpl = max(cpl, rpl);
 	if (!(desc.type & 8)) {
 		/* data segment */
@@ -1142,9 +1163,10 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 {
 	if (selector & 1 << 2) {
 		struct desc_struct desc;
+		u16 sel;
+
 		memset (dt, 0, sizeof *dt);
-		if (!ops->get_cached_descriptor(ctxt, &desc, NULL,
-						VCPU_SREG_LDTR))
+		if (!ops->get_segment(ctxt, &sel, &desc, NULL, VCPU_SREG_LDTR))
 			return;
 
 		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
@@ -1305,8 +1327,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		return ret;
 	}
 load:
-	ops->set_segment_selector(ctxt, selector, seg);
-	ops->set_cached_descriptor(ctxt, &seg_desc, 0, seg);
+	ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
 	return X86EMUL_CONTINUE;
 exception:
 	emulate_exception(ctxt, err_vec, err_code, true);
@@ -1464,7 +1485,7 @@ static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
 {
 	struct decode_cache *c = &ctxt->decode;
 
-	c->src.val = ops->get_segment_selector(ctxt, seg);
+	c->src.val = get_segment_selector(ctxt, seg);
 
 	return em_push(ctxt);
 }
@@ -1552,7 +1573,7 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
 
 	ctxt->eflags &= ~(EFLG_IF | EFLG_TF | EFLG_AC);
 
-	c->src.val = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
+	c->src.val = get_segment_selector(ctxt, VCPU_SREG_CS);
 	rc = em_push(ctxt);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -1838,8 +1859,10 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 			struct x86_emulate_ops *ops, struct desc_struct *cs,
 			struct desc_struct *ss)
 {
+	u16 selector;
+
 	memset(cs, 0, sizeof(struct desc_struct));
-	ops->get_cached_descriptor(ctxt, cs, NULL, VCPU_SREG_CS);
+	ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
 	memset(ss, 0, sizeof(struct desc_struct));
 
 	cs->l = 0;		/* will be adjusted later */
@@ -1888,10 +1911,8 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		cs.d = 0;
 		cs.l = 1;
 	}
-	ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
-	ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
-	ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
-	ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
+	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
 	c->regs[VCPU_REGS_RCX] = c->eip;
 	if (efer & EFER_LMA) {
@@ -1961,10 +1982,8 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		cs.l = 1;
 	}
 
-	ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
-	ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
-	ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
-	ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
+	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_EIP, &msr_data);
 	c->eip = msr_data;
@@ -2018,10 +2037,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	cs_sel |= SELECTOR_RPL_MASK;
 	ss_sel |= SELECTOR_RPL_MASK;
 
-	ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
-	ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);
-	ops->set_cached_descriptor(ctxt, &ss, 0, VCPU_SREG_SS);
-	ops->set_segment_selector(ctxt, ss_sel, VCPU_SREG_SS);
+	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);
+	ops->set_segment(ctxt, ss_sel, &ss, 0, VCPU_SREG_SS);
 
 	c->eip = c->regs[VCPU_REGS_RDX];
 	c->regs[VCPU_REGS_RSP] = c->regs[VCPU_REGS_RCX];
@@ -2048,11 +2065,11 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 	struct desc_struct tr_seg;
 	u32 base3;
 	int r;
-	u16 io_bitmap_ptr, perm, bit_idx = port & 0x7;
+	u16 tr, io_bitmap_ptr, perm, bit_idx = port & 0x7;
 	unsigned mask = (1 << len) - 1;
 	unsigned long base;
 
-	ops->get_cached_descriptor(ctxt, &tr_seg, &base3, VCPU_SREG_TR);
+	ops->get_segment(ctxt, &tr, &tr_seg, &base3, VCPU_SREG_TR);
 	if (!tr_seg.p)
 		return false;
 	if (desc_limit_scaled(&tr_seg) < 103)
@@ -2107,11 +2124,11 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
 	tss->si = c->regs[VCPU_REGS_RSI];
 	tss->di = c->regs[VCPU_REGS_RDI];
 
-	tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
-	tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
-	tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
-	tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
-	tss->ldt = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
+	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
+	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
+	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
+	tss->ldt = get_segment_selector(ctxt, VCPU_SREG_LDTR);
 }
 
 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
@@ -2136,11 +2153,11 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	 * SDM says that segment selectors are loaded before segment
 	 * descriptors
 	 */
-	ops->set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
-	ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
-	ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
-	ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
-	ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
+	set_segment_selector(ctxt, tss->ldt, VCPU_SREG_LDTR);
+	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
+	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
+	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
+	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
 
 	/*
 	 * Now load segment descriptors. If fault happenes at this stage
@@ -2227,13 +2244,13 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
 	tss->esi = c->regs[VCPU_REGS_RSI];
 	tss->edi = c->regs[VCPU_REGS_RDI];
 
-	tss->es = ops->get_segment_selector(ctxt, VCPU_SREG_ES);
-	tss->cs = ops->get_segment_selector(ctxt, VCPU_SREG_CS);
-	tss->ss = ops->get_segment_selector(ctxt, VCPU_SREG_SS);
-	tss->ds = ops->get_segment_selector(ctxt, VCPU_SREG_DS);
-	tss->fs = ops->get_segment_selector(ctxt, VCPU_SREG_FS);
-	tss->gs = ops->get_segment_selector(ctxt, VCPU_SREG_GS);
-	tss->ldt_selector = ops->get_segment_selector(ctxt, VCPU_SREG_LDTR);
+	tss->es = get_segment_selector(ctxt, VCPU_SREG_ES);
+	tss->cs = get_segment_selector(ctxt, VCPU_SREG_CS);
+	tss->ss = get_segment_selector(ctxt, VCPU_SREG_SS);
+	tss->ds = get_segment_selector(ctxt, VCPU_SREG_DS);
+	tss->fs = get_segment_selector(ctxt, VCPU_SREG_FS);
+	tss->gs = get_segment_selector(ctxt, VCPU_SREG_GS);
+	tss->ldt_selector = get_segment_selector(ctxt, VCPU_SREG_LDTR);
 }
 
 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
@@ -2260,13 +2277,13 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	 * SDM says that segment selectors are loaded before segment
 	 * descriptors
 	 */
-	ops->set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
-	ops->set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
-	ops->set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
-	ops->set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
-	ops->set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
-	ops->set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
-	ops->set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
+	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
+	set_segment_selector(ctxt, tss->cs, VCPU_SREG_CS);
+	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
+	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
+	set_segment_selector(ctxt, tss->fs, VCPU_SREG_FS);
+	set_segment_selector(ctxt, tss->gs, VCPU_SREG_GS);
 
 	/*
 	 * Now load segment descriptors. If fault happenes at this stage
@@ -2348,7 +2365,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 {
 	struct desc_struct curr_tss_desc, next_tss_desc;
 	int ret;
-	u16 old_tss_sel = ops->get_segment_selector(ctxt, VCPU_SREG_TR);
+	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
 	ulong old_tss_base =
 		ops->get_cached_segment_base(ctxt, VCPU_SREG_TR);
 	u32 desc_limit;
@@ -2411,8 +2428,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	}
 
 	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
-	ops->set_cached_descriptor(ctxt, &next_tss_desc, 0, VCPU_SREG_TR);
-	ops->set_segment_selector(ctxt, tss_selector, VCPU_SREG_TR);
+	ops->set_segment(ctxt, tss_selector, &next_tss_desc, 0, VCPU_SREG_TR);
 
 	if (has_error_code) {
 		struct decode_cache *c = &ctxt->decode;
@@ -2503,7 +2519,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	ulong old_eip;
 	int rc;
 
-	old_cs = ctxt->ops->get_segment_selector(ctxt, VCPU_SREG_CS);
+	old_cs = get_segment_selector(ctxt, VCPU_SREG_CS);
 	old_eip = c->eip;
 
 	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
@@ -3881,7 +3897,7 @@ special_insn:
 			rc = emulate_ud(ctxt);
 			goto done;
 		}
-		c->dst.val = ops->get_segment_selector(ctxt, c->modrm_reg);
+		c->dst.val = get_segment_selector(ctxt, c->modrm_reg);
 		break;
 	case 0x8d: /* lea r16/r32, m */
 		c->dst.val = c->src.addr.mem.ea;
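The net effect at each call site is visible in the syscall/sysenter/sysexit
hunks above: what used to be two callback invocations per segment register,

	ops->set_cached_descriptor(ctxt, &cs, 0, VCPU_SREG_CS);
	ops->set_segment_selector(ctxt, cs_sel, VCPU_SREG_CS);

becomes one,

	ops->set_segment(ctxt, cs_sel, &cs, 0, VCPU_SREG_CS);

and __linearize now derives the requestor privilege level from the selector
it already fetched (rpl = sel & 3) rather than making a separate
get_segment_selector callback.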