author     Gleb Natapov <gleb@redhat.com>              2010-03-18 09:20:17 -0400
committer  Avi Kivity <avi@redhat.com>                 2010-05-17 05:16:15 -0400
commit     38ba30ba51a003360f177d5b8349439fe44fc55b (patch)
tree       27c6667cf9b0470e13d371bd9b86c8a605e2e52c /arch/x86/kvm/emulate.c
parent     2dafc6c234b6064189405f42e1602e9a0abe5a44 (diff)
KVM: x86 emulator: Emulate task switch in emulator.c
Implement emulation of 16/32-bit task switches in emulator.c.
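
A minimal sketch of how this new entry point is expected to be driven from the
rest of KVM. The caller shown here (kvm_task_switch() in x86.c), the emulate_ops
table, the use of vcpu->arch.emulate_ctxt, and the return convention are
assumptions for illustration only, not part of this patch; context
initialization (mode, eip, eflags) is omitted for brevity:

  int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int reason)
  {
  	/* Assumed caller: reuse the vcpu's cached emulation context and the
  	 * x86.c emulate_ops table, then hand the switch to the emulator. */
  	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
  	int rc;

  	rc = emulator_task_switch(ctxt, &emulate_ops, tss_selector, reason);

  	/* X86EMUL_PROPAGATE_FAULT means an exception was already queued. */
  	return (rc == X86EMUL_CONTINUE) ? 1 : 0;
  }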
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r--   arch/x86/kvm/emulate.c   563
1 file changed, 563 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index d696cbd6ff7a..db4776c6b500 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -33,6 +33,7 @@
 #include <asm/kvm_emulate.h>
 
 #include "x86.h"
+#include "tss.h"
 
 /*
  * Opcode effective-address decode tables.
@@ -1221,6 +1222,198 @@ done:
 	return (rc == X86EMUL_UNHANDLEABLE) ? -1 : 0;
 }
 
+static u32 desc_limit_scaled(struct desc_struct *desc)
+{
+	u32 limit = get_desc_limit(desc);
+
+	return desc->g ? (limit << 12) | 0xfff : limit;
+}
+
+static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
+				     struct x86_emulate_ops *ops,
+				     u16 selector, struct desc_ptr *dt)
+{
+	if (selector & 1 << 2) {
+		struct desc_struct desc;
+		memset (dt, 0, sizeof *dt);
+		if (!ops->get_cached_descriptor(&desc, VCPU_SREG_LDTR, ctxt->vcpu))
+			return;
+
+		dt->size = desc_limit_scaled(&desc); /* what if limit > 65535? */
+		dt->address = get_desc_base(&desc);
+	} else
+		ops->get_gdt(dt, ctxt->vcpu);
+}
+
+/* allowed just for 8-byte segments */
+static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+				   struct x86_emulate_ops *ops,
+				   u16 selector, struct desc_struct *desc)
+{
+	struct desc_ptr dt;
+	u16 index = selector >> 3;
+	int ret;
+	u32 err;
+	ulong addr;
+
+	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+	if (dt.size < index * 8 + 7) {
+		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+	addr = dt.address + index * 8;
+	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT)
+		kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+	return ret;
+}
+
+/* allowed just for 8-byte segments */
+static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+				    struct x86_emulate_ops *ops,
+				    u16 selector, struct desc_struct *desc)
+{
+	struct desc_ptr dt;
+	u16 index = selector >> 3;
+	u32 err;
+	ulong addr;
+	int ret;
+
+	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+
+	if (dt.size < index * 8 + 7) {
+		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
+	addr = dt.address + index * 8;
+	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT)
+		kvm_inject_page_fault(ctxt->vcpu, addr, err);
+
+	return ret;
+}
+
+static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+				   struct x86_emulate_ops *ops,
+				   u16 selector, int seg)
+{
+	struct desc_struct seg_desc;
+	u8 dpl, rpl, cpl;
+	unsigned err_vec = GP_VECTOR;
+	u32 err_code = 0;
+	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
+	int ret;
+
+	memset(&seg_desc, 0, sizeof seg_desc);
+
+	if ((seg <= VCPU_SREG_GS && ctxt->mode == X86EMUL_MODE_VM86)
+	    || ctxt->mode == X86EMUL_MODE_REAL) {
+		/* set real mode segment descriptor */
+		set_desc_base(&seg_desc, selector << 4);
+		set_desc_limit(&seg_desc, 0xffff);
+		seg_desc.type = 3;
+		seg_desc.p = 1;
+		seg_desc.s = 1;
+		goto load;
+	}
+
+	/* NULL selector is not valid for TR, CS and SS */
+	if ((seg == VCPU_SREG_CS || seg == VCPU_SREG_SS || seg == VCPU_SREG_TR)
+	    && null_selector)
+		goto exception;
+
+	/* TR should be in GDT only */
+	if (seg == VCPU_SREG_TR && (selector & (1 << 2)))
+		goto exception;
+
+	if (null_selector) /* for NULL selector skip all following checks */
+		goto load;
+
+	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+
+	err_code = selector & 0xfffc;
+	err_vec = GP_VECTOR;
+
+	/* can't load system descriptor into segment selector */
+	if (seg <= VCPU_SREG_GS && !seg_desc.s)
+		goto exception;
+
+	if (!seg_desc.p) {
+		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
+		goto exception;
+	}
+
+	rpl = selector & 3;
+	dpl = seg_desc.dpl;
+	cpl = ops->cpl(ctxt->vcpu);
+
+	switch (seg) {
+	case VCPU_SREG_SS:
+		/*
+		 * segment is not a writable data segment or segment
+		 * selector's RPL != CPL or segment descriptor's DPL != CPL
+		 */
+		if (rpl != cpl || (seg_desc.type & 0xa) != 0x2 || dpl != cpl)
+			goto exception;
+		break;
+	case VCPU_SREG_CS:
+		if (!(seg_desc.type & 8))
+			goto exception;
+
+		if (seg_desc.type & 4) {
+			/* conforming */
+			if (dpl > cpl)
+				goto exception;
+		} else {
+			/* nonconforming */
+			if (rpl > cpl || dpl != cpl)
+				goto exception;
+		}
+		/* CS(RPL) <- CPL */
+		selector = (selector & 0xfffc) | cpl;
+		break;
+	case VCPU_SREG_TR:
+		if (seg_desc.s || (seg_desc.type != 1 && seg_desc.type != 9))
+			goto exception;
+		break;
+	case VCPU_SREG_LDTR:
+		if (seg_desc.s || seg_desc.type != 2)
+			goto exception;
+		break;
+	default: /* DS, ES, FS, or GS */
+		/*
+		 * segment is not a data or readable code segment or
+		 * ((segment is a data or nonconforming code segment)
+		 * and (both RPL and CPL > DPL))
+		 */
+		if ((seg_desc.type & 0xa) == 0x8 ||
+		    (((seg_desc.type & 0xc) != 0xc) &&
+		     (rpl > dpl && cpl > dpl)))
+			goto exception;
+		break;
+	}
+
+	if (seg_desc.s) {
+		/* mark segment as accessed */
+		seg_desc.type |= 1;
+		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
+		if (ret != X86EMUL_CONTINUE)
+			return ret;
+	}
+load:
+	ops->set_segment_selector(selector, seg, ctxt->vcpu);
+	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
+	return X86EMUL_CONTINUE;
+exception:
+	kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
+	return X86EMUL_PROPAGATE_FAULT;
+}
+
 static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
@@ -1812,6 +2005,376 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
 	return true;
 }
 
+static u32 get_cached_descriptor_base(struct x86_emulate_ctxt *ctxt,
+				      struct x86_emulate_ops *ops,
+				      int seg)
+{
+	struct desc_struct desc;
+	if (ops->get_cached_descriptor(&desc, seg, ctxt->vcpu))
+		return get_desc_base(&desc);
+	else
+		return ~0;
+}
+
+static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
+				struct x86_emulate_ops *ops,
+				struct tss_segment_16 *tss)
+{
+	struct decode_cache *c = &ctxt->decode;
+
+	tss->ip = c->eip;
+	tss->flag = ctxt->eflags;
+	tss->ax = c->regs[VCPU_REGS_RAX];
+	tss->cx = c->regs[VCPU_REGS_RCX];
+	tss->dx = c->regs[VCPU_REGS_RDX];
+	tss->bx = c->regs[VCPU_REGS_RBX];
+	tss->sp = c->regs[VCPU_REGS_RSP];
+	tss->bp = c->regs[VCPU_REGS_RBP];
+	tss->si = c->regs[VCPU_REGS_RSI];
+	tss->di = c->regs[VCPU_REGS_RDI];
+
+	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+	tss->ldt = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
+static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
+				 struct x86_emulate_ops *ops,
+				 struct tss_segment_16 *tss)
+{
+	struct decode_cache *c = &ctxt->decode;
+	int ret;
+
+	c->eip = tss->ip;
+	ctxt->eflags = tss->flag | 2;
+	c->regs[VCPU_REGS_RAX] = tss->ax;
+	c->regs[VCPU_REGS_RCX] = tss->cx;
+	c->regs[VCPU_REGS_RDX] = tss->dx;
+	c->regs[VCPU_REGS_RBX] = tss->bx;
+	c->regs[VCPU_REGS_RSP] = tss->sp;
+	c->regs[VCPU_REGS_RBP] = tss->bp;
+	c->regs[VCPU_REGS_RSI] = tss->si;
+	c->regs[VCPU_REGS_RDI] = tss->di;
+
+	/*
+	 * SDM says that segment selectors are loaded before segment
+	 * descriptors
+	 */
+	ops->set_segment_selector(tss->ldt, VCPU_SREG_LDTR, ctxt->vcpu);
+	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+
2071 | /* | ||
2072 | * Now load segment descriptors. If fault happenes at this stage | ||
2073 | * it is handled in a context of new task | ||
2074 | */ | ||
+	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+
+	return X86EMUL_CONTINUE;
+}
+
+static int task_switch_16(struct x86_emulate_ctxt *ctxt,
+			  struct x86_emulate_ops *ops,
+			  u16 tss_selector, u16 old_tss_sel,
+			  ulong old_tss_base, struct desc_struct *new_desc)
+{
+	struct tss_segment_16 tss_seg;
+	int ret;
+	u32 err, new_tss_base = get_desc_base(new_desc);
+
+	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			    &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		return ret;
+	}
+
+	save_state_to_tss16(ctxt, ops, &tss_seg);
+
+	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			     &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		return ret;
+	}
+
+	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			    &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+		return ret;
+	}
+
+	if (old_tss_sel != 0xffff) {
+		tss_seg.prev_task_link = old_tss_sel;
+
+		ret = ops->write_std(new_tss_base,
+				     &tss_seg.prev_task_link,
+				     sizeof tss_seg.prev_task_link,
+				     ctxt->vcpu, &err);
+		if (ret == X86EMUL_PROPAGATE_FAULT) {
+			/* FIXME: need to provide precise fault address */
+			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+			return ret;
+		}
+	}
+
+	return load_state_from_tss16(ctxt, ops, &tss_seg);
+}
+
+static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
+				struct x86_emulate_ops *ops,
+				struct tss_segment_32 *tss)
+{
+	struct decode_cache *c = &ctxt->decode;
+
+	tss->cr3 = ops->get_cr(3, ctxt->vcpu);
+	tss->eip = c->eip;
+	tss->eflags = ctxt->eflags;
+	tss->eax = c->regs[VCPU_REGS_RAX];
+	tss->ecx = c->regs[VCPU_REGS_RCX];
+	tss->edx = c->regs[VCPU_REGS_RDX];
+	tss->ebx = c->regs[VCPU_REGS_RBX];
+	tss->esp = c->regs[VCPU_REGS_RSP];
+	tss->ebp = c->regs[VCPU_REGS_RBP];
+	tss->esi = c->regs[VCPU_REGS_RSI];
+	tss->edi = c->regs[VCPU_REGS_RDI];
+
+	tss->es = ops->get_segment_selector(VCPU_SREG_ES, ctxt->vcpu);
+	tss->cs = ops->get_segment_selector(VCPU_SREG_CS, ctxt->vcpu);
+	tss->ss = ops->get_segment_selector(VCPU_SREG_SS, ctxt->vcpu);
+	tss->ds = ops->get_segment_selector(VCPU_SREG_DS, ctxt->vcpu);
+	tss->fs = ops->get_segment_selector(VCPU_SREG_FS, ctxt->vcpu);
+	tss->gs = ops->get_segment_selector(VCPU_SREG_GS, ctxt->vcpu);
+	tss->ldt_selector = ops->get_segment_selector(VCPU_SREG_LDTR, ctxt->vcpu);
+}
+
+static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
+				 struct x86_emulate_ops *ops,
+				 struct tss_segment_32 *tss)
+{
+	struct decode_cache *c = &ctxt->decode;
+	int ret;
+
+	ops->set_cr(3, tss->cr3, ctxt->vcpu);
+	c->eip = tss->eip;
+	ctxt->eflags = tss->eflags | 2;
+	c->regs[VCPU_REGS_RAX] = tss->eax;
+	c->regs[VCPU_REGS_RCX] = tss->ecx;
+	c->regs[VCPU_REGS_RDX] = tss->edx;
+	c->regs[VCPU_REGS_RBX] = tss->ebx;
+	c->regs[VCPU_REGS_RSP] = tss->esp;
+	c->regs[VCPU_REGS_RBP] = tss->ebp;
+	c->regs[VCPU_REGS_RSI] = tss->esi;
+	c->regs[VCPU_REGS_RDI] = tss->edi;
+
+	/*
+	 * SDM says that segment selectors are loaded before segment
+	 * descriptors
+	 */
+	ops->set_segment_selector(tss->ldt_selector, VCPU_SREG_LDTR, ctxt->vcpu);
+	ops->set_segment_selector(tss->es, VCPU_SREG_ES, ctxt->vcpu);
+	ops->set_segment_selector(tss->cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_segment_selector(tss->ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment_selector(tss->ds, VCPU_SREG_DS, ctxt->vcpu);
+	ops->set_segment_selector(tss->fs, VCPU_SREG_FS, ctxt->vcpu);
+	ops->set_segment_selector(tss->gs, VCPU_SREG_GS, ctxt->vcpu);
+
+	/*
+	 * Now load segment descriptors. If a fault happens at this stage
+	 * it is handled in the context of the new task
+	 */
+	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+
+	return X86EMUL_CONTINUE;
+}
+
+static int task_switch_32(struct x86_emulate_ctxt *ctxt,
+			  struct x86_emulate_ops *ops,
+			  u16 tss_selector, u16 old_tss_sel,
+			  ulong old_tss_base, struct desc_struct *new_desc)
+{
+	struct tss_segment_32 tss_seg;
+	int ret;
+	u32 err, new_tss_base = get_desc_base(new_desc);
+
+	ret = ops->read_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			    &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		return ret;
+	}
+
+	save_state_to_tss32(ctxt, ops, &tss_seg);
+
+	ret = ops->write_std(old_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			     &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		return ret;
+	}
+
+	ret = ops->read_std(new_tss_base, &tss_seg, sizeof tss_seg, ctxt->vcpu,
+			    &err);
+	if (ret == X86EMUL_PROPAGATE_FAULT) {
+		/* FIXME: need to provide precise fault address */
+		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+		return ret;
+	}
+
+	if (old_tss_sel != 0xffff) {
+		tss_seg.prev_task_link = old_tss_sel;
+
+		ret = ops->write_std(new_tss_base,
+				     &tss_seg.prev_task_link,
+				     sizeof tss_seg.prev_task_link,
+				     ctxt->vcpu, &err);
+		if (ret == X86EMUL_PROPAGATE_FAULT) {
+			/* FIXME: need to provide precise fault address */
+			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+			return ret;
+		}
+	}
+
+	return load_state_from_tss32(ctxt, ops, &tss_seg);
+}
+
+static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
+				   struct x86_emulate_ops *ops,
+				   u16 tss_selector, int reason)
+{
+	struct desc_struct curr_tss_desc, next_tss_desc;
+	int ret;
+	u16 old_tss_sel = ops->get_segment_selector(VCPU_SREG_TR, ctxt->vcpu);
+	ulong old_tss_base =
+		get_cached_descriptor_base(ctxt, ops, VCPU_SREG_TR);
+
+	/* FIXME: old_tss_base == ~0 ? */
+
+	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
+	if (ret != X86EMUL_CONTINUE)
+		return ret;
+
+	/* FIXME: check that next_tss_desc is tss */
+
+	if (reason != TASK_SWITCH_IRET) {
+		if ((tss_selector & 3) > next_tss_desc.dpl ||
+		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
+			kvm_inject_gp(ctxt->vcpu, 0);
+			return X86EMUL_PROPAGATE_FAULT;
+		}
+	}
+
+	if (!next_tss_desc.p || desc_limit_scaled(&next_tss_desc) < 0x67) {
+		kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
+				      tss_selector & 0xfffc);
+		return X86EMUL_PROPAGATE_FAULT;
+	}
+
+	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
+		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
+		write_segment_descriptor(ctxt, ops, old_tss_sel,
+					 &curr_tss_desc);
+	}
+
+	if (reason == TASK_SWITCH_IRET)
+		ctxt->eflags = ctxt->eflags & ~X86_EFLAGS_NT;
+
+	/* set back link to prev task only if NT bit is set in eflags
+	   note that old_tss_sel is not used after this point */
+	if (reason != TASK_SWITCH_CALL && reason != TASK_SWITCH_GATE)
+		old_tss_sel = 0xffff;
+
+	if (next_tss_desc.type & 8)
+		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
+				     old_tss_base, &next_tss_desc);
+	else
+		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
+				     old_tss_base, &next_tss_desc);
+
+	if (reason == TASK_SWITCH_CALL || reason == TASK_SWITCH_GATE)
+		ctxt->eflags = ctxt->eflags | X86_EFLAGS_NT;
+
+	if (reason != TASK_SWITCH_IRET) {
+		next_tss_desc.type |= (1 << 1); /* set busy flag */
+		write_segment_descriptor(ctxt, ops, tss_selector,
+					 &next_tss_desc);
+	}
+
+	ops->set_cr(0, ops->get_cr(0, ctxt->vcpu) | X86_CR0_TS, ctxt->vcpu);
+	ops->set_cached_descriptor(&next_tss_desc, VCPU_SREG_TR, ctxt->vcpu);
+	ops->set_segment_selector(tss_selector, VCPU_SREG_TR, ctxt->vcpu);
+
+	return ret;
+}
+
+int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
+			 struct x86_emulate_ops *ops,
+			 u16 tss_selector, int reason)
+{
+	struct decode_cache *c = &ctxt->decode;
+	int rc;
+
+	memset(c, 0, sizeof(struct decode_cache));
+	c->eip = ctxt->eip;
+	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
+
+	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason);
+
+	if (rc == X86EMUL_CONTINUE) {
+		memcpy(ctxt->vcpu->arch.regs, c->regs, sizeof c->regs);
+		kvm_rip_write(ctxt->vcpu, c->eip);
+	}
+
+	return rc;
+}
+
 int
 x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {