commit 54b8486f469475d6c8e8aec917b91239a54eb8c8
tree   a619826dc8e4915fcf308f3514a43615345e077a
parent 95cb229530f329ec8002274891793be9c91385f7
author Gleb Natapov <gleb@redhat.com> 2010-04-28 12:15:44 -0400
committer Avi Kivity <avi@redhat.com> 2010-08-01 03:35:37 -0400
KVM: x86 emulator: do not inject exception directly into vcpu

Return the exception as the result of instruction emulation and handle the
injection in KVM code.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
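The shape of the change: before this patch the emulator called kvm_inject_gp(), kvm_inject_page_fault() or kvm_queue_exception*() on the vcpu in the middle of emulation; after it, the emulator only records the pending fault in x86_emulate_ctxt, and x86.c injects it once emulation has unwound. A minimal, self-contained sketch of that deferred-reporting pattern follows; the struct, names and printf stand-ins are illustrative toys, not the kernel's real types:

#include <stdbool.h>
#include <stdio.h>

enum { UD_VECTOR = 6, GP_VECTOR = 13, PF_VECTOR = 14 };

/* Toy stand-in for the fields this patch adds to x86_emulate_ctxt. */
struct emu_ctxt {
	int exception;           /* pending vector, or -1 for none */
	unsigned int error_code;
	bool error_code_valid;
	unsigned long cr2;       /* faulted address for #PF */
};

/* Emulator side: record the fault instead of touching the vcpu. */
static void emulate_exception(struct emu_ctxt *c, int vec,
			      unsigned int err, bool valid)
{
	c->exception = vec;
	c->error_code = err;
	c->error_code_valid = valid;
}

static void emulate_pf(struct emu_ctxt *c, unsigned long addr,
		       unsigned int err)
{
	c->cr2 = addr;
	emulate_exception(c, PF_VECTOR, err, true);
}

/* Caller side: inject after emulation unwinds, mirroring the
 * inject_emulated_exception() added to x86.c below. */
static void inject_emulated_exception(const struct emu_ctxt *c)
{
	if (c->exception == PF_VECTOR)
		printf("inject #PF cr2=%#lx ec=%u\n", c->cr2, c->error_code);
	else if (c->error_code_valid)
		printf("inject vector %d ec=%u\n", c->exception, c->error_code);
	else
		printf("inject vector %d\n", c->exception);
}

int main(void)
{
	struct emu_ctxt c = { .exception = -1 };

	emulate_pf(&c, 0xdeadb000UL, 2);  /* a memory access faulted */
	if (c.exception >= 0)             /* checked once, after emulation */
		inject_emulated_exception(&c);
	return 0;
}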
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/kvm_emulate.h |   6
-rw-r--r--  arch/x86/kvm/emulate.c             | 124
-rw-r--r--  arch/x86/kvm/x86.c                 |  20
3 files changed, 100 insertions(+), 50 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index a87d95f09572..51cfd730ac5d 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -216,6 +216,12 @@ struct x86_emulate_ctxt {
 	int interruptibility;
 
 	bool restart; /* restart string instruction after writeback */
+
+	int exception; /* exception that happens during emulation or -1 */
+	u32 error_code; /* error code for exception */
+	bool error_code_valid;
+	unsigned long cr2; /* faulted address in case of #PF */
+
 	/* decode cache */
 	struct decode_cache decode;
 };
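A note on the contract for these fields (my reading of the patch, not spelled out in it): exception uses -1 as the "no exception pending" sentinel, so the caller must reset it before each emulation attempt and test it with >= 0 afterwards, exactly as the x86.c hunks at the end of this patch do:

	vcpu->arch.emulate_ctxt.exception = -1;   /* before decode */
	...
	if (vcpu->arch.emulate_ctxt.exception >= 0) {
		inject_emulated_exception(vcpu);
		return EMULATE_DONE;
	}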
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index c40b40561dff..b43ac98ef790 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -653,6 +653,37 @@ static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
 	return seg_base(ctxt, ops, VCPU_SREG_SS);
 }
 
+static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
+			      u32 error, bool valid)
+{
+	ctxt->exception = vec;
+	ctxt->error_code = error;
+	ctxt->error_code_valid = valid;
+	ctxt->restart = false;
+}
+
+static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
+{
+	emulate_exception(ctxt, GP_VECTOR, err, true);
+}
+
+static void emulate_pf(struct x86_emulate_ctxt *ctxt, unsigned long addr,
+		       int err)
+{
+	ctxt->cr2 = addr;
+	emulate_exception(ctxt, PF_VECTOR, err, true);
+}
+
+static void emulate_ud(struct x86_emulate_ctxt *ctxt)
+{
+	emulate_exception(ctxt, UD_VECTOR, 0, false);
+}
+
+static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
+{
+	emulate_exception(ctxt, TS_VECTOR, err, true);
+}
+
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 			      struct x86_emulate_ops *ops,
 			      unsigned long eip, u8 *dest)
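One detail worth flagging in emulate_exception(): it clears ctxt->restart as a side effect, so a string instruction that faults mid-way is not restarted. This is what lets the final x86.c hunk delete the old "cancel restart if an exception is pending" check; the suppression now happens at the point the fault is recorded. A hypothetical call site, using only names from this patch, looks like:

	/* inside some emulate.c handler */
	if (access_failed) {
		emulate_gp(ctxt, 0);              /* also sets ctxt->restart = false */
		return X86EMUL_PROPAGATE_FAULT;   /* unwind; x86.c injects later */
	}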
@@ -1285,7 +1316,7 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
 		rc = ops->read_emulated(addr, mc->data + mc->end, n, &err,
 					ctxt->vcpu);
 		if (rc == X86EMUL_PROPAGATE_FAULT)
-			kvm_inject_page_fault(ctxt->vcpu, addr, err);
+			emulate_pf(ctxt, addr, err);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		mc->end += n;
@@ -1366,13 +1397,13 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
 	if (dt.size < index * 8 + 7) {
-		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+		emulate_gp(ctxt, selector & 0xfffc);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 	addr = dt.address + index * 8;
 	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT)
-		kvm_inject_page_fault(ctxt->vcpu, addr, err);
+		emulate_pf(ctxt, addr, err);
 
 	return ret;
 }
@@ -1391,14 +1422,14 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
 	if (dt.size < index * 8 + 7) {
-		kvm_inject_gp(ctxt->vcpu, selector & 0xfffc);
+		emulate_gp(ctxt, selector & 0xfffc);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
 	addr = dt.address + index * 8;
 	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu, &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT)
-		kvm_inject_page_fault(ctxt->vcpu, addr, err);
+		emulate_pf(ctxt, addr, err);
 
 	return ret;
 }
@@ -1517,7 +1548,7 @@ load:
 	ops->set_cached_descriptor(&seg_desc, seg, ctxt->vcpu);
 	return X86EMUL_CONTINUE;
 exception:
-	kvm_queue_exception_e(ctxt->vcpu, err_vec, err_code);
+	emulate_exception(ctxt, err_vec, err_code, true);
 	return X86EMUL_PROPAGATE_FAULT;
 }
1523 1554
@@ -1578,7 +1609,7 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
 		break;
 	case X86EMUL_MODE_VM86:
 		if (iopl < 3) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			return X86EMUL_PROPAGATE_FAULT;
 		}
 		change_mask |= EFLG_IF;
@@ -1829,7 +1860,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 					&err,
 					ctxt->vcpu);
 		if (rc == X86EMUL_PROPAGATE_FAULT)
-			kvm_inject_page_fault(ctxt->vcpu,
+			emulate_pf(ctxt,
 					(unsigned long)c->dst.ptr, err);
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
@@ -1883,7 +1914,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	/* syscall is not available in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
 	    ctxt->mode == X86EMUL_MODE_VM86) {
-		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+		emulate_ud(ctxt);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
@@ -1937,7 +1968,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 
 	/* inject #GP if in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL) {
-		kvm_inject_gp(ctxt->vcpu, 0);
+		emulate_gp(ctxt, 0);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
@@ -1945,7 +1976,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	 * Therefore, we inject an #UD.
 	 */
 	if (ctxt->mode == X86EMUL_MODE_PROT64) {
-		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+		emulate_ud(ctxt);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
@@ -1955,13 +1986,13 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT32:
 		if ((msr_data & 0xfffc) == 0x0) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			return X86EMUL_PROPAGATE_FAULT;
 		}
 		break;
 	case X86EMUL_MODE_PROT64:
 		if (msr_data == 0x0) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			return X86EMUL_PROPAGATE_FAULT;
 		}
 		break;
@@ -2004,7 +2035,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	/* inject #GP if in real mode or Virtual 8086 mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
 	    ctxt->mode == X86EMUL_MODE_VM86) {
-		kvm_inject_gp(ctxt->vcpu, 0);
+		emulate_gp(ctxt, 0);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
@@ -2022,7 +2053,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	case X86EMUL_MODE_PROT32:
 		cs_sel = (u16)(msr_data + 16);
 		if ((msr_data & 0xfffc) == 0x0) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			return X86EMUL_PROPAGATE_FAULT;
 		}
 		ss_sel = (u16)(msr_data + 24);
@@ -2030,7 +2061,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	case X86EMUL_MODE_PROT64:
 		cs_sel = (u16)(msr_data + 32);
 		if (msr_data == 0x0) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			return X86EMUL_PROPAGATE_FAULT;
 		}
 		ss_sel = cs_sel + 8;
@@ -2192,7 +2223,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		emulate_pf(ctxt, old_tss_base, err);
 		return ret;
 	}
 
@@ -2202,7 +2233,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 			     &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		emulate_pf(ctxt, old_tss_base, err);
 		return ret;
 	}
 
@@ -2210,7 +2241,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+		emulate_pf(ctxt, new_tss_base, err);
 		return ret;
 	}
 
@@ -2223,7 +2254,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 				     ctxt->vcpu, &err);
 		if (ret == X86EMUL_PROPAGATE_FAULT) {
 			/* FIXME: need to provide precise fault address */
-			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+			emulate_pf(ctxt, new_tss_base, err);
 			return ret;
 		}
 	}
@@ -2266,7 +2297,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	int ret;
 
 	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
-		kvm_inject_gp(ctxt->vcpu, 0);
+		emulate_gp(ctxt, 0);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 	c->eip = tss->eip;
@@ -2334,7 +2365,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		emulate_pf(ctxt, old_tss_base, err);
 		return ret;
 	}
 
@@ -2344,7 +2375,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 			     &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, old_tss_base, err);
+		emulate_pf(ctxt, old_tss_base, err);
 		return ret;
 	}
 
@@ -2352,7 +2383,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 			    &err);
 	if (ret == X86EMUL_PROPAGATE_FAULT) {
 		/* FIXME: need to provide precise fault address */
-		kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+		emulate_pf(ctxt, new_tss_base, err);
 		return ret;
 	}
 
@@ -2365,7 +2396,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 				     ctxt->vcpu, &err);
 		if (ret == X86EMUL_PROPAGATE_FAULT) {
 			/* FIXME: need to provide precise fault address */
-			kvm_inject_page_fault(ctxt->vcpu, new_tss_base, err);
+			emulate_pf(ctxt, new_tss_base, err);
 			return ret;
 		}
 	}
@@ -2399,7 +2430,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	if (reason != TASK_SWITCH_IRET) {
 		if ((tss_selector & 3) > next_tss_desc.dpl ||
 		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			return X86EMUL_PROPAGATE_FAULT;
 		}
 	}
@@ -2408,8 +2439,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 	if (!next_tss_desc.p ||
 	    ((desc_limit < 0x67 && (next_tss_desc.type & 8)) ||
 	     desc_limit < 0x2b)) {
-		kvm_queue_exception_e(ctxt->vcpu, TS_VECTOR,
-				      tss_selector & 0xfffc);
+		emulate_ts(ctxt, tss_selector & 0xfffc);
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
@@ -2505,19 +2535,19 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	ctxt->decode.mem_read.pos = 0;
 
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
-		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+		emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* LOCK prefix is allowed only with some instructions */
 	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
-		kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+		emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* Privileged instruction can be executed only in CPL=0 */
 	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
-		kvm_inject_gp(ctxt->vcpu, 0);
+		emulate_gp(ctxt, 0);
 		goto done;
 	}
 
@@ -2679,7 +2709,7 @@ special_insn:
 		c->dst.bytes = min(c->dst.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
 					  c->dst.bytes)) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			goto done;
 		}
 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes,
@@ -2691,7 +2721,7 @@ special_insn:
 		c->src.bytes = min(c->src.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->regs[VCPU_REGS_RDX],
 					  c->src.bytes)) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			goto done;
 		}
 		ops->pio_out_emulated(c->src.bytes, c->regs[VCPU_REGS_RDX],
@@ -2754,7 +2784,7 @@ special_insn:
 		goto mov;
 	case 0x8c: /* mov r/m, sreg */
 		if (c->modrm_reg > VCPU_SREG_GS) {
-			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+			emulate_ud(ctxt);
 			goto done;
 		}
 		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
@@ -2769,7 +2799,7 @@ special_insn:
 
 		if (c->modrm_reg == VCPU_SREG_CS ||
 		    c->modrm_reg > VCPU_SREG_GS) {
-			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+			emulate_ud(ctxt);
 			goto done;
 		}
 
@@ -2895,7 +2925,7 @@ special_insn:
 	do_io_in:
 		c->dst.bytes = min(c->dst.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			goto done;
 		}
 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
@@ -2908,7 +2938,7 @@ special_insn:
 	do_io_out:
 		c->dst.bytes = min(c->dst.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			goto done;
 		}
 		ops->pio_out_emulated(c->dst.bytes, c->src.val, &c->dst.val, 1,
@@ -2933,7 +2963,7 @@ special_insn:
 		break;
 	case 0xfa: /* cli */
 		if (emulator_bad_iopl(ctxt, ops))
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 		else {
 			ctxt->eflags &= ~X86_EFLAGS_IF;
 			c->dst.type = OP_NONE;	/* Disable writeback. */
@@ -2941,7 +2971,7 @@ special_insn:
 		break;
 	case 0xfb: /* sti */
 		if (emulator_bad_iopl(ctxt, ops))
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 		else {
 			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;
 			ctxt->eflags |= X86_EFLAGS_IF;
@@ -3069,7 +3099,7 @@ twobyte_insn:
 			c->dst.type = OP_NONE;
 			break;
 		case 5: /* not defined */
-			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+			emulate_ud(ctxt);
 			goto done;
 		case 7: /* invlpg*/
 			emulate_invlpg(ctxt->vcpu, c->modrm_ea);
@@ -3102,7 +3132,7 @@ twobyte_insn:
 		case 1:
 		case 5 ... 7:
 		case 9 ... 15:
-			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+			emulate_ud(ctxt);
 			goto done;
 		}
 		c->regs[c->modrm_rm] = ops->get_cr(c->modrm_reg, ctxt->vcpu);
@@ -3111,7 +3141,7 @@ twobyte_insn:
 	case 0x21: /* mov from dr to reg */
 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
-			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+			emulate_ud(ctxt);
 			goto done;
 		}
 		ops->get_dr(c->modrm_reg, &c->regs[c->modrm_rm], ctxt->vcpu);
@@ -3119,7 +3149,7 @@ twobyte_insn:
 		break;
 	case 0x22: /* mov reg, cr */
 		if (ops->set_cr(c->modrm_reg, c->modrm_val, ctxt->vcpu)) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			goto done;
 		}
 		c->dst.type = OP_NONE;
@@ -3127,7 +3157,7 @@ twobyte_insn:
 	case 0x23: /* mov from reg to dr */
 		if ((ops->get_cr(4, ctxt->vcpu) & X86_CR4_DE) &&
 		    (c->modrm_reg == 4 || c->modrm_reg == 5)) {
-			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
+			emulate_ud(ctxt);
 			goto done;
 		}
 
@@ -3135,7 +3165,7 @@ twobyte_insn:
 				((ctxt->mode == X86EMUL_MODE_PROT64) ?
 				 ~0ULL : ~0U), ctxt->vcpu) < 0) {
 			/* #UD condition is already handled by the code above */
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			goto done;
 		}
 
@@ -3146,7 +3176,7 @@ twobyte_insn:
 		msr_data = (u32)c->regs[VCPU_REGS_RAX]
 			| ((u64)c->regs[VCPU_REGS_RDX] << 32);
 		if (ops->set_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], msr_data)) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			goto done;
 		}
 		rc = X86EMUL_CONTINUE;
@@ -3155,7 +3185,7 @@ twobyte_insn:
 	case 0x32:
 		/* rdmsr */
 		if (ops->get_msr(ctxt->vcpu, c->regs[VCPU_REGS_RCX], &msr_data)) {
-			kvm_inject_gp(ctxt->vcpu, 0);
+			emulate_gp(ctxt, 0);
 			goto done;
 		} else {
 			c->regs[VCPU_REGS_RAX] = (u32)msr_data;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 91bfe7771f50..63c87adcec48 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -3852,6 +3852,17 @@ static void toggle_interruptibility(struct kvm_vcpu *vcpu, u32 mask)
 	kvm_x86_ops->set_interrupt_shadow(vcpu, mask);
 }
 
+static void inject_emulated_exception(struct kvm_vcpu *vcpu)
+{
+	struct x86_emulate_ctxt *ctxt = &vcpu->arch.emulate_ctxt;
+	if (ctxt->exception == PF_VECTOR)
+		kvm_inject_page_fault(vcpu, ctxt->cr2, ctxt->error_code);
+	else if (ctxt->error_code_valid)
+		kvm_queue_exception_e(vcpu, ctxt->exception, ctxt->error_code);
+	else
+		kvm_queue_exception(vcpu, ctxt->exception);
+}
+
 int emulate_instruction(struct kvm_vcpu *vcpu,
 			unsigned long cr2,
 			u16 error_code,
@@ -3886,6 +3897,7 @@ int emulate_instruction(struct kvm_vcpu *vcpu,
 	memset(c, 0, sizeof(struct decode_cache));
 	memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
 	vcpu->arch.emulate_ctxt.interruptibility = 0;
+	vcpu->arch.emulate_ctxt.exception = -1;
 
 	r = x86_decode_insn(&vcpu->arch.emulate_ctxt, &emulate_ops);
 	trace_kvm_emulate_insn_start(vcpu);
@@ -3958,6 +3970,11 @@ restart:
 	memcpy(vcpu->arch.regs, c->regs, sizeof c->regs);
 	kvm_rip_write(vcpu, vcpu->arch.emulate_ctxt.eip);
 
+	if (vcpu->arch.emulate_ctxt.exception >= 0) {
+		inject_emulated_exception(vcpu);
+		return EMULATE_DONE;
+	}
+
 	if (vcpu->arch.pio.count) {
 		if (!vcpu->arch.pio.in)
 			vcpu->arch.pio.count = 0;
@@ -3970,9 +3987,6 @@ restart:
 		return EMULATE_DO_MMIO;
 	}
 
-	if (vcpu->arch.exception.pending)
-		vcpu->arch.emulate_ctxt.restart = false;
-
 	if (vcpu->arch.emulate_ctxt.restart)
 		goto restart;
 