author     Avi Kivity <avi@redhat.com>    2010-11-22 10:53:25 -0500
committer  Avi Kivity <avi@redhat.com>    2011-01-12 04:29:58 -0500
commit     35d3d4a1dd2c1ffd6f2481f6d8ad6c358bb22f07 (patch)
tree       5e306a209b6f015cf72ed9467ae5a0176cdffdc7 /arch/x86/kvm/emulate.c
parent     db297e3d8ed8409b969512c3ecd9d13223f2981c (diff)
KVM: x86 emulator: simplify exception generation
Immediately after we generate an exception, we want an X86EMUL_PROPAGATE_FAULT constant, so return it from the generation functions.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
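The pattern is generic: have the exception helpers record the pending fault and return the fault code themselves, so each call site collapses from a brace-delimited block into a single return or assignment. Below is a minimal, self-contained C sketch of that pattern; the names used here (fault_ctxt, EMUL_PROPAGATE_FAULT, raise_gp, check_limit) are simplified stand-ins for illustration, not the real x86_emulate_ctxt / X86EMUL_* definitions from this file.

/*
 * Standalone sketch of the "return the fault code from the raiser"
 * pattern. All types and constants are simplified stand-ins, not the
 * actual KVM emulator definitions.
 */
#include <stdbool.h>
#include <stdio.h>

enum { EMUL_CONTINUE = 0, EMUL_PROPAGATE_FAULT = 1 };

struct fault_ctxt {
	int vector;
	unsigned int error_code;
	bool error_code_valid;
};

/*
 * Record the pending exception and return the fault code directly,
 * so callers can write "return raise_gp(...)" instead of calling the
 * helper and then returning the constant on a separate line.
 */
static int raise_exception(struct fault_ctxt *ctxt, int vec,
			   unsigned int error, bool valid)
{
	ctxt->vector = vec;
	ctxt->error_code = error;
	ctxt->error_code_valid = valid;
	return EMUL_PROPAGATE_FAULT;
}

static int raise_gp(struct fault_ctxt *ctxt, unsigned int err)
{
	return raise_exception(ctxt, 13 /* #GP */, err, true);
}

/* A caller collapses from a three-line block to a single statement. */
static int check_limit(struct fault_ctxt *ctxt, unsigned int size,
		       unsigned int limit)
{
	if (size > limit)
		return raise_gp(ctxt, 0);
	return EMUL_CONTINUE;
}

int main(void)
{
	struct fault_ctxt ctxt = {0};

	if (check_limit(&ctxt, 16, 8) == EMUL_PROPAGATE_FAULT)
		printf("fault: vector %d, error code %u\n",
		       ctxt.vector, ctxt.error_code);
	return 0;
}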
Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r--  arch/x86/kvm/emulate.c  140
1 file changed, 50 insertions(+), 90 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 223c536b7f7..36534ecaf59 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -466,33 +466,33 @@ static ulong linear(struct x86_emulate_ctxt *ctxt,
 	return la;
 }
 
-static void emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
+static int emulate_exception(struct x86_emulate_ctxt *ctxt, int vec,
 			      u32 error, bool valid)
 {
 	ctxt->exception.vector = vec;
 	ctxt->exception.error_code = error;
 	ctxt->exception.error_code_valid = valid;
+	return X86EMUL_PROPAGATE_FAULT;
 }
 
-static void emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
+static int emulate_gp(struct x86_emulate_ctxt *ctxt, int err)
 {
-	emulate_exception(ctxt, GP_VECTOR, err, true);
+	return emulate_exception(ctxt, GP_VECTOR, err, true);
 }
 
-static void emulate_ud(struct x86_emulate_ctxt *ctxt)
+static int emulate_ud(struct x86_emulate_ctxt *ctxt)
 {
-	emulate_exception(ctxt, UD_VECTOR, 0, false);
+	return emulate_exception(ctxt, UD_VECTOR, 0, false);
 }
 
-static void emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
+static int emulate_ts(struct x86_emulate_ctxt *ctxt, int err)
 {
-	emulate_exception(ctxt, TS_VECTOR, err, true);
+	return emulate_exception(ctxt, TS_VECTOR, err, true);
 }
 
 static int emulate_de(struct x86_emulate_ctxt *ctxt)
 {
-	emulate_exception(ctxt, DE_VECTOR, 0, false);
-	return X86EMUL_PROPAGATE_FAULT;
+	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 }
 
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
@@ -898,10 +898,8 @@ static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
-	if (dt.size < index * 8 + 7) {
-		emulate_gp(ctxt, selector & 0xfffc);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (dt.size < index * 8 + 7)
+		return emulate_gp(ctxt, selector & 0xfffc);
 	addr = dt.address + index * 8;
 	ret = ops->read_std(addr, desc, sizeof *desc, ctxt->vcpu,
 			    &ctxt->exception);
@@ -921,10 +919,8 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
 
-	if (dt.size < index * 8 + 7) {
-		emulate_gp(ctxt, selector & 0xfffc);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (dt.size < index * 8 + 7)
+		return emulate_gp(ctxt, selector & 0xfffc);
 
 	addr = dt.address + index * 8;
 	ret = ops->write_std(addr, desc, sizeof *desc, ctxt->vcpu,
@@ -1165,10 +1161,8 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
 		change_mask |= EFLG_IF;
 		break;
 	case X86EMUL_MODE_VM86:
-		if (iopl < 3) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (iopl < 3)
+			return emulate_gp(ctxt, 0);
 		change_mask |= EFLG_IF;
 		break;
 	default: /* real mode */
@@ -1347,10 +1341,8 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	if (temp_eip & ~0xffff) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (temp_eip & ~0xffff)
+		return emulate_gp(ctxt, 0);
 
 	rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
 
@@ -1601,10 +1593,8 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 
 	/* syscall is not available in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    ctxt->mode == X86EMUL_MODE_VM86) {
-		emulate_ud(ctxt);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	    ctxt->mode == X86EMUL_MODE_VM86)
+		return emulate_ud(ctxt);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
@@ -1655,34 +1645,26 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	u16 cs_sel, ss_sel;
 
 	/* inject #GP if in real mode */
-	if (ctxt->mode == X86EMUL_MODE_REAL) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ctxt->mode == X86EMUL_MODE_REAL)
+		return emulate_gp(ctxt, 0);
 
 	/* XXX sysenter/sysexit have not been tested in 64bit mode.
 	 * Therefore, we inject an #UD.
 	 */
-	if (ctxt->mode == X86EMUL_MODE_PROT64) {
-		emulate_ud(ctxt);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ctxt->mode == X86EMUL_MODE_PROT64)
+		return emulate_ud(ctxt);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_PROT32:
-		if ((msr_data & 0xfffc) == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if ((msr_data & 0xfffc) == 0x0)
+			return emulate_gp(ctxt, 0);
 		break;
 	case X86EMUL_MODE_PROT64:
-		if (msr_data == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (msr_data == 0x0)
+			return emulate_gp(ctxt, 0);
 		break;
 	}
 
@@ -1722,10 +1704,8 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 
 	/* inject #GP if in real mode or Virtual 8086 mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
-	    ctxt->mode == X86EMUL_MODE_VM86) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	    ctxt->mode == X86EMUL_MODE_VM86)
+		return emulate_gp(ctxt, 0);
 
 	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
@@ -1740,18 +1720,14 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	switch (usermode) {
 	case X86EMUL_MODE_PROT32:
 		cs_sel = (u16)(msr_data + 16);
-		if ((msr_data & 0xfffc) == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if ((msr_data & 0xfffc) == 0x0)
+			return emulate_gp(ctxt, 0);
 		ss_sel = (u16)(msr_data + 24);
 		break;
 	case X86EMUL_MODE_PROT64:
 		cs_sel = (u16)(msr_data + 32);
-		if (msr_data == 0x0) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		if (msr_data == 0x0)
+			return emulate_gp(ctxt, 0);
 		ss_sel = cs_sel + 8;
 		cs.d = 0;
 		cs.l = 1;
@@ -1982,10 +1958,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	struct decode_cache *c = &ctxt->decode;
 	int ret;
 
-	if (ops->set_cr(3, tss->cr3, ctxt->vcpu)) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (ops->set_cr(3, tss->cr3, ctxt->vcpu))
+		return emulate_gp(ctxt, 0);
 	c->eip = tss->eip;
 	ctxt->eflags = tss->eflags | 2;
 	c->regs[VCPU_REGS_RAX] = tss->eax;
@@ -2107,10 +2081,8 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 
 	if (reason != TASK_SWITCH_IRET) {
 		if ((tss_selector & 3) > next_tss_desc.dpl ||
-		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl) {
-			emulate_gp(ctxt, 0);
-			return X86EMUL_PROPAGATE_FAULT;
-		}
+		    ops->cpl(ctxt->vcpu) > next_tss_desc.dpl)
+			return emulate_gp(ctxt, 0);
 	}
 
 	desc_limit = desc_limit_scaled(&next_tss_desc);
@@ -2331,10 +2303,8 @@ static int em_rdtsc(struct x86_emulate_ctxt *ctxt)
 	struct decode_cache *c = &ctxt->decode;
 	u64 tsc = 0;
 
-	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD)) {
-		emulate_gp(ctxt, 0);
-		return X86EMUL_PROPAGATE_FAULT;
-	}
+	if (cpl > 0 && (ctxt->ops->get_cr(4, ctxt->vcpu) & X86_CR4_TSD))
+		return emulate_gp(ctxt, 0);
 	ctxt->ops->get_msr(ctxt->vcpu, MSR_IA32_TSC, &tsc);
 	c->regs[VCPU_REGS_RAX] = (u32)tsc;
 	c->regs[VCPU_REGS_RDX] = tsc >> 32;
@@ -2979,28 +2949,24 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 	ctxt->decode.mem_read.pos = 0;
 
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && (c->d & No64)) {
-		emulate_ud(ctxt);
-		rc = X86EMUL_PROPAGATE_FAULT;
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* LOCK prefix is allowed only with some instructions */
 	if (c->lock_prefix && (!(c->d & Lock) || c->dst.type != OP_MEM)) {
-		emulate_ud(ctxt);
-		rc = X86EMUL_PROPAGATE_FAULT;
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	if ((c->d & SrcMask) == SrcMemFAddr && c->src.type != OP_MEM) {
-		emulate_ud(ctxt);
-		rc = X86EMUL_PROPAGATE_FAULT;
+		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
 	/* Privileged instruction can be executed only in CPL=0 */
 	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
-		emulate_gp(ctxt, 0);
-		rc = X86EMUL_PROPAGATE_FAULT;
+		rc = emulate_gp(ctxt, 0);
 		goto done;
 	}
 
@@ -3178,8 +3144,7 @@ special_insn:
 		break;
 	case 0x8c: /* mov r/m, sreg */
 		if (c->modrm_reg > VCPU_SREG_GS) {
-			emulate_ud(ctxt);
-			rc = X86EMUL_PROPAGATE_FAULT;
+			rc = emulate_ud(ctxt);
 			goto done;
 		}
 		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
@@ -3194,8 +3159,7 @@ special_insn:
 
 		if (c->modrm_reg == VCPU_SREG_CS ||
 		    c->modrm_reg > VCPU_SREG_GS) {
-			emulate_ud(ctxt);
-			rc = X86EMUL_PROPAGATE_FAULT;
+			rc = emulate_ud(ctxt);
 			goto done;
 		}
 
@@ -3327,8 +3291,7 @@ special_insn:
 	do_io_in:
 		c->dst.bytes = min(c->dst.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->src.val, c->dst.bytes)) {
-			emulate_gp(ctxt, 0);
-			rc = X86EMUL_PROPAGATE_FAULT;
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		}
 		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
@@ -3342,8 +3305,7 @@ special_insn:
 		c->src.bytes = min(c->src.bytes, 4u);
 		if (!emulator_io_permited(ctxt, ops, c->dst.val,
 					  c->src.bytes)) {
-			emulate_gp(ctxt, 0);
-			rc = X86EMUL_PROPAGATE_FAULT;
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		}
 		ops->pio_out_emulated(c->src.bytes, c->dst.val,
@@ -3368,16 +3330,14 @@ special_insn:
 		break;
 	case 0xfa: /* cli */
 		if (emulator_bad_iopl(ctxt, ops)) {
-			emulate_gp(ctxt, 0);
-			rc = X86EMUL_PROPAGATE_FAULT;
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else
 			ctxt->eflags &= ~X86_EFLAGS_IF;
 		break;
 	case 0xfb: /* sti */
 		if (emulator_bad_iopl(ctxt, ops)) {
-			emulate_gp(ctxt, 0);
-			rc = X86EMUL_PROPAGATE_FAULT;
+			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else {
 			ctxt->interruptibility = KVM_X86_SHADOW_INT_STI;