author    Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>    2011-05-14 12:00:52 -0400
committer Avi Kivity <avi@redhat.com>    2011-07-12 04:44:59 -0400
commit    7b105ca2903b84f023c49965d9a511c5e55256dc (patch)
tree      24fd046655eabda5d0160c3f88f9cd2195851bae /arch/x86
parent    ef5d75cc9af2bca7c525158666b5f9696846ffb6 (diff)
KVM: x86 emulator: Stop passing ctxt->ops as arg of emul functions
Dereference it in the actual users.

This not only cleans up the emulator but also makes it easy to convert
the old emulation functions to the new em_xxx() form later.

Note: Remove some inline keywords to let the compiler decide inlining.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
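In essence the patch applies one mechanical transformation throughout. The sketch below condenses it using the seg_base() hunk from this diff, trimmed for illustration rather than quoted in full:

/* Before: every helper took the callback table as an extra parameter. */
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
			      struct x86_emulate_ops *ops, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ops->get_cached_segment_base(ctxt, seg);
}

/* After: the table is reached through ctxt->ops at the point of use,
 * so callers pass only the context.
 */
static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
{
	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
		return 0;

	return ctxt->ops->get_cached_segment_base(ctxt, seg);
}

Helpers that invoke several callbacks (emulate_syscall(), emulate_sysenter(), emulate_sysexit(), the task-switch paths) instead keep a local "struct x86_emulate_ops *ops = ctxt->ops;" so their call sites stay short.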
Diffstat (limited to 'arch/x86')
-rw-r--r--    arch/x86/include/asm/kvm_emulate.h      3
-rw-r--r--    arch/x86/kvm/emulate.c                259
-rw-r--r--    arch/x86/kvm/x86.c                      2
3 files changed, 118 insertions(+), 146 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 0049211959c0..ab09ba290db3 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -373,6 +373,5 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt);
 int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
			 u16 tss_selector, int reason,
			 bool has_error_code, u32 error_code);
-int emulate_int_real(struct x86_emulate_ctxt *ctxt,
-		     struct x86_emulate_ops *ops, int irq);
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq);
 #endif /* _ASM_X86_KVM_X86_EMULATE_H */
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index df9082c811e0..d3f4466cd19c 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -475,13 +475,12 @@ static void set_seg_override(struct decode_cache *c, int seg)
 	c->seg_override = seg;
 }
 
-static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
-			      struct x86_emulate_ops *ops, int seg)
+static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
 {
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 		return 0;
 
-	return ops->get_cached_segment_base(ctxt, seg);
+	return ctxt->ops->get_cached_segment_base(ctxt, seg);
 }
 
 static unsigned seg_override(struct x86_emulate_ctxt *ctxt,
@@ -570,7 +569,7 @@ static int __linearize(struct x86_emulate_ctxt *ctxt,
 	u16 sel;
 	unsigned cpl, rpl;
 
-	la = seg_base(ctxt, ctxt->ops, addr.seg) + addr.ea;
+	la = seg_base(ctxt, addr.seg) + addr.ea;
 	switch (ctxt->mode) {
 	case X86EMUL_MODE_REAL:
 		break;
@@ -1052,7 +1051,6 @@ static void fetch_bit_operand(struct decode_cache *c)
 }
 
 static int read_emulated(struct x86_emulate_ctxt *ctxt,
-			 struct x86_emulate_ops *ops,
 			 unsigned long addr, void *dest, unsigned size)
 {
 	int rc;
@@ -1064,8 +1062,8 @@ static int read_emulated(struct x86_emulate_ctxt *ctxt,
 	if (mc->pos < mc->end)
 		goto read_cached;
 
-	rc = ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
-				&ctxt->exception);
+	rc = ctxt->ops->read_emulated(ctxt, addr, mc->data + mc->end, n,
+				      &ctxt->exception);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 	mc->end += n;
@@ -1090,7 +1088,7 @@ static int segmented_read(struct x86_emulate_ctxt *ctxt,
 	rc = linearize(ctxt, addr, size, false, &linear);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	return read_emulated(ctxt, ctxt->ops, linear, data, size);
+	return read_emulated(ctxt, linear, data, size);
 }
 
 static int segmented_write(struct x86_emulate_ctxt *ctxt,
@@ -1124,7 +1122,6 @@ static int segmented_cmpxchg(struct x86_emulate_ctxt *ctxt,
 }
 
 static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
-			   struct x86_emulate_ops *ops,
 			   unsigned int size, unsigned short port,
 			   void *dest)
 {
@@ -1143,7 +1140,7 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 		if (n == 0)
 			n = 1;
 		rc->pos = rc->end = 0;
-		if (!ops->pio_in_emulated(ctxt, size, port, rc->data, n))
+		if (!ctxt->ops->pio_in_emulated(ctxt, size, port, rc->data, n))
 			return 0;
 		rc->end = n * size;
 	}
@@ -1154,9 +1151,10 @@ static int pio_in_emulated(struct x86_emulate_ctxt *ctxt,
 }
 
 static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
-				     struct x86_emulate_ops *ops,
 				     u16 selector, struct desc_ptr *dt)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
+
 	if (selector & 1 << 2) {
 		struct desc_struct desc;
 		u16 sel;
@@ -1173,48 +1171,42 @@ static void get_descriptor_table_ptr(struct x86_emulate_ctxt *ctxt,
 
 /* allowed just for 8 bytes segments */
 static int read_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				   struct x86_emulate_ops *ops,
 				   u16 selector, struct desc_struct *desc)
 {
 	struct desc_ptr dt;
 	u16 index = selector >> 3;
-	int ret;
 	ulong addr;
 
-	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+	get_descriptor_table_ptr(ctxt, selector, &dt);
 
 	if (dt.size < index * 8 + 7)
 		return emulate_gp(ctxt, selector & 0xfffc);
-	addr = dt.address + index * 8;
-	ret = ops->read_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
 
-	return ret;
+	addr = dt.address + index * 8;
+	return ctxt->ops->read_std(ctxt, addr, desc, sizeof *desc,
+				   &ctxt->exception);
 }
 
 /* allowed just for 8 bytes segments */
 static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				    struct x86_emulate_ops *ops,
 				    u16 selector, struct desc_struct *desc)
 {
 	struct desc_ptr dt;
 	u16 index = selector >> 3;
 	ulong addr;
-	int ret;
 
-	get_descriptor_table_ptr(ctxt, ops, selector, &dt);
+	get_descriptor_table_ptr(ctxt, selector, &dt);
 
 	if (dt.size < index * 8 + 7)
 		return emulate_gp(ctxt, selector & 0xfffc);
 
 	addr = dt.address + index * 8;
-	ret = ops->write_std(ctxt, addr, desc, sizeof *desc, &ctxt->exception);
-
-	return ret;
+	return ctxt->ops->write_std(ctxt, addr, desc, sizeof *desc,
+				    &ctxt->exception);
 }
 
 /* Does not support long mode */
 static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				   struct x86_emulate_ops *ops,
 				   u16 selector, int seg)
 {
 	struct desc_struct seg_desc;
@@ -1249,7 +1241,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (null_selector) /* for NULL selector skip all following checks */
 		goto load;
 
-	ret = read_segment_descriptor(ctxt, ops, selector, &seg_desc);
+	ret = read_segment_descriptor(ctxt, selector, &seg_desc);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -1267,7 +1259,7 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 
 	rpl = selector & 3;
 	dpl = seg_desc.dpl;
-	cpl = ops->cpl(ctxt);
+	cpl = ctxt->ops->cpl(ctxt);
 
 	switch (seg) {
 	case VCPU_SREG_SS:
@@ -1318,12 +1310,12 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	if (seg_desc.s) {
 		/* mark segment as accessed */
 		seg_desc.type |= 1;
-		ret = write_segment_descriptor(ctxt, ops, selector, &seg_desc);
+		ret = write_segment_descriptor(ctxt, selector, &seg_desc);
 		if (ret != X86EMUL_CONTINUE)
 			return ret;
 	}
 load:
-	ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
+	ctxt->ops->set_segment(ctxt, selector, &seg_desc, 0, seg);
 	return X86EMUL_CONTINUE;
 exception:
 	emulate_exception(ctxt, err_vec, err_code, true);
@@ -1424,13 +1416,12 @@ static int em_pop(struct x86_emulate_ctxt *ctxt)
 }
 
 static int emulate_popf(struct x86_emulate_ctxt *ctxt,
-			struct x86_emulate_ops *ops,
 			void *dest, int len)
 {
 	int rc;
 	unsigned long val, change_mask;
 	int iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-	int cpl = ops->cpl(ctxt);
+	int cpl = ctxt->ops->cpl(ctxt);
 
 	rc = emulate_pop(ctxt, &val, len);
 	if (rc != X86EMUL_CONTINUE)
@@ -1471,11 +1462,10 @@ static int em_popf(struct x86_emulate_ctxt *ctxt)
 	c->dst.type = OP_REG;
 	c->dst.addr.reg = &ctxt->eflags;
 	c->dst.bytes = c->op_bytes;
-	return emulate_popf(ctxt, ctxt->ops, &c->dst.val, c->op_bytes);
+	return emulate_popf(ctxt, &c->dst.val, c->op_bytes);
 }
 
-static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops, int seg)
+static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
 {
 	struct decode_cache *c = &ctxt->decode;
 
@@ -1484,8 +1474,7 @@ static int emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
 	return em_push(ctxt);
 }
 
-static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
-			    struct x86_emulate_ops *ops, int seg)
+static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt, int seg)
 {
 	struct decode_cache *c = &ctxt->decode;
 	unsigned long selector;
@@ -1495,7 +1484,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = load_segment_descriptor(ctxt, ops, (u16)selector, seg);
+	rc = load_segment_descriptor(ctxt, (u16)selector, seg);
 	return rc;
 }
 
@@ -1549,10 +1538,10 @@ static int em_popa(struct x86_emulate_ctxt *ctxt)
 	return rc;
 }
 
-int emulate_int_real(struct x86_emulate_ctxt *ctxt,
-		     struct x86_emulate_ops *ops, int irq)
+int emulate_int_real(struct x86_emulate_ctxt *ctxt, int irq)
 {
 	struct decode_cache *c = &ctxt->decode;
+	struct x86_emulate_ops *ops = ctxt->ops;
 	int rc;
 	struct desc_ptr dt;
 	gva_t cs_addr;
@@ -1590,7 +1579,7 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = load_segment_descriptor(ctxt, ops, cs, VCPU_SREG_CS);
+	rc = load_segment_descriptor(ctxt, cs, VCPU_SREG_CS);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1599,12 +1588,11 @@ int emulate_int_real(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }
 
-static int emulate_int(struct x86_emulate_ctxt *ctxt,
-		       struct x86_emulate_ops *ops, int irq)
+static int emulate_int(struct x86_emulate_ctxt *ctxt, int irq)
 {
 	switch(ctxt->mode) {
 	case X86EMUL_MODE_REAL:
-		return emulate_int_real(ctxt, ops, irq);
+		return emulate_int_real(ctxt, irq);
 	case X86EMUL_MODE_VM86:
 	case X86EMUL_MODE_PROT16:
 	case X86EMUL_MODE_PROT32:
@@ -1615,8 +1603,7 @@ static int emulate_int(struct x86_emulate_ctxt *ctxt,
 	}
 }
 
-static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
-			     struct x86_emulate_ops *ops)
+static int emulate_iret_real(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc = X86EMUL_CONTINUE;
@@ -1648,7 +1635,7 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
+	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
 
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -1669,12 +1656,11 @@ static int emulate_iret_real(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }
 
-static inline int emulate_iret(struct x86_emulate_ctxt *ctxt,
-			       struct x86_emulate_ops* ops)
+static int emulate_iret(struct x86_emulate_ctxt *ctxt)
 {
 	switch(ctxt->mode) {
 	case X86EMUL_MODE_REAL:
-		return emulate_iret_real(ctxt, ops);
+		return emulate_iret_real(ctxt);
 	case X86EMUL_MODE_VM86:
 	case X86EMUL_MODE_PROT16:
 	case X86EMUL_MODE_PROT32:
@@ -1693,7 +1679,7 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 
 	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
 
-	rc = load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS);
+	rc = load_segment_descriptor(ctxt, sel, VCPU_SREG_CS);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1830,8 +1816,7 @@ static int em_grp9(struct x86_emulate_ctxt *ctxt)
 	return X86EMUL_CONTINUE;
 }
 
-static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
-			   struct x86_emulate_ops *ops)
+static int emulate_ret_far(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int rc;
@@ -1845,12 +1830,11 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
 	rc = emulate_pop(ctxt, &cs, c->op_bytes);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
-	rc = load_segment_descriptor(ctxt, ops, (u16)cs, VCPU_SREG_CS);
+	rc = load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS);
 	return rc;
 }
 
-static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
-				struct x86_emulate_ops *ops, int seg)
+static int emulate_load_segment(struct x86_emulate_ctxt *ctxt, int seg)
 {
 	struct decode_cache *c = &ctxt->decode;
 	unsigned short sel;
@@ -1858,7 +1842,7 @@ static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
 
 	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
 
-	rc = load_segment_descriptor(ctxt, ops, sel, seg);
+	rc = load_segment_descriptor(ctxt, sel, seg);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
@@ -1866,15 +1850,14 @@ static int emulate_load_segment(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }
 
-static inline void
+static void
 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
-			struct x86_emulate_ops *ops, struct desc_struct *cs,
-			struct desc_struct *ss)
+			struct desc_struct *cs, struct desc_struct *ss)
 {
 	u16 selector;
 
 	memset(cs, 0, sizeof(struct desc_struct));
-	ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
+	ctxt->ops->get_segment(ctxt, &selector, cs, NULL, VCPU_SREG_CS);
 	memset(ss, 0, sizeof(struct desc_struct));
 
 	cs->l = 0; /* will be adjusted later */
@@ -1897,10 +1880,10 @@ setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
 	ss->p = 1;
 }
 
-static int
-emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int emulate_syscall(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
+	struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct cs, ss;
 	u64 msr_data;
 	u16 cs_sel, ss_sel;
@@ -1912,7 +1895,7 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		return emulate_ud(ctxt);
 
 	ops->get_msr(ctxt, MSR_EFER, &efer);
-	setup_syscalls_segments(ctxt, ops, &cs, &ss);
+	setup_syscalls_segments(ctxt, &cs, &ss);
 
 	ops->get_msr(ctxt, MSR_STAR, &msr_data);
 	msr_data >>= 32;
@@ -1950,16 +1933,16 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	return X86EMUL_CONTINUE;
 }
 
-static int
-emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int emulate_sysenter(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
+	struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct cs, ss;
 	u64 msr_data;
 	u16 cs_sel, ss_sel;
 	u64 efer = 0;
 
-	ctxt->ops->get_msr(ctxt, MSR_EFER, &efer);
+	ops->get_msr(ctxt, MSR_EFER, &efer);
 	/* inject #GP if in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL)
 		return emulate_gp(ctxt, 0);
@@ -1970,7 +1953,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	if (ctxt->mode == X86EMUL_MODE_PROT64)
 		return emulate_ud(ctxt);
 
-	setup_syscalls_segments(ctxt, ops, &cs, &ss);
+	setup_syscalls_segments(ctxt, &cs, &ss);
 
 	ops->get_msr(ctxt, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (ctxt->mode) {
@@ -2006,10 +1989,10 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	return X86EMUL_CONTINUE;
 }
 
-static int
-emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
+static int emulate_sysexit(struct x86_emulate_ctxt *ctxt)
 {
 	struct decode_cache *c = &ctxt->decode;
+	struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct cs, ss;
 	u64 msr_data;
 	int usermode;
@@ -2020,7 +2003,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	    ctxt->mode == X86EMUL_MODE_VM86)
 		return emulate_gp(ctxt, 0);
 
-	setup_syscalls_segments(ctxt, ops, &cs, &ss);
+	setup_syscalls_segments(ctxt, &cs, &ss);
 
 	if ((c->rex_prefix & 0x8) != 0x0)
 		usermode = X86EMUL_MODE_PROT64;
@@ -2058,8 +2041,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	return X86EMUL_CONTINUE;
 }
 
-static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
-			      struct x86_emulate_ops *ops)
+static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt)
 {
 	int iopl;
 	if (ctxt->mode == X86EMUL_MODE_REAL)
@@ -2067,13 +2049,13 @@ static bool emulator_bad_iopl(struct x86_emulate_ctxt *ctxt,
 	if (ctxt->mode == X86EMUL_MODE_VM86)
 		return true;
 	iopl = (ctxt->eflags & X86_EFLAGS_IOPL) >> IOPL_SHIFT;
-	return ops->cpl(ctxt) > iopl;
+	return ctxt->ops->cpl(ctxt) > iopl;
 }
 
 static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
-					    struct x86_emulate_ops *ops,
 					    u16 port, u16 len)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct tr_seg;
 	u32 base3;
 	int r;
@@ -2104,14 +2086,13 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 }
 
 static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
-				 struct x86_emulate_ops *ops,
 				 u16 port, u16 len)
 {
 	if (ctxt->perm_ok)
 		return true;
 
-	if (emulator_bad_iopl(ctxt, ops))
-		if (!emulator_io_port_access_allowed(ctxt, ops, port, len))
+	if (emulator_bad_iopl(ctxt))
+		if (!emulator_io_port_access_allowed(ctxt, port, len))
 			return false;
 
 	ctxt->perm_ok = true;
@@ -2120,7 +2101,6 @@ static bool emulator_io_permited(struct x86_emulate_ctxt *ctxt,
 }
 
 static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
-				struct x86_emulate_ops *ops,
 				struct tss_segment_16 *tss)
 {
 	struct decode_cache *c = &ctxt->decode;
@@ -2144,7 +2124,6 @@ static void save_state_to_tss16(struct x86_emulate_ctxt *ctxt,
 }
 
 static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
-				 struct x86_emulate_ops *ops,
 				 struct tss_segment_16 *tss)
 {
 	struct decode_cache *c = &ctxt->decode;
@@ -2175,19 +2154,19 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	 * Now load segment descriptors. If fault happenes at this stage
 	 * it is handled in a context of new task
 	 */
-	ret = load_segment_descriptor(ctxt, ops, tss->ldt, VCPU_SREG_LDTR);
+	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2195,10 +2174,10 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 }
 
 static int task_switch_16(struct x86_emulate_ctxt *ctxt,
-			  struct x86_emulate_ops *ops,
 			  u16 tss_selector, u16 old_tss_sel,
 			  ulong old_tss_base, struct desc_struct *new_desc)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
 	struct tss_segment_16 tss_seg;
 	int ret;
 	u32 new_tss_base = get_desc_base(new_desc);
@@ -2209,7 +2188,7 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 		/* FIXME: need to provide precise fault address */
 		return ret;
 
-	save_state_to_tss16(ctxt, ops, &tss_seg);
+	save_state_to_tss16(ctxt, &tss_seg);
 
 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
 			     &ctxt->exception);
@@ -2235,16 +2214,15 @@ static int task_switch_16(struct x86_emulate_ctxt *ctxt,
 		return ret;
 	}
 
-	return load_state_from_tss16(ctxt, ops, &tss_seg);
+	return load_state_from_tss16(ctxt, &tss_seg);
 }
 
 static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
-				struct x86_emulate_ops *ops,
 				struct tss_segment_32 *tss)
 {
 	struct decode_cache *c = &ctxt->decode;
 
-	tss->cr3 = ops->get_cr(ctxt, 3);
+	tss->cr3 = ctxt->ops->get_cr(ctxt, 3);
 	tss->eip = c->eip;
 	tss->eflags = ctxt->eflags;
 	tss->eax = c->regs[VCPU_REGS_RAX];
@@ -2266,13 +2244,12 @@ static void save_state_to_tss32(struct x86_emulate_ctxt *ctxt,
 }
 
 static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
-				 struct x86_emulate_ops *ops,
 				 struct tss_segment_32 *tss)
 {
 	struct decode_cache *c = &ctxt->decode;
 	int ret;
 
-	if (ops->set_cr(ctxt, 3, tss->cr3))
+	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
 		return emulate_gp(ctxt, 0);
 	c->eip = tss->eip;
 	ctxt->eflags = tss->eflags | 2;
@@ -2301,25 +2278,25 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	 * Now load segment descriptors. If fault happenes at this stage
 	 * it is handled in a context of new task
 	 */
-	ret = load_segment_descriptor(ctxt, ops, tss->ldt_selector, VCPU_SREG_LDTR);
+	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->es, VCPU_SREG_ES);
+	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->cs, VCPU_SREG_CS);
+	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->ss, VCPU_SREG_SS);
+	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->ds, VCPU_SREG_DS);
+	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->fs, VCPU_SREG_FS);
+	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, ops, tss->gs, VCPU_SREG_GS);
+	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2327,10 +2304,10 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 }
 
 static int task_switch_32(struct x86_emulate_ctxt *ctxt,
-			  struct x86_emulate_ops *ops,
 			  u16 tss_selector, u16 old_tss_sel,
 			  ulong old_tss_base, struct desc_struct *new_desc)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
 	struct tss_segment_32 tss_seg;
 	int ret;
 	u32 new_tss_base = get_desc_base(new_desc);
@@ -2341,7 +2318,7 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 		/* FIXME: need to provide precise fault address */
 		return ret;
 
-	save_state_to_tss32(ctxt, ops, &tss_seg);
+	save_state_to_tss32(ctxt, &tss_seg);
 
 	ret = ops->write_std(ctxt, old_tss_base, &tss_seg, sizeof tss_seg,
 			     &ctxt->exception);
@@ -2367,14 +2344,14 @@ static int task_switch_32(struct x86_emulate_ctxt *ctxt,
 		return ret;
 	}
 
-	return load_state_from_tss32(ctxt, ops, &tss_seg);
+	return load_state_from_tss32(ctxt, &tss_seg);
 }
 
 static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
-				   struct x86_emulate_ops *ops,
 				   u16 tss_selector, int reason,
 				   bool has_error_code, u32 error_code)
 {
+	struct x86_emulate_ops *ops = ctxt->ops;
 	struct desc_struct curr_tss_desc, next_tss_desc;
 	int ret;
 	u16 old_tss_sel = get_segment_selector(ctxt, VCPU_SREG_TR);
@@ -2384,10 +2361,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 
 	/* FIXME: old_tss_base == ~0 ? */
 
-	ret = read_segment_descriptor(ctxt, ops, tss_selector, &next_tss_desc);
+	ret = read_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = read_segment_descriptor(ctxt, ops, old_tss_sel, &curr_tss_desc);
+	ret = read_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2409,8 +2386,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 
 	if (reason == TASK_SWITCH_IRET || reason == TASK_SWITCH_JMP) {
 		curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */
-		write_segment_descriptor(ctxt, ops, old_tss_sel,
-					 &curr_tss_desc);
+		write_segment_descriptor(ctxt, old_tss_sel, &curr_tss_desc);
 	}
 
 	if (reason == TASK_SWITCH_IRET)
@@ -2422,10 +2398,10 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 		old_tss_sel = 0xffff;
 
 	if (next_tss_desc.type & 8)
-		ret = task_switch_32(ctxt, ops, tss_selector, old_tss_sel,
+		ret = task_switch_32(ctxt, tss_selector, old_tss_sel,
 				     old_tss_base, &next_tss_desc);
 	else
-		ret = task_switch_16(ctxt, ops, tss_selector, old_tss_sel,
+		ret = task_switch_16(ctxt, tss_selector, old_tss_sel,
 				     old_tss_base, &next_tss_desc);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
@@ -2435,8 +2411,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 
 	if (reason != TASK_SWITCH_IRET) {
 		next_tss_desc.type |= (1 << 1); /* set busy flag */
-		write_segment_descriptor(ctxt, ops, tss_selector,
-					 &next_tss_desc);
+		write_segment_descriptor(ctxt, tss_selector, &next_tss_desc);
 	}
 
 	ops->set_cr(ctxt, 0, ops->get_cr(ctxt, 0) | X86_CR0_TS);
@@ -2458,14 +2433,13 @@ int emulator_task_switch(struct x86_emulate_ctxt *ctxt,
 			 u16 tss_selector, int reason,
 			 bool has_error_code, u32 error_code)
 {
-	struct x86_emulate_ops *ops = ctxt->ops;
 	struct decode_cache *c = &ctxt->decode;
 	int rc;
 
 	c->eip = ctxt->eip;
 	c->dst.type = OP_NONE;
 
-	rc = emulator_do_task_switch(ctxt, ops, tss_selector, reason,
+	rc = emulator_do_task_switch(ctxt, tss_selector, reason,
 				     has_error_code, error_code);
 
 	if (rc == X86EMUL_CONTINUE)
@@ -2535,7 +2509,7 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	old_eip = c->eip;
 
 	memcpy(&sel, c->src.valptr + c->op_bytes, 2);
-	if (load_segment_descriptor(ctxt, ctxt->ops, sel, VCPU_SREG_CS))
+	if (load_segment_descriptor(ctxt, sel, VCPU_SREG_CS))
 		return X86EMUL_CONTINUE;
 
 	c->eip = 0;
@@ -2973,7 +2947,7 @@ static int check_perm_in(struct x86_emulate_ctxt *ctxt)
 	struct decode_cache *c = &ctxt->decode;
 
 	c->dst.bytes = min(c->dst.bytes, 4u);
-	if (!emulator_io_permited(ctxt, ctxt->ops, c->src.val, c->dst.bytes))
+	if (!emulator_io_permited(ctxt, c->src.val, c->dst.bytes))
 		return emulate_gp(ctxt, 0);
 
 	return X86EMUL_CONTINUE;
@@ -2984,7 +2958,7 @@ static int check_perm_out(struct x86_emulate_ctxt *ctxt)
 	struct decode_cache *c = &ctxt->decode;
 
 	c->src.bytes = min(c->src.bytes, 4u);
-	if (!emulator_io_permited(ctxt, ctxt->ops, c->dst.val, c->src.bytes))
+	if (!emulator_io_permited(ctxt, c->dst.val, c->src.bytes))
 		return emulate_gp(ctxt, 0);
 
 	return X86EMUL_CONTINUE;
@@ -3724,8 +3698,7 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
 	return false;
 }
 
-int
-x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
+int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
 	u64 msr_data;
@@ -3854,25 +3827,25 @@ special_insn:
 
 	switch (c->b) {
 	case 0x06: /* push es */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_ES);
 		break;
 	case 0x07: /* pop es */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_ES);
 		break;
 	case 0x0e: /* push cs */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_CS);
 		break;
 	case 0x16: /* push ss */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_SS);
 		break;
 	case 0x17: /* pop ss */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_SS);
 		break;
 	case 0x1e: /* push ds */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_DS);
 		break;
 	case 0x1f: /* pop ds */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_DS);
 		break;
 	case 0x40 ... 0x47: /* inc r16/r32 */
 		emulate_1op("inc", c->dst, ctxt->eflags);
@@ -3938,7 +3911,7 @@ special_insn:
 		if (c->modrm_reg == VCPU_SREG_SS)
 			ctxt->interruptibility = KVM_X86_SHADOW_INT_MOV_SS;
 
-		rc = load_segment_descriptor(ctxt, ops, sel, c->modrm_reg);
+		rc = load_segment_descriptor(ctxt, sel, c->modrm_reg);
 
 		c->dst.type = OP_NONE;  /* Disable writeback. */
 		break;
@@ -3969,13 +3942,13 @@ special_insn:
 		rc = em_pop(ctxt);
 		break;
 	case 0xc4: /* les */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_ES);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_ES);
 		break;
 	case 0xc5: /* lds */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_DS);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_DS);
 		break;
 	case 0xcb: /* ret far */
-		rc = emulate_ret_far(ctxt, ops);
+		rc = emulate_ret_far(ctxt);
 		break;
 	case 0xcc: /* int3 */
 		irq = 3;
@@ -3983,7 +3956,7 @@ special_insn:
 	case 0xcd: /* int n */
 		irq = c->src.val;
 	do_interrupt:
-		rc = emulate_int(ctxt, ops, irq);
+		rc = emulate_int(ctxt, irq);
 		break;
 	case 0xce: /* into */
 		if (ctxt->eflags & EFLG_OF) {
@@ -3992,7 +3965,7 @@ special_insn:
 		}
 		break;
 	case 0xcf: /* iret */
-		rc = emulate_iret(ctxt, ops);
+		rc = emulate_iret(ctxt);
 		break;
 	case 0xd0 ... 0xd1: /* Grp2 */
 		rc = em_grp2(ctxt);
@@ -4037,7 +4010,7 @@ special_insn:
 	case 0xec: /* in al,dx */
 	case 0xed: /* in (e/r)ax,dx */
 	do_io_in:
-		if (!pio_in_emulated(ctxt, ops, c->dst.bytes, c->src.val,
+		if (!pio_in_emulated(ctxt, c->dst.bytes, c->src.val,
 				     &c->dst.val))
 			goto done; /* IO is needed */
 		break;
@@ -4065,14 +4038,14 @@ special_insn:
 		ctxt->eflags |= EFLG_CF;
 		break;
 	case 0xfa: /* cli */
-		if (emulator_bad_iopl(ctxt, ops)) {
+		if (emulator_bad_iopl(ctxt)) {
 			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else
 			ctxt->eflags &= ~X86_EFLAGS_IF;
 		break;
 	case 0xfb: /* sti */
-		if (emulator_bad_iopl(ctxt, ops)) {
+		if (emulator_bad_iopl(ctxt)) {
 			rc = emulate_gp(ctxt, 0);
 			goto done;
 		} else {
@@ -4154,7 +4127,7 @@ done:
 twobyte_insn:
 	switch (c->b) {
 	case 0x05: /* syscall */
-		rc = emulate_syscall(ctxt, ops);
+		rc = emulate_syscall(ctxt);
 		break;
 	case 0x06:
 		rc = em_clts(ctxt);
@@ -4216,10 +4189,10 @@ twobyte_insn:
 		rc = X86EMUL_CONTINUE;
 		break;
 	case 0x34: /* sysenter */
-		rc = emulate_sysenter(ctxt, ops);
+		rc = emulate_sysenter(ctxt);
 		break;
 	case 0x35: /* sysexit */
-		rc = emulate_sysexit(ctxt, ops);
+		rc = emulate_sysexit(ctxt);
 		break;
 	case 0x40 ... 0x4f: /* cmov */
 		c->dst.val = c->dst.orig_val = c->src.val;
@@ -4234,10 +4207,10 @@ twobyte_insn:
 		c->dst.val = test_cc(c->b, ctxt->eflags);
 		break;
 	case 0xa0: /* push fs */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_FS);
 		break;
 	case 0xa1: /* pop fs */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_FS);
 		break;
 	case 0xa3:
 	bt: /* bt */
@@ -4251,10 +4224,10 @@ twobyte_insn:
 		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
 		break;
 	case 0xa8: /* push gs */
-		rc = emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
+		rc = emulate_push_sreg(ctxt, VCPU_SREG_GS);
 		break;
 	case 0xa9: /* pop gs */
-		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);
+		rc = emulate_pop_sreg(ctxt, VCPU_SREG_GS);
 		break;
 	case 0xab:
 	bts: /* bts */
@@ -4284,17 +4257,17 @@ twobyte_insn:
 		}
 		break;
 	case 0xb2: /* lss */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_SS);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_SS);
 		break;
 	case 0xb3:
 	btr: /* btr */
 		emulate_2op_SrcV_nobyte("btr", c->src, c->dst, ctxt->eflags);
 		break;
 	case 0xb4: /* lfs */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_FS);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_FS);
 		break;
 	case 0xb5: /* lgs */
-		rc = emulate_load_segment(ctxt, ops, VCPU_SREG_GS);
+		rc = emulate_load_segment(ctxt, VCPU_SREG_GS);
 		break;
 	case 0xb6 ... 0xb7: /* movzx */
 		c->dst.bytes = c->op_bytes;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 77c9d8673dc4..51df0b6c891a 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4513,7 +4513,7 @@ int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip)
 	vcpu->arch.emulate_ctxt.decode.ad_bytes = 2;
 	vcpu->arch.emulate_ctxt.decode.eip = vcpu->arch.emulate_ctxt.eip +
 								 inc_eip;
-	ret = emulate_int_real(&vcpu->arch.emulate_ctxt, &emulate_ops, irq);
+	ret = emulate_int_real(&vcpu->arch.emulate_ctxt, irq);
 
 	if (ret != X86EMUL_CONTINUE)
 		return EMULATE_FAIL;