author		Gleb Natapov <gleb@redhat.com>	2010-04-28 12:15:30 -0400
committer	Avi Kivity <avi@redhat.com>	2010-08-01 03:35:31 -0400
commit		79168fd1a307ffee46ee03b7f8711559241738c7 (patch)
tree		e8b259d42828d641d11d8b6be95fa8c48aa57e74 /arch
parent		5951c4423724759906b10a26aa6a8817c4afa615 (diff)

KVM: x86 emulator: cleanup some direct calls into kvm to use existing callbacks

Use callbacks from x86_emulate_ops to access segments instead of calling
into kvm directly.

Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
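The heart of the change is routing every host access through the x86_emulate_ops callback table passed into the emulator, instead of reaching for the global kvm_x86_ops. A minimal, self-contained sketch of that pattern follows; the types and names here (struct vcpu, struct emulate_ops, host_get_segment_base) are simplified stand-ins for illustration only, not the real definitions in arch/x86/include/asm/kvm_emulate.h, which carry many more hooks.

/* Sketch of the callback-table pattern, with hypothetical stand-in types. */
#include <stdio.h>

struct vcpu {
	unsigned long seg_bases[6];	/* stand-in for per-vcpu segment state */
};

/* Miniature analogue of x86_emulate_ops: the emulator's view of the host. */
struct emulate_ops {
	unsigned long (*get_cached_segment_base)(int seg, struct vcpu *vcpu);
};

struct emulate_ctxt {
	struct vcpu *vcpu;
};

/* Host-side implementation, supplied by the hypervisor. */
static unsigned long host_get_segment_base(int seg, struct vcpu *vcpu)
{
	return vcpu->seg_bases[seg];
}

/*
 * The emulator never calls the host directly; everything it needs
 * arrives through the ops argument, so the emulator core stays
 * decoupled from any particular host implementation.
 */
static unsigned long seg_base(struct emulate_ctxt *ctxt,
			      struct emulate_ops *ops, int seg)
{
	return ops->get_cached_segment_base(seg, ctxt->vcpu);
}

int main(void)
{
	struct vcpu vcpu = { .seg_bases = { 0, 0x1000, 0x2000 } };
	struct emulate_ops ops = {
		.get_cached_segment_base = host_get_segment_base,
	};
	struct emulate_ctxt ctxt = { .vcpu = &vcpu };

	printf("seg 1 base = %#lx\n", seg_base(&ctxt, &ops, 1)); /* 0x1000 */
	return 0;
}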
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kvm/emulate.c	200
1 file changed, 105 insertions(+), 95 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 8228778ace38..f56ec486393e 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -622,31 +622,35 @@ static void set_seg_override(struct decode_cache *c, int seg)
 	c->seg_override = seg;
 }
 
-static unsigned long seg_base(struct x86_emulate_ctxt *ctxt, int seg)
+static unsigned long seg_base(struct x86_emulate_ctxt *ctxt,
+			      struct x86_emulate_ops *ops, int seg)
 {
 	if (ctxt->mode == X86EMUL_MODE_PROT64 && seg < VCPU_SREG_FS)
 		return 0;
 
-	return kvm_x86_ops->get_segment_base(ctxt->vcpu, seg);
+	return ops->get_cached_segment_base(seg, ctxt->vcpu);
 }
 
 static unsigned long seg_override_base(struct x86_emulate_ctxt *ctxt,
+				       struct x86_emulate_ops *ops,
 				       struct decode_cache *c)
 {
 	if (!c->has_seg_override)
 		return 0;
 
-	return seg_base(ctxt, c->seg_override);
+	return seg_base(ctxt, ops, c->seg_override);
 }
 
-static unsigned long es_base(struct x86_emulate_ctxt *ctxt)
+static unsigned long es_base(struct x86_emulate_ctxt *ctxt,
+			     struct x86_emulate_ops *ops)
 {
-	return seg_base(ctxt, VCPU_SREG_ES);
+	return seg_base(ctxt, ops, VCPU_SREG_ES);
 }
 
-static unsigned long ss_base(struct x86_emulate_ctxt *ctxt)
+static unsigned long ss_base(struct x86_emulate_ctxt *ctxt,
+			     struct x86_emulate_ops *ops)
 {
-	return seg_base(ctxt, VCPU_SREG_SS);
+	return seg_base(ctxt, ops, VCPU_SREG_SS);
 }
 
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
@@ -941,7 +945,7 @@ x86_decode_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	memset(c, 0, sizeof(struct decode_cache));
 	c->eip = ctxt->eip;
 	c->fetch.start = c->fetch.end = c->eip;
-	ctxt->cs_base = seg_base(ctxt, VCPU_SREG_CS);
+	ctxt->cs_base = seg_base(ctxt, ops, VCPU_SREG_CS);
 	memcpy(c->regs, ctxt->vcpu->arch.regs, sizeof c->regs);
 
 	switch (mode) {
@@ -1065,7 +1069,7 @@ done_prefixes:
 		set_seg_override(c, VCPU_SREG_DS);
 
 	if (!(!c->twobyte && c->b == 0x8d))
-		c->modrm_ea += seg_override_base(ctxt, c);
+		c->modrm_ea += seg_override_base(ctxt, ops, c);
 
 	if (c->ad_bytes != 8)
 		c->modrm_ea = (u32)c->modrm_ea;
@@ -1161,7 +1165,7 @@ done_prefixes:
 		c->src.type = OP_MEM;
 		c->src.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
 		c->src.ptr = (unsigned long *)
-			register_address(c, seg_override_base(ctxt, c),
+			register_address(c, seg_override_base(ctxt, ops, c),
 					 c->regs[VCPU_REGS_RSI]);
 		c->src.val = 0;
 		break;
@@ -1257,7 +1261,7 @@ done_prefixes:
 		c->dst.type = OP_MEM;
 		c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
 		c->dst.ptr = (unsigned long *)
-			register_address(c, es_base(ctxt),
+			register_address(c, es_base(ctxt, ops),
 					 c->regs[VCPU_REGS_RDI]);
 		c->dst.val = 0;
 		break;
@@ -1516,7 +1520,8 @@ exception:
 	return X86EMUL_PROPAGATE_FAULT;
 }
 
-static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
+static inline void emulate_push(struct x86_emulate_ctxt *ctxt,
+				struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
 
@@ -1524,7 +1529,7 @@ static inline void emulate_push(struct x86_emulate_ctxt *ctxt)
 	c->dst.bytes = c->op_bytes;
 	c->dst.val = c->src.val;
 	register_address_increment(c, &c->regs[VCPU_REGS_RSP], -c->op_bytes);
-	c->dst.ptr = (void *) register_address(c, ss_base(ctxt),
+	c->dst.ptr = (void *) register_address(c, ss_base(ctxt, ops),
 					       c->regs[VCPU_REGS_RSP]);
 }
 
@@ -1535,7 +1540,7 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
 	struct decode_cache *c = &ctxt->decode;
 	int rc;
 
-	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt),
+	rc = read_emulated(ctxt, ops, register_address(c, ss_base(ctxt, ops),
 						       c->regs[VCPU_REGS_RSP]),
 			   dest, len);
 	if (rc != X86EMUL_CONTINUE)
@@ -1588,15 +1593,14 @@ static int emulate_popf(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }
 
-static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt, int seg)
+static void emulate_push_sreg(struct x86_emulate_ctxt *ctxt,
+			      struct x86_emulate_ops *ops, int seg)
 {
 	struct decode_cache *c = &ctxt->decode;
-	struct kvm_segment segment;
 
-	kvm_x86_ops->get_segment(ctxt->vcpu, &segment, seg);
+	c->src.val = ops->get_segment_selector(seg, ctxt->vcpu);
 
-	c->src.val = segment.selector;
-	emulate_push(ctxt);
+	emulate_push(ctxt, ops);
 }
 
 static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
@@ -1614,7 +1618,8 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
 	return rc;
 }
 
-static void emulate_pusha(struct x86_emulate_ctxt *ctxt)
+static void emulate_pusha(struct x86_emulate_ctxt *ctxt,
+			  struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
 	unsigned long old_esp = c->regs[VCPU_REGS_RSP];
@@ -1624,7 +1629,7 @@ static void emulate_pusha(struct x86_emulate_ctxt *ctxt)
 		(reg == VCPU_REGS_RSP) ?
 		(c->src.val = old_esp) : (c->src.val = c->regs[reg]);
 
-		emulate_push(ctxt);
+		emulate_push(ctxt, ops);
 		++reg;
 	}
 }
@@ -1726,14 +1731,14 @@ static inline int emulate_grp45(struct x86_emulate_ctxt *ctxt,
 		old_eip = c->eip;
 		c->eip = c->src.val;
 		c->src.val = old_eip;
-		emulate_push(ctxt);
+		emulate_push(ctxt, ops);
 		break;
 	}
 	case 4: /* jmp abs */
 		c->eip = c->src.val;
 		break;
 	case 6:	/* push */
-		emulate_push(ctxt);
+		emulate_push(ctxt, ops);
 		break;
 	}
 	return X86EMUL_CONTINUE;
@@ -1847,39 +1852,40 @@ static void toggle_interruptibility(struct x86_emulate_ctxt *ctxt, u32 mask)
 
 static inline void
 setup_syscalls_segments(struct x86_emulate_ctxt *ctxt,
-			struct kvm_segment *cs, struct kvm_segment *ss)
+			struct x86_emulate_ops *ops, struct desc_struct *cs,
+			struct desc_struct *ss)
 {
-	memset(cs, 0, sizeof(struct kvm_segment));
-	kvm_x86_ops->get_segment(ctxt->vcpu, cs, VCPU_SREG_CS);
-	memset(ss, 0, sizeof(struct kvm_segment));
+	memset(cs, 0, sizeof(struct desc_struct));
+	ops->get_cached_descriptor(cs, VCPU_SREG_CS, ctxt->vcpu);
+	memset(ss, 0, sizeof(struct desc_struct));
 
 	cs->l = 0;		/* will be adjusted later */
-	cs->base = 0;		/* flat segment */
+	set_desc_base(cs, 0);	/* flat segment */
 	cs->g = 1;		/* 4kb granularity */
-	cs->limit = 0xffffffff;	/* 4GB limit */
+	set_desc_limit(cs, 0xfffff);	/* 4GB limit */
 	cs->type = 0x0b;	/* Read, Execute, Accessed */
 	cs->s = 1;
 	cs->dpl = 0;		/* will be adjusted later */
-	cs->present = 1;
-	cs->db = 1;
+	cs->p = 1;
+	cs->d = 1;
 
-	ss->unusable = 0;
-	ss->base = 0;		/* flat segment */
-	ss->limit = 0xffffffff;	/* 4GB limit */
+	set_desc_base(ss, 0);	/* flat segment */
+	set_desc_limit(ss, 0xfffff);	/* 4GB limit */
 	ss->g = 1;		/* 4kb granularity */
 	ss->s = 1;
 	ss->type = 0x03;	/* Read/Write, Accessed */
-	ss->db = 1;		/* 32bit stack segment */
+	ss->d = 1;		/* 32bit stack segment */
 	ss->dpl = 0;
-	ss->present = 1;
+	ss->p = 1;
 }
 
 static int
 emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
-	struct kvm_segment cs, ss;
+	struct desc_struct cs, ss;
 	u64 msr_data;
+	u16 cs_sel, ss_sel;
 
 	/* syscall is not available in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
@@ -1888,19 +1894,21 @@ emulate_syscall(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
-	setup_syscalls_segments(ctxt, &cs, &ss);
+	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
 	ops->get_msr(ctxt->vcpu, MSR_STAR, &msr_data);
 	msr_data >>= 32;
-	cs.selector = (u16)(msr_data & 0xfffc);
-	ss.selector = (u16)(msr_data + 8);
+	cs_sel = (u16)(msr_data & 0xfffc);
+	ss_sel = (u16)(msr_data + 8);
 
 	if (is_long_mode(ctxt->vcpu)) {
-		cs.db = 0;
+		cs.d = 0;
 		cs.l = 1;
 	}
-	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
-	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
 	c->regs[VCPU_REGS_RCX] = c->eip;
 	if (is_long_mode(ctxt->vcpu)) {
@@ -1930,8 +1938,9 @@ static int
 emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
-	struct kvm_segment cs, ss;
+	struct desc_struct cs, ss;
 	u64 msr_data;
+	u16 cs_sel, ss_sel;
 
 	/* inject #GP if in real mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL) {
@@ -1947,7 +1956,7 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
-	setup_syscalls_segments(ctxt, &cs, &ss);
+	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (ctxt->mode) {
@@ -1966,18 +1975,20 @@ emulate_sysenter(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	}
 
 	ctxt->eflags &= ~(EFLG_VM | EFLG_IF | EFLG_RF);
-	cs.selector = (u16)msr_data;
-	cs.selector &= ~SELECTOR_RPL_MASK;
-	ss.selector = cs.selector + 8;
-	ss.selector &= ~SELECTOR_RPL_MASK;
+	cs_sel = (u16)msr_data;
+	cs_sel &= ~SELECTOR_RPL_MASK;
+	ss_sel = cs_sel + 8;
+	ss_sel &= ~SELECTOR_RPL_MASK;
 	if (ctxt->mode == X86EMUL_MODE_PROT64
 		|| is_long_mode(ctxt->vcpu)) {
-		cs.db = 0;
+		cs.d = 0;
 		cs.l = 1;
 	}
 
-	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
-	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_EIP, &msr_data);
 	c->eip = msr_data;
@@ -1992,9 +2003,10 @@ static int
 emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 {
 	struct decode_cache *c = &ctxt->decode;
-	struct kvm_segment cs, ss;
+	struct desc_struct cs, ss;
 	u64 msr_data;
 	int usermode;
+	u16 cs_sel, ss_sel;
 
 	/* inject #GP if in real mode or Virtual 8086 mode */
 	if (ctxt->mode == X86EMUL_MODE_REAL ||
@@ -2003,7 +2015,7 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 		return X86EMUL_PROPAGATE_FAULT;
 	}
 
-	setup_syscalls_segments(ctxt, &cs, &ss);
+	setup_syscalls_segments(ctxt, ops, &cs, &ss);
 
 	if ((c->rex_prefix & 0x8) != 0x0)
 		usermode = X86EMUL_MODE_PROT64;
@@ -2015,29 +2027,31 @@ emulate_sysexit(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
 	ops->get_msr(ctxt->vcpu, MSR_IA32_SYSENTER_CS, &msr_data);
 	switch (usermode) {
 	case X86EMUL_MODE_PROT32:
-		cs.selector = (u16)(msr_data + 16);
+		cs_sel = (u16)(msr_data + 16);
 		if ((msr_data & 0xfffc) == 0x0) {
 			kvm_inject_gp(ctxt->vcpu, 0);
 			return X86EMUL_PROPAGATE_FAULT;
 		}
-		ss.selector = (u16)(msr_data + 24);
+		ss_sel = (u16)(msr_data + 24);
 		break;
 	case X86EMUL_MODE_PROT64:
-		cs.selector = (u16)(msr_data + 32);
+		cs_sel = (u16)(msr_data + 32);
 		if (msr_data == 0x0) {
 			kvm_inject_gp(ctxt->vcpu, 0);
 			return X86EMUL_PROPAGATE_FAULT;
 		}
-		ss.selector = cs.selector + 8;
-		cs.db = 0;
+		ss_sel = cs_sel + 8;
+		cs.d = 0;
 		cs.l = 1;
 		break;
 	}
-	cs.selector |= SELECTOR_RPL_MASK;
-	ss.selector |= SELECTOR_RPL_MASK;
+	cs_sel |= SELECTOR_RPL_MASK;
+	ss_sel |= SELECTOR_RPL_MASK;
 
-	kvm_x86_ops->set_segment(ctxt->vcpu, &cs, VCPU_SREG_CS);
-	kvm_x86_ops->set_segment(ctxt->vcpu, &ss, VCPU_SREG_SS);
+	ops->set_cached_descriptor(&cs, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_segment_selector(cs_sel, VCPU_SREG_CS, ctxt->vcpu);
+	ops->set_cached_descriptor(&ss, VCPU_SREG_SS, ctxt->vcpu);
+	ops->set_segment_selector(ss_sel, VCPU_SREG_SS, ctxt->vcpu);
 
 	c->eip = ctxt->vcpu->arch.regs[VCPU_REGS_RDX];
 	c->regs[VCPU_REGS_RSP] = ctxt->vcpu->arch.regs[VCPU_REGS_RCX];
@@ -2061,25 +2075,25 @@ static bool emulator_io_port_access_allowed(struct x86_emulate_ctxt *ctxt,
 					 struct x86_emulate_ops *ops,
 					 u16 port, u16 len)
 {
-	struct kvm_segment tr_seg;
+	struct desc_struct tr_seg;
 	int r;
 	u16 io_bitmap_ptr;
 	u8 perm, bit_idx = port & 0x7;
 	unsigned mask = (1 << len) - 1;
 
-	kvm_get_segment(ctxt->vcpu, &tr_seg, VCPU_SREG_TR);
-	if (tr_seg.unusable)
+	ops->get_cached_descriptor(&tr_seg, VCPU_SREG_TR, ctxt->vcpu);
+	if (!tr_seg.p)
 		return false;
-	if (tr_seg.limit < 103)
+	if (desc_limit_scaled(&tr_seg) < 103)
 		return false;
-	r = ops->read_std(tr_seg.base + 102, &io_bitmap_ptr, 2, ctxt->vcpu,
-			  NULL);
+	r = ops->read_std(get_desc_base(&tr_seg) + 102, &io_bitmap_ptr, 2,
+			  ctxt->vcpu, NULL);
 	if (r != X86EMUL_CONTINUE)
 		return false;
-	if (io_bitmap_ptr + port/8 > tr_seg.limit)
+	if (io_bitmap_ptr + port/8 > desc_limit_scaled(&tr_seg))
 		return false;
-	r = ops->read_std(tr_seg.base + io_bitmap_ptr + port/8, &perm, 1,
-			  ctxt->vcpu, NULL);
+	r = ops->read_std(get_desc_base(&tr_seg) + io_bitmap_ptr + port/8,
+			  &perm, 1, ctxt->vcpu, NULL);
 	if (r != X86EMUL_CONTINUE)
 		return false;
 	if ((perm >> bit_idx) & mask)
@@ -2445,7 +2459,7 @@ static int emulator_do_task_switch(struct x86_emulate_ctxt *ctxt,
 		c->op_bytes = c->ad_bytes = (next_tss_desc.type & 8) ? 4 : 2;
 		c->lock_prefix = 0;
 		c->src.val = (unsigned long) error_code;
-		emulate_push(ctxt);
+		emulate_push(ctxt, ops);
 	}
 
 	return ret;
@@ -2588,7 +2602,7 @@ special_insn:
 		emulate_2op_SrcV("add", c->src, c->dst, ctxt->eflags);
 		break;
 	case 0x06:		/* push es */
-		emulate_push_sreg(ctxt, VCPU_SREG_ES);
+		emulate_push_sreg(ctxt, ops, VCPU_SREG_ES);
 		break;
 	case 0x07:		/* pop es */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_ES);
@@ -2600,14 +2614,14 @@ special_insn:
 		emulate_2op_SrcV("or", c->src, c->dst, ctxt->eflags);
 		break;
 	case 0x0e:		/* push cs */
-		emulate_push_sreg(ctxt, VCPU_SREG_CS);
+		emulate_push_sreg(ctxt, ops, VCPU_SREG_CS);
 		break;
 	case 0x10 ... 0x15:
 	      adc:		/* adc */
 		emulate_2op_SrcV("adc", c->src, c->dst, ctxt->eflags);
 		break;
 	case 0x16:		/* push ss */
-		emulate_push_sreg(ctxt, VCPU_SREG_SS);
+		emulate_push_sreg(ctxt, ops, VCPU_SREG_SS);
 		break;
 	case 0x17:		/* pop ss */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_SS);
@@ -2619,7 +2633,7 @@ special_insn:
 		emulate_2op_SrcV("sbb", c->src, c->dst, ctxt->eflags);
 		break;
 	case 0x1e:		/* push ds */
-		emulate_push_sreg(ctxt, VCPU_SREG_DS);
+		emulate_push_sreg(ctxt, ops, VCPU_SREG_DS);
 		break;
 	case 0x1f:		/* pop ds */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_DS);
@@ -2649,7 +2663,7 @@ special_insn:
 		emulate_1op("dec", c->dst, ctxt->eflags);
 		break;
 	case 0x50 ... 0x57:  /* push reg */
-		emulate_push(ctxt);
+		emulate_push(ctxt, ops);
 		break;
 	case 0x58 ... 0x5f: /* pop reg */
 	pop_instruction:
@@ -2658,7 +2672,7 @@ special_insn:
 			goto done;
 		break;
 	case 0x60:	/* pusha */
-		emulate_pusha(ctxt);
+		emulate_pusha(ctxt, ops);
 		break;
 	case 0x61:	/* popa */
 		rc = emulate_popa(ctxt, ops);
@@ -2672,7 +2686,7 @@ special_insn:
 		break;
 	case 0x68: /* push imm */
 	case 0x6a: /* push imm8 */
-		emulate_push(ctxt);
+		emulate_push(ctxt, ops);
 		break;
 	case 0x6c:		/* insb */
 	case 0x6d:		/* insw/insd */
@@ -2752,18 +2766,13 @@ special_insn:
 		break;
 	case 0x88 ... 0x8b:	/* mov */
 		goto mov;
-	case 0x8c: { /* mov r/m, sreg */
-		struct kvm_segment segreg;
-
-		if (c->modrm_reg <= VCPU_SREG_GS)
-			kvm_get_segment(ctxt->vcpu, &segreg, c->modrm_reg);
-		else {
+	case 0x8c:  /* mov r/m, sreg */
+		if (c->modrm_reg > VCPU_SREG_GS) {
 			kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
 			goto done;
 		}
-		c->dst.val = segreg.selector;
+		c->dst.val = ops->get_segment_selector(c->modrm_reg, ctxt->vcpu);
 		break;
-	}
 	case 0x8d: /* lea r16/r32, m */
 		c->dst.val = c->modrm_ea;
 		break;
@@ -2804,7 +2813,7 @@ special_insn:
 		goto xchg;
 	case 0x9c: /* pushf */
 		c->src.val = (unsigned long) ctxt->eflags;
-		emulate_push(ctxt);
+		emulate_push(ctxt, ops);
 		break;
 	case 0x9d: /* popf */
 		c->dst.type = OP_REG;
@@ -2872,7 +2881,7 @@ special_insn:
 		long int rel = c->src.val;
 		c->src.val = (unsigned long) c->eip;
 		jmp_rel(c, rel);
-		emulate_push(ctxt);
+		emulate_push(ctxt, ops);
 		break;
 	}
 	case 0xe9: /* jmp rel */
@@ -2985,11 +2994,12 @@ writeback:
 	c->dst.type = saved_dst_type;
 
 	if ((c->d & SrcMask) == SrcSI)
-		string_addr_inc(ctxt, seg_override_base(ctxt, c), VCPU_REGS_RSI,
-				&c->src);
+		string_addr_inc(ctxt, seg_override_base(ctxt, ops, c),
+				VCPU_REGS_RSI, &c->src);
 
 	if ((c->d & DstMask) == DstDI)
-		string_addr_inc(ctxt, es_base(ctxt), VCPU_REGS_RDI, &c->dst);
+		string_addr_inc(ctxt, es_base(ctxt, ops), VCPU_REGS_RDI,
+				&c->dst);
 
 	if (c->rep_prefix && (c->d & String)) {
 		struct read_cache *rc = &ctxt->decode.io_read;
@@ -3188,7 +3198,7 @@ twobyte_insn:
 		c->dst.type = OP_NONE;
 		break;
 	case 0xa0:	  /* push fs */
-		emulate_push_sreg(ctxt, VCPU_SREG_FS);
+		emulate_push_sreg(ctxt, ops, VCPU_SREG_FS);
 		break;
 	case 0xa1:	 /* pop fs */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_FS);
@@ -3207,7 +3217,7 @@ twobyte_insn:
 		emulate_2op_cl("shld", c->src2, c->src, c->dst, ctxt->eflags);
 		break;
 	case 0xa8:	/* push gs */
-		emulate_push_sreg(ctxt, VCPU_SREG_GS);
+		emulate_push_sreg(ctxt, ops, VCPU_SREG_GS);
 		break;
 	case 0xa9:	/* pop gs */
 		rc = emulate_pop_sreg(ctxt, ops, VCPU_SREG_GS);