aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTakuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>2010-01-20 02:47:21 -0500
committerMarcelo Tosatti <mtosatti@redhat.com>2010-03-01 10:36:01 -0500
commitb60d513c32e2ddc8b3e9e1465b94913d44d19810 (patch)
treed0b42d5ff9e331107ada9b7b00f08778341953bd
parent647492047763c3ee8fe51ecf9a04f39040aa495b (diff)
KVM: x86: Use macros for x86_emulate_ops to avoid future mistakes
The return values from x86_emulate_ops are defined in kvm_emulate.h as the macros X86EMUL_*. But in emulate.c, we are comparing the return values from these ops with 0 to check whether they are X86EMUL_CONTINUE or not: X86EMUL_CONTINUE happens to be defined as 0 at the moment. To avoid possible mistakes in the future, this patch substitutes "X86EMUL_CONTINUE" for the literal "0" wherever it is compared with the return values from x86_emulate_ops. We think there are more places where we should use these macros, but the meanings of the rc values in x86_emulate_insn() were not so clear at a glance. If we used proper macros in that function, we would be able to follow the flow of each emulation more easily and, maybe, more securely. Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
-rw-r--r--arch/x86/kvm/emulate.c65
1 file changed, 36 insertions, 29 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 0f89e320bc96..48c7f9f8a08f 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1196,7 +1196,7 @@ static int emulate_pop(struct x86_emulate_ctxt *ctxt,
1196 rc = ops->read_emulated(register_address(c, ss_base(ctxt), 1196 rc = ops->read_emulated(register_address(c, ss_base(ctxt),
1197 c->regs[VCPU_REGS_RSP]), 1197 c->regs[VCPU_REGS_RSP]),
1198 dest, len, ctxt->vcpu); 1198 dest, len, ctxt->vcpu);
1199 if (rc != 0) 1199 if (rc != X86EMUL_CONTINUE)
1200 return rc; 1200 return rc;
1201 1201
1202 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len); 1202 register_address_increment(c, &c->regs[VCPU_REGS_RSP], len);
@@ -1370,7 +1370,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1370 int rc; 1370 int rc;
1371 1371
1372 rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu); 1372 rc = ops->read_emulated(memop, &old, 8, ctxt->vcpu);
1373 if (rc != 0) 1373 if (rc != X86EMUL_CONTINUE)
1374 return rc; 1374 return rc;
1375 1375
1376 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) || 1376 if (((u32) (old >> 0) != (u32) c->regs[VCPU_REGS_RAX]) ||
@@ -1385,7 +1385,7 @@ static inline int emulate_grp9(struct x86_emulate_ctxt *ctxt,
1385 (u32) c->regs[VCPU_REGS_RBX]; 1385 (u32) c->regs[VCPU_REGS_RBX];
1386 1386
1387 rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu); 1387 rc = ops->cmpxchg_emulated(memop, &old, &new, 8, ctxt->vcpu);
1388 if (rc != 0) 1388 if (rc != X86EMUL_CONTINUE)
1389 return rc; 1389 return rc;
1390 ctxt->eflags |= EFLG_ZF; 1390 ctxt->eflags |= EFLG_ZF;
1391 } 1391 }
@@ -1451,7 +1451,7 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
1451 &c->dst.val, 1451 &c->dst.val,
1452 c->dst.bytes, 1452 c->dst.bytes,
1453 ctxt->vcpu); 1453 ctxt->vcpu);
1454 if (rc != 0) 1454 if (rc != X86EMUL_CONTINUE)
1455 return rc; 1455 return rc;
1456 break; 1456 break;
1457 case OP_NONE: 1457 case OP_NONE:
@@ -1749,7 +1749,7 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1749 &c->src.val, 1749 &c->src.val,
1750 c->src.bytes, 1750 c->src.bytes,
1751 ctxt->vcpu); 1751 ctxt->vcpu);
1752 if (rc != 0) 1752 if (rc != X86EMUL_CONTINUE)
1753 goto done; 1753 goto done;
1754 c->src.orig_val = c->src.val; 1754 c->src.orig_val = c->src.val;
1755 } 1755 }
@@ -1768,12 +1768,15 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt, struct x86_emulate_ops *ops)
1768 c->dst.ptr = (void *)c->dst.ptr + 1768 c->dst.ptr = (void *)c->dst.ptr +
1769 (c->src.val & mask) / 8; 1769 (c->src.val & mask) / 8;
1770 } 1770 }
1771 if (!(c->d & Mov) && 1771 if (!(c->d & Mov)) {
1772 /* optimisation - avoid slow emulated read */ 1772 /* optimisation - avoid slow emulated read */
1773 ((rc = ops->read_emulated((unsigned long)c->dst.ptr, 1773 rc = ops->read_emulated((unsigned long)c->dst.ptr,
1774 &c->dst.val, 1774 &c->dst.val,
1775 c->dst.bytes, ctxt->vcpu)) != 0)) 1775 c->dst.bytes,
1776 goto done; 1776 ctxt->vcpu);
1777 if (rc != X86EMUL_CONTINUE)
1778 goto done;
1779 }
1777 } 1780 }
1778 c->dst.orig_val = c->dst.val; 1781 c->dst.orig_val = c->dst.val;
1779 1782
@@ -2039,11 +2042,12 @@ special_insn:
2039 c->dst.ptr = (unsigned long *)register_address(c, 2042 c->dst.ptr = (unsigned long *)register_address(c,
2040 es_base(ctxt), 2043 es_base(ctxt),
2041 c->regs[VCPU_REGS_RDI]); 2044 c->regs[VCPU_REGS_RDI]);
2042 if ((rc = ops->read_emulated(register_address(c, 2045 rc = ops->read_emulated(register_address(c,
2043 seg_override_base(ctxt, c), 2046 seg_override_base(ctxt, c),
2044 c->regs[VCPU_REGS_RSI]), 2047 c->regs[VCPU_REGS_RSI]),
2045 &c->dst.val, 2048 &c->dst.val,
2046 c->dst.bytes, ctxt->vcpu)) != 0) 2049 c->dst.bytes, ctxt->vcpu);
2050 if (rc != X86EMUL_CONTINUE)
2047 goto done; 2051 goto done;
2048 register_address_increment(c, &c->regs[VCPU_REGS_RSI], 2052 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
2049 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes 2053 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes
@@ -2058,10 +2062,11 @@ special_insn:
2058 c->src.ptr = (unsigned long *)register_address(c, 2062 c->src.ptr = (unsigned long *)register_address(c,
2059 seg_override_base(ctxt, c), 2063 seg_override_base(ctxt, c),
2060 c->regs[VCPU_REGS_RSI]); 2064 c->regs[VCPU_REGS_RSI]);
2061 if ((rc = ops->read_emulated((unsigned long)c->src.ptr, 2065 rc = ops->read_emulated((unsigned long)c->src.ptr,
2062 &c->src.val, 2066 &c->src.val,
2063 c->src.bytes, 2067 c->src.bytes,
2064 ctxt->vcpu)) != 0) 2068 ctxt->vcpu);
2069 if (rc != X86EMUL_CONTINUE)
2065 goto done; 2070 goto done;
2066 2071
2067 c->dst.type = OP_NONE; /* Disable writeback. */ 2072 c->dst.type = OP_NONE; /* Disable writeback. */
@@ -2069,10 +2074,11 @@ special_insn:
2069 c->dst.ptr = (unsigned long *)register_address(c, 2074 c->dst.ptr = (unsigned long *)register_address(c,
2070 es_base(ctxt), 2075 es_base(ctxt),
2071 c->regs[VCPU_REGS_RDI]); 2076 c->regs[VCPU_REGS_RDI]);
2072 if ((rc = ops->read_emulated((unsigned long)c->dst.ptr, 2077 rc = ops->read_emulated((unsigned long)c->dst.ptr,
2073 &c->dst.val, 2078 &c->dst.val,
2074 c->dst.bytes, 2079 c->dst.bytes,
2075 ctxt->vcpu)) != 0) 2080 ctxt->vcpu);
2081 if (rc != X86EMUL_CONTINUE)
2076 goto done; 2082 goto done;
2077 2083
2078 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr); 2084 DPRINTF("cmps: mem1=0x%p mem2=0x%p\n", c->src.ptr, c->dst.ptr);
@@ -2102,12 +2108,13 @@ special_insn:
2102 c->dst.type = OP_REG; 2108 c->dst.type = OP_REG;
2103 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes; 2109 c->dst.bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
2104 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX]; 2110 c->dst.ptr = (unsigned long *)&c->regs[VCPU_REGS_RAX];
2105 if ((rc = ops->read_emulated(register_address(c, 2111 rc = ops->read_emulated(register_address(c,
2106 seg_override_base(ctxt, c), 2112 seg_override_base(ctxt, c),
2107 c->regs[VCPU_REGS_RSI]), 2113 c->regs[VCPU_REGS_RSI]),
2108 &c->dst.val, 2114 &c->dst.val,
2109 c->dst.bytes, 2115 c->dst.bytes,
2110 ctxt->vcpu)) != 0) 2116 ctxt->vcpu);
2117 if (rc != X86EMUL_CONTINUE)
2111 goto done; 2118 goto done;
2112 register_address_increment(c, &c->regs[VCPU_REGS_RSI], 2119 register_address_increment(c, &c->regs[VCPU_REGS_RSI],
2113 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes 2120 (ctxt->eflags & EFLG_DF) ? -c->dst.bytes