aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/emulate.c
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2010-02-18 05:15:01 -0500
committerMarcelo Tosatti <mtosatti@redhat.com>2010-03-01 10:36:14 -0500
commitc697518a861e6c43b92b848895f9926580ee63c3 (patch)
tree23c0b9169dfe783bd4615795cd5495f369d42122 /arch/x86/kvm/emulate.c
parent6f550484a15ea1b468665cdf59f020bf08ccb292 (diff)
KVM: Fix segment descriptor loading
Add proper error and permission checking. This patch also changes the task switching code to load segment selectors before segment descriptors, as the SDM requires; otherwise permission checking during segment descriptor loading will be incorrect.

Cc: stable@kernel.org (2.6.33, 2.6.32)
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r--arch/x86/kvm/emulate.c30
1 files changed, 8 insertions, 22 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 2db760ff887c..a1a7b27adf41 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1309,7 +1309,7 @@ static int emulate_pop_sreg(struct x86_emulate_ctxt *ctxt,
1309 if (rc != 0) 1309 if (rc != 0)
1310 return rc; 1310 return rc;
1311 1311
1312 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, 1, seg); 1312 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)selector, seg);
1313 return rc; 1313 return rc;
1314} 1314}
1315 1315
@@ -1491,7 +1491,7 @@ static int emulate_ret_far(struct x86_emulate_ctxt *ctxt,
1491 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes); 1491 rc = emulate_pop(ctxt, ops, &cs, c->op_bytes);
1492 if (rc) 1492 if (rc)
1493 return rc; 1493 return rc;
1494 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, 1, VCPU_SREG_CS); 1494 rc = kvm_load_segment_descriptor(ctxt->vcpu, (u16)cs, VCPU_SREG_CS);
1495 return rc; 1495 return rc;
1496} 1496}
1497 1497
@@ -2122,12 +2122,11 @@ special_insn:
2122 break; 2122 break;
2123 case 0x8e: { /* mov seg, r/m16 */ 2123 case 0x8e: { /* mov seg, r/m16 */
2124 uint16_t sel; 2124 uint16_t sel;
2125 int type_bits;
2126 int err;
2127 2125
2128 sel = c->src.val; 2126 sel = c->src.val;
2129 2127
2130 if (c->modrm_reg == VCPU_SREG_CS) { 2128 if (c->modrm_reg == VCPU_SREG_CS ||
2129 c->modrm_reg > VCPU_SREG_GS) {
2131 kvm_queue_exception(ctxt->vcpu, UD_VECTOR); 2130 kvm_queue_exception(ctxt->vcpu, UD_VECTOR);
2132 goto done; 2131 goto done;
2133 } 2132 }
@@ -2135,18 +2134,7 @@ special_insn:
2135 if (c->modrm_reg == VCPU_SREG_SS) 2134 if (c->modrm_reg == VCPU_SREG_SS)
2136 toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS); 2135 toggle_interruptibility(ctxt, X86_SHADOW_INT_MOV_SS);
2137 2136
2138 if (c->modrm_reg <= 5) { 2137 rc = kvm_load_segment_descriptor(ctxt->vcpu, sel, c->modrm_reg);
2139 type_bits = (c->modrm_reg == 1) ? 9 : 1;
2140 err = kvm_load_segment_descriptor(ctxt->vcpu, sel,
2141 type_bits, c->modrm_reg);
2142 } else {
2143 printk(KERN_INFO "Invalid segreg in modrm byte 0x%02x\n",
2144 c->modrm);
2145 goto cannot_emulate;
2146 }
2147
2148 if (err < 0)
2149 goto cannot_emulate;
2150 2138
2151 c->dst.type = OP_NONE; /* Disable writeback. */ 2139 c->dst.type = OP_NONE; /* Disable writeback. */
2152 break; 2140 break;
@@ -2320,11 +2308,9 @@ special_insn:
2320 case 0xe9: /* jmp rel */ 2308 case 0xe9: /* jmp rel */
2321 goto jmp; 2309 goto jmp;
2322 case 0xea: /* jmp far */ 2310 case 0xea: /* jmp far */
2323 if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val, 9, 2311 if (kvm_load_segment_descriptor(ctxt->vcpu, c->src2.val,
2324 VCPU_SREG_CS) < 0) { 2312 VCPU_SREG_CS))
2325 DPRINTF("jmp far: Failed to load CS descriptor\n"); 2313 goto done;
2326 goto cannot_emulate;
2327 }
2328 2314
2329 c->eip = c->src.val; 2315 c->eip = c->src.val;
2330 break; 2316 break;