-rw-r--r--  arch/x86/kvm/emulate.c | 60 +++++++++++++++++++++++++++++++++---------------------------
1 file changed, 33 insertions(+), 27 deletions(-)
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index e8a58409b5ac..47e716ef46b7 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -1410,11 +1410,11 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 }
 
 /* Does not support long mode */
-static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
-				   u16 selector, int seg)
+static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+				     u16 selector, int seg, u8 cpl)
 {
 	struct desc_struct seg_desc, old_desc;
-	u8 dpl, rpl, cpl;
+	u8 dpl, rpl;
 	unsigned err_vec = GP_VECTOR;
 	u32 err_code = 0;
 	bool null_selector = !(selector & ~0x3); /* 0000-0003 are null */
@@ -1442,7 +1442,6 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 	}
 
 	rpl = selector & 3;
-	cpl = ctxt->ops->cpl(ctxt);
 
 	/* NULL selector is not valid for TR, CS and SS (except for long mode) */
 	if ((seg == VCPU_SREG_CS
@@ -1544,6 +1543,13 @@ exception:
 	return X86EMUL_PROPAGATE_FAULT;
 }
 
+static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
+				   u16 selector, int seg)
+{
+	u8 cpl = ctxt->ops->cpl(ctxt);
+	return __load_segment_descriptor(ctxt, selector, seg, cpl);
+}
+
 static void write_register_operand(struct operand *op)
 {
 	/* The 4-byte case *is* correct: in 64-bit mode we zero-extend. */
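
The hunk above completes a standard refactoring: the worker gains an explicit cpl parameter, and a thin wrapper with the old signature keeps every existing call site working while the task-switch paths below pass the privilege level explicitly. A minimal standalone sketch of the same pattern, with do_load, load, and current_cpl as illustrative stand-ins for the kernel's names:

#include <stdio.h>

/* Worker: the privilege level is now an explicit parameter. */
static int do_load(unsigned short selector, int seg, unsigned char cpl)
{
	printf("load sel=%#x seg=%d cpl=%u\n", selector, seg, cpl);
	return 0;
}

/* Stand-in for ctxt->ops->cpl(ctxt): reads the current privilege level. */
static unsigned char current_cpl(void)
{
	return 0;
}

/* Wrapper with the old signature: existing callers are untouched. */
static int load(unsigned short selector, int seg)
{
	return do_load(selector, seg, current_cpl());
}

int main(void)
{
	load(0x10, 1);       /* old-style caller: CPL comes from the context */
	do_load(0x0b, 1, 3); /* task-switch style: CPL passed explicitly */
	return 0;
}
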
@@ -2405,6 +2411,7 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 				 struct tss_segment_16 *tss)
 {
 	int ret;
+	u8 cpl;
 
 	ctxt->_eip = tss->ip;
 	ctxt->eflags = tss->flag | 2;
@@ -2427,23 +2434,25 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	set_segment_selector(ctxt, tss->ss, VCPU_SREG_SS);
 	set_segment_selector(ctxt, tss->ds, VCPU_SREG_DS);
 
+	cpl = tss->cs & 3;
+
 	/*
 	 * Now load segment descriptors. If fault happens at this stage
 	 * it is handled in a context of new task
 	 */
-	ret = load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR);
+	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
+	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
+	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
+	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
+	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
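
Note that cpl here is taken from the incoming task's CS selector (tss->cs & 3) rather than from ctxt->ops->cpl(): during a task switch, the privilege checks must run against the CPL the new task will have, which is the RPL field of the CS value loaded from the TSS. A small illustrative sketch of the selector layout this relies on (architectural facts, not kernel code):

#include <stdio.h>

/* An x86 segment selector packs three fields:
 *   bits 15..3 - descriptor table index
 *   bit  2     - table indicator (0 = GDT, 1 = LDT)
 *   bits 1..0  - requested privilege level (RPL)
 */
int main(void)
{
	unsigned short cs = 0x000b;	/* index 1, GDT, RPL 3 */

	printf("index = %u\n", cs >> 3);
	printf("TI    = %u\n", (cs >> 2) & 1);
	printf("RPL   = %u\n", cs & 3);	/* the 'tss->cs & 3' above */
	return 0;
}
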
@@ -2521,6 +2530,7 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 				 struct tss_segment_32 *tss)
 {
 	int ret;
+	u8 cpl;
 
 	if (ctxt->ops->set_cr(ctxt, 3, tss->cr3))
 		return emulate_gp(ctxt, 0);
@@ -2539,7 +2549,8 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 
 	/*
 	 * SDM says that segment selectors are loaded before segment
-	 * descriptors
+	 * descriptors. This is important because CPL checks will
+	 * use CS.RPL.
 	 */
 	set_segment_selector(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
 	set_segment_selector(ctxt, tss->es, VCPU_SREG_ES);
@@ -2553,43 +2564,38 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	 * If we're switching between Protected Mode and VM86, we need to make
 	 * sure to update the mode before loading the segment descriptors so
 	 * that the selectors are interpreted correctly.
-	 *
-	 * Need to get rflags to the vcpu struct immediately because it
-	 * influences the CPL which is checked at least when loading the segment
-	 * descriptors and when pushing an error code to the new kernel stack.
-	 *
-	 * TODO Introduce a separate ctxt->ops->set_cpl callback
 	 */
-	if (ctxt->eflags & X86_EFLAGS_VM)
+	if (ctxt->eflags & X86_EFLAGS_VM) {
 		ctxt->mode = X86EMUL_MODE_VM86;
-	else
+		cpl = 3;
+	} else {
 		ctxt->mode = X86EMUL_MODE_PROT32;
-
-	ctxt->ops->set_rflags(ctxt, ctxt->eflags);
+		cpl = tss->cs & 3;
+	}
 
 	/*
 	 * Now load segment descriptors. If fault happenes at this stage
 	 * it is handled in a context of new task
 	 */
-	ret = load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR);
+	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES);
+	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS);
+	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS);
+	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS);
+	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS);
+	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
-	ret = load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS);
+	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
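
The 32-bit path adds one twist over the 16-bit one: if the incoming EFLAGS has the VM bit set, the task resumes in virtual-8086 mode, where code always runs at privilege level 3 and CS holds a real-mode style segment rather than a protected-mode selector, so cpl is forced to 3 instead of being read from the selector's low bits. A hedged sketch of that decision, with cpl_for_task_switch as an invented name:

#include <stdio.h>

#define EFLAGS_VM (1u << 17)	/* architectural VM bit in EFLAGS */

/*
 * Mirror of the branch in the last hunk: VM86 tasks always run at
 * CPL 3; otherwise the RPL of the incoming CS becomes the new CPL.
 */
static unsigned cpl_for_task_switch(unsigned eflags, unsigned short cs)
{
	if (eflags & EFLAGS_VM)
		return 3;
	return cs & 3;
}

int main(void)
{
	printf("%u\n", cpl_for_task_switch(EFLAGS_VM, 0x0008)); /* 3 */
	printf("%u\n", cpl_for_task_switch(0, 0x000b));         /* 3 (RPL) */
	printf("%u\n", cpl_for_task_switch(0, 0x0008));         /* 0 */
	return 0;
}
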