Diffstat (limited to 'arch/x86/kvm/emulate.c')
 arch/x86/kvm/emulate.c | 54 ++++++++++++++++++++++++++++++++++--------------------
 1 file changed, 34 insertions(+), 20 deletions(-)
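In brief: this patch replaces the `bool in_task_switch` parameter of __load_segment_descriptor() with a new enum x86_transfer_type, so the function knows which kind of control transfer it is serving (none, far CALL/JMP, far RET, or task switch) rather than only whether a task switch is in progress. The extra precision is used in two places: the error vector for a bad selector remains #TS during a task switch and #GP otherwise, and a far CALL/JMP whose target selector refers to a system descriptor now returns X86EMUL_UNHANDLEABLE instead of taking the exception path. The sketch below is a minimal standalone illustration of that decision logic; the helper load_seg(), the bool system_desc, and the simplified result codes are stand-ins invented for this example, not the kernel's own definitions.

/* Standalone sketch of the new decision logic; simplified names,
 * not the kernel's code. Build with: cc -o sketch sketch.c */
#include <stdbool.h>
#include <stdio.h>

enum x86_transfer_type {        /* as introduced by the patch */
        X86_TRANSFER_NONE,
        X86_TRANSFER_CALL_JMP,
        X86_TRANSFER_RET,
        X86_TRANSFER_TASK_SWITCH,
};

enum result { EMUL_CONTINUE, EMUL_EXCEPTION, EMUL_UNHANDLEABLE };

/* system_desc stands in for "seg_desc.s is clear", i.e. the selector
 * points at a system descriptor such as a call or task gate. */
static enum result load_seg(enum x86_transfer_type transfer, bool system_desc)
{
        /* #TS during a task switch, #GP otherwise, as in the patch */
        const char *err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? "#TS"
                                                                     : "#GP";
        if (system_desc) {
                if (transfer == X86_TRANSFER_CALL_JMP)
                        return EMUL_UNHANDLEABLE; /* gate: bail out, no fault */
                printf("inject %s\n", err_vec);
                return EMUL_EXCEPTION;
        }
        return EMUL_CONTINUE;
}

int main(void)
{
        load_seg(X86_TRANSFER_TASK_SWITCH, true);   /* prints: inject #TS */
        load_seg(X86_TRANSFER_RET, true);           /* prints: inject #GP */
        printf("call/jmp via gate -> %d (unhandleable)\n",
               load_seg(X86_TRANSFER_CALL_JMP, true));
        return 0;
}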
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index fff11885a3a0..1fec3ed86cbf 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -263,6 +263,13 @@ struct instr_dual {
 #define EFLG_RESERVED_ZEROS_MASK 0xffc0802a
 #define EFLG_RESERVED_ONE_MASK 2
 
+enum x86_transfer_type {
+	X86_TRANSFER_NONE,
+	X86_TRANSFER_CALL_JMP,
+	X86_TRANSFER_RET,
+	X86_TRANSFER_TASK_SWITCH,
+};
+
 static ulong reg_read(struct x86_emulate_ctxt *ctxt, unsigned nr)
 {
 	if (!(ctxt->regs_valid & (1 << nr))) {
@@ -1472,7 +1479,7 @@ static int write_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 /* Does not support long mode */
 static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				     u16 selector, int seg, u8 cpl,
-				     bool in_task_switch,
+				     enum x86_transfer_type transfer,
 				     struct desc_struct *desc)
 {
 	struct desc_struct seg_desc, old_desc;
@@ -1526,11 +1533,15 @@ static int __load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 		return ret;
 
 	err_code = selector & 0xfffc;
-	err_vec = in_task_switch ? TS_VECTOR : GP_VECTOR;
+	err_vec = (transfer == X86_TRANSFER_TASK_SWITCH) ? TS_VECTOR :
+							   GP_VECTOR;
 
 	/* can't load system descriptor into segment selector */
-	if (seg <= VCPU_SREG_GS && !seg_desc.s)
+	if (seg <= VCPU_SREG_GS && !seg_desc.s) {
+		if (transfer == X86_TRANSFER_CALL_JMP)
+			return X86EMUL_UNHANDLEABLE;
 		goto exception;
+	}
 
 	if (!seg_desc.p) {
 		err_vec = (seg == VCPU_SREG_SS) ? SS_VECTOR : NP_VECTOR;
@@ -1628,7 +1639,8 @@ static int load_segment_descriptor(struct x86_emulate_ctxt *ctxt,
 				   u16 selector, int seg)
 {
 	u8 cpl = ctxt->ops->cpl(ctxt);
-	return __load_segment_descriptor(ctxt, selector, seg, cpl, false, NULL);
+	return __load_segment_descriptor(ctxt, selector, seg, cpl,
+					 X86_TRANSFER_NONE, NULL);
 }
 
 static void write_register_operand(struct operand *op)
@@ -2040,7 +2052,8 @@ static int em_jmp_far(struct x86_emulate_ctxt *ctxt)
 
 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
 
-	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
+	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
+				       X86_TRANSFER_CALL_JMP,
 				       &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -2129,7 +2142,8 @@ static int em_ret_far(struct x86_emulate_ctxt *ctxt)
 	/* Outer-privilege level return is not implemented */
 	if (ctxt->mode >= X86EMUL_MODE_PROT16 && (cs & 3) > cpl)
 		return X86EMUL_UNHANDLEABLE;
-	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl, false,
+	rc = __load_segment_descriptor(ctxt, (u16)cs, VCPU_SREG_CS, cpl,
+				       X86_TRANSFER_RET,
 				       &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -2566,23 +2580,23 @@ static int load_state_from_tss16(struct x86_emulate_ctxt *ctxt,
 	 * it is handled in a context of new task
 	 */
 	ret = __load_segment_descriptor(ctxt, tss->ldt, VCPU_SREG_LDTR, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -2704,31 +2718,31 @@ static int load_state_from_tss32(struct x86_emulate_ctxt *ctxt,
 	 * it is handled in a context of new task
 	 */
 	ret = __load_segment_descriptor(ctxt, tss->ldt_selector, VCPU_SREG_LDTR,
-					cpl, true, NULL);
+					cpl, X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->es, VCPU_SREG_ES, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->cs, VCPU_SREG_CS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->ss, VCPU_SREG_SS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->ds, VCPU_SREG_DS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->fs, VCPU_SREG_FS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 	ret = __load_segment_descriptor(ctxt, tss->gs, VCPU_SREG_GS, cpl,
-					true, NULL);
+					X86_TRANSFER_TASK_SWITCH, NULL);
 	if (ret != X86EMUL_CONTINUE)
 		return ret;
 
@@ -3010,8 +3024,8 @@ static int em_call_far(struct x86_emulate_ctxt *ctxt)
 	ops->get_segment(ctxt, &old_cs, &old_desc, NULL, VCPU_SREG_CS);
 
 	memcpy(&sel, ctxt->src.valptr + ctxt->op_bytes, 2);
-	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl, false,
-				       &new_desc);
+	rc = __load_segment_descriptor(ctxt, sel, VCPU_SREG_CS, cpl,
+				       X86_TRANSFER_CALL_JMP, &new_desc);
 	if (rc != X86EMUL_CONTINUE)
 		return X86EMUL_CONTINUE;
 
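Design note: widening the flag to an enum keeps every call site self-documenting (load_segment_descriptor() passes X86_TRANSFER_NONE, em_jmp_far() and em_call_far() pass X86_TRANSFER_CALL_JMP, em_ret_far() passes X86_TRANSFER_RET, and the TSS loaders pass X86_TRANSFER_TASK_SWITCH), and further transfer kinds can be added later without changing the signature again.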