author		Avi Kivity <avi@redhat.com>	2011-03-29 05:41:27 -0400
committer	Avi Kivity <avi@redhat.com>	2011-05-11 07:56:59 -0400
commit		1253791df91b064c039282feea094e5943294924 (patch)
tree		36cd958089c9f011a1085b3ae82556fa0d16b480 /arch/x86/kvm/emulate.c
parent		0d7cdee83ad1582eecbf3b4a220e82dcb5ad561c (diff)
KVM: x86 emulator: SSE support
Add support for marking an instruction as SSE, switching registers used
to the SSE register file.

Signed-off-by: Avi Kivity <avi@redhat.com>
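For orientation: this patch adds only the decode and writeback plumbing. An instruction opts in by OR-ing the new Sse flag into its opcode-table entry; no entries are flagged in the hunks below, so actual users of the flag presumably arrive in follow-up patches. A purely hypothetical table entry, assuming the existing D() flag-constructor macro and the usual Src/Dst decode flags, might look like:

	/* hypothetical entry: a 16-byte SSE reg/mem move with a ModRM byte */
	D(DstReg | SrcMem | ModRM | Sse | Mov),

With Sse set, decode routes register operands to the XMM register file and forces a 16-byte operand size, as the hunks below show.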
Diffstat (limited to 'arch/x86/kvm/emulate.c')
-rw-r--r--	arch/x86/kvm/emulate.c	102
1 file changed, 99 insertions, 3 deletions
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index fcce7aeacc8..4e11102f560 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -76,6 +76,7 @@
 #define Group       (1<<14)     /* Bits 3:5 of modrm byte extend opcode */
 #define GroupDual   (1<<15)     /* Alternate decoding of mod == 3 */
 #define Prefix      (1<<16)     /* Instruction varies with 66/f2/f3 prefix */
+#define Sse         (1<<17)     /* SSE Vector instruction */
 /* Misc flags */
 #define VendorSpecific (1<<22) /* Vendor specific instruction */
 #define NoAccess    (1<<23) /* Don't access memory (lea/invlpg/verr etc) */
@@ -505,6 +506,11 @@ static int emulate_de(struct x86_emulate_ctxt *ctxt)
 	return emulate_exception(ctxt, DE_VECTOR, 0, false);
 }
 
+static int emulate_nm(struct x86_emulate_ctxt *ctxt)
+{
+	return emulate_exception(ctxt, NM_VECTOR, 0, false);
+}
+
 static int do_fetch_insn_byte(struct x86_emulate_ctxt *ctxt,
 			      struct x86_emulate_ops *ops,
 			      unsigned long eip, u8 *dest)
@@ -632,7 +638,63 @@ static void fetch_register_operand(struct operand *op)
 	}
 }
 
-static void decode_register_operand(struct operand *op,
+static void read_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data, int reg)
+{
+	ctxt->ops->get_fpu(ctxt);
+	switch (reg) {
+	case 0: asm("movdqu %%xmm0, %0" : "=m"(*data)); break;
+	case 1: asm("movdqu %%xmm1, %0" : "=m"(*data)); break;
+	case 2: asm("movdqu %%xmm2, %0" : "=m"(*data)); break;
+	case 3: asm("movdqu %%xmm3, %0" : "=m"(*data)); break;
+	case 4: asm("movdqu %%xmm4, %0" : "=m"(*data)); break;
+	case 5: asm("movdqu %%xmm5, %0" : "=m"(*data)); break;
+	case 6: asm("movdqu %%xmm6, %0" : "=m"(*data)); break;
+	case 7: asm("movdqu %%xmm7, %0" : "=m"(*data)); break;
+#ifdef CONFIG_X86_64
+	case 8: asm("movdqu %%xmm8, %0" : "=m"(*data)); break;
+	case 9: asm("movdqu %%xmm9, %0" : "=m"(*data)); break;
+	case 10: asm("movdqu %%xmm10, %0" : "=m"(*data)); break;
+	case 11: asm("movdqu %%xmm11, %0" : "=m"(*data)); break;
+	case 12: asm("movdqu %%xmm12, %0" : "=m"(*data)); break;
+	case 13: asm("movdqu %%xmm13, %0" : "=m"(*data)); break;
+	case 14: asm("movdqu %%xmm14, %0" : "=m"(*data)); break;
+	case 15: asm("movdqu %%xmm15, %0" : "=m"(*data)); break;
+#endif
+	default: BUG();
+	}
+	ctxt->ops->put_fpu(ctxt);
+}
+
+static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
+			  int reg)
+{
+	ctxt->ops->get_fpu(ctxt);
+	switch (reg) {
+	case 0: asm("movdqu %0, %%xmm0" : : "m"(*data)); break;
+	case 1: asm("movdqu %0, %%xmm1" : : "m"(*data)); break;
+	case 2: asm("movdqu %0, %%xmm2" : : "m"(*data)); break;
+	case 3: asm("movdqu %0, %%xmm3" : : "m"(*data)); break;
+	case 4: asm("movdqu %0, %%xmm4" : : "m"(*data)); break;
+	case 5: asm("movdqu %0, %%xmm5" : : "m"(*data)); break;
+	case 6: asm("movdqu %0, %%xmm6" : : "m"(*data)); break;
+	case 7: asm("movdqu %0, %%xmm7" : : "m"(*data)); break;
+#ifdef CONFIG_X86_64
+	case 8: asm("movdqu %0, %%xmm8" : : "m"(*data)); break;
+	case 9: asm("movdqu %0, %%xmm9" : : "m"(*data)); break;
+	case 10: asm("movdqu %0, %%xmm10" : : "m"(*data)); break;
+	case 11: asm("movdqu %0, %%xmm11" : : "m"(*data)); break;
+	case 12: asm("movdqu %0, %%xmm12" : : "m"(*data)); break;
+	case 13: asm("movdqu %0, %%xmm13" : : "m"(*data)); break;
+	case 14: asm("movdqu %0, %%xmm14" : : "m"(*data)); break;
+	case 15: asm("movdqu %0, %%xmm15" : : "m"(*data)); break;
+#endif
+	default: BUG();
+	}
+	ctxt->ops->put_fpu(ctxt);
+}
+
+static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
+				    struct operand *op,
 				    struct decode_cache *c,
 				    int inhibit_bytereg)
 {
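Two notes on the hunk above. The per-register switch exists because inline asm must name a fixed %xmm register at build time, so the runtime register number selects one of sixteen statically emitted movdqu instructions; the get_fpu()/put_fpu() callbacks bracket each access so the guest's FPU/SSE state is resident on the host CPU while the emulator touches the %xmm registers. The sse128_t type and the OP_XMM/addr.xmm/vec_val operand members used below come from the companion header change, which falls outside this emulate.c-only diffstat; a rough sketch (an assumption, since that header is not shown here) would be:

	/* assumed companion header addition: a 16-byte vector type used by
	 * read_sse_reg()/write_sse_reg() and by struct operand's vec_val */
	typedef u32 __attribute__((vector_size(16))) sse128_t;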
@@ -641,6 +703,15 @@ static void decode_register_operand(struct operand *op,
 
 	if (!(c->d & ModRM))
 		reg = (c->b & 7) | ((c->rex_prefix & 1) << 3);
+
+	if (c->d & Sse) {
+		op->type = OP_XMM;
+		op->bytes = 16;
+		op->addr.xmm = reg;
+		read_sse_reg(ctxt, &op->vec_val, reg);
+		return;
+	}
+
 	op->type = OP_REG;
 	if ((c->d & ByteOp) && !inhibit_bytereg) {
 		op->addr.reg = decode_register(reg, c->regs, highbyte_regs);
@@ -680,6 +751,13 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 		op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;
 		op->addr.reg = decode_register(c->modrm_rm,
 					       c->regs, c->d & ByteOp);
+		if (c->d & Sse) {
+			op->type = OP_XMM;
+			op->bytes = 16;
+			op->addr.xmm = c->modrm_rm;
+			read_sse_reg(ctxt, &op->vec_val, c->modrm_rm);
+			return rc;
+		}
 		fetch_register_operand(op);
 		return rc;
 	}
@@ -1107,6 +1185,9 @@ static inline int writeback(struct x86_emulate_ctxt *ctxt,
 		if (rc != X86EMUL_CONTINUE)
 			return rc;
 		break;
+	case OP_XMM:
+		write_sse_reg(ctxt, &c->dst.vec_val, c->dst.addr.xmm);
+		break;
 	case OP_NONE:
 		/* no writeback */
 		break;
@@ -2785,6 +2866,9 @@ done_prefixes:
 		c->op_bytes = 4;
 	}
 
+	if (c->d & Sse)
+		c->op_bytes = 16;
+
 	/* ModRM and SIB bytes. */
 	if (c->d & ModRM) {
 		rc = decode_modrm(ctxt, ops, &memop);
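Forcing op_bytes to 16 before operand decode means the memory form of an Sse-flagged instruction is also fetched and written back as a 128-bit quantity, since the memory and register operand widths are derived from op_bytes later in decode (compare the "op->bytes = (c->d & ByteOp) ? 1 : c->op_bytes;" context line in the decode_modrm() hunk above).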
@@ -2814,7 +2898,7 @@ done_prefixes:
 	case SrcNone:
 		break;
 	case SrcReg:
-		decode_register_operand(&c->src, c, 0);
+		decode_register_operand(ctxt, &c->src, c, 0);
 		break;
 	case SrcMem16:
 		memop.bytes = 2;
@@ -2905,7 +2989,7 @@ done_prefixes:
 	/* Decode and fetch the destination operand: register or memory. */
 	switch (c->d & DstMask) {
 	case DstReg:
-		decode_register_operand(&c->dst, c,
+		decode_register_operand(ctxt, &c->dst, c,
 			 c->twobyte && (c->b == 0xb6 || c->b == 0xb7));
 		break;
 	case DstImmUByte:
@@ -3001,6 +3085,18 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		goto done;
 	}
 
+	if ((c->d & Sse)
+	    && ((ops->get_cr(0, ctxt->vcpu) & X86_CR0_EM)
+		|| !(ops->get_cr(4, ctxt->vcpu) & X86_CR4_OSFXSR))) {
+		rc = emulate_ud(ctxt);
+		goto done;
+	}
+
+	if ((c->d & Sse) && (ops->get_cr(0, ctxt->vcpu) & X86_CR0_TS)) {
+		rc = emulate_nm(ctxt);
+		goto done;
+	}
+
 	/* Privileged instruction can be executed only in CPL=0 */
 	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
 		rc = emulate_gp(ctxt, 0);
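The two new checks mirror hardware behaviour for SSE instructions: #UD is raised when CR0.EM is set or CR4.OSFXSR is clear, and #NM when CR0.TS is set, so a guest relying on lazy FPU switching still receives its device-not-available exception instead of having the emulator touch %xmm state behind its back.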