author     Avi Kivity <avi@redhat.com>  2012-04-09 11:40:02 -0400
committer  Marcelo Tosatti <mtosatti@redhat.com>  2012-04-16 19:36:16 -0400
commit     cbe2c9d30aa69b0551247ddb0fb450b6e8080ec4 (patch)
tree       84edcdbe15e4db09c6f6ba463d083bd42e51bbeb /arch/x86/kvm
parent     3e114eb4db3a33141b8c91bb53dae9ba6b015a32 (diff)
KVM: x86 emulator: MMX support
General support for the MMX instruction set. Special care is taken to
trap pending x87 exceptions so that they are properly reflected to the
guest.

Signed-off-by: Avi Kivity <avi@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/kvm')
-rw-r--r--  arch/x86/kvm/emulate.c | 103
1 file changed, 99 insertions(+), 4 deletions(-)
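The new OP_MM operand type, the mm_val field, and the addr.mm register index used throughout this patch live in the emulator's shared header (arch/x86/include/asm/kvm_emulate.h), which falls outside the arch/x86/kvm diffstat shown here. As a rough sketch of what that companion change presumably looks like (field names inferred from their uses in the hunks below, not taken from this diff):

    /* Sketch of the inferred kvm_emulate.h change: the operand union
     * grows an MMX register index and a 64-bit value alongside the
     * existing SSE fields. */
    struct operand {
            enum { OP_REG, OP_MEM, OP_IMM, OP_XMM, OP_MM, OP_NONE } type;
            unsigned int bytes;
            union {
                    unsigned long *reg;
                    unsigned xmm;           /* SSE register index */
                    unsigned mm;            /* MMX register index, MM0..MM7 */
            } addr;
            union {
                    unsigned long val;
                    u64 val64;
                    sse128_t vec_val;       /* 128-bit SSE value */
                    u64 mm_val;             /* 64-bit MMX value */
            };
    };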
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index fb39e0b32ed..0011b4ad44b 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -142,6 +142,7 @@
 #define Src2FS      (OpFS << Src2Shift)
 #define Src2GS      (OpGS << Src2Shift)
 #define Src2Mask    (OpMask << Src2Shift)
+#define Mmx         ((u64)1 << 40)  /* MMX Vector instruction */
 #define Aligned     ((u64)1 << 41)  /* Explicitly aligned (e.g. MOVDQA) */
 #define Unaligned   ((u64)1 << 42)  /* Explicitly unaligned (e.g. MOVDQU) */
 #define Avx         ((u64)1 << 43)  /* Advanced Vector Extensions */
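Note why the (u64) cast on these flag definitions matters: the decode-flag field holds more than 32 bits, and a plain 1 << 40 would shift an int past its width, which is undefined behavior in C. A standalone illustration (not from the patch):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            /* Well-defined: the shift happens in 64-bit arithmetic.
             * Without the cast, 1 << 40 overflows a 32-bit int (UB). */
            uint64_t mmx_flag = (uint64_t)1 << 40;
            printf("Mmx = %#llx\n", (unsigned long long)mmx_flag);
            return 0;
    }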
@@ -887,6 +888,40 @@ static void write_sse_reg(struct x86_emulate_ctxt *ctxt, sse128_t *data,
 	ctxt->ops->put_fpu(ctxt);
 }
 
+static void read_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+{
+	ctxt->ops->get_fpu(ctxt);
+	switch (reg) {
+	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
+	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
+	case 2: asm("movq %%mm2, %0" : "=m"(*data)); break;
+	case 3: asm("movq %%mm3, %0" : "=m"(*data)); break;
+	case 4: asm("movq %%mm4, %0" : "=m"(*data)); break;
+	case 5: asm("movq %%mm5, %0" : "=m"(*data)); break;
+	case 6: asm("movq %%mm6, %0" : "=m"(*data)); break;
+	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
+	default: BUG();
+	}
+	ctxt->ops->put_fpu(ctxt);
+}
+
+static void write_mmx_reg(struct x86_emulate_ctxt *ctxt, u64 *data, int reg)
+{
+	ctxt->ops->get_fpu(ctxt);
+	switch (reg) {
+	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
+	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
+	case 2: asm("movq %0, %%mm2" : : "m"(*data)); break;
+	case 3: asm("movq %0, %%mm3" : : "m"(*data)); break;
+	case 4: asm("movq %0, %%mm4" : : "m"(*data)); break;
+	case 5: asm("movq %0, %%mm5" : : "m"(*data)); break;
+	case 6: asm("movq %0, %%mm6" : : "m"(*data)); break;
+	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
+	default: BUG();
+	}
+	ctxt->ops->put_fpu(ctxt);
+}
+
 static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
 				    struct operand *op)
 {
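The switch statements above look redundant, but inline assembly forces them: the MMX register name must be a compile-time literal, so there is one movq per register. A minimal userspace analogue of the same movq pattern (assuming x86 with MMX and a GNU-style compiler; a demo, not kernel code):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t in = 0x1122334455667788ULL, out = 0;

            asm volatile("movq %0, %%mm0" : : "m"(in));     /* load MM0 */
            asm volatile("movq %%mm0, %0" : "=m"(out));     /* store MM0 */
            asm volatile("emms");   /* leave MMX mode, reset the x87 tag word */
            printf("mm0 = %#llx\n", (unsigned long long)out);
            return 0;
    }

The kernel helpers have no emms; they bracket the asm with get_fpu()/put_fpu() instead, because they operate on the guest's loaded FPU state rather than the host's.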
@@ -903,6 +938,13 @@ static void decode_register_operand(struct x86_emulate_ctxt *ctxt,
 		read_sse_reg(ctxt, &op->vec_val, reg);
 		return;
 	}
+	if (ctxt->d & Mmx) {
+		reg &= 7;
+		op->type = OP_MM;
+		op->bytes = 8;
+		op->addr.mm = reg;
+		return;
+	}
 
 	op->type = OP_REG;
 	if (ctxt->d & ByteOp) {
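The reg &= 7 mask reflects the architecture: there are only eight MMX registers, and REX prefixes do not extend MMX operands, so a REX-extended index wraps back into MM0..MM7. A one-line sketch (illustrative helper, not in the patch):

    /* Illustrative only: REX.R may yield reg = 8..15, but MMX ignores it. */
    static unsigned mmx_reg(unsigned reg)
    {
            return reg & 7;         /* e.g. 9 maps to MM1 */
    }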
@@ -948,6 +990,12 @@ static int decode_modrm(struct x86_emulate_ctxt *ctxt,
 			read_sse_reg(ctxt, &op->vec_val, ctxt->modrm_rm);
 			return rc;
 		}
+		if (ctxt->d & Mmx) {
+			op->type = OP_MM;
+			op->bytes = 8;
+			op->addr.mm = ctxt->modrm_rm & 7;
+			return rc;
+		}
 		fetch_register_operand(op);
 		return rc;
 	}
@@ -1415,6 +1463,9 @@ static int writeback(struct x86_emulate_ctxt *ctxt)
 	case OP_XMM:
 		write_sse_reg(ctxt, &ctxt->dst.vec_val, ctxt->dst.addr.xmm);
 		break;
+	case OP_MM:
+		write_mmx_reg(ctxt, &ctxt->dst.mm_val, ctxt->dst.addr.mm);
+		break;
 	case OP_NONE:
 		/* no writeback */
 		break;
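With decode filling OP_MM operands and writeback draining them, an actual MMX instruction handler reduces to copying 64 bits between ctxt->src and ctxt->dst. A hypothetical handler sketch (em_movq_mmx is an invented name; the opcode-table wiring is not part of this patch):

    /* Hypothetical: src was fetched by the decoder (register via
     * read_mmx_reg(), or memory into the operand union, where mm_val
     * aliases val64 if the header sketch above holds); writeback() then
     * calls write_mmx_reg() when dst is OP_MM. */
    static int em_movq_mmx(struct x86_emulate_ctxt *ctxt)
    {
            ctxt->dst.mm_val = ctxt->src.mm_val;
            return X86EMUL_CONTINUE;
    }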
@@ -3987,6 +4038,8 @@ done_prefixes:
 
 	if (ctxt->d & Sse)
 		ctxt->op_bytes = 16;
+	else if (ctxt->d & Mmx)
+		ctxt->op_bytes = 8;
 
 	/* ModRM and SIB bytes. */
 	if (ctxt->d & ModRM) {
@@ -4057,6 +4110,35 @@ static bool string_insn_completed(struct x86_emulate_ctxt *ctxt)
 	return false;
 }
 
+static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
+{
+	bool fault = false;
+
+	ctxt->ops->get_fpu(ctxt);
+	asm volatile("1: fwait \n\t"
+		     "2: \n\t"
+		     ".pushsection .fixup,\"ax\" \n\t"
+		     "3: \n\t"
+		     "movb $1, %[fault] \n\t"
+		     "jmp 2b \n\t"
+		     ".popsection \n\t"
+		     _ASM_EXTABLE(1b, 3b)
+		     : [fault]"+rm"(fault));
+	ctxt->ops->put_fpu(ctxt);
+
+	if (unlikely(fault))
+		return emulate_exception(ctxt, MF_VECTOR, 0, false);
+
+	return X86EMUL_CONTINUE;
+}
+
+static void fetch_possible_mmx_operand(struct x86_emulate_ctxt *ctxt,
+				       struct operand *op)
+{
+	if (op->type == OP_MM)
+		read_mmx_reg(ctxt, &op->mm_val, op->addr.mm);
+}
+
 int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 {
 	struct x86_emulate_ops *ops = ctxt->ops;
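flush_pending_x87_faults() exploits the x87's deferred exception model: a fault raised by one instruction is only delivered at the next waiting FP instruction, and fwait is exactly such an instruction. The _ASM_EXTABLE fixup catches the resulting #MF in the host so it can be reinjected into the guest as emulate_exception(MF_VECTOR). The same deferral is observable from userspace (assuming x86, Linux, and a GNU-style compiler; a demo, not kernel code):

    #include <signal.h>
    #include <stdio.h>
    #include <stdlib.h>

    static void on_sigfpe(int sig)
    {
            /* The pending fault surfaced at the fwait, not at the fdiv. */
            printf("x87 fault delivered at the fwait\n");
            exit(0);
    }

    int main(void)
    {
            unsigned short cw;

            signal(SIGFPE, on_sigfpe);
            asm volatile("fninit; fnstcw %0" : "=m"(cw));
            cw &= ~1;                               /* unmask invalid-operation */
            asm volatile("fldcw %0" : : "m"(cw));
            asm volatile("fldz; fldz; fdivp");      /* 0.0/0.0 raises #IA, kept pending */
            asm volatile("fwait");                  /* ...and it is reported here */
            printf("no fault (unexpected)\n");
            return 1;
    }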
@@ -4081,18 +4163,31 @@ int x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		goto done;
 	}
 
-	if ((ctxt->d & Sse)
-	    && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)
-		|| !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
+	if (((ctxt->d & (Sse|Mmx)) && ((ops->get_cr(ctxt, 0) & X86_CR0_EM)))
+	    || ((ctxt->d & Sse) && !(ops->get_cr(ctxt, 4) & X86_CR4_OSFXSR))) {
 		rc = emulate_ud(ctxt);
 		goto done;
 	}
 
-	if ((ctxt->d & Sse) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
+	if ((ctxt->d & (Sse|Mmx)) && (ops->get_cr(ctxt, 0) & X86_CR0_TS)) {
 		rc = emulate_nm(ctxt);
 		goto done;
 	}
 
+	if (ctxt->d & Mmx) {
+		rc = flush_pending_x87_faults(ctxt);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
+		/*
+		 * Now that we know the fpu is exception safe, we can fetch
+		 * operands from it.
+		 */
+		fetch_possible_mmx_operand(ctxt, &ctxt->src);
+		fetch_possible_mmx_operand(ctxt, &ctxt->src2);
+		if (!(ctxt->d & Mov))
+			fetch_possible_mmx_operand(ctxt, &ctxt->dst);
+	}
+
 	if (unlikely(ctxt->guest_mode) && ctxt->intercept) {
 		rc = emulator_check_intercept(ctxt, ctxt->intercept,
 					      X86_ICPT_PRE_EXCEPT);
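The reworked checks encode the SDM's rules: CR0.EM raises #UD for both MMX and SSE, CR4.OSFXSR gates only SSE, and CR0.TS raises #NM for both so that a lazy-FPU switch can run before emulation touches FPU state. Condensed as a decision table (a sketch under those rules; the real code reads control registers through ctxt->ops->get_cr()):

    enum { FAULT_NONE, FAULT_UD, FAULT_NM };

    static int vector_insn_fault(int sse, int mmx, unsigned long cr0,
                                 unsigned long cr4)
    {
            if ((sse || mmx) && (cr0 & X86_CR0_EM))
                    return FAULT_UD;        /* emulation bit forbids MMX and SSE */
            if (sse && !(cr4 & X86_CR4_OSFXSR))
                    return FAULT_UD;        /* OS has not enabled SSE/FXSAVE */
            if ((sse || mmx) && (cr0 & X86_CR0_TS))
                    return FAULT_NM;        /* device-not-available: lazy FPU */
            return FAULT_NONE;
    }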