aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
 arch/x86/include/asm/kvm_emulate.h | 20 ++++++++++++++++++++
 arch/x86/kvm/emulate.c             | 26 ++++++++++++++++++++++++++
 arch/x86/kvm/x86.c                 |  9 +++++++++
 3 files changed, 55 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_emulate.h b/arch/x86/include/asm/kvm_emulate.h
index 48693f0d3842..2cfea49d4706 100644
--- a/arch/x86/include/asm/kvm_emulate.h
+++ b/arch/x86/include/asm/kvm_emulate.h
@@ -14,6 +14,8 @@
 #include <asm/desc_defs.h>
 
 struct x86_emulate_ctxt;
+enum x86_intercept;
+enum x86_intercept_stage;
 
 struct x86_exception {
 	u8 vector;
@@ -62,6 +64,7 @@ struct x86_exception {
 #define X86EMUL_RETRY_INSTR     3 /* retry the instruction for some reason */
 #define X86EMUL_CMPXCHG_FAILED  4 /* cmpxchg did not see expected value */
 #define X86EMUL_IO_NEEDED       5 /* IO is needed to complete emulation */
+#define X86EMUL_INTERCEPTED     6 /* Intercepted by nested VMCB/VMCS */
 
 struct x86_emulate_ops {
 	/*
@@ -160,6 +163,9 @@ struct x86_emulate_ops {
 	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr_index, u64 *pdata);
 	void (*get_fpu)(struct x86_emulate_ctxt *ctxt); /* disables preempt */
 	void (*put_fpu)(struct x86_emulate_ctxt *ctxt); /* reenables preempt */
+	int (*intercept)(struct x86_emulate_ctxt *ctxt,
+			 enum x86_intercept intercept,
+			 enum x86_intercept_stage stage);
 };
 
 typedef u32 __attribute__((vector_size(16))) sse128_t;
@@ -203,6 +209,7 @@ struct read_cache {
 struct decode_cache {
 	u8 twobyte;
 	u8 b;
+	u8 intercept;
 	u8 lock_prefix;
 	u8 rep_prefix;
 	u8 op_bytes;
@@ -244,6 +251,7 @@ struct x86_emulate_ctxt {
 	/* interruptibility state, as a result of execution of STI or MOV SS */
 	int interruptibility;
 
+	bool guest_mode; /* guest running a nested guest */
 	bool perm_ok; /* do not check permissions if true */
 	bool only_vendor_specific_insn;
 
@@ -265,6 +273,18 @@ struct x86_emulate_ctxt {
 #define X86EMUL_MODE_PROT32  4	/* 32-bit protected mode. */
 #define X86EMUL_MODE_PROT64  8	/* 64-bit (long) mode. */
 
+enum x86_intercept_stage {
+	X86_ICPT_PRE_EXCEPT,
+	X86_ICPT_POST_EXCEPT,
+	X86_ICPT_POST_MEMACCESS,
+};
+
+enum x86_intercept {
+	x86_intercept_none,
+
+	nr_x86_intercepts
+};
+
 /* Host execution mode. */
 #if defined(CONFIG_X86_32)
 #define X86EMUL_MODE_HOST X86EMUL_MODE_PROT32
diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
index 2b6c24e572d4..a81486790ba8 100644
--- a/arch/x86/kvm/emulate.c
+++ b/arch/x86/kvm/emulate.c
@@ -104,6 +104,7 @@
 
 struct opcode {
 	u32 flags;
+	u8 intercept;
 	union {
 		int (*execute)(struct x86_emulate_ctxt *ctxt);
 		struct opcode *group;
@@ -2423,10 +2424,13 @@ static int em_movdqu(struct x86_emulate_ctxt *ctxt)
 }
 
 #define D(_y) { .flags = (_y) }
+#define DI(_y, _i) { .flags = (_y), .intercept = x86_intercept_##_i }
 #define N    D(0)
 #define G(_f, _g) { .flags = ((_f) | Group), .u.group = (_g) }
 #define GD(_f, _g) { .flags = ((_f) | Group | GroupDual), .u.gdual = (_g) }
 #define I(_f, _e) { .flags = (_f), .u.execute = (_e) }
+#define II(_f, _e, _i) \
+	{ .flags = (_f), .u.execute = (_e), .intercept = x86_intercept_##_i }
 #define GP(_f, _g) { .flags = ((_f) | Prefix), .u.gprefix = (_g) }
 
 #define D2bv(_f)      D((_f) | ByteOp), D(_f)
@@ -2867,6 +2871,7 @@ done_prefixes:
 	}
 
 	c->execute = opcode.u.execute;
+	c->intercept = opcode.intercept;
 
 	/* Unrecognised? */
 	if (c->d == 0 || (c->d & Undefined))
@@ -3116,12 +3121,26 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 		goto done;
 	}
 
+	if (unlikely(ctxt->guest_mode) && c->intercept) {
+		rc = ops->intercept(ctxt, c->intercept,
+				    X86_ICPT_PRE_EXCEPT);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
+	}
+
 	/* Privileged instruction can be executed only in CPL=0 */
 	if ((c->d & Priv) && ops->cpl(ctxt->vcpu)) {
 		rc = emulate_gp(ctxt, 0);
 		goto done;
 	}
 
+	if (unlikely(ctxt->guest_mode) && c->intercept) {
+		rc = ops->intercept(ctxt, c->intercept,
+				    X86_ICPT_POST_EXCEPT);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
+	}
+
 	if (c->rep_prefix && (c->d & String)) {
 		/* All REP prefixes have the same first termination condition */
 		if (address_mask(c, c->regs[VCPU_REGS_RCX]) == 0) {
@@ -3160,6 +3179,13 @@ x86_emulate_insn(struct x86_emulate_ctxt *ctxt)
 
 special_insn:
 
+	if (unlikely(ctxt->guest_mode) && c->intercept) {
+		rc = ops->intercept(ctxt, c->intercept,
+				    X86_ICPT_POST_MEMACCESS);
+		if (rc != X86EMUL_CONTINUE)
+			goto done;
+	}
+
 	if (c->execute) {
 		rc = c->execute(ctxt);
 		if (rc != X86EMUL_CONTINUE)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 5af66515337d..36786bbb4c09 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4297,6 +4297,13 @@ static void emulator_put_fpu(struct x86_emulate_ctxt *ctxt)
 	preempt_enable();
 }
 
+static int emulator_intercept(struct x86_emulate_ctxt *ctxt,
+			      enum x86_intercept intercept,
+			      enum x86_intercept_stage stage)
+{
+	return X86EMUL_CONTINUE;
+}
+
 static struct x86_emulate_ops emulate_ops = {
 	.read_std            = kvm_read_guest_virt_system,
 	.write_std           = kvm_write_guest_virt_system,
@@ -4322,6 +4329,7 @@ static struct x86_emulate_ops emulate_ops = {
 	.get_msr             = kvm_get_msr,
 	.get_fpu             = emulator_get_fpu,
 	.put_fpu             = emulator_put_fpu,
+	.intercept           = emulator_intercept,
 };
 
 static void cache_all_regs(struct kvm_vcpu *vcpu)
@@ -4376,6 +4384,7 @@ static void init_emulate_ctxt(struct kvm_vcpu *vcpu)
 		? X86EMUL_MODE_VM86 : cs_l
 		? X86EMUL_MODE_PROT64 : cs_db
 		? X86EMUL_MODE_PROT32 : X86EMUL_MODE_PROT16;
+	vcpu->arch.emulate_ctxt.guest_mode = is_guest_mode(vcpu);
 	memset(c, 0, sizeof(struct decode_cache));
 	memcpy(c->regs, vcpu->arch.regs, sizeof c->regs);
 }