Diffstat (limited to 'arch/x86/kvm/svm.c')
 arch/x86/kvm/svm.c | 43 ++++++++++++++++++++++++++++++++++++++++---
 1 file changed, 40 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8a3f9f64f86f..e7c3f3bd08fc 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -88,6 +88,14 @@ struct nested_state {
 	/* A VMEXIT is required but not yet emulated */
 	bool exit_required;
 
+	/*
+	 * If we vmexit during an instruction emulation we need this to restore
+	 * the l1 guest rip after the emulation
+	 */
+	unsigned long vmexit_rip;
+	unsigned long vmexit_rsp;
+	unsigned long vmexit_rax;
+
 	/* cache for intercepts of the guest */
 	u16 intercept_cr_read;
 	u16 intercept_cr_write;
@@ -1206,8 +1214,12 @@ static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 		if (old == new) {
 			/* cr0 write with ts and mp unchanged */
 			svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
-			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE)
+			if (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE) {
+				svm->nested.vmexit_rip = kvm_rip_read(vcpu);
+				svm->nested.vmexit_rsp = kvm_register_read(vcpu, VCPU_REGS_RSP);
+				svm->nested.vmexit_rax = kvm_register_read(vcpu, VCPU_REGS_RAX);
 				return;
+			}
 		}
 	}
 
@@ -2399,6 +2411,23 @@ static int emulate_on_interception(struct vcpu_svm *svm)
 	return emulate_instruction(&svm->vcpu, 0, 0, 0) == EMULATE_DONE;
 }
 
+static int cr0_write_interception(struct vcpu_svm *svm)
+{
+	struct kvm_vcpu *vcpu = &svm->vcpu;
+	int r;
+
+	r = emulate_instruction(&svm->vcpu, 0, 0, 0);
+
+	if (svm->nested.vmexit_rip) {
+		kvm_register_write(vcpu, VCPU_REGS_RIP, svm->nested.vmexit_rip);
+		kvm_register_write(vcpu, VCPU_REGS_RSP, svm->nested.vmexit_rsp);
+		kvm_register_write(vcpu, VCPU_REGS_RAX, svm->nested.vmexit_rax);
+		svm->nested.vmexit_rip = 0;
+	}
+
+	return r == EMULATE_DONE;
+}
+
 static int cr8_write_interception(struct vcpu_svm *svm)
 {
 	struct kvm_run *kvm_run = svm->vcpu.run;
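
The new handler pairs with the svm_set_cr0() hunk above: when the selective CR0 intercept is owned by L1, svm_set_cr0() stashes RIP/RSP/RAX before the instruction emulator runs, and cr0_write_interception() writes them back afterwards so the nested #VMEXIT is delivered with the register state L1 expects. A minimal sketch of the sentinel pattern in use here (names are illustrative, not upstream code) -- note that vmexit_rip == 0 does double duty as the "nothing stashed" marker:

/*
 * Illustrative restatement of the restore path above: rip acts both as
 * the saved value and as the valid flag, and is cleared once the
 * stashed registers have been written back.
 */
struct nested_stash {
	unsigned long rip;	/* 0 = empty */
	unsigned long rsp;
	unsigned long rax;
};

static void restore_if_stashed(struct kvm_vcpu *vcpu, struct nested_stash *s)
{
	if (!s->rip)
		return;		/* no nested exit was pending */
	kvm_register_write(vcpu, VCPU_REGS_RIP, s->rip);
	kvm_register_write(vcpu, VCPU_REGS_RSP, s->rsp);
	kvm_register_write(vcpu, VCPU_REGS_RAX, s->rax);
	s->rip = 0;		/* mark the stash empty again */
}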
@@ -2672,7 +2701,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
 	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
-	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_CR0]			= cr0_write_interception,
 	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR8]			= cr8_write_interception,
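
For orientation: these handlers are invoked by indexing the array with the exit code taken from the VMCB, so the one-line change above is all that is needed to route CR0 writes through the new function. Roughly (a simplified sketch of the dispatch done by handle_exit() in the same file; the real code also deals with nested exits and error reporting):

static int dispatch_exit(struct vcpu_svm *svm)
{
	u32 exit_code = svm->vmcb->control.exit_code;

	/* Out-of-range or absent handlers are reported as errors upstream */
	if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
	    || !svm_exit_handlers[exit_code])
		return 0;

	return svm_exit_handlers[exit_code](svm);
}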
@@ -3252,6 +3281,7 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
 	load_host_msrs(vcpu);
+	kvm_load_ldt(ldt_selector);
 	loadsegment(fs, fs_selector);
 #ifdef CONFIG_X86_64
 	load_gs_index(gs_selector);
@@ -3259,7 +3289,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 #else
 	loadsegment(gs, gs_selector);
 #endif
-	kvm_load_ldt(ldt_selector);
 
 	reload_tss(vcpu);
 
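The two hunks above reorder the host-state restore in svm_vcpu_run() so that the LDT comes back before %fs and %gs. The likely reason: a saved selector with its TI bit set refers to an LDT descriptor, so reloading it can only pick up the right segment once the LDT itself has been restored. Condensed, the post-patch sequence reads:

	load_host_msrs(vcpu);
	kvm_load_ldt(ldt_selector);	/* LDT first ... */
	loadsegment(fs, fs_selector);	/* ... then selectors that may live in it */
#ifdef CONFIG_X86_64
	load_gs_index(gs_selector);
	/* ... (unchanged lines between the two hunks elided) ... */
#else
	loadsegment(gs, gs_selector);
#endif
	reload_tss(vcpu);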
@@ -3354,6 +3383,14 @@ static void svm_cpuid_update(struct kvm_vcpu *vcpu)
 static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
 {
 	switch (func) {
+	case 0x00000001:
+		/* Mask out xsave bit as long as it is not supported by SVM */
+		entry->ecx &= ~(bit(X86_FEATURE_XSAVE));
+		break;
+	case 0x80000001:
+		if (nested)
+			entry->ecx |= (1 << 2); /* Set SVM bit */
+		break;
 	case 0x8000000A:
 		entry->eax = 1;	/* SVM revision 1 */
 		entry->ebx = 8;	/* Lets support 8 ASIDs in case we add proper