Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--	arch/x86/kvm/svm.c	263
1 files changed, 155 insertions, 108 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 1d9b33843c80..2ba58206812a 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -26,6 +26,7 @@
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/ftrace_event.h>
+#include <linux/slab.h>
 
 #include <asm/desc.h>
 
@@ -231,7 +232,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 		efer &= ~EFER_LME;
 
 	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
-	vcpu->arch.shadow_efer = efer;
+	vcpu->arch.efer = efer;
 }
 
 static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
@@ -540,6 +541,8 @@ static void init_vmcb(struct vcpu_svm *svm)
540 struct vmcb_control_area *control = &svm->vmcb->control; 541 struct vmcb_control_area *control = &svm->vmcb->control;
541 struct vmcb_save_area *save = &svm->vmcb->save; 542 struct vmcb_save_area *save = &svm->vmcb->save;
542 543
544 svm->vcpu.fpu_active = 1;
545
543 control->intercept_cr_read = INTERCEPT_CR0_MASK | 546 control->intercept_cr_read = INTERCEPT_CR0_MASK |
544 INTERCEPT_CR3_MASK | 547 INTERCEPT_CR3_MASK |
545 INTERCEPT_CR4_MASK; 548 INTERCEPT_CR4_MASK;
@@ -552,13 +555,19 @@ static void init_vmcb(struct vcpu_svm *svm)
 	control->intercept_dr_read = INTERCEPT_DR0_MASK |
 					INTERCEPT_DR1_MASK |
 					INTERCEPT_DR2_MASK |
-					INTERCEPT_DR3_MASK;
+					INTERCEPT_DR3_MASK |
+					INTERCEPT_DR4_MASK |
+					INTERCEPT_DR5_MASK |
+					INTERCEPT_DR6_MASK |
+					INTERCEPT_DR7_MASK;
 
 	control->intercept_dr_write = INTERCEPT_DR0_MASK |
 					INTERCEPT_DR1_MASK |
 					INTERCEPT_DR2_MASK |
 					INTERCEPT_DR3_MASK |
+					INTERCEPT_DR4_MASK |
 					INTERCEPT_DR5_MASK |
+					INTERCEPT_DR6_MASK |
 					INTERCEPT_DR7_MASK;
 
 	control->intercept_exceptions = (1 << PF_VECTOR) |
@@ -569,6 +578,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	control->intercept = (1ULL << INTERCEPT_INTR) |
 				(1ULL << INTERCEPT_NMI) |
 				(1ULL << INTERCEPT_SMI) |
+				(1ULL << INTERCEPT_SELECTIVE_CR0) |
 				(1ULL << INTERCEPT_CPUID) |
 				(1ULL << INTERCEPT_INVD) |
 				(1ULL << INTERCEPT_HLT) |
@@ -641,10 +651,8 @@ static void init_vmcb(struct vcpu_svm *svm)
 		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
 					(1ULL << INTERCEPT_INVLPG));
 		control->intercept_exceptions &= ~(1 << PF_VECTOR);
-		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
-						INTERCEPT_CR3_MASK);
-		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
-						INTERCEPT_CR3_MASK);
+		control->intercept_cr_read &= ~INTERCEPT_CR3_MASK;
+		control->intercept_cr_write &= ~INTERCEPT_CR3_MASK;
 		save->g_pat = 0x0007040600070406ULL;
 		save->cr3 = 0;
 		save->cr4 = 0;
@@ -698,29 +706,28 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	if (err)
 		goto free_svm;
 
+	err = -ENOMEM;
 	page = alloc_page(GFP_KERNEL);
-	if (!page) {
-		err = -ENOMEM;
+	if (!page)
 		goto uninit;
-	}
 
-	err = -ENOMEM;
 	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
 	if (!msrpm_pages)
-		goto uninit;
+		goto free_page1;
 
 	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
 	if (!nested_msrpm_pages)
-		goto uninit;
-
-	svm->msrpm = page_address(msrpm_pages);
-	svm_vcpu_init_msrpm(svm->msrpm);
+		goto free_page2;
 
 	hsave_page = alloc_page(GFP_KERNEL);
 	if (!hsave_page)
-		goto uninit;
+		goto free_page3;
+
 	svm->nested.hsave = page_address(hsave_page);
 
+	svm->msrpm = page_address(msrpm_pages);
+	svm_vcpu_init_msrpm(svm->msrpm);
+
 	svm->nested.msrpm = page_address(nested_msrpm_pages);
 
 	svm->vmcb = page_address(page);
@@ -730,13 +737,18 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	init_vmcb(svm);
 
 	fx_init(&svm->vcpu);
-	svm->vcpu.fpu_active = 1;
 	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
 	if (kvm_vcpu_is_bsp(&svm->vcpu))
 		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
 
 	return &svm->vcpu;
 
+free_page3:
+	__free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page2:
+	__free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
+free_page1:
+	__free_page(page);
 uninit:
 	kvm_vcpu_uninit(&svm->vcpu);
 free_svm:
@@ -765,14 +777,16 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 	if (unlikely(cpu != vcpu->cpu)) {
 		u64 delta;
 
-		/*
-		 * Make sure that the guest sees a monotonically
-		 * increasing TSC.
-		 */
-		delta = vcpu->arch.host_tsc - native_read_tsc();
-		svm->vmcb->control.tsc_offset += delta;
-		if (is_nested(svm))
-			svm->nested.hsave->control.tsc_offset += delta;
+		if (check_tsc_unstable()) {
+			/*
+			 * Make sure that the guest sees a monotonically
+			 * increasing TSC.
+			 */
+			delta = vcpu->arch.host_tsc - native_read_tsc();
+			svm->vmcb->control.tsc_offset += delta;
+			if (is_nested(svm))
+				svm->nested.hsave->control.tsc_offset += delta;
+		}
 		vcpu->cpu = cpu;
 		kvm_migrate_timers(vcpu);
 		svm->asid_generation = 0;
@@ -954,42 +968,59 @@ static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
 	svm->vmcb->save.gdtr.base = dt->base ;
 }
 
+static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
+{
+}
+
 static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
 {
 }
 
+static void update_cr0_intercept(struct vcpu_svm *svm)
+{
+	ulong gcr0 = svm->vcpu.arch.cr0;
+	u64 *hcr0 = &svm->vmcb->save.cr0;
+
+	if (!svm->vcpu.fpu_active)
+		*hcr0 |= SVM_CR0_SELECTIVE_MASK;
+	else
+		*hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
+			| (gcr0 & SVM_CR0_SELECTIVE_MASK);
+
+
+	if (gcr0 == *hcr0 && svm->vcpu.fpu_active) {
+		svm->vmcb->control.intercept_cr_read &= ~INTERCEPT_CR0_MASK;
+		svm->vmcb->control.intercept_cr_write &= ~INTERCEPT_CR0_MASK;
+	} else {
+		svm->vmcb->control.intercept_cr_read |= INTERCEPT_CR0_MASK;
+		svm->vmcb->control.intercept_cr_write |= INTERCEPT_CR0_MASK;
+	}
+}
+
 static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
 #ifdef CONFIG_X86_64
-	if (vcpu->arch.shadow_efer & EFER_LME) {
+	if (vcpu->arch.efer & EFER_LME) {
 		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
-			vcpu->arch.shadow_efer |= EFER_LMA;
+			vcpu->arch.efer |= EFER_LMA;
 			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
 		}
 
 		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
-			vcpu->arch.shadow_efer &= ~EFER_LMA;
+			vcpu->arch.efer &= ~EFER_LMA;
 			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
 		}
 	}
 #endif
-	if (npt_enabled)
-		goto set;
+	vcpu->arch.cr0 = cr0;
 
-	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
-		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-		vcpu->fpu_active = 1;
-	}
+	if (!npt_enabled)
+		cr0 |= X86_CR0_PG | X86_CR0_WP;
 
-	vcpu->arch.cr0 = cr0;
-	cr0 |= X86_CR0_PG | X86_CR0_WP;
-	if (!vcpu->fpu_active) {
-		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
+	if (!vcpu->fpu_active)
 		cr0 |= X86_CR0_TS;
-	}
-set:
 	/*
 	 * re-enable caching here because the QEMU bios
 	 * does not do it - this results in some delay at
@@ -997,6 +1028,7 @@ set:
 	 */
 	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
 	svm->vmcb->save.cr0 = cr0;
+	update_cr0_intercept(svm);
 }
 
 static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
@@ -1102,76 +1134,70 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
 	svm->vmcb->control.asid = sd->next_asid++;
 }
 
-static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
+static int svm_get_dr(struct kvm_vcpu *vcpu, int dr, unsigned long *dest)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
-	unsigned long val;
 
 	switch (dr) {
 	case 0 ... 3:
-		val = vcpu->arch.db[dr];
+		*dest = vcpu->arch.db[dr];
 		break;
+	case 4:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+			return EMULATE_FAIL; /* will re-inject UD */
+		/* fall through */
 	case 6:
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-			val = vcpu->arch.dr6;
+			*dest = vcpu->arch.dr6;
 		else
-			val = svm->vmcb->save.dr6;
+			*dest = svm->vmcb->save.dr6;
 		break;
+	case 5:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+			return EMULATE_FAIL; /* will re-inject UD */
+		/* fall through */
 	case 7:
 		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
-			val = vcpu->arch.dr7;
+			*dest = vcpu->arch.dr7;
 		else
-			val = svm->vmcb->save.dr7;
+			*dest = svm->vmcb->save.dr7;
 		break;
-	default:
-		val = 0;
 	}
 
-	return val;
+	return EMULATE_DONE;
 }
 
-static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
-		       int *exception)
+static int svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	*exception = 0;
-
 	switch (dr) {
 	case 0 ... 3:
 		vcpu->arch.db[dr] = value;
 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
 			vcpu->arch.eff_db[dr] = value;
-		return;
-	case 4 ... 5:
-		if (vcpu->arch.cr4 & X86_CR4_DE)
-			*exception = UD_VECTOR;
-		return;
+		break;
+	case 4:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+			return EMULATE_FAIL; /* will re-inject UD */
+		/* fall through */
 	case 6:
-		if (value & 0xffffffff00000000ULL) {
-			*exception = GP_VECTOR;
-			return;
-		}
 		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
-		return;
+		break;
+	case 5:
+		if (kvm_read_cr4_bits(vcpu, X86_CR4_DE))
+			return EMULATE_FAIL; /* will re-inject UD */
+		/* fall through */
 	case 7:
-		if (value & 0xffffffff00000000ULL) {
-			*exception = GP_VECTOR;
-			return;
-		}
 		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
 		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
 			svm->vmcb->save.dr7 = vcpu->arch.dr7;
 			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
 		}
-		return;
-	default:
-		/* FIXME: Possible case? */
-		printk(KERN_DEBUG "%s: unexpected dr %u\n",
-		       __func__, dr);
-		*exception = UD_VECTOR;
-		return;
+		break;
 	}
+
+	return EMULATE_DONE;
 }
 
 static int pf_interception(struct vcpu_svm *svm)
@@ -1239,13 +1265,17 @@ static int ud_interception(struct vcpu_svm *svm)
 	return 1;
 }
 
-static int nm_interception(struct vcpu_svm *svm)
+static void svm_fpu_activate(struct kvm_vcpu *vcpu)
 {
+	struct vcpu_svm *svm = to_svm(vcpu);
 	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
-	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
-		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
 	svm->vcpu.fpu_active = 1;
+	update_cr0_intercept(svm);
+}
 
+static int nm_interception(struct vcpu_svm *svm)
+{
+	svm_fpu_activate(&svm->vcpu);
 	return 1;
 }
 
@@ -1337,7 +1367,7 @@ static int vmmcall_interception(struct vcpu_svm *svm)
 
 static int nested_svm_check_permissions(struct vcpu_svm *svm)
 {
-	if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
+	if (!(svm->vcpu.arch.efer & EFER_SVME)
 	    || !is_paging(&svm->vcpu)) {
 		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
 		return 1;
@@ -1740,8 +1770,8 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	hsave->save.ds     = vmcb->save.ds;
 	hsave->save.gdtr   = vmcb->save.gdtr;
 	hsave->save.idtr   = vmcb->save.idtr;
-	hsave->save.efer   = svm->vcpu.arch.shadow_efer;
-	hsave->save.cr0    = svm->vcpu.arch.cr0;
+	hsave->save.efer   = svm->vcpu.arch.efer;
+	hsave->save.cr0    = kvm_read_cr0(&svm->vcpu);
 	hsave->save.cr4    = svm->vcpu.arch.cr4;
 	hsave->save.rflags = vmcb->save.rflags;
 	hsave->save.rip    = svm->next_rip;
@@ -2153,9 +2183,10 @@ static int rdmsr_interception(struct vcpu_svm *svm)
 	u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
 	u64 data;
 
-	if (svm_get_msr(&svm->vcpu, ecx, &data))
+	if (svm_get_msr(&svm->vcpu, ecx, &data)) {
+		trace_kvm_msr_read_ex(ecx);
 		kvm_inject_gp(&svm->vcpu, 0);
-	else {
+	} else {
 		trace_kvm_msr_read(ecx, data);
 
 		svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
@@ -2247,13 +2278,15 @@ static int wrmsr_interception(struct vcpu_svm *svm)
 	u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
 		| ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
 
-	trace_kvm_msr_write(ecx, data);
 
 	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
-	if (svm_set_msr(&svm->vcpu, ecx, data))
+	if (svm_set_msr(&svm->vcpu, ecx, data)) {
+		trace_kvm_msr_write_ex(ecx, data);
 		kvm_inject_gp(&svm->vcpu, 0);
-	else
+	} else {
+		trace_kvm_msr_write(ecx, data);
 		skip_emulated_instruction(&svm->vcpu);
+	}
 	return 1;
 }
 
@@ -2297,7 +2330,7 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_CR3]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR4]			= emulate_on_interception,
 	[SVM_EXIT_READ_CR8]			= emulate_on_interception,
-	/* for now: */
+	[SVM_EXIT_CR0_SEL_WRITE]		= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR0]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR3]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_CR4]			= emulate_on_interception,
@@ -2306,11 +2339,17 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_READ_DR1]			= emulate_on_interception,
 	[SVM_EXIT_READ_DR2]			= emulate_on_interception,
 	[SVM_EXIT_READ_DR3]			= emulate_on_interception,
+	[SVM_EXIT_READ_DR4]			= emulate_on_interception,
+	[SVM_EXIT_READ_DR5]			= emulate_on_interception,
+	[SVM_EXIT_READ_DR6]			= emulate_on_interception,
+	[SVM_EXIT_READ_DR7]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR0]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR1]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR2]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_DR4]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
+	[SVM_EXIT_WRITE_DR6]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
 	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
 	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
@@ -2383,20 +2422,10 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 
 	svm_complete_interrupts(svm);
 
-	if (npt_enabled) {
-		int mmu_reload = 0;
-		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
-			svm_set_cr0(vcpu, svm->vmcb->save.cr0);
-			mmu_reload = 1;
-		}
+	if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR0_MASK))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
+	if (npt_enabled)
 		vcpu->arch.cr3 = svm->vmcb->save.cr3;
-		if (mmu_reload) {
-			kvm_mmu_reset_context(vcpu);
-			kvm_mmu_load(vcpu);
-		}
-	}
-
 
 	if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
 		kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
@@ -2798,12 +2827,6 @@ static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
 
 	svm->vmcb->save.cr3 = root;
 	force_new_asid(vcpu);
-
-	if (vcpu->fpu_active) {
-		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
-		svm->vmcb->save.cr0 |= X86_CR0_TS;
-		vcpu->fpu_active = 0;
-	}
 }
 
 static int is_disabled(void)
@@ -2852,6 +2875,10 @@ static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
 	return 0;
 }
 
+static void svm_cpuid_update(struct kvm_vcpu *vcpu)
+{
+}
+
 static const struct trace_print_flags svm_exit_reasons_str[] = {
 	{ SVM_EXIT_READ_CR0,			"read_cr0" },
 	{ SVM_EXIT_READ_CR3,			"read_cr3" },
@@ -2905,9 +2932,22 @@ static const struct trace_print_flags svm_exit_reasons_str[] = {
 	{ -1, NULL }
 };
 
-static bool svm_gb_page_enable(void)
+static int svm_get_lpage_level(void)
 {
-	return true;
+	return PT_PDPE_LEVEL;
+}
+
+static bool svm_rdtscp_supported(void)
+{
+	return false;
+}
+
+static void svm_fpu_deactivate(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	update_cr0_intercept(svm);
+	svm->vmcb->control.intercept_exceptions |= 1 << NM_VECTOR;
 }
 
 static struct kvm_x86_ops svm_x86_ops = {
@@ -2936,6 +2976,7 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.set_segment = svm_set_segment,
 	.get_cpl = svm_get_cpl,
 	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
+	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
 	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
 	.set_cr0 = svm_set_cr0,
 	.set_cr3 = svm_set_cr3,
@@ -2950,6 +2991,8 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.cache_reg = svm_cache_reg,
 	.get_rflags = svm_get_rflags,
 	.set_rflags = svm_set_rflags,
+	.fpu_activate = svm_fpu_activate,
+	.fpu_deactivate = svm_fpu_deactivate,
 
 	.tlb_flush = svm_flush_tlb,
 
@@ -2975,7 +3018,11 @@ static struct kvm_x86_ops svm_x86_ops = {
 	.get_mt_mask = svm_get_mt_mask,
 
 	.exit_reasons_str = svm_exit_reasons_str,
-	.gb_page_enable = svm_gb_page_enable,
+	.get_lpage_level = svm_get_lpage_level,
+
+	.cpuid_update = svm_cpuid_update,
+
+	.rdtscp_supported = svm_rdtscp_supported,
 };
 
 static int __init svm_init(void)