Diffstat (limited to 'arch/x86/kvm/svm.c')
-rw-r--r--	arch/x86/kvm/svm.c	916
1 file changed, 808 insertions(+), 108 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index a9e769e4e251..1821c2078199 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -38,9 +38,6 @@ MODULE_LICENSE("GPL");
 #define IOPM_ALLOC_ORDER 2
 #define MSRPM_ALLOC_ORDER 1
 
-#define DR7_GD_MASK (1 << 13)
-#define DR6_BD_MASK (1 << 13)
-
 #define SEG_TYPE_LDT 2
 #define SEG_TYPE_BUSY_TSS16 3
 
@@ -50,6 +47,15 @@ MODULE_LICENSE("GPL");
 
 #define DEBUGCTL_RESERVED_BITS (~(0x3fULL))
 
+/* Turn on to get debugging output*/
+/* #define NESTED_DEBUG */
+
+#ifdef NESTED_DEBUG
+#define nsvm_printk(fmt, args...) printk(KERN_INFO fmt, ## args)
+#else
+#define nsvm_printk(fmt, args...) do {} while(0)
+#endif
+
 /* enable NPT for AMD64 and X86 with PAE */
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
 static bool npt_enabled = true;
@@ -60,14 +66,29 @@ static int npt = 1;
 
 module_param(npt, int, S_IRUGO);
 
+static int nested = 0;
+module_param(nested, int, S_IRUGO);
+
 static void kvm_reput_irq(struct vcpu_svm *svm);
 static void svm_flush_tlb(struct kvm_vcpu *vcpu);
 
+static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override);
+static int nested_svm_vmexit(struct vcpu_svm *svm);
+static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
+			     void *arg2, void *opaque);
+static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
+				      bool has_error_code, u32 error_code);
+
 static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
 {
 	return container_of(vcpu, struct vcpu_svm, vcpu);
 }
 
+static inline bool is_nested(struct vcpu_svm *svm)
+{
+	return svm->nested_vmcb;
+}
+
 static unsigned long iopm_base;
 
 struct kvm_ldttss_desc {
@@ -157,32 +178,6 @@ static inline void kvm_write_cr2(unsigned long val)
 	asm volatile ("mov %0, %%cr2" :: "r" (val));
 }
 
-static inline unsigned long read_dr6(void)
-{
-	unsigned long dr6;
-
-	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
-	return dr6;
-}
-
-static inline void write_dr6(unsigned long val)
-{
-	asm volatile ("mov %0, %%dr6" :: "r" (val));
-}
-
-static inline unsigned long read_dr7(void)
-{
-	unsigned long dr7;
-
-	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
-	return dr7;
-}
-
-static inline void write_dr7(unsigned long val)
-{
-	asm volatile ("mov %0, %%dr7" :: "r" (val));
-}
-
 static inline void force_new_asid(struct kvm_vcpu *vcpu)
 {
 	to_svm(vcpu)->asid_generation--;
@@ -198,7 +193,7 @@ static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
 	if (!npt_enabled && !(efer & EFER_LMA))
 		efer &= ~EFER_LME;
 
-	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
+	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
 	vcpu->arch.shadow_efer = efer;
 }
 
@@ -207,6 +202,11 @@ static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	/* If we are within a nested VM we'd better #VMEXIT and let the
+	   guest handle the exception */
+	if (nested_svm_check_exception(svm, nr, has_error_code, error_code))
+		return;
+
 	svm->vmcb->control.event_inj = nr
 		| SVM_EVTINJ_VALID
 		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
@@ -242,7 +242,7 @@ static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
 	kvm_rip_write(vcpu, svm->next_rip);
 	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
 
-	vcpu->arch.interrupt_window_open = 1;
+	vcpu->arch.interrupt_window_open = (svm->vcpu.arch.hflags & HF_GIF_MASK);
 }
 
 static int has_svm(void)
@@ -250,7 +250,7 @@ static int has_svm(void)
 	const char *msg;
 
 	if (!cpu_has_svm(&msg)) {
-		printk(KERN_INFO "has_svn: %s\n", msg);
+		printk(KERN_INFO "has_svm: %s\n", msg);
 		return 0;
 	}
 
@@ -292,7 +292,7 @@ static void svm_hardware_enable(void *garbage)
 	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);
 
 	rdmsrl(MSR_EFER, efer);
-	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);
+	wrmsrl(MSR_EFER, efer | EFER_SVME);
 
 	wrmsrl(MSR_VM_HSAVE_PA,
 	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
@@ -417,6 +417,14 @@ static __init int svm_hardware_setup(void)
 	if (boot_cpu_has(X86_FEATURE_NX))
 		kvm_enable_efer_bits(EFER_NX);
 
+	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
+		kvm_enable_efer_bits(EFER_FFXSR);
+
+	if (nested) {
+		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
+		kvm_enable_efer_bits(EFER_SVME);
+	}
+
 	for_each_online_cpu(cpu) {
 		r = svm_cpu_init(cpu);
 		if (r)
@@ -559,7 +567,7 @@ static void init_vmcb(struct vcpu_svm *svm)
 	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
 	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
 
-	save->efer = MSR_EFER_SVME_MASK;
+	save->efer = EFER_SVME;
 	save->dr6 = 0xffff0ff0;
 	save->dr7 = 0x400;
 	save->rflags = 2;
@@ -591,6 +599,9 @@ static void init_vmcb(struct vcpu_svm *svm)
 		save->cr4 = 0;
 	}
 	force_new_asid(&svm->vcpu);
+
+	svm->nested_vmcb = 0;
+	svm->vcpu.arch.hflags = HF_GIF_MASK;
 }
 
 static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
@@ -615,6 +626,8 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	struct vcpu_svm *svm;
 	struct page *page;
 	struct page *msrpm_pages;
+	struct page *hsave_page;
+	struct page *nested_msrpm_pages;
 	int err;
 
 	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
@@ -637,14 +650,25 @@ static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
 	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
 	if (!msrpm_pages)
 		goto uninit;
+
+	nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
+	if (!nested_msrpm_pages)
+		goto uninit;
+
 	svm->msrpm = page_address(msrpm_pages);
 	svm_vcpu_init_msrpm(svm->msrpm);
 
+	hsave_page = alloc_page(GFP_KERNEL);
+	if (!hsave_page)
+		goto uninit;
+	svm->hsave = page_address(hsave_page);
+
+	svm->nested_msrpm = page_address(nested_msrpm_pages);
+
 	svm->vmcb = page_address(page);
 	clear_page(svm->vmcb);
 	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
 	svm->asid_generation = 0;
-	memset(svm->db_regs, 0, sizeof(svm->db_regs));
 	init_vmcb(svm);
 
 	fx_init(&svm->vcpu);
@@ -669,6 +693,8 @@ static void svm_free_vcpu(struct kvm_vcpu *vcpu)
 
 	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
 	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
+	__free_page(virt_to_page(svm->hsave));
+	__free_pages(virt_to_page(svm->nested_msrpm), MSRPM_ALLOC_ORDER);
 	kvm_vcpu_uninit(vcpu);
 	kmem_cache_free(kvm_vcpu_cache, svm);
 }
@@ -718,6 +744,16 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 	to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
+static void svm_set_vintr(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.intercept |= 1ULL << INTERCEPT_VINTR;
+}
+
+static void svm_clear_vintr(struct vcpu_svm *svm)
+{
+	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
+}
+
 static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
 {
 	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
@@ -760,20 +796,37 @@ static void svm_get_segment(struct kvm_vcpu *vcpu,
 	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
 	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;
 
-	/*
-	 * SVM always stores 0 for the 'G' bit in the CS selector in
-	 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
-	 * Intel's VMENTRY has a check on the 'G' bit.
-	 */
-	if (seg == VCPU_SREG_CS)
-		var->g = s->limit > 0xfffff;
-
-	/*
-	 * Work around a bug where the busy flag in the tr selector
-	 * isn't exposed
-	 */
-	if (seg == VCPU_SREG_TR)
-		var->type |= 0x2;
+	switch (seg) {
+	case VCPU_SREG_CS:
+		/*
+		 * SVM always stores 0 for the 'G' bit in the CS selector in
+		 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
+		 * Intel's VMENTRY has a check on the 'G' bit.
+		 */
+		var->g = s->limit > 0xfffff;
+		break;
+	case VCPU_SREG_TR:
+		/*
+		 * Work around a bug where the busy flag in the tr selector
+		 * isn't exposed
+		 */
+		var->type |= 0x2;
+		break;
+	case VCPU_SREG_DS:
+	case VCPU_SREG_ES:
+	case VCPU_SREG_FS:
+	case VCPU_SREG_GS:
+		/*
+		 * The accessed bit must always be set in the segment
+		 * descriptor cache, although it can be cleared in the
+		 * descriptor, the cached bit always remains at 1. Since
+		 * Intel has a check on this, set it here to support
+		 * cross-vendor migration.
+		 */
+		if (!var->unusable)
+			var->type |= 0x1;
+		break;
+	}
 
 	var->unusable = !var->present;
 }
@@ -905,9 +958,37 @@ static void svm_set_segment(struct kvm_vcpu *vcpu,
 
 }
 
-static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
+static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_guest_debug *dbg)
 {
-	return -EOPNOTSUPP;
+	int old_debug = vcpu->guest_debug;
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	vcpu->guest_debug = dbg->control;
+
+	svm->vmcb->control.intercept_exceptions &=
+		~((1 << DB_VECTOR) | (1 << BP_VECTOR));
+	if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
+		if (vcpu->guest_debug &
+		    (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))
+			svm->vmcb->control.intercept_exceptions |=
+				1 << DB_VECTOR;
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
+			svm->vmcb->control.intercept_exceptions |=
+				1 << BP_VECTOR;
+	} else
+		vcpu->guest_debug = 0;
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+		svm->vmcb->save.dr7 = dbg->arch.debugreg[7];
+	else
+		svm->vmcb->save.dr7 = vcpu->arch.dr7;
+
+	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
+		svm->vmcb->save.rflags |= X86_EFLAGS_TF | X86_EFLAGS_RF;
+	else if (old_debug & KVM_GUESTDBG_SINGLESTEP)
+		svm->vmcb->save.rflags &= ~(X86_EFLAGS_TF | X86_EFLAGS_RF);
+
+	return 0;
 }
 
 static int svm_get_irq(struct kvm_vcpu *vcpu)
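
Note: the new svm_guest_debug() above is driven from userspace through the
KVM_SET_GUEST_DEBUG vcpu ioctl introduced by this series. A minimal,
hypothetical userspace sketch (vcpu_fd and bp_gva are assumed names, not
part of this patch):

	#include <linux/kvm.h>
	#include <sys/ioctl.h>

	/* arm a hardware breakpoint at guest linear address bp_gva,
	 * plus single-stepping, on an already-created vcpu fd */
	struct kvm_guest_debug dbg = { 0 };

	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_SINGLESTEP |
		      KVM_GUESTDBG_USE_HW_BP;
	dbg.arch.debugreg[0] = bp_gva;	/* DR0: breakpoint address */
	dbg.arch.debugreg[7] = 0x1;	/* DR7: local-enable DR0, execute */
	if (ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg) < 0)
		perror("KVM_SET_GUEST_DEBUG");

With KVM_GUESTDBG_USE_HW_BP set, svm_guest_debug() loads
dbg.arch.debugreg[7] into the VMCB and intercepts #DB, so breakpoint hits
come back to userspace as KVM_EXIT_DEBUG via db_interception() further down.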
@@ -949,7 +1030,29 @@ static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
 
 static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
 {
-	unsigned long val = to_svm(vcpu)->db_regs[dr];
+	struct vcpu_svm *svm = to_svm(vcpu);
+	unsigned long val;
+
+	switch (dr) {
+	case 0 ... 3:
+		val = vcpu->arch.db[dr];
+		break;
+	case 6:
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+			val = vcpu->arch.dr6;
+		else
+			val = svm->vmcb->save.dr6;
+		break;
+	case 7:
+		if (vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)
+			val = vcpu->arch.dr7;
+		else
+			val = svm->vmcb->save.dr7;
+		break;
+	default:
+		val = 0;
+	}
+
 	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
 	return val;
 }
@@ -959,33 +1062,40 @@ static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
-	*exception = 0;
+	KVMTRACE_2D(DR_WRITE, vcpu, (u32)dr, (u32)value, handler);
 
-	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
-		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
-		svm->vmcb->save.dr6 |= DR6_BD_MASK;
-		*exception = DB_VECTOR;
-		return;
-	}
+	*exception = 0;
 
 	switch (dr) {
 	case 0 ... 3:
-		svm->db_regs[dr] = value;
+		vcpu->arch.db[dr] = value;
+		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
+			vcpu->arch.eff_db[dr] = value;
 		return;
 	case 4 ... 5:
-		if (vcpu->arch.cr4 & X86_CR4_DE) {
+		if (vcpu->arch.cr4 & X86_CR4_DE)
 			*exception = UD_VECTOR;
+		return;
+	case 6:
+		if (value & 0xffffffff00000000ULL) {
+			*exception = GP_VECTOR;
 			return;
 		}
-	case 7: {
-		if (value & ~((1ULL << 32) - 1)) {
+		vcpu->arch.dr6 = (value & DR6_VOLATILE) | DR6_FIXED_1;
+		return;
+	case 7:
+		if (value & 0xffffffff00000000ULL) {
 			*exception = GP_VECTOR;
 			return;
 		}
-		svm->vmcb->save.dr7 = value;
+		vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
+		if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) {
+			svm->vmcb->save.dr7 = vcpu->arch.dr7;
+			vcpu->arch.switch_db_regs = (value & DR7_BP_EN_MASK);
+		}
 		return;
-	}
 	default:
+		/* FIXME: Possible case? */
 		printk(KERN_DEBUG "%s: unexpected dr %u\n",
 			__func__, dr);
 		*exception = UD_VECTOR;
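
Note: the new DR7 handling splits a guest write into architecturally
volatile bits (preserved) and fixed bits (forced on). A worked example,
assuming the DR7_VOLATILE/DR7_FIXED_1/DR7_BP_EN_MASK definitions added to
kvm_host.h elsewhere in this series (0xffff23ff, 0x00000400 and 0x000000ff
respectively; treat the exact values as an assumption):

	/* guest executes: mov $0x403, %dr7 -- locally enable DR0 and DR1 */
	unsigned long value = 0x403;

	vcpu->arch.dr7 = (value & DR7_VOLATILE) | DR7_FIXED_1;
			/* = (0x403 & 0xffff23ff) | 0x400 = 0x403 */
	vcpu->arch.switch_db_regs = value & DR7_BP_EN_MASK;
			/* = 0x03: DR0-DR3 must now be swapped on entry/exit */

A nonzero switch_db_regs is what replaces the unconditional
save_db_regs()/load_db_regs() pair that this patch removes from
svm_vcpu_run() near the end of the diff.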
@@ -1031,6 +1141,27 @@ static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
 }
 
+static int db_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	if (!(svm->vcpu.guest_debug &
+	      (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP))) {
+		kvm_queue_exception(&svm->vcpu, DB_VECTOR);
+		return 1;
+	}
+	kvm_run->exit_reason = KVM_EXIT_DEBUG;
+	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
+	kvm_run->debug.arch.exception = DB_VECTOR;
+	return 0;
+}
+
+static int bp_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	kvm_run->exit_reason = KVM_EXIT_DEBUG;
+	kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
+	kvm_run->debug.arch.exception = BP_VECTOR;
+	return 0;
+}
+
 static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	int er;
@@ -1080,7 +1211,7 @@ static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 {
 	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
-	int size, down, in, string, rep;
+	int size, in, string;
 	unsigned port;
 
 	++svm->vcpu.stat.io_exits;
@@ -1099,8 +1230,6 @@ static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
 	port = io_info >> 16;
 	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
-	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
-	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;
 
 	skip_emulated_instruction(&svm->vcpu);
 	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
@@ -1139,6 +1268,567 @@ static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
 	return 1;
 }
 
+static int nested_svm_check_permissions(struct vcpu_svm *svm)
+{
+	if (!(svm->vcpu.arch.shadow_efer & EFER_SVME)
+	    || !is_paging(&svm->vcpu)) {
+		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
+		return 1;
+	}
+
+	if (svm->vmcb->save.cpl) {
+		kvm_inject_gp(&svm->vcpu, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
+				      bool has_error_code, u32 error_code)
+{
+	if (is_nested(svm)) {
+		svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
+		svm->vmcb->control.exit_code_hi = 0;
+		svm->vmcb->control.exit_info_1 = error_code;
+		svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
+		if (nested_svm_exit_handled(svm, false)) {
+			nsvm_printk("VMexit -> EXCP 0x%x\n", nr);
+
+			nested_svm_vmexit(svm);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static inline int nested_svm_intr(struct vcpu_svm *svm)
+{
+	if (is_nested(svm)) {
+		if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
+			return 0;
+
+		if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
+			return 0;
+
+		svm->vmcb->control.exit_code = SVM_EXIT_INTR;
+
+		if (nested_svm_exit_handled(svm, false)) {
+			nsvm_printk("VMexit -> INTR\n");
+			nested_svm_vmexit(svm);
+			return 1;
+		}
+	}
+
+	return 0;
+}
+
+static struct page *nested_svm_get_page(struct vcpu_svm *svm, u64 gpa)
+{
+	struct page *page;
+
+	down_read(&current->mm->mmap_sem);
+	page = gfn_to_page(svm->vcpu.kvm, gpa >> PAGE_SHIFT);
+	up_read(&current->mm->mmap_sem);
+
+	if (is_error_page(page)) {
+		printk(KERN_INFO "%s: could not find page at 0x%llx\n",
+		       __func__, gpa);
+		kvm_release_page_clean(page);
+		kvm_inject_gp(&svm->vcpu, 0);
+		return NULL;
+	}
+	return page;
+}
+
+static int nested_svm_do(struct vcpu_svm *svm,
+			 u64 arg1_gpa, u64 arg2_gpa, void *opaque,
+			 int (*handler)(struct vcpu_svm *svm,
+					void *arg1,
+					void *arg2,
+					void *opaque))
+{
+	struct page *arg1_page;
+	struct page *arg2_page = NULL;
+	void *arg1;
+	void *arg2 = NULL;
+	int retval;
+
+	arg1_page = nested_svm_get_page(svm, arg1_gpa);
+	if(arg1_page == NULL)
+		return 1;
+
+	if (arg2_gpa) {
+		arg2_page = nested_svm_get_page(svm, arg2_gpa);
+		if(arg2_page == NULL) {
+			kvm_release_page_clean(arg1_page);
+			return 1;
+		}
+	}
+
+	arg1 = kmap_atomic(arg1_page, KM_USER0);
+	if (arg2_gpa)
+		arg2 = kmap_atomic(arg2_page, KM_USER1);
+
+	retval = handler(svm, arg1, arg2, opaque);
+
+	kunmap_atomic(arg1, KM_USER0);
+	if (arg2_gpa)
+		kunmap_atomic(arg2, KM_USER1);
+
+	kvm_release_page_dirty(arg1_page);
+	if (arg2_gpa)
+		kvm_release_page_dirty(arg2_page);
+
+	return retval;
+}
+
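
Note: nested_svm_do() above is the access helper used by all the nested
handlers that follow: it translates one or two guest-physical addresses to
pages, maps them with kmap_atomic() (so handlers must not sleep or fault),
invokes the callback, and releases the pages as dirty. A hypothetical
handler, purely to illustrate the callback contract (not part of this
patch):

	/* example only: read a field out of the mapped guest VMCB */
	static int example_read_asid(struct vcpu_svm *svm, void *arg1,
				     void *arg2, void *opaque)
	{
		struct vmcb *nested_vmcb = arg1;	/* mapped guest VMCB */
		u32 *asid = opaque;			/* caller-supplied out-arg */

		*asid = nested_vmcb->control.asid;	/* runs under kmap_atomic */
		return 0;
	}

	/* caller: map the VMCB that the guest's rAX points to */
	u32 asid;
	nested_svm_do(svm, svm->vmcb->save.rax, 0, &asid, example_read_asid);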
+static int nested_svm_exit_handled_real(struct vcpu_svm *svm,
+					void *arg1,
+					void *arg2,
+					void *opaque)
+{
+	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
+	bool kvm_overrides = *(bool *)opaque;
+	u32 exit_code = svm->vmcb->control.exit_code;
+
+	if (kvm_overrides) {
+		switch (exit_code) {
+		case SVM_EXIT_INTR:
+		case SVM_EXIT_NMI:
+			return 0;
+		/* For now we are always handling NPFs when using them */
+		case SVM_EXIT_NPF:
+			if (npt_enabled)
+				return 0;
+			break;
+		/* When we're shadowing, trap PFs */
+		case SVM_EXIT_EXCP_BASE + PF_VECTOR:
+			if (!npt_enabled)
+				return 0;
+			break;
+		default:
+			break;
+		}
+	}
+
+	switch (exit_code) {
+	case SVM_EXIT_READ_CR0 ... SVM_EXIT_READ_CR8: {
+		u32 cr_bits = 1 << (exit_code - SVM_EXIT_READ_CR0);
+		if (nested_vmcb->control.intercept_cr_read & cr_bits)
+			return 1;
+		break;
+	}
+	case SVM_EXIT_WRITE_CR0 ... SVM_EXIT_WRITE_CR8: {
+		u32 cr_bits = 1 << (exit_code - SVM_EXIT_WRITE_CR0);
+		if (nested_vmcb->control.intercept_cr_write & cr_bits)
+			return 1;
+		break;
+	}
+	case SVM_EXIT_READ_DR0 ... SVM_EXIT_READ_DR7: {
+		u32 dr_bits = 1 << (exit_code - SVM_EXIT_READ_DR0);
+		if (nested_vmcb->control.intercept_dr_read & dr_bits)
+			return 1;
+		break;
+	}
+	case SVM_EXIT_WRITE_DR0 ... SVM_EXIT_WRITE_DR7: {
+		u32 dr_bits = 1 << (exit_code - SVM_EXIT_WRITE_DR0);
+		if (nested_vmcb->control.intercept_dr_write & dr_bits)
+			return 1;
+		break;
+	}
+	case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
+		u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
+		if (nested_vmcb->control.intercept_exceptions & excp_bits)
+			return 1;
+		break;
+	}
+	default: {
+		u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
+		nsvm_printk("exit code: 0x%x\n", exit_code);
+		if (nested_vmcb->control.intercept & exit_bits)
+			return 1;
+	}
+	}
+
+	return 0;
+}
+
+static int nested_svm_exit_handled_msr(struct vcpu_svm *svm,
+				       void *arg1, void *arg2,
+				       void *opaque)
+{
+	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
+	u8 *msrpm = (u8 *)arg2;
+	u32 t0, t1;
+	u32 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
+	u32 param = svm->vmcb->control.exit_info_1 & 1;
+
+	if (!(nested_vmcb->control.intercept & (1ULL << INTERCEPT_MSR_PROT)))
+		return 0;
+
+	switch(msr) {
+	case 0 ... 0x1fff:
+		t0 = (msr * 2) % 8;
+		t1 = msr / 8;
+		break;
+	case 0xc0000000 ... 0xc0001fff:
+		t0 = (8192 + msr - 0xc0000000) * 2;
+		t1 = (t0 / 8);
+		t0 %= 8;
+		break;
+	case 0xc0010000 ... 0xc0011fff:
+		t0 = (16384 + msr - 0xc0010000) * 2;
+		t1 = (t0 / 8);
+		t0 %= 8;
+		break;
+	default:
+		return 1;
+		break;
+	}
+	if (msrpm[t1] & ((1 << param) << t0))
+		return 1;
+
+	return 0;
+}
+
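
Note: the t0/t1 arithmetic above follows the hardware MSR permission map
layout: two bits per MSR (read intercept in the even bit, write intercept
in the odd bit), in three 2K-byte regions covering the 0x0, 0xc0000000 and
0xc0010000 MSR ranges. Worked example for a guest WRMSR to EFER
(MSR 0xc0000080, so param = exit_info_1 & 1 = 1):

	/* 0xc0000080 falls in the 0xc0000000 ... 0xc0001fff range */
	t0 = (8192 + 0xc0000080 - 0xc0000000) * 2;	/* = 16640 (bit index) */
	t1 = t0 / 8;					/* = 2080 (byte offset) */
	t0 %= 8;					/* = 0 (bit within byte) */

	/* reflected to the guest hypervisor iff bit 1 of msrpm[2080] is set */
	if (msrpm[2080] & ((1 << 1) << 0))
		return 1;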
+static int nested_svm_exit_handled(struct vcpu_svm *svm, bool kvm_override)
+{
+	bool k = kvm_override;
+
+	switch (svm->vmcb->control.exit_code) {
+	case SVM_EXIT_MSR:
+		return nested_svm_do(svm, svm->nested_vmcb,
+				     svm->nested_vmcb_msrpm, NULL,
+				     nested_svm_exit_handled_msr);
+	default: break;
+	}
+
+	return nested_svm_do(svm, svm->nested_vmcb, 0, &k,
+			     nested_svm_exit_handled_real);
+}
+
+static int nested_svm_vmexit_real(struct vcpu_svm *svm, void *arg1,
+				  void *arg2, void *opaque)
+{
+	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
+	struct vmcb *hsave = svm->hsave;
+	u64 nested_save[] = { nested_vmcb->save.cr0,
+			      nested_vmcb->save.cr3,
+			      nested_vmcb->save.cr4,
+			      nested_vmcb->save.efer,
+			      nested_vmcb->control.intercept_cr_read,
+			      nested_vmcb->control.intercept_cr_write,
+			      nested_vmcb->control.intercept_dr_read,
+			      nested_vmcb->control.intercept_dr_write,
+			      nested_vmcb->control.intercept_exceptions,
+			      nested_vmcb->control.intercept,
+			      nested_vmcb->control.msrpm_base_pa,
+			      nested_vmcb->control.iopm_base_pa,
+			      nested_vmcb->control.tsc_offset };
+
+	/* Give the current vmcb to the guest */
+	memcpy(nested_vmcb, svm->vmcb, sizeof(struct vmcb));
+	nested_vmcb->save.cr0 = nested_save[0];
+	if (!npt_enabled)
+		nested_vmcb->save.cr3 = nested_save[1];
+	nested_vmcb->save.cr4 = nested_save[2];
+	nested_vmcb->save.efer = nested_save[3];
+	nested_vmcb->control.intercept_cr_read = nested_save[4];
+	nested_vmcb->control.intercept_cr_write = nested_save[5];
+	nested_vmcb->control.intercept_dr_read = nested_save[6];
+	nested_vmcb->control.intercept_dr_write = nested_save[7];
+	nested_vmcb->control.intercept_exceptions = nested_save[8];
+	nested_vmcb->control.intercept = nested_save[9];
+	nested_vmcb->control.msrpm_base_pa = nested_save[10];
+	nested_vmcb->control.iopm_base_pa = nested_save[11];
+	nested_vmcb->control.tsc_offset = nested_save[12];
+
+	/* We always set V_INTR_MASKING and remember the old value in hflags */
+	if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
+		nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
+
+	if ((nested_vmcb->control.int_ctl & V_IRQ_MASK) &&
+	    (nested_vmcb->control.int_vector)) {
+		nsvm_printk("WARNING: IRQ 0x%x still enabled on #VMEXIT\n",
+			    nested_vmcb->control.int_vector);
+	}
+
+	/* Restore the original control entries */
+	svm->vmcb->control = hsave->control;
+
+	/* Kill any pending exceptions */
+	if (svm->vcpu.arch.exception.pending == true)
+		nsvm_printk("WARNING: Pending Exception\n");
+	svm->vcpu.arch.exception.pending = false;
+
+	/* Restore selected save entries */
+	svm->vmcb->save.es = hsave->save.es;
+	svm->vmcb->save.cs = hsave->save.cs;
+	svm->vmcb->save.ss = hsave->save.ss;
+	svm->vmcb->save.ds = hsave->save.ds;
+	svm->vmcb->save.gdtr = hsave->save.gdtr;
+	svm->vmcb->save.idtr = hsave->save.idtr;
+	svm->vmcb->save.rflags = hsave->save.rflags;
+	svm_set_efer(&svm->vcpu, hsave->save.efer);
+	svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
+	svm_set_cr4(&svm->vcpu, hsave->save.cr4);
+	if (npt_enabled) {
+		svm->vmcb->save.cr3 = hsave->save.cr3;
+		svm->vcpu.arch.cr3 = hsave->save.cr3;
+	} else {
+		kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
+	}
+	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
+	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
+	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
+	svm->vmcb->save.dr7 = 0;
+	svm->vmcb->save.cpl = 0;
+	svm->vmcb->control.exit_int_info = 0;
+
+	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
+	/* Exit nested SVM mode */
+	svm->nested_vmcb = 0;
+
+	return 0;
+}
+
+static int nested_svm_vmexit(struct vcpu_svm *svm)
+{
+	nsvm_printk("VMexit\n");
+	if (nested_svm_do(svm, svm->nested_vmcb, 0,
+			  NULL, nested_svm_vmexit_real))
+		return 1;
+
+	kvm_mmu_reset_context(&svm->vcpu);
+	kvm_mmu_load(&svm->vcpu);
+
+	return 0;
+}
+
+static int nested_svm_vmrun_msrpm(struct vcpu_svm *svm, void *arg1,
+				  void *arg2, void *opaque)
+{
+	int i;
+	u32 *nested_msrpm = (u32*)arg1;
+	for (i=0; i< PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER) / 4; i++)
+		svm->nested_msrpm[i] = svm->msrpm[i] | nested_msrpm[i];
+	svm->vmcb->control.msrpm_base_pa = __pa(svm->nested_msrpm);
+
+	return 0;
+}
+
+static int nested_svm_vmrun(struct vcpu_svm *svm, void *arg1,
+			    void *arg2, void *opaque)
+{
+	struct vmcb *nested_vmcb = (struct vmcb *)arg1;
+	struct vmcb *hsave = svm->hsave;
+
+	/* nested_vmcb is our indicator if nested SVM is activated */
+	svm->nested_vmcb = svm->vmcb->save.rax;
+
+	/* Clear internal status */
+	svm->vcpu.arch.exception.pending = false;
+
+	/* Save the old vmcb, so we don't need to pick what we save, but
+	   can restore everything when a VMEXIT occurs */
+	memcpy(hsave, svm->vmcb, sizeof(struct vmcb));
+	/* We need to remember the original CR3 in the SPT case */
+	if (!npt_enabled)
+		hsave->save.cr3 = svm->vcpu.arch.cr3;
+	hsave->save.cr4 = svm->vcpu.arch.cr4;
+	hsave->save.rip = svm->next_rip;
+
+	if (svm->vmcb->save.rflags & X86_EFLAGS_IF)
+		svm->vcpu.arch.hflags |= HF_HIF_MASK;
+	else
+		svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
+
+	/* Load the nested guest state */
+	svm->vmcb->save.es = nested_vmcb->save.es;
+	svm->vmcb->save.cs = nested_vmcb->save.cs;
+	svm->vmcb->save.ss = nested_vmcb->save.ss;
+	svm->vmcb->save.ds = nested_vmcb->save.ds;
+	svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
+	svm->vmcb->save.idtr = nested_vmcb->save.idtr;
+	svm->vmcb->save.rflags = nested_vmcb->save.rflags;
+	svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
+	svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
+	svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
+	if (npt_enabled) {
+		svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
+		svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
+	} else {
+		kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
+		kvm_mmu_reset_context(&svm->vcpu);
+	}
+	svm->vmcb->save.cr2 = nested_vmcb->save.cr2;
+	kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
+	kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
+	kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
+	/* In case we don't even reach vcpu_run, the fields are not updated */
+	svm->vmcb->save.rax = nested_vmcb->save.rax;
+	svm->vmcb->save.rsp = nested_vmcb->save.rsp;
+	svm->vmcb->save.rip = nested_vmcb->save.rip;
+	svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
+	svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
+	svm->vmcb->save.cpl = nested_vmcb->save.cpl;
+
+	/* We don't want a nested guest to be more powerful than the guest,
+	   so all intercepts are ORed */
+	svm->vmcb->control.intercept_cr_read |=
+		nested_vmcb->control.intercept_cr_read;
+	svm->vmcb->control.intercept_cr_write |=
+		nested_vmcb->control.intercept_cr_write;
+	svm->vmcb->control.intercept_dr_read |=
+		nested_vmcb->control.intercept_dr_read;
+	svm->vmcb->control.intercept_dr_write |=
+		nested_vmcb->control.intercept_dr_write;
+	svm->vmcb->control.intercept_exceptions |=
+		nested_vmcb->control.intercept_exceptions;
+
+	svm->vmcb->control.intercept |= nested_vmcb->control.intercept;
+
+	svm->nested_vmcb_msrpm = nested_vmcb->control.msrpm_base_pa;
+
+	force_new_asid(&svm->vcpu);
+	svm->vmcb->control.exit_int_info = nested_vmcb->control.exit_int_info;
+	svm->vmcb->control.exit_int_info_err = nested_vmcb->control.exit_int_info_err;
+	svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
+	if (nested_vmcb->control.int_ctl & V_IRQ_MASK) {
+		nsvm_printk("nSVM Injecting Interrupt: 0x%x\n",
+			    nested_vmcb->control.int_ctl);
+	}
+	if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
+		svm->vcpu.arch.hflags |= HF_VINTR_MASK;
+	else
+		svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
+
+	nsvm_printk("nSVM exit_int_info: 0x%x | int_state: 0x%x\n",
+		    nested_vmcb->control.exit_int_info,
+		    nested_vmcb->control.int_state);
+
+	svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
+	svm->vmcb->control.int_state = nested_vmcb->control.int_state;
+	svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
+	if (nested_vmcb->control.event_inj & SVM_EVTINJ_VALID)
+		nsvm_printk("Injecting Event: 0x%x\n",
+			    nested_vmcb->control.event_inj);
+	svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
+	svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
+
+	svm->vcpu.arch.hflags |= HF_GIF_MASK;
+
+	return 0;
+}
+
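
Note: nested_svm_vmrun() is the software world switch: L1 state is stashed
in svm->hsave, the nested VMCB is loaded into the real VMCB, and all
intercept bits are ORed so KVM never loses an exit it needs for itself.
What triggers it is the guest hypervisor executing the architectural
sequence, roughly as in this illustrative guest-side sketch (rAX must hold
the guest-physical VMCB address; VMRUN is the 3-byte opcode that next_rip
skips over in vmrun_interception() below):

	/* L1 hypervisor code, assuming vmcb_pa names its VMCB address */
	asm volatile("vmload" : : "a"(vmcb_pa));		/* extra state in */
	asm volatile("vmrun"  : : "a"(vmcb_pa) : "memory", "cc");
	asm volatile("vmsave" : : "a"(vmcb_pa));		/* extra state out */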
+static int nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
+{
+	to_vmcb->save.fs = from_vmcb->save.fs;
+	to_vmcb->save.gs = from_vmcb->save.gs;
+	to_vmcb->save.tr = from_vmcb->save.tr;
+	to_vmcb->save.ldtr = from_vmcb->save.ldtr;
+	to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
+	to_vmcb->save.star = from_vmcb->save.star;
+	to_vmcb->save.lstar = from_vmcb->save.lstar;
+	to_vmcb->save.cstar = from_vmcb->save.cstar;
+	to_vmcb->save.sfmask = from_vmcb->save.sfmask;
+	to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
+	to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
+	to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
+
+	return 1;
+}
+
+static int nested_svm_vmload(struct vcpu_svm *svm, void *nested_vmcb,
+			     void *arg2, void *opaque)
+{
+	return nested_svm_vmloadsave((struct vmcb *)nested_vmcb, svm->vmcb);
+}
+
+static int nested_svm_vmsave(struct vcpu_svm *svm, void *nested_vmcb,
+			     void *arg2, void *opaque)
+{
+	return nested_svm_vmloadsave(svm->vmcb, (struct vmcb *)nested_vmcb);
+}
+
+static int vmload_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	if (nested_svm_check_permissions(svm))
+		return 1;
+
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+	skip_emulated_instruction(&svm->vcpu);
+
+	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmload);
+
+	return 1;
+}
+
+static int vmsave_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	if (nested_svm_check_permissions(svm))
+		return 1;
+
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+	skip_emulated_instruction(&svm->vcpu);
+
+	nested_svm_do(svm, svm->vmcb->save.rax, 0, NULL, nested_svm_vmsave);
+
+	return 1;
+}
+
+static int vmrun_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	nsvm_printk("VMrun\n");
+	if (nested_svm_check_permissions(svm))
+		return 1;
+
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+	skip_emulated_instruction(&svm->vcpu);
+
+	if (nested_svm_do(svm, svm->vmcb->save.rax, 0,
+			  NULL, nested_svm_vmrun))
+		return 1;
+
+	if (nested_svm_do(svm, svm->nested_vmcb_msrpm, 0,
+			  NULL, nested_svm_vmrun_msrpm))
+		return 1;
+
+	return 1;
+}
+
+static int stgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	if (nested_svm_check_permissions(svm))
+		return 1;
+
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+	skip_emulated_instruction(&svm->vcpu);
+
+	svm->vcpu.arch.hflags |= HF_GIF_MASK;
+
+	return 1;
+}
+
+static int clgi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
+{
+	if (nested_svm_check_permissions(svm))
+		return 1;
+
+	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
+	skip_emulated_instruction(&svm->vcpu);
+
+	svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
+
+	/* After a CLGI no interrupts should come */
+	svm_clear_vintr(svm);
+	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
+
+	return 1;
+}
+
 static int invalid_op_interception(struct vcpu_svm *svm,
 				   struct kvm_run *kvm_run)
 {
@@ -1250,6 +1940,15 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
 	case MSR_IA32_LASTINTTOIP:
 		*data = svm->vmcb->save.last_excp_to;
 		break;
+	case MSR_VM_HSAVE_PA:
+		*data = svm->hsave_msr;
+		break;
+	case MSR_VM_CR:
+		*data = 0;
+		break;
+	case MSR_IA32_UCODE_REV:
+		*data = 0x01000065;
+		break;
 	default:
 		return kvm_get_msr_common(vcpu, ecx, data);
 	}
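
Note: MSR_VM_HSAVE_PA (0xc0010117) is where a hypervisor announces the
physical address of its host save area before it may execute VMRUN. A
guest hypervisor would set it up roughly like this (illustrative sketch;
hsave_pa is an assumed name):

	/* L1 hypervisor, early SVM init: hsave_pa is a 4KB-aligned
	 * physical address reserved for the host save area */
	wrmsrl(MSR_VM_HSAVE_PA, hsave_pa);

The patch only remembers the guest's value for read-back; the state
actually saved across a nested VMRUN lives in the svm->hsave page
allocated in svm_create_vcpu() above.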
@@ -1344,6 +2043,9 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
 		pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
 
 		break;
+	case MSR_VM_HSAVE_PA:
+		svm->hsave_msr = data;
+		break;
 	default:
 		return kvm_set_msr_common(vcpu, ecx, data);
 	}
@@ -1380,7 +2082,7 @@ static int interrupt_window_interception(struct vcpu_svm *svm,
 {
 	KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
 
-	svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
+	svm_clear_vintr(svm);
 	svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
 	/*
 	 * If the user space waits to inject interrupts, exit as soon as
@@ -1417,6 +2119,8 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_WRITE_DR3]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR5]			= emulate_on_interception,
 	[SVM_EXIT_WRITE_DR7]			= emulate_on_interception,
+	[SVM_EXIT_EXCP_BASE + DB_VECTOR]	= db_interception,
+	[SVM_EXIT_EXCP_BASE + BP_VECTOR]	= bp_interception,
 	[SVM_EXIT_EXCP_BASE + UD_VECTOR]	= ud_interception,
 	[SVM_EXIT_EXCP_BASE + PF_VECTOR]	= pf_interception,
 	[SVM_EXIT_EXCP_BASE + NM_VECTOR]	= nm_interception,
@@ -1436,12 +2140,12 @@ static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
 	[SVM_EXIT_MSR]				= msr_interception,
 	[SVM_EXIT_TASK_SWITCH]			= task_switch_interception,
 	[SVM_EXIT_SHUTDOWN]			= shutdown_interception,
-	[SVM_EXIT_VMRUN]			= invalid_op_interception,
+	[SVM_EXIT_VMRUN]			= vmrun_interception,
 	[SVM_EXIT_VMMCALL]			= vmmcall_interception,
-	[SVM_EXIT_VMLOAD]			= invalid_op_interception,
-	[SVM_EXIT_VMSAVE]			= invalid_op_interception,
-	[SVM_EXIT_STGI]				= invalid_op_interception,
-	[SVM_EXIT_CLGI]				= invalid_op_interception,
+	[SVM_EXIT_VMLOAD]			= vmload_interception,
+	[SVM_EXIT_VMSAVE]			= vmsave_interception,
+	[SVM_EXIT_STGI]				= stgi_interception,
+	[SVM_EXIT_CLGI]				= clgi_interception,
 	[SVM_EXIT_SKINIT]			= invalid_op_interception,
 	[SVM_EXIT_WBINVD]			= emulate_on_interception,
 	[SVM_EXIT_MONITOR]			= invalid_op_interception,
@@ -1457,6 +2161,17 @@ static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
 	KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
 		    (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
 
+	if (is_nested(svm)) {
+		nsvm_printk("nested handle_exit: 0x%x | 0x%lx | 0x%lx | 0x%lx\n",
+			    exit_code, svm->vmcb->control.exit_info_1,
+			    svm->vmcb->control.exit_info_2, svm->vmcb->save.rip);
+		if (nested_svm_exit_handled(svm, true)) {
+			nested_svm_vmexit(svm);
+			nsvm_printk("-> #VMEXIT\n");
+			return 1;
+		}
+	}
+
 	if (npt_enabled) {
 		int mmu_reload = 0;
 		if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
@@ -1544,6 +2259,8 @@ static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
 {
 	struct vcpu_svm *svm = to_svm(vcpu);
 
+	nested_svm_intr(svm);
+
 	svm_inject_irq(svm, irq);
 }
 
@@ -1589,11 +2306,17 @@ static void svm_intr_assist(struct kvm_vcpu *vcpu)
 	if (!kvm_cpu_has_interrupt(vcpu))
 		goto out;
 
+	if (nested_svm_intr(svm))
+		goto out;
+
+	if (!(svm->vcpu.arch.hflags & HF_GIF_MASK))
+		goto out;
+
 	if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
 	    (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
 	    (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
 		/* unable to deliver irq, set pending irq */
-		vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
+		svm_set_vintr(svm);
 		svm_inject_irq(svm, 0x0);
 		goto out;
 	}
@@ -1615,7 +2338,8 @@ static void kvm_reput_irq(struct vcpu_svm *svm)
 	}
 
 	svm->vcpu.arch.interrupt_window_open =
-		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
+		!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
+		 (svm->vcpu.arch.hflags & HF_GIF_MASK);
 }
 
 static void svm_do_inject_vector(struct vcpu_svm *svm)
@@ -1637,9 +2361,13 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 	struct vcpu_svm *svm = to_svm(vcpu);
 	struct vmcb_control_area *control = &svm->vmcb->control;
 
+	if (nested_svm_intr(svm))
+		return;
+
 	svm->vcpu.arch.interrupt_window_open =
 		(!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
-		 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
+		 (svm->vmcb->save.rflags & X86_EFLAGS_IF) &&
+		 (svm->vcpu.arch.hflags & HF_GIF_MASK));
 
 	if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
 		/*
@@ -1652,9 +2380,9 @@ static void do_interrupt_requests(struct kvm_vcpu *vcpu,
 	 */
 	if (!svm->vcpu.arch.interrupt_window_open &&
 	    (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
-		control->intercept |= 1ULL << INTERCEPT_VINTR;
+		svm_set_vintr(svm);
 	else
-		control->intercept &= ~(1ULL << INTERCEPT_VINTR);
+		svm_clear_vintr(svm);
 }
 
 static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
@@ -1662,22 +2390,6 @@ static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
 	return 0;
 }
 
-static void save_db_regs(unsigned long *db_regs)
-{
-	asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
-	asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
-	asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
-	asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
-}
-
-static void load_db_regs(unsigned long *db_regs)
-{
-	asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
-	asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
-	asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
-	asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
-}
-
 static void svm_flush_tlb(struct kvm_vcpu *vcpu)
 {
 	force_new_asid(vcpu);
@@ -1736,19 +2448,12 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 	gs_selector = kvm_read_gs();
 	ldt_selector = kvm_read_ldt();
 	svm->host_cr2 = kvm_read_cr2();
-	svm->host_dr6 = read_dr6();
-	svm->host_dr7 = read_dr7();
-	svm->vmcb->save.cr2 = vcpu->arch.cr2;
+	if (!is_nested(svm))
+		svm->vmcb->save.cr2 = vcpu->arch.cr2;
 	/* required for live migration with NPT */
 	if (npt_enabled)
 		svm->vmcb->save.cr3 = vcpu->arch.cr3;
 
-	if (svm->vmcb->save.dr7 & 0xff) {
-		write_dr7(0);
-		save_db_regs(svm->host_db_regs);
-		load_db_regs(svm->db_regs);
-	}
-
 	clgi();
 
 	local_irq_enable();
@@ -1824,16 +2529,11 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 #endif
 		);
 
-	if ((svm->vmcb->save.dr7 & 0xff))
-		load_db_regs(svm->host_db_regs);
-
 	vcpu->arch.cr2 = svm->vmcb->save.cr2;
 	vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-	write_dr6(svm->host_dr6);
-	write_dr7(svm->host_dr7);
 	kvm_write_cr2(svm->host_cr2);
 
 	kvm_load_fs(fs_selector);