Diffstat (limited to 'arch/x86/kvm/svm.c')
 -rw-r--r--  arch/x86/kvm/svm.c | 241
 1 file changed, 171 insertions(+), 70 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e68f0b3cbf7..b71daed3cca2 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1034,15 +1034,12 @@ static int avic_ga_log_notifier(u32 ga_tag)
 	}
 	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
 
-	if (!vcpu)
-		return 0;
-
 	/* Note:
 	 * At this point, the IOMMU should have already set the pending
 	 * bit in the vAPIC backing page. So, we just need to schedule
 	 * in the vcpu.
	 */
-	if (vcpu->mode == OUTSIDE_GUEST_MODE)
+	if (vcpu)
 		kvm_vcpu_wake_up(vcpu);
 
 	return 0;
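
Note on the hunk above: the NULL check is folded into the wake-up condition and the OUTSIDE_GUEST_MODE test is dropped. This appears safe because kvm_vcpu_wake_up() only has an effect on a vCPU that is actually blocked, so calling it unconditionally on a valid target is harmless and the GA-log notifier no longer needs to reason about the vCPU's execution mode.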
@@ -2144,7 +2141,18 @@ static int pf_interception(struct vcpu_svm *svm)
 
 	return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
 			svm->vmcb->control.insn_bytes,
-			svm->vmcb->control.insn_len, !npt_enabled);
+			svm->vmcb->control.insn_len);
+}
+
+static int npf_interception(struct vcpu_svm *svm)
+{
+	u64 fault_address = svm->vmcb->control.exit_info_2;
+	u64 error_code = svm->vmcb->control.exit_info_1;
+
+	trace_kvm_page_fault(fault_address, error_code);
+	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
+			svm->vmcb->control.insn_bytes,
+			svm->vmcb->control.insn_len);
 }
 
 static int db_interception(struct vcpu_svm *svm)
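
Note on the hunk above: page-fault exits are split in two. #PF keeps pf_interception() and kvm_handle_page_fault(), while #NPF gets its own handler that feeds kvm_mmu_page_fault() directly (keeping the trace_kvm_page_fault() tracepoint), so the !npt_enabled flag is no longer needed to tell the two cases apart. For SVM, exit_info_1 carries the error code and exit_info_2 the faulting address on both exits, so the opening of pf_interception(), which the hunk's context does not show, presumably mirrors npf_interception()'s reads, giving roughly:

	static int pf_interception(struct vcpu_svm *svm)
	{
		u64 fault_address = svm->vmcb->control.exit_info_2;
		u64 error_code = svm->vmcb->control.exit_info_1;

		return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
				svm->vmcb->control.insn_bytes,
				svm->vmcb->control.insn_len);
	}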
@@ -2916,70 +2924,9 @@ static bool nested_vmcb_checks(struct vmcb *vmcb)
 	return true;
 }
 
-static bool nested_svm_vmrun(struct vcpu_svm *svm)
+static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
+				 struct vmcb *nested_vmcb, struct page *page)
 {
-	struct vmcb *nested_vmcb;
-	struct vmcb *hsave = svm->nested.hsave;
-	struct vmcb *vmcb = svm->vmcb;
-	struct page *page;
-	u64 vmcb_gpa;
-
-	vmcb_gpa = svm->vmcb->save.rax;
-
-	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
-	if (!nested_vmcb)
-		return false;
-
-	if (!nested_vmcb_checks(nested_vmcb)) {
-		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
-		nested_vmcb->control.exit_code_hi = 0;
-		nested_vmcb->control.exit_info_1 = 0;
-		nested_vmcb->control.exit_info_2 = 0;
-
-		nested_svm_unmap(page);
-
-		return false;
-	}
-
-	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
-			       nested_vmcb->save.rip,
-			       nested_vmcb->control.int_ctl,
-			       nested_vmcb->control.event_inj,
-			       nested_vmcb->control.nested_ctl);
-
-	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
-				    nested_vmcb->control.intercept_cr >> 16,
-				    nested_vmcb->control.intercept_exceptions,
-				    nested_vmcb->control.intercept);
-
-	/* Clear internal status */
-	kvm_clear_exception_queue(&svm->vcpu);
-	kvm_clear_interrupt_queue(&svm->vcpu);
-
-	/*
-	 * Save the old vmcb, so we don't need to pick what we save, but can
-	 * restore everything when a VMEXIT occurs
-	 */
-	hsave->save.es = vmcb->save.es;
-	hsave->save.cs = vmcb->save.cs;
-	hsave->save.ss = vmcb->save.ss;
-	hsave->save.ds = vmcb->save.ds;
-	hsave->save.gdtr = vmcb->save.gdtr;
-	hsave->save.idtr = vmcb->save.idtr;
-	hsave->save.efer = svm->vcpu.arch.efer;
-	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
-	hsave->save.cr4 = svm->vcpu.arch.cr4;
-	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
-	hsave->save.rip = kvm_rip_read(&svm->vcpu);
-	hsave->save.rsp = vmcb->save.rsp;
-	hsave->save.rax = vmcb->save.rax;
-	if (npt_enabled)
-		hsave->save.cr3 = vmcb->save.cr3;
-	else
-		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
-
-	copy_vmcb_control_area(hsave, vmcb);
-
 	if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
 		svm->vcpu.arch.hflags |= HF_HIF_MASK;
 	else
@@ -3072,6 +3019,73 @@ static bool nested_svm_vmrun(struct vcpu_svm *svm)
 	enable_gif(svm);
 
 	mark_all_dirty(svm->vmcb);
+}
+
+static bool nested_svm_vmrun(struct vcpu_svm *svm)
+{
+	struct vmcb *nested_vmcb;
+	struct vmcb *hsave = svm->nested.hsave;
+	struct vmcb *vmcb = svm->vmcb;
+	struct page *page;
+	u64 vmcb_gpa;
+
+	vmcb_gpa = svm->vmcb->save.rax;
+
+	nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
+	if (!nested_vmcb)
+		return false;
+
+	if (!nested_vmcb_checks(nested_vmcb)) {
+		nested_vmcb->control.exit_code = SVM_EXIT_ERR;
+		nested_vmcb->control.exit_code_hi = 0;
+		nested_vmcb->control.exit_info_1 = 0;
+		nested_vmcb->control.exit_info_2 = 0;
+
+		nested_svm_unmap(page);
+
+		return false;
+	}
+
+	trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
+			       nested_vmcb->save.rip,
+			       nested_vmcb->control.int_ctl,
+			       nested_vmcb->control.event_inj,
+			       nested_vmcb->control.nested_ctl);
+
+	trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
+				    nested_vmcb->control.intercept_cr >> 16,
+				    nested_vmcb->control.intercept_exceptions,
+				    nested_vmcb->control.intercept);
+
+	/* Clear internal status */
+	kvm_clear_exception_queue(&svm->vcpu);
+	kvm_clear_interrupt_queue(&svm->vcpu);
+
+	/*
+	 * Save the old vmcb, so we don't need to pick what we save, but can
+	 * restore everything when a VMEXIT occurs
+	 */
+	hsave->save.es = vmcb->save.es;
+	hsave->save.cs = vmcb->save.cs;
+	hsave->save.ss = vmcb->save.ss;
+	hsave->save.ds = vmcb->save.ds;
+	hsave->save.gdtr = vmcb->save.gdtr;
+	hsave->save.idtr = vmcb->save.idtr;
+	hsave->save.efer = svm->vcpu.arch.efer;
+	hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
+	hsave->save.cr4 = svm->vcpu.arch.cr4;
+	hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
+	hsave->save.rip = kvm_rip_read(&svm->vcpu);
+	hsave->save.rsp = vmcb->save.rsp;
+	hsave->save.rax = vmcb->save.rax;
+	if (npt_enabled)
+		hsave->save.cr3 = vmcb->save.cr3;
+	else
+		hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
+
+	copy_vmcb_control_area(hsave, vmcb);
+
+	enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
 
 	return true;
 }
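
Note on the two hunks above: the moved body is unchanged; this is a pure refactor. The point of splitting enter_svm_guest_mode() out of nested_svm_vmrun() becomes clear in svm_pre_leave_smm() further down, which re-enters guest mode on RSM via the same helper without duplicating the VMRUN-specific mapping, consistency checks, and hsave bookkeeping.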
@@ -3173,7 +3187,7 @@ static int stgi_interception(struct vcpu_svm *svm)
 
 	/*
 	 * If VGIF is enabled, the STGI intercept is only added to
-	 * detect the opening of the NMI window; remove it now.
+	 * detect the opening of the SMI/NMI window; remove it now.
 	 */
 	if (vgif_enabled(svm))
 		clr_intercept(svm, INTERCEPT_STGI);
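
Note on the hunk above: the comment widens from NMI to SMI/NMI because enable_smi_window(), added below, reuses the same trick as the NMI window: with VGIF, INTERCEPT_STGI is set while GIF is clear, so the STGI that re-opens the window causes a VM exit and lands here.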
@@ -4131,7 +4145,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
 	[SVM_EXIT_MONITOR] = monitor_interception,
 	[SVM_EXIT_MWAIT] = mwait_interception,
 	[SVM_EXIT_XSETBV] = xsetbv_interception,
-	[SVM_EXIT_NPF] = pf_interception,
+	[SVM_EXIT_NPF] = npf_interception,
 	[SVM_EXIT_RSM] = emulate_on_interception,
 	[SVM_EXIT_AVIC_INCOMPLETE_IPI] = avic_incomplete_ipi_interception,
 	[SVM_EXIT_AVIC_UNACCELERATED_ACCESS] = avic_unaccelerated_access_interception,
@@ -5393,6 +5407,88 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
 	vcpu->arch.mcg_cap &= 0x1ff;
 }
 
+static int svm_smi_allowed(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	/* Per APM Vol.2 15.22.2 "Response to SMI" */
+	if (!gif_set(svm))
+		return 0;
+
+	if (is_guest_mode(&svm->vcpu) &&
+	    svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
+		/* TODO: Might need to set exit_info_1 and exit_info_2 here */
+		svm->vmcb->control.exit_code = SVM_EXIT_SMI;
+		svm->nested.exit_required = true;
+		return 0;
+	}
+
+	return 1;
+}
+
+static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	int ret;
+
+	if (is_guest_mode(vcpu)) {
+		/* FED8h - SVM Guest */
+		put_smstate(u64, smstate, 0x7ed8, 1);
+		/* FEE0h - SVM Guest VMCB Physical Address */
+		put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
+
+		svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+		svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+		svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+
+		ret = nested_svm_vmexit(svm);
+		if (ret)
+			return ret;
+	}
+	return 0;
+}
+
+static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+	struct vmcb *nested_vmcb;
+	struct page *page;
+	struct {
+		u64 guest;
+		u64 vmcb;
+	} svm_state_save;
+	int ret;
+
+	ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
+				  sizeof(svm_state_save));
+	if (ret)
+		return ret;
+
+	if (svm_state_save.guest) {
+		vcpu->arch.hflags &= ~HF_SMM_MASK;
+		nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
+		if (nested_vmcb)
+			enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
+		else
+			ret = 1;
+		vcpu->arch.hflags |= HF_SMM_MASK;
+	}
+	return ret;
+}
+
+static int enable_smi_window(struct kvm_vcpu *vcpu)
+{
+	struct vcpu_svm *svm = to_svm(vcpu);
+
+	if (!gif_set(svm)) {
+		if (vgif_enabled(svm))
+			set_intercept(svm, INTERCEPT_STGI);
+		/* STGI will cause a vm exit */
+		return 1;
+	}
+	return 0;
+}
+
 static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.cpu_has_kvm_support = has_svm,
 	.disabled_by_bios = is_disabled,
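
Note on the hunk above: the anonymous svm_state_save struct read back in svm_pre_leave_smm() mirrors the two put_smstate() stores in svm_pre_enter_smm(). Per the code comments, the fields live at SMRAM state-save offsets FED8h ("SVM Guest") and FEE0h ("SVM Guest VMCB Physical Address"), and the 0x7ed8/0x7ee0 constants are those offsets less 0x8000. A standalone sketch of the arithmetic follows; it is illustrative userspace C, not kernel code, and the SMBASE value is an arbitrary example:

	#include <assert.h>
	#include <stdio.h>

	int main(void)
	{
		const unsigned long smbase = 0x30000;	/* example SMBASE only */

		/* Offsets handed to put_smstate() in svm_pre_enter_smm(). */
		assert(0xfed8 - 0x8000 == 0x7ed8);	/* "SVM Guest" flag */
		assert(0xfee0 - 0x8000 == 0x7ee0);	/* saved VMCB address */

		/* svm_pre_leave_smm() reads both u64 fields in one call,
		 * which is why svm_state_save packs them as two adjacent
		 * members. */
		printf("kvm_vcpu_read_guest() address: 0x%lx\n", smbase + 0xfed8);
		printf("field spacing: %d bytes\n", 0xfee0 - 0xfed8);	/* 8 */
		return 0;
	}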
@@ -5503,6 +5599,11 @@ static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
 	.deliver_posted_interrupt = svm_deliver_avic_intr,
 	.update_pi_irte = svm_update_pi_irte,
 	.setup_mce = svm_setup_mce,
+
+	.smi_allowed = svm_smi_allowed,
+	.pre_enter_smm = svm_pre_enter_smm,
+	.pre_leave_smm = svm_pre_leave_smm,
+	.enable_smi_window = enable_smi_window,
 };
 
 static int __init svm_init(void)