Diffstat (limited to 'arch/x86/kvm/svm.c')
 arch/x86/kvm/svm.c | 34 ++++++++++++++++++----------------
 1 file changed, 18 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 307e5bddb6d9..f13a3a24d360 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3414,6 +3414,14 @@ static int nested_svm_vmexit(struct vcpu_svm *svm)
 	kvm_mmu_reset_context(&svm->vcpu);
 	kvm_mmu_load(&svm->vcpu);
 
+	/*
+	 * Drop what we picked up for L2 via svm_complete_interrupts() so it
+	 * doesn't end up in L1.
+	 */
+	svm->vcpu.arch.nmi_injected = false;
+	kvm_clear_exception_queue(&svm->vcpu);
+	kvm_clear_interrupt_queue(&svm->vcpu);
+
 	return 0;
 }
 
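
The hunk above clears event-injection state that svm_complete_interrupts() latched while L2 was running, so a pending NMI, exception, or interrupt destined for L2 is not replayed into L1 after the emulated VM-exit. A minimal standalone sketch of the idea, using assumed toy_* names rather than the real KVM structures:

    #include <stdbool.h>
    #include <stdio.h>

    /* Toy vCPU event state; the real fields live in struct kvm_vcpu_arch. */
    struct toy_vcpu {
            bool nmi_injected;
            bool exception_pending;
            bool interrupt_pending;
    };

    /* On emulated VM-exit, drop everything picked up for the nested guest. */
    static void toy_nested_vmexit(struct toy_vcpu *vcpu)
    {
            vcpu->nmi_injected = false;      /* mirrors the nmi_injected reset */
            vcpu->exception_pending = false; /* models kvm_clear_exception_queue() */
            vcpu->interrupt_pending = false; /* models kvm_clear_interrupt_queue() */
    }

    int main(void)
    {
            struct toy_vcpu vcpu = { .nmi_injected = true };

            toy_nested_vmexit(&vcpu);
            printf("nmi_injected=%d\n", vcpu.nmi_injected); /* prints 0 */
            return 0;
    }
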
@@ -4395,7 +4403,7 @@ static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
 	case MSR_IA32_APICBASE:
 		if (kvm_vcpu_apicv_active(vcpu))
 			avic_update_vapic_bar(to_svm(vcpu), data);
-		/* Follow through */
+		/* Fall through */
 	default:
 		return kvm_set_msr_common(vcpu, msr);
 	}
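
This hunk only fixes the comment wording: "Fall through" is among the marker comments that GCC's -Wimplicit-fallthrough recognizes, so the deliberate fall-through into the default label no longer reads as an accident. A small standalone illustration of the pattern (the MSR number and strings are made up for the example):

    #include <stdio.h>

    static const char *classify_msr(unsigned int msr)
    {
            switch (msr) {
            case 0x1b: /* hypothetical MSR number, for illustration only */
                    puts("special handling first");
                    /* Fall through */
            default:
                    return "then the common path";
            }
    }

    int main(void)
    {
            printf("%s\n", classify_msr(0x1b));
            return 0;
    }
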
@@ -4504,28 +4512,19 @@ static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
 		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
-		int i;
-		struct kvm_vcpu *vcpu;
-		struct kvm *kvm = svm->vcpu.kvm;
 		struct kvm_lapic *apic = svm->vcpu.arch.apic;
 
 		/*
-		 * At this point, we expect that the AVIC HW has already
-		 * set the appropriate IRR bits on the valid target
-		 * vcpus. So, we just need to kick the appropriate vcpu.
+		 * Update ICR high and low, then emulate sending IPI,
+		 * which is handled when writing APIC_ICR.
 		 */
-		kvm_for_each_vcpu(i, vcpu, kvm) {
-			bool m = kvm_apic_match_dest(vcpu, apic,
-						     icrl & KVM_APIC_SHORT_MASK,
-						     GET_APIC_DEST_FIELD(icrh),
-						     icrl & KVM_APIC_DEST_MASK);
-
-			if (m && !avic_vcpu_is_running(vcpu))
-				kvm_vcpu_wake_up(vcpu);
-		}
+		kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
+		kvm_lapic_reg_write(apic, APIC_ICR, icrl);
 		break;
 	}
 	case AVIC_IPI_FAILURE_INVALID_TARGET:
+		WARN_ONCE(1, "Invalid IPI target: index=%u, vcpu=%d, icr=%#0x:%#0x\n",
+			  index, svm->vcpu.vcpu_id, icrh, icrl);
 		break;
 	case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
 		WARN_ONCE(1, "Invalid backing page\n");
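
Rather than scanning every vCPU and matching destinations by hand, the handler now feeds icrh/icrl back through the emulated local APIC, which already does destination matching and delivery. The ordering matters: in the xAPIC layout the ICR is programmed as two 32-bit halves, and the write to the low half (APIC_ICR) is what triggers the send, so the high half (APIC_ICR2, holding the destination) must be written first. A toy model of that register protocol, with assumed toy_* names:

    #include <stdint.h>
    #include <stdio.h>

    /* Toy ICR model (not kernel code): two 32-bit halves of one register. */
    struct toy_lapic {
            uint32_t icr_high; /* destination field, top byte is the APIC ID */
            uint32_t icr_low;  /* vector, delivery mode, shorthand, etc. */
    };

    static void toy_deliver_ipi(const struct toy_lapic *apic)
    {
            printf("IPI: dest=%#x vector=%#x\n",
                   apic->icr_high >> 24, apic->icr_low & 0xff);
    }

    static void toy_icr_write(struct toy_lapic *apic, int high, uint32_t val)
    {
            if (high) {
                    apic->icr_high = val;
            } else {
                    apic->icr_low = val;
                    toy_deliver_ipi(apic); /* low-half write triggers the send */
            }
    }

    int main(void)
    {
            struct toy_lapic apic = { 0 };

            toy_icr_write(&apic, 1, 0x01000000); /* destination APIC ID 1 first */
            toy_icr_write(&apic, 0, 0x000000fe); /* then vector 0xfe: IPI fires */
            return 0;
    }
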
@@ -6278,6 +6277,9 @@ static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
 	int asid, ret;
 
 	ret = -EBUSY;
+	if (unlikely(sev->active))
+		return ret;
+
 	asid = sev_asid_new();
 	if (asid < 0)
 		return ret;
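
The added check makes sev_guest_init() fail with -EBUSY when SEV is already active for the VM, instead of allocating a second ASID and clobbering the existing state. A minimal userspace sketch of this guard-before-allocate pattern (toy_* names are assumed, not the kernel's):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct toy_sev {
            bool active;
            int asid;
    };

    static int toy_asid_new(void)
    {
            static int next = 1;
            return next++; /* pretend allocation always succeeds */
    }

    static int toy_sev_init(struct toy_sev *sev)
    {
            if (sev->active) /* already initialized: refuse before allocating */
                    return -EBUSY;

            sev->asid = toy_asid_new();
            sev->active = true;
            return 0;
    }

    int main(void)
    {
            struct toy_sev sev = { 0 };

            printf("first:  %d\n", toy_sev_init(&sev)); /* 0 */
            printf("second: %d\n", toy_sev_init(&sev)); /* -EBUSY (-16) */
            return 0;
    }
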