diff options
author    Paolo Bonzini <pbonzini@redhat.com>  2015-12-10 12:37:32 -0500
committer Paolo Bonzini <pbonzini@redhat.com>  2015-12-11 06:26:33 -0500
commit    8b89fe1f6c430589122542f228a802d34995bebd (patch)
tree      f8465bc4d48bf413da077350a2845a14066981b4
parent    6764e5ebd5c62236d082f9ae030674467d0b2779 (diff)
kvm: x86: move tracepoints outside extended quiescent state
Invoking tracepoints within kvm_guest_enter/kvm_guest_exit causes a
lockdep splat.
Reported-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
 arch/x86/kvm/svm.c | 4 ++--
 arch/x86/kvm/vmx.c | 3 ++-
 arch/x86/kvm/x86.c | 4 ++--
 3 files changed, 6 insertions(+), 5 deletions(-)
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 83a1c643f9a5..899c40f826dd 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3422,6 +3422,8 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
+	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
+
 	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
@@ -3892,8 +3894,6 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
-	trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
-
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_handle_nmi(&svm->vcpu);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index af823a388c19..6b5605607849 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -8042,6 +8042,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
+	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
+
 	/*
 	 * Flush logged GPAs PML buffer, this will make dirty_bitmap more
 	 * updated. Another good is, in kvm_vm_ioctl_get_dirty_log, before
@@ -8668,7 +8670,6 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->loaded_vmcs->launched = 1;
 
 	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
-	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
 
 	/*
 	 * the KVM_REQ_EVENT optimization bit is only on for one entry, and if
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index eed32283d22c..b84ba4b17757 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6515,6 +6515,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 	if (req_immediate_exit)
 		smp_send_reschedule(vcpu->cpu);
 
+	trace_kvm_entry(vcpu->vcpu_id);
+	wait_lapic_expire(vcpu);
 	__kvm_guest_enter();
 
 	if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -6527,8 +6529,6 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
 		vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_RELOAD;
 	}
 
-	trace_kvm_entry(vcpu->vcpu_id);
-	wait_lapic_expire(vcpu);
 	kvm_x86_ops->run(vcpu);
 
 	/*