aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86
diff options
context:
space:
mode:
authorJan Kiszka <jan.kiszka@siemens.com>2011-09-12 04:52:24 -0400
committerAvi Kivity <avi@redhat.com>2011-09-25 12:52:41 -0400
commit1e2b1dd797f9356f779268ecc1ba21110d4e341c (patch)
tree3a61d8c05758bfda30ab22a125c8b1af6d503ab1 /arch/x86
parentcaa8a168e35650961b9b0d43b9b6fc2279351949 (diff)
KVM: x86: Move kvm_trace_exit into atomic vmexit section
This avoids that events which cause the vmexit are recorded before the actual exit reason.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86')
-rw-r--r--arch/x86/kvm/svm.c4
-rw-r--r--arch/x86/kvm/vmx.c3
2 files changed, 3 insertions, 4 deletions
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 8277f32017ad..e7ed4b1623b9 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -3335,8 +3335,6 @@ static int handle_exit(struct kvm_vcpu *vcpu)
 	struct kvm_run *kvm_run = vcpu->run;
 	u32 exit_code = svm->vmcb->control.exit_code;
 
-	trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);
-
 	if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
 		vcpu->arch.cr0 = svm->vmcb->save.cr0;
 	if (npt_enabled)
@@ -3790,6 +3788,8 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
 	vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
 	vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
 
+	trace_kvm_exit(svm->vmcb->control.exit_code, vcpu, KVM_ISA_SVM);
+
 	if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
 		kvm_before_handle_nmi(&svm->vcpu);
 
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 47419d6031ea..21217b65b129 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -5739,8 +5739,6 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
 	u32 exit_reason = vmx->exit_reason;
 	u32 vectoring_info = vmx->idt_vectoring_info;
 
-	trace_kvm_exit(exit_reason, vcpu, KVM_ISA_VMX);
-
 	/* If guest state is invalid, start emulating */
 	if (vmx->emulation_required && emulate_invalid_guest_state)
 		return handle_invalid_guest_state(vcpu);
@@ -6144,6 +6142,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	vmx->loaded_vmcs->launched = 1;
 
 	vmx->exit_reason = vmcs_read32(VM_EXIT_REASON);
+	trace_kvm_exit(vmx->exit_reason, vcpu, KVM_ISA_VMX);
 
 	vmx_complete_atomic_exit(vmx);
 	vmx_recover_nmi_blocking(vmx);