about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorJan Kiszka <jan.kiszka@siemens.com>2014-01-04 12:47:20 -0500
committerPaolo Bonzini <pbonzini@redhat.com>2014-01-17 04:22:14 -0500
commit533558bcb69ef28aff81b6ae9acda8943575319f (patch)
treebc8ba9c288267c16e0a0039ec0c50e89392fe884
parent42124925c1f580068661bebd963d7c102175a8a9 (diff)
KVM: nVMX: Pass vmexit parameters to nested_vmx_vmexit
Instead of fixing up the vmcs12 after the nested vmexit, pass key parameters already when calling nested_vmx_vmexit. This will help tracing those vmexits.

Signed-off-by: Jan Kiszka <jan.kiszka@siemens.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
-rw-r--r--arch/x86/kvm/vmx.c63
1 file changed, 34 insertions(+), 29 deletions(-)
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index bff55554faec..e3578b301d81 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -1058,7 +1058,9 @@ static inline bool is_exception(u32 intr_info)
1058 == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK); 1058 == (INTR_TYPE_HARD_EXCEPTION | INTR_INFO_VALID_MASK);
1059} 1059}
1060 1060
1061static void nested_vmx_vmexit(struct kvm_vcpu *vcpu); 1061static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
1062 u32 exit_intr_info,
1063 unsigned long exit_qualification);
1062static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu, 1064static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
1063 struct vmcs12 *vmcs12, 1065 struct vmcs12 *vmcs12,
1064 u32 reason, unsigned long qualification); 1066 u32 reason, unsigned long qualification);
@@ -1967,7 +1969,9 @@ static int nested_vmx_check_exception(struct kvm_vcpu *vcpu, unsigned nr)
1967 if (!(vmcs12->exception_bitmap & (1u << nr))) 1969 if (!(vmcs12->exception_bitmap & (1u << nr)))
1968 return 0; 1970 return 0;
1969 1971
1970 nested_vmx_vmexit(vcpu); 1972 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
1973 vmcs_read32(VM_EXIT_INTR_INFO),
1974 vmcs_readl(EXIT_QUALIFICATION));
1971 return 1; 1975 return 1;
1972} 1976}
1973 1977
@@ -4649,15 +4653,12 @@ static void vmx_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
4649static int vmx_nmi_allowed(struct kvm_vcpu *vcpu) 4653static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
4650{ 4654{
4651 if (is_guest_mode(vcpu)) { 4655 if (is_guest_mode(vcpu)) {
4652 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4653
4654 if (to_vmx(vcpu)->nested.nested_run_pending) 4656 if (to_vmx(vcpu)->nested.nested_run_pending)
4655 return 0; 4657 return 0;
4656 if (nested_exit_on_nmi(vcpu)) { 4658 if (nested_exit_on_nmi(vcpu)) {
4657 nested_vmx_vmexit(vcpu); 4659 nested_vmx_vmexit(vcpu, EXIT_REASON_EXCEPTION_NMI,
4658 vmcs12->vm_exit_reason = EXIT_REASON_EXCEPTION_NMI; 4660 NMI_VECTOR | INTR_TYPE_NMI_INTR |
4659 vmcs12->vm_exit_intr_info = NMI_VECTOR | 4661 INTR_INFO_VALID_MASK, 0);
4660 INTR_TYPE_NMI_INTR | INTR_INFO_VALID_MASK;
4661 /* 4662 /*
4662 * The NMI-triggered VM exit counts as injection: 4663 * The NMI-triggered VM exit counts as injection:
4663 * clear this one and block further NMIs. 4664 * clear this one and block further NMIs.
@@ -4679,15 +4680,11 @@ static int vmx_nmi_allowed(struct kvm_vcpu *vcpu)
4679static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu) 4680static int vmx_interrupt_allowed(struct kvm_vcpu *vcpu)
4680{ 4681{
4681 if (is_guest_mode(vcpu)) { 4682 if (is_guest_mode(vcpu)) {
4682 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
4683
4684 if (to_vmx(vcpu)->nested.nested_run_pending) 4683 if (to_vmx(vcpu)->nested.nested_run_pending)
4685 return 0; 4684 return 0;
4686 if (nested_exit_on_intr(vcpu)) { 4685 if (nested_exit_on_intr(vcpu)) {
4687 nested_vmx_vmexit(vcpu); 4686 nested_vmx_vmexit(vcpu, EXIT_REASON_EXTERNAL_INTERRUPT,
4688 vmcs12->vm_exit_reason = 4687 0, 0);
4689 EXIT_REASON_EXTERNAL_INTERRUPT;
4690 vmcs12->vm_exit_intr_info = 0;
4691 /* 4688 /*
4692 * fall through to normal code, but now in L1, not L2 4689 * fall through to normal code, but now in L1, not L2
4693 */ 4690 */
@@ -6849,7 +6846,9 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
6849 return handle_invalid_guest_state(vcpu); 6846 return handle_invalid_guest_state(vcpu);
6850 6847
6851 if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) { 6848 if (is_guest_mode(vcpu) && nested_vmx_exit_handled(vcpu)) {
6852 nested_vmx_vmexit(vcpu); 6849 nested_vmx_vmexit(vcpu, exit_reason,
6850 vmcs_read32(VM_EXIT_INTR_INFO),
6851 vmcs_readl(EXIT_QUALIFICATION));
6853 return 1; 6852 return 1;
6854 } 6853 }
6855 6854
@@ -7590,15 +7589,14 @@ static void vmx_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
7590static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu, 7589static void nested_ept_inject_page_fault(struct kvm_vcpu *vcpu,
7591 struct x86_exception *fault) 7590 struct x86_exception *fault)
7592{ 7591{
7593 struct vmcs12 *vmcs12; 7592 struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
7594 nested_vmx_vmexit(vcpu); 7593 u32 exit_reason;
7595 vmcs12 = get_vmcs12(vcpu);
7596 7594
7597 if (fault->error_code & PFERR_RSVD_MASK) 7595 if (fault->error_code & PFERR_RSVD_MASK)
7598 vmcs12->vm_exit_reason = EXIT_REASON_EPT_MISCONFIG; 7596 exit_reason = EXIT_REASON_EPT_MISCONFIG;
7599 else 7597 else
7600 vmcs12->vm_exit_reason = EXIT_REASON_EPT_VIOLATION; 7598 exit_reason = EXIT_REASON_EPT_VIOLATION;
7601 vmcs12->exit_qualification = vcpu->arch.exit_qualification; 7599 nested_vmx_vmexit(vcpu, exit_reason, 0, vcpu->arch.exit_qualification);
7602 vmcs12->guest_physical_address = fault->address; 7600 vmcs12->guest_physical_address = fault->address;
7603} 7601}
7604 7602
@@ -7636,7 +7634,9 @@ static void vmx_inject_page_fault_nested(struct kvm_vcpu *vcpu,
7636 7634
7637 /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */ 7635 /* TODO: also check PFEC_MATCH/MASK, not just EB.PF. */
7638 if (vmcs12->exception_bitmap & (1u << PF_VECTOR)) 7636 if (vmcs12->exception_bitmap & (1u << PF_VECTOR))
7639 nested_vmx_vmexit(vcpu); 7637 nested_vmx_vmexit(vcpu, to_vmx(vcpu)->exit_reason,
7638 vmcs_read32(VM_EXIT_INTR_INFO),
7639 vmcs_readl(EXIT_QUALIFICATION));
7640 else 7640 else
7641 kvm_inject_page_fault(vcpu, fault); 7641 kvm_inject_page_fault(vcpu, fault);
7642} 7642}
@@ -8191,7 +8191,9 @@ static void vmcs12_save_pending_event(struct kvm_vcpu *vcpu,
8191 * exit-information fields only. Other fields are modified by L1 with VMWRITE, 8191 * exit-information fields only. Other fields are modified by L1 with VMWRITE,
8192 * which already writes to vmcs12 directly. 8192 * which already writes to vmcs12 directly.
8193 */ 8193 */
8194static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12) 8194static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12,
8195 u32 exit_reason, u32 exit_intr_info,
8196 unsigned long exit_qualification)
8195{ 8197{
8196 /* update guest state fields: */ 8198 /* update guest state fields: */
8197 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12); 8199 vmcs12->guest_cr0 = vmcs12_guest_cr0(vcpu, vmcs12);
@@ -8282,10 +8284,10 @@ static void prepare_vmcs12(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
8282 8284
8283 /* update exit information fields: */ 8285 /* update exit information fields: */
8284 8286
8285 vmcs12->vm_exit_reason = to_vmx(vcpu)->exit_reason; 8287 vmcs12->vm_exit_reason = exit_reason;
8286 vmcs12->exit_qualification = vmcs_readl(EXIT_QUALIFICATION); 8288 vmcs12->exit_qualification = exit_qualification;
8287 8289
8288 vmcs12->vm_exit_intr_info = vmcs_read32(VM_EXIT_INTR_INFO); 8290 vmcs12->vm_exit_intr_info = exit_intr_info;
8289 if ((vmcs12->vm_exit_intr_info & 8291 if ((vmcs12->vm_exit_intr_info &
8290 (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) == 8292 (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) ==
8291 (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK)) 8293 (INTR_INFO_VALID_MASK | INTR_INFO_DELIVER_CODE_MASK))
@@ -8452,7 +8454,9 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
8452 * and modify vmcs12 to make it see what it would expect to see there if 8454 * and modify vmcs12 to make it see what it would expect to see there if
8453 * L2 was its real guest. Must only be called when in L2 (is_guest_mode()) 8455 * L2 was its real guest. Must only be called when in L2 (is_guest_mode())
8454 */ 8456 */
8455static void nested_vmx_vmexit(struct kvm_vcpu *vcpu) 8457static void nested_vmx_vmexit(struct kvm_vcpu *vcpu, u32 exit_reason,
8458 u32 exit_intr_info,
8459 unsigned long exit_qualification)
8456{ 8460{
8457 struct vcpu_vmx *vmx = to_vmx(vcpu); 8461 struct vcpu_vmx *vmx = to_vmx(vcpu);
8458 int cpu; 8462 int cpu;
@@ -8462,7 +8466,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
8462 WARN_ON_ONCE(vmx->nested.nested_run_pending); 8466 WARN_ON_ONCE(vmx->nested.nested_run_pending);
8463 8467
8464 leave_guest_mode(vcpu); 8468 leave_guest_mode(vcpu);
8465 prepare_vmcs12(vcpu, vmcs12); 8469 prepare_vmcs12(vcpu, vmcs12, exit_reason, exit_intr_info,
8470 exit_qualification);
8466 8471
8467 cpu = get_cpu(); 8472 cpu = get_cpu();
8468 vmx->loaded_vmcs = &vmx->vmcs01; 8473 vmx->loaded_vmcs = &vmx->vmcs01;
@@ -8513,7 +8518,7 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
8513static void vmx_leave_nested(struct kvm_vcpu *vcpu) 8518static void vmx_leave_nested(struct kvm_vcpu *vcpu)
8514{ 8519{
8515 if (is_guest_mode(vcpu)) 8520 if (is_guest_mode(vcpu))
8516 nested_vmx_vmexit(vcpu); 8521 nested_vmx_vmexit(vcpu, -1, 0, 0);
8517 free_nested(to_vmx(vcpu)); 8522 free_nested(to_vmx(vcpu));
8518} 8523}
8519 8524