author    Abel Gordon <abelg@il.ibm.com>  2013-04-18 07:39:25 -0400
committer Gleb Natapov <gleb@redhat.com>  2013-04-22 03:52:45 -0400
commit    012f83cb2f8d7b9b7ad3b65e7e53a9365a357014
tree      5bd38e8f9d612c1dae6bc2da2d17316d178dd93e /arch/x86
parent    c3114420d1c7a6075fb0cfdc69b567423e5cfc13
KVM: nVMX: Synchronize VMCS12 content with the shadow vmcs
Synchronize between the VMCS12 software controlled structure and the
processor-specific shadow vmcs

Signed-off-by: Abel Gordon <abelg@il.ibm.com>
Reviewed-by: Orit Wasserman <owasserm@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
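In outline, the patch keeps the two copies coherent by copying in whichever direction is about to be consumed: shadow-to-vmcs12 before KVM itself reads vmcs12 (nested VM-entry, releasing the current vmcs12), and vmcs12-to-shadow on the next hardware entry after KVM has updated vmcs12 on L1's behalf (emulated VM-exit, entry failure), with the sync_shadow_vmcs flag deferring that copy to vmx_vcpu_run. The standalone sketch below only illustrates that flow: the flag and the copy-helper names mirror the patch, but the data structures and entry points are simplified stand-ins, not the real KVM code.

    #include <stdbool.h>
    #include <stdio.h>

    /* Simplified stand-ins for the two copies of the nested VMCS state.
     * In KVM these are the guest-memory vmcs12 and the processor-specific
     * shadow VMCS; here they are plain integers so the flow can be followed.
     */
    static int vmcs12_copy;        /* software-controlled structure (vmcs12) */
    static int shadow_vmcs_copy;   /* hardware shadow vmcs                   */

    static bool enable_shadow_vmcs = true;  /* capability/knob               */
    static bool sync_shadow_vmcs;  /* vmcs12 is newer than the shadow vmcs   */

    /* Stand-in for copy_shadow_to_vmcs12(): flush guest VMWRITEs to vmcs12. */
    static void copy_shadow_to_vmcs12(void)
    {
        vmcs12_copy = shadow_vmcs_copy;
    }

    /* Stand-in for copy_vmcs12_to_shadow(): publish KVM's updates to L1. */
    static void copy_vmcs12_to_shadow(void)
    {
        shadow_vmcs_copy = vmcs12_copy;
    }

    /* Before KVM reads vmcs12 (nested VM-entry, releasing current vmcs12). */
    static void before_kvm_reads_vmcs12(void)
    {
        if (enable_shadow_vmcs) {
            copy_shadow_to_vmcs12();
            sync_shadow_vmcs = false;
        }
    }

    /* After KVM writes vmcs12 on L1's behalf (emulated VM-exit, failure). */
    static void after_kvm_writes_vmcs12(void)
    {
        if (enable_shadow_vmcs)
            sync_shadow_vmcs = true;
    }

    /* On the next entry into L1 (the hook the patch adds to vmx_vcpu_run). */
    static void before_resuming_l1(void)
    {
        if (sync_shadow_vmcs) {
            copy_vmcs12_to_shadow();
            sync_shadow_vmcs = false;
        }
    }

    int main(void)
    {
        shadow_vmcs_copy = 1;        /* L1 wrote a field via VMWRITE         */
        before_kvm_reads_vmcs12();   /* nested entry: KVM sees the new value */
        vmcs12_copy = 2;             /* KVM emulates a VM-exit into vmcs12   */
        after_kvm_writes_vmcs12();
        before_resuming_l1();        /* shadow refreshed before L1 runs      */
        printf("shadow=%d sync=%d\n", shadow_vmcs_copy, sync_shadow_vmcs);
        return 0;
    }

Deferring the vmcs12-to-shadow copy behind the flag means several vmcs12 updates between hardware entries cost only one copy, which matches the patch performing that copy once in vmx_vcpu_run rather than at every update site.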
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kvm/vmx.c  32
1 file changed, 32 insertions, 0 deletions
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 8dc59aaab3db..c5baecc37c98 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -356,6 +356,11 @@ struct nested_vmx {
 	struct page *current_vmcs12_page;
 	struct vmcs12 *current_vmcs12;
 	struct vmcs *current_shadow_vmcs;
+	/*
+	 * Indicates if the shadow vmcs must be updated with the
+	 * data hold by vmcs12
+	 */
+	bool sync_shadow_vmcs;
 
 	/* vmcs02_list cache of VMCSs recently used to run L2 guests */
 	struct list_head vmcs02_pool;
@@ -5611,6 +5616,14 @@ static int nested_vmx_check_permission(struct kvm_vcpu *vcpu)
 
 static inline void nested_release_vmcs12(struct vcpu_vmx *vmx)
 {
+	if (enable_shadow_vmcs) {
+		if (vmx->nested.current_vmcs12 != NULL) {
+			/* copy to memory all shadowed fields in case
+			   they were modified */
+			copy_shadow_to_vmcs12(vmx);
+			vmx->nested.sync_shadow_vmcs = false;
+		}
+	}
 	kunmap(vmx->nested.current_vmcs12_page);
 	nested_release_page(vmx->nested.current_vmcs12_page);
 }
@@ -5739,6 +5752,10 @@ static void nested_vmx_failValid(struct kvm_vcpu *vcpu,
 			    X86_EFLAGS_SF | X86_EFLAGS_OF))
 			| X86_EFLAGS_ZF);
 	get_vmcs12(vcpu)->vm_instruction_error = vm_instruction_error;
+	/*
+	 * We don't need to force a shadow sync because
+	 * VM_INSTRUCTION_ERROR is not shadowed
+	 */
 }
 
 /* Emulate the VMCLEAR instruction */
@@ -6137,6 +6154,9 @@ static int handle_vmptrld(struct kvm_vcpu *vcpu)
 		vmx->nested.current_vmptr = vmptr;
 		vmx->nested.current_vmcs12 = new_vmcs12;
 		vmx->nested.current_vmcs12_page = page;
+		if (enable_shadow_vmcs) {
+			vmx->nested.sync_shadow_vmcs = true;
+		}
 	}
 
 	nested_vmx_succeed(vcpu);
@@ -6895,6 +6915,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	if (vmx->emulation_required)
 		return;
 
+	if (vmx->nested.sync_shadow_vmcs) {
+		copy_vmcs12_to_shadow(vmx);
+		vmx->nested.sync_shadow_vmcs = false;
+	}
+
 	if (test_bit(VCPU_REGS_RSP, (unsigned long *)&vcpu->arch.regs_dirty))
 		vmcs_writel(GUEST_RSP, vcpu->arch.regs[VCPU_REGS_RSP]);
 	if (test_bit(VCPU_REGS_RIP, (unsigned long *)&vcpu->arch.regs_dirty))
@@ -7504,6 +7529,9 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
 	skip_emulated_instruction(vcpu);
 	vmcs12 = get_vmcs12(vcpu);
 
+	if (enable_shadow_vmcs)
+		copy_shadow_to_vmcs12(vmx);
+
 	/*
 	 * The nested entry process starts with enforcing various prerequisites
 	 * on vmcs12 as required by the Intel SDM, and act appropriately when
@@ -7950,6 +7978,8 @@ static void nested_vmx_vmexit(struct kvm_vcpu *vcpu)
 		nested_vmx_failValid(vcpu, vmcs_read32(VM_INSTRUCTION_ERROR));
 	} else
 		nested_vmx_succeed(vcpu);
+	if (enable_shadow_vmcs)
+		vmx->nested.sync_shadow_vmcs = true;
 }
 
 /*
@@ -7967,6 +7997,8 @@ static void nested_vmx_entry_failure(struct kvm_vcpu *vcpu,
 	vmcs12->vm_exit_reason = reason | VMX_EXIT_REASONS_FAILED_VMENTRY;
 	vmcs12->exit_qualification = qualification;
 	nested_vmx_succeed(vcpu);
+	if (enable_shadow_vmcs)
+		to_vmx(vcpu)->nested.sync_shadow_vmcs = true;
 }
 
 static int vmx_check_intercept(struct kvm_vcpu *vcpu,