Diffstat (limited to 'arch/x86/kvm/x86.c')
-rw-r--r--  arch/x86/kvm/x86.c  47
1 file changed, 47 insertions, 0 deletions
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e24edbc7f2ec..7ba1ab73fd03 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4836,6 +4836,50 @@ static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t gva)
 	return false;
 }
 
+static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
+			      unsigned long cr2, int emulation_type)
+{
+	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
+	unsigned long last_retry_eip, last_retry_addr, gpa = cr2;
+
+	last_retry_eip = vcpu->arch.last_retry_eip;
+	last_retry_addr = vcpu->arch.last_retry_addr;
+
+	/*
+	 * If the emulation is caused by #PF and it is non-page_table
+	 * writing instruction, it means the VM-EXIT is caused by shadow
+	 * page protected, we can zap the shadow page and retry this
+	 * instruction directly.
+	 *
+	 * Note: if the guest uses a non-page-table modifying instruction
+	 * on the PDE that points to the instruction, then we will unmap
+	 * the instruction and go to an infinite loop. So, we cache the
+	 * last retried eip and the last fault address, if we meet the eip
+	 * and the address again, we can break out of the potential infinite
+	 * loop.
+	 */
+	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;
+
+	if (!(emulation_type & EMULTYPE_RETRY))
+		return false;
+
+	if (x86_page_table_writing_insn(ctxt))
+		return false;
+
+	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
+		return false;
+
+	vcpu->arch.last_retry_eip = ctxt->eip;
+	vcpu->arch.last_retry_addr = cr2;
+
+	if (!vcpu->arch.mmu.direct_map)
+		gpa = kvm_mmu_gva_to_gpa_write(vcpu, cr2, NULL);
+
+	kvm_mmu_unprotect_page(vcpu->kvm, gpa >> PAGE_SHIFT);
+
+	return true;
+}
+
 int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 			    unsigned long cr2,
 			    int emulation_type,
@@ -4877,6 +4921,9 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
 		return EMULATE_DONE;
 	}
 
+	if (retry_instruction(ctxt, cr2, emulation_type))
+		return EMULATE_DONE;
+
 	/* this is needed for vmware backdoor interface to work since it
 	   changes registers values during IO operation */
 	if (vcpu->arch.emulate_regs_need_sync_from_vcpu) {