author	Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-09-22 05:02:48 -0400
committer	Avi Kivity <avi@redhat.com>	2011-12-27 04:16:50 -0500
commit	1cb3f3ae5a3855ba430430706da4201ace1d6ec4 (patch)
tree	1ff844904958cf669c86650da4601eb392f14a91 /arch/x86/include/asm/kvm_host.h
parent	d5ae7ce835cc89556dc18e2070e754f026402efa (diff)
KVM: x86: retry non-page-table writing instructions
If the emulation is caused by #PF and the faulting instruction is not a page-table-writing instruction, the VM-EXIT was caused by shadow-page write protection; we can zap the shadow page and retry the instruction directly.

The idea is from Avi.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
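The header changes below only declare the new per-vcpu state (last_retry_eip, last_retry_addr), the EMULTYPE_RETRY flag, and the kvm_mmu_unprotect_page() prototype; the retry decision itself lives on the emulation path in arch/x86/kvm/x86.c. A minimal sketch of that logic, presumably along the following lines, shows how the pieces fit together. The function name retry_instruction() and the helpers emul_to_vcpu(), x86_page_table_writing_insn(), and gpa_to_gfn() are assumptions taken from the surrounding KVM code, not part of this header diff:

/*
 * Sketch only: returns true if we can skip emulation by zapping the
 * write-protected shadow page and re-executing the guest instruction.
 */
static bool retry_instruction(struct x86_emulate_ctxt *ctxt,
			      unsigned long cr2, int emulation_type)
{
	struct kvm_vcpu *vcpu = emul_to_vcpu(ctxt);
	unsigned long last_retry_eip = vcpu->arch.last_retry_eip;
	unsigned long last_retry_addr = vcpu->arch.last_retry_addr;

	vcpu->arch.last_retry_eip = vcpu->arch.last_retry_addr = 0;

	/* The caller must opt in to the retry fast path. */
	if (!(emulation_type & EMULTYPE_RETRY))
		return false;

	/* Instructions that write page tables still need full emulation. */
	if (x86_page_table_writing_insn(ctxt))
		return false;

	/*
	 * If we already retried this eip/address pair, the instruction may
	 * be unmapping the page table that maps the instruction itself;
	 * bail out to break a potential infinite retry loop.
	 */
	if (ctxt->eip == last_retry_eip && last_retry_addr == cr2)
		return false;

	vcpu->arch.last_retry_eip = ctxt->eip;
	vcpu->arch.last_retry_addr = cr2;

	/*
	 * Zap the write-protected shadow page and re-enter the guest.
	 * (Simplified: with shadow paging, cr2 would first need a
	 * gva-to-gpa translation before computing the gfn.)
	 */
	kvm_mmu_unprotect_page(vcpu->kvm, gpa_to_gfn(cr2));

	return true;
}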
Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--	arch/x86/include/asm/kvm_host.h	5 +++++
1 file changed, 5 insertions(+), 0 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b4973f4dab98..4ceefa9567ed 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -444,6 +444,9 @@ struct kvm_vcpu_arch {
 
 	cpumask_var_t wbinvd_dirty_mask;
 
+	unsigned long last_retry_eip;
+	unsigned long last_retry_addr;
+
 	struct {
 		bool halted;
 		gfn_t gfns[roundup_pow_of_two(ASYNC_PF_PER_VCPU)];
@@ -692,6 +695,7 @@ enum emulation_result {
 #define EMULTYPE_NO_DECODE	    (1 << 0)
 #define EMULTYPE_TRAP_UD	    (1 << 1)
 #define EMULTYPE_SKIP		    (1 << 2)
+#define EMULTYPE_RETRY		    (1 << 3)
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, unsigned long cr2,
 			    int emulation_type, void *insn, int insn_len);
 
@@ -756,6 +760,7 @@ void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 		       const u8 *new, int bytes,
 		       bool guest_initiated);
+int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
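For illustration only, a page-fault handler that wants the zap-and-retry fast path would request it by setting the new flag when invoking the emulator; the exact call site is an assumption, since this diff changes only the header:

	/* Hypothetical caller: allow the emulator to zap the shadow
	 * page and retry instead of emulating the write. */
	r = x86_emulate_instruction(vcpu, cr2, EMULTYPE_RETRY, insn, insn_len);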