aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/include/asm/kvm_host.h
diff options
context:
space:
mode:
authorGleb Natapov <gleb@redhat.com>2010-10-17 12:13:42 -0400
committerAvi Kivity <avi@redhat.com>2011-01-12 04:23:06 -0500
commit56028d0861e48f7cc9c573d79f2d8a0a933a2bba (patch)
treefa42d19f235c585c6514337a26db9641780ac759 /arch/x86/include/asm/kvm_host.h
parentaf585b921e5d1e919947c4b1164b59507fe7cd7b (diff)
KVM: Retry fault before vmentry
When a page is swapped in, it is mapped into guest memory only after the guest tries to access it again and generates another fault. To save this fault we can map it immediately, since we know that the guest is going to access the page. Do it only when tdp is enabled for now; the shadow paging case is more complicated — the CR[034] and EFER registers should be switched before doing the mapping and then switched back. Acked-by: Rik van Riel <riel@redhat.com> Signed-off-by: Gleb Natapov <gleb@redhat.com> Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'arch/x86/include/asm/kvm_host.h')
-rw-r--r--arch/x86/include/asm/kvm_host.h4
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b5f4c1a36d65..c3076bcf5ef7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -241,7 +241,7 @@ struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, bool no_apf);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
@@ -815,6 +815,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				       struct kvm_async_pf *work);
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				 struct kvm_async_pf *work);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 #endif /* _ASM_X86_KVM_HOST_H */