author    Gleb Natapov <gleb@redhat.com>    2010-10-17 12:13:42 -0400
committer Avi Kivity <avi@redhat.com>       2011-01-12 04:23:06 -0500
commit    56028d0861e48f7cc9c573d79f2d8a0a933a2bba (patch)
tree      fa42d19f235c585c6514337a26db9641780ac759 /arch/x86/kvm/paging_tmpl.h
parent    af585b921e5d1e919947c4b1164b59507fe7cd7b (diff)
KVM: Retry fault before vmentry
When a page is swapped in, it is mapped into guest memory only after the
guest tries to access it again and generates another fault. To save that
second fault we can map the page immediately, since we know the guest is
going to access it. Do this only when tdp is enabled for now; the shadow
paging case is more complicated, as the CR[034] and EFER registers would
have to be switched before doing the mapping and then switched back.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
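To illustrate the idea, here is a minimal sketch of the completion path
this change enables: once the page backing an async page fault is
resident, KVM can retry the guest fault itself before the next vmentry,
so the guest never takes the second fault. The shape below follows the
KVM code of this era (kvm_arch_async_page_ready, work->gva, the
direct_map check), but the body is an illustrative assumption, not a
quote of the patch:

/*
 * Illustrative sketch (not the exact patch): once the page backing an
 * async page fault is resident, map it into the guest before
 * re-entering, instead of waiting for the guest to fault again.
 */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/*
	 * Only with tdp: shadow paging would require switching the
	 * guest's CR[034] and EFER around the mapping, as the commit
	 * message notes.
	 */
	if (!vcpu->arch.mmu.direct_map)
		return;

	/*
	 * no_apf = true: map the page synchronously; do not queue
	 * another async page fault for this address.
	 */
	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}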
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c45376dd041..d6b281e989b 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -527,8 +527,8 @@ out_gpte_changed:
  * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
  *          a negative value on error.
  */
 -static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 -			     u32 error_code)
 +static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 +			     bool no_apf)
  {
  	int write_fault = error_code & PFERR_WRITE_MASK;
  	int user_fault = error_code & PFERR_USER_MASK;
@@ -569,7 +569,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
  	mmu_seq = vcpu->kvm->mmu_notifier_seq;
  	smp_rmb();
 
 -	if (try_async_pf(vcpu, walker.gfn, addr, &pfn))
 +	if (try_async_pf(vcpu, no_apf, walker.gfn, addr, &pfn))
  		return 0;
 
  	/* mmio */
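For context, the matching change on the other side of this call lives
outside this file's diff: try_async_pf gains the no_apf flag threaded
through here. A hedged sketch of its shape, condensed rather than
quoted from the patch (gfn_to_pfn_async and kvm_arch_setup_async_pf
are the helpers of this era; the exact body differs):

/*
 * Sketch of the callee's new signature: when no_apf is set, never
 * queue an asynchronous page fault; fall back to a synchronous
 * gfn_to_pfn() so the retry before vmentry always completes.
 */
static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
			 gva_t gva, pfn_t *pfn)
{
	bool async = false;

	*pfn = gfn_to_pfn_async(vcpu->kvm, gfn, &async);
	if (!async)
		return false;	/* page was resident, *pfn is valid */

	if (!no_apf && kvm_arch_setup_async_pf(vcpu, gva, gfn))
		return true;	/* fault completes asynchronously */

	*pfn = gfn_to_pfn(vcpu->kvm, gfn);	/* synchronous fallback */
	return false;
}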