author		Gleb Natapov <gleb@redhat.com>	2010-10-17 12:13:42 -0400
committer	Avi Kivity <avi@redhat.com>	2011-01-12 04:23:06 -0500
commit		56028d0861e48f7cc9c573d79f2d8a0a933a2bba (patch)
tree		fa42d19f235c585c6514337a26db9641780ac759 /arch/x86/kvm/mmu.c
parent		af585b921e5d1e919947c4b1164b59507fe7cd7b (diff)
KVM: Retry fault before vmentry
When a page is swapped in, it is mapped into guest memory only after the guest tries to access it again and generates another fault. To save this second fault we can map the page immediately, since we know the guest is going to access it. Do it only when tdp is enabled for now; the shadow paging case is more complicated, because the CR[034] and EFER registers would have to be switched before doing the mapping and then switched back.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
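Only the arch/x86/kvm/mmu.c half of the commit is shown below; it threads a new no_apf ("no async page fault") flag through the fault handlers. For orientation, here is a minimal sketch of the other half of the idea: the retry issued from the async-pf completion path before the next vmentry. That caller lives outside this file and is not covered by this diffstat, so the function name, guard conditions, and field names below are illustrative assumptions, not the commit's verbatim code:

/* Illustrative sketch: prefault the just-swapped-in page before vmentry,
 * so the guest does not have to take a second fault to map it.
 * The real caller sits outside mmu.c and is not part of this diffstat.
 */
void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
{
	/* Only for tdp (direct map): shadow paging would need CR[034] and
	 * EFER switched around the mapping, so it is skipped for now. */
	if (!vcpu->arch.mmu.direct_map || is_error_page(work->page))
		return;

	/* Make sure a valid root is in place before walking the MMU. */
	if (unlikely(kvm_mmu_reload(vcpu)))
		return;

	/* no_apf = true: the retry itself must not queue yet another
	 * async page fault for the same address. */
	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
}

The final hunk, where kvm_mmu_page_fault() passes false, keeps the ordinary guest-triggered path eligible for async handling; only the internal retry opts out.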
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--	arch/x86/kvm/mmu.c	16
1 file changed, 8 insertions, 8 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4ab04de5a76a..b2c60986a7ce 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2570,7 +2570,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-				u32 error_code)
+				u32 error_code, bool no_apf)
 {
 	gfn_t gfn;
 	int r;
@@ -2606,8 +2606,8 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
-			 pfn_t *pfn)
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+			 gva_t gva, pfn_t *pfn)
 {
 	bool async;
 
@@ -2618,7 +2618,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
 
 	put_page(pfn_to_page(*pfn));
 
-	if (can_do_async_pf(vcpu)) {
+	if (!no_apf && can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(async, *pfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
@@ -2633,8 +2633,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
 	return false;
 }
 
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
-			  u32 error_code)
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+			  bool no_apf)
 {
 	pfn_t pfn;
 	int r;
@@ -2656,7 +2656,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, gfn, gpa, &pfn))
+	if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn))
 		return 0;
 
 	/* mmio */
@@ -3319,7 +3319,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 	int r;
 	enum emulation_result er;
 
-	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
 	if (r < 0)
 		goto out;
 