author    Gleb Natapov <gleb@redhat.com>  2010-10-17 12:13:42 -0400
committer Avi Kivity <avi@redhat.com>     2011-01-12 04:23:06 -0500
commit    56028d0861e48f7cc9c573d79f2d8a0a933a2bba (patch)
tree      fa42d19f235c585c6514337a26db9641780ac759 /arch/x86
parent    af585b921e5d1e919947c4b1164b59507fe7cd7b (diff)
KVM: Retry fault before vmentry
When a page is swapped in, it is mapped into guest memory only after the
guest tries to access it again and generates another fault. To save this
second fault we can map the page immediately, since we know the guest is
going to access it. Do this only when tdp is enabled for now; the shadow
paging case is more complicated, because the CR[034] and EFER registers
would have to be switched before doing the mapping and then switched back.

Acked-by: Rik van Riel <riel@redhat.com>
Signed-off-by: Gleb Natapov <gleb@redhat.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
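For orientation: this patch adds only the arch/x86 side; the generic
completion path that invokes the new kvm_arch_async_page_ready() hook
lives in virt/kvm/async_pf.c and is outside this diffstat. A minimal
sketch of what that caller is assumed to look like follows — the list
handling and cleanup here are simplified assumptions, not code from
this patch:

	void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu)
	{
		struct kvm_async_pf *work;

		if (list_empty_careful(&vcpu->async_pf.done))
			return;

		spin_lock(&vcpu->async_pf.lock);
		work = list_first_entry(&vcpu->async_pf.done,
					typeof(*work), link);
		list_del(&work->link);
		spin_unlock(&vcpu->async_pf.lock);

		/*
		 * New with this patch: map the swapped-in page into the
		 * guest before the next vmentry, so the guest does not
		 * take a second fault when it retries the access.
		 * kvm_arch_async_page_ready() re-runs mmu.page_fault()
		 * with no_apf == true so the retry cannot itself be
		 * turned into another async fault.
		 */
		kvm_arch_async_page_ready(vcpu, work);
		kvm_arch_async_page_present(vcpu, work);

		/* ... queue bookkeeping and freeing of 'work' omitted ... */
	}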
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h   4
-rw-r--r--  arch/x86/kvm/mmu.c               16
-rw-r--r--  arch/x86/kvm/paging_tmpl.h        6
-rw-r--r--  arch/x86/kvm/x86.c               14
4 files changed, 28 insertions, 12 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b5f4c1a36d65..c3076bcf5ef7 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -241,7 +241,7 @@ struct kvm_mmu {
 	void (*new_cr3)(struct kvm_vcpu *vcpu);
 	void (*set_cr3)(struct kvm_vcpu *vcpu, unsigned long root);
 	unsigned long (*get_cr3)(struct kvm_vcpu *vcpu);
-	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err);
+	int (*page_fault)(struct kvm_vcpu *vcpu, gva_t gva, u32 err, bool no_apf);
 	void (*inject_page_fault)(struct kvm_vcpu *vcpu);
 	void (*free)(struct kvm_vcpu *vcpu);
 	gpa_t (*gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t gva, u32 access,
@@ -815,6 +815,8 @@ void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
 				       struct kvm_async_pf *work);
 void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
 				 struct kvm_async_pf *work);
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
+			       struct kvm_async_pf *work);
 extern bool kvm_find_async_pf_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
 
 #endif /* _ASM_X86_KVM_HOST_H */
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 4ab04de5a76a..b2c60986a7ce 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2570,7 +2570,7 @@ static gpa_t nonpaging_gva_to_gpa_nested(struct kvm_vcpu *vcpu, gva_t vaddr,
 }
 
 static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
-				u32 error_code)
+				u32 error_code, bool no_apf)
 {
 	gfn_t gfn;
 	int r;
@@ -2606,8 +2606,8 @@ static bool can_do_async_pf(struct kvm_vcpu *vcpu)
 	return kvm_x86_ops->interrupt_allowed(vcpu);
 }
 
-static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
-			 pfn_t *pfn)
+static bool try_async_pf(struct kvm_vcpu *vcpu, bool no_apf, gfn_t gfn,
+			 gva_t gva, pfn_t *pfn)
 {
 	bool async;
 
@@ -2618,7 +2618,7 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
 
 	put_page(pfn_to_page(*pfn));
 
-	if (can_do_async_pf(vcpu)) {
+	if (!no_apf && can_do_async_pf(vcpu)) {
 		trace_kvm_try_async_get_page(async, *pfn);
 		if (kvm_find_async_pf_gfn(vcpu, gfn)) {
 			trace_kvm_async_pf_doublefault(gva, gfn);
@@ -2633,8 +2633,8 @@ static bool try_async_pf(struct kvm_vcpu *vcpu, gfn_t gfn, gva_t gva,
 	return false;
 }
 
-static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
-			  u32 error_code)
+static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa, u32 error_code,
+			  bool no_apf)
 {
 	pfn_t pfn;
 	int r;
@@ -2656,7 +2656,7 @@ static int tdp_page_fault(struct kvm_vcpu *vcpu, gva_t gpa,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, gfn, gpa, &pfn))
+	if (try_async_pf(vcpu, no_apf, gfn, gpa, &pfn))
 		return 0;
 
 	/* mmio */
@@ -3319,7 +3319,7 @@ int kvm_mmu_page_fault(struct kvm_vcpu *vcpu, gva_t cr2, u32 error_code)
 	int r;
 	enum emulation_result er;
 
-	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code);
+	r = vcpu->arch.mmu.page_fault(vcpu, cr2, error_code, false);
 	if (r < 0)
 		goto out;
 
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index c45376dd041a..d6b281e989b1 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -527,8 +527,8 @@ out_gpte_changed:
  * Returns: 1 if we need to emulate the instruction, 0 otherwise, or
  *	    a negative value on error.
  */
-static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
-			     u32 error_code)
+static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
+			     bool no_apf)
 {
 	int write_fault = error_code & PFERR_WRITE_MASK;
 	int user_fault = error_code & PFERR_USER_MASK;
@@ -569,7 +569,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr,
 	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 	smp_rmb();
 
-	if (try_async_pf(vcpu, walker.gfn, addr, &pfn))
+	if (try_async_pf(vcpu, no_apf, walker.gfn, addr, &pfn))
 		return 0;
 
 	/* mmio */
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 3cd4d091c2f3..71beb27597fd 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6138,6 +6138,20 @@ void kvm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
 }
 EXPORT_SYMBOL_GPL(kvm_set_rflags);
 
+void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
+{
+	int r;
+
+	if (!vcpu->arch.mmu.direct_map || is_error_page(work->page))
+		return;
+
+	r = kvm_mmu_reload(vcpu);
+	if (unlikely(r))
+		return;
+
+	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
+}
+
 static inline u32 kvm_async_pf_hash_fn(gfn_t gfn)
 {
 	return hash_32(gfn & 0xffffffff, order_base_2(ASYNC_PF_PER_VCPU));