| author | Xiao Guangrong <xiaoguangrong@cn.fujitsu.com> | 2010-12-06 21:35:25 -0500 |
| committer | Avi Kivity <avi@redhat.com> | 2011-01-12 04:30:41 -0500 |
| commit | fb67e14fc90f18250259faf61a269320ea8e4d8f (patch) | |
| tree | 4bb294030aa3e43ca5dbf573f39a915452624665 | |
| parent | 2ec4739ddc889af11d09b3d5ca33687f1f3f1020 (diff) | |
KVM: MMU: retry #PF for softmmu
Retry a #PF for softmmu only when the current vcpu has the same cr3 as it did at the time the #PF occurred.
Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
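
For orientation, the guard this patch introduces can be boiled down to the following self-contained sketch (ordinary userspace C, not kernel code; the struct and function names below are invented for illustration): the cr3 in force when the async #PF was queued is recorded, and once the page is ready the fault is only replayed if the MMU mode is unchanged and, for the shadow MMU ("softmmu"), the vcpu is still running on that same cr3.

```c
/*
 * Simplified model of the retry rule added by this patch.
 * Not kernel code; names are made up for illustration only.
 */
#include <stdbool.h>
#include <stdio.h>

struct async_pf_work {
	unsigned long cr3;   /* cr3 captured when the #PF was queued */
	bool direct_map;     /* tdp (direct map) vs. shadow paging */
};

struct vcpu_state {
	unsigned long cr3;   /* vcpu's current cr3 */
	bool direct_map;
};

static bool should_retry_fault(const struct vcpu_state *vcpu,
			       const struct async_pf_work *work)
{
	/* The MMU mode must not have changed since the fault was queued. */
	if (vcpu->direct_map != work->direct_map)
		return false;

	/*
	 * For softmmu the guest page tables are keyed by cr3: a different
	 * cr3 means the recorded gva may now map elsewhere, so skip the retry.
	 */
	if (!vcpu->direct_map && vcpu->cr3 != work->cr3)
		return false;

	return true;
}

int main(void)
{
	struct async_pf_work work = { .cr3 = 0x1000, .direct_map = false };
	struct vcpu_state vcpu = { .cr3 = 0x2000, .direct_map = false };

	printf("retry after cr3 switch: %d\n", should_retry_fault(&vcpu, &work));
	vcpu.cr3 = 0x1000;
	printf("retry with same cr3:    %d\n", should_retry_fault(&vcpu, &work));
	return 0;
}
```

Run as-is, the first check refuses the retry (cr3 changed) and the second allows it, mirroring the condition the patch adds to kvm_arch_async_page_ready().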
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  1 +
-rw-r--r--  arch/x86/kvm/mmu.c              |  2 ++
-rw-r--r--  arch/x86/kvm/paging_tmpl.h      | 14 +++++++++-----
-rw-r--r--  arch/x86/kvm/x86.c              |  6 +++++-
4 files changed, 17 insertions, 6 deletions
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index aa1518d794cc..4461429957a9 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -593,6 +593,7 @@ struct kvm_x86_ops {
 struct kvm_arch_async_pf {
 	u32 token;
 	gfn_t gfn;
+	unsigned long cr3;
 	bool direct_map;
 };
 
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index d475b6b87dce..abda57fac659 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2608,9 +2608,11 @@ static int nonpaging_page_fault(struct kvm_vcpu *vcpu, gva_t gva,
 static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, gfn_t gfn)
 {
 	struct kvm_arch_async_pf arch;
+
 	arch.token = (vcpu->arch.apf.id++ << 12) | vcpu->vcpu_id;
 	arch.gfn = gfn;
 	arch.direct_map = vcpu->arch.mmu.direct_map;
+	arch.cr3 = vcpu->arch.mmu.get_cr3(vcpu);
 
 	return kvm_setup_async_pf(vcpu, gva, gfn, &arch);
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index 52b3e91918c6..146b681e6ab0 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -438,7 +438,8 @@ static void FNAME(pte_prefetch)(struct kvm_vcpu *vcpu, struct guest_walker *gw,
 static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 			 struct guest_walker *gw,
 			 int user_fault, int write_fault, int hlevel,
-			 int *ptwrite, pfn_t pfn, bool map_writable)
+			 int *ptwrite, pfn_t pfn, bool map_writable,
+			 bool prefault)
 {
 	unsigned access = gw->pt_access;
 	struct kvm_mmu_page *sp = NULL;
@@ -512,7 +513,7 @@ static u64 *FNAME(fetch)(struct kvm_vcpu *vcpu, gva_t addr,
 
 	mmu_set_spte(vcpu, it.sptep, access, gw->pte_access & access,
 		     user_fault, write_fault, dirty, ptwrite, it.level,
-		     gw->gfn, pfn, false, map_writable);
+		     gw->gfn, pfn, prefault, map_writable);
 	FNAME(pte_prefetch)(vcpu, gw, it.sptep);
 
 	return it.sptep;
@@ -568,8 +569,11 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	 */
 	if (!r) {
 		pgprintk("%s: guest page fault\n", __func__);
-		inject_page_fault(vcpu, &walker.fault);
-		vcpu->arch.last_pt_write_count = 0; /* reset fork detector */
+		if (!prefault) {
+			inject_page_fault(vcpu, &walker.fault);
+			/* reset fork detector */
+			vcpu->arch.last_pt_write_count = 0;
+		}
 		return 0;
 	}
 
@@ -599,7 +603,7 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
 	trace_kvm_mmu_audit(vcpu, AUDIT_PRE_PAGE_FAULT);
 	kvm_mmu_free_some_pages(vcpu);
 	sptep = FNAME(fetch)(vcpu, addr, &walker, user_fault, write_fault,
-			     level, &write_pt, pfn, map_writable);
+			     level, &write_pt, pfn, map_writable, prefault);
 	(void)sptep;
 	pgprintk("%s: shadow pte %p %llx ptwrite %d\n", __func__,
 		 sptep, *sptep, write_pt);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8b4d5fc08012..cd71d210c409 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6182,7 +6182,7 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 {
 	int r;
 
-	if (!vcpu->arch.mmu.direct_map || !work->arch.direct_map ||
+	if ((vcpu->arch.mmu.direct_map != work->arch.direct_map) ||
 	      is_error_page(work->page))
 		return;
 
@@ -6190,6 +6190,10 @@ void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu, struct kvm_async_pf *work)
 	if (unlikely(r))
 		return;
 
+	if (!vcpu->arch.mmu.direct_map &&
+	      work->arch.cr3 != vcpu->arch.mmu.get_cr3(vcpu))
+		return;
+
 	vcpu->arch.mmu.page_fault(vcpu, work->gva, 0, true);
 }
 
