author    Marcelo Tosatti <mtosatti@redhat.com>  2008-12-01 19:32:05 -0500
committer Avi Kivity <avi@redhat.com>            2008-12-31 09:55:44 -0500
commit    ad218f85e388e8ca816ff09d91c246cd014c53a8
tree      73fff9d1b3f01e760c2da8bc2276c1a74f4a36e4 /arch/x86/kvm/paging_tmpl.h
parent    6cffe8ca4a2adf1ac5003d9cad08fe4434d6eee0
KVM: MMU: prepopulate the shadow on invlpg
If the guest executes invlpg, peek into the pagetable and attempt to
prepopulate the shadow entry.

Also stop dirty fault updates from interfering with the fork detector.

2% improvement on RHEL3/AIM7.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
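For readers skimming the diff below, the control flow the patch adds to
FNAME(invlpg) can be modeled as a standalone sketch. The types and helpers
here are hypothetical stand-ins (in the kernel they correspond to the shadow
walk, kvm_read_guest_atomic() and kvm_mmu_pte_write()); only the decision
logic mirrors the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PT_PRESENT_MASK  (1ULL << 0)    /* x86 pte bit 0 */
#define PT_ACCESSED_MASK (1ULL << 5)    /* x86 pte bit 5 */

typedef uint64_t pt_element_t;
typedef uint64_t gpa_t;

/* Stub stand-ins so the sketch compiles; these are NOT kernel API. */
static gpa_t shadow_zap_and_locate(uint64_t gva)
{
        (void)gva;
        return 0x1000;  /* pretend the guest pte lives at gpa 0x1000 */
}

static bool read_guest_pte(gpa_t pte_gpa, pt_element_t *gpte)
{
        (void)pte_gpa;
        *gpte = PT_PRESENT_MASK | PT_ACCESSED_MASK;
        return true;
}

static void prepopulate_shadow(gpa_t pte_gpa, pt_element_t gpte)
{
        printf("prepopulate gpa=0x%llx gpte=0x%llx\n",
               (unsigned long long)pte_gpa, (unsigned long long)gpte);
}

static void invlpg_model(uint64_t gva)
{
        pt_element_t gpte;
        gpa_t pte_gpa;

        /* 1. Zap the last-level shadow pte for gva, remembering where the
         *    corresponding guest pte lives in guest physical memory. */
        pte_gpa = shadow_zap_and_locate(gva);
        if (pte_gpa == (gpa_t)-1)
                return;         /* nothing was shadowed at that address */

        /* 2. Peek at the guest pte; a failed read just skips prepopulation. */
        if (!read_guest_pte(pte_gpa, &gpte))
                return;

        /* 3. Re-instantiate only entries the guest demonstrably uses
         *    (present and accessed); anything else can fault in normally. */
        if ((gpte & PT_PRESENT_MASK) && (gpte & PT_ACCESSED_MASK))
                prepopulate_shadow(pte_gpa, gpte);
}

int main(void)
{
        invlpg_model(0xdeadb000);
        return 0;
}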
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--  arch/x86/kvm/paging_tmpl.h | 25
1 file changed, 24 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e644d81979b..d2064015421 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -82,6 +82,7 @@ struct shadow_walker {
        int *ptwrite;
        pfn_t pfn;
        u64 *sptep;
+       gpa_t pte_gpa;
 };
 
 static gfn_t gpte_to_gfn(pt_element_t gpte)
@@ -222,7 +223,7 @@ walk:
                if (ret)
                        goto walk;
                pte |= PT_DIRTY_MASK;
-               kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
+               kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte), 0);
                walker->ptes[walker->level - 1] = pte;
        }
 
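A note on the hunk above: this patch widens kvm_mmu_pte_write() with a
trailing flag (passed as 0 here) indicating whether the write was initiated
by the guest. The dirty-bit write-back above is KVM's own doing, so it must
not feed the write-flood heuristic, the "fork detector" from the commit
message, which unshadows guest page tables that see too many writes.
Schematically (names hypothetical, shape inferred from the commit message,
not copied from mmu.c):

void account_pt_write_flood(void);  /* hypothetical flood-detector hook */

void kvm_mmu_pte_write_model(int guest_initiated)
{
        if (guest_initiated)
                account_pt_write_flood();
        /* ... emulate the pte write either way ... */
}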
@@ -468,8 +469,15 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
                                      struct kvm_vcpu *vcpu, u64 addr,
                                      u64 *sptep, int level)
 {
+       struct shadow_walker *sw =
+               container_of(_sw, struct shadow_walker, walker);
 
        if (level == PT_PAGE_TABLE_LEVEL) {
+               struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+               sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
+               sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+
                if (is_shadow_present_pte(*sptep))
                        rmap_remove(vcpu->kvm, sptep);
                set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
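Why the pte_gpa arithmetic in the hunk above works: a last-level
kvm_mmu_page shadows exactly one guest page-table page, sp->gfn holds that
page's guest frame number, and a shadow entry's index within sp->spt equals
the guest pte's index within its page. A self-contained worked example with
8-byte ptes and made-up numbers:

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint64_t gfn   = 0x1234; /* sp->gfn: guest frame of the shadowed pt page */
        uint64_t index = 0x1f;   /* sptep - sp->spt: slot index within that page */
        uint64_t pte_gpa = (gfn << PAGE_SHIFT) + index * sizeof(uint64_t);

        /* 0x1234 << 12 = 0x1234000, 0x1f * 8 = 0xf8 -> prints 0x12340f8 */
        printf("pte_gpa = 0x%" PRIx64 "\n", pte_gpa);
        return 0;
}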
@@ -482,11 +490,26 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
 
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
+       pt_element_t gpte;
        struct shadow_walker walker = {
                .walker = { .entry = FNAME(shadow_invlpg_entry), },
+               .pte_gpa = -1,
        };
 
+       spin_lock(&vcpu->kvm->mmu_lock);
        walk_shadow(&walker.walker, vcpu, gva);
+       spin_unlock(&vcpu->kvm->mmu_lock);
+       if (walker.pte_gpa == -1)
+               return;
+       if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte,
+                                 sizeof(pt_element_t)))
+               return;
+       if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
+               if (mmu_topup_memory_caches(vcpu))
+                       return;
+               kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte,
+                                 sizeof(pt_element_t), 0);
+       }
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
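Two design points worth noting in the final hunk. First, the shadow walk
runs under mmu_lock, but the guest pte read and the prepopulating
kvm_mmu_pte_write() happen after the lock is dropped, presumably because
kvm_mmu_pte_write() acquires mmu_lock itself. The window between the atomic
read and the write-back appears architecturally harmless: a shadow pte
behaves like a TLB entry, so if the guest changes the pte again it must
issue another invlpg before relying on the new value anyway. Second, the
mmu_topup_memory_caches() check simply skips the optimization when the
preallocated MMU object caches cannot be refilled; invlpg itself must not
fail, and prepopulation is strictly optional.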