author    Marcelo Tosatti <mtosatti@redhat.com>  2008-12-01 19:32:05 -0500
committer Avi Kivity <avi@redhat.com>            2008-12-31 09:55:44 -0500
commit    ad218f85e388e8ca816ff09d91c246cd014c53a8 (patch)
tree      73fff9d1b3f01e760c2da8bc2276c1a74f4a36e4 /arch/x86
parent    6cffe8ca4a2adf1ac5003d9cad08fe4434d6eee0 (diff)
KVM: MMU: prepopulate the shadow on invlpg
If the guest executes invlpg, peek into the pagetable and attempt to
prepopulate the shadow entry.

Also stop dirty fault updates from interfering with the fork detector.

2% improvement on RHEL3/AIM7.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
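For orientation, here is a condensed sketch of the new invlpg path this patch
introduces (all identifiers are taken from the paging_tmpl.h and mmu.c hunks
below; the FNAME() template macro and error handling are simplified, so treat
this as an outline rather than the literal code):

    static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
    {
            pt_element_t gpte;
            struct shadow_walker walker = {
                    .walker = { .entry = FNAME(shadow_invlpg_entry), },
                    .pte_gpa = -1,   /* filled in by the walker callback */
            };

            /* Zap the shadow pte under mmu_lock; the callback also records
             * the gpa of the guest pte mapping gva into walker.pte_gpa. */
            spin_lock(&vcpu->kvm->mmu_lock);
            walk_shadow(&walker.walker, vcpu, gva);
            spin_unlock(&vcpu->kvm->mmu_lock);

            if (walker.pte_gpa == -1)
                    return;
            /* Peek at the guest pte outside the lock ... */
            if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte,
                                      sizeof(pt_element_t)))
                    return;
            /* ... and prepopulate the shadow entry if it is usable.  Passing
             * guest_initiated == 0 keeps this update out of the pte-write-flood
             * ("fork detector") heuristic in kvm_mmu_pte_write(). */
            if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
                    if (mmu_topup_memory_caches(vcpu))
                            return;
                    kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte,
                                      sizeof(pt_element_t), 0);
            }
    }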
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/include/asm/kvm_host.h |  3
-rw-r--r--  arch/x86/kvm/mmu.c              | 25
-rw-r--r--  arch/x86/kvm/paging_tmpl.h      | 25
-rw-r--r--  arch/x86/kvm/x86.c              |  2
4 files changed, 40 insertions(+), 15 deletions(-)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 65b1ed295698..97215a458e5f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -602,7 +602,8 @@ unsigned long segment_base(u16 selector);
 
 void kvm_mmu_flush_tlb(struct kvm_vcpu *vcpu);
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                       const u8 *new, int bytes);
+                       const u8 *new, int bytes,
+                       bool guest_initiated);
 int kvm_mmu_unprotect_page_virt(struct kvm_vcpu *vcpu, gva_t gva);
 void __kvm_mmu_free_some_pages(struct kvm_vcpu *vcpu);
 int kvm_mmu_load(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cbac9e4b156f..863baf70506e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2441,7 +2441,8 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
 }
 
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
-                       const u8 *new, int bytes)
+                       const u8 *new, int bytes,
+                       bool guest_initiated)
 {
         gfn_t gfn = gpa >> PAGE_SHIFT;
         struct kvm_mmu_page *sp;
@@ -2467,15 +2468,17 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
         kvm_mmu_free_some_pages(vcpu);
         ++vcpu->kvm->stat.mmu_pte_write;
         kvm_mmu_audit(vcpu, "pre pte write");
-        if (gfn == vcpu->arch.last_pt_write_gfn
-            && !last_updated_pte_accessed(vcpu)) {
-                ++vcpu->arch.last_pt_write_count;
-                if (vcpu->arch.last_pt_write_count >= 3)
-                        flooded = 1;
-        } else {
-                vcpu->arch.last_pt_write_gfn = gfn;
-                vcpu->arch.last_pt_write_count = 1;
-                vcpu->arch.last_pte_updated = NULL;
+        if (guest_initiated) {
+                if (gfn == vcpu->arch.last_pt_write_gfn
+                    && !last_updated_pte_accessed(vcpu)) {
+                        ++vcpu->arch.last_pt_write_count;
+                        if (vcpu->arch.last_pt_write_count >= 3)
+                                flooded = 1;
+                } else {
+                        vcpu->arch.last_pt_write_gfn = gfn;
+                        vcpu->arch.last_pt_write_count = 1;
+                        vcpu->arch.last_pte_updated = NULL;
+                }
         }
         index = kvm_page_table_hashfn(gfn);
         bucket = &vcpu->kvm->arch.mmu_page_hash[index];
@@ -2615,9 +2618,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
 
 void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
 {
-        spin_lock(&vcpu->kvm->mmu_lock);
         vcpu->arch.mmu.invlpg(vcpu, gva);
-        spin_unlock(&vcpu->kvm->mmu_lock);
         kvm_mmu_flush_tlb(vcpu);
         ++vcpu->stat.invlpg;
 }
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index e644d81979b6..d20640154216 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -82,6 +82,7 @@ struct shadow_walker {
         int *ptwrite;
         pfn_t pfn;
         u64 *sptep;
+        gpa_t pte_gpa;
 };
 
 static gfn_t gpte_to_gfn(pt_element_t gpte)
@@ -222,7 +223,7 @@ walk:
                 if (ret)
                         goto walk;
                 pte |= PT_DIRTY_MASK;
-                kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte));
+                kvm_mmu_pte_write(vcpu, pte_gpa, (u8 *)&pte, sizeof(pte), 0);
                 walker->ptes[walker->level - 1] = pte;
         }
 
@@ -468,8 +469,15 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
                                       struct kvm_vcpu *vcpu, u64 addr,
                                       u64 *sptep, int level)
 {
+        struct shadow_walker *sw =
+                container_of(_sw, struct shadow_walker, walker);
 
         if (level == PT_PAGE_TABLE_LEVEL) {
+                struct kvm_mmu_page *sp = page_header(__pa(sptep));
+
+                sw->pte_gpa = (sp->gfn << PAGE_SHIFT);
+                sw->pte_gpa += (sptep - sp->spt) * sizeof(pt_element_t);
+
                 if (is_shadow_present_pte(*sptep))
                         rmap_remove(vcpu->kvm, sptep);
                 set_shadow_pte(sptep, shadow_trap_nonpresent_pte);
@@ -482,11 +490,26 @@ static int FNAME(shadow_invlpg_entry)(struct kvm_shadow_walk *_sw,
 
 static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
+        pt_element_t gpte;
         struct shadow_walker walker = {
                 .walker = { .entry = FNAME(shadow_invlpg_entry), },
+                .pte_gpa = -1,
         };
 
+        spin_lock(&vcpu->kvm->mmu_lock);
         walk_shadow(&walker.walker, vcpu, gva);
+        spin_unlock(&vcpu->kvm->mmu_lock);
+        if (walker.pte_gpa == -1)
+                return;
+        if (kvm_read_guest_atomic(vcpu->kvm, walker.pte_gpa, &gpte,
+                                  sizeof(pt_element_t)))
+                return;
+        if (is_present_pte(gpte) && (gpte & PT_ACCESSED_MASK)) {
+                if (mmu_topup_memory_caches(vcpu))
+                        return;
+                kvm_mmu_pte_write(vcpu, walker.pte_gpa, (const u8 *)&gpte,
+                                  sizeof(pt_element_t), 0);
+        }
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr)
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 774db00d2db6..ba102879de33 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -2046,7 +2046,7 @@ int emulator_write_phys(struct kvm_vcpu *vcpu, gpa_t gpa,
         ret = kvm_write_guest(vcpu->kvm, gpa, val, bytes);
         if (ret < 0)
                 return 0;
-        kvm_mmu_pte_write(vcpu, gpa, val, bytes);
+        kvm_mmu_pte_write(vcpu, gpa, val, bytes, 1);
         return 1;
 }
 