aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kvm/mmu.c
diff options
context:
space:
mode:
authorMarcelo Tosatti <mtosatti@redhat.com>2008-12-01 19:32:05 -0500
committerAvi Kivity <avi@redhat.com>2008-12-31 09:55:44 -0500
commitad218f85e388e8ca816ff09d91c246cd014c53a8 (patch)
tree73fff9d1b3f01e760c2da8bc2276c1a74f4a36e4 /arch/x86/kvm/mmu.c
parent6cffe8ca4a2adf1ac5003d9cad08fe4434d6eee0 (diff)
KVM: MMU: prepopulate the shadow on invlpg
If the guest executes invlpg, peek into the pagetable and attempt to prepopulate the shadow entry. Also stop dirty fault updates from interfering with the fork detector. 2% improvement on RHEL3/AIM7.

Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/mmu.c')
-rw-r--r--arch/x86/kvm/mmu.c25
1 file changed, 13 insertions, 12 deletions
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index cbac9e4b156f..863baf70506e 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2441,7 +2441,8 @@ static void kvm_mmu_access_page(struct kvm_vcpu *vcpu, gfn_t gfn)
2441} 2441}
2442 2442
2443void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa, 2443void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2444 const u8 *new, int bytes) 2444 const u8 *new, int bytes,
2445 bool guest_initiated)
2445{ 2446{
2446 gfn_t gfn = gpa >> PAGE_SHIFT; 2447 gfn_t gfn = gpa >> PAGE_SHIFT;
2447 struct kvm_mmu_page *sp; 2448 struct kvm_mmu_page *sp;
@@ -2467,15 +2468,17 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
2467 kvm_mmu_free_some_pages(vcpu); 2468 kvm_mmu_free_some_pages(vcpu);
2468 ++vcpu->kvm->stat.mmu_pte_write; 2469 ++vcpu->kvm->stat.mmu_pte_write;
2469 kvm_mmu_audit(vcpu, "pre pte write"); 2470 kvm_mmu_audit(vcpu, "pre pte write");
2470 if (gfn == vcpu->arch.last_pt_write_gfn 2471 if (guest_initiated) {
2471 && !last_updated_pte_accessed(vcpu)) { 2472 if (gfn == vcpu->arch.last_pt_write_gfn
2472 ++vcpu->arch.last_pt_write_count; 2473 && !last_updated_pte_accessed(vcpu)) {
2473 if (vcpu->arch.last_pt_write_count >= 3) 2474 ++vcpu->arch.last_pt_write_count;
2474 flooded = 1; 2475 if (vcpu->arch.last_pt_write_count >= 3)
2475 } else { 2476 flooded = 1;
2476 vcpu->arch.last_pt_write_gfn = gfn; 2477 } else {
2477 vcpu->arch.last_pt_write_count = 1; 2478 vcpu->arch.last_pt_write_gfn = gfn;
2478 vcpu->arch.last_pte_updated = NULL; 2479 vcpu->arch.last_pt_write_count = 1;
2480 vcpu->arch.last_pte_updated = NULL;
2481 }
2479 } 2482 }
2480 index = kvm_page_table_hashfn(gfn); 2483 index = kvm_page_table_hashfn(gfn);
2481 bucket = &vcpu->kvm->arch.mmu_page_hash[index]; 2484 bucket = &vcpu->kvm->arch.mmu_page_hash[index];
@@ -2615,9 +2618,7 @@ EXPORT_SYMBOL_GPL(kvm_mmu_page_fault);
2615 2618
2616void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva) 2619void kvm_mmu_invlpg(struct kvm_vcpu *vcpu, gva_t gva)
2617{ 2620{
2618 spin_lock(&vcpu->kvm->mmu_lock);
2619 vcpu->arch.mmu.invlpg(vcpu, gva); 2621 vcpu->arch.mmu.invlpg(vcpu, gva);
2620 spin_unlock(&vcpu->kvm->mmu_lock);
2621 kvm_mmu_flush_tlb(vcpu); 2622 kvm_mmu_flush_tlb(vcpu);
2622 ++vcpu->stat.invlpg; 2623 ++vcpu->stat.invlpg;
2623} 2624}