path: root/arch/x86/kvm/paging_tmpl.h
author		Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>	2011-09-22 04:56:39 -0400
committer	Avi Kivity <avi@redhat.com>	2011-12-27 04:16:56 -0500
commit		f57f2ef58f6703e6df70ed52a198920cb3e8edba (patch)
tree		831564ca3314ef897fdcfcbd70ef91a52d369a13 /arch/x86/kvm/paging_tmpl.h
parent		505aef8f30a95f7e4abf2c07e54ded1521587ba0 (diff)
KVM: MMU: fast prefetch spte on invlpg path
Fast prefetch spte for the unsync shadow page on invlpg path.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
Diffstat (limited to 'arch/x86/kvm/paging_tmpl.h')
-rw-r--r--	arch/x86/kvm/paging_tmpl.h	30
1 file changed, 18 insertions, 12 deletions
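
To make the net effect of the two hunks below easier to follow, here is a condensed sketch of FNAME(invlpg) as it reads after this patch: the memory caches are topped up before taking mmu_lock, and the guest pte is read and prefetched into the spte right on the invlpg path, instead of recording pte_gpa and deferring to kvm_mmu_pte_write() after the unlock. This is only an illustration assembled from the diff; the pte_gpa computation lives in unchanged context lines the diff does not show, and all helpers are KVM MMU internals, so it is not a standalone compilable unit.

/*
 * Sketch only: the approximate shape of FNAME(invlpg) after this patch,
 * assembled from the hunks below.  pte_gpa is computed from sp and sptep
 * in unchanged context lines that this diff does not show.
 */
static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
{
        struct kvm_shadow_walk_iterator iterator;
        struct kvm_mmu_page *sp;
        int level;
        u64 *sptep;

        vcpu_clear_mmio_info(vcpu, gva);

        /* Allocate outside mmu_lock; rmap_can_add() below copes with failure. */
        mmu_topup_memory_caches(vcpu);

        spin_lock(&vcpu->kvm->mmu_lock);
        for_each_shadow_entry(vcpu, gva, iterator) {
                level = iterator.level;
                sptep = iterator.sptep;
                sp = page_header(__pa(sptep));

                if (is_last_spte(*sptep, level)) {
                        pt_element_t gpte;
                        gpa_t pte_gpa;

                        if (!sp->unsync)
                                break;

                        /* ... pte_gpa derived from sp/sptep (context not shown) ... */

                        if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
                                kvm_flush_remote_tlbs(vcpu->kvm);

                        /*
                         * Fast prefetch: read the guest pte and update the
                         * spte here under mmu_lock, instead of deferring to
                         * kvm_mmu_pte_write() after the unlock.
                         */
                        if (!rmap_can_add(vcpu))
                                break;
                        if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
                                                  sizeof(pt_element_t)))
                                break;
                        FNAME(update_pte)(vcpu, sp, sptep, &gpte);
                }

                if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                        break;
        }
        spin_unlock(&vcpu->kvm->mmu_lock);
}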
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index d8d3906649da..9efb86035774 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -672,20 +672,27 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 {
         struct kvm_shadow_walk_iterator iterator;
         struct kvm_mmu_page *sp;
-        gpa_t pte_gpa = -1;
         int level;
         u64 *sptep;
 
         vcpu_clear_mmio_info(vcpu, gva);
 
-        spin_lock(&vcpu->kvm->mmu_lock);
+        /*
+         * No need to check return value here, rmap_can_add() can
+         * help us to skip pte prefetch later.
+         */
+        mmu_topup_memory_caches(vcpu);
 
+        spin_lock(&vcpu->kvm->mmu_lock);
         for_each_shadow_entry(vcpu, gva, iterator) {
                 level = iterator.level;
                 sptep = iterator.sptep;
 
                 sp = page_header(__pa(sptep));
                 if (is_last_spte(*sptep, level)) {
+                        pt_element_t gpte;
+                        gpa_t pte_gpa;
+
                         if (!sp->unsync)
                                 break;
 
@@ -694,22 +701,21 @@ static void FNAME(invlpg)(struct kvm_vcpu *vcpu, gva_t gva)
 
                         if (mmu_page_zap_pte(vcpu->kvm, sp, sptep))
                                 kvm_flush_remote_tlbs(vcpu->kvm);
+
+                        if (!rmap_can_add(vcpu))
+                                break;
+
+                        if (kvm_read_guest_atomic(vcpu->kvm, pte_gpa, &gpte,
+                                                  sizeof(pt_element_t)))
+                                break;
+
+                        FNAME(update_pte)(vcpu, sp, sptep, &gpte);
                 }
 
                 if (!is_shadow_present_pte(*sptep) || !sp->unsync_children)
                         break;
         }
-
-        atomic_inc(&vcpu->kvm->arch.invlpg_counter);
-
         spin_unlock(&vcpu->kvm->mmu_lock);
-
-        if (pte_gpa == -1)
-                return;
-
-        if (mmu_topup_memory_caches(vcpu))
-                return;
-        kvm_mmu_pte_write(vcpu, pte_gpa, NULL, sizeof(pt_element_t), 0);
 }
 
 static gpa_t FNAME(gva_to_gpa)(struct kvm_vcpu *vcpu, gva_t vaddr, u32 access,