author     Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>   2010-11-22 22:13:00 -0500
committer  Avi Kivity <avi@redhat.com>                     2011-01-12 04:29:51 -0500
commit     a4ee1ca4a36e7857d90ae8c2b85f1bde9a042c10 (patch)
tree       29707dd004ef14df318ac35321b95ac62570fc99 /arch
parent     407c61c6bd6a51b56d02f8bbad8aadf19db8c7b5 (diff)
KVM: MMU: delay flush all tlbs on sync_page path
Quote from Avi:
| I don't think we need to flush immediately; set a "tlb dirty" bit somewhere
| that is cleared when we flush the tlb. kvm_mmu_notifier_invalidate_page()
| can consult the bit and force a flush if set.

Signed-off-by: Xiao Guangrong <xiaoguangrong@cn.fujitsu.com>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
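The arch-limited diff below only shows the sync_page side, which bumps the dirty
counter instead of flushing. The generic half of the idea Avi describes, where the
mmu notifier consults the counter and forces a flush if it is set, is not shown
here; the following is a minimal sketch of that half, assuming a kvm->tlbs_dirty
counter in struct kvm, with srcu and error handling elided:

static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
					     struct mm_struct *mm,
					     unsigned long address)
{
	struct kvm *kvm = mmu_notifier_to_kvm(mn);
	int need_tlb_flush;

	spin_lock(&kvm->mmu_lock);
	/*
	 * Flush if the hva was mapped by the guest *or* if sync_page has
	 * recorded a delayed flush in kvm->tlbs_dirty (sketch only; the
	 * exact generic-side change is not part of this arch diff).
	 */
	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
	if (need_tlb_flush)
		kvm_flush_remote_tlbs(kvm);
	spin_unlock(&kvm->mmu_lock);
}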
Diffstat (limited to 'arch')
-rw-r--r--   arch/x86/kvm/paging_tmpl.h   12
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a43f4ccd30b..2b3d66c7b68 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -746,6 +746,14 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
+ *
+ * Note:
+ *   We should flush all tlbs if the spte is dropped even though the guest is
+ *   responsible for it.  If we don't, kvm_mmu_notifier_invalidate_page and
+ *   kvm_mmu_notifier_invalidate_range_start may see that the page is no
+ *   longer mapped by the guest, skip the tlb flush, and leave the guest able
+ *   to access the freed pages.
+ *   We increase kvm->tlbs_dirty instead, to delay the tlb flush in this case.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -781,14 +789,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		gfn = gpte_to_gfn(gpte);
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			kvm_flush_remote_tlbs(vcpu->kvm);
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
 		if (gfn != sp->gfns[i]) {
 			drop_spte(vcpu->kvm, &sp->spt[i],
 				  shadow_trap_nonpresent_pte);
-			kvm_flush_remote_tlbs(vcpu->kvm);
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
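Per the quoted suggestion, the counter bumped above is cleared when the tlb is
actually flushed. That clearing happens on the generic side, outside this
arch-limited diff; a sketch of how the flush path might fold the counter in,
assuming tlbs_dirty is a plain int in struct kvm and reusing the existing
kvm_main.c flush helper shape (make_all_cpus_request and the exact ordering are
assumptions here, not taken from this page):

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	int dirty_count = kvm->tlbs_dirty;

	/* Order reading tlbs_dirty against issuing the remote flush request. */
	smp_mb();
	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
		++kvm->stat.remote_tlb_flush;
	/*
	 * Only clear the delayed flushes observed above; a concurrent
	 * sync_page may have bumped the counter again in the meantime,
	 * and that newer increment must still trigger a future flush.
	 */
	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
}

The cmpxchg rather than a plain store is what keeps a racing tlbs_dirty++ from
being lost between reading the counter and flushing.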