 arch/x86/kvm/paging_tmpl.h | 12 ++++++++++--
 include/linux/kvm_host.h   |  2 ++
 virt/kvm/kvm_main.c        |  7 ++++++-
 3 files changed, 18 insertions(+), 3 deletions(-)
diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
index a43f4ccd30bb..2b3d66c7b68d 100644
--- a/arch/x86/kvm/paging_tmpl.h
+++ b/arch/x86/kvm/paging_tmpl.h
@@ -746,6 +746,14 @@ static void FNAME(prefetch_page)(struct kvm_vcpu *vcpu,
  * Using the cached information from sp->gfns is safe because:
  * - The spte has a reference to the struct page, so the pfn for a given gfn
  *   can't change unless all sptes pointing to it are nuked first.
+ *
+ * Note:
+ * We should flush all tlbs if a spte is dropped, even though the guest
+ * is responsible for it.  If we do not, kvm_mmu_notifier_invalidate_page
+ * and kvm_mmu_notifier_invalidate_range_start see that the mapped page
+ * is no longer used by the guest, tlbs are not flushed, and the guest
+ * can keep accessing the freed pages.
+ * We increase kvm->tlbs_dirty to delay the tlb flush in this case.
  */
 static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 {
@@ -781,14 +789,14 @@ static int FNAME(sync_page)(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp)
 		gfn = gpte_to_gfn(gpte);
 
 		if (FNAME(prefetch_invalid_gpte)(vcpu, sp, &sp->spt[i], gpte)) {
-			kvm_flush_remote_tlbs(vcpu->kvm);
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
 		if (gfn != sp->gfns[i]) {
 			drop_spte(vcpu->kvm, &sp->spt[i],
 				  shadow_trap_nonpresent_pte);
-			kvm_flush_remote_tlbs(vcpu->kvm);
+			vcpu->kvm->tlbs_dirty++;
 			continue;
 		}
 
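
The two hunks above replace a synchronous kvm_flush_remote_tlbs() call per dropped spte with a single counter increment, so one later flush can cover every drop made during a sync_page pass. Below is a minimal userspace sketch of that deferred-flush pattern; the toy_kvm struct and helper names are illustrative stand-ins, not the kernel's definitions, and the real code additionally runs under mmu_lock and uses cmpxchg() where the sketch uses a plain compare.

	/* deferred_flush.c - toy model of tlbs_dirty batching
	 * (hypothetical names; not the kernel's struct kvm). */
	#include <stdio.h>

	struct toy_kvm {
		long tlbs_dirty;	/* sptes dropped but not yet flushed */
		long remote_tlb_flush;	/* broadcast flushes issued so far */
	};

	static void toy_flush_remote_tlbs(struct toy_kvm *kvm)
	{
		long dirty_count = kvm->tlbs_dirty;

		kvm->remote_tlb_flush++;	/* one flush covers all drops */
		if (kvm->tlbs_dirty == dirty_count)	/* stands in for cmpxchg() */
			kvm->tlbs_dirty = 0;
	}

	static void toy_drop_spte(struct toy_kvm *kvm)
	{
		/* before the patch: a remote flush here, once per drop */
		kvm->tlbs_dirty++;	/* after: just record the drop */
	}

	int main(void)
	{
		struct toy_kvm kvm = { 0, 0 };
		int i;

		for (i = 0; i < 16; i++)	/* sync_page drops 16 stale sptes */
			toy_drop_spte(&kvm);
		toy_flush_remote_tlbs(&kvm);	/* single flush instead of 16 */

		printf("flushes: %ld, still dirty: %ld\n",
		       kvm.remote_tlb_flush, kvm.tlbs_dirty);
		return 0;
	}

Batching trades one broadcast flush per drop for one per pass, which is why the mmu-notifier paths changed below must also consult tlbs_dirty before deciding a flush is unnecessary.
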
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index da0794f707f6..ac4e83a1a10d 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -254,6 +254,7 @@ struct kvm {
 	struct mmu_notifier mmu_notifier;
 	unsigned long mmu_notifier_seq;
 	long mmu_notifier_count;
+	long tlbs_dirty;
 #endif
 };
 
@@ -382,6 +383,7 @@ void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
 void kvm_resched(struct kvm_vcpu *vcpu);
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
 void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);
+
 void kvm_flush_remote_tlbs(struct kvm *kvm);
 void kvm_reload_remote_mmus(struct kvm *kvm);
 
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5156d458a84d..ee99b77e4451 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -168,8 +168,12 @@ static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
 
 void kvm_flush_remote_tlbs(struct kvm *kvm)
 {
+	int dirty_count = kvm->tlbs_dirty;
+
+	smp_mb();
 	if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
 		++kvm->stat.remote_tlb_flush;
+	cmpxchg(&kvm->tlbs_dirty, dirty_count, 0);
 }
 
 void kvm_reload_remote_mmus(struct kvm *kvm)
@@ -249,7 +253,7 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
 	kvm->mmu_notifier_seq++;
-	need_tlb_flush = kvm_unmap_hva(kvm, address);
+	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
@@ -293,6 +297,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	kvm->mmu_notifier_count++;
 	for (; start < end; start += PAGE_SIZE)
 		need_tlb_flush |= kvm_unmap_hva(kvm, start);
+	need_tlb_flush |= kvm->tlbs_dirty;
 	spin_unlock(&kvm->mmu_lock);
 	srcu_read_unlock(&kvm->srcu, idx);
 
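
Taken together: kvm_flush_remote_tlbs() snapshots tlbs_dirty before broadcasting KVM_REQ_TLB_FLUSH, then resets the counter with cmpxchg() only if it still holds the snapshot value, so a drop recorded by another vcpu mid-flush is not lost and stays pending for the next flush. The mmu-notifier hooks OR the counter into need_tlb_flush, so even when kvm_unmap_hva() finds nothing to do, a non-zero counter still forces a flush before the host frees the page. The following compilable sketch models that snapshot-then-conditional-reset idiom with C11 atomics in place of the kernel's smp_mb()/cmpxchg(); it is an illustration of the idiom, not the kernel implementation.

	/* dirty_reset.c - reset-only-if-unchanged idiom from
	 * kvm_flush_remote_tlbs(), modelled with C11 atomics. */
	#include <stdatomic.h>
	#include <stdio.h>

	static _Atomic long tlbs_dirty;

	static void flush_remote_tlbs(void)
	{
		long dirty_count = atomic_load(&tlbs_dirty);	/* snapshot */

		/* ...broadcast the tlb-flush request to every vcpu here... */

		/* Reset only if no new drop was recorded while flushing;
		 * on failure the late drops remain pending. */
		atomic_compare_exchange_strong(&tlbs_dirty, &dirty_count, 0);
	}

	int main(void)
	{
		long stale = 0;

		atomic_fetch_add(&tlbs_dirty, 3);	/* three deferred drops */
		flush_remote_tlbs();
		printf("after clean flush: %ld\n", atomic_load(&tlbs_dirty));

		/* Simulate a drop racing with a flusher that snapshotted 0:
		 * the compare-exchange fails and the drop stays recorded. */
		atomic_fetch_add(&tlbs_dirty, 1);
		atomic_compare_exchange_strong(&tlbs_dirty, &stale, 0);
		printf("after racing flush: %ld\n", atomic_load(&tlbs_dirty));
		return 0;
	}

Run on a clean counter the first flush prints 0 pending; the simulated race leaves 1 pending, which is exactly the behaviour the cmpxchg() in the patch is there to guarantee.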