author     Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>   2012-02-10 01:28:31 -0500
committer  Avi Kivity <avi@redhat.com>                         2012-03-08 07:10:23 -0500
commit     565f3be2174611f364405bbea2d86e153c2e7e78
tree       6605216c0d4f06043ddf6bbcb9b3d5a82571ccc6 /virt/kvm/kvm_main.c
parent     db3fe4eb45f3555d91a7124e18cf3a2f2a30eb90
KVM: mmu_notifier: Flush TLBs before releasing mmu_lock
If mmu_lock is dropped before the TLB flush, other threads may process the same page in that small window, skip the TLB flush themselves, and return before these functions have flushed; the page can then be freed while stale TLB entries still point at it.

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Avi Kivity <avi@redhat.com>
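For illustration, a minimal sketch of the window the old ordering left open. The interleaving is hypothetical; the calls are the ones visible in the hunks below.

	/* Thread A (old ordering)                Thread B                    */
	spin_lock(&kvm->mmu_lock);
	kvm_unmap_hva(kvm, address);    /* sptes for the page are zapped     */
	spin_unlock(&kvm->mmu_lock);    /* window opens                      */
	                                /* B takes mmu_lock, finds nothing
	                                 * left to unmap, skips the flush,
	                                 * and returns; the primary MM may
	                                 * now free the page                 */
	kvm_flush_remote_tlbs(kvm);     /* too late: a vcpu may have reached
	                                 * the freed page through a stale
	                                 * TLB entry                         */

Flushing before spin_unlock() closes the window: no other thread can observe sptes that are unmapped but not yet flushed.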
Diffstat (limited to 'virt/kvm/kvm_main.c')
-rw-r--r--  virt/kvm/kvm_main.c | 19 ++++++++++---------
1 file changed, 10 insertions(+), 9 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 8340e0e62034..e4431ada5947 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -289,15 +289,15 @@ static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
 	 */
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
+
 	kvm->mmu_notifier_seq++;
 	need_tlb_flush = kvm_unmap_hva(kvm, address) | kvm->tlbs_dirty;
-	spin_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
-
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)
 		kvm_flush_remote_tlbs(kvm);

+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
 }

 static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
@@ -335,12 +335,12 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	for (; start < end; start += PAGE_SIZE)
 		need_tlb_flush |= kvm_unmap_hva(kvm, start);
 	need_tlb_flush |= kvm->tlbs_dirty;
-	spin_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);
-
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)
 		kvm_flush_remote_tlbs(kvm);
+
+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
 }

 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
@@ -378,13 +378,14 @@ static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,

 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
-	young = kvm_age_hva(kvm, address);
-	spin_unlock(&kvm->mmu_lock);
-	srcu_read_unlock(&kvm->srcu, idx);

+	young = kvm_age_hva(kvm, address);
 	if (young)
 		kvm_flush_remote_tlbs(kvm);

+	spin_unlock(&kvm->mmu_lock);
+	srcu_read_unlock(&kvm->srcu, idx);
+
 	return young;
 }

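For reference, a reconstruction of kvm_mmu_notifier_clear_flush_young() as it reads after this patch. The body follows the hunk above; the prologue (mmu_notifier_to_kvm() and the locals) is assumed from the surrounding file rather than shown in the diff.

	static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
						      struct mm_struct *mm,
						      unsigned long address)
	{
		struct kvm *kvm = mmu_notifier_to_kvm(mn);
		int young, idx;

		idx = srcu_read_lock(&kvm->srcu);
		spin_lock(&kvm->mmu_lock);

		/* Age the page and, if it was young, flush while mmu_lock
		 * still guards the "zapped but not yet flushed" state. */
		young = kvm_age_hva(kvm, address);
		if (young)
			kvm_flush_remote_tlbs(kvm);

		spin_unlock(&kvm->mmu_lock);
		srcu_read_unlock(&kvm->srcu, idx);

		return young;
	}

The cost is that kvm_flush_remote_tlbs() now runs with mmu_lock held, so the lock is held a little longer; in exchange, a page can only be handed back to the primary MM once every TLB entry for it is gone.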