aboutsummaryrefslogtreecommitdiffstats
path: root/virt
diff options
context:
space:
mode:
authorTakuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>2012-07-02 04:56:33 -0400
committerMarcelo Tosatti <mtosatti@redhat.com>2012-07-18 15:55:04 -0400
commitb3ae2096974b12c3af2ad1a4e7716b084949867f (patch)
tree38ca81ac607cec1c00d6961effb38992577114df /virt
parent84504ef38673fa021b3d8f3da2b79cf878b33315 (diff)
KVM: Introduce kvm_unmap_hva_range() for kvm_mmu_notifier_invalidate_range_start()
When we tested KVM under memory pressure, with THP enabled on the host, we noticed that the MMU notifier took a long time to invalidate huge pages. Since the invalidation was done with mmu_lock held, it not only wasted CPU time but also made the host less responsive. This patch mitigates this by using kvm_handle_hva_range().

Signed-off-by: Takuya Yoshikawa <yoshikawa.takuya@oss.ntt.co.jp>
Cc: Alexander Graf <agraf@suse.de>
Cc: Paul Mackerras <paulus@samba.org>
Signed-off-by: Marcelo Tosatti <mtosatti@redhat.com>
Diffstat (limited to 'virt')
-rw-r--r--virt/kvm/kvm_main.c3
1 file changed, 1 insertion(+), 2 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index b3ce91c623e2..e2b1a159e5df 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -332,8 +332,7 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 	 * count is also read inside the mmu_lock critical section.
 	 */
 	kvm->mmu_notifier_count++;
-	for (; start < end; start += PAGE_SIZE)
-		need_tlb_flush |= kvm_unmap_hva(kvm, start);
+	need_tlb_flush = kvm_unmap_hva_range(kvm, start, end);
 	need_tlb_flush |= kvm->tlbs_dirty;
 	/* we've to flush the tlb before the pages can be freed */
 	if (need_tlb_flush)