author    Michal Hocko <mhocko@suse.com>    2018-08-22 00:52:33 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2018-08-22 13:52:44 -0400
commit    93065ac753e4443840a057bfef4be71ec766fde9 (patch)
tree      6293538251b5e62affb5cdbe56a4d2f7257fc656 /virt/kvm
parent    c2343d2761f86ae1b857f78c7cdb9f51e5fa1641 (diff)
mm, oom: distinguish blockable mode for mmu notifiers
There are several blockable mmu notifiers which might sleep in mmu_notifier_invalidate_range_start, and that is a problem for the oom_reaper because it needs to guarantee forward progress and therefore cannot depend on any sleepable locks.

Currently we simply back off and mark an oom victim with blockable mmu notifiers as done after a short sleep. That can result in selecting a new oom victim prematurely, because the previous one still hasn't torn its memory down yet.

We can do much better, though. Even if mmu notifiers use sleepable locks, there is no reason to automatically assume those locks are held. Moreover, the majority of notifiers only care about a portion of the address space, and there is zero reason to fail when we are unmapping an unrelated range. Some notifiers really do block and wait for HW, which is harder to handle, and there we have to bail out.

This patch handles the low-hanging fruit: __mmu_notifier_invalidate_range_start gets a blockable flag, and callbacks are not allowed to sleep if the flag is set to false. This is achieved by using a trylock instead of the sleepable lock for most callbacks and continuing as long as we do not block down the call chain. We can likely improve on this further, because a common pattern is to do a range lookup first and then act on the result; the lookup can be done without a sleeping lock in most cases AFAICS.

The oom_reaper end then simply retries if at least one notifier could not make progress in !blockable mode. A retry loop is already implemented to wait for the mmap_sem, and this is basically the same thing.

The simplest way for driver developers to test this code path is to wrap userspace code which uses these notifiers into a memcg and set the hard limit low enough to hit the oom, e.g. after the test has faulted in all the mmu-notifier-managed memory. Then look for a proper process teardown.

[akpm@linux-foundation.org: coding style fixes]
[akpm@linux-foundation.org: minor code simplification]
Link: http://lkml.kernel.org/r/20180716115058.5559-1-mhocko@kernel.org
Signed-off-by: Michal Hocko <mhocko@suse.com>
Acked-by: Christian König <christian.koenig@amd.com> # AMD notifiers
Acked-by: Leon Romanovsky <leonro@mellanox.com> # mlx and umem_odp
Reported-by: David Rientjes <rientjes@google.com>
Cc: "David (ChunMing) Zhou" <David1.Zhou@amd.com>
Cc: Paolo Bonzini <pbonzini@redhat.com>
Cc: Alex Deucher <alexander.deucher@amd.com>
Cc: David Airlie <airlied@linux.ie>
Cc: Jani Nikula <jani.nikula@linux.intel.com>
Cc: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
Cc: Rodrigo Vivi <rodrigo.vivi@intel.com>
Cc: Doug Ledford <dledford@redhat.com>
Cc: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Mike Marciniszyn <mike.marciniszyn@intel.com>
Cc: Dennis Dalessandro <dennis.dalessandro@intel.com>
Cc: Sudeep Dutt <sudeep.dutt@intel.com>
Cc: Ashutosh Dixit <ashutosh.dixit@intel.com>
Cc: Dimitri Sivanich <sivanich@sgi.com>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Juergen Gross <jgross@suse.com>
Cc: "Jérôme Glisse" <jglisse@redhat.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Felix Kuehling <felix.kuehling@amd.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
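To make the new contract concrete, here is a minimal sketch of the trylock pattern the changelog describes. It is illustrative only: demo_lock and demo_invalidate_range_start are hypothetical names, not code from this patch.

static DEFINE_MUTEX(demo_lock);     /* hypothetical driver lock */

/*
 * In !blockable mode the callback must not sleep, so it takes its
 * lock with a trylock and reports failure instead of blocking.
 */
static int demo_invalidate_range_start(unsigned long start,
                                       unsigned long end, bool blockable)
{
        if (blockable)
                mutex_lock(&demo_lock);
        else if (!mutex_trylock(&demo_lock))
                return -EAGAIN; /* oom_reaper will simply retry later */

        /* ... invalidate mappings covering [start, end) ... */

        mutex_unlock(&demo_lock);
        return 0;
}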
Diffstat (limited to 'virt/kvm')
-rw-r--r--    virt/kvm/kvm_main.c    15
1 file changed, 10 insertions(+), 5 deletions(-)
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 9263ead9fd32..0116b449b993 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -140,9 +140,10 @@ static void kvm_uevent_notify_change(unsigned int type, struct kvm *kvm);
 static unsigned long long kvm_createvm_count;
 static unsigned long long kvm_active_vms;
 
-__weak void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
-		unsigned long start, unsigned long end)
+__weak int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
+		unsigned long start, unsigned long end, bool blockable)
 {
+	return 0;
 }
 
 bool kvm_is_reserved_pfn(kvm_pfn_t pfn)
@@ -360,13 +361,15 @@ static void kvm_mmu_notifier_change_pte(struct mmu_notifier *mn,
 	srcu_read_unlock(&kvm->srcu, idx);
 }
 
-static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
+static int kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 						    struct mm_struct *mm,
 						    unsigned long start,
-						    unsigned long end)
+						    unsigned long end,
+						    bool blockable)
 {
 	struct kvm *kvm = mmu_notifier_to_kvm(mn);
 	int need_tlb_flush = 0, idx;
+	int ret;
 
 	idx = srcu_read_lock(&kvm->srcu);
 	spin_lock(&kvm->mmu_lock);
@@ -384,9 +387,11 @@ static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
 
 	spin_unlock(&kvm->mmu_lock);
 
-	kvm_arch_mmu_notifier_invalidate_range(kvm, start, end);
+	ret = kvm_arch_mmu_notifier_invalidate_range(kvm, start, end, blockable);
 
 	srcu_read_unlock(&kvm->srcu, idx);
+
+	return ret;
 }
 
 static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
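For context, an architecture that overrides the weak kvm_arch_mmu_notifier_invalidate_range() hook above and needs a sleepable lock could honor the new blockable flag along these lines. This is a hedged sketch, not any real architecture's implementation; arch_demo_lock is an illustrative placeholder.

static DEFINE_MUTEX(arch_demo_lock);    /* illustrative placeholder */

int kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                           unsigned long start,
                                           unsigned long end,
                                           bool blockable)
{
        /* Refuse to sleep in !blockable mode; the caller retries later. */
        if (!blockable && !mutex_trylock(&arch_demo_lock))
                return -EAGAIN;
        if (blockable)
                mutex_lock(&arch_demo_lock);

        /* arch-specific invalidation of [start, end) would go here */

        mutex_unlock(&arch_demo_lock);
        return 0;
}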