author		Michal Hocko <mhocko@suse.com>		2016-05-20 19:57:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>		2016-05-20 20:58:30 -0400
commit		ec8d7c14ea14922fe21945b458a75e39f11dd832 (patch)
tree		fe45cbd94518218d2be1288a812dbca8c1e01d95 /kernel
parent		bb8a4b7fd1266ef888b3a80aa5f266062b224ef4 (diff)
mm, oom_reaper: do not mmput synchronously from the oom reaper context
Tetsuo has properly noted that the mmput slow path can block waiting for another party (e.g. exit_aio waiting for an IO). If that happens, the oom_reaper is put out of the way and cannot process the next oom victim. We should strive to make this context as reliable and as independent of other subsystems as possible.

Introduce mmput_async, which performs the slow path from an async (workqueue) context. This delays the operation, but that shouldn't be a problem: in most cases the oom_reaper has already reclaimed as much of the victim's address space as possible, so the remaining context shouldn't pin much memory anymore. The only exception is when the mmap_sem trylock has failed, which shouldn't happen too often.

The issue is only theoretical, but not impossible.

Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
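The caller side of this change lives in mm/oom_kill.c and is outside the diffstat below, but the intended usage is simple; as a rough sketch (hypothetical function name, not the code from this commit), the reaper's final reference drop becomes:

	/*
	 * Sketch of the intended caller (hypothetical name; the real
	 * change is in mm/oom_kill.c, outside this diffstat): once the
	 * reaper is done unmapping, it must not risk blocking in the
	 * mmput() slow path, so it defers the teardown to a workqueue.
	 */
	static void oom_reaper_drop_mm(struct mm_struct *mm)
	{
		/* ... unmap as much of the victim's address space as possible ... */

		/*
		 * If this is the last reference, __mmput() (exit_aio,
		 * exit_mmap, ...) runs from a workqueue worker rather
		 * than from the reaper thread.
		 */
		mmput_async(mm);
	}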
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/fork.c	50
1 file changed, 35 insertions(+), 15 deletions(-)
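mmput_async() parks the final reference drop in a work item embedded in the mm itself. That field comes from a companion change to include/linux/mm_types.h, which is also outside this diffstat; roughly (a sketch, not the verbatim hunk):

	/* sketch of the companion mm_types.h change (not in this diffstat) */
	struct mm_struct {
		/* ... existing fields ... */

		/* used by the oom reaper to put the mm asynchronously */
		struct work_struct async_put_work;
	};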
diff --git a/kernel/fork.c b/kernel/fork.c
index 3e8451527cbe..8fbed7194af1 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -699,6 +699,26 @@ void __mmdrop(struct mm_struct *mm)
 }
 EXPORT_SYMBOL_GPL(__mmdrop);
 
+static inline void __mmput(struct mm_struct *mm)
+{
+	VM_BUG_ON(atomic_read(&mm->mm_users));
+
+	uprobe_clear_state(mm);
+	exit_aio(mm);
+	ksm_exit(mm);
+	khugepaged_exit(mm); /* must run before exit_mmap */
+	exit_mmap(mm);
+	set_mm_exe_file(mm, NULL);
+	if (!list_empty(&mm->mmlist)) {
+		spin_lock(&mmlist_lock);
+		list_del(&mm->mmlist);
+		spin_unlock(&mmlist_lock);
+	}
+	if (mm->binfmt)
+		module_put(mm->binfmt->module);
+	mmdrop(mm);
+}
+
 /*
  * Decrement the use count and release all resources for an mm.
  */
@@ -706,24 +726,24 @@ void mmput(struct mm_struct *mm)
 {
 	might_sleep();
 
-	if (atomic_dec_and_test(&mm->mm_users)) {
-		uprobe_clear_state(mm);
-		exit_aio(mm);
-		ksm_exit(mm);
-		khugepaged_exit(mm); /* must run before exit_mmap */
-		exit_mmap(mm);
-		set_mm_exe_file(mm, NULL);
-		if (!list_empty(&mm->mmlist)) {
-			spin_lock(&mmlist_lock);
-			list_del(&mm->mmlist);
-			spin_unlock(&mmlist_lock);
-		}
-		if (mm->binfmt)
-			module_put(mm->binfmt->module);
-		mmdrop(mm);
+	if (atomic_dec_and_test(&mm->mm_users))
+		__mmput(mm);
+}
+EXPORT_SYMBOL_GPL(mmput);
+
+static void mmput_async_fn(struct work_struct *work)
+{
+	struct mm_struct *mm = container_of(work, struct mm_struct, async_put_work);
+	__mmput(mm);
+}
+
+void mmput_async(struct mm_struct *mm)
+{
+	if (atomic_dec_and_test(&mm->mm_users)) {
+		INIT_WORK(&mm->async_put_work, mmput_async_fn);
+		schedule_work(&mm->async_put_work);
 	}
 }
-EXPORT_SYMBOL_GPL(mmput);
 
 /**
  * set_mm_exe_file - change a reference to the mm's executable file