author		Michal Hocko <mhocko@suse.com>				2017-12-14 18:33:15 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>		2017-12-14 19:00:49 -0500
commit		4837fe37adff1d159904f0c013471b1ecbcb455e (patch)
tree		169fb195798dbf1afb20a87d32681230116157ce
parent		bdcf0a423ea1c40bbb40e7ee483b50fc8aa3d758 (diff)
mm, oom_reaper: fix memory corruption
David Rientjes has reported the following memory corruption while the
oom reaper tries to unmap the victim's address space:
BUG: Bad page map in process oom_reaper pte:6353826300000000 pmd:00000000
addr:00007f50cab1d000 vm_flags:08100073 anon_vma:ffff9eea335603f0 mapping: (null) index:7f50cab1d
file: (null) fault: (null) mmap: (null) readpage: (null)
CPU: 2 PID: 1001 Comm: oom_reaper
Call Trace:
unmap_page_range+0x1068/0x1130
__oom_reap_task_mm+0xd5/0x16b
oom_reaper+0xff/0x14c
kthread+0xc1/0xe0
Tetsuo Handa has noticed that the synchronization inside exit_mmap is
insufficient. We only synchronize with the oom reaper if
tsk_is_oom_victim() is true for current, which is not the case when the
final __mmput is called from a different context than the oom victim's
exit path. This can trivially happen from the context of any task which
has grabbed a reference on the mm (e.g. to read a /proc/<pid>/ file
which requires the mm, etc.).
The race would look like this:

  oom_reaper                oom_victim                task
                                                      mmget_not_zero
                            do_exit
                              mmput
  __oom_reap_task_mm                                  mmput
                                                        __mmput
                                                          exit_mmap
                                                            remove_vma
  unmap_page_range
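For illustration only (not part of the patch): a minimal sketch of the
rightmost "task" column above. remote_reader() and victim_task are
made-up names; the calls mirror what a /proc/<pid> reader does via
get_task_mm()/mmput() and show why exit_mmap() can end up running in a
context where tsk_is_oom_victim(current) is false.

  #include <linux/sched.h>
  #include <linux/sched/mm.h>

  /* Hypothetical example, not in the patch: pin a victim's mm the way a
   * /proc/<pid> reader would, then drop the reference again. */
  static void remote_reader(struct task_struct *victim_task)
  {
  	struct mm_struct *mm = get_task_mm(victim_task);  /* bumps mm_users */

  	if (!mm)
  		return;

  	/* ... read from the victim's address space ... */

  	/*
  	 * If the victim has meanwhile exited, this can be the final
  	 * reference, so __mmput() -> exit_mmap() runs here, in a task for
  	 * which tsk_is_oom_victim(current) is false - hence the new mm flag.
  	 */
  	mmput(mm);
  }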
Fix this issue by providing a new mm_is_oom_victim() helper which
operates on the mm struct rather than a task. Any context which
operates on a remote mm struct should use this helper in place of
tsk_is_oom_victim. The flag is set in mark_oom_victim and never cleared
so it is stable in the exit_mmap path.
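For readability, this is the post-patch state of the pieces involved,
collected from the hunks below; nothing here goes beyond the diff
itself:

  /* include/linux/sched/coredump.h */
  #define MMF_OOM_VICTIM		25	/* mm is the oom victim */

  /* include/linux/oom.h */
  static inline bool mm_is_oom_victim(struct mm_struct *mm)
  {
  	return test_bit(MMF_OOM_VICTIM, &mm->flags);
  }

  /* mm/oom_kill.c, mark_oom_victim(): the flag is set once, never cleared */
  if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
  	mmgrab(tsk->signal->oom_mm);
  	set_bit(MMF_OOM_VICTIM, &mm->flags);
  }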
Debugged by Tetsuo Handa.
Link: http://lkml.kernel.org/r/20171210095130.17110-1-mhocko@kernel.org
Fixes: 212925802454 ("mm: oom: let oom_reap_task and exit_mmap run concurrently")
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: David Rientjes <rientjes@google.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Tetsuo Handa <penguin-kernel@I-love.SAKURA.ne.jp>
Cc: Andrea Arcangeli <andrea@kernel.org>
Cc: <stable@vger.kernel.org> [4.14]
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/oom.h		|  9
-rw-r--r--	include/linux/sched/coredump.h	|  1
-rw-r--r--	mm/mmap.c			| 10
-rw-r--r--	mm/oom_kill.c			|  4
4 files changed, 18 insertions, 6 deletions
diff --git a/include/linux/oom.h b/include/linux/oom.h
index 01c91d874a57..5bad038ac012 100644
--- a/include/linux/oom.h
+++ b/include/linux/oom.h
@@ -67,6 +67,15 @@ static inline bool tsk_is_oom_victim(struct task_struct * tsk)
 }
 
 /*
+ * Use this helper if tsk->mm != mm and the victim mm needs a special
+ * handling. This is guaranteed to stay true after once set.
+ */
+static inline bool mm_is_oom_victim(struct mm_struct *mm)
+{
+	return test_bit(MMF_OOM_VICTIM, &mm->flags);
+}
+
+/*
  * Checks whether a page fault on the given mm is still reliable.
  * This is no longer true if the oom reaper started to reap the
  * address space which is reflected by MMF_UNSTABLE flag set in
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 9c8847395b5e..ec912d01126f 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -70,6 +70,7 @@ static inline int get_dumpable(struct mm_struct *mm)
 #define MMF_UNSTABLE		22	/* mm is unstable for copy_from_user */
 #define MMF_HUGE_ZERO_PAGE	23	/* mm has ever used the global huge zero page */
 #define MMF_DISABLE_THP		24	/* disable THP for all VMAs */
+#define MMF_OOM_VICTIM		25	/* mm is the oom victim */
 #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
diff --git a/mm/mmap.c b/mm/mmap.c
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -3019,20 +3019,20 @@ void exit_mmap(struct mm_struct *mm)
 	/* Use -1 here to ensure all VMAs in the mm are unmapped */
 	unmap_vmas(&tlb, vma, 0, -1);
 
-	set_bit(MMF_OOM_SKIP, &mm->flags);
-	if (unlikely(tsk_is_oom_victim(current))) {
+	if (unlikely(mm_is_oom_victim(mm))) {
 		/*
 		 * Wait for oom_reap_task() to stop working on this
 		 * mm. Because MMF_OOM_SKIP is already set before
 		 * calling down_read(), oom_reap_task() will not run
 		 * on this "mm" post up_write().
 		 *
-		 * tsk_is_oom_victim() cannot be set from under us
-		 * either because current->mm is already set to NULL
+		 * mm_is_oom_victim() cannot be set from under us
+		 * either because victim->mm is already set to NULL
 		 * under task_lock before calling mmput and oom_mm is
-		 * set not NULL by the OOM killer only if current->mm
+		 * set not NULL by the OOM killer only if victim->mm
 		 * is found not NULL while holding the task_lock.
 		 */
+		set_bit(MMF_OOM_SKIP, &mm->flags);
 		down_write(&mm->mmap_sem);
 		up_write(&mm->mmap_sem);
 	}
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index c957be32b27a..29f855551efe 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -683,8 +683,10 @@ static void mark_oom_victim(struct task_struct *tsk)
 		return;
 
 	/* oom_mm is bound to the signal struct life time. */
-	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm))
+	if (!cmpxchg(&tsk->signal->oom_mm, NULL, mm)) {
 		mmgrab(tsk->signal->oom_mm);
+		set_bit(MMF_OOM_VICTIM, &mm->flags);
+	}
 
 	/*
 	 * Make sure that the task is woken up from uninterruptible sleep