diff options
-rw-r--r-- | include/linux/sched.h | 1 | ||||
-rw-r--r-- | mm/memory.c | 13 | ||||
-rw-r--r-- | mm/oom_kill.c | 8 |
3 files changed, 22 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h index af0721364788..6bee6f988912 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h | |||
@@ -525,6 +525,7 @@ static inline int get_dumpable(struct mm_struct *mm) | |||
525 | #define MMF_HAS_UPROBES 19 /* has uprobes */ | 525 | #define MMF_HAS_UPROBES 19 /* has uprobes */ |
526 | #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ | 526 | #define MMF_RECALC_UPROBES 20 /* MMF_HAS_UPROBES can be wrong */ |
527 | #define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ | 527 | #define MMF_OOM_SKIP 21 /* mm is of no interest for the OOM killer */ |
528 | #define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */ | ||
528 | 529 | ||
529 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) | 530 | #define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK) |
530 | 531 | ||
diff --git a/mm/memory.c b/mm/memory.c index f1a68049edff..4bfc3a9c3b18 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
@@ -3658,6 +3658,19 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, | |||
3658 | mem_cgroup_oom_synchronize(false); | 3658 | mem_cgroup_oom_synchronize(false); |
3659 | } | 3659 | } |
3660 | 3660 | ||
3661 | /* | ||
3662 | * This mm has been already reaped by the oom reaper and so the | ||
3663 | * refault cannot be trusted in general. Anonymous refaults would | ||
3664 | * lose data and give a zero page instead e.g. This is especially | ||
3665 | a problem for use_mm() because regular tasks will just die and | ||
3666 | * the corrupted data will not be visible anywhere while kthread | ||
3667 | will outlive the oom victim and potentially propagate the data | ||
3668 | * further. | ||
3669 | */ | ||
3670 | if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR) | ||
3671 | && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) | ||
3672 | ret = VM_FAULT_SIGBUS; | ||
3673 | |||
3661 | return ret; | 3674 | return ret; |
3662 | } | 3675 | } |
3663 | EXPORT_SYMBOL_GPL(handle_mm_fault); | 3676 | EXPORT_SYMBOL_GPL(handle_mm_fault); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 3b990544db6d..5a3ba96c8338 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
@@ -495,6 +495,14 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) | |||
495 | goto unlock_oom; | 495 | goto unlock_oom; |
496 | } | 496 | } |
497 | 497 | ||
498 | /* | ||
499 | * Tell all users of get_user/copy_from_user etc... that the content | ||
500 | * is no longer stable. No barriers really needed because unmapping | ||
501 | * should imply barriers already and the reader would hit a page fault | ||
502 | if it stumbled over reaped memory. | ||
503 | */ | ||
504 | set_bit(MMF_UNSTABLE, &mm->flags); | ||
505 | |||
498 | tlb_gather_mmu(&tlb, mm, 0, -1); | 506 | tlb_gather_mmu(&tlb, mm, 0, -1); |
499 | for (vma = mm->mmap ; vma; vma = vma->vm_next) { | 507 | for (vma = mm->mmap ; vma; vma = vma->vm_next) { |
500 | if (is_vm_hugetlb_page(vma)) | 508 | if (is_vm_hugetlb_page(vma)) |