diff options
Diffstat (limited to 'mm')
| -rw-r--r-- | mm/memory.c | 13 | ||||
| -rw-r--r-- | mm/oom_kill.c | 8 |
2 files changed, 21 insertions, 0 deletions
diff --git a/mm/memory.c b/mm/memory.c index f1a68049edff..4bfc3a9c3b18 100644 --- a/mm/memory.c +++ b/mm/memory.c | |||
| @@ -3658,6 +3658,19 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address, | |||
| 3658 | mem_cgroup_oom_synchronize(false); | 3658 | mem_cgroup_oom_synchronize(false); |
| 3659 | } | 3659 | } |
| 3660 | 3660 | ||
| 3661 | /* | ||
| 3662 | * This mm has been already reaped by the oom reaper and so the | ||
| 3663 | * refault cannot be trusted in general. Anonymous refaults would | ||
| 3664 | * lose data and give a zero page instead e.g. This is especially | ||
| 3665 | * a problem for use_mm() because regular tasks will just die and | ||
| 3666 | * the corrupted data will not be visible anywhere while kthread | ||
| 3667 | * will outlive the oom victim and potentially propagate the data | ||
| 3668 | * further. | ||
| 3669 | */ | ||
| 3670 | if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR) | ||
| 3671 | && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags))) | ||
| 3672 | ret = VM_FAULT_SIGBUS; | ||
| 3673 | |||
| 3661 | return ret; | 3674 | return ret; |
| 3662 | } | 3675 | } |
| 3663 | EXPORT_SYMBOL_GPL(handle_mm_fault); | 3676 | EXPORT_SYMBOL_GPL(handle_mm_fault); |
diff --git a/mm/oom_kill.c b/mm/oom_kill.c index 3b990544db6d..5a3ba96c8338 100644 --- a/mm/oom_kill.c +++ b/mm/oom_kill.c | |||
| @@ -495,6 +495,14 @@ static bool __oom_reap_task_mm(struct task_struct *tsk, struct mm_struct *mm) | |||
| 495 | goto unlock_oom; | 495 | goto unlock_oom; |
| 496 | } | 496 | } |
| 497 | 497 | ||
| 498 | /* | ||
| 499 | * Tell all users of get_user/copy_from_user etc... that the content | ||
| 500 | * is no longer stable. No barriers really needed because unmapping | ||
| 501 | * should imply barriers already and the reader would hit a page fault | ||
| 502 | * if it stumbled over reaped memory. | ||
| 503 | */ | ||
| 504 | set_bit(MMF_UNSTABLE, &mm->flags); | ||
| 505 | |||
| 498 | tlb_gather_mmu(&tlb, mm, 0, -1); | 506 | tlb_gather_mmu(&tlb, mm, 0, -1); |
| 499 | for (vma = mm->mmap ; vma; vma = vma->vm_next) { | 507 | for (vma = mm->mmap ; vma; vma = vma->vm_next) { |
| 500 | if (is_vm_hugetlb_page(vma)) | 508 | if (is_vm_hugetlb_page(vma)) |
