Diffstat (limited to 'mm/memory.c')
 -rw-r--r-- mm/memory.c | 36 ++++++++++++++++++++----------------
 1 file changed, 20 insertions(+), 16 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index e158f7ac6730..fe2fba27ded2 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -68,6 +68,7 @@
 #include <linux/debugfs.h>
 #include <linux/userfaultfd_k.h>
 #include <linux/dax.h>
+#include <linux/oom.h>
 
 #include <asm/io.h>
 #include <asm/mmu_context.h>
@@ -2893,6 +2894,7 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	struct mem_cgroup *memcg;
 	struct page *page;
+	int ret = 0;
 	pte_t entry;
 
 	/* File mapping without ->vm_ops ? */
@@ -2925,6 +2927,9 @@ static int do_anonymous_page(struct vm_fault *vmf)
 			vmf->address, &vmf->ptl);
 	if (!pte_none(*vmf->pte))
 		goto unlock;
+	ret = check_stable_address_space(vma->vm_mm);
+	if (ret)
+		goto unlock;
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2959,6 +2964,10 @@ static int do_anonymous_page(struct vm_fault *vmf)
 	if (!pte_none(*vmf->pte))
 		goto release;
 
+	ret = check_stable_address_space(vma->vm_mm);
+	if (ret)
+		goto release;
+
 	/* Deliver the page fault to userland, check inside PT lock */
 	if (userfaultfd_missing(vma)) {
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
@@ -2978,7 +2987,7 @@ setpte:
 	update_mmu_cache(vma, vmf->address, vmf->pte);
unlock:
 	pte_unmap_unlock(vmf->pte, vmf->ptl);
-	return 0;
+	return ret;
release:
 	mem_cgroup_cancel_charge(page, memcg, false);
 	put_page(page);
@@ -3252,7 +3261,7 @@ int alloc_set_pte(struct vm_fault *vmf, struct mem_cgroup *memcg,
 int finish_fault(struct vm_fault *vmf)
 {
 	struct page *page;
-	int ret;
+	int ret = 0;
 
 	/* Did we COW the page? */
 	if ((vmf->flags & FAULT_FLAG_WRITE) &&
@@ -3260,7 +3269,15 @@ int finish_fault(struct vm_fault *vmf)
 		page = vmf->cow_page;
 	else
 		page = vmf->page;
-	ret = alloc_set_pte(vmf, vmf->memcg, page);
+
+	/*
+	 * check even for read faults because we might have lost our CoWed
+	 * page
+	 */
+	if (!(vmf->vma->vm_flags & VM_SHARED))
+		ret = check_stable_address_space(vmf->vma->vm_mm);
+	if (!ret)
+		ret = alloc_set_pte(vmf, vmf->memcg, page);
 	if (vmf->pte)
 		pte_unmap_unlock(vmf->pte, vmf->ptl);
 	return ret;
@@ -3900,19 +3917,6 @@ int handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
 		mem_cgroup_oom_synchronize(false);
 	}
 
-	/*
-	 * This mm has been already reaped by the oom reaper and so the
-	 * refault cannot be trusted in general. Anonymous refaults would
-	 * lose data and give a zero page instead e.g. This is especially
-	 * problem for use_mm() because regular tasks will just die and
-	 * the corrupted data will not be visible anywhere while kthread
-	 * will outlive the oom victim and potentially propagate the date
-	 * further.
-	 */
-	if (unlikely((current->flags & PF_KTHREAD) && !(ret & VM_FAULT_ERROR)
-		     && test_bit(MMF_UNSTABLE, &vma->vm_mm->flags)))
-		ret = VM_FAULT_SIGBUS;
-
 	return ret;
 }
 EXPORT_SYMBOL_GPL(handle_mm_fault);
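
Note: check_stable_address_space() itself is not visible above, since this view is limited to mm/memory.c. As a rough sketch, assuming the helper is declared in the <linux/oom.h> header this patch starts including, and that it keys off the MMF_UNSTABLE flag the oom reaper sets on an mm it has begun to unmap, it would look something like:

static inline int check_stable_address_space(struct mm_struct *mm)
{
	/*
	 * Once the oom reaper has started tearing down this address
	 * space, faulting in a fresh (zero-filled) page for a private
	 * mapping could silently replace data that was already written,
	 * so refuse the fault with SIGBUS instead.
	 */
	if (unlikely(test_bit(MMF_UNSTABLE, &mm->flags)))
		return VM_FAULT_SIGBUS;
	return 0;
}

The net effect of the patch: the single MMF_UNSTABLE check that the removed hunk applied late in handle_mm_fault(), and only for PF_KTHREAD callers, is replaced by per-fault-path checks performed under the page table lock, so both anonymous faults and private (CoW) file-backed faults refuse to install pages into an mm the oom reaper may already have unmapped.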