author    Linus Torvalds <torvalds@linux-foundation.org>  2019-01-08 21:58:29 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-01-08 21:58:29 -0500
commit    a88cc8da0279f8e481b0d90e51a0a1cffac55906 (patch)
tree      4be3f8598d4146e3ea2f4f344a140d9c18f11932 /mm/memory.c
parent    9cb2feb4d21d97386eb25c7b67e2793efcc1e70a (diff)
parent    73444bc4d8f92e46a20cb6bd3342fc2ea75c6787 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "14 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_alloc: do not wake kswapd with zone lock held
  hugetlbfs: revert "use i_mmap_rwsem for more pmd sharing synchronization"
  hugetlbfs: revert "Use i_mmap_rwsem to fix page fault/truncate race"
  mm: page_mapped: don't assume compound page is huge or THP
  mm/memory.c: initialise mmu_notifier_range correctly
  tools/vm/page_owner: use page_owner_sort in the use example
  kasan: fix krealloc handling for tag-based mode
  kasan: make tag based mode work with CONFIG_HARDENED_USERCOPY
  kasan, arm64: use ARCH_SLAB_MINALIGN instead of manual aligning
  mm, memcg: fix reclaim deadlock with writeback
  mm/usercopy.c: no check page span for stack objects
  slab: alien caches must not be initialized if the allocation of the alien cache failed
  fork, memcg: fix cached_stacks case
  zram: idle writeback fixes and cleanup
Diffstat (limited to 'mm/memory.c')
-rw-r--r--  mm/memory.c  26
1 file changed, 24 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a52663c0612d..e11ca9dd823f 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2994,6 +2994,28 @@ static vm_fault_t __do_fault(struct vm_fault *vmf)
 	struct vm_area_struct *vma = vmf->vma;
 	vm_fault_t ret;
 
+	/*
+	 * Preallocate pte before we take page_lock because this might lead to
+	 * deadlocks for memcg reclaim which waits for pages under writeback:
+	 *				lock_page(A)
+	 *				SetPageWriteback(A)
+	 *				unlock_page(A)
+	 * lock_page(B)
+	 *				lock_page(B)
+	 * pte_alloc_pne
+	 *   shrink_page_list
+	 *     wait_on_page_writeback(A)
+	 *				SetPageWriteback(B)
+	 *				unlock_page(B)
+	 *				# flush A, B to clear the writeback
+	 */
+	if (pmd_none(*vmf->pmd) && !vmf->prealloc_pte) {
+		vmf->prealloc_pte = pte_alloc_one(vmf->vma->vm_mm);
+		if (!vmf->prealloc_pte)
+			return VM_FAULT_OOM;
+		smp_wmb(); /* See comment in __pte_alloc() */
+	}
+
 	ret = vma->vm_ops->fault(vmf);
 	if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY |
 			    VM_FAULT_DONE_COW)))
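
The hunk above is the "mm, memcg: fix reclaim deadlock with writeback" change from the series: the pte page is allocated before page_lock is taken, so reclaim triggered by that allocation can never end up waiting on writeback of a page whose lock the faulting task already holds. A minimal userspace sketch of the same preallocate-before-locking idea, with hypothetical names (fault_ctx, handle_fault) standing in for the kernel structures:

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for struct vm_fault and its prealloc_pte field. */
struct fault_ctx {
	void *prealloc;
};

static int handle_fault(struct fault_ctx *ctx)
{
	/*
	 * Allocate BEFORE taking page_lock, mirroring __do_fault() above:
	 * an allocation that can block (on reclaim, in the kernel case)
	 * must never happen while the lock is held.
	 */
	if (!ctx->prealloc) {
		ctx->prealloc = malloc(4096);
		if (!ctx->prealloc)
			return -1;	/* stands in for VM_FAULT_OOM */
	}

	pthread_mutex_lock(&page_lock);
	/* ... fault work that must not allocate while locked ... */
	pthread_mutex_unlock(&page_lock);
	return 0;
}

int main(void)
{
	struct fault_ctx ctx = { 0 };

	printf("fault returned %d\n", handle_fault(&ctx));
	free(ctx.prealloc);
	return 0;
}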
@@ -4077,8 +4099,8 @@ static int __follow_pte_pmd(struct mm_struct *mm, unsigned long address,
 		goto out;
 
 	if (range) {
-		range->start = address & PAGE_MASK;
-		range->end = range->start + PAGE_SIZE;
+		mmu_notifier_range_init(range, mm, address & PAGE_MASK,
+					(address & PAGE_MASK) + PAGE_SIZE);
 		mmu_notifier_invalidate_range_start(range);
 	}
 	ptep = pte_offset_map_lock(mm, pmd, address, ptlp);
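
This second hunk is the "mm/memory.c: initialise mmu_notifier_range correctly" fix: the removed lines wrote only range->start and range->end, leaving the other fields of the on-stack range uninitialized, so the hunk switches to the mmu_notifier_range_init() helper, which fills in every field. A hedged userspace sketch of the same pattern, with a made-up struct range and range_init() standing in for the kernel types:

#include <stdio.h>

/*
 * Hypothetical stand-in for struct mmu_notifier_range: setting only
 * .start and .end by hand (as the removed lines did) would leave .mm
 * as stack garbage, which is exactly the bug the hunk above fixes.
 */
struct range {
	void *mm;
	unsigned long start;
	unsigned long end;
};

/* One helper that fills in every field, like mmu_notifier_range_init(). */
static void range_init(struct range *r, void *mm,
		       unsigned long start, unsigned long end)
{
	r->mm = mm;
	r->start = start;
	r->end = end;
}

int main(void)
{
	struct range r;
	int fake_mm;

	range_init(&r, &fake_mm, 0x1000, 0x2000);
	printf("range [%#lx, %#lx) on mm %p\n", r.start, r.end, r.mm);
	return 0;
}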