author		Linus Torvalds <torvalds@linux-foundation.org>	2019-01-08 21:58:29 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-01-08 21:58:29 -0500
commit		a88cc8da0279f8e481b0d90e51a0a1cffac55906 (patch)
tree		4be3f8598d4146e3ea2f4f344a140d9c18f11932 /mm/memory-failure.c
parent		9cb2feb4d21d97386eb25c7b67e2793efcc1e70a (diff)
parent		73444bc4d8f92e46a20cb6bd3342fc2ea75c6787 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "14 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_alloc: do not wake kswapd with zone lock held
  hugetlbfs: revert "use i_mmap_rwsem for more pmd sharing synchronization"
  hugetlbfs: revert "Use i_mmap_rwsem to fix page fault/truncate race"
  mm: page_mapped: don't assume compound page is huge or THP
  mm/memory.c: initialise mmu_notifier_range correctly
  tools/vm/page_owner: use page_owner_sort in the use example
  kasan: fix krealloc handling for tag-based mode
  kasan: make tag based mode work with CONFIG_HARDENED_USERCOPY
  kasan, arm64: use ARCH_SLAB_MINALIGN instead of manual aligning
  mm, memcg: fix reclaim deadlock with writeback
  mm/usercopy.c: no check page span for stack objects
  slab: alien caches must not be initialized if the allocation of the alien cache failed
  fork, memcg: fix cached_stacks case
  zram: idle writeback fixes and cleanup
Diffstat (limited to 'mm/memory-failure.c')
-rw-r--r--	mm/memory-failure.c	16
1 file changed, 2 insertions(+), 14 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 6379fff1a5ff..7c72f2a95785 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -966,7 +966,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	enum ttu_flags ttu = TTU_IGNORE_MLOCK | TTU_IGNORE_ACCESS;
 	struct address_space *mapping;
 	LIST_HEAD(tokill);
-	bool unmap_success = true;
+	bool unmap_success;
 	int kill = 1, forcekill;
 	struct page *hpage = *hpagep;
 	bool mlocked = PageMlocked(hpage);
@@ -1028,19 +1028,7 @@ static bool hwpoison_user_mappings(struct page *p, unsigned long pfn,
 	if (kill)
 		collect_procs(hpage, &tokill, flags & MF_ACTION_REQUIRED);
 
-	if (!PageHuge(hpage)) {
-		unmap_success = try_to_unmap(hpage, ttu);
-	} else if (mapping) {
-		/*
-		 * For hugetlb pages, try_to_unmap could potentially call
-		 * huge_pmd_unshare. Because of this, take semaphore in
-		 * write mode here and set TTU_RMAP_LOCKED to indicate we
-		 * have taken the lock at this higer level.
-		 */
-		i_mmap_lock_write(mapping);
-		unmap_success = try_to_unmap(hpage, ttu|TTU_RMAP_LOCKED);
-		i_mmap_unlock_write(mapping);
-	}
+	unmap_success = try_to_unmap(hpage, ttu);
 	if (!unmap_success)
 		pr_err("Memory failure: %#lx: failed to unmap page (mapcount=%d)\n",
 		       pfn, page_mapcount(hpage));
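
For context on what the reverted hunk was doing: the rmap code lets a caller that already holds i_mmap_rwsem pass TTU_RMAP_LOCKED so that the walk inside try_to_unmap() does not take the lock itself. A minimal sketch of that convention, using only the calls visible in the hunk above (the helper name is hypothetical and for illustration only, not the kernel's implementation):

static bool unmap_hugetlb_shared_page(struct page *hpage, enum ttu_flags ttu,
				      struct address_space *mapping)
{
	bool unmap_success;

	/*
	 * huge_pmd_unshare() can be reached from the rmap walk of a
	 * shared hugetlb mapping, so serialize on i_mmap_rwsem in write
	 * mode and tell try_to_unmap() the lock is already held by
	 * passing TTU_RMAP_LOCKED.
	 */
	i_mmap_lock_write(mapping);
	unmap_success = try_to_unmap(hpage, ttu | TTU_RMAP_LOCKED);
	i_mmap_unlock_write(mapping);

	return unmap_success;
}

The merged revert drops this special case, so hwpoison_user_mappings() once again calls try_to_unmap() the same way for hugetlb and non-hugetlb pages.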