author		Linus Torvalds <torvalds@linux-foundation.org>	2019-01-08 21:58:29 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-01-08 21:58:29 -0500
commit		a88cc8da0279f8e481b0d90e51a0a1cffac55906 (patch)
tree		4be3f8598d4146e3ea2f4f344a140d9c18f11932 /mm/rmap.c
parent		9cb2feb4d21d97386eb25c7b67e2793efcc1e70a (diff)
parent		73444bc4d8f92e46a20cb6bd3342fc2ea75c6787 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton:
 "14 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_alloc: do not wake kswapd with zone lock held
  hugetlbfs: revert "use i_mmap_rwsem for more pmd sharing synchronization"
  hugetlbfs: revert "Use i_mmap_rwsem to fix page fault/truncate race"
  mm: page_mapped: don't assume compound page is huge or THP
  mm/memory.c: initialise mmu_notifier_range correctly
  tools/vm/page_owner: use page_owner_sort in the use example
  kasan: fix krealloc handling for tag-based mode
  kasan: make tag based mode work with CONFIG_HARDENED_USERCOPY
  kasan, arm64: use ARCH_SLAB_MINALIGN instead of manual aligning
  mm, memcg: fix reclaim deadlock with writeback
  mm/usercopy.c: no check page span for stack objects
  slab: alien caches must not be initialized if the allocation of the alien cache failed
  fork, memcg: fix cached_stacks case
  zram: idle writeback fixes and cleanup
Diffstat (limited to 'mm/rmap.c')
-rw-r--r--	mm/rmap.c	4
1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index 21a26cf51114..68a1a5b869a5 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -25,7 +25,6 @@
  *     page->flags PG_locked (lock_page)
  *       hugetlbfs_i_mmap_rwsem_key (in huge_pmd_share)
  *         mapping->i_mmap_rwsem
- *           hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
  *           anon_vma->rwsem
  *             mm->page_table_lock or pte_lock
  *               zone_lru_lock (in mark_page_accessed, isolate_lru_page)
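The hunk above drops hugetlb_fault_mutex from mm/rmap.c's lock-ordering comment as part of the two hugetlbfs reverts in this merge. The comment is a deadlock-avoidance contract: any path that needs several of these locks must acquire them top-down. A minimal userspace sketch of that discipline, with POSIX rwlocks standing in for the kernel's rw_semaphores (names are illustrative only, not kernel code):

#include <pthread.h>
#include <stdio.h>

/* Stand-ins for two adjacent levels in the documented ordering. */
static pthread_rwlock_t i_mmap_rwsem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_rwlock_t anon_vma_rwsem = PTHREAD_RWLOCK_INITIALIZER;

static void rmap_walk_path(void)
{
	/* Take locks strictly in the documented top-down order... */
	pthread_rwlock_rdlock(&i_mmap_rwsem);
	pthread_rwlock_rdlock(&anon_vma_rwsem);

	puts("holding i_mmap_rwsem, then anon_vma->rwsem");

	/* ...and drop them innermost-first. */
	pthread_rwlock_unlock(&anon_vma_rwsem);
	pthread_rwlock_unlock(&i_mmap_rwsem);
}

int main(void)
{
	rmap_walk_path();
	return 0;
}

As long as every thread respects the same order, no cycle of lock waits can form; the reverted series had inserted hugetlb_fault_mutex into this hierarchy, and removing it restores the previous contract.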
@@ -1379,9 +1378,6 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 		/*
 		 * If sharing is possible, start and end will be adjusted
 		 * accordingly.
-		 *
-		 * If called for a huge page, caller must hold i_mmap_rwsem
-		 * in write mode as it is possible to call huge_pmd_unshare.
 		 */
 		adjust_range_if_pmd_sharing_possible(vma, &range.start,
 						     &range.end);
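This second hunk removes the comment, added by the reverted series, that callers of try_to_unmap_one() must hold i_mmap_rwsem in write mode around huge_pmd_unshare(). The surviving call, adjust_range_if_pmd_sharing_possible(), widens the mmu_notifier invalidation range to PUD_SIZE boundaries when the huge page's PMD may be shared, since unsharing can tear down a mapping larger than the range originally being unmapped. A minimal userspace sketch of that rounding, assuming x86_64's 1 GiB PUD_SIZE and eliding the kernel's check for whether the vma actually permits sharing:

#include <stdio.h>

#define PUD_SIZE (1UL << 30)			/* 1 GiB: x86_64 assumption */
#define PUD_MASK (~(PUD_SIZE - 1))

/* Illustrative stand-in for adjust_range_if_pmd_sharing_possible(). */
static void adjust_range_sketch(unsigned long *start, unsigned long *end)
{
	*start &= PUD_MASK;				/* round start down */
	*end = (*end + PUD_SIZE - 1) & PUD_MASK;	/* round end up */
}

int main(void)
{
	unsigned long start = 0x40200000UL;	/* small range inside ... */
	unsigned long end   = 0x40400000UL;	/* ... one 1 GiB PUD region */

	adjust_range_sketch(&start, &end);
	/* Prints 0x40000000-0x80000000: the whole covering PUD region. */
	printf("invalidate 0x%lx-0x%lx\n", start, end);
	return 0;
}

Over-invalidating is safe but never under-invalidating: secondary MMUs are told about the full region a shared PMD could map, which is why the call stays even after the locking comment is reverted.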