path: root/mm/page_alloc.c
author    Linus Torvalds <torvalds@linux-foundation.org>  2019-01-08 21:58:29 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2019-01-08 21:58:29 -0500
commit a88cc8da0279f8e481b0d90e51a0a1cffac55906 (patch)
tree   4be3f8598d4146e3ea2f4f344a140d9c18f11932 /mm/page_alloc.c
parent 9cb2feb4d21d97386eb25c7b67e2793efcc1e70a (diff)
parent 73444bc4d8f92e46a20cb6bd3342fc2ea75c6787 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge misc fixes from Andrew Morton: "14 fixes"

* emailed patches from Andrew Morton <akpm@linux-foundation.org>:
  mm, page_alloc: do not wake kswapd with zone lock held
  hugetlbfs: revert "use i_mmap_rwsem for more pmd sharing synchronization"
  hugetlbfs: revert "Use i_mmap_rwsem to fix page fault/truncate race"
  mm: page_mapped: don't assume compound page is huge or THP
  mm/memory.c: initialise mmu_notifier_range correctly
  tools/vm/page_owner: use page_owner_sort in the use example
  kasan: fix krealloc handling for tag-based mode
  kasan: make tag based mode work with CONFIG_HARDENED_USERCOPY
  kasan, arm64: use ARCH_SLAB_MINALIGN instead of manual aligning
  mm, memcg: fix reclaim deadlock with writeback
  mm/usercopy.c: no check page span for stack objects
  slab: alien caches must not be initialized if the allocation of the alien cache failed
  fork, memcg: fix cached_stacks case
  zram: idle writeback fixes and cleanup
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 8
1 file changed, 7 insertions(+), 1 deletion(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cde5dac6229a..d295c9bc01a8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2214,7 +2214,7 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 	 */
 	boost_watermark(zone);
 	if (alloc_flags & ALLOC_KSWAPD)
-		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+		set_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
 
 	/* We are not allowed to try stealing from the whole block */
 	if (!whole_block)
@@ -3102,6 +3102,12 @@ struct page *rmqueue(struct zone *preferred_zone,
 	local_irq_restore(flags);
 
 out:
+	/* Separate test+clear to avoid unnecessary atomics */
+	if (test_bit(ZONE_BOOSTED_WATERMARK, &zone->flags)) {
+		clear_bit(ZONE_BOOSTED_WATERMARK, &zone->flags);
+		wakeup_kswapd(zone, 0, 0, zone_idx(zone));
+	}
+
 	VM_BUG_ON_PAGE(page && bad_range(zone, page), page);
 	return page;
 
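
The page_alloc.c hunks above follow a common pattern: work that is unsafe while zone->lock is held (waking kswapd) is only recorded as a flag bit under the lock, and the actual wakeup happens later on the rmqueue() exit path, after the lock has been dropped. The separate test before clear means the common no-wakeup case costs a plain read rather than an atomic read-modify-write, which is what the "Separate test+clear to avoid unnecessary atomics" comment refers to. Below is a minimal userspace C sketch of that deferral pattern; all names (zone_lock, deferred_wakeup, do_expensive_wakeup, steal_fallback_locked, allocation_exit_path) are invented for illustration and are not kernel APIs, so treat it as an analogy of the technique rather than the kernel implementation.

/*
 * Userspace analogy of the deferred-wakeup pattern in the diff above.
 * Build with: cc -pthread sketch.c
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t zone_lock = PTHREAD_MUTEX_INITIALIZER;
static atomic_bool deferred_wakeup;     /* stands in for ZONE_BOOSTED_WATERMARK */

/* Stand-in for wakeup_kswapd(): must not be called with zone_lock held. */
static void do_expensive_wakeup(void)
{
        printf("waking background reclaimer\n");
}

/* Runs with the lock held, like steal_suitable_fallback(). */
static void steal_fallback_locked(bool want_kswapd)
{
        pthread_mutex_lock(&zone_lock);
        /* ... watermark boosting and freelist work under the lock ... */
        if (want_kswapd)
                atomic_store(&deferred_wakeup, true);  /* only record the request */
        pthread_mutex_unlock(&zone_lock);
}

/* Runs after the lock is dropped, like the out: path in rmqueue(). */
static void allocation_exit_path(void)
{
        /*
         * Separate load and store, mirroring test_bit()/clear_bit():
         * when no wakeup was requested, this is just a read.
         */
        if (atomic_load(&deferred_wakeup)) {
                atomic_store(&deferred_wakeup, false);
                do_expensive_wakeup();                 /* safe: lock not held */
        }
}

int main(void)
{
        steal_fallback_locked(true);
        allocation_exit_path();
        return 0;
}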