diff options
author     David S. Miller <davem@davemloft.net>   2019-05-02 22:14:21 -0400
committer  David S. Miller <davem@davemloft.net>   2019-05-02 22:14:21 -0400
commit     ff24e4980a68d83090a02fda081741a410fe8eef (patch)
tree       4d874dfcaf2bb8c3abc2446af9447a983402c0ae /mm/page_alloc.c
parent     26f146ed971c0e4a264ce525d7a66a71ef73690d (diff)
parent     ea9866793d1e925b4d320eaea409263b2a568f38 (diff)
Merge git://git.kernel.org/pub/scm/linux/kernel/git/davem/net
Three trivial overlapping conflicts.
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  27
1 file changed, 19 insertions(+), 8 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c6ce20aaf80b..c02cff1ed56e 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -266,7 +266,20 @@ compound_page_dtor * const compound_page_dtors[] = {
 
 int min_free_kbytes = 1024;
 int user_min_free_kbytes = -1;
+#ifdef CONFIG_DISCONTIGMEM
+/*
+ * DiscontigMem defines memory ranges as separate pg_data_t even if the ranges
+ * are not on separate NUMA nodes. Functionally this works but with
+ * watermark_boost_factor, it can reclaim prematurely as the ranges can be
+ * quite small. By default, do not boost watermarks on discontigmem as in
+ * many cases very high-order allocations like THP are likely to be
+ * unsupported and the premature reclaim offsets the advantage of long-term
+ * fragmentation avoidance.
+ */
+int watermark_boost_factor __read_mostly;
+#else
 int watermark_boost_factor __read_mostly = 15000;
+#endif
 int watermark_scale_factor = 10;
 
 static unsigned long nr_kernel_pages __initdata;
@@ -3419,8 +3432,11 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 	alloc_flags |= ALLOC_KSWAPD;
 
 #ifdef CONFIG_ZONE_DMA32
+	if (!zone)
+		return alloc_flags;
+
 	if (zone_idx(zone) != ZONE_NORMAL)
-		goto out;
+		return alloc_flags;
 
 	/*
 	 * If ZONE_DMA32 exists, assume it is the one after ZONE_NORMAL and
@@ -3429,9 +3445,9 @@ alloc_flags_nofragment(struct zone *zone, gfp_t gfp_mask)
 	 */
 	BUILD_BUG_ON(ZONE_NORMAL - ZONE_DMA32 != 1);
 	if (nr_online_nodes > 1 && !populated_zone(--zone))
-		goto out;
+		return alloc_flags;
 
-out:
+	alloc_flags |= ALLOC_NOFRAGMENT;
 #endif /* CONFIG_ZONE_DMA32 */
 	return alloc_flags;
 }
@@ -3773,11 +3789,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	memalloc_noreclaim_restore(noreclaim_flag);
 	psi_memstall_leave(&pflags);
 
-	if (*compact_result <= COMPACT_INACTIVE) {
-		WARN_ON_ONCE(page);
-		return NULL;
-	}
-
 	/*
 	 * At least in one zone compaction wasn't deferred or skipped, so let's
 	 * count a compaction stall