 mm/page_alloc.c | 37 +++++++++++++++++++++++++-----------
 1 file changed, 27 insertions(+), 10 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 8193809f3de0..a8f2c87792c3 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2378,6 +2378,15 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
+/* Returns true if the allocation is likely for THP */
+static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
+{
+	if (order == pageblock_order &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		return true;
+	return false;
+}
+
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
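Why this mask test identifies THP: transparent hugepage faults request movable, pageblock_order-sized allocations and do not set __GFP_REPEAT, so "movable, huge-page-sized, and not insistent" serves as a heuristic for "likely THP". Below is a minimal userspace sketch of just that test, assuming illustrative flag values (the real definitions live in include/linux/gfp.h) and pageblock_order == 9, as on x86-64 with THP enabled; it demonstrates only the mask arithmetic, not kernel behavior.

/*
 * Minimal userspace sketch of the is_thp_alloc() test above.
 * Flag values and the pageblock order are illustrative stand-ins.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_MOVABLE   0x08u   /* assumed value, for illustration */
#define __GFP_REPEAT    0x400u  /* assumed value, for illustration */
#define pageblock_order 9       /* huge-page order on x86-64 w/ THP */

static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
{
	/* Movable, pageblock-sized and not __GFP_REPEAT: likely THP */
	if (order == pageblock_order &&
	    (gfp_mask & (__GFP_MOVABLE | __GFP_REPEAT)) == __GFP_MOVABLE)
		return true;
	return false;
}

int main(void)
{
	/* THP-style request: movable, huge-page order, no __GFP_REPEAT */
	printf("%d\n", is_thp_alloc(__GFP_MOVABLE, 9));                /* 1 */
	/* __GFP_REPEAT set: the caller insists, so not treated as THP */
	printf("%d\n", is_thp_alloc(__GFP_MOVABLE | __GFP_REPEAT, 9)); /* 0 */
	/* Order-0 allocation: never THP */
	printf("%d\n", is_thp_alloc(__GFP_MOVABLE, 0));                /* 0 */
	return 0;
}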
@@ -2416,7 +2425,9 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	wake_all_kswapd(order, zonelist, high_zoneidx,
+	/* The decision whether to wake kswapd for THP is made later */
+	if (!is_thp_alloc(gfp_mask, order))
+		wake_all_kswapd(order, zonelist, high_zoneidx,
 					zone_idx(preferred_zone));
 
 	/*
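Note that the wakeup removed above is not lost for THP requests: the decision is only deferred, and the next hunk reissues wake_all_kswapd() on the rebalance path once the allocation has shown it is willing to pay for reclaim/compaction.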
@@ -2487,15 +2498,21 @@ rebalance:
 		goto got_pg;
 	sync_migration = true;
 
-	/*
-	 * If compaction is deferred for high-order allocations, it is because
-	 * sync compaction recently failed. In this is the case and the caller
-	 * requested a movable allocation that does not heavily disrupt the
-	 * system then fail the allocation instead of entering direct reclaim.
-	 */
-	if ((deferred_compaction || contended_compaction) &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
-		goto nopage;
+	if (is_thp_alloc(gfp_mask, order)) {
+		/*
+		 * If compaction is deferred for high-order allocations, it is
+		 * because sync compaction recently failed. If this is the case
+		 * and the caller requested a movable allocation that does not
+		 * heavily disrupt the system then fail the allocation instead
+		 * of entering direct reclaim.
+		 */
+		if (deferred_compaction || contended_compaction)
+			goto nopage;
+
+		/* If process is willing to reclaim/compact then wake kswapd */
+		wake_all_kswapd(order, zonelist, high_zoneidx,
+					zone_idx(preferred_zone));
+	}
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
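Taken together, the two hunks move the kswapd wakeup for likely-THP allocations from the top of the slowpath to the point where the request has survived compaction and is about to enter direct reclaim. Below is a heavily condensed, stubbed sketch of that control flow, not kernel code: slowpath_sketch() and the compaction_* flags are hypothetical stand-ins, the flag values are assumed as in the sketch above, and the real retry loop, watermark handling, and zonelist iteration of __alloc_pages_slowpath() are all omitted.

/*
 * Stubbed sketch of the slowpath decision flow after this patch.
 * Everything here models outcomes; nothing is real kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

typedef unsigned int gfp_t;

#define __GFP_MOVABLE   0x08u   /* assumed value, for illustration */
#define __GFP_REPEAT    0x400u  /* assumed value, for illustration */
#define pageblock_order 9

static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
{
	return order == pageblock_order &&
	       (gfp_mask & (__GFP_MOVABLE | __GFP_REPEAT)) == __GFP_MOVABLE;
}

/* Stubs: pretend compaction recently failed and was deferred. */
static bool compaction_deferred = true;
static bool compaction_contended = false;

static void wake_all_kswapd(unsigned int order)
{
	printf("waking kswapd for order-%u\n", order);
}

/* Returns true if the allocation may proceed to direct reclaim. */
static bool slowpath_sketch(gfp_t gfp_mask, unsigned int order)
{
	/* restart: only non-THP requests wake kswapd immediately */
	if (!is_thp_alloc(gfp_mask, order))
		wake_all_kswapd(order);

	/* ... async/sync compaction attempts happen here ... */

	if (is_thp_alloc(gfp_mask, order)) {
		/* Deferred/contended compaction: fail the THP attempt
		 * instead of entering direct reclaim. */
		if (compaction_deferred || compaction_contended)
			return false;	/* nopage */

		/* Committed to reclaim/compaction: wake kswapd now */
		wake_all_kswapd(order);
	}
	return true;	/* fall through to direct reclaim */
}

int main(void)
{
	/* THP-style request with compaction deferred: no wakeup, no reclaim */
	printf("proceed: %d\n", slowpath_sketch(__GFP_MOVABLE, 9));
	/* Regular order-0 request: kswapd is woken up front as before */
	printf("proceed: %d\n", slowpath_sketch(__GFP_MOVABLE, 0));
	return 0;
}

The effect modeled here: a THP fault whose compaction was recently deferred or contended fails quietly without waking kswapd or entering direct reclaim, while ordinary allocations keep the old behavior.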