Diffstat (limited to 'mm/page_alloc.c')
 -rw-r--r--  mm/page_alloc.c | 37 ++++++++++---------------------------
 1 file changed, 10 insertions(+), 27 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a8f2c87792c3..8193809f3de0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2378,15 +2378,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
-/* Returns true if the allocation is likely for THP */
-static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
-{
-	if (order == pageblock_order &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
-		return true;
-	return false;
-}
-
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2425,9 +2416,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	/* The decision whether to wake kswapd for THP is made later */
-	if (!is_thp_alloc(gfp_mask, order))
-		wake_all_kswapd(order, zonelist, high_zoneidx,
-						zone_idx(preferred_zone));
+	wake_all_kswapd(order, zonelist, high_zoneidx,
+					zone_idx(preferred_zone));
 
 	/*
@@ -2498,21 +2487,15 @@ rebalance:
 		goto got_pg;
 	sync_migration = true;
 
-	if (is_thp_alloc(gfp_mask, order)) {
-		/*
-		 * If compaction is deferred for high-order allocations, it is
-		 * because sync compaction recently failed. If this is the case
-		 * and the caller requested a movable allocation that does not
-		 * heavily disrupt the system then fail the allocation instead
-		 * of entering direct reclaim.
-		 */
-		if (deferred_compaction || contended_compaction)
-			goto nopage;
-
-		/* If process is willing to reclaim/compact then wake kswapd */
-		wake_all_kswapd(order, zonelist, high_zoneidx,
-						zone_idx(preferred_zone));
-	}
+	/*
+	 * If compaction is deferred for high-order allocations, it is because
+	 * sync compaction recently failed. If this is the case and the caller
+	 * requested a movable allocation that does not heavily disrupt the
+	 * system then fail the allocation instead of entering direct reclaim.
+	 */
+	if ((deferred_compaction || contended_compaction) &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		goto nopage;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
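
For reference, the (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE test in the last hunk is true exactly when __GFP_MOVABLE is set and __GFP_REPEAT is clear, i.e. a movable request whose caller has not insisted on retrying; THP faults allocate this way, which is also what the removed is_thp_alloc() keyed on. A minimal standalone sketch of the flag arithmetic, with illustrative bit positions rather than the kernel's real GFP assignments:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative bit positions only; the real values live in include/linux/gfp.h. */
#define __GFP_MOVABLE	(1u << 3)
#define __GFP_REPEAT	(1u << 10)

/*
 * Mirrors the test in the hunk above: true only when the caller asked
 * for a movable page and did not pass __GFP_REPEAT.
 */
static bool thp_like(unsigned int gfp_mask)
{
	return (gfp_mask & (__GFP_MOVABLE | __GFP_REPEAT)) == __GFP_MOVABLE;
}

int main(void)
{
	printf("%d\n", thp_like(__GFP_MOVABLE));			/* 1 */
	printf("%d\n", thp_like(__GFP_MOVABLE | __GFP_REPEAT));	/* 0 */
	printf("%d\n", thp_like(__GFP_REPEAT));				/* 0 */
	return 0;
}

Masking against both bits at once, then comparing against only one of them, checks a set bit and a clear bit in a single expression.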
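The net effect of the three hunks: kswapd is again woken unconditionally at the restart label, and the early-failure path for THP-like requests moves out of an is_thp_alloc() branch into the main flow just before direct reclaim. A runnable toy model of that ordering, where every function and variable is a stand-in invented for the illustration rather than a kernel API:

#include <stdbool.h>
#include <stdio.h>

/* Stand-in state: pretend sync compaction recently failed. */
static bool deferred_compaction = true;
static bool contended_compaction;

static void wake_all_kswapd(void)
{
	puts("kswapd woken");	/* now unconditional at restart */
}

static bool slowpath(bool thp_like_request)
{
	wake_all_kswapd();

	/* ...an async compaction attempt would run here... */

	if ((deferred_compaction || contended_compaction) && thp_like_request) {
		puts("fail early, skip direct reclaim");
		return false;
	}
	puts("enter direct reclaim");
	return true;
}

int main(void)
{
	slowpath(true);		/* THP fault: gives up quietly */
	slowpath(false);	/* ordinary allocation: reclaims */
	return 0;
}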