author    Linus Torvalds <torvalds@linux-foundation.org>  2012-12-10 13:47:45 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2012-12-10 13:47:45 -0500
commit    31f8d42d44b48ba72b586ca03e810cbbd21ea16b
tree      ecb5b522d62a4ffbe8b1511baade7e7120a3d8c5
parent    ed23ec4f0a510528e0ffe415f9394107418ae854
Revert "mm: avoid waking kswapd for THP allocations when compaction is deferred or contended"
This reverts commit 782fd30406ecb9d9b082816abe0c6008fc72a7b0.

We are going to reinstate the __GFP_NO_KSWAPD flag that has been
removed, the removal reverted, and then removed again, making this
commit a pointless fixup for a problem that was caused by the removal
of the __GFP_NO_KSWAPD flag in the first place.

The thing is, we really don't want to wake up kswapd for THP
allocations (because they fail quite commonly under any kind of memory
pressure, including when there is tons of memory free), and these
patches were just trying to fix up the underlying bug: the original
removal of __GFP_NO_KSWAPD in commit c654345924f7 ("mm: remove
__GFP_NO_KSWAPD") was simply bogus.

Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
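[Context, not part of this commit: the guard that the reinstated flag
brings back looked roughly like the following in pre-c654345924f7
kernels. This is a reconstruction for illustration; the exact placement
in __alloc_pages_slowpath() may differ.]

	/*
	 * Sketch of the __GFP_NO_KSWAPD guard this series reinstates:
	 * THP callers set __GFP_NO_KSWAPD, so a failed huge-page
	 * attempt falls back to base pages without kicking kswapd
	 * into background reclaim.
	 */
	if (!(gfp_mask & __GFP_NO_KSWAPD))
		wake_all_kswapd(order, zonelist, high_zoneidx,
						zone_idx(preferred_zone));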
 mm/page_alloc.c | 37 ++++++++++---------------------------
 1 file changed, 10 insertions(+), 27 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index a8f2c87792c3..8193809f3de0 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2378,15 +2378,6 @@ bool gfp_pfmemalloc_allowed(gfp_t gfp_mask)
 	return !!(gfp_to_alloc_flags(gfp_mask) & ALLOC_NO_WATERMARKS);
 }
 
-/* Returns true if the allocation is likely for THP */
-static bool is_thp_alloc(gfp_t gfp_mask, unsigned int order)
-{
-	if (order == pageblock_order &&
-	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
-		return true;
-	return false;
-}
-
 static inline struct page *
 __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
@@ -2425,9 +2416,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 restart:
-	/* The decision whether to wake kswapd for THP is made later */
-	if (!is_thp_alloc(gfp_mask, order))
-		wake_all_kswapd(order, zonelist, high_zoneidx,
+	wake_all_kswapd(order, zonelist, high_zoneidx,
 						zone_idx(preferred_zone));
 
 	/*
@@ -2498,21 +2487,15 @@ rebalance:
 		goto got_pg;
 	sync_migration = true;
 
-	if (is_thp_alloc(gfp_mask, order)) {
-		/*
-		 * If compaction is deferred for high-order allocations, it is
-		 * because sync compaction recently failed. If this is the case
-		 * and the caller requested a movable allocation that does not
-		 * heavily disrupt the system then fail the allocation instead
-		 * of entering direct reclaim.
-		 */
-		if (deferred_compaction || contended_compaction)
-			goto nopage;
-
-		/* If process is willing to reclaim/compact then wake kswapd */
-		wake_all_kswapd(order, zonelist, high_zoneidx,
-				zone_idx(preferred_zone));
-	}
+	/*
+	 * If compaction is deferred for high-order allocations, it is because
+	 * sync compaction recently failed. If this is the case and the caller
+	 * requested a movable allocation that does not heavily disrupt the
+	 * system then fail the allocation instead of entering direct reclaim.
+	 */
+	if ((deferred_compaction || contended_compaction) &&
+	    (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE)
+		goto nopage;
 
 	/* Try direct reclaim and then allocating */
 	page = __alloc_pages_direct_reclaim(gfp_mask, order,
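[For reference: the restored mask test singles out THP because of how
GFP_TRANSHUGE is composed at this point in the tree. The sketch below
approximates the 3.7-era include/linux/gfp.h definition and is shown
for illustration only.]

/*
 * Approximate 3.7-era definition (illustrative sketch): GFP_TRANSHUGE
 * sets __GFP_MOVABLE (via GFP_HIGHUSER_MOVABLE) but not __GFP_REPEAT,
 * so (gfp_mask & (__GFP_MOVABLE|__GFP_REPEAT)) == __GFP_MOVABLE holds
 * for these opportunistic huge-page allocations.
 */
#define GFP_TRANSHUGE	(GFP_HIGHUSER_MOVABLE | __GFP_COMP | \
			 __GFP_NOMEMALLOC | __GFP_NORETRY)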