Diffstat (limited to 'mm/page_alloc.c')
 mm/page_alloc.c | 45 +++++++++++++++++++++++++++++++++------------
 1 file changed, 33 insertions(+), 12 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index dfbf54b51649..313338d74095 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -2297,7 +2297,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int classzone_idx, int migratetype, enum migrate_mode mode,
-	bool *contended_compaction, bool *deferred_compaction)
+	int *contended_compaction, bool *deferred_compaction)
 {
 	struct zone *last_compact_zone = NULL;
 	unsigned long compact_result;
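
The new int *contended_compaction output parameter replaces the old boolean and lets compaction report back why an async attempt aborted, not just that it did. The COMPACT_CONTENDED_* values it carries are defined outside this file (the diffstat above is limited to mm/page_alloc.c); in the same series they live in include/linux/compaction.h, roughly as in this sketch (exact values and comments assumed):

/* Why async compaction aborted, reported back to the page allocator */
#define COMPACT_CONTENDED_NONE	0	/* no contention detected */
#define COMPACT_CONTENDED_SCHED	1	/* need_resched() or fatal signal pending */
#define COMPACT_CONTENDED_LOCK	2	/* zone lock or lru_lock was contended */
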
@@ -2371,7 +2371,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	struct zonelist *zonelist, enum zone_type high_zoneidx,
 	nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
 	int classzone_idx, int migratetype, enum migrate_mode mode,
-	bool *contended_compaction, bool *deferred_compaction)
+	int *contended_compaction, bool *deferred_compaction)
 {
 	return NULL;
 }
@@ -2547,7 +2547,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 	unsigned long did_some_progress;
 	enum migrate_mode migration_mode = MIGRATE_ASYNC;
 	bool deferred_compaction = false;
-	bool contended_compaction = false;
+	int contended_compaction = COMPACT_CONTENDED_NONE;
 
 	/*
 	 * In the slowpath, we sanity check order to avoid ever trying to
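
__alloc_pages_slowpath() hands this variable down by address so the compaction path can fill in its verdict. The call site sits outside the hunks shown here; the following is a reconstruction from the signature in the first hunk (argument order per that signature, exact layout assumed):

	/* Try direct compaction; the contention verdict is reported back */
	page = __alloc_pages_direct_compact(gfp_mask, order, zonelist,
					high_zoneidx, nodemask, alloc_flags,
					preferred_zone, classzone_idx,
					migratetype, migration_mode,
					&contended_compaction,
					&deferred_compaction);
	if (page)
		goto got_pg;
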
@@ -2651,15 +2651,36 @@ rebalance:
 	if (page)
 		goto got_pg;
 
-	/*
-	 * If compaction is deferred for high-order allocations, it is because
-	 * sync compaction recently failed. In this is the case and the caller
-	 * requested a movable allocation that does not heavily disrupt the
-	 * system then fail the allocation instead of entering direct reclaim.
-	 */
-	if ((deferred_compaction || contended_compaction) &&
-						(gfp_mask & __GFP_NO_KSWAPD))
-		goto nopage;
+	/* Checks for THP-specific high-order allocations */
+	if ((gfp_mask & GFP_TRANSHUGE) == GFP_TRANSHUGE) {
+		/*
+		 * If compaction is deferred for high-order allocations, it is
+		 * because sync compaction recently failed. If this is the case
+		 * and the caller requested a THP allocation, we do not want
+		 * to heavily disrupt the system, so we fail the allocation
+		 * instead of entering direct reclaim.
+		 */
+		if (deferred_compaction)
+			goto nopage;
+
+		/*
+		 * In all zones where compaction was attempted (and not
+		 * deferred or skipped), lock contention has been detected.
+		 * For THP allocation we do not want to disrupt the others
+		 * so we fallback to base pages instead.
+		 */
+		if (contended_compaction == COMPACT_CONTENDED_LOCK)
+			goto nopage;
+
+		/*
+		 * If compaction was aborted due to need_resched(), we do not
+		 * want to further increase allocation latency, unless it is
+		 * khugepaged trying to collapse.
+		 */
+		if (contended_compaction == COMPACT_CONTENDED_SCHED
+						&& !(current->flags & PF_KTHREAD))
+			goto nopage;
+	}
 
 	/*
 	 * It can become very expensive to allocate transparent hugepages at
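
Taken together, the new block implements a three-way bail-out that applies only to THP faults: a compaction deferral always fails the THP attempt, lock contention falls back to base pages, and a need_resched() abort fails only for regular tasks, so khugepaged (which runs with PF_KTHREAD set) is allowed to keep compacting. A standalone, compilable sketch of that decision table, with the kernel types stubbed out and the COMPACT_CONTENDED_* values assumed as noted earlier:

#include <stdbool.h>

#define COMPACT_CONTENDED_NONE	0	/* assumed values, see note above */
#define COMPACT_CONTENDED_SCHED	1
#define COMPACT_CONTENDED_LOCK	2

/* Mirrors the logic added in the hunk above; names are illustrative */
static bool thp_alloc_should_fail(bool deferred_compaction,
				  int contended_compaction,
				  bool is_kthread)
{
	if (deferred_compaction)
		return true;	/* sync compaction recently failed */

	if (contended_compaction == COMPACT_CONTENDED_LOCK)
		return true;	/* zone/lru locks contended: use base pages */

	if (contended_compaction == COMPACT_CONTENDED_SCHED && !is_kthread)
		return true;	/* don't add latency, unless khugepaged */

	return false;		/* proceed toward reclaim/sync compaction */
}
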