author    Vlastimil Babka <vbabka@suse.cz>    2016-07-28 18:49:30 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>    2016-07-28 19:07:41 -0400
commit    c3486f5376696034d0fcbef8ba70c70cfcb26f51
tree      5faec99d3537ddabaaf79e90d3335f0812e69766 /mm/page_alloc.c
parent    a5508cd83f10f663e05d212cb81f600a3af46e40
mm, compaction: simplify contended compaction handling
Async compaction detects contention either due to failing trylock on zone->lock or lru_lock, or by need_resched(). Since 1f9efdef4f3f ("mm, compaction: khugepaged should not give up due to need_resched()") the code got quite complicated to distinguish these two up to the __alloc_pages_slowpath() level, so different decisions could be taken for khugepaged allocations.

After the recent changes, khugepaged allocations don't check for contended compaction anymore, so we again don't need to distinguish lock and sched contention, and can simplify the current convoluted code a lot.

However, I believe it's also possible to simplify even more and completely remove the check for contended compaction after the initial async compaction for costly orders, which was originally aimed at THP page fault allocations. There are several reasons why this can be done now:

- with the new defaults, THP page faults no longer do reclaim/compaction at all, unless the system admin has overridden the default, or the application has indicated via madvise that it can benefit from THPs. In both cases, it means that the potential extra latency is expected and worth the benefits.

- even if reclaim/compaction proceeds after this patch where it previously wouldn't, the second compaction attempt is still async and will detect the contention and back off, if the contention persists

- there are still heuristics like deferred compaction and pageblock skip bits in place that prevent excessive THP page fault latencies

Link: http://lkml.kernel.org/r/20160721073614.24395-9-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Michal Hocko <mhocko@suse.com>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
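For context, below is a minimal, standalone C sketch of the decision this patch removes, based purely on the hunks that follow: before the patch, __alloc_pages_direct_compact() translated the contention reason reported by try_to_compact_pages() into COMPACT_CONTENDED (always for lock contention, and for sched contention unless the caller is a kernel thread such as khugepaged), after which __alloc_pages_slowpath() bailed out; after the patch no contention reason is propagated at all. The enum values and the pf_kthread flag merely mirror identifiers visible in the diff; the types, helper names, and the printf driver are simplified assumptions for illustration, not kernel code.

/*
 * Standalone illustration of the contention handling removed by this
 * patch. The enum values and the pf_kthread flag mirror identifiers
 * from the diff (COMPACT_CONTENDED_LOCK, COMPACT_CONTENDED_SCHED,
 * PF_KTHREAD); everything else is a simplified assumption, not
 * kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

enum contended_reason { CONTENDED_NONE, CONTENDED_LOCK, CONTENDED_SCHED };

/* Old behaviour: turn the reported contention reason into a decision
 * to give up on compaction (COMPACT_CONTENDED) for this allocation. */
static bool old_gives_up(enum contended_reason reason, bool pf_kthread)
{
	if (reason == CONTENDED_LOCK)
		return true;	/* zone->lock or lru_lock was contended */
	if (reason == CONTENDED_SCHED && !pf_kthread)
		return true;	/* need_resched(), and not khugepaged */
	return false;
}

/* New behaviour: no contention reason is propagated to the caller, so
 * it never gives up at this point; a later async compaction attempt
 * still detects contention and backs off by itself. */
static bool new_gives_up(void)
{
	return false;
}

int main(void)
{
	printf("old: lock contention, page fault  -> give up: %d\n",
	       old_gives_up(CONTENDED_LOCK, false));
	printf("old: sched contention, khugepaged -> give up: %d\n",
	       old_gives_up(CONTENDED_SCHED, true));
	printf("new: any contention               -> give up: %d\n",
	       new_gives_up());
	return 0;
}

As the commit message argues, contention is still handled after the patch: the subsequent async compaction attempt detects it and backs off, and deferred compaction plus pageblock skip bits bound the added latency.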
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 28
1 file changed, 1 insertion(+), 27 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 26c6fe74f5c5..ea759b935360 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3099,14 +3099,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 		enum compact_priority prio, enum compact_result *compact_result)
 {
 	struct page *page;
-	int contended_compaction;
 
 	if (!order)
 		return NULL;
 
 	current->flags |= PF_MEMALLOC;
 	*compact_result = try_to_compact_pages(gfp_mask, order, alloc_flags, ac,
-						prio, &contended_compaction);
+						prio);
 	current->flags &= ~PF_MEMALLOC;
 
 	if (*compact_result <= COMPACT_INACTIVE)
@@ -3135,24 +3134,6 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
 	 */
 	count_vm_event(COMPACTFAIL);
 
-	/*
-	 * In all zones where compaction was attempted (and not
-	 * deferred or skipped), lock contention has been detected.
-	 * For THP allocation we do not want to disrupt the others
-	 * so we fallback to base pages instead.
-	 */
-	if (contended_compaction == COMPACT_CONTENDED_LOCK)
-		*compact_result = COMPACT_CONTENDED;
-
-	/*
-	 * If compaction was aborted due to need_resched(), we do not
-	 * want to further increase allocation latency, unless it is
-	 * khugepaged trying to collapse.
-	 */
-	if (contended_compaction == COMPACT_CONTENDED_SCHED
-					&& !(current->flags & PF_KTHREAD))
-		*compact_result = COMPACT_CONTENDED;
-
 	cond_resched();
 
 	return NULL;
@@ -3548,13 +3529,6 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 		goto nopage;
 
 	/*
-	 * Compaction is contended so rather back off than cause
-	 * excessive stalls.
-	 */
-	if (compact_result == COMPACT_CONTENDED)
-		goto nopage;
-
-	/*
 	 * Looks like reclaim/compaction is worth trying, but
 	 * sync compaction could be very expensive, so keep
 	 * using async compaction.