summaryrefslogtreecommitdiffstats
path: root/mm
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2017-05-08 18:54:49 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2017-05-08 20:15:10 -0400
commit282722b0d258ec23fc79d80165418fee83f01736 (patch)
tree01b33534bae259a2ad1ce7acf98e5b5632bb467b /mm
parentd39773a0622c267fef3f79e3b1f0e7bdbad8a1a8 (diff)
mm, compaction: restrict async compaction to pageblocks of same migratetype
The migrate scanner in async compaction is currently limited to MIGRATE_MOVABLE pageblocks. This is a heuristic intended to reduce latency, based on the assumption that non-MOVABLE pageblocks are unlikely to contain movable pages. However, with the exception of THP's, most high-order allocations are not movable. Should the async compaction succeed, this increases the chance that the non-MOVABLE allocations will fallback to a MOVABLE pageblock, making the long-term fragmentation worse. This patch attempts to help the situation by changing async direct compaction so that the migrate scanner only scans the pageblocks of the requested migratetype. If it's a non-MOVABLE type and there are such pageblocks that do contain movable pages, chances are that the allocation can succeed within one of such pageblocks, removing the need for a fallback. If that fails, the subsequent sync attempt will ignore this restriction. In testing based on 4.9 kernel with stress-highalloc from mmtests configured for order-4 GFP_KERNEL allocations, this patch has reduced the number of unmovable allocations falling back to movable pageblocks by 30%. The number of movable allocations falling back is reduced by 12%. Link: http://lkml.kernel.org/r/20170307131545.28577-8-vbabka@suse.cz Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Mel Gorman <mgorman@techsingularity.net> Cc: Johannes Weiner <hannes@cmpxchg.org> Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/compaction.c11
-rw-r--r--mm/page_alloc.c20
2 files changed, 22 insertions, 9 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 365b3c8ae943..206847d35978 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -986,10 +986,17 @@ isolate_migratepages_range(struct compact_control *cc, unsigned long start_pfn,
 static bool suitable_migration_source(struct compact_control *cc,
 							struct page *page)
 {
-	if (cc->mode != MIGRATE_ASYNC)
+	int block_mt;
+
+	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
 		return true;
 
-	return is_migrate_movable(get_pageblock_migratetype(page));
+	block_mt = get_pageblock_migratetype(page);
+
+	if (cc->migratetype == MIGRATE_MOVABLE)
+		return is_migrate_movable(block_mt);
+	else
+		return block_mt == cc->migratetype;
 }
 
 /* Returns true if the page is within a block suitable for migration to */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d90792addeb9..e7486afa7fa7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3665,6 +3665,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
 					struct alloc_context *ac)
 {
 	bool can_direct_reclaim = gfp_mask & __GFP_DIRECT_RECLAIM;
+	const bool costly_order = order > PAGE_ALLOC_COSTLY_ORDER;
 	struct page *page = NULL;
 	unsigned int alloc_flags;
 	unsigned long did_some_progress;
@@ -3732,12 +3733,17 @@ retry_cpuset:
 
 	/*
 	 * For costly allocations, try direct compaction first, as it's likely
-	 * that we have enough base pages and don't need to reclaim. Don't try
-	 * that for allocations that are allowed to ignore watermarks, as the
-	 * ALLOC_NO_WATERMARKS attempt didn't yet happen.
+	 * that we have enough base pages and don't need to reclaim. For non-
+	 * movable high-order allocations, do that as well, as compaction will
+	 * try prevent permanent fragmentation by migrating from blocks of the
+	 * same migratetype.
+	 * Don't try this for allocations that are allowed to ignore
+	 * watermarks, as the ALLOC_NO_WATERMARKS attempt didn't yet happen.
 	 */
-	if (can_direct_reclaim && order > PAGE_ALLOC_COSTLY_ORDER &&
-		!gfp_pfmemalloc_allowed(gfp_mask)) {
+	if (can_direct_reclaim &&
+			(costly_order ||
+			   (order > 0 && ac->migratetype != MIGRATE_MOVABLE))
+			&& !gfp_pfmemalloc_allowed(gfp_mask)) {
 		page = __alloc_pages_direct_compact(gfp_mask, order,
 						alloc_flags, ac,
 						INIT_COMPACT_PRIORITY,
@@ -3749,7 +3755,7 @@ retry_cpuset:
 		 * Checks for costly allocations with __GFP_NORETRY, which
 		 * includes THP page fault allocations
 		 */
-		if (gfp_mask & __GFP_NORETRY) {
+		if (costly_order && (gfp_mask & __GFP_NORETRY)) {
 			/*
 			 * If compaction is deferred for high-order allocations,
 			 * it is because sync compaction recently failed. If
@@ -3830,7 +3836,7 @@ retry:
 	 * Do not retry costly high order allocations unless they are
 	 * __GFP_REPEAT
 	 */
-	if (order > PAGE_ALLOC_COSTLY_ORDER && !(gfp_mask & __GFP_REPEAT))
+	if (costly_order && !(gfp_mask & __GFP_REPEAT))
 		goto nopage;
 
 	if (should_reclaim_retry(gfp_mask, order, ac, alloc_flags,