summaryrefslogtreecommitdiffstats
path: root/mm/compaction.c
diff options
context:
space:
mode:
authorMel Gorman <mgorman@techsingularity.net>2019-03-05 18:45:14 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2019-03-06 00:07:16 -0500
commit9bebefd59084af7c75b66eeee241bf0777f39b88 (patch)
tree51fafee9e4d0268f9577a0c37f7753c483c80a40 /mm/compaction.c
parentcb2dcaf023c2cf12d45289c82d4030d33f7df73e (diff)
mm, compaction: check early for huge pages encountered by the migration scanner
When scanning for sources or targets, PageCompound is checked for huge pages as they can be skipped quickly but it happens relatively late after a lot of setup and checking. This patch short-cuts the check to make it earlier. It might still change when the lock is acquired but this has less overhead overall. The free scanner advances but the migration scanner does not. Typically the free scanner encounters more movable blocks that change state over the lifetime of the system and also tends to scan more aggressively as it's actively filling its portion of the physical address space with data. This could change in the future but for the moment, this worked better in practice and incurred fewer scan restarts. The impact on latency and allocation success rates is marginal but the free scan rates are reduced by 15% and system CPU usage is reduced by 3.3%. The 2-socket results are not materially different. Link: http://lkml.kernel.org/r/20190118175136.31341-15-mgorman@techsingularity.net Signed-off-by: Mel Gorman <mgorman@techsingularity.net> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: Dan Carpenter <dan.carpenter@oracle.com> Cc: David Rientjes <rientjes@google.com> Cc: YueHaibing <yuehaibing@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--mm/compaction.c16
1 file changed, 12 insertions(+), 4 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 5325211398f8..e609415059e8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1061,6 +1061,9 @@ static bool suitable_migration_source(struct compact_control *cc,
 {
 	int block_mt;
 
+	if (pageblock_skip_persistent(page))
+		return false;
+
 	if ((cc->mode != MIGRATE_ASYNC) || !cc->direct_compaction)
 		return true;
 
@@ -1697,12 +1700,17 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 			continue;
 
 		/*
-		 * For async compaction, also only scan in MOVABLE blocks.
-		 * Async compaction is optimistic to see if the minimum amount
-		 * of work satisfies the allocation.
+		 * For async compaction, also only scan in MOVABLE blocks
+		 * without huge pages. Async compaction is optimistic to see
+		 * if the minimum amount of work satisfies the allocation.
+		 * The cached PFN is updated as it's possible that all
+		 * remaining blocks between source and target are unsuitable
+		 * and the compaction scanners fail to meet.
 		 */
-		if (!suitable_migration_source(cc, page))
+		if (!suitable_migration_source(cc, page)) {
+			update_cached_migrate(cc, block_end_pfn);
 			continue;
+		}
 
 		/* Perform the isolation */
 		low_pfn = isolate_migratepages_block(cc, low_pfn,