summaryrefslogtreecommitdiffstats
path: root/mm/compaction.c
diff options
context:
space:
mode:
authorVlastimil Babka <vbabka@suse.cz>2015-09-08 18:02:36 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2015-09-08 18:35:28 -0400
commitf2849aa09d4fbc4145ebb5dc96187c9ab967f5cf (patch)
tree37695139d97fac75efd794f140a370abdcbb9f2a /mm/compaction.c
parent1fc524d74cf40072a2de3f74a920818398dbff30 (diff)
mm, compaction: more robust check for scanners meeting
Assorted compaction cleanups and optimizations. The interesting patches are 4 and 5. In 4, skipping of compound pages in single iteration is improved for migration scanner, so it works also for !PageLRU compound pages such as hugetlbfs, slab etc. Patch 5 introduces this kind of skipping in the free scanner. The trick is that we can read compound_order() without any protection, if we are careful to filter out values larger than MAX_ORDER. The only danger is that we skip too much. The same trick was already used for reading the freepage order in the migrate scanner. To demonstrate improvements of Patches 4 and 5 I've run stress-highalloc from mmtests, set to simulate THP allocations (including __GFP_COMP) on a 4GB system where 1GB was occupied by hugetlbfs pages. I'll include just the relevant stats: Patch 3 Patch 4 Patch 5 Compaction stalls 7523 7529 7515 Compaction success 323 304 322 Compaction failures 7200 7224 7192 Page migrate success 247778 264395 240737 Page migrate failure 15358 33184 21621 Compaction pages isolated 906928 980192 909983 Compaction migrate scanned 2005277 1692805 1498800 Compaction free scanned 13255284 11539986 9011276 Compaction cost 288 305 277 With 5 iterations per patch, the results are still noisy, but we can see that Patch 4 does reduce migrate_scanned by 15% thanks to skipping the hugetlbfs pages at once. Interestingly, free_scanned is also reduced and I have no idea why. Patch 5 further reduces free_scanned as expected, by 15%. Other stats are unaffected modulo noise. [1] https://lkml.org/lkml/2015/1/19/158 This patch (of 5): Compaction should finish when the migration and free scanner meet, i.e. they reach the same pageblock. Currently however, the test in compact_finished() simply compares the exact pfns, which may yield a false negative when the free scanner position is in the middle of a pageblock and the migration scanner reaches the beginning of the same pageblock. 
This hasn't been a problem until commit e14c720efdd7 ("mm, compaction: remember position within pageblock in free pages scanner") allowed the free scanner position to be in the middle of a pageblock between invocations. The hot-fix 1d5bfe1ffb5b ("mm, compaction: prevent infinite loop in compact_zone") prevented the issue by adding a special check in the migration scanner to satisfy the current detection of scanners meeting. However, the proper fix is to make the detection more robust. This patch introduces the compact_scanners_met() function that returns true when the free scanner position is in the same or lower pageblock than the migration scanner. The special case in isolate_migratepages() introduced by 1d5bfe1ffb5b is removed. Suggested-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Signed-off-by: Vlastimil Babka <vbabka@suse.cz> Cc: Minchan Kim <minchan@kernel.org> Acked-by: Mel Gorman <mgorman@suse.de> Acked-by: Joonsoo Kim <iamjoonsoo.kim@lge.com> Acked-by: Michal Nazarewicz <mina86@mina86.com> Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com> Cc: Christoph Lameter <cl@linux.com> Acked-by: Rik van Riel <riel@redhat.com> Cc: David Rientjes <rientjes@google.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--mm/compaction.c22
1 files changed, 14 insertions, 8 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 018f08da99a2..7077b81a4893 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -898,6 +898,16 @@ static bool suitable_migration_target(struct page *page)
898} 898}
899 899
900/* 900/*
901 * Test whether the free scanner has reached the same or lower pageblock than
902 * the migration scanner, and compaction should thus terminate.
903 */
904static inline bool compact_scanners_met(struct compact_control *cc)
905{
906 return (cc->free_pfn >> pageblock_order)
907 <= (cc->migrate_pfn >> pageblock_order);
908}
909
910/*
901 * Based on information in the current compact_control, find blocks 911 * Based on information in the current compact_control, find blocks
902 * suitable for isolating free pages from and then isolate them. 912 * suitable for isolating free pages from and then isolate them.
903 */ 913 */
@@ -1127,12 +1137,8 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
1127 } 1137 }
1128 1138
1129 acct_isolated(zone, cc); 1139 acct_isolated(zone, cc);
1130 /* 1140 /* Record where migration scanner will be restarted. */
1131 * Record where migration scanner will be restarted. If we end up in 1141 cc->migrate_pfn = low_pfn;
1132 * the same pageblock as the free scanner, make the scanners fully
1133 * meet so that compact_finished() terminates compaction.
1134 */
1135 cc->migrate_pfn = (end_pfn <= cc->free_pfn) ? low_pfn : cc->free_pfn;
1136 1142
1137 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE; 1143 return cc->nr_migratepages ? ISOLATE_SUCCESS : ISOLATE_NONE;
1138} 1144}
@@ -1147,7 +1153,7 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
1147 return COMPACT_PARTIAL; 1153 return COMPACT_PARTIAL;
1148 1154
1149 /* Compaction run completes if the migrate and free scanner meet */ 1155 /* Compaction run completes if the migrate and free scanner meet */
1150 if (cc->free_pfn <= cc->migrate_pfn) { 1156 if (compact_scanners_met(cc)) {
1151 /* Let the next compaction start anew. */ 1157 /* Let the next compaction start anew. */
1152 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn; 1158 zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
1153 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn; 1159 zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
@@ -1376,7 +1382,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
1376 * migrate_pages() may return -ENOMEM when scanners meet 1382 * migrate_pages() may return -ENOMEM when scanners meet
1377 * and we want compact_finished() to detect it 1383 * and we want compact_finished() to detect it
1378 */ 1384 */
1379 if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) { 1385 if (err == -ENOMEM && !compact_scanners_met(cc)) {
1380 ret = COMPACT_PARTIAL; 1386 ret = COMPACT_PARTIAL;
1381 goto out; 1387 goto out;
1382 } 1388 }