author     Vlastimil Babka <vbabka@suse.cz>  2017-05-08 18:54:52 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-08 20:15:10 -0400
commit     baf6a9a1db5a40ebfa5d3e761428d3deb2cc3a3b (patch)
tree       e99b3cf3b4ed0d48a9d3bc983d794cb4e0758483 /mm/compaction.c
parent     282722b0d258ec23fc79d80165418fee83f01736 (diff)
mm, compaction: finish whole pageblock to reduce fragmentation
The main goal of direct compaction is to form a high-order page for allocation, but it should also help against long-term fragmentation when possible.

Most lower-than-pageblock-order compactions are for non-movable allocations, which means that if we compact in a movable pageblock and terminate as soon as we create the high-order page, it's unlikely that the fallback heuristics will claim the whole block. Instead there might be a single unmovable page in a pageblock full of movable pages, and the next unmovable allocation might pick another pageblock and increase long-term fragmentation.

To help against such scenarios, this patch changes the termination criteria for compaction so that the current pageblock is finished even though the high-order page already exists. Note that it might be possible that the high-order page formed elsewhere in the zone due to parallel activity, but this patch doesn't try to detect that.

This is only done with sync compaction, because async compaction is limited to pageblocks of the same migratetype, where it cannot result in a migratetype fallback. (Async compaction also eagerly skips order-aligned blocks where isolation fails, which is against the goal of migrating away as much of the pageblock as possible.)

As a result of this patch, long-term memory fragmentation should be reduced.

In testing based on a 4.9 kernel with stress-highalloc from mmtests configured for order-4 GFP_KERNEL allocations, this patch has reduced the number of unmovable allocations falling back to movable pageblocks by 20%. The number

Link: http://lkml.kernel.org/r/20170307131545.28577-9-vbabka@suse.cz
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Acked-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
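To make the termination rule easier to follow before reading the diff, here is a minimal standalone C sketch of the decision it implements. may_terminate() is a hypothetical helper for illustration (in the patch the logic lives inline in __compact_finished()), is_async stands in for the kernel's cc->mode == MIGRATE_ASYNC check, and the pageblock size is assumed to be 2^9 pages (a common x86 default):

	/*
	 * Minimal sketch of the new termination rule; names and layout
	 * simplified, assumptions noted above.
	 */
	#include <stdbool.h>

	#define PAGEBLOCK_NR_PAGES	(1UL << 9)	/* assumption, not from the patch */
	#define IS_ALIGNED(x, a)	(((x) & ((a) - 1)) == 0)

	struct compact_control {
		unsigned long migrate_pfn;	/* next pfn the migration scanner will take */
		bool finishing_block;		/* set when the pageblock must be finished */
		bool is_async;			/* stand-in for cc->mode == MIGRATE_ASYNC */
	};

	/* Return true if compaction may stop now that a suitable fallback page
	 * exists; false if the current pageblock must be compacted to its end. */
	static bool may_terminate(struct compact_control *cc, bool movable_alloc)
	{
		/* Movable allocations can fall back to any pageblock without
		 * causing long-term fragmentation: stop immediately. */
		if (movable_alloc)
			return true;

		/* Async compaction never crosses migratetypes, so stopping early
		 * cannot leave an unmovable page in a movable block; likewise
		 * stop if the scanner already sits on a pageblock boundary. */
		if (cc->is_async || IS_ALIGNED(cc->migrate_pfn, PAGEBLOCK_NR_PAGES))
			return true;

		/* Otherwise remember to keep going until the pageblock is done;
		 * the flag is rechecked on the next pass. */
		cc->finishing_block = true;
		return false;
	}

On the next invocation the finishing_block branch clears the flag once migrate_pfn reaches a pageblock boundary, which corresponds to the first hunk of the diff below.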
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c | 36
1 file changed, 34 insertions(+), 2 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 206847d35978..613c59e928cb 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1318,6 +1318,17 @@ static enum compact_result __compact_finished(struct zone *zone,
 	if (is_via_compact_memory(cc->order))
 		return COMPACT_CONTINUE;
 
+	if (cc->finishing_block) {
+		/*
+		 * We have finished the pageblock, but better check again that
+		 * we really succeeded.
+		 */
+		if (IS_ALIGNED(cc->migrate_pfn, pageblock_nr_pages))
+			cc->finishing_block = false;
+		else
+			return COMPACT_CONTINUE;
+	}
+
 	/* Direct compactor: Is a suitable page free? */
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		struct free_area *area = &zone->free_area[order];
@@ -1338,8 +1349,29 @@ static enum compact_result __compact_finished(struct zone *zone,
 		 * other migratetype buddy lists.
 		 */
 		if (find_suitable_fallback(area, order, migratetype,
-					true, &can_steal) != -1)
-			return COMPACT_SUCCESS;
+					true, &can_steal) != -1) {
+
+			/* movable pages are OK in any pageblock */
+			if (migratetype == MIGRATE_MOVABLE)
+				return COMPACT_SUCCESS;
+
+			/*
+			 * We are stealing for a non-movable allocation. Make
+			 * sure we finish compacting the current pageblock
+			 * first so it is as free as possible and we won't
+			 * have to steal another one soon. This only applies
+			 * to sync compaction, as async compaction operates
+			 * on pageblocks of the same migratetype.
+			 */
+			if (cc->mode == MIGRATE_ASYNC ||
+					IS_ALIGNED(cc->migrate_pfn,
+						pageblock_nr_pages)) {
+				return COMPACT_SUCCESS;
+			}
+
+			cc->finishing_block = true;
+			return COMPACT_CONTINUE;
+		}
 	}
 
 	return COMPACT_NO_SUITABLE_PAGE;
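The diffstat above is limited to mm/compaction.c, but the flag consumed by these hunks also has to be declared somewhere; to the best of my reading the same commit adds it to struct compact_control in mm/internal.h, roughly as sketched here (neighbouring fields elided, comments approximate):

	struct compact_control {
		/* ... existing fields elided ... */
		unsigned long migrate_pfn;	/* isolate_migratepages search base */
		bool finishing_block;		/* Finishing current pageblock */
		/* ... */
	};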