author		Mel Gorman <mgorman@techsingularity.net>	2019-03-05 18:45:28 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-06 00:07:17 -0500
commit		d097a6f63522547dfc7c75c7084a05b6a7f9e838 (patch)
tree		6dad157661546b41dd606af81428cc951b463cba /mm/compaction.c
parent		cf66f0700c8f1d7c7c1c1d7e5e846a1836814601 (diff)
mm, compaction: reduce premature advancement of the migration target scanner
The fast isolation of free pages allows the cached PFN of the free
scanner to advance faster than necessary depending on the contents of
the free list.  The key is that fast_isolate_freepages() can update
zone->compact_cached_free_pfn via isolate_freepages_block().  When the
fast search fails, the linear scan can start from a point that has
skipped valid migration targets, particularly pageblocks with just
low-order free pages.  This can cause the migration source/target
scanners to meet prematurely, causing a reset.

This patch starts by avoiding an update of the pageblock skip
information and cached PFN from isolate_freepages_block() and puts the
responsibility of updating that information in the callers.  The fast
scanner will update the cached PFN if and only if it finds a block that
is higher than the existing cached PFN, and sets the skip hint if the
pageblock is full or nearly full.  The linear scanner will update the
skip information and the cached PFN only when a block is completely
scanned.  The overall effect is that the free scanner advances more
slowly, as it is primarily driven by the linear scanner instead of the
fast search.

                                     5.0.0-rc1              5.0.0-rc1
                               noresched-v3r17       slowfree-v3r17
Amean     fault-both-3      2965.68 (   0.00%)     3036.75 (  -2.40%)
Amean     fault-both-5      3995.90 (   0.00%)     4522.24 * -13.17%*
Amean     fault-both-7      5842.12 (   0.00%)     6365.35 (  -8.96%)
Amean     fault-both-12     9550.87 (   0.00%)    10340.93 (  -8.27%)
Amean     fault-both-18    13304.72 (   0.00%)    14732.46 ( -10.73%)
Amean     fault-both-24    14618.59 (   0.00%)    16288.96 ( -11.43%)
Amean     fault-both-30    16650.96 (   0.00%)    16346.21 (   1.83%)
Amean     fault-both-32    17145.15 (   0.00%)    19317.49 ( -12.67%)

The impact on latency is higher than in the previous version, but it
appears to be due to a slight increase in the free scan rates, which is
a potential side-effect of the patch.  However, this is necessary for
later patches that are more careful about how pageblocks are treated,
as earlier iterations of those patches hit corner cases where the
restarts were punishing and very visible.

Link: http://lkml.kernel.org/r/20190118175136.31341-19-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
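As an illustration of the policy described above, the following is a
minimal userspace sketch of the two update paths.  The struct, the
helper names and PAGEBLOCK_NR_PAGES are simplified stand-ins for the
kernel's compact_control/zone machinery, not the actual implementation:

/*
 * Minimal sketch of the cached-PFN update policy described above.
 * zone_hints, fast_scan_update() and linear_scan_update() are
 * illustrative stand-ins; PAGEBLOCK_NR_PAGES assumes order-9
 * pageblocks with 4K pages.
 */
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES	512UL

struct zone_hints {
	unsigned long compact_cached_free_pfn;
};

/*
 * Fast scanner: move the cached PFN only towards higher blocks, and
 * step back one pageblock so the block that was just found is rescanned
 * by the linear scanner rather than skipped over.
 */
static void fast_scan_update(struct zone_hints *z, unsigned long highest)
{
	if (highest && highest >= z->compact_cached_free_pfn) {
		highest -= PAGEBLOCK_NR_PAGES;
		z->compact_cached_free_pfn = highest;
	}
}

/*
 * Linear scanner: pull the cached PFN back only when the whole
 * pageblock has been scanned.  The free scanner works from high to
 * low PFNs, so "back" here means a lower PFN.
 */
static void linear_scan_update(struct zone_hints *z, unsigned long scan_pfn,
			       unsigned long block_start_pfn,
			       unsigned long block_end_pfn)
{
	if (scan_pfn == block_end_pfn &&
	    block_start_pfn < z->compact_cached_free_pfn)
		z->compact_cached_free_pfn = block_start_pfn;
}

int main(void)
{
	struct zone_hints z = { .compact_cached_free_pfn = 4096 };

	/* A partial block scan no longer advances the cached PFN. */
	linear_scan_update(&z, 3900, 3584, 4096);
	printf("after partial scan: %lu\n", z.compact_cached_free_pfn);

	/* A fully scanned block may pull the cached PFN back. */
	linear_scan_update(&z, 4096, 3584, 4096);
	printf("after full scan:    %lu\n", z.compact_cached_free_pfn);

	/* The fast scanner only ever moves the hint higher. */
	fast_scan_update(&z, 8192);
	printf("after fast hit:     %lu\n", z.compact_cached_free_pfn);

	return 0;
}

The linear_scan_update() half of the sketch corresponds to the new
update_pageblock_skip() call added to isolate_freepages() in the hunks
below; fast_scan_update() corresponds to the change in
fast_isolate_freepages().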
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	27 ++++++++++-----------------
1 file changed, 10 insertions(+), 17 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 9c7d43fd4655..452beef0541e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -330,10 +330,9 @@ static void update_cached_migrate(struct compact_control *cc, unsigned long pfn)
  * future. The information is later cleared by __reset_isolation_suitable().
  */
 static void update_pageblock_skip(struct compact_control *cc,
-			struct page *page, unsigned long nr_isolated)
+			struct page *page, unsigned long pfn)
 {
 	struct zone *zone = cc->zone;
-	unsigned long pfn;
 
 	if (cc->no_set_skip_hint)
 		return;
@@ -341,13 +340,8 @@ static void update_pageblock_skip(struct compact_control *cc,
 	if (!page)
 		return;
 
-	if (nr_isolated)
-		return;
-
 	set_pageblock_skip(page);
 
-	pfn = page_to_pfn(page);
-
 	/* Update where async and sync compaction should restart */
 	if (pfn < zone->compact_cached_free_pfn)
 		zone->compact_cached_free_pfn = pfn;
@@ -365,7 +359,7 @@ static inline bool pageblock_skip_persistent(struct page *page)
 }
 
 static inline void update_pageblock_skip(struct compact_control *cc,
-			struct page *page, unsigned long nr_isolated)
+			struct page *page, unsigned long pfn)
 {
 }
 
@@ -449,7 +443,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 					bool strict)
 {
 	int nr_scanned = 0, total_isolated = 0;
-	struct page *cursor, *valid_page = NULL;
+	struct page *cursor;
 	unsigned long flags = 0;
 	bool locked = false;
 	unsigned long blockpfn = *start_pfn;
@@ -476,9 +470,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 		if (!pfn_valid_within(blockpfn))
 			goto isolate_fail;
 
-		if (!valid_page)
-			valid_page = page;
-
 		/*
 		 * For compound pages such as THP and hugetlbfs, we can save
 		 * potentially a lot of iterations if we skip them at once.
@@ -566,10 +557,6 @@ isolate_fail:
 	if (strict && blockpfn < end_pfn)
 		total_isolated = 0;
 
-	/* Update the pageblock-skip if the whole pageblock was scanned */
-	if (blockpfn == end_pfn)
-		update_pageblock_skip(cc, valid_page, total_isolated);
-
 	cc->total_free_scanned += nr_scanned;
 	if (total_isolated)
 		count_compact_events(COMPACTISOLATED, total_isolated);
@@ -1293,8 +1280,10 @@ fast_isolate_freepages(struct compact_control *cc)
 		}
 	}
 
-	if (highest && highest > cc->zone->compact_cached_free_pfn)
+	if (highest && highest >= cc->zone->compact_cached_free_pfn) {
+		highest -= pageblock_nr_pages;
 		cc->zone->compact_cached_free_pfn = highest;
+	}
 
 	cc->total_free_scanned += nr_scanned;
 	if (!page)
@@ -1374,6 +1363,10 @@ static void isolate_freepages(struct compact_control *cc)
 		isolate_freepages_block(cc, &isolate_start_pfn, block_end_pfn,
 					freelist, false);
 
+		/* Update the skip hint if the full pageblock was scanned */
+		if (isolate_start_pfn == block_end_pfn)
+			update_pageblock_skip(cc, page, block_start_pfn);
+
 		/* Are enough freepages isolated? */
 		if (cc->nr_freepages >= cc->nr_migratepages) {
 			if (isolate_start_pfn >= block_end_pfn) {