path: root/mm/compaction.c
author		Mel Gorman <mgorman@techsingularity.net>	2019-03-05 18:45:24 -0500
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-03-06 00:07:17 -0500
commit		cf66f0700c8f1d7c7c1c1d7e5e846a1836814601
tree		92218a9b9dbf3e22ea532d8968e74824e567ba4c	/mm/compaction.c
parent		cb810ad294d3c3a454e51b12fbb483bbb7096b98
mm, compaction: do not consider a need to reschedule as contention
Scanning on large machines can take a considerable length of time and
eventually need to be rescheduled. This is treated as an abort event but
that's not appropriate as the attempt is likely to be retried after making
numerous checks and taking another cycle through the page allocator. This
patch will check the need to reschedule if necessary but continue the
scanning.

The main benefit is reduced scanning when compaction is taking a long time
or the machine is over-saturated. It also avoids an unnecessary exit of
compaction that ends up being retried by the page allocator in the outer
loop.

                                     5.0.0-rc1              5.0.0-rc1
                              synccached-v3r16        noresched-v3r17
Amean     fault-both-1         0.00 (   0.00%)        0.00 *   0.00%*
Amean     fault-both-3      2958.27 (   0.00%)     2965.68 (  -0.25%)
Amean     fault-both-5      4091.90 (   0.00%)     3995.90 (   2.35%)
Amean     fault-both-7      5803.05 (   0.00%)     5842.12 (  -0.67%)
Amean     fault-both-12     9481.06 (   0.00%)     9550.87 (  -0.74%)
Amean     fault-both-18    14141.51 (   0.00%)    13304.72 (   5.92%)
Amean     fault-both-24    16438.00 (   0.00%)    14618.59 (  11.07%)
Amean     fault-both-30    17531.72 (   0.00%)    16650.96 (   5.02%)
Amean     fault-both-32    17101.96 (   0.00%)    17145.15 (  -0.25%)

Link: http://lkml.kernel.org/r/20190118175136.31341-18-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Dan Carpenter <dan.carpenter@oracle.com>
Cc: David Rientjes <rientjes@google.com>
Cc: YueHaibing <yuehaibing@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
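In outline, the hunks below remove the helper that recorded a pending reschedule as contention for async compaction and replace each of its call sites with a plain cond_resched(). A condensed before/after sketch, taken directly from the diff that follows (compact_control, need_resched() and cond_resched() are the kernel's existing interfaces):

/* Before: needing to reschedule was recorded as contention, which makes
 * async compaction abort and fall back to the page allocator. */
static inline void compact_check_resched(struct compact_control *cc)
{
	/* async compaction aborts if contended */
	if (need_resched()) {
		if (cc->mode == MIGRATE_ASYNC)
			cc->contended = true;

		cond_resched();
	}
}

/* After: each scanner call site simply yields when asked and carries on. */
cond_resched();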
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	23
1 file changed, 4 insertions(+), 19 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 68e3c214bcbd..9c7d43fd4655 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -405,21 +405,6 @@ static bool compact_lock_irqsave(spinlock_t *lock, unsigned long *flags,
 }
 
 /*
- * Aside from avoiding lock contention, compaction also periodically checks
- * need_resched() and records async compaction as contended if necessary.
- */
-static inline void compact_check_resched(struct compact_control *cc)
-{
-	/* async compaction aborts if contended */
-	if (need_resched()) {
-		if (cc->mode == MIGRATE_ASYNC)
-			cc->contended = true;
-
-		cond_resched();
-	}
-}
-
-/*
  * Compaction requires the taking of some coarse locks that are potentially
  * very heavily contended. The lock should be periodically unlocked to avoid
  * having disabled IRQs for a long time, even when there is nobody waiting on
@@ -447,7 +432,7 @@ static bool compact_unlock_should_abort(spinlock_t *lock,
 		return true;
 	}
 
-	compact_check_resched(cc);
+	cond_resched();
 
 	return false;
 }
@@ -736,7 +721,7 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
 		return 0;
 	}
 
-	compact_check_resched(cc);
+	cond_resched();
 
 	if (cc->direct_compaction && (cc->mode == MIGRATE_ASYNC)) {
 		skip_on_failure = true;
@@ -1370,7 +1355,7 @@ static void isolate_freepages(struct compact_control *cc)
 		 * suitable migration targets, so periodically check resched.
 		 */
 		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
-			compact_check_resched(cc);
+			cond_resched();
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 									zone);
@@ -1666,7 +1651,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
 		 * need to schedule.
 		 */
 		if (!(low_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)))
-			compact_check_resched(cc);
+			cond_resched();
 
 		page = pageblock_pfn_to_page(block_start_pfn, block_end_pfn,
 									zone);
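For scale: the periodic check kept in the free and migration scanners fires once every SWAP_CLUSTER_MAX * pageblock_nr_pages PFNs. Assuming a typical x86-64 configuration (SWAP_CLUSTER_MAX = 32, pageblock_nr_pages = 512, 4 KiB pages), that is one cond_resched() point per 16384 page frames, i.e. roughly every 64 MiB of physical address range scanned, so the scanners still yield regularly even though a pending reschedule no longer aborts async compaction.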