author     Vlastimil Babka <vbabka@suse.cz>  2014-06-04 19:10:41 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-04 19:54:11 -0400
commit     be9765722e6b7ece8263cbab857490332339bd6f (patch)
tree       e22047a59a4d45a319e0788ee95d7e22806c55c2 /mm/compaction.c
parent     6e6870d4fd19e25332e7d975604497c8568949d9 (diff)
mm, compaction: properly signal and act upon lock and need_sched() contention
Compaction uses the compact_checklock_irqsave() function to periodically check for lock contention and need_resched(), and to either abort async compaction or to free the lock, schedule and retake the lock. When aborting, cc->contended is set to signal the contended state to the caller. Two problems have been identified in this mechanism.

First, compaction also calls cond_resched() directly in both scanners when no lock is yet taken. This call neither aborts async compaction nor sets cc->contended appropriately. This patch introduces a new compact_should_abort() function to achieve both. In isolate_freepages(), the check frequency is reduced to once per SWAP_CLUSTER_MAX pageblocks to match what the migration scanner does in the preliminary page checks. When a pageblock is found suitable for calling isolate_freepages_block(), the checks within that function are done at a higher frequency.

Second, isolate_freepages() does not check whether isolate_freepages_block() aborted due to contention, and advances to the next pageblock. This violates the principle of aborting on contention, and might result in pageblocks not being scanned completely, since the scanning cursor is advanced. This problem was noticed in the code by Joonsoo Kim when reviewing related patches. This patch makes isolate_freepages_block() check the cc->contended flag and abort.

In case isolate_freepages() has already isolated some pages before aborting due to contention, page migration will proceed, which is OK since we do not want to waste the work that has been done, and page migration has its own checks for contention. However, we do not want another isolation attempt by either of the scanners, so a cc->contended flag check is also added to compaction_alloc() and compact_finished() to make sure compaction is aborted right after the migration.

The outcome of the patch should be reduced lock contention by async compaction and lower latencies for higher-order allocations where direct compaction is involved.

[akpm@linux-foundation.org: fix typo in comment]

Reported-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Signed-off-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Minchan Kim <minchan@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Cc: Bartlomiej Zolnierkiewicz <b.zolnierkie@samsung.com>
Cc: Michal Nazarewicz <mina86@mina86.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: Rik van Riel <riel@redhat.com>
Acked-by: Michal Nazarewicz <mina86@mina86.com>
Tested-by: Shawn Guo <shawn.guo@linaro.org>
Tested-by: Kevin Hilman <khilman@linaro.org>
Tested-by: Stephen Warren <swarren@nvidia.com>
Tested-by: Fabio Estevam <fabio.estevam@freescale.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Stephen Rothwell <sfr@canb.auug.org.au>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
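To make the reduced check frequency concrete: the free scanner steps block_start_pfn back by one pageblock per iteration, so the test !(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages)) only passes once every SWAP_CLUSTER_MAX pageblocks. The user-space sketch below is illustrative only, not kernel code; SWAP_CLUSTER_MAX is 32 in the kernel, while the pageblock size of 512 pages is an assumed, configuration-dependent value.

#include <stdio.h>

#define SWAP_CLUSTER_MAX	32	/* kernel value */
#define PAGEBLOCK_NR_PAGES	512UL	/* assumed; depends on kernel config */

int main(void)
{
	unsigned long block_start_pfn = 1UL << 20;	/* arbitrary aligned start */
	unsigned long low_pfn = block_start_pfn - 256 * PAGEBLOCK_NR_PAGES;
	unsigned long scanned = 0, checks = 0;

	/* Walk backwards one pageblock at a time, like the free scanner. */
	for (; block_start_pfn > low_pfn; block_start_pfn -= PAGEBLOCK_NR_PAGES) {
		scanned++;
		/* Contention/need_resched() is only considered on aligned pfns. */
		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * PAGEBLOCK_NR_PAGES)))
			checks++;
	}

	printf("%lu pageblocks scanned, %lu abort checks\n", scanned, checks);
	/* prints: 256 pageblocks scanned, 8 abort checks */
	return 0;
}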
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  54
1 file changed, 44 insertions(+), 10 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 58441220b953..21bf292b642a 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -222,6 +222,30 @@ static bool compact_checklock_irqsave(spinlock_t *lock, unsigned long *flags,
 	return true;
 }
 
+/*
+ * Aside from avoiding lock contention, compaction also periodically checks
+ * need_resched() and either schedules in sync compaction or aborts async
+ * compaction. This is similar to what compact_checklock_irqsave() does, but
+ * is used where no lock is concerned.
+ *
+ * Returns false when no scheduling was needed, or sync compaction scheduled.
+ * Returns true when async compaction should abort.
+ */
+static inline bool compact_should_abort(struct compact_control *cc)
+{
+	/* async compaction aborts if contended */
+	if (need_resched()) {
+		if (cc->mode == MIGRATE_ASYNC) {
+			cc->contended = true;
+			return true;
+		}
+
+		cond_resched();
+	}
+
+	return false;
+}
+
 /* Returns true if the page is within a block suitable for migration to */
 static bool suitable_migration_target(struct page *page)
 {
@@ -494,11 +518,8 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 			return 0;
 	}
 
-	if (cond_resched()) {
-		/* Async terminates prematurely on need_resched() */
-		if (cc->mode == MIGRATE_ASYNC)
-			return 0;
-	}
+	if (compact_should_abort(cc))
+		return 0;
 
 	/* Time to isolate some pages for migration */
 	for (; low_pfn < end_pfn; low_pfn++) {
@@ -720,9 +741,11 @@ static void isolate_freepages(struct zone *zone,
 		/*
 		 * This can iterate a massively long zone without finding any
 		 * suitable migration targets, so periodically check if we need
-		 * to schedule.
+		 * to schedule, or even abort async compaction.
 		 */
-		cond_resched();
+		if (!(block_start_pfn % (SWAP_CLUSTER_MAX * pageblock_nr_pages))
+						&& compact_should_abort(cc))
+			break;
 
 		if (!pfn_valid(block_start_pfn))
 			continue;
@@ -760,6 +783,13 @@ static void isolate_freepages(struct zone *zone,
 		 */
 		if (isolated)
 			cc->finished_update_free = true;
+
+		/*
+		 * isolate_freepages_block() might have aborted due to async
+		 * compaction being contended
+		 */
+		if (cc->contended)
+			break;
 	}
 
 	/* split_free_page does not map the pages */
@@ -786,9 +816,13 @@ static struct page *compaction_alloc(struct page *migratepage,
 	struct compact_control *cc = (struct compact_control *)data;
 	struct page *freepage;
 
-	/* Isolate free pages if necessary */
+	/*
+	 * Isolate free pages if necessary, and if we are not aborting due to
+	 * contention.
+	 */
 	if (list_empty(&cc->freepages)) {
-		isolate_freepages(cc->zone, cc);
+		if (!cc->contended)
+			isolate_freepages(cc->zone, cc);
 
 		if (list_empty(&cc->freepages))
 			return NULL;
@@ -858,7 +892,7 @@ static int compact_finished(struct zone *zone,
 	unsigned int order;
 	unsigned long watermark;
 
-	if (fatal_signal_pending(current))
+	if (cc->contended || fatal_signal_pending(current))
 		return COMPACT_PARTIAL;
 
 	/* Compaction run completes if the migrate and free scanner meet */
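As a closing illustration of how the contended flag short-circuits the rest of a run: once a scanner sets it, pages that were already isolated are still migrated (the work is not thrown away), but no further isolation is attempted and the run then ends, as compact_finished() now returns COMPACT_PARTIAL. The sketch below is a simplified user-space analogue with made-up types, not the kernel's compact_control or compaction_alloc().

#include <stdbool.h>
#include <stdio.h>

struct cc_sketch {
	bool contended;		/* set by a scanner on contention */
	int nr_freepages;	/* target pages isolated before the abort */
};

/* Analogue of compaction_alloc(): hand out already-isolated target pages,
 * but never start another isolation pass once contention was signalled. */
static bool take_migration_target(struct cc_sketch *cc)
{
	if (cc->nr_freepages == 0) {
		if (!cc->contended)
			cc->nr_freepages = 8;	/* pretend isolation found pages */
		if (cc->nr_freepages == 0)
			return false;
	}
	cc->nr_freepages--;
	return true;
}

int main(void)
{
	/* The free scanner aborted after isolating 3 target pages. */
	struct cc_sketch cc = { .contended = true, .nr_freepages = 3 };
	int migrated = 0;

	/* Already-isolated pages are still consumed by migration ... */
	while (take_migration_target(&cc))
		migrated++;

	/* ... and the contended flag then ends the run early, much as the
	 * cc->contended check in compact_finished() does. */
	printf("migrated %d pages, run %s\n", migrated,
	       cc.contended ? "aborted (COMPACT_PARTIAL)" : "continues");
	return 0;
}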