path: root/mm/compaction.c
author     David Rientjes <rientjes@google.com>  2014-06-04 19:08:27 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2014-06-04 19:54:06 -0400
commit     35979ef3393110ff3c12c6b94552208d3bdf1a36 (patch)
tree       cb91854e96fce599e999a958c0a1d3036be3a7af /mm/compaction.c
parent     d53aea3d46d64e95da9952887969f7533b9ab25e (diff)
mm, compaction: add per-zone migration pfn cache for async compaction
Each zone has a cached migration scanner pfn for memory compaction so that subsequent calls to memory compaction can start where the previous call left off.

Currently, the compaction migration scanner only updates the per-zone cached pfn when pageblocks were not skipped for async compaction. This creates a dependency on calling sync compaction to prevent subsequent calls to async compaction from scanning an enormous number of non-MOVABLE pageblocks each time async compaction is called. On large machines, this can be very expensive.

This patch adds a per-zone cached migration scanner pfn used only for async compaction. It is updated every time a pageblock has been scanned in its entirety and no pages from it were successfully isolated. The cached migration scanner pfn for sync compaction is updated only when compaction is called in sync mode.

Signed-off-by: David Rientjes <rientjes@google.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Greg Thelen <gthelen@google.com>
Cc: Mel Gorman <mgorman@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
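For illustration only (not part of the patch): a minimal userspace sketch of the two-slot cache scheme described above, with slot 0 consulted by async compaction and slot 1 by sync compaction. It omits the skip-hint and finished_update_migrate logic; the struct and helper names here are hypothetical, and only the [0]/[1] indexing and the cc->sync-style mode selection mirror the patch.

/*
 * Illustrative userspace model, not kernel code: an async pass advances
 * only the async slot, a sync pass advances both, and a new compaction
 * run starts from the slot matching its own mode (as compact_zone() does
 * with zone->compact_cached_migrate_pfn[cc->sync] in the patch).
 */
#include <stdbool.h>
#include <stdio.h>

struct migrate_pfn_cache {
        unsigned long pfn[2];           /* [0] = async, [1] = sync */
};

/* Where the next compaction pass of the given mode should start scanning. */
static unsigned long restart_pfn(const struct migrate_pfn_cache *c, bool sync)
{
        return c->pfn[sync];
}

/* Record that a pageblock ending at pfn was fully scanned without isolating pages. */
static void record_scanned(struct migrate_pfn_cache *c, unsigned long pfn, bool sync)
{
        if (pfn > c->pfn[0])
                c->pfn[0] = pfn;        /* async cache advances for every pass */
        if (sync && pfn > c->pfn[1])
                c->pfn[1] = pfn;        /* sync cache advances only during sync passes */
}

int main(void)
{
        struct migrate_pfn_cache c = { { 0, 0 } };

        record_scanned(&c, 512, false);         /* async pass reached pfn 512 */
        record_scanned(&c, 1024, true);         /* sync pass reached pfn 1024 */

        printf("async restarts at %lu, sync restarts at %lu\n",
               restart_pfn(&c, false), restart_pfn(&c, true));
        return 0;
}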
Diffstat (limited to 'mm/compaction.c')
-rw-r--r--  mm/compaction.c  66
1 file changed, 40 insertions(+), 26 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index d0c7c994e11b..70c0f8cda33f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
         unsigned long end_pfn = zone_end_pfn(zone);
         unsigned long pfn;
 
-        zone->compact_cached_migrate_pfn = start_pfn;
+        zone->compact_cached_migrate_pfn[0] = start_pfn;
+        zone->compact_cached_migrate_pfn[1] = start_pfn;
         zone->compact_cached_free_pfn = end_pfn;
         zone->compact_blockskip_flush = false;
 
@@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
  */
 static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
-                       bool migrate_scanner)
+                       bool set_unsuitable, bool migrate_scanner)
 {
         struct zone *zone = cc->zone;
+        unsigned long pfn;
 
         if (cc->ignore_skip_hint)
                 return;
@@ -141,20 +143,31 @@ static void update_pageblock_skip(struct compact_control *cc,
         if (!page)
                 return;
 
-        if (!nr_isolated) {
-                unsigned long pfn = page_to_pfn(page);
-                set_pageblock_skip(page);
+        if (nr_isolated)
+                return;
+
+        /*
+         * Only skip pageblocks when all forms of compaction will be known to
+         * fail in the near future.
+         */
+        if (set_unsuitable)
+                set_pageblock_skip(page);
 
-                /* Update where compaction should restart */
-                if (migrate_scanner) {
-                        if (!cc->finished_update_migrate &&
-                            pfn > zone->compact_cached_migrate_pfn)
-                                zone->compact_cached_migrate_pfn = pfn;
-                } else {
-                        if (!cc->finished_update_free &&
-                            pfn < zone->compact_cached_free_pfn)
-                                zone->compact_cached_free_pfn = pfn;
-                }
-        }
+        pfn = page_to_pfn(page);
+
+        /* Update where async and sync compaction should restart */
+        if (migrate_scanner) {
+                if (cc->finished_update_migrate)
+                        return;
+                if (pfn > zone->compact_cached_migrate_pfn[0])
+                        zone->compact_cached_migrate_pfn[0] = pfn;
+                if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1])
+                        zone->compact_cached_migrate_pfn[1] = pfn;
+        } else {
+                if (cc->finished_update_free)
+                        return;
+                if (pfn < zone->compact_cached_free_pfn)
+                        zone->compact_cached_free_pfn = pfn;
+        }
 }
 #else
160#else 173#else
@@ -166,7 +179,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 
 static void update_pageblock_skip(struct compact_control *cc,
                        struct page *page, unsigned long nr_isolated,
-                       bool migrate_scanner)
+                       bool set_unsuitable, bool migrate_scanner)
 {
 }
 #endif /* CONFIG_COMPACTION */
@@ -323,7 +336,8 @@ isolate_fail:
 
         /* Update the pageblock-skip if the whole pageblock was scanned */
         if (blockpfn == end_pfn)
-                update_pageblock_skip(cc, valid_page, total_isolated, false);
+                update_pageblock_skip(cc, valid_page, total_isolated, true,
+                                      false);
 
         count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
         if (total_isolated)
@@ -458,7 +472,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         unsigned long flags;
         bool locked = false;
         struct page *page = NULL, *valid_page = NULL;
-        bool skipped_async_unsuitable = false;
+        bool set_unsuitable = true;
         const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
                                     (unevictable ? ISOLATE_UNEVICTABLE : 0);
 
@@ -535,8 +549,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                         */
                        mt = get_pageblock_migratetype(page);
                        if (!cc->sync && !migrate_async_suitable(mt)) {
-                               cc->finished_update_migrate = true;
-                               skipped_async_unsuitable = true;
+                               set_unsuitable = false;
                                goto next_pageblock;
                        }
                }
@@ -640,11 +653,10 @@ next_pageblock:
         /*
          * Update the pageblock-skip information and cached scanner pfn,
          * if the whole pageblock was scanned without isolating any page.
-         * This is not done when pageblock was skipped due to being unsuitable
-         * for async compaction, so that eventual sync compaction can try.
          */
-        if (low_pfn == end_pfn && !skipped_async_unsuitable)
-                update_pageblock_skip(cc, valid_page, nr_isolated, true);
+        if (low_pfn == end_pfn)
+                update_pageblock_skip(cc, valid_page, nr_isolated,
+                                      set_unsuitable, true);
 
         trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
@@ -868,7 +880,8 @@ static int compact_finished(struct zone *zone,
         /* Compaction run completes if the migrate and free scanner meet */
         if (cc->free_pfn <= cc->migrate_pfn) {
                 /* Let the next compaction start anew. */
-                zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+                zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
+                zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
                 zone->compact_cached_free_pfn = zone_end_pfn(zone);
 
                 /*
@@ -993,7 +1006,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
          * information on where the scanners should start but check that it
          * is initialised by ensuring the values are within zone boundaries.
          */
-        cc->migrate_pfn = zone->compact_cached_migrate_pfn;
+        cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync];
         cc->free_pfn = zone->compact_cached_free_pfn;
         if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
                 cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
@@ -1001,7 +1014,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
         }
         if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
                 cc->migrate_pfn = start_pfn;
-                zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+                zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+                zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
         }
 
         trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);