Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	66
1 file changed, 40 insertions(+), 26 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index d0c7c994e11b..70c0f8cda33f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -89,7 +89,8 @@ static void __reset_isolation_suitable(struct zone *zone)
 	unsigned long end_pfn = zone_end_pfn(zone);
 	unsigned long pfn;
 
-	zone->compact_cached_migrate_pfn = start_pfn;
+	zone->compact_cached_migrate_pfn[0] = start_pfn;
+	zone->compact_cached_migrate_pfn[1] = start_pfn;
 	zone->compact_cached_free_pfn = end_pfn;
 	zone->compact_blockskip_flush = false;
 
@@ -131,9 +132,10 @@ void reset_isolation_suitable(pg_data_t *pgdat)
  */
 static void update_pageblock_skip(struct compact_control *cc,
 			struct page *page, unsigned long nr_isolated,
-			bool migrate_scanner)
+			bool set_unsuitable, bool migrate_scanner)
 {
 	struct zone *zone = cc->zone;
+	unsigned long pfn;
 
 	if (cc->ignore_skip_hint)
 		return;
@@ -141,20 +143,31 @@ static void update_pageblock_skip(struct compact_control *cc,
 	if (!page)
 		return;
 
-	if (!nr_isolated) {
-		unsigned long pfn = page_to_pfn(page);
+	if (nr_isolated)
+		return;
+
+	/*
+	 * Only skip pageblocks when all forms of compaction will be known to
+	 * fail in the near future.
+	 */
+	if (set_unsuitable)
 		set_pageblock_skip(page);
 
-		/* Update where compaction should restart */
-		if (migrate_scanner) {
-			if (!cc->finished_update_migrate &&
-			    pfn > zone->compact_cached_migrate_pfn)
-				zone->compact_cached_migrate_pfn = pfn;
-		} else {
-			if (!cc->finished_update_free &&
-			    pfn < zone->compact_cached_free_pfn)
-				zone->compact_cached_free_pfn = pfn;
-		}
+	pfn = page_to_pfn(page);
+
+	/* Update where async and sync compaction should restart */
+	if (migrate_scanner) {
+		if (cc->finished_update_migrate)
+			return;
+		if (pfn > zone->compact_cached_migrate_pfn[0])
+			zone->compact_cached_migrate_pfn[0] = pfn;
+		if (cc->sync && pfn > zone->compact_cached_migrate_pfn[1])
+			zone->compact_cached_migrate_pfn[1] = pfn;
+	} else {
+		if (cc->finished_update_free)
+			return;
+		if (pfn < zone->compact_cached_free_pfn)
+			zone->compact_cached_free_pfn = pfn;
 	}
 }
 #else
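
The hunk above is the core of the change: the single compact_cached_migrate_pfn is split into a two-element array so that async and sync compaction each cache their own restart position. Below is a minimal standalone sketch of the caching rule; the struct pfn_cache and cache_migrate_pfn() names are illustrative stand-ins, not kernel code:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for the zone's cached scanner positions. */
struct pfn_cache {
	unsigned long migrate_pfn[2];	/* [0] = async, [1] = sync */
};

/* Mirrors the migrate-scanner branch of the patched update_pageblock_skip(). */
static void cache_migrate_pfn(struct pfn_cache *c, unsigned long pfn, bool sync)
{
	if (pfn > c->migrate_pfn[0])
		c->migrate_pfn[0] = pfn;	/* the async position always advances */
	if (sync && pfn > c->migrate_pfn[1])
		c->migrate_pfn[1] = pfn;	/* the sync position advances only in sync mode */
}

int main(void)
{
	struct pfn_cache c = { { 0, 0 } };

	cache_migrate_pfn(&c, 512, false);	/* async pass scanned up to pfn 512 */
	cache_migrate_pfn(&c, 256, true);	/* a sync pass stopped earlier, at pfn 256 */
	printf("async restarts at %lu, sync restarts at %lu\n",
	       c.migrate_pfn[0], c.migrate_pfn[1]);
	return 0;
}

Because every sync scan is also a useful async scan, a sync pass updates both entries, while an async pass leaves the sync entry alone, so sync compaction still revisits pageblocks the async scanner gave up on.
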
@@ -166,7 +179,7 @@ static inline bool isolation_suitable(struct compact_control *cc,
 
 static void update_pageblock_skip(struct compact_control *cc,
 			struct page *page, unsigned long nr_isolated,
-			bool migrate_scanner)
+			bool set_unsuitable, bool migrate_scanner)
 {
 }
 #endif /* CONFIG_COMPACTION */
@@ -323,7 +336,8 @@ isolate_fail:
 
 	/* Update the pageblock-skip if the whole pageblock was scanned */
 	if (blockpfn == end_pfn)
-		update_pageblock_skip(cc, valid_page, total_isolated, false);
+		update_pageblock_skip(cc, valid_page, total_isolated, true,
+				      false);
 
 	count_compact_events(COMPACTFREE_SCANNED, nr_scanned);
 	if (total_isolated)
@@ -458,7 +472,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	unsigned long flags;
 	bool locked = false;
 	struct page *page = NULL, *valid_page = NULL;
-	bool skipped_async_unsuitable = false;
+	bool set_unsuitable = true;
 	const isolate_mode_t mode = (!cc->sync ? ISOLATE_ASYNC_MIGRATE : 0) |
 				    (unevictable ? ISOLATE_UNEVICTABLE : 0);
 
@@ -535,8 +549,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		 */
 		mt = get_pageblock_migratetype(page);
 		if (!cc->sync && !migrate_async_suitable(mt)) {
-			cc->finished_update_migrate = true;
-			skipped_async_unsuitable = true;
+			set_unsuitable = false;
 			goto next_pageblock;
 		}
 	}
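
With this hunk, an async scanner that hits a pageblock whose migratetype is unsuitable for async compaction no longer sets finished_update_migrate; it only clears set_unsuitable, which later suppresses set_pageblock_skip() for that block. A condensed, hypothetical rendering of that decision follows; should_set_skip() and the trivial async_suitable() stub are illustrative, not kernel functions:

#include <stdbool.h>

/* Illustrative stub: in the kernel, migrate_async_suitable() checks for
 * MIGRATE_MOVABLE/MIGRATE_CMA pageblocks. */
static bool async_suitable(int migratetype)
{
	return migratetype == 0;	/* pretend 0 means "movable" */
}

/*
 * Decide whether an empty, fully scanned pageblock may be marked "skip".
 * Only mark it when all forms of compaction would fail there: an async
 * pass that merely found the block's migratetype unsuitable must leave
 * the skip bit clear so a later sync pass still scans the block.
 */
static bool should_set_skip(bool sync, int migratetype)
{
	bool set_unsuitable = true;

	if (!sync && !async_suitable(migratetype))
		set_unsuitable = false;

	return set_unsuitable;
}
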
@@ -640,11 +653,10 @@ next_pageblock:
 	/*
 	 * Update the pageblock-skip information and cached scanner pfn,
 	 * if the whole pageblock was scanned without isolating any page.
-	 * This is not done when pageblock was skipped due to being unsuitable
-	 * for async compaction, so that eventual sync compaction can try.
 	 */
-	if (low_pfn == end_pfn && !skipped_async_unsuitable)
-		update_pageblock_skip(cc, valid_page, nr_isolated, true);
+	if (low_pfn == end_pfn)
+		update_pageblock_skip(cc, valid_page, nr_isolated,
+				      set_unsuitable, true);
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
 
@@ -868,7 +880,8 @@ static int compact_finished(struct zone *zone,
 	/* Compaction run completes if the migrate and free scanner meet */
 	if (cc->free_pfn <= cc->migrate_pfn) {
 		/* Let the next compaction start anew. */
-		zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+		zone->compact_cached_migrate_pfn[0] = zone->zone_start_pfn;
+		zone->compact_cached_migrate_pfn[1] = zone->zone_start_pfn;
 		zone->compact_cached_free_pfn = zone_end_pfn(zone);
 
 		/*
@@ -993,7 +1006,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	 * information on where the scanners should start but check that it
 	 * is initialised by ensuring the values are within zone boundaries.
 	 */
-	cc->migrate_pfn = zone->compact_cached_migrate_pfn;
+	cc->migrate_pfn = zone->compact_cached_migrate_pfn[cc->sync];
 	cc->free_pfn = zone->compact_cached_free_pfn;
 	if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
 		cc->free_pfn = end_pfn & ~(pageblock_nr_pages-1);
@@ -1001,7 +1014,8 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	}
 	if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
 		cc->migrate_pfn = start_pfn;
-		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
+		zone->compact_cached_migrate_pfn[0] = cc->migrate_pfn;
+		zone->compact_cached_migrate_pfn[1] = cc->migrate_pfn;
 	}
 
 	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
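
Finally, since cc->sync is a bool, zone->compact_cached_migrate_pfn[cc->sync] selects entry 0 for async compaction and entry 1 for sync compaction when choosing where the migrate scanner resumes. A trivial sketch of that indexing, with made-up values:

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	/* [0] = async restart pfn, [1] = sync restart pfn (illustrative values) */
	unsigned long cached_migrate_pfn[2] = { 4096, 1024 };
	bool sync = false;

	/* bool promotes to 0 or 1, so it indexes the per-mode cache directly */
	printf("async scanner resumes at pfn %lu\n", cached_migrate_pfn[sync]);

	sync = true;
	printf("sync scanner resumes at pfn %lu\n", cached_migrate_pfn[sync]);
	return 0;
}

The sync entry can lag the async one (a sync pass updates both caches, an async pass only its own), which is why the reset paths in compact_finished() and at the end of this diff always rewind both entries together.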