Diffstat (limited to 'mm/compaction.c')
 mm/compaction.c | 88 ++++++++++++++++++++++++++++++++++-----------------
 1 file changed, 61 insertions(+), 27 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index f58bcd016f43..918577595ea8 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -251,7 +251,6 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 {
         int nr_scanned = 0, total_isolated = 0;
         struct page *cursor, *valid_page = NULL;
-        unsigned long nr_strict_required = end_pfn - blockpfn;
         unsigned long flags;
         bool locked = false;
 
@@ -264,11 +263,12 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                 nr_scanned++;
                 if (!pfn_valid_within(blockpfn))
-                        continue;
+                        goto isolate_fail;
+
                 if (!valid_page)
                         valid_page = page;
                 if (!PageBuddy(page))
-                        continue;
+                        goto isolate_fail;
 
                 /*
                  * The zone lock must be held to isolate freepages.
@@ -289,12 +289,10 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
 
                 /* Recheck this is a buddy page under lock */
                 if (!PageBuddy(page))
-                        continue;
+                        goto isolate_fail;
 
                 /* Found a free page, break it into order-0 pages */
                 isolated = split_free_page(page);
-                if (!isolated && strict)
-                        break;
                 total_isolated += isolated;
                 for (i = 0; i < isolated; i++) {
                         list_add(&page->lru, freelist);
@@ -305,7 +303,15 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
                 if (isolated) {
                         blockpfn += isolated - 1;
                         cursor += isolated - 1;
+                        continue;
                 }
+
+isolate_fail:
+                if (strict)
+                        break;
+                else
+                        continue;
+
         }
 
         trace_mm_compaction_isolate_freepages(nr_scanned, total_isolated);
@@ -315,7 +321,7 @@ static unsigned long isolate_freepages_block(struct compact_control *cc,
          * pages requested were isolated. If there were any failures, 0 is
          * returned and CMA will fail.
          */
-        if (strict && nr_strict_required > total_isolated)
+        if (strict && blockpfn < end_pfn)
                 total_isolated = 0;
 
         if (locked)
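
A note on the hunks above: with the isolate_fail label, a strict caller (CMA, via isolate_freepages_range()) now breaks out on the first page that cannot be isolated, so blockpfn can only reach end_pfn if every page in the range was taken. That is what lets the nr_strict_required counter be dropped in favor of the simpler blockpfn < end_pfn test. A minimal userspace model of this contract, with a stubbed can_isolate() predicate standing in for the real checks (illustrative only, not kernel code):

    #include <stdbool.h>
    #include <stdio.h>

    /* Stub: pretend pfn 5 cannot be isolated. */
    static bool can_isolate(unsigned long pfn) { return pfn != 5; }

    static unsigned long isolate_range(unsigned long blockpfn,
                                       unsigned long end_pfn, bool strict)
    {
            unsigned long total_isolated = 0;

            for (; blockpfn < end_pfn; blockpfn++) {
                    if (!can_isolate(blockpfn)) {
                            if (strict)
                                    break;          /* isolate_fail, strict */
                            else
                                    continue;       /* isolate_fail, best effort */
                    }
                    total_isolated++;
            }
            /* strict && blockpfn < end_pfn <=> some page was not isolated */
            if (strict && blockpfn < end_pfn)
                    total_isolated = 0;
            return total_isolated;
    }

    int main(void)
    {
            printf("strict: %lu\n", isolate_range(0, 10, true));   /* 0 */
            printf("lenient: %lu\n", isolate_range(0, 10, false)); /* 9 */
            return 0;
    }
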
@@ -459,6 +465,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
         unsigned long flags;
         bool locked = false;
         struct page *page = NULL, *valid_page = NULL;
+        bool skipped_async_unsuitable = false;
 
         /*
          * Ensure that there are not too many pages isolated from the LRU
@@ -522,7 +529,10 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                 if (!isolation_suitable(cc, page))
                         goto next_pageblock;
 
-                /* Skip if free */
+                /*
+                 * Skip if free. page_order cannot be used without zone->lock
+                 * as nothing prevents parallel allocations or buddy merging.
+                 */
                 if (PageBuddy(page))
                         continue;
 
@@ -534,6 +544,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                 if (!cc->sync && last_pageblock_nr != pageblock_nr &&
                     !migrate_async_suitable(get_pageblock_migratetype(page))) {
                         cc->finished_update_migrate = true;
+                        skipped_async_unsuitable = true;
                         goto next_pageblock;
                 }
 
@@ -599,7 +610,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
                 if (__isolate_lru_page(page, mode) != 0)
                         continue;
 
-                VM_BUG_ON(PageTransCompound(page));
+                VM_BUG_ON_PAGE(PageTransCompound(page), page);
 
                 /* Successfully isolated */
                 cc->finished_update_migrate = true;
@@ -627,8 +638,13 @@ next_pageblock:
         if (locked)
                 spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-        /* Update the pageblock-skip if the whole pageblock was scanned */
-        if (low_pfn == end_pfn)
+        /*
+         * Update the pageblock-skip information and cached scanner pfn,
+         * if the whole pageblock was scanned without isolating any page.
+         * This is not done when pageblock was skipped due to being unsuitable
+         * for async compaction, so that eventual sync compaction can try.
+         */
+        if (low_pfn == end_pfn && !skipped_async_unsuitable)
                 update_pageblock_skip(cc, valid_page, nr_isolated, true);
 
         trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
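
To spell out the new skip-hint condition: the persistent hint is recorded only when the whole pageblock was scanned and the pass was not async compaction declining an unsuitable migratetype. A tiny check of that decision (plain C; the harness is hypothetical, only the condition comes from the hunk above):

    #include <stdbool.h>
    #include <stdio.h>

    /* Should this pageblock get the persistent skip hint? */
    static bool should_mark_skip(unsigned long low_pfn, unsigned long end_pfn,
                                 bool skipped_async_unsuitable)
    {
            return low_pfn == end_pfn && !skipped_async_unsuitable;
    }

    int main(void)
    {
            /* Fully scanned, nothing isolated: mark it, as before. */
            printf("%d\n", should_mark_skip(512, 512, false)); /* 1 */
            /* Skipped only because async compaction cannot handle the
             * migratetype: leave it clear so a sync pass can still try. */
            printf("%d\n", should_mark_skip(512, 512, true));  /* 0 */
            return 0;
    }
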
@@ -660,7 +676,7 @@ static void isolate_freepages(struct zone *zone,
          * is the end of the pageblock the migration scanner is using.
          */
         pfn = cc->free_pfn;
-        low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+        low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
         /*
          * Take care that if the migration scanner is at the end of the zone
@@ -676,7 +692,7 @@ static void isolate_freepages(struct zone *zone,
          * pages on cc->migratepages. We stop searching if the migrate
          * and free page scanners meet or enough free pages are isolated.
          */
-        for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+        for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
                                         pfn -= pageblock_nr_pages) {
                 unsigned long isolated;
 
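
Why ALIGN() in the hunk before last: the old bound cc->migrate_pfn + pageblock_nr_pages overshoots by up to a full pageblock whenever migrate_pfn sits mid-block, while ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages) is exactly the end of the pageblock the migration scanner is in, which the relaxed pfn >= low_pfn loop condition can then reach. A standalone demonstration of the arithmetic, with the kernel's round-up macro inlined as ALIGN_UP (userspace sketch):

    #include <stdio.h>

    /* Same arithmetic as the kernel's ALIGN(): round x up to a multiple
     * of a, where a is a power of two (pageblock_nr_pages always is). */
    #define ALIGN_UP(x, a) (((x) + ((a) - 1)) & ~((unsigned long)(a) - 1))

    int main(void)
    {
            unsigned long nr = 512; /* pageblock_nr_pages: 2MB blocks, 4KB pages */

            /* Mid-pageblock scanner: block [1024,1536) ends at 1536. */
            printf("%lu vs %lu\n", 1100UL + nr, ALIGN_UP(1100UL + 1, nr)); /* 1612 vs 1536 */

            /* Last pfn of block [512,1024): the bound is exactly 1024. */
            printf("%lu vs %lu\n", 1023UL + nr, ALIGN_UP(1023UL + 1, nr)); /* 1535 vs 1024 */
            return 0;
    }
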
@@ -738,7 +754,14 @@ static void isolate_freepages(struct zone *zone,
         /* split_free_page does not map the pages */
         map_pages(freelist);
 
-        cc->free_pfn = high_pfn;
+        /*
+         * If we crossed the migrate scanner, we want to keep it that way
+         * so that compact_finished() may detect this
+         */
+        if (pfn < low_pfn)
+                cc->free_pfn = max(pfn, zone->zone_start_pfn);
+        else
+                cc->free_pfn = high_pfn;
         cc->nr_freepages = nr_freepages;
 }
 
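
This hunk cooperates with the compact_finished() change below: once the free scanner steps below low_pfn, cc->free_pfn is kept at (clamped to) a pfn at or below the migrate scanner instead of being reset upward, so the free_pfn <= migrate_pfn termination test can fire. A toy model of that handshake, using a hypothetical pared-down stand-in for struct compact_control:

    #include <stdio.h>

    /* Hypothetical, pared-down stand-in for struct compact_control. */
    struct cc_model {
            unsigned long free_pfn;    /* free scanner, walks downward */
            unsigned long migrate_pfn; /* migrate scanner, walks upward */
    };

    /* The termination test compact_finished() applies. */
    static int scanners_met(const struct cc_model *cc)
    {
            return cc->free_pfn <= cc->migrate_pfn;
    }

    int main(void)
    {
            struct cc_model cc = { .free_pfn = 4096, .migrate_pfn = 1024 };
            unsigned long low_pfn = 1536;     /* end of migrate scanner's block */
            unsigned long zone_start_pfn = 0;
            unsigned long pfn = 1024;         /* where the free scanner stopped */

            /* Crossed below low_pfn: keep free_pfn there (clamped to the
             * zone start) rather than resetting it past the migrate scanner. */
            if (pfn < low_pfn)
                    cc.free_pfn = pfn > zone_start_pfn ? pfn : zone_start_pfn;

            printf("met: %d\n", scanners_met(&cc)); /* 1 */
            return 0;
    }
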
@@ -837,6 +860,10 @@ static int compact_finished(struct zone *zone,
 
         /* Compaction run completes if the migrate and free scanner meet */
         if (cc->free_pfn <= cc->migrate_pfn) {
+                /* Let the next compaction start anew. */
+                zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+                zone->compact_cached_free_pfn = zone_end_pfn(zone);
+
                 /*
                  * Mark that the PG_migrate_skip information should be cleared
                  * by kswapd when it goes to sleep. kswapd does not set the
@@ -947,6 +974,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
         }
 
         /*
+         * Clear pageblock skip if there were failures recently and compaction
+         * is about to be retried after being deferred. kswapd does not do
+         * this reset as it'll reset the cached information when going to sleep.
+         */
+        if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+                __reset_isolation_suitable(zone);
+
+        /*
          * Setup to move all movable pages to the end of the zone. Used cached
          * information on where the scanners should start but check that it
          * is initialised by ensuring the values are within zone boundaries.
@@ -962,13 +997,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                 zone->compact_cached_migrate_pfn = cc->migrate_pfn;
         }
 
-        /*
-         * Clear pageblock skip if there were failures recently and compaction
-         * is about to be retried after being deferred. kswapd does not do
-         * this reset as it'll reset the cached information when going to sleep.
-         */
-        if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
-                __reset_isolation_suitable(zone);
+        trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
 
         migrate_prep_local();
 
@@ -1003,7 +1032,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
                 if (err) {
                         putback_movable_pages(&cc->migratepages);
                         cc->nr_migratepages = 0;
-                        if (err == -ENOMEM) {
+                        /*
+                         * migrate_pages() may return -ENOMEM when scanners meet
+                         * and we want compact_finished() to detect it
+                         */
+                        if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
                                 ret = COMPACT_PARTIAL;
                                 goto out;
                         }
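
A condensed model of the new -ENOMEM handling: partial progress is claimed only while the scanners are still apart; an -ENOMEM that arrives after they have met just means there were no free target pages left, and the loop must fall through so compact_finished() can report completion. Sketch (the helper name is hypothetical; the condition is the hunk's):

    #include <errno.h>
    #include <stdio.h>

    enum { MODEL_CONTINUE, MODEL_PARTIAL }; /* stand-ins for compact results */

    /* Hypothetical helper: classify a migrate_pages() failure. */
    static int on_migrate_error(int err, unsigned long free_pfn,
                                unsigned long migrate_pfn)
    {
            /* Genuine memory pressure only while the scanners are apart. */
            if (err == -ENOMEM && free_pfn > migrate_pfn)
                    return MODEL_PARTIAL;
            return MODEL_CONTINUE;
    }

    int main(void)
    {
            printf("%d\n", on_migrate_error(-ENOMEM, 4096, 1024)); /* 1 */
            printf("%d\n", on_migrate_error(-ENOMEM, 1024, 1024)); /* 0 */
            return 0;
    }
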
@@ -1015,6 +1048,8 @@ out:
         cc->nr_freepages -= release_freepages(&cc->freepages);
         VM_BUG_ON(cc->nr_freepages != 0);
 
+        trace_mm_compaction_end(ret);
+
         return ret;
 }
 
@@ -1120,12 +1155,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
                 compact_zone(zone, cc);
 
                 if (cc->order > 0) {
-                        int ok = zone_watermark_ok(zone, cc->order,
-                                                low_wmark_pages(zone), 0, 0);
-                        if (ok && cc->order >= zone->compact_order_failed)
-                                zone->compact_order_failed = cc->order + 1;
+                        if (zone_watermark_ok(zone, cc->order,
+                                                low_wmark_pages(zone), 0, 0))
+                                compaction_defer_reset(zone, cc->order, false);
                         /* Currently async compaction is never deferred. */
-                        else if (!ok && cc->sync)
+                        else if (cc->sync)
                                 defer_compaction(zone, cc->order);
                 }
 
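
compaction_defer_reset() is the helper, added in the same series, that replaces the open-coded watermark bookkeeping removed above. A runnable sketch of what such a reset does, reconstructed from memory with a minimal stand-in for the zone fields it touches; verify against mm/compaction.c before treating it as the real body:

    #include <stdbool.h>
    #include <stdio.h>

    /* Minimal stand-in for the zone fields the helper touches. */
    struct zone_model {
            unsigned int compact_considered;
            unsigned int compact_defer_shift;
            int compact_order_failed;
    };

    /* Sketch of compaction_defer_reset(); reconstructed from memory. */
    static void compaction_defer_reset(struct zone_model *zone, int order,
                                       bool alloc_success)
    {
            if (alloc_success) {
                    /* A successful high-order allocation clears the backoff. */
                    zone->compact_considered = 0;
                    zone->compact_defer_shift = 0;
            }
            /* Remember that compaction succeeded at this order. */
            if (order >= zone->compact_order_failed)
                    zone->compact_order_failed = order + 1;
    }

    int main(void)
    {
            struct zone_model zone = { .compact_order_failed = 2 };

            /* As called from __compact_pgdat(): alloc_success == false. */
            compaction_defer_reset(&zone, 3, false);
            printf("order_failed: %d\n", zone.compact_order_failed); /* 4 */
            return 0;
    }

Called with alloc_success == false, as in __compact_pgdat() here, it reduces to the same compact_order_failed bump the removed branch performed.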