Diffstat (limited to 'mm/compaction.c')
 mm/compaction.c | 61 +++++++++++++++++++++++++++++++++++++++++++------------------
 1 file changed, 43 insertions(+), 18 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index f58bcd016f43..3a91a2ea3d34 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -459,6 +459,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 	unsigned long flags;
 	bool locked = false;
 	struct page *page = NULL, *valid_page = NULL;
+	bool skipped_async_unsuitable = false;
 
 	/*
 	 * Ensure that there are not too many pages isolated from the LRU
@@ -534,6 +535,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
 		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			cc->finished_update_migrate = true;
+			skipped_async_unsuitable = true;
 			goto next_pageblock;
 		}
 
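For reference, migrate_async_suitable() is the existing predicate this check relies on. A paraphrased sketch of it (it lives earlier in mm/compaction.c and is not part of this diff): async compaction only bothers with pageblocks whose migratetype promises cheaply migratable pages.

/* Paraphrased sketch, not part of this diff: async compaction only
 * scans pageblocks that are MOVABLE or CMA; other migratetypes are
 * unlikely to yield migratable pages without sync effort.
 */
static inline bool migrate_async_suitable(int migratetype)
{
	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
}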
@@ -627,8 +629,13 @@ next_pageblock:
 	if (locked)
 		spin_unlock_irqrestore(&zone->lru_lock, flags);
 
-	/* Update the pageblock-skip if the whole pageblock was scanned */
-	if (low_pfn == end_pfn)
+	/*
+	 * Update the pageblock-skip information and cached scanner pfn,
+	 * if the whole pageblock was scanned without isolating any page.
+	 * This is not done when pageblock was skipped due to being unsuitable
+	 * for async compaction, so that eventual sync compaction can try.
+	 */
+	if (low_pfn == end_pfn && !skipped_async_unsuitable)
 		update_pageblock_skip(cc, valid_page, nr_isolated, true);
 
 	trace_mm_compaction_isolate_migratepages(nr_scanned, nr_isolated);
@@ -660,7 +667,7 @@ static void isolate_freepages(struct zone *zone,
 	 * is the end of the pageblock the migration scanner is using.
 	 */
 	pfn = cc->free_pfn;
-	low_pfn = cc->migrate_pfn + pageblock_nr_pages;
+	low_pfn = ALIGN(cc->migrate_pfn + 1, pageblock_nr_pages);
 
 	/*
 	 * Take care that if the migration scanner is at the end of the zone
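A note on the low_pfn fix above: the old expression simply added pageblock_nr_pages to cc->migrate_pfn, which only gives the end of the migrate scanner's pageblock when migrate_pfn happens to be pageblock-aligned; ALIGN() rounds up to the next pageblock boundary regardless. A standalone userspace illustration, assuming 512 pages per pageblock and the kernel's power-of-two ALIGN():

/* Illustration only: the near-pageblock overshoot the ALIGN()
 * conversion fixes. Values are made up for the example.
 */
#include <stdio.h>

#define ALIGN(x, a)	(((x) + (a) - 1) & ~((a) - 1))	/* power-of-two a, as in the kernel */

int main(void)
{
	unsigned long pageblock_nr_pages = 512;
	unsigned long migrate_pfn = 1000;	/* mid-pageblock: its block spans pfn 512..1023 */

	/* old: 1512, so the free scanner stopped almost a full pageblock
	 * short of the true boundary */
	unsigned long low_old = migrate_pfn + pageblock_nr_pages;
	/* new: 1024, exactly the first pfn after the migrate scanner's block */
	unsigned long low_new = ALIGN(migrate_pfn + 1, pageblock_nr_pages);

	printf("old low_pfn=%lu, new low_pfn=%lu\n", low_old, low_new);
	return 0;
}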
@@ -676,7 +683,7 @@ static void isolate_freepages(struct zone *zone,
 	 * pages on cc->migratepages. We stop searching if the migrate
 	 * and free page scanners meet or enough free pages are isolated.
 	 */
-	for (; pfn > low_pfn && cc->nr_migratepages > nr_freepages;
+	for (; pfn >= low_pfn && cc->nr_migratepages > nr_freepages;
 					pfn -= pageblock_nr_pages) {
 		unsigned long isolated;
 
@@ -738,7 +745,14 @@ static void isolate_freepages(struct zone *zone,
 	/* split_free_page does not map the pages */
 	map_pages(freelist);
 
-	cc->free_pfn = high_pfn;
+	/*
+	 * If we crossed the migrate scanner, we want to keep it that way
+	 * so that compact_finished() may detect this
+	 */
+	if (pfn < low_pfn)
+		cc->free_pfn = max(pfn, zone->zone_start_pfn);
+	else
+		cc->free_pfn = high_pfn;
 	cc->nr_freepages = nr_freepages;
 }
 
@@ -837,6 +851,10 @@ static int compact_finished(struct zone *zone,
 
 	/* Compaction run completes if the migrate and free scanner meet */
 	if (cc->free_pfn <= cc->migrate_pfn) {
+		/* Let the next compaction start anew. */
+		zone->compact_cached_migrate_pfn = zone->zone_start_pfn;
+		zone->compact_cached_free_pfn = zone_end_pfn(zone);
+
 		/*
 		 * Mark that the PG_migrate_skip information should be cleared
 		 * by kswapd when it goes to sleep. kswapd does not set the
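For context on why this reset makes the next run "start anew": compact_zone() seeds its scanners from these cached values at the start of each run, clamping them back to the zone boundaries if they are stale. A paraphrase of that consumer, visible in part as context in the next hunks and not itself changed here:

/* Paraphrased sketch of the consumer in compact_zone(): the cached
 * pfns become the scanners' starting positions, so resetting them to
 * the zone edges above forces a full re-scan next time.
 */
cc->migrate_pfn = zone->compact_cached_migrate_pfn;
cc->free_pfn = zone->compact_cached_free_pfn;
if (cc->free_pfn < start_pfn || cc->free_pfn > end_pfn) {
	cc->free_pfn = end_pfn & ~(pageblock_nr_pages - 1);
	zone->compact_cached_free_pfn = cc->free_pfn;
}
if (cc->migrate_pfn < start_pfn || cc->migrate_pfn > end_pfn) {
	cc->migrate_pfn = start_pfn;
	zone->compact_cached_migrate_pfn = cc->migrate_pfn;
}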
@@ -947,6 +965,14 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 	}
 
 	/*
+	 * Clear pageblock skip if there were failures recently and compaction
+	 * is about to be retried after being deferred. kswapd does not do
+	 * this reset as it'll reset the cached information when going to sleep.
+	 */
+	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+		__reset_isolation_suitable(zone);
+
+	/*
 	 * Setup to move all movable pages to the end of the zone. Used cached
 	 * information on where the scanners should start but check that it
 	 * is initialised by ensuring the values are within zone boundaries.
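compaction_restarting() is a helper added alongside this series; roughly, and this is a paraphrase assuming the version merged with it, it reports true when compaction for this order was deferred as long as possible and is now being attempted again:

/* Paraphrased sketch: "restarting" means the given order already hit
 * the maximum defer backoff, so this attempt follows a string of
 * failures and deserves a clean pageblock-skip slate.
 */
bool compaction_restarting(struct zone *zone, int order)
{
	if (order < zone->compact_order_failed)
		return false;

	return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
		zone->compact_considered >= 1UL << zone->compact_defer_shift;
}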
@@ -962,13 +988,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
 	}
 
-	/*
-	 * Clear pageblock skip if there were failures recently and compaction
-	 * is about to be retried after being deferred. kswapd does not do
-	 * this reset as it'll reset the cached information when going to sleep.
-	 */
-	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
-		__reset_isolation_suitable(zone);
+	trace_mm_compaction_begin(start_pfn, cc->migrate_pfn, cc->free_pfn, end_pfn);
 
 	migrate_prep_local();
 
@@ -1003,7 +1023,11 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		if (err) {
 			putback_movable_pages(&cc->migratepages);
 			cc->nr_migratepages = 0;
-			if (err == -ENOMEM) {
+			/*
+			 * migrate_pages() may return -ENOMEM when scanners meet
+			 * and we want compact_finished() to detect it
+			 */
+			if (err == -ENOMEM && cc->free_pfn > cc->migrate_pfn) {
 				ret = COMPACT_PARTIAL;
 				goto out;
 			}
@@ -1015,6 +1039,8 @@ out:
 	cc->nr_freepages -= release_freepages(&cc->freepages);
 	VM_BUG_ON(cc->nr_freepages != 0);
 
+	trace_mm_compaction_end(ret);
+
 	return ret;
 }
 
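The trace_mm_compaction_begin()/end() calls refer to tracepoints introduced elsewhere in this series, in include/trace/events/compaction.h. A sketch of the begin event using the standard TRACE_EVENT boilerplate; the field names are illustrative, inferred from the call site above rather than copied verbatim:

/* Sketch only: shape of the mm_compaction_begin tracepoint, with
 * field names inferred from trace_mm_compaction_begin(start_pfn,
 * cc->migrate_pfn, cc->free_pfn, end_pfn) above.
 */
TRACE_EVENT(mm_compaction_begin,

	TP_PROTO(unsigned long zone_start, unsigned long migrate_start,
		 unsigned long free_start, unsigned long zone_end),

	TP_ARGS(zone_start, migrate_start, free_start, zone_end),

	TP_STRUCT__entry(
		__field(unsigned long, zone_start)
		__field(unsigned long, migrate_start)
		__field(unsigned long, free_start)
		__field(unsigned long, zone_end)
	),

	TP_fast_assign(
		__entry->zone_start = zone_start;
		__entry->migrate_start = migrate_start;
		__entry->free_start = free_start;
		__entry->zone_end = zone_end;
	),

	TP_printk("zone_start=0x%lx migrate_start=0x%lx free_start=0x%lx zone_end=0x%lx",
		__entry->zone_start, __entry->migrate_start,
		__entry->free_start, __entry->zone_end)
);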
@@ -1120,12 +1146,11 @@ static void __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
 		compact_zone(zone, cc);
 
 		if (cc->order > 0) {
-			int ok = zone_watermark_ok(zone, cc->order,
-						low_wmark_pages(zone), 0, 0);
-			if (ok && cc->order >= zone->compact_order_failed)
-				zone->compact_order_failed = cc->order + 1;
+			if (zone_watermark_ok(zone, cc->order,
+						low_wmark_pages(zone), 0, 0))
+				compaction_defer_reset(zone, cc->order, false);
 			/* Currently async compaction is never deferred. */
-			else if (!ok && cc->sync)
+			else if (cc->sync)
 				defer_compaction(zone, cc->order);
 		}
 
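compaction_defer_reset() replaces the open-coded compact_order_failed update deleted above. A paraphrase of the helper as merged with this series, in include/linux/compaction.h; alloc_success is false at this call site because only the watermark was checked, no allocation actually succeeded:

/* Paraphrased sketch: raise the order considered failed on success;
 * the defer counters are only cleared when a real allocation
 * succeeded (alloc_success == true), not on a bare watermark check.
 */
static inline void compaction_defer_reset(struct zone *zone, int order,
		bool alloc_success)
{
	if (alloc_success) {
		zone->compact_considered = 0;
		zone->compact_defer_shift = 0;
	}
	if (order >= zone->compact_order_failed)
		zone->compact_order_failed = order + 1;
}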