Diffstat (limited to 'mm/compaction.c')
-rw-r--r--	mm/compaction.c	50
1 file changed, 34 insertions(+), 16 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index f94cbc0b99a5..d8187f9cabbf 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -66,24 +66,15 @@ static inline bool isolation_suitable(struct compact_control *cc,
  * should be skipped for page isolation when the migrate and free page scanner
  * meet.
  */
-static void reset_isolation_suitable(struct zone *zone)
+static void __reset_isolation_suitable(struct zone *zone)
 {
 	unsigned long start_pfn = zone->zone_start_pfn;
 	unsigned long end_pfn = zone->zone_start_pfn + zone->spanned_pages;
 	unsigned long pfn;
 
-	/*
-	 * Do not reset more than once every five seconds. If allocations are
-	 * failing sufficiently quickly to allow this to happen then continually
-	 * scanning for compaction is not going to help. The choice of five
-	 * seconds is arbitrary but will mitigate excessive scanning.
-	 */
-	if (time_before(jiffies, zone->compact_blockskip_expire))
-		return;
-
 	zone->compact_cached_migrate_pfn = start_pfn;
 	zone->compact_cached_free_pfn = end_pfn;
-	zone->compact_blockskip_expire = jiffies + (HZ * 5);
+	zone->compact_blockskip_flush = false;
 
 	/* Walk the zone and mark every pageblock as suitable for isolation */
 	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
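
For context, the pageblock walk whose body is elided by this hunk clears the PG_migrate_skip bit on every block in the zone. A minimal sketch of that body, assuming the usual pfn_valid()/clear_pageblock_skip() helpers of kernels from this era (the exact lines sit outside the hunk):

	/* Sketch of the elided walk body: validate each pageblock-aligned
	 * pfn, then clear its PG_migrate_skip bit so both scanners will
	 * consider the block again. */
	for (pfn = start_pfn; pfn < end_pfn; pfn += pageblock_nr_pages) {
		struct page *page;

		cond_resched();

		if (!pfn_valid(pfn))
			continue;

		page = pfn_to_page(pfn);
		if (zone != page_zone(page))	/* pfn may fall in a zone hole */
			continue;

		clear_pageblock_skip(page);
	}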
@@ -102,9 +93,24 @@ static void reset_isolation_suitable(struct zone *zone)
 	}
 }
 
+void reset_isolation_suitable(pg_data_t *pgdat)
+{
+	int zoneid;
+
+	for (zoneid = 0; zoneid < MAX_NR_ZONES; zoneid++) {
+		struct zone *zone = &pgdat->node_zones[zoneid];
+		if (!populated_zone(zone))
+			continue;
+
+		/* Only flush if a full compaction finished recently */
+		if (zone->compact_blockskip_flush)
+			__reset_isolation_suitable(zone);
+	}
+}
+
 /*
  * If no pages were isolated then mark this pageblock to be skipped in the
- * future. The information is later cleared by reset_isolation_suitable().
+ * future. The information is later cleared by __reset_isolation_suitable().
  */
 static void update_pageblock_skip(struct compact_control *cc,
 			struct page *page, unsigned long nr_isolated,
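
The new node-level reset_isolation_suitable() is the hook kswapd is expected to call when it goes to sleep, flushing only those zones whose compact_blockskip_flush flag was set by a completed compaction run. Since this view is limited to mm/compaction.c, the matching mm/vmscan.c call site is not shown; a sketch of where it would plausibly sit, assuming the kswapd_try_to_sleep() path:

	/* mm/vmscan.c (sketch -- not part of this file-limited view):
	 * kswapd flushes the cached skip information before sleeping. */
	static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
	{
		/* ... prepare to sleep ... */

		/*
		 * Compaction records which pageblocks it recently failed to
		 * isolate pages from; when kswapd goes to sleep it is
		 * reasonable to assume compaction may succeed next time,
		 * so reset the cache.
		 */
		reset_isolation_suitable(pgdat);

		/* ... schedule() ... */
	}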
@@ -820,7 +826,15 @@ static int compact_finished(struct zone *zone,
 
 	/* Compaction run completes if the migrate and free scanner meet */
 	if (cc->free_pfn <= cc->migrate_pfn) {
-		reset_isolation_suitable(cc->zone);
+		/*
+		 * Mark that the PG_migrate_skip information should be cleared
+		 * by kswapd when it goes to sleep. kswapd does not set the
+		 * flag itself as the decision to be clear should be directly
+		 * based on an allocation request.
+		 */
+		if (!current_is_kswapd())
+			zone->compact_blockskip_flush = true;
+
 		return COMPACT_COMPLETE;
 	}
 
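
Setting zone->compact_blockskip_flush here only records the request; the actual flush happens later, from kswapd, via the node-level hook above. The flag and the hook need declarations outside this file; a sketch of what the companion mmzone.h/compaction.h changes would look like (placement and #ifdef guard assumed from this diff, not shown in it):

	/* include/linux/mmzone.h (sketch): the flag replaces the old
	 * compact_blockskip_expire timestamp in struct zone. */
	struct zone {
		/* ... existing fields ... */
	#if defined CONFIG_COMPACTION || defined CONFIG_CMA
		/* Set true when the PG_migrate_skip bits should be cleared */
		bool			compact_blockskip_flush;
	#endif
		/* ... existing fields ... */
	};

	/* include/linux/compaction.h (sketch): node-level entry point */
	extern void reset_isolation_suitable(pg_data_t *pgdat);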
@@ -943,9 +957,13 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
 		zone->compact_cached_migrate_pfn = cc->migrate_pfn;
 	}
 
-	/* Clear pageblock skip if there are numerous alloc failures */
-	if (zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT)
-		reset_isolation_suitable(zone);
+	/*
+	 * Clear pageblock skip if there were failures recently and compaction
+	 * is about to be retried after being deferred. kswapd does not do
+	 * this reset as it'll reset the cached information when going to sleep.
+	 */
+	if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+		__reset_isolation_suitable(zone);
 
 	migrate_prep_local();
 
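
compaction_restarting() replaces the bare compact_defer_shift test: the reset now fires only when compaction for this order has been deferred to the maximum extent and is genuinely about to be retried. The helper itself is defined outside this file; a sketch of a plausible definition, assuming the existing compact_considered/compact_order_failed deferral fields:

	/* include/linux/compaction.h (sketch): true when compaction for this
	 * order was deferred as far as allowed and the deferral window has
	 * fully elapsed, i.e. a restart from scratch is imminent. */
	static inline bool compaction_restarting(struct zone *zone, int order)
	{
		if (order < zone->compact_order_failed)
			return false;

		return zone->compact_defer_shift == COMPACT_MAX_DEFER_SHIFT &&
			zone->compact_considered >= 1UL << zone->compact_defer_shift;
	}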