summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorMel Gorman <mgorman@techsingularity.net>2019-03-05 18:45:31 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2019-03-06 00:07:17 -0500
commitdbe2d4e4f12e07c6a2215e3603a5f77056323081 (patch)
tree03ac02c91b3900b20c999bbc48c064f5c584afd6
parentd097a6f63522547dfc7c75c7084a05b6a7f9e838 (diff)
mm, compaction: round-robin the order while searching the free lists for a target
As compaction proceeds and creates high-order blocks, the free list search gets less efficient as the larger blocks are used as compaction targets. Eventually, the larger blocks will be behind the migration scanner for partially migrated pageblocks and the search fails. This patch round-robins what orders are searched so that larger blocks can be ignored and smaller blocks found that can be used as migration targets. The overall impact was small on 1-socket but it avoids corner cases where the migration/free scanners meet prematurely or situations where many of the pageblocks encountered by the free scanner are almost full instead of being properly packed. Previous testing had indicated that without this patch there were occasional large spikes in the free scanner. [dan.carpenter@oracle.com: fix static checker warning] Link: http://lkml.kernel.org/r/20190118175136.31341-20-mgorman@techsingularity.net Signed-off-by: Mel Gorman <mgorman@techsingularity.net> Acked-by: Vlastimil Babka <vbabka@suse.cz> Cc: Andrea Arcangeli <aarcange@redhat.com> Cc: David Rientjes <rientjes@google.com> Cc: YueHaibing <yuehaibing@huawei.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--mm/compaction.c33
-rw-r--r--mm/internal.h3
2 files changed, 32 insertions, 4 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 452beef0541e..b3055983a80f 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1147,6 +1147,24 @@ fast_isolate_around(struct compact_control *cc, unsigned long pfn, unsigned long
1147 set_pageblock_skip(page); 1147 set_pageblock_skip(page);
1148} 1148}
1149 1149
1150/* Search orders in round-robin fashion */
1151static int next_search_order(struct compact_control *cc, int order)
1152{
1153 order--;
1154 if (order < 0)
1155 order = cc->order - 1;
1156
1157 /* Search wrapped around? */
1158 if (order == cc->search_order) {
1159 cc->search_order--;
1160 if (cc->search_order < 0)
1161 cc->search_order = cc->order - 1;
1162 return -1;
1163 }
1164
1165 return order;
1166}
1167
1150static unsigned long 1168static unsigned long
1151fast_isolate_freepages(struct compact_control *cc) 1169fast_isolate_freepages(struct compact_control *cc)
1152{ 1170{
@@ -1183,9 +1201,15 @@ fast_isolate_freepages(struct compact_control *cc)
1183 if (WARN_ON_ONCE(min_pfn > low_pfn)) 1201 if (WARN_ON_ONCE(min_pfn > low_pfn))
1184 low_pfn = min_pfn; 1202 low_pfn = min_pfn;
1185 1203
1186 for (order = cc->order - 1; 1204 /*
1187 order >= 0 && !page; 1205 * Search starts from the last successful isolation order or the next
1188 order--) { 1206 * order to search after a previous failure
1207 */
1208 cc->search_order = min_t(unsigned int, cc->order - 1, cc->search_order);
1209
1210 for (order = cc->search_order;
1211 !page && order >= 0;
1212 order = next_search_order(cc, order)) {
1189 struct free_area *area = &cc->zone->free_area[order]; 1213 struct free_area *area = &cc->zone->free_area[order];
1190 struct list_head *freelist; 1214 struct list_head *freelist;
1191 struct page *freepage; 1215 struct page *freepage;
@@ -1209,6 +1233,7 @@ fast_isolate_freepages(struct compact_control *cc)
1209 1233
1210 if (pfn >= low_pfn) { 1234 if (pfn >= low_pfn) {
1211 cc->fast_search_fail = 0; 1235 cc->fast_search_fail = 0;
1236 cc->search_order = order;
1212 page = freepage; 1237 page = freepage;
1213 break; 1238 break;
1214 } 1239 }
@@ -2138,6 +2163,7 @@ static enum compact_result compact_zone_order(struct zone *zone, int order,
2138 .total_migrate_scanned = 0, 2163 .total_migrate_scanned = 0,
2139 .total_free_scanned = 0, 2164 .total_free_scanned = 0,
2140 .order = order, 2165 .order = order,
2166 .search_order = order,
2141 .gfp_mask = gfp_mask, 2167 .gfp_mask = gfp_mask,
2142 .zone = zone, 2168 .zone = zone,
2143 .mode = (prio == COMPACT_PRIO_ASYNC) ? 2169 .mode = (prio == COMPACT_PRIO_ASYNC) ?
@@ -2369,6 +2395,7 @@ static void kcompactd_do_work(pg_data_t *pgdat)
2369 struct zone *zone; 2395 struct zone *zone;
2370 struct compact_control cc = { 2396 struct compact_control cc = {
2371 .order = pgdat->kcompactd_max_order, 2397 .order = pgdat->kcompactd_max_order,
2398 .search_order = pgdat->kcompactd_max_order,
2372 .total_migrate_scanned = 0, 2399 .total_migrate_scanned = 0,
2373 .total_free_scanned = 0, 2400 .total_free_scanned = 0,
2374 .classzone_idx = pgdat->kcompactd_classzone_idx, 2401 .classzone_idx = pgdat->kcompactd_classzone_idx,
diff --git a/mm/internal.h b/mm/internal.h
index d5b999e5eb5f..31bb0be6fd52 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -192,7 +192,8 @@ struct compact_control {
192 struct zone *zone; 192 struct zone *zone;
193 unsigned long total_migrate_scanned; 193 unsigned long total_migrate_scanned;
194 unsigned long total_free_scanned; 194 unsigned long total_free_scanned;
195 unsigned int fast_search_fail; /* failures to use free list searches */ 195 unsigned short fast_search_fail;/* failures to use free list searches */
196 short search_order; /* order to start a fast search at */
196 const gfp_t gfp_mask; /* gfp mask of a direct compactor */ 197 const gfp_t gfp_mask; /* gfp mask of a direct compactor */
197 int order; /* order a direct compactor needs */ 198 int order; /* order a direct compactor needs */
198 int migratetype; /* migratetype of direct compactor */ 199 int migratetype; /* migratetype of direct compactor */