author	Joonsoo Kim <iamjoonsoo.kim@lge.com>	2015-04-14 18:45:21 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 19:49:01 -0400
commit	2149cdaef6c0eb59a9edf3b152027392cd66b41f (patch)
tree	ed01f6ab07dd36968c9e6899d74d9b804ff0d8e8
parent	4eb7dce62007113f1a2778213980fd6d8034ef5e (diff)
mm/compaction: enhance compaction finish condition
Compaction has an anti-fragmentation rule for its finish condition: if no free page is found in the buddy list of the requested migratetype, compaction only finishes once a free page of at least pageblock order exists. This mitigates fragmentation, but the check ignores migratetypes and is stricter than the page allocator's own anti-fragmentation logic.

Ignoring the migratetype can finish compaction prematurely. For example, if the allocation request is for the unmovable migratetype, a free page of CMA migratetype does not help that allocation, so compaction should not stop; the current logic nevertheless treats this situation as "compaction no longer needed" and finishes.

Secondly, the condition is stricter than the page allocator's logic. The page allocator can steal free pages from another migratetype and change a pageblock's migratetype under more relaxed conditions. That logic is designed to prevent fragmentation and can be reused here. Imposing a hard constraint only on compaction does not help much, since the page allocator would reintroduce fragmentation anyway.

To solve these problems, this patch borrows the anti-fragmentation logic from the page allocator. It reduces premature compaction finishes in some cases and avoids excessive compaction work.

The stress-highalloc test in mmtests with non-movable order-7 allocations shows a considerable increase in the compaction success rate.

Compaction success rate (Compaction success * 100 / Compaction stalls, %)
31.82 : 42.20

I tested this with 5 non-reboot runs of the stress-highalloc benchmark and found no further degradation of the allocation success rate compared to before, which roughly means that this patch does not cause more fragmentation.

Vlastimil suggested the additional idea of testing for fallbacks only when the migration scanner has scanned a whole pageblock. That looked good for fragmentation, because making more free pages within one pageblock increases the chance of stealing. I tested it, but it decreased the compaction success rate to roughly 38.00. I guess the reason is that under low-memory conditions the watermark check can fail for lack of order-0 free pages, so we sometimes never reach the fallback check even when migrate_pfn is aligned to pageblock_nr_pages. Code could be added to cope with this situation, but it would complicate things, so that idea is not included in this patch.

[akpm@linux-foundation.org: fix CONFIG_CMA=n build]
Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
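To make the decision flow easier to follow before reading the diff, here is a minimal, self-contained sketch of the revised finish condition. It is not the kernel code: the struct free_area layout, the fallbacks table, find_suitable_fallback() and compaction_finished() below are simplified stand-ins for their mm/compaction.c and mm/page_alloc.c counterparts, and the "order >= 4" stealability test is a toy placeholder for can_steal_fallback().

/*
 * Illustrative sketch only; structures and helpers are simplified
 * stand-ins, not the mm/ implementation.
 */
#include <stdbool.h>
#include <stdio.h>

enum migratetype { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
		   MIGRATE_CMA, MIGRATE_TYPES };
#define MAX_ORDER 11

/* Per-order free counts, split by migratetype (stand-in for struct free_area). */
struct free_area {
	unsigned long nr_free[MIGRATE_TYPES];
};

/* Simplified fallback preference: which lists an allocation may steal from. */
static const int fallbacks[MIGRATE_TYPES][2] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE },
};

/*
 * Return a fallback migratetype with free pages, or -1. With only_stealable
 * set, succeed only when the block could be stolen outright (toy heuristic),
 * mirroring the relaxed check borrowed from the page allocator.
 */
static int find_suitable_fallback(const struct free_area *area, unsigned int order,
				  int migratetype, bool only_stealable, bool *can_steal)
{
	*can_steal = order >= 4;	/* toy stand-in for can_steal_fallback() */
	for (int i = 0; i < 2; i++) {
		int fallback_mt = fallbacks[migratetype][i];

		if (!area->nr_free[fallback_mt])
			continue;
		if (!only_stealable || *can_steal)
			return fallback_mt;
	}
	return -1;
}

/* Decide whether compaction for req_order/migratetype can stop. */
static bool compaction_finished(const struct free_area areas[MAX_ORDER],
				unsigned int req_order, int migratetype)
{
	for (unsigned int order = req_order; order < MAX_ORDER; order++) {
		const struct free_area *area = &areas[order];
		bool can_steal;

		/* A free page of the right migratetype: done. */
		if (area->nr_free[migratetype])
			return true;

		/* MIGRATE_MOVABLE may fall back to MIGRATE_CMA. */
		if (migratetype == MIGRATE_MOVABLE && area->nr_free[MIGRATE_CMA])
			return true;

		/* Done if the allocation could steal from another buddy list. */
		if (find_suitable_fallback(area, order, migratetype,
					   true, &can_steal) != -1)
			return true;
	}
	return false;
}

int main(void)
{
	struct free_area areas[MAX_ORDER] = { 0 };

	/* An order-7 CMA page exists, but the request is unmovable: keep going. */
	areas[7].nr_free[MIGRATE_CMA] = 1;
	printf("unmovable order-7: %s\n",
	       compaction_finished(areas, 7, MIGRATE_UNMOVABLE) ? "finish" : "continue");

	/* The same page satisfies a movable request: finish. */
	printf("movable   order-7: %s\n",
	       compaction_finished(areas, 7, MIGRATE_MOVABLE) ? "finish" : "continue");
	return 0;
}

Built with a C99 compiler, the sketch prints "continue" for the unmovable request and "finish" for the movable one, matching the CMA example given above.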
-rw-r--r--	mm/compaction.c	15
-rw-r--r--	mm/internal.h	2
-rw-r--r--	mm/page_alloc.c	19
3 files changed, 29 insertions, 7 deletions
diff --git a/mm/compaction.c b/mm/compaction.c
index 8c0d9459b54a..a18201a8124e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1174,13 +1174,24 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
 	/* Direct compactor: Is a suitable page free? */
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		struct free_area *area = &zone->free_area[order];
+		bool can_steal;
 
 		/* Job done if page is free of the right migratetype */
 		if (!list_empty(&area->free_list[migratetype]))
 			return COMPACT_PARTIAL;
 
-		/* Job done if allocation would set block type */
-		if (order >= pageblock_order && area->nr_free)
+#ifdef CONFIG_CMA
+		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
+		if (migratetype == MIGRATE_MOVABLE &&
+			!list_empty(&area->free_list[MIGRATE_CMA]))
+			return COMPACT_PARTIAL;
+#endif
+		/*
+		 * Job done if allocation would steal freepages from
+		 * other migratetype buddy lists.
+		 */
+		if (find_suitable_fallback(area, order, migratetype,
+						true, &can_steal) != -1)
 			return COMPACT_PARTIAL;
 	}
 
diff --git a/mm/internal.h b/mm/internal.h
index 7df78a5269f3..edaab69a9c35 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -200,6 +200,8 @@ isolate_freepages_range(struct compact_control *cc,
 unsigned long
 isolate_migratepages_range(struct compact_control *cc,
 			   unsigned long low_pfn, unsigned long end_pfn);
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+			int migratetype, bool only_stealable, bool *can_steal);
 
 #endif
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 31aa943365d8..6dfa5b24cc79 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1194,9 +1194,14 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 	set_pageblock_migratetype(page, start_type);
 }
 
-/* Check whether there is a suitable fallback freepage with requested order. */
-static int find_suitable_fallback(struct free_area *area, unsigned int order,
-			int migratetype, bool *can_steal)
+/*
+ * Check whether there is a suitable fallback freepage with requested order.
+ * If only_stealable is true, this function returns fallback_mt only if
+ * we can steal other freepages all together. This would help to reduce
+ * fragmentation due to mixed migratetype pages in one pageblock.
+ */
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+			int migratetype, bool only_stealable, bool *can_steal)
 {
 	int i;
 	int fallback_mt;
@@ -1216,7 +1221,11 @@ static int find_suitable_fallback(struct free_area *area, unsigned int order,
 		if (can_steal_fallback(order, migratetype))
 			*can_steal = true;
 
-		return fallback_mt;
+		if (!only_stealable)
+			return fallback_mt;
+
+		if (*can_steal)
+			return fallback_mt;
 	}
 
 	return -1;
@@ -1238,7 +1247,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 		     --current_order) {
 			area = &(zone->free_area[current_order]);
 			fallback_mt = find_suitable_fallback(area, current_order,
-					start_migratetype, &can_steal);
+					start_migratetype, false, &can_steal);
 			if (fallback_mt == -1)
 				continue;
 