 mm/compaction.c | 15 +++++++++++----
 mm/internal.h   |  2 ++
 mm/page_alloc.c | 19 ++++++++++++++-----
 3 files changed, 29 insertions(+), 7 deletions(-)
diff --git a/mm/compaction.c b/mm/compaction.c
index 8c0d9459b54a..a18201a8124e 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -1174,13 +1174,24 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
 	/* Direct compactor: Is a suitable page free? */
 	for (order = cc->order; order < MAX_ORDER; order++) {
 		struct free_area *area = &zone->free_area[order];
+		bool can_steal;
 
 		/* Job done if page is free of the right migratetype */
 		if (!list_empty(&area->free_list[migratetype]))
 			return COMPACT_PARTIAL;
 
-		/* Job done if allocation would set block type */
-		if (order >= pageblock_order && area->nr_free)
+#ifdef CONFIG_CMA
+		/* MIGRATE_MOVABLE can fallback on MIGRATE_CMA */
+		if (migratetype == MIGRATE_MOVABLE &&
+			!list_empty(&area->free_list[MIGRATE_CMA]))
+			return COMPACT_PARTIAL;
+#endif
+		/*
+		 * Job done if allocation would steal freepages from
+		 * other migratetype buddy lists.
+		 */
+		if (find_suitable_fallback(area, order, migratetype,
+						true, &can_steal) != -1)
 			return COMPACT_PARTIAL;
 	}
 
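The hunk above changes when compaction declares itself finished: previously it
needed a free page of order >= pageblock_order (which would let the allocation
set the whole block's type), whereas now it is enough that the request could be
met by stealing a fallback migratetype's freepages wholesale, or, under
CONFIG_CMA, by MIGRATE_MOVABLE using the MIGRATE_CMA free list. A minimal
sketch of the resulting per-order test, with the CMA branch omitted;
is_order_satisfied() is a hypothetical helper name, not part of the patch:

	/* Sketch only: models the new success test in __compact_finished(). */
	static bool is_order_satisfied(struct free_area *area,
					unsigned int order, int migratetype)
	{
		bool can_steal;

		/* A free page of the requested migratetype always suffices. */
		if (!list_empty(&area->free_list[migratetype]))
			return true;

		/*
		 * Otherwise succeed only when some fallback migratetype has
		 * freepages that can be stolen all together (only_stealable
		 * is true), so finishing here cannot leave a mixed pageblock.
		 */
		return find_suitable_fallback(area, order, migratetype,
						true, &can_steal) != -1;
	}
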
diff --git a/mm/internal.h b/mm/internal.h
index 7df78a5269f3..edaab69a9c35 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -200,6 +200,8 @@ isolate_freepages_range(struct compact_control *cc,
 unsigned long
 isolate_migratepages_range(struct compact_control *cc,
 			unsigned long low_pfn, unsigned long end_pfn);
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+			int migratetype, bool only_stealable, bool *can_steal);
 
 #endif
 
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 31aa943365d8..6dfa5b24cc79 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1194,9 +1194,14 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 	set_pageblock_migratetype(page, start_type);
 }
 
-/* Check whether there is a suitable fallback freepage with requested order. */
-static int find_suitable_fallback(struct free_area *area, unsigned int order,
-			int migratetype, bool *can_steal)
+/*
+ * Check whether there is a suitable fallback freepage with requested order.
+ * If only_stealable is true, this function returns fallback_mt only if
+ * we can steal other freepages all together. This would help to reduce
+ * fragmentation due to mixed migratetype pages in one pageblock.
+ */
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+			int migratetype, bool only_stealable, bool *can_steal)
 {
 	int i;
 	int fallback_mt;
@@ -1216,7 +1221,11 @@ static int find_suitable_fallback(struct free_area *area, unsigned int order,
 		if (can_steal_fallback(order, migratetype))
 			*can_steal = true;
 
-		return fallback_mt;
+		if (!only_stealable)
+			return fallback_mt;
+
+		if (*can_steal)
+			return fallback_mt;
 	}
 
 	return -1;
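A side note on the hunk above: the two early returns collapse into a single
condition, so the tail of the loop could equivalently read (a sketch, not what
the patch applies):

	if (!only_stealable || *can_steal)
		return fallback_mt;

The split form simply spells out the two cases: callers that accept any
suitable fallback, and callers that only want a fallback they can steal all
together.
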
@@ -1238,7 +1247,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 				--current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, &can_steal);
+				start_migratetype, false, &can_steal);
 		if (fallback_mt == -1)
 			continue;
 
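Taken together, find_suitable_fallback() now serves two callers with opposite
strictness. A sketch of the contrast after this patch, with the surrounding
loops elided:

	/* mm/compaction.c, __compact_finished(): only report a fallback
	 * when its freepages can be stolen wholesale. */
	fallback_mt = find_suitable_fallback(area, order, migratetype,
						true, &can_steal);

	/* mm/page_alloc.c, __rmqueue_fallback(): allocation has to make
	 * progress, so accept any suitable fallback. */
	fallback_mt = find_suitable_fallback(area, current_order,
					start_migratetype, false, &can_steal);

Passing false at the allocator call site keeps __rmqueue_fallback()'s
behaviour unchanged by this patch.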