Diffstat (limited to 'mm/page_alloc.c')

 mm/page_alloc.c | 19 ++++++++++++++-----
 1 file changed, 14 insertions(+), 5 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 31aa943365d8..6dfa5b24cc79 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1194,9 +1194,14 @@ static void steal_suitable_fallback(struct zone *zone, struct page *page,
 	set_pageblock_migratetype(page, start_type);
 }
 
-/* Check whether there is a suitable fallback freepage with requested order. */
-static int find_suitable_fallback(struct free_area *area, unsigned int order,
-					int migratetype, bool *can_steal)
+/*
+ * Check whether there is a suitable fallback freepage with requested order.
+ * If only_stealable is true, this function returns fallback_mt only if
+ * we can steal other freepages all together. This would help to reduce
+ * fragmentation due to mixed migratetype pages in one pageblock.
+ */
+int find_suitable_fallback(struct free_area *area, unsigned int order,
+			int migratetype, bool only_stealable, bool *can_steal)
 {
 	int i;
 	int fallback_mt;
@@ -1216,7 +1221,11 @@ static int find_suitable_fallback(struct free_area *area, unsigned int order,
 		if (can_steal_fallback(order, migratetype))
 			*can_steal = true;
 
-		return fallback_mt;
+		if (!only_stealable)
+			return fallback_mt;
+
+		if (*can_steal)
+			return fallback_mt;
 	}
 
 	return -1;
@@ -1238,7 +1247,7 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 			--current_order) {
 		area = &(zone->free_area[current_order]);
 		fallback_mt = find_suitable_fallback(area, current_order,
-				start_migratetype, &can_steal);
+				start_migratetype, false, &can_steal);
 		if (fallback_mt == -1)
 			continue;
 
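
For context on the new only_stealable argument, below is a minimal, self-contained C sketch of how a caller outside this file might use it. Only the find_suitable_fallback() prototype comes from the patch above; the helper name order_has_stealable_fallback() and its use are hypothetical illustrations, not kernel API.

#include <stdbool.h>

struct free_area;	/* opaque here; the real definition lives in include/linux/mmzone.h */

/* Prototype matching the patched definition in mm/page_alloc.c above. */
int find_suitable_fallback(struct free_area *area, unsigned int order,
			   int migratetype, bool only_stealable, bool *can_steal);

/*
 * Hypothetical helper: report whether this free_area has a fallback that
 * could be stolen as a whole. With only_stealable == true the callee
 * returns -1 for fallbacks that could not be taken all together, so a
 * non-negative result here means whole-pageblock stealing is possible.
 */
static bool order_has_stealable_fallback(struct free_area *area,
					 unsigned int order, int migratetype)
{
	bool can_steal = false;

	return find_suitable_fallback(area, order, migratetype,
				      true, &can_steal) != -1;
}

The allocator fallback path itself keeps its old behaviour by passing false, as __rmqueue_fallback() does in the last hunk: any suitable fallback migratetype is accepted, and *can_steal merely reports whether stealing the whole pageblock is worthwhile.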