path: root/mm/page_alloc.c
author    Joonsoo Kim <iamjoonsoo.kim@lge.com>	2015-04-14 18:45:15 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>	2015-04-14 19:49:01 -0400
commit    dc67647b78b92d9497f01fab95ac6764ed886b40 (patch)
tree      d8bfeadafe4a1f228547c455aef00edf95399135 /mm/page_alloc.c
parent    30467e0b3be83c286d60039f8267dd421128ca74 (diff)
mm/cma: change fallback behaviour for CMA freepage
Freepages with MIGRATE_CMA can be used only for MIGRATE_MOVABLE allocations, and they should not be expanded onto other migratetypes' buddy lists, to protect them from unmovable/reclaimable allocations. Implementing these requirements in __rmqueue_fallback(), which finds the largest possible block of freepages, has the bad side effect that high-order MIGRATE_CMA freepages are split continually even when a CMA freepage of a suitable order exists. The reason is that CMA pages cannot be expanded onto other migratetypes' buddy lists, so the next __rmqueue_fallback() invocation finds another largest block of freepages and splits it again.

So MIGRATE_CMA fallback should be handled separately. This patch introduces __rmqueue_cma_fallback(), which is just a wrapper around __rmqueue_smallest(), and calls it before __rmqueue_fallback() when migratetype == MIGRATE_MOVABLE.

This results in an unintended behaviour change: MIGRATE_CMA freepages are now always used before any other migratetype as a movable allocation's fallback. But, as mentioned above, MIGRATE_CMA pages can be used only for MIGRATE_MOVABLE, so it is better to use MIGRATE_CMA freepages first as much as possible. Otherwise, we needlessly take up precious freepages of other migratetypes and increase the chance of fragmentation.

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Mel Gorman <mgorman@suse.de>
Cc: David Rientjes <rientjes@google.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Zhang Yanfei <zhangyanfei@cn.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
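[Editor's note] For orientation before reading the diff, here is a condensed sketch of how __rmqueue() reads after this patch. The control flow matches the hunks below; context lines outside the diff (the trace call is omitted, for instance) are reconstructed and may not match the tree verbatim.

/*
 * Condensed sketch (not verbatim kernel code) of the allocation
 * order in __rmqueue() after this patch:
 *
 *   1. __rmqueue_smallest()     - the requested migratetype's own lists
 *   2. __rmqueue_cma_fallback() - MIGRATE_CMA, movable requests only
 *   3. __rmqueue_fallback()     - steal from other migratetypes
 *   4. MIGRATE_RESERVE          - last resort before failing
 */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

retry_reserve:
	page = __rmqueue_smallest(zone, order, migratetype);

	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
		/*
		 * CMA pages can only serve movable allocations, so use
		 * them first; taking one costs the other migratetypes
		 * nothing and avoids splitting their large blocks.
		 */
		if (migratetype == MIGRATE_MOVABLE)
			page = __rmqueue_cma_fallback(zone, order);

		if (!page)
			page = __rmqueue_fallback(zone, order, migratetype);

		/* Use MIGRATE_RESERVE rather than fail the allocation. */
		if (!page) {
			migratetype = MIGRATE_RESERVE;
			goto retry_reserve;
		}
	}

	return page;
}

The key design choice is step 2 before step 3: because __rmqueue_cma_fallback() goes through __rmqueue_smallest(), CMA blocks are split at exactly the requested order instead of being repeatedly broken up by __rmqueue_fallback()'s largest-block search.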
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	36
1 file changed, 19 insertions(+), 17 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 40e29429e7b0..9ca626756927 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1032,11 +1032,9 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 static int fallbacks[MIGRATE_TYPES][4] = {
 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,     MIGRATE_RESERVE },
 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,     MIGRATE_RESERVE },
+	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
 #ifdef CONFIG_CMA
-	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
 	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
-#else
-	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,   MIGRATE_RESERVE },
 #endif
 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
 #ifdef CONFIG_MEMORY_ISOLATION
@@ -1044,6 +1042,17 @@ static int fallbacks[MIGRATE_TYPES][4] = {
 #endif
 };
 
+#ifdef CONFIG_CMA
+static struct page *__rmqueue_cma_fallback(struct zone *zone,
+					unsigned int order)
+{
+	return __rmqueue_smallest(zone, order, MIGRATE_CMA);
+}
+#else
+static inline struct page *__rmqueue_cma_fallback(struct zone *zone,
+					unsigned int order) { return NULL; }
+#endif
+
 /*
  * Move the free pages in a range to the free lists of the requested type.
  * Note that start_page and end_pages are not aligned on a pageblock
@@ -1195,19 +1204,8 @@ __rmqueue_fallback(struct zone *zone, unsigned int order, int start_migratetype)
 					struct page, lru);
 		area->nr_free--;
 
-		if (!is_migrate_cma(migratetype)) {
-			try_to_steal_freepages(zone, page,
-						start_migratetype,
-						migratetype);
-		} else {
-			/*
-			 * When borrowing from MIGRATE_CMA, we need to
-			 * release the excess buddy pages to CMA
-			 * itself, and we do not try to steal extra
-			 * free pages.
-			 */
-			buddy_type = migratetype;
-		}
+		try_to_steal_freepages(zone, page, start_migratetype,
+								migratetype);
 
 		/* Remove the page from the freelists */
 		list_del(&page->lru);
@@ -1249,7 +1247,11 @@ retry_reserve:
 	page = __rmqueue_smallest(zone, order, migratetype);
 
 	if (unlikely(!page) && migratetype != MIGRATE_RESERVE) {
-		page = __rmqueue_fallback(zone, order, migratetype);
+		if (migratetype == MIGRATE_MOVABLE)
+			page = __rmqueue_cma_fallback(zone, order);
+
+		if (!page)
+			page = __rmqueue_fallback(zone, order, migratetype);
 
 		/*
 		 * Use MIGRATE_RESERVE rather than fail an allocation. goto