Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  95
1 files changed, 59 insertions, 36 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2ca3e9bd739c..b09ce5fe0cd2 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1008,6 +1008,52 @@ static void change_pageblock_range(struct page *pageblock_page,
 	}
 }
 
+/*
+ * If breaking a large block of pages, move all free pages to the preferred
+ * allocation list. If falling back for a reclaimable kernel allocation, be
+ * more aggressive about taking ownership of free pages.
+ *
+ * On the other hand, never change migration type of MIGRATE_CMA pageblocks
+ * nor move CMA pages to different free lists. We don't want unmovable pages
+ * to be allocated from MIGRATE_CMA areas.
+ *
+ * Returns the new migratetype of the pageblock (or the same old migratetype
+ * if it was unchanged).
+ */
+static int try_to_steal_freepages(struct zone *zone, struct page *page,
+				  int start_type, int fallback_type)
+{
+	int current_order = page_order(page);
+
+	if (is_migrate_cma(fallback_type))
+		return fallback_type;
+
+	/* Take ownership for orders >= pageblock_order */
+	if (current_order >= pageblock_order) {
+		change_pageblock_range(page, current_order, start_type);
+		return start_type;
+	}
+
+	if (current_order >= pageblock_order / 2 ||
+	    start_type == MIGRATE_RECLAIMABLE ||
+	    page_group_by_mobility_disabled) {
+		int pages;
+
+		pages = move_freepages_block(zone, page, start_type);
+
+		/* Claim the whole block if over half of it is free */
+		if (pages >= (1 << (pageblock_order-1)) ||
+				page_group_by_mobility_disabled) {
+
+			set_pageblock_migratetype(page, start_type);
+			return start_type;
+		}
+
+	}
+
+	return fallback_type;
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
@@ -1015,7 +1061,7 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 	struct free_area *area;
 	int current_order;
 	struct page *page;
-	int migratetype, i;
+	int migratetype, new_type, i;
 
 	/* Find the largest possible block of pages in the other list */
 	for (current_order = MAX_ORDER-1; current_order >= order;
@@ -1035,51 +1081,28 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 						struct page, lru);
 			area->nr_free--;
 
-			/*
-			 * If breaking a large block of pages, move all free
-			 * pages to the preferred allocation list. If falling
-			 * back for a reclaimable kernel allocation, be more
-			 * aggressive about taking ownership of free pages
-			 *
-			 * On the other hand, never change migration
-			 * type of MIGRATE_CMA pageblocks nor move CMA
-			 * pages on different free lists. We don't
-			 * want unmovable pages to be allocated from
-			 * MIGRATE_CMA areas.
-			 */
-			if (!is_migrate_cma(migratetype) &&
-			    (current_order >= pageblock_order / 2 ||
-			     start_migratetype == MIGRATE_RECLAIMABLE ||
-			     page_group_by_mobility_disabled)) {
-				int pages;
-				pages = move_freepages_block(zone, page,
-								start_migratetype);
-
-				/* Claim the whole block if over half of it is free */
-				if (pages >= (1 << (pageblock_order-1)) ||
-						page_group_by_mobility_disabled)
-					set_pageblock_migratetype(page,
-								start_migratetype);
-
-				migratetype = start_migratetype;
-			}
-
+			new_type = try_to_steal_freepages(zone, page,
+							  start_migratetype,
+							  migratetype);
 
 			/* Remove the page from the freelists */
 			list_del(&page->lru);
 			rmv_page_order(page);
 
-			/* Take ownership for orders >= pageblock_order */
-			if (current_order >= pageblock_order &&
-			    !is_migrate_cma(migratetype))
-				change_pageblock_range(page, current_order,
-							start_migratetype);
-
+			/*
+			 * Borrow the excess buddy pages as well, irrespective
+			 * of whether we stole freepages, or took ownership of
+			 * the pageblock or not.
+			 *
+			 * Exception: When borrowing from MIGRATE_CMA, release
+			 * the excess buddy pages to CMA itself.
+			 */
 			expand(zone, page, order, current_order, area,
 					       is_migrate_cma(migratetype)
 					     ? migratetype : start_migratetype);
 
 			trace_mm_page_alloc_extfrag(page, order, current_order,
-						start_migratetype, migratetype);
+						start_migratetype, new_type);
 
 			return page;
 		}
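
For illustration only, and not part of the patch above: a minimal, self-contained userspace sketch of the decision that the new try_to_steal_freepages() helper encodes. The kernel's constants and helpers are replaced by stand-ins (the PAGEBLOCK_ORDER value, the migratetype enum, and a pages_moved parameter standing in for the result of move_freepages_block() are assumptions made for this sketch), so it only demonstrates the branch structure, not the real allocator.

/* Illustration only -- not part of the commit; stand-ins are assumptions. */
#include <stdio.h>
#include <stdbool.h>

#define PAGEBLOCK_ORDER	9	/* assumed value; varies by configuration */

enum migratetype { MT_UNMOVABLE, MT_RECLAIMABLE, MT_MOVABLE, MT_CMA };

static bool page_group_by_mobility_disabled;	/* assume grouping stays enabled */

static bool is_cma(int mt)
{
	return mt == MT_CMA;
}

/*
 * Mirror of the patch's three cases: never steal from CMA, take whole
 * blocks at or above pageblock_order, and claim the block when over half
 * of it could be moved to the preferred list.  pages_moved stands in for
 * the return value of move_freepages_block().
 */
static int steal_decision(int current_order, int start_type,
			  int fallback_type, int pages_moved)
{
	if (is_cma(fallback_type))
		return fallback_type;

	if (current_order >= PAGEBLOCK_ORDER)
		return start_type;

	if (current_order >= PAGEBLOCK_ORDER / 2 ||
	    start_type == MT_RECLAIMABLE ||
	    page_group_by_mobility_disabled) {
		/* Claim the whole block if over half of it is free */
		if (pages_moved >= (1 << (PAGEBLOCK_ORDER - 1)) ||
		    page_group_by_mobility_disabled)
			return start_type;
	}

	return fallback_type;
}

int main(void)
{
	/* order-5 fallback, 300 of 512 pages moved: the block is claimed */
	printf("%d\n", steal_decision(5, MT_RECLAIMABLE, MT_MOVABLE, 300));
	/* order-3 movable fallback, nothing moved: the block keeps its type */
	printf("%d\n", steal_decision(3, MT_MOVABLE, MT_RECLAIMABLE, 0));
	return 0;
}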