Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--   mm/page_alloc.c   76
1 file changed, 60 insertions, 16 deletions
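
The hunks below teach the buddy allocator about the MIGRATE_CMA migratetype used by the contiguous memory allocator. They call the is_migrate_cma() helper, which is defined outside mm/page_alloc.c and therefore does not appear in this diff; for reading purposes it can be assumed to behave roughly like the sketch below (an assumption for illustration, not part of this patch):

    #ifdef CONFIG_CMA
    /* true only when CMA is built in and the type is MIGRATE_CMA;
     * constant false otherwise, so CMA-only branches compile away */
    #  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
    #else
    #  define is_migrate_cma(migratetype) false
    #endif
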
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d6b580c660f5..0869eb1e9461 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -750,6 +750,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 	__free_pages(page, order);
 }
 
+#ifdef CONFIG_CMA
+/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+	unsigned i = pageblock_nr_pages;
+	struct page *p = page;
+
+	do {
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	} while (++p, --i);
+
+	set_page_refcounted(page);
+	set_pageblock_migratetype(page, MIGRATE_CMA);
+	__free_pages(page, pageblock_order);
+	totalram_pages += pageblock_nr_pages;
+}
+#endif
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
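
Note: init_cma_reserved_pageblock() releases one pageblock that was reserved early during boot: every page in the block has its PG_reserved bit cleared and its refcount zeroed, then the head page is given a reference, the whole pageblock_order block is freed into the buddy allocator with the pageblock marked MIGRATE_CMA, and totalram_pages is bumped accordingly. The caller lives in the CMA core, not in this file; a hypothetical caller sketch (illustrative names, assuming the reserved region is pageblock-aligned and a whole number of pageblocks):

    static void __init release_cma_region(unsigned long base_pfn,
                                          unsigned long nr_pages)
    {
        unsigned long pfn;

        /* hand the reserved range to the buddy allocator, one
         * MIGRATE_CMA pageblock at a time */
        for (pfn = base_pfn; pfn < base_pfn + nr_pages;
             pfn += pageblock_nr_pages)
            init_cma_reserved_pageblock(pfn_to_page(pfn));
    }
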
@@ -875,10 +893,15 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][3] = {
+static int fallbacks[MIGRATE_TYPES][4] = {
 	[MIGRATE_UNMOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE, MIGRATE_MOVABLE, MIGRATE_RESERVE },
-	[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+	[MIGRATE_MOVABLE] = { MIGRATE_CMA, MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_CMA] = { MIGRATE_RESERVE }, /* Never used */
+#else
+	[MIGRATE_MOVABLE] = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+#endif
 	[MIGRATE_RESERVE] = { MIGRATE_RESERVE }, /* Never used */
 	[MIGRATE_ISOLATE] = { MIGRATE_RESERVE }, /* Never used */
 };
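
Note: the row width grows from 3 to 4 because, under CONFIG_CMA, movable allocations gain one extra fallback: they try MIGRATE_CMA pageblocks first, before stealing from reclaimable or unmovable ones, while no other migratetype ever falls back into CMA. MIGRATE_RESERVE terminates each row. A rough sketch of how __rmqueue_fallback() is assumed to consume a row (simplified, not the exact kernel loop):

    for (i = 0;; i++) {
        int migratetype = fallbacks[start_migratetype][i];

        /* MIGRATE_RESERVE acts as the end-of-row marker */
        if (migratetype == MIGRATE_RESERVE)
            break;

        /* ... try to take a free page of this migratetype ... */
    }
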
@@ -995,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			 * pages to the preferred allocation list. If falling
 			 * back for a reclaimable kernel allocation, be more
 			 * aggressive about taking ownership of free pages
+			 *
+			 * On the other hand, never change migration
+			 * type of MIGRATE_CMA pageblocks nor move CMA
+			 * pages on different free lists. We don't
+			 * want unmovable pages to be allocated from
+			 * MIGRATE_CMA areas.
 			 */
-			if (unlikely(current_order >= (pageblock_order >> 1)) ||
-					start_migratetype == MIGRATE_RECLAIMABLE ||
-					page_group_by_mobility_disabled) {
-				unsigned long pages;
+			if (!is_migrate_cma(migratetype) &&
+			    (unlikely(current_order >= pageblock_order / 2) ||
+			     start_migratetype == MIGRATE_RECLAIMABLE ||
+			     page_group_by_mobility_disabled)) {
+				int pages;
 				pages = move_freepages_block(zone, page,
 							start_migratetype);
 
@@ -1017,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			rmv_page_order(page);
 
 			/* Take ownership for orders >= pageblock_order */
-			if (current_order >= pageblock_order)
+			if (current_order >= pageblock_order &&
+			    !is_migrate_cma(migratetype))
 				change_pageblock_range(page, current_order,
 							start_migratetype);
 
-			expand(zone, page, order, current_order, area, migratetype);
+			expand(zone, page, order, current_order, area,
+			       is_migrate_cma(migratetype)
+			     ? migratetype : start_migratetype);
 
 			trace_mm_page_alloc_extfrag(page, order, current_order,
 						start_migratetype, migratetype);
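
Note: together, the two hunks above keep CMA pageblocks intact when a movable allocation borrows from them: the pageblock is never retyped to the requesting migratetype, and expand() is passed MIGRATE_CMA instead of start_migratetype so the split-off buddies return to the CMA free list rather than the requester's list. A trimmed sketch of the relevant part of expand() (roughly as it appears in this kernel, for illustration):

    while (high > low) {
        area--;
        high--;
        size >>= 1;
        /* the migratetype argument selects the free list on which
         * each split-off buddy half is parked */
        list_add(&page[size].lru, &area->free_list[migratetype]);
        area->nr_free++;
        set_page_order(&page[size], high);
    }
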
@@ -1072,7 +1105,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, int cold)
 {
-	int i;
+	int mt = migratetype, i;
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
@@ -1093,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			list_add(&page->lru, list);
 		else
 			list_add_tail(&page->lru, list);
-		set_page_private(page, migratetype);
+		if (IS_ENABLED(CONFIG_CMA)) {
+			mt = get_pageblock_migratetype(page);
+			if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+				mt = migratetype;
+		}
+		set_page_private(page, mt);
 		list = &page->lru;
 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
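
Note: rmqueue_bulk() fills the per-CPU page lists, so it now records the page's real migratetype in page_private() whenever the page sits in a CMA or isolated pageblock. The payoff is on the free path: when the per-CPU list is drained, each page is assumed to be returned to the free list named by page_private(), so a CMA page handed out for a movable allocation ends up back on the MIGRATE_CMA free list. Roughly (a sketch of the existing drain path, not part of this patch):

    /* in free_pcppages_bulk(): the recorded type, not the pcp list the
     * page sat on, decides which free list it is returned to */
    __free_one_page(page, zone, 0, page_private(page));
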
@@ -1373,8 +1411,12 @@ int split_free_page(struct page *page)
 
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
-		for (; page < endpage; page += pageblock_nr_pages)
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		for (; page < endpage; page += pageblock_nr_pages) {
+			int mt = get_pageblock_migratetype(page);
+			if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+				set_pageblock_migratetype(page,
+							  MIGRATE_MOVABLE);
+		}
 	}
 
 	return 1 << order;
@@ -5414,14 +5456,16 @@ static int
 __count_immobile_pages(struct zone *zone, struct page *page, int count)
 {
 	unsigned long pfn, iter, found;
+	int mt;
+
 	/*
 	 * For avoiding noise data, lru_add_drain_all() should be called
 	 * If ZONE_MOVABLE, the zone never contains immobile pages
 	 */
 	if (zone_idx(zone) == ZONE_MOVABLE)
 		return true;
-
-	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+	mt = get_pageblock_migratetype(page);
+	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
 		return true;
 
 	pfn = page_to_pfn(page);