author     Michal Nazarewicz <mina86@mina86.com>  2011-12-29 07:09:50 -0500
committer  Marek Szyprowski <m.szyprowski@samsung.com>  2012-05-21 09:09:32 -0400
commit     47118af076f64844b4f423bc2f545b2da9dab50d
tree       00df88cf2f60a2a3efc1a6c46ad88d128aee2071
parent     6d4a49160de2c684fb59fa627bce80e200224331
mm: mmzone: MIGRATE_CMA migration type added
The MIGRATE_CMA migration type has two main characteristics: (i) only movable
pages can be allocated from MIGRATE_CMA pageblocks, and (ii) the page
allocator will never change the migration type of MIGRATE_CMA pageblocks.
This guarantees (to some degree) that a page in a MIGRATE_CMA pageblock can
always be migrated somewhere else (unless there's no memory left in the
system).

It is designed to be used for allocating big chunks (e.g. 10MiB) of
physically contiguous memory.  Once a driver requests contiguous memory,
pages from MIGRATE_CMA pageblocks may be migrated away to create the
contiguous block.

To minimise the number of migrations, MIGRATE_CMA is the last type tried
when the page allocator falls back from the requested migration type.

Signed-off-by: Michal Nazarewicz <mina86@mina86.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Signed-off-by: Kyungmin Park <kyungmin.park@samsung.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Reviewed-by: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Tested-by: Rob Clark <rob.clark@linaro.org>
Tested-by: Ohad Ben-Cohen <ohad@wizery.com>
Tested-by: Benjamin Gaignard <benjamin.gaignard@linaro.org>
Tested-by: Robert Nelson <robertcnelson@gmail.com>
Tested-by: Barry Song <Baohua.Song@csr.com>
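[Editor's note: a minimal sketch of how a driver is expected to consume such a
region.  Only alloc_contig_range(), free_contig_range() and pfn_to_page() are
real interfaces, with the signatures this series declares in <linux/gfp.h>;
the helper names below are hypothetical, error handling is minimal, and the
PFN range is assumed to have been reserved and marked MIGRATE_CMA at boot.]

/* Hypothetical driver-side sketch, not part of this patch. */
#include <linux/gfp.h>
#include <linux/mm.h>

static struct page *my_dev_alloc_buffer(unsigned long base_pfn,
					unsigned long nr_pages)
{
	/*
	 * Movable pages currently inside the range are migrated out of
	 * the MIGRATE_CMA pageblocks before the range is handed to us.
	 */
	if (alloc_contig_range(base_pfn, base_pfn + nr_pages))
		return NULL;
	return pfn_to_page(base_pfn);
}

static void my_dev_free_buffer(unsigned long base_pfn,
			       unsigned long nr_pages)
{
	free_contig_range(base_pfn, nr_pages);
}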
-rw-r--r--  include/linux/gfp.h     |  3
-rw-r--r--  include/linux/mmzone.h  | 38
-rw-r--r--  mm/Kconfig              |  2
-rw-r--r--  mm/compaction.c         | 11
-rw-r--r--  mm/page_alloc.c         | 76
-rw-r--r--  mm/vmstat.c             |  3
6 files changed, 106 insertions(+), 27 deletions(-)
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 052a5b6cc4d0..78d32a7be257 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -397,6 +397,9 @@ static inline bool pm_suspended_storage(void)
 extern int alloc_contig_range(unsigned long start, unsigned long end);
 extern void free_contig_range(unsigned long pfn, unsigned nr_pages);
 
+/* CMA stuff */
+extern void init_cma_reserved_pageblock(struct page *page);
+
 #endif
 
 #endif /* __LINUX_GFP_H */
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index dff711509661..8c1335f3c3a3 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,13 +35,37 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#define MIGRATE_UNMOVABLE     0
-#define MIGRATE_RECLAIMABLE   1
-#define MIGRATE_MOVABLE       2
-#define MIGRATE_PCPTYPES      3 /* the number of types on the pcp lists */
-#define MIGRATE_RESERVE       3
-#define MIGRATE_ISOLATE       4 /* can't allocate from here */
-#define MIGRATE_TYPES         5
+enum {
+	MIGRATE_UNMOVABLE,
+	MIGRATE_RECLAIMABLE,
+	MIGRATE_MOVABLE,
+	MIGRATE_PCPTYPES,	/* the number of types on the pcp lists */
+	MIGRATE_RESERVE = MIGRATE_PCPTYPES,
+#ifdef CONFIG_CMA
+	/*
+	 * The MIGRATE_CMA migration type is designed to mimic the way
+	 * ZONE_MOVABLE works.  Only movable pages can be allocated
+	 * from MIGRATE_CMA pageblocks and the page allocator never
+	 * implicitly changes the migration type of MIGRATE_CMA pageblocks.
+	 *
+	 * The way to use it is to change the migratetype of a range of
+	 * pageblocks to MIGRATE_CMA, which can be done with the
+	 * __free_pageblock_cma() function.  What is important though
+	 * is that a range of pageblocks must be aligned to
+	 * MAX_ORDER_NR_PAGES should the biggest page be bigger than
+	 * a single pageblock.
+	 */
+	MIGRATE_CMA,
+#endif
+	MIGRATE_ISOLATE,	/* can't allocate from here */
+	MIGRATE_TYPES
+};
+
+#ifdef CONFIG_CMA
+#  define is_migrate_cma(migratetype) unlikely((migratetype) == MIGRATE_CMA)
+#else
+#  define is_migrate_cma(migratetype) false
+#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
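[Editor's note: to make the resulting numbering concrete, here is a
standalone sketch — plain C, not kernel code — that mirrors the enum above
and prints the values a CONFIG_CMA=y build ends up with.]

#include <stdio.h>

#define CONFIG_CMA		/* pretend CONFIG_CMA=y */

enum {
	MIGRATE_UNMOVABLE,	/* 0 */
	MIGRATE_RECLAIMABLE,	/* 1 */
	MIGRATE_MOVABLE,	/* 2 */
	MIGRATE_PCPTYPES,	/* 3 */
	MIGRATE_RESERVE = MIGRATE_PCPTYPES,	/* 3: aliases PCPTYPES */
#ifdef CONFIG_CMA
	MIGRATE_CMA,		/* 4 */
#endif
	MIGRATE_ISOLATE,	/* 5 (4 without CMA) */
	MIGRATE_TYPES		/* 6 (5 without CMA) */
};

int main(void)
{
	printf("RESERVE=%d CMA=%d ISOLATE=%d TYPES=%d\n",
	       MIGRATE_RESERVE, MIGRATE_CMA, MIGRATE_ISOLATE, MIGRATE_TYPES);
	return 0;	/* prints: RESERVE=3 CMA=4 ISOLATE=5 TYPES=6 */
}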
diff --git a/mm/Kconfig b/mm/Kconfig
index e338407f1225..39220026c797 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -198,7 +198,7 @@ config COMPACTION
 config MIGRATION
 	bool "Page migration"
 	def_bool y
-	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION
+	depends on NUMA || ARCH_ENABLE_MEMORY_HOTREMOVE || COMPACTION || CMA
 	help
 	  Allows the migration of the physical location of pages of processes
 	  while the virtual addresses are not changed. This is useful in
diff --git a/mm/compaction.c b/mm/compaction.c
index 7a92e418a187..da7d35ea5103 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -45,6 +45,11 @@ static void map_pages(struct list_head *list)
 	}
 }
 
+static inline bool migrate_async_suitable(int migratetype)
+{
+	return is_migrate_cma(migratetype) || migratetype == MIGRATE_MOVABLE;
+}
+
 /*
  * Isolate free pages onto a private freelist. Caller must hold zone->lock.
  * If @strict is true, will abort returning 0 on any invalid PFNs or non-free
@@ -299,7 +304,7 @@ isolate_migratepages_range(struct zone *zone, struct compact_control *cc,
 		 */
 		pageblock_nr = low_pfn >> pageblock_order;
 		if (!cc->sync && last_pageblock_nr != pageblock_nr &&
-		    get_pageblock_migratetype(page) != MIGRATE_MOVABLE) {
+		    !migrate_async_suitable(get_pageblock_migratetype(page))) {
 			low_pfn += pageblock_nr_pages;
 			low_pfn = ALIGN(low_pfn, pageblock_nr_pages) - 1;
 			last_pageblock_nr = pageblock_nr;
@@ -367,8 +372,8 @@ static bool suitable_migration_target(struct page *page)
 	if (PageBuddy(page) && page_order(page) >= pageblock_order)
 		return true;
 
-	/* If the block is MIGRATE_MOVABLE, allow migration */
-	if (migratetype == MIGRATE_MOVABLE)
+	/* If the block is MIGRATE_MOVABLE or MIGRATE_CMA, allow migration */
+	if (migrate_async_suitable(migratetype))
 		return true;
 
 	/* Otherwise skip the block */
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d6b580c660f5..0869eb1e9461 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -750,6 +750,24 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 	__free_pages(page, order);
 }
 
+#ifdef CONFIG_CMA
+/* Free the whole pageblock and set its migration type to MIGRATE_CMA. */
+void __init init_cma_reserved_pageblock(struct page *page)
+{
+	unsigned i = pageblock_nr_pages;
+	struct page *p = page;
+
+	do {
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	} while (++p, --i);
+
+	set_page_refcounted(page);
+	set_pageblock_migratetype(page, MIGRATE_CMA);
+	__free_pages(page, pageblock_order);
+	totalram_pages += pageblock_nr_pages;
+}
+#endif
 
 /*
  * The order of subdivision here is critical for the IO subsystem.
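[Editor's note: for context, a hedged sketch of the kind of early-boot
caller init_cma_reserved_pageblock() anticipates.  The actual CMA core lands
in a later patch of this series; cma_activate_range() and its alignment
check are assumptions, not code from this commit.]

/* Hypothetical early-boot caller, not part of this patch. */
static int __init cma_activate_range(unsigned long base_pfn,
				     unsigned long count)
{
	unsigned long pfn;

	/* Per the mmzone.h comment, the range must be suitably aligned. */
	if (!IS_ALIGNED(base_pfn, pageblock_nr_pages) ||
	    !IS_ALIGNED(count, pageblock_nr_pages))
		return -EINVAL;

	/* Hand each reserved pageblock to the buddy allocator as CMA. */
	for (pfn = base_pfn; pfn < base_pfn + count; pfn += pageblock_nr_pages)
		init_cma_reserved_pageblock(pfn_to_page(pfn));

	return 0;
}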
@@ -875,10 +893,15 @@ struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
  */
-static int fallbacks[MIGRATE_TYPES][3] = {
+static int fallbacks[MIGRATE_TYPES][4] = {
 	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
 	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
+#ifdef CONFIG_CMA
+	[MIGRATE_MOVABLE]     = { MIGRATE_CMA,         MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+	[MIGRATE_CMA]         = { MIGRATE_RESERVE }, /* Never used */
+#else
 	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
+#endif
 	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE }, /* Never used */
 	[MIGRATE_ISOLATE]     = { MIGRATE_RESERVE }, /* Never used */
 };
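[Editor's note: the effect of the new rows can be seen in a standalone
sketch — plain C, not kernel code — that mirrors the CONFIG_CMA=y flavour of
fallbacks[] and walks it the way __rmqueue_fallback() does, with
MIGRATE_RESERVE acting as the terminator.  A movable request raids CMA
first; no other migratetype ever falls back to CMA.]

#include <stdio.h>

enum { UNMOVABLE, RECLAIMABLE, MOVABLE, CMA, RESERVE, NTYPES };

static const char *name[] = {
	"Unmovable", "Reclaimable", "Movable", "CMA", "Reserve"
};

/* Same ordering as the kernel table above, CONFIG_CMA=y flavour. */
static const int fallbacks[NTYPES][4] = {
	[UNMOVABLE]   = { RECLAIMABLE, MOVABLE, RESERVE },
	[RECLAIMABLE] = { UNMOVABLE, MOVABLE, RESERVE },
	[MOVABLE]     = { CMA, RECLAIMABLE, UNMOVABLE, RESERVE },
	[CMA]         = { RESERVE },
	[RESERVE]     = { RESERVE },
};

int main(void)
{
	int i, mt;

	/* Walk the fallback list for a movable request. */
	for (i = 0; (mt = fallbacks[MOVABLE][i]) != RESERVE; i++)
		printf("try %s\n", name[mt]);
	return 0;	/* prints: try CMA / try Reclaimable / try Unmovable */
}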
@@ -995,11 +1018,18 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			 * pages to the preferred allocation list. If falling
 			 * back for a reclaimable kernel allocation, be more
 			 * aggressive about taking ownership of free pages
+			 *
+			 * On the other hand, never change the migration
+			 * type of MIGRATE_CMA pageblocks nor move CMA
+			 * pages to different free lists. We don't
+			 * want unmovable pages to be allocated from
+			 * MIGRATE_CMA areas.
 			 */
-			if (unlikely(current_order >= (pageblock_order >> 1)) ||
-					start_migratetype == MIGRATE_RECLAIMABLE ||
-					page_group_by_mobility_disabled) {
-				unsigned long pages;
+			if (!is_migrate_cma(migratetype) &&
+			    (unlikely(current_order >= pageblock_order / 2) ||
+			     start_migratetype == MIGRATE_RECLAIMABLE ||
+			     page_group_by_mobility_disabled)) {
+				int pages;
 				pages = move_freepages_block(zone, page,
 							     start_migratetype);
 
@@ -1017,11 +1047,14 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			rmv_page_order(page);
 
 			/* Take ownership for orders >= pageblock_order */
-			if (current_order >= pageblock_order)
+			if (current_order >= pageblock_order &&
+			    !is_migrate_cma(migratetype))
 				change_pageblock_range(page, current_order,
 							start_migratetype);
 
-			expand(zone, page, order, current_order, area, migratetype);
+			expand(zone, page, order, current_order, area,
+			       is_migrate_cma(migratetype)
+			     ? migratetype : start_migratetype);
 
 			trace_mm_page_alloc_extfrag(page, order, current_order,
 				start_migratetype, migratetype);
@@ -1072,7 +1105,7 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			unsigned long count, struct list_head *list,
 			int migratetype, int cold)
 {
-	int i;
+	int mt = migratetype, i;
 
 	spin_lock(&zone->lock);
 	for (i = 0; i < count; ++i) {
@@ -1093,7 +1126,12 @@ static int rmqueue_bulk(struct zone *zone, unsigned int order,
 			list_add(&page->lru, list);
 		else
 			list_add_tail(&page->lru, list);
-		set_page_private(page, migratetype);
+		if (IS_ENABLED(CONFIG_CMA)) {
+			mt = get_pageblock_migratetype(page);
+			if (!is_migrate_cma(mt) && mt != MIGRATE_ISOLATE)
+				mt = migratetype;
+		}
+		set_page_private(page, mt);
 		list = &page->lru;
 	}
 	__mod_zone_page_state(zone, NR_FREE_PAGES, -(i << order));
@@ -1373,8 +1411,12 @@ int split_free_page(struct page *page)
 
 	if (order >= pageblock_order - 1) {
 		struct page *endpage = page + (1 << order) - 1;
-		for (; page < endpage; page += pageblock_nr_pages)
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		for (; page < endpage; page += pageblock_nr_pages) {
+			int mt = get_pageblock_migratetype(page);
+			if (mt != MIGRATE_ISOLATE && !is_migrate_cma(mt))
+				set_pageblock_migratetype(page,
+							  MIGRATE_MOVABLE);
+		}
 	}
 
 	return 1 << order;
@@ -5414,14 +5456,16 @@ static int
 __count_immobile_pages(struct zone *zone, struct page *page, int count)
 {
 	unsigned long pfn, iter, found;
+	int mt;
+
 	/*
 	 * For avoiding noise data, lru_add_drain_all() should be called
 	 * If ZONE_MOVABLE, the zone never contains immobile pages
 	 */
 	if (zone_idx(zone) == ZONE_MOVABLE)
 		return true;
-
-	if (get_pageblock_migratetype(page) == MIGRATE_MOVABLE)
+	mt = get_pageblock_migratetype(page);
+	if (mt == MIGRATE_MOVABLE || is_migrate_cma(mt))
 		return true;
 
 	pfn = page_to_pfn(page);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 7db1b9bab492..0dad31dc1618 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -613,6 +613,9 @@ static char * const migratetype_names[MIGRATE_TYPES] = {
613 "Reclaimable", 613 "Reclaimable",
614 "Movable", 614 "Movable",
615 "Reserve", 615 "Reserve",
616#ifdef CONFIG_CMA
617 "CMA",
618#endif
616 "Isolate", 619 "Isolate",
617}; 620};
618 621