-rw-r--r--   include/linux/mmzone.h    9
-rw-r--r--   init/Kconfig             13
-rw-r--r--   mm/page_alloc.c          42
3 files changed, 2 insertions, 62 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index afdec8117458..09b2c4f50e38 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -33,21 +33,12 @@
  */
 #define PAGE_ALLOC_COSTLY_ORDER 3
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 #define MIGRATE_UNMOVABLE 0
 #define MIGRATE_RECLAIMABLE 1
 #define MIGRATE_MOVABLE 2
 #define MIGRATE_HIGHATOMIC 3
 #define MIGRATE_RESERVE 4
 #define MIGRATE_TYPES 5
-#else
-#define MIGRATE_UNMOVABLE 0
-#define MIGRATE_UNRECLAIMABLE 0
-#define MIGRATE_MOVABLE 0
-#define MIGRATE_HIGHATOMIC 0
-#define MIGRATE_RESERVE 0
-#define MIGRATE_TYPES 1
-#endif
 
 #define for_each_migratetype_order(order, type) \
 	for (order = 0; order < MAX_ORDER; order++) \
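With the #else branch gone, MIGRATE_TYPES is always 5 and the buddy free lists are always split per migrate type. The following is a minimal user-space sketch, not part of the patch: the MIGRATE_* values mirror the ones this hunk keeps, MAX_ORDER is assumed at its common default of 11, and the macro's inner loop (which falls outside the hunk's context) is assumed to iterate up to MIGRATE_TYPES.

#include <stdio.h>

/* Illustrative sketch, not part of the patch. MAX_ORDER is assumed to be
 * the common default of 11; the inner loop of the macro is assumed to
 * iterate over MIGRATE_TYPES, matching the values kept by this hunk. */
#define MAX_ORDER 11

#define MIGRATE_UNMOVABLE 0
#define MIGRATE_RECLAIMABLE 1
#define MIGRATE_MOVABLE 2
#define MIGRATE_HIGHATOMIC 3
#define MIGRATE_RESERVE 4
#define MIGRATE_TYPES 5

#define for_each_migratetype_order(order, type) \
	for (order = 0; order < MAX_ORDER; order++) \
		for (type = 0; type < MIGRATE_TYPES; type++)

int main(void)
{
	int order, type, nr_lists = 0;

	/* One free-list head per (order, migratetype) pair. */
	for_each_migratetype_order(order, type)
		nr_lists++;

	printf("free list heads per zone: %d\n", nr_lists);	/* 11 * 5 = 55 */
	return 0;
}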
diff --git a/init/Kconfig b/init/Kconfig
index bab643f7717f..54f31a191b88 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -607,19 +607,6 @@ config BASE_SMALL
 	default 0 if BASE_FULL
 	default 1 if !BASE_FULL
 
-config PAGE_GROUP_BY_MOBILITY
-	bool "Group pages based on their mobility in the page allocator"
-	def_bool y
-	help
-	  The standard allocator will fragment memory over time which means
-	  that high order allocations will fail even if kswapd is running. If
-	  this option is set, the allocator will try and group page types
-	  based on their ability to migrate or reclaim. This is a best effort
-	  attempt at lowering fragmentation which a few workloads care about.
-	  The loss is a more complex allocator that may perform slower. If
-	  you are interested in working with large pages, say Y and set
-	  /proc/sys/vm/min_free_kbytes to 16374. Otherwise say N
-
 menuconfig MODULES
 	bool "Enable loadable module support"
 	help
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f7873a47fa8e..8aec4d4601e7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -158,7 +158,6 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 #endif
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 int page_group_by_mobility_disabled __read_mostly;
 
 static inline int get_pageblock_migratetype(struct page *page)
@@ -192,22 +191,6 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-#else
-static inline int get_pageblock_migratetype(struct page *page)
-{
-	return MIGRATE_UNMOVABLE;
-}
-
-static void set_pageblock_migratetype(struct page *page, int migratetype)
-{
-}
-
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
-{
-	return MIGRATE_UNMOVABLE;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
-
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
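The hunk above keeps only the tail of allocflags_to_migratetype(), the helper that maps GFP flags to a migrate type. The sketch below reconstructs that mapping in user space so it can be run standalone; the __GFP_* bit values and the full shift/or expression are assumptions consistent with the visible line, not a verbatim copy of the kernel's gfp.h or of this function.

#include <stdio.h>

/* Illustrative sketch only. The bit values below are simplified stand-ins,
 * not the kernel's real __GFP_* definitions; the body is an assumed
 * reconstruction consistent with the line visible in the hunk above. */
typedef unsigned int gfp_t;
#define __GFP_MOVABLE     0x08u	/* assumption: any distinct bit works here */
#define __GFP_RECLAIMABLE 0x10u	/* assumption */

#define MIGRATE_UNMOVABLE   0
#define MIGRATE_RECLAIMABLE 1
#define MIGRATE_MOVABLE     2

/* Movable-ness becomes bit 1, reclaimable-ness bit 0, giving 0..2. */
static int allocflags_to_migratetype(gfp_t gfp_flags)
{
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("%d %d %d\n",
	       allocflags_to_migratetype(0),			/* 0: MIGRATE_UNMOVABLE   */
	       allocflags_to_migratetype(__GFP_RECLAIMABLE),	/* 1: MIGRATE_RECLAIMABLE */
	       allocflags_to_migratetype(__GFP_MOVABLE));	/* 2: MIGRATE_MOVABLE     */
	return 0;
}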
@@ -718,7 +701,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 }
 
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
@@ -750,7 +732,7 @@ int move_freepages(struct zone *zone,
 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 	 * anyway as we check zone boundaries in move_freepages_block().
 	 * Remove at a later date when no bug reports exist related to
-	 * CONFIG_PAGE_GROUP_BY_MOBILITY
+	 * grouping pages by mobility
 	 */
 	BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
@@ -899,13 +881,6 @@ retry:
 	/* Use MIGRATE_RESERVE rather than fail an allocation */
 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
-#else
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-						int start_migratetype)
-{
-	return NULL;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
@@ -1033,7 +1008,6 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */
 
-#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -1064,9 +1038,6 @@ void drain_all_local_pages(void)
 
 	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
 }
-#else
-void drain_all_local_pages(void) {}
-#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
@@ -1157,7 +1128,6 @@ again:
 			goto failed;
 		}
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 		/* Find a page of the appropriate migrate type */
 		list_for_each_entry(page, &pcp->list, lru)
 			if (page_private(page) == migratetype)
@@ -1169,9 +1139,6 @@ again:
 					pcp->batch, &pcp->list, migratetype);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
-#else
-		page = list_entry(pcp->list.next, struct page, lru);
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 		list_del(&page->lru);
 		pcp->count--;
@@ -2525,7 +2492,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
  * of blocks reserved is based on zone->pages_min. The memory within the
@@ -2579,11 +2545,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 		}
 	}
 }
-#else
-static inline void setup_zone_migrate_reserve(struct zone *zone)
-{
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 /*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is
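The comment kept in the earlier mm/page_alloc.c hunk ("This array describes the order lists are fallen back to when the free lists for the desirable migrate type are depleted") describes a per-type fallback table consulted before resorting to MIGRATE_RESERVE. The sketch below shows that lookup pattern only; the table contents and the list_has_pages()/pick_migratetype() helpers are illustrative assumptions, not the kernel's actual fallbacks[] array or __rmqueue_fallback().

#include <stdio.h>

/* Sketch of the fallback-order idea described in the mm/page_alloc.c
 * comment above. The table below is an assumed example, NOT the kernel's
 * real fallbacks[] array; it only demonstrates the lookup pattern. */
enum {
	MIGRATE_UNMOVABLE,
	MIGRATE_RECLAIMABLE,
	MIGRATE_MOVABLE,
	MIGRATE_HIGHATOMIC,
	MIGRATE_RESERVE,
	MIGRATE_TYPES
};

/* For each requested type, which lists to raid next (assumed ordering). */
static const int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES - 1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,
				  MIGRATE_HIGHATOMIC,  MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,
				  MIGRATE_HIGHATOMIC,  MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,
				  MIGRATE_HIGHATOMIC,  MIGRATE_RESERVE },
	[MIGRATE_HIGHATOMIC]  = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE,
				  MIGRATE_MOVABLE,     MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,
				  MIGRATE_RESERVE,     MIGRATE_RESERVE },
};

/* Pretend free-list occupancy: nonzero means "this list has pages". */
static int list_has_pages(int migratetype)
{
	return migratetype == MIGRATE_MOVABLE;	/* assumption for the demo */
}

/* Walk the fallback row until a non-empty list is found. */
static int pick_migratetype(int start_migratetype)
{
	int i;

	if (list_has_pages(start_migratetype))
		return start_migratetype;

	for (i = 0; i < MIGRATE_TYPES - 1; i++) {
		int type = fallbacks[start_migratetype][i];

		if (list_has_pages(type))
			return type;
	}
	return MIGRATE_RESERVE;	/* last resort rather than failing, as in the hunk above */
}

int main(void)
{
	printf("unmovable request falls back to type %d\n",
	       pick_migratetype(MIGRATE_UNMOVABLE));	/* prints 2 (movable) */
	return 0;
}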