author    Mel Gorman <mel@csn.ul.ie>    2007-10-16 04:25:58 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>    2007-10-16 12:43:00 -0400
commit    ac0e5b7a6b93fb291b01fe1e951e3c16bcdd3503 (patch)
tree      732f67c8de6e0d2e001b60c17af9599468b80163 /mm/page_alloc.c
parent    56fd56b868f19385c50af8941a4c78df433b2d32 (diff)
remove PAGE_GROUP_BY_MOBILITY

Grouping pages by mobility can be disabled at compile-time. This was
considered undesirable by a number of people. However, in the current stack
of patches, it is not a simple case of just dropping the configurable patch
as it would cause merge conflicts. This patch backs out the configuration
option.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	42
1 file changed, 2 insertions(+), 40 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f7873a47fa8e..8aec4d4601e7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -158,7 +158,6 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 #endif
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 int page_group_by_mobility_disabled __read_mostly;
 
 static inline int get_pageblock_migratetype(struct page *page)
@@ -192,22 +191,6 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
 		((gfp_flags & __GFP_RECLAIMABLE) != 0);
 }
 
-#else
-static inline int get_pageblock_migratetype(struct page *page)
-{
-	return MIGRATE_UNMOVABLE;
-}
-
-static void set_pageblock_migratetype(struct page *page, int migratetype)
-{
-}
-
-static inline int allocflags_to_migratetype(gfp_t gfp_flags, int order)
-{
-	return MIGRATE_UNMOVABLE;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
-
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
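For context, the surviving allocflags_to_migratetype() maps two GFP bits straight to a migrate type. A minimal userspace sketch of that bit trick, assuming the era's enum order (MIGRATE_UNMOVABLE = 0, MIGRATE_RECLAIMABLE = 1, MIGRATE_MOVABLE = 2) and stand-in values for the two GFP flags:

#include <stdio.h>

/* Assumed enum order; the bit trick depends on it:
 * bit 0 = reclaimable, bit 1 = movable. */
enum { MIGRATE_UNMOVABLE, MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE };

#define __GFP_RECLAIMABLE 0x1u	/* stand-in values, not the kernel's */
#define __GFP_MOVABLE     0x2u

static int allocflags_to_migratetype(unsigned int gfp_flags)
{
	/* Two GFP bits index directly into the migrate-type enum. */
	return (((gfp_flags & __GFP_MOVABLE) != 0) << 1) |
		((gfp_flags & __GFP_RECLAIMABLE) != 0);
}

int main(void)
{
	printf("%d\n", allocflags_to_migratetype(0));			/* 0: unmovable */
	printf("%d\n", allocflags_to_migratetype(__GFP_RECLAIMABLE));	/* 1: reclaimable */
	printf("%d\n", allocflags_to_migratetype(__GFP_MOVABLE));	/* 2: movable */
	return 0;
}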
@@ -718,7 +701,6 @@ static struct page *__rmqueue_smallest(struct zone *zone, unsigned int order,
 }
 
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
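The table that comment introduces is not shown in the hunk; its shape in this era's page_alloc.c is roughly the following (a sketch, not a verbatim quote). Each row lists the migrate types to steal from, in order, with MIGRATE_RESERVE as every row's last resort:

static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_UNMOVABLE]   = { MIGRATE_RECLAIMABLE, MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_RECLAIMABLE] = { MIGRATE_UNMOVABLE,   MIGRATE_MOVABLE,   MIGRATE_RESERVE },
	[MIGRATE_MOVABLE]     = { MIGRATE_RECLAIMABLE, MIGRATE_UNMOVABLE, MIGRATE_RESERVE },
	[MIGRATE_RESERVE]     = { MIGRATE_RESERVE,     MIGRATE_RESERVE,   MIGRATE_RESERVE },
};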
@@ -750,7 +732,7 @@ int move_freepages(struct zone *zone,
 	 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
 	 * anyway as we check zone boundaries in move_freepages_block().
	 * Remove at a later date when no bug reports exist related to
-	 * CONFIG_PAGE_GROUP_BY_MOBILITY
+	 * grouping pages by mobility
 	 */
 	BUG_ON(page_zone(start_page) != page_zone(end_page));
 #endif
@@ -899,13 +881,6 @@ retry:
 	/* Use MIGRATE_RESERVE rather than fail an allocation */
 	return __rmqueue_smallest(zone, order, MIGRATE_RESERVE);
 }
-#else
-static struct page *__rmqueue_fallback(struct zone *zone, int order,
-						int start_migratetype)
-{
-	return NULL;
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
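The retry/reserve logic above is the tail of __rmqueue_fallback(); with the stub gone, every build now takes the real fallback path. How the two halves fit together in __rmqueue(), paraphrased as a sketch rather than quoted:

/* Try the requested migrate type first; only when its free lists are
 * exhausted, steal from another type via the fallback table, which
 * bottoms out in MIGRATE_RESERVE instead of failing outright. */
static struct page *__rmqueue(struct zone *zone, unsigned int order,
						int migratetype)
{
	struct page *page;

	page = __rmqueue_smallest(zone, order, migratetype);
	if (unlikely(!page))
		page = __rmqueue_fallback(zone, order, migratetype);

	return page;
}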
@@ -1033,7 +1008,6 @@ void mark_free_pages(struct zone *zone)
 }
 #endif /* CONFIG_PM */
 
-#if defined(CONFIG_HIBERNATION) || defined(CONFIG_PAGE_GROUP_BY_MOBILITY)
 /*
  * Spill all of this CPU's per-cpu pages back into the buddy allocator.
  */
@@ -1064,9 +1038,6 @@ void drain_all_local_pages(void)
 
 	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
 }
-#else
-void drain_all_local_pages(void) {}
-#endif /* CONFIG_HIBERNATION || CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Free a 0-order page
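With the config wrapper gone, drain_all_local_pages() is now built unconditionally. The pattern it implements, reconstructed approximately from this kernel's source (helper names as remembered, not quoted): drain the local CPU directly, then IPI every other CPU to drain its own per-cpu lists.

static void smp_drain_local_pages(void *arg)
{
	drain_local_pages();	/* runs on each remote CPU */
}

void drain_all_local_pages(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__drain_pages(smp_processor_id());	/* this CPU, IRQs off */
	local_irq_restore(flags);

	/* old 4-argument form: func, info, nonatomic, wait */
	smp_call_function(smp_drain_local_pages, NULL, 0, 1);
}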
@@ -1157,7 +1128,6 @@ again:
 				goto failed;
 		}
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 		/* Find a page of the appropriate migrate type */
 		list_for_each_entry(page, &pcp->list, lru)
 			if (page_private(page) == migratetype)
@@ -1169,9 +1139,6 @@ again:
 					pcp->batch, &pcp->list, migratetype);
 			page = list_entry(pcp->list.next, struct page, lru);
 		}
-#else
-		page = list_entry(pcp->list.next, struct page, lru);
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 		list_del(&page->lru);
 		pcp->count--;
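These two hunks split one piece of buffered_rmqueue() across two contexts. Stitched together, the now-unconditional per-cpu hot path reads approximately as follows (a paraphrase of the surrounding source):

	/* Find a page of the appropriate migrate type; page_private()
	 * holds the migratetype the page was freed under. */
	list_for_each_entry(page, &pcp->list, lru)
		if (page_private(page) == migratetype)
			break;

	/* The iterator ran off the end: no page of this type is cached.
	 * Bulk-fill the per-cpu list from the buddy lists, then take
	 * the first page. */
	if (unlikely(&page->lru == &pcp->list)) {
		pcp->count += rmqueue_bulk(zone, 0,
				pcp->batch, &pcp->list, migratetype);
		page = list_entry(pcp->list.next, struct page, lru);
	}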
@@ -2525,7 +2492,6 @@ static inline unsigned long wait_table_bits(unsigned long size)
 
 #define LONG_ALIGN(x) (((x)+(sizeof(long))-1)&~((sizeof(long))-1))
 
-#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * Mark a number of MAX_ORDER_NR_PAGES blocks as MIGRATE_RESERVE. The number
  * of blocks reserved is based on zone->pages_min. The memory within the
@@ -2579,11 +2545,7 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 		}
 	}
 }
-#else
-static inline void setup_zone_migrate_reserve(struct zone *zone)
-{
-}
-#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 /*
  * Initially all pages are reserved - free ones are freed
  * up by free_all_bootmem() once the early boot process is
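To make the sizing rule concrete ("the number of blocks reserved is based on zone->pages_min"): the kernel rounds pages_min up to whole MAX_ORDER blocks and reserves that many. A standalone sketch of the arithmetic, assuming MAX_ORDER = 11 (1024 pages per block) and an example pages_min; the exact in-kernel expression lives in setup_zone_migrate_reserve() and may differ in detail:

#include <stdio.h>

#define MAX_ORDER		11
#define MAX_ORDER_NR_PAGES	(1UL << (MAX_ORDER - 1))	/* 1024 */

int main(void)
{
	unsigned long pages_min = 2900;	/* example zone->pages_min */

	/* Round pages_min up to whole MAX_ORDER blocks, then count
	 * the blocks: 2900 -> 3072 pages -> 3 reserve blocks. */
	unsigned long reserve =
		(pages_min + MAX_ORDER_NR_PAGES - 1) / MAX_ORDER_NR_PAGES;

	printf("%lu MIGRATE_RESERVE blocks\n", reserve);	/* prints 3 */
	return 0;
}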