| author | Mel Gorman <mel@csn.ul.ie> | 2007-10-16 04:25:50 -0400 |
|---|---|---|
| committer | Linus Torvalds <torvalds@woody.linux-foundation.org> | 2007-10-16 12:42:59 -0400 |
| commit | b92a6edd4b77a8794adb497280beea5df5e59a14 (patch) | |
| tree | 396ea5cf2b53fc066e949c443f03747ec868de1e /mm | |
| parent | 535131e6925b4a95f321148ad7293f496e0e58d7 (diff) |
Add a configure option to group pages by mobility
The grouping mechanism has some memory overhead and a more complex allocation
path. This patch allows the strategy to be disabled for small-memory systems,
or when a workload is known to suffer because of it. It also serves to show
where the page-grouping strategy interacts with the standard buddy allocator.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Joel Schopp <jschopp@austin.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
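The Kconfig side of CONFIG_PAGE_GROUP_BY_MOBILITY is not part of this mm-only diff, so a word on the pattern the patch relies on: when the option is off, the helpers collapse to constant stubs and the compiler drops the grouping logic entirely. Below is a minimal standalone sketch of that pattern; the MIGRATE_UNMOVABLE value and the __GFP_MOVABLE bit are made up for the example (the kernel defines both elsewhere):

```c
#include <stdio.h>

/* Assumed values for a self-contained example; the kernel's real
 * definitions live in its own headers, not here. */
#define MIGRATE_UNMOVABLE 0
#define __GFP_MOVABLE (1u << 3)

#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
/* Grouping enabled: movable allocations are told apart by a flag bit. */
static inline int gfpflags_to_migratetype(unsigned int gfp_flags)
{
        return ((gfp_flags & __GFP_MOVABLE) != 0);
}
#else
/* Grouping disabled: every allocation is treated as unmovable, so the
 * buddy allocator behaves as it did before the series. */
static inline int gfpflags_to_migratetype(unsigned int gfp_flags)
{
        return MIGRATE_UNMOVABLE;
}
#endif

int main(void)
{
        printf("movable request -> migratetype %d\n",
               gfpflags_to_migratetype(__GFP_MOVABLE));
        return 0;
}
```

Built with -DCONFIG_PAGE_GROUP_BY_MOBILITY this prints 1; without it, the stub folds to a constant and the movable/unmovable distinction disappears at compile time.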
Diffstat (limited to 'mm')
-rw-r--r-- | mm/page_alloc.c | 51 |
1 file changed, 37 insertions, 14 deletions
```diff
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3e726bd2858..fea1e3b56c3d 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -158,6 +158,7 @@ int nr_node_ids __read_mostly = MAX_NUMNODES;
 EXPORT_SYMBOL(nr_node_ids);
 #endif
 
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 static inline int get_pageblock_migratetype(struct page *page)
 {
         return get_pageblock_flags_group(page, PB_migrate, PB_migrate_end);
@@ -174,6 +175,22 @@ static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
         return ((gfp_flags & __GFP_MOVABLE) != 0);
 }
 
+#else
+static inline int get_pageblock_migratetype(struct page *page)
+{
+        return MIGRATE_UNMOVABLE;
+}
+
+static void set_pageblock_migratetype(struct page *page, int migratetype)
+{
+}
+
+static inline int gfpflags_to_migratetype(gfp_t gfp_flags)
+{
+        return MIGRATE_UNMOVABLE;
+}
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
 #ifdef CONFIG_DEBUG_VM
 static int page_outside_zone_boundaries(struct zone *zone, struct page *page)
 {
@@ -653,6 +670,7 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
         return 0;
 }
 
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
 /*
  * This array describes the order lists are fallen back to when
  * the free lists for the desirable migrate type are depleted
@@ -709,6 +727,13 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 
         return NULL;
 }
+#else
+static struct page *__rmqueue_fallback(struct zone *zone, int order,
+                                                int start_migratetype)
+{
+        return NULL;
+}
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
 
 /*
  * Do the hard work of removing an element from the buddy allocator.
@@ -953,27 +978,25 @@ again:
                 if (unlikely(!pcp->count))
                         goto failed;
                 }
+
+#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
                 /* Find a page of the appropriate migrate type */
-                list_for_each_entry(page, &pcp->list, lru) {
-                        if (page_private(page) == migratetype) {
-                                list_del(&page->lru);
-                                pcp->count--;
+                list_for_each_entry(page, &pcp->list, lru)
+                        if (page_private(page) == migratetype)
                                 break;
-                        }
-                }
 
-                /*
-                 * Check if a page of the appropriate migrate type
-                 * was found. If not, allocate more to the pcp list
-                 */
-                if (&page->lru == &pcp->list) {
+                /* Allocate more to the pcp list if necessary */
+                if (unlikely(&page->lru == &pcp->list)) {
                         pcp->count += rmqueue_bulk(zone, 0,
                                         pcp->batch, &pcp->list, migratetype);
                         page = list_entry(pcp->list.next, struct page, lru);
-                        VM_BUG_ON(page_private(page) != migratetype);
-                        list_del(&page->lru);
-                        pcp->count--;
                 }
+#else
+                page = list_entry(pcp->list.next, struct page, lru);
+#endif /* CONFIG_PAGE_GROUP_BY_MOBILITY */
+
+                list_del(&page->lru);
+                pcp->count--;
         } else {
                 spin_lock_irqsave(&zone->lock, flags);
                 page = __rmqueue(zone, order, migratetype);
```
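The last hunk is the subtle one: rather than duplicating the unlink in both branches, the patch hoists the shared list_del()/pcp->count-- pair below the #ifdef, so the grouped and ungrouped builds take a page off the per-cpu list through the same code. A simplified model of that control flow follows; a hand-rolled circular list stands in for the kernel's struct list_head, an int field stands in for page_private(), and the rmqueue_bulk() refill is left out:

```c
#include <stdio.h>

/* Stand-ins for struct page, page->lru and page_private(). */
struct fake_page {
        struct fake_page *prev, *next;
        int migratetype;
};

static void list_add_tail(struct fake_page *new, struct fake_page *head)
{
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
}

static void list_del(struct fake_page *entry)
{
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
}

/* Mirrors the restructured fast path: the migratetype search exists
 * only when grouping is compiled in; the unlink is shared by both. */
static struct fake_page *take_page(struct fake_page *head, int migratetype,
                                   int *count)
{
        struct fake_page *page;

#ifdef CONFIG_PAGE_GROUP_BY_MOBILITY
        for (page = head->next; page != head; page = page->next)
                if (page->migratetype == migratetype)
                        break;
        if (page == head)       /* no match: the kernel would refill the */
                return NULL;    /* list via rmqueue_bulk() at this point */
#else
        page = head->next;      /* grouping off: just take the list head */
        if (page == head)
                return NULL;
#endif
        list_del(page);         /* shared unlink path, as in the patch */
        (*count)--;
        return page;
}

int main(void)
{
        struct fake_page head = { &head, &head, -1 };
        struct fake_page a = { NULL, NULL, 0 }, b = { NULL, NULL, 1 };
        int count = 2;

        list_add_tail(&a, &head);
        list_add_tail(&b, &head);

        struct fake_page *got = take_page(&head, 1, &count);
        printf("took migratetype %d, %d pages left\n",
               got ? got->migratetype : -1, count);
        return 0;
}
```

With grouping compiled in, the search walks past the unmovable page to find the movable one; with it off, the function simply pops the list head, which is the pre-series behaviour.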