author		Mel Gorman <mel@csn.ul.ie>	2009-09-21 20:02:31 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-22 10:17:33 -0400
commit		2f66a68f3fac2e94da360c342ff78ab45553f86c (patch)
tree		ec8de9c7d18d866e63e2c9bcbecf902896b687bd /mm/page_alloc.c
parent		fe1ff49d0d1c30254dbfc84c3786eb538e0cc7d1 (diff)
page-allocator: change migratetype for all pageblocks within a high-order page during __rmqueue_fallback
When there are no pages of a target migratetype free, the page allocator
selects a high-order block of another migratetype to allocate from. When
the order of the page taken is greater than pageblock_order, all
pageblocks within that high-order page should change migratetype so that
pages are later freed to the correct free-lists.
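As a rough illustration of the arithmetic, the standalone userspace sketch below shows how many pageblocks a high-order page spans; the constant values are assumptions matching a common x86-64 configuration (4K base pages, 2MB pageblocks, MAX_ORDER = 11), not part of the patch:

#include <stdio.h>

/* Assumed values for a common x86-64 configuration (4K base pages,
 * 2MB pageblocks, MAX_ORDER = 11); not taken from the patch itself. */
#define PAGEBLOCK_ORDER    9
#define PAGEBLOCK_NR_PAGES (1UL << PAGEBLOCK_ORDER)

int main(void)
{
	int current_order = 10;	/* a MAX_ORDER-1 fallback allocation */

	/* An order-10 page spans 1 << (10 - 9) = 2 pageblocks, so
	 * both must have their migratetype updated, not just one. */
	int nr_pageblocks = 1 << (current_order - PAGEBLOCK_ORDER);

	printf("an order-%d page covers %d pageblock(s) of %lu pages each\n",
	       current_order, nr_pageblocks, PAGEBLOCK_NR_PAGES);
	return 0;
}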
The current behaviour is that pageblocks change migratetype only if the
order being split exactly matches pageblock_order. When pageblock_order <
MAX_ORDER-1, ownership does not change correctly: pages are later freed
to the wrong free-list, which defeats fragmentation avoidance.
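Condensed from the diff below (a sketch of the changed check, not the full function): with pageblock_order == 9, an order-10 split fails the old equality test, so no pageblock changed owner at all.

	/* Before: ownership changed only on an exact order match, so an
	 * order-10 split (10 != 9) updated nothing. */
	if (current_order == pageblock_order)
		set_pageblock_migratetype(page, start_migratetype);

	/* After: any split at or above pageblock_order walks every
	 * constituent pageblock via change_pageblock_range(). */
	if (current_order >= pageblock_order)
		change_pageblock_range(page, current_order, start_migratetype);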
This patch changes all pageblocks within the high-order page being split
to the correct migratetype. Without the patch, allocation success rates
for hugepages under stress were about 59% of physical memory on x86-64.
With the patch applied, this goes up to 65%.
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	16
1 file changed, 14 insertions(+), 2 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 9242d13f4ff3..20759803a64a 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -783,6 +783,17 @@ static int move_freepages_block(struct zone *zone, struct page *page,
 	return move_freepages(zone, start_page, end_page, migratetype);
 }
 
+static void change_pageblock_range(struct page *pageblock_page,
+					int start_order, int migratetype)
+{
+	int nr_pageblocks = 1 << (start_order - pageblock_order);
+
+	while (nr_pageblocks--) {
+		set_pageblock_migratetype(pageblock_page, migratetype);
+		pageblock_page += pageblock_nr_pages;
+	}
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static inline struct page *
 __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
@@ -836,8 +847,9 @@ __rmqueue_fallback(struct zone *zone, int order, int start_migratetype)
 			list_del(&page->lru);
 			rmv_page_order(page);
 
-			if (current_order == pageblock_order)
-				set_pageblock_migratetype(page,
+			/* Take ownership for orders >= pageblock_order */
+			if (current_order >= pageblock_order)
+				change_pageblock_range(page, current_order,
 							start_migratetype);
 
 			expand(zone, page, order, current_order, area, migratetype);