aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorMel Gorman <mel@csn.ul.ie>2007-10-16 04:25:51 -0400
committerLinus Torvalds <torvalds@woody.linux-foundation.org>2007-10-16 12:43:00 -0400
commitc361be55b3128474aa66d31092db330b07539103 (patch)
tree9ce134f4e679144d28f5c32924bdba999a1aae6b /mm/page_alloc.c
parente2c55dc87f4a398b9c4dcc702dbc23a07fe14e23 (diff)
Move free pages between lists on steal
When a fallback occurs, there will be free pages for one allocation type stored on the list for another. When a large steal occurs, this patch will move all the free pages within one list to the other. [y-goto@jp.fujitsu.com: fix BUG_ON check at move_freepages()] [apw@shadowen.org: Move to using pfn_valid_within()] Signed-off-by: Mel Gorman <mel@csn.ul.ie> Cc: Christoph Lameter <clameter@engr.sgi.com> Signed-off-by: Yasunori Goto <y-goto@jp.fujitsu.com> Cc: Bjorn Helgaas <bjorn.helgaas@hp.com> Signed-off-by: Andy Whitcroft <andyw@uk.ibm.com> Cc: Bob Picco <bob.picco@hp.com> Signed-off-by: Andrew Morton <akpm@linux-foundation.org> Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--mm/page_alloc.c74
1 file changed, 71 insertions, 3 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index aa7e5d2f28a5..d575a3ee8dd8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -680,6 +680,72 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
	[MIGRATE_MOVABLE] = { MIGRATE_UNMOVABLE },
};
682 682
683/*
684 * Move the free pages in a range to the free lists of the requested type.
685 * Note that start_page and end_pages are not aligned in a MAX_ORDER_NR_PAGES
686 * boundary. If alignment is required, use move_freepages_block()
687 */
688int move_freepages(struct zone *zone,
689 struct page *start_page, struct page *end_page,
690 int migratetype)
691{
692 struct page *page;
693 unsigned long order;
694 int blocks_moved = 0;
695
696#ifndef CONFIG_HOLES_IN_ZONE
697 /*
698 * page_zone is not safe to call in this context when
699 * CONFIG_HOLES_IN_ZONE is set. This bug check is probably redundant
700 * anyway as we check zone boundaries in move_freepages_block().
701 * Remove at a later date when no bug reports exist related to
702 * CONFIG_PAGE_GROUP_BY_MOBILITY
703 */
704 BUG_ON(page_zone(start_page) != page_zone(end_page));
705#endif
706
707 for (page = start_page; page <= end_page;) {
708 if (!pfn_valid_within(page_to_pfn(page))) {
709 page++;
710 continue;
711 }
712
713 if (!PageBuddy(page)) {
714 page++;
715 continue;
716 }
717
718 order = page_order(page);
719 list_del(&page->lru);
720 list_add(&page->lru,
721 &zone->free_area[order].free_list[migratetype]);
722 page += 1 << order;
723 blocks_moved++;
724 }
725
726 return blocks_moved;
727}
728
729int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
730{
731 unsigned long start_pfn, end_pfn;
732 struct page *start_page, *end_page;
733
734 start_pfn = page_to_pfn(page);
735 start_pfn = start_pfn & ~(MAX_ORDER_NR_PAGES-1);
736 start_page = pfn_to_page(start_pfn);
737 end_page = start_page + MAX_ORDER_NR_PAGES - 1;
738 end_pfn = start_pfn + MAX_ORDER_NR_PAGES - 1;
739
740 /* Do not cross zone boundaries */
741 if (start_pfn < zone->zone_start_pfn)
742 start_page = page;
743 if (end_pfn >= zone->zone_start_pfn + zone->spanned_pages)
744 return 0;
745
746 return move_freepages(zone, start_page, end_page, migratetype);
747}
748
683/* Remove an element from the buddy allocator from the fallback list */ 749/* Remove an element from the buddy allocator from the fallback list */
684static struct page *__rmqueue_fallback(struct zone *zone, int order, 750static struct page *__rmqueue_fallback(struct zone *zone, int order,
685 int start_migratetype) 751 int start_migratetype)
@@ -704,11 +770,13 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
704 area->nr_free--; 770 area->nr_free--;
705 771
706 /* 772 /*
707 * If breaking a large block of pages, place the buddies 773 * If breaking a large block of pages, move all free
708 * on the preferred allocation list 774 * pages to the preferred allocation list
709 */ 775 */
710 if (unlikely(current_order >= MAX_ORDER / 2)) 776 if (unlikely(current_order >= MAX_ORDER / 2)) {
711 migratetype = start_migratetype; 777 migratetype = start_migratetype;
778 move_freepages_block(zone, page, migratetype);
779 }
712 780
713 /* Remove the page from the freelists */ 781 /* Remove the page from the freelists */
714 list_del(&page->lru); 782 list_del(&page->lru);