author    Mel Gorman <mel@csn.ul.ie>  2007-10-16 04:25:54 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 12:43:00 -0400
commit    5adc5be7cd1bcef6bb64f5255d2a33f20a3cf5be (patch)
tree      39ed023e8c36cde0c0f9cba50f945e77d3ca26fa /mm/page_alloc.c
parent    9ef9acb05a741ec10a5e9122717736de12adced9 (diff)
Bias the placement of kernel pages at lower PFNs
This patch chooses blocks with lower PFNs when placing kernel allocations.
This is particularly important during fallback in low memory situations to
stop unmovable pages being placed throughout the entire address space.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  |  20
1 file changed, 20 insertions(+), 0 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 676aec93d699..e1d87ee1d9c6 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -765,6 +765,23 @@ int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
 	return move_freepages(zone, start_page, end_page, migratetype);
 }
 
+/* Return the page with the lowest PFN in the list */
+static struct page *min_page(struct list_head *list)
+{
+	unsigned long min_pfn = -1UL;
+	struct page *min_page = NULL, *page;
+
+	list_for_each_entry(page, list, lru) {
+		unsigned long pfn = page_to_pfn(page);
+		if (pfn < min_pfn) {
+			min_pfn = pfn;
+			min_page = page;
+		}
+	}
+
+	return min_page;
+}
+
 /* Remove an element from the buddy allocator from the fallback list */
 static struct page *__rmqueue_fallback(struct zone *zone, int order,
 						int start_migratetype)
@@ -795,8 +812,11 @@ retry:
 		if (list_empty(&area->free_list[migratetype]))
 			continue;
 
+		/* Bias kernel allocations towards low pfns */
 		page = list_entry(area->free_list[migratetype].next,
 							struct page, lru);
+		if (unlikely(start_migratetype != MIGRATE_MOVABLE))
+			page = min_page(&area->free_list[migratetype]);
 		area->nr_free--;
 
 		/*
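
For illustration, below is a minimal userspace sketch of the scan that
min_page() in the patch performs. The toy_page type and the hand-built
singly linked list are hypothetical stand-ins for the kernel's struct page
and list_head, and the PFN is stored in the node directly rather than
derived with page_to_pfn():

#include <stdio.h>

/* Hypothetical stand-in for struct page: the PFN is stored directly
 * instead of being computed by page_to_pfn(). */
struct toy_page {
	unsigned long pfn;
	struct toy_page *next;
};

/* Same logic as min_page() in the patch: start from the largest
 * possible PFN (-1UL, i.e. ULONG_MAX) and keep the smallest seen. */
static struct toy_page *toy_min_page(struct toy_page *head)
{
	unsigned long min_pfn = -1UL;
	struct toy_page *min = NULL, *p;

	for (p = head; p; p = p->next) {
		if (p->pfn < min_pfn) {
			min_pfn = p->pfn;
			min = p;
		}
	}
	return min;
}

int main(void)
{
	struct toy_page c = { 4096, NULL };
	struct toy_page b = { 128, &c };
	struct toy_page a = { 9000, &b };

	/* Prints 128: the free block lowest in the address space. */
	printf("lowest pfn: %lu\n", toy_min_page(&a)->pfn);
	return 0;
}

Note that the scan is linear in the length of the free list. In the patch
it only runs on the fallback path, and only when start_migratetype is not
MIGRATE_MOVABLE, so movable allocations still take the list head as before.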