author		Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-11-12 17:14:44 -0500
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-11-12 17:14:44 -0500
commit		44048d700bcbfaf4bcca6e2e0a73d89d01ec0878 (patch)
tree		975427ee367023b6e3a254519d5e5fdced2c0969
parent		e6a5c27f3b0fef72e528fc35e343af4b2db790ff (diff)
Revert "Bias the placement of kernel pages at lower PFNs"
This reverts commit 5adc5be7cd1bcef6bb64f5255d2a33f20a3cf5be.

Alexey Dobriyan reports that it causes huge slowdowns under some loads,
in his case a "mkfs.ext2" on a 30G partition. With the placement bias,
the mkfs took over four minutes; with it reverted, it's back to about
ten seconds for Alexey.

Reported-and-tested-by: Alexey Dobriyan <adobriyan@gmail.com>
Cc: Mel Gorman <mel@csn.ul.ie>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
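A plausible mechanism for the slowdown is visible in the diff below: with the bias in place, every non-MIGRATE_MOVABLE fallback allocation called min_page(), which walks the entire free list to find the entry with the lowest PFN (an O(n) scan per allocation), whereas the restored code simply takes the first list entry in O(1). The following is a minimal standalone sketch of that scan pattern in plain C; the fake_page type and its next pointer are hypothetical stand-ins for the kernel's struct list_head plumbing and page_to_pfn(), used here only for illustration:

	#include <stddef.h>

	/* Hypothetical stand-in for struct page on a free list. */
	struct fake_page {
		unsigned long pfn;		/* page frame number */
		struct fake_page *next;		/* next page on the free list */
	};

	/* Same pattern as the reverted min_page(): scan the whole
	 * list for the lowest PFN, O(n) work on every call. */
	static struct fake_page *min_page(struct fake_page *list)
	{
		unsigned long min_pfn = -1UL;	/* larger than any real PFN */
		struct fake_page *min = NULL;
		struct fake_page *p;

		for (p = list; p != NULL; p = p->next) {
			if (p->pfn < min_pfn) {
				min_pfn = p->pfn;
				min = p;
			}
		}
		return min;			/* NULL for an empty list */
	}

On a workload like Alexey's mkfs.ext2, which issues a long stream of kernel (non-movable) allocations while the free lists are large, repeating this scan for every fallback allocation plausibly accounts for the reported four-minute runtime.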
-rw-r--r--	mm/page_alloc.c	20
1 file changed, 0 insertions, 20 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index da69d833e067..12376ae3f733 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -749,23 +749,6 @@ int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
 	return move_freepages(zone, start_page, end_page, migratetype);
 }
 
-/* Return the page with the lowest PFN in the list */
-static struct page *min_page(struct list_head *list)
-{
-	unsigned long min_pfn = -1UL;
-	struct page *min_page = NULL, *page;;
-
-	list_for_each_entry(page, list, lru) {
-		unsigned long pfn = page_to_pfn(page);
-		if (pfn < min_pfn) {
-			min_pfn = pfn;
-			min_page = page;
-		}
-	}
-
-	return min_page;
-}
-
 /* Remove an element from the buddy allocator from the fallback list */
 static struct page *__rmqueue_fallback(struct zone *zone, int order,
 						int start_migratetype)
@@ -789,11 +772,8 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 		if (list_empty(&area->free_list[migratetype]))
 			continue;
 
-		/* Bias kernel allocations towards low pfns */
 		page = list_entry(area->free_list[migratetype].next,
 					struct page, lru);
-		if (unlikely(start_migratetype != MIGRATE_MOVABLE))
-			page = min_page(&area->free_list[migratetype]);
 		area->nr_free--;
 
 		/*