author		Mel Gorman <mel@csn.ul.ie>	2007-10-16 04:26:00 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:43:00 -0400
commit		d100313fd615cc30374ff92e0b3facb053838330 (patch)
tree		f0bcd5e3b07bee40a65182c63b54baceca366849 /mm
parent		64c5e135bf5a2a7f0ededb3435a31adbe0202f0c (diff)
Fix calculation in move_freepages_block for counting pages
move_freepages_block() returns the number of blocks moved.  This value is used to determine if a block of pages should be stolen for the exclusive use of a migrate type or not.  However, the value returned is not being used correctly: the caller treats it as a count of base pages.  This patch fixes the calculation to return the number of base pages that have been moved.

This should be considered a fix to the patch move-free-pages-between-lists-on-steal.patch

Credit to Andy Whitcroft for spotting the problem.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Acked-by: Andy Whitcroft <apw@shadowen.org>
Acked-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
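For illustration only, a minimal userspace sketch (not kernel code; the orders[] values and variable names are invented) of why counting free-list entries and counting base pages diverge when the entries sit on higher-order lists, which is the miscount this patch corrects:

/*
 * Hypothetical contents of one pageblock's worth of free-list entries:
 * one order-3 page (8 base pages) and two order-1 pages (2 each).
 */
#include <stdio.h>

int main(void)
{
	unsigned long orders[] = { 3, 1, 1 };
	int nr_entries = sizeof(orders) / sizeof(orders[0]);

	int blocks_moved = 0;	/* what the old code counted */
	int pages_moved = 0;	/* what the fixed code counts */

	for (int i = 0; i < nr_entries; i++) {
		blocks_moved++;				/* old: one per list entry */
		pages_moved += 1 << orders[i];		/* new: base pages per entry */
	}

	/* Prints: entries=3 base_pages=12 */
	printf("entries=%d base_pages=%d\n", blocks_moved, pages_moved);
	return 0;
}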
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ac8fc51825bb..942498fba942 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -718,7 +718,7 @@ int move_freepages(struct zone *zone,
 {
 	struct page *page;
 	unsigned long order;
-	int blocks_moved = 0;
+	int pages_moved = 0;
 
 #ifndef CONFIG_HOLES_IN_ZONE
 	/*
@@ -747,10 +747,10 @@ int move_freepages(struct zone *zone,
 		list_add(&page->lru,
 			&zone->free_area[order].free_list[migratetype]);
 		page += 1 << order;
-		blocks_moved++;
+		pages_moved += 1 << order;
 	}
 
-	return blocks_moved;
+	return pages_moved;
 }
 
 int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
@@ -833,7 +833,7 @@ static struct page *__rmqueue_fallback(struct zone *zone, int order,
 						start_migratetype);
 
 		/* Claim the whole block if over half of it is free */
-		if ((pages << current_order) >= (1 << (MAX_ORDER-2)))
+		if (pages >= (1 << (MAX_ORDER-2)))
 			set_pageblock_migratetype(page,
 						start_migratetype);
 
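For reference, a worked example of the new ">= (1 << (MAX_ORDER-2))" test, assuming MAX_ORDER is 11 (a common configuration, not stated in the patch): a MAX_ORDER-1 block spans 1 << (MAX_ORDER-1) = 1024 base pages, so the threshold 1 << (MAX_ORDER-2) = 512 is half the block, which only lines up once move_freepages_block() reports base pages rather than free-list entries:

/* Sketch of the threshold arithmetic; MAX_ORDER == 11 is an assumption,
 * as is the example value of pages_moved. */
#include <stdio.h>

#define MAX_ORDER 11

int main(void)
{
	unsigned long block_pages = 1UL << (MAX_ORDER - 1);	/* 1024 base pages per block */
	unsigned long threshold   = 1UL << (MAX_ORDER - 2);	/*  512, half the block      */
	unsigned long pages_moved = 600;			/* invented example value    */

	/* With pages_moved counted in base pages, the comparison is direct. */
	printf("block=%lu threshold=%lu claim=%s\n",
	       block_pages, threshold,
	       pages_moved >= threshold ? "yes" : "no");
	return 0;
}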