author	Mel Gorman <mel@csn.ul.ie>	2009-06-16 18:32:14 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-06-16 22:47:35 -0400
commit	974709bdb2a34db378fc84140220f363f558d0d6 (patch)
tree	2b63a089cc077579e3b67efba1995c71102db2e2 /mm
parent	f2260e6b1f4eba0f5b5906795117791b5c660154 (diff)
page allocator: get the pageblock migratetype without disabling interrupts
Local interrupts are disabled when freeing pages to the PCP list. Part of that free path looks up the migratetype of the pageblock the page belongs to, but it does so with interrupts disabled, and interrupts should never be disabled longer than necessary. This patch looks up the pageblock type with interrupts enabled, with the impact that a page may be freed to the wrong list if the pageblock changes type in the meantime. As that block is then already considered mixed from an anti-fragmentation perspective, this is not of vital importance.

Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Christoph Lameter <cl@linux-foundation.org>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Pekka Enberg <penberg@cs.helsinki.fi>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Nick Piggin <nickpiggin@yahoo.com.au>
Cc: Dave Hansen <dave@linux.vnet.ibm.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
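To make the ordering concrete, the following is a minimal user-space C sketch of the idea, not the kernel code: the struct, the *_stub helpers and main() are simplified stand-ins used only to illustrate how the migratetype lookup moves out of the interrupt-disabled window.

/* Minimal sketch, assuming simplified stand-ins for the real kernel types
 * and helpers. Illustrates the reordering done by this patch. */
#include <stdio.h>

struct page { unsigned long private; };

static unsigned long get_pageblock_migratetype_stub(struct page *page)
{
	(void)page;
	return 0;			/* e.g. MIGRATE_MOVABLE */
}

static void local_irq_save_stub(unsigned long *flags)   { *flags = 0; }
static void local_irq_restore_stub(unsigned long flags) { (void)flags; }
static void add_to_pcp_list_stub(struct page *page)     { (void)page; }

static void free_hot_cold_page_sketch(struct page *page)
{
	unsigned long flags;

	/* After the patch: look up and record the pageblock migratetype
	 * while interrupts are still enabled. Before the patch this was
	 * done inside the interrupt-off window below. */
	page->private = get_pageblock_migratetype_stub(page);

	local_irq_save_stub(&flags);	/* interrupt-off window starts here */
	add_to_pcp_list_stub(page);	/* only the PCP list update is covered */
	local_irq_restore_stub(flags);
}

int main(void)
{
	struct page p = { 0 };
	free_hot_cold_page_sketch(&p);
	printf("freed to PCP list with migratetype %lu\n", p.private);
	return 0;
}

The interrupt-off window now covers only the per-CPU list manipulation; the cost is the race described in the commit message, which is tolerated because the pageblock is already considered mixed.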
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	2
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index d56e377ad085..e60e41474332 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1030,6 +1030,7 @@ static void free_hot_cold_page(struct page *page, int cold)
 	kernel_map_pages(page, 1, 0);
 
 	pcp = &zone_pcp(zone, get_cpu())->pcp;
+	set_page_private(page, get_pageblock_migratetype(page));
 	local_irq_save(flags);
 	if (unlikely(clearMlocked))
 		free_page_mlock(page);
@@ -1039,7 +1040,6 @@ static void free_hot_cold_page(struct page *page, int cold)
 		list_add_tail(&page->lru, &pcp->list);
 	else
 		list_add(&page->lru, &pcp->list);
-	set_page_private(page, get_pageblock_migratetype(page));
 	pcp->count++;
 	if (pcp->count >= pcp->high) {
 		free_pages_bulk(zone, pcp->batch, &pcp->list, 0);