author		KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>	2010-05-24 17:32:38 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2010-05-25 11:07:00 -0400
commit		ec95f53aa6ed62ba68660cb19c8474ebe9025cce (patch)
tree		953ad71c8ee0373ca76f17ee9df65358ad2fe7b7 /mm
parent		5f53e76299ceebd68bdf9495e8ff80db77711236 (diff)
mm: introduce free_pages_prepare()
free_hot_cold_page() and __free_pages_ok() have very similar freeing preparation.  Consolidate them.

[akpm@linux-foundation.org: fix busted coding style]
Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
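For context, the shape of the consolidation can be sketched outside the kernel: both freeing paths run the same preparation steps, so the patch pulls that shared work into one helper that returns whether the free may proceed. The following is a minimal user-space C sketch of that pattern only; struct fake_page, its fields, and the printf bodies are illustrative stand-ins, not the kernel code in the diff below.

/* Minimal sketch of the "shared preparation helper" pattern. */
#include <stdbool.h>
#include <stdio.h>

struct fake_page {
	int bad;	/* stand-in for a free_pages_check() failure */
	void *mapping;	/* stand-in for per-page state to clear      */
};

/* Shared preparation: returns false if the pages must not be freed. */
static bool free_pages_prepare(struct fake_page *page, unsigned int order)
{
	int bad = 0;

	for (unsigned int i = 0; i < (1u << order); i++) {
		struct fake_page *pg = page + i;

		pg->mapping = NULL;	/* clear per-page state */
		bad += pg->bad;		/* accumulate sanity-check failures */
	}
	return bad == 0;
}

/* High-order path: bails out early if preparation fails. */
static void __free_pages_ok(struct fake_page *page, unsigned int order)
{
	if (!free_pages_prepare(page, order))
		return;
	printf("freeing %u page(s) via __free_pages_ok\n", 1u << order);
}

/* Per-CPU hot/cold path: same preparation, order-0 only. */
static void free_hot_cold_page(struct fake_page *page)
{
	if (!free_pages_prepare(page, 0))
		return;
	printf("freeing 1 page via free_hot_cold_page\n");
}

int main(void)
{
	struct fake_page pages[4] = { 0 };

	__free_pages_ok(pages, 2);	/* order-2 block: 4 pages */
	free_hot_cold_page(&pages[0]);	/* single order-0 page    */
	return 0;
}

In the kernel patch the same gate sits in front of the real buddy and per-CPU freeing, as the diff shows.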
Diffstat (limited to 'mm')
-rw-r--r--	mm/page_alloc.c	40
1 file changed, 21 insertions(+), 19 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 95ad42de5a87..8f4f27841b71 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -620,20 +620,23 @@ static void free_one_page(struct zone *zone, struct page *page, int order,
 	spin_unlock(&zone->lock);
 }
 
-static void __free_pages_ok(struct page *page, unsigned int order)
+static bool free_pages_prepare(struct page *page, unsigned int order)
 {
-	unsigned long flags;
 	int i;
 	int bad = 0;
-	int wasMlocked = __TestClearPageMlocked(page);
 
 	trace_mm_page_free_direct(page, order);
 	kmemcheck_free_shadow(page, order);
 
-	for (i = 0 ; i < (1 << order) ; ++i)
-		bad += free_pages_check(page + i);
+	for (i = 0; i < (1 << order); i++) {
+		struct page *pg = page + i;
+
+		if (PageAnon(pg))
+			pg->mapping = NULL;
+		bad += free_pages_check(pg);
+	}
 	if (bad)
-		return;
+		return false;
 
 	if (!PageHighMem(page)) {
 		debug_check_no_locks_freed(page_address(page),PAGE_SIZE<<order);
@@ -643,6 +646,17 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	arch_free_page(page, order);
 	kernel_map_pages(page, 1 << order, 0);
 
+	return true;
+}
+
+static void __free_pages_ok(struct page *page, unsigned int order)
+{
+	unsigned long flags;
+	int wasMlocked = __TestClearPageMlocked(page);
+
+	if (!free_pages_prepare(page, order))
+		return;
+
 	local_irq_save(flags);
 	if (unlikely(wasMlocked))
 		free_page_mlock(page);
@@ -1128,21 +1142,9 @@ void free_hot_cold_page(struct page *page, int cold)
 	int migratetype;
 	int wasMlocked = __TestClearPageMlocked(page);
 
-	trace_mm_page_free_direct(page, 0);
-	kmemcheck_free_shadow(page, 0);
-
-	if (PageAnon(page))
-		page->mapping = NULL;
-	if (free_pages_check(page))
+	if (!free_pages_prepare(page, 0))
 		return;
 
-	if (!PageHighMem(page)) {
-		debug_check_no_locks_freed(page_address(page), PAGE_SIZE);
-		debug_check_no_obj_freed(page_address(page), PAGE_SIZE);
-	}
-	arch_free_page(page, 0);
-	kernel_map_pages(page, 1, 0);
-
 	migratetype = get_pageblock_migratetype(page);
 	set_page_private(page, migratetype);
 	local_irq_save(flags);