aboutsummaryrefslogtreecommitdiffstats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
authorJohannes Weiner <hannes@cmpxchg.org>2012-01-10 18:08:10 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2012-01-10 19:30:44 -0500
commitc3993076f842de3754360e5b998d6657a9d30303 (patch)
tree78c1ca3d031483932e2f236706b20064742c0b0c /mm/page_alloc.c
parent43d2b113241d6797b890318767e0af78e313414b (diff)
mm: page_alloc: generalize order handling in __free_pages_bootmem()
__free_pages_bootmem() used to special-case higher-order frees to save individual page checking with free_pages_bulk(). Nowadays, both zero order and non-zero order frees use free_pages(), which checks each individual page anyway, and so there is little point in making the distinction anymore. The higher-order loop will work just fine for zero order pages.

Signed-off-by: Johannes Weiner <hannes@cmpxchg.org>
Cc: Uwe Kleine-König <u.kleine-koenig@pengutronix.de>
Cc: Tejun Heo <tj@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	33
1 file changed, 12 insertions(+), 21 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 59153da58c69..794e6715c226 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -730,32 +730,23 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 	local_irq_restore(flags);
 }
 
-/*
- * permit the bootmem allocator to evade page validation on high-order frees
- */
 void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
-	if (order == 0) {
-		__ClearPageReserved(page);
-		set_page_count(page, 0);
-		set_page_refcounted(page);
-		__free_page(page);
-	} else {
-		int loop;
-
-		prefetchw(page);
-		for (loop = 0; loop < (1 << order); loop++) {
-			struct page *p = &page[loop];
-
-			if (loop + 1 < (1 << order))
-				prefetchw(p + 1);
-			__ClearPageReserved(p);
-			set_page_count(p, 0);
-		}
-
-		set_page_refcounted(page);
-		__free_pages(page, order);
-	}
+	unsigned int nr_pages = 1 << order;
+	unsigned int loop;
+
+	prefetchw(page);
+	for (loop = 0; loop < nr_pages; loop++) {
+		struct page *p = &page[loop];
+
+		if (loop + 1 < nr_pages)
+			prefetchw(p + 1);
+		__ClearPageReserved(p);
+		set_page_count(p, 0);
+	}
+
+	set_page_refcounted(page);
+	__free_pages(page, order);
 }
 
 