about | summary | refs | log | tree | commit | diff | stats
path: root/mm/page_alloc.c
diff options
context:
space:
mode:
author    Yinghai Lu <yinghai@kernel.org>  2013-09-11 17:20:37 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2013-09-11 18:57:19 -0400
commit    e2d0bd2b924d74d5e0d4f395f8f4730d125e198c (patch)
tree      1ed63051163b55dce3491cb6cee65d621a80c923 /mm/page_alloc.c
parent    f92310c1877fc73470bdcd9228758fa3713c191b (diff)
mm: kill one if loop in __free_pages_bootmem()
We should not check loop+1 against the loop end inside the loop body. Just
duplicate the two lines of code to avoid it. That will help a bit when we
have a huge number of pages on a system with 16TiB memory.

Signed-off-by: Yinghai Lu <yinghai@kernel.org>
Cc: Mel Gorman <mgorman@suse.de>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c | 14 +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2748fc6a9003..8c68ef13cefa 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -751,19 +751,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 void __init __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	unsigned int nr_pages = 1 << order;
+	struct page *p = page;
 	unsigned int loop;
 
-	prefetchw(page);
-	for (loop = 0; loop < nr_pages; loop++) {
-		struct page *p = &page[loop];
-
-		if (loop + 1 < nr_pages)
-			prefetchw(p + 1);
+	prefetchw(p);
+	for (loop = 0; loop < (nr_pages - 1); loop++, p++) {
+		prefetchw(p + 1);
 		__ClearPageReserved(p);
 		set_page_count(p, 0);
 	}
+	__ClearPageReserved(p);
+	set_page_count(p, 0);
 
-	page_zone(page)->managed_pages += 1 << order;
+	page_zone(page)->managed_pages += nr_pages;
 	set_page_refcounted(page);
 	__free_pages(page, order);
 }