author	Mel Gorman <mgorman@suse.de>	2015-06-30 17:57:20 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2015-06-30 22:44:56 -0400
commit	ac5d2539b2382689b1cdb90bd60dcd49f61c2773 (patch)
tree	b48f21e19d6b6bb9c50a3af7fdf81d22fceca125 /mm/page_alloc.c
parent	a4de83dd3377eb43ad95387cc16c27a11aae2feb (diff)
mm: meminit: reduce number of times pageblocks are set during struct page init
During parallel struct page initialisation, ranges are checked for every PFN unnecessarily, which increases boot times. This patch alters when the ranges are checked.

Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Nate Zimmer <nzimmer@sgi.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
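In essence, the patch moves the pageblock handling out of the per-PFN path: instead of range-checking every PFN inside __init_single_page(), the caller that already walks only valid PFNs marks one pageblock per alignment hit. A rough sketch of that per-pageblock pattern, using simplified, hypothetical helper names rather than the kernel's own, might look like this:

#include <stdbool.h>
#include <stdio.h>

#define PAGEBLOCK_NR_PAGES 512UL   /* assumed pageblock size in pages */

/* hypothetical stand-in for set_pageblock_migratetype() */
static void mark_pageblock_movable(unsigned long pfn)
{
	printf("pageblock starting at pfn %lu marked movable\n", pfn);
}

/* hypothetical stand-in for the per-page initialisation work */
static void init_single_pfn(unsigned long pfn)
{
	(void)pfn; /* per-page struct page setup would happen here */
}

/*
 * The zone walk visits only PFNs known to be inside the zone, so the
 * pageblock work can be keyed off alignment alone, once per block,
 * rather than re-checking the zone range for every single PFN.
 */
static void init_zone_range(unsigned long start_pfn, unsigned long end_pfn)
{
	for (unsigned long pfn = start_pfn; pfn < end_pfn; pfn++) {
		if (!(pfn & (PAGEBLOCK_NR_PAGES - 1)))
			mark_pageblock_movable(pfn);
		init_single_pfn(pfn);
	}
}

int main(void)
{
	init_zone_range(0, 4 * PAGEBLOCK_NR_PAGES);
	return 0;
}

With this shape, the pageblock marking runs once per pageblock (here once per 512 PFNs) instead of a zone-range check running for every PFN initialised.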
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--	mm/page_alloc.c	46
1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3f00f622f28..f1f455a69cef 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -838,33 +838,12 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 				unsigned long zone, int nid)
 {
-	struct zone *z = &NODE_DATA(nid)->node_zones[zone];
-
 	set_page_links(page, zone, nid, pfn);
 	mminit_verify_page_links(page, zone, nid, pfn);
 	init_page_count(page);
 	page_mapcount_reset(page);
 	page_cpupid_reset_last(page);
 
-	/*
-	 * Mark the block movable so that blocks are reserved for
-	 * movable at startup. This will force kernel allocations
-	 * to reserve their blocks rather than leaking throughout
-	 * the address space during boot when many long-lived
-	 * kernel allocations are made. Later some blocks near
-	 * the start are marked MIGRATE_RESERVE by
-	 * setup_zone_migrate_reserve()
-	 *
-	 * bitmap is created for zone's valid pfn range. but memmap
-	 * can be created for invalid pages (for alignment)
-	 * check here not to call set_pageblock_migratetype() against
-	 * pfn out of zone.
-	 */
-	if ((z->zone_start_pfn <= pfn)
-	    && (pfn < zone_end_pfn(z))
-	    && !(pfn & (pageblock_nr_pages - 1)))
-		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
 	INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
@@ -1073,6 +1052,7 @@ static void __defermem_init deferred_free_range(struct page *page,
 	/* Free a large naturally-aligned chunk if possible */
 	if (nr_pages == MAX_ORDER_NR_PAGES &&
 	    (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
+		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 		__free_pages_boot_core(page, pfn, MAX_ORDER-1);
 		return;
 	}
@@ -4593,7 +4573,29 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 					      &nr_initialised))
 				break;
 		}
-		__init_single_pfn(pfn, zone, nid);
+
+		/*
+		 * Mark the block movable so that blocks are reserved for
+		 * movable at startup. This will force kernel allocations
+		 * to reserve their blocks rather than leaking throughout
+		 * the address space during boot when many long-lived
+		 * kernel allocations are made. Later some blocks near
+		 * the start are marked MIGRATE_RESERVE by
+		 * setup_zone_migrate_reserve()
+		 *
+		 * bitmap is created for zone's valid pfn range. but memmap
+		 * can be created for invalid pages (for alignment)
+		 * check here not to call set_pageblock_migratetype() against
+		 * pfn out of zone.
+		 */
+		if (!(pfn & (pageblock_nr_pages - 1))) {
+			struct page *page = pfn_to_page(pfn);
+
+			__init_single_page(page, pfn, zone, nid);
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		} else {
+			__init_single_pfn(pfn, zone, nid);
+		}
 	}
 }
 