author    Robin Holt <holt@sgi.com>  2015-06-30 17:56:45 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2015-06-30 22:44:55 -0400
commit    1e8ce83cd17fd0f549a7ad145ddd2bfcdd7dfe37 (patch)
tree      a8c7d1ad2e5e7f449070fa3a8c4acfa6ac0ccdb1 /mm
parent    8e7a7f8619f1f93736d9bb7e31caf4721bdc739d (diff)
mm: meminit: move page initialization into a separate function
Currently, memmap_init_zone() has all the smarts for initializing a single
page.  A subset of this is required for parallel page initialisation and so
this patch breaks up the monolithic function in preparation.

Signed-off-by: Robin Holt <holt@sgi.com>
Signed-off-by: Nathan Zimmer <nzimmer@sgi.com>
Signed-off-by: Mel Gorman <mgorman@suse.de>
Tested-by: Nate Zimmer <nzimmer@sgi.com>
Tested-by: Waiman Long <waiman.long@hp.com>
Tested-by: Daniel J Blueman <daniel@numascale.com>
Acked-by: Pekka Enberg <penberg@kernel.org>
Cc: Robin Holt <robinmholt@gmail.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Waiman Long <waiman.long@hp.com>
Cc: Scott Norton <scott.norton@hp.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: "H. Peter Anvin" <hpa@zytor.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
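For readers skimming the diff below: the patch moves the per-page body of the
memmap_init_zone() loop into __init_single_page()/__init_single_pfn(), so that
later patches can drive the same helper from a parallel or deferred
initialisation path.  The stand-alone C sketch below is not part of the patch;
every name in it is invented purely to illustrate the refactoring pattern
(monolithic loop body factored into a reusable per-item initialiser):

/* Illustrative sketch only -- not kernel code; all names are hypothetical. */
#include <stdio.h>

struct fake_page {
	unsigned long pfn;
	int reserved;
};

/* Plays the role of __init_single_page(): all per-page set-up in one place. */
static void init_single_item(struct fake_page *p, unsigned long pfn)
{
	p->pfn = pfn;
	p->reserved = 1;
}

/*
 * Plays the role of memmap_init_zone(): the loop now only delegates, so a
 * different caller (for example a per-node worker) could reuse
 * init_single_item() on its own pfn range.
 */
static void init_range(struct fake_page *map, unsigned long start_pfn,
		       unsigned long count)
{
	for (unsigned long pfn = start_pfn; pfn < start_pfn + count; pfn++)
		init_single_item(&map[pfn - start_pfn], pfn);
}

int main(void)
{
	struct fake_page map[4];

	init_range(map, 4096, 4);
	printf("first pfn %lu, reserved %d\n", map[0].pfn, map[0].reserved);
	return 0;
}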
Diffstat (limited to 'mm')
-rw-r--r--  mm/page_alloc.c | 79
1 file changed, 46 insertions(+), 33 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5e6fa06f2784..bc5da2cdfc84 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -764,6 +764,51 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 	return 0;
 }
 
+static void __meminit __init_single_page(struct page *page, unsigned long pfn,
+				unsigned long zone, int nid)
+{
+	struct zone *z = &NODE_DATA(nid)->node_zones[zone];
+
+	set_page_links(page, zone, nid, pfn);
+	mminit_verify_page_links(page, zone, nid, pfn);
+	init_page_count(page);
+	page_mapcount_reset(page);
+	page_cpupid_reset_last(page);
+	SetPageReserved(page);
+
+	/*
+	 * Mark the block movable so that blocks are reserved for
+	 * movable at startup. This will force kernel allocations
+	 * to reserve their blocks rather than leaking throughout
+	 * the address space during boot when many long-lived
+	 * kernel allocations are made. Later some blocks near
+	 * the start are marked MIGRATE_RESERVE by
+	 * setup_zone_migrate_reserve()
+	 *
+	 * bitmap is created for zone's valid pfn range. but memmap
+	 * can be created for invalid pages (for alignment)
+	 * check here not to call set_pageblock_migratetype() against
+	 * pfn out of zone.
+	 */
+	if ((z->zone_start_pfn <= pfn)
+	    && (pfn < zone_end_pfn(z))
+	    && !(pfn & (pageblock_nr_pages - 1)))
+		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+
+	INIT_LIST_HEAD(&page->lru);
+#ifdef WANT_PAGE_VIRTUAL
+	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
+	if (!is_highmem_idx(zone))
+		set_page_address(page, __va(pfn << PAGE_SHIFT));
+#endif
+}
+
+static void __meminit __init_single_pfn(unsigned long pfn, unsigned long zone,
+					int nid)
+{
+	return __init_single_page(pfn_to_page(pfn), pfn, zone, nid);
+}
+
 static bool free_pages_prepare(struct page *page, unsigned int order)
 {
 	bool compound = PageCompound(page);
@@ -4212,7 +4257,6 @@ static void setup_zone_migrate_reserve(struct zone *zone)
 void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		unsigned long start_pfn, enum memmap_context context)
 {
-	struct page *page;
 	unsigned long end_pfn = start_pfn + size;
 	unsigned long pfn;
 	struct zone *z;
@@ -4233,38 +4277,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		if (!early_pfn_in_nid(pfn, nid))
 			continue;
 		}
-		page = pfn_to_page(pfn);
-		set_page_links(page, zone, nid, pfn);
-		mminit_verify_page_links(page, zone, nid, pfn);
-		init_page_count(page);
-		page_mapcount_reset(page);
-		page_cpupid_reset_last(page);
-		SetPageReserved(page);
-		/*
-		 * Mark the block movable so that blocks are reserved for
-		 * movable at startup. This will force kernel allocations
-		 * to reserve their blocks rather than leaking throughout
-		 * the address space during boot when many long-lived
-		 * kernel allocations are made. Later some blocks near
-		 * the start are marked MIGRATE_RESERVE by
-		 * setup_zone_migrate_reserve()
-		 *
-		 * bitmap is created for zone's valid pfn range. but memmap
-		 * can be created for invalid pages (for alignment)
-		 * check here not to call set_pageblock_migratetype() against
-		 * pfn out of zone.
-		 */
-		if ((z->zone_start_pfn <= pfn)
-		    && (pfn < zone_end_pfn(z))
-		    && !(pfn & (pageblock_nr_pages - 1)))
-			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
-		INIT_LIST_HEAD(&page->lru);
-#ifdef WANT_PAGE_VIRTUAL
-		/* The shift won't overflow because ZONE_NORMAL is below 4G. */
-		if (!is_highmem_idx(zone))
-			set_page_address(page, __va(pfn << PAGE_SHIFT));
-#endif
+		__init_single_pfn(pfn, zone, nid);
 	}
 }
 