-rw-r--r--	mm/page_alloc.c	46
1 file changed, 24 insertions(+), 22 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e3f00f622f28..f1f455a69cef 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -838,33 +838,12 @@ static int free_tail_pages_check(struct page *head_page, struct page *page)
 static void __meminit __init_single_page(struct page *page, unsigned long pfn,
 					unsigned long zone, int nid)
 {
-	struct zone *z = &NODE_DATA(nid)->node_zones[zone];
-
 	set_page_links(page, zone, nid, pfn);
 	mminit_verify_page_links(page, zone, nid, pfn);
 	init_page_count(page);
 	page_mapcount_reset(page);
 	page_cpupid_reset_last(page);
 
-	/*
-	 * Mark the block movable so that blocks are reserved for
-	 * movable at startup. This will force kernel allocations
-	 * to reserve their blocks rather than leaking throughout
-	 * the address space during boot when many long-lived
-	 * kernel allocations are made. Later some blocks near
-	 * the start are marked MIGRATE_RESERVE by
-	 * setup_zone_migrate_reserve()
-	 *
-	 * bitmap is created for zone's valid pfn range. but memmap
-	 * can be created for invalid pages (for alignment)
-	 * check here not to call set_pageblock_migratetype() against
-	 * pfn out of zone.
-	 */
-	if ((z->zone_start_pfn <= pfn)
-	    && (pfn < zone_end_pfn(z))
-	    && !(pfn & (pageblock_nr_pages - 1)))
-		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
-
 	INIT_LIST_HEAD(&page->lru);
 #ifdef WANT_PAGE_VIRTUAL
 	/* The shift won't overflow because ZONE_NORMAL is below 4G. */
@@ -1073,6 +1052,7 @@ static void __defermem_init deferred_free_range(struct page *page,
 	/* Free a large naturally-aligned chunk if possible */
 	if (nr_pages == MAX_ORDER_NR_PAGES &&
 	    (pfn & (MAX_ORDER_NR_PAGES-1)) == 0) {
+		set_pageblock_migratetype(page, MIGRATE_MOVABLE);
 		__free_pages_boot_core(page, pfn, MAX_ORDER-1);
 		return;
 	}
@@ -4593,7 +4573,29 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 					&nr_initialised))
 				break;
 		}
-		__init_single_pfn(pfn, zone, nid);
+
+		/*
+		 * Mark the block movable so that blocks are reserved for
+		 * movable at startup. This will force kernel allocations
+		 * to reserve their blocks rather than leaking throughout
+		 * the address space during boot when many long-lived
+		 * kernel allocations are made. Later some blocks near
+		 * the start are marked MIGRATE_RESERVE by
+		 * setup_zone_migrate_reserve()
+		 *
+		 * bitmap is created for zone's valid pfn range. but memmap
+		 * can be created for invalid pages (for alignment)
+		 * check here not to call set_pageblock_migratetype() against
+		 * pfn out of zone.
+		 */
+		if (!(pfn & (pageblock_nr_pages - 1))) {
+			struct page *page = pfn_to_page(pfn);
+
+			__init_single_page(page, pfn, zone, nid);
+			set_pageblock_migratetype(page, MIGRATE_MOVABLE);
+		} else {
+			__init_single_pfn(pfn, zone, nid);
+		}
 	}
 }
 