author     Ingo Molnar <mingo@elte.hu>    2008-09-22 07:08:57 -0400
committer  Ingo Molnar <mingo@elte.hu>    2008-09-22 07:08:57 -0400
commit     0b88641f1bafdbd087d5e63987a30cc0eadd63b9 (patch)
tree       81dcf756db373444140bb2623584710c628e3048 /mm/page_alloc.c
parent     fbdbf709938d155c719c76b9894d28342632c797 (diff)
parent     72d31053f62c4bc464c2783974926969614a8649 (diff)
Merge commit 'v2.6.27-rc7' into x86/debug
Diffstat (limited to 'mm/page_alloc.c')
-rw-r--r--  mm/page_alloc.c  182
1 file changed, 113 insertions, 69 deletions
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 79ac4afc908c..e293c58bea58 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -153,9 +153,9 @@ static unsigned long __meminitdata dma_reserve;
153 | static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES]; | 153 | static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES]; |
154 | static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; | 154 | static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES]; |
155 | #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ | 155 | #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */ |
156 | unsigned long __initdata required_kernelcore; | 156 | static unsigned long __initdata required_kernelcore; |
157 | static unsigned long __initdata required_movablecore; | 157 | static unsigned long __initdata required_movablecore; |
158 | unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; | 158 | static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES]; |
159 | 159 | ||
160 | /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ | 160 | /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */ |
161 | int movable_zone; | 161 | int movable_zone; |
@@ -264,7 +264,7 @@ static void free_compound_page(struct page *page)
264 | __free_pages_ok(page, compound_order(page)); | 264 | __free_pages_ok(page, compound_order(page)); |
265 | } | 265 | } |
266 | 266 | ||
267 | static void prep_compound_page(struct page *page, unsigned long order) | 267 | void prep_compound_page(struct page *page, unsigned long order) |
268 | { | 268 | { |
269 | int i; | 269 | int i; |
270 | int nr_pages = 1 << order; | 270 | int nr_pages = 1 << order; |
@@ -432,8 +432,9 @@ static inline void __free_one_page(struct page *page,
432 | 432 | ||
433 | buddy = __page_find_buddy(page, page_idx, order); | 433 | buddy = __page_find_buddy(page, page_idx, order); |
434 | if (!page_is_buddy(page, buddy, order)) | 434 | if (!page_is_buddy(page, buddy, order)) |
435 | break; /* Move the buddy up one level. */ | 435 | break; |
436 | 436 | ||
437 | /* Our buddy is free, merge with it and move up one order. */ | ||
437 | list_del(&buddy->lru); | 438 | list_del(&buddy->lru); |
438 | zone->free_area[order].nr_free--; | 439 | zone->free_area[order].nr_free--; |
439 | rmv_page_order(buddy); | 440 | rmv_page_order(buddy); |
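For context on the merge loop in the hunk above: the buddy being merged is found by XOR-ing the page's index within its zone with the order bit, and after merging the combined block starts at the lower of the two indices. The sketch below is illustrative only and is not part of this patch; the stand-alone helper names are hypothetical, though the arithmetic mirrors what __page_find_buddy() and __find_combined_index() do in this version of the allocator.

/* Buddy-index arithmetic, shown as a small stand-alone program. */
#include <stdio.h>

static unsigned long buddy_index(unsigned long page_idx, unsigned int order)
{
	/* The buddy of an order-N block is the other half of the
	 * enclosing order-(N+1) block, i.e. the index with bit N flipped. */
	return page_idx ^ (1UL << order);
}

static unsigned long combined_index(unsigned long page_idx, unsigned int order)
{
	/* After a merge the new, larger block starts at the lower index. */
	return page_idx & ~(1UL << order);
}

int main(void)
{
	/* Page index 12 at order 2: its buddy starts at index 8,
	 * and the merged order-3 block also starts at 8. */
	printf("buddy:    %lu\n", buddy_index(12, 2));
	printf("combined: %lu\n", combined_index(12, 2));
	return 0;
}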
@@ -532,7 +533,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
532 | /* | 533 | /* |
533 | * permit the bootmem allocator to evade page validation on high-order frees | 534 | * permit the bootmem allocator to evade page validation on high-order frees |
534 | */ | 535 | */ |
535 | void __free_pages_bootmem(struct page *page, unsigned int order) | 536 | void __meminit __free_pages_bootmem(struct page *page, unsigned int order) |
536 | { | 537 | { |
537 | if (order == 0) { | 538 | if (order == 0) { |
538 | __ClearPageReserved(page); | 539 | __ClearPageReserved(page); |
@@ -673,9 +674,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
673 | * Note that start_page and end_pages are not aligned on a pageblock | 674 | * Note that start_page and end_pages are not aligned on a pageblock |
674 | * boundary. If alignment is required, use move_freepages_block() | 675 | * boundary. If alignment is required, use move_freepages_block() |
675 | */ | 676 | */ |
676 | int move_freepages(struct zone *zone, | 677 | static int move_freepages(struct zone *zone, |
677 | struct page *start_page, struct page *end_page, | 678 | struct page *start_page, struct page *end_page, |
678 | int migratetype) | 679 | int migratetype) |
679 | { | 680 | { |
680 | struct page *page; | 681 | struct page *page; |
681 | unsigned long order; | 682 | unsigned long order; |
@@ -693,6 +694,9 @@ int move_freepages(struct zone *zone,
693 | #endif | 694 | #endif |
694 | 695 | ||
695 | for (page = start_page; page <= end_page;) { | 696 | for (page = start_page; page <= end_page;) { |
697 | /* Make sure we are not inadvertently changing nodes */ | ||
698 | VM_BUG_ON(page_to_nid(page) != zone_to_nid(zone)); | ||
699 | |||
696 | if (!pfn_valid_within(page_to_pfn(page))) { | 700 | if (!pfn_valid_within(page_to_pfn(page))) { |
697 | page++; | 701 | page++; |
698 | continue; | 702 | continue; |
@@ -714,7 +718,8 @@ int move_freepages(struct zone *zone,
714 | return pages_moved; | 718 | return pages_moved; |
715 | } | 719 | } |
716 | 720 | ||
717 | int move_freepages_block(struct zone *zone, struct page *page, int migratetype) | 721 | static int move_freepages_block(struct zone *zone, struct page *page, |
722 | int migratetype) | ||
718 | { | 723 | { |
719 | unsigned long start_pfn, end_pfn; | 724 | unsigned long start_pfn, end_pfn; |
720 | struct page *start_page, *end_page; | 725 | struct page *start_page, *end_page; |
@@ -1429,7 +1434,7 @@ try_next_zone:
1429 | /* | 1434 | /* |
1430 | * This is the 'heart' of the zoned buddy allocator. | 1435 | * This is the 'heart' of the zoned buddy allocator. |
1431 | */ | 1436 | */ |
1432 | static struct page * | 1437 | struct page * |
1433 | __alloc_pages_internal(gfp_t gfp_mask, unsigned int order, | 1438 | __alloc_pages_internal(gfp_t gfp_mask, unsigned int order, |
1434 | struct zonelist *zonelist, nodemask_t *nodemask) | 1439 | struct zonelist *zonelist, nodemask_t *nodemask) |
1435 | { | 1440 | { |
@@ -1632,22 +1637,7 @@ nopage:
1632 | got_pg: | 1637 | got_pg: |
1633 | return page; | 1638 | return page; |
1634 | } | 1639 | } |
1635 | 1640 | EXPORT_SYMBOL(__alloc_pages_internal); | |
1636 | struct page * | ||
1637 | __alloc_pages(gfp_t gfp_mask, unsigned int order, | ||
1638 | struct zonelist *zonelist) | ||
1639 | { | ||
1640 | return __alloc_pages_internal(gfp_mask, order, zonelist, NULL); | ||
1641 | } | ||
1642 | |||
1643 | struct page * | ||
1644 | __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order, | ||
1645 | struct zonelist *zonelist, nodemask_t *nodemask) | ||
1646 | { | ||
1647 | return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask); | ||
1648 | } | ||
1649 | |||
1650 | EXPORT_SYMBOL(__alloc_pages); | ||
1651 | 1641 | ||
1652 | /* | 1642 | /* |
1653 | * Common helper functions. | 1643 | * Common helper functions. |
@@ -1711,6 +1701,59 @@ void free_pages(unsigned long addr, unsigned int order)
1711 | 1701 | ||
1712 | EXPORT_SYMBOL(free_pages); | 1702 | EXPORT_SYMBOL(free_pages); |
1713 | 1703 | ||
1704 | /** | ||
1705 | * alloc_pages_exact - allocate an exact number physically-contiguous pages. | ||
1706 | * @size: the number of bytes to allocate | ||
1707 | * @gfp_mask: GFP flags for the allocation | ||
1708 | * | ||
1709 | * This function is similar to alloc_pages(), except that it allocates the | ||
1710 | * minimum number of pages to satisfy the request. alloc_pages() can only | ||
1711 | * allocate memory in power-of-two pages. | ||
1712 | * | ||
1713 | * This function is also limited by MAX_ORDER. | ||
1714 | * | ||
1715 | * Memory allocated by this function must be released by free_pages_exact(). | ||
1716 | */ | ||
1717 | void *alloc_pages_exact(size_t size, gfp_t gfp_mask) | ||
1718 | { | ||
1719 | unsigned int order = get_order(size); | ||
1720 | unsigned long addr; | ||
1721 | |||
1722 | addr = __get_free_pages(gfp_mask, order); | ||
1723 | if (addr) { | ||
1724 | unsigned long alloc_end = addr + (PAGE_SIZE << order); | ||
1725 | unsigned long used = addr + PAGE_ALIGN(size); | ||
1726 | |||
1727 | split_page(virt_to_page(addr), order); | ||
1728 | while (used < alloc_end) { | ||
1729 | free_page(used); | ||
1730 | used += PAGE_SIZE; | ||
1731 | } | ||
1732 | } | ||
1733 | |||
1734 | return (void *)addr; | ||
1735 | } | ||
1736 | EXPORT_SYMBOL(alloc_pages_exact); | ||
1737 | |||
1738 | /** | ||
1739 | * free_pages_exact - release memory allocated via alloc_pages_exact() | ||
1740 | * @virt: the value returned by alloc_pages_exact. | ||
1741 | * @size: size of allocation, same value as passed to alloc_pages_exact(). | ||
1742 | * | ||
1743 | * Release the memory allocated by a previous call to alloc_pages_exact. | ||
1744 | */ | ||
1745 | void free_pages_exact(void *virt, size_t size) | ||
1746 | { | ||
1747 | unsigned long addr = (unsigned long)virt; | ||
1748 | unsigned long end = addr + PAGE_ALIGN(size); | ||
1749 | |||
1750 | while (addr < end) { | ||
1751 | free_page(addr); | ||
1752 | addr += PAGE_SIZE; | ||
1753 | } | ||
1754 | } | ||
1755 | EXPORT_SYMBOL(free_pages_exact); | ||
1756 | |||
1714 | static unsigned int nr_free_zone_pages(int offset) | 1757 | static unsigned int nr_free_zone_pages(int offset) |
1715 | { | 1758 | { |
1716 | struct zoneref *z; | 1759 | struct zoneref *z; |
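The alloc_pages_exact()/free_pages_exact() pair added in the hunk above rounds the request up to a power-of-two order, splits the high-order allocation into individual pages with split_page(), and hands the surplus tail pages straight back to the allocator, so the caller keeps only PAGE_ALIGN(size) bytes. A minimal usage sketch follows; the caller, the buffer size, and the assumption that the declarations live in linux/gfp.h are illustrative, not taken from this commit.

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/errno.h>

#define DEMO_BUF_SIZE	(10 * 1024)	/* 10 KB: three 4 KB pages, not a 16 KB order-2 block */

static void *demo_buf;

static int demo_alloc(void)
{
	/* Physically contiguous, page-aligned memory of at least
	 * DEMO_BUF_SIZE bytes; pages beyond PAGE_ALIGN(size) have
	 * already been freed by alloc_pages_exact(). */
	demo_buf = alloc_pages_exact(DEMO_BUF_SIZE, GFP_KERNEL);
	if (!demo_buf)
		return -ENOMEM;
	return 0;
}

static void demo_free(void)
{
	/* Pass the same size that was given to alloc_pages_exact(). */
	free_pages_exact(demo_buf, DEMO_BUF_SIZE);
}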
@@ -2332,7 +2375,7 @@ static void build_zonelist_cache(pg_data_t *pgdat)
2332 | 2375 | ||
2333 | #endif /* CONFIG_NUMA */ | 2376 | #endif /* CONFIG_NUMA */ |
2334 | 2377 | ||
2335 | /* return values int ....just for stop_machine_run() */ | 2378 | /* return values int ....just for stop_machine() */ |
2336 | static int __build_all_zonelists(void *dummy) | 2379 | static int __build_all_zonelists(void *dummy) |
2337 | { | 2380 | { |
2338 | int nid; | 2381 | int nid; |
@@ -2352,11 +2395,12 @@ void build_all_zonelists(void)
2352 | 2395 | ||
2353 | if (system_state == SYSTEM_BOOTING) { | 2396 | if (system_state == SYSTEM_BOOTING) { |
2354 | __build_all_zonelists(NULL); | 2397 | __build_all_zonelists(NULL); |
2398 | mminit_verify_zonelist(); | ||
2355 | cpuset_init_current_mems_allowed(); | 2399 | cpuset_init_current_mems_allowed(); |
2356 | } else { | 2400 | } else { |
2357 | /* we have to stop all cpus to guarantee there is no user | 2401 | /* we have to stop all cpus to guarantee there is no user |
2358 | of zonelist */ | 2402 | of zonelist */ |
2359 | stop_machine_run(__build_all_zonelists, NULL, NR_CPUS); | 2403 | stop_machine(__build_all_zonelists, NULL, NULL); |
2360 | /* cpuset refresh routine should be here */ | 2404 | /* cpuset refresh routine should be here */ |
2361 | } | 2405 | } |
2362 | vm_total_pages = nr_free_pagecache_pages(); | 2406 | vm_total_pages = nr_free_pagecache_pages(); |
@@ -2475,6 +2519,10 @@ static void setup_zone_migrate_reserve(struct zone *zone)
2475 | continue; | 2519 | continue; |
2476 | page = pfn_to_page(pfn); | 2520 | page = pfn_to_page(pfn); |
2477 | 2521 | ||
2522 | /* Watch out for overlapping nodes */ | ||
2523 | if (page_to_nid(page) != zone_to_nid(zone)) | ||
2524 | continue; | ||
2525 | |||
2478 | /* Blocks with reserved pages will never free, skip them. */ | 2526 | /* Blocks with reserved pages will never free, skip them. */ |
2479 | if (PageReserved(page)) | 2527 | if (PageReserved(page)) |
2480 | continue; | 2528 | continue; |
@@ -2534,6 +2582,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
2534 | } | 2582 | } |
2535 | page = pfn_to_page(pfn); | 2583 | page = pfn_to_page(pfn); |
2536 | set_page_links(page, zone, nid, pfn); | 2584 | set_page_links(page, zone, nid, pfn); |
2585 | mminit_verify_page_links(page, zone, nid, pfn); | ||
2537 | init_page_count(page); | 2586 | init_page_count(page); |
2538 | reset_page_mapcount(page); | 2587 | reset_page_mapcount(page); |
2539 | SetPageReserved(page); | 2588 | SetPageReserved(page); |
@@ -2611,7 +2660,7 @@ static int zone_batchsize(struct zone *zone)
2611 | return batch; | 2660 | return batch; |
2612 | } | 2661 | } |
2613 | 2662 | ||
2614 | inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) | 2663 | static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch) |
2615 | { | 2664 | { |
2616 | struct per_cpu_pages *pcp; | 2665 | struct per_cpu_pages *pcp; |
2617 | 2666 | ||
@@ -2836,6 +2885,12 @@ __meminit int init_currently_empty_zone(struct zone *zone,
2836 | 2885 | ||
2837 | zone->zone_start_pfn = zone_start_pfn; | 2886 | zone->zone_start_pfn = zone_start_pfn; |
2838 | 2887 | ||
2888 | mminit_dprintk(MMINIT_TRACE, "memmap_init", | ||
2889 | "Initialising map node %d zone %lu pfns %lu -> %lu\n", | ||
2890 | pgdat->node_id, | ||
2891 | (unsigned long)zone_idx(zone), | ||
2892 | zone_start_pfn, (zone_start_pfn + size)); | ||
2893 | |||
2839 | zone_init_free_lists(zone); | 2894 | zone_init_free_lists(zone); |
2840 | 2895 | ||
2841 | return 0; | 2896 | return 0; |
@@ -2975,7 +3030,8 @@ void __init sparse_memory_present_with_active_regions(int nid)
2975 | void __init push_node_boundaries(unsigned int nid, | 3030 | void __init push_node_boundaries(unsigned int nid, |
2976 | unsigned long start_pfn, unsigned long end_pfn) | 3031 | unsigned long start_pfn, unsigned long end_pfn) |
2977 | { | 3032 | { |
2978 | printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n", | 3033 | mminit_dprintk(MMINIT_TRACE, "zoneboundary", |
3034 | "Entering push_node_boundaries(%u, %lu, %lu)\n", | ||
2979 | nid, start_pfn, end_pfn); | 3035 | nid, start_pfn, end_pfn); |
2980 | 3036 | ||
2981 | /* Initialise the boundary for this node if necessary */ | 3037 | /* Initialise the boundary for this node if necessary */ |
@@ -2993,7 +3049,8 @@ void __init push_node_boundaries(unsigned int nid,
2993 | static void __meminit account_node_boundary(unsigned int nid, | 3049 | static void __meminit account_node_boundary(unsigned int nid, |
2994 | unsigned long *start_pfn, unsigned long *end_pfn) | 3050 | unsigned long *start_pfn, unsigned long *end_pfn) |
2995 | { | 3051 | { |
2996 | printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n", | 3052 | mminit_dprintk(MMINIT_TRACE, "zoneboundary", |
3053 | "Entering account_node_boundary(%u, %lu, %lu)\n", | ||
2997 | nid, *start_pfn, *end_pfn); | 3054 | nid, *start_pfn, *end_pfn); |
2998 | 3055 | ||
2999 | /* Return if boundary information has not been provided */ | 3056 | /* Return if boundary information has not been provided */ |
@@ -3050,7 +3107,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
3050 | * assumption is made that zones within a node are ordered in monotonic | 3107 | * assumption is made that zones within a node are ordered in monotonic |
3051 | * increasing memory addresses so that the "highest" populated zone is used | 3108 | * increasing memory addresses so that the "highest" populated zone is used |
3052 | */ | 3109 | */ |
3053 | void __init find_usable_zone_for_movable(void) | 3110 | static void __init find_usable_zone_for_movable(void) |
3054 | { | 3111 | { |
3055 | int zone_index; | 3112 | int zone_index; |
3056 | for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { | 3113 | for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) { |
@@ -3076,7 +3133,7 @@ void __init find_usable_zone_for_movable(void)
3076 | * highest usable zone for ZONE_MOVABLE. This preserves the assumption that | 3133 | * highest usable zone for ZONE_MOVABLE. This preserves the assumption that |
3077 | * zones within a node are in order of monotonic increases memory addresses | 3134 | * zones within a node are in order of monotonic increases memory addresses |
3078 | */ | 3135 | */ |
3079 | void __meminit adjust_zone_range_for_zone_movable(int nid, | 3136 | static void __meminit adjust_zone_range_for_zone_movable(int nid, |
3080 | unsigned long zone_type, | 3137 | unsigned long zone_type, |
3081 | unsigned long node_start_pfn, | 3138 | unsigned long node_start_pfn, |
3082 | unsigned long node_end_pfn, | 3139 | unsigned long node_end_pfn, |
@@ -3137,7 +3194,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
3137 | * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, | 3194 | * Return the number of holes in a range on a node. If nid is MAX_NUMNODES, |
3138 | * then all holes in the requested range will be accounted for. | 3195 | * then all holes in the requested range will be accounted for. |
3139 | */ | 3196 | */ |
3140 | unsigned long __meminit __absent_pages_in_range(int nid, | 3197 | static unsigned long __meminit __absent_pages_in_range(int nid, |
3141 | unsigned long range_start_pfn, | 3198 | unsigned long range_start_pfn, |
3142 | unsigned long range_end_pfn) | 3199 | unsigned long range_end_pfn) |
3143 | { | 3200 | { |
@@ -3368,8 +3425,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3368 | PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; | 3425 | PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT; |
3369 | if (realsize >= memmap_pages) { | 3426 | if (realsize >= memmap_pages) { |
3370 | realsize -= memmap_pages; | 3427 | realsize -= memmap_pages; |
3371 | printk(KERN_DEBUG | 3428 | mminit_dprintk(MMINIT_TRACE, "memmap_init", |
3372 | " %s zone: %lu pages used for memmap\n", | 3429 | "%s zone: %lu pages used for memmap\n", |
3373 | zone_names[j], memmap_pages); | 3430 | zone_names[j], memmap_pages); |
3374 | } else | 3431 | } else |
3375 | printk(KERN_WARNING | 3432 | printk(KERN_WARNING |
@@ -3379,7 +3436,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
3379 | /* Account for reserved pages */ | 3436 | /* Account for reserved pages */ |
3380 | if (j == 0 && realsize > dma_reserve) { | 3437 | if (j == 0 && realsize > dma_reserve) { |
3381 | realsize -= dma_reserve; | 3438 | realsize -= dma_reserve; |
3382 | printk(KERN_DEBUG " %s zone: %lu pages reserved\n", | 3439 | mminit_dprintk(MMINIT_TRACE, "memmap_init", |
3440 | "%s zone: %lu pages reserved\n", | ||
3383 | zone_names[0], dma_reserve); | 3441 | zone_names[0], dma_reserve); |
3384 | } | 3442 | } |
3385 | 3443 | ||
@@ -3464,10 +3522,11 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
3464 | #endif /* CONFIG_FLAT_NODE_MEM_MAP */ | 3522 | #endif /* CONFIG_FLAT_NODE_MEM_MAP */ |
3465 | } | 3523 | } |
3466 | 3524 | ||
3467 | void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat, | 3525 | void __paginginit free_area_init_node(int nid, unsigned long *zones_size, |
3468 | unsigned long *zones_size, unsigned long node_start_pfn, | 3526 | unsigned long node_start_pfn, unsigned long *zholes_size) |
3469 | unsigned long *zholes_size) | ||
3470 | { | 3527 | { |
3528 | pg_data_t *pgdat = NODE_DATA(nid); | ||
3529 | |||
3471 | pgdat->node_id = nid; | 3530 | pgdat->node_id = nid; |
3472 | pgdat->node_start_pfn = node_start_pfn; | 3531 | pgdat->node_start_pfn = node_start_pfn; |
3473 | calculate_node_totalpages(pgdat, zones_size, zholes_size); | 3532 | calculate_node_totalpages(pgdat, zones_size, zholes_size); |
@@ -3520,10 +3579,13 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
3520 | { | 3579 | { |
3521 | int i; | 3580 | int i; |
3522 | 3581 | ||
3523 | printk(KERN_DEBUG "Entering add_active_range(%d, %#lx, %#lx) " | 3582 | mminit_dprintk(MMINIT_TRACE, "memory_register", |
3524 | "%d entries of %d used\n", | 3583 | "Entering add_active_range(%d, %#lx, %#lx) " |
3525 | nid, start_pfn, end_pfn, | 3584 | "%d entries of %d used\n", |
3526 | nr_nodemap_entries, MAX_ACTIVE_REGIONS); | 3585 | nid, start_pfn, end_pfn, |
3586 | nr_nodemap_entries, MAX_ACTIVE_REGIONS); | ||
3587 | |||
3588 | mminit_validate_memmodel_limits(&start_pfn, &end_pfn); | ||
3527 | 3589 | ||
3528 | /* Merge with existing active regions if possible */ | 3590 | /* Merge with existing active regions if possible */ |
3529 | for (i = 0; i < nr_nodemap_entries; i++) { | 3591 | for (i = 0; i < nr_nodemap_entries; i++) { |
@@ -3669,7 +3731,7 @@ static void __init sort_node_map(void)
3669 | } | 3731 | } |
3670 | 3732 | ||
3671 | /* Find the lowest pfn for a node */ | 3733 | /* Find the lowest pfn for a node */ |
3672 | unsigned long __init find_min_pfn_for_node(int nid) | 3734 | static unsigned long __init find_min_pfn_for_node(int nid) |
3673 | { | 3735 | { |
3674 | int i; | 3736 | int i; |
3675 | unsigned long min_pfn = ULONG_MAX; | 3737 | unsigned long min_pfn = ULONG_MAX; |
@@ -3698,23 +3760,6 @@ unsigned long __init find_min_pfn_with_active_regions(void)
3698 | return find_min_pfn_for_node(MAX_NUMNODES); | 3760 | return find_min_pfn_for_node(MAX_NUMNODES); |
3699 | } | 3761 | } |
3700 | 3762 | ||
3701 | /** | ||
3702 | * find_max_pfn_with_active_regions - Find the maximum PFN registered | ||
3703 | * | ||
3704 | * It returns the maximum PFN based on information provided via | ||
3705 | * add_active_range(). | ||
3706 | */ | ||
3707 | unsigned long __init find_max_pfn_with_active_regions(void) | ||
3708 | { | ||
3709 | int i; | ||
3710 | unsigned long max_pfn = 0; | ||
3711 | |||
3712 | for (i = 0; i < nr_nodemap_entries; i++) | ||
3713 | max_pfn = max(max_pfn, early_node_map[i].end_pfn); | ||
3714 | |||
3715 | return max_pfn; | ||
3716 | } | ||
3717 | |||
3718 | /* | 3763 | /* |
3719 | * early_calculate_totalpages() | 3764 | * early_calculate_totalpages() |
3720 | * Sum pages in active regions for movable zone. | 3765 | * Sum pages in active regions for movable zone. |
@@ -3741,7 +3786,7 @@ static unsigned long __init early_calculate_totalpages(void)
3741 | * memory. When they don't, some nodes will have more kernelcore than | 3786 | * memory. When they don't, some nodes will have more kernelcore than |
3742 | * others | 3787 | * others |
3743 | */ | 3788 | */ |
3744 | void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) | 3789 | static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn) |
3745 | { | 3790 | { |
3746 | int i, nid; | 3791 | int i, nid; |
3747 | unsigned long usable_startpfn; | 3792 | unsigned long usable_startpfn; |
@@ -3957,10 +4002,11 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
3957 | early_node_map[i].end_pfn); | 4002 | early_node_map[i].end_pfn); |
3958 | 4003 | ||
3959 | /* Initialise every node */ | 4004 | /* Initialise every node */ |
4005 | mminit_verify_pageflags_layout(); | ||
3960 | setup_nr_node_ids(); | 4006 | setup_nr_node_ids(); |
3961 | for_each_online_node(nid) { | 4007 | for_each_online_node(nid) { |
3962 | pg_data_t *pgdat = NODE_DATA(nid); | 4008 | pg_data_t *pgdat = NODE_DATA(nid); |
3963 | free_area_init_node(nid, pgdat, NULL, | 4009 | free_area_init_node(nid, NULL, |
3964 | find_min_pfn_for_node(nid), NULL); | 4010 | find_min_pfn_for_node(nid), NULL); |
3965 | 4011 | ||
3966 | /* Any memory on that node */ | 4012 | /* Any memory on that node */ |
@@ -4025,15 +4071,13 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
4025 | } | 4071 | } |
4026 | 4072 | ||
4027 | #ifndef CONFIG_NEED_MULTIPLE_NODES | 4073 | #ifndef CONFIG_NEED_MULTIPLE_NODES |
4028 | static bootmem_data_t contig_bootmem_data; | 4074 | struct pglist_data __refdata contig_page_data = { .bdata = &bootmem_node_data[0] }; |
4029 | struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data }; | ||
4030 | |||
4031 | EXPORT_SYMBOL(contig_page_data); | 4075 | EXPORT_SYMBOL(contig_page_data); |
4032 | #endif | 4076 | #endif |
4033 | 4077 | ||
4034 | void __init free_area_init(unsigned long *zones_size) | 4078 | void __init free_area_init(unsigned long *zones_size) |
4035 | { | 4079 | { |
4036 | free_area_init_node(0, NODE_DATA(0), zones_size, | 4080 | free_area_init_node(0, zones_size, |
4037 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); | 4081 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL); |
4038 | } | 4082 | } |
4039 | 4083 | ||
@@ -4400,7 +4444,7 @@ void *__init alloc_large_system_hash(const char *tablename,
4400 | do { | 4444 | do { |
4401 | size = bucketsize << log2qty; | 4445 | size = bucketsize << log2qty; |
4402 | if (flags & HASH_EARLY) | 4446 | if (flags & HASH_EARLY) |
4403 | table = alloc_bootmem(size); | 4447 | table = alloc_bootmem_nopanic(size); |
4404 | else if (hashdist) | 4448 | else if (hashdist) |
4405 | table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); | 4449 | table = __vmalloc(size, GFP_ATOMIC, PAGE_KERNEL); |
4406 | else { | 4450 | else { |