author		Ingo Molnar <mingo@elte.hu>	2008-07-25 05:37:07 -0400
committer	Ingo Molnar <mingo@elte.hu>	2008-07-25 05:37:07 -0400
commit		0e2f65ee30eee2db054f7fd73f462c5da33ec963
tree		26c61eb7745da0c0d9135e9d12088f570cb8530d /mm/page_alloc.c
parent		da7878d75b8520c9ae00d27dfbbce546a7bfdfbb
parent		fb2e405fc1fc8b20d9c78eaa1c7fd5a297efde43
Merge branch 'linus' into x86/pebs
Conflicts:
arch/x86/Kconfig.cpu
arch/x86/kernel/cpu/intel.c
arch/x86/kernel/setup_64.c
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'mm/page_alloc.c')
 -rw-r--r--  mm/page_alloc.c | 245
 1 file changed, 178 insertions(+), 67 deletions(-)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 2f552955a02f..6da667274df5 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -153,9 +153,9 @@ static unsigned long __meminitdata dma_reserve;
 static unsigned long __meminitdata node_boundary_start_pfn[MAX_NUMNODES];
 static unsigned long __meminitdata node_boundary_end_pfn[MAX_NUMNODES];
 #endif /* CONFIG_MEMORY_HOTPLUG_RESERVE */
-unsigned long __initdata required_kernelcore;
+static unsigned long __initdata required_kernelcore;
 static unsigned long __initdata required_movablecore;
-unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
+static unsigned long __meminitdata zone_movable_pfn[MAX_NUMNODES];
 
 /* movable_zone is the "real" zone pages in ZONE_MOVABLE are taken from */
 int movable_zone;
@@ -264,7 +264,7 @@ static void free_compound_page(struct page *page)
 	__free_pages_ok(page, compound_order(page));
 }
 
-static void prep_compound_page(struct page *page, unsigned long order)
+void prep_compound_page(struct page *page, unsigned long order)
 {
 	int i;
 	int nr_pages = 1 << order;
@@ -432,8 +432,9 @@ static inline void __free_one_page(struct page *page,
 
 		buddy = __page_find_buddy(page, page_idx, order);
 		if (!page_is_buddy(page, buddy, order))
-			break;		/* Move the buddy up one level. */
+			break;
 
+		/* Our buddy is free, merge with it and move up one order. */
 		list_del(&buddy->lru);
 		zone->free_area[order].nr_free--;
 		rmv_page_order(buddy);
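
The hunk above only moves a comment: merging works because a block's buddy sits at a fixed, computable offset. The buddy of the block at index page_idx is found by flipping bit 'order' of the index, and the merged parent block starts at the index with that bit cleared. A minimal user-space sketch of the arithmetic (the kernel's own helper here is __page_find_buddy, whose body is outside this hunk):

	#include <stdio.h>

	/* Binary-buddy index arithmetic: flipping bit 'order' of a block's
	 * index yields its buddy; clearing it yields the merged parent. */
	static unsigned long buddy_index(unsigned long page_idx, unsigned int order)
	{
		return page_idx ^ (1UL << order);
	}

	static unsigned long combined_index(unsigned long page_idx, unsigned int order)
	{
		return page_idx & ~(1UL << order);
	}

	int main(void)
	{
		/* Block at index 8, order 2 (4 pages): its buddy is index 12,
		 * and merging both gives an order-3 block starting at 8. */
		printf("buddy of 8 @ order 2: %lu\n", buddy_index(8, 2));
		printf("merged block starts at: %lu\n", combined_index(8, 2));
		return 0;
	}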
@@ -532,7 +533,7 @@ static void __free_pages_ok(struct page *page, unsigned int order)
 /*
  * permit the bootmem allocator to evade page validation on high-order frees
  */
-void __free_pages_bootmem(struct page *page, unsigned int order)
+void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
 {
 	if (order == 0) {
 		__ClearPageReserved(page);
@@ -673,9 +674,9 @@ static int fallbacks[MIGRATE_TYPES][MIGRATE_TYPES-1] = {
  * Note that start_page and end_pages are not aligned on a pageblock
  * boundary. If alignment is required, use move_freepages_block()
  */
-int move_freepages(struct zone *zone,
+static int move_freepages(struct zone *zone,
 			  struct page *start_page, struct page *end_page,
 			  int migratetype)
 {
 	struct page *page;
 	unsigned long order;
@@ -714,7 +715,8 @@ int move_freepages(struct zone *zone,
 	return pages_moved;
 }
 
-int move_freepages_block(struct zone *zone, struct page *page, int migratetype)
+static int move_freepages_block(struct zone *zone, struct page *page,
+				int migratetype)
 {
 	unsigned long start_pfn, end_pfn;
 	struct page *start_page, *end_page;
@@ -918,7 +920,7 @@ void drain_local_pages(void *arg)
  */
 void drain_all_pages(void)
 {
-	on_each_cpu(drain_local_pages, NULL, 0, 1);
+	on_each_cpu(drain_local_pages, NULL, 1);
 }
 
 #ifdef CONFIG_HIBERNATION
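
This hunk is merge fallout from an API change on the mainline side: on_each_cpu() lost its 'retry' argument. A note on the calling convention the call now assumes (paraphrased from the 2.6.27-era include/linux/smp.h; treat the exact prototype as an assumption):

	/* Presumed signature after the merge (the 'retry' parameter is gone):
	 *
	 *	int on_each_cpu(void (*func)(void *info), void *info, int wait);
	 *
	 * drain_all_pages() passes wait=1, so it returns only after
	 * drain_local_pages() has run on every online CPU. */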
@@ -1429,7 +1431,7 @@ try_next_zone:
 /*
  * This is the 'heart' of the zoned buddy allocator.
  */
-static struct page *
+struct page *
 __alloc_pages_internal(gfp_t gfp_mask, unsigned int order,
 			struct zonelist *zonelist, nodemask_t *nodemask)
 {
@@ -1632,22 +1634,7 @@ nopage:
 got_pg:
 	return page;
 }
-
-struct page *
-__alloc_pages(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist)
-{
-	return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
-}
-
-struct page *
-__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
-		struct zonelist *zonelist, nodemask_t *nodemask)
-{
-	return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
-}
-
-EXPORT_SYMBOL(__alloc_pages);
+EXPORT_SYMBOL(__alloc_pages_internal);
 
 /*
  * Common helper functions.
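
With __alloc_pages_internal() exported, the two removed wrappers can live elsewhere as trivial inlines. A sketch of how they plausibly look as header inlines (an assumption; the header side of this change is not part of this diff):

	/* A sketch, assuming the removed wrappers became header inlines: */
	static inline struct page *
	__alloc_pages(gfp_t gfp_mask, unsigned int order, struct zonelist *zonelist)
	{
		return __alloc_pages_internal(gfp_mask, order, zonelist, NULL);
	}

	static inline struct page *
	__alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
			struct zonelist *zonelist, nodemask_t *nodemask)
	{
		return __alloc_pages_internal(gfp_mask, order, zonelist, nodemask);
	}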
@@ -1711,6 +1698,59 @@ void free_pages(unsigned long addr, unsigned int order)
 
 EXPORT_SYMBOL(free_pages);
 
+/**
+ * alloc_pages_exact - allocate an exact number physically-contiguous pages.
+ * @size: the number of bytes to allocate
+ * @gfp_mask: GFP flags for the allocation
+ *
+ * This function is similar to alloc_pages(), except that it allocates the
+ * minimum number of pages to satisfy the request.  alloc_pages() can only
+ * allocate memory in power-of-two pages.
+ *
+ * This function is also limited by MAX_ORDER.
+ *
+ * Memory allocated by this function must be released by free_pages_exact().
+ */
+void *alloc_pages_exact(size_t size, gfp_t gfp_mask)
+{
+	unsigned int order = get_order(size);
+	unsigned long addr;
+
+	addr = __get_free_pages(gfp_mask, order);
+	if (addr) {
+		unsigned long alloc_end = addr + (PAGE_SIZE << order);
+		unsigned long used = addr + PAGE_ALIGN(size);
+
+		split_page(virt_to_page(addr), order);
+		while (used < alloc_end) {
+			free_page(used);
+			used += PAGE_SIZE;
+		}
+	}
+
+	return (void *)addr;
+}
+EXPORT_SYMBOL(alloc_pages_exact);
+
+/**
+ * free_pages_exact - release memory allocated via alloc_pages_exact()
+ * @virt: the value returned by alloc_pages_exact.
+ * @size: size of allocation, same value as passed to alloc_pages_exact().
+ *
+ * Release the memory allocated by a previous call to alloc_pages_exact.
+ */
+void free_pages_exact(void *virt, size_t size)
+{
+	unsigned long addr = (unsigned long)virt;
+	unsigned long end = addr + PAGE_ALIGN(size);
+
+	while (addr < end) {
+		free_page(addr);
+		addr += PAGE_SIZE;
+	}
+}
+EXPORT_SYMBOL(free_pages_exact);
+
 static unsigned int nr_free_zone_pages(int offset)
 {
 	struct zoneref *z;
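
alloc_pages_exact() rounds the request up to a power-of-two order, splits the high-order block with split_page(), and immediately frees the tail pages beyond PAGE_ALIGN(size). A hedged usage sketch (the five-page figure and the calling context are illustrative only):

	/* Illustrative caller: get exactly 5 pages of contiguous memory
	 * instead of the 8 pages an order-3 alloc_pages() would pin. */
	void *buf = alloc_pages_exact(5 * PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;
	/* ... use buf ... */
	free_pages_exact(buf, 5 * PAGE_SIZE);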
@@ -2328,7 +2368,6 @@ static void build_zonelists(pg_data_t *pgdat)
 static void build_zonelist_cache(pg_data_t *pgdat)
 {
 	pgdat->node_zonelists[0].zlcache_ptr = NULL;
-	pgdat->node_zonelists[1].zlcache_ptr = NULL;
 }
 
 #endif	/* CONFIG_NUMA */
@@ -2353,6 +2392,7 @@ void build_all_zonelists(void)
 
 	if (system_state == SYSTEM_BOOTING) {
 		__build_all_zonelists(NULL);
+		mminit_verify_zonelist();
 		cpuset_init_current_mems_allowed();
 	} else {
 		/* we have to stop all cpus to guarantee there is no user
@@ -2535,6 +2575,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone,
 		}
 		page = pfn_to_page(pfn);
 		set_page_links(page, zone, nid, pfn);
+		mminit_verify_page_links(page, zone, nid, pfn);
 		init_page_count(page);
 		reset_page_mapcount(page);
 		SetPageReserved(page);
@@ -2612,7 +2653,7 @@ static int zone_batchsize(struct zone *zone)
 	return batch;
 }
 
-inline void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
+static void setup_pageset(struct per_cpu_pageset *p, unsigned long batch)
 {
 	struct per_cpu_pages *pcp;
 
@@ -2837,6 +2878,12 @@ __meminit int init_currently_empty_zone(struct zone *zone,
 
 	zone->zone_start_pfn = zone_start_pfn;
 
+	mminit_dprintk(MMINIT_TRACE, "memmap_init",
+			"Initialising map node %d zone %lu pfns %lu -> %lu\n",
+			pgdat->node_id,
+			(unsigned long)zone_idx(zone),
+			zone_start_pfn, (zone_start_pfn + size));
+
 	zone_init_free_lists(zone);
 
 	return 0;
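
mminit_dprintk() arrives from the mainline side and is not defined in this file. Based purely on how it is called here, a plausible shape for it is a printk gated on a boot-selectable log level; the sketch below is an assumption, not the actual mm/internal.h text:

	/* Hypothetical sketch of the gating, not the real definition: */
	extern int mminit_loglevel;

	#define mminit_dprintk(level, prefix, fmt, arg...)		\
	do {								\
		if (mminit_loglevel >= (level))				\
			printk(KERN_DEBUG "mminit::" prefix " " fmt, ##arg); \
	} while (0)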
@@ -2930,6 +2977,18 @@ void __init free_bootmem_with_active_regions(int nid,
 	}
 }
 
+void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
+{
+	int i;
+	int ret;
+
+	for_each_active_range_index_in_nid(i, nid) {
+		ret = work_fn(early_node_map[i].start_pfn,
+			      early_node_map[i].end_pfn, data);
+		if (ret)
+			break;
+	}
+}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
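
work_with_active_regions() visits every registered PFN range on a node and stops at the first callback that returns non-zero. A sketch of a conforming callback, assuming work_fn_t is int (*)(unsigned long start_pfn, unsigned long end_pfn, void *data) to match the call above (the helper name find_big_range is hypothetical):

	/* Hypothetical callback: record the first range of at least
	 * 1024 pages and stop the walk. */
	static int __init find_big_range(unsigned long start_pfn,
					 unsigned long end_pfn, void *data)
	{
		unsigned long *out = data;

		if (end_pfn - start_pfn >= 1024) {
			*out = start_pfn;
			return 1;	/* non-zero stops the iteration */
		}
		return 0;
	}

	/* usage: work_with_active_regions(nid, find_big_range, &pfn); */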
@@ -2964,7 +3023,8 @@ void __init sparse_memory_present_with_active_regions(int nid)
 void __init push_node_boundaries(unsigned int nid,
 		unsigned long start_pfn, unsigned long end_pfn)
 {
-	printk(KERN_DEBUG "Entering push_node_boundaries(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+			"Entering push_node_boundaries(%u, %lu, %lu)\n",
 			nid, start_pfn, end_pfn);
 
 	/* Initialise the boundary for this node if necessary */
@@ -2982,7 +3042,8 @@ void __init push_node_boundaries(unsigned int nid,
 static void __meminit account_node_boundary(unsigned int nid,
 		unsigned long *start_pfn, unsigned long *end_pfn)
 {
-	printk(KERN_DEBUG "Entering account_node_boundary(%u, %lu, %lu)\n",
+	mminit_dprintk(MMINIT_TRACE, "zoneboundary",
+			"Entering account_node_boundary(%u, %lu, %lu)\n",
 			nid, *start_pfn, *end_pfn);
 
 	/* Return if boundary information has not been provided */
@@ -3039,7 +3100,7 @@ void __meminit get_pfn_range_for_nid(unsigned int nid,
  * assumption is made that zones within a node are ordered in monotonic
  * increasing memory addresses so that the "highest" populated zone is used
  */
-void __init find_usable_zone_for_movable(void)
+static void __init find_usable_zone_for_movable(void)
 {
 	int zone_index;
 	for (zone_index = MAX_NR_ZONES - 1; zone_index >= 0; zone_index--) {
@@ -3065,7 +3126,7 @@ void __init find_usable_zone_for_movable(void)
  * highest usable zone for ZONE_MOVABLE. This preserves the assumption that
  * zones within a node are in order of monotonic increases memory addresses
  */
-void __meminit adjust_zone_range_for_zone_movable(int nid,
+static void __meminit adjust_zone_range_for_zone_movable(int nid,
 					unsigned long zone_type,
 					unsigned long node_start_pfn,
 					unsigned long node_end_pfn,
@@ -3126,7 +3187,7 @@ static unsigned long __meminit zone_spanned_pages_in_node(int nid,
  * Return the number of holes in a range on a node. If nid is MAX_NUMNODES,
  * then all holes in the requested range will be accounted for.
  */
-unsigned long __meminit __absent_pages_in_range(int nid,
+static unsigned long __meminit __absent_pages_in_range(int nid,
 				unsigned long range_start_pfn,
 				unsigned long range_end_pfn)
 {
@@ -3357,8 +3418,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 			PAGE_ALIGN(size * sizeof(struct page)) >> PAGE_SHIFT;
 		if (realsize >= memmap_pages) {
 			realsize -= memmap_pages;
-			printk(KERN_DEBUG
-				"  %s zone: %lu pages used for memmap\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+				"%s zone: %lu pages used for memmap\n",
 				zone_names[j], memmap_pages);
 		} else
 			printk(KERN_WARNING
@@ -3368,7 +3429,8 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat,
 		/* Account for reserved pages */
 		if (j == 0 && realsize > dma_reserve) {
 			realsize -= dma_reserve;
-			printk(KERN_DEBUG "  %s zone: %lu pages reserved\n",
+			mminit_dprintk(MMINIT_TRACE, "memmap_init",
+					"%s zone: %lu pages reserved\n",
 					zone_names[0], dma_reserve);
 		}
 
@@ -3453,15 +3515,21 @@ static void __init_refok alloc_node_mem_map(struct pglist_data *pgdat)
 #endif /* CONFIG_FLAT_NODE_MEM_MAP */
 }
 
-void __paginginit free_area_init_node(int nid, struct pglist_data *pgdat,
-		unsigned long *zones_size, unsigned long node_start_pfn,
-		unsigned long *zholes_size)
+void __paginginit free_area_init_node(int nid, unsigned long *zones_size,
+		unsigned long node_start_pfn, unsigned long *zholes_size)
 {
+	pg_data_t *pgdat = NODE_DATA(nid);
+
 	pgdat->node_id = nid;
 	pgdat->node_start_pfn = node_start_pfn;
 	calculate_node_totalpages(pgdat, zones_size, zholes_size);
 
 	alloc_node_mem_map(pgdat);
+#ifdef CONFIG_FLAT_NODE_MEM_MAP
+	printk(KERN_DEBUG "free_area_init_node: node %d, pgdat %08lx, node_mem_map %08lx\n",
+		nid, (unsigned long)pgdat,
+		(unsigned long)pgdat->node_mem_map);
+#endif
 
 	free_area_init_core(pgdat, zones_size, zholes_size);
 }
@@ -3504,10 +3572,13 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 {
 	int i;
 
-	printk(KERN_DEBUG "Entering add_active_range(%d, %lu, %lu) "
-			"%d entries of %d used\n",
-			nid, start_pfn, end_pfn,
-			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+	mminit_dprintk(MMINIT_TRACE, "memory_register",
+			"Entering add_active_range(%d, %#lx, %#lx) "
+			"%d entries of %d used\n",
+			nid, start_pfn, end_pfn,
+			nr_nodemap_entries, MAX_ACTIVE_REGIONS);
+
+	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
 
 	/* Merge with existing active regions if possible */
 	for (i = 0; i < nr_nodemap_entries; i++) {
@@ -3548,27 +3619,68 @@ void __init add_active_range(unsigned int nid, unsigned long start_pfn,
 }
 
 /**
- * shrink_active_range - Shrink an existing registered range of PFNs
+ * remove_active_range - Shrink an existing registered range of PFNs
  * @nid: The node id the range is on that should be shrunk
- * @old_end_pfn: The old end PFN of the range
- * @new_end_pfn: The new PFN of the range
+ * @start_pfn: The new PFN of the range
+ * @end_pfn: The new PFN of the range
  *
  * i386 with NUMA use alloc_remap() to store a node_mem_map on a local node.
- * The map is kept at the end physical page range that has already been
- * registered with add_active_range(). This function allows an arch to shrink
- * an existing registered range.
+ * The map is kept near the end physical page range that has already been
+ * registered. This function allows an arch to shrink an existing registered
+ * range.
  */
-void __init shrink_active_range(unsigned int nid, unsigned long old_end_pfn,
-						unsigned long new_end_pfn)
+void __init remove_active_range(unsigned int nid, unsigned long start_pfn,
+				unsigned long end_pfn)
 {
-	int i;
+	int i, j;
+	int removed = 0;
+
+	printk(KERN_DEBUG "remove_active_range (%d, %lu, %lu)\n",
+			nid, start_pfn, end_pfn);
 
 	/* Find the old active region end and shrink */
-	for_each_active_range_index_in_nid(i, nid)
-		if (early_node_map[i].end_pfn == old_end_pfn) {
-			early_node_map[i].end_pfn = new_end_pfn;
-			break;
+	for_each_active_range_index_in_nid(i, nid) {
+		if (early_node_map[i].start_pfn >= start_pfn &&
+		    early_node_map[i].end_pfn <= end_pfn) {
+			/* clear it */
+			early_node_map[i].start_pfn = 0;
+			early_node_map[i].end_pfn = 0;
+			removed = 1;
+			continue;
+		}
+		if (early_node_map[i].start_pfn < start_pfn &&
+		    early_node_map[i].end_pfn > start_pfn) {
+			unsigned long temp_end_pfn = early_node_map[i].end_pfn;
+			early_node_map[i].end_pfn = start_pfn;
+			if (temp_end_pfn > end_pfn)
+				add_active_range(nid, end_pfn, temp_end_pfn);
+			continue;
+		}
+		if (early_node_map[i].start_pfn >= start_pfn &&
+		    early_node_map[i].end_pfn > end_pfn &&
+		    early_node_map[i].start_pfn < end_pfn) {
+			early_node_map[i].start_pfn = end_pfn;
+			continue;
 		}
+	}
+
+	if (!removed)
+		return;
+
+	/* remove the blank ones */
+	for (i = nr_nodemap_entries - 1; i > 0; i--) {
+		if (early_node_map[i].nid != nid)
+			continue;
+		if (early_node_map[i].end_pfn)
+			continue;
+		/* we found it, get rid of it */
+		for (j = i; j < nr_nodemap_entries - 1; j++)
+			memcpy(&early_node_map[j], &early_node_map[j+1],
+				sizeof(early_node_map[j]));
+		j = nr_nodemap_entries - 1;
+		memset(&early_node_map[j], 0, sizeof(early_node_map[j]));
+		nr_nodemap_entries--;
+	}
 }
 
 /**
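
The rewritten function is interval subtraction over early_node_map[]: a range wholly inside [start_pfn, end_pfn) is cleared, a range straddling start_pfn is truncated (re-registering any piece beyond end_pfn), and a range straddling end_pfn keeps only its tail; a second pass then compacts the cleared slots. The same case analysis on a single plain interval, as a standalone model (all names are hypothetical):

	/* Hypothetical model: remove window [rs, re) from one stored
	 * interval [s, e); 'split' receives any tail past re (s==e if none). */
	struct range { unsigned long s, e; };

	static void remove_window(struct range *r, unsigned long rs,
				  unsigned long re, struct range *split)
	{
		split->s = split->e = 0;
		if (r->s >= rs && r->e <= re) {		/* fully covered: clear */
			r->s = r->e = 0;
		} else if (r->s < rs && r->e > rs) {	/* straddles rs: truncate */
			if (r->e > re) {		/* window interior: split */
				split->s = re;
				split->e = r->e;
			}
			r->e = rs;
		} else if (r->s >= rs && r->s < re && r->e > re) {
			r->s = re;			/* straddles re: keep tail */
		}
	}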
@@ -3612,7 +3724,7 @@ static void __init sort_node_map(void)
 }
 
 /* Find the lowest pfn for a node */
-unsigned long __init find_min_pfn_for_node(unsigned long nid)
+static unsigned long __init find_min_pfn_for_node(int nid)
 {
 	int i;
 	unsigned long min_pfn = ULONG_MAX;
@@ -3623,7 +3735,7 @@ unsigned long __init find_min_pfn_for_node(unsigned long nid)
 
 	if (min_pfn == ULONG_MAX) {
 		printk(KERN_WARNING
-			"Could not find start_pfn for node %lu\n", nid);
+			"Could not find start_pfn for node %d\n", nid);
 		return 0;
 	}
 
@@ -3684,7 +3796,7 @@ static unsigned long __init early_calculate_totalpages(void)
  * memory. When they don't, some nodes will have more kernelcore than
  * others
  */
-void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
+static void __init find_zone_movable_pfns_for_nodes(unsigned long *movable_pfn)
 {
 	int i, nid;
 	unsigned long usable_startpfn;
@@ -3879,7 +3991,7 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	for (i = 0; i < MAX_NR_ZONES; i++) {
 		if (i == ZONE_MOVABLE)
 			continue;
-		printk("  %-8s %8lu -> %8lu\n",
+		printk("  %-8s %0#10lx -> %0#10lx\n",
 				zone_names[i],
 				arch_zone_lowest_possible_pfn[i],
 				arch_zone_highest_possible_pfn[i]);
@@ -3895,15 +4007,16 @@ void __init free_area_init_nodes(unsigned long *max_zone_pfn)
 	/* Print out the early_node_map[] */
 	printk("early_node_map[%d] active PFN ranges\n", nr_nodemap_entries);
 	for (i = 0; i < nr_nodemap_entries; i++)
-		printk("  %3d: %8lu -> %8lu\n", early_node_map[i].nid,
+		printk("  %3d: %0#10lx -> %0#10lx\n", early_node_map[i].nid,
 						early_node_map[i].start_pfn,
 						early_node_map[i].end_pfn);
 
 	/* Initialise every node */
+	mminit_verify_pageflags_layout();
 	setup_nr_node_ids();
 	for_each_online_node(nid) {
 		pg_data_t *pgdat = NODE_DATA(nid);
-		free_area_init_node(nid, pgdat, NULL,
+		free_area_init_node(nid, NULL,
 				find_min_pfn_for_node(nid), NULL);
 
 		/* Any memory on that node */
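
Both printk changes in the last two hunks switch PFN reporting from padded decimal to zero-padded hex. A quick user-space illustration of the before/after output for pfn 0x1000:

	#include <stdio.h>

	int main(void)
	{
		unsigned long pfn = 0x1000;

		printf("  %-8s %8lu\n", "DMA", pfn);	/* old: "  DMA          4096" */
		printf("  %-8s %0#10lx\n", "DMA", pfn);	/* new: "  DMA      0x00001000" */
		return 0;
	}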
@@ -3968,15 +4081,13 @@ void __init set_dma_reserve(unsigned long new_dma_reserve)
 }
 
 #ifndef CONFIG_NEED_MULTIPLE_NODES
-static bootmem_data_t contig_bootmem_data;
-struct pglist_data contig_page_data = { .bdata = &contig_bootmem_data };
-
+struct pglist_data contig_page_data = { .bdata = &bootmem_node_data[0] };
 EXPORT_SYMBOL(contig_page_data);
 #endif
 
 void __init free_area_init(unsigned long *zones_size)
 {
-	free_area_init_node(0, NODE_DATA(0), zones_size,
+	free_area_init_node(0, zones_size,
 			__pa(PAGE_OFFSET) >> PAGE_SHIFT, NULL);
 }
 
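
The UMA contig_page_data now draws its bootmem descriptor from bootmem_node_data[] rather than a file-local static. A note on the presumed mainline-side declaration this relies on (an assumption; that hunk lives outside mm/page_alloc.c):

	/* Presumed declaration, plausibly in include/linux/bootmem.h:
	 *
	 *	extern bootmem_data_t bootmem_node_data[MAX_NUMNODES];
	 *
	 * so the single UMA node and NUMA nodes all draw their bootmem
	 * descriptors from one shared table instead of private statics. */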