diff options
author | Bob Picco <bob.picco@hp.com> | 2007-01-30 05:11:09 -0500 |
---|---|---|
committer | Tony Luck <tony.luck@intel.com> | 2007-02-05 18:07:47 -0500 |
commit | 139b830477ccdca21b68c40f9a83ec327e65eb56 (patch) | |
tree | 0aab2140315579525dfef89189b9bea5033af2ba /arch/ia64/mm | |
parent | d1598e05faa11d9f04e0a226122dd57674fb1dab (diff) |
[IA64] register memory ranges in a consistent manner
While pursuing an unrelated issue with 64MB granules I noticed a problem
related to inconsistent use of add_active_range. There doesn't appear to
be any reason why FLATMEM versus DISCONTIG_MEM should register memory to
add_active_range with different code, so I've changed the code into a
common implementation.
The other subtle issue fixed by this patch was calling add_active_range in
count_node_pages before granule aligning was performed. We were lucky with
16MB granules but not so with 64MB granules. count_node_pages has reserved
regions filtered out, and as a consequence the linked kernel text and data
aren't covered by calls to count_node_pages. So the linked kernel regions
weren't reported to add_active_range. This resulted in free_initmem
causing numerous bad_page reports. This won't occur with this patch
because now all known memory regions are reported by
register_active_ranges.
Acked-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Acked-by: Simon Horman <horms@verge.net.au>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Tony Luck <tony.luck@intel.com>
Diffstat (limited to 'arch/ia64/mm')
-rw-r--r-- | arch/ia64/mm/discontig.c | 4 | ||||
-rw-r--r-- | arch/ia64/mm/init.c | 19 |
2 files changed, 20 insertions, 3 deletions
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c index d3edb12f3cf9..999cefd2b226 100644 --- a/arch/ia64/mm/discontig.c +++ b/arch/ia64/mm/discontig.c | |||
@@ -473,6 +473,9 @@ void __init find_memory(void) | |||
473 | node_clear(node, memory_less_mask); | 473 | node_clear(node, memory_less_mask); |
474 | mem_data[node].min_pfn = ~0UL; | 474 | mem_data[node].min_pfn = ~0UL; |
475 | } | 475 | } |
476 | |||
477 | efi_memmap_walk(register_active_ranges, NULL); | ||
478 | |||
476 | /* | 479 | /* |
477 | * Initialize the boot memory maps in reverse order since that's | 480 | * Initialize the boot memory maps in reverse order since that's |
478 | * what the bootmem allocator expects | 481 | * what the bootmem allocator expects |
@@ -660,7 +663,6 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n | |||
660 | { | 663 | { |
661 | unsigned long end = start + len; | 664 | unsigned long end = start + len; |
662 | 665 | ||
663 | add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT); | ||
664 | mem_data[node].num_physpages += len >> PAGE_SHIFT; | 666 | mem_data[node].num_physpages += len >> PAGE_SHIFT; |
665 | if (start <= __pa(MAX_DMA_ADDRESS)) | 667 | if (start <= __pa(MAX_DMA_ADDRESS)) |
666 | mem_data[node].num_dma_physpages += | 668 | mem_data[node].num_dma_physpages += |
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 1373fae7657f..8b7599808dd5 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c | |||
@@ -19,6 +19,7 @@ | |||
19 | #include <linux/swap.h> | 19 | #include <linux/swap.h> |
20 | #include <linux/proc_fs.h> | 20 | #include <linux/proc_fs.h> |
21 | #include <linux/bitops.h> | 21 | #include <linux/bitops.h> |
22 | #include <linux/kexec.h> | ||
22 | 23 | ||
23 | #include <asm/a.out.h> | 24 | #include <asm/a.out.h> |
24 | #include <asm/dma.h> | 25 | #include <asm/dma.h> |
@@ -595,13 +596,27 @@ find_largest_hole (u64 start, u64 end, void *arg) | |||
595 | return 0; | 596 | return 0; |
596 | } | 597 | } |
597 | 598 | ||
599 | #endif /* CONFIG_VIRTUAL_MEM_MAP */ | ||
600 | |||
598 | int __init | 601 | int __init |
599 | register_active_ranges(u64 start, u64 end, void *arg) | 602 | register_active_ranges(u64 start, u64 end, void *arg) |
600 | { | 603 | { |
601 | add_active_range(0, __pa(start) >> PAGE_SHIFT, __pa(end) >> PAGE_SHIFT); | 604 | int nid = paddr_to_nid(__pa(start)); |
605 | |||
606 | if (nid < 0) | ||
607 | nid = 0; | ||
608 | #ifdef CONFIG_KEXEC | ||
609 | if (start > crashk_res.start && start < crashk_res.end) | ||
610 | start = crashk_res.end; | ||
611 | if (end > crashk_res.start && end < crashk_res.end) | ||
612 | end = crashk_res.start; | ||
613 | #endif | ||
614 | |||
615 | if (start < end) | ||
616 | add_active_range(nid, __pa(start) >> PAGE_SHIFT, | ||
617 | __pa(end) >> PAGE_SHIFT); | ||
602 | return 0; | 618 | return 0; |
603 | } | 619 | } |
604 | #endif /* CONFIG_VIRTUAL_MEM_MAP */ | ||
605 | 620 | ||
606 | static int __init | 621 | static int __init |
607 | count_reserved_pages (u64 start, u64 end, void *arg) | 622 | count_reserved_pages (u64 start, u64 end, void *arg) |