author     Mel Gorman <mel@csn.ul.ie>             2006-09-27 04:49:54 -0400
committer  Linus Torvalds <torvalds@g5.osdl.org>  2006-09-27 11:26:11 -0400
commit     05e0caad3b7bd0d0fbeff980bca22f186241a501
tree       d213789aca5bf91b74bbf5946d428590e3e368b1
parent     5cb248abf5ab65ab543b2d5fc16c738b28031fc0
[PATCH] Have ia64 use add_active_range() and free_area_init_nodes
Size zones and holes in an architecture-independent manner for ia64 (the zone-setup pattern this moves to is sketched after the sign-offs below).
[bob.picco@hp.com: fix ia64 FLATMEM+VIRTUAL_MEM_MAP]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Andi Kleen <ak@muc.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Keith Mannthey" <kmannth@gmail.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
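
For orientation, here is a minimal sketch of the zone-setup pattern this patch moves ia64 to, condensed from the new contig.c paging_init() in the diff below. The function name sketch_paging_init is hypothetical; the calls and the max_zone_pfns[] convention are the ones the patch itself uses, so treat this as an illustration rather than code from the patch.

	/*
	 * Minimal sketch (not part of the patch): the arch now only
	 * (1) registers which PFN ranges actually contain memory and
	 * (2) reports the upper PFN of each zone; the generic code in
	 * free_area_init_nodes() derives zone sizes and holes itself.
	 */
	void __init sketch_paging_init(void)	/* hypothetical name */
	{
		unsigned long max_zone_pfns[MAX_NR_ZONES];
		unsigned long nid = 0;		/* contiguous case: all memory on node 0 */

		/*
		 * Register every usable physical range with the core VM;
		 * the gaps between registered ranges replace the old
		 * hand-computed zholes_size[].
		 */
		efi_memmap_walk(register_active_ranges, &nid);

		/* Only the zone boundaries remain arch-specific. */
		max_zone_pfns[ZONE_DMA]    = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
		max_zone_pfns[ZONE_NORMAL] = max_low_pfn;

		/* Generic MM sizes all zones and holes across all nodes. */
		free_area_init_nodes(max_zone_pfns);
	}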
 arch/ia64/Kconfig          |  3
 arch/ia64/mm/contig.c      | 67
 arch/ia64/mm/discontig.c   | 44
 arch/ia64/mm/init.c        | 12
 include/asm-ia64/meminit.h |  1
 5 files changed, 40 insertions(+), 87 deletions(-)
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index f521f2f60a78..d5ee4fc8fe66 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -356,6 +356,9 @@ config NODES_SHIFT
 	  MAX_NUMNODES will be 2^(This value).
 	  If in doubt, use the default.
 
+config ARCH_POPULATES_NODE_MAP
+	def_bool y
+
 # VIRTUAL_MEM_MAP and FLAT_NODE_MEM_MAP are functionally equivalent.
 # VIRTUAL_MEM_MAP has been retained for historical reasons.
 config VIRTUAL_MEM_MAP
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e004143ba86b..719d476e71ba 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -26,7 +26,6 @@
 #include <asm/mca.h>
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long num_dma_physpages;
 static unsigned long max_gap;
 #endif
 
@@ -218,18 +217,6 @@ count_pages (u64 start, u64 end, void *arg)
 	return 0;
 }
 
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static int
-count_dma_pages (u64 start, u64 end, void *arg)
-{
-	unsigned long *count = arg;
-
-	if (start < MAX_DMA_ADDRESS)
-		*count += (min(end, MAX_DMA_ADDRESS) - start) >> PAGE_SHIFT;
-	return 0;
-}
-#endif
-
 /*
  * Set up the page tables.
  */
@@ -238,45 +225,22 @@ void __init
 paging_init (void)
 {
 	unsigned long max_dma;
-	unsigned long zones_size[MAX_NR_ZONES];
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-	unsigned long zholes_size[MAX_NR_ZONES];
-#endif
-
-	/* initialize mem_map[] */
-
-	memset(zones_size, 0, sizeof(zones_size));
+	unsigned long nid = 0;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	num_physpages = 0;
 	efi_memmap_walk(count_pages, &num_physpages);
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
+	max_zone_pfns[ZONE_DMA] = max_dma;
+	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
-	memset(zholes_size, 0, sizeof(zholes_size));
-
-	num_dma_physpages = 0;
-	efi_memmap_walk(count_dma_pages, &num_dma_physpages);
-
-	if (max_low_pfn < max_dma) {
-		zones_size[ZONE_DMA] = max_low_pfn;
-		zholes_size[ZONE_DMA] = max_low_pfn - num_dma_physpages;
-	} else {
-		zones_size[ZONE_DMA] = max_dma;
-		zholes_size[ZONE_DMA] = max_dma - num_dma_physpages;
-		if (num_physpages > num_dma_physpages) {
-			zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
-			zholes_size[ZONE_NORMAL] =
-				((max_low_pfn - max_dma) -
-				 (num_physpages - num_dma_physpages));
-		}
-	}
-
+	efi_memmap_walk(register_active_ranges, &nid);
 	efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
 	if (max_gap < LARGE_GAP) {
 		vmem_map = (struct page *) 0;
-		free_area_init_node(0, NODE_DATA(0), zones_size, 0,
-				    zholes_size);
+		free_area_init_nodes(max_zone_pfns);
 	} else {
 		unsigned long map_size;
 
@@ -288,20 +252,19 @@ paging_init (void)
 		vmem_map = (struct page *) vmalloc_end;
 		efi_memmap_walk(create_mem_map_page_table, NULL);
 
-		NODE_DATA(0)->node_mem_map = vmem_map;
-		free_area_init_node(0, NODE_DATA(0), zones_size,
-				    0, zholes_size);
+		/*
+		 * alloc_node_mem_map makes an adjustment for mem_map
+		 * which isn't compatible with vmem_map.
+		 */
+		NODE_DATA(0)->node_mem_map = vmem_map +
+			find_min_pfn_with_active_regions();
+		free_area_init_nodes(max_zone_pfns);
 
 		printk("Virtual mem_map starts at 0x%p\n", mem_map);
 	}
 #else /* !CONFIG_VIRTUAL_MEM_MAP */
-	if (max_low_pfn < max_dma)
-		zones_size[ZONE_DMA] = max_low_pfn;
-	else {
-		zones_size[ZONE_DMA] = max_dma;
-		zones_size[ZONE_NORMAL] = max_low_pfn - max_dma;
-	}
-	free_area_init(zones_size);
+	add_active_range(0, 0, max_low_pfn);
+	free_area_init_nodes(max_zone_pfns);
 #endif /* !CONFIG_VIRTUAL_MEM_MAP */
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index d260bffa01ab..7bd28079dcc4 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -654,6 +654,7 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 {
 	unsigned long end = start + len;
 
+	add_active_range(node, start >> PAGE_SHIFT, end >> PAGE_SHIFT);
 	mem_data[node].num_physpages += len >> PAGE_SHIFT;
 	if (start <= __pa(MAX_DMA_ADDRESS))
 		mem_data[node].num_dma_physpages +=
@@ -678,10 +679,10 @@ static __init int count_node_pages(unsigned long start, unsigned long len, int n
 void __init paging_init(void)
 {
 	unsigned long max_dma;
-	unsigned long zones_size[MAX_NR_ZONES];
-	unsigned long zholes_size[MAX_NR_ZONES];
 	unsigned long pfn_offset = 0;
+	unsigned long max_pfn = 0;
 	int node;
+	unsigned long max_zone_pfns[MAX_NR_ZONES];
 
 	max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
 
@@ -698,47 +699,20 @@ void __init paging_init(void)
 #endif
 
 	for_each_online_node(node) {
-		memset(zones_size, 0, sizeof(zones_size));
-		memset(zholes_size, 0, sizeof(zholes_size));
-
 		num_physpages += mem_data[node].num_physpages;
-
-		if (mem_data[node].min_pfn >= max_dma) {
-			/* All of this node's memory is above ZONE_DMA */
-			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-				mem_data[node].min_pfn;
-			zholes_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-				mem_data[node].min_pfn -
-				mem_data[node].num_physpages;
-		} else if (mem_data[node].max_pfn < max_dma) {
-			/* All of this node's memory is in ZONE_DMA */
-			zones_size[ZONE_DMA] = mem_data[node].max_pfn -
-				mem_data[node].min_pfn;
-			zholes_size[ZONE_DMA] = mem_data[node].max_pfn -
-				mem_data[node].min_pfn -
-				mem_data[node].num_dma_physpages;
-		} else {
-			/* This node has memory in both zones */
-			zones_size[ZONE_DMA] = max_dma -
-				mem_data[node].min_pfn;
-			zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
-				mem_data[node].num_dma_physpages;
-			zones_size[ZONE_NORMAL] = mem_data[node].max_pfn -
-				max_dma;
-			zholes_size[ZONE_NORMAL] = zones_size[ZONE_NORMAL] -
-				(mem_data[node].num_physpages -
-				 mem_data[node].num_dma_physpages);
-		}
-
 		pfn_offset = mem_data[node].min_pfn;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
 		NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
 #endif
-		free_area_init_node(node, NODE_DATA(node), zones_size,
-				    pfn_offset, zholes_size);
+		if (mem_data[node].max_pfn > max_pfn)
+			max_pfn = mem_data[node].max_pfn;
 	}
 
+	max_zone_pfns[ZONE_DMA] = max_dma;
+	max_zone_pfns[ZONE_NORMAL] = max_pfn;
+	free_area_init_nodes(max_zone_pfns);
+
 	zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
 }
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 30617ccb4f7e..ff87a5cba399 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -593,6 +593,18 @@ find_largest_hole (u64 start, u64 end, void *arg)
 	last_end = end;
 	return 0;
 }
+
+int __init
+register_active_ranges(u64 start, u64 end, void *nid)
+{
+	BUG_ON(nid == NULL);
+	BUG_ON(*(unsigned long *)nid >= MAX_NUMNODES);
+
+	add_active_range(*(unsigned long *)nid,
+		__pa(start) >> PAGE_SHIFT,
+		__pa(end) >> PAGE_SHIFT);
+	return 0;
+}
 #endif /* CONFIG_VIRTUAL_MEM_MAP */
 
 static int __init
diff --git a/include/asm-ia64/meminit.h b/include/asm-ia64/meminit.h
index 6a33a07b3f1d..c3b1f862e6e7 100644
--- a/include/asm-ia64/meminit.h
+++ b/include/asm-ia64/meminit.h
@@ -55,6 +55,7 @@ extern void efi_memmap_init(unsigned long *, unsigned long *);
 extern unsigned long vmalloc_end;
 extern struct page *vmem_map;
 extern int find_largest_hole (u64 start, u64 end, void *arg);
+extern int register_active_ranges (u64 start, u64 end, void *arg);
 extern int create_mem_map_page_table (u64 start, u64 end, void *arg);
 extern int vmemmap_find_next_valid_pfn(int, int);
 #else