diff options
author | Mel Gorman <mel@csn.ul.ie> | 2006-09-27 04:49:49 -0400 |
---|---|---|
committer | Linus Torvalds <torvalds@g5.osdl.org> | 2006-09-27 11:26:11 -0400 |
commit | c67c3cb4c99fb2ee63c8733943c353d745f45b84 (patch) | |
tree | 5628da22a723aab1d11dfbedda264f3f65addc21 | |
parent | c713216deebd95d2b0ab38fef8bb2361c0180c2d (diff) |
[PATCH] Have Power use add_active_range() and free_area_init_nodes()
Size zones and holes in an architecture-independent manner for Power.
[judith@osdl.org: build fix]
Signed-off-by: Mel Gorman <mel@csn.ul.ie>
Cc: Dave Hansen <haveblue@us.ibm.com>
Cc: Andy Whitcroft <apw@shadowen.org>
Cc: Andi Kleen <ak@muc.de>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: "Keith Mannthey" <kmannth@gmail.com>
Cc: "Luck, Tony" <tony.luck@intel.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: Yasunori Goto <y-goto@jp.fujitsu.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
-rw-r--r-- | arch/powerpc/Kconfig | 7 | ||||
-rw-r--r-- | arch/powerpc/mm/mem.c | 51 | ||||
-rw-r--r-- | arch/powerpc/mm/numa.c | 159 | ||||
-rw-r--r-- | arch/ppc/Kconfig | 3 | ||||
-rw-r--r-- | arch/ppc/mm/init.c | 23 |
5 files changed, 52 insertions, 191 deletions
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index de1ef2fa1a20..a0dd1b0ee483 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig | |||
@@ -731,11 +731,10 @@ config ARCH_SPARSEMEM_DEFAULT | |||
731 | def_bool y | 731 | def_bool y |
732 | depends on SMP && PPC_PSERIES | 732 | depends on SMP && PPC_PSERIES |
733 | 733 | ||
734 | source "mm/Kconfig" | 734 | config ARCH_POPULATES_NODE_MAP |
735 | |||
736 | config HAVE_ARCH_EARLY_PFN_TO_NID | ||
737 | def_bool y | 735 | def_bool y |
738 | depends on NEED_MULTIPLE_NODES | 736 | |
737 | source "mm/Kconfig" | ||
739 | 738 | ||
740 | config ARCH_MEMORY_PROBE | 739 | config ARCH_MEMORY_PROBE |
741 | def_bool y | 740 | def_bool y |
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index eebd8b83a6b0..16fe027bbc12 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c | |||
@@ -256,20 +256,22 @@ void __init do_init_bootmem(void) | |||
256 | 256 | ||
257 | boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); | 257 | boot_mapsize = init_bootmem(start >> PAGE_SHIFT, total_pages); |
258 | 258 | ||
259 | /* Add active regions with valid PFNs */ | ||
260 | for (i = 0; i < lmb.memory.cnt; i++) { | ||
261 | unsigned long start_pfn, end_pfn; | ||
262 | start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; | ||
263 | end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); | ||
264 | add_active_range(0, start_pfn, end_pfn); | ||
265 | } | ||
266 | |||
259 | /* Add all physical memory to the bootmem map, mark each area | 267 | /* Add all physical memory to the bootmem map, mark each area |
260 | * present. | 268 | * present. |
261 | */ | 269 | */ |
262 | for (i = 0; i < lmb.memory.cnt; i++) { | ||
263 | unsigned long base = lmb.memory.region[i].base; | ||
264 | unsigned long size = lmb_size_bytes(&lmb.memory, i); | ||
265 | #ifdef CONFIG_HIGHMEM | 270 | #ifdef CONFIG_HIGHMEM |
266 | if (base >= total_lowmem) | 271 | free_bootmem_with_active_regions(0, total_lowmem >> PAGE_SHIFT); |
267 | continue; | 272 | #else |
268 | if (base + size > total_lowmem) | 273 | free_bootmem_with_active_regions(0, max_pfn); |
269 | size = total_lowmem - base; | ||
270 | #endif | 274 | #endif |
271 | free_bootmem(base, size); | ||
272 | } | ||
273 | 275 | ||
274 | /* reserve the sections we're already using */ | 276 | /* reserve the sections we're already using */ |
275 | for (i = 0; i < lmb.reserved.cnt; i++) | 277 | for (i = 0; i < lmb.reserved.cnt; i++) |
@@ -277,9 +279,8 @@ void __init do_init_bootmem(void) | |||
277 | lmb_size_bytes(&lmb.reserved, i)); | 279 | lmb_size_bytes(&lmb.reserved, i)); |
278 | 280 | ||
279 | /* XXX need to clip this if using highmem? */ | 281 | /* XXX need to clip this if using highmem? */ |
280 | for (i = 0; i < lmb.memory.cnt; i++) | 282 | sparse_memory_present_with_active_regions(0); |
281 | memory_present(0, lmb_start_pfn(&lmb.memory, i), | 283 | |
282 | lmb_end_pfn(&lmb.memory, i)); | ||
283 | init_bootmem_done = 1; | 284 | init_bootmem_done = 1; |
284 | } | 285 | } |
285 | 286 | ||
@@ -288,10 +289,9 @@ void __init do_init_bootmem(void) | |||
288 | */ | 289 | */ |
289 | void __init paging_init(void) | 290 | void __init paging_init(void) |
290 | { | 291 | { |
291 | unsigned long zones_size[MAX_NR_ZONES]; | ||
292 | unsigned long zholes_size[MAX_NR_ZONES]; | ||
293 | unsigned long total_ram = lmb_phys_mem_size(); | 292 | unsigned long total_ram = lmb_phys_mem_size(); |
294 | unsigned long top_of_ram = lmb_end_of_DRAM(); | 293 | unsigned long top_of_ram = lmb_end_of_DRAM(); |
294 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | ||
295 | 295 | ||
296 | #ifdef CONFIG_HIGHMEM | 296 | #ifdef CONFIG_HIGHMEM |
297 | map_page(PKMAP_BASE, 0, 0); /* XXX gross */ | 297 | map_page(PKMAP_BASE, 0, 0); /* XXX gross */ |
@@ -307,26 +307,13 @@ void __init paging_init(void) | |||
307 | top_of_ram, total_ram); | 307 | top_of_ram, total_ram); |
308 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", | 308 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", |
309 | (top_of_ram - total_ram) >> 20); | 309 | (top_of_ram - total_ram) >> 20); |
310 | /* | ||
311 | * All pages are DMA-able so we put them all in the DMA zone. | ||
312 | */ | ||
313 | memset(zones_size, 0, sizeof(zones_size)); | ||
314 | memset(zholes_size, 0, sizeof(zholes_size)); | ||
315 | |||
316 | zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; | ||
317 | zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT; | ||
318 | |||
319 | #ifdef CONFIG_HIGHMEM | 310 | #ifdef CONFIG_HIGHMEM |
320 | zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT; | 311 | max_zone_pfns[0] = total_lowmem >> PAGE_SHIFT; |
321 | zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT; | 312 | max_zone_pfns[1] = top_of_ram >> PAGE_SHIFT; |
322 | zholes_size[ZONE_HIGHMEM] = (top_of_ram - total_ram) >> PAGE_SHIFT; | ||
323 | #else | 313 | #else |
324 | zones_size[ZONE_DMA] = top_of_ram >> PAGE_SHIFT; | 314 | max_zone_pfns[0] = top_of_ram >> PAGE_SHIFT; |
325 | zholes_size[ZONE_DMA] = (top_of_ram - total_ram) >> PAGE_SHIFT; | 315 | #endif |
326 | #endif /* CONFIG_HIGHMEM */ | 316 | free_area_init_nodes(max_zone_pfns); |
327 | |||
328 | free_area_init_node(0, NODE_DATA(0), zones_size, | ||
329 | __pa(PAGE_OFFSET) >> PAGE_SHIFT, zholes_size); | ||
330 | } | 317 | } |
331 | #endif /* ! CONFIG_NEED_MULTIPLE_NODES */ | 318 | #endif /* ! CONFIG_NEED_MULTIPLE_NODES */ |
332 | 319 | ||
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 6c0f1c7d83e5..43c272075e1a 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c | |||
@@ -39,96 +39,6 @@ static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES]; | |||
39 | static int min_common_depth; | 39 | static int min_common_depth; |
40 | static int n_mem_addr_cells, n_mem_size_cells; | 40 | static int n_mem_addr_cells, n_mem_size_cells; |
41 | 41 | ||
42 | /* | ||
43 | * We need somewhere to store start/end/node for each region until we have | ||
44 | * allocated the real node_data structures. | ||
45 | */ | ||
46 | #define MAX_REGIONS (MAX_LMB_REGIONS*2) | ||
47 | static struct { | ||
48 | unsigned long start_pfn; | ||
49 | unsigned long end_pfn; | ||
50 | int nid; | ||
51 | } init_node_data[MAX_REGIONS] __initdata; | ||
52 | |||
53 | int __init early_pfn_to_nid(unsigned long pfn) | ||
54 | { | ||
55 | unsigned int i; | ||
56 | |||
57 | for (i = 0; init_node_data[i].end_pfn; i++) { | ||
58 | unsigned long start_pfn = init_node_data[i].start_pfn; | ||
59 | unsigned long end_pfn = init_node_data[i].end_pfn; | ||
60 | |||
61 | if ((start_pfn <= pfn) && (pfn < end_pfn)) | ||
62 | return init_node_data[i].nid; | ||
63 | } | ||
64 | |||
65 | return -1; | ||
66 | } | ||
67 | |||
68 | void __init add_region(unsigned int nid, unsigned long start_pfn, | ||
69 | unsigned long pages) | ||
70 | { | ||
71 | unsigned int i; | ||
72 | |||
73 | dbg("add_region nid %d start_pfn 0x%lx pages 0x%lx\n", | ||
74 | nid, start_pfn, pages); | ||
75 | |||
76 | for (i = 0; init_node_data[i].end_pfn; i++) { | ||
77 | if (init_node_data[i].nid != nid) | ||
78 | continue; | ||
79 | if (init_node_data[i].end_pfn == start_pfn) { | ||
80 | init_node_data[i].end_pfn += pages; | ||
81 | return; | ||
82 | } | ||
83 | if (init_node_data[i].start_pfn == (start_pfn + pages)) { | ||
84 | init_node_data[i].start_pfn -= pages; | ||
85 | return; | ||
86 | } | ||
87 | } | ||
88 | |||
89 | /* | ||
90 | * Leave last entry NULL so we dont iterate off the end (we use | ||
91 | * entry.end_pfn to terminate the walk). | ||
92 | */ | ||
93 | if (i >= (MAX_REGIONS - 1)) { | ||
94 | printk(KERN_ERR "WARNING: too many memory regions in " | ||
95 | "numa code, truncating\n"); | ||
96 | return; | ||
97 | } | ||
98 | |||
99 | init_node_data[i].start_pfn = start_pfn; | ||
100 | init_node_data[i].end_pfn = start_pfn + pages; | ||
101 | init_node_data[i].nid = nid; | ||
102 | } | ||
103 | |||
104 | /* We assume init_node_data has no overlapping regions */ | ||
105 | void __init get_region(unsigned int nid, unsigned long *start_pfn, | ||
106 | unsigned long *end_pfn, unsigned long *pages_present) | ||
107 | { | ||
108 | unsigned int i; | ||
109 | |||
110 | *start_pfn = -1UL; | ||
111 | *end_pfn = *pages_present = 0; | ||
112 | |||
113 | for (i = 0; init_node_data[i].end_pfn; i++) { | ||
114 | if (init_node_data[i].nid != nid) | ||
115 | continue; | ||
116 | |||
117 | *pages_present += init_node_data[i].end_pfn - | ||
118 | init_node_data[i].start_pfn; | ||
119 | |||
120 | if (init_node_data[i].start_pfn < *start_pfn) | ||
121 | *start_pfn = init_node_data[i].start_pfn; | ||
122 | |||
123 | if (init_node_data[i].end_pfn > *end_pfn) | ||
124 | *end_pfn = init_node_data[i].end_pfn; | ||
125 | } | ||
126 | |||
127 | /* We didnt find a matching region, return start/end as 0 */ | ||
128 | if (*start_pfn == -1UL) | ||
129 | *start_pfn = 0; | ||
130 | } | ||
131 | |||
132 | static void __cpuinit map_cpu_to_node(int cpu, int node) | 42 | static void __cpuinit map_cpu_to_node(int cpu, int node) |
133 | { | 43 | { |
134 | numa_cpu_lookup_table[cpu] = node; | 44 | numa_cpu_lookup_table[cpu] = node; |
@@ -468,8 +378,8 @@ new_range: | |||
468 | continue; | 378 | continue; |
469 | } | 379 | } |
470 | 380 | ||
471 | add_region(nid, start >> PAGE_SHIFT, | 381 | add_active_range(nid, start >> PAGE_SHIFT, |
472 | size >> PAGE_SHIFT); | 382 | (start >> PAGE_SHIFT) + (size >> PAGE_SHIFT)); |
473 | 383 | ||
474 | if (--ranges) | 384 | if (--ranges) |
475 | goto new_range; | 385 | goto new_range; |
@@ -482,6 +392,7 @@ static void __init setup_nonnuma(void) | |||
482 | { | 392 | { |
483 | unsigned long top_of_ram = lmb_end_of_DRAM(); | 393 | unsigned long top_of_ram = lmb_end_of_DRAM(); |
484 | unsigned long total_ram = lmb_phys_mem_size(); | 394 | unsigned long total_ram = lmb_phys_mem_size(); |
395 | unsigned long start_pfn, end_pfn; | ||
485 | unsigned int i; | 396 | unsigned int i; |
486 | 397 | ||
487 | printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", | 398 | printk(KERN_DEBUG "Top of RAM: 0x%lx, Total RAM: 0x%lx\n", |
@@ -489,9 +400,11 @@ static void __init setup_nonnuma(void) | |||
489 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", | 400 | printk(KERN_DEBUG "Memory hole size: %ldMB\n", |
490 | (top_of_ram - total_ram) >> 20); | 401 | (top_of_ram - total_ram) >> 20); |
491 | 402 | ||
492 | for (i = 0; i < lmb.memory.cnt; ++i) | 403 | for (i = 0; i < lmb.memory.cnt; ++i) { |
493 | add_region(0, lmb.memory.region[i].base >> PAGE_SHIFT, | 404 | start_pfn = lmb.memory.region[i].base >> PAGE_SHIFT; |
494 | lmb_size_pages(&lmb.memory, i)); | 405 | end_pfn = start_pfn + lmb_size_pages(&lmb.memory, i); |
406 | add_active_range(0, start_pfn, end_pfn); | ||
407 | } | ||
495 | node_set_online(0); | 408 | node_set_online(0); |
496 | } | 409 | } |
497 | 410 | ||
@@ -630,11 +543,11 @@ void __init do_init_bootmem(void) | |||
630 | (void *)(unsigned long)boot_cpuid); | 543 | (void *)(unsigned long)boot_cpuid); |
631 | 544 | ||
632 | for_each_online_node(nid) { | 545 | for_each_online_node(nid) { |
633 | unsigned long start_pfn, end_pfn, pages_present; | 546 | unsigned long start_pfn, end_pfn; |
634 | unsigned long bootmem_paddr; | 547 | unsigned long bootmem_paddr; |
635 | unsigned long bootmap_pages; | 548 | unsigned long bootmap_pages; |
636 | 549 | ||
637 | get_region(nid, &start_pfn, &end_pfn, &pages_present); | 550 | get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); |
638 | 551 | ||
639 | /* Allocate the node structure node local if possible */ | 552 | /* Allocate the node structure node local if possible */ |
640 | NODE_DATA(nid) = careful_allocation(nid, | 553 | NODE_DATA(nid) = careful_allocation(nid, |
@@ -667,19 +580,7 @@ void __init do_init_bootmem(void) | |||
667 | init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, | 580 | init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT, |
668 | start_pfn, end_pfn); | 581 | start_pfn, end_pfn); |
669 | 582 | ||
670 | /* Add free regions on this node */ | 583 | free_bootmem_with_active_regions(nid, end_pfn); |
671 | for (i = 0; init_node_data[i].end_pfn; i++) { | ||
672 | unsigned long start, end; | ||
673 | |||
674 | if (init_node_data[i].nid != nid) | ||
675 | continue; | ||
676 | |||
677 | start = init_node_data[i].start_pfn << PAGE_SHIFT; | ||
678 | end = init_node_data[i].end_pfn << PAGE_SHIFT; | ||
679 | |||
680 | dbg("free_bootmem %lx %lx\n", start, end - start); | ||
681 | free_bootmem_node(NODE_DATA(nid), start, end - start); | ||
682 | } | ||
683 | 584 | ||
684 | /* Mark reserved regions on this node */ | 585 | /* Mark reserved regions on this node */ |
685 | for (i = 0; i < lmb.reserved.cnt; i++) { | 586 | for (i = 0; i < lmb.reserved.cnt; i++) { |
@@ -710,44 +611,16 @@ void __init do_init_bootmem(void) | |||
710 | } | 611 | } |
711 | } | 612 | } |
712 | 613 | ||
713 | /* Add regions into sparsemem */ | 614 | sparse_memory_present_with_active_regions(nid); |
714 | for (i = 0; init_node_data[i].end_pfn; i++) { | ||
715 | unsigned long start, end; | ||
716 | |||
717 | if (init_node_data[i].nid != nid) | ||
718 | continue; | ||
719 | |||
720 | start = init_node_data[i].start_pfn; | ||
721 | end = init_node_data[i].end_pfn; | ||
722 | |||
723 | memory_present(nid, start, end); | ||
724 | } | ||
725 | } | 615 | } |
726 | } | 616 | } |
727 | 617 | ||
728 | void __init paging_init(void) | 618 | void __init paging_init(void) |
729 | { | 619 | { |
730 | unsigned long zones_size[MAX_NR_ZONES]; | 620 | unsigned long max_zone_pfns[MAX_NR_ZONES] = { |
731 | unsigned long zholes_size[MAX_NR_ZONES]; | 621 | lmb_end_of_DRAM() >> PAGE_SHIFT |
732 | int nid; | 622 | }; |
733 | 623 | free_area_init_nodes(max_zone_pfns); | |
734 | memset(zones_size, 0, sizeof(zones_size)); | ||
735 | memset(zholes_size, 0, sizeof(zholes_size)); | ||
736 | |||
737 | for_each_online_node(nid) { | ||
738 | unsigned long start_pfn, end_pfn, pages_present; | ||
739 | |||
740 | get_region(nid, &start_pfn, &end_pfn, &pages_present); | ||
741 | |||
742 | zones_size[ZONE_DMA] = end_pfn - start_pfn; | ||
743 | zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present; | ||
744 | |||
745 | dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid, | ||
746 | zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]); | ||
747 | |||
748 | free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn, | ||
749 | zholes_size); | ||
750 | } | ||
751 | } | 624 | } |
752 | 625 | ||
753 | static int __init early_numa(char *p) | 626 | static int __init early_numa(char *p) |
diff --git a/arch/ppc/Kconfig b/arch/ppc/Kconfig index 8fa10cf661a8..fdd9e7b66244 100644 --- a/arch/ppc/Kconfig +++ b/arch/ppc/Kconfig | |||
@@ -953,6 +953,9 @@ config NR_CPUS | |||
953 | config HIGHMEM | 953 | config HIGHMEM |
954 | bool "High memory support" | 954 | bool "High memory support" |
955 | 955 | ||
956 | config ARCH_POPULATES_NODE_MAP | ||
957 | def_bool y | ||
958 | |||
956 | source kernel/Kconfig.hz | 959 | source kernel/Kconfig.hz |
957 | source kernel/Kconfig.preempt | 960 | source kernel/Kconfig.preempt |
958 | source "mm/Kconfig" | 961 | source "mm/Kconfig" |
diff --git a/arch/ppc/mm/init.c b/arch/ppc/mm/init.c index 523392d460fa..410200046af1 100644 --- a/arch/ppc/mm/init.c +++ b/arch/ppc/mm/init.c | |||
@@ -358,8 +358,8 @@ void __init do_init_bootmem(void) | |||
358 | */ | 358 | */ |
359 | void __init paging_init(void) | 359 | void __init paging_init(void) |
360 | { | 360 | { |
361 | unsigned long zones_size[MAX_NR_ZONES], i; | 361 | unsigned long start_pfn, end_pfn; |
362 | 362 | unsigned long max_zone_pfns[MAX_NR_ZONES]; | |
363 | #ifdef CONFIG_HIGHMEM | 363 | #ifdef CONFIG_HIGHMEM |
364 | map_page(PKMAP_BASE, 0, 0); /* XXX gross */ | 364 | map_page(PKMAP_BASE, 0, 0); /* XXX gross */ |
365 | pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k | 365 | pkmap_page_table = pte_offset_kernel(pmd_offset(pgd_offset_k |
@@ -369,19 +369,18 @@ void __init paging_init(void) | |||
369 | (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN); | 369 | (KMAP_FIX_BEGIN), KMAP_FIX_BEGIN), KMAP_FIX_BEGIN); |
370 | kmap_prot = PAGE_KERNEL; | 370 | kmap_prot = PAGE_KERNEL; |
371 | #endif /* CONFIG_HIGHMEM */ | 371 | #endif /* CONFIG_HIGHMEM */ |
372 | 372 | /* All pages are DMA-able so we put them all in the DMA zone. */ | |
373 | /* | 373 | start_pfn = __pa(PAGE_OFFSET) >> PAGE_SHIFT; |
374 | * All pages are DMA-able so we put them all in the DMA zone. | 374 | end_pfn = start_pfn + (total_memory >> PAGE_SHIFT); |
375 | */ | 375 | add_active_range(0, start_pfn, end_pfn); |
376 | zones_size[ZONE_DMA] = total_lowmem >> PAGE_SHIFT; | ||
377 | for (i = 1; i < MAX_NR_ZONES; i++) | ||
378 | zones_size[i] = 0; | ||
379 | 376 | ||
380 | #ifdef CONFIG_HIGHMEM | 377 | #ifdef CONFIG_HIGHMEM |
381 | zones_size[ZONE_HIGHMEM] = (total_memory - total_lowmem) >> PAGE_SHIFT; | 378 | max_zone_pfns[0] = total_lowmem >> PAGE_SHIFT; |
379 | max_zone_pfns[1] = total_memory >> PAGE_SHIFT; | ||
380 | #else | ||
381 | max_zone_pfns[0] = total_memory >> PAGE_SHIFT; | ||
382 | #endif /* CONFIG_HIGHMEM */ | 382 | #endif /* CONFIG_HIGHMEM */ |
383 | 383 | free_area_init_nodes(max_zone_pfns); | |
384 | free_area_init(zones_size); | ||
385 | } | 384 | } |
386 | 385 | ||
387 | void __init mem_init(void) | 386 | void __init mem_init(void) |