author		Anton Blanchard <anton@samba.org>	2005-11-10 22:22:35 -0500
committer	Paul Mackerras <paulus@samba.org>	2005-11-11 06:21:11 -0500
commit		45fb6cea09443b2066016f895937f9c2647a1507
tree		8abd2e14c0e096d583e6dc5151d0669db9227359 /arch/powerpc/mm
parent		3e66c4def14aa64ee6d1d4ef077d789abc30125d
[PATCH] ppc64: Convert NUMA to sparsemem (3)
Convert to sparsemem and remove all the discontigmem code in the
process. This has a few advantages:
- The old numa_memory_lookup_table can go away
- All the arch specific discontigmem magic can go away
We also remove the triple pass of memory properties and instead create a
list of per node extents that we iterate through. A final cleanup would
be to change our lmb code to store extents per node, then we can reuse
that information in the numa code.
Signed-off-by: Anton Blanchard <anton@samba.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Diffstat (limited to 'arch/powerpc/mm')
 arch/powerpc/mm/numa.c | 365 ++++++++++++++++++++++-------------------------
 1 file changed, 159 insertions(+), 206 deletions(-)
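For orientation (an illustration added here, not part of the commit): the patch's central data structure is a flat array of (start_pfn, end_pfn, nid) extents, and add_region() merges a new range into any extent it abuts on the same node. Below is a minimal userspace C sketch of that merge logic; MAX_REGIONS, the test pfn values, and main() are illustrative, and only the merge rules mirror the patch.

#include <stdio.h>

#define MAX_REGIONS 16		/* the patch sizes this as MAX_LMB_REGIONS*2 */

static struct {
	unsigned long start_pfn;
	unsigned long end_pfn;	/* zero end_pfn terminates every walk */
	int nid;
} init_node_data[MAX_REGIONS];

/* Append a region, merging with an adjacent extent on the same node. */
static void add_region(int nid, unsigned long start_pfn, unsigned long pages)
{
	unsigned int i;

	for (i = 0; init_node_data[i].end_pfn; i++) {
		if (init_node_data[i].nid != nid)
			continue;
		if (init_node_data[i].end_pfn == start_pfn) {
			init_node_data[i].end_pfn += pages;	/* grow upward */
			return;
		}
		if (init_node_data[i].start_pfn == start_pfn + pages) {
			init_node_data[i].start_pfn -= pages;	/* grow downward */
			return;
		}
	}

	/* Keep one zeroed sentinel entry at the end of the array. */
	if (i >= MAX_REGIONS - 1) {
		fprintf(stderr, "too many memory regions, truncating\n");
		return;
	}

	init_node_data[i].start_pfn = start_pfn;
	init_node_data[i].end_pfn = start_pfn + pages;
	init_node_data[i].nid = nid;
}

int main(void)
{
	add_region(0, 0x000, 0x100);
	add_region(0, 0x100, 0x100);	/* abuts: merges into [0x000, 0x200) */
	add_region(1, 0x300, 0x100);	/* different node: new extent */

	for (unsigned int i = 0; init_node_data[i].end_pfn; i++)
		printf("nid %d: [0x%lx, 0x%lx)\n", init_node_data[i].nid,
		       init_node_data[i].start_pfn, init_node_data[i].end_pfn);
	return 0;
}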
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index c2d7fec50c92..bd2cf1336885 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -17,9 +17,8 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <asm/sparsemem.h>
 #include <asm/lmb.h>
-#include <asm/machdep.h>
-#include <asm/abs_addr.h>
 #include <asm/system.h>
 #include <asm/smp.h>
 
@@ -28,42 +27,113 @@ static int numa_enabled = 1;
 static int numa_debug;
 #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
 
-#ifdef DEBUG_NUMA
-#define ARRAY_INITIALISER -1
-#else
-#define ARRAY_INITIALISER 0
-#endif
-
-int numa_cpu_lookup_table[NR_CPUS] = { [ 0 ... (NR_CPUS - 1)] =
-	ARRAY_INITIALISER};
-char *numa_memory_lookup_table;
+int numa_cpu_lookup_table[NR_CPUS];
 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
-
 struct pglist_data *node_data[MAX_NUMNODES];
-bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
+
+EXPORT_SYMBOL(numa_cpu_lookup_table);
+EXPORT_SYMBOL(numa_cpumask_lookup_table);
+EXPORT_SYMBOL(node_data);
+
+static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
 static int min_common_depth;
 
 /*
- * We need somewhere to store start/span for each node until we have
+ * We need somewhere to store start/end/node for each region until we have
  * allocated the real node_data structures.
  */
+#define MAX_REGIONS (MAX_LMB_REGIONS*2)
 static struct {
-	unsigned long node_start_pfn;
-	unsigned long node_end_pfn;
-	unsigned long node_present_pages;
-} init_node_data[MAX_NUMNODES] __initdata;
+	unsigned long start_pfn;
+	unsigned long end_pfn;
+	int nid;
+} init_node_data[MAX_REGIONS] __initdata;
 
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(numa_cpu_lookup_table);
-EXPORT_SYMBOL(numa_memory_lookup_table);
-EXPORT_SYMBOL(numa_cpumask_lookup_table);
+int __init early_pfn_to_nid(unsigned long pfn)
+{
+	unsigned int i;
+
+	for (i = 0; init_node_data[i].end_pfn; i++) {
+		unsigned long start_pfn = init_node_data[i].start_pfn;
+		unsigned long end_pfn = init_node_data[i].end_pfn;
+
+		if ((start_pfn <= pfn) && (pfn < end_pfn))
+			return init_node_data[i].nid;
+	}
+
+	return -1;
+}
+
+void __init add_region(unsigned int nid, unsigned long start_pfn,
+		       unsigned long pages)
+{
+	unsigned int i;
+
+	dbg("add_region nid %d start_pfn 0x%lx pages 0x%lx\n",
+		nid, start_pfn, pages);
+
+	for (i = 0; init_node_data[i].end_pfn; i++) {
+		if (init_node_data[i].nid != nid)
+			continue;
+		if (init_node_data[i].end_pfn == start_pfn) {
+			init_node_data[i].end_pfn += pages;
+			return;
+		}
+		if (init_node_data[i].start_pfn == (start_pfn + pages)) {
+			init_node_data[i].start_pfn -= pages;
+			return;
+		}
+	}
+
+	/*
+	 * Leave last entry NULL so we dont iterate off the end (we use
+	 * entry.end_pfn to terminate the walk).
+	 */
+	if (i >= (MAX_REGIONS - 1)) {
+		printk(KERN_ERR "WARNING: too many memory regions in "
+				"numa code, truncating\n");
+		return;
+	}
+
+	init_node_data[i].start_pfn = start_pfn;
+	init_node_data[i].end_pfn = start_pfn + pages;
+	init_node_data[i].nid = nid;
+}
+
+/* We assume init_node_data has no overlapping regions */
+void __init get_region(unsigned int nid, unsigned long *start_pfn,
+		       unsigned long *end_pfn, unsigned long *pages_present)
+{
+	unsigned int i;
+
+	*start_pfn = -1UL;
+	*end_pfn = *pages_present = 0;
+
+	for (i = 0; init_node_data[i].end_pfn; i++) {
+		if (init_node_data[i].nid != nid)
+			continue;
+
+		*pages_present += init_node_data[i].end_pfn -
+			init_node_data[i].start_pfn;
+
+		if (init_node_data[i].start_pfn < *start_pfn)
+			*start_pfn = init_node_data[i].start_pfn;
+
+		if (init_node_data[i].end_pfn > *end_pfn)
+			*end_pfn = init_node_data[i].end_pfn;
+	}
+
+	/* We didnt find a matching region, return start/end as 0 */
+	if (*start_pfn == -1UL)
+		*start_pfn = 0;
+}
 
 static inline void map_cpu_to_node(int cpu, int node)
 {
 	numa_cpu_lookup_table[cpu] = node;
-	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) {
+
+	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
 		cpu_set(cpu, numa_cpumask_lookup_table[node]);
-	}
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -82,7 +152,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static struct device_node * __devinit find_cpu_node(unsigned int cpu)
+static struct device_node *find_cpu_node(unsigned int cpu)
 {
 	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
 	struct device_node *cpu_node = NULL;
@@ -209,7 +279,7 @@ static int __init get_mem_size_cells(void)
 	return rc;
 }
 
-static unsigned long read_n_cells(int n, unsigned int **buf)
+static unsigned long __init read_n_cells(int n, unsigned int **buf)
 {
 	unsigned long result = 0;
 
@@ -291,7 +361,8 @@ static int cpu_numa_callback(struct notifier_block *nfb,
  * or zero. If the returned value of size is 0 the region should be
  * discarded as it lies wholy above the memory limit.
  */
-static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsigned long size)
+static unsigned long __init numa_enforce_memory_limit(unsigned long start,
+						      unsigned long size)
 {
 	/*
 	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
@@ -316,8 +387,7 @@ static int __init parse_numa_properties(void)
 	struct device_node *cpu = NULL;
 	struct device_node *memory = NULL;
 	int addr_cells, size_cells;
-	int max_domain = 0;
-	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
+	int max_domain;
 	unsigned long i;
 
 	if (numa_enabled == 0) {
@@ -325,13 +395,6 @@
 		return -1;
 	}
 
-	numa_memory_lookup_table =
-		(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
-	memset(numa_memory_lookup_table, 0, entries * sizeof(char));
-
-	for (i = 0; i < entries ; i++)
-		numa_memory_lookup_table[i] = ARRAY_INITIALISER;
-
 	min_common_depth = find_min_common_depth();
 
 	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
@@ -383,9 +446,6 @@ new_range:
 		start = read_n_cells(addr_cells, &memcell_buf);
 		size = read_n_cells(size_cells, &memcell_buf);
 
-		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
-		size = _ALIGN_UP(size, MEMORY_INCREMENT);
-
 		numa_domain = of_node_numa_domain(memory);
 
 		if (numa_domain >= MAX_NUMNODES) {
@@ -399,44 +459,15 @@ new_range:
 		if (max_domain < numa_domain)
 			max_domain = numa_domain;
 
-		if (! (size = numa_enforce_memory_limit(start, size))) {
+		if (!(size = numa_enforce_memory_limit(start, size))) {
 			if (--ranges)
 				goto new_range;
 			else
 				continue;
 		}
 
-		/*
-		 * Initialize new node struct, or add to an existing one.
-		 */
-		if (init_node_data[numa_domain].node_end_pfn) {
-			if ((start / PAGE_SIZE) <
-			     init_node_data[numa_domain].node_start_pfn)
-				init_node_data[numa_domain].node_start_pfn =
-					start / PAGE_SIZE;
-			if (((start / PAGE_SIZE) + (size / PAGE_SIZE)) >
-			     init_node_data[numa_domain].node_end_pfn)
-				init_node_data[numa_domain].node_end_pfn =
-					(start / PAGE_SIZE) +
-					(size / PAGE_SIZE);
-
-			init_node_data[numa_domain].node_present_pages +=
-				size / PAGE_SIZE;
-		} else {
-			node_set_online(numa_domain);
-
-			init_node_data[numa_domain].node_start_pfn =
-				start / PAGE_SIZE;
-			init_node_data[numa_domain].node_end_pfn =
-				init_node_data[numa_domain].node_start_pfn +
-				size / PAGE_SIZE;
-			init_node_data[numa_domain].node_present_pages =
-				size / PAGE_SIZE;
-		}
-
-		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
-			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
-				numa_domain;
+		add_region(numa_domain, start >> PAGE_SHIFT,
+			   size >> PAGE_SHIFT);
 
 		if (--ranges)
 			goto new_range;
@@ -452,32 +483,15 @@ static void __init setup_nonnuma(void)
 {
 	unsigned long top_of_ram = lmb_end_of_DRAM();
 	unsigned long total_ram = lmb_phys_mem_size();
-	unsigned long i;
 
 	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
 	printk(KERN_INFO "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	if (!numa_memory_lookup_table) {
-		long entries = top_of_ram >> MEMORY_INCREMENT_SHIFT;
-		numa_memory_lookup_table =
-			(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
-		memset(numa_memory_lookup_table, 0, entries * sizeof(char));
-		for (i = 0; i < entries ; i++)
-			numa_memory_lookup_table[i] = ARRAY_INITIALISER;
-	}
-
 	map_cpu_to_node(boot_cpuid, 0);
-
+	add_region(0, 0, lmb_end_of_DRAM() >> PAGE_SHIFT);
 	node_set_online(0);
-
-	init_node_data[0].node_start_pfn = 0;
-	init_node_data[0].node_end_pfn = lmb_end_of_DRAM() / PAGE_SIZE;
-	init_node_data[0].node_present_pages = total_ram / PAGE_SIZE;
-
-	for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
-		numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
 }
 
 static void __init dump_numa_topology(void)
@@ -495,8 +509,9 @@ static void __init dump_numa_topology(void)
 
 		count = 0;
 
-		for (i = 0; i < lmb_end_of_DRAM(); i += MEMORY_INCREMENT) {
-			if (numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] == node) {
+		for (i = 0; i < lmb_end_of_DRAM();
+		     i += (1 << SECTION_SIZE_BITS)) {
+			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 				if (count == 0)
 					printk(" 0x%lx", i);
 				++count;
@@ -521,10 +536,12 @@
  *
  * Returns the physical address of the memory.
  */
-static unsigned long careful_allocation(int nid, unsigned long size,
-					unsigned long align, unsigned long end)
+static void __init *careful_allocation(int nid, unsigned long size,
+				       unsigned long align,
+				       unsigned long end_pfn)
 {
-	unsigned long ret = lmb_alloc_base(size, align, end);
+	int new_nid;
+	unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret)
@@ -538,28 +555,27 @@ static unsigned long careful_allocation(int nid, unsigned long size,
 	 * If the memory came from a previously allocated node, we must
 	 * retry with the bootmem allocator.
 	 */
-	if (pa_to_nid(ret) < nid) {
-		nid = pa_to_nid(ret);
-		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(nid),
+	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
+	if (new_nid < nid) {
+		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
 					size, align, 0);
 
 		if (!ret)
 			panic("numa.c: cannot allocate %lu bytes on node %d",
-			      size, nid);
+			      size, new_nid);
 
-		ret = virt_to_abs(ret);
+		ret = __pa(ret);
 
 		dbg("alloc_bootmem %lx %lx\n", ret, size);
 	}
 
-	return ret;
+	return (void *)ret;
 }
 
 void __init do_init_bootmem(void)
 {
 	int nid;
-	int addr_cells, size_cells;
-	struct device_node *memory = NULL;
+	unsigned int i;
 	static struct notifier_block ppc64_numa_nb = {
 		.notifier_call = cpu_numa_callback,
 		.priority = 1 /* Must run before sched domains notifier. */
@@ -577,99 +593,66 @@ void __init do_init_bootmem(void)
 	register_cpu_notifier(&ppc64_numa_nb);
 
 	for_each_online_node(nid) {
-		unsigned long start_paddr, end_paddr;
-		int i;
+		unsigned long start_pfn, end_pfn, pages_present;
 		unsigned long bootmem_paddr;
 		unsigned long bootmap_pages;
 
-		start_paddr = init_node_data[nid].node_start_pfn * PAGE_SIZE;
-		end_paddr = init_node_data[nid].node_end_pfn * PAGE_SIZE;
+		get_region(nid, &start_pfn, &end_pfn, &pages_present);
 
 		/* Allocate the node structure node local if possible */
-		NODE_DATA(nid) = (struct pglist_data *)careful_allocation(nid,
+		NODE_DATA(nid) = careful_allocation(nid,
 					sizeof(struct pglist_data),
-					SMP_CACHE_BYTES, end_paddr);
-		NODE_DATA(nid) = abs_to_virt(NODE_DATA(nid));
+					SMP_CACHE_BYTES, end_pfn);
+		NODE_DATA(nid) = __va(NODE_DATA(nid));
 		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
 		dbg("node %d\n", nid);
 		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
 
 		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
-		NODE_DATA(nid)->node_start_pfn =
-			init_node_data[nid].node_start_pfn;
-		NODE_DATA(nid)->node_spanned_pages =
-			end_paddr - start_paddr;
+		NODE_DATA(nid)->node_start_pfn = start_pfn;
+		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
 
 		if (NODE_DATA(nid)->node_spanned_pages == 0)
 			continue;
 
-		dbg("start_paddr = %lx\n", start_paddr);
-		dbg("end_paddr = %lx\n", end_paddr);
+		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
+		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
 
-		bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr) >> PAGE_SHIFT);
+		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+		bootmem_paddr = (unsigned long)careful_allocation(nid,
+					bootmap_pages << PAGE_SHIFT,
+					PAGE_SIZE, end_pfn);
+		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
 
-		bootmem_paddr = careful_allocation(nid,
-					bootmap_pages << PAGE_SHIFT,
-					PAGE_SIZE, end_paddr);
-		memset(abs_to_virt(bootmem_paddr), 0,
-		       bootmap_pages << PAGE_SHIFT);
 		dbg("bootmap_paddr = %lx\n", bootmem_paddr);
 
 		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
-				  start_paddr >> PAGE_SHIFT,
-				  end_paddr >> PAGE_SHIFT);
+				  start_pfn, end_pfn);
 
-		/*
-		 * We need to do another scan of all memory sections to
-		 * associate memory with the correct node.
-		 */
-		addr_cells = get_mem_addr_cells();
-		size_cells = get_mem_size_cells();
-		memory = NULL;
-		while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
-			unsigned long mem_start, mem_size;
-			int numa_domain, ranges;
-			unsigned int *memcell_buf;
-			unsigned int len;
-
-			memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
-			if (!memcell_buf || len <= 0)
-				continue;
+		/* Add free regions on this node */
+		for (i = 0; init_node_data[i].end_pfn; i++) {
+			unsigned long start, end;
 
-			ranges = memory->n_addrs;	/* ranges in cell */
-new_range:
-			mem_start = read_n_cells(addr_cells, &memcell_buf);
-			mem_size = read_n_cells(size_cells, &memcell_buf);
-			if (numa_enabled) {
-				numa_domain = of_node_numa_domain(memory);
-				if (numa_domain >= MAX_NUMNODES)
-					numa_domain = 0;
-			} else
-				numa_domain = 0;
-
-			if (numa_domain != nid)
+			if (init_node_data[i].nid != nid)
 				continue;
 
-			mem_size = numa_enforce_memory_limit(mem_start, mem_size);
-			if (mem_size) {
-				dbg("free_bootmem %lx %lx\n", mem_start, mem_size);
-				free_bootmem_node(NODE_DATA(nid), mem_start, mem_size);
-			}
+			start = init_node_data[i].start_pfn << PAGE_SHIFT;
+			end = init_node_data[i].end_pfn << PAGE_SHIFT;
 
-			if (--ranges)		/* process all ranges in cell */
-				goto new_range;
+			dbg("free_bootmem %lx %lx\n", start, end - start);
+			free_bootmem_node(NODE_DATA(nid), start, end - start);
 		}
 
-		/*
-		 * Mark reserved regions on this node
-		 */
+		/* Mark reserved regions on this node */
 		for (i = 0; i < lmb.reserved.cnt; i++) {
 			unsigned long physbase = lmb.reserved.region[i].base;
 			unsigned long size = lmb.reserved.region[i].size;
+			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
+			unsigned long end_paddr = end_pfn << PAGE_SHIFT;
 
-			if (pa_to_nid(physbase) != nid &&
-			    pa_to_nid(physbase+size-1) != nid)
+			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
+			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
 				continue;
 
 			if (physbase < end_paddr &&
@@ -689,46 +672,19 @@ new_range:
 					     size);
 		}
 	}
-	/*
-	 * This loop may look famaliar, but we have to do it again
-	 * after marking our reserved memory to mark memory present
-	 * for sparsemem.
-	 */
-	addr_cells = get_mem_addr_cells();
-	size_cells = get_mem_size_cells();
-	memory = NULL;
-	while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
-		unsigned long mem_start, mem_size;
-		int numa_domain, ranges;
-		unsigned int *memcell_buf;
-		unsigned int len;
-
-		memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
-		if (!memcell_buf || len <= 0)
-			continue;
 
-		ranges = memory->n_addrs;	/* ranges in cell */
-new_range2:
-		mem_start = read_n_cells(addr_cells, &memcell_buf);
-		mem_size = read_n_cells(size_cells, &memcell_buf);
-		if (numa_enabled) {
-			numa_domain = of_node_numa_domain(memory);
-			if (numa_domain >= MAX_NUMNODES)
-				numa_domain = 0;
-		} else
-			numa_domain = 0;
-
-		if (numa_domain != nid)
+	/* Add regions into sparsemem */
+	for (i = 0; init_node_data[i].end_pfn; i++) {
+		unsigned long start, end;
+
+		if (init_node_data[i].nid != nid)
 			continue;
 
-		mem_size = numa_enforce_memory_limit(mem_start, mem_size);
-		memory_present(numa_domain, mem_start >> PAGE_SHIFT,
-				(mem_start + mem_size) >> PAGE_SHIFT);
+		start = init_node_data[i].start_pfn;
+		end = init_node_data[i].end_pfn;
 
-		if (--ranges)		/* process all ranges in cell */
-			goto new_range2;
+		memory_present(nid, start, end);
 	}
-
 }
 }
 
@@ -742,21 +698,18 @@ void __init paging_init(void)
 	memset(zholes_size, 0, sizeof(zholes_size));
 
 	for_each_online_node(nid) {
-		unsigned long start_pfn;
-		unsigned long end_pfn;
+		unsigned long start_pfn, end_pfn, pages_present;
 
-		start_pfn = init_node_data[nid].node_start_pfn;
-		end_pfn = init_node_data[nid].node_end_pfn;
+		get_region(nid, &start_pfn, &end_pfn, &pages_present);
 
 		zones_size[ZONE_DMA] = end_pfn - start_pfn;
-		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
-			init_node_data[nid].node_present_pages;
+		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present;
 
 		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
 		    zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);
 
-		free_area_init_node(nid, NODE_DATA(nid), zones_size,
-				    start_pfn, zholes_size);
+		free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn,
+				    zholes_size);
 	}
 }
 
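A companion illustration (again not part of the commit): paging_init() above sizes each node's single DMA zone from the span that get_region() returns, and derives the zone hole as span minus the pages actually present. A standalone C sketch of that arithmetic over a node with two discontiguous extents; the extent values are made up, while get_region() mirrors the patch.

#include <stdio.h>

struct region { unsigned long start_pfn, end_pfn; int nid; };

/* Two discontiguous extents on node 0, with a 0x100-page hole between. */
static struct region regions[] = {
	{ 0x000, 0x200, 0 },
	{ 0x300, 0x400, 0 },
	{ 0, 0, 0 },			/* zero end_pfn terminates the walk */
};

/* Mirror of the patch's get_region(): min start, max end, sum of pages. */
static void get_region(int nid, unsigned long *start_pfn,
		       unsigned long *end_pfn, unsigned long *pages_present)
{
	*start_pfn = -1UL;
	*end_pfn = *pages_present = 0;

	for (unsigned int i = 0; regions[i].end_pfn; i++) {
		if (regions[i].nid != nid)
			continue;
		*pages_present += regions[i].end_pfn - regions[i].start_pfn;
		if (regions[i].start_pfn < *start_pfn)
			*start_pfn = regions[i].start_pfn;
		if (regions[i].end_pfn > *end_pfn)
			*end_pfn = regions[i].end_pfn;
	}
	if (*start_pfn == -1UL)		/* no extent on this node */
		*start_pfn = 0;
}

int main(void)
{
	unsigned long start, end, present;

	get_region(0, &start, &end, &present);
	/* zones_size = span; zholes_size = span - present, as in paging_init() */
	printf("span 0x%lx, present 0x%lx, hole 0x%lx\n",
	       end - start, present, (end - start) - present);
	return 0;
}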