Diffstat (limited to 'arch/powerpc/mm/numa.c')

 arch/powerpc/mm/numa.c | 369 ++++++++++++++++++------------------------
 1 file changed, 159 insertions(+), 210 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index da09ba03c424..bd2cf1336885 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -17,9 +17,8 @@
 #include <linux/nodemask.h>
 #include <linux/cpu.h>
 #include <linux/notifier.h>
+#include <asm/sparsemem.h>
 #include <asm/lmb.h>
-#include <asm/machdep.h>
-#include <asm/abs_addr.h>
 #include <asm/system.h>
 #include <asm/smp.h>
 
@@ -28,45 +27,113 @@ static int numa_enabled = 1;
 static int numa_debug;
 #define dbg(args...) if (numa_debug) { printk(KERN_INFO args); }
 
-#ifdef DEBUG_NUMA
-#define ARRAY_INITIALISER -1
-#else
-#define ARRAY_INITIALISER 0
-#endif
-
-int numa_cpu_lookup_table[NR_CPUS] = { [ 0 ... (NR_CPUS - 1)] =
-	ARRAY_INITIALISER};
-char *numa_memory_lookup_table;
+int numa_cpu_lookup_table[NR_CPUS];
 cpumask_t numa_cpumask_lookup_table[MAX_NUMNODES];
-int nr_cpus_in_node[MAX_NUMNODES] = { [0 ... (MAX_NUMNODES -1)] = 0};
-
 struct pglist_data *node_data[MAX_NUMNODES];
-bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
+
+EXPORT_SYMBOL(numa_cpu_lookup_table);
+EXPORT_SYMBOL(numa_cpumask_lookup_table);
+EXPORT_SYMBOL(node_data);
+
+static bootmem_data_t __initdata plat_node_bdata[MAX_NUMNODES];
 static int min_common_depth;
 
 /*
- * We need somewhere to store start/span for each node until we have
+ * We need somewhere to store start/end/node for each region until we have
  * allocated the real node_data structures.
  */
+#define MAX_REGIONS	(MAX_LMB_REGIONS*2)
 static struct {
-	unsigned long node_start_pfn;
-	unsigned long node_end_pfn;
-	unsigned long node_present_pages;
-} init_node_data[MAX_NUMNODES] __initdata;
+	unsigned long start_pfn;
+	unsigned long end_pfn;
+	int nid;
+} init_node_data[MAX_REGIONS] __initdata;
 
-EXPORT_SYMBOL(node_data);
-EXPORT_SYMBOL(numa_cpu_lookup_table);
-EXPORT_SYMBOL(numa_memory_lookup_table);
-EXPORT_SYMBOL(numa_cpumask_lookup_table);
-EXPORT_SYMBOL(nr_cpus_in_node);
+int __init early_pfn_to_nid(unsigned long pfn)
+{
+	unsigned int i;
+
+	for (i = 0; init_node_data[i].end_pfn; i++) {
+		unsigned long start_pfn = init_node_data[i].start_pfn;
+		unsigned long end_pfn = init_node_data[i].end_pfn;
+
+		if ((start_pfn <= pfn) && (pfn < end_pfn))
+			return init_node_data[i].nid;
+	}
+
+	return -1;
+}
+
+void __init add_region(unsigned int nid, unsigned long start_pfn,
+		       unsigned long pages)
+{
+	unsigned int i;
+
+	dbg("add_region nid %d start_pfn 0x%lx pages 0x%lx\n",
+		nid, start_pfn, pages);
+
+	for (i = 0; init_node_data[i].end_pfn; i++) {
+		if (init_node_data[i].nid != nid)
+			continue;
+		if (init_node_data[i].end_pfn == start_pfn) {
+			init_node_data[i].end_pfn += pages;
+			return;
+		}
+		if (init_node_data[i].start_pfn == (start_pfn + pages)) {
+			init_node_data[i].start_pfn -= pages;
+			return;
+		}
+	}
+
+	/*
+	 * Leave last entry NULL so we don't iterate off the end (we use
+	 * entry.end_pfn to terminate the walk).
+	 */
+	if (i >= (MAX_REGIONS - 1)) {
+		printk(KERN_ERR "WARNING: too many memory regions in "
+				"numa code, truncating\n");
+		return;
+	}
+
+	init_node_data[i].start_pfn = start_pfn;
+	init_node_data[i].end_pfn = start_pfn + pages;
+	init_node_data[i].nid = nid;
+}
+
+/* We assume init_node_data has no overlapping regions */
+void __init get_region(unsigned int nid, unsigned long *start_pfn,
+		       unsigned long *end_pfn, unsigned long *pages_present)
+{
+	unsigned int i;
+
+	*start_pfn = -1UL;
+	*end_pfn = *pages_present = 0;
+
+	for (i = 0; init_node_data[i].end_pfn; i++) {
+		if (init_node_data[i].nid != nid)
+			continue;
+
+		*pages_present += init_node_data[i].end_pfn -
+			init_node_data[i].start_pfn;
+
+		if (init_node_data[i].start_pfn < *start_pfn)
+			*start_pfn = init_node_data[i].start_pfn;
+
+		if (init_node_data[i].end_pfn > *end_pfn)
+			*end_pfn = init_node_data[i].end_pfn;
+	}
+
+	/* We didn't find a matching region, return start/end as 0 */
+	if (*start_pfn == -1UL)
+		*start_pfn = 0;
+}
 
 static inline void map_cpu_to_node(int cpu, int node)
 {
 	numa_cpu_lookup_table[cpu] = node;
-	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node]))) {
+
+	if (!(cpu_isset(cpu, numa_cpumask_lookup_table[node])))
 		cpu_set(cpu, numa_cpumask_lookup_table[node]);
-		nr_cpus_in_node[node]++;
-	}
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
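The new init_node_data[] table replaces the old per-node start/span fields and the byte-granular numa_memory_lookup_table with a flat list of regions that add_region() coalesces as they arrive. Below is a minimal userspace sketch of that coalescing rule; it mirrors the patch's logic but is a hypothetical harness, with made-up pfn values, not part of the patch:

#include <stdio.h>

#define MAX_REGIONS 8	/* the kernel uses MAX_LMB_REGIONS*2 */

static struct { unsigned long start_pfn, end_pfn; int nid; }
	region[MAX_REGIONS];

static void toy_add_region(int nid, unsigned long start_pfn,
			   unsigned long pages)
{
	unsigned int i;

	for (i = 0; region[i].end_pfn; i++) {
		if (region[i].nid != nid)
			continue;
		if (region[i].end_pfn == start_pfn) {
			region[i].end_pfn += pages;	/* grow upwards */
			return;
		}
		if (region[i].start_pfn == start_pfn + pages) {
			region[i].start_pfn -= pages;	/* grow downwards */
			return;
		}
	}
	if (i >= MAX_REGIONS - 1)	/* keep a terminating empty entry */
		return;
	region[i].start_pfn = start_pfn;
	region[i].end_pfn = start_pfn + pages;
	region[i].nid = nid;
}

int main(void)
{
	unsigned int i;

	toy_add_region(0, 0x000, 0x100);	/* new entry [0x000, 0x100) */
	toy_add_region(0, 0x100, 0x100);	/* abuts: grows to [0x000, 0x200) */
	toy_add_region(1, 0x400, 0x100);	/* different nid: second entry */

	for (i = 0; region[i].end_pfn; i++)
		printf("nid %d: pfns [0x%lx, 0x%lx)\n", region[i].nid,
		       region[i].start_pfn, region[i].end_pfn);
	return 0;
}

A zeroed end_pfn terminates the walk, which is why add_region() always leaves the last slot empty rather than filling the whole array.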
@@ -78,7 +145,6 @@ static void unmap_cpu_from_node(unsigned long cpu)
 
 	if (cpu_isset(cpu, numa_cpumask_lookup_table[node])) {
 		cpu_clear(cpu, numa_cpumask_lookup_table[node]);
-		nr_cpus_in_node[node]--;
 	} else {
 		printk(KERN_ERR "WARNING: cpu %lu not found in node %d\n",
 		       cpu, node);
@@ -86,7 +152,7 @@ static void unmap_cpu_from_node(unsigned long cpu)
 }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static struct device_node * __devinit find_cpu_node(unsigned int cpu)
+static struct device_node *find_cpu_node(unsigned int cpu)
 {
 	unsigned int hw_cpuid = get_hard_smp_processor_id(cpu);
 	struct device_node *cpu_node = NULL;
@@ -213,7 +279,7 @@ static int __init get_mem_size_cells(void)
 	return rc;
 }
 
-static unsigned long read_n_cells(int n, unsigned int **buf)
+static unsigned long __init read_n_cells(int n, unsigned int **buf)
 {
 	unsigned long result = 0;
 
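read_n_cells() only gains an __init annotation here, but it is central to the parsing below: device-tree "reg" properties encode 64-bit values as big-endian sequences of 32-bit cells, assembled by shifting. A standalone stand-in for the kernel function, assuming a 64-bit unsigned long as on ppc64:

#include <stdio.h>

/* Stand-in for the kernel's read_n_cells(): assemble an n-cell
 * big-endian value and advance the caller's cursor. */
static unsigned long read_n_cells(int n, unsigned int **buf)
{
	unsigned long result = 0;

	while (n--) {
		result = (result << 32) | **buf;	/* next 32-bit cell */
		(*buf)++;				/* advance cursor */
	}
	return result;
}

int main(void)
{
	/* a made-up "reg" property: one (start, size) pair, 2 cells each */
	unsigned int cells[] = { 0x1, 0x80000000, 0x0, 0x10000000 };
	unsigned int *p = cells;

	printf("start = 0x%lx\n", read_n_cells(2, &p));	/* 0x180000000 */
	printf("size  = 0x%lx\n", read_n_cells(2, &p));	/* 0x10000000 */
	return 0;
}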
@@ -295,7 +361,8 @@ static int cpu_numa_callback(struct notifier_block *nfb,
  * or zero. If the returned value of size is 0 the region should be
  * discarded as it lies wholly above the memory limit.
  */
-static unsigned long __init numa_enforce_memory_limit(unsigned long start, unsigned long size)
+static unsigned long __init numa_enforce_memory_limit(unsigned long start,
+						      unsigned long size)
 {
 	/*
 	 * We use lmb_end_of_DRAM() in here instead of memory_limit because
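The clamping contract the comment describes reduces to three cases. A compilable sketch, with the helper below standing in for the kernel function and the limit standing in for lmb_end_of_DRAM():

#include <stdio.h>

/* Clamp [start, start+size) against 'limit'. */
static unsigned long enforce_limit(unsigned long start, unsigned long size,
				   unsigned long limit)
{
	if (start + size <= limit)
		return size;		/* wholly below: unchanged */
	if (start >= limit)
		return 0;		/* wholly above: caller discards it */
	return limit - start;		/* straddles the limit: truncate */
}

int main(void)
{
	unsigned long limit = 0x40000000UL;	/* pretend 1GB of DRAM */

	printf("0x%lx\n", enforce_limit(0x00000000UL, 0x10000000UL, limit));
	printf("0x%lx\n", enforce_limit(0x30000000UL, 0x20000000UL, limit));
	printf("0x%lx\n", enforce_limit(0x50000000UL, 0x10000000UL, limit));
	return 0;
}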
@@ -320,8 +387,7 @@ static int __init parse_numa_properties(void)
 	struct device_node *cpu = NULL;
 	struct device_node *memory = NULL;
 	int addr_cells, size_cells;
-	int max_domain = 0;
-	long entries = lmb_end_of_DRAM() >> MEMORY_INCREMENT_SHIFT;
+	int max_domain;
 	unsigned long i;
 
 	if (numa_enabled == 0) {
@@ -329,13 +395,6 @@ static int __init parse_numa_properties(void)
 		return -1;
 	}
 
-	numa_memory_lookup_table =
-	    (char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
-	memset(numa_memory_lookup_table, 0, entries * sizeof(char));
-
-	for (i = 0; i < entries ; i++)
-		numa_memory_lookup_table[i] = ARRAY_INITIALISER;
-
 	min_common_depth = find_min_common_depth();
 
 	dbg("NUMA associativity depth for CPU/Memory: %d\n", min_common_depth);
@@ -387,9 +446,6 @@ new_range:
 		start = read_n_cells(addr_cells, &memcell_buf);
 		size = read_n_cells(size_cells, &memcell_buf);
 
-		start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
-		size = _ALIGN_UP(size, MEMORY_INCREMENT);
-
 		numa_domain = of_node_numa_domain(memory);
 
 		if (numa_domain >= MAX_NUMNODES) {
@@ -403,44 +459,15 @@ new_range:
 		if (max_domain < numa_domain)
 			max_domain = numa_domain;
 
-		if (! (size = numa_enforce_memory_limit(start, size))) {
+		if (!(size = numa_enforce_memory_limit(start, size))) {
 			if (--ranges)
 				goto new_range;
 			else
 				continue;
 		}
 
-		/*
-		 * Initialize new node struct, or add to an existing one.
-		 */
-		if (init_node_data[numa_domain].node_end_pfn) {
-			if ((start / PAGE_SIZE) <
-			    init_node_data[numa_domain].node_start_pfn)
-				init_node_data[numa_domain].node_start_pfn =
-					start / PAGE_SIZE;
-			if (((start / PAGE_SIZE) + (size / PAGE_SIZE)) >
-			    init_node_data[numa_domain].node_end_pfn)
-				init_node_data[numa_domain].node_end_pfn =
-					(start / PAGE_SIZE) +
-					(size / PAGE_SIZE);
-
-			init_node_data[numa_domain].node_present_pages +=
-				size / PAGE_SIZE;
-		} else {
-			node_set_online(numa_domain);
-
-			init_node_data[numa_domain].node_start_pfn =
-				start / PAGE_SIZE;
-			init_node_data[numa_domain].node_end_pfn =
-				init_node_data[numa_domain].node_start_pfn +
-				size / PAGE_SIZE;
-			init_node_data[numa_domain].node_present_pages =
-				size / PAGE_SIZE;
-		}
-
-		for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
-			numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
-				numa_domain;
+		add_region(numa_domain, start >> PAGE_SHIFT,
+			   size >> PAGE_SHIFT);
 
 		if (--ranges)
 			goto new_range;
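With the lookup table gone, each range is handed to add_region() in page-frame units rather than MEMORY_INCREMENT-aligned bytes. A worked example of the conversion, assuming 4 KB pages (PAGE_SHIFT == 12; the addresses are made up):

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KB pages */

int main(void)
{
	unsigned long start = 0x10000000UL;	/* as read by read_n_cells() */
	unsigned long size  = 0x08000000UL;

	/* parse_numa_properties() now effectively calls:
	 * add_region(numa_domain, start >> PAGE_SHIFT, size >> PAGE_SHIFT) */
	printf("start_pfn = 0x%lx, pages = 0x%lx\n",
	       start >> PAGE_SHIFT, size >> PAGE_SHIFT);	/* 0x10000, 0x8000 */
	return 0;
}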
@@ -456,32 +483,15 @@ static void __init setup_nonnuma(void)
 {
 	unsigned long top_of_ram = lmb_end_of_DRAM();
 	unsigned long total_ram = lmb_phys_mem_size();
-	unsigned long i;
 
 	printk(KERN_INFO "Top of RAM: 0x%lx, Total RAM: 0x%lx\n",
 	       top_of_ram, total_ram);
 	printk(KERN_INFO "Memory hole size: %ldMB\n",
 	       (top_of_ram - total_ram) >> 20);
 
-	if (!numa_memory_lookup_table) {
-		long entries = top_of_ram >> MEMORY_INCREMENT_SHIFT;
-		numa_memory_lookup_table =
-			(char *)abs_to_virt(lmb_alloc(entries * sizeof(char), 1));
-		memset(numa_memory_lookup_table, 0, entries * sizeof(char));
-		for (i = 0; i < entries ; i++)
-			numa_memory_lookup_table[i] = ARRAY_INITIALISER;
-	}
-
 	map_cpu_to_node(boot_cpuid, 0);
-
+	add_region(0, 0, lmb_end_of_DRAM() >> PAGE_SHIFT);
 	node_set_online(0);
-
-	init_node_data[0].node_start_pfn = 0;
-	init_node_data[0].node_end_pfn = lmb_end_of_DRAM() / PAGE_SIZE;
-	init_node_data[0].node_present_pages = total_ram / PAGE_SIZE;
-
-	for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
-		numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
 }
 
 static void __init dump_numa_topology(void)
@@ -499,8 +509,9 @@ static void __init dump_numa_topology(void)
 
 		count = 0;
 
-		for (i = 0; i < lmb_end_of_DRAM(); i += MEMORY_INCREMENT) {
-			if (numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] == node) {
+		for (i = 0; i < lmb_end_of_DRAM();
+		     i += (1 << SECTION_SIZE_BITS)) {
+			if (early_pfn_to_nid(i >> PAGE_SHIFT) == node) {
 				if (count == 0)
 					printk(" 0x%lx", i);
 				++count;
@@ -525,10 +536,12 @@ static void __init dump_numa_topology(void)
  *
  * Returns the physical address of the memory.
  */
-static unsigned long careful_allocation(int nid, unsigned long size,
-					unsigned long align, unsigned long end)
+static void __init *careful_allocation(int nid, unsigned long size,
+				       unsigned long align,
+				       unsigned long end_pfn)
 {
-	unsigned long ret = lmb_alloc_base(size, align, end);
+	int new_nid;
+	unsigned long ret = lmb_alloc_base(size, align, end_pfn << PAGE_SHIFT);
 
 	/* retry over all memory */
 	if (!ret)
@@ -542,28 +555,27 @@ static unsigned long careful_allocation(int nid, unsigned long size,
 	 * If the memory came from a previously allocated node, we must
 	 * retry with the bootmem allocator.
 	 */
-	if (pa_to_nid(ret) < nid) {
-		nid = pa_to_nid(ret);
-		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(nid),
+	new_nid = early_pfn_to_nid(ret >> PAGE_SHIFT);
+	if (new_nid < nid) {
+		ret = (unsigned long)__alloc_bootmem_node(NODE_DATA(new_nid),
 					size, align, 0);
 
 		if (!ret)
 			panic("numa.c: cannot allocate %lu bytes on node %d",
-			      size, nid);
+			      size, new_nid);
 
-		ret = virt_to_abs(ret);
+		ret = __pa(ret);
 
 		dbg("alloc_bootmem %lx %lx\n", ret, size);
 	}
 
-	return ret;
+	return (void *)ret;
 }
 
 void __init do_init_bootmem(void)
 {
 	int nid;
-	int addr_cells, size_cells;
-	struct device_node *memory = NULL;
+	unsigned int i;
 	static struct notifier_block ppc64_numa_nb = {
 		.notifier_call = cpu_numa_callback,
 		.priority = 1 /* Must run before sched domains notifier. */
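careful_allocation()'s fallback order is easy to lose in the diff: try to allocate below the node's own end address, retry over all memory, and, if the result lands on an earlier node whose free pages have already been handed to bootmem, allocate from that node's bootmem instead. A toy, compilable model of that ordering; every allocator here is a fake standing in for lmb_alloc_base(), __alloc_bootmem_node() and early_pfn_to_nid(), and all addresses are invented:

#include <stdio.h>
#include <stdlib.h>

static unsigned long fake_lmb_alloc_base(unsigned long size,
					 unsigned long limit)
{
	/* pretend lmb only has memory left below 1GB (node 0) */
	return limit >= 0x40000000UL ? 0x20000000UL : 0;
}

static int fake_early_pa_to_nid(unsigned long pa)
{
	return pa < 0x40000000UL ? 0 : 1;	/* node 0 owns the first 1GB */
}

static unsigned long fake_bootmem_alloc_on(int nid, unsigned long size)
{
	return 0x10000000UL;	/* pretend bootmem on 'nid' succeeded */
}

static unsigned long toy_careful_allocation(int nid, unsigned long size,
					    unsigned long end_paddr)
{
	int new_nid;
	unsigned long ret;

	/* 1. try to stay below the node's own end address */
	ret = fake_lmb_alloc_base(size, end_paddr);

	/* 2. retry over all memory */
	if (!ret)
		ret = fake_lmb_alloc_base(size, ~0UL);
	if (!ret) {
		fprintf(stderr, "cannot allocate %lu bytes\n", size);
		exit(1);
	}

	/* 3. if it landed on an earlier, already-initialised node, that
	 * node's free memory now belongs to bootmem, so retry there */
	new_nid = fake_early_pa_to_nid(ret);
	if (new_nid < nid)
		ret = fake_bootmem_alloc_on(new_nid, size);

	return ret;
}

int main(void)
{
	printf("node 1 allocation landed at 0x%lx\n",
	       toy_careful_allocation(1, 4096, 0x80000000UL));
	return 0;
}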
@@ -581,99 +593,66 @@ void __init do_init_bootmem(void)
 	register_cpu_notifier(&ppc64_numa_nb);
 
 	for_each_online_node(nid) {
-		unsigned long start_paddr, end_paddr;
-		int i;
+		unsigned long start_pfn, end_pfn, pages_present;
 		unsigned long bootmem_paddr;
 		unsigned long bootmap_pages;
 
-		start_paddr = init_node_data[nid].node_start_pfn * PAGE_SIZE;
-		end_paddr = init_node_data[nid].node_end_pfn * PAGE_SIZE;
+		get_region(nid, &start_pfn, &end_pfn, &pages_present);
 
 		/* Allocate the node structure node local if possible */
-		NODE_DATA(nid) = (struct pglist_data *)careful_allocation(nid,
+		NODE_DATA(nid) = careful_allocation(nid,
 					sizeof(struct pglist_data),
-					SMP_CACHE_BYTES, end_paddr);
-		NODE_DATA(nid) = abs_to_virt(NODE_DATA(nid));
+					SMP_CACHE_BYTES, end_pfn);
+		NODE_DATA(nid) = __va(NODE_DATA(nid));
 		memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
 
 		dbg("node %d\n", nid);
 		dbg("NODE_DATA() = %p\n", NODE_DATA(nid));
 
 		NODE_DATA(nid)->bdata = &plat_node_bdata[nid];
-		NODE_DATA(nid)->node_start_pfn =
-			init_node_data[nid].node_start_pfn;
-		NODE_DATA(nid)->node_spanned_pages =
-			end_paddr - start_paddr;
+		NODE_DATA(nid)->node_start_pfn = start_pfn;
+		NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
 
 		if (NODE_DATA(nid)->node_spanned_pages == 0)
 			continue;
 
-		dbg("start_paddr = %lx\n", start_paddr);
-		dbg("end_paddr = %lx\n", end_paddr);
+		dbg("start_paddr = %lx\n", start_pfn << PAGE_SHIFT);
+		dbg("end_paddr = %lx\n", end_pfn << PAGE_SHIFT);
 
-		bootmap_pages = bootmem_bootmap_pages((end_paddr - start_paddr) >> PAGE_SHIFT);
+		bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
+		bootmem_paddr = (unsigned long)careful_allocation(nid,
+					bootmap_pages << PAGE_SHIFT,
+					PAGE_SIZE, end_pfn);
+		memset(__va(bootmem_paddr), 0, bootmap_pages << PAGE_SHIFT);
 
-		bootmem_paddr = careful_allocation(nid,
-					bootmap_pages << PAGE_SHIFT,
-					PAGE_SIZE, end_paddr);
-		memset(abs_to_virt(bootmem_paddr), 0,
-		       bootmap_pages << PAGE_SHIFT);
 		dbg("bootmap_paddr = %lx\n", bootmem_paddr);
 
 		init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
-				  start_paddr >> PAGE_SHIFT,
-				  end_paddr >> PAGE_SHIFT);
+				  start_pfn, end_pfn);
 
-		/*
-		 * We need to do another scan of all memory sections to
-		 * associate memory with the correct node.
-		 */
-		addr_cells = get_mem_addr_cells();
-		size_cells = get_mem_size_cells();
-		memory = NULL;
-		while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
-			unsigned long mem_start, mem_size;
-			int numa_domain, ranges;
-			unsigned int *memcell_buf;
-			unsigned int len;
-
-			memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
-			if (!memcell_buf || len <= 0)
-				continue;
+		/* Add free regions on this node */
+		for (i = 0; init_node_data[i].end_pfn; i++) {
+			unsigned long start, end;
 
-			ranges = memory->n_addrs;	/* ranges in cell */
-new_range:
-			mem_start = read_n_cells(addr_cells, &memcell_buf);
-			mem_size = read_n_cells(size_cells, &memcell_buf);
-			if (numa_enabled) {
-				numa_domain = of_node_numa_domain(memory);
-				if (numa_domain >= MAX_NUMNODES)
-					numa_domain = 0;
-			} else
-				numa_domain = 0;
-
-			if (numa_domain != nid)
+			if (init_node_data[i].nid != nid)
 				continue;
 
-			mem_size = numa_enforce_memory_limit(mem_start, mem_size);
-			if (mem_size) {
-				dbg("free_bootmem %lx %lx\n", mem_start, mem_size);
-				free_bootmem_node(NODE_DATA(nid), mem_start, mem_size);
-			}
+			start = init_node_data[i].start_pfn << PAGE_SHIFT;
+			end = init_node_data[i].end_pfn << PAGE_SHIFT;
 
-			if (--ranges)		/* process all ranges in cell */
-				goto new_range;
+			dbg("free_bootmem %lx %lx\n", start, end - start);
+			free_bootmem_node(NODE_DATA(nid), start, end - start);
 		}
 
-		/*
-		 * Mark reserved regions on this node
-		 */
+		/* Mark reserved regions on this node */
 		for (i = 0; i < lmb.reserved.cnt; i++) {
 			unsigned long physbase = lmb.reserved.region[i].base;
 			unsigned long size = lmb.reserved.region[i].size;
+			unsigned long start_paddr = start_pfn << PAGE_SHIFT;
+			unsigned long end_paddr = end_pfn << PAGE_SHIFT;
 
-			if (pa_to_nid(physbase) != nid &&
-			    pa_to_nid(physbase+size-1) != nid)
+			if (early_pfn_to_nid(physbase >> PAGE_SHIFT) != nid &&
+			    early_pfn_to_nid((physbase+size-1) >> PAGE_SHIFT) != nid)
 				continue;
 
 			if (physbase < end_paddr &&
@@ -693,46 +672,19 @@ new_range:
 					size);
 			}
 		}
-		/*
-		 * This loop may look famaliar, but we have to do it again
-		 * after marking our reserved memory to mark memory present
-		 * for sparsemem.
-		 */
-		addr_cells = get_mem_addr_cells();
-		size_cells = get_mem_size_cells();
-		memory = NULL;
-		while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
-			unsigned long mem_start, mem_size;
-			int numa_domain, ranges;
-			unsigned int *memcell_buf;
-			unsigned int len;
-
-			memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
-			if (!memcell_buf || len <= 0)
-				continue;
 
-			ranges = memory->n_addrs;	/* ranges in cell */
-new_range2:
-			mem_start = read_n_cells(addr_cells, &memcell_buf);
-			mem_size = read_n_cells(size_cells, &memcell_buf);
-			if (numa_enabled) {
-				numa_domain = of_node_numa_domain(memory);
-				if (numa_domain >= MAX_NUMNODES)
-					numa_domain = 0;
-			} else
-				numa_domain = 0;
-
-			if (numa_domain != nid)
+		/* Add regions into sparsemem */
+		for (i = 0; init_node_data[i].end_pfn; i++) {
+			unsigned long start, end;
+
+			if (init_node_data[i].nid != nid)
 				continue;
 
-			mem_size = numa_enforce_memory_limit(mem_start, mem_size);
-			memory_present(numa_domain, mem_start >> PAGE_SHIFT,
-				       (mem_start + mem_size) >> PAGE_SHIFT);
+			start = init_node_data[i].start_pfn;
+			end = init_node_data[i].end_pfn;
 
-			if (--ranges) /* process all ranges in cell */
-				goto new_range2;
+			memory_present(nid, start, end);
 		}
-
 	}
 }
 
@@ -746,21 +698,18 @@ void __init paging_init(void)
 	memset(zholes_size, 0, sizeof(zholes_size));
 
 	for_each_online_node(nid) {
-		unsigned long start_pfn;
-		unsigned long end_pfn;
+		unsigned long start_pfn, end_pfn, pages_present;
 
-		start_pfn = init_node_data[nid].node_start_pfn;
-		end_pfn = init_node_data[nid].node_end_pfn;
+		get_region(nid, &start_pfn, &end_pfn, &pages_present);
 
 		zones_size[ZONE_DMA] = end_pfn - start_pfn;
-		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] -
-			init_node_data[nid].node_present_pages;
+		zholes_size[ZONE_DMA] = zones_size[ZONE_DMA] - pages_present;
 
 		dbg("free_area_init node %d %lx %lx (hole: %lx)\n", nid,
 			zones_size[ZONE_DMA], start_pfn, zholes_size[ZONE_DMA]);
 
-		free_area_init_node(nid, NODE_DATA(nid), zones_size,
-				start_pfn, zholes_size);
+		free_area_init_node(nid, NODE_DATA(nid), zones_size, start_pfn,
+				zholes_size);
 	}
 }
 
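The zone arithmetic in paging_init() follows directly from get_region(): the zone spans end_pfn - start_pfn frames, and anything not counted in pages_present is a hole. A worked example with made-up numbers:

#include <stdio.h>

int main(void)
{
	/* hypothetical node: spans pfns [0x1000, 0x5000), 0x3000 present */
	unsigned long start_pfn = 0x1000, end_pfn = 0x5000;
	unsigned long pages_present = 0x3000;	/* summed by get_region() */

	unsigned long zone_size = end_pfn - start_pfn;		/* spanned */
	unsigned long zone_holes = zone_size - pages_present;	/* missing */

	printf("ZONE_DMA: 0x%lx pages spanned, 0x%lx in holes\n",
	       zone_size, zone_holes);	/* 0x4000 spanned, 0x1000 holes */
	return 0;
}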