author     Bob Picco <bob.picco@hp.com>            2005-09-03 18:54:26 -0400
committer  Linus Torvalds <torvalds@evo.osdl.org>  2005-09-05 03:05:38 -0400
commit     802f192e4a600f7ef84ca25c8b818c8830acef5a (patch)
tree       51e9a6ed164e6a2d8741af510c3954ad79bf19af /arch/ppc64
parent     0216f86dafb389c0ad97529fd45e64e883298cfd (diff)
[PATCH] SPARSEMEM EXTREME
A new option for SPARSEMEM is ARCH_SPARSEMEM_EXTREME. Architecture platforms with a very sparse physical address space would likely want to select this option. For those architecture platforms that don't select the option, the code generated is equivalent to SPARSEMEM currently in -mm. I'll be posting a patch on the ia64 ml which uses this new SPARSEMEM feature.

ARCH_SPARSEMEM_EXTREME makes mem_section a one-dimensional array of pointers to mem_sections. This two-level layout scheme achieves smaller memory requirements for SPARSEMEM, with the trade-off of an additional shift and load when fetching the memory section. The current SPARSEMEM -mm implementation is a one-dimensional array of mem_sections, which is the default SPARSEMEM configuration. The patch attempts to isolate the implementation details of the physical layout of the sparsemem section array.

ARCH_SPARSEMEM_EXTREME depends on 64BIT and is boolean false by default.

I've boot tested ia64 configured for ARCH_SPARSEMEM_EXTREME under aim load. I've also boot tested a 4-way Opteron machine with !ARCH_SPARSEMEM_EXTREME and tested with aim.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
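For readers unfamiliar with the layout the commit message describes, the following is a minimal user-space sketch of the two-level scheme, not the kernel's actual definitions: the names and constants (mem_section_roots, SECTIONS_PER_ROOT, NR_MEM_SECTIONS, and so on) are illustrative stand-ins. The point it shows is that only the small table of root pointers is statically sized, each root of sections is allocated when something in it is first marked present, and every lookup pays one extra shift and load to find the root.

#include <stdio.h>
#include <stdlib.h>

/* Illustrative stand-in for the kernel's per-section metadata. */
struct mem_section {
        unsigned long section_mem_map;
};

#define NR_MEM_SECTIONS         4096UL  /* hypothetical total number of sections */
#define SECTIONS_PER_ROOT       64UL    /* sections packed into one root (power of two) */
#define NR_SECTION_ROOTS        (NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_NR_TO_ROOT(nr)  ((nr) / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK       (SECTIONS_PER_ROOT - 1)

/* "Extreme" layout: a one-dimensional array of pointers to roots of sections. */
static struct mem_section *mem_section_roots[NR_SECTION_ROOTS];

/* Two-level fetch: load the root pointer, then index within the root.
 * This is the extra shift and load the commit message mentions. */
static struct mem_section *nr_to_section(unsigned long nr)
{
        struct mem_section *root = mem_section_roots[SECTION_NR_TO_ROOT(nr)];

        return root ? &root[nr & SECTION_ROOT_MASK] : NULL;
}

/* Mark a section present, allocating its root only when first needed,
 * so unpopulated parts of a sparse address space cost nothing. */
static void mark_section_present(unsigned long nr)
{
        unsigned long root = SECTION_NR_TO_ROOT(nr);

        if (!mem_section_roots[root])
                mem_section_roots[root] =
                        calloc(SECTIONS_PER_ROOT, sizeof(struct mem_section));
        if (mem_section_roots[root])
                mem_section_roots[root][nr & SECTION_ROOT_MASK].section_mem_map = 1;
}

static int section_present(unsigned long nr)
{
        struct mem_section *ms = nr_to_section(nr);

        return ms && ms->section_mem_map;
}

int main(void)
{
        /* A very sparse machine: only two widely separated sections exist,
         * so only two of the 64 roots are ever allocated. */
        mark_section_present(3);
        mark_section_present(4000);

        printf("section 3:    %d\n", section_present(3));
        printf("section 4000: %d\n", section_present(4000));
        printf("section 500:  %d\n", section_present(500));
        return 0;
}

In the default !ARCH_SPARSEMEM_EXTREME configuration the commit message describes, mem_section is instead a single flat array of sections, so a lookup is a plain index with no pointer chase, at the cost of statically sizing the whole array.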
Diffstat (limited to 'arch/ppc64')
-rw-r--r--  arch/ppc64/mm/init.c  27
-rw-r--r--  arch/ppc64/mm/numa.c  43
2 files changed, 49 insertions(+), 21 deletions(-)
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index c02dc9809ca5..b3b1e9c1770a 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -552,27 +552,18 @@ void __init do_init_bootmem(void)
         /* Add all physical memory to the bootmem map, mark each area
          * present.
          */
-        for (i=0; i < lmb.memory.cnt; i++) {
-                unsigned long base, size;
-                unsigned long start_pfn, end_pfn;
-
-                base = lmb.memory.region[i].base;
-                size = lmb.memory.region[i].size;
-
-                start_pfn = base >> PAGE_SHIFT;
-                end_pfn = start_pfn + (size >> PAGE_SHIFT);
-                memory_present(0, start_pfn, end_pfn);
-
-                free_bootmem(base, size);
-        }
+        for (i=0; i < lmb.memory.cnt; i++)
+                free_bootmem(lmb_start_pfn(&lmb.memory, i),
+                             lmb_size_bytes(&lmb.memory, i));
 
         /* reserve the sections we're already using */
-        for (i=0; i < lmb.reserved.cnt; i++) {
-                unsigned long base = lmb.reserved.region[i].base;
-                unsigned long size = lmb.reserved.region[i].size;
-
-                reserve_bootmem(base, size);
-        }
+        for (i=0; i < lmb.reserved.cnt; i++)
+                reserve_bootmem(lmb_start_pfn(&lmb.reserved, i),
+                                lmb_size_bytes(&lmb.reserved, i));
+
+        for (i=0; i < lmb.memory.cnt; i++)
+                memory_present(0, lmb_start_pfn(&lmb.memory, i),
+                                  lmb_end_pfn(&lmb.memory, i));
 }
 
 /*
diff --git a/arch/ppc64/mm/numa.c b/arch/ppc64/mm/numa.c
index c3116f0d788c..cb864b8f2750 100644
--- a/arch/ppc64/mm/numa.c
+++ b/arch/ppc64/mm/numa.c
@@ -440,8 +440,6 @@ new_range:
         for (i = start ; i < (start+size); i += MEMORY_INCREMENT)
                 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
                         numa_domain;
-        memory_present(numa_domain, start >> PAGE_SHIFT,
-                       (start + size) >> PAGE_SHIFT);
 
         if (--ranges)
                 goto new_range;
@@ -483,7 +481,6 @@ static void __init setup_nonnuma(void)
 
         for (i = 0 ; i < top_of_ram; i += MEMORY_INCREMENT)
                 numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] = 0;
-        memory_present(0, 0, init_node_data[0].node_end_pfn);
 }
 
 static void __init dump_numa_topology(void)
@@ -695,6 +692,46 @@ new_range:
                                                 size);
                         }
                 }
+                /*
+                 * This loop may look famaliar, but we have to do it again
+                 * after marking our reserved memory to mark memory present
+                 * for sparsemem.
+                 */
+                addr_cells = get_mem_addr_cells();
+                size_cells = get_mem_size_cells();
+                memory = NULL;
+                while ((memory = of_find_node_by_type(memory, "memory")) != NULL) {
+                        unsigned long mem_start, mem_size;
+                        int numa_domain, ranges;
+                        unsigned int *memcell_buf;
+                        unsigned int len;
+
+                        memcell_buf = (unsigned int *)get_property(memory, "reg", &len);
+                        if (!memcell_buf || len <= 0)
+                                continue;
+
+                        ranges = memory->n_addrs;       /* ranges in cell */
+new_range2:
+                        mem_start = read_n_cells(addr_cells, &memcell_buf);
+                        mem_size = read_n_cells(size_cells, &memcell_buf);
+                        if (numa_enabled) {
+                                numa_domain = of_node_numa_domain(memory);
+                                if (numa_domain >= MAX_NUMNODES)
+                                        numa_domain = 0;
+                        } else
+                                numa_domain = 0;
+
+                        if (numa_domain != nid)
+                                continue;
+
+                        mem_size = numa_enforce_memory_limit(mem_start, mem_size);
+                        memory_present(numa_domain, mem_start >> PAGE_SHIFT,
+                                       (mem_start + mem_size) >> PAGE_SHIFT);
+
+                        if (--ranges)   /* process all ranges in cell */
+                                goto new_range2;
+                }
+
         }
 }
 