author    Bob Picco <bob.picco@hp.com>            2005-09-03 18:54:26 -0400
committer Linus Torvalds <torvalds@evo.osdl.org>  2005-09-05 03:05:38 -0400
commit    802f192e4a600f7ef84ca25c8b818c8830acef5a
tree      51e9a6ed164e6a2d8741af510c3954ad79bf19af /arch/ppc64/mm/init.c
parent    0216f86dafb389c0ad97529fd45e64e883298cfd
[PATCH] SPARSEMEM EXTREME
A new option for SPARSEMEM is ARCH_SPARSEMEM_EXTREME. Architecture platforms with a very sparse physical address space would likely want to select this option. For those architecture platforms that don't select it, the generated code is equivalent to the SPARSEMEM code currently in -mm. I'll be posting a patch on the ia64 mailing list which uses this new SPARSEMEM feature.

ARCH_SPARSEMEM_EXTREME makes mem_section a one-dimensional array of pointers to mem_sections. This two-level layout scheme achieves smaller memory requirements for SPARSEMEM, with the tradeoff of an additional shift and load when fetching the memory section. The current SPARSEMEM implementation in -mm is a one-dimensional array of mem_sections, which remains the default SPARSEMEM configuration. The patch attempts to isolate the implementation details of the physical layout of the sparsemem section array.

ARCH_SPARSEMEM_EXTREME depends on 64BIT and defaults to boolean false.

I've boot tested ia64 configured for ARCH_SPARSEMEM_EXTREME under aim load. I've also boot tested a 4-way Opteron machine with !ARCH_SPARSEMEM_EXTREME and tested with aim.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
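To make the two-level scheme concrete, here is a minimal sketch of the lookup path, modeled on the sparsemem definitions this patch series touches in include/linux/mmzone.h. It is reconstructed from the description above; this page shows only the arch/ppc64/mm/init.c hunk, so the macro names and constants here are assumptions, not part of the diff below.

/* Sketch only: names/constants assumed, not shown in this page's hunk. */
#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
/* pack as many mem_sections into each root as fit in one page */
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
#else
#define SECTIONS_PER_ROOT	1
#endif

#define SECTION_NR_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_ROOT_MASK	(SECTIONS_PER_ROOT - 1)

#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
/* EXTREME: a one-dimensional array of pointers; roots covering holes
 * in the physical address space can simply stay unallocated. */
extern struct mem_section *mem_section[NR_SECTION_ROOTS];
#else
/* default: a flat, statically sized array of mem_sections */
extern struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT];
#endif

/* the "additional shift and load": index the root, then the entry */
static inline struct mem_section *__nr_to_section(unsigned long nr)
{
	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
}

With SECTIONS_PER_ROOT == 1 the two indexes collapse to the flat array, which is why the !ARCH_SPARSEMEM_EXTREME case generates code equivalent to plain SPARSEMEM.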
Diffstat (limited to 'arch/ppc64/mm/init.c')
-rw-r--r--  arch/ppc64/mm/init.c | 27
1 file changed, 9 insertions(+), 18 deletions(-)
diff --git a/arch/ppc64/mm/init.c b/arch/ppc64/mm/init.c
index c02dc9809ca5..b3b1e9c1770a 100644
--- a/arch/ppc64/mm/init.c
+++ b/arch/ppc64/mm/init.c
@@ -552,27 +552,18 @@ void __init do_init_bootmem(void)
 	/* Add all physical memory to the bootmem map, mark each area
 	 * present.
 	 */
-	for (i=0; i < lmb.memory.cnt; i++) {
-		unsigned long base, size;
-		unsigned long start_pfn, end_pfn;
-
-		base = lmb.memory.region[i].base;
-		size = lmb.memory.region[i].size;
-
-		start_pfn = base >> PAGE_SHIFT;
-		end_pfn = start_pfn + (size >> PAGE_SHIFT);
-		memory_present(0, start_pfn, end_pfn);
-
-		free_bootmem(base, size);
-	}
+	for (i=0; i < lmb.memory.cnt; i++)
+		free_bootmem(lmb_start_pfn(&lmb.memory, i),
+			     lmb_size_bytes(&lmb.memory, i));
 
 	/* reserve the sections we're already using */
-	for (i=0; i < lmb.reserved.cnt; i++) {
-		unsigned long base = lmb.reserved.region[i].base;
-		unsigned long size = lmb.reserved.region[i].size;
-
-		reserve_bootmem(base, size);
-	}
+	for (i=0; i < lmb.reserved.cnt; i++)
+		reserve_bootmem(lmb_start_pfn(&lmb.reserved, i),
+				lmb_size_bytes(&lmb.reserved, i));
+
+	for (i=0; i < lmb.memory.cnt; i++)
+		memory_present(0, lmb_start_pfn(&lmb.memory, i),
+			       lmb_end_pfn(&lmb.memory, i));
 }
 
 /*
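For reference, the lmb accessors on the added lines boil down to simple reads of the ppc64 lmb region table. A sketch of plausible definitions follows, modeled on include/asm-ppc64/lmb.h of this era; these are reconstructed for illustration and are not part of this diff.

/* Sketch only: reconstructed helpers, not shown in this page's hunk. */
static inline unsigned long
lmb_size_bytes(struct lmb_region *type, unsigned long region_nr)
{
	return type->region[region_nr].size;		/* region size in bytes */
}

static inline unsigned long
lmb_start_pfn(struct lmb_region *type, unsigned long region_nr)
{
	return type->region[region_nr].base >> PAGE_SHIFT;	/* first page frame */
}

static inline unsigned long
lmb_end_pfn(struct lmb_region *type, unsigned long region_nr)
{
	return lmb_start_pfn(type, region_nr) +
	       (lmb_size_bytes(type, region_nr) >> PAGE_SHIFT);
}

The net effect of the hunk is to replace the open-coded base/size/pfn arithmetic with these helpers, and to split memory_present() out of the free_bootmem() loop into its own pass over lmb.memory.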