author		Bob Picco <bob.picco@hp.com>	2005-09-03 18:54:26 -0400
committer	Linus Torvalds <torvalds@evo.osdl.org>	2005-09-05 03:05:38 -0400
commit		802f192e4a600f7ef84ca25c8b818c8830acef5a
tree		51e9a6ed164e6a2d8741af510c3954ad79bf19af /mm
parent		0216f86dafb389c0ad97529fd45e64e883298cfd
[PATCH] SPARSEMEM EXTREME
A new option for SPARSEMEM is ARCH_SPARSEMEM_EXTREME. Architecture platforms with a very sparse physical address space would likely want to select this option. For those architecture platforms that don't select the option, the code generated is equivalent to SPARSEMEM currently in -mm. I'll be posting a patch on the ia64 mailing list which uses this new SPARSEMEM feature.

ARCH_SPARSEMEM_EXTREME makes mem_section a one dimensional array of pointers to mem_sections. This two level layout scheme achieves smaller memory requirements for SPARSEMEM, with the tradeoff of an additional shift and load when fetching the memory section. The current SPARSEMEM -mm implementation is a one dimensional array of mem_sections, which remains the default SPARSEMEM configuration. The patch isolates the implementation details of the physical layout of the sparsemem section array.

ARCH_SPARSEMEM_EXTREME depends on 64BIT and defaults to boolean false.

I've boot tested ia64 configured for ARCH_SPARSEMEM_EXTREME under aim load. I've also boot tested a 4 way Opteron machine with !ARCH_SPARSEMEM_EXTREME and tested with aim.

Signed-off-by: Andy Whitcroft <apw@shadowen.org>
Signed-off-by: Bob Picco <bob.picco@hp.com>
Signed-off-by: Dave Hansen <haveblue@us.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
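As an aside, here is a minimal sketch of the two level lookup described above, assuming one root covers a page worth of mem_section entries. NR_SECTION_ROOTS, SECTION_TO_ROOT and __nr_to_section appear in the diff below; SECTIONS_PER_ROOT and the macro bodies here are illustrative reconstructions, not taken verbatim from this patch:

/* Sketch only: assumed helper definitions for the two level layout. */
#define SECTIONS_PER_ROOT	(PAGE_SIZE / sizeof(struct mem_section))
#define NR_SECTION_ROOTS	(NR_MEM_SECTIONS / SECTIONS_PER_ROOT)
#define SECTION_TO_ROOT(sec)	((sec) / SECTIONS_PER_ROOT)

static inline struct mem_section *__nr_to_section(unsigned long nr)
{
#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
	/*
	 * Two level: index the root array, then the section within the
	 * root page. This is the extra shift and load mentioned above.
	 */
	return &mem_section[SECTION_TO_ROOT(nr)][nr % SECTIONS_PER_ROOT];
#else
	/* Flat array: a single indexed load. */
	return &mem_section[nr];
#endif
}

The root array itself holds only pointers, so unpopulated parts of the physical address space cost one NULL pointer per root rather than a run of full mem_section entries.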
Diffstat (limited to 'mm')
-rw-r--r--	mm/Kconfig	9
-rw-r--r--	mm/sparse.c	38
2 files changed, 41 insertions(+), 6 deletions(-)
diff --git a/mm/Kconfig b/mm/Kconfig
index cd379936cac6..fc644c5c065d 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -89,3 +89,12 @@ config NEED_MULTIPLE_NODES
 config HAVE_MEMORY_PRESENT
 	def_bool y
 	depends on ARCH_HAVE_MEMORY_PRESENT || SPARSEMEM
+
+#
+# Architecture platforms which require a two level mem_section in SPARSEMEM
+# must select this option. This is usually for architecture platforms with
+# an extremely sparse physical address space.
+#
+config ARCH_SPARSEMEM_EXTREME
+	def_bool n
+	depends on SPARSEMEM && 64BIT
diff --git a/mm/sparse.c b/mm/sparse.c
index b54e304df4a7..b2b456bf0a5d 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -13,7 +13,26 @@
  *
  * 1) mem_section	- memory sections, mem_map's for valid memory
  */
-struct mem_section mem_section[NR_MEM_SECTIONS];
+#ifdef CONFIG_ARCH_SPARSEMEM_EXTREME
+struct mem_section *mem_section[NR_SECTION_ROOTS]
+	____cacheline_maxaligned_in_smp;
+
+static void sparse_index_init(unsigned long section, int nid)
+{
+	unsigned long root = SECTION_TO_ROOT(section);
+
+	if (mem_section[root])
+		return;
+	mem_section[root] = alloc_bootmem_node(NODE_DATA(nid), PAGE_SIZE);
+	if (mem_section[root])
+		memset(mem_section[root], 0, PAGE_SIZE);
+	else
+		panic("memory_present: NO MEMORY\n");
+}
+#else
+struct mem_section mem_section[NR_MEM_SECTIONS]
+	____cacheline_maxaligned_in_smp;
+#endif
 EXPORT_SYMBOL(mem_section);
 
 /* Record a memory area against a node. */
@@ -24,8 +43,13 @@ void memory_present(int nid, unsigned long start, unsigned long end)
 	start &= PAGE_SECTION_MASK;
 	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
 		unsigned long section = pfn_to_section_nr(pfn);
-		if (!mem_section[section].section_mem_map)
-			mem_section[section].section_mem_map = SECTION_MARKED_PRESENT;
+		struct mem_section *ms;
+
+		sparse_index_init(section, nid);
+
+		ms = __nr_to_section(section);
+		if (!ms->section_mem_map)
+			ms->section_mem_map = SECTION_MARKED_PRESENT;
 	}
 }
 
@@ -85,6 +109,7 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
 {
 	struct page *map;
 	int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
+	struct mem_section *ms = __nr_to_section(pnum);
 
 	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
 	if (map)
@@ -96,7 +121,7 @@ static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
 		return map;
 
 	printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
-	mem_section[pnum].section_mem_map = 0;
+	ms->section_mem_map = 0;
 	return NULL;
 }
 
@@ -114,8 +139,9 @@ void sparse_init(void)
 			continue;
 
 		map = sparse_early_mem_map_alloc(pnum);
-		if (map)
-			sparse_init_one_section(&mem_section[pnum], pnum, map);
+		if (!map)
+			continue;
+		sparse_init_one_section(__nr_to_section(pnum), pnum, map);
 	}
 }
 
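To make the memory saving concrete, a back-of-envelope comparison under assumed ia64-like parameters; the constants here are illustrative, not taken from this patch:

/*
 * Assumed: 16KB pages, SECTION_SIZE_BITS = 28 (256MB sections),
 * MAX_PHYSMEM_BITS = 50, sizeof(struct mem_section) = 8 bytes.
 *
 * NR_MEM_SECTIONS   = 1 << (50 - 28)     = 4M sections
 * Flat SPARSEMEM:     4M * 8B            = 32MB of static table,
 *                     whether or not the covered memory is present.
 *
 * SECTIONS_PER_ROOT = 16KB / 8B          = 2048
 * NR_SECTION_ROOTS  = 4M / 2048          = 2048
 * SPARSEMEM_EXTREME:  2048 pointers * 8B = 16KB of static root array,
 *                     plus one 16KB page per root that actually covers
 *                     present memory (allocated lazily by
 *                     sparse_index_init() above).
 */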