aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorTejun Heo <tj@kernel.org>2011-02-16 11:11:08 -0500
committerTejun Heo <tj@kernel.org>2011-02-16 11:11:08 -0500
commit43a662f04f731c331706456c9852ef7146ba5d85 (patch)
tree055e3433a69d35ad20ed405007220c28f8674f02
parentef396ec96c1a8ffd2b0bc67f1f79c7274de02b95 (diff)
x86-64, NUMA: Unify use of memblk in all init methods
Make both amd and dummy use numa_add_memblk() to describe the detected memory blocks. This allows initmem_init() to call numa_register_memblk() regardless of init method in use. Drop custom memory registration codes from amd and dummy. After this change, memblk merge/cleanup in numa_register_memblks() is applied to all init methods. As this makes compute_hash_shift() and numa_register_memblks() used only inside numa_64.c, make them static. Signed-off-by: Tejun Heo <tj@kernel.org> Cc: Yinghai Lu <yinghai@kernel.org> Cc: Brian Gerst <brgerst@gmail.com> Cc: Cyrill Gorcunov <gorcunov@gmail.com> Cc: Shaohui Zheng <shaohui.zheng@intel.com> Cc: David Rientjes <rientjes@google.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: H. Peter Anvin <hpa@linux.intel.com>
-rw-r--r--arch/x86/include/asm/numa_64.h4
-rw-r--r--arch/x86/mm/amdtopology_64.c13
-rw-r--r--arch/x86/mm/numa_64.c15
-rw-r--r--arch/x86/mm/srat_64.c5
4 files changed, 8 insertions(+), 29 deletions(-)
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 3306a2b99ece..e925605150a0 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -8,9 +8,6 @@ struct bootnode {
8 u64 end; 8 u64 end;
9}; 9};
10 10
11extern int compute_hash_shift(struct bootnode *nodes, int numblks,
12 int *nodeids);
13
14#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT)) 11#define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
15 12
16extern int numa_off; 13extern int numa_off;
@@ -33,7 +30,6 @@ extern struct bootnode numa_nodes[MAX_NUMNODES] __initdata;
33 30
34extern int __cpuinit numa_cpu_node(int cpu); 31extern int __cpuinit numa_cpu_node(int cpu);
35extern int __init numa_add_memblk(int nodeid, u64 start, u64 end); 32extern int __init numa_add_memblk(int nodeid, u64 start, u64 end);
36extern int __init numa_register_memblks(void);
37 33
38#ifdef CONFIG_NUMA_EMU 34#ifdef CONFIG_NUMA_EMU
39#define FAKE_NODE_MIN_SIZE ((u64)32 << 20) 35#define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index cf29527885f8..d6d7aa4b98c6 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -167,6 +167,7 @@ int __init amd_numa_init(void)
167 167
168 numa_nodes[nodeid].start = base; 168 numa_nodes[nodeid].start = base;
169 numa_nodes[nodeid].end = limit; 169 numa_nodes[nodeid].end = limit;
170 numa_add_memblk(nodeid, base, limit);
170 171
171 prevbase = base; 172 prevbase = base;
172 173
@@ -263,18 +264,6 @@ int __init amd_scan_nodes(void)
263{ 264{
264 int i; 265 int i;
265 266
266 memnode_shift = compute_hash_shift(numa_nodes, 8, NULL);
267 if (memnode_shift < 0) {
268 pr_err("No NUMA node hash function found. Contact maintainer\n");
269 return -1;
270 }
271 pr_info("Using node hash shift of %d\n", memnode_shift);
272
273 /* use the coreid bits from early_identify_cpu */
274 for_each_node_mask(i, node_possible_map)
275 memblock_x86_register_active_regions(i,
276 numa_nodes[i].start >> PAGE_SHIFT,
277 numa_nodes[i].end >> PAGE_SHIFT);
278 init_memory_mapping_high(); 267 init_memory_mapping_high();
279 for_each_node_mask(i, node_possible_map) 268 for_each_node_mask(i, node_possible_map)
280 setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end); 269 setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index a1d702d2584c..552080e8472b 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -131,8 +131,8 @@ static int __init extract_lsb_from_nodes(const struct bootnode *nodes,
131 return i; 131 return i;
132} 132}
133 133
134int __init compute_hash_shift(struct bootnode *nodes, int numnodes, 134static int __init compute_hash_shift(struct bootnode *nodes, int numnodes,
135 int *nodeids) 135 int *nodeids)
136{ 136{
137 int shift; 137 int shift;
138 138
@@ -287,7 +287,7 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
287 node_set_online(nodeid); 287 node_set_online(nodeid);
288} 288}
289 289
290int __init numa_register_memblks(void) 290static int __init numa_register_memblks(void)
291{ 291{
292 int i; 292 int i;
293 293
@@ -713,17 +713,13 @@ static int dummy_numa_init(void)
713 713
714 node_set(0, cpu_nodes_parsed); 714 node_set(0, cpu_nodes_parsed);
715 node_set(0, mem_nodes_parsed); 715 node_set(0, mem_nodes_parsed);
716 numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);
716 717
717 return 0; 718 return 0;
718} 719}
719 720
720static int dummy_scan_nodes(void) 721static int dummy_scan_nodes(void)
721{ 722{
722 /* setup dummy node covering all memory */
723 memnode_shift = 63;
724 memnodemap = memnode.embedded_map;
725 memnodemap[0] = 0;
726 memblock_x86_register_active_regions(0, 0, max_pfn);
727 init_memory_mapping_high(); 723 init_memory_mapping_high();
728 setup_node_bootmem(0, 0, max_pfn << PAGE_SHIFT); 724 setup_node_bootmem(0, 0, max_pfn << PAGE_SHIFT);
729 numa_init_array(); 725 numa_init_array();
@@ -784,6 +780,9 @@ void __init initmem_init(void)
784 if (WARN_ON(nodes_empty(node_possible_map))) 780 if (WARN_ON(nodes_empty(node_possible_map)))
785 continue; 781 continue;
786 782
783 if (numa_register_memblks() < 0)
784 continue;
785
787 if (!scan_nodes[i]()) 786 if (!scan_nodes[i]())
788 return; 787 return;
789 } 788 }
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 341b37193c76..69f147116da7 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -308,11 +308,6 @@ int __init acpi_scan_nodes(void)
308 if (acpi_numa <= 0) 308 if (acpi_numa <= 0)
309 return -1; 309 return -1;
310 310
311 if (numa_register_memblks() < 0) {
312 bad_srat();
313 return -1;
314 }
315
316 /* for out of order entries in SRAT */ 311 /* for out of order entries in SRAT */
317 sort_node_map(); 312 sort_node_map();
318 if (!nodes_cover_memory(numa_nodes)) { 313 if (!nodes_cover_memory(numa_nodes)) {