author     Tejun Heo <tj@kernel.org>	2011-02-16 11:11:09 -0500
committer  Tejun Heo <tj@kernel.org>	2011-02-16 11:11:09 -0500
commit     4697bdcc945c094d2c8a4876a24faeaf31a283e0 (patch)
tree       709191c472d60a7394ba6d775ef6548748ba621d /arch/x86/mm
parent     92d4a4371eeb89e1e12b9ebbed0956f499b6c2c0 (diff)
x86-64, NUMA: Kill mem_nodes_parsed
With all memory configuration information now carried in numa_meminfo,
there is no need to keep mem_nodes_parsed separate.  Drop it and use
numa_nodes_parsed for CPU / memory-less nodes.

A new helper, numa_nodemask_from_meminfo(), is added to calculate the
memnode mask on the fly; it is currently used to set node_possible_map.

This simplifies NUMA init methods a bit and removes a source of possible
inconsistencies.

Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
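For illustration, here is a minimal userspace sketch of the computation the new helper performs: walk the parsed memory blocks and mark every node that owns at least one non-empty block.  The types and names below (struct memblk, nodemask_from_meminfo, NO_NODE, the uint64_t bitmask) are simplified stand-ins, not the kernel's numa_meminfo / nodemask_t definitions; the real code is in the numa_64.c hunk further down.

/* Sketch only: userspace stand-ins for the kernel's meminfo/nodemask types. */
#include <stdint.h>
#include <stdio.h>

#define NR_BLKS  8
#define NO_NODE  -1

struct memblk { uint64_t start, end; int nid; };

/* Set a bit for every node that has at least one non-empty memory block. */
static void nodemask_from_meminfo(uint64_t *mask,
				  const struct memblk *blk, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		if (blk[i].start != blk[i].end && blk[i].nid != NO_NODE)
			*mask |= 1ULL << blk[i].nid;
}

int main(void)
{
	struct memblk blk[NR_BLKS] = {
		{ 0x00000000, 0x80000000, 0 },	/* node 0 has memory    */
		{ 0x80000000, 0x80000000, 1 },	/* empty block, skipped */
	};
	uint64_t mask = 0;

	nodemask_from_meminfo(&mask, blk, NR_BLKS);
	printf("memory nodes mask: 0x%llx\n", (unsigned long long)mask);
	return 0;
}

Run against the sample blocks this prints "memory nodes mask: 0x1": only node 0 is marked, which mirrors how the patch derives node_possible_map from numa_meminfo instead of maintaining mem_nodes_parsed by hand.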
Diffstat (limited to 'arch/x86/mm')
 -rw-r--r--  arch/x86/mm/amdtopology_64.c |  5
 -rw-r--r--  arch/x86/mm/numa_64.c        | 20
 -rw-r--r--  arch/x86/mm/srat_64.c        |  7
 3 files changed, 20 insertions(+), 12 deletions(-)
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index e76bffabc09d..fd7b609025ba 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -122,7 +122,7 @@ int __init amd_numa_init(void)
 			       nodeid, (base >> 8) & 3, (limit >> 8) & 3);
 			return -EINVAL;
 		}
-		if (node_isset(nodeid, mem_nodes_parsed)) {
+		if (node_isset(nodeid, numa_nodes_parsed)) {
 			pr_info("Node %d already present, skipping\n",
 				nodeid);
 			continue;
@@ -167,11 +167,10 @@ int __init amd_numa_init(void)
 
 		prevbase = base;
 		numa_add_memblk(nodeid, base, limit);
-		node_set(nodeid, mem_nodes_parsed);
 		node_set(nodeid, numa_nodes_parsed);
 	}
 
-	if (!nodes_weight(mem_nodes_parsed))
+	if (!nodes_weight(numa_nodes_parsed))
 		return -ENOENT;
 
 	/*
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 6e4fbd777564..8b1f178a866e 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -37,7 +37,6 @@ struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
 EXPORT_SYMBOL(node_data);
 
 nodemask_t numa_nodes_parsed __initdata;
-nodemask_t mem_nodes_parsed __initdata;
 
 struct memnode memnode;
 
@@ -344,6 +343,20 @@ static int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
 }
 
 /*
+ * Set nodes, which have memory in @mi, in *@nodemask.
+ */
+static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
+					      const struct numa_meminfo *mi)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
+		if (mi->blk[i].start != mi->blk[i].end &&
+		    mi->blk[i].nid != NUMA_NO_NODE)
+			node_set(mi->blk[i].nid, *nodemask);
+}
+
+/*
  * Sanity check to catch more bad NUMA configurations (they are amazingly
  * common).  Make sure the nodes cover all memory.
  */
@@ -379,7 +392,8 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
 	int i, j, nid;
 
 	/* Account for nodes with cpus and no memory */
-	nodes_or(node_possible_map, mem_nodes_parsed, numa_nodes_parsed);
+	node_possible_map = numa_nodes_parsed;
+	numa_nodemask_from_meminfo(&node_possible_map, mi);
 	if (WARN_ON(nodes_empty(node_possible_map)))
 		return -EINVAL;
 
@@ -824,7 +838,6 @@ static int dummy_numa_init(void)
 	       0LU, max_pfn << PAGE_SHIFT);
 
 	node_set(0, numa_nodes_parsed);
-	node_set(0, mem_nodes_parsed);
 	numa_add_memblk(0, 0, (u64)max_pfn << PAGE_SHIFT);
 
 	return 0;
@@ -852,7 +865,6 @@ void __init initmem_init(void)
 		set_apicid_to_node(j, NUMA_NO_NODE);
 
 	nodes_clear(numa_nodes_parsed);
-	nodes_clear(mem_nodes_parsed);
 	nodes_clear(node_possible_map);
 	nodes_clear(node_online_map);
 	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 8185189d34a2..4f8e6cde9bf6 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -238,9 +238,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 	printk(KERN_INFO "SRAT: Node %u PXM %u %lx-%lx\n", node, pxm,
 	       start, end);
 
-	if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE))
-		node_set(node, mem_nodes_parsed);
-	else
+	if (ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)
 		update_nodes_add(node, start, end);
 }
 
@@ -310,10 +308,9 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
 		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
 	memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
 
-	nodes_clear(mem_nodes_parsed);
 	for (i = 0; i < num_nodes; i++)
 		if (fake_nodes[i].start != fake_nodes[i].end)
-			node_set(i, mem_nodes_parsed);
+			node_set(i, numa_nodes_parsed);
 }
 
 static int null_slit_node_compare(int a, int b)