author		Tejun Heo <tj@kernel.org>	2011-02-16 06:13:07 -0500
committer	Tejun Heo <tj@kernel.org>	2011-02-16 06:13:07 -0500
commit		206e42087a037fa3adca8908fd318a0cb64d4dee (patch)
tree		044cc262b03c62064a65d13b119b6f73da5c22f4 /arch/x86/mm/srat_64.c
parent		45fe6c78c4ccc384044d1b4877eebe7acf359e76 (diff)
x86-64, NUMA: Use common numa_nodes[]
The ACPI and amd NUMA init methods each use their own nodes[] array.
Add a common numa_nodes[] and use it in all NUMA init methods.  The
cutoff_node() cleanup is moved from srat_64.c to numa_64.c and applied
in initmem_init() regardless of the init method.
Signed-off-by: Tejun Heo <tj@kernel.org>
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: Cyrill Gorcunov <gorcunov@gmail.com>
Cc: Shaohui Zheng <shaohui.zheng@intel.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: H. Peter Anvin <hpa@linux.intel.com>
Diffstat (limited to 'arch/x86/mm/srat_64.c')
-rw-r--r--	arch/x86/mm/srat_64.c	43
1 file changed, 11 insertions, 32 deletions
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 33e72ec4fa4..bfa4a6af5cf 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -28,7 +28,6 @@ int acpi_numa __initdata;
 
 static struct acpi_table_slit *acpi_slit;
 
-static struct bootnode nodes[MAX_NUMNODES] __initdata;
 static struct bootnode nodes_add[MAX_NUMNODES];
 
 static int num_node_memblks __initdata;
@@ -55,29 +54,13 @@ static __init int conflicting_memblks(unsigned long start, unsigned long end)
 	return -1;
 }
 
-static __init void cutoff_node(int i, unsigned long start, unsigned long end)
-{
-	struct bootnode *nd = &nodes[i];
-
-	if (nd->start < start) {
-		nd->start = start;
-		if (nd->end < nd->start)
-			nd->start = nd->end;
-	}
-	if (nd->end > end) {
-		nd->end = end;
-		if (nd->start > nd->end)
-			nd->start = nd->end;
-	}
-}
-
 static __init void bad_srat(void)
 {
 	int i;
 	printk(KERN_ERR "SRAT: SRAT not used.\n");
 	acpi_numa = -1;
 	for (i = 0; i < MAX_NUMNODES; i++) {
-		nodes[i].start = nodes[i].end = 0;
+		numa_nodes[i].start = numa_nodes[i].end = 0;
 		nodes_add[i].start = nodes_add[i].end = 0;
 	}
 	remove_all_active_ranges();
@@ -276,12 +259,12 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 	if (i == node) {
 		printk(KERN_WARNING
 		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
-			pxm, start, end, nodes[i].start, nodes[i].end);
+			pxm, start, end, numa_nodes[i].start, numa_nodes[i].end);
 	} else if (i >= 0) {
 		printk(KERN_ERR
 		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
 			pxm, start, end, node_to_pxm(i),
-			nodes[i].start, nodes[i].end);
+			numa_nodes[i].start, numa_nodes[i].end);
 		bad_srat();
 		return;
 	}
@@ -290,7 +273,7 @@ acpi_numa_memory_affinity_init(struct acpi_srat_mem_affinity *ma)
 	       start, end);
 
 	if (!(ma->flags & ACPI_SRAT_MEM_HOT_PLUGGABLE)) {
-		nd = &nodes[node];
+		nd = &numa_nodes[node];
 		if (!node_test_and_set(node, mem_nodes_parsed)) {
 			nd->start = start;
 			nd->end = end;
@@ -347,9 +330,8 @@ void __init acpi_get_nodes(struct bootnode *physnodes, unsigned long start,
 	int i;
 
 	for_each_node_mask(i, mem_nodes_parsed) {
-		cutoff_node(i, start, end);
-		physnodes[i].start = nodes[i].start;
-		physnodes[i].end = nodes[i].end;
+		physnodes[i].start = numa_nodes[i].start;
+		physnodes[i].end = numa_nodes[i].end;
 	}
 }
 #endif /* CONFIG_NUMA_EMU */
@@ -372,10 +354,6 @@ int __init acpi_scan_nodes(void)
 	if (acpi_numa <= 0)
 		return -1;
 
-	/* First clean up the node list */
-	for (i = 0; i < MAX_NUMNODES; i++)
-		cutoff_node(i, 0, max_pfn << PAGE_SHIFT);
-
 	/*
 	 * Join together blocks on the same node, holes between
 	 * which don't overlap with memory on other nodes.
@@ -440,7 +418,7 @@ int __init acpi_scan_nodes(void)
 
 	/* for out of order entries in SRAT */
 	sort_node_map();
-	if (!nodes_cover_memory(nodes)) {
+	if (!nodes_cover_memory(numa_nodes)) {
 		bad_srat();
 		return -1;
 	}
@@ -449,12 +427,13 @@ int __init acpi_scan_nodes(void)
 
 	/* Finally register nodes */
 	for_each_node_mask(i, node_possible_map)
-		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+		setup_node_bootmem(i, numa_nodes[i].start, numa_nodes[i].end);
 	/* Try again in case setup_node_bootmem missed one due
 	   to missing bootmem */
 	for_each_node_mask(i, node_possible_map)
 		if (!node_online(i))
-			setup_node_bootmem(i, nodes[i].start, nodes[i].end);
+			setup_node_bootmem(i, numa_nodes[i].start,
+					   numa_nodes[i].end);
 
 	for (i = 0; i < nr_cpu_ids; i++) {
 		int node = early_cpu_to_node(i);
@@ -486,7 +465,7 @@ static int __init find_node_by_addr(unsigned long addr)
 	 * the sake of simplicity, we only use a real node's starting
 	 * address to determine which emulated node it appears on.
 	 */
-	if (addr >= nodes[i].start && addr < nodes[i].end) {
+	if (addr >= numa_nodes[i].start && addr < numa_nodes[i].end) {
 		ret = i;
 		break;
 	}
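
For reference, a minimal sketch of how the relocated helper might look in arch/x86/mm/numa_64.c: the body is the cutoff_node() removed above with nodes[] swapped for the shared numa_nodes[]; its placement and the initmem_init() call site are assumptions based on the commit message rather than hunks shown in this diff.

/*
 * Sketch only: clamp node i's range in the common numa_nodes[] to
 * [start, end), collapsing the node to an empty range if it lies
 * entirely outside the limits.  This is the helper removed from
 * srat_64.c in this patch, pointed at numa_nodes[] instead.
 */
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
	struct bootnode *nd = &numa_nodes[i];

	if (nd->start < start) {
		nd->start = start;
		if (nd->end < nd->start)
			nd->start = nd->end;
	}
	if (nd->end > end) {
		nd->end = end;
		if (nd->start > nd->end)
			nd->start = nd->end;
	}
}

Per the commit message, initmem_init() now applies this clamp for every node regardless of init method, mirroring the per-node loop over (0, max_pfn << PAGE_SHIFT) that this patch drops from acpi_scan_nodes().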