author     Tejun Heo <tj@kernel.org>        2011-01-23 08:37:39 -0500
committer  Ingo Molnar <mingo@elte.hu>      2011-01-28 08:54:09 -0500
commit     bbc9e2f452d9c4b166d1f9a78d941d80173312fe
tree       d75d41187b296235f833e942ed8c1dd938a7bae4
parent     89e5dc218e084e13a3996db6693b01478912f4ee
x86: Unify cpu/apicid <-> NUMA node mapping between 32 and 64bit
The mapping between cpu/apicid and node is done via
apicid_to_node[] on 64bit and apicid_2_node[] +
apic->x86_32_numa_cpu_node() on 32bit. This difference makes it
difficult to further unify 32 and 64bit NUMA handling.
This patch unifies it by replacing both apicid_to_node[] and
apicid_2_node[] with the __apicid_to_node[] array, which is accessed
by two accessors - set_apicid_to_node() and numa_cpu_node(). On
64bit, numa_cpu_node() always consults __apicid_to_node[]
directly, while 32bit goes through the apic->numa_cpu_node() method
to allow apic implementations to override it.
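For reference, the new scheme boils down to the following sketch,
condensed from the asm/numa.h and numa_64.c hunks below (not the
literal code, annotations and ifdefs dropped):

	/* Raw physical apicid -> node table, NUMA_NO_NODE by default. */
	extern s16 __apicid_to_node[MAX_LOCAL_APIC];

	static inline void set_apicid_to_node(int apicid, s16 node)
	{
		__apicid_to_node[apicid] = node;	/* record apicid -> node */
	}

	/* 64bit: consult the array directly; 32bit routes through apic ops. */
	int numa_cpu_node(int cpu)
	{
		int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

		return (apicid != BAD_APICID) ?
			__apicid_to_node[apicid] : NUMA_NO_NODE;
	}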
srat_detect_node() for AMD CPUs contains a workaround for broken
NUMA configurations which assumes a relationship between APIC ID,
HT node ID and NUMA topology. Leave it accessing
__apicid_to_node[] directly, as mapping through the CPU might
result in an undesirable behavior change. The comment is
reformatted and updated to note the ugliness.
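The resulting fallback order on AMD, condensed from the
arch/x86/kernel/cpu/amd.c hunk below (not the literal code):

	static void srat_detect_node(struct cpuinfo_x86 *c)
	{
		int cpu = smp_processor_id();
		unsigned apicid = c->apicid;
		int node;

		node = numa_cpu_node(cpu);		/* 1. regular apicid -> node mapping */
		if (node == NUMA_NO_NODE)
			node = per_cpu(cpu_llc_id, cpu);	/* 2. fall back to the LLC id */

		if (!node_online(node)) {
			/* 3. broken-BIOS workaround: peek at __apicid_to_node[] directly */
			int ht_nodeid = c->initial_apicid;

			if (ht_nodeid >= 0 &&
			    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
				node = __apicid_to_node[ht_nodeid];
			if (!node_online(node))
				node = nearby_node(apicid);	/* 4. last resort: nearby CPU's node */
		}
		numa_set_node(cpu, node);
	}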
Signed-off-by: Tejun Heo <tj@kernel.org>
Reviewed-by: Pekka Enberg <penberg@kernel.org>
Cc: eric.dumazet@gmail.com
Cc: yinghai@kernel.org
Cc: brgerst@gmail.com
Cc: gorcunov@gmail.com
Cc: shaohui.zheng@intel.com
Cc: rientjes@google.com
LKML-Reference: <1295789862-25482-14-git-send-email-tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: David Rientjes <rientjes@google.com>
-rw-r--r--  arch/x86/include/asm/mpspec.h   |  1
-rw-r--r--  arch/x86/include/asm/numa.h     | 28
-rw-r--r--  arch/x86/include/asm/numa_32.h  |  6
-rw-r--r--  arch/x86/include/asm/numa_64.h  |  5
-rw-r--r--  arch/x86/kernel/acpi/boot.c     |  3
-rw-r--r--  arch/x86/kernel/apic/apic.c     |  2
-rw-r--r--  arch/x86/kernel/cpu/amd.c       | 47
-rw-r--r--  arch/x86/kernel/cpu/intel.c     |  3
-rw-r--r--  arch/x86/kernel/smpboot.c       |  6
-rw-r--r--  arch/x86/mm/amdtopology_64.c    |  4
-rw-r--r--  arch/x86/mm/numa.c              |  6
-rw-r--r--  arch/x86/mm/numa_32.c           |  6
-rw-r--r--  arch/x86/mm/numa_64.c           | 26
-rw-r--r--  arch/x86/mm/srat_32.c           |  2
-rw-r--r--  arch/x86/mm/srat_64.c           | 12
15 files changed, 101 insertions, 56 deletions
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index edc2a455b726..9c7d95f6174b 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -25,7 +25,6 @@ extern int pic_mode;
 #define MAX_IRQ_SOURCES 256
 
 extern unsigned int def_to_bigsmp;
-extern u8 apicid_2_node[];
 
 #ifdef CONFIG_X86_NUMAQ
 extern int mp_bus_id_to_node[MAX_MP_BUSSES];
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 27da400d3138..5e01c768a575 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -1,5 +1,33 @@
+#ifndef _ASM_X86_NUMA_H
+#define _ASM_X86_NUMA_H
+
+#include <asm/apicdef.h>
+
+#ifdef CONFIG_NUMA
+/*
+ * __apicid_to_node[] stores the raw mapping between physical apicid and
+ * node and is used to initialize cpu_to_node mapping.
+ *
+ * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
+ * should be accessed by the accessors - set_apicid_to_node() and
+ * numa_cpu_node().
+ */
+extern s16 __apicid_to_node[MAX_LOCAL_APIC];
+
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+	__apicid_to_node[apicid] = node;
+}
+#else	/* CONFIG_NUMA */
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+}
+#endif	/* CONFIG_NUMA */
+
 #ifdef CONFIG_X86_32
 # include "numa_32.h"
 #else
 # include "numa_64.h"
 #endif
+
+#endif	/* _ASM_X86_NUMA_H */
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index b0ef2b449a9d..cdf8043d7a1a 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -6,6 +6,12 @@ extern int numa_off;
 extern int pxm_to_nid(int pxm);
 extern void numa_remove_cpu(int cpu);
 
+#ifdef CONFIG_NUMA
+extern int __cpuinit numa_cpu_node(int apicid);
+#else	/* CONFIG_NUMA */
+static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
+#endif	/* CONFIG_NUMA */
+
 #ifdef CONFIG_HIGHMEM
 extern void set_highmem_pages_init(void);
 #else
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 0493be39607c..4982a9c08c2f 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -2,7 +2,6 @@
 #define _ASM_X86_NUMA_64_H
 
 #include <linux/nodemask.h>
-#include <asm/apicdef.h>
 
 struct bootnode {
 	u64 start;
@@ -17,8 +16,6 @@ extern int compute_hash_shift(struct bootnode *nodes, int numblks,
 extern void numa_init_array(void);
 extern int numa_off;
 
-extern s16 apicid_to_node[MAX_LOCAL_APIC];
-
 extern unsigned long numa_free_all_bootmem(void);
 extern void setup_node_bootmem(int nodeid, unsigned long start,
 			       unsigned long end);
@@ -32,6 +29,7 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
 #define NODE_MIN_SIZE (4*1024*1024)
 
 extern void __init init_cpu_to_node(void);
+extern int __cpuinit numa_cpu_node(int cpu);
 extern void __cpuinit numa_set_node(int cpu, int node);
 extern void __cpuinit numa_clear_node(int cpu);
 extern void __cpuinit numa_add_cpu(int cpu);
@@ -44,6 +42,7 @@ void numa_emu_cmdline(char *);
 #endif /* CONFIG_NUMA_EMU */
 #else
 static inline void init_cpu_to_node(void) { }
+static inline int numa_cpu_node(int cpu) { return NUMA_NO_NODE; }
 static inline void numa_set_node(int cpu, int node) { }
 static inline void numa_clear_node(int cpu) { }
 static inline void numa_add_cpu(int cpu, int node) { }
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b3a71137983a..a7bca59ec595 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -589,11 +589,10 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 	nid = acpi_get_node(handle);
 	if (nid == -1 || !node_online(nid))
 		return;
+	set_apicid_to_node(physid, nid);
 #ifdef CONFIG_X86_64
-	apicid_to_node[physid] = nid;
 	numa_set_node(cpu, nid);
 #else /* CONFIG_X86_32 */
-	apicid_2_node[physid] = nid;
 	cpu_to_node_map[cpu] = nid;
 #endif
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 0f4f3c152311..4686ea59b7a0 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -2026,7 +2026,7 @@ int default_x86_32_numa_cpu_node(int cpu)
 	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
 
 	if (apicid != BAD_APICID)
-		return apicid_2_node[apicid];
+		return __apicid_to_node[apicid];
 	return NUMA_NO_NODE;
 #else
 	return 0;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c7bedb83c5a..3cce8f2bb2e1 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -234,17 +234,21 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 #endif
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+/*
+ * To workaround broken NUMA config.  Read the comment in
+ * srat_detect_node().
+ */
 static int __cpuinit nearby_node(int apicid)
 {
 	int i, node;
 
 	for (i = apicid - 1; i >= 0; i--) {
-		node = apicid_to_node[i];
+		node = __apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-		node = apicid_to_node[i];
+		node = __apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
@@ -339,26 +343,35 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 	int node;
 	unsigned apicid = c->apicid;
 
-	node = per_cpu(cpu_llc_id, cpu);
+	node = numa_cpu_node(cpu);
+	if (node == NUMA_NO_NODE)
+		node = per_cpu(cpu_llc_id, cpu);
 
-	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-		node = apicid_to_node[apicid];
 	if (!node_online(node)) {
-		/* Two possibilities here:
-		   - The CPU is missing memory and no node was created.
-		     In that case try picking one from a nearby CPU
-		   - The APIC IDs differ from the HyperTransport node IDs
-		     which the K8 northbridge parsing fills in.
-		     Assume they are all increased by a constant offset,
-		     but in the same order as the HT nodeids.
-		     If that doesn't result in a usable node fall back to the
-		     path for the previous case.  */
-
+		/*
+		 * Two possibilities here:
+		 *
+		 * - The CPU is missing memory and no node was created.  In
+		 *   that case try picking one from a nearby CPU.
+		 *
+		 * - The APIC IDs differ from the HyperTransport node IDs
+		 *   which the K8 northbridge parsing fills in.  Assume
+		 *   they are all increased by a constant offset, but in
+		 *   the same order as the HT nodeids.  If that doesn't
+		 *   result in a usable node fall back to the path for the
+		 *   previous case.
+		 *
+		 * This workaround operates directly on the mapping between
+		 * APIC ID and NUMA node, assuming certain relationship
+		 * between APIC ID, HT node ID and NUMA topology.  As going
+		 * through CPU mapping may alter the outcome, directly
+		 * access __apicid_to_node[].
+		 */
 		int ht_nodeid = c->initial_apicid;
 
 		if (ht_nodeid >= 0 &&
-		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-			node = apicid_to_node[ht_nodeid];
+		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+			node = __apicid_to_node[ht_nodeid];
 		/* Pick a nearby node */
 		if (!node_online(node))
 			node = nearby_node(apicid);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index d16c2c53d6bf..6052004bf4f4 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -279,11 +279,10 @@ static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
 	unsigned node;
 	int cpu = smp_processor_id();
-	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
 
 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */
-	node = apicid_to_node[apicid];
+	node = numa_cpu_node(cpu);
 	if (node == NUMA_NO_NODE || !node_online(node)) {
 		/* reuse the value from init_cpu_to_node() */
 		node = cpu_to_node(cpu);
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5319cdd53765..b7cfce535cb0 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -71,10 +71,6 @@
 #include <asm/smpboot_hooks.h>
 #include <asm/i8259.h>
 
-#ifdef CONFIG_X86_32
-u8 apicid_2_node[MAX_LOCAL_APIC];
-#endif
-
 /* State of each CPU */
 DEFINE_PER_CPU(int, cpu_state) = { 0 };
 
@@ -170,7 +166,7 @@ static void map_cpu_to_logical_apicid(void)
 	int cpu = smp_processor_id();
 	int node;
 
-	node = apic->x86_32_numa_cpu_node(cpu);
+	node = numa_cpu_node(cpu);
 	if (!node_online(node))
 		node = first_online_node;
 
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index f21962c435ed..c7fae38c4080 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -247,7 +247,7 @@ void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
 		__acpi_map_pxm_to_node(nid, i);
 #endif
 	}
-	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
+	memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
 }
 #endif /* CONFIG_NUMA_EMU */
 
@@ -285,7 +285,7 @@ int __init amd_scan_nodes(void)
 			nodes[i].start >> PAGE_SHIFT,
 			nodes[i].end >> PAGE_SHIFT);
 		for (j = apicid_base; j < cores + apicid_base; j++)
-			apicid_to_node[(i << bits) + j] = i;
+			set_apicid_to_node((i << bits) + j, i);
 		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
 	}
 
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index ebf6d7887a38..480b3571c8b1 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -26,8 +26,12 @@ static __init int numa_setup(char *opt)
 early_param("numa", numa_setup);
 
 /*
- * Which logical CPUs are on which nodes
+ * apicid, cpu, node mappings
  */
+s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
+	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
+};
+
 cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 EXPORT_SYMBOL(node_to_cpumask_map);
 
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 84a3e4c9f277..8d91d227be09 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -110,6 +110,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
 
 static unsigned long kva_start_pfn;
 static unsigned long kva_pages;
+
+int __cpuinit numa_cpu_node(int cpu)
+{
+	return apic->x86_32_numa_cpu_node(cpu);
+}
+
 /*
  * FLAT - support for basic PC memory model with discontig enabled, essentially
  *	a single node with all available processors in it with a flat
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 95ea1551eebc..1e1026f61a5a 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -26,10 +26,6 @@ EXPORT_SYMBOL(node_data);
 
 struct memnode memnode;
 
-s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
-	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
-};
-
 static unsigned long __initdata nodemap_addr;
 static unsigned long __initdata nodemap_size;
 
@@ -716,12 +712,8 @@ void __init init_cpu_to_node(void)
 	BUG_ON(cpu_to_apicid == NULL);
 
 	for_each_possible_cpu(cpu) {
-		int node;
-		u16 apicid = cpu_to_apicid[cpu];
+		int node = numa_cpu_node(cpu);
 
-		if (apicid == BAD_APICID)
-			continue;
-		node = apicid_to_node[apicid];
 		if (node == NUMA_NO_NODE)
 			continue;
 		if (!node_online(node))
@@ -731,6 +723,14 @@ void __init init_cpu_to_node(void)
 }
 #endif
 
+int __cpuinit numa_cpu_node(int cpu)
+{
+	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+
+	if (apicid != BAD_APICID)
+		return __apicid_to_node[apicid];
+	return NUMA_NO_NODE;
+}
 
 void __cpuinit numa_set_node(int cpu, int node)
 {
@@ -776,13 +776,9 @@ void __cpuinit numa_remove_cpu(int cpu)
 void __cpuinit numa_add_cpu(int cpu)
 {
 	unsigned long addr;
-	u16 apicid;
-	int physnid;
-	int nid = NUMA_NO_NODE;
+	int physnid, nid;
 
-	apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
-	if (apicid != BAD_APICID)
-		nid = apicid_to_node[apicid];
+	nid = numa_cpu_node(cpu);
 	if (nid == NUMA_NO_NODE)
 		nid = early_cpu_to_node(cpu);
 	BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index 6027a4810003..48651c6f657d 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -255,7 +255,7 @@ int __init get_memcfg_from_srat(void)
 			 num_memory_chunks);
 
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
-		apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]);
+		set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i]));
 
 	for (j = 0; j < num_memory_chunks; j++){
 		struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 603d285d1daa..9a97261a2418 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -79,7 +79,7 @@ static __init void bad_srat(void)
 	printk(KERN_ERR "SRAT: SRAT not used.\n");
 	acpi_numa = -1;
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
-		apicid_to_node[i] = NUMA_NO_NODE;
+		set_apicid_to_node(i, NUMA_NO_NODE);
 	for (i = 0; i < MAX_NUMNODES; i++) {
 		nodes[i].start = nodes[i].end = 0;
 		nodes_add[i].start = nodes_add[i].end = 0;
@@ -138,7 +138,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
 		printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
 		return;
 	}
-	apicid_to_node[apic_id] = node;
+	set_apicid_to_node(apic_id, node);
 	node_set(node, cpu_nodes_parsed);
 	acpi_numa = 1;
 	printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
@@ -178,7 +178,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
 		return;
 	}
 
-	apicid_to_node[apic_id] = node;
+	set_apicid_to_node(apic_id, node);
 	node_set(node, cpu_nodes_parsed);
 	acpi_numa = 1;
 	printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
@@ -521,7 +521,7 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
 		 * node, it must now point to the fake node ID.
		 */
		for (j = 0; j < MAX_LOCAL_APIC; j++)
-			if (apicid_to_node[j] == nid &&
+			if (__apicid_to_node[j] == nid &&
 			    fake_apicid_to_node[j] == NUMA_NO_NODE)
 				fake_apicid_to_node[j] = i;
 	}
@@ -532,13 +532,13 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
 	 * value.
 	 */
 	for (i = 0; i < MAX_LOCAL_APIC; i++)
-		if (apicid_to_node[i] != NUMA_NO_NODE &&
+		if (__apicid_to_node[i] != NUMA_NO_NODE &&
 		    fake_apicid_to_node[i] == NUMA_NO_NODE)
 			fake_apicid_to_node[i] = 0;
 
 	for (i = 0; i < num_nodes; i++)
 		__acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
-	memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node));
+	memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
 
 	nodes_clear(nodes_parsed);
 	for (i = 0; i < num_nodes; i++)