author    Ingo Molnar <mingo@elte.hu>    2011-02-16 03:44:04 -0500
committer Ingo Molnar <mingo@elte.hu>    2011-02-16 03:44:15 -0500
commit    52b8b8d7251f8f7b8ed4a6c623618106d83e18b5 (patch)
tree      13ee81932cba73deb69cd3f386d80e45e6ac23de /arch/x86
parent    02ac81a812fe0575a8475a93bdc22fb291ebad91 (diff)
parent    14392fd329eca9b59d51c0aa5d0acfb4965424d1 (diff)
Merge branch 'x86/numa' into x86/mm
Merge reason: consolidate it into the more generic x86/mm tree to prevent conflicts.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/Kconfig                       |    2
-rw-r--r--  arch/x86/include/asm/apic.h            |   36
-rw-r--r--  arch/x86/include/asm/ipi.h             |    8
-rw-r--r--  arch/x86/include/asm/mpspec.h          |    3
-rw-r--r--  arch/x86/include/asm/numa.h            |   49
-rw-r--r--  arch/x86/include/asm/numa_32.h         |    7
-rw-r--r--  arch/x86/include/asm/numa_64.h         |   16
-rw-r--r--  arch/x86/include/asm/smp.h             |    3
-rw-r--r--  arch/x86/include/asm/topology.h        |   17
-rw-r--r--  arch/x86/kernel/acpi/boot.c            |    8
-rw-r--r--  arch/x86/kernel/apic/apic.c            |   37
-rw-r--r--  arch/x86/kernel/apic/apic_flat_64.c    |    4
-rw-r--r--  arch/x86/kernel/apic/apic_noop.c       |   26
-rw-r--r--  arch/x86/kernel/apic/bigsmp_32.c       |   34
-rw-r--r--  arch/x86/kernel/apic/es7000_32.c       |   35
-rw-r--r--  arch/x86/kernel/apic/ipi.c             |   12
-rw-r--r--  arch/x86/kernel/apic/numaq_32.c        |   21
-rw-r--r--  arch/x86/kernel/apic/probe_32.c        |   10
-rw-r--r--  arch/x86/kernel/apic/summit_32.c       |   47
-rw-r--r--  arch/x86/kernel/apic/x2apic_cluster.c  |    2
-rw-r--r--  arch/x86/kernel/apic/x2apic_phys.c     |    2
-rw-r--r--  arch/x86/kernel/apic/x2apic_uv_x.c     |    2
-rw-r--r--  arch/x86/kernel/cpu/amd.c              |   51
-rw-r--r--  arch/x86/kernel/cpu/common.c           |    2
-rw-r--r--  arch/x86/kernel/cpu/intel.c            |    5
-rw-r--r--  arch/x86/kernel/setup.c                |    2
-rw-r--r--  arch/x86/kernel/setup_percpu.c         |   11
-rw-r--r--  arch/x86/kernel/smpboot.c              |   68
-rw-r--r--  arch/x86/mm/amdtopology_64.c           |    4
-rw-r--r--  arch/x86/mm/numa.c                     |  212
-rw-r--r--  arch/x86/mm/numa_32.c                  |    7
-rw-r--r--  arch/x86/mm/numa_64.c                  |  231
-rw-r--r--  arch/x86/mm/srat_32.c                  |    6
-rw-r--r--  arch/x86/mm/srat_64.c                  |   12
34 files changed, 504 insertions, 488 deletions
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
index d5ed94d30aad..95c36c474766 100644
--- a/arch/x86/Kconfig
+++ b/arch/x86/Kconfig
@@ -1705,7 +1705,7 @@ config HAVE_ARCH_EARLY_PFN_TO_NID
 	depends on NUMA
 
 config USE_PERCPU_NUMA_NODE_ID
-	def_bool X86_64
+	def_bool y
 	depends on NUMA
 
 menu "Power management and ACPI options"
diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
index 5e3969c36d7f..ad30ca4b6fe9 100644
--- a/arch/x86/include/asm/apic.h
+++ b/arch/x86/include/asm/apic.h
@@ -306,8 +306,6 @@ struct apic {
 
 	void (*setup_apic_routing)(void);
 	int (*multi_timer_check)(int apic, int irq);
-	int (*apicid_to_node)(int logical_apicid);
-	int (*cpu_to_logical_apicid)(int cpu);
 	int (*cpu_present_to_apicid)(int mps_cpu);
 	void (*apicid_to_cpu_present)(int phys_apicid, physid_mask_t *retmap);
 	void (*setup_portio_remap)(void);
@@ -355,6 +353,23 @@ struct apic {
 	void (*icr_write)(u32 low, u32 high);
 	void (*wait_icr_idle)(void);
 	u32 (*safe_wait_icr_idle)(void);
+
+#ifdef CONFIG_X86_32
+	/*
+	 * Called very early during boot from get_smp_config().  It should
+	 * return the logical apicid.  x86_[bios]_cpu_to_apicid is
+	 * initialized before this function is called.
+	 *
+	 * If logical apicid can't be determined that early, the function
+	 * may return BAD_APICID.  Logical apicid will be configured after
+	 * init_apic_ldr() while bringing up CPUs.  Note that NUMA affinity
+	 * won't be applied properly during early boot in this case.
+	 */
+	int (*x86_32_early_logical_apicid)(int cpu);
+
+	/* determine CPU -> NUMA node mapping */
+	int (*x86_32_numa_cpu_node)(int cpu);
+#endif
 };
 
 /*
@@ -502,6 +517,11 @@ extern struct apic apic_noop;
 
 extern struct apic apic_default;
 
+static inline int noop_x86_32_early_logical_apicid(int cpu)
+{
+	return BAD_APICID;
+}
+
 /*
  * Set up the logical destination ID.
  *
@@ -521,7 +541,7 @@ static inline int default_phys_pkg_id(int cpuid_apic, int index_msb)
 	return cpuid_apic >> index_msb;
 }
 
-extern int default_apicid_to_node(int logical_apicid);
+extern int default_x86_32_numa_cpu_node(int cpu);
 
 #endif
 
@@ -557,12 +577,6 @@ static inline void default_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 	*retmap = *phys_map;
 }
 
-/* Mapping from cpu number to logical apicid */
-static inline int default_cpu_to_logical_apicid(int cpu)
-{
-	return 1 << cpu;
-}
-
 static inline int __default_cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < nr_cpu_ids && cpu_present(mps_cpu))
@@ -595,8 +609,4 @@ extern int default_check_phys_apicid_present(int phys_apicid);
 
 #endif /* CONFIG_X86_LOCAL_APIC */
 
-#ifdef CONFIG_X86_32
-extern u8 cpu_2_logical_apicid[NR_CPUS];
-#endif
-
 #endif /* _ASM_X86_APIC_H */
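The block comment added to struct apic above spells out the contract for the new x86_32_early_logical_apicid() callback: it runs very early, may return BAD_APICID, and the per-CPU value is later overwritten from the LDR once init_apic_ldr() has run. Below is a minimal userspace sketch of that lifecycle; every name in it is illustrative, none of it is kernel API:

/*
 * Userspace model of the early logical-apicid lifecycle (assumption:
 * a plain array stands in for the early percpu map).
 */
#include <stdio.h>

#define NR_CPUS    4
#define BAD_APICID (-1)

static int x86_cpu_to_logical_apicid[NR_CPUS];

/* models the default apic's callback: flat logical destination mode */
static int default_early_logical_apicid(int cpu)
{
	return 1 << cpu;
}

int main(void)
{
	int cpu;

	/* DEFINE_EARLY_PER_CPU(..., BAD_APICID): everything starts unknown */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		x86_cpu_to_logical_apicid[cpu] = BAD_APICID;

	/* generic_processor_info() seeds the map per registered CPU */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		x86_cpu_to_logical_apicid[cpu] = default_early_logical_apicid(cpu);

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		printf("cpu%d -> logical apicid 0x%x\n",
		       cpu, x86_cpu_to_logical_apicid[cpu]);
	return 0;
}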
diff --git a/arch/x86/include/asm/ipi.h b/arch/x86/include/asm/ipi.h
index 0b7228268a63..615fa9061b57 100644
--- a/arch/x86/include/asm/ipi.h
+++ b/arch/x86/include/asm/ipi.h
@@ -123,10 +123,6 @@ extern void default_send_IPI_mask_sequence_phys(const struct cpumask *mask,
 						 int vector);
 extern void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
 						 int vector);
-extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
-						 int vector);
-extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
-						 int vector);
 
 /* Avoid include hell */
 #define NMI_VECTOR 0x02
@@ -150,6 +146,10 @@ static inline void __default_local_send_IPI_all(int vector)
 }
 
 #ifdef CONFIG_X86_32
+extern void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
+						 int vector);
+extern void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
+						 int vector);
 extern void default_send_IPI_mask_logical(const struct cpumask *mask,
 					 int vector);
 extern void default_send_IPI_allbutself(int vector);
diff --git a/arch/x86/include/asm/mpspec.h b/arch/x86/include/asm/mpspec.h
index 0c90dd9f0505..9c7d95f6174b 100644
--- a/arch/x86/include/asm/mpspec.h
+++ b/arch/x86/include/asm/mpspec.h
@@ -25,7 +25,6 @@ extern int pic_mode;
 #define MAX_IRQ_SOURCES 256
 
 extern unsigned int def_to_bigsmp;
-extern u8 apicid_2_node[];
 
 #ifdef CONFIG_X86_NUMAQ
 extern int mp_bus_id_to_node[MAX_MP_BUSSES];
@@ -33,8 +32,6 @@ extern int mp_bus_id_to_local[MAX_MP_BUSSES];
 extern int quad_local_to_mp_bus_id [NR_CPUS/4][4];
 #endif
 
-#define MAX_APICID 256
-
 #else /* CONFIG_X86_64: */
 
 #define MAX_MP_BUSSES 256
diff --git a/arch/x86/include/asm/numa.h b/arch/x86/include/asm/numa.h
index 27da400d3138..26fc6e2dd0fb 100644
--- a/arch/x86/include/asm/numa.h
+++ b/arch/x86/include/asm/numa.h
@@ -1,5 +1,54 @@
+#ifndef _ASM_X86_NUMA_H
+#define _ASM_X86_NUMA_H
+
+#include <asm/topology.h>
+#include <asm/apicdef.h>
+
+#ifdef CONFIG_NUMA
+/*
+ * __apicid_to_node[] stores the raw mapping between physical apicid and
+ * node and is used to initialize cpu_to_node mapping.
+ *
+ * The mapping may be overridden by apic->numa_cpu_node() on 32bit and thus
+ * should be accessed by the accessors - set_apicid_to_node() and
+ * numa_cpu_node().
+ */
+extern s16 __apicid_to_node[MAX_LOCAL_APIC];
+
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+	__apicid_to_node[apicid] = node;
+}
+#else	/* CONFIG_NUMA */
+static inline void set_apicid_to_node(int apicid, s16 node)
+{
+}
+#endif	/* CONFIG_NUMA */
+
 #ifdef CONFIG_X86_32
 # include "numa_32.h"
 #else
 # include "numa_64.h"
 #endif
+
+#ifdef CONFIG_NUMA
+extern void __cpuinit numa_set_node(int cpu, int node);
+extern void __cpuinit numa_clear_node(int cpu);
+extern void __init numa_init_array(void);
+extern void __init init_cpu_to_node(void);
+extern void __cpuinit numa_add_cpu(int cpu);
+extern void __cpuinit numa_remove_cpu(int cpu);
+#else	/* CONFIG_NUMA */
+static inline void numa_set_node(int cpu, int node)	{ }
+static inline void numa_clear_node(int cpu)		{ }
+static inline void numa_init_array(void)		{ }
+static inline void init_cpu_to_node(void)		{ }
+static inline void numa_add_cpu(int cpu)		{ }
+static inline void numa_remove_cpu(int cpu)		{ }
+#endif	/* CONFIG_NUMA */
+
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable);
+#endif
+
+#endif /* _ASM_X86_NUMA_H */
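The new numa.h centralizes the raw apicid-to-node table behind accessors, as the comment in the hunk explains. A small self-contained userspace sketch of that accessor contract, assuming a NUMA_NO_NODE sentinel of -1 and an illustrative SRAT-style seeding (values are made up, not taken from this patch):

#include <stdio.h>

#define MAX_LOCAL_APIC 256
#define NUMA_NO_NODE   (-1)
#define BAD_APICID     (-1)

static short __apicid_to_node[MAX_LOCAL_APIC];

static void set_apicid_to_node(int apicid, short node)
{
	__apicid_to_node[apicid] = node;
}

/* 64-bit flavour: resolve straight through the physical apicid */
static int numa_cpu_node(int apicid)
{
	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		__apicid_to_node[i] = NUMA_NO_NODE;

	set_apicid_to_node(0x10, 1);	/* e.g. learned from SRAT parsing */
	printf("node for apicid 0x10: %d\n", numa_cpu_node(0x10));
	printf("node for unknown id : %d\n", numa_cpu_node(BAD_APICID));
	return 0;
}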
diff --git a/arch/x86/include/asm/numa_32.h b/arch/x86/include/asm/numa_32.h
index b0ef2b449a9d..c6beed1ef103 100644
--- a/arch/x86/include/asm/numa_32.h
+++ b/arch/x86/include/asm/numa_32.h
@@ -4,7 +4,12 @@
 extern int numa_off;
 
 extern int pxm_to_nid(int pxm);
-extern void numa_remove_cpu(int cpu);
+
+#ifdef CONFIG_NUMA
+extern int __cpuinit numa_cpu_node(int cpu);
+#else	/* CONFIG_NUMA */
+static inline int numa_cpu_node(int cpu)		{ return NUMA_NO_NODE; }
+#endif	/* CONFIG_NUMA */
 
 #ifdef CONFIG_HIGHMEM
 extern void set_highmem_pages_init(void);
diff --git a/arch/x86/include/asm/numa_64.h b/arch/x86/include/asm/numa_64.h
index 0493be39607c..2819afa33632 100644
--- a/arch/x86/include/asm/numa_64.h
+++ b/arch/x86/include/asm/numa_64.h
@@ -2,7 +2,6 @@
 #define _ASM_X86_NUMA_64_H
 
 #include <linux/nodemask.h>
-#include <asm/apicdef.h>
 
 struct bootnode {
 	u64 start;
@@ -14,11 +13,8 @@ extern int compute_hash_shift(struct bootnode *nodes, int numblks,
 
 #define ZONE_ALIGN (1UL << (MAX_ORDER+PAGE_SHIFT))
 
-extern void numa_init_array(void);
 extern int numa_off;
 
-extern s16 apicid_to_node[MAX_LOCAL_APIC];
-
 extern unsigned long numa_free_all_bootmem(void);
 extern void setup_node_bootmem(int nodeid, unsigned long start,
 			       unsigned long end);
@@ -31,11 +27,7 @@ extern void setup_node_bootmem(int nodeid, unsigned long start,
  */
 #define NODE_MIN_SIZE (4*1024*1024)
 
-extern void __init init_cpu_to_node(void);
-extern void __cpuinit numa_set_node(int cpu, int node);
-extern void __cpuinit numa_clear_node(int cpu);
-extern void __cpuinit numa_add_cpu(int cpu);
-extern void __cpuinit numa_remove_cpu(int cpu);
+extern int __cpuinit numa_cpu_node(int cpu);
 
 #ifdef CONFIG_NUMA_EMU
 #define FAKE_NODE_MIN_SIZE ((u64)32 << 20)
@@ -43,11 +35,7 @@ extern void __cpuinit numa_remove_cpu(int cpu);
 void numa_emu_cmdline(char *);
 #endif /* CONFIG_NUMA_EMU */
 #else
-static inline void init_cpu_to_node(void)		{ }
-static inline void numa_set_node(int cpu, int node)	{ }
-static inline void numa_clear_node(int cpu)		{ }
-static inline void numa_add_cpu(int cpu, int node)	{ }
-static inline void numa_remove_cpu(int cpu)		{ }
+static inline int numa_cpu_node(int cpu)		{ return NUMA_NO_NODE; }
 #endif
 
 #endif /* _ASM_X86_NUMA_64_H */
diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
index 1f4695136776..b296ca6f40bb 100644
--- a/arch/x86/include/asm/smp.h
+++ b/arch/x86/include/asm/smp.h
@@ -38,6 +38,9 @@ static inline struct cpumask *cpu_core_mask(int cpu)
 
 DECLARE_EARLY_PER_CPU(u16, x86_cpu_to_apicid);
 DECLARE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid);
+#if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86_32)
+DECLARE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid);
+#endif
 
 /* Static state in head.S used to set up a CPU */
 extern unsigned long stack_start; /* Initial stack pointer address */
diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 21899cc31e52..b101c17861f5 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -47,21 +47,6 @@
 
 #include <asm/mpspec.h>
 
-#ifdef CONFIG_X86_32
-
-/* Mappings between logical cpu number and node number */
-extern int cpu_to_node_map[];
-
-/* Returns the number of the node containing CPU 'cpu' */
-static inline int __cpu_to_node(int cpu)
-{
-	return cpu_to_node_map[cpu];
-}
-#define early_cpu_to_node __cpu_to_node
-#define cpu_to_node __cpu_to_node
-
-#else /* CONFIG_X86_64 */
-
 /* Mappings between logical cpu number and node number */
 DECLARE_EARLY_PER_CPU(int, x86_cpu_to_node_map);
 
@@ -84,8 +69,6 @@ static inline int early_cpu_to_node(int cpu)
 
 #endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
 
-#endif /* CONFIG_X86_64 */
-
 /* Mappings between node number and cpus on that node. */
 extern cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
 
diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
index b3a71137983a..a2c512175395 100644
--- a/arch/x86/kernel/acpi/boot.c
+++ b/arch/x86/kernel/acpi/boot.c
@@ -589,14 +589,8 @@ static void acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
 	nid = acpi_get_node(handle);
 	if (nid == -1 || !node_online(nid))
 		return;
-#ifdef CONFIG_X86_64
-	apicid_to_node[physid] = nid;
+	set_apicid_to_node(physid, nid);
 	numa_set_node(cpu, nid);
-#else /* CONFIG_X86_32 */
-	apicid_2_node[physid] = nid;
-	cpu_to_node_map[cpu] = nid;
-#endif
-
 #endif
 }
 
diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
index 06c196d7e59c..1390cf985afd 100644
--- a/arch/x86/kernel/apic/apic.c
+++ b/arch/x86/kernel/apic/apic.c
@@ -78,6 +78,15 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #ifdef CONFIG_X86_32
+
+/*
+ * On x86_32, the mapping between cpu and logical apicid may vary
+ * depending on apic in use.  The following early percpu variable is
+ * used for the mapping.  This is where the behaviors of x86_64 and 32
+ * actually diverge.  Let's keep it ugly for now.
+ */
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_logical_apicid, BAD_APICID);
+
 /*
  * Knob to control our willingness to enable the local APIC.
  *
@@ -1237,6 +1246,19 @@ void __cpuinit setup_local_APIC(void)
 	 */
 	apic->init_apic_ldr();
 
+#ifdef CONFIG_X86_32
+	/*
+	 * APIC LDR is initialized.  If logical_apicid mapping was
+	 * initialized during get_smp_config(), make sure it matches the
+	 * actual value.
+	 */
+	i = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+	WARN_ON(i != BAD_APICID && i != logical_smp_processor_id());
+	/* always use the value from LDR */
+	early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
+		logical_smp_processor_id();
+#endif
+
 	/*
 	 * Set Task Priority to 'accept all'. We never change this
 	 * later on.
@@ -1972,7 +1994,10 @@ void __cpuinit generic_processor_info(int apicid, int version)
 	early_per_cpu(x86_cpu_to_apicid, cpu) = apicid;
 	early_per_cpu(x86_bios_cpu_apicid, cpu) = apicid;
 #endif
-
+#ifdef CONFIG_X86_32
+	early_per_cpu(x86_cpu_to_logical_apicid, cpu) =
+		apic->x86_32_early_logical_apicid(cpu);
+#endif
 	set_cpu_possible(cpu, true);
 	set_cpu_present(cpu, true);
 }
@@ -1993,10 +2018,14 @@ void default_init_apic_ldr(void)
 }
 
 #ifdef CONFIG_X86_32
-int default_apicid_to_node(int logical_apicid)
+int default_x86_32_numa_cpu_node(int cpu)
 {
-#ifdef CONFIG_SMP
-	return apicid_2_node[hard_smp_processor_id()];
+#ifdef CONFIG_NUMA
+	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
+
+	if (apicid != BAD_APICID)
+		return __apicid_to_node[apicid];
+	return NUMA_NO_NODE;
 #else
 	return 0;
 #endif
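The generic_processor_info() and setup_local_APIC() hunks above cooperate: the early map is seeded from the callback, then reconciled against the hardware LDR, which always wins. A hedged userspace sketch of that reconciliation step, with stub names standing in for the kernel functions:

#include <stdio.h>

#define NR_CPUS    4
#define BAD_APICID (-1)

static int early_map[NR_CPUS] = { BAD_APICID, BAD_APICID, BAD_APICID, BAD_APICID };

/* stand-in for logical_smp_processor_id(): what the LDR actually holds */
static int ldr_logical_id(int cpu)
{
	return 1 << cpu;
}

static void setup_local_apic(int cpu)
{
	int i = early_map[cpu];

	/* if the map was seeded early, it should agree with the LDR */
	if (i != BAD_APICID && i != ldr_logical_id(cpu))
		fprintf(stderr, "cpu%d: early logical apicid mismatch\n", cpu);

	early_map[cpu] = ldr_logical_id(cpu);	/* always trust the LDR */
}

int main(void)
{
	early_map[1] = 1 << 1;	/* seeded as in generic_processor_info() */
	setup_local_apic(1);
	printf("cpu1 logical apicid: 0x%x\n", early_map[1]);
	return 0;
}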
diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
index 09d3b17ce0c2..5652d31fe108 100644
--- a/arch/x86/kernel/apic/apic_flat_64.c
+++ b/arch/x86/kernel/apic/apic_flat_64.c
@@ -185,8 +185,6 @@ struct apic apic_flat = {
 	.ioapic_phys_id_map = NULL,
 	.setup_apic_routing = NULL,
 	.multi_timer_check = NULL,
-	.apicid_to_node = NULL,
-	.cpu_to_logical_apicid = NULL,
 	.cpu_present_to_apicid = default_cpu_present_to_apicid,
 	.apicid_to_cpu_present = NULL,
 	.setup_portio_remap = NULL,
@@ -337,8 +335,6 @@ struct apic apic_physflat = {
 	.ioapic_phys_id_map = NULL,
 	.setup_apic_routing = NULL,
 	.multi_timer_check = NULL,
-	.apicid_to_node = NULL,
-	.cpu_to_logical_apicid = NULL,
 	.cpu_present_to_apicid = default_cpu_present_to_apicid,
 	.apicid_to_cpu_present = NULL,
 	.setup_portio_remap = NULL,
diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
index e31b9ffe25f5..f1baa2dc087a 100644
--- a/arch/x86/kernel/apic/apic_noop.c
+++ b/arch/x86/kernel/apic/apic_noop.c
@@ -54,11 +54,6 @@ static u64 noop_apic_icr_read(void)
 	return 0;
 }
 
-static int noop_cpu_to_logical_apicid(int cpu)
-{
-	return 0;
-}
-
 static int noop_phys_pkg_id(int cpuid_apic, int index_msb)
 {
 	return 0;
@@ -113,12 +108,6 @@ static void noop_vector_allocation_domain(int cpu, struct cpumask *retmask)
 	cpumask_set_cpu(cpu, retmask);
 }
 
-int noop_apicid_to_node(int logical_apicid)
-{
-	/* we're always on node 0 */
-	return 0;
-}
-
 static u32 noop_apic_read(u32 reg)
 {
 	WARN_ON_ONCE((cpu_has_apic && !disable_apic));
@@ -130,6 +119,14 @@ static void noop_apic_write(u32 reg, u32 v)
 	WARN_ON_ONCE(cpu_has_apic && !disable_apic);
 }
 
+#ifdef CONFIG_X86_32
+static int noop_x86_32_numa_cpu_node(int cpu)
+{
+	/* we're always on node 0 */
+	return 0;
+}
+#endif
+
 struct apic apic_noop = {
 	.name = "noop",
 	.probe = noop_probe,
@@ -153,9 +150,7 @@ struct apic apic_noop = {
 	.ioapic_phys_id_map = default_ioapic_phys_id_map,
 	.setup_apic_routing = NULL,
 	.multi_timer_check = NULL,
-	.apicid_to_node = noop_apicid_to_node,
 
-	.cpu_to_logical_apicid = noop_cpu_to_logical_apicid,
 	.cpu_present_to_apicid = default_cpu_present_to_apicid,
 	.apicid_to_cpu_present = physid_set_mask_of_physid,
 
@@ -197,4 +192,9 @@ struct apic apic_noop = {
 	.icr_write = noop_apic_icr_write,
 	.wait_icr_idle = noop_apic_wait_icr_idle,
 	.safe_wait_icr_idle = noop_safe_apic_wait_icr_idle,
+
+#ifdef CONFIG_X86_32
+	.x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
+	.x86_32_numa_cpu_node = noop_x86_32_numa_cpu_node,
+#endif
 };
diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
index cb804c5091b9..541a2e431659 100644
--- a/arch/x86/kernel/apic/bigsmp_32.c
+++ b/arch/x86/kernel/apic/bigsmp_32.c
@@ -45,6 +45,12 @@ static unsigned long bigsmp_check_apicid_present(int bit)
 	return 1;
 }
 
+static int bigsmp_early_logical_apicid(int cpu)
+{
+	/* on bigsmp, logical apicid is the same as physical */
+	return early_per_cpu(x86_cpu_to_apicid, cpu);
+}
+
 static inline unsigned long calculate_ldr(int cpu)
 {
 	unsigned long val, id;
@@ -80,11 +86,6 @@ static void bigsmp_setup_apic_routing(void)
 			nr_ioapics);
 }
 
-static int bigsmp_apicid_to_node(int logical_apicid)
-{
-	return apicid_2_node[hard_smp_processor_id()];
-}
-
 static int bigsmp_cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < nr_cpu_ids)
@@ -93,14 +94,6 @@ static int bigsmp_cpu_present_to_apicid(int mps_cpu)
 	return BAD_APICID;
 }
 
-/* Mapping from cpu number to logical apicid */
-static inline int bigsmp_cpu_to_logical_apicid(int cpu)
-{
-	if (cpu >= nr_cpu_ids)
-		return BAD_APICID;
-	return cpu_physical_id(cpu);
-}
-
 static void bigsmp_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
@@ -115,7 +108,11 @@ static int bigsmp_check_phys_apicid_present(int phys_apicid)
 /* As we are using single CPU as destination, pick only one CPU here */
 static unsigned int bigsmp_cpu_mask_to_apicid(const struct cpumask *cpumask)
 {
-	return bigsmp_cpu_to_logical_apicid(cpumask_first(cpumask));
+	int cpu = cpumask_first(cpumask);
+
+	if (cpu < nr_cpu_ids)
+		return cpu_physical_id(cpu);
+	return BAD_APICID;
 }
 
 static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
@@ -129,9 +126,9 @@ static unsigned int bigsmp_cpu_mask_to_apicid_and(const struct cpumask *cpumask,
 	 */
 	for_each_cpu_and(cpu, cpumask, andmask) {
 		if (cpumask_test_cpu(cpu, cpu_online_mask))
-			break;
+			return cpu_physical_id(cpu);
 	}
-	return bigsmp_cpu_to_logical_apicid(cpu);
+	return BAD_APICID;
 }
 
 static int bigsmp_phys_pkg_id(int cpuid_apic, int index_msb)
@@ -219,8 +216,6 @@ struct apic apic_bigsmp = {
 	.ioapic_phys_id_map = bigsmp_ioapic_phys_id_map,
 	.setup_apic_routing = bigsmp_setup_apic_routing,
 	.multi_timer_check = NULL,
-	.apicid_to_node = bigsmp_apicid_to_node,
-	.cpu_to_logical_apicid = bigsmp_cpu_to_logical_apicid,
 	.cpu_present_to_apicid = bigsmp_cpu_present_to_apicid,
 	.apicid_to_cpu_present = physid_set_mask_of_physid,
 	.setup_portio_remap = NULL,
@@ -256,4 +251,7 @@ struct apic apic_bigsmp = {
 	.icr_write = native_apic_icr_write,
 	.wait_icr_idle = native_apic_wait_icr_idle,
 	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+	.x86_32_early_logical_apicid = bigsmp_early_logical_apicid,
+	.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
 };
diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
index 8593582d8022..3e9de4854c5b 100644
--- a/arch/x86/kernel/apic/es7000_32.c
+++ b/arch/x86/kernel/apic/es7000_32.c
@@ -460,6 +460,12 @@ static unsigned long es7000_check_apicid_present(int bit)
 	return physid_isset(bit, phys_cpu_present_map);
 }
 
+static int es7000_early_logical_apicid(int cpu)
+{
+	/* on es7000, logical apicid is the same as physical */
+	return early_per_cpu(x86_bios_cpu_apicid, cpu);
+}
+
 static unsigned long calculate_ldr(int cpu)
 {
 	unsigned long id = per_cpu(x86_bios_cpu_apicid, cpu);
@@ -504,12 +510,11 @@ static void es7000_setup_apic_routing(void)
 		nr_ioapics, cpumask_bits(es7000_target_cpus())[0]);
 }
 
-static int es7000_apicid_to_node(int logical_apicid)
+static int es7000_numa_cpu_node(int cpu)
 {
 	return 0;
 }
 
-
 static int es7000_cpu_present_to_apicid(int mps_cpu)
 {
 	if (!mps_cpu)
@@ -528,18 +533,6 @@ static void es7000_apicid_to_cpu_present(int phys_apicid, physid_mask_t *retmap)
 		++cpu_id;
 }
 
-/* Mapping from cpu number to logical apicid */
-static int es7000_cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-	if (cpu >= nr_cpu_ids)
-		return BAD_APICID;
-	return cpu_2_logical_apicid[cpu];
-#else
-	return logical_smp_processor_id();
-#endif
-}
-
 static void es7000_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 {
 	/* For clustered we don't have a good way to do this yet - hack */
@@ -561,7 +554,7 @@ static unsigned int es7000_cpu_mask_to_apicid(const struct cpumask *cpumask)
 	 * The cpus in the mask must all be on the apic cluster.
 	 */
 	for_each_cpu(cpu, cpumask) {
-		int new_apicid = es7000_cpu_to_logical_apicid(cpu);
+		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
 
 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			WARN(1, "Not a valid mask!");
@@ -578,7 +571,7 @@ static unsigned int
 es7000_cpu_mask_to_apicid_and(const struct cpumask *inmask,
 			      const struct cpumask *andmask)
 {
-	int apicid = es7000_cpu_to_logical_apicid(0);
+	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
@@ -655,8 +648,6 @@ struct apic __refdata apic_es7000_cluster = {
 	.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
 	.setup_apic_routing = es7000_setup_apic_routing,
 	.multi_timer_check = NULL,
-	.apicid_to_node = es7000_apicid_to_node,
-	.cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
 	.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
 	.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
 	.setup_portio_remap = NULL,
@@ -695,6 +686,9 @@ struct apic __refdata apic_es7000_cluster = {
 	.icr_write = native_apic_icr_write,
 	.wait_icr_idle = native_apic_wait_icr_idle,
 	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+	.x86_32_early_logical_apicid = es7000_early_logical_apicid,
+	.x86_32_numa_cpu_node = es7000_numa_cpu_node,
 };
 
 struct apic __refdata apic_es7000 = {
@@ -720,8 +714,6 @@ struct apic __refdata apic_es7000 = {
 	.ioapic_phys_id_map = es7000_ioapic_phys_id_map,
 	.setup_apic_routing = es7000_setup_apic_routing,
 	.multi_timer_check = NULL,
-	.apicid_to_node = es7000_apicid_to_node,
-	.cpu_to_logical_apicid = es7000_cpu_to_logical_apicid,
 	.cpu_present_to_apicid = es7000_cpu_present_to_apicid,
 	.apicid_to_cpu_present = es7000_apicid_to_cpu_present,
 	.setup_portio_remap = NULL,
@@ -758,4 +750,7 @@ struct apic __refdata apic_es7000 = {
 	.icr_write = native_apic_icr_write,
 	.wait_icr_idle = native_apic_wait_icr_idle,
 	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+	.x86_32_early_logical_apicid = es7000_early_logical_apicid,
+	.x86_32_numa_cpu_node = es7000_numa_cpu_node,
 };
diff --git a/arch/x86/kernel/apic/ipi.c b/arch/x86/kernel/apic/ipi.c
index 08385e090a6f..cce91bf26676 100644
--- a/arch/x86/kernel/apic/ipi.c
+++ b/arch/x86/kernel/apic/ipi.c
@@ -56,6 +56,8 @@ void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
 	local_irq_restore(flags);
 }
 
+#ifdef CONFIG_X86_32
+
 void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
 						 int vector)
 {
@@ -71,8 +73,8 @@ void default_send_IPI_mask_sequence_logical(const struct cpumask *mask,
 	local_irq_save(flags);
 	for_each_cpu(query_cpu, mask)
 		__default_send_IPI_dest_field(
-			apic->cpu_to_logical_apicid(query_cpu), vector,
-			apic->dest_logical);
+			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+			vector, apic->dest_logical);
 	local_irq_restore(flags);
 }
 
@@ -90,14 +92,12 @@ void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
 		if (query_cpu == this_cpu)
 			continue;
 		__default_send_IPI_dest_field(
-			apic->cpu_to_logical_apicid(query_cpu), vector,
-			apic->dest_logical);
+			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
+			vector, apic->dest_logical);
 	}
 	local_irq_restore(flags);
 }
 
-#ifdef CONFIG_X86_32
-
 /*
  * This is only used on smaller machines.
  */
diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
index 960f26ab5c9f..6273eee5134b 100644
--- a/arch/x86/kernel/apic/numaq_32.c
+++ b/arch/x86/kernel/apic/numaq_32.c
@@ -373,13 +373,6 @@ static inline void numaq_ioapic_phys_id_map(physid_mask_t *phys_map, physid_mask_t *retmap)
 	return physids_promote(0xFUL, retmap);
 }
 
-static inline int numaq_cpu_to_logical_apicid(int cpu)
-{
-	if (cpu >= nr_cpu_ids)
-		return BAD_APICID;
-	return cpu_2_logical_apicid[cpu];
-}
-
 /*
  * Supporting over 60 cpus on NUMA-Q requires a locality-dependent
  * cpu to APIC ID relation to properly interact with the intelligent
@@ -398,6 +391,15 @@ static inline int numaq_apicid_to_node(int logical_apicid)
 	return logical_apicid >> 4;
 }
 
+static int numaq_numa_cpu_node(int cpu)
+{
+	int logical_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+
+	if (logical_apicid != BAD_APICID)
+		return numaq_apicid_to_node(logical_apicid);
+	return NUMA_NO_NODE;
+}
+
 static void numaq_apicid_to_cpu_present(int logical_apicid, physid_mask_t *retmap)
 {
 	int node = numaq_apicid_to_node(logical_apicid);
@@ -508,8 +510,6 @@ struct apic __refdata apic_numaq = {
 	.ioapic_phys_id_map = numaq_ioapic_phys_id_map,
 	.setup_apic_routing = numaq_setup_apic_routing,
 	.multi_timer_check = numaq_multi_timer_check,
-	.apicid_to_node = numaq_apicid_to_node,
-	.cpu_to_logical_apicid = numaq_cpu_to_logical_apicid,
 	.cpu_present_to_apicid = numaq_cpu_present_to_apicid,
 	.apicid_to_cpu_present = numaq_apicid_to_cpu_present,
 	.setup_portio_remap = numaq_setup_portio_remap,
@@ -547,4 +547,7 @@ struct apic __refdata apic_numaq = {
 	.icr_write = native_apic_icr_write,
 	.wait_icr_idle = native_apic_wait_icr_idle,
 	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+	.x86_32_early_logical_apicid = noop_x86_32_early_logical_apicid,
+	.x86_32_numa_cpu_node = numaq_numa_cpu_node,
 };
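On NUMA-Q the quad number lives in the upper nibble of the logical apicid, which is why numaq_numa_cpu_node() can resolve the node with a shift by four once the logical apicid is known. A tiny illustrative userspace sketch of that encoding (the constants are assumptions consistent with the hunk above):

#include <stdio.h>

#define BAD_APICID   (-1)
#define NUMA_NO_NODE (-1)

/* node = quad number = upper nibble of the logical apicid */
static int numaq_cpu_node(int logical_apicid)
{
	if (logical_apicid != BAD_APICID)
		return logical_apicid >> 4;
	return NUMA_NO_NODE;
}

int main(void)
{
	printf("logical apicid 0x23 -> node %d\n", numaq_cpu_node(0x23));
	printf("unknown apicid      -> node %d\n", numaq_cpu_node(BAD_APICID));
	return 0;
}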
diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
index 99d2fe016084..fc84c7b61108 100644
--- a/arch/x86/kernel/apic/probe_32.c
+++ b/arch/x86/kernel/apic/probe_32.c
@@ -77,6 +77,11 @@ void __init default_setup_apic_routing(void)
 		apic->setup_apic_routing();
 }
 
+static int default_x86_32_early_logical_apicid(int cpu)
+{
+	return 1 << cpu;
+}
+
 static void setup_apic_flat_routing(void)
 {
 #ifdef CONFIG_X86_IO_APIC
@@ -130,8 +135,6 @@ struct apic apic_default = {
 	.ioapic_phys_id_map = default_ioapic_phys_id_map,
 	.setup_apic_routing = setup_apic_flat_routing,
 	.multi_timer_check = NULL,
-	.apicid_to_node = default_apicid_to_node,
-	.cpu_to_logical_apicid = default_cpu_to_logical_apicid,
 	.cpu_present_to_apicid = default_cpu_present_to_apicid,
 	.apicid_to_cpu_present = physid_set_mask_of_physid,
 	.setup_portio_remap = NULL,
@@ -167,6 +170,9 @@ struct apic apic_default = {
 	.icr_write = native_apic_icr_write,
 	.wait_icr_idle = native_apic_wait_icr_idle,
 	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+	.x86_32_early_logical_apicid = default_x86_32_early_logical_apicid,
+	.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
 };
 
 extern struct apic apic_numaq;
diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
index 9b419263d90d..e4b8059b414a 100644
--- a/arch/x86/kernel/apic/summit_32.c
+++ b/arch/x86/kernel/apic/summit_32.c
@@ -194,11 +194,10 @@ static unsigned long summit_check_apicid_present(int bit)
 	return 1;
 }
 
-static void summit_init_apic_ldr(void)
+static int summit_early_logical_apicid(int cpu)
 {
-	unsigned long val, id;
 	int count = 0;
-	u8 my_id = (u8)hard_smp_processor_id();
+	u8 my_id = early_per_cpu(x86_cpu_to_apicid, cpu);
 	u8 my_cluster = APIC_CLUSTER(my_id);
 #ifdef CONFIG_SMP
 	u8 lid;
@@ -206,7 +205,7 @@
 
 	/* Create logical APIC IDs by counting CPUs already in cluster. */
 	for (count = 0, i = nr_cpu_ids; --i >= 0; ) {
-		lid = cpu_2_logical_apicid[i];
+		lid = early_per_cpu(x86_cpu_to_logical_apicid, i);
 		if (lid != BAD_APICID && APIC_CLUSTER(lid) == my_cluster)
 			++count;
 	}
@@ -214,7 +213,15 @@
 	/* We only have a 4 wide bitmap in cluster mode.  If a deranged
 	 * BIOS puts 5 CPUs in one APIC cluster, we're hosed. */
 	BUG_ON(count >= XAPIC_DEST_CPUS_SHIFT);
-	id = my_cluster | (1UL << count);
+	return my_cluster | (1UL << count);
+}
+
+static void summit_init_apic_ldr(void)
+{
+	int cpu = smp_processor_id();
+	unsigned long id = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
+	unsigned long val;
+
 	apic_write(APIC_DFR, SUMMIT_APIC_DFR_VALUE);
 	val = apic_read(APIC_LDR) & ~APIC_LDR_MASK;
 	val |= SET_APIC_LOGICAL_ID(id);
@@ -232,27 +239,6 @@ static void summit_setup_apic_routing(void)
 			nr_ioapics);
 }
 
-static int summit_apicid_to_node(int logical_apicid)
-{
-#ifdef CONFIG_SMP
-	return apicid_2_node[hard_smp_processor_id()];
-#else
-	return 0;
-#endif
-}
-
-/* Mapping from cpu number to logical apicid */
-static inline int summit_cpu_to_logical_apicid(int cpu)
-{
-#ifdef CONFIG_SMP
-	if (cpu >= nr_cpu_ids)
-		return BAD_APICID;
-	return cpu_2_logical_apicid[cpu];
-#else
-	return logical_smp_processor_id();
-#endif
-}
-
 static int summit_cpu_present_to_apicid(int mps_cpu)
 {
 	if (mps_cpu < nr_cpu_ids)
@@ -286,7 +272,7 @@ static unsigned int summit_cpu_mask_to_apicid(const struct cpumask *cpumask)
 	 * The cpus in the mask must all be on the apic cluster.
 	 */
 	for_each_cpu(cpu, cpumask) {
-		int new_apicid = summit_cpu_to_logical_apicid(cpu);
+		int new_apicid = early_per_cpu(x86_cpu_to_logical_apicid, cpu);
 
 		if (round && APIC_CLUSTER(apicid) != APIC_CLUSTER(new_apicid)) {
 			printk("%s: Not a valid mask!\n", __func__);
@@ -301,7 +287,7 @@
 static unsigned int summit_cpu_mask_to_apicid_and(const struct cpumask *inmask,
 			      const struct cpumask *andmask)
 {
-	int apicid = summit_cpu_to_logical_apicid(0);
+	int apicid = early_per_cpu(x86_cpu_to_logical_apicid, 0);
 	cpumask_var_t cpumask;
 
 	if (!alloc_cpumask_var(&cpumask, GFP_ATOMIC))
@@ -528,8 +514,6 @@ struct apic apic_summit = {
 	.ioapic_phys_id_map = summit_ioapic_phys_id_map,
 	.setup_apic_routing = summit_setup_apic_routing,
 	.multi_timer_check = NULL,
-	.apicid_to_node = summit_apicid_to_node,
-	.cpu_to_logical_apicid = summit_cpu_to_logical_apicid,
 	.cpu_present_to_apicid = summit_cpu_present_to_apicid,
 	.apicid_to_cpu_present = summit_apicid_to_cpu_present,
 	.setup_portio_remap = NULL,
@@ -565,4 +549,7 @@
 	.icr_write = native_apic_icr_write,
 	.wait_icr_idle = native_apic_wait_icr_idle,
 	.safe_wait_icr_idle = native_safe_apic_wait_icr_idle,
+
+	.x86_32_early_logical_apicid = summit_early_logical_apicid,
+	.x86_32_numa_cpu_node = default_x86_32_numa_cpu_node,
 };
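summit_early_logical_apicid() above assigns each CPU the next free bit of its cluster's 4-bit in-cluster bitmap. A userspace sketch of that counting scheme; the 0xf0 cluster mask is an assumption consistent with xAPIC cluster mode, and all names are illustrative:

#include <stdio.h>

#define NR_CPUS               8
#define BAD_APICID            0xff
#define XAPIC_DEST_CPUS_SHIFT 4
#define APIC_CLUSTER(id)      ((id) & 0xf0)	/* upper nibble = cluster */

/* two CPUs already hold bits 0 and 1 of cluster 0x10 */
static unsigned char logical_apicid[NR_CPUS] = {
	0x11, 0x12, BAD_APICID, BAD_APICID,
	BAD_APICID, BAD_APICID, BAD_APICID, BAD_APICID,
};

static int early_logical_apicid(unsigned char phys_id)
{
	unsigned char cluster = APIC_CLUSTER(phys_id);
	int i, count = 0;

	/* count CPUs whose logical apicid already sits in this cluster */
	for (i = 0; i < NR_CPUS; i++) {
		unsigned char lid = logical_apicid[i];

		if (lid != BAD_APICID && APIC_CLUSTER(lid) == cluster)
			count++;
	}
	if (count >= XAPIC_DEST_CPUS_SHIFT)
		return -1;	/* cluster full: only 4 CPUs per cluster */
	return cluster | (1 << count);
}

int main(void)
{
	/* the third CPU of cluster 0x10 gets bit 2: 0x14 */
	printf("next logical apicid: 0x%x\n", early_logical_apicid(0x13));
	return 0;
}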
diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
index cf69c59f4910..90949bbd566d 100644
--- a/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/arch/x86/kernel/apic/x2apic_cluster.c
@@ -206,8 +206,6 @@ struct apic apic_x2apic_cluster = {
 	.ioapic_phys_id_map = NULL,
 	.setup_apic_routing = NULL,
 	.multi_timer_check = NULL,
-	.apicid_to_node = NULL,
-	.cpu_to_logical_apicid = NULL,
 	.cpu_present_to_apicid = default_cpu_present_to_apicid,
 	.apicid_to_cpu_present = NULL,
 	.setup_portio_remap = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
index 8972f38c5ced..c7e6d6645bf4 100644
--- a/arch/x86/kernel/apic/x2apic_phys.c
+++ b/arch/x86/kernel/apic/x2apic_phys.c
@@ -195,8 +195,6 @@ struct apic apic_x2apic_phys = {
 	.ioapic_phys_id_map = NULL,
 	.setup_apic_routing = NULL,
 	.multi_timer_check = NULL,
-	.apicid_to_node = NULL,
-	.cpu_to_logical_apicid = NULL,
 	.cpu_present_to_apicid = default_cpu_present_to_apicid,
 	.apicid_to_cpu_present = NULL,
 	.setup_portio_remap = NULL,
diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
index bd16b58b8850..3c289281394c 100644
--- a/arch/x86/kernel/apic/x2apic_uv_x.c
+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
@@ -338,8 +338,6 @@ struct apic __refdata apic_x2apic_uv_x = {
 	.ioapic_phys_id_map = NULL,
 	.setup_apic_routing = NULL,
 	.multi_timer_check = NULL,
-	.apicid_to_node = NULL,
-	.cpu_to_logical_apicid = NULL,
 	.cpu_present_to_apicid = default_cpu_present_to_apicid,
 	.apicid_to_cpu_present = NULL,
 	.setup_portio_remap = NULL,
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index 7c7bedb83c5a..77858fd64620 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -233,18 +233,22 @@ static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
 }
 #endif
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
+/*
+ * To workaround broken NUMA config.  Read the comment in
+ * srat_detect_node().
+ */
 static int __cpuinit nearby_node(int apicid)
 {
 	int i, node;
 
 	for (i = apicid - 1; i >= 0; i--) {
-		node = apicid_to_node[i];
+		node = __apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
 	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
-		node = apicid_to_node[i];
+		node = __apicid_to_node[i];
 		if (node != NUMA_NO_NODE && node_online(node))
 			return node;
 	}
@@ -334,31 +338,40 @@ EXPORT_SYMBOL_GPL(amd_get_nb_id);
 
 static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 {
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
 	int cpu = smp_processor_id();
 	int node;
 	unsigned apicid = c->apicid;
 
-	node = per_cpu(cpu_llc_id, cpu);
+	node = numa_cpu_node(cpu);
+	if (node == NUMA_NO_NODE)
+		node = per_cpu(cpu_llc_id, cpu);
 
-	if (apicid_to_node[apicid] != NUMA_NO_NODE)
-		node = apicid_to_node[apicid];
 	if (!node_online(node)) {
-		/* Two possibilities here:
-		   - The CPU is missing memory and no node was created.
-		   In that case try picking one from a nearby CPU
-		   - The APIC IDs differ from the HyperTransport node IDs
-		   which the K8 northbridge parsing fills in.
-		   Assume they are all increased by a constant offset,
-		   but in the same order as the HT nodeids.
-		   If that doesn't result in a usable node fall back to the
-		   path for the previous case. */
-
+		/*
+		 * Two possibilities here:
+		 *
+		 * - The CPU is missing memory and no node was created.  In
+		 *   that case try picking one from a nearby CPU.
+		 *
+		 * - The APIC IDs differ from the HyperTransport node IDs
+		 *   which the K8 northbridge parsing fills in.  Assume
+		 *   they are all increased by a constant offset, but in
+		 *   the same order as the HT nodeids.  If that doesn't
+		 *   result in a usable node fall back to the path for the
+		 *   previous case.
+		 *
+		 * This workaround operates directly on the mapping between
+		 * APIC ID and NUMA node, assuming certain relationship
+		 * between APIC ID, HT node ID and NUMA topology.  As going
+		 * through CPU mapping may alter the outcome, directly
+		 * access __apicid_to_node[].
+		 */
 		int ht_nodeid = c->initial_apicid;
 
 		if (ht_nodeid >= 0 &&
-		    apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
-			node = apicid_to_node[ht_nodeid];
+		    __apicid_to_node[ht_nodeid] != NUMA_NO_NODE)
+			node = __apicid_to_node[ht_nodeid];
 		/* Pick a nearby node */
 		if (!node_online(node))
 			node = nearby_node(apicid);
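The nearby_node() fallback above scans __apicid_to_node[] outward from the given apicid until it finds an online node. A self-contained userspace sketch mirroring that loop, with node_online() stubbed purely for illustration:

#include <stdio.h>

#define MAX_LOCAL_APIC 32
#define NUMA_NO_NODE   (-1)

static short __apicid_to_node[MAX_LOCAL_APIC];

static int node_online(int node)
{
	return node == 0 || node == 1;	/* pretend nodes 0 and 1 exist */
}

static int nearby_node(int apicid)
{
	int i, node;

	/* scan downward first, then upward, exactly as in the hunk above */
	for (i = apicid - 1; i >= 0; i--) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	for (i = apicid + 1; i < MAX_LOCAL_APIC; i++) {
		node = __apicid_to_node[i];
		if (node != NUMA_NO_NODE && node_online(node))
			return node;
	}
	return 0;	/* last resort: node 0 */
}

int main(void)
{
	int i;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		__apicid_to_node[i] = NUMA_NO_NODE;
	__apicid_to_node[4] = 1;
	printf("apicid 7 falls back to node %d\n", nearby_node(7));
	return 0;
}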
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 1d59834396bd..a2559c3ed500 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -869,7 +869,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
 
 	select_idle_routine(c);
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
 	numa_add_cpu(smp_processor_id());
 #endif
 }
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index d16c2c53d6bf..df86bc8c859d 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -276,14 +276,13 @@ static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
 
 static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
 {
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#ifdef CONFIG_NUMA
 	unsigned node;
 	int cpu = smp_processor_id();
-	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;
 
 	/* Don't do the funky fallback heuristics the AMD version employs
 	   for now. */
-	node = apicid_to_node[apicid];
+	node = numa_cpu_node(cpu);
 	if (node == NUMA_NO_NODE || !node_online(node)) {
 		/* reuse the value from init_cpu_to_node() */
 		node = cpu_to_node(cpu);
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6b286d879a25..ac909ba297b9 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -1047,9 +1047,7 @@ void __init setup_arch(char **cmdline_p)
 
 	prefill_possible_map();
 
-#ifdef CONFIG_X86_64
 	init_cpu_to_node();
-#endif
 
 	init_apic_mappings();
 	ioapic_and_gsi_init();
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index 002b79685f73..71f4727da373 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -225,10 +225,15 @@ void __init setup_per_cpu_areas(void)
 		per_cpu(x86_bios_cpu_apicid, cpu) =
 			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
 #endif
+#ifdef CONFIG_X86_32
+		per_cpu(x86_cpu_to_logical_apicid, cpu) =
+			early_per_cpu_map(x86_cpu_to_logical_apicid, cpu);
+#endif
 #ifdef CONFIG_X86_64
 		per_cpu(irq_stack_ptr, cpu) =
 			per_cpu(irq_stack_union.irq_stack, cpu) +
 				IRQ_STACK_SIZE - 64;
+#endif
 #ifdef CONFIG_NUMA
 		per_cpu(x86_cpu_to_node_map, cpu) =
 			early_per_cpu_map(x86_cpu_to_node_map, cpu);
@@ -242,7 +247,6 @@ void __init setup_per_cpu_areas(void)
 		 */
 		set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
 #endif
-#endif
 		/*
 		 * Up to this point, the boot CPU has been using .init.data
 		 * area.  Reload any changed state for the boot CPU.
@@ -256,7 +260,10 @@ void __init setup_per_cpu_areas(void)
 	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
 	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
 #endif
-#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
+#ifdef CONFIG_X86_32
+	early_per_cpu_ptr(x86_cpu_to_logical_apicid) = NULL;
+#endif
+#ifdef CONFIG_NUMA
 	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif
 
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 03273b6c272c..8886ef36d5dd 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -71,10 +71,6 @@
71#include <asm/smpboot_hooks.h> 71#include <asm/smpboot_hooks.h>
72#include <asm/i8259.h> 72#include <asm/i8259.h>
73 73
74#ifdef CONFIG_X86_32
75u8 apicid_2_node[MAX_APICID];
76#endif
77
78/* State of each CPU */ 74/* State of each CPU */
79DEFINE_PER_CPU(int, cpu_state) = { 0 }; 75DEFINE_PER_CPU(int, cpu_state) = { 0 };
80 76
@@ -136,62 +132,6 @@ EXPORT_PER_CPU_SYMBOL(cpu_info);
136 132
137atomic_t init_deasserted; 133atomic_t init_deasserted;
138 134
139#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)
140/* which node each logical CPU is on */
141int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
142EXPORT_SYMBOL(cpu_to_node_map);
143
144/* set up a mapping between cpu and node. */
145static void map_cpu_to_node(int cpu, int node)
146{
147 printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
148 cpumask_set_cpu(cpu, node_to_cpumask_map[node]);
149 cpu_to_node_map[cpu] = node;
150}
151
152/* undo a mapping between cpu and node. */
153static void unmap_cpu_to_node(int cpu)
154{
155 int node;
156
157 printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
158 for (node = 0; node < MAX_NUMNODES; node++)
159 cpumask_clear_cpu(cpu, node_to_cpumask_map[node]);
160 cpu_to_node_map[cpu] = 0;
161}
162#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
163#define map_cpu_to_node(cpu, node) ({})
164#define unmap_cpu_to_node(cpu) ({})
165#endif
166
167#ifdef CONFIG_X86_32
168static int boot_cpu_logical_apicid;
169
170u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
171 { [0 ... NR_CPUS-1] = BAD_APICID };
172
173static void map_cpu_to_logical_apicid(void)
174{
175 int cpu = smp_processor_id();
176 int apicid = logical_smp_processor_id();
177 int node = apic->apicid_to_node(apicid);
178
179 if (!node_online(node))
180 node = first_online_node;
181
182 cpu_2_logical_apicid[cpu] = apicid;
183 map_cpu_to_node(cpu, node);
184}
185
186void numa_remove_cpu(int cpu)
187{
188 cpu_2_logical_apicid[cpu] = BAD_APICID;
189 unmap_cpu_to_node(cpu);
190}
191#else
192#define map_cpu_to_logical_apicid() do {} while (0)
193#endif
194
195/* 135/*
196 * Report back to the Boot Processor. 136 * Report back to the Boot Processor.
197 * Running on AP. 137 * Running on AP.
@@ -259,7 +199,6 @@ static void __cpuinit smp_callin(void)
259 apic->smp_callin_clear_local_apic(); 199 apic->smp_callin_clear_local_apic();
260 setup_local_APIC(); 200 setup_local_APIC();
261 end_local_APIC_setup(); 201 end_local_APIC_setup();
262 map_cpu_to_logical_apicid();
263 202
264 /* 203 /*
265 * Need to setup vector mappings before we enable interrupts. 204 * Need to setup vector mappings before we enable interrupts.
@@ -960,7 +899,6 @@ static __init void disable_smp(void)
960 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map); 899 physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
961 else 900 else
962 physid_set_mask_of_physid(0, &phys_cpu_present_map); 901 physid_set_mask_of_physid(0, &phys_cpu_present_map);
963 map_cpu_to_logical_apicid();
964 cpumask_set_cpu(0, cpu_sibling_mask(0)); 902 cpumask_set_cpu(0, cpu_sibling_mask(0));
965 cpumask_set_cpu(0, cpu_core_mask(0)); 903 cpumask_set_cpu(0, cpu_core_mask(0));
966} 904}
@@ -1096,9 +1034,7 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1096 * Setup boot CPU information 1034 * Setup boot CPU information
1097 */ 1035 */
1098 smp_store_cpu_info(0); /* Final full version of the data */ 1036 smp_store_cpu_info(0); /* Final full version of the data */
1099#ifdef CONFIG_X86_32 1037
1100 boot_cpu_logical_apicid = logical_smp_processor_id();
1101#endif
1102 current_thread_info()->cpu = 0; /* needed? */ 1038 current_thread_info()->cpu = 0; /* needed? */
1103 for_each_possible_cpu(i) { 1039 for_each_possible_cpu(i) {
1104 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); 1040 zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
@@ -1139,8 +1075,6 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1139 1075
1140 end_local_APIC_setup(); 1076 end_local_APIC_setup();
1141 1077
1142 map_cpu_to_logical_apicid();
1143
1144 if (apic->setup_portio_remap) 1078 if (apic->setup_portio_remap)
1145 apic->setup_portio_remap(); 1079 apic->setup_portio_remap();
1146 1080
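
The block deleted above was the 32-bit-only bookkeeping that kept cpu_to_node_map[] and node_to_cpumask_map[] in sync via the APIC driver's apicid_to_node callback; the generic numa_add_cpu()/numa_remove_cpu() in numa.c now cover this. A standalone sketch of what the removed map/unmap helpers did (plain bitmasks stand in for the kernel's cpumask_t):

	#include <stdio.h>

	#define MAX_NODES 4
	#define NR_CPUS   8

	static unsigned long node_to_cpumask[MAX_NODES]; /* bit c set == CPU c on node */
	static int cpu_to_node[NR_CPUS];

	static void map_cpu_to_node(int cpu, int node)
	{
		printf("Mapping cpu %d to node %d\n", cpu, node);
		node_to_cpumask[node] |= 1UL << cpu;
		cpu_to_node[cpu] = node;
	}

	static void unmap_cpu_to_node(int cpu)
	{
		int node;

		printf("Unmapping cpu %d from all nodes\n", cpu);
		for (node = 0; node < MAX_NODES; node++)
			node_to_cpumask[node] &= ~(1UL << cpu);
		cpu_to_node[cpu] = 0;
	}

	int main(void)
	{
		map_cpu_to_node(3, 1);
		unmap_cpu_to_node(3);
		return 0;
	}
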
diff --git a/arch/x86/mm/amdtopology_64.c b/arch/x86/mm/amdtopology_64.c
index 49b334cdd64c..2523c3554de5 100644
--- a/arch/x86/mm/amdtopology_64.c
+++ b/arch/x86/mm/amdtopology_64.c
@@ -247,7 +247,7 @@ void __init amd_fake_nodes(const struct bootnode *nodes, int nr_nodes)
247 __acpi_map_pxm_to_node(nid, i); 247 __acpi_map_pxm_to_node(nid, i);
248#endif 248#endif
249 } 249 }
250 memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); 250 memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
251} 251}
252#endif /* CONFIG_NUMA_EMU */ 252#endif /* CONFIG_NUMA_EMU */
253 253
@@ -287,7 +287,7 @@ int __init amd_scan_nodes(void)
287 int j; 287 int j;
288 288
289 for (j = apicid_base; j < cores + apicid_base; j++) 289 for (j = apicid_base; j < cores + apicid_base; j++)
290 apicid_to_node[(i << bits) + j] = i; 290 set_apicid_to_node((i << bits) + j, i);
291 setup_node_bootmem(i, nodes[i].start, nodes[i].end); 291 setup_node_bootmem(i, nodes[i].start, nodes[i].end);
292 } 292 }
293 293
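
Both amdtopology hunks replace direct stores into the old apicid_to_node[] array with the set_apicid_to_node() accessor over the now-shared __apicid_to_node[] table. A sketch of that accessor pattern, assuming a guarded store (the kernel's actual helper may be a bare inline assignment; the bounds check here is added for the sketch):

	#include <stdio.h>

	#define MAX_LOCAL_APIC 32768
	#define NUMA_NO_NODE   (-1)

	/* The table is "private"; writers go through one choke point. */
	static short __apicid_to_node[MAX_LOCAL_APIC];

	static void set_apicid_to_node(int apicid, short node)
	{
		/* Single place to validate or instrument updates. */
		if (apicid < 0 || apicid >= MAX_LOCAL_APIC)
			return;
		__apicid_to_node[apicid] = node;
	}

	int main(void)
	{
		set_apicid_to_node(17, 1);
		printf("apicid 17 -> node %d\n", __apicid_to_node[17]);
		return 0;
	}
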
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index ebf6d7887a38..9559d360fde7 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -26,12 +26,50 @@ static __init int numa_setup(char *opt)
26early_param("numa", numa_setup); 26early_param("numa", numa_setup);
27 27
28/* 28/*
29 * Which logical CPUs are on which nodes 29 * apicid, cpu, node mappings
30 */ 30 */
31s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
32 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
33};
34
31cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; 35cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
32EXPORT_SYMBOL(node_to_cpumask_map); 36EXPORT_SYMBOL(node_to_cpumask_map);
33 37
34/* 38/*
39 * Map cpu index to node index
40 */
41DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
42EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
43
44void __cpuinit numa_set_node(int cpu, int node)
45{
46 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
47
48 /* early setting, no percpu area yet */
49 if (cpu_to_node_map) {
50 cpu_to_node_map[cpu] = node;
51 return;
52 }
53
54#ifdef CONFIG_DEBUG_PER_CPU_MAPS
55 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
56 printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
57 dump_stack();
58 return;
59 }
60#endif
61 per_cpu(x86_cpu_to_node_map, cpu) = node;
62
63 if (node != NUMA_NO_NODE)
64 set_cpu_numa_node(cpu, node);
65}
66
67void __cpuinit numa_clear_node(int cpu)
68{
69 numa_set_node(cpu, NUMA_NO_NODE);
70}
71
72/*
35 * Allocate node_to_cpumask_map based on number of available nodes 73 * Allocate node_to_cpumask_map based on number of available nodes
36 * Requires node_possible_map to be valid. 74 * Requires node_possible_map to be valid.
37 * 75 *
@@ -57,7 +95,174 @@ void __init setup_node_to_cpumask_map(void)
57 pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids); 95 pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
58} 96}
59 97
60#ifdef CONFIG_DEBUG_PER_CPU_MAPS 98/*
99 * There are unfortunately some poorly designed mainboards around that
100 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
101 * mapping. To avoid this fill in the mapping for all possible CPUs,
102 * as the number of CPUs is not known yet. We round robin the existing
103 * nodes.
104 */
105void __init numa_init_array(void)
106{
107 int rr, i;
108
109 rr = first_node(node_online_map);
110 for (i = 0; i < nr_cpu_ids; i++) {
111 if (early_cpu_to_node(i) != NUMA_NO_NODE)
112 continue;
113 numa_set_node(i, rr);
114 rr = next_node(rr, node_online_map);
115 if (rr == MAX_NUMNODES)
116 rr = first_node(node_online_map);
117 }
118}
119
120static __init int find_near_online_node(int node)
121{
122 int n, val;
123 int min_val = INT_MAX;
124 int best_node = -1;
125
126 for_each_online_node(n) {
127 val = node_distance(node, n);
128
129 if (val < min_val) {
130 min_val = val;
131 best_node = n;
132 }
133 }
134
135 return best_node;
136}
137
138/*
 139 * Set up early cpu_to_node.
 140 *
 141 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 142 * apicid_to_node[] tables have valid entries for a CPU.
 143 * This means we skip cpu_to_node[] initialization for NUMA
 144 * emulation and the fake-node case (when running a kernel compiled
 145 * for NUMA on a non-NUMA box), which is OK: cpu_to_node[] has
 146 * already been initialized in a round-robin manner by
 147 * numa_init_array() prior to this call, and that initialization
 148 * is good enough for the fake NUMA cases.
 149 *
 150 * Called before the per_cpu areas are set up.
151 */
152void __init init_cpu_to_node(void)
153{
154 int cpu;
155 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
156
157 BUG_ON(cpu_to_apicid == NULL);
158
159 for_each_possible_cpu(cpu) {
160 int node = numa_cpu_node(cpu);
161
162 if (node == NUMA_NO_NODE)
163 continue;
164 if (!node_online(node))
165 node = find_near_online_node(node);
166 numa_set_node(cpu, node);
167 }
168}
169
170#ifndef CONFIG_DEBUG_PER_CPU_MAPS
171
172# ifndef CONFIG_NUMA_EMU
173void __cpuinit numa_add_cpu(int cpu)
174{
175 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
176}
177
178void __cpuinit numa_remove_cpu(int cpu)
179{
180 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
181}
182# endif /* !CONFIG_NUMA_EMU */
183
184#else /* !CONFIG_DEBUG_PER_CPU_MAPS */
185
186int __cpu_to_node(int cpu)
187{
188 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
189 printk(KERN_WARNING
190 "cpu_to_node(%d): usage too early!\n", cpu);
191 dump_stack();
192 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
193 }
194 return per_cpu(x86_cpu_to_node_map, cpu);
195}
196EXPORT_SYMBOL(__cpu_to_node);
197
198/*
199 * Same function as cpu_to_node() but used if called before the
 200 * per_cpu areas are set up.
201 */
202int early_cpu_to_node(int cpu)
203{
204 if (early_per_cpu_ptr(x86_cpu_to_node_map))
205 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
206
207 if (!cpu_possible(cpu)) {
208 printk(KERN_WARNING
209 "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
210 dump_stack();
211 return NUMA_NO_NODE;
212 }
213 return per_cpu(x86_cpu_to_node_map, cpu);
214}
215
216struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
217{
218 int node = early_cpu_to_node(cpu);
219 struct cpumask *mask;
220 char buf[64];
221
222 if (node == NUMA_NO_NODE) {
223 /* early_cpu_to_node() already emits a warning and trace */
224 return NULL;
225 }
226 mask = node_to_cpumask_map[node];
227 if (!mask) {
228 pr_err("node_to_cpumask_map[%i] NULL\n", node);
229 dump_stack();
230 return NULL;
231 }
232
233 cpulist_scnprintf(buf, sizeof(buf), mask);
234 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
235 enable ? "numa_add_cpu" : "numa_remove_cpu",
236 cpu, node, buf);
237 return mask;
238}
239
240# ifndef CONFIG_NUMA_EMU
241static void __cpuinit numa_set_cpumask(int cpu, int enable)
242{
243 struct cpumask *mask;
244
245 mask = debug_cpumask_set_cpu(cpu, enable);
246 if (!mask)
247 return;
248
249 if (enable)
250 cpumask_set_cpu(cpu, mask);
251 else
252 cpumask_clear_cpu(cpu, mask);
253}
254
255void __cpuinit numa_add_cpu(int cpu)
256{
257 numa_set_cpumask(cpu, 1);
258}
259
260void __cpuinit numa_remove_cpu(int cpu)
261{
262 numa_set_cpumask(cpu, 0);
263}
264# endif /* !CONFIG_NUMA_EMU */
265
61/* 266/*
62 * Returns a pointer to the bitmask of CPUs on Node 'node'. 267 * Returns a pointer to the bitmask of CPUs on Node 'node'.
63 */ 268 */
@@ -80,4 +285,5 @@ const struct cpumask *cpumask_of_node(int node)
80 return node_to_cpumask_map[node]; 285 return node_to_cpumask_map[node];
81} 286}
82EXPORT_SYMBOL(cpumask_of_node); 287EXPORT_SYMBOL(cpumask_of_node);
83#endif 288
289#endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
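
numa_init_array(), moved into the common numa.c above, round-robins every CPU that firmware left without a node across the online nodes. A compilable userspace model of that loop (CPU count, node set, and initial table contents are invented):

	#include <stdio.h>

	#define NR_CPUS      8
	#define NUMA_NO_NODE (-1)

	static int online_nodes[] = { 0, 2 };	/* pretend only nodes 0 and 2 exist */
	static int nr_online = 2;
	static int cpu_node[NR_CPUS] = {
		0, NUMA_NO_NODE, 2, NUMA_NO_NODE,
		NUMA_NO_NODE, 0, NUMA_NO_NODE, 2,
	};

	static void numa_init_array(void)
	{
		int rr = 0, i;	/* rr indexes online_nodes, like first_node() */

		for (i = 0; i < NR_CPUS; i++) {
			if (cpu_node[i] != NUMA_NO_NODE)
				continue;	/* firmware already told us */
			cpu_node[i] = online_nodes[rr];
			/* Wrap around, as next_node()/first_node() do above. */
			rr = (rr + 1) % nr_online;
		}
	}

	int main(void)
	{
		numa_init_array();
		for (int i = 0; i < NR_CPUS; i++)
			printf("cpu %d -> node %d\n", i, cpu_node[i]);
		return 0;
	}
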
diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c
index 84a3e4c9f277..505bb04654b5 100644
--- a/arch/x86/mm/numa_32.c
+++ b/arch/x86/mm/numa_32.c
@@ -110,6 +110,12 @@ void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);
110 110
111static unsigned long kva_start_pfn; 111static unsigned long kva_start_pfn;
112static unsigned long kva_pages; 112static unsigned long kva_pages;
113
114int __cpuinit numa_cpu_node(int cpu)
115{
116 return apic->x86_32_numa_cpu_node(cpu);
117}
118
113/* 119/*
114 * FLAT - support for basic PC memory model with discontig enabled, essentially 120 * FLAT - support for basic PC memory model with discontig enabled, essentially
115 * a single node with all available processors in it with a flat 121 * a single node with all available processors in it with a flat
@@ -361,6 +367,7 @@ void __init initmem_init(unsigned long start_pfn, unsigned long end_pfn,
361 */ 367 */
362 368
363 get_memcfg_numa(); 369 get_memcfg_numa();
370 numa_init_array();
364 371
365 kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE); 372 kva_pages = roundup(calculate_numa_remap_pages(), PTRS_PER_PTE);
366 373
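
On 32-bit, which node a CPU belongs to is an APIC-driver decision, so the new numa_cpu_node() is just an indirect call through the driver's x86_32_numa_cpu_node op. A sketch of that delegation; flat_numa_cpu_node() is a made-up stand-in for a driver implementation:

	#include <stdio.h>

	struct apic_ops {
		int (*x86_32_numa_cpu_node)(int cpu);
	};

	/* Invented stand-in driver op: flat model, everything on node 0. */
	static int flat_numa_cpu_node(int cpu)
	{
		(void)cpu;
		return 0;
	}

	static struct apic_ops flat_apic = {
		.x86_32_numa_cpu_node = flat_numa_cpu_node,
	};
	static struct apic_ops *apic = &flat_apic;

	/* Mirrors the indirection added in numa_32.c above. */
	static int numa_cpu_node(int cpu)
	{
		return apic->x86_32_numa_cpu_node(cpu);
	}

	int main(void)
	{
		printf("cpu 3 -> node %d\n", numa_cpu_node(3));
		return 0;
	}
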
diff --git a/arch/x86/mm/numa_64.c b/arch/x86/mm/numa_64.c
index 62cb634b5cf8..43ad3273561a 100644
--- a/arch/x86/mm/numa_64.c
+++ b/arch/x86/mm/numa_64.c
@@ -26,20 +26,10 @@ EXPORT_SYMBOL(node_data);
26 26
27struct memnode memnode; 27struct memnode memnode;
28 28
29s16 apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
30 [0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
31};
32
33static unsigned long __initdata nodemap_addr; 29static unsigned long __initdata nodemap_addr;
34static unsigned long __initdata nodemap_size; 30static unsigned long __initdata nodemap_size;
35 31
36/* 32/*
37 * Map cpu index to node index
38 */
39DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
40EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
41
42/*
43 * Given a shift value, try to populate memnodemap[] 33 * Given a shift value, try to populate memnodemap[]
44 * Returns : 34 * Returns :
45 * 1 if OK 35 * 1 if OK
@@ -234,28 +224,6 @@ setup_node_bootmem(int nodeid, unsigned long start, unsigned long end)
234 node_set_online(nodeid); 224 node_set_online(nodeid);
235} 225}
236 226
237/*
238 * There are unfortunately some poorly designed mainboards around that
239 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
240 * mapping. To avoid this fill in the mapping for all possible CPUs,
241 * as the number of CPUs is not known yet. We round robin the existing
242 * nodes.
243 */
244void __init numa_init_array(void)
245{
246 int rr, i;
247
248 rr = first_node(node_online_map);
249 for (i = 0; i < nr_cpu_ids; i++) {
250 if (early_cpu_to_node(i) != NUMA_NO_NODE)
251 continue;
252 numa_set_node(i, rr);
253 rr = next_node(rr, node_online_map);
254 if (rr == MAX_NUMNODES)
255 rr = first_node(node_online_map);
256 }
257}
258
259#ifdef CONFIG_NUMA_EMU 227#ifdef CONFIG_NUMA_EMU
260/* Numa emulation */ 228/* Numa emulation */
261static struct bootnode nodes[MAX_NUMNODES] __initdata; 229static struct bootnode nodes[MAX_NUMNODES] __initdata;
@@ -676,115 +644,33 @@ unsigned long __init numa_free_all_bootmem(void)
676 return pages; 644 return pages;
677} 645}
678 646
679#ifdef CONFIG_NUMA 647int __cpuinit numa_cpu_node(int cpu)
680
681static __init int find_near_online_node(int node)
682{ 648{
683 int n, val; 649 int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);
684 int min_val = INT_MAX;
685 int best_node = -1;
686
687 for_each_online_node(n) {
688 val = node_distance(node, n);
689
690 if (val < min_val) {
691 min_val = val;
692 best_node = n;
693 }
694 }
695 650
696 return best_node; 651 if (apicid != BAD_APICID)
652 return __apicid_to_node[apicid];
653 return NUMA_NO_NODE;
697} 654}
698 655
699/* 656/*
700 * Setup early cpu_to_node. 657 * UGLINESS AHEAD: Currently, CONFIG_NUMA_EMU is 64bit only and makes use
658 * of 64bit specific data structures. The distinction is artificial and
659 * should be removed. numa_{add|remove}_cpu() are implemented in numa.c
660 * for both 32 and 64bit when CONFIG_NUMA_EMU is disabled but here when
661 * enabled.
701 * 662 *
702 * Populate cpu_to_node[] only if x86_cpu_to_apicid[], 663 * NUMA emulation is planned to be made generic and the following and other
703 * and apicid_to_node[] tables have valid entries for a CPU. 664 * related code should be moved to numa.c.
704 * This means we skip cpu_to_node[] initialisation for NUMA
705 * emulation and faking node case (when running a kernel compiled
706 * for NUMA on a non NUMA box), which is OK as cpu_to_node[]
707 * is already initialized in a round robin manner at numa_init_array,
708 * prior to this call, and this initialization is good enough
709 * for the fake NUMA cases.
710 *
711 * Called before the per_cpu areas are setup.
712 */ 665 */
713void __init init_cpu_to_node(void) 666#ifdef CONFIG_NUMA_EMU
714{ 667# ifndef CONFIG_DEBUG_PER_CPU_MAPS
715 int cpu;
716 u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);
717
718 BUG_ON(cpu_to_apicid == NULL);
719
720 for_each_possible_cpu(cpu) {
721 int node;
722 u16 apicid = cpu_to_apicid[cpu];
723
724 if (apicid == BAD_APICID)
725 continue;
726 node = apicid_to_node[apicid];
727 if (node == NUMA_NO_NODE)
728 continue;
729 if (!node_online(node))
730 node = find_near_online_node(node);
731 numa_set_node(cpu, node);
732 }
733}
734#endif
735
736
737void __cpuinit numa_set_node(int cpu, int node)
738{
739 int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
740
741 /* early setting, no percpu area yet */
742 if (cpu_to_node_map) {
743 cpu_to_node_map[cpu] = node;
744 return;
745 }
746
747#ifdef CONFIG_DEBUG_PER_CPU_MAPS
748 if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
749 printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
750 dump_stack();
751 return;
752 }
753#endif
754 per_cpu(x86_cpu_to_node_map, cpu) = node;
755
756 if (node != NUMA_NO_NODE)
757 set_cpu_numa_node(cpu, node);
758}
759
760void __cpuinit numa_clear_node(int cpu)
761{
762 numa_set_node(cpu, NUMA_NO_NODE);
763}
764
765#ifndef CONFIG_DEBUG_PER_CPU_MAPS
766
767#ifndef CONFIG_NUMA_EMU
768void __cpuinit numa_add_cpu(int cpu)
769{
770 cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
771}
772
773void __cpuinit numa_remove_cpu(int cpu)
774{
775 cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
776}
777#else
778void __cpuinit numa_add_cpu(int cpu) 668void __cpuinit numa_add_cpu(int cpu)
779{ 669{
780 unsigned long addr; 670 unsigned long addr;
781 u16 apicid; 671 int physnid, nid;
782 int physnid;
783 int nid = NUMA_NO_NODE;
784 672
785 apicid = early_per_cpu(x86_cpu_to_apicid, cpu); 673 nid = numa_cpu_node(cpu);
786 if (apicid != BAD_APICID)
787 nid = apicid_to_node[apicid];
788 if (nid == NUMA_NO_NODE) 674 if (nid == NUMA_NO_NODE)
789 nid = early_cpu_to_node(cpu); 675 nid = early_cpu_to_node(cpu);
790 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid)); 676 BUG_ON(nid == NUMA_NO_NODE || !node_online(nid));
@@ -818,53 +704,17 @@ void __cpuinit numa_remove_cpu(int cpu)
818 for_each_online_node(i) 704 for_each_online_node(i)
819 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]); 705 cpumask_clear_cpu(cpu, node_to_cpumask_map[i]);
820} 706}
821#endif /* !CONFIG_NUMA_EMU */ 707# else /* !CONFIG_DEBUG_PER_CPU_MAPS */
822
823#else /* CONFIG_DEBUG_PER_CPU_MAPS */
824static struct cpumask __cpuinit *debug_cpumask_set_cpu(int cpu, int enable)
825{
826 int node = early_cpu_to_node(cpu);
827 struct cpumask *mask;
828 char buf[64];
829
830 mask = node_to_cpumask_map[node];
831 if (!mask) {
832 pr_err("node_to_cpumask_map[%i] NULL\n", node);
833 dump_stack();
834 return NULL;
835 }
836
837 cpulist_scnprintf(buf, sizeof(buf), mask);
838 printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
839 enable ? "numa_add_cpu" : "numa_remove_cpu",
840 cpu, node, buf);
841 return mask;
842}
843
844/*
845 * --------- debug versions of the numa functions ---------
846 */
847#ifndef CONFIG_NUMA_EMU
848static void __cpuinit numa_set_cpumask(int cpu, int enable)
849{
850 struct cpumask *mask;
851
852 mask = debug_cpumask_set_cpu(cpu, enable);
853 if (!mask)
854 return;
855
856 if (enable)
857 cpumask_set_cpu(cpu, mask);
858 else
859 cpumask_clear_cpu(cpu, mask);
860}
861#else
862static void __cpuinit numa_set_cpumask(int cpu, int enable) 708static void __cpuinit numa_set_cpumask(int cpu, int enable)
863{ 709{
864 int node = early_cpu_to_node(cpu); 710 int node = early_cpu_to_node(cpu);
865 struct cpumask *mask; 711 struct cpumask *mask;
866 int i; 712 int i;
867 713
714 if (node == NUMA_NO_NODE) {
715 /* early_cpu_to_node() already emits a warning and trace */
716 return;
717 }
868 for_each_online_node(i) { 718 for_each_online_node(i) {
869 unsigned long addr; 719 unsigned long addr;
870 720
@@ -882,7 +732,6 @@ static void __cpuinit numa_set_cpumask(int cpu, int enable)
882 cpumask_clear_cpu(cpu, mask); 732 cpumask_clear_cpu(cpu, mask);
883 } 733 }
884} 734}
885#endif /* CONFIG_NUMA_EMU */
886 735
887void __cpuinit numa_add_cpu(int cpu) 736void __cpuinit numa_add_cpu(int cpu)
888{ 737{
@@ -893,39 +742,5 @@ void __cpuinit numa_remove_cpu(int cpu)
893{ 742{
894 numa_set_cpumask(cpu, 0); 743 numa_set_cpumask(cpu, 0);
895} 744}
896 745# endif /* !CONFIG_DEBUG_PER_CPU_MAPS */
897int __cpu_to_node(int cpu) 746#endif /* CONFIG_NUMA_EMU */
898{
899 if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
900 printk(KERN_WARNING
901 "cpu_to_node(%d): usage too early!\n", cpu);
902 dump_stack();
903 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
904 }
905 return per_cpu(x86_cpu_to_node_map, cpu);
906}
907EXPORT_SYMBOL(__cpu_to_node);
908
909/*
910 * Same function as cpu_to_node() but used if called before the
911 * per_cpu areas are setup.
912 */
913int early_cpu_to_node(int cpu)
914{
915 if (early_per_cpu_ptr(x86_cpu_to_node_map))
916 return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
917
918 if (!cpu_possible(cpu)) {
919 printk(KERN_WARNING
920 "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
921 dump_stack();
922 return NUMA_NO_NODE;
923 }
924 return per_cpu(x86_cpu_to_node_map, cpu);
925}
926
927/*
928 * --------- end of debug versions of the numa functions ---------
929 */
930
931#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
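
The 64-bit numa_cpu_node() introduced above is a two-step lookup: CPU index to local APIC ID via the early x86_cpu_to_apicid map, then APIC ID to node via __apicid_to_node[]. A self-contained model (table contents invented for the example):

	#include <stdio.h>

	#define NR_CPUS        4
	#define MAX_LOCAL_APIC 16
	#define BAD_APICID     0xFFFF
	#define NUMA_NO_NODE   (-1)

	static unsigned short cpu_to_apicid[NR_CPUS] = { 0, 2, 4, BAD_APICID };
	static short __apicid_to_node[MAX_LOCAL_APIC] = {
		[0] = 0, [2] = 0, [4] = 1,	/* unlisted entries default to 0 here */
	};

	static int numa_cpu_node(int cpu)
	{
		unsigned short apicid = cpu_to_apicid[cpu];

		if (apicid != BAD_APICID)
			return __apicid_to_node[apicid];
		return NUMA_NO_NODE;	/* CPU not enumerated (yet) */
	}

	int main(void)
	{
		for (int cpu = 0; cpu < NR_CPUS; cpu++)
			printf("cpu %d -> node %d\n", cpu, numa_cpu_node(cpu));
		return 0;
	}
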
diff --git a/arch/x86/mm/srat_32.c b/arch/x86/mm/srat_32.c
index ae96e7b8051d..48651c6f657d 100644
--- a/arch/x86/mm/srat_32.c
+++ b/arch/x86/mm/srat_32.c
@@ -57,7 +57,7 @@ struct node_memory_chunk_s {
57static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS]; 57static struct node_memory_chunk_s __initdata node_memory_chunk[MAXCHUNKS];
58 58
59static int __initdata num_memory_chunks; /* total number of memory chunks */ 59static int __initdata num_memory_chunks; /* total number of memory chunks */
60static u8 __initdata apicid_to_pxm[MAX_APICID]; 60static u8 __initdata apicid_to_pxm[MAX_LOCAL_APIC];
61 61
62int acpi_numa __initdata; 62int acpi_numa __initdata;
63 63
@@ -254,8 +254,8 @@ int __init get_memcfg_from_srat(void)
254 printk(KERN_DEBUG "Number of memory chunks in system = %d\n", 254 printk(KERN_DEBUG "Number of memory chunks in system = %d\n",
255 num_memory_chunks); 255 num_memory_chunks);
256 256
257 for (i = 0; i < MAX_APICID; i++) 257 for (i = 0; i < MAX_LOCAL_APIC; i++)
258 apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]); 258 set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i]));
259 259
260 for (j = 0; j < num_memory_chunks; j++){ 260 for (j = 0; j < num_memory_chunks; j++){
261 struct node_memory_chunk_s * chunk = &node_memory_chunk[j]; 261 struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
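
The srat_32.c change resizes apicid_to_pxm[] to MAX_LOCAL_APIC, matching the shared __apicid_to_node[] table, and routes the final translation through set_apicid_to_node(). The loop itself flattens two mappings: SRAT supplies apicid -> proximity domain (PXM), and ACPI supplies PXM -> node. A sketch with pxm_to_node() reduced to a stand-in array lookup:

	#include <stdio.h>

	#define MAX_LOCAL_APIC 8

	static unsigned char apicid_to_pxm[MAX_LOCAL_APIC] = { 0, 0, 1, 1 };
	static int node_of_pxm[2] = { 0, 1 };	/* filled by ACPI PXM setup in reality */
	static short apicid_to_node[MAX_LOCAL_APIC];

	/* Stand-in for the real ACPI pxm_to_node() helper. */
	static int pxm_to_node(int pxm)
	{
		return node_of_pxm[pxm];
	}

	int main(void)
	{
		/* Flatten apicid -> PXM -> node, as the hunk's loop does. */
		for (int i = 0; i < MAX_LOCAL_APIC; i++)
			apicid_to_node[i] = pxm_to_node(apicid_to_pxm[i]);

		printf("apicid 2 -> pxm %d -> node %d\n",
		       apicid_to_pxm[2], apicid_to_node[2]);
		return 0;
	}
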
diff --git a/arch/x86/mm/srat_64.c b/arch/x86/mm/srat_64.c
index 4c03e13da138..23498f8b09a2 100644
--- a/arch/x86/mm/srat_64.c
+++ b/arch/x86/mm/srat_64.c
@@ -79,7 +79,7 @@ static __init void bad_srat(void)
79 printk(KERN_ERR "SRAT: SRAT not used.\n"); 79 printk(KERN_ERR "SRAT: SRAT not used.\n");
80 acpi_numa = -1; 80 acpi_numa = -1;
81 for (i = 0; i < MAX_LOCAL_APIC; i++) 81 for (i = 0; i < MAX_LOCAL_APIC; i++)
82 apicid_to_node[i] = NUMA_NO_NODE; 82 set_apicid_to_node(i, NUMA_NO_NODE);
83 for (i = 0; i < MAX_NUMNODES; i++) { 83 for (i = 0; i < MAX_NUMNODES; i++) {
84 nodes[i].start = nodes[i].end = 0; 84 nodes[i].start = nodes[i].end = 0;
85 nodes_add[i].start = nodes_add[i].end = 0; 85 nodes_add[i].start = nodes_add[i].end = 0;
@@ -138,7 +138,7 @@ acpi_numa_x2apic_affinity_init(struct acpi_srat_x2apic_cpu_affinity *pa)
138 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node); 138 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u skipped apicid that is too big\n", pxm, apic_id, node);
139 return; 139 return;
140 } 140 }
141 apicid_to_node[apic_id] = node; 141 set_apicid_to_node(apic_id, node);
142 node_set(node, cpu_nodes_parsed); 142 node_set(node, cpu_nodes_parsed);
143 acpi_numa = 1; 143 acpi_numa = 1;
144 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n", 144 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%04x -> Node %u\n",
@@ -178,7 +178,7 @@ acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
178 return; 178 return;
179 } 179 }
180 180
181 apicid_to_node[apic_id] = node; 181 set_apicid_to_node(apic_id, node);
182 node_set(node, cpu_nodes_parsed); 182 node_set(node, cpu_nodes_parsed);
183 acpi_numa = 1; 183 acpi_numa = 1;
184 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n", 184 printk(KERN_INFO "SRAT: PXM %u -> APIC 0x%02x -> Node %u\n",
@@ -523,7 +523,7 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
523 * node, it must now point to the fake node ID. 523 * node, it must now point to the fake node ID.
524 */ 524 */
525 for (j = 0; j < MAX_LOCAL_APIC; j++) 525 for (j = 0; j < MAX_LOCAL_APIC; j++)
526 if (apicid_to_node[j] == nid && 526 if (__apicid_to_node[j] == nid &&
527 fake_apicid_to_node[j] == NUMA_NO_NODE) 527 fake_apicid_to_node[j] == NUMA_NO_NODE)
528 fake_apicid_to_node[j] = i; 528 fake_apicid_to_node[j] = i;
529 } 529 }
@@ -534,13 +534,13 @@ void __init acpi_fake_nodes(const struct bootnode *fake_nodes, int num_nodes)
534 * value. 534 * value.
535 */ 535 */
536 for (i = 0; i < MAX_LOCAL_APIC; i++) 536 for (i = 0; i < MAX_LOCAL_APIC; i++)
537 if (apicid_to_node[i] != NUMA_NO_NODE && 537 if (__apicid_to_node[i] != NUMA_NO_NODE &&
538 fake_apicid_to_node[i] == NUMA_NO_NODE) 538 fake_apicid_to_node[i] == NUMA_NO_NODE)
539 fake_apicid_to_node[i] = 0; 539 fake_apicid_to_node[i] = 0;
540 540
541 for (i = 0; i < num_nodes; i++) 541 for (i = 0; i < num_nodes; i++)
542 __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i); 542 __acpi_map_pxm_to_node(fake_node_to_pxm_map[i], i);
543 memcpy(apicid_to_node, fake_apicid_to_node, sizeof(apicid_to_node)); 543 memcpy(__apicid_to_node, fake_apicid_to_node, sizeof(__apicid_to_node));
544 544
545 nodes_clear(nodes_parsed); 545 nodes_clear(nodes_parsed);
546 for (i = 0; i < num_nodes; i++) 546 for (i = 0; i < num_nodes; i++)
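
The acpi_fake_nodes() hunks above remap the table for NUMA emulation in two passes: APIC IDs pointing at a real node being split are redirected to the fake node that covers it, and any remaining IDs with a real node but no fake assignment default to fake node 0. A standalone model of those passes (the real-to-fake split used here, node i/2, is made up for the example):

	#include <stdio.h>

	#define MAX_LOCAL_APIC 8
	#define NUMA_NO_NODE   (-1)

	static short __apicid_to_node[MAX_LOCAL_APIC] = {
		0, 0, 1, 1, NUMA_NO_NODE, NUMA_NO_NODE, NUMA_NO_NODE, NUMA_NO_NODE,
	};
	static short fake_apicid_to_node[MAX_LOCAL_APIC] = {
		/* GNU range initializer, as the kernel itself uses */
		[0 ... MAX_LOCAL_APIC - 1] = NUMA_NO_NODE,
	};

	int main(void)
	{
		int num_fake = 4;	/* pretend fake node i was carved from real node i/2 */

		/* Pass 1: first fake node carved from a real node takes its CPUs. */
		for (int i = 0; i < num_fake; i++)
			for (int j = 0; j < MAX_LOCAL_APIC; j++)
				if (__apicid_to_node[j] == i / 2 &&
				    fake_apicid_to_node[j] == NUMA_NO_NODE)
					fake_apicid_to_node[j] = i;

		/* Pass 2: real node but no fake assignment -> fake node 0. */
		for (int i = 0; i < MAX_LOCAL_APIC; i++)
			if (__apicid_to_node[i] != NUMA_NO_NODE &&
			    fake_apicid_to_node[i] == NUMA_NO_NODE)
				fake_apicid_to_node[i] = 0;

		for (int i = 0; i < MAX_LOCAL_APIC; i++)
			printf("apicid %d: real %d fake %d\n", i,
			       __apicid_to_node[i], fake_apicid_to_node[i]);
		return 0;
	}
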