author    Mike Travis <travis@sgi.com>        2008-05-12 15:21:12 -0400
committer Ingo Molnar <mingo@elte.hu>         2008-07-08 05:31:20 -0400
commit    23ca4bba3e20c6c3cb11c1bb0ab4770b724d39ac (patch)
tree      39ba5f7705e48717d7a6f2621b8ca7e7015c9802 /arch/x86/kernel/setup.c
parent    1184dc2ffe2c8fb9afb766d870850f2c3165ef25 (diff)
x86: cleanup early per cpu variables/accesses v4
* Introduce a new PER_CPU macro called "EARLY_PER_CPU".  This is
  used by some per_cpu variables that are initialized and accessed
  before there are per_cpu areas allocated.

  ["Early" in respect to per_cpu variables is "earlier than the
  per_cpu areas have been setup".]

  This patchset adds these new macros:

	DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)
	EXPORT_EARLY_PER_CPU_SYMBOL(_name)
	DECLARE_EARLY_PER_CPU(_type, _name)

	early_per_cpu_ptr(_name)
	early_per_cpu_map(_name, _idx)
	early_per_cpu(_name, _cpu)

  The DEFINE macro defines the per_cpu variable as well as the early
  map and pointer.  It also initializes the per_cpu variable and map
  elements to "_initvalue".  The early_* macros provide access to
  the initial map (usually setup during system init) and the early
  pointer.  This pointer is initialized to point to the early map
  but is then NULL'ed when the actual per_cpu areas are setup.  After
  that the per_cpu variable is the correct access to the variable.

  The early_per_cpu() macro is not very efficient but does show how
  to access the variable if you have a function that can be called
  both "early" and "late".  It tests the early ptr to be NULL, and
  if not then it's still valid.  Otherwise, the per_cpu variable is
  used instead:

	#define early_per_cpu(_name, _cpu)			\
		(early_per_cpu_ptr(_name) ?			\
			early_per_cpu_ptr(_name)[_cpu] :	\
			per_cpu(_name, _cpu))

  A better method is to actually check the pointer manually.  In the
  case below, numa_set_node can be called both "early" and "late":

	void __cpuinit numa_set_node(int cpu, int node)
	{
	    int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	    if (cpu_to_node_map)
		    cpu_to_node_map[cpu] = node;
	    else
		    per_cpu(x86_cpu_to_node_map, cpu) = node;
	}

* Add a flag "arch_provides_topology_pointers" that indicates pointers
  to topology cpumask_t maps are available.  Otherwise, use the function
  returning the cpumask_t value.  This is useful if cpumask_t set size
  is very large to avoid copying data on to/off of the stack.

* The coverage of CONFIG_DEBUG_PER_CPU_MAPS has been increased while
  the non-debug case has been optimized a bit.

* Remove an unreferenced compiler warning in drivers/base/topology.c

* Clean up #ifdef in setup.c

For inclusion into sched-devel/latest tree.

Based on:
	git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
    +   sched-devel/latest  .../mingo/linux-2.6-sched-devel.git

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
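[Editor's note] The EARLY_PER_CPU macro definitions themselves live in the
x86 percpu headers and are not part of the setup.c diff shown below.  As a
rough sketch only, assuming the _name##_early_map/_name##_early_ptr naming
implied by the early_per_cpu_ptr() and early_per_cpu_map() accessors (the
names are inferred from their usage in the diff, not confirmed by it), the
DEFINE side could look like this:

	/*
	 * Sketch, not the patch's verbatim definition: an early map
	 * covering all NR_CPUS slots, plus a pointer that starts out
	 * aimed at the map and is NULLed once the real per_cpu areas
	 * exist, so readers can tell which copy is current.
	 */
	#define DEFINE_EARLY_PER_CPU(_type, _name, _initvalue)		   \
		DEFINE_PER_CPU(_type, _name) = _initvalue;		   \
		__typeof__(_type) _name##_early_map[NR_CPUS] __initdata = \
				{ [0 ... NR_CPUS-1] = _initvalue };	   \
		__typeof__(_type) *_name##_early_ptr = _name##_early_map

	/* Accessors: early map while booting, per_cpu variable after. */
	#define early_per_cpu_ptr(_name)	(_name##_early_ptr)
	#define early_per_cpu_map(_name, _idx)	(_name##_early_map[_idx])

With definitions of this shape, early_per_cpu_ptr(x86_cpu_to_node_map) in
the diff below expands to a plain pointer that setup_per_cpu_maps() sets
to NULL once the per_cpu areas are live.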
Diffstat (limited to 'arch/x86/kernel/setup.c')
 -rw-r--r--  arch/x86/kernel/setup.c | 96
 1 file changed, 84 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 6f80b852a196..03caa8e4351f 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -19,13 +19,23 @@ unsigned disabled_cpus __cpuinitdata;
 unsigned int boot_cpu_physical_apicid = -1U;
 EXPORT_SYMBOL(boot_cpu_physical_apicid);
 
-DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
-EXPORT_PER_CPU_SYMBOL(x86_cpu_to_apicid);
-
 /* Bitmask of physically existing CPUs */
 physid_mask_t phys_cpu_present_map;
 #endif
 
+/* map cpu index to physical APIC ID */
+DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
+DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
+
+#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
+#define X86_64_NUMA 1
+
+DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
+EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+#endif
+
 #if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
 /*
  * Copy data used in early init routines from the initial arrays to the
@@ -37,20 +47,21 @@ static void __init setup_per_cpu_maps(void)
 	int cpu;
 
 	for_each_possible_cpu(cpu) {
-		per_cpu(x86_cpu_to_apicid, cpu) = x86_cpu_to_apicid_init[cpu];
+		per_cpu(x86_cpu_to_apicid, cpu) =
+				early_per_cpu_map(x86_cpu_to_apicid, cpu);
 		per_cpu(x86_bios_cpu_apicid, cpu) =
-					x86_bios_cpu_apicid_init[cpu];
-#ifdef CONFIG_NUMA
+				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
+#ifdef X86_64_NUMA
 		per_cpu(x86_cpu_to_node_map, cpu) =
-					x86_cpu_to_node_map_init[cpu];
+				early_per_cpu_map(x86_cpu_to_node_map, cpu);
 #endif
 	}
 
 	/* indicate the early static arrays will soon be gone */
-	x86_cpu_to_apicid_early_ptr = NULL;
-	x86_bios_cpu_apicid_early_ptr = NULL;
-#ifdef CONFIG_NUMA
-	x86_cpu_to_node_map_early_ptr = NULL;
+	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
+	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
+#ifdef X86_64_NUMA
+	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif
 }
 
@@ -109,7 +120,8 @@ void __init setup_per_cpu_areas(void)
 		if (!node_online(node) || !NODE_DATA(node)) {
 			ptr = alloc_bootmem_pages(size);
 			printk(KERN_INFO
-			       "cpu %d has no node or node-local memory\n", i);
+			       "cpu %d has no node %d or node-local memory\n",
+				i, node);
 		}
 		else
 			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
@@ -137,3 +149,63 @@ void __init setup_per_cpu_areas(void)
 }
 
 #endif
+
+#ifdef X86_64_NUMA
+void __cpuinit numa_set_node(int cpu, int node)
+{
+	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
+
+	if (cpu_to_node_map)
+		cpu_to_node_map[cpu] = node;
+
+	else if (per_cpu_offset(cpu))
+		per_cpu(x86_cpu_to_node_map, cpu) = node;
+
+	else
+		Dprintk(KERN_INFO "Setting node for non-present cpu %d\n", cpu);
+}
+
+void __cpuinit numa_clear_node(int cpu)
+{
+	numa_set_node(cpu, NUMA_NO_NODE);
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+}
+#endif /* X86_64_NUMA */
+
+#if defined(CONFIG_DEBUG_PER_CPU_MAPS) && defined(CONFIG_X86_64)
+
+int cpu_to_node(int cpu)
+{
+	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
+		printk(KERN_WARNING
+			"cpu_to_node(%d): usage too early!\n", cpu);
+		dump_stack();
+		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+	}
+	return per_cpu(x86_cpu_to_node_map, cpu);
+}
+EXPORT_SYMBOL(cpu_to_node);
+
+int early_cpu_to_node(int cpu)
+{
+	if (early_per_cpu_ptr(x86_cpu_to_node_map))
+		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
+
+	if (!per_cpu_offset(cpu)) {
+		printk(KERN_WARNING
+			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
+		dump_stack();
+		return NUMA_NO_NODE;
+	}
+	return per_cpu(x86_cpu_to_node_map, cpu);
+}
+#endif
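[Editor's note] The debug early_cpu_to_node() above is safe at any point in
boot: it consults the early map while that still exists, then the per_cpu
variable, and returns NUMA_NO_NODE if neither is usable yet.  A hypothetical
caller (not part of this patch; pick_alloc_node and its fallback policy are
illustrative only) might lean on that behavior like so:

	/*
	 * Illustrative sketch: pick a node to allocate from for a cpu,
	 * falling back to the current node when the cpu has no mapping
	 * yet (early_cpu_to_node() returns NUMA_NO_NODE in that case)
	 * or when its node is not online.
	 */
	static int __init pick_alloc_node(int cpu)
	{
		int node = early_cpu_to_node(cpu);

		if (node == NUMA_NO_NODE || !node_online(node))
			node = numa_node_id();

		return node;
	}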