path: root/arch/x86/kernel/setup_percpu.c
author     Mike Travis <travis@sgi.com>    2009-01-13 06:41:34 -0500
committer  Ingo Molnar <mingo@elte.hu>     2009-01-16 08:18:23 -0500
commit     c90aa894f0240084f2c6e42e2333b211d6cfe2b2 (patch)
tree       1035a49fa35c950c7340dc018be8cfba93942dfe /arch/x86/kernel/setup_percpu.c
parent     f10fcd47120e80f66665567dbe17f5071c7aef52 (diff)
x86: cleanup early setup_percpu references
[ Based on original patch from Christoph Lameter and Mike Travis. ]

  * Ruggedize some calls in setup_percpu.c to prevent mishaps in early
    calls, particularly for non-critical functions.

  * Cleanup DEBUG_PER_CPU_MAPS usages and some comments.

Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
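For context on the DBG() macro this patch introduces: it is the usual compile-time debug switch, expanding to printk(KERN_DEBUG ...) only when CONFIG_DEBUG_PER_CPU_MAPS is enabled and to nothing otherwise, so the new calls in setup_per_cpu_areas() and setup_node_to_cpumask_map() cost nothing in non-debug builds. A minimal userspace sketch of the same pattern follows; the DEBUG_PER_CPU_MAPS define and fprintf stand in for the kernel's config option and printk and are illustrative only:

/* Sketch of the DBG() compile-time switch used by this patch.
 * Build with -DDEBUG_PER_CPU_MAPS to see the output; without it the
 * DBG() calls compile away entirely. */
#include <stdio.h>

#ifdef DEBUG_PER_CPU_MAPS
# define DBG(x...) fprintf(stderr, x)	/* kernel uses printk(KERN_DEBUG x) */
#else
# define DBG(x...)			/* expands to nothing when debugging is off */
#endif

int main(void)
{
	void *ptr = (void *)0x1000;	/* stand-in for a per-cpu area pointer */
	int cpu = 3;

	/* Mirrors the new call added in setup_per_cpu_areas() */
	DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	return 0;
}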
Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
-rw-r--r--  arch/x86/kernel/setup_percpu.c | 56
1 file changed, 40 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index bf63de72b643..56c63ac62b10 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -15,6 +15,12 @@
 #include <asm/highmem.h>
 #include <asm/cpumask.h>
 
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
+#endif
+
 #ifdef CONFIG_X86_LOCAL_APIC
 unsigned int num_processors;
 unsigned disabled_cpus __cpuinitdata;
@@ -27,31 +33,39 @@ unsigned int max_physical_apicid;
 physid_mask_t phys_cpu_present_map;
 #endif
 
-/* map cpu index to physical APIC ID */
+/*
+ * Map cpu index to physical APIC ID
+ */
 DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define X86_64_NUMA 1
+#define X86_64_NUMA 1 /* (used later) */
 
-/* map cpu index to node index */
+/*
+ * Map cpu index to node index
+ */
 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
 
-/* which logical CPUs are on which nodes */
+/*
+ * Which logical CPUs are on which nodes
+ */
 cpumask_t *node_to_cpumask_map;
 EXPORT_SYMBOL(node_to_cpumask_map);
 
-/* setup node_to_cpumask_map */
+/*
+ * Setup node_to_cpumask_map
+ */
 static void __init setup_node_to_cpumask_map(void);
 
 #else
 static inline void setup_node_to_cpumask_map(void) { }
 #endif
 
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 /*
  * Copy data used in early init routines from the initial arrays to the
  * per cpu data areas.  These arrays then become expendable and the
@@ -200,6 +214,8 @@ void __init setup_per_cpu_areas(void)
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
 	}
 
 	/* Setup percpu data maps */
@@ -221,6 +237,7 @@ void __init setup_per_cpu_areas(void)
  * Requires node_possible_map to be valid.
  *
  * Note: node_to_cpumask() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
  */
 static void __init setup_node_to_cpumask_map(void)
 {
@@ -236,6 +253,7 @@ static void __init setup_node_to_cpumask_map(void)
 
 	/* allocate the map */
 	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
 
 	pr_debug("Node to cpumask map at %p for %d nodes\n",
 		 map, nr_node_ids);
@@ -248,17 +266,23 @@ void __cpuinit numa_set_node(int cpu, int node)
 {
 	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
 
-	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
-		cpu_pda(cpu)->nodenumber = node;
-
-	if (cpu_to_node_map)
+	/* early setting, no percpu area yet */
+	if (cpu_to_node_map) {
 		cpu_to_node_map[cpu] = node;
+		return;
+	}
 
-	else if (per_cpu_offset(cpu))
-		per_cpu(x86_cpu_to_node_map, cpu) = node;
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
+		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+		dump_stack();
+		return;
+	}
+#endif
+	per_cpu(x86_cpu_to_node_map, cpu) = node;
 
-	else
-		pr_debug("Setting node for non-present cpu %d\n", cpu);
+	if (node != NUMA_NO_NODE)
+		cpu_pda(cpu)->nodenumber = node;
 }
 
 void __cpuinit numa_clear_node(int cpu)
@@ -275,7 +299,7 @@ void __cpuinit numa_add_cpu(int cpu)
 
 void __cpuinit numa_remove_cpu(int cpu)
 {
-	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
 
 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
@@ -285,7 +309,7 @@ void __cpuinit numa_remove_cpu(int cpu)
  */
 static void __cpuinit numa_set_cpumask(int cpu, int enable)
 {
-	int node = cpu_to_node(cpu);
+	int node = early_cpu_to_node(cpu);
 	cpumask_t *mask;
 	char buf[64];
 