author     Mike Travis <travis@sgi.com>    2009-01-13 06:41:34 -0500
committer  Ingo Molnar <mingo@elte.hu>     2009-01-16 08:18:23 -0500
commit     c90aa894f0240084f2c6e42e2333b211d6cfe2b2 (patch)
tree       1035a49fa35c950c7340dc018be8cfba93942dfe /arch/x86/kernel/setup_percpu.c
parent     f10fcd47120e80f66665567dbe17f5071c7aef52 (diff)
x86: cleanup early setup_percpu references
[ Based on original patch from Christoph Lameter and Mike Travis. ]
* Ruggedize some calls in setup_percpu.c to prevent mishaps
in early calls, particularly for non-critical functions.
* Clean up DEBUG_PER_CPU_MAPS usages and some comments.
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
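
The new DBG() helper added by the first hunk below is the classic config-gated debug macro: with CONFIG_DEBUG_PER_CPU_MAPS enabled it expands to a printk(KERN_DEBUG ...), otherwise it expands to nothing and costs nothing at runtime. As a minimal standalone sketch of the same pattern (hypothetical userspace stand-ins: a DEBUG_MAPS compile flag for the config option and fprintf(stderr, ...) for printk):

    #include <stdio.h>

    /* Stand-in for CONFIG_DEBUG_PER_CPU_MAPS: build with -DDEBUG_MAPS
     * to get the debug output, leave it off for a silent binary. */
    #ifdef DEBUG_MAPS
    # define DBG(x...) fprintf(stderr, x)  /* printk(KERN_DEBUG x) in the kernel */
    #else
    # define DBG(x...)                     /* expands to nothing when disabled */
    #endif

    int main(void)
    {
        int cpu = 0;
        void *ptr = &cpu;

        /* Same shape as the new call in setup_per_cpu_areas(): */
        DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
        return 0;
    }

(The "x..." named variadic form is the GNU extension the kernel itself uses; it lets the disabled branch swallow the arguments entirely.)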
Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
 arch/x86/kernel/setup_percpu.c | 56 ++++++++++++++++++++++++++++++++++++++++----------------
 1 file changed, 40 insertions(+), 16 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index bf63de72b643..56c63ac62b10 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -15,6 +15,12 @@
 #include <asm/highmem.h>
 #include <asm/cpumask.h>
 
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+# define DBG(x...) printk(KERN_DEBUG x)
+#else
+# define DBG(x...)
+#endif
+
 #ifdef CONFIG_X86_LOCAL_APIC
 unsigned int num_processors;
 unsigned disabled_cpus __cpuinitdata;
@@ -27,31 +33,39 @@ unsigned int max_physical_apicid;
 physid_mask_t phys_cpu_present_map;
 #endif
 
-/* map cpu index to physical APIC ID */
+/*
+ * Map cpu index to physical APIC ID
+ */
 DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
 DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
 #if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define X86_64_NUMA 1
+#define X86_64_NUMA 1	/* (used later) */
 
-/* map cpu index to node index */
+/*
+ * Map cpu index to node index
+ */
 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
 
-/* which logical CPUs are on which nodes */
+/*
+ * Which logical CPUs are on which nodes
+ */
 cpumask_t *node_to_cpumask_map;
 EXPORT_SYMBOL(node_to_cpumask_map);
 
-/* setup node_to_cpumask_map */
+/*
+ * Setup node_to_cpumask_map
+ */
 static void __init setup_node_to_cpumask_map(void);
 
 #else
 static inline void setup_node_to_cpumask_map(void) { }
 #endif
 
-#if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
 /*
  * Copy data used in early init routines from the initial arrays to the
  * per cpu data areas.  These arrays then become expendable and the
@@ -200,6 +214,8 @@ void __init setup_per_cpu_areas(void)
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
+
+		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
 	}
 
 	/* Setup percpu data maps */
@@ -221,6 +237,7 @@ void __init setup_per_cpu_areas(void)
  * Requires node_possible_map to be valid.
  *
  * Note: node_to_cpumask() is not valid until after this is done.
+ * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
  */
 static void __init setup_node_to_cpumask_map(void)
 {
@@ -236,6 +253,7 @@ static void __init setup_node_to_cpumask_map(void)
 
 	/* allocate the map */
 	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
 
 	pr_debug("Node to cpumask map at %p for %d nodes\n",
 		 map, nr_node_ids);
@@ -248,17 +266,23 @@ void __cpuinit numa_set_node(int cpu, int node)
 {
 	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
 
-	if (cpu_pda(cpu) && node != NUMA_NO_NODE)
-		cpu_pda(cpu)->nodenumber = node;
-
-	if (cpu_to_node_map)
+	/* early setting, no percpu area yet */
+	if (cpu_to_node_map) {
 		cpu_to_node_map[cpu] = node;
+		return;
+	}
 
-	else if (per_cpu_offset(cpu))
-		per_cpu(x86_cpu_to_node_map, cpu) = node;
+#ifdef CONFIG_DEBUG_PER_CPU_MAPS
+	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
+		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
+		dump_stack();
+		return;
+	}
+#endif
+	per_cpu(x86_cpu_to_node_map, cpu) = node;
 
-	else
-		pr_debug("Setting node for non-present cpu %d\n", cpu);
+	if (node != NUMA_NO_NODE)
+		cpu_pda(cpu)->nodenumber = node;
 }
 
 void __cpuinit numa_clear_node(int cpu)
@@ -275,7 +299,7 @@ void __cpuinit numa_add_cpu(int cpu)
 
 void __cpuinit numa_remove_cpu(int cpu)
 {
-	cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
+	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
 }
 
 #else /* CONFIG_DEBUG_PER_CPU_MAPS */
@@ -285,7 +309,7 @@ void __cpuinit numa_remove_cpu(int cpu)
  */
 static void __cpuinit numa_set_cpumask(int cpu, int enable)
 {
-	int node = cpu_to_node(cpu);
+	int node = early_cpu_to_node(cpu);
 	cpumask_t *mask;
 	char buf[64];
 
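
To make the control flow of the reworked numa_set_node() easier to follow: first try the early init-data map, and only once the percpu areas exist (in debug builds, only after validating the cpu number) touch per-cpu storage. A rough self-contained sketch of that ordering, where NR_CPUS, the two arrays, and fprintf() are hypothetical stand-ins for the real nr_cpu_ids, the early_per_cpu_ptr()/per_cpu() machinery, and printk()/dump_stack():

    #include <stdio.h>

    #define NR_CPUS      4
    #define NUMA_NO_NODE (-1)

    /* Stand-ins for the kernel's early init-data map and percpu storage. */
    static int init_cpu_to_node[NR_CPUS];
    static int *early_map = init_cpu_to_node; /* NULL once percpu is live */
    static int percpu_cpu_to_node[NR_CPUS];

    static void numa_set_node(int cpu, int node)
    {
        /* Early setting, no percpu area yet: park the value in the
         * init-data array and return immediately. */
        if (early_map) {
            early_map[cpu] = node;
            return;
        }

        /* Debug-build check: reject a bogus cpu number before any
         * percpu access, as the CONFIG_DEBUG_PER_CPU_MAPS hunk does. */
        if (cpu < 0 || cpu >= NR_CPUS) {
            fprintf(stderr, "numa_set_node: invalid cpu# (%d)\n", cpu);
            return;
        }
        percpu_cpu_to_node[cpu] = node;
    }

    int main(void)
    {
        numa_set_node(1, 0);  /* early path: hits the static array   */
        early_map = NULL;     /* pretend setup_per_cpu_areas() ran   */
        numa_set_node(1, 0);  /* late path: hits the percpu stand-in */
        numa_set_node(99, 0); /* caught by the range check           */
        return 0;
    }

The early path returning immediately is what makes the function safe to call before setup_per_cpu_areas() has run, which is the "ruggedizing" the commit message refers to.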