author		Brian Gerst <brgerst@gmail.com>	2009-01-26 22:56:47 -0500
committer	Tejun Heo <tj@kernel.org>	2009-01-26 22:56:47 -0500
commit		6470aff619fbb9dff8dfe8afa5033084cd55ca20
tree		c5734a7afc33ef665b1cacc3a715ab7e0c85787a	/arch/x86/kernel/setup_percpu.c
parent		0d77e7f04d5da160307f4f5c030a171e004f602b
x86: move 64-bit NUMA code
Impact: Code movement, no functional change.
Move the 64-bit NUMA code from setup_percpu.c to numa_64.c
Signed-off-by: Brian Gerst <brgerst@gmail.com>
Signed-off-by: Tejun Heo <tj@kernel.org>
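
[Editor's note: the code being moved implements a two-phase cpu->node mapping: during early boot, writes go to a static array set up via DEFINE_EARLY_PER_CPU, and once setup_per_cpu_areas() has copied the values into real per-CPU storage, the early pointer is set to NULL so later lookups hit the per-CPU copies. A minimal userspace sketch of that handoff follows; the names and types are illustrative stand-ins, not the kernel APIs.]

/* Userspace sketch of the early/late cpu->node mapping handoff.
 * A static "early" array serves writes until the per-CPU storage
 * exists; setup_per_cpu_areas() copies the values over and NULLs
 * the early pointer, mirroring the flow in the patch below. */
#include <stdio.h>
#include <stddef.h>

#define NR_CPUS      4
#define NUMA_NO_NODE (-1)

static int early_cpu_to_node_map[NR_CPUS] = {
	[0 ... NR_CPUS - 1] = NUMA_NO_NODE,    /* GNU C range initializer */
};
static int *early_map = early_cpu_to_node_map; /* NULL once percpu is up */
static int percpu_node[NR_CPUS];               /* stands in for per_cpu() */

static void numa_set_node(int cpu, int node)
{
	if (early_map) {               /* early setting, no percpu area yet */
		early_map[cpu] = node;
		return;
	}
	percpu_node[cpu] = node;       /* late setting: real per-CPU storage */
}

static void setup_per_cpu_areas(void)
{
	for (int cpu = 0; cpu < NR_CPUS; cpu++)
		percpu_node[cpu] = early_map[cpu]; /* copy early values over */
	early_map = NULL;              /* the early static array is gone */
}

int main(void)
{
	numa_set_node(0, 1);           /* lands in the early array */
	setup_per_cpu_areas();
	numa_set_node(1, 0);           /* lands in per-CPU storage */
	printf("cpu0 -> node %d, cpu1 -> node %d\n",
	       percpu_node[0], percpu_node[1]);
	return 0;
}

[This is why the patch NULLs early_per_cpu_ptr(x86_cpu_to_node_map) after the copy loop: numa_set_node() dispatches on whether the early array still exists.]
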
Diffstat (limited to 'arch/x86/kernel/setup_percpu.c')
-rw-r--r--	arch/x86/kernel/setup_percpu.c	237
1 file changed, 5 insertions(+), 232 deletions(-)
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index d0b1476490a7..cb6d622520be 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -51,32 +51,6 @@ DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 
-#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
-#define X86_64_NUMA 1 /* (used later) */
-DEFINE_PER_CPU(int, node_number) = 0;
-EXPORT_PER_CPU_SYMBOL(node_number);
-
-/*
- * Map cpu index to node index
- */
-DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
-EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
-
-/*
- * Which logical CPUs are on which nodes
- */
-cpumask_t *node_to_cpumask_map;
-EXPORT_SYMBOL(node_to_cpumask_map);
-
-/*
- * Setup node_to_cpumask_map
- */
-static void __init setup_node_to_cpumask_map(void);
-
-#else
-static inline void setup_node_to_cpumask_map(void) { }
-#endif
-
 #ifdef CONFIG_X86_64
 
 /* correctly size the local cpu masks */
@@ -163,13 +137,13 @@ void __init setup_per_cpu_areas(void)
 			early_per_cpu_map(x86_cpu_to_apicid, cpu);
 		per_cpu(x86_bios_cpu_apicid, cpu) =
 			early_per_cpu_map(x86_bios_cpu_apicid, cpu);
-#ifdef X86_64_NUMA
-		per_cpu(x86_cpu_to_node_map, cpu) =
-			early_per_cpu_map(x86_cpu_to_node_map, cpu);
-#endif
 #ifdef CONFIG_X86_64
 		per_cpu(irq_stack_ptr, cpu) =
 			per_cpu(irq_stack_union.irq_stack, cpu) + IRQ_STACK_SIZE - 64;
+#ifdef CONFIG_NUMA
+		per_cpu(x86_cpu_to_node_map, cpu) =
+			early_per_cpu_map(x86_cpu_to_node_map, cpu);
+#endif
 		/*
 		 * Up to this point, CPU0 has been using .data.init
 		 * area. Reload %gs offset for CPU0.
@@ -184,7 +158,7 @@ void __init setup_per_cpu_areas(void)
 	/* indicate the early static arrays will soon be gone */
 	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
 	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
-#ifdef X86_64_NUMA
+#if defined(CONFIG_X86_64) && defined(CONFIG_NUMA)
 	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
 #endif
 
@@ -197,204 +171,3 @@ void __init setup_per_cpu_areas(void)
 
 #endif
 
-#ifdef X86_64_NUMA
-
-/*
- * Allocate node_to_cpumask_map based on number of available nodes
- * Requires node_possible_map to be valid.
- *
- * Note: node_to_cpumask() is not valid until after this is done.
- * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
- */
-static void __init setup_node_to_cpumask_map(void)
-{
-	unsigned int node, num = 0;
-	cpumask_t *map;
-
-	/* setup nr_node_ids if not done yet */
-	if (nr_node_ids == MAX_NUMNODES) {
-		for_each_node_mask(node, node_possible_map)
-			num = node;
-		nr_node_ids = num + 1;
-	}
-
-	/* allocate the map */
-	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
-	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);
-
-	pr_debug("Node to cpumask map at %p for %d nodes\n",
-		 map, nr_node_ids);
-
-	/* node_to_cpumask() will now work */
-	node_to_cpumask_map = map;
-}
-
-void __cpuinit numa_set_node(int cpu, int node)
-{
-	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
-
-	/* early setting, no percpu area yet */
-	if (cpu_to_node_map) {
-		cpu_to_node_map[cpu] = node;
-		return;
-	}
-
-#ifdef CONFIG_DEBUG_PER_CPU_MAPS
-	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
-		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
-		dump_stack();
-		return;
-	}
-#endif
-	per_cpu(x86_cpu_to_node_map, cpu) = node;
-
-	if (node != NUMA_NO_NODE)
-		per_cpu(node_number, cpu) = node;
-}
-
-void __cpuinit numa_clear_node(int cpu)
-{
-	numa_set_node(cpu, NUMA_NO_NODE);
-}
-
-#ifndef CONFIG_DEBUG_PER_CPU_MAPS
-
-void __cpuinit numa_add_cpu(int cpu)
-{
-	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-
-void __cpuinit numa_remove_cpu(int cpu)
-{
-	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
-}
-
-#else /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-/*
- * --------- debug versions of the numa functions ---------
- */
-static void __cpuinit numa_set_cpumask(int cpu, int enable)
-{
-	int node = early_cpu_to_node(cpu);
-	cpumask_t *mask;
-	char buf[64];
-
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_ERR "node_to_cpumask_map NULL\n");
-		dump_stack();
-		return;
-	}
-
-	mask = &node_to_cpumask_map[node];
-	if (enable)
-		cpu_set(cpu, *mask);
-	else
-		cpu_clear(cpu, *mask);
-
-	cpulist_scnprintf(buf, sizeof(buf), mask);
-	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
-		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
-}
-
-void __cpuinit numa_add_cpu(int cpu)
-{
-	numa_set_cpumask(cpu, 1);
-}
-
-void __cpuinit numa_remove_cpu(int cpu)
-{
-	numa_set_cpumask(cpu, 0);
-}
-
-int cpu_to_node(int cpu)
-{
-	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
-		printk(KERN_WARNING
-			"cpu_to_node(%d): usage too early!\n", cpu);
-		dump_stack();
-		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-	}
-	return per_cpu(x86_cpu_to_node_map, cpu);
-}
-EXPORT_SYMBOL(cpu_to_node);
-
-/*
- * Same function as cpu_to_node() but used if called before the
- * per_cpu areas are setup.
- */
-int early_cpu_to_node(int cpu)
-{
-	if (early_per_cpu_ptr(x86_cpu_to_node_map))
-		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
-
-	if (!per_cpu_offset(cpu)) {
-		printk(KERN_WARNING
-			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
-		dump_stack();
-		return NUMA_NO_NODE;
-	}
-	return per_cpu(x86_cpu_to_node_map, cpu);
-}
-
-
-/* empty cpumask */
-static const cpumask_t cpu_mask_none;
-
-/*
- * Returns a pointer to the bitmask of CPUs on Node 'node'.
- */
-const cpumask_t *cpumask_of_node(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
-			node);
-		dump_stack();
-		return (const cpumask_t *)&cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return &cpu_mask_none;
-	}
-	return &node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(cpumask_of_node);
-
-/*
- * Returns a bitmask of CPUs on Node 'node'.
- *
- * Side note: this function creates the returned cpumask on the stack
- * so with a high NR_CPUS count, excessive stack space is used. The
- * node_to_cpumask_ptr function should be used whenever possible.
- */
-cpumask_t node_to_cpumask(int node)
-{
-	if (node_to_cpumask_map == NULL) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
-		dump_stack();
-		return cpu_online_map;
-	}
-	if (node >= nr_node_ids) {
-		printk(KERN_WARNING
-			"node_to_cpumask(%d): node > nr_node_ids(%d)\n",
-			node, nr_node_ids);
-		dump_stack();
-		return cpu_mask_none;
-	}
-	return node_to_cpumask_map[node];
-}
-EXPORT_SYMBOL(node_to_cpumask);
-
-/*
- * --------- end of debug versions of the numa functions ---------
- */
-
-#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
-
-#endif /* X86_64_NUMA */
-
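
[Editor's note: most of what lands in numa_64.c is the node_to_cpumask_map machinery. Its sizing rule, visible in the removed setup_node_to_cpumask_map() above, is that nr_node_ids becomes the highest possible node index plus one, with one cpumask allocated per node. A userspace approximation of that logic follows; calloc() stands in for alloc_bootmem_low(), and the toy cpumask type and helper parameters are illustrative, not the kernel's.]

/* Userspace sketch of setup_node_to_cpumask_map()'s sizing logic:
 * nr_node_ids = highest possible node + 1, and the map holds one
 * CPU bitmask per node, so sparse node numbering still works. */
#include <stdio.h>
#include <stdlib.h>

#define MAX_NUMNODES 64
#define NR_CPUS      8

typedef struct { unsigned long bits; } cpumask_t;  /* toy cpumask */

static cpumask_t *node_to_cpumask_map;
static int nr_node_ids = MAX_NUMNODES;

static void setup_node_to_cpumask_map(const int *possible, int n)
{
	int num = 0;

	/* set nr_node_ids if not done yet: highest possible node + 1 */
	if (nr_node_ids == MAX_NUMNODES) {
		for (int i = 0; i < n; i++)
			if (possible[i] > num)
				num = possible[i];
		nr_node_ids = num + 1;
	}

	/* allocate the map; node_to_cpumask lookups work from here on */
	node_to_cpumask_map = calloc(nr_node_ids, sizeof(cpumask_t));
	printf("node_to_cpumask_map for %d nodes\n", nr_node_ids);
}

int main(void)
{
	int possible_nodes[] = { 0, 1, 3 };      /* sparse node numbering */

	setup_node_to_cpumask_map(possible_nodes, 3);
	node_to_cpumask_map[1].bits |= 1UL << 2; /* ~ numa_add_cpu(2), node 1 */
	return 0;
}
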