author     Mike Travis <travis@sgi.com>    2008-05-12 15:21:12 -0400
committer  Ingo Molnar <mingo@elte.hu>     2008-07-08 05:31:24 -0400
commit     9f248bde9d47cc177011198c9a15fb339b9f3215 (patch)
tree       a6bb5bff1d8d786370b061eeb0958b413b941d41 /arch/x86/kernel/setup.c
parent     7891a24e1ee50c96896c0cf7da216a8e7b573ca5 (diff)
x86: remove the static 256k node_to_cpumask_map
* Consolidate node_to_cpumask operations and remove the static 256 KB
  node_to_cpumask_map. This is done by allocating the node_to_cpumask_map
  array after the number of possible nodes (nr_node_ids) is known
  (see the first sketch after this list).
* Debug printouts when CONFIG_DEBUG_PER_CPU_MAPS is active have been
  expanded. They now report a fault when node_to_cpumask() or
  node_to_cpumask_ptr() is called before the map has been set up
  (see the second sketch after this list).
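
[Editor's note] The "256k" in the subject follows from the static sizing: a cpumask_t holds NR_CPUS bits, and the old table reserved MAX_NUMNODES entries regardless of how many nodes the machine actually has; with e.g. NR_CPUS=4096 and MAX_NUMNODES=512 (illustrative config values, not taken from this patch) that is 512 * 512 bytes = 256 KB. The following is a minimal userspace C sketch of the allocate-after-sizing approach that setup_node_to_cpumask_map() takes in the diff below; boot_alloc() is a hypothetical stand-in for the kernel's alloc_bootmem_low(), and the types are simplified.

/* sketch.c - size node_to_cpumask_map at boot time instead of statically */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS      4096                /* illustrative config values only */
#define MAX_NUMNODES 512

typedef struct {
        unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
} cpumask_t;

/* stand-in for alloc_bootmem_low(): zeroed early allocation */
static void *boot_alloc(size_t size)
{
        return calloc(1, size);
}

static cpumask_t *node_to_cpumask_map;   /* was: a static cpumask_t [MAX_NUMNODES] */
static int nr_node_ids = MAX_NUMNODES;   /* trimmed once possible nodes are known */

static void setup_node_to_cpumask_map(int highest_possible_node)
{
        /* derive nr_node_ids from the possible-node information ... */
        if (nr_node_ids == MAX_NUMNODES)
                nr_node_ids = highest_possible_node + 1;

        /* ... then allocate only nr_node_ids entries */
        node_to_cpumask_map = boot_alloc(nr_node_ids * sizeof(cpumask_t));

        printf("node_to_cpumask_map at %p for %d nodes: %zu bytes (static map: %zu)\n",
               (void *)node_to_cpumask_map, nr_node_ids,
               nr_node_ids * sizeof(cpumask_t),
               (size_t)MAX_NUMNODES * sizeof(cpumask_t));
}

int main(void)
{
        setup_node_to_cpumask_map(3);    /* e.g. a 4-node machine */
        return 0;
}

With these illustrative values, a 4-node machine needs 2 KB instead of 256 KB, while the map is still indexed the same way.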
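
[Editor's note] Under CONFIG_DEBUG_PER_CPU_MAPS the patch swaps in checked versions of numa_add_cpu()/numa_remove_cpu() and of the node_to_cpumask() accessors that warn and fall back rather than dereference a NULL map. A hedged sketch of that pattern, again as plain userspace C: node_to_cpumask_ptr_dbg() is a hypothetical stand-in for the patch's _node_to_cpumask_ptr(), and fprintf()/printf() stand in for printk() and dump_stack().

/* debug_sketch.c - checked accessor pattern used under CONFIG_DEBUG_PER_CPU_MAPS */
#include <stdio.h>

typedef struct {
        unsigned long bits[64];          /* simplified 4096-bit mask */
} cpumask_t;

static cpumask_t *node_to_cpumask_map;   /* NULL until the map is allocated */
static cpumask_t cpu_online_map;         /* safe fallback, as in the patch */

/* hypothetical stand-in for the patch's _node_to_cpumask_ptr() */
static cpumask_t *node_to_cpumask_ptr_dbg(int node)
{
        if (node_to_cpumask_map == NULL) {
                /* the real code warns via printk(KERN_WARNING ...) and dump_stack() */
                fprintf(stderr,
                        "node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n", node);
                return &cpu_online_map;  /* degrade gracefully, do not crash */
        }
        return &node_to_cpumask_map[node];
}

int main(void)
{
        /* simulate a call made before setup_node_to_cpumask_map() has run */
        cpumask_t *mask = node_to_cpumask_ptr_dbg(0);

        printf("fell back to %s\n",
               mask == &cpu_online_map ? "cpu_online_map" : "the node map");
        return 0;
}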
For inclusion into sched-devel/latest tree.
Based on:
git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux-2.6.git
+ sched-devel/latest .../mingo/linux-2.6-sched-devel.git
Signed-off-by: Mike Travis <travis@sgi.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Diffstat (limited to 'arch/x86/kernel/setup.c')
-rw-r--r--  arch/x86/kernel/setup.c  132
1 files changed, 127 insertions, 5 deletions
diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
index 0dff17ee3d73..913af838c3c5 100644
--- a/arch/x86/kernel/setup.c
+++ b/arch/x86/kernel/setup.c
@@ -35,6 +35,16 @@ EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
 /* map cpu index to node index */
 DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
 EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);
+
+/* which logical CPUs are on which nodes */
+cpumask_t *node_to_cpumask_map;
+EXPORT_SYMBOL(node_to_cpumask_map);
+
+/* setup node_to_cpumask_map */
+static void __init setup_node_to_cpumask_map(void);
+
+#else
+static inline void setup_node_to_cpumask_map(void) { }
 #endif
 
 #if defined(CONFIG_HAVE_SETUP_PER_CPU_AREA) && defined(CONFIG_X86_SMP)
@@ -140,11 +150,15 @@ void __init setup_per_cpu_areas(void)
        }
 
        nr_cpu_ids = highest_cpu + 1;
-       printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d\n", NR_CPUS, nr_cpu_ids);
+       printk(KERN_DEBUG "NR_CPUS: %d, nr_cpu_ids: %d, nr_node_ids %d\n",
+               NR_CPUS, nr_cpu_ids, nr_node_ids);
 
        /* Setup percpu data maps */
        setup_per_cpu_maps();
 
+       /* Setup node to cpumask map */
+       setup_node_to_cpumask_map();
+
        /* Setup cpumask_of_cpu map */
        setup_cpumask_of_cpu();
 }
@@ -152,6 +166,35 @@ void __init setup_per_cpu_areas(void)
 #endif
 
 #ifdef X86_64_NUMA
+
+/*
+ * Allocate node_to_cpumask_map based on number of available nodes
+ * Requires node_possible_map to be valid.
+ *
+ * Note: node_to_cpumask() is not valid until after this is done.
+ */
+static void __init setup_node_to_cpumask_map(void)
+{
+       unsigned int node, num = 0;
+       cpumask_t *map;
+
+       /* setup nr_node_ids if not done yet */
+       if (nr_node_ids == MAX_NUMNODES) {
+               for_each_node_mask(node, node_possible_map)
+                       num = node;
+               nr_node_ids = num + 1;
+       }
+
+       /* allocate the map */
+       map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
+
+       Dprintk(KERN_DEBUG "Node to cpumask map at %p for %d nodes\n",
+               map, nr_node_ids);
+
+       /* node_to_cpumask() will now work */
+       node_to_cpumask_map = map;
+}
+
 void __cpuinit numa_set_node(int cpu, int node)
 {
        int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);
@@ -174,6 +217,8 @@ void __cpuinit numa_clear_node(int cpu)
        numa_set_node(cpu, NUMA_NO_NODE);
 }
 
+#ifndef CONFIG_DEBUG_PER_CPU_MAPS
+
 void __cpuinit numa_add_cpu(int cpu)
 {
        cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
@@ -183,9 +228,44 @@ void __cpuinit numa_remove_cpu(int cpu)
 {
        cpu_clear(cpu, node_to_cpumask_map[cpu_to_node(cpu)]);
 }
-#endif /* CONFIG_NUMA */
 
-#if defined(CONFIG_DEBUG_PER_CPU_MAPS) && defined(CONFIG_X86_64)
+#else /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+/*
+ * --------- debug versions of the numa functions ---------
+ */
+static void __cpuinit numa_set_cpumask(int cpu, int enable)
+{
+       int node = cpu_to_node(cpu);
+       cpumask_t *mask;
+       char buf[64];
+
+       if (node_to_cpumask_map == NULL) {
+               printk(KERN_ERR "node_to_cpumask_map NULL\n");
+               dump_stack();
+               return;
+       }
+
+       mask = &node_to_cpumask_map[node];
+       if (enable)
+               cpu_set(cpu, *mask);
+       else
+               cpu_clear(cpu, *mask);
+
+       cpulist_scnprintf(buf, sizeof(buf), *mask);
+       printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
+               enable? "numa_add_cpu":"numa_remove_cpu", cpu, node, buf);
+}
+
+void __cpuinit numa_add_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 1);
+}
+
+void __cpuinit numa_remove_cpu(int cpu)
+{
+       numa_set_cpumask(cpu, 0);
+}
 
 int cpu_to_node(int cpu)
 {
@@ -199,6 +279,10 @@ int cpu_to_node(int cpu)
 }
 EXPORT_SYMBOL(cpu_to_node);
 
+/*
+ * Same function as cpu_to_node() but used if called before the
+ * per_cpu areas are setup.
+ */
 int early_cpu_to_node(int cpu)
 {
        if (early_per_cpu_ptr(x86_cpu_to_node_map))
@@ -207,9 +291,47 @@ int early_cpu_to_node(int cpu)
        if (!per_cpu_offset(cpu)) {
                printk(KERN_WARNING
                        "early_cpu_to_node(%d): no per_cpu area!\n", cpu);
                dump_stack();
                return NUMA_NO_NODE;
        }
        return per_cpu(x86_cpu_to_node_map, cpu);
 }
-#endif
+
+/*
+ * Returns a pointer to the bitmask of CPUs on Node 'node'.
+ */
+cpumask_t *_node_to_cpumask_ptr(int node)
+{
+       if (node_to_cpumask_map == NULL) {
+               printk(KERN_WARNING
+                       "_node_to_cpumask_ptr(%d): no node_to_cpumask_map!\n",
+                       node);
+               dump_stack();
+               return &cpu_online_map;
+       }
+       return &node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(_node_to_cpumask_ptr);
+
+/*
+ * Returns a bitmask of CPUs on Node 'node'.
+ */
+cpumask_t node_to_cpumask(int node)
+{
+       if (node_to_cpumask_map == NULL) {
+               printk(KERN_WARNING
+                       "node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
+               dump_stack();
+               return cpu_online_map;
+       }
+       return node_to_cpumask_map[node];
+}
+EXPORT_SYMBOL(node_to_cpumask);
+
+/*
+ * --------- end of debug versions of the numa functions ---------
+ */
+
+#endif /* CONFIG_DEBUG_PER_CPU_MAPS */
+
+#endif /* X86_64_NUMA */