aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorRusty Russell <rusty@rustcorp.com.au>2009-03-13 00:19:53 -0400
committerRusty Russell <rusty@rustcorp.com.au>2009-03-13 00:19:53 -0400
commit155dd720d06a219ddf5a56b473cb3325441fc879 (patch)
tree617654f462b6c64eb954336fec1b427bc3f36211
parentc032ef60d1aa9af33730b7a35bbea751b131adc1 (diff)
cpumask: convert struct cpuinfo_x86's llc_shared_map to cpumask_var_t
Impact: reduce kernel memory usage when CONFIG_CPUMASK_OFFSTACK=y

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
-rw-r--r--arch/x86/include/asm/processor.h2
-rw-r--r--arch/x86/kernel/smpboot.c33
2 files changed, 27 insertions, 8 deletions
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 76139506c3e4..d794d9483c56 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -94,7 +94,7 @@ struct cpuinfo_x86 {
94 unsigned long loops_per_jiffy; 94 unsigned long loops_per_jiffy;
95#ifdef CONFIG_SMP 95#ifdef CONFIG_SMP
96 /* cpus sharing the last level cache: */ 96 /* cpus sharing the last level cache: */
97 cpumask_t llc_shared_map; 97 cpumask_var_t llc_shared_map;
98#endif 98#endif
99 /* cpuid returned max cores value: */ 99 /* cpuid returned max cores value: */
100 u16 x86_max_cores; 100 u16 x86_max_cores;
diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
index 5a58a45ac1e3..d6427aa56966 100644
--- a/arch/x86/kernel/smpboot.c
+++ b/arch/x86/kernel/smpboot.c
@@ -329,6 +329,23 @@ notrace static void __cpuinit start_secondary(void *unused)
329 cpu_idle(); 329 cpu_idle();
330} 330}
331 331
332#ifdef CONFIG_CPUMASK_OFFSTACK
333/* In this case, llc_shared_map is a pointer to a cpumask. */
334static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
335 const struct cpuinfo_x86 *src)
336{
337 struct cpumask *llc = dst->llc_shared_map;
338 *dst = *src;
339 dst->llc_shared_map = llc;
340}
341#else
342static inline void copy_cpuinfo_x86(struct cpuinfo_x86 *dst,
343 const struct cpuinfo_x86 *src)
344{
345 *dst = *src;
346}
347#endif /* CONFIG_CPUMASK_OFFSTACK */
348
332/* 349/*
333 * The bootstrap kernel entry code has set these up. Save them for 350 * The bootstrap kernel entry code has set these up. Save them for
334 * a given CPU 351 * a given CPU
@@ -338,7 +355,7 @@ void __cpuinit smp_store_cpu_info(int id)
338{ 355{
339 struct cpuinfo_x86 *c = &cpu_data(id); 356 struct cpuinfo_x86 *c = &cpu_data(id);
340 357
341 *c = boot_cpu_data; 358 copy_cpuinfo_x86(c, &boot_cpu_data);
342 c->cpu_index = id; 359 c->cpu_index = id;
343 if (id != 0) 360 if (id != 0)
344 identify_secondary_cpu(c); 361 identify_secondary_cpu(c);
@@ -362,15 +379,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
362 cpumask_set_cpu(cpu, cpu_sibling_mask(i)); 379 cpumask_set_cpu(cpu, cpu_sibling_mask(i));
363 cpumask_set_cpu(i, cpu_core_mask(cpu)); 380 cpumask_set_cpu(i, cpu_core_mask(cpu));
364 cpumask_set_cpu(cpu, cpu_core_mask(i)); 381 cpumask_set_cpu(cpu, cpu_core_mask(i));
365 cpumask_set_cpu(i, &c->llc_shared_map); 382 cpumask_set_cpu(i, c->llc_shared_map);
366 cpumask_set_cpu(cpu, &o->llc_shared_map); 383 cpumask_set_cpu(cpu, o->llc_shared_map);
367 } 384 }
368 } 385 }
369 } else { 386 } else {
370 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu)); 387 cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
371 } 388 }
372 389
373 cpumask_set_cpu(cpu, &c->llc_shared_map); 390 cpumask_set_cpu(cpu, c->llc_shared_map);
374 391
375 if (current_cpu_data.x86_max_cores == 1) { 392 if (current_cpu_data.x86_max_cores == 1) {
376 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu)); 393 cpumask_copy(cpu_core_mask(cpu), cpu_sibling_mask(cpu));
@@ -381,8 +398,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
381 for_each_cpu(i, cpu_sibling_setup_mask) { 398 for_each_cpu(i, cpu_sibling_setup_mask) {
382 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID && 399 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
383 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) { 400 per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
384 cpumask_set_cpu(i, &c->llc_shared_map); 401 cpumask_set_cpu(i, c->llc_shared_map);
385 cpumask_set_cpu(cpu, &cpu_data(i).llc_shared_map); 402 cpumask_set_cpu(cpu, cpu_data(i).llc_shared_map);
386 } 403 }
387 if (c->phys_proc_id == cpu_data(i).phys_proc_id) { 404 if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
388 cpumask_set_cpu(i, cpu_core_mask(cpu)); 405 cpumask_set_cpu(i, cpu_core_mask(cpu));
@@ -420,7 +437,7 @@ const struct cpumask *cpu_coregroup_mask(int cpu)
420 if (sched_mc_power_savings || sched_smt_power_savings) 437 if (sched_mc_power_savings || sched_smt_power_savings)
421 return cpu_core_mask(cpu); 438 return cpu_core_mask(cpu);
422 else 439 else
423 return &c->llc_shared_map; 440 return c->llc_shared_map;
424} 441}
425 442
426static void impress_friends(void) 443static void impress_friends(void)
@@ -1039,8 +1056,10 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
1039 for_each_possible_cpu(i) { 1056 for_each_possible_cpu(i) {
1040 alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL); 1057 alloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
1041 alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL); 1058 alloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
1059 alloc_cpumask_var(&cpu_data(i).llc_shared_map, GFP_KERNEL);
1042 cpumask_clear(per_cpu(cpu_core_map, i)); 1060 cpumask_clear(per_cpu(cpu_core_map, i));
1043 cpumask_clear(per_cpu(cpu_sibling_map, i)); 1061 cpumask_clear(per_cpu(cpu_sibling_map, i));
1062 cpumask_clear(cpu_data(i).llc_shared_map);
1044 } 1063 }
1045 set_cpu_sibling_map(0); 1064 set_cpu_sibling_map(0);
1046 1065