author    Mike Travis <travis@sgi.com>          2007-10-19 14:35:04 -0400
committer Thomas Gleixner <tglx@linutronix.de>  2007-10-19 14:35:04 -0400
commit    92cb7612aee39642d109b8d935ad265e602c0563
tree      307f4183226f52418bd6842b5d970f03524ad1c1
parent    f1df280f53d7c3ce8613a3b25d1efe009b9860dd
x86: convert cpuinfo_x86 array to a per_cpu array
cpu_data is currently an array defined using NR_CPUS. This means that we
overallocate, since we will rarely use the maximum number of configured
CPUs. When NR_CPUS is raised to 4096, the size of cpu_data becomes
3,145,728 bytes.

These changes were adopted from the sparc64 (and ia64) code. An
additional field was added to cpuinfo_x86 to provide an unambiguous cpu
index; it corresponds to the index into a cpumask_t as well as the
per_cpu index, and is used in various places such as show_cpuinfo().

cpu_data is defined to be the boot_cpu_data structure for the NON-SMP
case.

Signed-off-by: Mike Travis <travis@sgi.com>
Acked-by: Christoph Lameter <clameter@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: James Bottomley <James.Bottomley@steeleye.com>
Cc: Dmitry Torokhov <dtor@mail.ru>
Cc: "Antonino A. Daplas" <adaplas@pol.net>
Cc: Mark M. Hoffman <mhoffman@lightlink.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
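The 3,145,728-byte figure works out to 4096 slots times 768 bytes per
cacheline-aligned struct cpuinfo_x86. For context, the shape of the
conversion is sketched below: the DEFINE_PER_CPU_SHARED_ALIGNED declaration
is the one visible in this file's diff, while the cpu_data() and
current_cpu_data accessor macros shown here are an assumption about the
accompanying header change, which lives outside this file:

/* Before: one static slot per possible CPU, allocated up front. */
struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;

/* After: a per-CPU instance, so storage follows the CPUs actually used. */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);

/* Assumed accessors from the companion header change (not in this diff);
 * the non-SMP case falls back to boot_cpu_data, as the message notes. */
#ifdef CONFIG_SMP
DECLARE_PER_CPU(struct cpuinfo_x86, cpu_info);
#define cpu_data(cpu)		per_cpu(cpu_info, cpu)
#define current_cpu_data	cpu_data(smp_processor_id())
#else
#define cpu_data(cpu)		boot_cpu_data
#define current_cpu_data	boot_cpu_data
#endif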
Diffstat (limited to 'arch/x86/kernel/smpboot_32.c')
 arch/x86/kernel/smpboot_32.c | 51 ++++++++++++++++++++++++++-------------------------
 1 file changed, 26 insertions(+), 25 deletions(-)
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 47bccfc283d9..7b8fdfa169dd 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -89,8 +89,8 @@ EXPORT_SYMBOL(cpu_possible_map);
 static cpumask_t smp_commenced_mask;
 
 /* Per CPU bogomips and other parameters */
-struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
-EXPORT_SYMBOL(cpu_data);
+DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
+EXPORT_PER_CPU_SYMBOL(cpu_info);
 
 /*
  * The following static array is used during kernel startup
@@ -158,9 +158,10 @@ void __init smp_alloc_memory(void)
 
 void __cpuinit smp_store_cpu_info(int id)
 {
-        struct cpuinfo_x86 *c = cpu_data + id;
+        struct cpuinfo_x86 *c = &cpu_data(id);
 
         *c = boot_cpu_data;
+        c->cpu_index = id;
         if (id!=0)
                 identify_secondary_cpu(c);
         /*
@@ -302,7 +303,7 @@ static int cpucount;
 /* maps the cpu to the sched domain representing multi-core */
 cpumask_t cpu_coregroup_map(int cpu)
 {
-        struct cpuinfo_x86 *c = cpu_data + cpu;
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
         /*
          * For perf, we return last level cache shared map.
          * And for power savings, we return cpu_core_map
@@ -319,41 +320,41 @@ static cpumask_t cpu_sibling_setup_map;
 void __cpuinit set_cpu_sibling_map(int cpu)
 {
         int i;
-        struct cpuinfo_x86 *c = cpu_data;
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
         cpu_set(cpu, cpu_sibling_setup_map);
 
         if (smp_num_siblings > 1) {
                 for_each_cpu_mask(i, cpu_sibling_setup_map) {
-                        if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
-                            c[cpu].cpu_core_id == c[i].cpu_core_id) {
+                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
+                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                 cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                                 cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                 cpu_set(i, per_cpu(cpu_core_map, cpu));
                                 cpu_set(cpu, per_cpu(cpu_core_map, i));
-                                cpu_set(i, c[cpu].llc_shared_map);
-                                cpu_set(cpu, c[i].llc_shared_map);
+                                cpu_set(i, c->llc_shared_map);
+                                cpu_set(cpu, cpu_data(i).llc_shared_map);
                         }
                 }
         } else {
                 cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
         }
 
-        cpu_set(cpu, c[cpu].llc_shared_map);
+        cpu_set(cpu, c->llc_shared_map);
 
         if (current_cpu_data.x86_max_cores == 1) {
                 per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
-                c[cpu].booted_cores = 1;
+                c->booted_cores = 1;
                 return;
         }
 
         for_each_cpu_mask(i, cpu_sibling_setup_map) {
                 if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                     per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
-                        cpu_set(i, c[cpu].llc_shared_map);
-                        cpu_set(cpu, c[i].llc_shared_map);
+                        cpu_set(i, c->llc_shared_map);
+                        cpu_set(cpu, cpu_data(i).llc_shared_map);
                 }
-                if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
+                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                         cpu_set(i, per_cpu(cpu_core_map, cpu));
                         cpu_set(cpu, per_cpu(cpu_core_map, i));
                         /*
@@ -365,15 +366,15 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                                  * the booted_cores for this new cpu
                                  */
                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
-                                        c[cpu].booted_cores++;
+                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
-                                        c[i].booted_cores++;
-                        } else if (i != cpu && !c[cpu].booted_cores)
-                                c[cpu].booted_cores = c[i].booted_cores;
+                                        cpu_data(i).booted_cores++;
+                        } else if (i != cpu && !c->booted_cores)
+                                c->booted_cores = cpu_data(i).booted_cores;
                 }
         }
 }
@@ -852,7 +853,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu)
                 /* number CPUs logically, starting from 1 (BSP is 0) */
                 Dprintk("OK.\n");
                 printk("CPU%d: ", cpu);
-                print_cpu_info(&cpu_data[cpu]);
+                print_cpu_info(&cpu_data(cpu));
                 Dprintk("CPU has booted.\n");
         } else {
                 boot_error= 1;
@@ -969,7 +970,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
          */
         smp_store_cpu_info(0); /* Final full version of the data */
         printk("CPU%d: ", 0);
-        print_cpu_info(&cpu_data[0]);
+        print_cpu_info(&cpu_data(0));
 
         boot_cpu_physical_apicid = GET_APIC_ID(apic_read(APIC_ID));
         boot_cpu_logical_apicid = logical_smp_processor_id();
@@ -1092,7 +1093,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
         Dprintk("Before bogomips.\n");
         for (cpu = 0; cpu < NR_CPUS; cpu++)
                 if (cpu_isset(cpu, cpu_callout_map))
-                        bogosum += cpu_data[cpu].loops_per_jiffy;
+                        bogosum += cpu_data(cpu).loops_per_jiffy;
         printk(KERN_INFO
                 "Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
                 cpucount+1,
@@ -1162,7 +1163,7 @@ void __init native_smp_prepare_boot_cpu(void)
 void remove_siblinginfo(int cpu)
 {
         int sibling;
-        struct cpuinfo_x86 *c = cpu_data;
+        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
         for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
                 cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
@@ -1170,15 +1171,15 @@ void remove_siblinginfo(int cpu)
                  * last thread sibling in this cpu core going down
                  */
                 if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
-                        c[sibling].booted_cores--;
+                        cpu_data(sibling).booted_cores--;
         }
 
         for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
                 cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
         cpus_clear(per_cpu(cpu_sibling_map, cpu));
         cpus_clear(per_cpu(cpu_core_map, cpu));
-        c[cpu].phys_proc_id = 0;
-        c[cpu].cpu_core_id = 0;
+        c->phys_proc_id = 0;
+        c->cpu_core_id = 0;
         cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
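The call-site conversion throughout the diff is mechanical: cpu_data[n]
becomes cpu_data(n), cpu_data + n becomes &cpu_data(n), and a cached
struct cpuinfo_x86 *c now points at a single CPU's entry, so sibling CPUs
are reached via cpu_data(i) instead of c[i]. A minimal sketch of the
resulting idiom, using a hypothetical show_cpu() helper that is not part
of this patch:

/* Hypothetical helper; illustrates the post-conversion accessor idiom. */
static void show_cpu(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);  /* was: cpu_data + cpu */

        /* Same BogoMIPS formatting the bootup summary uses. */
        printk(KERN_INFO "CPU%d: %lu.%02lu BogoMIPS (cpu_index %u)\n",
               cpu,
               c->loops_per_jiffy / (500000 / HZ),
               (c->loops_per_jiffy / (5000 / HZ)) % 100,
               c->cpu_index);
}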