author    Mike Travis <travis@sgi.com>  2007-10-16 04:24:04 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 12:42:50 -0400
commit    083576112940fda783d716fd5ccc744f81667b2f (patch)
tree      226139e7cb9863c91d1e2a1ac0babb0db94f3d11 /arch/x86/kernel/smpboot_32.c
parent    cc84634f29d5a92932400a2d52ca17dee2c8a462 (diff)
x86: Convert cpu_core_map to be a per cpu variable
This is from an earlier message from Christoph Lameter:

cpu_core_map is currently an array defined using NR_CPUS. This means that we
overallocate, since we will rarely use the maximum number of configured cpus.
If we put cpu_core_map into the per cpu area instead, it will be allocated
for each processor as it comes online.

This means that the core map cannot be accessed until the per cpu area has
been allocated. Xen does a weird thing here, looping over all processors and
zeroing the masks that are not yet allocated and that will be zeroed again
when they are allocated; I commented that code out.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
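The allocation trade the message describes can be illustrated with a minimal
userspace sketch (not kernel code): a statically sized NR_CPUS array pays for
every possible CPU up front, while per-CPU storage is only instantiated as
CPUs come online. The names cpu_core_map_static, cpu_core_map_dynamic and
bring_cpu_online() below are illustrative stand-ins, not kernel identifiers,
and the pointer table is a simplification of the kernel's per cpu area.

/*
 * Sketch: static NR_CPUS-sized allocation vs. allocate-on-online.
 * Compile with: cc -std=c99 -Wall sketch.c
 */
#include <stdio.h>
#include <stdlib.h>

#define NR_CPUS 4096	/* compile-time maximum, as in the old scheme */
typedef struct { unsigned long bits[NR_CPUS / 64]; } cpumask_t;

/* Old scheme: one mask per *possible* CPU, even on a 4-CPU box. */
static cpumask_t cpu_core_map_static[NR_CPUS];

/* New scheme (analogue): a mask is allocated only when a CPU comes online. */
static cpumask_t *cpu_core_map_dynamic[NR_CPUS];

static void bring_cpu_online(int cpu)
{
	/* Mirrors the per cpu area being set up for this CPU. */
	cpu_core_map_dynamic[cpu] = calloc(1, sizeof(cpumask_t));
}

int main(void)
{
	int online = 4;	/* CPUs actually present */

	for (int cpu = 0; cpu < online; cpu++)
		bring_cpu_online(cpu);

	printf("static:  %zu bytes for %d possible CPUs\n",
	       sizeof(cpu_core_map_static), NR_CPUS);
	printf("dynamic: %zu bytes for %d online CPUs\n",
	       (size_t)online * sizeof(cpumask_t), online);

	for (int cpu = 0; cpu < online; cpu++)
		free(cpu_core_map_dynamic[cpu]);
	return 0;
}

In the kernel itself this trade is expressed with DEFINE_PER_CPU(cpumask_t,
cpu_core_map) and the per_cpu(cpu_core_map, cpu) accessor, which is exactly
the substitution the hunks below perform.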
Diffstat (limited to 'arch/x86/kernel/smpboot_32.c')
-rw-r--r--  arch/x86/kernel/smpboot_32.c  34
1 file changed, 17 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index e4f61d1c6248..4cbab48ba865 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -74,8 +74,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map __read_mostly;
@@ -300,7 +300,7 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return cpu_core_map[cpu];
+		return per_cpu(cpu_core_map, cpu);
 	else
 		return c->llc_shared_map;
 }
@@ -321,8 +321,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
 				cpu_set(i, cpu_sibling_map[cpu]);
 				cpu_set(cpu, cpu_sibling_map[i]);
-				cpu_set(i, cpu_core_map[cpu]);
-				cpu_set(cpu, cpu_core_map[i]);
+				cpu_set(i, per_cpu(cpu_core_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_core_map, i));
 				cpu_set(i, c[cpu].llc_shared_map);
 				cpu_set(cpu, c[i].llc_shared_map);
 			}
@@ -334,7 +334,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	cpu_set(cpu, c[cpu].llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		cpu_core_map[cpu] = cpu_sibling_map[cpu];
+		per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
 		c[cpu].booted_cores = 1;
 		return;
 	}
@@ -346,8 +346,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 			cpu_set(cpu, c[i].llc_shared_map);
 		}
 		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
 			/*
 			 * Does this new cpu bringup a new core?
 			 */
@@ -984,7 +984,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
984 " Using dummy APIC emulation.\n"); 984 " Using dummy APIC emulation.\n");
985 map_cpu_to_logical_apicid(); 985 map_cpu_to_logical_apicid();
986 cpu_set(0, cpu_sibling_map[0]); 986 cpu_set(0, cpu_sibling_map[0]);
987 cpu_set(0, cpu_core_map[0]); 987 cpu_set(0, per_cpu(cpu_core_map, 0));
988 return; 988 return;
989 } 989 }
990 990
@@ -1009,7 +1009,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		smpboot_clear_io_apic_irqs();
 		phys_cpu_present_map = physid_mask_of_physid(0);
 		cpu_set(0, cpu_sibling_map[0]);
-		cpu_set(0, cpu_core_map[0]);
+		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
 
@@ -1024,7 +1024,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		smpboot_clear_io_apic_irqs();
 		phys_cpu_present_map = physid_mask_of_physid(0);
 		cpu_set(0, cpu_sibling_map[0]);
-		cpu_set(0, cpu_core_map[0]);
+		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
 
@@ -1107,11 +1107,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 	 */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		cpus_clear(cpu_sibling_map[cpu]);
-		cpus_clear(cpu_core_map[cpu]);
+		cpus_clear(per_cpu(cpu_core_map, cpu));
 	}
 
 	cpu_set(0, cpu_sibling_map[0]);
-	cpu_set(0, cpu_core_map[0]);
+	cpu_set(0, per_cpu(cpu_core_map, 0));
 
 	smpboot_setup_io_apic();
 
@@ -1148,9 +1148,9 @@ void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = cpu_data;
 
-	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-		cpu_clear(cpu, cpu_core_map[sibling]);
+	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 		/*
 		 * last thread sibling in this cpu core going down
 		 */
 		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
@@ -1160,7 +1160,7 @@ void remove_siblinginfo(int cpu)
 	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
 		cpu_clear(cpu, cpu_sibling_map[sibling]);
 	cpus_clear(cpu_sibling_map[cpu]);
-	cpus_clear(cpu_core_map[cpu]);
+	cpus_clear(per_cpu(cpu_core_map, cpu));
 	c[cpu].phys_proc_id = 0;
 	c[cpu].cpu_core_id = 0;
 	cpu_clear(cpu, cpu_sibling_setup_map);