author		Mike Travis <travis@sgi.com>	2007-10-16 04:24:04 -0400
committer	Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:50 -0400
commit		083576112940fda783d716fd5ccc744f81667b2f (patch)
tree		226139e7cb9863c91d1e2a1ac0babb0db94f3d11 /arch/x86/kernel/smpboot_64.c
parent		cc84634f29d5a92932400a2d52ca17dee2c8a462 (diff)
x86: Convert cpu_core_map to be a per cpu variable
This is from an earlier message from Christoph Lameter:

cpu_core_map is currently an array sized by NR_CPUS. This means that we overallocate, since we will rarely use the maximum number of configured cpus. If we put cpu_core_map into the per cpu area, it is instead allocated for each processor as it comes online. This means that the core map cannot be accessed until the per cpu area has been allocated. Xen does a weird thing here, looping over all processors and zeroing the masks that are not yet allocated, even though they will be zeroed again when they are allocated. I commented the code out.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
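To make the overallocation argument concrete, here is a minimal userspace C sketch. This is not kernel code: NR_CPUS, cpumask_t, and bring_cpu_online() are illustrative stand-ins that mimic the two allocation schemes the patch contrasts.

	#include <stdio.h>
	#include <stdlib.h>

	#define NR_CPUS 4096	/* compile-time maximum, like the kernel config option */

	/* Illustrative stand-in for the kernel's cpumask_t: one bit per cpu. */
	typedef struct {
		unsigned long bits[NR_CPUS / (8 * sizeof(unsigned long))];
	} cpumask_t;

	/* Old scheme: one mask per *possible* cpu, allocated up front. */
	static cpumask_t cpu_core_map_array[NR_CPUS];

	/* New scheme: storage appears only when a cpu actually comes online. */
	static cpumask_t *cpu_core_map_percpu[NR_CPUS];

	static void bring_cpu_online(int cpu)
	{
		/* Mirrors per-cpu area setup: this cpu's mask exists only after this. */
		cpu_core_map_percpu[cpu] = calloc(1, sizeof(cpumask_t));
	}

	int main(void)
	{
		int online = 8;	/* typical machines use far fewer than NR_CPUS */

		for (int cpu = 0; cpu < online; cpu++)
			bring_cpu_online(cpu);

		printf("static NR_CPUS array: %zu bytes up front\n",
		       sizeof(cpu_core_map_array));
		printf("per-cpu allocation:   %zu bytes for %d online cpus\n",
		       (size_t)online * sizeof(cpumask_t), online);
		return 0;
	}

With NR_CPUS set to 4096 here, each mask is 512 bytes, so the static array costs 2 MiB up front while the per-cpu scheme pays 512 bytes per cpu only as cpus come online.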
Diffstat (limited to 'arch/x86/kernel/smpboot_64.c')
-rw-r--r--  arch/x86/kernel/smpboot_64.c | 24 ++++++++++++------------
1 file changed, 12 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 720a7d1f8862..6723c8622828 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -95,8 +95,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /*
  * Trampoline 80x86 program as an array.
@@ -243,7 +243,7 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return cpu_core_map[cpu];
+		return per_cpu(cpu_core_map, cpu);
 	else
 		return c->llc_shared_map;
 }
@@ -264,8 +264,8 @@ static inline void set_cpu_sibling_map(int cpu)
264 c[cpu].cpu_core_id == c[i].cpu_core_id) { 264 c[cpu].cpu_core_id == c[i].cpu_core_id) {
265 cpu_set(i, cpu_sibling_map[cpu]); 265 cpu_set(i, cpu_sibling_map[cpu]);
266 cpu_set(cpu, cpu_sibling_map[i]); 266 cpu_set(cpu, cpu_sibling_map[i]);
267 cpu_set(i, cpu_core_map[cpu]); 267 cpu_set(i, per_cpu(cpu_core_map, cpu));
268 cpu_set(cpu, cpu_core_map[i]); 268 cpu_set(cpu, per_cpu(cpu_core_map, i));
269 cpu_set(i, c[cpu].llc_shared_map); 269 cpu_set(i, c[cpu].llc_shared_map);
270 cpu_set(cpu, c[i].llc_shared_map); 270 cpu_set(cpu, c[i].llc_shared_map);
271 } 271 }
@@ -277,7 +277,7 @@ static inline void set_cpu_sibling_map(int cpu)
 	cpu_set(cpu, c[cpu].llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		cpu_core_map[cpu] = cpu_sibling_map[cpu];
+		per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
 		c[cpu].booted_cores = 1;
 		return;
 	}
@@ -289,8 +289,8 @@ static inline void set_cpu_sibling_map(int cpu)
 			cpu_set(cpu, c[i].llc_shared_map);
 		}
 		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
 			/*
 			 * Does this new cpu bringup a new core?
 			 */
@@ -736,7 +736,7 @@ static __init void disable_smp(void)
 	else
 		phys_cpu_present_map = physid_mask_of_physid(0);
 	cpu_set(0, cpu_sibling_map[0]);
-	cpu_set(0, cpu_core_map[0]);
+	cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -971,8 +971,8 @@ static void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = cpu_data;
 
-	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-		cpu_clear(cpu, cpu_core_map[sibling]);
+	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 		/*
 		 * last thread sibling in this cpu core going down
 		 */
@@ -983,7 +983,7 @@ static void remove_siblinginfo(int cpu)
 	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
 		cpu_clear(cpu, cpu_sibling_map[sibling]);
 	cpus_clear(cpu_sibling_map[cpu]);
-	cpus_clear(cpu_core_map[cpu]);
+	cpus_clear(per_cpu(cpu_core_map, cpu));
 	c[cpu].phys_proc_id = 0;
 	c[cpu].cpu_core_id = 0;
 	cpu_clear(cpu, cpu_sibling_setup_map);
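The per-cpu idiom the patch converts to follows a standard three-step kernel pattern. The fragments below are a sketch assembled from the hunks above plus the usual matching header declaration; the exact header location is an assumption, as it is not shown in this diff.

	/* In a shared header, so other files can name the variable: */
	DECLARE_PER_CPU(cpumask_t, cpu_core_map);

	/* In exactly one .c file: the definition, plus an export for modules: */
	DEFINE_PER_CPU(cpumask_t, cpu_core_map);
	EXPORT_PER_CPU_SYMBOL(cpu_core_map);

	/* All reads and writes then go through the per_cpu() accessor: */
	cpu_set(i, per_cpu(cpu_core_map, cpu));	/* set bit i in cpu's core mask */
	cpus_clear(per_cpu(cpu_core_map, cpu));	/* empty the mask when cpu goes down */

Because per_cpu() yields an lvalue, whole-mask assignment such as per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu] in the x86_max_cores == 1 hunk works unchanged.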