author     Mike Travis <travis@sgi.com>	2007-10-16 04:24:04 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>	2007-10-16 12:42:50 -0400
commit     083576112940fda783d716fd5ccc744f81667b2f
tree       226139e7cb9863c91d1e2a1ac0babb0db94f3d11 /arch
parent     cc84634f29d5a92932400a2d52ca17dee2c8a462
x86: Convert cpu_core_map to be a per cpu variable
This is from an earlier message from 'Christoph Lameter':

cpu_core_map is currently an array defined using NR_CPUS. This means that we overallocate, since we will rarely really use the maximum number of configured cpus. If we put the cpu_core_map into the per cpu area, it will instead be allocated for each processor as it comes online.

This means that the core map cannot be accessed until the per cpu area has been allocated. Xen does a weird thing here, looping over all processors and zeroing the masks that are not yet allocated and that will be zeroed again when they are allocated; I commented that code out.

Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
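For readers who don't have the per cpu API in their head, the whole patch reduces to two idioms, sketched below. This is an illustrative sketch only, not code from the tree; mask and i are hypothetical locals, while DEFINE_PER_CPU, EXPORT_PER_CPU_SYMBOL, and per_cpu() are the real kernel macros the diff uses.

/* Before: one fixed-size global array, sized for the largest possible system. */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

/*
 * After: one cpumask_t in each CPU's per cpu area, so the storage
 * materializes only as each processor comes online.
 */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/*
 * Accesses convert mechanically; per_cpu() yields an lvalue, so reads
 * and writes keep the same shape as the old array indexing.
 */
mask = per_cpu(cpu_core_map, cpu);	/* was: mask = cpu_core_map[cpu]; */
cpu_set(i, per_cpu(cpu_core_map, cpu));	/* was: cpu_set(i, cpu_core_map[cpu]); */

The caveat the message raises falls straight out of this: per_cpu(cpu_core_map, cpu) is only meaningful once that CPU's per cpu area exists, which is why the early Xen loops at the end of this diff comment the clearing out rather than converting it.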
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c |  2
-rw-r--r--  arch/x86/kernel/cpu/cpufreq/powernow-k8.c  | 10
-rw-r--r--  arch/x86/kernel/cpu/proc.c                 |  3
-rw-r--r--  arch/x86/kernel/mce_amd_64.c               |  6
-rw-r--r--  arch/x86/kernel/setup_64.c                 |  3
-rw-r--r--  arch/x86/kernel/smpboot_32.c               | 34
-rw-r--r--  arch/x86/kernel/smpboot_64.c               | 24
-rw-r--r--  arch/x86/xen/smp.c                         | 14
8 files changed, 54 insertions(+), 42 deletions(-)
diff --git a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
index ffd01e5dcb52..2ca43ba32bc0 100644
--- a/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
+++ b/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c
@@ -595,7 +595,7 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	dmi_check_system(sw_any_bug_dmi_table);
 	if (bios_with_sw_any_bug && cpus_weight(policy->cpus) == 1) {
 		policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-		policy->cpus = cpu_core_map[cpu];
+		policy->cpus = per_cpu(cpu_core_map, cpu);
 	}
 #endif
 
diff --git a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
index b273b69cfddf..c06ac680c9ca 100644
--- a/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
+++ b/arch/x86/kernel/cpu/cpufreq/powernow-k8.c
@@ -57,7 +57,7 @@ static struct powernow_k8_data *powernow_data[NR_CPUS];
 static int cpu_family = CPU_OPTERON;
 
 #ifndef CONFIG_SMP
-static cpumask_t cpu_core_map[1];
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
 #endif
 
 /* Return a frequency in MHz, given an input fid */
@@ -667,7 +667,7 @@ static int fill_powernow_table(struct powernow_k8_data *data, struct pst_s *pst,
 
 	dprintk("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
 	data->powernow_table = powernow_table;
-	if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
 		print_basics(data);
 
 	for (j = 0; j < data->numps; j++)
@@ -821,7 +821,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
 
 	/* fill in data */
 	data->numps = data->acpi_data.state_count;
-	if (first_cpu(cpu_core_map[data->cpu]) == data->cpu)
+	if (first_cpu(per_cpu(cpu_core_map, data->cpu)) == data->cpu)
 		print_basics(data);
 	powernow_k8_acpi_pst_values(data, 0);
 
@@ -1214,7 +1214,7 @@ static int __cpuinit powernowk8_cpu_init(struct cpufreq_policy *pol)
 	if (cpu_family == CPU_HW_PSTATE)
 		pol->cpus = cpumask_of_cpu(pol->cpu);
 	else
-		pol->cpus = cpu_core_map[pol->cpu];
+		pol->cpus = per_cpu(cpu_core_map, pol->cpu);
 	data->available_cores = &(pol->cpus);
 
 	/* Take a crude guess here.
@@ -1281,7 +1281,7 @@ static unsigned int powernowk8_get (unsigned int cpu)
 	cpumask_t oldmask = current->cpus_allowed;
 	unsigned int khz = 0;
 
-	data = powernow_data[first_cpu(cpu_core_map[cpu])];
+	data = powernow_data[first_cpu(per_cpu(cpu_core_map, cpu))];
 
 	if (!data)
 		return -EINVAL;
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 1e31b6caffb1..879a0f789b1e 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -122,7 +122,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 #ifdef CONFIG_X86_HT
 	if (c->x86_max_cores * smp_num_siblings > 1) {
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
+		seq_printf(m, "siblings\t: %d\n",
+			   cpus_weight(per_cpu(cpu_core_map, n)));
 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 	}
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 2f8a7f18b0fe..805b62b1e0df 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/mce_amd_64.c
@@ -472,7 +472,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
 	if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = first_cpu(cpu_core_map[cpu]);
+		i = first_cpu(per_cpu(cpu_core_map, cpu));
 
 		/* first core not up yet */
 		if (cpu_data[i].cpu_core_id)
@@ -492,7 +492,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;
 
-		b->cpus = cpu_core_map[cpu];
+		b->cpus = per_cpu(cpu_core_map, cpu);
 		per_cpu(threshold_banks, cpu)[bank] = b;
 		goto out;
 	}
@@ -509,7 +509,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
 	b->cpus = CPU_MASK_ALL;
 #else
-	b->cpus = cpu_core_map[cpu];
+	b->cpus = per_cpu(cpu_core_map, cpu);
 #endif
 	err = kobject_register(&b->kobj);
 	if (err)
diff --git a/arch/x86/kernel/setup_64.c b/arch/x86/kernel/setup_64.c
index b7da90e79c78..85b5b6310acc 100644
--- a/arch/x86/kernel/setup_64.c
+++ b/arch/x86/kernel/setup_64.c
@@ -1070,7 +1070,8 @@ static int show_cpuinfo(struct seq_file *m, void *v)
 	if (smp_num_siblings * c->x86_max_cores > 1) {
 		int cpu = c - cpu_data;
 		seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-		seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[cpu]));
+		seq_printf(m, "siblings\t: %d\n",
+			   cpus_weight(per_cpu(cpu_core_map, cpu)));
 		seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
 		seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
 	}
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index e4f61d1c6248..4cbab48ba865 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -74,8 +74,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /* bitmap of online cpus */
 cpumask_t cpu_online_map __read_mostly;
@@ -300,7 +300,7 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return cpu_core_map[cpu];
+		return per_cpu(cpu_core_map, cpu);
 	else
 		return c->llc_shared_map;
 }
@@ -321,8 +321,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		    c[cpu].cpu_core_id == c[i].cpu_core_id) {
 			cpu_set(i, cpu_sibling_map[cpu]);
 			cpu_set(cpu, cpu_sibling_map[i]);
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
 			cpu_set(i, c[cpu].llc_shared_map);
 			cpu_set(cpu, c[i].llc_shared_map);
 		}
@@ -334,7 +334,7 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 	cpu_set(cpu, c[cpu].llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		cpu_core_map[cpu] = cpu_sibling_map[cpu];
+		per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
 		c[cpu].booted_cores = 1;
 		return;
 	}
@@ -346,8 +346,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 			cpu_set(cpu, c[i].llc_shared_map);
 		}
 		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
 			/*
 			 * Does this new cpu bringup a new core?
 			 */
@@ -984,7 +984,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
984 " Using dummy APIC emulation.\n"); 984 " Using dummy APIC emulation.\n");
985 map_cpu_to_logical_apicid(); 985 map_cpu_to_logical_apicid();
986 cpu_set(0, cpu_sibling_map[0]); 986 cpu_set(0, cpu_sibling_map[0]);
987 cpu_set(0, cpu_core_map[0]); 987 cpu_set(0, per_cpu(cpu_core_map, 0));
988 return; 988 return;
989 } 989 }
990 990
@@ -1009,7 +1009,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		smpboot_clear_io_apic_irqs();
 		phys_cpu_present_map = physid_mask_of_physid(0);
 		cpu_set(0, cpu_sibling_map[0]);
-		cpu_set(0, cpu_core_map[0]);
+		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
 
@@ -1024,7 +1024,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		smpboot_clear_io_apic_irqs();
 		phys_cpu_present_map = physid_mask_of_physid(0);
 		cpu_set(0, cpu_sibling_map[0]);
-		cpu_set(0, cpu_core_map[0]);
+		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
 
@@ -1107,11 +1107,11 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 	 */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		cpus_clear(cpu_sibling_map[cpu]);
-		cpus_clear(cpu_core_map[cpu]);
+		cpus_clear(per_cpu(cpu_core_map, cpu));
 	}
 
 	cpu_set(0, cpu_sibling_map[0]);
-	cpu_set(0, cpu_core_map[0]);
+	cpu_set(0, per_cpu(cpu_core_map, 0));
 
 	smpboot_setup_io_apic();
 
@@ -1148,9 +1148,9 @@ void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = cpu_data;
 
-	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-		cpu_clear(cpu, cpu_core_map[sibling]);
+	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 		/*
 		 * last thread sibling in this cpu core going down
 		 */
 		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
@@ -1160,7 +1160,7 @@ void remove_siblinginfo(int cpu)
 	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
 		cpu_clear(cpu, cpu_sibling_map[sibling]);
 	cpus_clear(cpu_sibling_map[cpu]);
-	cpus_clear(cpu_core_map[cpu]);
+	cpus_clear(per_cpu(cpu_core_map, cpu));
 	c[cpu].phys_proc_id = 0;
 	c[cpu].cpu_core_id = 0;
 	cpu_clear(cpu, cpu_sibling_setup_map);
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 720a7d1f8862..6723c8622828 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -95,8 +95,8 @@ cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
-cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_core_map);
+DEFINE_PER_CPU(cpumask_t, cpu_core_map);
+EXPORT_PER_CPU_SYMBOL(cpu_core_map);
 
 /*
  * Trampoline 80x86 program as an array.
@@ -243,7 +243,7 @@ cpumask_t cpu_coregroup_map(int cpu)
 	 * And for power savings, we return cpu_core_map
 	 */
 	if (sched_mc_power_savings || sched_smt_power_savings)
-		return cpu_core_map[cpu];
+		return per_cpu(cpu_core_map, cpu);
 	else
 		return c->llc_shared_map;
 }
@@ -264,8 +264,8 @@ static inline void set_cpu_sibling_map(int cpu)
 		    c[cpu].cpu_core_id == c[i].cpu_core_id) {
 			cpu_set(i, cpu_sibling_map[cpu]);
 			cpu_set(cpu, cpu_sibling_map[i]);
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
 			cpu_set(i, c[cpu].llc_shared_map);
 			cpu_set(cpu, c[i].llc_shared_map);
 		}
@@ -277,7 +277,7 @@ static inline void set_cpu_sibling_map(int cpu)
 	cpu_set(cpu, c[cpu].llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		cpu_core_map[cpu] = cpu_sibling_map[cpu];
+		per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
 		c[cpu].booted_cores = 1;
 		return;
 	}
@@ -289,8 +289,8 @@ static inline void set_cpu_sibling_map(int cpu)
 			cpu_set(cpu, c[i].llc_shared_map);
 		}
 		if (c[cpu].phys_proc_id == c[i].phys_proc_id) {
-			cpu_set(i, cpu_core_map[cpu]);
-			cpu_set(cpu, cpu_core_map[i]);
+			cpu_set(i, per_cpu(cpu_core_map, cpu));
+			cpu_set(cpu, per_cpu(cpu_core_map, i));
 			/*
 			 * Does this new cpu bringup a new core?
 			 */
@@ -736,7 +736,7 @@ static __init void disable_smp(void)
 	else
 		phys_cpu_present_map = physid_mask_of_physid(0);
 	cpu_set(0, cpu_sibling_map[0]);
-	cpu_set(0, cpu_core_map[0]);
+	cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
 #ifdef CONFIG_HOTPLUG_CPU
@@ -971,8 +971,8 @@ static void remove_siblinginfo(int cpu)
 	int sibling;
 	struct cpuinfo_x86 *c = cpu_data;
 
-	for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
-		cpu_clear(cpu, cpu_core_map[sibling]);
+	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
+		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
 		/*
 		 * last thread sibling in this cpu core going down
 		 */
@@ -983,7 +983,7 @@ static void remove_siblinginfo(int cpu)
 	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
 		cpu_clear(cpu, cpu_sibling_map[sibling]);
 	cpus_clear(cpu_sibling_map[cpu]);
-	cpus_clear(cpu_core_map[cpu]);
+	cpus_clear(per_cpu(cpu_core_map, cpu));
 	c[cpu].phys_proc_id = 0;
 	c[cpu].cpu_core_id = 0;
 	cpu_clear(cpu, cpu_sibling_setup_map);
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 557b8e24706a..539d42530fc4 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -148,7 +148,12 @@ void __init xen_smp_prepare_boot_cpu(void)
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		cpus_clear(cpu_sibling_map[cpu]);
-		cpus_clear(cpu_core_map[cpu]);
+		/*
+		 * cpu_core_map lives in a per cpu area that is cleared
+		 * when the per cpu array is allocated.
+		 *
+		 * cpus_clear(per_cpu(cpu_core_map, cpu));
+		 */
 	}
 
 	xen_setup_vcpu_info_placement();
@@ -160,7 +165,12 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
 		cpus_clear(cpu_sibling_map[cpu]);
-		cpus_clear(cpu_core_map[cpu]);
+		/*
+		 * cpu_core_map will be zeroed when the per
+		 * cpu area is allocated.
+		 *
+		 * cpus_clear(per_cpu(cpu_core_map, cpu));
+		 */
 	}
 
 	smp_store_cpu_info(0);