Diffstat (limited to 'arch')
 arch/ia64/kernel/setup.c                    | 12
 arch/ia64/kernel/smpboot.c                  | 18
 arch/powerpc/kernel/setup-common.c          | 20
 arch/powerpc/kernel/setup_64.c              |  3
 arch/powerpc/kernel/smp.c                   |  4
 arch/powerpc/platforms/cell/cbe_cpufreq.c   |  2
 arch/sparc64/kernel/smp.c                   | 17
 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c   |  2
 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c |  2
 arch/x86/kernel/io_apic_32.c                |  4
 arch/x86/kernel/smpboot_32.c                | 36
 arch/x86/kernel/smpboot_64.c                | 26
 arch/x86/oprofile/op_model_p4.c             |  2
 arch/x86/xen/smp.c                          |  4
 14 files changed, 86 insertions(+), 66 deletions(-)
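Every hunk below applies the same mechanical conversion: the NR_CPUS-sized cpu_sibling_map[] array becomes a per-cpu variable, and each cpu_sibling_map[n] access becomes per_cpu(cpu_sibling_map, n). A minimal sketch of the pattern for a kernel of this vintage (the mark_siblings() helper is hypothetical, shown only to illustrate the accessor change):

#include <linux/percpu.h>
#include <linux/cpumask.h>

/* After the conversion: storage comes from each CPU's per-cpu area. */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* Hypothetical helper: mark cpu and i as hyperthread siblings. */
static void mark_siblings(int cpu, int i)
{
	/* formerly: cpu_set(i, cpu_sibling_map[cpu]); */
	cpu_set(i, per_cpu(cpu_sibling_map, cpu));
	cpu_set(cpu, per_cpu(cpu_sibling_map, i));
}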
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 9e392a30d197..777c8d8bd5e7 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -528,10 +528,6 @@ setup_arch (char **cmdline_p)
 
 #ifdef CONFIG_SMP
 	cpu_physical_id(0) = hard_smp_processor_id();
-
-	cpu_set(0, cpu_sibling_map[0]);
-	cpu_set(0, cpu_core_map[0]);
-
 	check_for_logical_procs();
 	if (smp_num_cpucores > 1)
 		printk(KERN_INFO
@@ -873,6 +869,14 @@ cpu_init (void)
 	void *cpu_data;
 
 	cpu_data = per_cpu_init();
+	/*
+	 * insert boot cpu into sibling and core maps
+	 * (must be done after per_cpu area is setup)
+	 */
+	if (smp_processor_id() == 0) {
+		cpu_set(0, per_cpu(cpu_sibling_map, 0));
+		cpu_set(0, cpu_core_map[0]);
+	}
 
 	/*
 	 * We set ar.k3 so that assembly code in MCA handler can compute
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 308772f7cddc..c57dbce25c12 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -138,7 +138,9 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_possible_map);
 
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+
 int smp_num_siblings = 1;
 int smp_num_cpucores = 1;
 
@@ -650,12 +652,12 @@ clear_cpu_sibling_map(int cpu)
 {
 	int i;
 
-	for_each_cpu_mask(i, cpu_sibling_map[cpu])
-		cpu_clear(cpu, cpu_sibling_map[i]);
+	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
 	for_each_cpu_mask(i, cpu_core_map[cpu])
 		cpu_clear(cpu, cpu_core_map[i]);
 
-	cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+	per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
 }
 
 static void
@@ -666,7 +668,7 @@ remove_siblinginfo(int cpu)
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
 		cpu_clear(cpu, cpu_core_map[cpu]);
-		cpu_clear(cpu, cpu_sibling_map[cpu]);
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
 		return;
 	}
 
@@ -807,8 +809,8 @@ set_cpu_sibling_map(int cpu)
 			cpu_set(i, cpu_core_map[cpu]);
 			cpu_set(cpu, cpu_core_map[i]);
 			if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-				cpu_set(i, cpu_sibling_map[cpu]);
-				cpu_set(cpu, cpu_sibling_map[i]);
+				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 			}
 		}
 	}
@@ -839,7 +841,7 @@ __cpu_up (unsigned int cpu)
 
 	if (cpu_data(cpu)->threads_per_core == 1 &&
 	    cpu_data(cpu)->cores_per_socket == 1) {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 		cpu_set(cpu, cpu_core_map[cpu]);
 		return 0;
 	}
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 36c90ba2d312..2de00f870edc 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -413,16 +413,28 @@ void __init smp_setup_cpu_maps(void)
 		of_node_put(dn);
 	}
 
+	vdso_data->processorCount = num_present_cpus();
+#endif /* CONFIG_PPC64 */
+}
+
+/*
+ * Being that cpu_sibling_map is now a per_cpu array, then it cannot
+ * be initialized until the per_cpu areas have been created.  This
+ * function is now called from setup_per_cpu_areas().
+ */
+void __init smp_setup_cpu_sibling_map(void)
+{
+#if defined(CONFIG_PPC64)
+	int cpu;
+
 	/*
 	 * Do the sibling map; assume only two threads per processor.
 	 */
 	for_each_possible_cpu(cpu) {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 		if (cpu_has_feature(CPU_FTR_SMT))
-			cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
+			cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu));
 	}
-
-	vdso_data->processorCount = num_present_cpus();
 #endif /* CONFIG_PPC64 */
 }
 #endif /* CONFIG_SMP */
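The comment added above states the one ordering rule this conversion introduces: a per-cpu variable has no backing storage until setup_per_cpu_areas() has run, so the sibling map can no longer be filled in from smp_setup_cpu_maps(). A simplified sketch of the resulting ppc64 boot sequence (condensed from the setup_64.c hunk that follows; loop body elided):

void __init setup_per_cpu_areas(void)
{
	int i;

	for (i = 0; i < NR_CPUS; i++) {
		/* allocate the area, set paca[i].data_offset,
		 * copy the .data.percpu template into it */
	}

	/* per_cpu(cpu_sibling_map, cpu) is only valid from here on */
	smp_setup_cpu_sibling_map();
}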
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 008ab6823b02..0e014550b83f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -597,6 +597,9 @@ void __init setup_per_cpu_areas(void)
 		paca[i].data_offset = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
 	}
+
+	/* Now that per_cpu is setup, initialize cpu_sibling_map */
+	smp_setup_cpu_sibling_map();
 }
 #endif
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d30f08fa0297..338950aeb6f6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -61,11 +61,11 @@ struct thread_info *secondary_ti;
 
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index 5123e9d4164b..13d5a87f13b1 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -117,7 +117,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
 	policy->cur = cbe_freqs[cur_pmode].frequency;
 
 #ifdef CONFIG_SMP
-	policy->cpus = cpu_sibling_map[policy->cpu];
+	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
 	cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c73b7a48b036..407d74a8a542 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -52,14 +52,13 @@ int sparc64_multi_core __read_mostly;
 
 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
-	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
 	{ [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
 EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 
 static cpumask_t smp_commenced_mask;
@@ -1261,16 +1260,16 @@ void __devinit smp_fill_in_sib_core_maps(void)
 	for_each_present_cpu(i) {
 		unsigned int j;
 
-		cpus_clear(cpu_sibling_map[i]);
+		cpus_clear(per_cpu(cpu_sibling_map, i));
 		if (cpu_data(i).proc_id == -1) {
-			cpu_set(i, cpu_sibling_map[i]);
+			cpu_set(i, per_cpu(cpu_sibling_map, i));
 			continue;
 		}
 
 		for_each_present_cpu(j) {
 			if (cpu_data(i).proc_id ==
 			    cpu_data(j).proc_id)
-				cpu_set(j, cpu_sibling_map[i]);
+				cpu_set(j, per_cpu(cpu_sibling_map, i));
 		}
 	}
 }
@@ -1342,9 +1341,9 @@ int __cpu_disable(void)
 		cpu_clear(cpu, cpu_core_map[i]);
 	cpus_clear(cpu_core_map[cpu]);
 
-	for_each_cpu_mask(i, cpu_sibling_map[cpu])
-		cpu_clear(cpu, cpu_sibling_map[i]);
-	cpus_clear(cpu_sibling_map[cpu]);
+	for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
+	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 
 	c = &cpu_data(cpu);
 
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 8eb414b906d2..793eae854f4f 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -200,7 +200,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
 	unsigned int i;
 
 #ifdef CONFIG_SMP
-	policy->cpus = cpu_sibling_map[policy->cpu];
+	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
 	/* Errata workaround */
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 36685e8f7be1..14d68aa301ee 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -322,7 +322,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
 	/* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-	policy->cpus = cpu_sibling_map[policy->cpu];
+	policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
 	cpus_allowed = current->cpus_allowed;
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index e2f4a1c68547..4ee1e5ee9b57 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -378,7 +378,7 @@ static struct irq_cpu_info {
 
 #define IRQ_ALLOWED(cpu, allowed_mask)	cpu_isset(cpu, allowed_mask)
 
-#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))
 
 static cpumask_t balance_irq_affinity[NR_IRQS] = {
 	[0 ... NR_IRQS-1] = CPU_MASK_ALL
@@ -598,7 +598,7 @@ tryanotherirq:
 	 * (A+B)/2 vs B
 	 */
 	load = CPU_IRQ(min_loaded) >> 1;
-	for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
+	for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
 		if (load > CPU_IRQ(j)) {
 			/* This won't change cpu_sibling_map[min_loaded] */
 			load = CPU_IRQ(j);
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 4cbab48ba865..31fc08bd15ef 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -70,8 +70,8 @@ EXPORT_SYMBOL(smp_num_siblings);
 int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_core_map);
@@ -319,8 +319,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		for_each_cpu_mask(i, cpu_sibling_setup_map) {
 			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
 			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
-				cpu_set(i, cpu_sibling_map[cpu]);
-				cpu_set(cpu, cpu_sibling_map[i]);
+				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 				cpu_set(i, per_cpu(cpu_core_map, cpu));
 				cpu_set(cpu, per_cpu(cpu_core_map, i));
 				cpu_set(i, c[cpu].llc_shared_map);
@@ -328,13 +328,13 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 			}
 		}
 	} else {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 	}
 
 	cpu_set(cpu, c[cpu].llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
+		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
 		c[cpu].booted_cores = 1;
 		return;
 	}
@@ -351,12 +351,12 @@ void __cpuinit set_cpu_sibling_map(int cpu)
 		/*
 		 * Does this new cpu bringup a new core?
 		 */
-		if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
 			/*
 			 * for each core in package, increment
 			 * the booted_cores for this new cpu
 			 */
-			if (first_cpu(cpu_sibling_map[i]) == i)
+			if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
 				c[cpu].booted_cores++;
 			/*
 			 * increment the core count for all
@@ -983,7 +983,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		printk(KERN_NOTICE "Local APIC not detected."
 				   " Using dummy APIC emulation.\n");
 		map_cpu_to_logical_apicid();
-		cpu_set(0, cpu_sibling_map[0]);
+		cpu_set(0, per_cpu(cpu_sibling_map, 0));
 		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
@@ -1008,7 +1008,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
 		smpboot_clear_io_apic_irqs();
 		phys_cpu_present_map = physid_mask_of_physid(0);
-		cpu_set(0, cpu_sibling_map[0]);
+		cpu_set(0, per_cpu(cpu_sibling_map, 0));
 		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
@@ -1023,7 +1023,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 		printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
 		smpboot_clear_io_apic_irqs();
 		phys_cpu_present_map = physid_mask_of_physid(0);
-		cpu_set(0, cpu_sibling_map[0]);
+		cpu_set(0, per_cpu(cpu_sibling_map, 0));
 		cpu_set(0, per_cpu(cpu_core_map, 0));
 		return;
 	}
@@ -1102,15 +1102,15 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 	Dprintk("Boot done.\n");
 
 	/*
-	 * construct cpu_sibling_map[], so that we can tell sibling CPUs
+	 * construct cpu_sibling_map, so that we can tell sibling CPUs
 	 * efficiently.
 	 */
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(per_cpu(cpu_sibling_map, cpu));
 		cpus_clear(per_cpu(cpu_core_map, cpu));
 	}
 
-	cpu_set(0, cpu_sibling_map[0]);
+	cpu_set(0, per_cpu(cpu_sibling_map, 0));
 	cpu_set(0, per_cpu(cpu_core_map, 0));
 
 	smpboot_setup_io_apic();
@@ -1153,13 +1153,13 @@ void remove_siblinginfo(int cpu)
 		/*
 		 * last thread sibling in this cpu core going down
 		 */
-		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
 			c[sibling].booted_cores--;
 	}
 
-	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-		cpu_clear(cpu, cpu_sibling_map[sibling]);
-	cpus_clear(cpu_sibling_map[cpu]);
+	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
 	c[cpu].phys_proc_id = 0;
 	c[cpu].cpu_core_id = 0;
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 6723c8622828..0faa0a0af272 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -91,8 +91,8 @@ EXPORT_SYMBOL(cpu_data);
 int smp_threads_ready;
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_core_map);
@@ -262,8 +262,8 @@ static inline void set_cpu_sibling_map(int cpu)
 		for_each_cpu_mask(i, cpu_sibling_setup_map) {
 			if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
 			    c[cpu].cpu_core_id == c[i].cpu_core_id) {
-				cpu_set(i, cpu_sibling_map[cpu]);
-				cpu_set(cpu, cpu_sibling_map[i]);
+				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
 				cpu_set(i, per_cpu(cpu_core_map, cpu));
 				cpu_set(cpu, per_cpu(cpu_core_map, i));
 				cpu_set(i, c[cpu].llc_shared_map);
@@ -271,13 +271,13 @@ static inline void set_cpu_sibling_map(int cpu)
 			}
 		}
 	} else {
-		cpu_set(cpu, cpu_sibling_map[cpu]);
+		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
 	}
 
 	cpu_set(cpu, c[cpu].llc_shared_map);
 
 	if (current_cpu_data.x86_max_cores == 1) {
-		per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
+		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
 		c[cpu].booted_cores = 1;
 		return;
 	}
@@ -294,12 +294,12 @@ static inline void set_cpu_sibling_map(int cpu)
 		/*
 		 * Does this new cpu bringup a new core?
 		 */
-		if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
 			/*
 			 * for each core in package, increment
 			 * the booted_cores for this new cpu
 			 */
-			if (first_cpu(cpu_sibling_map[i]) == i)
+			if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
 				c[cpu].booted_cores++;
 			/*
 			 * increment the core count for all
@@ -735,7 +735,7 @@ static __init void disable_smp(void)
 		phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
 	else
 		phys_cpu_present_map = physid_mask_of_physid(0);
-	cpu_set(0, cpu_sibling_map[0]);
+	cpu_set(0, per_cpu(cpu_sibling_map, 0));
 	cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
@@ -976,13 +976,13 @@ static void remove_siblinginfo(int cpu)
 		/*
 		 * last thread sibling in this cpu core going down
 		 */
-		if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
 			c[sibling].booted_cores--;
 	}
 
-	for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-		cpu_clear(cpu, cpu_sibling_map[sibling]);
-	cpus_clear(cpu_sibling_map[cpu]);
+	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+	cpus_clear(per_cpu(cpu_sibling_map, cpu));
 	cpus_clear(per_cpu(cpu_core_map, cpu));
 	c[cpu].phys_proc_id = 0;
 	c[cpu].cpu_core_id = 0;
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 47925927b12f..56b4757a1f47 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -379,7 +379,7 @@ static unsigned int get_stagger(void)
 {
 #ifdef CONFIG_SMP
 	int cpu = smp_processor_id();
-	return (cpu != first_cpu(cpu_sibling_map[cpu]));
+	return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
 #endif
 	return 0;
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 539d42530fc4..4fa33c27ccb6 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -147,7 +147,7 @@ void __init xen_smp_prepare_boot_cpu(void)
 	make_lowmem_page_readwrite(&per_cpu__gdt_page);
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(per_cpu(cpu_sibling_map, cpu));
 		/*
 		 * cpu_core_map lives in a per cpu area that is cleared
 		 * when the per cpu array is allocated.
@@ -164,7 +164,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
 	unsigned cpu;
 
 	for (cpu = 0; cpu < NR_CPUS; cpu++) {
-		cpus_clear(cpu_sibling_map[cpu]);
+		cpus_clear(per_cpu(cpu_sibling_map, cpu));
 		/*
 		 * cpu_core_ map will be zeroed when the per
 		 * cpu area is allocated.