author    Mike Travis <travis@sgi.com>                         2007-10-16 04:24:05 -0400
committer Linus Torvalds <torvalds@woody.linux-foundation.org> 2007-10-16 12:42:50 -0400
commit    d5a7430ddcdb598261d70f7eb1bf450b5be52085 (patch)
tree      3b94672e0dbc2bff125de3266908f1a47a17b795
parent    083576112940fda783d716fd5ccc744f81667b2f (diff)
Convert cpu_sibling_map to be a per cpu variable
Convert cpu_sibling_map from a static array sized by NR_CPUS to a per_cpu
variable. This saves sizeof(cpumask_t) for every configured-but-absent CPU
slot: with NR_CPUS=4096, for example, a cpumask_t is 512 bytes, so the static
array costs 2MB regardless of how many CPUs are actually present. Access is
mostly from startup and CPU HOTPLUG functions.

Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Cc: "David S. Miller" <davem@davemloft.net>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: "Luck, Tony" <tony.luck@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
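As background for the hunks below, a minimal sketch of the conversion pattern
the patch applies at each site (illustrative only: example_map and mark_self
are made-up names, not symbols from this patch):

        #include <linux/percpu.h>
        #include <linux/cpumask.h>

        /* Before: a static array reserves sizeof(cpumask_t) for every one
         * of the NR_CPUS configured slots, present or not. */
        static cpumask_t example_map_old[NR_CPUS];

        /* After: the mask lives in the per cpu area, so storage is only
         * instantiated for cpus the running system can actually have. */
        static DEFINE_PER_CPU(cpumask_t, example_map);

        static void mark_self(int cpu)
        {
                /* array indexing becomes the per_cpu() accessor */
                cpu_set(cpu, per_cpu(example_map, cpu));
        }

Declarations and exports change in step: "extern cpumask_t map[NR_CPUS]"
becomes DECLARE_PER_CPU(cpumask_t, map), and EXPORT_SYMBOL becomes
EXPORT_PER_CPU_SYMBOL, as the hunks below show.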
 arch/ia64/kernel/setup.c                    | 12
 arch/ia64/kernel/smpboot.c                  | 18
 arch/powerpc/kernel/setup-common.c          | 20
 arch/powerpc/kernel/setup_64.c              |  3
 arch/powerpc/kernel/smp.c                   |  4
 arch/powerpc/platforms/cell/cbe_cpufreq.c   |  2
 arch/sparc64/kernel/smp.c                   | 17
 arch/x86/kernel/cpu/cpufreq/p4-clockmod.c   |  2
 arch/x86/kernel/cpu/cpufreq/speedstep-ich.c |  2
 arch/x86/kernel/io_apic_32.c                |  4
 arch/x86/kernel/smpboot_32.c                | 36
 arch/x86/kernel/smpboot_64.c                | 26
 arch/x86/oprofile/op_model_p4.c             |  2
 arch/x86/xen/smp.c                          |  4
 block/blktrace.c                            |  2
 include/asm-ia64/smp.h                      |  2
 include/asm-ia64/topology.h                 |  2
 include/asm-powerpc/smp.h                   |  4
 include/asm-powerpc/topology.h              |  2
 include/asm-sparc64/smp.h                   |  3
 include/asm-sparc64/topology.h              |  2
 include/asm-x86/smp_32.h                    |  2
 include/asm-x86/smp_64.h                    |  6
 include/asm-x86/topology_32.h               |  2
 include/asm-x86/topology_64.h               |  2
 kernel/sched.c                              |  8
 26 files changed, 107 insertions(+), 82 deletions(-)
diff --git a/arch/ia64/kernel/setup.c b/arch/ia64/kernel/setup.c
index 9e392a30d197..777c8d8bd5e7 100644
--- a/arch/ia64/kernel/setup.c
+++ b/arch/ia64/kernel/setup.c
@@ -528,10 +528,6 @@ setup_arch (char **cmdline_p)
 
 #ifdef CONFIG_SMP
         cpu_physical_id(0) = hard_smp_processor_id();
-
-        cpu_set(0, cpu_sibling_map[0]);
-        cpu_set(0, cpu_core_map[0]);
-
         check_for_logical_procs();
         if (smp_num_cpucores > 1)
                 printk(KERN_INFO
@@ -873,6 +869,14 @@ cpu_init (void)
         void *cpu_data;
 
         cpu_data = per_cpu_init();
+        /*
+         * insert boot cpu into sibling and core maps
+         * (must be done after per_cpu area is setup)
+         */
+        if (smp_processor_id() == 0) {
+                cpu_set(0, per_cpu(cpu_sibling_map, 0));
+                cpu_set(0, cpu_core_map[0]);
+        }
 
         /*
          * We set ar.k3 so that assembly code in MCA handler can compute
diff --git a/arch/ia64/kernel/smpboot.c b/arch/ia64/kernel/smpboot.c
index 308772f7cddc..c57dbce25c12 100644
--- a/arch/ia64/kernel/smpboot.c
+++ b/arch/ia64/kernel/smpboot.c
@@ -138,7 +138,9 @@ cpumask_t cpu_possible_map = CPU_MASK_NONE;
 EXPORT_SYMBOL(cpu_possible_map);
 
 cpumask_t cpu_core_map[NR_CPUS] __cacheline_aligned;
-cpumask_t cpu_sibling_map[NR_CPUS] __cacheline_aligned;
+DEFINE_PER_CPU_SHARED_ALIGNED(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
+
 int smp_num_siblings = 1;
 int smp_num_cpucores = 1;
 
@@ -650,12 +652,12 @@ clear_cpu_sibling_map(int cpu)
 {
         int i;
 
-        for_each_cpu_mask(i, cpu_sibling_map[cpu])
-                cpu_clear(cpu, cpu_sibling_map[i]);
+        for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+                cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
         for_each_cpu_mask(i, cpu_core_map[cpu])
                 cpu_clear(cpu, cpu_core_map[i]);
 
-        cpu_sibling_map[cpu] = cpu_core_map[cpu] = CPU_MASK_NONE;
+        per_cpu(cpu_sibling_map, cpu) = cpu_core_map[cpu] = CPU_MASK_NONE;
 }
 
 static void
@@ -666,7 +668,7 @@ remove_siblinginfo(int cpu)
         if (cpu_data(cpu)->threads_per_core == 1 &&
             cpu_data(cpu)->cores_per_socket == 1) {
                 cpu_clear(cpu, cpu_core_map[cpu]);
-                cpu_clear(cpu, cpu_sibling_map[cpu]);
+                cpu_clear(cpu, per_cpu(cpu_sibling_map, cpu));
                 return;
         }
 
@@ -807,8 +809,8 @@ set_cpu_sibling_map(int cpu)
                         cpu_set(i, cpu_core_map[cpu]);
                         cpu_set(cpu, cpu_core_map[i]);
                         if (cpu_data(cpu)->core_id == cpu_data(i)->core_id) {
-                                cpu_set(i, cpu_sibling_map[cpu]);
-                                cpu_set(cpu, cpu_sibling_map[i]);
+                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                         }
                 }
         }
@@ -839,7 +841,7 @@ __cpu_up (unsigned int cpu)
 
         if (cpu_data(cpu)->threads_per_core == 1 &&
             cpu_data(cpu)->cores_per_socket == 1) {
-                cpu_set(cpu, cpu_sibling_map[cpu]);
+                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
                 cpu_set(cpu, cpu_core_map[cpu]);
                 return 0;
         }
diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c
index 36c90ba2d312..2de00f870edc 100644
--- a/arch/powerpc/kernel/setup-common.c
+++ b/arch/powerpc/kernel/setup-common.c
@@ -413,16 +413,28 @@ void __init smp_setup_cpu_maps(void)
                 of_node_put(dn);
         }
 
+        vdso_data->processorCount = num_present_cpus();
+#endif /* CONFIG_PPC64 */
+}
+
+/*
+ * Because cpu_sibling_map is now a per_cpu array, it cannot be
+ * initialized until the per_cpu areas have been created. This
+ * function is now called from setup_per_cpu_areas().
+ */
+void __init smp_setup_cpu_sibling_map(void)
+{
+#if defined(CONFIG_PPC64)
+        int cpu;
+
         /*
          * Do the sibling map; assume only two threads per processor.
          */
         for_each_possible_cpu(cpu) {
-                cpu_set(cpu, cpu_sibling_map[cpu]);
+                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
                 if (cpu_has_feature(CPU_FTR_SMT))
-                        cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
+                        cpu_set(cpu ^ 0x1, per_cpu(cpu_sibling_map, cpu));
         }
-
-        vdso_data->processorCount = num_present_cpus();
 #endif /* CONFIG_PPC64 */
 }
 #endif /* CONFIG_SMP */
diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c
index 008ab6823b02..0e014550b83f 100644
--- a/arch/powerpc/kernel/setup_64.c
+++ b/arch/powerpc/kernel/setup_64.c
@@ -597,6 +597,9 @@ void __init setup_per_cpu_areas(void)
                 paca[i].data_offset = ptr - __per_cpu_start;
                 memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
         }
+
+        /* Now that per_cpu is setup, initialize cpu_sibling_map */
+        smp_setup_cpu_sibling_map();
 }
 #endif
 
diff --git a/arch/powerpc/kernel/smp.c b/arch/powerpc/kernel/smp.c
index d30f08fa0297..338950aeb6f6 100644
--- a/arch/powerpc/kernel/smp.c
+++ b/arch/powerpc/kernel/smp.c
@@ -61,11 +61,11 @@ struct thread_info *secondary_ti;
 
 cpumask_t cpu_possible_map = CPU_MASK_NONE;
 cpumask_t cpu_online_map = CPU_MASK_NONE;
-cpumask_t cpu_sibling_map[NR_CPUS] = { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 
 EXPORT_SYMBOL(cpu_online_map);
 EXPORT_SYMBOL(cpu_possible_map);
-EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* SMP operations for this machine */
 struct smp_ops_t *smp_ops;
diff --git a/arch/powerpc/platforms/cell/cbe_cpufreq.c b/arch/powerpc/platforms/cell/cbe_cpufreq.c
index 5123e9d4164b..13d5a87f13b1 100644
--- a/arch/powerpc/platforms/cell/cbe_cpufreq.c
+++ b/arch/powerpc/platforms/cell/cbe_cpufreq.c
@@ -117,7 +117,7 @@ static int cbe_cpufreq_cpu_init(struct cpufreq_policy *policy)
         policy->cur = cbe_freqs[cur_pmode].frequency;
 
 #ifdef CONFIG_SMP
-        policy->cpus = cpu_sibling_map[policy->cpu];
+        policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
         cpufreq_frequency_table_get_attr(cbe_freqs, policy->cpu);
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index c73b7a48b036..407d74a8a542 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -52,14 +52,13 @@ int sparc64_multi_core __read_mostly;
 
 cpumask_t cpu_possible_map __read_mostly = CPU_MASK_NONE;
 cpumask_t cpu_online_map __read_mostly = CPU_MASK_NONE;
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly =
-        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
         { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
 EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(cpu_online_map);
-EXPORT_SYMBOL(cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
 
 static cpumask_t smp_commenced_mask;
@@ -1261,16 +1260,16 @@ void __devinit smp_fill_in_sib_core_maps(void)
         for_each_present_cpu(i) {
                 unsigned int j;
 
-                cpus_clear(cpu_sibling_map[i]);
+                cpus_clear(per_cpu(cpu_sibling_map, i));
                 if (cpu_data(i).proc_id == -1) {
-                        cpu_set(i, cpu_sibling_map[i]);
+                        cpu_set(i, per_cpu(cpu_sibling_map, i));
                         continue;
                 }
 
                 for_each_present_cpu(j) {
                         if (cpu_data(i).proc_id ==
                             cpu_data(j).proc_id)
-                                cpu_set(j, cpu_sibling_map[i]);
+                                cpu_set(j, per_cpu(cpu_sibling_map, i));
                 }
         }
 }
@@ -1342,9 +1341,9 @@ int __cpu_disable(void)
                 cpu_clear(cpu, cpu_core_map[i]);
         cpus_clear(cpu_core_map[cpu]);
 
-        for_each_cpu_mask(i, cpu_sibling_map[cpu])
-                cpu_clear(cpu, cpu_sibling_map[i]);
-        cpus_clear(cpu_sibling_map[cpu]);
+        for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu))
+                cpu_clear(cpu, per_cpu(cpu_sibling_map, i));
+        cpus_clear(per_cpu(cpu_sibling_map, cpu));
 
         c = &cpu_data(cpu);
 
diff --git a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
index 8eb414b906d2..793eae854f4f 100644
--- a/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
+++ b/arch/x86/kernel/cpu/cpufreq/p4-clockmod.c
@@ -200,7 +200,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
         unsigned int i;
 
 #ifdef CONFIG_SMP
-        policy->cpus = cpu_sibling_map[policy->cpu];
+        policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
         /* Errata workaround */
diff --git a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
index 36685e8f7be1..14d68aa301ee 100644
--- a/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
+++ b/arch/x86/kernel/cpu/cpufreq/speedstep-ich.c
@@ -322,7 +322,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
         /* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-        policy->cpus = cpu_sibling_map[policy->cpu];
+        policy->cpus = per_cpu(cpu_sibling_map, policy->cpu);
 #endif
 
         cpus_allowed = current->cpus_allowed;
diff --git a/arch/x86/kernel/io_apic_32.c b/arch/x86/kernel/io_apic_32.c
index e2f4a1c68547..4ee1e5ee9b57 100644
--- a/arch/x86/kernel/io_apic_32.c
+++ b/arch/x86/kernel/io_apic_32.c
@@ -378,7 +378,7 @@ static struct irq_cpu_info {
 
 #define IRQ_ALLOWED(cpu, allowed_mask) cpu_isset(cpu, allowed_mask)
 
-#define CPU_TO_PACKAGEINDEX(i) (first_cpu(cpu_sibling_map[i]))
+#define CPU_TO_PACKAGEINDEX(i) (first_cpu(per_cpu(cpu_sibling_map, i)))
 
 static cpumask_t balance_irq_affinity[NR_IRQS] = {
         [0 ... NR_IRQS-1] = CPU_MASK_ALL
@@ -598,7 +598,7 @@ tryanotherirq:
          * (A+B)/2 vs B
          */
         load = CPU_IRQ(min_loaded) >> 1;
-        for_each_cpu_mask(j, cpu_sibling_map[min_loaded]) {
+        for_each_cpu_mask(j, per_cpu(cpu_sibling_map, min_loaded)) {
                 if (load > CPU_IRQ(j)) {
                         /* This won't change cpu_sibling_map[min_loaded] */
                         load = CPU_IRQ(j);
diff --git a/arch/x86/kernel/smpboot_32.c b/arch/x86/kernel/smpboot_32.c
index 4cbab48ba865..31fc08bd15ef 100644
--- a/arch/x86/kernel/smpboot_32.c
+++ b/arch/x86/kernel/smpboot_32.c
@@ -70,8 +70,8 @@ EXPORT_SYMBOL(smp_num_siblings);
 int cpu_llc_id[NR_CPUS] __cpuinitdata = {[0 ... NR_CPUS-1] = BAD_APICID};
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_core_map);
@@ -319,8 +319,8 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                 for_each_cpu_mask(i, cpu_sibling_setup_map) {
                         if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
                             c[cpu].cpu_core_id == c[i].cpu_core_id) {
-                                cpu_set(i, cpu_sibling_map[cpu]);
-                                cpu_set(cpu, cpu_sibling_map[i]);
+                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                 cpu_set(i, per_cpu(cpu_core_map, cpu));
                                 cpu_set(cpu, per_cpu(cpu_core_map, i));
                                 cpu_set(i, c[cpu].llc_shared_map);
@@ -328,13 +328,13 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                         }
                 }
         } else {
-                cpu_set(cpu, cpu_sibling_map[cpu]);
+                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
         }
 
         cpu_set(cpu, c[cpu].llc_shared_map);
 
         if (current_cpu_data.x86_max_cores == 1) {
-                per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
+                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                 c[cpu].booted_cores = 1;
                 return;
         }
@@ -351,12 +351,12 @@ void __cpuinit set_cpu_sibling_map(int cpu)
                         /*
                          * Does this new cpu bringup a new core?
                          */
-                        if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                 /*
                                  * for each core in package, increment
                                  * the booted_cores for this new cpu
                                  */
-                                if (first_cpu(cpu_sibling_map[i]) == i)
+                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                         c[cpu].booted_cores++;
                                 /*
                                  * increment the core count for all
@@ -983,7 +983,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                 printk(KERN_NOTICE "Local APIC not detected."
                                    " Using dummy APIC emulation.\n");
                 map_cpu_to_logical_apicid();
-                cpu_set(0, cpu_sibling_map[0]);
+                cpu_set(0, per_cpu(cpu_sibling_map, 0));
                 cpu_set(0, per_cpu(cpu_core_map, 0));
                 return;
         }
@@ -1008,7 +1008,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                 printk(KERN_ERR "... forcing use of dummy APIC emulation. (tell your hw vendor)\n");
                 smpboot_clear_io_apic_irqs();
                 phys_cpu_present_map = physid_mask_of_physid(0);
-                cpu_set(0, cpu_sibling_map[0]);
+                cpu_set(0, per_cpu(cpu_sibling_map, 0));
                 cpu_set(0, per_cpu(cpu_core_map, 0));
                 return;
         }
@@ -1023,7 +1023,7 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
                 printk(KERN_INFO "SMP mode deactivated, forcing use of dummy APIC emulation.\n");
                 smpboot_clear_io_apic_irqs();
                 phys_cpu_present_map = physid_mask_of_physid(0);
-                cpu_set(0, cpu_sibling_map[0]);
+                cpu_set(0, per_cpu(cpu_sibling_map, 0));
                 cpu_set(0, per_cpu(cpu_core_map, 0));
                 return;
         }
@@ -1102,15 +1102,15 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
         Dprintk("Boot done.\n");
 
         /*
-         * construct cpu_sibling_map[], so that we can tell sibling CPUs
+         * construct cpu_sibling_map, so that we can tell sibling CPUs
          * efficiently.
          */
         for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                cpus_clear(cpu_sibling_map[cpu]);
+                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                 cpus_clear(per_cpu(cpu_core_map, cpu));
         }
 
-        cpu_set(0, cpu_sibling_map[0]);
+        cpu_set(0, per_cpu(cpu_sibling_map, 0));
         cpu_set(0, per_cpu(cpu_core_map, 0));
 
         smpboot_setup_io_apic();
@@ -1153,13 +1153,13 @@ void remove_siblinginfo(int cpu)
                 /*
                  * last thread sibling in this cpu core going down
                  */
-                if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                         c[sibling].booted_cores--;
         }
 
-        for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-                cpu_clear(cpu, cpu_sibling_map[sibling]);
-        cpus_clear(cpu_sibling_map[cpu]);
+        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+        cpus_clear(per_cpu(cpu_sibling_map, cpu));
         cpus_clear(per_cpu(cpu_core_map, cpu));
         c[cpu].phys_proc_id = 0;
         c[cpu].cpu_core_id = 0;
diff --git a/arch/x86/kernel/smpboot_64.c b/arch/x86/kernel/smpboot_64.c
index 6723c8622828..0faa0a0af272 100644
--- a/arch/x86/kernel/smpboot_64.c
+++ b/arch/x86/kernel/smpboot_64.c
@@ -91,8 +91,8 @@ EXPORT_SYMBOL(cpu_data);
 int smp_threads_ready;
 
 /* representing HT siblings of each logical CPU */
-cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
-EXPORT_SYMBOL(cpu_sibling_map);
+DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
+EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 
 /* representing HT and core siblings of each logical CPU */
 DEFINE_PER_CPU(cpumask_t, cpu_core_map);
@@ -262,8 +262,8 @@ static inline void set_cpu_sibling_map(int cpu)
                 for_each_cpu_mask(i, cpu_sibling_setup_map) {
                         if (c[cpu].phys_proc_id == c[i].phys_proc_id &&
                             c[cpu].cpu_core_id == c[i].cpu_core_id) {
-                                cpu_set(i, cpu_sibling_map[cpu]);
-                                cpu_set(cpu, cpu_sibling_map[i]);
+                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
+                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                 cpu_set(i, per_cpu(cpu_core_map, cpu));
                                 cpu_set(cpu, per_cpu(cpu_core_map, i));
                                 cpu_set(i, c[cpu].llc_shared_map);
@@ -271,13 +271,13 @@ static inline void set_cpu_sibling_map(int cpu)
                         }
                 }
         } else {
-                cpu_set(cpu, cpu_sibling_map[cpu]);
+                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
         }
 
         cpu_set(cpu, c[cpu].llc_shared_map);
 
         if (current_cpu_data.x86_max_cores == 1) {
-                per_cpu(cpu_core_map, cpu) = cpu_sibling_map[cpu];
+                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                 c[cpu].booted_cores = 1;
                 return;
         }
@@ -294,12 +294,12 @@ static inline void set_cpu_sibling_map(int cpu)
                         /*
                          * Does this new cpu bringup a new core?
                          */
-                        if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                 /*
                                  * for each core in package, increment
                                  * the booted_cores for this new cpu
                                  */
-                                if (first_cpu(cpu_sibling_map[i]) == i)
+                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                         c[cpu].booted_cores++;
                                 /*
                                  * increment the core count for all
@@ -735,7 +735,7 @@ static __init void disable_smp(void)
                 phys_cpu_present_map = physid_mask_of_physid(boot_cpu_id);
         else
                 phys_cpu_present_map = physid_mask_of_physid(0);
-        cpu_set(0, cpu_sibling_map[0]);
+        cpu_set(0, per_cpu(cpu_sibling_map, 0));
         cpu_set(0, per_cpu(cpu_core_map, 0));
 }
 
@@ -976,13 +976,13 @@ static void remove_siblinginfo(int cpu)
                 /*
                  * last thread sibling in this cpu core going down
                  */
-                if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                         c[sibling].booted_cores--;
         }
 
-        for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
-                cpu_clear(cpu, cpu_sibling_map[sibling]);
-        cpus_clear(cpu_sibling_map[cpu]);
+        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
+                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
+        cpus_clear(per_cpu(cpu_sibling_map, cpu));
         cpus_clear(per_cpu(cpu_core_map, cpu));
         c[cpu].phys_proc_id = 0;
         c[cpu].cpu_core_id = 0;
diff --git a/arch/x86/oprofile/op_model_p4.c b/arch/x86/oprofile/op_model_p4.c
index 47925927b12f..56b4757a1f47 100644
--- a/arch/x86/oprofile/op_model_p4.c
+++ b/arch/x86/oprofile/op_model_p4.c
@@ -379,7 +379,7 @@ static unsigned int get_stagger(void)
 {
 #ifdef CONFIG_SMP
         int cpu = smp_processor_id();
-        return (cpu != first_cpu(cpu_sibling_map[cpu]));
+        return (cpu != first_cpu(per_cpu(cpu_sibling_map, cpu)));
 #endif
         return 0;
 }
diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
index 539d42530fc4..4fa33c27ccb6 100644
--- a/arch/x86/xen/smp.c
+++ b/arch/x86/xen/smp.c
@@ -147,7 +147,7 @@ void __init xen_smp_prepare_boot_cpu(void)
         make_lowmem_page_readwrite(&per_cpu__gdt_page);
 
         for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                cpus_clear(cpu_sibling_map[cpu]);
+                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                 /*
                  * cpu_core_map lives in a per cpu area that is cleared
                  * when the per cpu array is allocated.
@@ -164,7 +164,7 @@ void __init xen_smp_prepare_cpus(unsigned int max_cpus)
         unsigned cpu;
 
         for (cpu = 0; cpu < NR_CPUS; cpu++) {
-                cpus_clear(cpu_sibling_map[cpu]);
+                cpus_clear(per_cpu(cpu_sibling_map, cpu));
                 /*
                  * cpu_core_map will be zeroed when the per
                  * cpu area is allocated.
diff --git a/block/blktrace.c b/block/blktrace.c
index 775471ef84a5..d00ac3993c18 100644
--- a/block/blktrace.c
+++ b/block/blktrace.c
@@ -550,7 +550,7 @@ static void blk_trace_set_ht_offsets(void)
         for_each_online_cpu(cpu) {
                 unsigned long long *cpu_off, *sibling_off;
 
-                for_each_cpu_mask(i, cpu_sibling_map[cpu]) {
+                for_each_cpu_mask(i, per_cpu(cpu_sibling_map, cpu)) {
                         if (i == cpu)
                                 continue;
 
diff --git a/include/asm-ia64/smp.h b/include/asm-ia64/smp.h
index 6314b29e8c4d..1703c9d885bd 100644
--- a/include/asm-ia64/smp.h
+++ b/include/asm-ia64/smp.h
@@ -58,7 +58,7 @@ extern char no_int_routing __devinitdata;
 
 extern cpumask_t cpu_online_map;
 extern cpumask_t cpu_core_map[NR_CPUS];
-extern cpumask_t cpu_sibling_map[NR_CPUS];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern int smp_num_siblings;
 extern int smp_num_cpucores;
 extern void __iomem *ipi_base_addr;
diff --git a/include/asm-ia64/topology.h b/include/asm-ia64/topology.h
index 233f1caae048..2d67b72b18d0 100644
--- a/include/asm-ia64/topology.h
+++ b/include/asm-ia64/topology.h
@@ -112,7 +112,7 @@ void build_cpu_to_node_map(void);
 #define topology_physical_package_id(cpu) (cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu) (cpu_data(cpu)->core_id)
 #define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
 #define smt_capable() (smp_num_siblings > 1)
 #endif
 
diff --git a/include/asm-powerpc/smp.h b/include/asm-powerpc/smp.h
index 19102bfc14ca..505f35bacaa9 100644
--- a/include/asm-powerpc/smp.h
+++ b/include/asm-powerpc/smp.h
@@ -26,6 +26,7 @@
 #ifdef CONFIG_PPC64
 #include <asm/paca.h>
 #endif
+#include <asm/percpu.h>
 
 extern int boot_cpuid;
 
@@ -58,7 +59,7 @@ extern int smp_hw_index[];
         (smp_hw_index[(cpu)] = (phys))
 #endif
 
-extern cpumask_t cpu_sibling_map[NR_CPUS];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 
 /* Since OpenPIC has only 4 IPIs, we use slightly different message numbers.
  *
@@ -77,6 +78,7 @@ void smp_init_pSeries(void);
 void smp_init_cell(void);
 void smp_init_celleb(void);
 void smp_setup_cpu_maps(void);
+void smp_setup_cpu_sibling_map(void);
 
 extern int __cpu_disable(void);
 extern void __cpu_die(unsigned int cpu);
diff --git a/include/asm-powerpc/topology.h b/include/asm-powerpc/topology.h
index 0ad21a849b5f..ca23b681ad05 100644
--- a/include/asm-powerpc/topology.h
+++ b/include/asm-powerpc/topology.h
@@ -108,7 +108,7 @@ static inline void sysfs_remove_device_from_node(struct sys_device *dev,
 #ifdef CONFIG_PPC64
 #include <asm/smp.h>
 
-#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
 #endif
 #endif
 
diff --git a/include/asm-sparc64/smp.h b/include/asm-sparc64/smp.h
index e8a96a31761b..42c09949526c 100644
--- a/include/asm-sparc64/smp.h
+++ b/include/asm-sparc64/smp.h
@@ -28,8 +28,9 @@
 
 #include <asm/bitops.h>
 #include <asm/atomic.h>
+#include <asm/percpu.h>
 
-extern cpumask_t cpu_sibling_map[NR_CPUS];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 extern cpumask_t cpu_core_map[NR_CPUS];
 extern int sparc64_multi_core;
 
diff --git a/include/asm-sparc64/topology.h b/include/asm-sparc64/topology.h
index 290ac75f385b..c6b557034f68 100644
--- a/include/asm-sparc64/topology.h
+++ b/include/asm-sparc64/topology.h
@@ -5,7 +5,7 @@
 #define topology_physical_package_id(cpu) (cpu_data(cpu).proc_id)
 #define topology_core_id(cpu) (cpu_data(cpu).core_id)
 #define topology_core_siblings(cpu) (cpu_core_map[cpu])
-#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
 #define mc_capable() (sparc64_multi_core)
 #define smt_capable() (sparc64_multi_core)
 #endif /* CONFIG_SMP */
diff --git a/include/asm-x86/smp_32.h b/include/asm-x86/smp_32.h
index 01ab31bb262a..955dd7c8538f 100644
--- a/include/asm-x86/smp_32.h
+++ b/include/asm-x86/smp_32.h
@@ -30,7 +30,7 @@
 extern void smp_alloc_memory(void);
 extern int pic_mode;
 extern int smp_num_siblings;
-extern cpumask_t cpu_sibling_map[];
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 
 extern void (*mtrr_hook) (void);
diff --git a/include/asm-x86/smp_64.h b/include/asm-x86/smp_64.h
index 65f844864415..f5bcee1c0927 100644
--- a/include/asm-x86/smp_64.h
+++ b/include/asm-x86/smp_64.h
@@ -38,12 +38,14 @@ extern void unlock_ipi_call_lock(void);
 extern int smp_num_siblings;
 extern void smp_send_reschedule(int cpu);
 
-extern cpumask_t cpu_sibling_map[NR_CPUS];
 /*
- * cpu_core_map lives in a per cpu area
+ * cpu_sibling_map and cpu_core_map now live
+ * in the per cpu area
  *
+ * extern cpumask_t cpu_sibling_map[NR_CPUS];
  * extern cpumask_t cpu_core_map[NR_CPUS];
  */
+DECLARE_PER_CPU(cpumask_t, cpu_sibling_map);
 DECLARE_PER_CPU(cpumask_t, cpu_core_map);
 extern u8 cpu_llc_id[NR_CPUS];
 
diff --git a/include/asm-x86/topology_32.h b/include/asm-x86/topology_32.h
index 7b68dbcd0eb0..ae1074603c4b 100644
--- a/include/asm-x86/topology_32.h
+++ b/include/asm-x86/topology_32.h
@@ -31,7 +31,7 @@
 #define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
 #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
 #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
-#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
 #endif
 
 #ifdef CONFIG_NUMA
diff --git a/include/asm-x86/topology_64.h b/include/asm-x86/topology_64.h
index b8590dff34c8..848c17f92226 100644
--- a/include/asm-x86/topology_64.h
+++ b/include/asm-x86/topology_64.h
@@ -59,7 +59,7 @@ extern int __node_distance(int, int);
 #define topology_physical_package_id(cpu) (cpu_data[cpu].phys_proc_id)
 #define topology_core_id(cpu) (cpu_data[cpu].cpu_core_id)
 #define topology_core_siblings(cpu) (per_cpu(cpu_core_map, cpu))
-#define topology_thread_siblings(cpu) (cpu_sibling_map[cpu])
+#define topology_thread_siblings(cpu) (per_cpu(cpu_sibling_map, cpu))
 #define mc_capable() (boot_cpu_data.x86_max_cores > 1)
 #define smt_capable() (smp_num_siblings > 1)
 #endif
diff --git a/kernel/sched.c b/kernel/sched.c
index bba57adb9504..78c8fbd373a3 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5869,7 +5869,7 @@ static int cpu_to_core_group(int cpu, const cpumask_t *cpu_map,
                          struct sched_group **sg)
 {
         int group;
-        cpumask_t mask = cpu_sibling_map[cpu];
+        cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
         cpus_and(mask, mask, *cpu_map);
         group = first_cpu(mask);
         if (sg)
@@ -5898,7 +5898,7 @@ static int cpu_to_phys_group(int cpu, const cpumask_t *cpu_map,
         cpus_and(mask, mask, *cpu_map);
         group = first_cpu(mask);
 #elif defined(CONFIG_SCHED_SMT)
-        cpumask_t mask = cpu_sibling_map[cpu];
+        cpumask_t mask = per_cpu(cpu_sibling_map, cpu);
         cpus_and(mask, mask, *cpu_map);
         group = first_cpu(mask);
 #else
@@ -6132,7 +6132,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
                 p = sd;
                 sd = &per_cpu(cpu_domains, i);
                 *sd = SD_SIBLING_INIT;
-                sd->span = cpu_sibling_map[i];
+                sd->span = per_cpu(cpu_sibling_map, i);
                 cpus_and(sd->span, sd->span, *cpu_map);
                 sd->parent = p;
                 p->child = sd;
@@ -6143,7 +6143,7 @@ static int build_sched_domains(const cpumask_t *cpu_map)
 #ifdef CONFIG_SCHED_SMT
         /* Set up CPU (sibling) groups */
         for_each_cpu_mask(i, *cpu_map) {
-                cpumask_t this_sibling_map = cpu_sibling_map[i];
+                cpumask_t this_sibling_map = per_cpu(cpu_sibling_map, i);
                 cpus_and(this_sibling_map, this_sibling_map, *cpu_map);
                 if (i != first_cpu(this_sibling_map))
                         continue;