about summary refs log tree commit diff stats
path: root/arch/x86_64/kernel/smpboot.c
diff options
context:
space:
mode:
author: Siddha, Suresh B <suresh.b.siddha@intel.com> 2005-11-05 11:25:54 -0500
committer: Linus Torvalds <torvalds@g5.osdl.org> 2005-11-14 22:55:16 -0500
commit: 94605eff572b727aaad9b4b29bc358b919096503 (patch)
tree: 657a848d8ef34d2f94bbad3aa4e5458d2d3f2d2b /arch/x86_64/kernel/smpboot.c
parent: e90f22edf432512219cc2952f5811961abbd164f (diff)
[PATCH] x86-64/i386: Intel HT, Multi core detection fixes
Fields obtained through cpuid vector 0x1 (ebx[16:23]) and vector 0x4 (eax[14:25], eax[26:31]) indicate the maximum values and might not always be the same as what is available and what the OS sees. So make sure the "siblings" and "cpu cores" values in /proc/cpuinfo reflect the values as seen by the OS instead of what the cpuid instruction says. This will also fix the buggy BIOS cases (for example, where cpuid on a single-core cpu says there are "2" siblings even when HT is disabled in the BIOS; http://bugzilla.kernel.org/show_bug.cgi?id=4359). Signed-off-by: Suresh Siddha <suresh.b.siddha@intel.com> Signed-off-by: Andi Kleen <ak@suse.de> Signed-off-by: Linus Torvalds <torvalds@osdl.org>
Diffstat (limited to 'arch/x86_64/kernel/smpboot.c')
-rw-r--r-- arch/x86_64/kernel/smpboot.c | 69
1 file changed, 55 insertions(+), 14 deletions(-)
diff --git a/arch/x86_64/kernel/smpboot.c b/arch/x86_64/kernel/smpboot.c
index f74319a80659..2b9ddba61b37 100644
--- a/arch/x86_64/kernel/smpboot.c
+++ b/arch/x86_64/kernel/smpboot.c
@@ -64,6 +64,7 @@
64int smp_num_siblings = 1; 64int smp_num_siblings = 1;
65/* Package ID of each logical CPU */ 65/* Package ID of each logical CPU */
66u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; 66u8 phys_proc_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
67/* core ID of each logical CPU */
67u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID }; 68u8 cpu_core_id[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = BAD_APICID };
68EXPORT_SYMBOL(phys_proc_id); 69EXPORT_SYMBOL(phys_proc_id);
69EXPORT_SYMBOL(cpu_core_id); 70EXPORT_SYMBOL(cpu_core_id);
@@ -89,7 +90,10 @@ struct cpuinfo_x86 cpu_data[NR_CPUS] __cacheline_aligned;
89/* Set when the idlers are all forked */ 90/* Set when the idlers are all forked */
90int smp_threads_ready; 91int smp_threads_ready;
91 92
93/* representing HT siblings of each logical CPU */
92cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly; 94cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
95
96/* representing HT and core siblings of each logical CPU */
93cpumask_t cpu_core_map[NR_CPUS] __read_mostly; 97cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
94EXPORT_SYMBOL(cpu_core_map); 98EXPORT_SYMBOL(cpu_core_map);
95 99
@@ -436,30 +440,59 @@ void __cpuinit smp_callin(void)
436 cpu_set(cpuid, cpu_callin_map); 440 cpu_set(cpuid, cpu_callin_map);
437} 441}
438 442
443/* representing cpus for which sibling maps can be computed */
444static cpumask_t cpu_sibling_setup_map;
445
439static inline void set_cpu_sibling_map(int cpu) 446static inline void set_cpu_sibling_map(int cpu)
440{ 447{
441 int i; 448 int i;
449 struct cpuinfo_x86 *c = cpu_data;
450
451 cpu_set(cpu, cpu_sibling_setup_map);
442 452
443 if (smp_num_siblings > 1) { 453 if (smp_num_siblings > 1) {
444 for_each_cpu(i) { 454 for_each_cpu_mask(i, cpu_sibling_setup_map) {
445 if (cpu_core_id[cpu] == cpu_core_id[i]) { 455 if (phys_proc_id[cpu] == phys_proc_id[i] &&
456 cpu_core_id[cpu] == cpu_core_id[i]) {
446 cpu_set(i, cpu_sibling_map[cpu]); 457 cpu_set(i, cpu_sibling_map[cpu]);
447 cpu_set(cpu, cpu_sibling_map[i]); 458 cpu_set(cpu, cpu_sibling_map[i]);
459 cpu_set(i, cpu_core_map[cpu]);
460 cpu_set(cpu, cpu_core_map[i]);
448 } 461 }
449 } 462 }
450 } else { 463 } else {
451 cpu_set(cpu, cpu_sibling_map[cpu]); 464 cpu_set(cpu, cpu_sibling_map[cpu]);
452 } 465 }
453 466
454 if (current_cpu_data.x86_num_cores > 1) { 467 if (current_cpu_data.x86_max_cores == 1) {
455 for_each_cpu(i) {
456 if (phys_proc_id[cpu] == phys_proc_id[i]) {
457 cpu_set(i, cpu_core_map[cpu]);
458 cpu_set(cpu, cpu_core_map[i]);
459 }
460 }
461 } else {
462 cpu_core_map[cpu] = cpu_sibling_map[cpu]; 468 cpu_core_map[cpu] = cpu_sibling_map[cpu];
469 c[cpu].booted_cores = 1;
470 return;
471 }
472
473 for_each_cpu_mask(i, cpu_sibling_setup_map) {
474 if (phys_proc_id[cpu] == phys_proc_id[i]) {
475 cpu_set(i, cpu_core_map[cpu]);
476 cpu_set(cpu, cpu_core_map[i]);
477 /*
478 * Does this new cpu bringup a new core?
479 */
480 if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
481 /*
482 * for each core in package, increment
483 * the booted_cores for this new cpu
484 */
485 if (first_cpu(cpu_sibling_map[i]) == i)
486 c[cpu].booted_cores++;
487 /*
488 * increment the core count for all
489 * the other cpus in this package
490 */
491 if (i != cpu)
492 c[i].booted_cores++;
493 } else if (i != cpu && !c[cpu].booted_cores)
494 c[cpu].booted_cores = c[i].booted_cores;
495 }
463 } 496 }
464} 497}
465 498
@@ -993,6 +1026,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
993 nmi_watchdog_default(); 1026 nmi_watchdog_default();
994 current_cpu_data = boot_cpu_data; 1027 current_cpu_data = boot_cpu_data;
995 current_thread_info()->cpu = 0; /* needed? */ 1028 current_thread_info()->cpu = 0; /* needed? */
1029 set_cpu_sibling_map(0);
996 1030
997 if (smp_sanity_check(max_cpus) < 0) { 1031 if (smp_sanity_check(max_cpus) < 0) {
998 printk(KERN_INFO "SMP disabled\n"); 1032 printk(KERN_INFO "SMP disabled\n");
@@ -1036,8 +1070,6 @@ void __init smp_prepare_boot_cpu(void)
1036 int me = smp_processor_id(); 1070 int me = smp_processor_id();
1037 cpu_set(me, cpu_online_map); 1071 cpu_set(me, cpu_online_map);
1038 cpu_set(me, cpu_callout_map); 1072 cpu_set(me, cpu_callout_map);
1039 cpu_set(0, cpu_sibling_map[0]);
1040 cpu_set(0, cpu_core_map[0]);
1041 per_cpu(cpu_state, me) = CPU_ONLINE; 1073 per_cpu(cpu_state, me) = CPU_ONLINE;
1042} 1074}
1043 1075
@@ -1106,15 +1138,24 @@ void __init smp_cpus_done(unsigned int max_cpus)
1106static void remove_siblinginfo(int cpu) 1138static void remove_siblinginfo(int cpu)
1107{ 1139{
1108 int sibling; 1140 int sibling;
1141 struct cpuinfo_x86 *c = cpu_data;
1109 1142
1143 for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
1144 cpu_clear(cpu, cpu_core_map[sibling]);
1145 /*
1146 * last thread sibling in this cpu core going down
1147 */
1148 if (cpus_weight(cpu_sibling_map[cpu]) == 1)
1149 c[sibling].booted_cores--;
1150 }
1151
1110 for_each_cpu_mask(sibling, cpu_sibling_map[cpu]) 1152 for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
1111 cpu_clear(cpu, cpu_sibling_map[sibling]); 1153 cpu_clear(cpu, cpu_sibling_map[sibling]);
1112 for_each_cpu_mask(sibling, cpu_core_map[cpu])
1113 cpu_clear(cpu, cpu_core_map[sibling]);
1114 cpus_clear(cpu_sibling_map[cpu]); 1154 cpus_clear(cpu_sibling_map[cpu]);
1115 cpus_clear(cpu_core_map[cpu]); 1155 cpus_clear(cpu_core_map[cpu]);
1116 phys_proc_id[cpu] = BAD_APICID; 1156 phys_proc_id[cpu] = BAD_APICID;
1117 cpu_core_id[cpu] = BAD_APICID; 1157 cpu_core_id[cpu] = BAD_APICID;
1158 cpu_clear(cpu, cpu_sibling_setup_map);
1118} 1159}
1119 1160
1120void remove_cpu_from_maps(void) 1161void remove_cpu_from_maps(void)