about summary refs log tree commit diff stats
path: root/arch/i386/kernel/cpu
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@g5.osdl.org>2005-11-14 22:56:02 -0500
committerLinus Torvalds <torvalds@g5.osdl.org>2005-11-14 22:56:02 -0500
commit4060994c3e337b40e0f6fa8ce2cc178e021baf3d (patch)
tree980297c1747ca89354bc879cc5d17903eacb19e2 /arch/i386/kernel/cpu
parent0174f72f848dfe7dc7488799776303c81b181b16 (diff)
parentd3ee871e63d0a0c70413dc0aa5534b8d6cd6ec37 (diff)
Merge x86-64 update from Andi
Diffstat (limited to 'arch/i386/kernel/cpu')
-rw-r--r--arch/i386/kernel/cpu/amd.c12
-rw-r--r--arch/i386/kernel/cpu/common.c40
-rw-r--r--arch/i386/kernel/cpu/intel.c2
-rw-r--r--arch/i386/kernel/cpu/intel_cacheinfo.c46
-rw-r--r--arch/i386/kernel/cpu/mtrr/main.c8
-rw-r--r--arch/i386/kernel/cpu/proc.c7
6 files changed, 66 insertions, 49 deletions
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 53a1681cd964..e344ef88cfcd 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -206,9 +206,9 @@ static void __init init_amd(struct cpuinfo_x86 *c)
206 display_cacheinfo(c); 206 display_cacheinfo(c);
207 207
208 if (cpuid_eax(0x80000000) >= 0x80000008) { 208 if (cpuid_eax(0x80000000) >= 0x80000008) {
209 c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1; 209 c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
210 if (c->x86_num_cores & (c->x86_num_cores - 1)) 210 if (c->x86_max_cores & (c->x86_max_cores - 1))
211 c->x86_num_cores = 1; 211 c->x86_max_cores = 1;
212 } 212 }
213 213
214#ifdef CONFIG_X86_HT 214#ifdef CONFIG_X86_HT
@@ -217,15 +217,15 @@ static void __init init_amd(struct cpuinfo_x86 *c)
217 * distingush the cores. Assumes number of cores is a power 217 * distingush the cores. Assumes number of cores is a power
218 * of two. 218 * of two.
219 */ 219 */
220 if (c->x86_num_cores > 1) { 220 if (c->x86_max_cores > 1) {
221 int cpu = smp_processor_id(); 221 int cpu = smp_processor_id();
222 unsigned bits = 0; 222 unsigned bits = 0;
223 while ((1 << bits) < c->x86_num_cores) 223 while ((1 << bits) < c->x86_max_cores)
224 bits++; 224 bits++;
225 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1); 225 cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
226 phys_proc_id[cpu] >>= bits; 226 phys_proc_id[cpu] >>= bits;
227 printk(KERN_INFO "CPU %d(%d) -> Core %d\n", 227 printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
228 cpu, c->x86_num_cores, cpu_core_id[cpu]); 228 cpu, c->x86_max_cores, cpu_core_id[cpu]);
229 } 229 }
230#endif 230#endif
231} 231}
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index c145fb30002e..31e344b26bae 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -231,10 +231,10 @@ static void __init early_cpu_detect(void)
231 cpuid(0x00000001, &tfms, &misc, &junk, &cap0); 231 cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
232 c->x86 = (tfms >> 8) & 15; 232 c->x86 = (tfms >> 8) & 15;
233 c->x86_model = (tfms >> 4) & 15; 233 c->x86_model = (tfms >> 4) & 15;
234 if (c->x86 == 0xf) { 234 if (c->x86 == 0xf)
235 c->x86 += (tfms >> 20) & 0xff; 235 c->x86 += (tfms >> 20) & 0xff;
236 if (c->x86 >= 0x6)
236 c->x86_model += ((tfms >> 16) & 0xF) << 4; 237 c->x86_model += ((tfms >> 16) & 0xF) << 4;
237 }
238 c->x86_mask = tfms & 15; 238 c->x86_mask = tfms & 15;
239 if (cap0 & (1<<19)) 239 if (cap0 & (1<<19))
240 c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8; 240 c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
@@ -333,7 +333,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
333 c->x86_model = c->x86_mask = 0; /* So far unknown... */ 333 c->x86_model = c->x86_mask = 0; /* So far unknown... */
334 c->x86_vendor_id[0] = '\0'; /* Unset */ 334 c->x86_vendor_id[0] = '\0'; /* Unset */
335 c->x86_model_id[0] = '\0'; /* Unset */ 335 c->x86_model_id[0] = '\0'; /* Unset */
336 c->x86_num_cores = 1; 336 c->x86_max_cores = 1;
337 memset(&c->x86_capability, 0, sizeof c->x86_capability); 337 memset(&c->x86_capability, 0, sizeof c->x86_capability);
338 338
339 if (!have_cpuid_p()) { 339 if (!have_cpuid_p()) {
@@ -443,52 +443,44 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
443void __devinit detect_ht(struct cpuinfo_x86 *c) 443void __devinit detect_ht(struct cpuinfo_x86 *c)
444{ 444{
445 u32 eax, ebx, ecx, edx; 445 u32 eax, ebx, ecx, edx;
446 int index_msb, tmp; 446 int index_msb, core_bits;
447 int cpu = smp_processor_id(); 447 int cpu = smp_processor_id();
448 448
449 cpuid(1, &eax, &ebx, &ecx, &edx);
450
451 c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
452
449 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY)) 453 if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
450 return; 454 return;
451 455
452 cpuid(1, &eax, &ebx, &ecx, &edx);
453 smp_num_siblings = (ebx & 0xff0000) >> 16; 456 smp_num_siblings = (ebx & 0xff0000) >> 16;
454 457
455 if (smp_num_siblings == 1) { 458 if (smp_num_siblings == 1) {
456 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n"); 459 printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
457 } else if (smp_num_siblings > 1 ) { 460 } else if (smp_num_siblings > 1 ) {
458 index_msb = 31;
459 461
460 if (smp_num_siblings > NR_CPUS) { 462 if (smp_num_siblings > NR_CPUS) {
461 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings); 463 printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
462 smp_num_siblings = 1; 464 smp_num_siblings = 1;
463 return; 465 return;
464 } 466 }
465 tmp = smp_num_siblings; 467
466 while ((tmp & 0x80000000 ) == 0) { 468 index_msb = get_count_order(smp_num_siblings);
467 tmp <<=1 ;
468 index_msb--;
469 }
470 if (smp_num_siblings & (smp_num_siblings - 1))
471 index_msb++;
472 phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); 469 phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
473 470
474 printk(KERN_INFO "CPU: Physical Processor ID: %d\n", 471 printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
475 phys_proc_id[cpu]); 472 phys_proc_id[cpu]);
476 473
477 smp_num_siblings = smp_num_siblings / c->x86_num_cores; 474 smp_num_siblings = smp_num_siblings / c->x86_max_cores;
478 475
479 tmp = smp_num_siblings; 476 index_msb = get_count_order(smp_num_siblings) ;
480 index_msb = 31;
481 while ((tmp & 0x80000000) == 0) {
482 tmp <<=1 ;
483 index_msb--;
484 }
485 477
486 if (smp_num_siblings & (smp_num_siblings - 1)) 478 core_bits = get_count_order(c->x86_max_cores);
487 index_msb++;
488 479
489 cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb); 480 cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
481 ((1 << core_bits) - 1);
490 482
491 if (c->x86_num_cores > 1) 483 if (c->x86_max_cores > 1)
492 printk(KERN_INFO "CPU: Processor Core ID: %d\n", 484 printk(KERN_INFO "CPU: Processor Core ID: %d\n",
493 cpu_core_id[cpu]); 485 cpu_core_id[cpu]);
494 } 486 }
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index c28d26fb5f24..5e2da704f0fa 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -158,7 +158,7 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
158 if ( p ) 158 if ( p )
159 strcpy(c->x86_model_id, p); 159 strcpy(c->x86_model_id, p);
160 160
161 c->x86_num_cores = num_cpu_cores(c); 161 c->x86_max_cores = num_cpu_cores(c);
162 162
163 detect_ht(c); 163 detect_ht(c);
164 164
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 4dc42a189ae5..fbfd374aa336 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -293,29 +293,45 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
293#ifdef CONFIG_SMP 293#ifdef CONFIG_SMP
294static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) 294static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
295{ 295{
296 struct _cpuid4_info *this_leaf; 296 struct _cpuid4_info *this_leaf, *sibling_leaf;
297 unsigned long num_threads_sharing; 297 unsigned long num_threads_sharing;
298#ifdef CONFIG_X86_HT 298 int index_msb, i;
299 struct cpuinfo_x86 *c = cpu_data + cpu; 299 struct cpuinfo_x86 *c = cpu_data;
300#endif
301 300
302 this_leaf = CPUID4_INFO_IDX(cpu, index); 301 this_leaf = CPUID4_INFO_IDX(cpu, index);
303 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing; 302 num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
304 303
305 if (num_threads_sharing == 1) 304 if (num_threads_sharing == 1)
306 cpu_set(cpu, this_leaf->shared_cpu_map); 305 cpu_set(cpu, this_leaf->shared_cpu_map);
307#ifdef CONFIG_X86_HT 306 else {
308 else if (num_threads_sharing == smp_num_siblings) 307 index_msb = get_count_order(num_threads_sharing);
309 this_leaf->shared_cpu_map = cpu_sibling_map[cpu]; 308
310 else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings)) 309 for_each_online_cpu(i) {
311 this_leaf->shared_cpu_map = cpu_core_map[cpu]; 310 if (c[i].apicid >> index_msb ==
312 else 311 c[cpu].apicid >> index_msb) {
313 printk(KERN_DEBUG "Number of CPUs sharing cache didn't match " 312 cpu_set(i, this_leaf->shared_cpu_map);
314 "any known set of CPUs\n"); 313 if (i != cpu && cpuid4_info[i]) {
315#endif 314 sibling_leaf = CPUID4_INFO_IDX(i, index);
315 cpu_set(cpu, sibling_leaf->shared_cpu_map);
316 }
317 }
318 }
319 }
320}
321static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
322{
323 struct _cpuid4_info *this_leaf, *sibling_leaf;
324 int sibling;
325
326 this_leaf = CPUID4_INFO_IDX(cpu, index);
327 for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
328 sibling_leaf = CPUID4_INFO_IDX(sibling, index);
329 cpu_clear(cpu, sibling_leaf->shared_cpu_map);
330 }
316} 331}
317#else 332#else
318static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {} 333static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
334static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
319#endif 335#endif
320 336
321static void free_cache_attributes(unsigned int cpu) 337static void free_cache_attributes(unsigned int cpu)
@@ -574,8 +590,10 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
574 unsigned int cpu = sys_dev->id; 590 unsigned int cpu = sys_dev->id;
575 unsigned long i; 591 unsigned long i;
576 592
577 for (i = 0; i < num_cache_leaves; i++) 593 for (i = 0; i < num_cache_leaves; i++) {
594 cache_remove_shared_cpu_map(cpu, i);
578 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj)); 595 kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
596 }
579 kobject_unregister(cache_kobject[cpu]); 597 kobject_unregister(cache_kobject[cpu]);
580 cpuid4_cache_sysfs_exit(cpu); 598 cpuid4_cache_sysfs_exit(cpu);
581 return; 599 return;
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index dd4ebd6af7e4..1e9db198c440 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -626,6 +626,14 @@ void __init mtrr_bp_init(void)
626 if (cpuid_eax(0x80000000) >= 0x80000008) { 626 if (cpuid_eax(0x80000000) >= 0x80000008) {
627 u32 phys_addr; 627 u32 phys_addr;
628 phys_addr = cpuid_eax(0x80000008) & 0xff; 628 phys_addr = cpuid_eax(0x80000008) & 0xff;
629 /* CPUID workaround for Intel 0F33/0F34 CPU */
630 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
631 boot_cpu_data.x86 == 0xF &&
632 boot_cpu_data.x86_model == 0x3 &&
633 (boot_cpu_data.x86_mask == 0x3 ||
634 boot_cpu_data.x86_mask == 0x4))
635 phys_addr = 36;
636
629 size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1); 637 size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
630 size_and_mask = ~size_or_mask & 0xfff00000; 638 size_and_mask = ~size_or_mask & 0xfff00000;
631 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR && 639 } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 41b871ecf4b3..e7921315ae9d 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -94,12 +94,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
94 if (c->x86_cache_size >= 0) 94 if (c->x86_cache_size >= 0)
95 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size); 95 seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
96#ifdef CONFIG_X86_HT 96#ifdef CONFIG_X86_HT
97 if (c->x86_num_cores * smp_num_siblings > 1) { 97 if (c->x86_max_cores * smp_num_siblings > 1) {
98 seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]); 98 seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]);
99 seq_printf(m, "siblings\t: %d\n", 99 seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
100 c->x86_num_cores * smp_num_siblings);
101 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]); 100 seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]);
102 seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores); 101 seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
103 } 102 }
104#endif 103#endif
105 104