author     Linus Torvalds <torvalds@g5.osdl.org>  2005-11-14 22:56:02 -0500
committer  Linus Torvalds <torvalds@g5.osdl.org>  2005-11-14 22:56:02 -0500
commit     4060994c3e337b40e0f6fa8ce2cc178e021baf3d
tree       980297c1747ca89354bc879cc5d17903eacb19e2  /arch/i386
parent     0174f72f848dfe7dc7488799776303c81b181b16
parent     d3ee871e63d0a0c70413dc0aa5534b8d6cd6ec37

Merge x86-64 update from Andi
Diffstat (limited to 'arch/i386')
-rw-r--r--  arch/i386/kernel/acpi/boot.c             17
-rw-r--r--  arch/i386/kernel/cpu/amd.c               12
-rw-r--r--  arch/i386/kernel/cpu/common.c            40
-rw-r--r--  arch/i386/kernel/cpu/intel.c              2
-rw-r--r--  arch/i386/kernel/cpu/intel_cacheinfo.c   46
-rw-r--r--  arch/i386/kernel/cpu/mtrr/main.c          8
-rw-r--r--  arch/i386/kernel/cpu/proc.c               7
-rw-r--r--  arch/i386/kernel/smpboot.c               73
-rw-r--r--  arch/i386/kernel/srat.c                   4
9 files changed, 129 insertions, 80 deletions
diff --git a/arch/i386/kernel/acpi/boot.c b/arch/i386/kernel/acpi/boot.c
index b66c13c0cc0f..f36677241ecd 100644
--- a/arch/i386/kernel/acpi/boot.c
+++ b/arch/i386/kernel/acpi/boot.c
@@ -39,17 +39,14 @@
 
 #ifdef CONFIG_X86_64
 
-static inline void acpi_madt_oem_check(char *oem_id, char *oem_table_id)
-{
-}
 extern void __init clustered_apic_check(void);
-static inline int ioapic_setup_disabled(void)
-{
-        return 0;
-}
 
+extern int gsi_irq_sharing(int gsi);
 #include <asm/proto.h>
 
+static inline int acpi_madt_oem_check(char *oem_id, char *oem_table_id) { return 0; }
+
+
 #else /* X86 */
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -57,6 +54,8 @@ static inline int ioapic_setup_disabled(void)
 #include <mach_mpparse.h>
 #endif /* CONFIG_X86_LOCAL_APIC */
 
+static inline int gsi_irq_sharing(int gsi) { return gsi; }
+
 #endif /* X86 */
 
 #define BAD_MADT_ENTRY(entry, end) ( \
@@ -459,7 +458,7 @@ int acpi_gsi_to_irq(u32 gsi, unsigned int *irq)
                *irq = IO_APIC_VECTOR(gsi);
        else
 #endif
-               *irq = gsi;
+               *irq = gsi_irq_sharing(gsi);
        return 0;
 }
 
@@ -543,7 +542,7 @@ acpi_scan_rsdp(unsigned long start, unsigned long length)
         * RSDP signature.
         */
        for (offset = 0; offset < length; offset += 16) {
-               if (strncmp((char *)(start + offset), "RSD PTR ", sig_len))
+               if (strncmp((char *)(phys_to_virt(start) + offset), "RSD PTR ", sig_len))
                        continue;
                return (start + offset);
        }
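
Note on the acpi_scan_rsdp() change above: `start` is a physical address, and the old code dereferenced it directly, which only works where physical and virtual addresses happen to coincide. phys_to_virt() converts it into the kernel's lowmem linear mapping first. A minimal userspace sketch of what that conversion amounts to, assuming the default i386 3G/1G split (PAGE_OFFSET of 0xC0000000); the real definition lives in asm/io.h:

#include <stdio.h>

#define PAGE_OFFSET 0xC0000000UL        /* assumed default 3G/1G split */

/* Sketch: i386 lowmem is linearly mapped at PAGE_OFFSET, so the
 * physical-to-virtual conversion is plain pointer arithmetic. */
static void *phys_to_virt_sketch(unsigned long paddr)
{
        return (void *)(paddr + PAGE_OFFSET);
}

int main(void)
{
        /* The BIOS area scanned for "RSD PTR " sits below 1MB physically,
         * e.g. physical 0xE0000 corresponds to virtual 0xC00E0000. */
        printf("%p\n", phys_to_virt_sketch(0xE0000));
        return 0;
}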
diff --git a/arch/i386/kernel/cpu/amd.c b/arch/i386/kernel/cpu/amd.c
index 53a1681cd964..e344ef88cfcd 100644
--- a/arch/i386/kernel/cpu/amd.c
+++ b/arch/i386/kernel/cpu/amd.c
@@ -206,9 +206,9 @@ static void __init init_amd(struct cpuinfo_x86 *c)
        display_cacheinfo(c);
 
        if (cpuid_eax(0x80000000) >= 0x80000008) {
-               c->x86_num_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
-               if (c->x86_num_cores & (c->x86_num_cores - 1))
-                       c->x86_num_cores = 1;
+               c->x86_max_cores = (cpuid_ecx(0x80000008) & 0xff) + 1;
+               if (c->x86_max_cores & (c->x86_max_cores - 1))
+                       c->x86_max_cores = 1;
        }
 
 #ifdef CONFIG_X86_HT
@@ -217,15 +217,15 @@ static void __init init_amd(struct cpuinfo_x86 *c)
         * distingush the cores.  Assumes number of cores is a power
         * of two.
         */
-       if (c->x86_num_cores > 1) {
+       if (c->x86_max_cores > 1) {
                int cpu = smp_processor_id();
                unsigned bits = 0;
-               while ((1 << bits) < c->x86_num_cores)
+               while ((1 << bits) < c->x86_max_cores)
                        bits++;
                cpu_core_id[cpu] = phys_proc_id[cpu] & ((1<<bits)-1);
                phys_proc_id[cpu] >>= bits;
                printk(KERN_INFO "CPU %d(%d) -> Core %d\n",
-                      cpu, c->x86_num_cores, cpu_core_id[cpu]);
+                      cpu, c->x86_max_cores, cpu_core_id[cpu]);
        }
 #endif
 }
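
The two idioms init_amd() relies on above, as a small self-contained sketch: `n & (n - 1)` is non-zero unless n is a power of two (the code falls back to one core otherwise), and the while loop computes how many low APIC-ID bits the core number occupies. Hypothetical values only:

#include <assert.h>

/* n & (n - 1) clears the lowest set bit, so it is zero only for
 * powers of two (and for zero itself). */
static int is_power_of_two(unsigned int n)
{
        return n && !(n & (n - 1));
}

/* Number of low APIC-ID bits occupied by the core number. */
static unsigned int core_id_bits(unsigned int max_cores)
{
        unsigned int bits = 0;

        while ((1u << bits) < max_cores)
                bits++;
        return bits;
}

int main(void)
{
        assert(is_power_of_two(4));
        assert(!is_power_of_two(6));
        assert(core_id_bits(2) == 1);   /* dual-core: 1 bit of core ID */
        assert(core_id_bits(4) == 2);
        return 0;
}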
diff --git a/arch/i386/kernel/cpu/common.c b/arch/i386/kernel/cpu/common.c
index c145fb30002e..31e344b26bae 100644
--- a/arch/i386/kernel/cpu/common.c
+++ b/arch/i386/kernel/cpu/common.c
@@ -231,10 +231,10 @@ static void __init early_cpu_detect(void)
                cpuid(0x00000001, &tfms, &misc, &junk, &cap0);
                c->x86 = (tfms >> 8) & 15;
                c->x86_model = (tfms >> 4) & 15;
-               if (c->x86 == 0xf) {
+               if (c->x86 == 0xf)
                        c->x86 += (tfms >> 20) & 0xff;
+               if (c->x86 >= 0x6)
                        c->x86_model += ((tfms >> 16) & 0xF) << 4;
-               }
                c->x86_mask = tfms & 15;
                if (cap0 & (1<<19))
                        c->x86_cache_alignment = ((misc >> 8) & 0xff) * 8;
@@ -333,7 +333,7 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
        c->x86_model = c->x86_mask = 0; /* So far unknown... */
        c->x86_vendor_id[0] = '\0'; /* Unset */
        c->x86_model_id[0] = '\0';  /* Unset */
-       c->x86_num_cores = 1;
+       c->x86_max_cores = 1;
        memset(&c->x86_capability, 0, sizeof c->x86_capability);
 
        if (!have_cpuid_p()) {
@@ -443,52 +443,44 @@ void __devinit identify_cpu(struct cpuinfo_x86 *c)
 void __devinit detect_ht(struct cpuinfo_x86 *c)
 {
        u32 eax, ebx, ecx, edx;
-       int index_msb, tmp;
+       int index_msb, core_bits;
        int cpu = smp_processor_id();
 
+       cpuid(1, &eax, &ebx, &ecx, &edx);
+
+       c->apicid = phys_pkg_id((ebx >> 24) & 0xFF, 0);
+
        if (!cpu_has(c, X86_FEATURE_HT) || cpu_has(c, X86_FEATURE_CMP_LEGACY))
                return;
 
-       cpuid(1, &eax, &ebx, &ecx, &edx);
        smp_num_siblings = (ebx & 0xff0000) >> 16;
 
        if (smp_num_siblings == 1) {
                printk(KERN_INFO "CPU: Hyper-Threading is disabled\n");
        } else if (smp_num_siblings > 1 ) {
-               index_msb = 31;
 
                if (smp_num_siblings > NR_CPUS) {
                        printk(KERN_WARNING "CPU: Unsupported number of the siblings %d", smp_num_siblings);
                        smp_num_siblings = 1;
                        return;
                }
-               tmp = smp_num_siblings;
-               while ((tmp & 0x80000000 ) == 0) {
-                       tmp <<=1 ;
-                       index_msb--;
-               }
-               if (smp_num_siblings & (smp_num_siblings - 1))
-                       index_msb++;
+
+               index_msb = get_count_order(smp_num_siblings);
                phys_proc_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
 
                printk(KERN_INFO "CPU: Physical Processor ID: %d\n",
                       phys_proc_id[cpu]);
 
-               smp_num_siblings = smp_num_siblings / c->x86_num_cores;
+               smp_num_siblings = smp_num_siblings / c->x86_max_cores;
 
-               tmp = smp_num_siblings;
-               index_msb = 31;
-               while ((tmp & 0x80000000) == 0) {
-                       tmp <<=1 ;
-                       index_msb--;
-               }
+               index_msb = get_count_order(smp_num_siblings) ;
 
-               if (smp_num_siblings & (smp_num_siblings - 1))
-                       index_msb++;
+               core_bits = get_count_order(c->x86_max_cores);
 
-               cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb);
+               cpu_core_id[cpu] = phys_pkg_id((ebx >> 24) & 0xFF, index_msb) &
+                       ((1 << core_bits) - 1);
 
-               if (c->x86_num_cores > 1)
+               if (c->x86_max_cores > 1)
                        printk(KERN_INFO "CPU: Processor Core ID: %d\n",
                               cpu_core_id[cpu]);
        }
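
detect_ht() above now derives both IDs by shifting the initial APIC ID: bits above index_msb select the physical package, and the low core_bits select the core within it. get_count_order() (from include/linux/bitops.h) returns the number of bits needed to represent a count, i.e. ceil(log2(n)); a behavioral sketch:

#include <assert.h>

/* Sketch of get_count_order() semantics as used above: the smallest
 * order with (1 << order) >= count. The kernel version is fls()-based. */
static int get_count_order_sketch(unsigned int count)
{
        int order = 0;

        while ((1u << order) < count)
                order++;
        return order;
}

int main(void)
{
        assert(get_count_order_sketch(1) == 0);
        assert(get_count_order_sketch(2) == 1);
        assert(get_count_order_sketch(3) == 2); /* rounds up */
        assert(get_count_order_sketch(4) == 2);
        return 0;
}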
diff --git a/arch/i386/kernel/cpu/intel.c b/arch/i386/kernel/cpu/intel.c
index c28d26fb5f24..5e2da704f0fa 100644
--- a/arch/i386/kernel/cpu/intel.c
+++ b/arch/i386/kernel/cpu/intel.c
@@ -158,7 +158,7 @@ static void __devinit init_intel(struct cpuinfo_x86 *c)
        if ( p )
                strcpy(c->x86_model_id, p);
 
-       c->x86_num_cores = num_cpu_cores(c);
+       c->x86_max_cores = num_cpu_cores(c);
 
        detect_ht(c);
 
diff --git a/arch/i386/kernel/cpu/intel_cacheinfo.c b/arch/i386/kernel/cpu/intel_cacheinfo.c
index 4dc42a189ae5..fbfd374aa336 100644
--- a/arch/i386/kernel/cpu/intel_cacheinfo.c
+++ b/arch/i386/kernel/cpu/intel_cacheinfo.c
@@ -293,29 +293,45 @@ static struct _cpuid4_info *cpuid4_info[NR_CPUS];
 #ifdef CONFIG_SMP
 static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index)
 {
-       struct _cpuid4_info     *this_leaf;
+       struct _cpuid4_info     *this_leaf, *sibling_leaf;
        unsigned long num_threads_sharing;
-#ifdef CONFIG_X86_HT
-       struct cpuinfo_x86 *c = cpu_data + cpu;
-#endif
+       int index_msb, i;
+       struct cpuinfo_x86 *c = cpu_data;
 
        this_leaf = CPUID4_INFO_IDX(cpu, index);
        num_threads_sharing = 1 + this_leaf->eax.split.num_threads_sharing;
 
        if (num_threads_sharing == 1)
                cpu_set(cpu, this_leaf->shared_cpu_map);
-#ifdef CONFIG_X86_HT
-       else if (num_threads_sharing == smp_num_siblings)
-               this_leaf->shared_cpu_map = cpu_sibling_map[cpu];
-       else if (num_threads_sharing == (c->x86_num_cores * smp_num_siblings))
-               this_leaf->shared_cpu_map = cpu_core_map[cpu];
-       else
-               printk(KERN_DEBUG "Number of CPUs sharing cache didn't match "
-                       "any known set of CPUs\n");
-#endif
+       else {
+               index_msb = get_count_order(num_threads_sharing);
+
+               for_each_online_cpu(i) {
+                       if (c[i].apicid >> index_msb ==
+                           c[cpu].apicid >> index_msb) {
+                               cpu_set(i, this_leaf->shared_cpu_map);
+                               if (i != cpu && cpuid4_info[i]) {
+                                       sibling_leaf = CPUID4_INFO_IDX(i, index);
+                                       cpu_set(cpu, sibling_leaf->shared_cpu_map);
+                               }
+                       }
+               }
+       }
+}
+static void __devinit cache_remove_shared_cpu_map(unsigned int cpu, int index)
+{
+       struct _cpuid4_info     *this_leaf, *sibling_leaf;
+       int sibling;
+
+       this_leaf = CPUID4_INFO_IDX(cpu, index);
+       for_each_cpu_mask(sibling, this_leaf->shared_cpu_map) {
+               sibling_leaf = CPUID4_INFO_IDX(sibling, index);
+               cpu_clear(cpu, sibling_leaf->shared_cpu_map);
+       }
 }
 #else
 static void __init cache_shared_cpu_map_setup(unsigned int cpu, int index) {}
+static void __init cache_remove_shared_cpu_map(unsigned int cpu, int index) {}
 #endif
 
 static void free_cache_attributes(unsigned int cpu)
@@ -574,8 +590,10 @@ static void __cpuexit cache_remove_dev(struct sys_device * sys_dev)
        unsigned int cpu = sys_dev->id;
        unsigned long i;
 
-       for (i = 0; i < num_cache_leaves; i++)
+       for (i = 0; i < num_cache_leaves; i++) {
+               cache_remove_shared_cpu_map(cpu, i);
                kobject_unregister(&(INDEX_KOBJECT_PTR(cpu,i)->kobj));
+       }
        kobject_unregister(cache_kobject[cpu]);
        cpuid4_cache_sysfs_exit(cpu);
        return;
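
The rewritten cache_shared_cpu_map_setup() no longer special-cases HT versus core sharing: two logical CPUs share a cache leaf exactly when their APIC IDs agree above the low log2(num_threads_sharing) bits. A standalone illustration with hypothetical APIC IDs:

#include <stdio.h>

int main(void)
{
        /* Hypothetical: threads 0x0 and 0x1 are HT siblings of one core,
         * 0x2 lives in the next core of the same package. */
        unsigned int apicid[] = { 0x0, 0x1, 0x2 };
        int index_msb = 1;      /* get_count_order(num_threads_sharing = 2) */
        int i;

        for (i = 1; i < 3; i++)
                printf("cpu0 shares with cpu%d: %s\n", i,
                       (apicid[0] >> index_msb) == (apicid[i] >> index_msb)
                       ? "yes" : "no");  /* prints: yes, then no */
        return 0;
}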
diff --git a/arch/i386/kernel/cpu/mtrr/main.c b/arch/i386/kernel/cpu/mtrr/main.c
index dd4ebd6af7e4..1e9db198c440 100644
--- a/arch/i386/kernel/cpu/mtrr/main.c
+++ b/arch/i386/kernel/cpu/mtrr/main.c
@@ -626,6 +626,14 @@ void __init mtrr_bp_init(void)
                if (cpuid_eax(0x80000000) >= 0x80000008) {
                        u32 phys_addr;
                        phys_addr = cpuid_eax(0x80000008) & 0xff;
+                       /* CPUID workaround for Intel 0F33/0F34 CPU */
+                       if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL &&
+                           boot_cpu_data.x86 == 0xF &&
+                           boot_cpu_data.x86_model == 0x3 &&
+                           (boot_cpu_data.x86_mask == 0x3 ||
+                            boot_cpu_data.x86_mask == 0x4))
+                               phys_addr = 36;
+
                        size_or_mask = ~((1 << (phys_addr - PAGE_SHIFT)) - 1);
                        size_and_mask = ~size_or_mask & 0xfff00000;
                } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
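
Worked numbers for the masks above, with PAGE_SHIFT == 12: the affected Pentium 4 steppings reportedly mis-report their physical address width via CPUID leaf 0x80000008, so the workaround pins it to 36 bits. The masks operate in 4K-page units, so bit (phys_addr - 12) is the first bit beyond the addressable range:

#include <stdio.h>

int main(void)
{
        unsigned int page_shift = 12;
        unsigned int phys_addr  = 36;   /* pinned by the workaround */
        unsigned int size_or_mask  = ~((1 << (phys_addr - page_shift)) - 1);
        unsigned int size_and_mask = ~size_or_mask & 0xfff00000;

        printf("size_or_mask  = %#010x\n", size_or_mask);   /* 0xff000000 */
        printf("size_and_mask = %#010x\n", size_and_mask);  /* 0x00f00000 */
        return 0;
}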
diff --git a/arch/i386/kernel/cpu/proc.c b/arch/i386/kernel/cpu/proc.c
index 41b871ecf4b3..e7921315ae9d 100644
--- a/arch/i386/kernel/cpu/proc.c
+++ b/arch/i386/kernel/cpu/proc.c
@@ -94,12 +94,11 @@ static int show_cpuinfo(struct seq_file *m, void *v)
        if (c->x86_cache_size >= 0)
                seq_printf(m, "cache size\t: %d KB\n", c->x86_cache_size);
 #ifdef CONFIG_X86_HT
-       if (c->x86_num_cores * smp_num_siblings > 1) {
+       if (c->x86_max_cores * smp_num_siblings > 1) {
                seq_printf(m, "physical id\t: %d\n", phys_proc_id[n]);
-               seq_printf(m, "siblings\t: %d\n",
-                          c->x86_num_cores * smp_num_siblings);
+               seq_printf(m, "siblings\t: %d\n", cpus_weight(cpu_core_map[n]));
                seq_printf(m, "core id\t\t: %d\n", cpu_core_id[n]);
-               seq_printf(m, "cpu cores\t: %d\n", c->x86_num_cores);
+               seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        }
 #endif
 
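
With the proc.c change above, "siblings" becomes the number of logical CPUs actually online in the package (cpus_weight(cpu_core_map[n])) and "cpu cores" the number of cores actually booted, rather than static maximums. For a hypothetical fully-booted package with two HT cores, each of its logical CPUs would show something like:

physical id     : 0
siblings        : 4
core id         : 1
cpu cores       : 2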
diff --git a/arch/i386/kernel/smpboot.c b/arch/i386/kernel/smpboot.c
index bc5a9d97466b..d16520da4550 100644
--- a/arch/i386/kernel/smpboot.c
+++ b/arch/i386/kernel/smpboot.c
@@ -72,9 +72,11 @@ int phys_proc_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
 /* Core ID of each logical CPU */
 int cpu_core_id[NR_CPUS] __read_mostly = {[0 ... NR_CPUS-1] = BAD_APICID};
 
+/* representing HT siblings of each logical CPU */
 cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_sibling_map);
 
+/* representing HT and core siblings of each logical CPU */
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
 EXPORT_SYMBOL(cpu_core_map);
 
@@ -442,35 +444,60 @@ static void __devinit smp_callin(void)
 
 static int cpucount;
 
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
 static inline void
 set_cpu_sibling_map(int cpu)
 {
        int i;
+       struct cpuinfo_x86 *c = cpu_data;
+
+       cpu_set(cpu, cpu_sibling_setup_map);
 
        if (smp_num_siblings > 1) {
-               for (i = 0; i < NR_CPUS; i++) {
-                       if (!cpu_isset(i, cpu_callout_map))
-                               continue;
-                       if (cpu_core_id[cpu] == cpu_core_id[i]) {
+               for_each_cpu_mask(i, cpu_sibling_setup_map) {
+                       if (phys_proc_id[cpu] == phys_proc_id[i] &&
+                           cpu_core_id[cpu] == cpu_core_id[i]) {
                                cpu_set(i, cpu_sibling_map[cpu]);
                                cpu_set(cpu, cpu_sibling_map[i]);
+                               cpu_set(i, cpu_core_map[cpu]);
+                               cpu_set(cpu, cpu_core_map[i]);
                        }
                }
        } else {
                cpu_set(cpu, cpu_sibling_map[cpu]);
        }
 
-       if (current_cpu_data.x86_num_cores > 1) {
-               for (i = 0; i < NR_CPUS; i++) {
-                       if (!cpu_isset(i, cpu_callout_map))
-                               continue;
-                       if (phys_proc_id[cpu] == phys_proc_id[i]) {
-                               cpu_set(i, cpu_core_map[cpu]);
-                               cpu_set(cpu, cpu_core_map[i]);
-                       }
-               }
-       } else {
+       if (current_cpu_data.x86_max_cores == 1) {
                cpu_core_map[cpu] = cpu_sibling_map[cpu];
+               c[cpu].booted_cores = 1;
+               return;
+       }
+
+       for_each_cpu_mask(i, cpu_sibling_setup_map) {
+               if (phys_proc_id[cpu] == phys_proc_id[i]) {
+                       cpu_set(i, cpu_core_map[cpu]);
+                       cpu_set(cpu, cpu_core_map[i]);
+                       /*
+                        * Does this new cpu bringup a new core?
+                        */
+                       if (cpus_weight(cpu_sibling_map[cpu]) == 1) {
+                               /*
+                                * for each core in package, increment
+                                * the booted_cores for this new cpu
+                                */
+                               if (first_cpu(cpu_sibling_map[i]) == i)
+                                       c[cpu].booted_cores++;
+                               /*
+                                * increment the core count for all
+                                * the other cpus in this package
+                                */
+                               if (i != cpu)
+                                       c[i].booted_cores++;
+                       } else if (i != cpu && !c[cpu].booted_cores)
+                               c[cpu].booted_cores = c[i].booted_cores;
+               }
        }
 }
 
@@ -1095,11 +1122,8 @@ static void __init smp_boot_cpus(unsigned int max_cpus)
 
        current_thread_info()->cpu = 0;
        smp_tune_scheduling();
-       cpus_clear(cpu_sibling_map[0]);
-       cpu_set(0, cpu_sibling_map[0]);
 
-       cpus_clear(cpu_core_map[0]);
-       cpu_set(0, cpu_core_map[0]);
+       set_cpu_sibling_map(0);
 
        /*
         * If we couldn't find an SMP configuration at boot time,
@@ -1278,15 +1302,24 @@ static void
 remove_siblinginfo(int cpu)
 {
        int sibling;
+       struct cpuinfo_x86 *c = cpu_data;
 
+       for_each_cpu_mask(sibling, cpu_core_map[cpu]) {
+               cpu_clear(cpu, cpu_core_map[sibling]);
+               /*
+                * last thread sibling in this cpu core going down
+                */
+               if (cpus_weight(cpu_sibling_map[cpu]) == 1)
+                       c[sibling].booted_cores--;
+       }
+
        for_each_cpu_mask(sibling, cpu_sibling_map[cpu])
                cpu_clear(cpu, cpu_sibling_map[sibling]);
-       for_each_cpu_mask(sibling, cpu_core_map[cpu])
-               cpu_clear(cpu, cpu_core_map[sibling]);
        cpus_clear(cpu_sibling_map[cpu]);
        cpus_clear(cpu_core_map[cpu]);
        phys_proc_id[cpu] = BAD_APICID;
        cpu_core_id[cpu] = BAD_APICID;
+       cpu_clear(cpu, cpu_sibling_setup_map);
 }
 
 int __cpu_disable(void)
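
The booted_cores bookkeeping in set_cpu_sibling_map() above maintains, for every online CPU, the count of distinct cores currently online in its package: the first thread of a new core bumps everyone else's count (and tallies already-booted cores via their first threads), while a later HT thread simply copies the count from a sibling. A userspace sketch of that recurrence, under an assumed one-package topology of two HT cores (threads 0/2 on core 0, threads 1/3 on core 1):

#include <stdio.h>

#define NCPUS 4
static const int core_id[NCPUS] = { 0, 1, 0, 1 };
static int online[NCPUS], booted_cores[NCPUS];

/* Online threads in cpu's core, playing the role of
 * cpus_weight(cpu_sibling_map[cpu]). */
static int sibling_weight(int cpu)
{
        int i, w = 0;

        for (i = 0; i < NCPUS; i++)
                if (online[i] && core_id[i] == core_id[cpu])
                        w++;
        return w;
}

/* Lowest online thread in cpu's core: first_cpu(cpu_sibling_map[cpu]). */
static int first_sibling(int cpu)
{
        int i;

        for (i = 0; i < NCPUS; i++)
                if (online[i] && core_id[i] == core_id[cpu])
                        return i;
        return cpu;
}

static void bring_up(int cpu)
{
        int i;

        online[cpu] = 1;
        for (i = 0; i < NCPUS; i++) {
                if (!online[i])
                        continue;       /* all CPUs share one package here */
                if (sibling_weight(cpu) == 1) {         /* cpu opens a new core */
                        if (first_sibling(i) == i)
                                booted_cores[cpu]++;    /* tally existing cores */
                        if (i != cpu)
                                booted_cores[i]++;      /* tell the others */
                } else if (i != cpu && !booted_cores[cpu])
                        booted_cores[cpu] = booted_cores[i];
        }
}

int main(void)
{
        int i;

        for (i = 0; i < NCPUS; i++) {
                bring_up(i);
                /* prints 1, 2, 2, 2 for bring-up order 0,1,2,3 */
                printf("cpu%d up: booted_cores = %d\n", i, booted_cores[i]);
        }
        return 0;
}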
diff --git a/arch/i386/kernel/srat.c b/arch/i386/kernel/srat.c
index 8de658db8146..52b3ed5d2cb5 100644
--- a/arch/i386/kernel/srat.c
+++ b/arch/i386/kernel/srat.c
@@ -137,8 +137,8 @@ static void __init parse_memory_affinity_structure (char *sratp)
                 "enabled and removable" : "enabled" ) );
 }
 
-#if MAX_NR_ZONES != 3
-#error "MAX_NR_ZONES != 3, chunk_to_zone requires review"
+#if MAX_NR_ZONES != 4
+#error "MAX_NR_ZONES != 4, chunk_to_zone requires review"
 #endif
 /* Take a chunk of pages from page frame cstart to cend and count the number
  * of pages in each zone, returned via zones[].
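
The zone-count bump reflects the x86-64 side of this merge introducing ZONE_DMA32; chunk_to_zone() in this file must be reviewed whenever the zone set changes. A sketch of the resulting enumeration (names as in include/linux/mmzone.h of this era, ordering assumed):

/* Sketch of the zone layout after this merge. */
enum {
        ZONE_DMA,       /* < 16MB, ISA DMA */
        ZONE_DMA32,     /* < 4GB, new with the x86-64 update */
        ZONE_NORMAL,
        ZONE_HIGHMEM,
        MAX_NR_ZONES    /* == 4 */
};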