author     Mike Travis <travis@sgi.com>                          2007-10-16 04:24:04 -0400
committer  Linus Torvalds <torvalds@woody.linux-foundation.org>  2007-10-16 12:42:50 -0400
commit     083576112940fda783d716fd5ccc744f81667b2f
tree       226139e7cb9863c91d1e2a1ac0babb0db94f3d11  /arch/x86/kernel/mce_amd_64.c
parent     cc84634f29d5a92932400a2d52ca17dee2c8a462
x86: Convert cpu_core_map to be a per cpu variable
This is based on an earlier message from 'Christoph Lameter':
cpu_core_map is currently an array sized by NR_CPUS. This means we
over-allocate, since we will rarely use the maximum configured number
of CPUs. If we put cpu_core_map into the per cpu area instead, it is
allocated for each processor as that processor comes online.
This also means the core map cannot be accessed until the per cpu area
has been allocated. Xen does something odd here: it loops over all
processors and zeroes the masks that are not yet allocated, even though
they will be zeroed anyway when they are allocated. I commented that
code out.
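As a minimal sketch of what the conversion looks like at the definition site (not copied from this patch; the header choices and comments follow the usual kernel per-cpu idiom and are assumptions):

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Old scheme (sketch): one cpumask_t per possible CPU, reserved at
 * compile time regardless of how many CPUs actually come online. */
/* cpumask_t cpu_core_map[NR_CPUS]; */

/* New scheme (sketch): the mask lives in each CPU's per-cpu area and
 * only becomes usable once that area has been set up for the CPU. */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
```

Callers then read the mask with per_cpu(cpu_core_map, cpu) instead of indexing a global array with cpu_core_map[cpu], which is exactly the change the hunks below apply.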
Signed-off-by: Christoph Lameter <clameter@sgi.com>
Signed-off-by: Mike Travis <travis@sgi.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: "Siddha, Suresh B" <suresh.b.siddha@intel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'arch/x86/kernel/mce_amd_64.c')

-rw-r--r--  arch/x86/kernel/mce_amd_64.c | 6
1 file changed, 3 insertions, 3 deletions
```
diff --git a/arch/x86/kernel/mce_amd_64.c b/arch/x86/kernel/mce_amd_64.c
index 2f8a7f18b0fe..805b62b1e0df 100644
--- a/arch/x86/kernel/mce_amd_64.c
+++ b/arch/x86/kernel/mce_amd_64.c
@@ -472,7 +472,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 
 #ifdef CONFIG_SMP
 	if (cpu_data[cpu].cpu_core_id && shared_bank[bank]) {	/* symlink */
-		i = first_cpu(cpu_core_map[cpu]);
+		i = first_cpu(per_cpu(cpu_core_map, cpu));
 
 		/* first core not up yet */
 		if (cpu_data[i].cpu_core_id)
@@ -492,7 +492,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 		if (err)
 			goto out;
 
-		b->cpus = cpu_core_map[cpu];
+		b->cpus = per_cpu(cpu_core_map, cpu);
 		per_cpu(threshold_banks, cpu)[bank] = b;
 		goto out;
 	}
@@ -509,7 +509,7 @@ static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank)
 #ifndef CONFIG_SMP
 	b->cpus = CPU_MASK_ALL;
 #else
-	b->cpus = cpu_core_map[cpu];
+	b->cpus = per_cpu(cpu_core_map, cpu);
 #endif
 	err = kobject_register(&b->kobj);
 	if (err)
```
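For readers unfamiliar with the per-cpu accessors, the following sketch isolates the pattern the hunks above apply. The helper function and its name are illustrative assumptions, not part of the patch; only the per_cpu() access pattern itself comes from the diff.

```c
#include <linux/cpumask.h>
#include <linux/percpu.h>

/* Assumption: after the conversion, the per-cpu mask is declared in an
 * arch header roughly like this. */
DECLARE_PER_CPU(cpumask_t, cpu_core_map);

/* Hypothetical helper: return the first CPU sharing a core with 'cpu'. */
static int first_core_sibling(unsigned int cpu)
{
	/* Before the patch: first_cpu(cpu_core_map[cpu]) indexed a global
	 * NR_CPUS-sized array. After it, per_cpu() resolves the mask held
	 * in the per-cpu area that was set up when 'cpu' came online. */
	return first_cpu(per_cpu(cpu_core_map, cpu));
}
```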