author    Ashok Raj <ashok.raj@intel.com>    2005-06-25 17:54:58 -0400
committer Linus Torvalds <torvalds@ppc970.osdl.org>    2005-06-25 19:24:30 -0400
commit    e6982c671c560da4a0bc5c908cbcbec12bd5991d (patch)
tree      38e317c28afbde68d431ae139e9e943e5a818b06 /arch/x86_64/kernel/mce.c
parent    52a119feaad92d44a0e97d01b22afbcbaf3fc079 (diff)
[PATCH] x86_64: Change init sections for CPU hotplug support
This patch adds __cpuinit and __cpuinitdata sections for code that needs to exist past boot to support CPU hotplug.

Caveat: this is done *only* for EM64T CPU hotplug support, on request from Andi Kleen. Much of the generic hotplug code in the kernel, and the other archs that support CPU hotplug today (i386, ia64, ppc64, s390 and parisc), don't mark sections with __cpuinit; they mark them only as __devinit and __devinitdata. If someone is motivated to change the generic code, we need to make sure all existing hotplug code does not break on the other archs that don't use __cpuinit and __cpudevinit.

Signed-off-by: Ashok Raj <ashok.raj@intel.com>
Acked-by: Andi Kleen <ak@muc.de>
Acked-by: Zwane Mwaikambo <zwane@arm.linux.org.uk>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
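For context, a minimal sketch of how such section markers are typically wired up (the definitions below are illustrative and not taken from this patch): when CONFIG_HOTPLUG_CPU is enabled, code marked __cpuinit must survive past boot because a CPU can be brought up later, so the marker expands to nothing; otherwise it falls back to plain __init/__initdata and the code is discarded after initialization.

/*
 * Illustrative sketch, not part of this patch: how __cpuinit and
 * __cpuinitdata can be defined so CPU-bringup code is discarded
 * after boot only when CPU hotplug is disabled.
 */
#ifdef CONFIG_HOTPLUG_CPU
#define __cpuinit                     /* keep: may run again on hotplug */
#define __cpuinitdata
#else
#define __cpuinit       __init        /* discard after boot as usual */
#define __cpuinitdata   __initdata
#endif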
Diffstat (limited to 'arch/x86_64/kernel/mce.c')
-rw-r--r--    arch/x86_64/kernel/mce.c    8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/arch/x86_64/kernel/mce.c b/arch/x86_64/kernel/mce.c
index 3a89d735a4f6..7ab15c8ab95f 100644
--- a/arch/x86_64/kernel/mce.c
+++ b/arch/x86_64/kernel/mce.c
@@ -327,7 +327,7 @@ static void mce_init(void *dummy)
 }
 
 /* Add per CPU specific workarounds here */
-static void __init mce_cpu_quirks(struct cpuinfo_x86 *c)
+static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c)
 {
 	/* This should be disabled by the BIOS, but isn't always */
 	if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) {
@@ -337,7 +337,7 @@ static void __init mce_cpu_quirks(struct cpuinfo_x86 *c)
 	}
 }
 
-static void __init mce_cpu_features(struct cpuinfo_x86 *c)
+static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c)
 {
 	switch (c->x86_vendor) {
 	case X86_VENDOR_INTEL:
@@ -352,7 +352,7 @@ static void __init mce_cpu_features(struct cpuinfo_x86 *c)
  * Called for each booted CPU to set up machine checks.
  * Must be called with preempt off.
  */
-void __init mcheck_init(struct cpuinfo_x86 *c)
+void __cpuinit mcheck_init(struct cpuinfo_x86 *c)
 {
 	static cpumask_t mce_cpus __initdata = CPU_MASK_NONE;
 
@@ -542,7 +542,7 @@ ACCESSOR(bank4ctl,bank[4],mce_restart())
 ACCESSOR(tolerant,tolerant,)
 ACCESSOR(check_interval,check_interval,mce_restart())
 
-static __init int mce_init_device(void)
+static __cpuinit int mce_init_device(void)
 {
 	int err;
 	if (!mce_available(&boot_cpu_data))