Diffstat (limited to 'arch/x86/kernel/cpu/mcheck')
-rw-r--r--	arch/x86/kernel/cpu/mcheck/k7.c	36
-rw-r--r--	arch/x86/kernel/cpu/mcheck/mce_64.c	24
-rw-r--r--	arch/x86/kernel/cpu/mcheck/p4.c	90
3 files changed, 79 insertions, 71 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/k7.c b/arch/x86/kernel/cpu/mcheck/k7.c
index e633c9c2b764..f390c9f66351 100644
--- a/arch/x86/kernel/cpu/mcheck/k7.c
+++ b/arch/x86/kernel/cpu/mcheck/k7.c
@@ -9,23 +9,23 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 
 #include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/msr.h>
 
 #include "mce.h"
 
 /* Machine Check Handler For AMD Athlon/Duron */
-static void k7_machine_check(struct pt_regs * regs, long error_code)
+static void k7_machine_check(struct pt_regs *regs, long error_code)
 {
-	int recover=1;
+	int recover = 1;
 	u32 alow, ahigh, high, low;
 	u32 mcgstl, mcgsth;
 	int i;
 
-	rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
+	rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 	if (mcgstl & (1<<0))	/* Recoverable ? */
-		recover=0;
+		recover = 0;
 
 	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
 		smp_processor_id(), mcgsth, mcgstl);
@@ -60,12 +60,12 @@ static void k7_machine_check(struct pt_regs * regs, long error_code)
 	}
 
 	if (recover&2)
-		panic ("CPU context corrupt");
+		panic("CPU context corrupt");
 	if (recover&1)
-		panic ("Unable to continue");
-	printk (KERN_EMERG "Attempting to continue.\n");
+		panic("Unable to continue");
+	printk(KERN_EMERG "Attempting to continue.\n");
 	mcgstl &= ~(1<<2);
-	wrmsr (MSR_IA32_MCG_STATUS,mcgstl, mcgsth);
+	wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 }
 
 
@@ -81,25 +81,25 @@ void amd_mcheck_init(struct cpuinfo_x86 *c)
 	machine_check_vector = k7_machine_check;
 	wmb();
 
-	printk (KERN_INFO "Intel machine check architecture supported.\n");
-	rdmsr (MSR_IA32_MCG_CAP, l, h);
+	printk(KERN_INFO "Intel machine check architecture supported.\n");
+	rdmsr(MSR_IA32_MCG_CAP, l, h);
 	if (l & (1<<8))	/* Control register present ? */
-		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
+		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 	nr_mce_banks = l & 0xff;
 
 	/* Clear status for MC index 0 separately, we don't touch CTL,
 	 * as some K7 Athlons cause spurious MCEs when its enabled. */
 	if (boot_cpu_data.x86 == 6) {
-		wrmsr (MSR_IA32_MC0_STATUS, 0x0, 0x0);
+		wrmsr(MSR_IA32_MC0_STATUS, 0x0, 0x0);
 		i = 1;
 	} else
 		i = 0;
-	for (; i<nr_mce_banks; i++) {
-		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
-		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
+	for (; i < nr_mce_banks; i++) {
+		wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
+		wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
 	}
 
-	set_in_cr4 (X86_CR4_MCE);
-	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
+	set_in_cr4(X86_CR4_MCE);
+	printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
 		smp_processor_id());
 }
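Both amd_mcheck_init() above and intel_p4_mcheck_init() later in this series derive the bank layout from MCG_CAP: bits 7:0 carry the bank count, bit 8 says whether MCG_CTL exists, and each bank i is then programmed through MSR_IA32_MC0_CTL+4*i and MSR_IA32_MC0_STATUS+4*i. A minimal standalone sketch of that decoding, using an invented sample value instead of a real MSR read:

/* Standalone sketch of the MCG_CAP decoding done in amd_mcheck_init();
 * the value below is invented for illustration, not read from hardware. */
#include <stdio.h>

int main(void)
{
	unsigned int l = 0x105;			/* hypothetical MCG_CAP low half */
	unsigned int nr_mce_banks = l & 0xff;	/* bits 7:0: number of MC banks */
	int ctl_present = (l >> 8) & 1;		/* bit 8: MCG_CTL register present */
	unsigned int i;

	printf("banks=%u, MCG_CTL %s\n", nr_mce_banks,
	       ctl_present ? "present" : "absent");
	/* The init loop then touches two MSRs per bank, stride 4: */
	for (i = 0; i < nr_mce_banks; i++)
		printf("bank %u: CTL at MC0_CTL+%u, STATUS at MC0_STATUS+%u\n",
		       i, 4 * i, 4 * i);
	return 0;
}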
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
index e07e8c068ae0..501ca1cea27d 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -31,7 +31,7 @@
 #include <asm/idle.h>
 
 #define MISC_MCELOG_MINOR 227
-#define NR_BANKS 6
+#define NR_SYSFS_BANKS 6
 
 atomic_t mce_entry;
 
@@ -46,7 +46,7 @@ static int mce_dont_init;
  */
 static int tolerant = 1;
 static int banks;
-static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL };
+static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };
 static unsigned long notify_user;
 static int rip_msr;
 static int mce_bootlog = -1;
@@ -209,7 +209,7 @@ void do_machine_check(struct pt_regs * regs, long error_code)
 	barrier();
 
 	for (i = 0; i < banks; i++) {
-		if (!bank[i])
+		if (i < NR_SYSFS_BANKS && !bank[i])
 			continue;
 
 		m.misc = 0;
@@ -444,9 +444,10 @@ static void mce_init(void *dummy)
 
 	rdmsrl(MSR_IA32_MCG_CAP, cap);
 	banks = cap & 0xff;
-	if (banks > NR_BANKS) {
-		printk(KERN_INFO "MCE: warning: using only %d banks\n", banks);
-		banks = NR_BANKS;
+	if (banks > MCE_EXTENDED_BANK) {
+		banks = MCE_EXTENDED_BANK;
+		printk(KERN_INFO "MCE: warning: using only %d banks\n",
+			MCE_EXTENDED_BANK);
 	}
 	/* Use accurate RIP reporting if available. */
 	if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9)
@@ -462,7 +463,11 @@ static void mce_init(void *dummy)
 		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 
 	for (i = 0; i < banks; i++) {
-		wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
+		if (i < NR_SYSFS_BANKS)
+			wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]);
+		else
+			wrmsrl(MSR_IA32_MC0_CTL+4*i, ~0UL);
+
 		wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0);
 	}
 }
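The two mce_init() hunks above stop the 64-bit code from indexing past the six-entry bank[] array when the CPU reports more banks: do_machine_check() only consults bank[i] for i < NR_SYSFS_BANKS, and any bank beyond that range is programmed fully enabled. A standalone sketch of that rule, where msr_write() is a made-up stand-in for wrmsrl() and the bank count is invented:

/* Standalone sketch of the bank-programming rule from mce_init() above. */
#include <stdio.h>

#define NR_SYSFS_BANKS 6

/* sysfs-controllable enable masks, default "all errors enabled" */
static unsigned long bank[NR_SYSFS_BANKS] = { [0 ... NR_SYSFS_BANKS-1] = ~0UL };

static void msr_write(int i, unsigned long val)	/* stand-in for wrmsrl() */
{
	printf("MC%d_CTL <- %#lx\n", i, val);
}

int main(void)
{
	int i, banks = 9;	/* e.g. a CPU reporting 9 banks in MCG_CAP */

	for (i = 0; i < banks; i++)
		/* banks without a sysfs knob are unconditionally enabled */
		msr_write(i, i < NR_SYSFS_BANKS ? bank[i] : ~0UL);
	return 0;
}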
@@ -766,7 +771,10 @@ DEFINE_PER_CPU(struct sys_device, device_mce);
 	} \
 static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name);
 
-/* TBD should generate these dynamically based on number of available banks */
+/*
+ * TBD should generate these dynamically based on number of available banks.
+ * Have only 6 contol banks in /sysfs until then.
+ */
 ACCESSOR(bank0ctl,bank[0],mce_restart())
 ACCESSOR(bank1ctl,bank[1],mce_restart())
 ACCESSOR(bank2ctl,bank[2],mce_restart())
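The context lines above are the tail of the ACCESSOR() macro, which token-pastes a show_/set_ handler pair plus a sysdev attribute for each bank control. A simplified standalone sketch of that token-pasting pattern, not the file's exact macro (no sysfs plumbing, and the restart hook is reduced to a comment):

/* Standalone sketch of the ## pasting behind ACCESSOR(bank0ctl, bank[0], ...). */
#include <stdio.h>

static unsigned long bank[6];

#define ACCESSOR(name, var)				\
static unsigned long show_ ## name(void)		\
{							\
	return (var);					\
}							\
static void set_ ## name(unsigned long v)		\
{							\
	(var) = v; /* the kernel version also triggers mce_restart() */ \
}

ACCESSOR(bank0ctl, bank[0])
ACCESSOR(bank1ctl, bank[1])

int main(void)
{
	set_bank0ctl(~0UL);
	set_bank1ctl(0);
	printf("bank0ctl=%#lx bank1ctl=%#lx\n",
	       show_bank0ctl(), show_bank1ctl());
	return 0;
}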
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index cb03345554a5..eef001ad3bde 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -8,7 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/smp.h>
 
-#include <asm/processor.h> 
+#include <asm/processor.h>
 #include <asm/system.h>
 #include <asm/msr.h>
 #include <asm/apic.h>
@@ -32,12 +32,12 @@ struct intel_mce_extended_msrs {
 	/* u32 *reserved[]; */
 };
 
-static int mce_num_extended_msrs = 0;
+static int mce_num_extended_msrs;
 
 
 #ifdef CONFIG_X86_MCE_P4THERMAL
 static void unexpected_thermal_interrupt(struct pt_regs *regs)
 {
 	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
 		smp_processor_id());
 	add_taint(TAINT_MACHINE_CHECK);
@@ -83,7 +83,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	 * be some SMM goo which handles it, so we can't even put a handler
 	 * since it might be delivered via SMI already -zwanem.
 	 */
-	rdmsr (MSR_IA32_MISC_ENABLE, l, h);
+	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 	h = apic_read(APIC_LVTTHMR);
 	if ((l & (1<<3)) && (h & APIC_DM_SMI)) {
 		printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
@@ -91,7 +91,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 		return; /* -EBUSY */
 	}
 
-	/* check whether a vector already exists, temporarily masked? */	
+	/* check whether a vector already exists, temporarily masked? */
 	if (h & APIC_VECTOR_MASK) {
 		printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already "
 		       "installed\n",
@@ -104,18 +104,18 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);	/* we'll mask till we're ready */
 	apic_write_around(APIC_LVTTHMR, h);
 
-	rdmsr (MSR_IA32_THERM_INTERRUPT, l, h);
-	wrmsr (MSR_IA32_THERM_INTERRUPT, l | 0x03 , h);
+	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
+	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03 , h);
 
 	/* ok we're good to go... */
 	vendor_thermal_interrupt = intel_thermal_interrupt;
-
-	rdmsr (MSR_IA32_MISC_ENABLE, l, h);
-	wrmsr (MSR_IA32_MISC_ENABLE, l | (1<<3), h);
 
-	l = apic_read (APIC_LVTTHMR);
-	apic_write_around (APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
-	printk (KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);
+	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
+	wrmsr(MSR_IA32_MISC_ENABLE, l | (1<<3), h);
+
+	l = apic_read(APIC_LVTTHMR);
+	apic_write_around(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+	printk(KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);
 
 	/* enable thermal throttle processing */
 	atomic_set(&therm_throt_en, 1);
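The reshuffled lines above keep the enable ordering of intel_init_thermal() intact: THERM_INTERRUPT is programmed, the handler is installed, bit 3 of MSR_IA32_MISC_ENABLE (the TM1 enable) is set by read-modify-write, and only then is the LVT entry unmasked. A standalone sketch of that read-modify-write idiom on an MSR split into 32-bit halves, with an invented register value:

/* Standalone sketch of the rdmsr()/wrmsr() read-modify-write idiom above;
 * fake_msr models a 64-bit MSR as low/high 32-bit halves. */
#include <stdio.h>

static unsigned long long fake_msr = 0x12345670ULL;	/* invented contents */

static void rdmsr_sim(unsigned int *l, unsigned int *h)
{
	*l = fake_msr & 0xffffffffu;
	*h = fake_msr >> 32;
}

static void wrmsr_sim(unsigned int l, unsigned int h)
{
	fake_msr = ((unsigned long long)h << 32) | l;
}

int main(void)
{
	unsigned int l, h;

	/* set one bit, preserve everything else - the "l | (1<<3)" pattern */
	rdmsr_sim(&l, &h);
	wrmsr_sim(l | (1 << 3), h);
	printf("msr=%#llx\n", fake_msr);	/* bit 3 now set */
	return 0;
}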
@@ -129,28 +129,28 @@ static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
 {
 	u32 h;
 
-	rdmsr (MSR_IA32_MCG_EAX, r->eax, h);
-	rdmsr (MSR_IA32_MCG_EBX, r->ebx, h);
-	rdmsr (MSR_IA32_MCG_ECX, r->ecx, h);
-	rdmsr (MSR_IA32_MCG_EDX, r->edx, h);
-	rdmsr (MSR_IA32_MCG_ESI, r->esi, h);
-	rdmsr (MSR_IA32_MCG_EDI, r->edi, h);
-	rdmsr (MSR_IA32_MCG_EBP, r->ebp, h);
-	rdmsr (MSR_IA32_MCG_ESP, r->esp, h);
-	rdmsr (MSR_IA32_MCG_EFLAGS, r->eflags, h);
-	rdmsr (MSR_IA32_MCG_EIP, r->eip, h);
+	rdmsr(MSR_IA32_MCG_EAX, r->eax, h);
+	rdmsr(MSR_IA32_MCG_EBX, r->ebx, h);
+	rdmsr(MSR_IA32_MCG_ECX, r->ecx, h);
+	rdmsr(MSR_IA32_MCG_EDX, r->edx, h);
+	rdmsr(MSR_IA32_MCG_ESI, r->esi, h);
+	rdmsr(MSR_IA32_MCG_EDI, r->edi, h);
+	rdmsr(MSR_IA32_MCG_EBP, r->ebp, h);
+	rdmsr(MSR_IA32_MCG_ESP, r->esp, h);
+	rdmsr(MSR_IA32_MCG_EFLAGS, r->eflags, h);
+	rdmsr(MSR_IA32_MCG_EIP, r->eip, h);
 }
 
-static void intel_machine_check(struct pt_regs * regs, long error_code)
+static void intel_machine_check(struct pt_regs *regs, long error_code)
 {
-	int recover=1;
+	int recover = 1;
 	u32 alow, ahigh, high, low;
 	u32 mcgstl, mcgsth;
 	int i;
 
-	rdmsr (MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
+	rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 	if (mcgstl & (1<<0))	/* Recoverable ? */
-		recover=0;
+		recover = 0;
 
 	printk(KERN_EMERG "CPU %d: Machine Check Exception: %08x%08x\n",
 		smp_processor_id(), mcgsth, mcgstl);
@@ -191,20 +191,20 @@ static void intel_machine_check(struct pt_regs * regs, long error_code)
 	}
 
 	if (recover & 2)
-		panic ("CPU context corrupt");
+		panic("CPU context corrupt");
 	if (recover & 1)
-		panic ("Unable to continue");
+		panic("Unable to continue");
 
 	printk(KERN_EMERG "Attempting to continue.\n");
 	/*
 	 * Do not clear the MSR_IA32_MCi_STATUS if the error is not
 	 * recoverable/continuable.This will allow BIOS to look at the MSRs
 	 * for errors if the OS could not log the error.
 	 */
-	for (i=0; i<nr_mce_banks; i++) {
+	for (i = 0; i < nr_mce_banks; i++) {
 		u32 msr;
 		msr = MSR_IA32_MC0_STATUS+i*4;
-		rdmsr (msr, low, high);
+		rdmsr(msr, low, high);
 		if (high&(1<<31)) {
 			/* Clear it */
 			wrmsr(msr, 0UL, 0UL);
@@ -214,7 +214,7 @@ static void intel_machine_check(struct pt_regs * regs, long error_code)
 		}
 	}
 	mcgstl &= ~(1<<2);
-	wrmsr (MSR_IA32_MCG_STATUS,mcgstl, mcgsth);
+	wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 }
 
 
@@ -222,30 +222,30 @@ void intel_p4_mcheck_init(struct cpuinfo_x86 *c)
 {
 	u32 l, h;
 	int i;
 
 	machine_check_vector = intel_machine_check;
 	wmb();
 
-	printk (KERN_INFO "Intel machine check architecture supported.\n");
-	rdmsr (MSR_IA32_MCG_CAP, l, h);
+	printk(KERN_INFO "Intel machine check architecture supported.\n");
+	rdmsr(MSR_IA32_MCG_CAP, l, h);
 	if (l & (1<<8))	/* Control register present ? */
-		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
+		wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
 	nr_mce_banks = l & 0xff;
 
-	for (i=0; i<nr_mce_banks; i++) {
-		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
-		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
+	for (i = 0; i < nr_mce_banks; i++) {
+		wrmsr(MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
+		wrmsr(MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
 	}
 
-	set_in_cr4 (X86_CR4_MCE);
-	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
+	set_in_cr4(X86_CR4_MCE);
+	printk(KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
 		smp_processor_id());
 
 	/* Check for P4/Xeon extended MCE MSRs */
-	rdmsr (MSR_IA32_MCG_CAP, l, h);
+	rdmsr(MSR_IA32_MCG_CAP, l, h);
 	if (l & (1<<9)) {/* MCG_EXT_P */
 		mce_num_extended_msrs = (l >> 16) & 0xff;
-		printk (KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)"
+		printk(KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)"
 			" available\n",
 			smp_processor_id(), mce_num_extended_msrs);
 
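The probe at the end of intel_p4_mcheck_init() re-reads MCG_CAP and tests bit 9 (MCG_EXT_P); when set, bits 23:16 give the number of extended state MSRs, the same field mce_64.c checks before trusting MCG_EIP for accurate RIP reporting. A standalone sketch with an invented MCG_CAP value:

/* Standalone sketch of the MCG_EXT_P probe above; the MCG_CAP value
 * is invented for illustration, not read from hardware. */
#include <stdio.h>

int main(void)
{
	unsigned int l = (12u << 16) | (1u << 9) | 0x06; /* hypothetical MCG_CAP */

	if (l & (1 << 9))	/* MCG_EXT_P */
		printf("%u extended MCE MSRs available\n", (l >> 16) & 0xff);
	return 0;
}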