author	Ingo Molnar <mingo@elte.hu>	2009-04-08 06:31:18 -0400
committer	H. Peter Anvin <hpa@zytor.com>	2009-05-28 12:24:09 -0400
commit	c5aaf0e0702513637278ca4e27a156caa9392817 (patch)
tree	ac8e58001b7ffe60ad6a42c7c4ff13ed50ca3b49 /arch
parent	3b58dfd04bdfa52e717ead8f3c7622610eb7f950 (diff)
x86, mce: clean up p4.c
Make the coding style match that of the rest of the x86 arch code.

[ Impact: cleanup ]

Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch')
-rw-r--r--	arch/x86/kernel/cpu/mcheck/p4.c	73
1 file changed, 43 insertions(+), 30 deletions(-)
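As an aside on the mask/unmask sequence that the reorganized comments in the diff below spell out ("We'll mask the thermal vector in the lapic till we're ready", later "Unmask the thermal vector"), here is a minimal user-space sketch of the bit manipulation involved. It is an illustration only: the constant values are placeholders rather than the kernel's real definitions from <asm/apicdef.h>, and a plain variable stands in for the APIC_LVTTHMR register that the kernel accesses via apic_read()/apic_write().

#include <stdio.h>

/* Placeholder values for illustration; the real ones live in the kernel headers. */
#define APIC_DM_FIXED		0x00000u	/* "fixed" delivery mode bits */
#define APIC_LVT_MASKED		0x10000u	/* mask bit: LVT entry masked */
#define THERMAL_APIC_VECTOR	0xfau		/* thermal interrupt vector   */

int main(void)
{
	unsigned int h;

	/* Program the vector with the entry still masked, as the patch does
	 * before the handler is installed: */
	h = THERMAL_APIC_VECTOR;
	h |= APIC_DM_FIXED | APIC_LVT_MASKED;
	printf("LVTTHMR while configuring: %#x (masked)\n", h);

	/* Once setup is complete, clear only the mask bit: */
	h &= ~APIC_LVT_MASKED;
	printf("LVTTHMR after unmask:      %#x (delivering)\n", h);

	return 0;
}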
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index f53bdcbaf38..cb344aba479 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -2,18 +2,17 @@
  * P4 specific Machine Check Exception Reporting
  */
 
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/kernel.h>
 #include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
 #include <linux/smp.h>
 
+#include <asm/therm_throt.h>
 #include <asm/processor.h>
 #include <asm/system.h>
-#include <asm/msr.h>
 #include <asm/apic.h>
-
-#include <asm/therm_throt.h>
+#include <asm/msr.h>
 
 #include "mce.h"
 
@@ -36,6 +35,7 @@ static int mce_num_extended_msrs;
 
 
 #ifdef CONFIG_X86_MCE_P4THERMAL
+
 static void unexpected_thermal_interrupt(struct pt_regs *regs)
 {
 	printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
@@ -43,7 +43,7 @@ static void unexpected_thermal_interrupt(struct pt_regs *regs)
 	add_taint(TAINT_MACHINE_CHECK);
 }
 
-/* P4/Xeon Thermal transition interrupt handler */
+/* P4/Xeon Thermal transition interrupt handler: */
 static void intel_thermal_interrupt(struct pt_regs *regs)
 {
 	__u64 msr_val;
@@ -54,8 +54,9 @@ static void intel_thermal_interrupt(struct pt_regs *regs)
 	therm_throt_process(msr_val & 0x1);
 }
 
-/* Thermal interrupt handler for this CPU setup */
-static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt;
+/* Thermal interrupt handler for this CPU setup: */
+static void (*vendor_thermal_interrupt)(struct pt_regs *regs) =
+						unexpected_thermal_interrupt;
 
 void smp_thermal_interrupt(struct pt_regs *regs)
 {
@@ -65,67 +66,76 @@ void smp_thermal_interrupt(struct pt_regs *regs)
 	irq_exit();
 }
 
-/* P4/Xeon Thermal regulation detect and init */
+/* P4/Xeon Thermal regulation detect and init: */
 static void intel_init_thermal(struct cpuinfo_x86 *c)
 {
-	u32 l, h;
 	unsigned int cpu = smp_processor_id();
+	u32 l, h;
 
-	/* Thermal monitoring */
+	/* Thermal monitoring: */
 	if (!cpu_has(c, X86_FEATURE_ACPI))
 		return; /* -ENODEV */
 
-	/* Clock modulation */
+	/* Clock modulation: */
 	if (!cpu_has(c, X86_FEATURE_ACC))
 		return; /* -ENODEV */
 
-	/* first check if its enabled already, in which case there might
+	/*
+	 * First check if its enabled already, in which case there might
 	 * be some SMM goo which handles it, so we can't even put a handler
-	 * since it might be delivered via SMI already -zwanem.
+	 * since it might be delivered via SMI already:
 	 */
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 	h = apic_read(APIC_LVTTHMR);
 	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
-		printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
-				cpu);
+		printk(KERN_DEBUG
+		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
+
 		return; /* -EBUSY */
 	}
 
-	/* check whether a vector already exists, temporarily masked? */
+	/* Check whether a vector already exists, temporarily masked? */
 	if (h & APIC_VECTOR_MASK) {
-		printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already "
-				"installed\n",
+		printk(KERN_DEBUG
+		       "CPU%d: Thermal LVT vector (%#x) already installed\n",
 			cpu, (h & APIC_VECTOR_MASK));
+
 		return; /* -EBUSY */
 	}
 
-	/* The temperature transition interrupt handler setup */
-	h = THERMAL_APIC_VECTOR;		/* our delivery vector */
-	h |= (APIC_DM_FIXED | APIC_LVT_MASKED);	/* we'll mask till we're ready */
+	/*
+	 * The temperature transition interrupt handler setup:
+	 */
+
+	/* Our delivery vector: */
+	h = THERMAL_APIC_VECTOR;
+
+	/* We'll mask the thermal vector in the lapic till we're ready: */
+	h |= APIC_DM_FIXED | APIC_LVT_MASKED;
 	apic_write(APIC_LVTTHMR, h);
 
 	rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
 	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03 , h);
 
-	/* ok we're good to go... */
+	/* Ok, we're good to go... */
 	vendor_thermal_interrupt = intel_thermal_interrupt;
 
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
 
+	/* Unmask the thermal vector: */
 	l = apic_read(APIC_LVTTHMR);
 	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
+
 	printk(KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);
 
 	/* enable thermal throttle processing */
 	atomic_set(&therm_throt_en, 1);
-	return;
 }
 #endif /* CONFIG_X86_MCE_P4THERMAL */
 
-
 /* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
-static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
+static void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
 {
 	u32 h;
 
@@ -143,9 +153,9 @@ static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
 
 static void intel_machine_check(struct pt_regs *regs, long error_code)
 {
-	int recover = 1;
 	u32 alow, ahigh, high, low;
 	u32 mcgstl, mcgsth;
+	int recover = 1;
 	int i;
 
 	rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
@@ -157,7 +167,9 @@ static void intel_machine_check(struct pt_regs *regs, long error_code)
 
 	if (mce_num_extended_msrs > 0) {
 		struct intel_mce_extended_msrs dbg;
+
 		intel_get_extended_msrs(&dbg);
+
 		printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n"
 			"\teax: %08x ebx: %08x ecx: %08x edx: %08x\n"
 			"\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
@@ -171,6 +183,7 @@ static void intel_machine_check(struct pt_regs *regs, long error_code)
 	if (high & (1<<31)) {
 		char misc[20];
 		char addr[24];
+
 		misc[0] = addr[0] = '\0';
 		if (high & (1<<29))
 			recover |= 1;
@@ -196,6 +209,7 @@ static void intel_machine_check(struct pt_regs *regs, long error_code)
 		panic("Unable to continue");
 
 	printk(KERN_EMERG "Attempting to continue.\n");
+
 	/*
 	 * Do not clear the MSR_IA32_MCi_STATUS if the error is not
 	 * recoverable/continuable.This will allow BIOS to look at the MSRs
@@ -217,7 +231,6 @@ static void intel_machine_check(struct pt_regs *regs, long error_code)
 	wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 }
 
-
 void intel_p4_mcheck_init(struct cpuinfo_x86 *c)
 {
 	u32 l, h;