Diffstat (limited to 'arch/x86/kernel/cpu/mcheck/p4.c')
-rw-r--r--   arch/x86/kernel/cpu/mcheck/p4.c | 112
1 file changed, 9 insertions(+), 103 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/p4.c b/arch/x86/kernel/cpu/mcheck/p4.c
index f53bdcbaf382..4482aea9aa2e 100644
--- a/arch/x86/kernel/cpu/mcheck/p4.c
+++ b/arch/x86/kernel/cpu/mcheck/p4.c
@@ -1,21 +1,14 @@
 /*
  * P4 specific Machine Check Exception Reporting
  */
-
-#include <linux/init.h>
-#include <linux/types.h>
 #include <linux/kernel.h>
-#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/init.h>
 #include <linux/smp.h>
 
 #include <asm/processor.h>
-#include <asm/system.h>
+#include <asm/mce.h>
 #include <asm/msr.h>
-#include <asm/apic.h>
-
-#include <asm/therm_throt.h>
-
-#include "mce.h"
 
 /* as supported by the P4/Xeon family */
 struct intel_mce_extended_msrs {
@@ -34,98 +27,8 @@ struct intel_mce_extended_msrs {
 
 static int mce_num_extended_msrs;
 
-
-#ifdef CONFIG_X86_MCE_P4THERMAL
-static void unexpected_thermal_interrupt(struct pt_regs *regs)
-{
-        printk(KERN_ERR "CPU%d: Unexpected LVT TMR interrupt!\n",
-                        smp_processor_id());
-        add_taint(TAINT_MACHINE_CHECK);
-}
-
-/* P4/Xeon Thermal transition interrupt handler */
-static void intel_thermal_interrupt(struct pt_regs *regs)
-{
-        __u64 msr_val;
-
-        ack_APIC_irq();
-
-        rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
-        therm_throt_process(msr_val & 0x1);
-}
-
-/* Thermal interrupt handler for this CPU setup */
-static void (*vendor_thermal_interrupt)(struct pt_regs *regs) = unexpected_thermal_interrupt;
-
-void smp_thermal_interrupt(struct pt_regs *regs)
-{
-        irq_enter();
-        vendor_thermal_interrupt(regs);
-        __get_cpu_var(irq_stat).irq_thermal_count++;
-        irq_exit();
-}
-
-/* P4/Xeon Thermal regulation detect and init */
-static void intel_init_thermal(struct cpuinfo_x86 *c)
-{
-        u32 l, h;
-        unsigned int cpu = smp_processor_id();
-
-        /* Thermal monitoring */
-        if (!cpu_has(c, X86_FEATURE_ACPI))
-                return; /* -ENODEV */
-
-        /* Clock modulation */
-        if (!cpu_has(c, X86_FEATURE_ACC))
-                return; /* -ENODEV */
-
-        /* first check if its enabled already, in which case there might
-         * be some SMM goo which handles it, so we can't even put a handler
-         * since it might be delivered via SMI already -zwanem.
-         */
-        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-        h = apic_read(APIC_LVTTHMR);
-        if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
-                printk(KERN_DEBUG "CPU%d: Thermal monitoring handled by SMI\n",
-                                cpu);
-                return; /* -EBUSY */
-        }
-
-        /* check whether a vector already exists, temporarily masked? */
-        if (h & APIC_VECTOR_MASK) {
-                printk(KERN_DEBUG "CPU%d: Thermal LVT vector (%#x) already "
-                                "installed\n",
-                        cpu, (h & APIC_VECTOR_MASK));
-                return; /* -EBUSY */
-        }
-
-        /* The temperature transition interrupt handler setup */
-        h = THERMAL_APIC_VECTOR;                /* our delivery vector */
-        h |= (APIC_DM_FIXED | APIC_LVT_MASKED); /* we'll mask till we're ready */
-        apic_write(APIC_LVTTHMR, h);
-
-        rdmsr(MSR_IA32_THERM_INTERRUPT, l, h);
-        wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03 , h);
-
-        /* ok we're good to go... */
-        vendor_thermal_interrupt = intel_thermal_interrupt;
-
-        rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-        wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);
-
-        l = apic_read(APIC_LVTTHMR);
-        apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
-        printk(KERN_INFO "CPU%d: Thermal monitoring enabled\n", cpu);
-
-        /* enable thermal throttle processing */
-        atomic_set(&therm_throt_en, 1);
-        return;
-}
-#endif /* CONFIG_X86_MCE_P4THERMAL */
-
-
 /* P4/Xeon Extended MCE MSR retrieval, return 0 if unsupported */
-static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
+static void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
 {
         u32 h;
 
@@ -143,9 +46,9 @@ static inline void intel_get_extended_msrs(struct intel_mce_extended_msrs *r)
 
 static void intel_machine_check(struct pt_regs *regs, long error_code)
 {
-        int recover = 1;
         u32 alow, ahigh, high, low;
         u32 mcgstl, mcgsth;
+        int recover = 1;
         int i;
 
         rdmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
@@ -157,7 +60,9 @@ static void intel_machine_check(struct pt_regs *regs, long error_code)
 
         if (mce_num_extended_msrs > 0) {
                 struct intel_mce_extended_msrs dbg;
+
                 intel_get_extended_msrs(&dbg);
+
                 printk(KERN_DEBUG "CPU %d: EIP: %08x EFLAGS: %08x\n"
                         "\teax: %08x ebx: %08x ecx: %08x edx: %08x\n"
                         "\tesi: %08x edi: %08x ebp: %08x esp: %08x\n",
@@ -171,6 +76,7 @@ static void intel_machine_check(struct pt_regs *regs, long error_code)
                 if (high & (1<<31)) {
                         char misc[20];
                         char addr[24];
+
                         misc[0] = addr[0] = '\0';
                         if (high & (1<<29))
                                 recover |= 1;
@@ -196,6 +102,7 @@ static void intel_machine_check(struct pt_regs *regs, long error_code)
                 panic("Unable to continue");
 
         printk(KERN_EMERG "Attempting to continue.\n");
+
         /*
          * Do not clear the MSR_IA32_MCi_STATUS if the error is not
          * recoverable/continuable.This will allow BIOS to look at the MSRs
@@ -217,7 +124,6 @@ static void intel_machine_check(struct pt_regs *regs, long error_code)
         wrmsr(MSR_IA32_MCG_STATUS, mcgstl, mcgsth);
 }
 
-
 void intel_p4_mcheck_init(struct cpuinfo_x86 *c)
 {
         u32 l, h;