| author | Ingo Molnar <mingo@elte.hu> | 2009-03-13 06:05:58 -0400 |
|---|---|---|
| committer | Ingo Molnar <mingo@elte.hu> | 2009-03-13 06:05:58 -0400 |
| commit | cd80a8142efa3468c2cd9fb52845f334c3220d54 (patch) | |
| tree | 919e88994bd3c09b34ce852d0a09bb0655d231d0 /arch/x86/kernel/cpu/mcheck/mce_intel_64.c | |
| parent | 641cd4cfcdc71ce01535b31cc4d57d59a1fae1fc (diff) | |
| parent | a98fe7f3425c6b4e90de16f8da63b0429a8fed08 (diff) | |
Merge branch 'x86/core' into core/ipi
Diffstat (limited to 'arch/x86/kernel/cpu/mcheck/mce_intel_64.c')

    -rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c | 214
    1 file changed, 210 insertions(+), 4 deletions(-)
```diff
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
index f44c3662436..aaa7d973093 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -1,17 +1,21 @@
 /*
  * Intel specific MCE features.
  * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
+ * Copyright (C) 2008, 2009 Intel Corporation
+ * Author: Andi Kleen
  */

 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <linux/percpu.h>
 #include <asm/processor.h>
+#include <asm/apic.h>
 #include <asm/msr.h>
 #include <asm/mce.h>
 #include <asm/hw_irq.h>
 #include <asm/idle.h>
 #include <asm/therm_throt.h>
+#include <asm/apic.h>

 asmlinkage void smp_thermal_interrupt(void)
 {
```
```diff
@@ -24,7 +28,7 @@ asmlinkage void smp_thermal_interrupt(void)

 	rdmsrl(MSR_IA32_THERM_STATUS, msr_val);
 	if (therm_throt_process(msr_val & 1))
-		mce_log_therm_throt_event(smp_processor_id(), msr_val);
+		mce_log_therm_throt_event(msr_val);

 	inc_irq_stat(irq_thermal_count);
 	irq_exit();
```
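The hunk above drops the explicit CPU argument from `mce_log_therm_throt_event()`. The thermal interrupt always runs on the CPU that raised it, so the logger can presumably look up the CPU id itself. A minimal user-space sketch of that refactoring pattern; the names `current_cpu_id` and `log_therm_throt_event` are illustrative stand-ins, not kernel API:

```c
#include <stdio.h>
#include <stdint.h>

/* Stand-in for smp_processor_id(); hypothetical, for illustration only. */
static int current_cpu_id(void)
{
	return 0;	/* pretend we always run on CPU 0 */
}

/* The callee now derives the CPU id itself instead of taking it as an arg. */
static void log_therm_throt_event(uint64_t status)
{
	printf("CPU %d: THERM_STATUS=%#llx\n", current_cpu_id(),
	       (unsigned long long)status);
}

int main(void)
{
	log_therm_throt_event(0x1);	/* bit 0 = thermal status flag */
	return 0;
}
```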
```diff
@@ -48,13 +52,13 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	 */
 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
 	h = apic_read(APIC_LVTTHMR);
-	if ((l & (1 << 3)) && (h & APIC_DM_SMI)) {
+	if ((l & MSR_IA32_MISC_ENABLE_TM1) && (h & APIC_DM_SMI)) {
 		printk(KERN_DEBUG
 		       "CPU%d: Thermal monitoring handled by SMI\n", cpu);
 		return;
 	}

-	if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13)))
+	if (cpu_has(c, X86_FEATURE_TM2) && (l & MSR_IA32_MISC_ENABLE_TM2))
 		tm2 = 1;

 	if (h & APIC_VECTOR_MASK) {
@@ -72,7 +76,7 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h);

 	rdmsr(MSR_IA32_MISC_ENABLE, l, h);
-	wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h);
+	wrmsr(MSR_IA32_MISC_ENABLE, l | MSR_IA32_MISC_ENABLE_TM1, h);

 	l = apic_read(APIC_LVTTHMR);
 	apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED);
```
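These two hunks replace the raw bit tests `(1 << 3)` and `(1 << 13)` against `MSR_IA32_MISC_ENABLE` with named constants. A compilable sketch of what those definitions plausibly look like; the bit positions come straight from the replaced magic numbers, but the exact header (likely asm/msr-index.h) and spelling of the definitions are assumptions, not taken from this diff:

```c
#include <stdint.h>
#include <stdio.h>

/* Assumed definitions mirroring the magic numbers the patch removes. */
#define MSR_IA32_MISC_ENABLE_TM1	(1ULL << 3)	/* thermal monitor 1 enable */
#define MSR_IA32_MISC_ENABLE_TM2	(1ULL << 13)	/* thermal monitor 2 enable */

int main(void)
{
	uint64_t misc_enable = 1ULL << 3;	/* fake MSR value: TM1 on, TM2 off */

	/* Same tests as the patched code, now self-documenting: */
	if (misc_enable & MSR_IA32_MISC_ENABLE_TM1)
		puts("TM1 enabled");
	if (misc_enable & MSR_IA32_MISC_ENABLE_TM2)
		puts("TM2 enabled");
	return 0;
}
```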
```diff
@@ -84,3 +88,57 @@ static void intel_init_thermal(struct cpuinfo_x86 *c)
 	return;
 }

+/*
+ * Support for Intel Correct Machine Check Interrupts. This allows
+ * the CPU to raise an interrupt when a corrected machine check happened.
+ * Normally we pick those up using a regular polling timer.
+ * Also supports reliable discovery of shared banks.
+ */
+
+static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);
+
+/*
+ * cmci_discover_lock protects against parallel discovery attempts
+ * which could race against each other.
+ */
+static DEFINE_SPINLOCK(cmci_discover_lock);
+
+#define CMCI_THRESHOLD 1
+
+static int cmci_supported(int *banks)
+{
+	u64 cap;
+
+	/*
+	 * Vendor check is not strictly needed, but the initial
+	 * initialization is vendor keyed and this
+	 * makes sure none of the backdoors are entered otherwise.
+	 */
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return 0;
+	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
+		return 0;
+	rdmsrl(MSR_IA32_MCG_CAP, cap);
+	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
+	return !!(cap & MCG_CMCI_P);
+}
+
+/*
+ * The interrupt handler. This is called on every event.
+ * Just call the poller directly to log any events.
+ * This could in theory increase the threshold under high load,
+ * but doesn't for now.
+ */
+static void intel_threshold_interrupt(void)
+{
+	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	mce_notify_user();
+}
+
+static void print_update(char *type, int *hdr, int num)
+{
+	if (*hdr == 0)
+		printk(KERN_INFO "CPU %d MCA banks", smp_processor_id());
+	*hdr = 1;
+	printk(KERN_CONT " %s:%d", type, num);
+}
```
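`cmci_supported()` gates everything on IA32_MCG_CAP: the low 8 bits give the bank count and a capability bit advertises CMCI. A user-space sketch of that decoding, with the `MCG_CMCI_P` bit position (bit 10, per the SDM) and the `MAX_NR_BANKS` value assumed rather than taken from the kernel headers:

```c
#include <stdint.h>
#include <stdio.h>

#define MCG_BANKCNT_MASK	0xffULL		/* bits 7:0 of IA32_MCG_CAP */
#define MCG_CMCI_P		(1ULL << 10)	/* CMCI capability (assumed bit 10) */
#define MAX_NR_BANKS		128		/* assumed kernel-side cap */

/* Mirror of the kernel's cmci_supported(), minus vendor and APIC checks. */
static int cmci_supported(uint64_t cap, int *banks)
{
	unsigned int n = (unsigned int)(cap & MCG_BANKCNT_MASK);

	*banks = n < MAX_NR_BANKS ? (int)n : MAX_NR_BANKS; /* min_t(unsigned, ...) */
	return !!(cap & MCG_CMCI_P);
}

int main(void)
{
	int banks;
	uint64_t fake_cap = MCG_CMCI_P | 9;	/* CMCI present, 9 banks */

	printf("cmci=%d banks=%d\n", cmci_supported(fake_cap, &banks), banks);
	return 0;
}
```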
```diff
@@ -86,0 +145,87 @@
+
+/*
+ * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
+ * on this CPU. Use the algorithm recommended in the SDM to discover shared
+ * banks.
+ */
+static void cmci_discover(int banks, int boot)
+{
+	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
+	int hdr = 0;
+	int i;
+
+	spin_lock(&cmci_discover_lock);
+	for (i = 0; i < banks; i++) {
+		u64 val;
+
+		if (test_bit(i, owned))
+			continue;
+
+		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+		/* Already owned by someone else? */
+		if (val & CMCI_EN) {
+			if (test_and_clear_bit(i, owned) || boot)
+				print_update("SHD", &hdr, i);
+			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+			continue;
+		}
+
+		val |= CMCI_EN | CMCI_THRESHOLD;
+		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+
+		/* Did the enable bit stick? -- the bank supports CMCI */
+		if (val & CMCI_EN) {
+			if (!test_and_set_bit(i, owned) || boot)
+				print_update("CMCI", &hdr, i);
+			__clear_bit(i, __get_cpu_var(mce_poll_banks));
+		} else {
+			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
+		}
+	}
+	spin_unlock(&cmci_discover_lock);
+	if (hdr)
+		printk(KERN_CONT "\n");
+}
+
+/*
+ * Just in case we missed an event during initialization check
+ * all the CMCI owned banks.
+ */
+void cmci_recheck(void)
+{
+	unsigned long flags;
+	int banks;
+
+	if (!mce_available(&current_cpu_data) || !cmci_supported(&banks))
+		return;
+	local_irq_save(flags);
+	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
+	local_irq_restore(flags);
+}
+
+/*
+ * Disable CMCI on this CPU for all banks it owns when it goes down.
+ * This allows other CPUs to claim the banks on rediscovery.
+ */
+void cmci_clear(void)
+{
+	int i;
+	int banks;
+	u64 val;
+
+	if (!cmci_supported(&banks))
+		return;
+	spin_lock(&cmci_discover_lock);
+	for (i = 0; i < banks; i++) {
+		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
+			continue;
+		/* Disable CMCI */
+		rdmsrl(MSR_IA32_MC0_CTL2 + i, val);
+		val &= ~(CMCI_EN|CMCI_THRESHOLD_MASK);
+		wrmsrl(MSR_IA32_MC0_CTL2 + i, val);
+		__clear_bit(i, __get_cpu_var(mce_banks_owned));
+	}
+	spin_unlock(&cmci_discover_lock);
+}
```
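`cmci_discover()` implements the SDM's claim-by-write protocol: write `CMCI_EN` into a bank's CTL2 MSR and read it back. If the bit sticks, this CPU owns the bank; if it was already set, another CPU got there first and the bank is shared. A user-space model of that handshake, with simulated registers and two sequential "CPUs" standing in for concurrent ones; `CMCI_EN` as bit 30 follows the SDM, everything else is made up for illustration:

```c
#include <stdint.h>
#include <stdio.h>

#define CMCI_EN		(1ULL << 30)	/* enable bit in MCi_CTL2 (per SDM) */
#define CMCI_THRESHOLD	1		/* interrupt after one corrected event */
#define NBANKS		4

/* One simulated CTL2 register per bank, shared by all "CPUs". */
static uint64_t ctl2[NBANKS];

static void discover(int cpu, unsigned long *owned)
{
	for (int i = 0; i < NBANKS; i++) {
		uint64_t val = ctl2[i];			/* rdmsrl */

		if (val & CMCI_EN) {			/* already claimed: shared */
			printf("CPU%d: bank %d SHD\n", cpu, i);
			continue;
		}
		ctl2[i] = val | CMCI_EN | CMCI_THRESHOLD;	/* wrmsrl */
		if (ctl2[i] & CMCI_EN) {		/* read back: bit stuck */
			*owned |= 1UL << i;		/* we own this bank */
			printf("CPU%d: bank %d CMCI\n", cpu, i);
		}
		/* On real hardware the bit may not stick: no CMCI on that bank. */
	}
}

int main(void)
{
	unsigned long owned0 = 0, owned1 = 0;

	discover(0, &owned0);	/* first claimer wins every bank */
	discover(1, &owned1);	/* second "CPU" sees them all as shared */
	printf("owned: cpu0=%#lx cpu1=%#lx\n", owned0, owned1);
	return 0;
}
```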
```diff
@@ -87,4 +232,65 @@
+
+/*
+ * After a CPU went down cycle through all the others and rediscover
+ * Must run in process context.
+ */
+void cmci_rediscover(int dying)
+{
+	int banks;
+	int cpu;
+	cpumask_var_t old;
+
+	if (!cmci_supported(&banks))
+		return;
+	if (!alloc_cpumask_var(&old, GFP_KERNEL))
+		return;
+	cpumask_copy(old, &current->cpus_allowed);
+
+	for_each_online_cpu (cpu) {
+		if (cpu == dying)
+			continue;
+		if (set_cpus_allowed_ptr(current, &cpumask_of_cpu(cpu)))
+			continue;
+		/* Recheck banks in case CPUs don't all have the same */
+		if (cmci_supported(&banks))
+			cmci_discover(banks, 0);
+	}
+
+	set_cpus_allowed_ptr(current, old);
+	free_cpumask_var(old);
+}
+
+/*
+ * Reenable CMCI on this CPU in case a CPU down failed.
+ */
+void cmci_reenable(void)
+{
+	int banks;
+	if (cmci_supported(&banks))
+		cmci_discover(banks, 0);
+}
+
+static __cpuinit void intel_init_cmci(void)
+{
+	int banks;
+
+	if (!cmci_supported(&banks))
+		return;
+
+	mce_threshold_vector = intel_threshold_interrupt;
+	cmci_discover(banks, 1);
+	/*
+	 * For CPU #0 this runs with still disabled APIC, but that's
+	 * ok because only the vector is set up. We still do another
+	 * check for the banks later for CPU #0 just to make sure
+	 * to not miss any events.
+	 */
+	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
+	cmci_recheck();
+}
+
 void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
 	intel_init_thermal(c);
+	intel_init_cmci();
 }
```
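`cmci_rediscover()` probes per-CPU MSRs by temporarily binding the current task to each online CPU in turn with `set_cpus_allowed_ptr()`, since a bank's CTL2 registers can only be read from the CPU that owns them. A user-space analogue of that migrate-and-probe pattern using Linux's `sched_setaffinity`; this is a sketch of the idea, not the kernel mechanism:

```c
#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <unistd.h>

static void probe_this_cpu(void)
{
	/* In the kernel this would re-run cmci_discover() on this CPU. */
	printf("probing from CPU %d\n", sched_getcpu());
}

int main(void)
{
	cpu_set_t old, one;
	long ncpus = sysconf(_SC_NPROCESSORS_ONLN);

	sched_getaffinity(0, sizeof(old), &old);	/* save current mask */
	for (long cpu = 0; cpu < ncpus; cpu++) {
		CPU_ZERO(&one);
		CPU_SET((int)cpu, &one);
		if (sched_setaffinity(0, sizeof(one), &one))
			continue;			/* CPU offline: skip it */
		probe_this_cpu();			/* now running on 'cpu' */
	}
	sched_setaffinity(0, sizeof(old), &old);	/* restore, like the patch */
	return 0;
}
```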