author     Thomas Gleixner <tglx@linutronix.de>    2007-10-23 16:37:23 -0400
committer  Thomas Gleixner <tglx@linutronix.de>    2007-10-23 16:37:23 -0400
commit     01e11182e73eb36af1cc7f3b023d25aa62fd3a8d
tree       0d678bb97475c3ce9a98458a662ddc6d2f0da640  /arch/x86/kernel/cpu/mcheck
parent     3bc258ad87e5b0bbbca247b24ce8fac380c7d86b
x86: consolidate the cpu/ related code usage
The x86_64 arch/x86/kernel/Makefile pulls in code from
arch/x86/kernel/cpu/... via direct path references.
Unify it with the nicely structured i386 way and reuse the existing
subdirectory make rules.
Also move the machine check related sources into ...kernel/cpu/mcheck,
where the other machine check related code lives.
No code change.
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'arch/x86/kernel/cpu/mcheck')
-rw-r--r--  arch/x86/kernel/cpu/mcheck/Makefile        |   3
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_64.c        | 890
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd_64.c    | 690
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_intel_64.c  |  90
4 files changed, 1673 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/Makefile b/arch/x86/kernel/cpu/mcheck/Makefile
index 403e720497ee..d7d2323bbb69 100644
--- a/arch/x86/kernel/cpu/mcheck/Makefile
+++ b/arch/x86/kernel/cpu/mcheck/Makefile
@@ -1,3 +1,6 @@
 obj-y = mce_$(BITS).o therm_throt.o
+
 obj-$(CONFIG_X86_32) += k7.o p4.o p5.o p6.o winchip.o
+obj-$(CONFIG_X86_MCE_INTEL) += mce_intel_64.o
+obj-$(CONFIG_X86_MCE_AMD) += mce_amd_64.o
 obj-$(CONFIG_X86_MCE_NONFATAL) += non-fatal.o
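The two conditionally built objects added above are only linked when
CONFIG_X86_MCE_INTEL / CONFIG_X86_MCE_AMD are set, yet mce_64.c below calls
mce_intel_feature_init() and mce_amd_feature_init() unconditionally from
mce_cpu_features(). That only works if <asm/mce.h> falls back to empty inline
stubs when the options are off; a minimal sketch of that stub idiom (an
assumption about the header, not part of this diff):

    #ifdef CONFIG_X86_MCE_INTEL
    void mce_intel_feature_init(struct cpuinfo_x86 *c);     /* mce_intel_64.c */
    #else
    static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
    #endif

    #ifdef CONFIG_X86_MCE_AMD
    void mce_amd_feature_init(struct cpuinfo_x86 *c);       /* mce_amd_64.c */
    #else
    static inline void mce_amd_feature_init(struct cpuinfo_x86 *c) { }
    #endif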
diff --git a/arch/x86/kernel/cpu/mcheck/mce_64.c b/arch/x86/kernel/cpu/mcheck/mce_64.c
new file mode 100644
index 000000000000..07bbfe7aa7f7
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/mce_64.c
@@ -0,0 +1,890 @@
1 | /* | ||
2 | * Machine check handler. | ||
3 | * K8 parts Copyright 2002,2003 Andi Kleen, SuSE Labs. | ||
4 | * Rest from unknown author(s). | ||
5 | * 2004 Andi Kleen. Rewrote most of it. | ||
6 | */ | ||
7 | |||
8 | #include <linux/init.h> | ||
9 | #include <linux/types.h> | ||
10 | #include <linux/kernel.h> | ||
11 | #include <linux/sched.h> | ||
12 | #include <linux/string.h> | ||
13 | #include <linux/rcupdate.h> | ||
14 | #include <linux/kallsyms.h> | ||
15 | #include <linux/sysdev.h> | ||
16 | #include <linux/miscdevice.h> | ||
17 | #include <linux/fs.h> | ||
18 | #include <linux/capability.h> | ||
19 | #include <linux/cpu.h> | ||
20 | #include <linux/percpu.h> | ||
21 | #include <linux/poll.h> | ||
22 | #include <linux/thread_info.h> | ||
23 | #include <linux/ctype.h> | ||
24 | #include <linux/kmod.h> | ||
25 | #include <linux/kdebug.h> | ||
26 | #include <asm/processor.h> | ||
27 | #include <asm/msr.h> | ||
28 | #include <asm/mce.h> | ||
29 | #include <asm/uaccess.h> | ||
30 | #include <asm/smp.h> | ||
31 | #include <asm/idle.h> | ||
32 | |||
33 | #define MISC_MCELOG_MINOR 227 | ||
34 | #define NR_BANKS 6 | ||
35 | |||
36 | atomic_t mce_entry; | ||
37 | |||
38 | static int mce_dont_init; | ||
39 | |||
40 | /* | ||
41 | * Tolerant levels: | ||
42 | * 0: always panic on uncorrected errors, log corrected errors | ||
43 | * 1: panic or SIGBUS on uncorrected errors, log corrected errors | ||
44 | * 2: SIGBUS or log uncorrected errors (if possible), log corrected errors | ||
45 | * 3: never panic or SIGBUS, log all errors (for testing only) | ||
46 | */ | ||
47 | static int tolerant = 1; | ||
48 | static int banks; | ||
49 | static unsigned long bank[NR_BANKS] = { [0 ... NR_BANKS-1] = ~0UL }; | ||
50 | static unsigned long notify_user; | ||
51 | static int rip_msr; | ||
52 | static int mce_bootlog = 1; | ||
53 | static atomic_t mce_events; | ||
54 | |||
55 | static char trigger[128]; | ||
56 | static char *trigger_argv[2] = { trigger, NULL }; | ||
57 | |||
58 | static DECLARE_WAIT_QUEUE_HEAD(mce_wait); | ||
59 | |||
60 | /* | ||
61 | * Lockless MCE logging infrastructure. | ||
62 | * This avoids deadlocks on printk locks without having to break locks. Also | ||
63 | * separate MCEs from kernel messages to avoid bogus bug reports. | ||
64 | */ | ||
65 | |||
66 | struct mce_log mcelog = { | ||
67 | MCE_LOG_SIGNATURE, | ||
68 | MCE_LOG_LEN, | ||
69 | }; | ||
70 | |||
71 | void mce_log(struct mce *mce) | ||
72 | { | ||
73 | unsigned next, entry; | ||
74 | atomic_inc(&mce_events); | ||
75 | mce->finished = 0; | ||
76 | wmb(); | ||
77 | for (;;) { | ||
78 | entry = rcu_dereference(mcelog.next); | ||
79 | for (;;) { | ||
80 | /* When the buffer fills up discard new entries. Assume | ||
81 | that the earlier errors are the more interesting. */ | ||
82 | if (entry >= MCE_LOG_LEN) { | ||
83 | set_bit(MCE_OVERFLOW, &mcelog.flags); | ||
84 | return; | ||
85 | } | ||
86 | /* Old left over entry. Skip. */ | ||
87 | if (mcelog.entry[entry].finished) { | ||
88 | entry++; | ||
89 | continue; | ||
90 | } | ||
91 | break; | ||
92 | } | ||
93 | smp_rmb(); | ||
94 | next = entry + 1; | ||
95 | if (cmpxchg(&mcelog.next, entry, next) == entry) | ||
96 | break; | ||
97 | } | ||
98 | memcpy(mcelog.entry + entry, mce, sizeof(struct mce)); | ||
99 | wmb(); | ||
100 | mcelog.entry[entry].finished = 1; | ||
101 | wmb(); | ||
102 | |||
103 | set_bit(0, &notify_user); | ||
104 | } | ||
105 | |||
106 | static void print_mce(struct mce *m) | ||
107 | { | ||
108 | printk(KERN_EMERG "\n" | ||
109 | KERN_EMERG "HARDWARE ERROR\n" | ||
110 | KERN_EMERG | ||
111 | "CPU %d: Machine Check Exception: %16Lx Bank %d: %016Lx\n", | ||
112 | m->cpu, m->mcgstatus, m->bank, m->status); | ||
113 | if (m->rip) { | ||
114 | printk(KERN_EMERG | ||
115 | "RIP%s %02x:<%016Lx> ", | ||
116 | !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", | ||
117 | m->cs, m->rip); | ||
118 | if (m->cs == __KERNEL_CS) | ||
119 | print_symbol("{%s}", m->rip); | ||
120 | printk("\n"); | ||
121 | } | ||
122 | printk(KERN_EMERG "TSC %Lx ", m->tsc); | ||
123 | if (m->addr) | ||
124 | printk("ADDR %Lx ", m->addr); | ||
125 | if (m->misc) | ||
126 | printk("MISC %Lx ", m->misc); | ||
127 | printk("\n"); | ||
128 | printk(KERN_EMERG "This is not a software problem!\n"); | ||
129 | printk(KERN_EMERG | ||
130 | "Run through mcelog --ascii to decode and contact your hardware vendor\n"); | ||
131 | } | ||
132 | |||
133 | static void mce_panic(char *msg, struct mce *backup, unsigned long start) | ||
134 | { | ||
135 | int i; | ||
136 | |||
137 | oops_begin(); | ||
138 | for (i = 0; i < MCE_LOG_LEN; i++) { | ||
139 | unsigned long tsc = mcelog.entry[i].tsc; | ||
140 | if (time_before(tsc, start)) | ||
141 | continue; | ||
142 | print_mce(&mcelog.entry[i]); | ||
143 | if (backup && mcelog.entry[i].tsc == backup->tsc) | ||
144 | backup = NULL; | ||
145 | } | ||
146 | if (backup) | ||
147 | print_mce(backup); | ||
148 | panic(msg); | ||
149 | } | ||
150 | |||
151 | static int mce_available(struct cpuinfo_x86 *c) | ||
152 | { | ||
153 | return cpu_has(c, X86_FEATURE_MCE) && cpu_has(c, X86_FEATURE_MCA); | ||
154 | } | ||
155 | |||
156 | static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) | ||
157 | { | ||
158 | if (regs && (m->mcgstatus & MCG_STATUS_RIPV)) { | ||
159 | m->rip = regs->rip; | ||
160 | m->cs = regs->cs; | ||
161 | } else { | ||
162 | m->rip = 0; | ||
163 | m->cs = 0; | ||
164 | } | ||
165 | if (rip_msr) { | ||
166 | /* Assume the RIP in the MSR is exact. Is this true? */ | ||
167 | m->mcgstatus |= MCG_STATUS_EIPV; | ||
168 | rdmsrl(rip_msr, m->rip); | ||
169 | m->cs = 0; | ||
170 | } | ||
171 | } | ||
172 | |||
173 | /* | ||
174 | * The actual machine check handler | ||
175 | */ | ||
176 | |||
177 | void do_machine_check(struct pt_regs * regs, long error_code) | ||
178 | { | ||
179 | struct mce m, panicm; | ||
180 | u64 mcestart = 0; | ||
181 | int i; | ||
182 | int panicm_found = 0; | ||
183 | /* | ||
184 | * If no_way_out gets set, there is no safe way to recover from this | ||
185 | * MCE. If tolerant is cranked up, we'll try anyway. | ||
186 | */ | ||
187 | int no_way_out = 0; | ||
188 | /* | ||
189 | * If kill_it gets set, there might be a way to recover from this | ||
190 | * error. | ||
191 | */ | ||
192 | int kill_it = 0; | ||
193 | |||
194 | atomic_inc(&mce_entry); | ||
195 | |||
196 | if (regs) | ||
197 | notify_die(DIE_NMI, "machine check", regs, error_code, 18, SIGKILL); | ||
198 | if (!banks) | ||
199 | goto out2; | ||
200 | |||
201 | memset(&m, 0, sizeof(struct mce)); | ||
202 | m.cpu = smp_processor_id(); | ||
203 | rdmsrl(MSR_IA32_MCG_STATUS, m.mcgstatus); | ||
204 | /* if the restart IP is not valid, we're done for */ | ||
205 | if (!(m.mcgstatus & MCG_STATUS_RIPV)) | ||
206 | no_way_out = 1; | ||
207 | |||
208 | rdtscll(mcestart); | ||
209 | barrier(); | ||
210 | |||
211 | for (i = 0; i < banks; i++) { | ||
212 | if (!bank[i]) | ||
213 | continue; | ||
214 | |||
215 | m.misc = 0; | ||
216 | m.addr = 0; | ||
217 | m.bank = i; | ||
218 | m.tsc = 0; | ||
219 | |||
220 | rdmsrl(MSR_IA32_MC0_STATUS + i*4, m.status); | ||
221 | if ((m.status & MCI_STATUS_VAL) == 0) | ||
222 | continue; | ||
223 | |||
224 | if (m.status & MCI_STATUS_EN) { | ||
225 | /* if PCC was set, there's no way out */ | ||
226 | no_way_out |= !!(m.status & MCI_STATUS_PCC); | ||
227 | /* | ||
228 | * If this error was uncorrectable and there was | ||
229 | * an overflow, we're in trouble. If no overflow, | ||
230 | * we might get away with just killing a task. | ||
231 | */ | ||
232 | if (m.status & MCI_STATUS_UC) { | ||
233 | if (tolerant < 1 || m.status & MCI_STATUS_OVER) | ||
234 | no_way_out = 1; | ||
235 | kill_it = 1; | ||
236 | } | ||
237 | } | ||
238 | |||
239 | if (m.status & MCI_STATUS_MISCV) | ||
240 | rdmsrl(MSR_IA32_MC0_MISC + i*4, m.misc); | ||
241 | if (m.status & MCI_STATUS_ADDRV) | ||
242 | rdmsrl(MSR_IA32_MC0_ADDR + i*4, m.addr); | ||
243 | |||
244 | mce_get_rip(&m, regs); | ||
245 | if (error_code >= 0) | ||
246 | rdtscll(m.tsc); | ||
247 | if (error_code != -2) | ||
248 | mce_log(&m); | ||
249 | |||
250 | /* Did this bank cause the exception? */ | ||
251 | /* Assume that the bank with uncorrectable errors did it, | ||
252 | and that there is only a single one. */ | ||
253 | if ((m.status & MCI_STATUS_UC) && (m.status & MCI_STATUS_EN)) { | ||
254 | panicm = m; | ||
255 | panicm_found = 1; | ||
256 | } | ||
257 | |||
258 | add_taint(TAINT_MACHINE_CHECK); | ||
259 | } | ||
260 | |||
261 | /* Never do anything final in the polling timer */ | ||
262 | if (!regs) | ||
263 | goto out; | ||
264 | |||
265 | /* If we didn't find an uncorrectable error, pick | ||
266 | the last one (shouldn't happen, just being safe). */ | ||
267 | if (!panicm_found) | ||
268 | panicm = m; | ||
269 | |||
270 | /* | ||
271 | * If we have decided that we just CAN'T continue, and the user | ||
272 | * has not set tolerant to an insane level, give up and die. | ||
273 | */ | ||
274 | if (no_way_out && tolerant < 3) | ||
275 | mce_panic("Machine check", &panicm, mcestart); | ||
276 | |||
277 | /* | ||
278 | * If the error seems to be unrecoverable, something should be | ||
279 | * done. Try to kill as little as possible. If we can kill just | ||
280 | * one task, do that. If the user has set the tolerance very | ||
281 | * high, don't try to do anything at all. | ||
282 | */ | ||
283 | if (kill_it && tolerant < 3) { | ||
284 | int user_space = 0; | ||
285 | |||
286 | /* | ||
287 | * If the EIPV bit is set, it means the saved IP is the | ||
288 | * instruction which caused the MCE. | ||
289 | */ | ||
290 | if (m.mcgstatus & MCG_STATUS_EIPV) | ||
291 | user_space = panicm.rip && (panicm.cs & 3); | ||
292 | |||
293 | /* | ||
294 | * If we know that the error was in user space, send a | ||
295 | * SIGBUS. Otherwise, panic if tolerance is low. | ||
296 | * | ||
297 | * do_exit() takes an awful lot of locks and has a slight | ||
298 | * risk of deadlocking. | ||
299 | */ | ||
300 | if (user_space) { | ||
301 | do_exit(SIGBUS); | ||
302 | } else if (panic_on_oops || tolerant < 2) { | ||
303 | mce_panic("Uncorrected machine check", | ||
304 | &panicm, mcestart); | ||
305 | } | ||
306 | } | ||
307 | |||
308 | /* notify userspace ASAP */ | ||
309 | set_thread_flag(TIF_MCE_NOTIFY); | ||
310 | |||
311 | out: | ||
312 | /* the last thing we do is clear state */ | ||
313 | for (i = 0; i < banks; i++) | ||
314 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
315 | wrmsrl(MSR_IA32_MCG_STATUS, 0); | ||
316 | out2: | ||
317 | atomic_dec(&mce_entry); | ||
318 | } | ||
319 | |||
320 | #ifdef CONFIG_X86_MCE_INTEL | ||
321 | /*** | ||
322 | * mce_log_therm_throt_event - Logs the thermal throttling event to mcelog | ||
323 | * @cpu: The CPU on which the event occurred. | ||
324 | * @status: Event status information | ||
325 | * | ||
326 | * This function should be called by the thermal interrupt after the | ||
327 | * event has been processed and the decision was made to log the event | ||
328 | * further. | ||
329 | * | ||
330 | * The status parameter will be saved to the 'status' field of 'struct mce' | ||
331 | * and historically has been the register value of the | ||
332 | * MSR_IA32_THERMAL_STATUS (Intel) msr. | ||
333 | */ | ||
334 | void mce_log_therm_throt_event(unsigned int cpu, __u64 status) | ||
335 | { | ||
336 | struct mce m; | ||
337 | |||
338 | memset(&m, 0, sizeof(m)); | ||
339 | m.cpu = cpu; | ||
340 | m.bank = MCE_THERMAL_BANK; | ||
341 | m.status = status; | ||
342 | rdtscll(m.tsc); | ||
343 | mce_log(&m); | ||
344 | } | ||
345 | #endif /* CONFIG_X86_MCE_INTEL */ | ||
346 | |||
347 | /* | ||
348 | * Periodic polling timer for "silent" machine check errors. If the | ||
349 | * poller finds an MCE, poll 2x faster. When the poller finds no more | ||
350 | * errors, poll 2x slower (up to check_interval seconds). | ||
351 | */ | ||
352 | |||
353 | static int check_interval = 5 * 60; /* 5 minutes */ | ||
354 | static int next_interval; /* in jiffies */ | ||
355 | static void mcheck_timer(struct work_struct *work); | ||
356 | static DECLARE_DELAYED_WORK(mcheck_work, mcheck_timer); | ||
357 | |||
358 | static void mcheck_check_cpu(void *info) | ||
359 | { | ||
360 | if (mce_available(&current_cpu_data)) | ||
361 | do_machine_check(NULL, 0); | ||
362 | } | ||
363 | |||
364 | static void mcheck_timer(struct work_struct *work) | ||
365 | { | ||
366 | on_each_cpu(mcheck_check_cpu, NULL, 1, 1); | ||
367 | |||
368 | /* | ||
369 | * Alert userspace if needed. If we logged an MCE, reduce the | ||
370 | * polling interval, otherwise increase the polling interval. | ||
371 | */ | ||
372 | if (mce_notify_user()) { | ||
373 | next_interval = max(next_interval/2, HZ/100); | ||
374 | } else { | ||
375 | next_interval = min(next_interval*2, | ||
376 | (int)round_jiffies_relative(check_interval*HZ)); | ||
377 | } | ||
378 | |||
379 | schedule_delayed_work(&mcheck_work, next_interval); | ||
380 | } | ||
381 | |||
382 | /* | ||
383 | * This is only called from process context. This is where we do | ||
384 | * anything we need to alert userspace about new MCEs. This is called | ||
385 | * directly from the poller and also from entry.S and idle, thanks to | ||
386 | * TIF_MCE_NOTIFY. | ||
387 | */ | ||
388 | int mce_notify_user(void) | ||
389 | { | ||
390 | clear_thread_flag(TIF_MCE_NOTIFY); | ||
391 | if (test_and_clear_bit(0, ¬ify_user)) { | ||
392 | static unsigned long last_print; | ||
393 | unsigned long now = jiffies; | ||
394 | |||
395 | wake_up_interruptible(&mce_wait); | ||
396 | if (trigger[0]) | ||
397 | call_usermodehelper(trigger, trigger_argv, NULL, | ||
398 | UMH_NO_WAIT); | ||
399 | |||
400 | if (time_after_eq(now, last_print + (check_interval*HZ))) { | ||
401 | last_print = now; | ||
402 | printk(KERN_INFO "Machine check events logged\n"); | ||
403 | } | ||
404 | |||
405 | return 1; | ||
406 | } | ||
407 | return 0; | ||
408 | } | ||
409 | |||
410 | /* see if the idle task needs to notify userspace */ | ||
411 | static int | ||
412 | mce_idle_callback(struct notifier_block *nfb, unsigned long action, void *junk) | ||
413 | { | ||
414 | /* IDLE_END should be safe - interrupts are back on */ | ||
415 | if (action == IDLE_END && test_thread_flag(TIF_MCE_NOTIFY)) | ||
416 | mce_notify_user(); | ||
417 | |||
418 | return NOTIFY_OK; | ||
419 | } | ||
420 | |||
421 | static struct notifier_block mce_idle_notifier = { | ||
422 | .notifier_call = mce_idle_callback, | ||
423 | }; | ||
424 | |||
425 | static __init int periodic_mcheck_init(void) | ||
426 | { | ||
427 | next_interval = check_interval * HZ; | ||
428 | if (next_interval) | ||
429 | schedule_delayed_work(&mcheck_work, | ||
430 | round_jiffies_relative(next_interval)); | ||
431 | idle_notifier_register(&mce_idle_notifier); | ||
432 | return 0; | ||
433 | } | ||
434 | __initcall(periodic_mcheck_init); | ||
435 | |||
436 | |||
437 | /* | ||
438 | * Initialize Machine Checks for a CPU. | ||
439 | */ | ||
440 | static void mce_init(void *dummy) | ||
441 | { | ||
442 | u64 cap; | ||
443 | int i; | ||
444 | |||
445 | rdmsrl(MSR_IA32_MCG_CAP, cap); | ||
446 | banks = cap & 0xff; | ||
447 | if (banks > NR_BANKS) { | ||
448 | printk(KERN_INFO "MCE: warning: using only %d banks\n", banks); | ||
449 | banks = NR_BANKS; | ||
450 | } | ||
451 | /* Use accurate RIP reporting if available. */ | ||
452 | if ((cap & (1<<9)) && ((cap >> 16) & 0xff) >= 9) | ||
453 | rip_msr = MSR_IA32_MCG_EIP; | ||
454 | |||
455 | /* Log the machine checks left over from the previous reset. | ||
456 | This also clears all registers */ | ||
457 | do_machine_check(NULL, mce_bootlog ? -1 : -2); | ||
458 | |||
459 | set_in_cr4(X86_CR4_MCE); | ||
460 | |||
461 | if (cap & MCG_CTL_P) | ||
462 | wrmsr(MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff); | ||
463 | |||
464 | for (i = 0; i < banks; i++) { | ||
465 | wrmsrl(MSR_IA32_MC0_CTL+4*i, bank[i]); | ||
466 | wrmsrl(MSR_IA32_MC0_STATUS+4*i, 0); | ||
467 | } | ||
468 | } | ||
469 | |||
470 | /* Add per CPU specific workarounds here */ | ||
471 | static void __cpuinit mce_cpu_quirks(struct cpuinfo_x86 *c) | ||
472 | { | ||
473 | /* This should be disabled by the BIOS, but isn't always */ | ||
474 | if (c->x86_vendor == X86_VENDOR_AMD && c->x86 == 15) { | ||
475 | /* disable GART TBL walk error reporting, which trips off | ||
476 | incorrectly with the IOMMU & 3ware & Cerberus. */ | ||
477 | clear_bit(10, &bank[4]); | ||
478 | /* Lots of broken BIOS around that don't clear them | ||
479 | by default and leave crap in there. Don't log. */ | ||
480 | mce_bootlog = 0; | ||
481 | } | ||
482 | |||
483 | } | ||
484 | |||
485 | static void __cpuinit mce_cpu_features(struct cpuinfo_x86 *c) | ||
486 | { | ||
487 | switch (c->x86_vendor) { | ||
488 | case X86_VENDOR_INTEL: | ||
489 | mce_intel_feature_init(c); | ||
490 | break; | ||
491 | case X86_VENDOR_AMD: | ||
492 | mce_amd_feature_init(c); | ||
493 | break; | ||
494 | default: | ||
495 | break; | ||
496 | } | ||
497 | } | ||
498 | |||
499 | /* | ||
500 | * Called for each booted CPU to set up machine checks. | ||
501 | * Must be called with preempt off. | ||
502 | */ | ||
503 | void __cpuinit mcheck_init(struct cpuinfo_x86 *c) | ||
504 | { | ||
505 | static cpumask_t mce_cpus = CPU_MASK_NONE; | ||
506 | |||
507 | mce_cpu_quirks(c); | ||
508 | |||
509 | if (mce_dont_init || | ||
510 | cpu_test_and_set(smp_processor_id(), mce_cpus) || | ||
511 | !mce_available(c)) | ||
512 | return; | ||
513 | |||
514 | mce_init(NULL); | ||
515 | mce_cpu_features(c); | ||
516 | } | ||
517 | |||
518 | /* | ||
519 | * Character device to read and clear the MCE log. | ||
520 | */ | ||
521 | |||
522 | static DEFINE_SPINLOCK(mce_state_lock); | ||
523 | static int open_count; /* #times opened */ | ||
524 | static int open_exclu; /* already open exclusive? */ | ||
525 | |||
526 | static int mce_open(struct inode *inode, struct file *file) | ||
527 | { | ||
528 | spin_lock(&mce_state_lock); | ||
529 | |||
530 | if (open_exclu || (open_count && (file->f_flags & O_EXCL))) { | ||
531 | spin_unlock(&mce_state_lock); | ||
532 | return -EBUSY; | ||
533 | } | ||
534 | |||
535 | if (file->f_flags & O_EXCL) | ||
536 | open_exclu = 1; | ||
537 | open_count++; | ||
538 | |||
539 | spin_unlock(&mce_state_lock); | ||
540 | |||
541 | return nonseekable_open(inode, file); | ||
542 | } | ||
543 | |||
544 | static int mce_release(struct inode *inode, struct file *file) | ||
545 | { | ||
546 | spin_lock(&mce_state_lock); | ||
547 | |||
548 | open_count--; | ||
549 | open_exclu = 0; | ||
550 | |||
551 | spin_unlock(&mce_state_lock); | ||
552 | |||
553 | return 0; | ||
554 | } | ||
555 | |||
556 | static void collect_tscs(void *data) | ||
557 | { | ||
558 | unsigned long *cpu_tsc = (unsigned long *)data; | ||
559 | rdtscll(cpu_tsc[smp_processor_id()]); | ||
560 | } | ||
561 | |||
562 | static ssize_t mce_read(struct file *filp, char __user *ubuf, size_t usize, loff_t *off) | ||
563 | { | ||
564 | unsigned long *cpu_tsc; | ||
565 | static DECLARE_MUTEX(mce_read_sem); | ||
566 | unsigned next; | ||
567 | char __user *buf = ubuf; | ||
568 | int i, err; | ||
569 | |||
570 | cpu_tsc = kmalloc(NR_CPUS * sizeof(long), GFP_KERNEL); | ||
571 | if (!cpu_tsc) | ||
572 | return -ENOMEM; | ||
573 | |||
574 | down(&mce_read_sem); | ||
575 | next = rcu_dereference(mcelog.next); | ||
576 | |||
577 | /* Only supports full reads right now */ | ||
578 | if (*off != 0 || usize < MCE_LOG_LEN*sizeof(struct mce)) { | ||
579 | up(&mce_read_sem); | ||
580 | kfree(cpu_tsc); | ||
581 | return -EINVAL; | ||
582 | } | ||
583 | |||
584 | err = 0; | ||
585 | for (i = 0; i < next; i++) { | ||
586 | unsigned long start = jiffies; | ||
587 | while (!mcelog.entry[i].finished) { | ||
588 | if (time_after_eq(jiffies, start + 2)) { | ||
589 | memset(mcelog.entry + i,0, sizeof(struct mce)); | ||
590 | goto timeout; | ||
591 | } | ||
592 | cpu_relax(); | ||
593 | } | ||
594 | smp_rmb(); | ||
595 | err |= copy_to_user(buf, mcelog.entry + i, sizeof(struct mce)); | ||
596 | buf += sizeof(struct mce); | ||
597 | timeout: | ||
598 | ; | ||
599 | } | ||
600 | |||
601 | memset(mcelog.entry, 0, next * sizeof(struct mce)); | ||
602 | mcelog.next = 0; | ||
603 | |||
604 | synchronize_sched(); | ||
605 | |||
606 | /* Collect entries that were still getting written before the synchronize. */ | ||
607 | |||
608 | on_each_cpu(collect_tscs, cpu_tsc, 1, 1); | ||
609 | for (i = next; i < MCE_LOG_LEN; i++) { | ||
610 | if (mcelog.entry[i].finished && | ||
611 | mcelog.entry[i].tsc < cpu_tsc[mcelog.entry[i].cpu]) { | ||
612 | err |= copy_to_user(buf, mcelog.entry+i, sizeof(struct mce)); | ||
613 | smp_rmb(); | ||
614 | buf += sizeof(struct mce); | ||
615 | memset(&mcelog.entry[i], 0, sizeof(struct mce)); | ||
616 | } | ||
617 | } | ||
618 | up(&mce_read_sem); | ||
619 | kfree(cpu_tsc); | ||
620 | return err ? -EFAULT : buf - ubuf; | ||
621 | } | ||
622 | |||
623 | static unsigned int mce_poll(struct file *file, poll_table *wait) | ||
624 | { | ||
625 | poll_wait(file, &mce_wait, wait); | ||
626 | if (rcu_dereference(mcelog.next)) | ||
627 | return POLLIN | POLLRDNORM; | ||
628 | return 0; | ||
629 | } | ||
630 | |||
631 | static int mce_ioctl(struct inode *i, struct file *f,unsigned int cmd, unsigned long arg) | ||
632 | { | ||
633 | int __user *p = (int __user *)arg; | ||
634 | if (!capable(CAP_SYS_ADMIN)) | ||
635 | return -EPERM; | ||
636 | switch (cmd) { | ||
637 | case MCE_GET_RECORD_LEN: | ||
638 | return put_user(sizeof(struct mce), p); | ||
639 | case MCE_GET_LOG_LEN: | ||
640 | return put_user(MCE_LOG_LEN, p); | ||
641 | case MCE_GETCLEAR_FLAGS: { | ||
642 | unsigned flags; | ||
643 | do { | ||
644 | flags = mcelog.flags; | ||
645 | } while (cmpxchg(&mcelog.flags, flags, 0) != flags); | ||
646 | return put_user(flags, p); | ||
647 | } | ||
648 | default: | ||
649 | return -ENOTTY; | ||
650 | } | ||
651 | } | ||
652 | |||
653 | static const struct file_operations mce_chrdev_ops = { | ||
654 | .open = mce_open, | ||
655 | .release = mce_release, | ||
656 | .read = mce_read, | ||
657 | .poll = mce_poll, | ||
658 | .ioctl = mce_ioctl, | ||
659 | }; | ||
660 | |||
661 | static struct miscdevice mce_log_device = { | ||
662 | MISC_MCELOG_MINOR, | ||
663 | "mcelog", | ||
664 | &mce_chrdev_ops, | ||
665 | }; | ||
666 | |||
667 | static unsigned long old_cr4 __initdata; | ||
668 | |||
669 | void __init stop_mce(void) | ||
670 | { | ||
671 | old_cr4 = read_cr4(); | ||
672 | clear_in_cr4(X86_CR4_MCE); | ||
673 | } | ||
674 | |||
675 | void __init restart_mce(void) | ||
676 | { | ||
677 | if (old_cr4 & X86_CR4_MCE) | ||
678 | set_in_cr4(X86_CR4_MCE); | ||
679 | } | ||
680 | |||
681 | /* | ||
682 | * Old style boot options parsing. Only for compatibility. | ||
683 | */ | ||
684 | |||
685 | static int __init mcheck_disable(char *str) | ||
686 | { | ||
687 | mce_dont_init = 1; | ||
688 | return 1; | ||
689 | } | ||
690 | |||
691 | /* mce=off disables machine check. Note you can re-enable it later | ||
692 | using sysfs. | ||
693 | mce=TOLERANCELEVEL (number, see above) | ||
694 | mce=bootlog Log MCEs from before booting. Disabled by default on AMD. | ||
695 | mce=nobootlog Don't log MCEs from before booting. */ | ||
696 | static int __init mcheck_enable(char *str) | ||
697 | { | ||
698 | if (!strcmp(str, "off")) | ||
699 | mce_dont_init = 1; | ||
700 | else if (!strcmp(str, "bootlog") || !strcmp(str,"nobootlog")) | ||
701 | mce_bootlog = str[0] == 'b'; | ||
702 | else if (isdigit(str[0])) | ||
703 | get_option(&str, &tolerant); | ||
704 | else | ||
705 | printk("mce= argument %s ignored. Please use /sys", str); | ||
706 | return 1; | ||
707 | } | ||
708 | |||
709 | __setup("nomce", mcheck_disable); | ||
710 | __setup("mce=", mcheck_enable); | ||
711 | |||
712 | /* | ||
713 | * Sysfs support | ||
714 | */ | ||
715 | |||
716 | /* On resume clear all MCE state. Don't want to see leftovers from the BIOS. | ||
717 | Only one CPU is active at this time, the others get readded later using | ||
718 | CPU hotplug. */ | ||
719 | static int mce_resume(struct sys_device *dev) | ||
720 | { | ||
721 | mce_init(NULL); | ||
722 | return 0; | ||
723 | } | ||
724 | |||
725 | /* Reinit MCEs after user configuration changes */ | ||
726 | static void mce_restart(void) | ||
727 | { | ||
728 | if (next_interval) | ||
729 | cancel_delayed_work(&mcheck_work); | ||
730 | /* Timer race is harmless here */ | ||
731 | on_each_cpu(mce_init, NULL, 1, 1); | ||
732 | next_interval = check_interval * HZ; | ||
733 | if (next_interval) | ||
734 | schedule_delayed_work(&mcheck_work, | ||
735 | round_jiffies_relative(next_interval)); | ||
736 | } | ||
737 | |||
738 | static struct sysdev_class mce_sysclass = { | ||
739 | .resume = mce_resume, | ||
740 | set_kset_name("machinecheck"), | ||
741 | }; | ||
742 | |||
743 | DEFINE_PER_CPU(struct sys_device, device_mce); | ||
744 | |||
745 | /* Why are there no generic functions for this? */ | ||
746 | #define ACCESSOR(name, var, start) \ | ||
747 | static ssize_t show_ ## name(struct sys_device *s, char *buf) { \ | ||
748 | return sprintf(buf, "%lx\n", (unsigned long)var); \ | ||
749 | } \ | ||
750 | static ssize_t set_ ## name(struct sys_device *s,const char *buf,size_t siz) { \ | ||
751 | char *end; \ | ||
752 | unsigned long new = simple_strtoul(buf, &end, 0); \ | ||
753 | if (end == buf) return -EINVAL; \ | ||
754 | var = new; \ | ||
755 | start; \ | ||
756 | return end-buf; \ | ||
757 | } \ | ||
758 | static SYSDEV_ATTR(name, 0644, show_ ## name, set_ ## name); | ||
759 | |||
760 | /* TBD should generate these dynamically based on number of available banks */ | ||
761 | ACCESSOR(bank0ctl,bank[0],mce_restart()) | ||
762 | ACCESSOR(bank1ctl,bank[1],mce_restart()) | ||
763 | ACCESSOR(bank2ctl,bank[2],mce_restart()) | ||
764 | ACCESSOR(bank3ctl,bank[3],mce_restart()) | ||
765 | ACCESSOR(bank4ctl,bank[4],mce_restart()) | ||
766 | ACCESSOR(bank5ctl,bank[5],mce_restart()) | ||
767 | |||
768 | static ssize_t show_trigger(struct sys_device *s, char *buf) | ||
769 | { | ||
770 | strcpy(buf, trigger); | ||
771 | strcat(buf, "\n"); | ||
772 | return strlen(trigger) + 1; | ||
773 | } | ||
774 | |||
775 | static ssize_t set_trigger(struct sys_device *s,const char *buf,size_t siz) | ||
776 | { | ||
777 | char *p; | ||
778 | int len; | ||
779 | strncpy(trigger, buf, sizeof(trigger)); | ||
780 | trigger[sizeof(trigger)-1] = 0; | ||
781 | len = strlen(trigger); | ||
782 | p = strchr(trigger, '\n'); | ||
783 | if (*p) *p = 0; | ||
784 | return len; | ||
785 | } | ||
786 | |||
787 | static SYSDEV_ATTR(trigger, 0644, show_trigger, set_trigger); | ||
788 | ACCESSOR(tolerant,tolerant,) | ||
789 | ACCESSOR(check_interval,check_interval,mce_restart()) | ||
790 | static struct sysdev_attribute *mce_attributes[] = { | ||
791 | &attr_bank0ctl, &attr_bank1ctl, &attr_bank2ctl, | ||
792 | &attr_bank3ctl, &attr_bank4ctl, &attr_bank5ctl, | ||
793 | &attr_tolerant, &attr_check_interval, &attr_trigger, | ||
794 | NULL | ||
795 | }; | ||
796 | |||
797 | /* Per cpu sysdev init. All of the cpus still share the same ctl bank */ | ||
798 | static __cpuinit int mce_create_device(unsigned int cpu) | ||
799 | { | ||
800 | int err; | ||
801 | int i; | ||
802 | |||
803 | if (!mce_available(&cpu_data(cpu))) | ||
804 | return -EIO; | ||
805 | |||
806 | memset(&per_cpu(device_mce, cpu).kobj, 0, sizeof(struct kobject)); | ||
807 | per_cpu(device_mce,cpu).id = cpu; | ||
808 | per_cpu(device_mce,cpu).cls = &mce_sysclass; | ||
809 | |||
810 | err = sysdev_register(&per_cpu(device_mce,cpu)); | ||
811 | if (err) | ||
812 | return err; | ||
813 | |||
814 | for (i = 0; mce_attributes[i]; i++) { | ||
815 | err = sysdev_create_file(&per_cpu(device_mce,cpu), | ||
816 | mce_attributes[i]); | ||
817 | if (err) | ||
818 | goto error; | ||
819 | } | ||
820 | |||
821 | return 0; | ||
822 | error: | ||
823 | while (i--) { | ||
824 | sysdev_remove_file(&per_cpu(device_mce,cpu), | ||
825 | mce_attributes[i]); | ||
826 | } | ||
827 | sysdev_unregister(&per_cpu(device_mce,cpu)); | ||
828 | |||
829 | return err; | ||
830 | } | ||
831 | |||
832 | static void mce_remove_device(unsigned int cpu) | ||
833 | { | ||
834 | int i; | ||
835 | |||
836 | for (i = 0; mce_attributes[i]; i++) | ||
837 | sysdev_remove_file(&per_cpu(device_mce,cpu), | ||
838 | mce_attributes[i]); | ||
839 | sysdev_unregister(&per_cpu(device_mce,cpu)); | ||
840 | } | ||
841 | |||
842 | /* Get notified when a cpu comes on/off. Be hotplug friendly. */ | ||
843 | static int | ||
844 | mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu) | ||
845 | { | ||
846 | unsigned int cpu = (unsigned long)hcpu; | ||
847 | int err = 0; | ||
848 | |||
849 | switch (action) { | ||
850 | case CPU_UP_PREPARE: | ||
851 | case CPU_UP_PREPARE_FROZEN: | ||
852 | err = mce_create_device(cpu); | ||
853 | break; | ||
854 | case CPU_UP_CANCELED: | ||
855 | case CPU_UP_CANCELED_FROZEN: | ||
856 | case CPU_DEAD: | ||
857 | case CPU_DEAD_FROZEN: | ||
858 | mce_remove_device(cpu); | ||
859 | break; | ||
860 | } | ||
861 | return err ? NOTIFY_BAD : NOTIFY_OK; | ||
862 | } | ||
863 | |||
864 | static struct notifier_block mce_cpu_notifier = { | ||
865 | .notifier_call = mce_cpu_callback, | ||
866 | }; | ||
867 | |||
868 | static __init int mce_init_device(void) | ||
869 | { | ||
870 | int err; | ||
871 | int i = 0; | ||
872 | |||
873 | if (!mce_available(&boot_cpu_data)) | ||
874 | return -EIO; | ||
875 | err = sysdev_class_register(&mce_sysclass); | ||
876 | if (err) | ||
877 | return err; | ||
878 | |||
879 | for_each_online_cpu(i) { | ||
880 | err = mce_create_device(i); | ||
881 | if (err) | ||
882 | return err; | ||
883 | } | ||
884 | |||
885 | register_hotcpu_notifier(&mce_cpu_notifier); | ||
886 | misc_register(&mce_log_device); | ||
887 | return err; | ||
888 | } | ||
889 | |||
890 | device_initcall(mce_init_device); | ||
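The character device registered above (misc minor 227, name "mcelog") is what
the userspace mcelog daemon reads. mce_read() only accepts a full-log read
from offset 0, sized by the two query ioctls, and reading drains the logged
entries. A minimal reader sketch, assuming the MCE_GET_RECORD_LEN /
MCE_GET_LOG_LEN ioctl numbers from <asm/mce.h> are visible to userspace (the
real mcelog tool ships its own copies of these definitions):

    #include <stdio.h>
    #include <stdlib.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <asm/mce.h>            /* MCE_GET_RECORD_LEN, MCE_GET_LOG_LEN */

    int main(void)
    {
            int fd, rec_len, log_len;
            char *buf;
            ssize_t n;

            fd = open("/dev/mcelog", O_RDONLY);
            if (fd < 0)
                    return 1;
            if (ioctl(fd, MCE_GET_RECORD_LEN, &rec_len) < 0 ||
                ioctl(fd, MCE_GET_LOG_LEN, &log_len) < 0)
                    return 1;

            /* mce_read() returns -EINVAL for anything but a full-log read */
            buf = malloc((size_t)rec_len * log_len);
            if (!buf)
                    return 1;
            n = read(fd, buf, (size_t)rec_len * log_len);
            if (n > 0)
                    printf("%zd bytes (%zd records) logged\n", n, n / rec_len);

            free(buf);
            close(fd);
            return 0;
    }

The tuning side of the same driver is the ACCESSOR()-generated sysfs files
(bank0ctl..bank5ctl, tolerant, check_interval, trigger) under the per-CPU
machinecheck sysdev; writes to the bank controls and check_interval go through
mce_restart(), which reprograms the banks on all CPUs.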
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd_64.c b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
new file mode 100644
index 000000000000..752fb16a817d
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd_64.c
@@ -0,0 +1,690 @@
1 | /* | ||
2 | * (c) 2005, 2006 Advanced Micro Devices, Inc. | ||
3 | * Your use of this code is subject to the terms and conditions of the | ||
4 | * GNU general public license version 2. See "COPYING" or | ||
5 | * http://www.gnu.org/licenses/gpl.html | ||
6 | * | ||
7 | * Written by Jacob Shin - AMD, Inc. | ||
8 | * | ||
9 | * Support : jacob.shin@amd.com | ||
10 | * | ||
11 | * April 2006 | ||
12 | * - added support for AMD Family 0x10 processors | ||
13 | * | ||
14 | * All MC4_MISCi registers are shared between multi-cores | ||
15 | */ | ||
16 | |||
17 | #include <linux/cpu.h> | ||
18 | #include <linux/errno.h> | ||
19 | #include <linux/init.h> | ||
20 | #include <linux/interrupt.h> | ||
21 | #include <linux/kobject.h> | ||
22 | #include <linux/notifier.h> | ||
23 | #include <linux/sched.h> | ||
24 | #include <linux/smp.h> | ||
25 | #include <linux/sysdev.h> | ||
26 | #include <linux/sysfs.h> | ||
27 | #include <asm/apic.h> | ||
28 | #include <asm/mce.h> | ||
29 | #include <asm/msr.h> | ||
30 | #include <asm/percpu.h> | ||
31 | #include <asm/idle.h> | ||
32 | |||
33 | #define PFX "mce_threshold: " | ||
34 | #define VERSION "version 1.1.1" | ||
35 | #define NR_BANKS 6 | ||
36 | #define NR_BLOCKS 9 | ||
37 | #define THRESHOLD_MAX 0xFFF | ||
38 | #define INT_TYPE_APIC 0x00020000 | ||
39 | #define MASK_VALID_HI 0x80000000 | ||
40 | #define MASK_CNTP_HI 0x40000000 | ||
41 | #define MASK_LOCKED_HI 0x20000000 | ||
42 | #define MASK_LVTOFF_HI 0x00F00000 | ||
43 | #define MASK_COUNT_EN_HI 0x00080000 | ||
44 | #define MASK_INT_TYPE_HI 0x00060000 | ||
45 | #define MASK_OVERFLOW_HI 0x00010000 | ||
46 | #define MASK_ERR_COUNT_HI 0x00000FFF | ||
47 | #define MASK_BLKPTR_LO 0xFF000000 | ||
48 | #define MCG_XBLK_ADDR 0xC0000400 | ||
49 | |||
50 | struct threshold_block { | ||
51 | unsigned int block; | ||
52 | unsigned int bank; | ||
53 | unsigned int cpu; | ||
54 | u32 address; | ||
55 | u16 interrupt_enable; | ||
56 | u16 threshold_limit; | ||
57 | struct kobject kobj; | ||
58 | struct list_head miscj; | ||
59 | }; | ||
60 | |||
61 | /* defaults used early on boot */ | ||
62 | static struct threshold_block threshold_defaults = { | ||
63 | .interrupt_enable = 0, | ||
64 | .threshold_limit = THRESHOLD_MAX, | ||
65 | }; | ||
66 | |||
67 | struct threshold_bank { | ||
68 | struct kobject kobj; | ||
69 | struct threshold_block *blocks; | ||
70 | cpumask_t cpus; | ||
71 | }; | ||
72 | static DEFINE_PER_CPU(struct threshold_bank *, threshold_banks[NR_BANKS]); | ||
73 | |||
74 | #ifdef CONFIG_SMP | ||
75 | static unsigned char shared_bank[NR_BANKS] = { | ||
76 | 0, 0, 0, 0, 1 | ||
77 | }; | ||
78 | #endif | ||
79 | |||
80 | static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */ | ||
81 | |||
82 | /* | ||
83 | * CPU Initialization | ||
84 | */ | ||
85 | |||
86 | /* must be called with correct cpu affinity */ | ||
87 | static void threshold_restart_bank(struct threshold_block *b, | ||
88 | int reset, u16 old_limit) | ||
89 | { | ||
90 | u32 mci_misc_hi, mci_misc_lo; | ||
91 | |||
92 | rdmsr(b->address, mci_misc_lo, mci_misc_hi); | ||
93 | |||
94 | if (b->threshold_limit < (mci_misc_hi & THRESHOLD_MAX)) | ||
95 | reset = 1; /* limit cannot be lower than err count */ | ||
96 | |||
97 | if (reset) { /* reset err count and overflow bit */ | ||
98 | mci_misc_hi = | ||
99 | (mci_misc_hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) | | ||
100 | (THRESHOLD_MAX - b->threshold_limit); | ||
101 | } else if (old_limit) { /* change limit w/o reset */ | ||
102 | int new_count = (mci_misc_hi & THRESHOLD_MAX) + | ||
103 | (old_limit - b->threshold_limit); | ||
104 | mci_misc_hi = (mci_misc_hi & ~MASK_ERR_COUNT_HI) | | ||
105 | (new_count & THRESHOLD_MAX); | ||
106 | } | ||
107 | |||
108 | b->interrupt_enable ? | ||
109 | (mci_misc_hi = (mci_misc_hi & ~MASK_INT_TYPE_HI) | INT_TYPE_APIC) : | ||
110 | (mci_misc_hi &= ~MASK_INT_TYPE_HI); | ||
111 | |||
112 | mci_misc_hi |= MASK_COUNT_EN_HI; | ||
113 | wrmsr(b->address, mci_misc_lo, mci_misc_hi); | ||
114 | } | ||
115 | |||
116 | /* cpu init entry point, called from mce.c with preempt off */ | ||
117 | void __cpuinit mce_amd_feature_init(struct cpuinfo_x86 *c) | ||
118 | { | ||
119 | unsigned int bank, block; | ||
120 | unsigned int cpu = smp_processor_id(); | ||
121 | u32 low = 0, high = 0, address = 0; | ||
122 | |||
123 | for (bank = 0; bank < NR_BANKS; ++bank) { | ||
124 | for (block = 0; block < NR_BLOCKS; ++block) { | ||
125 | if (block == 0) | ||
126 | address = MSR_IA32_MC0_MISC + bank * 4; | ||
127 | else if (block == 1) { | ||
128 | address = (low & MASK_BLKPTR_LO) >> 21; | ||
129 | if (!address) | ||
130 | break; | ||
131 | address += MCG_XBLK_ADDR; | ||
132 | } | ||
133 | else | ||
134 | ++address; | ||
135 | |||
136 | if (rdmsr_safe(address, &low, &high)) | ||
137 | break; | ||
138 | |||
139 | if (!(high & MASK_VALID_HI)) { | ||
140 | if (block) | ||
141 | continue; | ||
142 | else | ||
143 | break; | ||
144 | } | ||
145 | |||
146 | if (!(high & MASK_CNTP_HI) || | ||
147 | (high & MASK_LOCKED_HI)) | ||
148 | continue; | ||
149 | |||
150 | if (!block) | ||
151 | per_cpu(bank_map, cpu) |= (1 << bank); | ||
152 | #ifdef CONFIG_SMP | ||
153 | if (shared_bank[bank] && c->cpu_core_id) | ||
154 | break; | ||
155 | #endif | ||
156 | high &= ~MASK_LVTOFF_HI; | ||
157 | high |= K8_APIC_EXT_LVT_ENTRY_THRESHOLD << 20; | ||
158 | wrmsr(address, low, high); | ||
159 | |||
160 | setup_APIC_extended_lvt(K8_APIC_EXT_LVT_ENTRY_THRESHOLD, | ||
161 | THRESHOLD_APIC_VECTOR, | ||
162 | K8_APIC_EXT_INT_MSG_FIX, 0); | ||
163 | |||
164 | threshold_defaults.address = address; | ||
165 | threshold_restart_bank(&threshold_defaults, 0, 0); | ||
166 | } | ||
167 | } | ||
168 | } | ||
169 | |||
170 | /* | ||
171 | * APIC Interrupt Handler | ||
172 | */ | ||
173 | |||
174 | /* | ||
175 | * threshold interrupt handler will service THRESHOLD_APIC_VECTOR. | ||
176 | * the interrupt goes off when error_count reaches threshold_limit. | ||
177 | * the handler will simply log mcelog w/ software defined bank number. | ||
178 | */ | ||
179 | asmlinkage void mce_threshold_interrupt(void) | ||
180 | { | ||
181 | unsigned int bank, block; | ||
182 | struct mce m; | ||
183 | u32 low = 0, high = 0, address = 0; | ||
184 | |||
185 | ack_APIC_irq(); | ||
186 | exit_idle(); | ||
187 | irq_enter(); | ||
188 | |||
189 | memset(&m, 0, sizeof(m)); | ||
190 | rdtscll(m.tsc); | ||
191 | m.cpu = smp_processor_id(); | ||
192 | |||
193 | /* assume first bank caused it */ | ||
194 | for (bank = 0; bank < NR_BANKS; ++bank) { | ||
195 | if (!(per_cpu(bank_map, m.cpu) & (1 << bank))) | ||
196 | continue; | ||
197 | for (block = 0; block < NR_BLOCKS; ++block) { | ||
198 | if (block == 0) | ||
199 | address = MSR_IA32_MC0_MISC + bank * 4; | ||
200 | else if (block == 1) { | ||
201 | address = (low & MASK_BLKPTR_LO) >> 21; | ||
202 | if (!address) | ||
203 | break; | ||
204 | address += MCG_XBLK_ADDR; | ||
205 | } | ||
206 | else | ||
207 | ++address; | ||
208 | |||
209 | if (rdmsr_safe(address, &low, &high)) | ||
210 | break; | ||
211 | |||
212 | if (!(high & MASK_VALID_HI)) { | ||
213 | if (block) | ||
214 | continue; | ||
215 | else | ||
216 | break; | ||
217 | } | ||
218 | |||
219 | if (!(high & MASK_CNTP_HI) || | ||
220 | (high & MASK_LOCKED_HI)) | ||
221 | continue; | ||
222 | |||
223 | /* Log the machine check that caused the threshold | ||
224 | event. */ | ||
225 | do_machine_check(NULL, 0); | ||
226 | |||
227 | if (high & MASK_OVERFLOW_HI) { | ||
228 | rdmsrl(address, m.misc); | ||
229 | rdmsrl(MSR_IA32_MC0_STATUS + bank * 4, | ||
230 | m.status); | ||
231 | m.bank = K8_MCE_THRESHOLD_BASE | ||
232 | + bank * NR_BLOCKS | ||
233 | + block; | ||
234 | mce_log(&m); | ||
235 | goto out; | ||
236 | } | ||
237 | } | ||
238 | } | ||
239 | out: | ||
240 | add_pda(irq_threshold_count, 1); | ||
241 | irq_exit(); | ||
242 | } | ||
243 | |||
244 | /* | ||
245 | * Sysfs Interface | ||
246 | */ | ||
247 | |||
248 | struct threshold_attr { | ||
249 | struct attribute attr; | ||
250 | ssize_t(*show) (struct threshold_block *, char *); | ||
251 | ssize_t(*store) (struct threshold_block *, const char *, size_t count); | ||
252 | }; | ||
253 | |||
254 | static cpumask_t affinity_set(unsigned int cpu) | ||
255 | { | ||
256 | cpumask_t oldmask = current->cpus_allowed; | ||
257 | cpumask_t newmask = CPU_MASK_NONE; | ||
258 | cpu_set(cpu, newmask); | ||
259 | set_cpus_allowed(current, newmask); | ||
260 | return oldmask; | ||
261 | } | ||
262 | |||
263 | static void affinity_restore(cpumask_t oldmask) | ||
264 | { | ||
265 | set_cpus_allowed(current, oldmask); | ||
266 | } | ||
267 | |||
268 | #define SHOW_FIELDS(name) \ | ||
269 | static ssize_t show_ ## name(struct threshold_block * b, char *buf) \ | ||
270 | { \ | ||
271 | return sprintf(buf, "%lx\n", (unsigned long) b->name); \ | ||
272 | } | ||
273 | SHOW_FIELDS(interrupt_enable) | ||
274 | SHOW_FIELDS(threshold_limit) | ||
275 | |||
276 | static ssize_t store_interrupt_enable(struct threshold_block *b, | ||
277 | const char *buf, size_t count) | ||
278 | { | ||
279 | char *end; | ||
280 | cpumask_t oldmask; | ||
281 | unsigned long new = simple_strtoul(buf, &end, 0); | ||
282 | if (end == buf) | ||
283 | return -EINVAL; | ||
284 | b->interrupt_enable = !!new; | ||
285 | |||
286 | oldmask = affinity_set(b->cpu); | ||
287 | threshold_restart_bank(b, 0, 0); | ||
288 | affinity_restore(oldmask); | ||
289 | |||
290 | return end - buf; | ||
291 | } | ||
292 | |||
293 | static ssize_t store_threshold_limit(struct threshold_block *b, | ||
294 | const char *buf, size_t count) | ||
295 | { | ||
296 | char *end; | ||
297 | cpumask_t oldmask; | ||
298 | u16 old; | ||
299 | unsigned long new = simple_strtoul(buf, &end, 0); | ||
300 | if (end == buf) | ||
301 | return -EINVAL; | ||
302 | if (new > THRESHOLD_MAX) | ||
303 | new = THRESHOLD_MAX; | ||
304 | if (new < 1) | ||
305 | new = 1; | ||
306 | old = b->threshold_limit; | ||
307 | b->threshold_limit = new; | ||
308 | |||
309 | oldmask = affinity_set(b->cpu); | ||
310 | threshold_restart_bank(b, 0, old); | ||
311 | affinity_restore(oldmask); | ||
312 | |||
313 | return end - buf; | ||
314 | } | ||
315 | |||
316 | static ssize_t show_error_count(struct threshold_block *b, char *buf) | ||
317 | { | ||
318 | u32 high, low; | ||
319 | cpumask_t oldmask; | ||
320 | oldmask = affinity_set(b->cpu); | ||
321 | rdmsr(b->address, low, high); | ||
322 | affinity_restore(oldmask); | ||
323 | return sprintf(buf, "%x\n", | ||
324 | (high & 0xFFF) - (THRESHOLD_MAX - b->threshold_limit)); | ||
325 | } | ||
326 | |||
327 | static ssize_t store_error_count(struct threshold_block *b, | ||
328 | const char *buf, size_t count) | ||
329 | { | ||
330 | cpumask_t oldmask; | ||
331 | oldmask = affinity_set(b->cpu); | ||
332 | threshold_restart_bank(b, 1, 0); | ||
333 | affinity_restore(oldmask); | ||
334 | return 1; | ||
335 | } | ||
336 | |||
337 | #define THRESHOLD_ATTR(_name,_mode,_show,_store) { \ | ||
338 | .attr = {.name = __stringify(_name), .mode = _mode }, \ | ||
339 | .show = _show, \ | ||
340 | .store = _store, \ | ||
341 | }; | ||
342 | |||
343 | #define RW_ATTR(name) \ | ||
344 | static struct threshold_attr name = \ | ||
345 | THRESHOLD_ATTR(name, 0644, show_## name, store_## name) | ||
346 | |||
347 | RW_ATTR(interrupt_enable); | ||
348 | RW_ATTR(threshold_limit); | ||
349 | RW_ATTR(error_count); | ||
350 | |||
351 | static struct attribute *default_attrs[] = { | ||
352 | &interrupt_enable.attr, | ||
353 | &threshold_limit.attr, | ||
354 | &error_count.attr, | ||
355 | NULL | ||
356 | }; | ||
357 | |||
358 | #define to_block(k) container_of(k, struct threshold_block, kobj) | ||
359 | #define to_attr(a) container_of(a, struct threshold_attr, attr) | ||
360 | |||
361 | static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf) | ||
362 | { | ||
363 | struct threshold_block *b = to_block(kobj); | ||
364 | struct threshold_attr *a = to_attr(attr); | ||
365 | ssize_t ret; | ||
366 | ret = a->show ? a->show(b, buf) : -EIO; | ||
367 | return ret; | ||
368 | } | ||
369 | |||
370 | static ssize_t store(struct kobject *kobj, struct attribute *attr, | ||
371 | const char *buf, size_t count) | ||
372 | { | ||
373 | struct threshold_block *b = to_block(kobj); | ||
374 | struct threshold_attr *a = to_attr(attr); | ||
375 | ssize_t ret; | ||
376 | ret = a->store ? a->store(b, buf, count) : -EIO; | ||
377 | return ret; | ||
378 | } | ||
379 | |||
380 | static struct sysfs_ops threshold_ops = { | ||
381 | .show = show, | ||
382 | .store = store, | ||
383 | }; | ||
384 | |||
385 | static struct kobj_type threshold_ktype = { | ||
386 | .sysfs_ops = &threshold_ops, | ||
387 | .default_attrs = default_attrs, | ||
388 | }; | ||
389 | |||
390 | static __cpuinit int allocate_threshold_blocks(unsigned int cpu, | ||
391 | unsigned int bank, | ||
392 | unsigned int block, | ||
393 | u32 address) | ||
394 | { | ||
395 | int err; | ||
396 | u32 low, high; | ||
397 | struct threshold_block *b = NULL; | ||
398 | |||
399 | if ((bank >= NR_BANKS) || (block >= NR_BLOCKS)) | ||
400 | return 0; | ||
401 | |||
402 | if (rdmsr_safe(address, &low, &high)) | ||
403 | return 0; | ||
404 | |||
405 | if (!(high & MASK_VALID_HI)) { | ||
406 | if (block) | ||
407 | goto recurse; | ||
408 | else | ||
409 | return 0; | ||
410 | } | ||
411 | |||
412 | if (!(high & MASK_CNTP_HI) || | ||
413 | (high & MASK_LOCKED_HI)) | ||
414 | goto recurse; | ||
415 | |||
416 | b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL); | ||
417 | if (!b) | ||
418 | return -ENOMEM; | ||
419 | |||
420 | b->block = block; | ||
421 | b->bank = bank; | ||
422 | b->cpu = cpu; | ||
423 | b->address = address; | ||
424 | b->interrupt_enable = 0; | ||
425 | b->threshold_limit = THRESHOLD_MAX; | ||
426 | |||
427 | INIT_LIST_HEAD(&b->miscj); | ||
428 | |||
429 | if (per_cpu(threshold_banks, cpu)[bank]->blocks) | ||
430 | list_add(&b->miscj, | ||
431 | &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj); | ||
432 | else | ||
433 | per_cpu(threshold_banks, cpu)[bank]->blocks = b; | ||
434 | |||
435 | kobject_set_name(&b->kobj, "misc%i", block); | ||
436 | b->kobj.parent = &per_cpu(threshold_banks, cpu)[bank]->kobj; | ||
437 | b->kobj.ktype = &threshold_ktype; | ||
438 | err = kobject_register(&b->kobj); | ||
439 | if (err) | ||
440 | goto out_free; | ||
441 | recurse: | ||
442 | if (!block) { | ||
443 | address = (low & MASK_BLKPTR_LO) >> 21; | ||
444 | if (!address) | ||
445 | return 0; | ||
446 | address += MCG_XBLK_ADDR; | ||
447 | } else | ||
448 | ++address; | ||
449 | |||
450 | err = allocate_threshold_blocks(cpu, bank, ++block, address); | ||
451 | if (err) | ||
452 | goto out_free; | ||
453 | |||
454 | return err; | ||
455 | |||
456 | out_free: | ||
457 | if (b) { | ||
458 | kobject_unregister(&b->kobj); | ||
459 | kfree(b); | ||
460 | } | ||
461 | return err; | ||
462 | } | ||
463 | |||
464 | /* symlinks sibling shared banks to first core. first core owns dir/files. */ | ||
465 | static __cpuinit int threshold_create_bank(unsigned int cpu, unsigned int bank) | ||
466 | { | ||
467 | int i, err = 0; | ||
468 | struct threshold_bank *b = NULL; | ||
469 | cpumask_t oldmask = CPU_MASK_NONE; | ||
470 | char name[32]; | ||
471 | |||
472 | sprintf(name, "threshold_bank%i", bank); | ||
473 | |||
474 | #ifdef CONFIG_SMP | ||
475 | if (cpu_data(cpu).cpu_core_id && shared_bank[bank]) { /* symlink */ | ||
476 | i = first_cpu(per_cpu(cpu_core_map, cpu)); | ||
477 | |||
478 | /* first core not up yet */ | ||
479 | if (cpu_data(i).cpu_core_id) | ||
480 | goto out; | ||
481 | |||
482 | /* already linked */ | ||
483 | if (per_cpu(threshold_banks, cpu)[bank]) | ||
484 | goto out; | ||
485 | |||
486 | b = per_cpu(threshold_banks, i)[bank]; | ||
487 | |||
488 | if (!b) | ||
489 | goto out; | ||
490 | |||
491 | err = sysfs_create_link(&per_cpu(device_mce, cpu).kobj, | ||
492 | &b->kobj, name); | ||
493 | if (err) | ||
494 | goto out; | ||
495 | |||
496 | b->cpus = per_cpu(cpu_core_map, cpu); | ||
497 | per_cpu(threshold_banks, cpu)[bank] = b; | ||
498 | goto out; | ||
499 | } | ||
500 | #endif | ||
501 | |||
502 | b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL); | ||
503 | if (!b) { | ||
504 | err = -ENOMEM; | ||
505 | goto out; | ||
506 | } | ||
507 | |||
508 | kobject_set_name(&b->kobj, "threshold_bank%i", bank); | ||
509 | b->kobj.parent = &per_cpu(device_mce, cpu).kobj; | ||
510 | #ifndef CONFIG_SMP | ||
511 | b->cpus = CPU_MASK_ALL; | ||
512 | #else | ||
513 | b->cpus = per_cpu(cpu_core_map, cpu); | ||
514 | #endif | ||
515 | err = kobject_register(&b->kobj); | ||
516 | if (err) | ||
517 | goto out_free; | ||
518 | |||
519 | per_cpu(threshold_banks, cpu)[bank] = b; | ||
520 | |||
521 | oldmask = affinity_set(cpu); | ||
522 | err = allocate_threshold_blocks(cpu, bank, 0, | ||
523 | MSR_IA32_MC0_MISC + bank * 4); | ||
524 | affinity_restore(oldmask); | ||
525 | |||
526 | if (err) | ||
527 | goto out_free; | ||
528 | |||
529 | for_each_cpu_mask(i, b->cpus) { | ||
530 | if (i == cpu) | ||
531 | continue; | ||
532 | |||
533 | err = sysfs_create_link(&per_cpu(device_mce, i).kobj, | ||
534 | &b->kobj, name); | ||
535 | if (err) | ||
536 | goto out; | ||
537 | |||
538 | per_cpu(threshold_banks, i)[bank] = b; | ||
539 | } | ||
540 | |||
541 | goto out; | ||
542 | |||
543 | out_free: | ||
544 | per_cpu(threshold_banks, cpu)[bank] = NULL; | ||
545 | kfree(b); | ||
546 | out: | ||
547 | return err; | ||
548 | } | ||
549 | |||
550 | /* create dir/files for all valid threshold banks */ | ||
551 | static __cpuinit int threshold_create_device(unsigned int cpu) | ||
552 | { | ||
553 | unsigned int bank; | ||
554 | int err = 0; | ||
555 | |||
556 | for (bank = 0; bank < NR_BANKS; ++bank) { | ||
557 | if (!(per_cpu(bank_map, cpu) & 1 << bank)) | ||
558 | continue; | ||
559 | err = threshold_create_bank(cpu, bank); | ||
560 | if (err) | ||
561 | goto out; | ||
562 | } | ||
563 | out: | ||
564 | return err; | ||
565 | } | ||
566 | |||
567 | /* | ||
568 | * let's be hotplug friendly. | ||
569 | * in case of multiple core processors, the first core always takes ownership | ||
570 | * of shared sysfs dir/files, and rest of the cores will be symlinked to it. | ||
571 | */ | ||
572 | |||
573 | static void deallocate_threshold_block(unsigned int cpu, | ||
574 | unsigned int bank) | ||
575 | { | ||
576 | struct threshold_block *pos = NULL; | ||
577 | struct threshold_block *tmp = NULL; | ||
578 | struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank]; | ||
579 | |||
580 | if (!head) | ||
581 | return; | ||
582 | |||
583 | list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) { | ||
584 | kobject_unregister(&pos->kobj); | ||
585 | list_del(&pos->miscj); | ||
586 | kfree(pos); | ||
587 | } | ||
588 | |||
589 | kfree(per_cpu(threshold_banks, cpu)[bank]->blocks); | ||
590 | per_cpu(threshold_banks, cpu)[bank]->blocks = NULL; | ||
591 | } | ||
592 | |||
593 | static void threshold_remove_bank(unsigned int cpu, int bank) | ||
594 | { | ||
595 | int i = 0; | ||
596 | struct threshold_bank *b; | ||
597 | char name[32]; | ||
598 | |||
599 | b = per_cpu(threshold_banks, cpu)[bank]; | ||
600 | |||
601 | if (!b) | ||
602 | return; | ||
603 | |||
604 | if (!b->blocks) | ||
605 | goto free_out; | ||
606 | |||
607 | sprintf(name, "threshold_bank%i", bank); | ||
608 | |||
609 | #ifdef CONFIG_SMP | ||
610 | /* sibling symlink */ | ||
611 | if (shared_bank[bank] && b->blocks->cpu != cpu) { | ||
612 | sysfs_remove_link(&per_cpu(device_mce, cpu).kobj, name); | ||
613 | per_cpu(threshold_banks, cpu)[bank] = NULL; | ||
614 | return; | ||
615 | } | ||
616 | #endif | ||
617 | |||
618 | /* remove all sibling symlinks before unregistering */ | ||
619 | for_each_cpu_mask(i, b->cpus) { | ||
620 | if (i == cpu) | ||
621 | continue; | ||
622 | |||
623 | sysfs_remove_link(&per_cpu(device_mce, i).kobj, name); | ||
624 | per_cpu(threshold_banks, i)[bank] = NULL; | ||
625 | } | ||
626 | |||
627 | deallocate_threshold_block(cpu, bank); | ||
628 | |||
629 | free_out: | ||
630 | kobject_unregister(&b->kobj); | ||
631 | kfree(b); | ||
632 | per_cpu(threshold_banks, cpu)[bank] = NULL; | ||
633 | } | ||
634 | |||
635 | static void threshold_remove_device(unsigned int cpu) | ||
636 | { | ||
637 | unsigned int bank; | ||
638 | |||
639 | for (bank = 0; bank < NR_BANKS; ++bank) { | ||
640 | if (!(per_cpu(bank_map, cpu) & 1 << bank)) | ||
641 | continue; | ||
642 | threshold_remove_bank(cpu, bank); | ||
643 | } | ||
644 | } | ||
645 | |||
646 | /* get notified when a cpu comes on/off */ | ||
647 | static int threshold_cpu_callback(struct notifier_block *nfb, | ||
648 | unsigned long action, void *hcpu) | ||
649 | { | ||
650 | /* cpu was unsigned int to begin with */ | ||
651 | unsigned int cpu = (unsigned long)hcpu; | ||
652 | |||
653 | if (cpu >= NR_CPUS) | ||
654 | goto out; | ||
655 | |||
656 | switch (action) { | ||
657 | case CPU_ONLINE: | ||
658 | case CPU_ONLINE_FROZEN: | ||
659 | threshold_create_device(cpu); | ||
660 | break; | ||
661 | case CPU_DEAD: | ||
662 | case CPU_DEAD_FROZEN: | ||
663 | threshold_remove_device(cpu); | ||
664 | break; | ||
665 | default: | ||
666 | break; | ||
667 | } | ||
668 | out: | ||
669 | return NOTIFY_OK; | ||
670 | } | ||
671 | |||
672 | static struct notifier_block threshold_cpu_notifier = { | ||
673 | .notifier_call = threshold_cpu_callback, | ||
674 | }; | ||
675 | |||
676 | static __init int threshold_init_device(void) | ||
677 | { | ||
678 | unsigned lcpu = 0; | ||
679 | |||
680 | /* to hit CPUs online before the notifier is up */ | ||
681 | for_each_online_cpu(lcpu) { | ||
682 | int err = threshold_create_device(lcpu); | ||
683 | if (err) | ||
684 | return err; | ||
685 | } | ||
686 | register_hotcpu_notifier(&threshold_cpu_notifier); | ||
687 | return 0; | ||
688 | } | ||
689 | |||
690 | device_initcall(threshold_init_device); | ||
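The sysfs tree built by threshold_create_bank()/allocate_threshold_blocks()
hangs the per-bank directories off the per-CPU machinecheck sysdev, so the
threshold block of, say, bank 4 on CPU 0 shows up as
/sys/devices/system/machinecheck/machinecheck0/threshold_bank4/misc0/ with
interrupt_enable, threshold_limit and error_count files. A rough usage sketch
(the exact bank/block paths are an illustrative assumption -- which ones exist
depends on the CPU):

    #include <stdio.h>

    #define BLOCK "/sys/devices/system/machinecheck/machinecheck0/threshold_bank4/misc0/"

    int main(void)
    {
            char line[64];
            FILE *f;

            /* current error count of this threshold block */
            f = fopen(BLOCK "error_count", "r");
            if (f) {
                    if (fgets(line, sizeof(line), f))
                            printf("error_count: %s", line);
                    fclose(f);
            }

            /* store_threshold_limit() clamps the value to 1..0xFFF and
             * reprograms the MCi_MISC counter via threshold_restart_bank() */
            f = fopen(BLOCK "threshold_limit", "w");
            if (f) {
                    fputs("100\n", f);
                    fclose(f);
            }
            return 0;
    }

On multi-core parts the directory is owned by the first core and the sibling
CPUs only carry symlinks, which is what the shared_bank[] handling and the
sysfs_create_link() calls above arrange.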
diff --git a/arch/x86/kernel/cpu/mcheck/mce_intel_64.c b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
new file mode 100644
index 000000000000..c17eaf5dd6dd
--- /dev/null
+++ b/arch/x86/kernel/cpu/mcheck/mce_intel_64.c
@@ -0,0 +1,90 @@
1 | /* | ||
2 | * Intel specific MCE features. | ||
3 | * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca> | ||
4 | */ | ||
5 | |||
6 | #include <linux/init.h> | ||
7 | #include <linux/interrupt.h> | ||
8 | #include <linux/percpu.h> | ||
9 | #include <asm/processor.h> | ||
10 | #include <asm/msr.h> | ||
11 | #include <asm/mce.h> | ||
12 | #include <asm/hw_irq.h> | ||
13 | #include <asm/idle.h> | ||
14 | #include <asm/therm_throt.h> | ||
15 | |||
16 | asmlinkage void smp_thermal_interrupt(void) | ||
17 | { | ||
18 | __u64 msr_val; | ||
19 | |||
20 | ack_APIC_irq(); | ||
21 | |||
22 | exit_idle(); | ||
23 | irq_enter(); | ||
24 | |||
25 | rdmsrl(MSR_IA32_THERM_STATUS, msr_val); | ||
26 | if (therm_throt_process(msr_val & 1)) | ||
27 | mce_log_therm_throt_event(smp_processor_id(), msr_val); | ||
28 | |||
29 | add_pda(irq_thermal_count, 1); | ||
30 | irq_exit(); | ||
31 | } | ||
32 | |||
33 | static void __cpuinit intel_init_thermal(struct cpuinfo_x86 *c) | ||
34 | { | ||
35 | u32 l, h; | ||
36 | int tm2 = 0; | ||
37 | unsigned int cpu = smp_processor_id(); | ||
38 | |||
39 | if (!cpu_has(c, X86_FEATURE_ACPI)) | ||
40 | return; | ||
41 | |||
42 | if (!cpu_has(c, X86_FEATURE_ACC)) | ||
43 | return; | ||
44 | |||
45 | /* first check if TM1 is already enabled by the BIOS, in which | ||
46 | * case there might be some SMM goo which handles it, so we can't even | ||
47 | * put a handler since it might be delivered via SMI already. | ||
48 | */ | ||
49 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
50 | h = apic_read(APIC_LVTTHMR); | ||
51 | if ((l & (1 << 3)) && (h & APIC_DM_SMI)) { | ||
52 | printk(KERN_DEBUG | ||
53 | "CPU%d: Thermal monitoring handled by SMI\n", cpu); | ||
54 | return; | ||
55 | } | ||
56 | |||
57 | if (cpu_has(c, X86_FEATURE_TM2) && (l & (1 << 13))) | ||
58 | tm2 = 1; | ||
59 | |||
60 | if (h & APIC_VECTOR_MASK) { | ||
61 | printk(KERN_DEBUG | ||
62 | "CPU%d: Thermal LVT vector (%#x) already " | ||
63 | "installed\n", cpu, (h & APIC_VECTOR_MASK)); | ||
64 | return; | ||
65 | } | ||
66 | |||
67 | h = THERMAL_APIC_VECTOR; | ||
68 | h |= (APIC_DM_FIXED | APIC_LVT_MASKED); | ||
69 | apic_write(APIC_LVTTHMR, h); | ||
70 | |||
71 | rdmsr(MSR_IA32_THERM_INTERRUPT, l, h); | ||
72 | wrmsr(MSR_IA32_THERM_INTERRUPT, l | 0x03, h); | ||
73 | |||
74 | rdmsr(MSR_IA32_MISC_ENABLE, l, h); | ||
75 | wrmsr(MSR_IA32_MISC_ENABLE, l | (1 << 3), h); | ||
76 | |||
77 | l = apic_read(APIC_LVTTHMR); | ||
78 | apic_write(APIC_LVTTHMR, l & ~APIC_LVT_MASKED); | ||
79 | printk(KERN_INFO "CPU%d: Thermal monitoring enabled (%s)\n", | ||
80 | cpu, tm2 ? "TM2" : "TM1"); | ||
81 | |||
82 | /* enable thermal throttle processing */ | ||
83 | atomic_set(&therm_throt_en, 1); | ||
84 | return; | ||
85 | } | ||
86 | |||
87 | void __cpuinit mce_intel_feature_init(struct cpuinfo_x86 *c) | ||
88 | { | ||
89 | intel_init_thermal(c); | ||
90 | } | ||
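Thermal events logged via mce_log_therm_throt_event() land in the same
/dev/mcelog buffer as real bank errors, with the software bank number
MCE_THERMAL_BANK and the raw THERM_STATUS MSR value saved in the status
field. A consumer walking the records can tell them apart roughly like this
(a sketch, assuming struct mce and MCE_THERMAL_BANK are visible to it from
<asm/mce.h>):

    #include <asm/mce.h>

    static int is_thermal_record(const struct mce *m)
    {
            /* m->status holds the raw THERM_STATUS value here; bit 0 is the
             * "throttling currently asserted" flag that smp_thermal_interrupt()
             * passed to therm_throt_process() above */
            return m->bank == MCE_THERMAL_BANK;
    }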