diff options
author | Andi Kleen <andi@firstfloor.org> | 2009-05-27 15:56:54 -0400 |
---|---|---|
committer | H. Peter Anvin <hpa@zytor.com> | 2009-06-03 17:44:05 -0400 |
commit | ccc3c3192ae78dd56dcdf5353fd1a9ef5f9a3e2b (patch) | |
tree | 65f380bd68bdd496f42ac8de88ccb74e340cc968 /arch/x86/kernel/cpu/mcheck/mce.c | |
parent | bd19a5e6b73df276e1ccedf9059e9ee70c372d7d (diff) |
x86, mce: implement bootstrapping for machine check wakeups
Machine checks support waking up the mcelog daemon quickly.
The original wake up code for this was pretty ugly, relying on
an idle notifier and a special process flag. The reason it did
it this way is that the machine check handler is not subject
to normal interrupt locking rules so it's not safe
to call wake_up(). Instead it set a process flag
and then either did the wakeup in the syscall return
or in the idle notifier.
This patch adds a new "bootstrapping" method as a replacement.
The idea is that the handler checks if it's in a state where
it is unsafe to call wake_up(). If it's safe it calls it directly.
When it's not safe -- that is, it interrupted a critical
section with interrupts disabled -- it uses a new "self IPI" to trigger
an IPI to its own CPU. This can be done safely because IPI
triggers are atomic with some care. The IPI is raised
once the interrupts are reenabled and can then safely call
wake_up().
When APICs are disabled the event is just queued and will be picked up
eventually by the next polling timer. I think that's a reasonable
compromise, since it should only happen quite rarely.
Contains fixes from Ying Huang.
[ solve conflict on irqinit, make it work on 32bit (entry_arch.h) - HS ]
Signed-off-by: Andi Kleen <ak@linux.intel.com>
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
Diffstat (limited to 'arch/x86/kernel/cpu/mcheck/mce.c')
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce.c | 54 |
1 files changed, 54 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index 5031814ac943..121781627858 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -10,6 +10,7 @@ | |||
10 | #include <linux/thread_info.h> | 10 | #include <linux/thread_info.h> |
11 | #include <linux/capability.h> | 11 | #include <linux/capability.h> |
12 | #include <linux/miscdevice.h> | 12 | #include <linux/miscdevice.h> |
13 | #include <linux/interrupt.h> | ||
13 | #include <linux/ratelimit.h> | 14 | #include <linux/ratelimit.h> |
14 | #include <linux/kallsyms.h> | 15 | #include <linux/kallsyms.h> |
15 | #include <linux/rcupdate.h> | 16 | #include <linux/rcupdate.h> |
@@ -32,7 +33,10 @@ | |||
32 | #include <linux/fs.h> | 33 | #include <linux/fs.h> |
33 | 34 | ||
34 | #include <asm/processor.h> | 35 | #include <asm/processor.h> |
36 | #include <asm/hw_irq.h> | ||
37 | #include <asm/apic.h> | ||
35 | #include <asm/idle.h> | 38 | #include <asm/idle.h> |
39 | #include <asm/ipi.h> | ||
36 | #include <asm/mce.h> | 40 | #include <asm/mce.h> |
37 | #include <asm/msr.h> | 41 | #include <asm/msr.h> |
38 | 42 | ||
@@ -287,6 +291,54 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs) | |||
287 | } | 291 | } |
288 | } | 292 | } |
289 | 293 | ||
294 | #ifdef CONFIG_X86_LOCAL_APIC | ||
295 | /* | ||
296 | * Called after interrupts have been reenabled again | ||
297 | * when a MCE happened during an interrupts off region | ||
298 | * in the kernel. | ||
299 | */ | ||
300 | asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs) | ||
301 | { | ||
302 | ack_APIC_irq(); | ||
303 | exit_idle(); | ||
304 | irq_enter(); | ||
305 | mce_notify_user(); | ||
306 | irq_exit(); | ||
307 | } | ||
308 | #endif | ||
309 | |||
310 | static void mce_report_event(struct pt_regs *regs) | ||
311 | { | ||
312 | if (regs->flags & (X86_VM_MASK|X86_EFLAGS_IF)) { | ||
313 | mce_notify_user(); | ||
314 | return; | ||
315 | } | ||
316 | |||
317 | #ifdef CONFIG_X86_LOCAL_APIC | ||
318 | /* | ||
319 | * Without APIC do not notify. The event will be picked | ||
320 | * up eventually. | ||
321 | */ | ||
322 | if (!cpu_has_apic) | ||
323 | return; | ||
324 | |||
325 | /* | ||
326 | * When interrupts are disabled we cannot use | ||
327 | * kernel services safely. Trigger an self interrupt | ||
328 | * through the APIC to instead do the notification | ||
329 | * after interrupts are reenabled again. | ||
330 | */ | ||
331 | apic->send_IPI_self(MCE_SELF_VECTOR); | ||
332 | |||
333 | /* | ||
334 | * Wait for idle afterwards again so that we don't leave the | ||
335 | * APIC in a non idle state because the normal APIC writes | ||
336 | * cannot exclude us. | ||
337 | */ | ||
338 | apic_wait_icr_idle(); | ||
339 | #endif | ||
340 | } | ||
341 | |||
290 | DEFINE_PER_CPU(unsigned, mce_poll_count); | 342 | DEFINE_PER_CPU(unsigned, mce_poll_count); |
291 | 343 | ||
292 | /* | 344 | /* |
@@ -530,6 +582,8 @@ void do_machine_check(struct pt_regs *regs, long error_code) | |||
530 | /* notify userspace ASAP */ | 582 | /* notify userspace ASAP */ |
531 | set_thread_flag(TIF_MCE_NOTIFY); | 583 | set_thread_flag(TIF_MCE_NOTIFY); |
532 | 584 | ||
585 | mce_report_event(regs); | ||
586 | |||
533 | /* the last thing we do is clear state */ | 587 | /* the last thing we do is clear state */ |
534 | for (i = 0; i < banks; i++) { | 588 | for (i = 0; i < banks; i++) { |
535 | if (test_bit(i, toclear)) | 589 | if (test_bit(i, toclear)) |