author     Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>   2011-06-07 21:56:02 -0400
committer  Borislav Petkov <borislav.petkov@amd.com>        2011-06-16 06:10:08 -0400
commit     b77e70bf3535e0bd5472e0681f41cce4ae0598bb (patch)
tree       6994077a641c30eaef34c72afbfe78379df7f0ba /arch
parent     7639bfc753f70321dbea83852e1cc47a45b681d7 (diff)
x86, mce: Replace MCE_SELF_VECTOR by irq_work
The MCE handler uses a special vector for a self-IPI to invoke
post-emergency processing in interrupt context, e.g. to call an
NMI-unsafe function, wake up loggers, or schedule time-consuming
recovery work.
This mechanism is now generalized by the following commit:
> e360adbe29241a0194e10e20595360dd7b98a2b3
> Author: Peter Zijlstra <a.p.zijlstra@chello.nl>
> Date: Thu Oct 14 14:01:34 2010 +0800
>
> irq_work: Add generic hardirq context callbacks
>
> Provide a mechanism that allows running code in IRQ context. It is
> most useful for NMI code that needs to interact with the rest of the
> system -- like wakeup a task to drain buffers.
:
So switch the MCE handler over to this generic mechanism.
Signed-off-by: Hidetoshi Seto <seto.hidetoshi@jp.fujitsu.com>
Acked-by: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/4DEED6B2.6080005@jp.fujitsu.com
Signed-off-by: Borislav Petkov <borislav.petkov@amd.com>
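
[Editor's note] For reference, the irq_work pattern this patch adopts comes down to three steps: define a per-CPU work item, register its callback once at init time, and queue the item from the NMI-safe exception path. Below is a minimal sketch of that pattern, assuming the 2.6.39-era API used in this patch (struct irq_work, init_irq_work(), irq_work_queue(), and the __get_cpu_var() per-CPU accessor); the example_* identifiers are illustrative stand-ins for the mce_* ones added to mce.c further down.

#include <linux/irq_work.h>
#include <linux/percpu.h>

/* One work item per CPU, so it can be queued from NMI context without locks. */
static DEFINE_PER_CPU(struct irq_work, example_irq_work);

/*
 * The callback runs later in hard-IRQ context, once the NMI has returned,
 * where waking tasks and scheduling regular workqueue work are safe.
 */
static void example_irq_work_cb(struct irq_work *entry)
{
	/* e.g. wake up loggers, schedule time-consuming recovery work */
}

/* Run once per CPU during setup (analogous to mcheck_cpu_init() below). */
static void example_setup(void)
{
	init_irq_work(&__get_cpu_var(example_irq_work), &example_irq_work_cb);
}

/* Called from the NMI-like #MC path (analogous to mce_report_event() below). */
static void example_report(void)
{
	irq_work_queue(&__get_cpu_var(example_irq_work));
}

Compared with the old MCE_SELF_VECTOR scheme, the queueing side no longer has to check cpu_has_apic, program a self-IPI, or do the ack_APIC_irq()/irq_enter()/irq_exit() bookkeeping itself; irq_work delivers the callback in IRQ context.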
Diffstat (limited to 'arch')
-rw-r--r--  arch/x86/include/asm/entry_arch.h  |  4
-rw-r--r--  arch/x86/include/asm/hw_irq.h      |  1
-rw-r--r--  arch/x86/include/asm/irq_vectors.h |  5
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c   | 47
-rw-r--r--  arch/x86/kernel/entry_64.S         |  5
-rw-r--r--  arch/x86/kernel/irqinit.c          |  3
6 files changed, 6 insertions(+), 59 deletions(-)
diff --git a/arch/x86/include/asm/entry_arch.h b/arch/x86/include/asm/entry_arch.h
index 1cd6d26a0a8..0baa628e330 100644
--- a/arch/x86/include/asm/entry_arch.h
+++ b/arch/x86/include/asm/entry_arch.h
@@ -53,8 +53,4 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
 BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
 #endif
 
-#ifdef CONFIG_X86_MCE
-BUILD_INTERRUPT(mce_self_interrupt,MCE_SELF_VECTOR)
-#endif
-
 #endif
diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
index bb9efe8706e..13f5504c76c 100644
--- a/arch/x86/include/asm/hw_irq.h
+++ b/arch/x86/include/asm/hw_irq.h
@@ -34,7 +34,6 @@ extern void irq_work_interrupt(void);
 extern void spurious_interrupt(void);
 extern void thermal_interrupt(void);
 extern void reschedule_interrupt(void);
-extern void mce_self_interrupt(void);
 
 extern void invalidate_interrupt(void);
 extern void invalidate_interrupt0(void);
diff --git a/arch/x86/include/asm/irq_vectors.h b/arch/x86/include/asm/irq_vectors.h
index 6e976ee3b3e..6665026ea3e 100644
--- a/arch/x86/include/asm/irq_vectors.h
+++ b/arch/x86/include/asm/irq_vectors.h
@@ -109,11 +109,6 @@
 
 #define UV_BAU_MESSAGE			0xf5
 
-/*
- * Self IPI vector for machine checks
- */
-#define MCE_SELF_VECTOR			0xf4
-
 /* Xen vector callback to receive events in a HVM domain */
 #define XEN_HVM_EVTCHN_CALLBACK		0xf3
 
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index ff1ae9b6464..e81d48b0561 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -10,7 +10,6 @@
 #include <linux/thread_info.h>
 #include <linux/capability.h>
 #include <linux/miscdevice.h>
-#include <linux/interrupt.h>
 #include <linux/ratelimit.h>
 #include <linux/kallsyms.h>
 #include <linux/rcupdate.h>
@@ -38,12 +37,9 @@
 #include <linux/mm.h>
 #include <linux/debugfs.h>
 #include <linux/edac_mce.h>
+#include <linux/irq_work.h>
 
 #include <asm/processor.h>
-#include <asm/hw_irq.h>
-#include <asm/apic.h>
-#include <asm/idle.h>
-#include <asm/ipi.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
 
@@ -461,22 +457,13 @@ static inline void mce_get_rip(struct mce *m, struct pt_regs *regs)
 	m->ip = mce_rdmsrl(rip_msr);
 }
 
-#ifdef CONFIG_X86_LOCAL_APIC
-/*
- * Called after interrupts have been reenabled again
- * when a MCE happened during an interrupts off region
- * in the kernel.
- */
-asmlinkage void smp_mce_self_interrupt(struct pt_regs *regs)
+DEFINE_PER_CPU(struct irq_work, mce_irq_work);
+
+static void mce_irq_work_cb(struct irq_work *entry)
 {
-	ack_APIC_irq();
-	exit_idle();
-	irq_enter();
 	mce_notify_irq();
 	mce_schedule_work();
-	irq_exit();
 }
-#endif
 
 static void mce_report_event(struct pt_regs *regs)
 {
@@ -492,29 +479,7 @@ static void mce_report_event(struct pt_regs *regs)
 		return;
 	}
 
-#ifdef CONFIG_X86_LOCAL_APIC
-	/*
-	 * Without APIC do not notify. The event will be picked
-	 * up eventually.
-	 */
-	if (!cpu_has_apic)
-		return;
-
-	/*
-	 * When interrupts are disabled we cannot use
-	 * kernel services safely. Trigger an self interrupt
-	 * through the APIC to instead do the notification
-	 * after interrupts are reenabled again.
-	 */
-	apic->send_IPI_self(MCE_SELF_VECTOR);
-
-	/*
-	 * Wait for idle afterwards again so that we don't leave the
-	 * APIC in a non idle state because the normal APIC writes
-	 * cannot exclude us.
-	 */
-	apic_wait_icr_idle();
-#endif
+	irq_work_queue(&__get_cpu_var(mce_irq_work));
 }
 
 DEFINE_PER_CPU(unsigned, mce_poll_count);
@@ -1444,7 +1409,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
 	__mcheck_cpu_init_vendor(c);
 	__mcheck_cpu_init_timer();
 	INIT_WORK(&__get_cpu_var(mce_work), mce_process_work);
-
+	init_irq_work(&__get_cpu_var(mce_irq_work), &mce_irq_work_cb);
 }
 
 /*
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
index 8a445a0c989..9fa65460d33 100644
--- a/arch/x86/kernel/entry_64.S
+++ b/arch/x86/kernel/entry_64.S
@@ -991,11 +991,6 @@ apicinterrupt THRESHOLD_APIC_VECTOR \
 apicinterrupt THERMAL_APIC_VECTOR \
 	thermal_interrupt smp_thermal_interrupt
 
-#ifdef CONFIG_X86_MCE
-apicinterrupt MCE_SELF_VECTOR \
-	mce_self_interrupt smp_mce_self_interrupt
-#endif
-
 #ifdef CONFIG_SMP
 apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
 	call_function_single_interrupt smp_call_function_single_interrupt
diff --git a/arch/x86/kernel/irqinit.c b/arch/x86/kernel/irqinit.c
index f470e4ef993..f09d4bbe2d2 100644
--- a/arch/x86/kernel/irqinit.c
+++ b/arch/x86/kernel/irqinit.c
@@ -272,9 +272,6 @@ static void __init apic_intr_init(void)
 #ifdef CONFIG_X86_MCE_THRESHOLD
 	alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
 #endif
-#if defined(CONFIG_X86_MCE) && defined(CONFIG_X86_LOCAL_APIC)
-	alloc_intr_gate(MCE_SELF_VECTOR, mce_self_interrupt);
-#endif
 
 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_LOCAL_APIC)
 	/* self generated IPI for local APIC timer */