aboutsummaryrefslogtreecommitdiffstats
path: root/arch/powerpc/kernel/mce.c
diff options
context:
space:
mode:
authorMahesh Salgaonkar <mahesh@linux.vnet.ibm.com>2014-01-14 05:15:09 -0500
committerBenjamin Herrenschmidt <benh@kernel.crashing.org>2014-01-14 21:58:59 -0500
commit30c826358d10c1d6f8147de3310b97488daec830 (patch)
tree357314baea0d2abd2ea0088b8d5cb2010038d790 /arch/powerpc/kernel/mce.c
parentc0c4301c54adde05fa3652777f550e4570b87399 (diff)
Move processing of MCE queued event out from syscall exit path.
Hugh Dickins reported an issue that b5ff4211a829 "powerpc/book3s: Queue up and process delayed MCE events" breaks the PowerMac G5 boot. This patch fixes it by moving the mce event processing away from syscall exit, which was wrong to do in the first place, and using the irq work framework to delay processing of the mce event. Reported-by: Hugh Dickins <hughd@google.com> Signed-off-by: Mahesh Salgaonkar <mahesh@linux.vnet.ibm.com> Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Diffstat (limited to 'arch/powerpc/kernel/mce.c')
-rw-r--r--arch/powerpc/kernel/mce.c13
1 files changed, 10 insertions, 3 deletions
diff --git a/arch/powerpc/kernel/mce.c b/arch/powerpc/kernel/mce.c
index c0c52ec1fca7..cadef7e64e42 100644
--- a/arch/powerpc/kernel/mce.c
+++ b/arch/powerpc/kernel/mce.c
@@ -26,6 +26,7 @@
26#include <linux/ptrace.h> 26#include <linux/ptrace.h>
27#include <linux/percpu.h> 27#include <linux/percpu.h>
28#include <linux/export.h> 28#include <linux/export.h>
29#include <linux/irq_work.h>
29#include <asm/mce.h> 30#include <asm/mce.h>
30 31
31static DEFINE_PER_CPU(int, mce_nest_count); 32static DEFINE_PER_CPU(int, mce_nest_count);
@@ -35,6 +36,11 @@ static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event);
35static DEFINE_PER_CPU(int, mce_queue_count); 36static DEFINE_PER_CPU(int, mce_queue_count);
36static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue); 37static DEFINE_PER_CPU(struct machine_check_event[MAX_MC_EVT], mce_event_queue);
37 38
39static void machine_check_process_queued_event(struct irq_work *work);
40struct irq_work mce_event_process_work = {
41 .func = machine_check_process_queued_event,
42};
43
38static void mce_set_error_info(struct machine_check_event *mce, 44static void mce_set_error_info(struct machine_check_event *mce,
39 struct mce_error_info *mce_err) 45 struct mce_error_info *mce_err)
40{ 46{
@@ -185,17 +191,19 @@ void machine_check_queue_event(void)
185 return; 191 return;
186 } 192 }
187 __get_cpu_var(mce_event_queue[index]) = evt; 193 __get_cpu_var(mce_event_queue[index]) = evt;
194
195 /* Queue irq work to process this event later. */
196 irq_work_queue(&mce_event_process_work);
188} 197}
189 198
190/* 199/*
191 * process pending MCE event from the mce event queue. This function will be 200 * process pending MCE event from the mce event queue. This function will be
192 * called during syscall exit. 201 * called during syscall exit.
193 */ 202 */
194void machine_check_process_queued_event(void) 203static void machine_check_process_queued_event(struct irq_work *work)
195{ 204{
196 int index; 205 int index;
197 206
198 preempt_disable();
199 /* 207 /*
200 * For now just print it to console. 208 * For now just print it to console.
201 * TODO: log this error event to FSP or nvram. 209 * TODO: log this error event to FSP or nvram.
@@ -206,7 +214,6 @@ void machine_check_process_queued_event(void)
206 &__get_cpu_var(mce_event_queue[index])); 214 &__get_cpu_var(mce_event_queue[index]));
207 __get_cpu_var(mce_queue_count)--; 215 __get_cpu_var(mce_queue_count)--;
208 } 216 }
209 preempt_enable();
210} 217}
211 218
212void machine_check_print_event_info(struct machine_check_event *evt) 219void machine_check_print_event_info(struct machine_check_event *evt)