diff options
author | Ingo Molnar <mingo@kernel.org> | 2015-01-15 05:29:49 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2015-01-15 05:29:49 -0500 |
commit | 37e4d3b951d6d2f6e7280ee5bae6c22afe3abe1d (patch) | |
tree | d50d8a7094e9d8cc081e35f9208be8dbdd88442c | |
parent | f800c25b7a762d445ba1439a2428c8362157eba6 (diff) | |
parent | 83737691e586cd2767fa4fc87cd41251d1a49e9e (diff) |
Merge tag 'ras_for_3.20' of git://git.kernel.org/pub/scm/linux/kernel/git/bp/bp into x86/ras
Pull RAS updates from Borislav Petkov:
"Nothing special this time, just an error messages improvement from Andy
and a cleanup from me."
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce.c | 25 |
1 file changed, 14 insertions, 11 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c index d2c611699cd9..4c5cd7575d31 100644 --- a/arch/x86/kernel/cpu/mcheck/mce.c +++ b/arch/x86/kernel/cpu/mcheck/mce.c | |||
@@ -115,7 +115,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs); | |||
115 | * CPU/chipset specific EDAC code can register a notifier call here to print | 115 | * CPU/chipset specific EDAC code can register a notifier call here to print |
116 | * MCE errors in a human-readable form. | 116 | * MCE errors in a human-readable form. |
117 | */ | 117 | */ |
118 | ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); | 118 | static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain); |
119 | 119 | ||
120 | /* Do initial initialization of a struct mce */ | 120 | /* Do initial initialization of a struct mce */ |
121 | void mce_setup(struct mce *m) | 121 | void mce_setup(struct mce *m) |
@@ -311,7 +311,7 @@ static void wait_for_panic(void) | |||
311 | panic("Panicing machine check CPU died"); | 311 | panic("Panicing machine check CPU died"); |
312 | } | 312 | } |
313 | 313 | ||
314 | static void mce_panic(char *msg, struct mce *final, char *exp) | 314 | static void mce_panic(const char *msg, struct mce *final, char *exp) |
315 | { | 315 | { |
316 | int i, apei_err = 0; | 316 | int i, apei_err = 0; |
317 | 317 | ||
@@ -529,7 +529,7 @@ static void mce_schedule_work(void) | |||
529 | schedule_work(this_cpu_ptr(&mce_work)); | 529 | schedule_work(this_cpu_ptr(&mce_work)); |
530 | } | 530 | } |
531 | 531 | ||
532 | DEFINE_PER_CPU(struct irq_work, mce_irq_work); | 532 | static DEFINE_PER_CPU(struct irq_work, mce_irq_work); |
533 | 533 | ||
534 | static void mce_irq_work_cb(struct irq_work *entry) | 534 | static void mce_irq_work_cb(struct irq_work *entry) |
535 | { | 535 | { |
@@ -735,7 +735,7 @@ static atomic_t mce_callin; | |||
735 | /* | 735 | /* |
736 | * Check if a timeout waiting for other CPUs happened. | 736 | * Check if a timeout waiting for other CPUs happened. |
737 | */ | 737 | */ |
738 | static int mce_timed_out(u64 *t) | 738 | static int mce_timed_out(u64 *t, const char *msg) |
739 | { | 739 | { |
740 | /* | 740 | /* |
741 | * The others already did panic for some reason. | 741 | * The others already did panic for some reason. |
@@ -750,8 +750,7 @@ static int mce_timed_out(u64 *t) | |||
750 | goto out; | 750 | goto out; |
751 | if ((s64)*t < SPINUNIT) { | 751 | if ((s64)*t < SPINUNIT) { |
752 | if (mca_cfg.tolerant <= 1) | 752 | if (mca_cfg.tolerant <= 1) |
753 | mce_panic("Timeout synchronizing machine check over CPUs", | 753 | mce_panic(msg, NULL, NULL); |
754 | NULL, NULL); | ||
755 | cpu_missing = 1; | 754 | cpu_missing = 1; |
756 | return 1; | 755 | return 1; |
757 | } | 756 | } |
@@ -867,7 +866,8 @@ static int mce_start(int *no_way_out) | |||
867 | * Wait for everyone. | 866 | * Wait for everyone. |
868 | */ | 867 | */ |
869 | while (atomic_read(&mce_callin) != cpus) { | 868 | while (atomic_read(&mce_callin) != cpus) { |
870 | if (mce_timed_out(&timeout)) { | 869 | if (mce_timed_out(&timeout, |
870 | "Timeout: Not all CPUs entered broadcast exception handler")) { | ||
871 | atomic_set(&global_nwo, 0); | 871 | atomic_set(&global_nwo, 0); |
872 | return -1; | 872 | return -1; |
873 | } | 873 | } |
@@ -892,7 +892,8 @@ static int mce_start(int *no_way_out) | |||
892 | * only seen by one CPU before cleared, avoiding duplicates. | 892 | * only seen by one CPU before cleared, avoiding duplicates. |
893 | */ | 893 | */ |
894 | while (atomic_read(&mce_executing) < order) { | 894 | while (atomic_read(&mce_executing) < order) { |
895 | if (mce_timed_out(&timeout)) { | 895 | if (mce_timed_out(&timeout, |
896 | "Timeout: Subject CPUs unable to finish machine check processing")) { | ||
896 | atomic_set(&global_nwo, 0); | 897 | atomic_set(&global_nwo, 0); |
897 | return -1; | 898 | return -1; |
898 | } | 899 | } |
@@ -936,7 +937,8 @@ static int mce_end(int order) | |||
936 | * loops. | 937 | * loops. |
937 | */ | 938 | */ |
938 | while (atomic_read(&mce_executing) <= cpus) { | 939 | while (atomic_read(&mce_executing) <= cpus) { |
939 | if (mce_timed_out(&timeout)) | 940 | if (mce_timed_out(&timeout, |
941 | "Timeout: Monarch CPU unable to finish machine check processing")) | ||
940 | goto reset; | 942 | goto reset; |
941 | ndelay(SPINUNIT); | 943 | ndelay(SPINUNIT); |
942 | } | 944 | } |
@@ -949,7 +951,8 @@ static int mce_end(int order) | |||
949 | * Subject: Wait for Monarch to finish. | 951 | * Subject: Wait for Monarch to finish. |
950 | */ | 952 | */ |
951 | while (atomic_read(&mce_executing) != 0) { | 953 | while (atomic_read(&mce_executing) != 0) { |
952 | if (mce_timed_out(&timeout)) | 954 | if (mce_timed_out(&timeout, |
955 | "Timeout: Monarch CPU did not finish machine check processing")) | ||
953 | goto reset; | 956 | goto reset; |
954 | ndelay(SPINUNIT); | 957 | ndelay(SPINUNIT); |
955 | } | 958 | } |
@@ -1009,7 +1012,7 @@ static void mce_clear_state(unsigned long *toclear) | |||
1009 | */ | 1012 | */ |
1010 | #define MCE_INFO_MAX 16 | 1013 | #define MCE_INFO_MAX 16 |
1011 | 1014 | ||
1012 | struct mce_info { | 1015 | static struct mce_info { |
1013 | atomic_t inuse; | 1016 | atomic_t inuse; |
1014 | struct task_struct *t; | 1017 | struct task_struct *t; |
1015 | __u64 paddr; | 1018 | __u64 paddr; |