author     Linus Torvalds <torvalds@linux-foundation.org>  2015-02-09 21:22:04 -0500
committer  Linus Torvalds <torvalds@linux-foundation.org>  2015-02-09 21:22:04 -0500
commit     e07e0d4cb0c4bfe822ec8491cc06269096a38bea (patch)
tree       856bbf6c33d4de660d0b1e2c85019d0d3555123f /arch/x86
parent     57d3629410599a1074b02f9b2139c2a6aa2b787e (diff)
parent     93d76c802644e0cab62545603381988cef84d1d7 (diff)
Merge branch 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 RAS update from Ingo Molnar:
 "The changes in this cycle were:

   - allow mmcfg access to APEI error injection handlers

   - improve MCE error messages

   - smaller cleanups"

* 'x86-ras-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, mce: Fix sparse errors
  x86, mce: Improve timeout error messages
  ACPI, EINJ: Enhance error injection tolerance level
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c   23
-rw-r--r--  arch/x86/pci/mmconfig-shared.c     28
2 files changed, 41 insertions(+), 10 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index d23179900755..cdfed7953963 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -116,7 +116,7 @@ static void (*quirk_no_way_out)(int bank, struct mce *m, struct pt_regs *regs);
  * CPU/chipset specific EDAC code can register a notifier call here to print
  * MCE errors in a human-readable form.
  */
-ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
+static ATOMIC_NOTIFIER_HEAD(x86_mce_decoder_chain);
 
 /* Do initial initialization of a struct mce */
 void mce_setup(struct mce *m)
@@ -312,7 +312,7 @@ static void wait_for_panic(void)
 	panic("Panicing machine check CPU died");
 }
 
-static void mce_panic(char *msg, struct mce *final, char *exp)
+static void mce_panic(const char *msg, struct mce *final, char *exp)
 {
 	int i, apei_err = 0;
 
@@ -530,7 +530,7 @@ static void mce_schedule_work(void)
 	schedule_work(this_cpu_ptr(&mce_work));
 }
 
-DEFINE_PER_CPU(struct irq_work, mce_irq_work);
+static DEFINE_PER_CPU(struct irq_work, mce_irq_work);
 
 static void mce_irq_work_cb(struct irq_work *entry)
 {
@@ -736,7 +736,7 @@ static atomic_t mce_callin;
 /*
  * Check if a timeout waiting for other CPUs happened.
  */
-static int mce_timed_out(u64 *t)
+static int mce_timed_out(u64 *t, const char *msg)
 {
 	/*
 	 * The others already did panic for some reason.
@@ -751,8 +751,7 @@ static int mce_timed_out(u64 *t)
 		goto out;
 	if ((s64)*t < SPINUNIT) {
 		if (mca_cfg.tolerant <= 1)
-			mce_panic("Timeout synchronizing machine check over CPUs",
-				  NULL, NULL);
+			mce_panic(msg, NULL, NULL);
 		cpu_missing = 1;
 		return 1;
 	}
@@ -868,7 +867,8 @@ static int mce_start(int *no_way_out)
 	 * Wait for everyone.
 	 */
 	while (atomic_read(&mce_callin) != cpus) {
-		if (mce_timed_out(&timeout)) {
+		if (mce_timed_out(&timeout,
+				  "Timeout: Not all CPUs entered broadcast exception handler")) {
 			atomic_set(&global_nwo, 0);
 			return -1;
 		}
@@ -893,7 +893,8 @@ static int mce_start(int *no_way_out)
 	 * only seen by one CPU before cleared, avoiding duplicates.
 	 */
 	while (atomic_read(&mce_executing) < order) {
-		if (mce_timed_out(&timeout)) {
+		if (mce_timed_out(&timeout,
+				  "Timeout: Subject CPUs unable to finish machine check processing")) {
 			atomic_set(&global_nwo, 0);
 			return -1;
 		}
@@ -937,7 +938,8 @@ static int mce_end(int order)
 	 * loops.
 	 */
 	while (atomic_read(&mce_executing) <= cpus) {
-		if (mce_timed_out(&timeout))
+		if (mce_timed_out(&timeout,
+				  "Timeout: Monarch CPU unable to finish machine check processing"))
 			goto reset;
 		ndelay(SPINUNIT);
 	}
@@ -950,7 +952,8 @@ static int mce_end(int order)
 	 * Subject: Wait for Monarch to finish.
 	 */
 	while (atomic_read(&mce_executing) != 0) {
-		if (mce_timed_out(&timeout))
+		if (mce_timed_out(&timeout,
+				  "Timeout: Monarch CPU did not finish machine check processing"))
			goto reset;
 		ndelay(SPINUNIT);
 	}
diff --git a/arch/x86/pci/mmconfig-shared.c b/arch/x86/pci/mmconfig-shared.c
index 326198a4434e..676e5e04e4d4 100644
--- a/arch/x86/pci/mmconfig-shared.c
+++ b/arch/x86/pci/mmconfig-shared.c
@@ -610,6 +610,32 @@ static int __init pci_parse_mcfg(struct acpi_table_header *header)
 	return 0;
 }
 
+#ifdef CONFIG_ACPI_APEI
+extern int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
+				     void *data), void *data);
+
+static int pci_mmcfg_for_each_region(int (*func)(__u64 start, __u64 size,
+				     void *data), void *data)
+{
+	struct pci_mmcfg_region *cfg;
+	int rc;
+
+	if (list_empty(&pci_mmcfg_list))
+		return 0;
+
+	list_for_each_entry(cfg, &pci_mmcfg_list, list) {
+		rc = func(cfg->res.start, resource_size(&cfg->res), data);
+		if (rc)
+			return rc;
+	}
+
+	return 0;
+}
+#define set_apei_filter() (arch_apei_filter_addr = pci_mmcfg_for_each_region)
+#else
+#define set_apei_filter()
+#endif
+
 static void __init __pci_mmcfg_init(int early)
 {
 	pci_mmcfg_reject_broken(early);
@@ -644,6 +670,8 @@ void __init pci_mmcfg_early_init(void)
 		else
 			acpi_sfi_table_parse(ACPI_SIG_MCFG, pci_parse_mcfg);
 		__pci_mmcfg_init(1);
+
+		set_apei_filter();
 	}
 }
 
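On the consumer side (drivers/acpi/apei, not shown in this diff), the EINJ tolerance patch walks these MMCONFIG windows through the arch_apei_filter_addr hook that set_apei_filter() installs above. The sketch below is illustrative only: the note_mmcfg_window() callback name and the user-space harness are hypothetical; just the hook's signature and the per-region iteration come from this merge.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t __u64;

/*
 * Hook published by APEI; the x86 code points it at
 * pci_mmcfg_for_each_region() via set_apei_filter() in the hunk above.
 */
int (*arch_apei_filter_addr)(int (*func)(__u64 start, __u64 size,
					 void *data), void *data);

/* Hypothetical callback: note each MMCONFIG window so EINJ may allow it. */
static int note_mmcfg_window(__u64 start, __u64 size, void *data)
{
	unsigned int *count = data;

	printf("MMCFG window %u: %#llx, %#llx bytes\n", (*count)++,
	       (unsigned long long)start, (unsigned long long)size);
	return 0;	/* a nonzero return stops the iteration early */
}

int main(void)
{
	unsigned int count = 0;

	/* APEI only walks the regions when an arch actually set the hook. */
	if (arch_apei_filter_addr)
		arch_apei_filter_addr(note_mmcfg_window, &count);
	return 0;
}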