author     Yazen Ghannam <Yazen.Ghannam@amd.com>      2017-03-15 13:30:55 -0400
committer  Thomas Gleixner <tglx@linutronix.de>       2017-03-18 08:03:44 -0400
commit     5204bf17031b69fa5faa4dc80a9dc1e2446d74f9 (patch)
tree       f24b6f774be261341c9ef216e2e61d2b9ba032cb
parent     5bc329503e8191c91c4c40836f062ef771d8ba83 (diff)
x86/mce: Init some CPU features early
When the MCA banks in __mcheck_cpu_init_generic() are polled for leftover errors logged during boot or from the previous boot, it is required to have CPU features detected sufficiently so that the reading out and handling of those early errors is done correctly. If those features are not available, the decoding may miss some information and log incomplete errors. For example, on SMCA systems the MCA_IPID and MCA_SYND registers are not logged and MCA_ADDR is not masked appropriately.

To cure that, do a subset of the basic feature detection early, while the rest happens in its usual place in __mcheck_cpu_init_vendor().

Signed-off-by: Yazen Ghannam <Yazen.Ghannam@amd.com>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-edac <linux-edac@vger.kernel.org>
Cc: x86-ml <x86@kernel.org>
Link: http://lkml.kernel.org/r/1489599055-20756-1-git-send-email-Yazen.Ghannam@amd.com
[ Massage commit message and simplify. ]
Signed-off-by: Borislav Petkov <bp@suse.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
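To make the failure mode concrete: the register reader consults mce_flags at read time, so a poll that runs before the flags are set silently skips the SMCA-only registers. Below is a minimal, standalone C sketch modeled on the kernel's mce_read_aux() path; it is an illustration, not the kernel code. The MSR numbers mirror msr-index.h and the MCA_ADDR LSB handling follows the SMCA encoding, but treat both as assumptions of this sketch.

#include <stdint.h>
#include <stdio.h>

static struct { unsigned int smca : 1; } mce_flags;

/* Stub standing in for mce_rdmsrl(); a kernel build reads the real MSR. */
static uint64_t mce_rdmsrl(uint32_t msr)
{
	return ((uint64_t)msr << 32) | 0xabcd;
}

struct mce {
	uint64_t addr, ipid, synd;
};

static void read_aux(struct mce *m, int bank)
{
	m->addr = mce_rdmsrl(0x402 + 4 * bank);		/* MCA_ADDR (legacy number) */

	if (mce_flags.smca) {
		/*
		 * SMCA encodes the least significant valid address bit in
		 * ADDR[61:56]; mask the address down to the valid bits.
		 */
		uint8_t lsb = (m->addr >> 56) & 0x3f;

		m->addr &= ~0ULL >> 8;			/* keep bits 55:0 */
		m->addr &= ~((1ULL << lsb) - 1);	/* drop bits below lsb */

		/* MCA_IPID and MCA_SYND exist only on SMCA systems. */
		m->ipid = mce_rdmsrl(0xc0002005 + 0x10 * bank);
		m->synd = mce_rdmsrl(0xc0002006 + 0x10 * bank);
	}
}

int main(void)
{
	struct mce m = { 0 };

	read_aux(&m, 2);	/* flag still clear: ipid/synd stay zero */
	printf("pre-init:  ipid=%#llx synd=%#llx\n",
	       (unsigned long long)m.ipid, (unsigned long long)m.synd);

	mce_flags.smca = 1;	/* what __mcheck_cpu_init_early() establishes */
	read_aux(&m, 2);
	printf("post-init: ipid=%#llx synd=%#llx\n",
	       (unsigned long long)m.ipid, (unsigned long long)m.synd);

	return 0;
}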
 arch/x86/kernel/cpu/mcheck/mce.c | 30 ++++++++++++++++++------------
 1 file changed, 18 insertions(+), 12 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index 177472ace838..5e365a2fabe5 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -1702,30 +1702,35 @@ static int __mcheck_cpu_ancient_init(struct cpuinfo_x86 *c)
 	return 0;
 }
 
-static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
+/*
+ * Init basic CPU features needed for early decoding of MCEs.
+ */
+static void __mcheck_cpu_init_early(struct cpuinfo_x86 *c)
 {
-	switch (c->x86_vendor) {
-	case X86_VENDOR_INTEL:
-		mce_intel_feature_init(c);
-		mce_adjust_timer = cmci_intel_adjust_timer;
-		break;
-
-	case X86_VENDOR_AMD: {
+	if (c->x86_vendor == X86_VENDOR_AMD) {
 		mce_flags.overflow_recov = !!cpu_has(c, X86_FEATURE_OVERFLOW_RECOV);
 		mce_flags.succor	 = !!cpu_has(c, X86_FEATURE_SUCCOR);
 		mce_flags.smca		 = !!cpu_has(c, X86_FEATURE_SMCA);
 
-		/*
-		 * Install proper ops for Scalable MCA enabled processors
-		 */
 		if (mce_flags.smca) {
 			msr_ops.ctl	= smca_ctl_reg;
 			msr_ops.status	= smca_status_reg;
 			msr_ops.addr	= smca_addr_reg;
 			msr_ops.misc	= smca_misc_reg;
 		}
-		mce_amd_feature_init(c);
+	}
+}
 
+static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
+{
+	switch (c->x86_vendor) {
+	case X86_VENDOR_INTEL:
+		mce_intel_feature_init(c);
+		mce_adjust_timer = cmci_intel_adjust_timer;
+		break;
+
+	case X86_VENDOR_AMD: {
+		mce_amd_feature_init(c);
 		break;
 		}
 
@@ -1812,6 +1817,7 @@ void mcheck_cpu_init(struct cpuinfo_x86 *c)
 
 	machine_check_vector = do_machine_check;
 
+	__mcheck_cpu_init_early(c);
 	__mcheck_cpu_init_generic();
 	__mcheck_cpu_init_vendor(c);
 	__mcheck_cpu_init_clear_banks();
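The smca_*_reg installs in the first hunk are what let the generic poll read from the right registers: legacy MCA spaces a bank's MSRs 4 apart, while SMCA uses a separate range spaced 0x10 apart, and every access resolves the MSR number through a function pointer. The following standalone sketch mimics that indirection; the names and constants mirror mce.c and msr-index.h but are reproduced here as assumptions, not the kernel headers.

#include <stdio.h>

typedef unsigned int u32;

#define MSR_IA32_MC0_CTL		0x00000400u
#define MSR_AMD64_SMCA_MC0_CTL		0xc0002000u

#define MSR_IA32_MCx_CTL(x)		(MSR_IA32_MC0_CTL + 4 * (x))
#define MSR_AMD64_SMCA_MCx_CTL(x)	(MSR_AMD64_SMCA_MC0_CTL + 0x10 * (x))

/* The kernel's struct also carries status/addr/misc lookups. */
struct mca_msr_regs {
	u32 (*ctl)(int bank);
};

static u32 ctl_reg(int bank)      { return MSR_IA32_MCx_CTL(bank); }
static u32 smca_ctl_reg(int bank) { return MSR_AMD64_SMCA_MCx_CTL(bank); }

static struct mca_msr_regs msr_ops = { .ctl = ctl_reg };

int main(void)
{
	/* Default ops: bank 2 resolves to the legacy MSR number. */
	printf("legacy ctl(2): %#x\n", msr_ops.ctl(2));

	/* The swap __mcheck_cpu_init_early() performs on SMCA parts. */
	msr_ops.ctl = smca_ctl_reg;
	printf("smca   ctl(2): %#x\n", msr_ops.ctl(2));

	return 0;
}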