author    Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com> 2016-01-25 14:41:51 -0500
committer Ingo Molnar <mingo@kernel.org> 2016-02-01 04:53:57 -0500
commit    f57a1f3c14b9182f1fea667f5a38a1094699db7c (patch)
tree      e5d2ccfac61136ce12868f37eb8837bbb5682ee6 /arch/x86/kernel
parent    60f116fca162d9488f783f5014779463243ab7a2 (diff)
x86/mce/AMD: Fix LVT offset configuration for thresholding
For processor families with the Scalable MCA feature, the LVT offset for
threshold interrupts is configured only in MSR 0xC0000410 and not in each
per bank MISC register as was done in earlier families.

Obtain the LVT offset from the correct MSR for those families.

Signed-off-by: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Cc: linux-edac <linux-edac@vger.kernel.org>
Link: http://lkml.kernel.org/r/1453750913-4781-7-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
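For reference, a minimal user-space sketch of the two extraction paths the patch selects between: the SMCA path reading the offset from MSR 0xC0000410[15:12], and the legacy path reading it from each bank's MISC register high word. SMCA_THR_LVT_OFF is taken from the patch; the MASK_LVTOFF_HI value and the sample MSR contents are assumptions for illustration only, not kernel code.

/* Illustrative sketch only, not kernel code. */
#include <stdint.h>
#include <stdio.h>

#define SMCA_THR_LVT_OFF	0xF000		/* from the patch: MSR 0xC0000410[15:12] */
#define MASK_LVTOFF_HI		0x00F00000	/* assumed: legacy MISC MSR high word, bits [23:20] */

/* SMCA families: BIOS programs the offset once, in MSR 0xC0000410 */
static uint32_t lvt_off_smca(uint32_t cu_def_err_low)
{
	return (cu_def_err_low & SMCA_THR_LVT_OFF) >> 12;
}

/* Earlier families: the offset lives in each bank's MISC register */
static uint32_t lvt_off_legacy(uint32_t misc_high)
{
	return (misc_high & MASK_LVTOFF_HI) >> 20;
}

int main(void)
{
	uint32_t sample_cu_def_err_low = 0x00002000;	/* made-up value: offset 2 in bits [15:12] */
	uint32_t sample_misc_high      = 0x00200000;	/* made-up value: offset 2 in bits [23:20] */

	printf("SMCA threshold LVT offset:   %u\n", lvt_off_smca(sample_cu_def_err_low));
	printf("legacy threshold LVT offset: %u\n", lvt_off_legacy(sample_misc_high));
	return 0;
}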
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 27
1 file changed, 26 insertions(+), 1 deletion(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index 5982227990c9..a77a4521976a 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -49,6 +49,11 @@
 #define DEF_LVT_OFF		0x2
 #define DEF_INT_TYPE_APIC	0x2
 
+/* Scalable MCA: */
+
+/* Threshold LVT offset is at MSR0xC0000410[15:12] */
+#define SMCA_THR_LVT_OFF	0xF000
+
 static const char * const th_names[] = {
 	"load_store",
 	"insn_fetch",
@@ -142,6 +147,14 @@ static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
 	}
 
 	if (apic != msr) {
+		/*
+		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
+		 * the BIOS provides the value. The original field where LVT offset
+		 * was set is reserved. Return early here:
+		 */
+		if (mce_flags.smca)
+			return 0;
+
 		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
 		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
 		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
@@ -300,7 +313,19 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 				goto init;
 
 			b.interrupt_enable = 1;
-			new	= (high & MASK_LVTOFF_HI) >> 20;
+
+			if (mce_flags.smca) {
+				u32 smca_low, smca_high;
+
+				/* Gather LVT offset for thresholding: */
+				if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
+					break;
+
+				new = (smca_low & SMCA_THR_LVT_OFF) >> 12;
+			} else {
+				new = (high & MASK_LVTOFF_HI) >> 20;
+			}
+
 			offset  = setup_APIC_mce_threshold(offset, new);
 
 			if ((offset == new) &&