aboutsummaryrefslogtreecommitdiffstats
path: root/arch/x86/kernel
diff options
context:
space:
mode:
authorAravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>2016-03-07 08:02:19 -0500
committerIngo Molnar <mingo@kernel.org>2016-03-08 05:48:14 -0500
commit8dd1e17a55b0bb1206c71c7a4344c5e3037cdf65 (patch)
tree965582ba90770e01a42f051b2af5351b1ae95911 /arch/x86/kernel
parentbe0aec23bf4624fd55650629fe8df20483487049 (diff)
x86/mce/AMD: Fix logic to obtain block address
In upcoming processors, the BLKPTR field is no longer used to indicate the MSR number of the additional register. Instead, it simply indicates the presence of additional MSRs. Fix the logic here to gather MSR address from MSR_AMD64_SMCA_MCx_MISC() for newer processors and fall back to existing logic for older processors. [ Drop nextaddr_out label; style cleanups. ] Signed-off-by: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com> Signed-off-by: Borislav Petkov <bp@suse.de> Cc: Borislav Petkov <bp@alien8.de> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: linux-edac <linux-edac@vger.kernel.org> Link: http://lkml.kernel.org/r/1457021458-2522-4-git-send-email-Aravind.Gopalakrishnan@amd.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r--arch/x86/kernel/cpu/mcheck/mce_amd.c84
1 file changed, 55 insertions, 29 deletions
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index ee487a93ebe7..a53eb1ba587e 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -304,6 +304,51 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
304 wrmsr(MSR_CU_DEF_ERR, low, high); 304 wrmsr(MSR_CU_DEF_ERR, low, high);
305} 305}
306 306
307static u32 get_block_address(u32 current_addr, u32 low, u32 high,
308 unsigned int bank, unsigned int block)
309{
310 u32 addr = 0, offset = 0;
311
312 if (mce_flags.smca) {
313 if (!block) {
314 addr = MSR_AMD64_SMCA_MCx_MISC(bank);
315 } else {
316 /*
317 * For SMCA enabled processors, BLKPTR field of the
318 * first MISC register (MCx_MISC0) indicates presence of
319 * additional MISC register set (MISC1-4).
320 */
321 u32 low, high;
322
323 if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
324 return addr;
325
326 if (!(low & MCI_CONFIG_MCAX))
327 return addr;
328
329 if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
330 (low & MASK_BLKPTR_LO))
331 addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
332 }
333 return addr;
334 }
335
336 /* Fall back to method we used for older processors: */
337 switch (block) {
338 case 0:
339 addr = MSR_IA32_MCx_MISC(bank);
340 break;
341 case 1:
342 offset = ((low & MASK_BLKPTR_LO) >> 21);
343 if (offset)
344 addr = MCG_XBLK_ADDR + offset;
345 break;
346 default:
347 addr = ++current_addr;
348 }
349 return addr;
350}
351
307static int 352static int
308prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, 353prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
309 int offset, u32 misc_high) 354 int offset, u32 misc_high)
@@ -366,16 +411,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
366 411
367 for (bank = 0; bank < mca_cfg.banks; ++bank) { 412 for (bank = 0; bank < mca_cfg.banks; ++bank) {
368 for (block = 0; block < NR_BLOCKS; ++block) { 413 for (block = 0; block < NR_BLOCKS; ++block) {
369 if (block == 0) 414 address = get_block_address(address, low, high, bank, block);
370 address = MSR_IA32_MCx_MISC(bank); 415 if (!address)
371 else if (block == 1) { 416 break;
372 address = (low & MASK_BLKPTR_LO) >> 21;
373 if (!address)
374 break;
375
376 address += MCG_XBLK_ADDR;
377 } else
378 ++address;
379 417
380 if (rdmsr_safe(address, &low, &high)) 418 if (rdmsr_safe(address, &low, &high))
381 break; 419 break;
@@ -480,16 +518,9 @@ static void amd_threshold_interrupt(void)
480 if (!(per_cpu(bank_map, cpu) & (1 << bank))) 518 if (!(per_cpu(bank_map, cpu) & (1 << bank)))
481 continue; 519 continue;
482 for (block = 0; block < NR_BLOCKS; ++block) { 520 for (block = 0; block < NR_BLOCKS; ++block) {
483 if (block == 0) { 521 address = get_block_address(address, low, high, bank, block);
484 address = MSR_IA32_MCx_MISC(bank); 522 if (!address)
485 } else if (block == 1) { 523 break;
486 address = (low & MASK_BLKPTR_LO) >> 21;
487 if (!address)
488 break;
489 address += MCG_XBLK_ADDR;
490 } else {
491 ++address;
492 }
493 524
494 if (rdmsr_safe(address, &low, &high)) 525 if (rdmsr_safe(address, &low, &high))
495 break; 526 break;
@@ -709,16 +740,11 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
709 if (err) 740 if (err)
710 goto out_free; 741 goto out_free;
711recurse: 742recurse:
712 if (!block) { 743 address = get_block_address(address, low, high, bank, ++block);
713 address = (low & MASK_BLKPTR_LO) >> 21; 744 if (!address)
714 if (!address) 745 return 0;
715 return 0;
716 address += MCG_XBLK_ADDR;
717 } else {
718 ++address;
719 }
720 746
721 err = allocate_threshold_blocks(cpu, bank, ++block, address); 747 err = allocate_threshold_blocks(cpu, bank, block, address);
722 if (err) 748 if (err)
723 goto out_free; 749 goto out_free;
724 750