diff options
author | Yazen Ghannam <Yazen.Ghannam@amd.com> | 2016-09-12 03:59:31 -0400 |
---|---|---|
committer | Thomas Gleixner <tglx@linutronix.de> | 2016-09-13 09:23:08 -0400 |
commit | cfee4f6f0b2026380c6bc6913dbd27943df17371 (patch) | |
tree | 835891bc3c6371bd62617c198d3c5b547bf33222 | |
parent | bad744b7f29d264c2c2ad8fb723dd480e6c9b007 (diff) |
x86/mce/AMD: Read MSRs on the CPU allocating the threshold blocks
Scalable MCA systems allow non-core MCA banks to only be accessible by
certain CPUs. The MSRs for these banks are Read-as-Zero on other CPUs.
During allocate_threshold_blocks(), get_block_address() can be scheduled
on CPUs other than the one allocating the block. This causes the MSRs to
be read on the wrong CPU and results in incorrect behavior.
Add a @cpu parameter to get_block_address() and pass this in to ensure
that the MSRs are only read on the CPU that is allocating the block.
Signed-off-by: Yazen Ghannam <Yazen.Ghannam@amd.com>
Signed-off-by: Borislav Petkov <bp@suse.de>
Link: http://lkml.kernel.org/r/1472673994-12235-2-git-send-email-Yazen.Ghannam@amd.com
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_amd.c | 17 |
1 file changed, 8 insertions(+), 9 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index 419e0ee3b12f..9da92fb2e073 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -293,7 +293,7 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
293 | wrmsr(MSR_CU_DEF_ERR, low, high); | 293 | wrmsr(MSR_CU_DEF_ERR, low, high); |
294 | } | 294 | } |
295 | 295 | ||
296 | static u32 get_block_address(u32 current_addr, u32 low, u32 high, | 296 | static u32 get_block_address(unsigned int cpu, u32 current_addr, u32 low, u32 high, |
297 | unsigned int bank, unsigned int block) | 297 | unsigned int bank, unsigned int block) |
298 | { | 298 | { |
299 | u32 addr = 0, offset = 0; | 299 | u32 addr = 0, offset = 0; |
@@ -309,13 +309,13 @@ static u32 get_block_address(u32 current_addr, u32 low, u32 high,
309 | */ | 309 | */ |
310 | u32 low, high; | 310 | u32 low, high; |
311 | 311 | ||
312 | if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) | 312 | if (rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high)) |
313 | return addr; | 313 | return addr; |
314 | 314 | ||
315 | if (!(low & MCI_CONFIG_MCAX)) | 315 | if (!(low & MCI_CONFIG_MCAX)) |
316 | return addr; | 316 | return addr; |
317 | 317 | ||
318 | if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && | 318 | if (!rdmsr_safe_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) && |
319 | (low & MASK_BLKPTR_LO)) | 319 | (low & MASK_BLKPTR_LO)) |
320 | addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); | 320 | addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1); |
321 | } | 321 | } |
@@ -421,12 +421,12 @@ out:
421 | void mce_amd_feature_init(struct cpuinfo_x86 *c) | 421 | void mce_amd_feature_init(struct cpuinfo_x86 *c) |
422 | { | 422 | { |
423 | u32 low = 0, high = 0, address = 0; | 423 | u32 low = 0, high = 0, address = 0; |
424 | unsigned int bank, block; | 424 | unsigned int bank, block, cpu = smp_processor_id(); |
425 | int offset = -1; | 425 | int offset = -1; |
426 | 426 | ||
427 | for (bank = 0; bank < mca_cfg.banks; ++bank) { | 427 | for (bank = 0; bank < mca_cfg.banks; ++bank) { |
428 | for (block = 0; block < NR_BLOCKS; ++block) { | 428 | for (block = 0; block < NR_BLOCKS; ++block) { |
429 | address = get_block_address(address, low, high, bank, block); | 429 | address = get_block_address(cpu, address, low, high, bank, block); |
430 | if (!address) | 430 | if (!address) |
431 | break; | 431 | break; |
432 | 432 | ||
@@ -544,15 +544,14 @@ static void amd_deferred_error_interrupt(void)
544 | static void amd_threshold_interrupt(void) | 544 | static void amd_threshold_interrupt(void) |
545 | { | 545 | { |
546 | u32 low = 0, high = 0, address = 0; | 546 | u32 low = 0, high = 0, address = 0; |
547 | int cpu = smp_processor_id(); | 547 | unsigned int bank, block, cpu = smp_processor_id(); |
548 | unsigned int bank, block; | ||
549 | 548 | ||
550 | /* assume first bank caused it */ | 549 | /* assume first bank caused it */ |
551 | for (bank = 0; bank < mca_cfg.banks; ++bank) { | 550 | for (bank = 0; bank < mca_cfg.banks; ++bank) { |
552 | if (!(per_cpu(bank_map, cpu) & (1 << bank))) | 551 | if (!(per_cpu(bank_map, cpu) & (1 << bank))) |
553 | continue; | 552 | continue; |
554 | for (block = 0; block < NR_BLOCKS; ++block) { | 553 | for (block = 0; block < NR_BLOCKS; ++block) { |
555 | address = get_block_address(address, low, high, bank, block); | 554 | address = get_block_address(cpu, address, low, high, bank, block); |
556 | if (!address) | 555 | if (!address) |
557 | break; | 556 | break; |
558 | 557 | ||
@@ -774,7 +773,7 @@ static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
774 | if (err) | 773 | if (err) |
775 | goto out_free; | 774 | goto out_free; |
776 | recurse: | 775 | recurse: |
777 | address = get_block_address(address, low, high, bank, ++block); | 776 | address = get_block_address(cpu, address, low, high, bank, ++block); |
778 | if (!address) | 777 | if (!address) |
779 | return 0; | 778 | return 0; |
780 | 779 | ||