diff options
author | Borislav Petkov <bp@suse.de> | 2016-01-25 14:41:52 -0500 |
---|---|---|
committer | Ingo Molnar <mingo@kernel.org> | 2016-02-01 04:53:58 -0500 |
commit | 429893b16d35d309ed6b35136aad5f908a08d9b9 (patch) | |
tree | d7b7a9476a0da3e54a5844c5f17ffd23c18e2584 /arch/x86/kernel | |
parent | f57a1f3c14b9182f1fea667f5a38a1094699db7c (diff) |
x86/mce/AMD: Carve out threshold block preparation
mce_amd_feature_init() was getting pretty fat, so carve out the
threshold_block setup into a separate function in order to
simplify the flow and make it more understandable.
No functionality change.
Signed-off-by: Borislav Petkov <bp@suse.de>
Cc: Aravind Gopalakrishnan <Aravind.Gopalakrishnan@amd.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Tony Luck <tony.luck@intel.com>
Link: http://lkml.kernel.org/r/1453750913-4781-8-git-send-email-bp@alien8.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'arch/x86/kernel')
-rw-r--r-- | arch/x86/kernel/cpu/mcheck/mce_amd.c | 87 |
1 file changed, 49 insertions(+), 38 deletions(-)
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c index a77a4521976a..f2860a118b71 100644 --- a/arch/x86/kernel/cpu/mcheck/mce_amd.c +++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c | |||
@@ -267,14 +267,59 @@ static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c) | |||
267 | wrmsr(MSR_CU_DEF_ERR, low, high); | 267 | wrmsr(MSR_CU_DEF_ERR, low, high); |
268 | } | 268 | } |
269 | 269 | ||
270 | static int | ||
271 | prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr, | ||
272 | int offset, u32 misc_high) | ||
273 | { | ||
274 | unsigned int cpu = smp_processor_id(); | ||
275 | struct threshold_block b; | ||
276 | int new; | ||
277 | |||
278 | if (!block) | ||
279 | per_cpu(bank_map, cpu) |= (1 << bank); | ||
280 | |||
281 | memset(&b, 0, sizeof(b)); | ||
282 | b.cpu = cpu; | ||
283 | b.bank = bank; | ||
284 | b.block = block; | ||
285 | b.address = addr; | ||
286 | b.interrupt_capable = lvt_interrupt_supported(bank, misc_high); | ||
287 | |||
288 | if (!b.interrupt_capable) | ||
289 | goto done; | ||
290 | |||
291 | b.interrupt_enable = 1; | ||
292 | |||
293 | if (mce_flags.smca) { | ||
294 | u32 smca_low, smca_high; | ||
295 | |||
296 | /* Gather LVT offset for thresholding: */ | ||
297 | if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high)) | ||
298 | goto out; | ||
299 | |||
300 | new = (smca_low & SMCA_THR_LVT_OFF) >> 12; | ||
301 | } else { | ||
302 | new = (misc_high & MASK_LVTOFF_HI) >> 20; | ||
303 | } | ||
304 | |||
305 | offset = setup_APIC_mce_threshold(offset, new); | ||
306 | |||
307 | if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt)) | ||
308 | mce_threshold_vector = amd_threshold_interrupt; | ||
309 | |||
310 | done: | ||
311 | mce_threshold_block_init(&b, offset); | ||
312 | |||
313 | out: | ||
314 | return offset; | ||
315 | } | ||
316 | |||
270 | /* cpu init entry point, called from mce.c with preempt off */ | 317 | /* cpu init entry point, called from mce.c with preempt off */ |
271 | void mce_amd_feature_init(struct cpuinfo_x86 *c) | 318 | void mce_amd_feature_init(struct cpuinfo_x86 *c) |
272 | { | 319 | { |
273 | struct threshold_block b; | ||
274 | unsigned int cpu = smp_processor_id(); | ||
275 | u32 low = 0, high = 0, address = 0; | 320 | u32 low = 0, high = 0, address = 0; |
276 | unsigned int bank, block; | 321 | unsigned int bank, block; |
277 | int offset = -1, new; | 322 | int offset = -1; |
278 | 323 | ||
279 | for (bank = 0; bank < mca_cfg.banks; ++bank) { | 324 | for (bank = 0; bank < mca_cfg.banks; ++bank) { |
280 | for (block = 0; block < NR_BLOCKS; ++block) { | 325 | for (block = 0; block < NR_BLOCKS; ++block) { |
@@ -299,41 +344,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c) | |||
299 | (high & MASK_LOCKED_HI)) | 344 | (high & MASK_LOCKED_HI)) |
300 | continue; | 345 | continue; |
301 | 346 | ||
302 | if (!block) | 347 | offset = prepare_threshold_block(bank, block, address, offset, high); |
303 | per_cpu(bank_map, cpu) |= (1 << bank); | ||
304 | |||
305 | memset(&b, 0, sizeof(b)); | ||
306 | b.cpu = cpu; | ||
307 | b.bank = bank; | ||
308 | b.block = block; | ||
309 | b.address = address; | ||
310 | b.interrupt_capable = lvt_interrupt_supported(bank, high); | ||
311 | |||
312 | if (!b.interrupt_capable) | ||
313 | goto init; | ||
314 | |||
315 | b.interrupt_enable = 1; | ||
316 | |||
317 | if (mce_flags.smca) { | ||
318 | u32 smca_low, smca_high; | ||
319 | |||
320 | /* Gather LVT offset for thresholding: */ | ||
321 | if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high)) | ||
322 | break; | ||
323 | |||
324 | new = (smca_low & SMCA_THR_LVT_OFF) >> 12; | ||
325 | } else { | ||
326 | new = (high & MASK_LVTOFF_HI) >> 20; | ||
327 | } | ||
328 | |||
329 | offset = setup_APIC_mce_threshold(offset, new); | ||
330 | |||
331 | if ((offset == new) && | ||
332 | (mce_threshold_vector != amd_threshold_interrupt)) | ||
333 | mce_threshold_vector = amd_threshold_interrupt; | ||
334 | |||
335 | init: | ||
336 | mce_threshold_block_init(&b, offset); | ||
337 | } | 348 | } |
338 | } | 349 | } |
339 | 350 | ||