aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorYazen Ghannam <Yazen.Ghannam@amd.com>2016-07-08 05:09:39 -0400
committerIngo Molnar <mingo@kernel.org>2016-07-08 05:29:26 -0400
commit340e983ab8afd02b59d698dd1365d7773bf136b3 (patch)
tree70ea850331c7285b7672ea13488c78e487e6ed43
parent955d1427a91b18f53e082bd7c19c40ce13b0a0f4 (diff)
x86/RAS/AMD: Reduce the number of IPIs when prepping error injection
We currently use wrmsr_on_cpu() 4 times when prepping for an error injection. This will generate 4 IPIs for each MSR write. We can reduce the number of IPIs to 1 by grouping the MSR writes and executing them serially on the appropriate CPU. Suggested-by: Borislav Petkov <bp@suse.de> Signed-off-by: Yazen Ghannam <Yazen.Ghannam@amd.com> Signed-off-by: Borislav Petkov <bp@suse.de> Cc: Andy Lutomirski <luto@kernel.org> Cc: Aravind Gopalakrishnan <aravindksg.lkml@gmail.com> Cc: Borislav Petkov <bp@alien8.de> Cc: Brian Gerst <brgerst@gmail.com> Cc: Denys Vlasenko <dvlasenk@redhat.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Josh Poimboeuf <jpoimboe@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Tony Luck <tony.luck@intel.com> Cc: linux-edac <linux-edac@vger.kernel.org> Link: http://lkml.kernel.org/r/1467968983-4874-3-git-send-email-bp@alien8.de Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	arch/x86/ras/mce_amd_inj.c	58
1 file changed, 28 insertions(+), 30 deletions(-)
diff --git a/arch/x86/ras/mce_amd_inj.c b/arch/x86/ras/mce_amd_inj.c
index e69f4701a076..1104515d5ad2 100644
--- a/arch/x86/ras/mce_amd_inj.c
+++ b/arch/x86/ras/mce_amd_inj.c
@@ -241,6 +241,31 @@ static void toggle_nb_mca_mst_cpu(u16 nid)
241		 __func__, PCI_FUNC(F3->devfn), NBCFG);
242}
243
/*
 * prepare_msrs() - program all error-injection MSRs on the current CPU.
 *
 * Executed on the target CPU via smp_call_function_single() (see the
 * do_inject() change below), so every wrmsrl() here runs locally and only
 * one IPI is needed, instead of one wrmsr_on_cpu() IPI per MSR write.
 *
 * @info: pointer to the struct mce describing the error to inject; it is
 *        copied by value before any MSR is written.
 */
244static void prepare_msrs(void *info)
245{
246	struct mce i_mce = *(struct mce *)info;
	/* MCA bank whose per-bank MSRs are written below. */
247	u8 b = i_mce.bank;
248
	/* Global machine-check status is written first. */
249	wrmsrl(MSR_IA32_MCG_STATUS, i_mce.mcgstatus);
250
	/* Scalable MCA CPUs use the per-bank SMCA MSR addresses. */
251	if (boot_cpu_has(X86_FEATURE_SMCA)) {
		/*
		 * For DFR_INT_INJ injections, status/addr go into the
		 * DESTAT/DEADDR registers rather than STATUS/ADDR —
		 * presumably the deferred-error variants, judging by the
		 * DFR_/DE prefixes; confirm against the AMD SMCA docs.
		 */
252		if (i_mce.inject_flags == DFR_INT_INJ) {
253			wrmsrl(MSR_AMD64_SMCA_MCx_DESTAT(b), i_mce.status);
254			wrmsrl(MSR_AMD64_SMCA_MCx_DEADDR(b), i_mce.addr);
255		} else {
256			wrmsrl(MSR_AMD64_SMCA_MCx_STATUS(b), i_mce.status);
257			wrmsrl(MSR_AMD64_SMCA_MCx_ADDR(b), i_mce.addr);
258		}
259
260		wrmsrl(MSR_AMD64_SMCA_MCx_MISC(b), i_mce.misc);
	/* Pre-SMCA CPUs: legacy MCi_{STATUS,ADDR,MISC} MSRs. */
261	} else {
262		wrmsrl(MSR_IA32_MCx_STATUS(b), i_mce.status);
263		wrmsrl(MSR_IA32_MCx_ADDR(b), i_mce.addr);
264		wrmsrl(MSR_IA32_MCx_MISC(b), i_mce.misc);
265	}
266
267}
268
269static void do_inject(void)
270{
271	u64 mcg_status = 0;
@@ -287,36 +312,9 @@ static void do_inject(void)
312
288 toggle_hw_mce_inject(cpu, true); 313 toggle_hw_mce_inject(cpu, true);
289 314
-290	wrmsr_on_cpu(cpu, MSR_IA32_MCG_STATUS,
-291		     (u32)mcg_status, (u32)(mcg_status >> 32));
-292
-293	if (boot_cpu_has(X86_FEATURE_SMCA)) {
-294		if (inj_type == DFR_INT_INJ) {
-295			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DESTAT(b),
-296				     (u32)i_mce.status, (u32)(i_mce.status >> 32));
-297
-298			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_DEADDR(b),
-299				     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-300		} else {
-301			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_STATUS(b),
-302				     (u32)i_mce.status, (u32)(i_mce.status >> 32));
-303
-304			wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_ADDR(b),
-305				     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-306		}
-307
-308		wrmsr_on_cpu(cpu, MSR_AMD64_SMCA_MCx_MISC(b),
-309			     (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
-310	} else {
-311		wrmsr_on_cpu(cpu, MSR_IA32_MCx_STATUS(b),
-312			     (u32)i_mce.status, (u32)(i_mce.status >> 32));
-313
-314		wrmsr_on_cpu(cpu, MSR_IA32_MCx_ADDR(b),
-315			     (u32)i_mce.addr, (u32)(i_mce.addr >> 32));
-316
-317		wrmsr_on_cpu(cpu, MSR_IA32_MCx_MISC(b),
-318			     (u32)i_mce.misc, (u32)(i_mce.misc >> 32));
-319	}
+315	i_mce.mcgstatus = mcg_status;
+316	i_mce.inject_flags = inj_type;
+317	smp_call_function_single(cpu, prepare_msrs, &i_mce, 0);
318
319	toggle_hw_mce_inject(cpu, false);
320