author		David Carrillo-Cisneros <davidcc@google.com>	2016-06-21 14:31:13 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-06-27 05:34:20 -0400
commit		71adae99ed187de9fcf988cc8873ee2c3af3385f (patch)
tree		a7904984a9867ad248a2dadd5ecb0f16ed6c8a85
parent		3812bba84f3d721ff7dc3bb90360bc5ed6771994 (diff)
perf/x86/intel: Add MSR_LAST_BRANCH_FROM_x quirk for ctx switch
Add a quirk to the context switch code to save/restore the value of
MSR_LAST_BRANCH_FROM_x when LBR is enabled and there is a potential for
kernel addresses to end up in the lbr_from register.

To test this patch, use a perf tool and kernel carrying the next patch
in this series. That patch removes the workaround that masked the
hardware bug:

  $ ./lbr_perf record --call-graph lbr -e cycles:k sleep 1

where lbr_perf is the patched perf tool that allows specifying :k in
LBR mode. The above command will trigger a #GP fault:

  WARNING: CPU: 28 PID: 14096 at arch/x86/mm/extable.c:65 ex_handler_wrmsr_unsafe+0x70/0x80
  unchecked MSR access error: WRMSR to 0x681 (tried to write 0x1fffffff81010794)
  ...
  Call Trace:
   [<ffffffff8167af49>] dump_stack+0x4d/0x63
   [<ffffffff810b9b15>] __warn+0xe5/0x100
   [<ffffffff810b9be9>] warn_slowpath_fmt+0x49/0x50
   [<ffffffff810abb40>] ex_handler_wrmsr_unsafe+0x70/0x80
   [<ffffffff810abc42>] fixup_exception+0x42/0x50
   [<ffffffff81079d1a>] do_general_protection+0x8a/0x160
   [<ffffffff81684ec2>] general_protection+0x22/0x30
   [<ffffffff810101b9>] ? intel_pmu_lbr_sched_task+0xc9/0x380
   [<ffffffff81009d7c>] intel_pmu_sched_task+0x3c/0x60
   [<ffffffff81003a2b>] x86_pmu_sched_task+0x1b/0x20
   [<ffffffff81192a5b>] perf_pmu_sched_task+0x6b/0xb0
   [<ffffffff8119746d>] __perf_event_task_sched_in+0x7d/0x150
   [<ffffffff810dd9dc>] finish_task_switch+0x15c/0x200
   [<ffffffff8167f894>] __schedule+0x274/0x6cc
   [<ffffffff8167fdd9>] schedule+0x39/0x90
   [<ffffffff81675398>] exit_to_usermode_loop+0x39/0x89
   [<ffffffff810028ce>] prepare_exit_to_usermode+0x2e/0x30
   [<ffffffff81683c1b>] retint_user+0x8/0x10

Signed-off-by: David Carrillo-Cisneros <davidcc@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Reviewed-by: Stephane Eranian <eranian@google.com>
Reviewed-by: Andi Kleen <ak@linux.intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Link: http://lkml.kernel.org/r/1466533874-52003-5-git-send-email-davidcc@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
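For reference, below is a minimal userspace sketch of the "61-bit sign
extension" and TSX-flag masking the quirk relies on. It is not kernel
code: the helper names (quirk_rd, signext_61), the main() driver, the
assumed bit positions of the IN_TX/ABORT flags (62 and 61), and the
assumption that the write path needs the value re-sign-extended before
WRMSR are all made up for illustration; only the masking logic and the
0x1fffffff81010794 value come from the patch and its trace.

#include <stdint.h>
#include <stdio.h>

/* Assumed bit positions of the TSX flags in MSR_LAST_BRANCH_FROM_x. */
#define LBR_FROM_FLAG_IN_TX	(1ULL << 62)
#define LBR_FROM_FLAG_ABORT	(1ULL << 61)

/* Read side, mirroring lbr_from_signext_quirk_rd() from the patch:
 * with TSX disabled the quirk is active, so bits 62:61 carry sign
 * extension rather than TSX flags and must be reported as OFF. */
static uint64_t quirk_rd(uint64_t val)
{
	return val & ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
}

/* Sign-extend a 61-bit value (bits 60:0) to 64 bits: shift the sign
 * bit (bit 60) up to bit 63, then arithmetic-shift back down. Whether
 * the kernel's write-side helper uses exactly this arithmetic is an
 * assumption; the point is that the MSR wants the canonical form. */
static uint64_t signext_61(uint64_t val)
{
	return (uint64_t)(((int64_t)(val << 3)) >> 3);
}

int main(void)
{
	/* The value the failing WRMSR tried to write, taken from the trace. */
	uint64_t bad = 0x1fffffff81010794ULL;

	printf("raw value:     0x%016llx\n", (unsigned long long)bad);
	printf("61-bit extend: 0x%016llx\n", (unsigned long long)signext_61(bad));
	/* No-op here since bits 62:61 are already clear in this value: */
	printf("flags masked:  0x%016llx\n", (unsigned long long)quirk_rd(bad));
	/* signext_61() yields 0xffffffff81010794, the canonical kernel
	 * address; writing the raw, non-extended form is what raises #GP. */
	return 0;
}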
-rw-r--r--	arch/x86/events/intel/lbr.c | 24
1 file changed, 21 insertions(+), 3 deletions(-)
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 0da0eb0d875d..52bef15c7615 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -284,6 +284,20 @@ inline u64 lbr_from_signext_quirk_wr(u64 val)
 	return val;
 }
 
+/*
+ * If quirk is needed, ensure sign extension is 61 bits:
+ */
+u64 lbr_from_signext_quirk_rd(u64 val)
+{
+	if (static_branch_unlikely(&lbr_from_quirk_key))
+		/*
+		 * Quirk is on when TSX is not enabled. Therefore TSX
+		 * flags must be read as OFF.
+		 */
+		val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
+	return val;
+}
+
 static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 {
 	int i;
@@ -300,7 +314,8 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 	tos = task_ctx->tos;
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
-		wrmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+		wrmsrl(x86_pmu.lbr_from + lbr_idx,
+		       lbr_from_signext_quirk_wr(task_ctx->lbr_from[i]));
 		wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
@@ -313,7 +328,7 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
 	int i;
 	unsigned lbr_idx, mask;
-	u64 tos;
+	u64 tos, val;
 
 	if (task_ctx->lbr_callstack_users == 0) {
 		task_ctx->lbr_stack_state = LBR_NONE;
@@ -324,7 +339,8 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 	tos = intel_pmu_lbr_tos();
 	for (i = 0; i < tos; i++) {
 		lbr_idx = (tos - i) & mask;
-		rdmsrl(x86_pmu.lbr_from + lbr_idx, task_ctx->lbr_from[i]);
+		rdmsrl(x86_pmu.lbr_from + lbr_idx, val);
+		task_ctx->lbr_from[i] = lbr_from_signext_quirk_rd(val);
 		rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
 		if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
 			rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
@@ -502,6 +518,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
 		int lbr_flags = lbr_desc[lbr_format];
 
 		rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
+		from = lbr_from_signext_quirk_rd(from);
+
 		rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
 
 		if (lbr_format == LBR_FORMAT_INFO && need_info) {