author    Peter Zijlstra <peterz@infradead.org>    2016-06-23 04:44:49 -0400
committer Ingo Molnar <mingo@kernel.org>           2016-06-27 05:34:21 -0400
commit    d4cf1949f9689314aef962eea95df84a8288d097 (patch)
tree      97dd9211c7b91f1f775d66a50354acf5dd16783a
parent    71adae99ed187de9fcf988cc8873ee2c3af3385f (diff)
perf/x86/intel: Add {rd,wr}lbr_{to,from} wrappers
The whole rdmsr()/wrmsr() for lbr_from got a little unwieldy with the sign extension quirk, so provide a few simple wrappers to clean things up.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Carrillo-Cisneros <davidcc@google.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--   arch/x86/events/intel/lbr.c   53
1 file changed, 40 insertions(+), 13 deletions(-)
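As an aside for readers following the quirk handling: below is a minimal standalone sketch of the read-side wrapper pattern this patch introduces. It is not kernel code; mock_lbr_from[] and quirk_enabled are stand-ins for the real LBR MSRs and the lbr_from_quirk_key static branch, and the LBR_FROM_FLAG_* bit positions (61/62) are reproduced here on the assumption that they match the definitions in lbr.c.

/*
 * Standalone userspace sketch of the rdlbr_from() wrapper pattern.
 * Not the kernel implementation; the array and flag variable below
 * only model rdmsrl() and the static branch.
 */
#include <stdint.h>
#include <stdio.h>

#define LBR_FROM_FLAG_ABORT  (1ULL << 61)   /* TSX abort flag (assumed bit) */
#define LBR_FROM_FLAG_IN_TX  (1ULL << 62)   /* TSX in-transaction flag (assumed bit) */

static uint64_t mock_lbr_from[32];   /* stand-in for the LBR_FROM MSR bank */
static int quirk_enabled = 1;        /* stand-in for lbr_from_quirk_key */

/* Read side of the quirk: with TSX off, the TSX flags must read as OFF. */
static uint64_t lbr_from_signext_quirk_rd(uint64_t val)
{
        if (quirk_enabled)
                val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
        return val;
}

/* The wrapper keeps callers from open-coding the MSR read plus the quirk. */
static uint64_t rdlbr_from(unsigned int idx)
{
        return lbr_from_signext_quirk_rd(mock_lbr_from[idx]);
}

int main(void)
{
        /* Pretend the hardware sign-extended the top bits of entry 0. */
        mock_lbr_from[0] = 0xffff800012345678ULL;

        printf("raw value : %#llx\n", (unsigned long long)mock_lbr_from[0]);
        printf("rdlbr_from: %#llx\n", (unsigned long long)rdlbr_from(0));
        return 0;
}

In the diff below, rdlbr_from()/rdlbr_to() and wrlbr_from()/wrlbr_to() play the same role inside __intel_pmu_lbr_save(), __intel_pmu_lbr_restore() and intel_pmu_lbr_read_64(), replacing the open-coded rdmsrl()/wrmsrl() plus quirk calls.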
diff --git a/arch/x86/events/intel/lbr.c b/arch/x86/events/intel/lbr.c
index 52bef15c7615..cc4555a9e876 100644
--- a/arch/x86/events/intel/lbr.c
+++ b/arch/x86/events/intel/lbr.c
@@ -289,12 +289,42 @@ inline u64 lbr_from_signext_quirk_wr(u64 val)
  */
 u64 lbr_from_signext_quirk_rd(u64 val)
 {
-        if (static_branch_unlikely(&lbr_from_quirk_key))
+        if (static_branch_unlikely(&lbr_from_quirk_key)) {
                 /*
                  * Quirk is on when TSX is not enabled. Therefore TSX
                  * flags must be read as OFF.
                  */
                 val &= ~(LBR_FROM_FLAG_IN_TX | LBR_FROM_FLAG_ABORT);
+        }
+        return val;
+}
+
+static inline void wrlbr_from(unsigned int idx, u64 val)
+{
+        val = lbr_from_signext_quirk_wr(val);
+        wrmsrl(x86_pmu.lbr_from + idx, val);
+}
+
+static inline void wrlbr_to(unsigned int idx, u64 val)
+{
+        wrmsrl(x86_pmu.lbr_to + idx, val);
+}
+
+static inline u64 rdlbr_from(unsigned int idx)
+{
+        u64 val;
+
+        rdmsrl(x86_pmu.lbr_from + idx, val);
+
+        return lbr_from_signext_quirk_rd(val);
+}
+
+static inline u64 rdlbr_to(unsigned int idx)
+{
+        u64 val;
+
+        rdmsrl(x86_pmu.lbr_to + idx, val);
+
         return val;
 }
 
@@ -314,9 +344,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
         tos = task_ctx->tos;
         for (i = 0; i < tos; i++) {
                 lbr_idx = (tos - i) & mask;
-                wrmsrl(x86_pmu.lbr_from + lbr_idx,
-                       lbr_from_signext_quirk_wr(task_ctx->lbr_from[i]));
-                wrmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+                wrlbr_from(lbr_idx, task_ctx->lbr_from[i]);
+                wrlbr_to  (lbr_idx, task_ctx->lbr_to[i]);
+
                 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                         wrmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
         }
@@ -326,9 +356,9 @@ static void __intel_pmu_lbr_restore(struct x86_perf_task_context *task_ctx)
 
 static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
 {
-        int i;
         unsigned lbr_idx, mask;
-        u64 tos, val;
+        u64 tos;
+        int i;
 
         if (task_ctx->lbr_callstack_users == 0) {
                 task_ctx->lbr_stack_state = LBR_NONE;
@@ -339,9 +369,8 @@ static void __intel_pmu_lbr_save(struct x86_perf_task_context *task_ctx)
         tos = intel_pmu_lbr_tos();
         for (i = 0; i < tos; i++) {
                 lbr_idx = (tos - i) & mask;
-                rdmsrl(x86_pmu.lbr_from + lbr_idx, val);
-                task_ctx->lbr_from[i] = lbr_from_signext_quirk_rd(val);
-                rdmsrl(x86_pmu.lbr_to + lbr_idx, task_ctx->lbr_to[i]);
+                task_ctx->lbr_from[i] = rdlbr_from(lbr_idx);
+                task_ctx->lbr_to[i]   = rdlbr_to(lbr_idx);
                 if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_INFO)
                         rdmsrl(MSR_LBR_INFO_0 + lbr_idx, task_ctx->lbr_info[i]);
         }
@@ -517,10 +546,8 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc)
                 u16 cycles = 0;
                 int lbr_flags = lbr_desc[lbr_format];
 
-                rdmsrl(x86_pmu.lbr_from + lbr_idx, from);
-                from = lbr_from_signext_quirk_rd(from);
-
-                rdmsrl(x86_pmu.lbr_to + lbr_idx, to);
+                from = rdlbr_from(lbr_idx);
+                to   = rdlbr_to(lbr_idx);
 
                 if (lbr_format == LBR_FORMAT_INFO && need_info) {
                         u64 info;