author    Alexander Shishkin <alexander.shishkin@linux.intel.com>  2016-04-28 11:35:46 -0400
committer Ingo Molnar <mingo@kernel.org>  2016-05-05 04:16:28 -0400
commit    ccbebba4c6bfda8e3ef9e431ce2c3d91c5fc5a63 (patch)
tree      a74c2e0104acc52371857ced3723a956d222824e
parent    5101ef20f0ef1de79091a1fdb6b1a7f07565545a (diff)
perf/x86/intel/pt: Bypass PT vs. LBR exclusivity if the core supports it
Not all cores prevent using Intel PT and LBRs simultaneously, although
most of them still do as of today. This patch adds an opt-in flag for
such cores to disable mutual exclusivity between PT and LBR, and flips
it on for Goldmont.

Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/1461857746-31346-4-git-send-email-alexander.shishkin@linux.intel.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
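To illustrate the scheme this patch relaxes, below is a minimal user-space
model of the exclusivity counters plus the new bypass flag. The names
(add_exclusive, EXCL_LBR, and so on) merely mirror the kernel's; the real
code also has an inc-not-zero fast path and a mutex-guarded slow path,
which this race-prone sketch deliberately omits.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

enum { EXCL_LBR, EXCL_BTS, EXCL_PT, EXCL_MAX };

static atomic_int excl_cnt[EXCL_MAX];
static bool lbr_pt_coexist;	/* opt-in: core allows PT and LBR together */

static int add_exclusive(int what)
{
	if (lbr_pt_coexist)	/* bypass the whole exclusivity scheme */
		return 0;

	for (int i = 0; i < EXCL_MAX; i++)
		if (i != what && atomic_load(&excl_cnt[i]))
			return -1;	/* another facility holds the PMU */

	atomic_fetch_add(&excl_cnt[what], 1);
	return 0;
}

static void del_exclusive(int what)
{
	if (lbr_pt_coexist)	/* nothing was counted on the add side */
		return;
	atomic_fetch_sub(&excl_cnt[what], 1);
}

int main(void)
{
	/* Default: PT, LBR and BTS are mutually exclusive. */
	add_exclusive(EXCL_PT);
	printf("LBR while PT held:   %s\n",
	       add_exclusive(EXCL_LBR) ? "refused" : "granted");
	del_exclusive(EXCL_PT);

	/* Opt-in core (e.g. Goldmont): the check is skipped entirely. */
	lbr_pt_coexist = true;
	add_exclusive(EXCL_PT);
	printf("with lbr_pt_coexist: %s\n",
	       add_exclusive(EXCL_LBR) ? "refused" : "granted");
	return 0;
}

Note that in the kernel the flag is set once at PMU init time, so the add
and del helpers always agree on whether the bypass applies.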
 arch/x86/events/core.c       | 6 ++++++
 arch/x86/events/intel/core.c | 1 +
 arch/x86/events/perf_event.h | 1 +
 3 files changed, 8 insertions(+), 0 deletions(-)
diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 41d93d0e972b..5e5e76a52f58 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -360,6 +360,9 @@ int x86_add_exclusive(unsigned int what)
 {
 	int i;
 
+	if (x86_pmu.lbr_pt_coexist)
+		return 0;
+
 	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
 		mutex_lock(&pmc_reserve_mutex);
 		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
@@ -380,6 +383,9 @@ fail_unlock:
 
 void x86_del_exclusive(unsigned int what)
 {
+	if (x86_pmu.lbr_pt_coexist)
+		return;
+
 	atomic_dec(&x86_pmu.lbr_exclusive[what]);
 	atomic_dec(&active_events);
 }
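The fast path in x86_add_exclusive() hinges on atomic_inc_not_zero():
take another reference only if the counter is already nonzero, so a
second user of the same facility never touches pmc_reserve_mutex and
only the 0 -> 1 transition falls through to the locked slow path. A
user-space approximation of that primitive with a C11 compare-exchange
loop, as a sketch only (inc_not_zero here is not the kernel's
implementation):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Increment *v only if it is currently nonzero; true on success. */
static bool inc_not_zero(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 0) {
		if (atomic_compare_exchange_weak(v, &old, old + 1))
			return true;
		/* the failed CAS reloaded 'old'; retry with the new value */
	}
	return false;
}

int main(void)
{
	atomic_int refs = 0;

	/* Counter is zero: fast path refuses, caller must take the lock. */
	printf("first taker:  %s\n",
	       inc_not_zero(&refs) ? "fast path" : "slow path");
	atomic_store(&refs, 1);	/* slow path succeeded, count is now 1 */
	/* Counter is nonzero: subsequent takers stay lock-free. */
	printf("second taker: %s\n",
	       inc_not_zero(&refs) ? "fast path" : "slow path");
	return 0;
}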
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index 90ba3ae3074e..cd319400dc10 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3609,6 +3609,7 @@ __init int intel_pmu_init(void)
 		 */
 		x86_pmu.pebs_aliases = NULL;
 		x86_pmu.pebs_prec_dist = true;
+		x86_pmu.lbr_pt_coexist = true;
 		x86_pmu.flags |= PMU_FL_HAS_RSP_1;
 		pr_cont("Goldmont events, ");
 		break;
diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
index 7d62a02f49a4..8bd764df815d 100644
--- a/arch/x86/events/perf_event.h
+++ b/arch/x86/events/perf_event.h
@@ -601,6 +601,7 @@ struct x86_pmu {
 	u64		lbr_sel_mask;	   /* LBR_SELECT valid bits */
 	const int	*lbr_sel_map;	   /* lbr_select mappings */
 	bool		lbr_double_abort;  /* duplicated lbr aborts */
+	bool		lbr_pt_coexist;	   /* LBR may coexist with PT */
 
 	/*
 	 * Intel PT/LBR/BTS are exclusive