aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorAlexander Shishkin <alexander.shishkin@linux.intel.com>2015-01-14 07:18:20 -0500
committerIngo Molnar <mingo@kernel.org>2015-04-02 11:14:19 -0400
commit4807034248bed58d49a4f9f450c024e3b0f58577 (patch)
treef5d76d32a1046852d6748d9405c3706910a1d1d6
parented69628b3b04578179334393d7f5fe60a2681f1c (diff)
perf/x86: Mark Intel PT and LBR/BTS as mutually exclusive
Intel PT cannot be used at the same time as LBR or BTS and will cause a general protection fault if they are used together. In order to avoid fixing up GPs in the fast path, instead we disallow creating LBR/BTS events when PT events are present and vice versa. Signed-off-by: Alexander Shishkin <alexander.shishkin@linux.intel.com> Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Borislav Petkov <bp@alien8.de> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: H. Peter Anvin <hpa@zytor.com> Cc: Kaixu Xia <kaixu.xia@linaro.org> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Mike Galbraith <efault@gmx.de> Cc: Paul Mackerras <paulus@samba.org> Cc: Robert Richter <rric@kernel.org> Cc: Stephane Eranian <eranian@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: acme@infradead.org Cc: adrian.hunter@intel.com Cc: kan.liang@intel.com Cc: markus.t.metzger@intel.com Cc: mathieu.poirier@linaro.org Link: http://lkml.kernel.org/r/1421237903-181015-12-git-send-email-alexander.shishkin@linux.intel.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--arch/x86/kernel/cpu/perf_event.c43
-rw-r--r--arch/x86/kernel/cpu/perf_event.h40
-rw-r--r--arch/x86/kernel/cpu/perf_event_intel.c11
3 files changed, 94 insertions, 0 deletions
diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
index 0420ebcac116..549d01d6d996 100644
--- a/arch/x86/kernel/cpu/perf_event.c
+++ b/arch/x86/kernel/cpu/perf_event.c
@@ -263,6 +263,14 @@ static void hw_perf_event_destroy(struct perf_event *event)
263 } 263 }
264} 264}
265 265
/*
 * Destroy callback for LBR/BTS events: run the common x86 event destroy
 * path, then release this event's reference on the LBR/BTS-vs-PT
 * exclusion counter taken at event creation.
 */
void hw_perf_lbr_event_destroy(struct perf_event *event)
{
	hw_perf_event_destroy(event);

	/* undo the lbr/bts event accounting */
	x86_del_exclusive(x86_lbr_exclusive_lbr);
}
273
266static inline int x86_pmu_initialized(void) 274static inline int x86_pmu_initialized(void)
267{ 275{
268 return x86_pmu.handle_irq != NULL; 276 return x86_pmu.handle_irq != NULL;
@@ -302,6 +310,35 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
302 return x86_pmu_extra_regs(val, event); 310 return x86_pmu_extra_regs(val, event);
303} 311}
304 312
/*
 * Check if we can create event of a certain type (that no conflicting events
 * are present).
 *
 * Takes one reference on x86_pmu.lbr_exclusive[@what] on success.
 * Returns 0 on success, -EBUSY if a conflicting event type is active.
 */
int x86_add_exclusive(unsigned int what)
{
	int ret = -EBUSY, i;

	/*
	 * Fast path: if our type already has active events (count > 0),
	 * just take another reference — no conflicting type can be
	 * active, so no need for the mutex.
	 */
	if (atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what]))
		return 0;

	/*
	 * Slow path (0 -> 1 transition): serialize against concurrent
	 * first-time additions of other types, then verify no other
	 * type currently holds a reference before taking ours.
	 */
	mutex_lock(&pmc_reserve_mutex);
	for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++)
		if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
			goto out;

	atomic_inc(&x86_pmu.lbr_exclusive[what]);
	ret = 0;

out:
	mutex_unlock(&pmc_reserve_mutex);
	return ret;
}
336
/*
 * Drop one reference on x86_pmu.lbr_exclusive[@what], previously taken
 * by x86_add_exclusive().
 */
void x86_del_exclusive(unsigned int what)
{
	atomic_dec(&x86_pmu.lbr_exclusive[what]);
}
341
305int x86_setup_perfctr(struct perf_event *event) 342int x86_setup_perfctr(struct perf_event *event)
306{ 343{
307 struct perf_event_attr *attr = &event->attr; 344 struct perf_event_attr *attr = &event->attr;
@@ -346,6 +383,12 @@ int x86_setup_perfctr(struct perf_event *event)
346 /* BTS is currently only allowed for user-mode. */ 383 /* BTS is currently only allowed for user-mode. */
347 if (!attr->exclude_kernel) 384 if (!attr->exclude_kernel)
348 return -EOPNOTSUPP; 385 return -EOPNOTSUPP;
386
387 /* disallow bts if conflicting events are present */
388 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
389 return -EBUSY;
390
391 event->destroy = hw_perf_lbr_event_destroy;
349 } 392 }
350 393
351 hwc->config |= config; 394 hwc->config |= config;
diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h
index 87e5081f4cdc..47499661e8d4 100644
--- a/arch/x86/kernel/cpu/perf_event.h
+++ b/arch/x86/kernel/cpu/perf_event.h
@@ -408,6 +408,12 @@ union x86_pmu_config {
408 408
409#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value 409#define X86_CONFIG(args...) ((union x86_pmu_config){.bits = {args}}).value
410 410
/*
 * Indices into x86_pmu.lbr_exclusive[]: mutually exclusive event classes
 * (LBR/BTS cannot be used at the same time as Intel PT).
 */
enum {
	x86_lbr_exclusive_lbr,
	x86_lbr_exclusive_pt,
	x86_lbr_exclusive_max,
};
416
411/* 417/*
412 * struct x86_pmu - generic x86 pmu 418 * struct x86_pmu - generic x86 pmu
413 */ 419 */
@@ -506,6 +512,11 @@ struct x86_pmu {
506 bool lbr_double_abort; /* duplicated lbr aborts */ 512 bool lbr_double_abort; /* duplicated lbr aborts */
507 513
508 /* 514 /*
515 * Intel PT/LBR/BTS are exclusive
516 */
517 atomic_t lbr_exclusive[x86_lbr_exclusive_max];
518
519 /*
509 * Extra registers for events 520 * Extra registers for events
510 */ 521 */
511 struct extra_reg *extra_regs; 522 struct extra_reg *extra_regs;
@@ -603,6 +614,12 @@ static inline int x86_pmu_rdpmc_index(int index)
603 return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index; 614 return x86_pmu.rdpmc_index ? x86_pmu.rdpmc_index(index) : index;
604} 615}
605 616
617int x86_add_exclusive(unsigned int what);
618
619void x86_del_exclusive(unsigned int what);
620
621void hw_perf_lbr_event_destroy(struct perf_event *event);
622
606int x86_setup_perfctr(struct perf_event *event); 623int x86_setup_perfctr(struct perf_event *event);
607 624
608int x86_pmu_hw_config(struct perf_event *event); 625int x86_pmu_hw_config(struct perf_event *event);
@@ -689,6 +706,29 @@ static inline int amd_pmu_init(void)
689 706
690#ifdef CONFIG_CPU_SUP_INTEL 707#ifdef CONFIG_CPU_SUP_INTEL
691 708
709static inline bool intel_pmu_needs_lbr_smpl(struct perf_event *event)
710{
711 /* user explicitly requested branch sampling */
712 if (has_branch_stack(event))
713 return true;
714
715 /* implicit branch sampling to correct PEBS skid */
716 if (x86_pmu.intel_cap.pebs_trap && event->attr.precise_ip > 1 &&
717 x86_pmu.intel_cap.pebs_format < 2)
718 return true;
719
720 return false;
721}
722
723static inline bool intel_pmu_has_bts(struct perf_event *event)
724{
725 if (event->attr.config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
726 !event->attr.freq && event->hw.sample_period == 1)
727 return true;
728
729 return false;
730}
731
692int intel_pmu_save_and_restart(struct perf_event *event); 732int intel_pmu_save_and_restart(struct perf_event *event);
693 733
694struct event_constraint * 734struct event_constraint *
diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
index fc6dbc46af4a..b7b3ff21c832 100644
--- a/arch/x86/kernel/cpu/perf_event_intel.c
+++ b/arch/x86/kernel/cpu/perf_event_intel.c
@@ -1942,6 +1942,17 @@ static int intel_pmu_hw_config(struct perf_event *event)
1942 ret = intel_pmu_setup_lbr_filter(event); 1942 ret = intel_pmu_setup_lbr_filter(event);
1943 if (ret) 1943 if (ret)
1944 return ret; 1944 return ret;
1945
1946 /*
1947 * BTS is set up earlier in this path, so don't account twice
1948 */
1949 if (!intel_pmu_has_bts(event)) {
1950 /* disallow lbr if conflicting events are present */
1951 if (x86_add_exclusive(x86_lbr_exclusive_lbr))
1952 return -EBUSY;
1953
1954 event->destroy = hw_perf_lbr_event_destroy;
1955 }
1945 } 1956 }
1946 1957
1947 if (event->attr.type != PERF_TYPE_RAW) 1958 if (event->attr.type != PERF_TYPE_RAW)