author		Stephane Eranian <eranian@google.com>	2015-10-13 03:09:09 -0400
committer	Ingo Molnar <mingo@kernel.org>	2015-10-20 04:30:53 -0400
commit		d892819faa6860d469aae71d70c336b391c25505
tree		0773213c84d278a0504d5ee1f0990c0b18110e40 /arch/x86/kernel/cpu/perf_event_intel_lbr.c
parent		c229bf9dc179d2023e185c0f705bdf68484c1e73
perf/x86: Add support for PERF_SAMPLE_BRANCH_CALL
This patch enables support for PERF_SAMPLE_BRANCH_CALL on Intel x86 processors. When the processor supports LBR filtering, the selection is done in hardware; otherwise, the filter is applied in software. Note that we chose to include zero-length calls because they also represent calls.

Signed-off-by: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: khandual@linux.vnet.ibm.com
Link: http://lkml.kernel.org/r/1444720151-10275-3-git-send-email-eranian@google.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
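For context, a minimal user-space sketch of how this filter would be requested through perf_event_open(2). The event type, sampling period, and privilege filter below are illustrative assumptions, not part of the patch; the function name is hypothetical:

	/* Sketch: sample direct calls only (assumed event and period). */
	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	static int open_call_branch_event(pid_t pid)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* assumed event */
		attr.sample_period = 100000;		/* assumed period */
		attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
		/* the new filter bit from this series: direct calls only */
		attr.branch_sample_type = PERF_SAMPLE_BRANCH_CALL |
					  PERF_SAMPLE_BRANCH_USER;

		/* cpu = -1 (any), group_fd = -1, flags = 0 */
		return syscall(__NR_perf_event_open, &attr, pid, -1, -1, 0);
	}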
Diffstat (limited to 'arch/x86/kernel/cpu/perf_event_intel_lbr.c')
-rw-r--r--	arch/x86/kernel/cpu/perf_event_intel_lbr.c	4
1 file changed, 4 insertions(+), 0 deletions(-)
diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
index ad0b8b0490a0..bfd0b717e944 100644
--- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c
+++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c
@@ -555,6 +555,8 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
 	if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
 		mask |= X86_BR_IND_JMP;
 
+	if (br_type & PERF_SAMPLE_BRANCH_CALL)
+		mask |= X86_BR_CALL | X86_BR_ZERO_CALL;
 	/*
 	 * stash actual user request into reg, it may
 	 * be used by fixup code for some CPU
@@ -890,6 +892,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
 	[PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]	= LBR_IND_CALL,
 	[PERF_SAMPLE_BRANCH_COND_SHIFT]		= LBR_JCC,
 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
+	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
 };
 
 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
@@ -905,6 +908,7 @@ static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
 	[PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]	= LBR_REL_CALL | LBR_IND_CALL
 						| LBR_RETURN | LBR_CALL_STACK,
 	[PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]	= LBR_IND_JMP,
+	[PERF_SAMPLE_BRANCH_CALL_SHIFT]		= LBR_REL_CALL,
 };
 
 /* core */
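Each sample taken with this filter carries a PERF_SAMPLE_BRANCH_STACK payload. A hedged sketch of walking those entries once a PERF_RECORD_SAMPLE payload has been located in the mmap ring buffer (layout per perf_event_open(2); the helper name is hypothetical):

	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <stdio.h>

	/* payload points at the u64 'nr' field of the branch stack. */
	static void dump_call_branches(const uint64_t *payload)
	{
		uint64_t nr = payload[0];
		const struct perf_branch_entry *br =
			(const struct perf_branch_entry *)(payload + 1);
		uint64_t i;

		/* with PERF_SAMPLE_BRANCH_CALL every entry is a direct call */
		for (i = 0; i < nr; i++)
			printf("call %#llx -> %#llx%s\n",
			       (unsigned long long)br[i].from,
			       (unsigned long long)br[i].to,
			       br[i].mispred ? " (mispredicted)" : "");
	}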