author		Peter Zijlstra <peterz@infradead.org>	2014-11-05 04:36:45 -0500
committer	Ingo Molnar <mingo@kernel.org>	2015-02-18 11:16:15 -0500
commit		2c44b1936bb3b135a3fac8b3493394d42e51cf70 (patch)
tree		b9b212a14c2ce1043ac7537678c5415a03ceb2ed /include
parent		aa54ae9b87b83af7edabcc34a299e7e014609af4 (diff)
perf/x86/intel: Expose LBR callstack to user space tooling
With the LBR call stack feature enabled, there are three callchain options. Enable the third callchain option (LBR call stack) for user space tooling.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Arnaldo Carvalho de Melo <acme@kernel.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Cc: linux-api@vger.kernel.org
Link: http://lkml.kernel.org/r/20141105093759.GQ10501@worktop.programming.kicks-ass.net
Signed-off-by: Ingo Molnar <mingo@kernel.org>
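For context (not part of this patch): a user space tool opts into the new mode by setting the PERF_SAMPLE_BRANCH_CALL_STACK bit in perf_event_attr.branch_sample_type. The snippet below is a minimal sketch of such a perf_event_open() call; the cycles event, sample period, and user-only filtering are illustrative choices, not requirements imposed by this change.

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

/* Sketch: open a sampling event that records the LBR call stack. */
static int open_lbr_callstack_event(pid_t pid)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* example event */
	attr.sample_period = 100000;
	attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
	/* The option exposed by this patch; here restricted to user space branches. */
	attr.branch_sample_type = PERF_SAMPLE_BRANCH_CALL_STACK |
				  PERF_SAMPLE_BRANCH_USER;
	attr.exclude_kernel = 1;

	return syscall(__NR_perf_event_open, &attr, pid,
		       -1 /* any CPU */, -1 /* no group */, 0);
}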
Diffstat (limited to 'include')
-rw-r--r--	include/uapi/linux/perf_event.h	16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index e46b93279e3d..1e3cd07cf76e 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -166,6 +166,8 @@ enum perf_branch_sample_type_shift {
 	PERF_SAMPLE_BRANCH_NO_TX_SHIFT		= 9, /* not in transaction */
 	PERF_SAMPLE_BRANCH_COND_SHIFT		= 10, /* conditional branches */
 
+	PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT	= 11, /* call/ret stack */
+
 	PERF_SAMPLE_BRANCH_MAX_SHIFT		/* non-ABI */
 };
 
@@ -175,18 +177,16 @@ enum perf_branch_sample_type {
 	PERF_SAMPLE_BRANCH_HV		= 1U << PERF_SAMPLE_BRANCH_HV_SHIFT,
 
 	PERF_SAMPLE_BRANCH_ANY		= 1U << PERF_SAMPLE_BRANCH_ANY_SHIFT,
-	PERF_SAMPLE_BRANCH_ANY_CALL	=
-				1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
-	PERF_SAMPLE_BRANCH_ANY_RETURN	=
-				1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
-	PERF_SAMPLE_BRANCH_IND_CALL	=
-				1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
-	PERF_SAMPLE_BRANCH_ABORT_TX	=
-				1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
+	PERF_SAMPLE_BRANCH_ANY_CALL	= 1U << PERF_SAMPLE_BRANCH_ANY_CALL_SHIFT,
+	PERF_SAMPLE_BRANCH_ANY_RETURN	= 1U << PERF_SAMPLE_BRANCH_ANY_RETURN_SHIFT,
+	PERF_SAMPLE_BRANCH_IND_CALL	= 1U << PERF_SAMPLE_BRANCH_IND_CALL_SHIFT,
+	PERF_SAMPLE_BRANCH_ABORT_TX	= 1U << PERF_SAMPLE_BRANCH_ABORT_TX_SHIFT,
 	PERF_SAMPLE_BRANCH_IN_TX	= 1U << PERF_SAMPLE_BRANCH_IN_TX_SHIFT,
 	PERF_SAMPLE_BRANCH_NO_TX	= 1U << PERF_SAMPLE_BRANCH_NO_TX_SHIFT,
 	PERF_SAMPLE_BRANCH_COND		= 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
 
+	PERF_SAMPLE_BRANCH_CALL_STACK	= 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+
 	PERF_SAMPLE_BRANCH_MAX		= 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };
 
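For illustration only (not part of this patch): with PERF_SAMPLE_BRANCH_STACK set in sample_type, each sample carries the branch records as a count followed by an array of struct perf_branch_entry, and in call-stack mode those entries approximate the current call chain, most recent call first. A minimal sketch of walking that payload, with the surrounding ring-buffer parsing omitted:

#include <stdint.h>
#include <stdio.h>
#include <linux/perf_event.h>

/* Sketch: print the branch entries delivered with one sample. */
static void dump_branch_stack(uint64_t nr, const struct perf_branch_entry *entries)
{
	for (uint64_t i = 0; i < nr; i++)
		printf("  %#llx -> %#llx\n",
		       (unsigned long long)entries[i].from,
		       (unsigned long long)entries[i].to);
}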