aboutsummaryrefslogtreecommitdiffstats
path: root/include/linux/perf_counter.h
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-06-25 05:27:12 -0400
committerIngo Molnar <mingo@elte.hu>2009-06-25 15:39:08 -0400
commite6e18ec79b023d5fe84226cef533cf0e3770ce93 (patch)
tree6fc1bd9afd21454864abe2aec6a0e35e17d47f04 /include/linux/perf_counter.h
parentbfbd3381e63aa2a14c6706afb50ce4630aa0d9a2 (diff)
perf_counter: Rework the sample ABI
The PERF_EVENT_READ implementation made me realize we don't actually need the sample_type in the output sample, since we already have that in the perf_counter_attr information. Therefore, remove the PERF_EVENT_MISC_OVERFLOW bit and the event->type overloading, and simply put counter overflow samples in a PERF_EVENT_SAMPLE type. This also fixes the issue that event->type was only 32-bit and sample_type had 64 usable bits. Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl> LKML-Reference: <new-submission> Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'include/linux/perf_counter.h')
-rw-r--r--include/linux/perf_counter.h10
1 files changed, 5 insertions, 5 deletions
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index de70a10b5ec8..3078e23c91eb 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -262,7 +262,6 @@ struct perf_counter_mmap_page {
 #define PERF_EVENT_MISC_KERNEL		(1 << 0)
 #define PERF_EVENT_MISC_USER		(2 << 0)
 #define PERF_EVENT_MISC_HYPERVISOR	(3 << 0)
-#define PERF_EVENT_MISC_OVERFLOW	(1 << 2)
 
 struct perf_event_header {
 	__u32	type;
@@ -348,9 +347,6 @@ enum perf_event_type {
 	PERF_EVENT_READ			= 8,
 
 	/*
-	 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field
-	 * will be PERF_SAMPLE_*
-	 *
 	 * struct {
 	 *	struct perf_event_header	header;
 	 *
@@ -358,8 +354,9 @@ enum perf_event_type {
 	 *	{ u32			pid, tid; } && PERF_SAMPLE_TID
 	 *	{ u64			time;     } && PERF_SAMPLE_TIME
 	 *	{ u64			addr;     } && PERF_SAMPLE_ADDR
-	 *	{ u64			config;   } && PERF_SAMPLE_CONFIG
+	 *	{ u64			id;       } && PERF_SAMPLE_ID
 	 *	{ u32			cpu, res; } && PERF_SAMPLE_CPU
+	 *	{ u64			period;   } && PERF_SAMPLE_PERIOD
 	 *
 	 *	{ u64			nr;
 	 *	  { u64 id, val; }	cnt[nr];  } && PERF_SAMPLE_GROUP
@@ -368,6 +365,9 @@ enum perf_event_type {
 	 *	  u64			ips[nr];  } && PERF_SAMPLE_CALLCHAIN
 	 * };
 	 */
+	PERF_EVENT_SAMPLE		= 9,
+
+	PERF_EVENT_MAX,			/* non-ABI */
 };
 
 enum perf_callchain_context {