author    OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>  2009-12-06 06:08:24 -0500
committer Ingo Molnar <mingo@elte.hu>                   2009-12-06 12:15:01 -0500
commit    180f95e29aa8782c019caa64ede2a28d8ab62564 (patch)
tree      eefd1631820b00992f4495cc8cdba66a9155ee6c /tools/perf/builtin-trace.c
parent    028c515253761084c6594bf9ac9b194b51d87065 (diff)
perf: Make common SAMPLE_EVENT parser
Currently, sample event data is parsed separately by each command, and each parser assumes the data contains nothing beyond the fields that command expects. (E.g. timechart, trace, etc. cannot parse an event that also carries PERF_SAMPLE_CALLCHAIN.) So even if we record a superset of the data for several commands at once, those commands cannot parse it.

To fix this, add a common sample event parser and use it to parse sample events correctly. (PERF_SAMPLE_READ is unsupported for now, though; it does not appear to be in use.)

Signed-off-by: OGAWA Hirofumi <hirofumi@mail.parknet.co.jp>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <87hbs48imv.fsf@devron.myhome.or.jp>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
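For context, a minimal sketch of what such a common parser can look like: it walks the variable-length sample payload in the order fixed by the PERF_SAMPLE_* bits, so a caller can skip past fields it does not use (a callchain, for instance) and still find the ones it needs. This is an illustration under assumptions, not the event__parse_sample helper actually introduced by the commit: the struct sample_data field names follow the usage visible in the diff below, while parse_sample_sketch, event->sample.array and the addr/id/stream_id/callchain handling are assumed for the sketch.

struct sample_data {		/* field names as used in the diff below */
	u64 ip;
	u32 pid, tid;
	u64 time;
	u64 addr;
	u64 id;
	u64 stream_id;
	u32 cpu;
	u64 period;
	struct ip_callchain *callchain;
	u32 raw_size;
	void *raw_data;
};

/*
 * Illustrative sketch only: walk the u64 words that follow the event
 * header in PERF_SAMPLE_* bit order and fill in struct sample_data.
 * "event->sample.array" is an assumed accessor for that payload.
 */
static int parse_sample_sketch(event_t *event, u64 type,
			       struct sample_data *data)
{
	u64 *array = event->sample.array;

	if (type & PERF_SAMPLE_IP)
		data->ip = *array++;

	if (type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;
		data->pid = p[0];
		data->tid = p[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME)
		data->time = *array++;

	if (type & PERF_SAMPLE_ADDR)
		data->addr = *array++;

	if (type & PERF_SAMPLE_ID)
		data->id = *array++;

	if (type & PERF_SAMPLE_STREAM_ID)
		data->stream_id = *array++;

	if (type & PERF_SAMPLE_CPU) {
		u32 *p = (u32 *)array;
		data->cpu = p[0];	/* the second u32 is reserved padding */
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD)
		data->period = *array++;

	/* PERF_SAMPLE_READ is left unsupported, as noted above. */

	if (type & PERF_SAMPLE_CALLCHAIN) {
		data->callchain = (struct ip_callchain *)array;
		array += 1 + data->callchain->nr;	/* u64 nr + nr entries */
	}

	if (type & PERF_SAMPLE_RAW) {
		u32 *p = (u32 *)array;
		data->raw_size = *p;
		data->raw_data = p + 1;
	}

	return 0;
}

With such a helper in place, a command only needs to pass its recorded sample_type and read the struct fields it cares about, which is exactly what the builtin-trace.c hunk below switches to.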
Diffstat (limited to 'tools/perf/builtin-trace.c')
-rw-r--r--  tools/perf/builtin-trace.c  48
1 file changed, 15 insertions(+), 33 deletions(-)
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index abb914aa7be6..c2fcc34486f5 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -66,58 +66,40 @@ static u64 sample_type;
 
 static int process_sample_event(event_t *event)
 {
-	u64 ip = event->ip.ip;
-	u64 timestamp = -1;
-	u32 cpu = -1;
-	u64 period = 1;
-	void *more_data = event->ip.__more_data;
-	struct thread *thread = threads__findnew(event->ip.pid);
-
-	if (sample_type & PERF_SAMPLE_TIME) {
-		timestamp = *(u64 *)more_data;
-		more_data += sizeof(u64);
-	}
+	struct sample_data data;
+	struct thread *thread;
 
-	if (sample_type & PERF_SAMPLE_CPU) {
-		cpu = *(u32 *)more_data;
-		more_data += sizeof(u32);
-		more_data += sizeof(u32); /* reserved */
-	}
+	memset(&data, 0, sizeof(data));
+	data.time = -1;
+	data.cpu = -1;
+	data.period = 1;
 
-	if (sample_type & PERF_SAMPLE_PERIOD) {
-		period = *(u64 *)more_data;
-		more_data += sizeof(u64);
-	}
+	event__parse_sample(event, sample_type, &data);
 
 	dump_printf("(IP, %d): %d/%d: %p period: %Ld\n",
 		event->header.misc,
-		event->ip.pid, event->ip.tid,
-		(void *)(long)ip,
-		(long long)period);
+		data.pid, data.tid,
+		(void *)(long)data.ip,
+		(long long)data.period);
 
+	thread = threads__findnew(event->ip.pid);
 	if (thread == NULL) {
 		pr_debug("problem processing %d event, skipping it.\n",
 			event->header.type);
 		return -1;
 	}
 
-	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
-
 	if (sample_type & PERF_SAMPLE_RAW) {
-		struct {
-			u32 size;
-			char data[0];
-		} *raw = more_data;
-
 		/*
 		 * FIXME: better resolve from pid from the struct trace_entry
 		 * field, although it should be the same than this perf
 		 * event pid
 		 */
-		scripting_ops->process_event(cpu, raw->data, raw->size,
-					     timestamp, thread->comm);
+		scripting_ops->process_event(data.cpu, data.raw_data,
+					     data.raw_size,
+					     data.time, thread->comm);
 	}
-	event__stats.total += period;
+	event__stats.total += data.period;
 
 	return 0;
 }