author		Arjan van de Ven <arjan@linux.intel.com>	2009-09-12 01:53:05 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-09-19 05:42:13 -0400
commit		10274989fd595db455874fc2c83272fb33f6b27b (patch)
tree		bb2396ee910f480b4859ca5c3554ed2f5a822d17 /tools/perf
parent		f48d55ce7871824eae3065f4d81956d7113eff19 (diff)
perf: Add the timechart tool
timechart is a tool to visualize what is going on in the system.
The user records a trace of system activity with
> perf record --timechart /usr/bin/some_command
and can then turn the output of this into an SVG file with
> perf timechart
which can be viewed with any SVG viewer; Inkscape works well
enough for me.
The idea behind timechart is to create an "infinitely zoomable"
picture: something that shows high-level information at a 1:1 zoom
level, but exposes more detail every time you zoom into a
specific area.
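As a quick end-to-end sketch (the file names below are just the tool's
defaults as set in this patch, and the SVG viewer is only an example):
> perf record --timechart /usr/bin/some_command   # writes perf.data
> perf timechart -i perf.data -o output.svg       # -i/-o default to these names
> inkscape output.svg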
Signed-off-by: Arjan van de Ven <arjan@linux.intel.com>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <20090912130713.6a77bbc0@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/Makefile             |    7
-rw-r--r--  tools/perf/builtin-timechart.c  | 1120
-rw-r--r--  tools/perf/builtin.h            |    1
-rw-r--r--  tools/perf/perf.c               |    1
4 files changed, 1129 insertions, 0 deletions
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 0388e36587a8..0aba8b6e9c54 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -382,6 +382,7 @@ BUILTIN_OBJS += builtin-list.o
 BUILTIN_OBJS += builtin-record.o
 BUILTIN_OBJS += builtin-report.o
 BUILTIN_OBJS += builtin-stat.o
+BUILTIN_OBJS += builtin-timechart.o
 BUILTIN_OBJS += builtin-top.o
 BUILTIN_OBJS += builtin-trace.o
 
@@ -712,6 +713,12 @@ builtin-help.o: builtin-help.c common-cmds.h PERF-CFLAGS
 		'-DPERF_MAN_PATH="$(mandir_SQ)"' \
 		'-DPERF_INFO_PATH="$(infodir_SQ)"' $<
 
+builtin-timechart.o: builtin-timechart.c common-cmds.h PERF-CFLAGS
+	$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \
+		'-DPERF_HTML_PATH="$(htmldir_SQ)"' \
+		'-DPERF_MAN_PATH="$(mandir_SQ)"' \
+		'-DPERF_INFO_PATH="$(infodir_SQ)"' $<
+
 $(BUILT_INS): perf$X
 	$(QUIET_BUILT_IN)$(RM) $@ && \
 	ln perf$X $@ 2>/dev/null || \
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
new file mode 100644
index 000000000000..00fac1b362fd
--- /dev/null
+++ b/tools/perf/builtin-timechart.c
@@ -0,0 +1,1120 @@
1 | /* | ||
2 | * builtin-timechart.c - make an svg timechart of system activity | ||
3 | * | ||
4 | * (C) Copyright 2009 Intel Corporation | ||
5 | * | ||
6 | * Authors: | ||
7 | * Arjan van de Ven <arjan@linux.intel.com> | ||
8 | * | ||
9 | * This program is free software; you can redistribute it and/or | ||
10 | * modify it under the terms of the GNU General Public License | ||
11 | * as published by the Free Software Foundation; version 2 | ||
12 | * of the License. | ||
13 | */ | ||
14 | |||
15 | #include "builtin.h" | ||
16 | |||
17 | #include "util/util.h" | ||
18 | |||
19 | #include "util/color.h" | ||
20 | #include <linux/list.h> | ||
21 | #include "util/cache.h" | ||
22 | #include <linux/rbtree.h> | ||
23 | #include "util/symbol.h" | ||
24 | #include "util/string.h" | ||
25 | #include "util/callchain.h" | ||
26 | #include "util/strlist.h" | ||
27 | |||
28 | #include "perf.h" | ||
29 | #include "util/header.h" | ||
30 | #include "util/parse-options.h" | ||
31 | #include "util/parse-events.h" | ||
32 | #include "util/svghelper.h" | ||
33 | |||
34 | static char const *input_name = "perf.data"; | ||
35 | static char const *output_name = "output.svg"; | ||
36 | |||
37 | |||
38 | static unsigned long page_size; | ||
39 | static unsigned long mmap_window = 32; | ||
40 | static u64 sample_type; | ||
41 | |||
42 | static unsigned int numcpus; | ||
43 | static u64 min_freq; /* Lowest CPU frequency seen */ | ||
44 | static u64 max_freq; /* Highest CPU frequency seen */ | ||
45 | static u64 turbo_frequency; | ||
46 | |||
47 | static u64 first_time, last_time; | ||
48 | |||
49 | |||
50 | static struct perf_header *header; | ||
51 | |||
52 | struct per_pid; | ||
53 | struct per_pidcomm; | ||
54 | |||
55 | struct cpu_sample; | ||
56 | struct power_event; | ||
57 | struct wake_event; | ||
58 | |||
59 | struct sample_wrapper; | ||
60 | |||
61 | /* | ||
62 | * Data structure layout: | ||
63 | * We keep a list of "pid"s, matching the kernel's notion of a task struct. | ||
64 | * Each "pid" entry has a list of "comm"s. | ||
65 | * This is because we want to track different programs differently, while | ||
66 | * exec reuses the original pid (by design). | ||
67 | * Each comm has a list of samples that will be used to draw the | ||
68 | * final graph. | ||
69 | */ | ||
70 | |||
71 | struct per_pid { | ||
72 | struct per_pid *next; | ||
73 | |||
74 | int pid; | ||
75 | int ppid; | ||
76 | |||
77 | u64 start_time; | ||
78 | u64 end_time; | ||
79 | u64 total_time; | ||
80 | int display; | ||
81 | |||
82 | struct per_pidcomm *all; | ||
83 | struct per_pidcomm *current; | ||
84 | |||
85 | int painted; | ||
86 | }; | ||
87 | |||
88 | |||
89 | struct per_pidcomm { | ||
90 | struct per_pidcomm *next; | ||
91 | |||
92 | u64 start_time; | ||
93 | u64 end_time; | ||
94 | u64 total_time; | ||
95 | |||
96 | int Y; | ||
97 | int display; | ||
98 | |||
99 | long state; | ||
100 | u64 state_since; | ||
101 | |||
102 | char *comm; | ||
103 | |||
104 | struct cpu_sample *samples; | ||
105 | }; | ||
106 | |||
107 | struct sample_wrapper { | ||
108 | struct sample_wrapper *next; | ||
109 | |||
110 | u64 timestamp; | ||
111 | unsigned char data[0]; | ||
112 | }; | ||
113 | |||
114 | #define TYPE_NONE 0 | ||
115 | #define TYPE_RUNNING 1 | ||
116 | #define TYPE_WAITING 2 | ||
117 | #define TYPE_BLOCKED 3 | ||
118 | |||
119 | struct cpu_sample { | ||
120 | struct cpu_sample *next; | ||
121 | |||
122 | u64 start_time; | ||
123 | u64 end_time; | ||
124 | int type; | ||
125 | int cpu; | ||
126 | }; | ||
127 | |||
128 | static struct per_pid *all_data; | ||
129 | |||
130 | #define CSTATE 1 | ||
131 | #define PSTATE 2 | ||
132 | |||
133 | struct power_event { | ||
134 | struct power_event *next; | ||
135 | int type; | ||
136 | int state; | ||
137 | u64 start_time; | ||
138 | u64 end_time; | ||
139 | int cpu; | ||
140 | }; | ||
141 | |||
142 | struct wake_event { | ||
143 | struct wake_event *next; | ||
144 | int waker; | ||
145 | int wakee; | ||
146 | u64 time; | ||
147 | }; | ||
148 | |||
149 | static struct power_event *power_events; | ||
150 | static struct wake_event *wake_events; | ||
151 | |||
152 | struct sample_wrapper *all_samples; | ||
153 | |||
154 | static struct per_pid *find_create_pid(int pid) | ||
155 | { | ||
156 | struct per_pid *cursor = all_data; | ||
157 | |||
158 | while (cursor) { | ||
159 | if (cursor->pid == pid) | ||
160 | return cursor; | ||
161 | cursor = cursor->next; | ||
162 | } | ||
163 | cursor = malloc(sizeof(struct per_pid)); | ||
164 | assert(cursor != NULL); | ||
165 | memset(cursor, 0, sizeof(struct per_pid)); | ||
166 | cursor->pid = pid; | ||
167 | cursor->next = all_data; | ||
168 | all_data = cursor; | ||
169 | return cursor; | ||
170 | } | ||
171 | |||
172 | static void pid_set_comm(int pid, char *comm) | ||
173 | { | ||
174 | struct per_pid *p; | ||
175 | struct per_pidcomm *c; | ||
176 | p = find_create_pid(pid); | ||
177 | c = p->all; | ||
178 | while (c) { | ||
179 | if (c->comm && strcmp(c->comm, comm) == 0) { | ||
180 | p->current = c; | ||
181 | return; | ||
182 | } | ||
183 | if (!c->comm) { | ||
184 | c->comm = strdup(comm); | ||
185 | p->current = c; | ||
186 | return; | ||
187 | } | ||
188 | c = c->next; | ||
189 | } | ||
190 | c = malloc(sizeof(struct per_pidcomm)); | ||
191 | assert(c != NULL); | ||
192 | memset(c, 0, sizeof(struct per_pidcomm)); | ||
193 | c->comm = strdup(comm); | ||
194 | p->current = c; | ||
195 | c->next = p->all; | ||
196 | p->all = c; | ||
197 | } | ||
198 | |||
199 | static void pid_fork(int pid, int ppid, u64 timestamp) | ||
200 | { | ||
201 | struct per_pid *p, *pp; | ||
202 | p = find_create_pid(pid); | ||
203 | pp = find_create_pid(ppid); | ||
204 | p->ppid = ppid; | ||
205 | if (pp->current && pp->current->comm && !p->current) | ||
206 | pid_set_comm(pid, pp->current->comm); | ||
207 | |||
208 | p->start_time = timestamp; | ||
209 | if (p->current) { | ||
210 | p->current->start_time = timestamp; | ||
211 | p->current->state_since = timestamp; | ||
212 | } | ||
213 | } | ||
214 | |||
215 | static void pid_exit(int pid, u64 timestamp) | ||
216 | { | ||
217 | struct per_pid *p; | ||
218 | p = find_create_pid(pid); | ||
219 | p->end_time = timestamp; | ||
220 | if (p->current) | ||
221 | p->current->end_time = timestamp; | ||
222 | } | ||
223 | |||
224 | static void | ||
225 | pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end) | ||
226 | { | ||
227 | struct per_pid *p; | ||
228 | struct per_pidcomm *c; | ||
229 | struct cpu_sample *sample; | ||
230 | |||
231 | p = find_create_pid(pid); | ||
232 | c = p->current; | ||
233 | if (!c) { | ||
234 | c = malloc(sizeof(struct per_pidcomm)); | ||
235 | assert(c != NULL); | ||
236 | memset(c, 0, sizeof(struct per_pidcomm)); | ||
237 | p->current = c; | ||
238 | c->next = p->all; | ||
239 | p->all = c; | ||
240 | } | ||
241 | |||
242 | sample = malloc(sizeof(struct cpu_sample)); | ||
243 | assert(sample != NULL); | ||
244 | memset(sample, 0, sizeof(struct cpu_sample)); | ||
245 | sample->start_time = start; | ||
246 | sample->end_time = end; | ||
247 | sample->type = type; | ||
248 | sample->next = c->samples; | ||
249 | sample->cpu = cpu; | ||
250 | c->samples = sample; | ||
251 | |||
252 | if (sample->type == TYPE_RUNNING && end > start && start > 0) { | ||
253 | c->total_time += (end-start); | ||
254 | p->total_time += (end-start); | ||
255 | } | ||
256 | |||
257 | if (c->start_time == 0 || c->start_time > start) | ||
258 | c->start_time = start; | ||
259 | if (p->start_time == 0 || p->start_time > start) | ||
260 | p->start_time = start; | ||
261 | |||
262 | if (cpu > numcpus) | ||
263 | numcpus = cpu; | ||
264 | } | ||
265 | |||
266 | #define MAX_CPUS 4096 | ||
267 | |||
268 | static u64 cpus_cstate_start_times[MAX_CPUS]; | ||
269 | static int cpus_cstate_state[MAX_CPUS]; | ||
270 | static u64 cpus_pstate_start_times[MAX_CPUS]; | ||
271 | static u64 cpus_pstate_state[MAX_CPUS]; | ||
272 | |||
273 | static int | ||
274 | process_comm_event(event_t *event) | ||
275 | { | ||
276 | pid_set_comm(event->comm.pid, event->comm.comm); | ||
277 | return 0; | ||
278 | } | ||
279 | static int | ||
280 | process_fork_event(event_t *event) | ||
281 | { | ||
282 | pid_fork(event->fork.pid, event->fork.ppid, event->fork.time); | ||
283 | return 0; | ||
284 | } | ||
285 | |||
286 | static int | ||
287 | process_exit_event(event_t *event) | ||
288 | { | ||
289 | pid_exit(event->fork.pid, event->fork.time); | ||
290 | return 0; | ||
291 | } | ||
292 | |||
293 | struct trace_entry { | ||
294 | u32 size; | ||
295 | unsigned short type; | ||
296 | unsigned char flags; | ||
297 | unsigned char preempt_count; | ||
298 | int pid; | ||
299 | int tgid; | ||
300 | }; | ||
301 | |||
302 | struct power_entry { | ||
303 | struct trace_entry te; | ||
304 | s64 type; | ||
305 | s64 value; | ||
306 | }; | ||
307 | |||
308 | #define TASK_COMM_LEN 16 | ||
309 | struct wakeup_entry { | ||
310 | struct trace_entry te; | ||
311 | char comm[TASK_COMM_LEN]; | ||
312 | int pid; | ||
313 | int prio; | ||
314 | int success; | ||
315 | }; | ||
316 | |||
317 | /* | ||
318 | * trace_flag_type is an enumeration that holds different | ||
319 | * states when a trace occurs. These are: | ||
320 | * IRQS_OFF - interrupts were disabled | ||
321 | * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags | ||
322 | * NEED_RESCHED - reschedule is requested | ||
323 | * HARDIRQ - inside an interrupt handler | ||
324 | * SOFTIRQ - inside a softirq handler | ||
325 | */ | ||
326 | enum trace_flag_type { | ||
327 | TRACE_FLAG_IRQS_OFF = 0x01, | ||
328 | TRACE_FLAG_IRQS_NOSUPPORT = 0x02, | ||
329 | TRACE_FLAG_NEED_RESCHED = 0x04, | ||
330 | TRACE_FLAG_HARDIRQ = 0x08, | ||
331 | TRACE_FLAG_SOFTIRQ = 0x10, | ||
332 | }; | ||
333 | |||
334 | |||
335 | |||
336 | struct sched_switch { | ||
337 | struct trace_entry te; | ||
338 | char prev_comm[TASK_COMM_LEN]; | ||
339 | int prev_pid; | ||
340 | int prev_prio; | ||
341 | long prev_state; /* Arjan weeps. */ | ||
342 | char next_comm[TASK_COMM_LEN]; | ||
343 | int next_pid; | ||
344 | int next_prio; | ||
345 | }; | ||
346 | |||
347 | static void c_state_start(int cpu, u64 timestamp, int state) | ||
348 | { | ||
349 | cpus_cstate_start_times[cpu] = timestamp; | ||
350 | cpus_cstate_state[cpu] = state; | ||
351 | } | ||
352 | |||
353 | static void c_state_end(int cpu, u64 timestamp) | ||
354 | { | ||
355 | struct power_event *pwr; | ||
356 | pwr = malloc(sizeof(struct power_event)); | ||
357 | if (!pwr) | ||
358 | return; | ||
359 | memset(pwr, 0, sizeof(struct power_event)); | ||
360 | |||
361 | pwr->state = cpus_cstate_state[cpu]; | ||
362 | pwr->start_time = cpus_cstate_start_times[cpu]; | ||
363 | pwr->end_time = timestamp; | ||
364 | pwr->cpu = cpu; | ||
365 | pwr->type = CSTATE; | ||
366 | pwr->next = power_events; | ||
367 | |||
368 | power_events = pwr; | ||
369 | } | ||
370 | |||
371 | static void p_state_change(int cpu, u64 timestamp, u64 new_freq) | ||
372 | { | ||
373 | struct power_event *pwr; | ||
374 | |||
375 | if (new_freq > 8000000) /* detect invalid data; bail before allocating */ | ||
376 | return; | ||
377 | |||
378 | pwr = malloc(sizeof(struct power_event)); | ||
379 | if (!pwr) | ||
380 | return; | ||
381 | memset(pwr, 0, sizeof(struct power_event)); | ||
382 | |||
383 | pwr->state = cpus_pstate_state[cpu]; | ||
384 | pwr->start_time = cpus_pstate_start_times[cpu]; | ||
385 | pwr->end_time = timestamp; | ||
386 | pwr->cpu = cpu; | ||
387 | pwr->type = PSTATE; | ||
388 | pwr->next = power_events; | ||
389 | |||
390 | if (!pwr->start_time) | ||
391 | pwr->start_time = first_time; | ||
392 | |||
393 | power_events = pwr; | ||
394 | |||
395 | cpus_pstate_state[cpu] = new_freq; | ||
396 | cpus_pstate_start_times[cpu] = timestamp; | ||
397 | |||
398 | if ((u64)new_freq > max_freq) | ||
399 | max_freq = new_freq; | ||
400 | |||
401 | if (new_freq < min_freq || min_freq == 0) | ||
402 | min_freq = new_freq; | ||
403 | |||
404 | if (new_freq == max_freq - 1000) | ||
405 | turbo_frequency = max_freq; | ||
406 | } | ||
407 | |||
408 | static void | ||
409 | sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te) | ||
410 | { | ||
411 | struct wake_event *we; | ||
412 | struct per_pid *p; | ||
413 | struct wakeup_entry *wake = (void *)te; | ||
414 | |||
415 | we = malloc(sizeof(struct wake_event)); | ||
416 | if (!we) | ||
417 | return; | ||
418 | |||
419 | memset(we, 0, sizeof(struct wake_event)); | ||
420 | we->time = timestamp; | ||
421 | we->waker = pid; | ||
422 | |||
423 | if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ)) | ||
424 | we->waker = -1; | ||
425 | |||
426 | we->wakee = wake->pid; | ||
427 | we->next = wake_events; | ||
428 | wake_events = we; | ||
429 | p = find_create_pid(we->wakee); | ||
430 | |||
431 | if (p && p->current && p->current->state == TYPE_NONE) { | ||
432 | p->current->state_since = timestamp; | ||
433 | p->current->state = TYPE_WAITING; | ||
434 | } | ||
435 | if (p && p->current && p->current->state == TYPE_BLOCKED) { | ||
436 | pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp); | ||
437 | p->current->state_since = timestamp; | ||
438 | p->current->state = TYPE_WAITING; | ||
439 | } | ||
440 | } | ||
441 | |||
442 | static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te) | ||
443 | { | ||
444 | struct per_pid *p = NULL, *prev_p; | ||
445 | struct sched_switch *sw = (void *)te; | ||
446 | |||
447 | |||
448 | prev_p = find_create_pid(sw->prev_pid); | ||
449 | |||
450 | p = find_create_pid(sw->next_pid); | ||
451 | |||
452 | if (prev_p->current && prev_p->current->state != TYPE_NONE) | ||
453 | pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp); | ||
454 | if (p && p->current) { | ||
455 | if (p->current->state != TYPE_NONE) | ||
456 | pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp); | ||
457 | |||
458 | p->current->state_since = timestamp; | ||
459 | p->current->state = TYPE_RUNNING; | ||
460 | } | ||
461 | |||
462 | if (prev_p->current) { | ||
463 | prev_p->current->state = TYPE_NONE; | ||
464 | prev_p->current->state_since = timestamp; | ||
465 | if (sw->prev_state & 2) | ||
466 | prev_p->current->state = TYPE_BLOCKED; | ||
467 | if (sw->prev_state == 0) | ||
468 | prev_p->current->state = TYPE_WAITING; | ||
469 | } | ||
470 | } | ||
471 | |||
472 | |||
473 | static int | ||
474 | process_sample_event(event_t *event) | ||
475 | { | ||
476 | int cursor = 0; | ||
477 | u64 addr = 0; | ||
478 | u64 stamp = 0; | ||
479 | u32 cpu = 0; | ||
480 | u32 pid = 0; | ||
481 | struct trace_entry *te; | ||
482 | |||
483 | if (sample_type & PERF_SAMPLE_IP) | ||
484 | cursor++; | ||
485 | |||
486 | if (sample_type & PERF_SAMPLE_TID) { | ||
487 | pid = event->sample.array[cursor]>>32; | ||
488 | cursor++; | ||
489 | } | ||
490 | if (sample_type & PERF_SAMPLE_TIME) { | ||
491 | stamp = event->sample.array[cursor++]; | ||
492 | |||
493 | if (!first_time || first_time > stamp) | ||
494 | first_time = stamp; | ||
495 | if (last_time < stamp) | ||
496 | last_time = stamp; | ||
497 | |||
498 | } | ||
499 | if (sample_type & PERF_SAMPLE_ADDR) | ||
500 | addr = event->sample.array[cursor++]; | ||
501 | if (sample_type & PERF_SAMPLE_ID) | ||
502 | cursor++; | ||
503 | if (sample_type & PERF_SAMPLE_STREAM_ID) | ||
504 | cursor++; | ||
505 | if (sample_type & PERF_SAMPLE_CPU) | ||
506 | cpu = event->sample.array[cursor++] & 0xFFFFFFFF; | ||
507 | if (sample_type & PERF_SAMPLE_PERIOD) | ||
508 | cursor++; | ||
509 | |||
510 | te = (void *)&event->sample.array[cursor]; | ||
511 | |||
512 | if (sample_type & PERF_SAMPLE_RAW && te->size > 0) { | ||
513 | char *event_str; | ||
514 | struct power_entry *pe; | ||
515 | |||
516 | pe = (void *)te; | ||
517 | |||
518 | event_str = perf_header__find_event(te->type); | ||
519 | |||
520 | if (!event_str) | ||
521 | return 0; | ||
522 | |||
523 | if (strcmp(event_str, "power:power_start") == 0) | ||
524 | c_state_start(cpu, stamp, pe->value); | ||
525 | |||
526 | if (strcmp(event_str, "power:power_end") == 0) | ||
527 | c_state_end(cpu, stamp); | ||
528 | |||
529 | if (strcmp(event_str, "power:power_frequency") == 0) | ||
530 | p_state_change(cpu, stamp, pe->value); | ||
531 | |||
532 | if (strcmp(event_str, "sched:sched_wakeup") == 0) | ||
533 | sched_wakeup(cpu, stamp, pid, te); | ||
534 | |||
535 | if (strcmp(event_str, "sched:sched_switch") == 0) | ||
536 | sched_switch(cpu, stamp, te); | ||
537 | } | ||
538 | return 0; | ||
539 | } | ||
540 | |||
541 | /* | ||
542 | * After the last sample we need to wrap up the current C/P state | ||
543 | * and close out each CPU for these. | ||
544 | */ | ||
545 | static void end_sample_processing(void) | ||
546 | { | ||
547 | u64 cpu; | ||
548 | struct power_event *pwr; | ||
549 | |||
550 | for (cpu = 0; cpu < numcpus; cpu++) { | ||
551 | pwr = malloc(sizeof(struct power_event)); | ||
552 | if (!pwr) | ||
553 | return; | ||
554 | memset(pwr, 0, sizeof(struct power_event)); | ||
555 | |||
556 | /* C state */ | ||
557 | #if 0 | ||
558 | pwr->state = cpus_cstate_state[cpu]; | ||
559 | pwr->start_time = cpus_cstate_start_times[cpu]; | ||
560 | pwr->end_time = last_time; | ||
561 | pwr->cpu = cpu; | ||
562 | pwr->type = CSTATE; | ||
563 | pwr->next = power_events; | ||
564 | |||
565 | power_events = pwr; | ||
566 | #endif | ||
567 | /* P state */ | ||
568 | |||
569 | pwr = malloc(sizeof(struct power_event)); | ||
570 | if (!pwr) | ||
571 | return; | ||
572 | memset(pwr, 0, sizeof(struct power_event)); | ||
573 | |||
574 | pwr->state = cpus_pstate_state[cpu]; | ||
575 | pwr->start_time = cpus_pstate_start_times[cpu]; | ||
576 | pwr->end_time = last_time; | ||
577 | pwr->cpu = cpu; | ||
578 | pwr->type = PSTATE; | ||
579 | pwr->next = power_events; | ||
580 | |||
581 | if (!pwr->start_time) | ||
582 | pwr->start_time = first_time; | ||
583 | if (!pwr->state) | ||
584 | pwr->state = min_freq; | ||
585 | power_events = pwr; | ||
586 | } | ||
587 | } | ||
588 | |||
589 | static u64 sample_time(event_t *event) | ||
590 | { | ||
591 | int cursor; | ||
592 | |||
593 | cursor = 0; | ||
594 | if (sample_type & PERF_SAMPLE_IP) | ||
595 | cursor++; | ||
596 | if (sample_type & PERF_SAMPLE_TID) | ||
597 | cursor++; | ||
598 | if (sample_type & PERF_SAMPLE_TIME) | ||
599 | return event->sample.array[cursor]; | ||
600 | return 0; | ||
601 | } | ||
602 | |||
603 | |||
604 | /* | ||
605 | * We first queue all events, sorted backwards by insertion. | ||
606 | * The order will get flipped later. | ||
607 | */ | ||
608 | static int | ||
609 | queue_sample_event(event_t *event) | ||
610 | { | ||
611 | struct sample_wrapper *copy, *prev; | ||
612 | int size; | ||
613 | |||
614 | size = event->sample.header.size + sizeof(struct sample_wrapper) + 8; | ||
615 | |||
616 | copy = malloc(size); | ||
617 | if (!copy) | ||
618 | return 1; | ||
619 | |||
620 | memset(copy, 0, size); | ||
621 | |||
622 | copy->next = NULL; | ||
623 | copy->timestamp = sample_time(event); | ||
624 | |||
625 | memcpy(©->data, event, event->sample.header.size); | ||
626 | |||
627 | /* insert in the right place in the list */ | ||
628 | |||
629 | if (!all_samples) { | ||
630 | /* first sample ever */ | ||
631 | all_samples = copy; | ||
632 | return 0; | ||
633 | } | ||
634 | |||
635 | if (all_samples->timestamp < copy->timestamp) { | ||
636 | /* insert at the head of the list */ | ||
637 | copy->next = all_samples; | ||
638 | all_samples = copy; | ||
639 | return 0; | ||
640 | } | ||
641 | |||
642 | prev = all_samples; | ||
643 | while (prev->next) { | ||
644 | if (prev->next->timestamp < copy->timestamp) { | ||
645 | copy->next = prev->next; | ||
646 | prev->next = copy; | ||
647 | return 0; | ||
648 | } | ||
649 | prev = prev->next; | ||
650 | } | ||
651 | /* insert at the end of the list */ | ||
652 | prev->next = copy; | ||
653 | |||
654 | return 0; | ||
655 | } | ||
656 | |||
657 | static void sort_queued_samples(void) | ||
658 | { | ||
659 | struct sample_wrapper *cursor, *next; | ||
660 | |||
661 | cursor = all_samples; | ||
662 | all_samples = NULL; | ||
663 | |||
664 | while (cursor) { | ||
665 | next = cursor->next; | ||
666 | cursor->next = all_samples; | ||
667 | all_samples = cursor; | ||
668 | cursor = next; | ||
669 | } | ||
670 | } | ||
671 | |||
672 | /* | ||
673 | * Sort the pid datastructure | ||
674 | */ | ||
675 | static void sort_pids(void) | ||
676 | { | ||
677 | struct per_pid *new_list, *p, *cursor, *prev; | ||
678 | /* sort by ppid first, then by pid, lowest to highest */ | ||
679 | |||
680 | new_list = NULL; | ||
681 | |||
682 | while (all_data) { | ||
683 | p = all_data; | ||
684 | all_data = p->next; | ||
685 | p->next = NULL; | ||
686 | |||
687 | if (new_list == NULL) { | ||
688 | new_list = p; | ||
689 | p->next = NULL; | ||
690 | continue; | ||
691 | } | ||
692 | prev = NULL; | ||
693 | cursor = new_list; | ||
694 | while (cursor) { | ||
695 | if (cursor->ppid > p->ppid || | ||
696 | (cursor->ppid == p->ppid && cursor->pid > p->pid)) { | ||
697 | /* must insert before */ | ||
698 | if (prev) { | ||
699 | p->next = prev->next; | ||
700 | prev->next = p; | ||
701 | cursor = NULL; | ||
702 | continue; | ||
703 | } else { | ||
704 | p->next = new_list; | ||
705 | new_list = p; | ||
706 | cursor = NULL; | ||
707 | continue; | ||
708 | } | ||
709 | } | ||
710 | |||
711 | prev = cursor; | ||
712 | cursor = cursor->next; | ||
713 | if (!cursor) | ||
714 | prev->next = p; | ||
715 | } | ||
716 | } | ||
717 | all_data = new_list; | ||
718 | } | ||
719 | |||
720 | |||
721 | static void draw_c_p_states(void) | ||
722 | { | ||
723 | struct power_event *pwr; | ||
724 | pwr = power_events; | ||
725 | |||
726 | /* | ||
727 | * two pass drawing so that the P state bars are on top of the C state blocks | ||
728 | */ | ||
729 | while (pwr) { | ||
730 | if (pwr->type == CSTATE) | ||
731 | svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state); | ||
732 | pwr = pwr->next; | ||
733 | } | ||
734 | |||
735 | pwr = power_events; | ||
736 | while (pwr) { | ||
737 | if (pwr->type == PSTATE) { | ||
738 | if (!pwr->state) | ||
739 | pwr->state = min_freq; | ||
740 | svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state); | ||
741 | } | ||
742 | pwr = pwr->next; | ||
743 | } | ||
744 | } | ||
745 | |||
746 | static void draw_wakeups(void) | ||
747 | { | ||
748 | struct wake_event *we; | ||
749 | struct per_pid *p; | ||
750 | struct per_pidcomm *c; | ||
751 | |||
752 | we = wake_events; | ||
753 | while (we) { | ||
754 | int from = 0, to = 0; | ||
755 | |||
756 | /* locate the column of the waker and wakee */ | ||
757 | p = all_data; | ||
758 | while (p) { | ||
759 | if (p->pid == we->waker || p->pid == we->wakee) { | ||
760 | c = p->all; | ||
761 | while (c) { | ||
762 | if (c->Y && c->start_time <= we->time && c->end_time >= we->time) { | ||
763 | if (p->pid == we->waker) | ||
764 | from = c->Y; | ||
765 | if (p->pid == we->wakee) | ||
766 | to = c->Y; | ||
767 | } | ||
768 | c = c->next; | ||
769 | } | ||
770 | } | ||
771 | p = p->next; | ||
772 | } | ||
773 | |||
774 | if (we->waker == -1) | ||
775 | svg_interrupt(we->time, to); | ||
776 | else if (from && to && abs(from - to) == 1) | ||
777 | svg_wakeline(we->time, from, to); | ||
778 | else | ||
779 | svg_partial_wakeline(we->time, from, to); | ||
780 | we = we->next; | ||
781 | } | ||
782 | } | ||
783 | |||
784 | static void draw_cpu_usage(void) | ||
785 | { | ||
786 | struct per_pid *p; | ||
787 | struct per_pidcomm *c; | ||
788 | struct cpu_sample *sample; | ||
789 | p = all_data; | ||
790 | while (p) { | ||
791 | c = p->all; | ||
792 | while (c) { | ||
793 | sample = c->samples; | ||
794 | while (sample) { | ||
795 | if (sample->type == TYPE_RUNNING) | ||
796 | svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm); | ||
797 | |||
798 | sample = sample->next; | ||
799 | } | ||
800 | c = c->next; | ||
801 | } | ||
802 | p = p->next; | ||
803 | } | ||
804 | } | ||
805 | |||
806 | static void draw_process_bars(void) | ||
807 | { | ||
808 | struct per_pid *p; | ||
809 | struct per_pidcomm *c; | ||
810 | struct cpu_sample *sample; | ||
811 | int Y = 0; | ||
812 | |||
813 | Y = 2 * numcpus + 2; | ||
814 | |||
815 | p = all_data; | ||
816 | while (p) { | ||
817 | c = p->all; | ||
818 | while (c) { | ||
819 | if (!c->display) { | ||
820 | c->Y = 0; | ||
821 | c = c->next; | ||
822 | continue; | ||
823 | } | ||
824 | |||
825 | svg_box(Y, p->start_time, p->end_time, "process"); | ||
826 | sample = c->samples; | ||
827 | while (sample) { | ||
828 | if (sample->type == TYPE_RUNNING) | ||
829 | svg_sample(Y, sample->cpu, sample->start_time, sample->end_time, "sample"); | ||
830 | if (sample->type == TYPE_BLOCKED) | ||
831 | svg_box(Y, sample->start_time, sample->end_time, "blocked"); | ||
832 | if (sample->type == TYPE_WAITING) | ||
833 | svg_box(Y, sample->start_time, sample->end_time, "waiting"); | ||
834 | sample = sample->next; | ||
835 | } | ||
836 | |||
837 | if (c->comm) { | ||
838 | char comm[256]; | ||
839 | if (c->total_time > 5000000000) /* 5 seconds */ | ||
840 | sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0); | ||
841 | else | ||
842 | sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0); | ||
843 | |||
844 | svg_text(Y, c->start_time, comm); | ||
845 | } | ||
846 | c->Y = Y; | ||
847 | Y++; | ||
848 | c = c->next; | ||
849 | } | ||
850 | p = p->next; | ||
851 | } | ||
852 | } | ||
853 | |||
854 | static int determine_display_tasks(u64 threshold) | ||
855 | { | ||
856 | struct per_pid *p; | ||
857 | struct per_pidcomm *c; | ||
858 | int count = 0; | ||
859 | |||
860 | p = all_data; | ||
861 | while (p) { | ||
862 | p->display = 0; | ||
863 | if (p->start_time == 1) | ||
864 | p->start_time = first_time; | ||
865 | |||
866 | /* no exit marker, task kept running to the end */ | ||
867 | if (p->end_time == 0) | ||
868 | p->end_time = last_time; | ||
869 | if (p->total_time >= threshold) | ||
870 | p->display = 1; | ||
871 | |||
872 | c = p->all; | ||
873 | |||
874 | while (c) { | ||
875 | c->display = 0; | ||
876 | |||
877 | if (c->start_time == 1) | ||
878 | c->start_time = first_time; | ||
879 | |||
880 | if (c->total_time >= threshold) { | ||
881 | c->display = 1; | ||
882 | count++; | ||
883 | } | ||
884 | |||
885 | if (c->end_time == 0) | ||
886 | c->end_time = last_time; | ||
887 | |||
888 | c = c->next; | ||
889 | } | ||
890 | p = p->next; | ||
891 | } | ||
892 | return count; | ||
893 | } | ||
894 | |||
895 | |||
896 | |||
897 | #define TIME_THRESH 10000000 | ||
898 | |||
899 | static void write_svg_file(const char *filename) | ||
900 | { | ||
901 | u64 i; | ||
902 | int count; | ||
903 | |||
904 | numcpus++; | ||
905 | |||
906 | |||
907 | count = determine_display_tasks(TIME_THRESH); | ||
908 | |||
909 | /* We'd like to show at least 15 tasks; be less picky if we have fewer */ | ||
910 | if (count < 15) | ||
911 | count = determine_display_tasks(TIME_THRESH / 10); | ||
912 | |||
913 | open_svg(filename, numcpus, count); | ||
914 | |||
915 | svg_time_grid(first_time, last_time); | ||
916 | svg_legenda(); | ||
917 | |||
918 | for (i = 0; i < numcpus; i++) | ||
919 | svg_cpu_box(i, max_freq, turbo_frequency); | ||
920 | |||
921 | draw_cpu_usage(); | ||
922 | draw_process_bars(); | ||
923 | draw_c_p_states(); | ||
924 | draw_wakeups(); | ||
925 | |||
926 | svg_close(); | ||
927 | } | ||
928 | |||
929 | static int | ||
930 | process_event(event_t *event) | ||
931 | { | ||
932 | |||
933 | switch (event->header.type) { | ||
934 | |||
935 | case PERF_EVENT_COMM: | ||
936 | return process_comm_event(event); | ||
937 | case PERF_EVENT_FORK: | ||
938 | return process_fork_event(event); | ||
939 | case PERF_EVENT_EXIT: | ||
940 | return process_exit_event(event); | ||
941 | case PERF_EVENT_SAMPLE: | ||
942 | return queue_sample_event(event); | ||
943 | |||
944 | /* | ||
945 | * We don't process them right now but they are fine: | ||
946 | */ | ||
947 | case PERF_EVENT_MMAP: | ||
948 | case PERF_EVENT_THROTTLE: | ||
949 | case PERF_EVENT_UNTHROTTLE: | ||
950 | return 0; | ||
951 | |||
952 | default: | ||
953 | return -1; | ||
954 | } | ||
955 | |||
956 | return 0; | ||
957 | } | ||
958 | |||
959 | static void process_samples(void) | ||
960 | { | ||
961 | struct sample_wrapper *cursor; | ||
962 | event_t *event; | ||
963 | |||
964 | sort_queued_samples(); | ||
965 | |||
966 | cursor = all_samples; | ||
967 | while (cursor) { | ||
968 | event = (void *)&cursor->data; | ||
969 | cursor = cursor->next; | ||
970 | process_sample_event(event); | ||
971 | } | ||
972 | } | ||
973 | |||
974 | |||
975 | static int __cmd_timechart(void) | ||
976 | { | ||
977 | int ret, rc = EXIT_FAILURE; | ||
978 | unsigned long offset = 0; | ||
979 | unsigned long head, shift; | ||
980 | struct stat statbuf; | ||
981 | event_t *event; | ||
982 | uint32_t size; | ||
983 | char *buf; | ||
984 | int input; | ||
985 | |||
986 | input = open(input_name, O_RDONLY); | ||
987 | if (input < 0) { | ||
988 | fprintf(stderr, " failed to open file: %s", input_name); | ||
989 | if (!strcmp(input_name, "perf.data")) | ||
990 | fprintf(stderr, " (try 'perf record' first)"); | ||
991 | fprintf(stderr, "\n"); | ||
992 | exit(-1); | ||
993 | } | ||
994 | |||
995 | ret = fstat(input, &statbuf); | ||
996 | if (ret < 0) { | ||
997 | perror("failed to stat file"); | ||
998 | exit(-1); | ||
999 | } | ||
1000 | |||
1001 | if (!statbuf.st_size) { | ||
1002 | fprintf(stderr, "zero-sized file, nothing to do!\n"); | ||
1003 | exit(0); | ||
1004 | } | ||
1005 | |||
1006 | header = perf_header__read(input); | ||
1007 | head = header->data_offset; | ||
1008 | |||
1009 | sample_type = perf_header__sample_type(header); | ||
1010 | |||
1011 | shift = page_size * (head / page_size); | ||
1012 | offset += shift; | ||
1013 | head -= shift; | ||
1014 | |||
1015 | remap: | ||
1016 | buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ, | ||
1017 | MAP_SHARED, input, offset); | ||
1018 | if (buf == MAP_FAILED) { | ||
1019 | perror("failed to mmap file"); | ||
1020 | exit(-1); | ||
1021 | } | ||
1022 | |||
1023 | more: | ||
1024 | event = (event_t *)(buf + head); | ||
1025 | |||
1026 | size = event->header.size; | ||
1027 | if (!size) | ||
1028 | size = 8; | ||
1029 | |||
1030 | if (head + event->header.size >= page_size * mmap_window) { | ||
1031 | int ret2; | ||
1032 | |||
1033 | shift = page_size * (head / page_size); | ||
1034 | |||
1035 | ret2 = munmap(buf, page_size * mmap_window); | ||
1036 | assert(ret2 == 0); | ||
1037 | |||
1038 | offset += shift; | ||
1039 | head -= shift; | ||
1040 | goto remap; | ||
1041 | } | ||
1042 | |||
1043 | size = event->header.size; | ||
1044 | |||
1045 | if (!size || process_event(event) < 0) { | ||
1046 | |||
1047 | printf("%p [%p]: skipping unknown header type: %d\n", | ||
1048 | (void *)(offset + head), | ||
1049 | (void *)(long)(event->header.size), | ||
1050 | event->header.type); | ||
1051 | |||
1052 | /* | ||
1053 | * assume we lost track of the stream, check alignment, and | ||
1054 | * increment a single u64 in the hope to catch on again 'soon'. | ||
1055 | */ | ||
1056 | |||
1057 | if (unlikely(head & 7)) | ||
1058 | head &= ~7ULL; | ||
1059 | |||
1060 | size = 8; | ||
1061 | } | ||
1062 | |||
1063 | head += size; | ||
1064 | |||
1065 | if (offset + head >= header->data_offset + header->data_size) | ||
1066 | goto done; | ||
1067 | |||
1068 | if (offset + head < (unsigned long)statbuf.st_size) | ||
1069 | goto more; | ||
1070 | |||
1071 | done: | ||
1072 | rc = EXIT_SUCCESS; | ||
1073 | close(input); | ||
1074 | |||
1075 | |||
1076 | process_samples(); | ||
1077 | |||
1078 | end_sample_processing(); | ||
1079 | |||
1080 | sort_pids(); | ||
1081 | |||
1082 | write_svg_file(output_name); | ||
1083 | |||
1084 | printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name); | ||
1085 | |||
1086 | return rc; | ||
1087 | } | ||
1088 | |||
1089 | static const char * const report_usage[] = { | ||
1090 | "perf report [<options>] <command>", | ||
1091 | NULL | ||
1092 | }; | ||
1093 | |||
1094 | static const struct option options[] = { | ||
1095 | OPT_STRING('i', "input", &input_name, "file", | ||
1096 | "input file name"), | ||
1097 | OPT_STRING('o', "output", &output_name, "file", | ||
1098 | "output file name"), | ||
1099 | OPT_END() | ||
1100 | }; | ||
1101 | |||
1102 | |||
1103 | int cmd_timechart(int argc, const char **argv, const char *prefix __used) | ||
1104 | { | ||
1105 | symbol__init(); | ||
1106 | |||
1107 | page_size = getpagesize(); | ||
1108 | |||
1109 | argc = parse_options(argc, argv, options, report_usage, 0); | ||
1110 | |||
1111 | /* | ||
1112 | * Any (unrecognized) arguments left? | ||
1113 | */ | ||
1114 | if (argc) | ||
1115 | usage_with_options(report_usage, options); | ||
1116 | |||
1117 | setup_pager(); | ||
1118 | |||
1119 | return __cmd_timechart(); | ||
1120 | } | ||
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index b09cadbd76b1..e11d8d231c3b 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -21,6 +21,7 @@ extern int cmd_list(int argc, const char **argv, const char *prefix);
 extern int cmd_record(int argc, const char **argv, const char *prefix);
 extern int cmd_report(int argc, const char **argv, const char *prefix);
 extern int cmd_stat(int argc, const char **argv, const char *prefix);
+extern int cmd_timechart(int argc, const char **argv, const char *prefix);
 extern int cmd_top(int argc, const char **argv, const char *prefix);
 extern int cmd_trace(int argc, const char **argv, const char *prefix);
 extern int cmd_version(int argc, const char **argv, const char *prefix);
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index c972d1c35489..19fc7feb9d59 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -289,6 +289,7 @@ static void handle_internal_command(int argc, const char **argv)
 		{ "record", cmd_record, 0 },
 		{ "report", cmd_report, 0 },
 		{ "stat", cmd_stat, 0 },
+		{ "timechart", cmd_timechart, 0 },
 		{ "top", cmd_top, 0 },
 		{ "annotate", cmd_annotate, 0 },
 		{ "version", cmd_version, 0 },