path: root/tools
author	Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-24 16:22:33 -0400
committer	Russell King <rmk+kernel@arm.linux.org.uk>	2009-09-24 16:22:33 -0400
commit	baea7b946f00a291b166ccae7fcfed6c01530cc6 (patch)
tree	4aa275fbdbec9c7b9b4629e8bee2bbecd3c6a6af /tools
parent	ae19ffbadc1b2100285a5b5b3d0a4e0a11390904 (diff)
parent	94e0fb086fc5663c38bbc0fe86d698be8314f82f (diff)
Merge branch 'origin' into for-linus
Conflicts:
	MAINTAINERS
Diffstat (limited to 'tools')
-rw-r--r-- tools/perf/Documentation/perf-sched.txt | 41
-rw-r--r-- tools/perf/Documentation/perf-timechart.txt | 38
-rw-r--r-- tools/perf/Documentation/perf-trace.txt | 25
-rw-r--r-- tools/perf/Makefile | 11
-rw-r--r-- tools/perf/builtin-annotate.c | 28
-rw-r--r-- tools/perf/builtin-record.c | 74
-rw-r--r-- tools/perf/builtin-report.c | 48
-rw-r--r-- tools/perf/builtin-sched.c | 2004
-rw-r--r-- tools/perf/builtin-stat.c | 10
-rw-r--r-- tools/perf/builtin-timechart.c | 1158
-rw-r--r-- tools/perf/builtin-top.c | 12
-rw-r--r-- tools/perf/builtin-trace.c | 22
-rw-r--r-- tools/perf/builtin.h | 6
-rw-r--r-- tools/perf/command-list.txt | 3
-rw-r--r-- tools/perf/design.txt | 58
-rw-r--r-- tools/perf/perf.c | 2
-rw-r--r-- tools/perf/perf.h | 12
-rw-r--r-- tools/perf/util/event.h | 14
-rw-r--r-- tools/perf/util/header.c | 73
-rw-r--r-- tools/perf/util/header.h | 14
-rw-r--r-- tools/perf/util/parse-events.c | 263
-rw-r--r-- tools/perf/util/parse-events.h | 2
-rw-r--r-- tools/perf/util/parse-options.h | 2
-rw-r--r-- tools/perf/util/svghelper.c | 488
-rw-r--r-- tools/perf/util/svghelper.h | 28
-rw-r--r-- tools/perf/util/thread.c | 4
-rw-r--r-- tools/perf/util/thread.h | 9
-rw-r--r-- tools/perf/util/trace-event-info.c | 15
-rw-r--r-- tools/perf/util/trace-event-parse.c | 45
-rw-r--r-- tools/perf/util/trace-event-read.c | 6
-rw-r--r-- tools/perf/util/trace-event.h | 7
31 files changed, 4294 insertions(+), 228 deletions(-)
diff --git a/tools/perf/Documentation/perf-sched.txt b/tools/perf/Documentation/perf-sched.txt
new file mode 100644
index 000000000000..1ce79198997b
--- /dev/null
+++ b/tools/perf/Documentation/perf-sched.txt
@@ -0,0 +1,41 @@
1perf-sched(1)
2=============
3
4NAME
5----
6perf-sched - Tool to trace/measure scheduler properties (latencies)
7
8SYNOPSIS
9--------
10[verse]
11'perf sched' {record|latency|replay|trace}
12
13DESCRIPTION
14-----------
15There are four variants of perf sched:
16
17 'perf sched record <command>' to record the scheduling events
18 of an arbitrary workload.
19
20 'perf sched latency' to report the per task scheduling latencies
21 and other scheduling properties of the workload.
22
23 'perf sched trace' to see a detailed trace of the workload that
24 was recorded.
25
26 'perf sched replay' to simulate the workload that was recorded
27 via perf sched record. (This is done by starting up mockup threads
28 that mimic the workload based on the events in the trace. These
29 threads can then replay the timings (CPU runtime and sleep patterns)
30 of the workload as it occurred when it was recorded - and can repeat
31 it a number of times, measuring its performance.)
32
33OPTIONS
34-------
35-D::
36--dump-raw-trace=::
37 Display verbose dump of the sched data.
38
39SEE ALSO
40--------
41linkperf:perf-record[1]
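For illustration, a typical session with the new tool might look like this (the
workload is hypothetical; the subcommands are the four variants documented above):

	# perf sched record make -j8	# record scheduling events of a workload
	# perf sched latency		# per-task latency/property report
	# perf sched trace		# detailed trace of the recorded workload
	# perf sched replay		# re-simulate and time the recorded workload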
diff --git a/tools/perf/Documentation/perf-timechart.txt b/tools/perf/Documentation/perf-timechart.txt
new file mode 100644
index 000000000000..1c2ed3090cce
--- /dev/null
+++ b/tools/perf/Documentation/perf-timechart.txt
@@ -0,0 +1,38 @@
1perf-timechart(1)
2=================
3
4NAME
5----
6perf-timechart - Tool to visualize total system behavior during a workload
7
8SYNOPSIS
9--------
10[verse]
11'perf timechart' {record}
12
13DESCRIPTION
14-----------
15There are two variants of perf timechart:
16
17 'perf timechart record <command>' to record the system level events
18 of an arbitrary workload.
19
20 'perf timechart' to turn a trace into a Scalable Vector Graphics file
21 that can be viewed with popular SVG viewers such as 'Inkscape'.
22
23OPTIONS
24-------
25-o::
26--output=::
27 Select the output file (default: output.svg)
28-i::
29--input=::
30 Select the input file (default: perf.data)
31-w::
32--width=::
33 Select the width of the SVG file (default: 1000)
34
35
36SEE ALSO
37--------
38linkperf:perf-record[1]
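A record-then-render sequence using only the options documented above (workload
name hypothetical; perf timechart reads perf.data by default):

	# perf timechart record some_workload
	# perf timechart -i perf.data -o output.svg -w 1000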
diff --git a/tools/perf/Documentation/perf-trace.txt b/tools/perf/Documentation/perf-trace.txt
new file mode 100644
index 000000000000..41ed75398ca9
--- /dev/null
+++ b/tools/perf/Documentation/perf-trace.txt
@@ -0,0 +1,25 @@
1perf-trace(1)
2=============
3
4NAME
5----
6perf-trace - Read perf.data (created by perf record) and display trace output
7
8SYNOPSIS
9--------
10[verse]
11'perf trace' [-i <file> | --input=file] symbol_name
12
13DESCRIPTION
14-----------
15This command reads the input file and displays the trace recorded.
16
17OPTIONS
18-------
19-D::
20--dump-raw-trace=::
21 Display verbose dump of the trace data.
22
23SEE ALSO
24--------
25linkperf:perf-record[1]
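Again for illustration, a minimal round trip with the options above (assuming a
perf.data produced earlier by perf record):

	# perf trace -i perf.data	# display the recorded trace
	# perf trace -D			# same, plus a verbose raw dump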
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index 9f8d207a91bf..b5f1953b6144 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -318,7 +318,7 @@ export PERL_PATH
 
 LIB_FILE=libperf.a
 
-LIB_H += ../../include/linux/perf_counter.h
+LIB_H += ../../include/linux/perf_event.h
 LIB_H += ../../include/linux/rbtree.h
 LIB_H += ../../include/linux/list.h
 LIB_H += util/include/linux/list.h
@@ -373,13 +373,16 @@ LIB_OBJS += util/thread.o
 LIB_OBJS += util/trace-event-parse.o
 LIB_OBJS += util/trace-event-read.o
 LIB_OBJS += util/trace-event-info.o
+LIB_OBJS += util/svghelper.o
 
 BUILTIN_OBJS += builtin-annotate.o
 BUILTIN_OBJS += builtin-help.o
+BUILTIN_OBJS += builtin-sched.o
 BUILTIN_OBJS += builtin-list.o
 BUILTIN_OBJS += builtin-record.o
 BUILTIN_OBJS += builtin-report.o
 BUILTIN_OBJS += builtin-stat.o
+BUILTIN_OBJS += builtin-timechart.o
 BUILTIN_OBJS += builtin-top.o
 BUILTIN_OBJS += builtin-trace.o
 
@@ -710,6 +713,12 @@ builtin-help.o: builtin-help.c common-cmds.h PERF-CFLAGS
 		'-DPERF_MAN_PATH="$(mandir_SQ)"' \
 		'-DPERF_INFO_PATH="$(infodir_SQ)"' $<
 
+builtin-timechart.o: builtin-timechart.c common-cmds.h PERF-CFLAGS
+	$(QUIET_CC)$(CC) -o $*.o -c $(ALL_CFLAGS) \
+		'-DPERF_HTML_PATH="$(htmldir_SQ)"' \
+		'-DPERF_MAN_PATH="$(mandir_SQ)"' \
+		'-DPERF_INFO_PATH="$(infodir_SQ)"' $<
+
 $(BUILT_INS): perf$X
 	$(QUIET_BUILT_IN)$(RM) $@ && \
 		ln perf$X $@ 2>/dev/null || \
diff --git a/tools/perf/builtin-annotate.c b/tools/perf/builtin-annotate.c
index 043d85b7e254..1ec741615814 100644
--- a/tools/perf/builtin-annotate.c
+++ b/tools/perf/builtin-annotate.c
@@ -505,7 +505,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 		return -1;
 	}
 
-	if (event->header.misc & PERF_EVENT_MISC_KERNEL) {
+	if (event->header.misc & PERF_RECORD_MISC_KERNEL) {
 		show = SHOW_KERNEL;
 		level = 'k';
 
@@ -513,7 +513,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 
 		dump_printf(" ...... dso: %s\n", dso->name);
 
-	} else if (event->header.misc & PERF_EVENT_MISC_USER) {
+	} else if (event->header.misc & PERF_RECORD_MISC_USER) {
 
 		show = SHOW_USER;
 		level = '.';
@@ -565,7 +565,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 
 	thread = threads__findnew(event->mmap.pid, &threads, &last_match);
 
-	dump_printf("%p [%p]: PERF_EVENT_MMAP %d: [%p(%p) @ %p]: %s\n",
+	dump_printf("%p [%p]: PERF_RECORD_MMAP %d: [%p(%p) @ %p]: %s\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->mmap.pid,
@@ -575,7 +575,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 		event->mmap.filename);
 
 	if (thread == NULL || map == NULL) {
-		dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n");
+		dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
 		return 0;
 	}
 
@@ -591,14 +591,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 	struct thread *thread;
 
 	thread = threads__findnew(event->comm.pid, &threads, &last_match);
-	dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
+	dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->comm.comm, event->comm.pid);
 
 	if (thread == NULL ||
 	    thread__set_comm(thread, event->comm.comm)) {
-		dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
+		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
 		return -1;
 	}
 	total_comm++;
@@ -614,7 +614,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
 
 	thread = threads__findnew(event->fork.pid, &threads, &last_match);
 	parent = threads__findnew(event->fork.ppid, &threads, &last_match);
-	dump_printf("%p [%p]: PERF_EVENT_FORK: %d:%d\n",
+	dump_printf("%p [%p]: PERF_RECORD_FORK: %d:%d\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->fork.pid, event->fork.ppid);
@@ -627,7 +627,7 @@ process_fork_event(event_t *event, unsigned long offset, unsigned long head)
 		return 0;
 
 	if (!thread || !parent || thread__fork(thread, parent)) {
-		dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n");
+		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
 		return -1;
 	}
 	total_fork++;
@@ -639,23 +639,23 @@ static int
 process_event(event_t *event, unsigned long offset, unsigned long head)
 {
 	switch (event->header.type) {
-	case PERF_EVENT_SAMPLE:
+	case PERF_RECORD_SAMPLE:
 		return process_sample_event(event, offset, head);
 
-	case PERF_EVENT_MMAP:
+	case PERF_RECORD_MMAP:
 		return process_mmap_event(event, offset, head);
 
-	case PERF_EVENT_COMM:
+	case PERF_RECORD_COMM:
 		return process_comm_event(event, offset, head);
 
-	case PERF_EVENT_FORK:
+	case PERF_RECORD_FORK:
 		return process_fork_event(event, offset, head);
 	/*
 	 * We don't process them right now but they are fine:
 	 */
 
-	case PERF_EVENT_THROTTLE:
-	case PERF_EVENT_UNTHROTTLE:
+	case PERF_RECORD_THROTTLE:
+	case PERF_RECORD_UNTHROTTLE:
 		return 0;
 
 	default:
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 99a12fe86e9f..a5a050af8e7d 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -48,6 +48,8 @@ static int call_graph = 0;
 static int inherit_stat = 0;
 static int no_samples = 0;
 static int sample_address = 0;
+static int multiplex = 0;
+static int multiplex_fd = -1;
 
 static long samples;
 static struct timeval last_read;
@@ -75,7 +77,7 @@ static struct mmap_data mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
 
 static unsigned long mmap_read_head(struct mmap_data *md)
 {
-	struct perf_counter_mmap_page *pc = md->base;
+	struct perf_event_mmap_page *pc = md->base;
 	long head;
 
 	head = pc->data_head;
@@ -86,7 +88,7 @@ static unsigned long mmap_read_head(struct mmap_data *md)
 
 static void mmap_write_tail(struct mmap_data *md, unsigned long tail)
 {
-	struct perf_counter_mmap_page *pc = md->base;
+	struct perf_event_mmap_page *pc = md->base;
 
 	/*
 	 * ensure all reads are done before we write the tail out.
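These two helpers are the user-space half of the perf ring-buffer protocol: the
kernel publishes data_head, and the consumer acknowledges progress by storing
data_tail behind a full barrier. A minimal consumer loop built on them could
look like the sketch below (illustrative only: process_event() is a
hypothetical callback, and real code - like mmap_read() in this file - has to
copy records that wrap around the end of the buffer):

static void consume(struct mmap_data *md)
{
	unsigned long head = mmap_read_head(md);	/* rmb() inside */
	unsigned long old = md->prev;
	unsigned char *data = (unsigned char *)md->base + page_size;

	while (old != head) {
		struct perf_event_header *h;

		h = (struct perf_event_header *)&data[old & md->mask];
		process_event(h);	/* hypothetical; ignores wrap-around */
		old += h->size;
	}

	md->prev = old;
	mmap_write_tail(md, old);	/* mb(), then pc->data_tail = old */
}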
@@ -231,7 +233,7 @@ static pid_t pid_synthesize_comm_event(pid_t pid, int full)
 		}
 	}
 
-	comm_ev.header.type = PERF_EVENT_COMM;
+	comm_ev.header.type = PERF_RECORD_COMM;
 	size = ALIGN(size, sizeof(u64));
 	comm_ev.header.size = sizeof(comm_ev) - (sizeof(comm_ev.comm) - size);
 
@@ -286,7 +288,7 @@ static void pid_synthesize_mmap_samples(pid_t pid, pid_t tgid)
 	while (1) {
 		char bf[BUFSIZ], *pbf = bf;
 		struct mmap_event mmap_ev = {
-			.header = { .type = PERF_EVENT_MMAP },
+			.header = { .type = PERF_RECORD_MMAP },
 		};
 		int n;
 		size_t size;
@@ -353,7 +355,7 @@ static void synthesize_all(void)
 
 static int group_fd;
 
-static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int nr)
+static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int nr)
 {
 	struct perf_header_attr *h_attr;
 
@@ -369,7 +371,7 @@ static struct perf_header_attr *get_header_attr(struct perf_counter_attr *a, int
 
 static void create_counter(int counter, int cpu, pid_t pid)
 {
-	struct perf_counter_attr *attr = attrs + counter;
+	struct perf_event_attr *attr = attrs + counter;
 	struct perf_header_attr *h_attr;
 	int track = !counter; /* only the first counter needs these */
 	struct {
@@ -415,7 +417,7 @@ static void create_counter(int counter, int cpu, pid_t pid)
 	attr->disabled = 1;
 
 try_again:
-	fd[nr_cpu][counter] = sys_perf_counter_open(attr, pid, cpu, group_fd, 0);
+	fd[nr_cpu][counter] = sys_perf_event_open(attr, pid, cpu, group_fd, 0);
 
 	if (fd[nr_cpu][counter] < 0) {
 		int err = errno;
@@ -442,7 +444,7 @@ try_again:
 		printf("\n");
 		error("perfcounter syscall returned with %d (%s)\n",
 			fd[nr_cpu][counter], strerror(err));
-		die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
+		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
 		exit(-1);
 	}
 
@@ -470,22 +472,31 @@ try_again:
 	 */
 	if (group && group_fd == -1)
 		group_fd = fd[nr_cpu][counter];
+	if (multiplex && multiplex_fd == -1)
+		multiplex_fd = fd[nr_cpu][counter];
 
-	event_array[nr_poll].fd = fd[nr_cpu][counter];
-	event_array[nr_poll].events = POLLIN;
-	nr_poll++;
-
-	mmap_array[nr_cpu][counter].counter = counter;
-	mmap_array[nr_cpu][counter].prev = 0;
-	mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1;
-	mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
-		PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter], 0);
-	if (mmap_array[nr_cpu][counter].base == MAP_FAILED) {
-		error("failed to mmap with %d (%s)\n", errno, strerror(errno));
-		exit(-1);
+	if (multiplex && fd[nr_cpu][counter] != multiplex_fd) {
+		int ret;
+
+		ret = ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_SET_OUTPUT, multiplex_fd);
+		assert(ret != -1);
+	} else {
+		event_array[nr_poll].fd = fd[nr_cpu][counter];
+		event_array[nr_poll].events = POLLIN;
+		nr_poll++;
+
+		mmap_array[nr_cpu][counter].counter = counter;
+		mmap_array[nr_cpu][counter].prev = 0;
+		mmap_array[nr_cpu][counter].mask = mmap_pages*page_size - 1;
+		mmap_array[nr_cpu][counter].base = mmap(NULL, (mmap_pages+1)*page_size,
+			PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter], 0);
+		if (mmap_array[nr_cpu][counter].base == MAP_FAILED) {
+			error("failed to mmap with %d (%s)\n", errno, strerror(errno));
+			exit(-1);
+		}
 	}
 
-	ioctl(fd[nr_cpu][counter], PERF_COUNTER_IOC_ENABLE);
+	ioctl(fd[nr_cpu][counter], PERF_EVENT_IOC_ENABLE);
 }
 
 static void open_counters(int cpu, pid_t pid)
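This else-branch restructuring is what the new -M/--multiplex option (added at
the end of this patch) enables: only the first counter gets its own mmap()
buffer, and every other fd is redirected into it with the new
PERF_EVENT_IOC_SET_OUTPUT ioctl. A stand-alone sketch of the technique, with
error handling elided and the perf_event_attr setup assumed to be done by the
caller:

#include <linux/perf_event.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
			   int cpu, int group_fd, unsigned long flags)
{
	return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

/* Open two events on the current task and route the second one's records
 * into the first one's ring buffer, so a single mmap() drains both: */
static int open_multiplexed_pair(struct perf_event_attr *attr)
{
	int leader = perf_event_open(attr, 0, -1, -1, 0);
	int other = perf_event_open(attr, 0, -1, -1, 0);

	if (leader < 0 || other < 0)
		return -1;
	if (ioctl(other, PERF_EVENT_IOC_SET_OUTPUT, leader) == -1)
		return -1;
	return leader;
}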
@@ -513,6 +524,7 @@ static int __cmd_record(int argc, const char **argv)
 	pid_t pid = 0;
 	int flags;
 	int ret;
+	unsigned long waking = 0;
 
 	page_size = sysconf(_SC_PAGE_SIZE);
 	nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
@@ -614,17 +626,29 @@ static int __cmd_record(int argc, const char **argv)
 		int hits = samples;
 
 		for (i = 0; i < nr_cpu; i++) {
-			for (counter = 0; counter < nr_counters; counter++)
-				mmap_read(&mmap_array[i][counter]);
+			for (counter = 0; counter < nr_counters; counter++) {
+				if (mmap_array[i][counter].base)
+					mmap_read(&mmap_array[i][counter]);
+			}
 		}
 
 		if (hits == samples) {
 			if (done)
 				break;
-			ret = poll(event_array, nr_poll, 100);
+			ret = poll(event_array, nr_poll, -1);
+			waking++;
+		}
+
+		if (done) {
+			for (i = 0; i < nr_cpu; i++) {
+				for (counter = 0; counter < nr_counters; counter++)
+					ioctl(fd[i][counter], PERF_EVENT_IOC_DISABLE);
+			}
 		}
 	}
 
+	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
+
 	/*
 	 * Approximate RIP event size: 24 bytes.
 	 */
@@ -681,6 +705,8 @@ static const struct option options[] = {
681 "Sample addresses"), 705 "Sample addresses"),
682 OPT_BOOLEAN('n', "no-samples", &no_samples, 706 OPT_BOOLEAN('n', "no-samples", &no_samples,
683 "don't sample"), 707 "don't sample"),
708 OPT_BOOLEAN('M', "multiplex", &multiplex,
709 "multiplex counter output in a single channel"),
684 OPT_END() 710 OPT_END()
685}; 711};
686 712
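With the option wired up, a multiplexed session is simply (event list
illustrative):

	# perf record -M -e cycles -e instructions <command>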
diff --git a/tools/perf/builtin-report.c b/tools/perf/builtin-report.c
index cdf9a8d27bb9..19669c20088e 100644
--- a/tools/perf/builtin-report.c
+++ b/tools/perf/builtin-report.c
@@ -1121,7 +1121,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 		more_data += sizeof(u64);
 	}
 
-	dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->header.misc,
@@ -1158,9 +1158,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 	if (comm_list && !strlist__has_entry(comm_list, thread->comm))
 		return 0;
 
-	cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
+	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-	if (cpumode == PERF_EVENT_MISC_KERNEL) {
+	if (cpumode == PERF_RECORD_MISC_KERNEL) {
 		show = SHOW_KERNEL;
 		level = 'k';
 
@@ -1168,7 +1168,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 
 		dump_printf(" ...... dso: %s\n", dso->name);
 
-	} else if (cpumode == PERF_EVENT_MISC_USER) {
+	} else if (cpumode == PERF_RECORD_MISC_USER) {
 
 		show = SHOW_USER;
 		level = '.';
@@ -1210,7 +1210,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 
 	thread = threads__findnew(event->mmap.pid, &threads, &last_match);
 
-	dump_printf("%p [%p]: PERF_EVENT_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
+	dump_printf("%p [%p]: PERF_RECORD_MMAP %d/%d: [%p(%p) @ %p]: %s\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->mmap.pid,
@@ -1221,7 +1221,7 @@ process_mmap_event(event_t *event, unsigned long offset, unsigned long head)
 		event->mmap.filename);
 
 	if (thread == NULL || map == NULL) {
-		dump_printf("problem processing PERF_EVENT_MMAP, skipping event.\n");
+		dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
 		return 0;
 	}
 
@@ -1238,14 +1238,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 
 	thread = threads__findnew(event->comm.pid, &threads, &last_match);
 
-	dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
+	dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->comm.comm, event->comm.pid);
 
 	if (thread == NULL ||
 	    thread__set_comm_adjust(thread, event->comm.comm)) {
-		dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
+		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
 		return -1;
 	}
 	total_comm++;
@@ -1262,10 +1262,10 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
 	thread = threads__findnew(event->fork.pid, &threads, &last_match);
 	parent = threads__findnew(event->fork.ppid, &threads, &last_match);
 
-	dump_printf("%p [%p]: PERF_EVENT_%s: (%d:%d):(%d:%d)\n",
+	dump_printf("%p [%p]: PERF_RECORD_%s: (%d:%d):(%d:%d)\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
-		event->header.type == PERF_EVENT_FORK ? "FORK" : "EXIT",
+		event->header.type == PERF_RECORD_FORK ? "FORK" : "EXIT",
 		event->fork.pid, event->fork.tid,
 		event->fork.ppid, event->fork.ptid);
 
@@ -1276,11 +1276,11 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
 	if (thread == parent)
 		return 0;
 
-	if (event->header.type == PERF_EVENT_EXIT)
+	if (event->header.type == PERF_RECORD_EXIT)
 		return 0;
 
 	if (!thread || !parent || thread__fork(thread, parent)) {
-		dump_printf("problem processing PERF_EVENT_FORK, skipping event.\n");
+		dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
 		return -1;
 	}
 	total_fork++;
@@ -1291,7 +1291,7 @@ process_task_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_lost_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	dump_printf("%p [%p]: PERF_EVENT_LOST: id:%Ld: lost:%Ld\n",
+	dump_printf("%p [%p]: PERF_RECORD_LOST: id:%Ld: lost:%Ld\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->lost.id,
@@ -1305,7 +1305,7 @@ process_lost_event(event_t *event, unsigned long offset, unsigned long head)
 static int
 process_read_event(event_t *event, unsigned long offset, unsigned long head)
 {
-	struct perf_counter_attr *attr;
+	struct perf_event_attr *attr;
 
 	attr = perf_header__find_attr(event->read.id, header);
 
@@ -1319,7 +1319,7 @@ process_read_event(event_t *event, unsigned long offset, unsigned long head)
 			event->read.value);
 	}
 
-	dump_printf("%p [%p]: PERF_EVENT_READ: %d %d %s %Lu\n",
+	dump_printf("%p [%p]: PERF_RECORD_READ: %d %d %s %Lu\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->read.pid,
@@ -1337,31 +1337,31 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
 	trace_event(event);
 
 	switch (event->header.type) {
-	case PERF_EVENT_SAMPLE:
+	case PERF_RECORD_SAMPLE:
 		return process_sample_event(event, offset, head);
 
-	case PERF_EVENT_MMAP:
+	case PERF_RECORD_MMAP:
 		return process_mmap_event(event, offset, head);
 
-	case PERF_EVENT_COMM:
+	case PERF_RECORD_COMM:
 		return process_comm_event(event, offset, head);
 
-	case PERF_EVENT_FORK:
-	case PERF_EVENT_EXIT:
+	case PERF_RECORD_FORK:
+	case PERF_RECORD_EXIT:
 		return process_task_event(event, offset, head);
 
-	case PERF_EVENT_LOST:
+	case PERF_RECORD_LOST:
 		return process_lost_event(event, offset, head);
 
-	case PERF_EVENT_READ:
+	case PERF_RECORD_READ:
 		return process_read_event(event, offset, head);
 
 	/*
 	 * We don't process them right now but they are fine:
 	 */
 
-	case PERF_EVENT_THROTTLE:
-	case PERF_EVENT_UNTHROTTLE:
+	case PERF_RECORD_THROTTLE:
+	case PERF_RECORD_UNTHROTTLE:
 		return 0;
 
 	default:
diff --git a/tools/perf/builtin-sched.c b/tools/perf/builtin-sched.c
new file mode 100644
index 000000000000..ea9c15c0cdfe
--- /dev/null
+++ b/tools/perf/builtin-sched.c
@@ -0,0 +1,2004 @@
1#include "builtin.h"
2#include "perf.h"
3
4#include "util/util.h"
5#include "util/cache.h"
6#include "util/symbol.h"
7#include "util/thread.h"
8#include "util/header.h"
9
10#include "util/parse-options.h"
11#include "util/trace-event.h"
12
13#include "util/debug.h"
14
15#include <sys/types.h>
16#include <sys/prctl.h>
17
18#include <semaphore.h>
19#include <pthread.h>
20#include <math.h>
21
22static char const *input_name = "perf.data";
23static int input;
24static unsigned long page_size;
25static unsigned long mmap_window = 32;
26
27static unsigned long total_comm = 0;
28
29static struct rb_root threads;
30static struct thread *last_match;
31
32static struct perf_header *header;
33static u64 sample_type;
34
35static char default_sort_order[] = "avg, max, switch, runtime";
36static char *sort_order = default_sort_order;
37
38#define PR_SET_NAME 15 /* Set process name */
39#define MAX_CPUS 4096
40
41#define BUG_ON(x) assert(!(x))
42
43static u64 run_measurement_overhead;
44static u64 sleep_measurement_overhead;
45
46#define COMM_LEN 20
47#define SYM_LEN 129
48
49#define MAX_PID 65536
50
51static unsigned long nr_tasks;
52
53struct sched_atom;
54
55struct task_desc {
56 unsigned long nr;
57 unsigned long pid;
58 char comm[COMM_LEN];
59
60 unsigned long nr_events;
61 unsigned long curr_event;
62 struct sched_atom **atoms;
63
64 pthread_t thread;
65 sem_t sleep_sem;
66
67 sem_t ready_for_work;
68 sem_t work_done_sem;
69
70 u64 cpu_usage;
71};
72
73enum sched_event_type {
74 SCHED_EVENT_RUN,
75 SCHED_EVENT_SLEEP,
76 SCHED_EVENT_WAKEUP,
77};
78
79struct sched_atom {
80 enum sched_event_type type;
81 u64 timestamp;
82 u64 duration;
83 unsigned long nr;
84 int specific_wait;
85 sem_t *wait_sem;
86 struct task_desc *wakee;
87};
88
89static struct task_desc *pid_to_task[MAX_PID];
90
91static struct task_desc **tasks;
92
93static pthread_mutex_t start_work_mutex = PTHREAD_MUTEX_INITIALIZER;
94static u64 start_time;
95
96static pthread_mutex_t work_done_wait_mutex = PTHREAD_MUTEX_INITIALIZER;
97
98static unsigned long nr_run_events;
99static unsigned long nr_sleep_events;
100static unsigned long nr_wakeup_events;
101
102static unsigned long nr_sleep_corrections;
103static unsigned long nr_run_events_optimized;
104
105static unsigned long targetless_wakeups;
106static unsigned long multitarget_wakeups;
107
108static u64 cpu_usage;
109static u64 runavg_cpu_usage;
110static u64 parent_cpu_usage;
111static u64 runavg_parent_cpu_usage;
112
113static unsigned long nr_runs;
114static u64 sum_runtime;
115static u64 sum_fluct;
116static u64 run_avg;
117
118static unsigned long replay_repeat = 10;
119static unsigned long nr_timestamps;
120static unsigned long nr_unordered_timestamps;
121static unsigned long nr_state_machine_bugs;
122static unsigned long nr_context_switch_bugs;
123static unsigned long nr_events;
124static unsigned long nr_lost_chunks;
125static unsigned long nr_lost_events;
126
127#define TASK_STATE_TO_CHAR_STR "RSDTtZX"
128
129enum thread_state {
130 THREAD_SLEEPING = 0,
131 THREAD_WAIT_CPU,
132 THREAD_SCHED_IN,
133 THREAD_IGNORE
134};
135
136struct work_atom {
137 struct list_head list;
138 enum thread_state state;
139 u64 sched_out_time;
140 u64 wake_up_time;
141 u64 sched_in_time;
142 u64 runtime;
143};
144
145struct work_atoms {
146 struct list_head work_list;
147 struct thread *thread;
148 struct rb_node node;
149 u64 max_lat;
150 u64 total_lat;
151 u64 nb_atoms;
152 u64 total_runtime;
153};
154
155typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
156
157static struct rb_root atom_root, sorted_atom_root;
158
159static u64 all_runtime;
160static u64 all_count;
161
162
163static u64 get_nsecs(void)
164{
165 struct timespec ts;
166
167 clock_gettime(CLOCK_MONOTONIC, &ts);
168
169 return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
170}
171
172static void burn_nsecs(u64 nsecs)
173{
174 u64 T0 = get_nsecs(), T1;
175
176 do {
177 T1 = get_nsecs();
178 } while (T1 + run_measurement_overhead < T0 + nsecs);
179}
180
181static void sleep_nsecs(u64 nsecs)
182{
183 struct timespec ts;
184
185 ts.tv_nsec = nsecs % 1000000000;
186 ts.tv_sec = nsecs / 1000000000;
187
188 nanosleep(&ts, NULL);
189}
190
191static void calibrate_run_measurement_overhead(void)
192{
193 u64 T0, T1, delta, min_delta = 1000000000ULL;
194 int i;
195
196 for (i = 0; i < 10; i++) {
197 T0 = get_nsecs();
198 burn_nsecs(0);
199 T1 = get_nsecs();
200 delta = T1-T0;
201 min_delta = min(min_delta, delta);
202 }
203 run_measurement_overhead = min_delta;
204
205 printf("run measurement overhead: %Ld nsecs\n", min_delta);
206}
207
208static void calibrate_sleep_measurement_overhead(void)
209{
210 u64 T0, T1, delta, min_delta = 1000000000ULL;
211 int i;
212
213 for (i = 0; i < 10; i++) {
214 T0 = get_nsecs();
215 sleep_nsecs(10000);
216 T1 = get_nsecs();
217 delta = T1-T0;
218 min_delta = min(min_delta, delta);
219 }
220 min_delta -= 10000;
221 sleep_measurement_overhead = min_delta;
222
223 printf("sleep measurement overhead: %Ld nsecs\n", min_delta);
224}
225
226static struct sched_atom *
227get_new_event(struct task_desc *task, u64 timestamp)
228{
229 struct sched_atom *event = calloc(1, sizeof(*event));
230 unsigned long idx = task->nr_events;
231 size_t size;
232
233 event->timestamp = timestamp;
234 event->nr = idx;
235
236 task->nr_events++;
237 size = sizeof(struct sched_atom *) * task->nr_events;
238 task->atoms = realloc(task->atoms, size);
239 BUG_ON(!task->atoms);
240
241 task->atoms[idx] = event;
242
243 return event;
244}
245
246static struct sched_atom *last_event(struct task_desc *task)
247{
248 if (!task->nr_events)
249 return NULL;
250
251 return task->atoms[task->nr_events - 1];
252}
253
254static void
255add_sched_event_run(struct task_desc *task, u64 timestamp, u64 duration)
256{
257 struct sched_atom *event, *curr_event = last_event(task);
258
259 /*
260 * optimize an existing RUN event by merging this one
261 * to it:
262 */
263 if (curr_event && curr_event->type == SCHED_EVENT_RUN) {
264 nr_run_events_optimized++;
265 curr_event->duration += duration;
266 return;
267 }
268
269 event = get_new_event(task, timestamp);
270
271 event->type = SCHED_EVENT_RUN;
272 event->duration = duration;
273
274 nr_run_events++;
275}
276
277static void
278add_sched_event_wakeup(struct task_desc *task, u64 timestamp,
279 struct task_desc *wakee)
280{
281 struct sched_atom *event, *wakee_event;
282
283 event = get_new_event(task, timestamp);
284 event->type = SCHED_EVENT_WAKEUP;
285 event->wakee = wakee;
286
287 wakee_event = last_event(wakee);
288 if (!wakee_event || wakee_event->type != SCHED_EVENT_SLEEP) {
289 targetless_wakeups++;
290 return;
291 }
292 if (wakee_event->wait_sem) {
293 multitarget_wakeups++;
294 return;
295 }
296
297 wakee_event->wait_sem = calloc(1, sizeof(*wakee_event->wait_sem));
298 sem_init(wakee_event->wait_sem, 0, 0);
299 wakee_event->specific_wait = 1;
300 event->wait_sem = wakee_event->wait_sem;
301
302 nr_wakeup_events++;
303}
304
305static void
306add_sched_event_sleep(struct task_desc *task, u64 timestamp,
307 u64 task_state __used)
308{
309 struct sched_atom *event = get_new_event(task, timestamp);
310
311 event->type = SCHED_EVENT_SLEEP;
312
313 nr_sleep_events++;
314}
315
316static struct task_desc *register_pid(unsigned long pid, const char *comm)
317{
318 struct task_desc *task;
319
320 BUG_ON(pid >= MAX_PID);
321
322 task = pid_to_task[pid];
323
324 if (task)
325 return task;
326
327 task = calloc(1, sizeof(*task));
328 task->pid = pid;
329 task->nr = nr_tasks;
330 strcpy(task->comm, comm);
331 /*
332 * every task starts in sleeping state - this gets ignored
333 * if there's no wakeup pointing to this sleep state:
334 */
335 add_sched_event_sleep(task, 0, 0);
336
337 pid_to_task[pid] = task;
338 nr_tasks++;
339 tasks = realloc(tasks, nr_tasks*sizeof(struct task_desc *));
340 BUG_ON(!tasks);
341 tasks[task->nr] = task;
342
343 if (verbose)
344 printf("registered task #%ld, PID %ld (%s)\n", nr_tasks, pid, comm);
345
346 return task;
347}
348
349
350static void print_task_traces(void)
351{
352 struct task_desc *task;
353 unsigned long i;
354
355 for (i = 0; i < nr_tasks; i++) {
356 task = tasks[i];
357 printf("task %6ld (%20s:%10ld), nr_events: %ld\n",
358 task->nr, task->comm, task->pid, task->nr_events);
359 }
360}
361
362static void add_cross_task_wakeups(void)
363{
364 struct task_desc *task1, *task2;
365 unsigned long i, j;
366
367 for (i = 0; i < nr_tasks; i++) {
368 task1 = tasks[i];
369 j = i + 1;
370 if (j == nr_tasks)
371 j = 0;
372 task2 = tasks[j];
373 add_sched_event_wakeup(task1, 0, task2);
374 }
375}
376
377static void
378process_sched_event(struct task_desc *this_task __used, struct sched_atom *atom)
379{
380 int ret = 0;
381 u64 now;
382 long long delta;
383
384 now = get_nsecs();
385 delta = start_time + atom->timestamp - now;
386
387 switch (atom->type) {
388 case SCHED_EVENT_RUN:
389 burn_nsecs(atom->duration);
390 break;
391 case SCHED_EVENT_SLEEP:
392 if (atom->wait_sem)
393 ret = sem_wait(atom->wait_sem);
394 BUG_ON(ret);
395 break;
396 case SCHED_EVENT_WAKEUP:
397 if (atom->wait_sem)
398 ret = sem_post(atom->wait_sem);
399 BUG_ON(ret);
400 break;
401 default:
402 BUG_ON(1);
403 }
404}
405
406static u64 get_cpu_usage_nsec_parent(void)
407{
408 struct rusage ru;
409 u64 sum;
410 int err;
411
412 err = getrusage(RUSAGE_SELF, &ru);
413 BUG_ON(err);
414
415 sum = ru.ru_utime.tv_sec*1e9 + ru.ru_utime.tv_usec*1e3;
416 sum += ru.ru_stime.tv_sec*1e9 + ru.ru_stime.tv_usec*1e3;
417
418 return sum;
419}
420
421static u64 get_cpu_usage_nsec_self(void)
422{
423 char filename [] = "/proc/1234567890/sched";
424 unsigned long msecs, nsecs;
425 char *line = NULL;
426 u64 total = 0;
427 size_t len = 0;
428 ssize_t chars;
429 FILE *file;
430 int ret;
431
432 sprintf(filename, "/proc/%d/sched", getpid());
433 file = fopen(filename, "r");
434 BUG_ON(!file);
435
436 while ((chars = getline(&line, &len, file)) != -1) {
437 ret = sscanf(line, "se.sum_exec_runtime : %ld.%06ld\n",
438 &msecs, &nsecs);
439 if (ret == 2) {
440 total = msecs*1e6 + nsecs;
441 break;
442 }
443 }
444 if (line)
445 free(line);
446 fclose(file);
447
448 return total;
449}
450
451static void *thread_func(void *ctx)
452{
453 struct task_desc *this_task = ctx;
454 u64 cpu_usage_0, cpu_usage_1;
455 unsigned long i, ret;
456 char comm2[22];
457
458 sprintf(comm2, ":%s", this_task->comm);
459 prctl(PR_SET_NAME, comm2);
460
461again:
462 ret = sem_post(&this_task->ready_for_work);
463 BUG_ON(ret);
464 ret = pthread_mutex_lock(&start_work_mutex);
465 BUG_ON(ret);
466 ret = pthread_mutex_unlock(&start_work_mutex);
467 BUG_ON(ret);
468
469 cpu_usage_0 = get_cpu_usage_nsec_self();
470
471 for (i = 0; i < this_task->nr_events; i++) {
472 this_task->curr_event = i;
473 process_sched_event(this_task, this_task->atoms[i]);
474 }
475
476 cpu_usage_1 = get_cpu_usage_nsec_self();
477 this_task->cpu_usage = cpu_usage_1 - cpu_usage_0;
478
479 ret = sem_post(&this_task->work_done_sem);
480 BUG_ON(ret);
481
482 ret = pthread_mutex_lock(&work_done_wait_mutex);
483 BUG_ON(ret);
484 ret = pthread_mutex_unlock(&work_done_wait_mutex);
485 BUG_ON(ret);
486
487 goto again;
488}
489
490static void create_tasks(void)
491{
492 struct task_desc *task;
493 pthread_attr_t attr;
494 unsigned long i;
495 int err;
496
497 err = pthread_attr_init(&attr);
498 BUG_ON(err);
499 err = pthread_attr_setstacksize(&attr, (size_t)(16*1024));
500 BUG_ON(err);
501 err = pthread_mutex_lock(&start_work_mutex);
502 BUG_ON(err);
503 err = pthread_mutex_lock(&work_done_wait_mutex);
504 BUG_ON(err);
505 for (i = 0; i < nr_tasks; i++) {
506 task = tasks[i];
507 sem_init(&task->sleep_sem, 0, 0);
508 sem_init(&task->ready_for_work, 0, 0);
509 sem_init(&task->work_done_sem, 0, 0);
510 task->curr_event = 0;
511 err = pthread_create(&task->thread, &attr, thread_func, task);
512 BUG_ON(err);
513 }
514}
515
516static void wait_for_tasks(void)
517{
518 u64 cpu_usage_0, cpu_usage_1;
519 struct task_desc *task;
520 unsigned long i, ret;
521
522 start_time = get_nsecs();
523 cpu_usage = 0;
524 pthread_mutex_unlock(&work_done_wait_mutex);
525
526 for (i = 0; i < nr_tasks; i++) {
527 task = tasks[i];
528 ret = sem_wait(&task->ready_for_work);
529 BUG_ON(ret);
530 sem_init(&task->ready_for_work, 0, 0);
531 }
532 ret = pthread_mutex_lock(&work_done_wait_mutex);
533 BUG_ON(ret);
534
535 cpu_usage_0 = get_cpu_usage_nsec_parent();
536
537 pthread_mutex_unlock(&start_work_mutex);
538
539 for (i = 0; i < nr_tasks; i++) {
540 task = tasks[i];
541 ret = sem_wait(&task->work_done_sem);
542 BUG_ON(ret);
543 sem_init(&task->work_done_sem, 0, 0);
544 cpu_usage += task->cpu_usage;
545 task->cpu_usage = 0;
546 }
547
548 cpu_usage_1 = get_cpu_usage_nsec_parent();
549 if (!runavg_cpu_usage)
550 runavg_cpu_usage = cpu_usage;
551 runavg_cpu_usage = (runavg_cpu_usage*9 + cpu_usage)/10;
552
553 parent_cpu_usage = cpu_usage_1 - cpu_usage_0;
554 if (!runavg_parent_cpu_usage)
555 runavg_parent_cpu_usage = parent_cpu_usage;
556 runavg_parent_cpu_usage = (runavg_parent_cpu_usage*9 +
557 parent_cpu_usage)/10;
558
559 ret = pthread_mutex_lock(&start_work_mutex);
560 BUG_ON(ret);
561
562 for (i = 0; i < nr_tasks; i++) {
563 task = tasks[i];
564 sem_init(&task->sleep_sem, 0, 0);
565 task->curr_event = 0;
566 }
567}
568
569static void run_one_test(void)
570{
571 u64 T0, T1, delta, avg_delta, fluct, std_dev;
572
573 T0 = get_nsecs();
574 wait_for_tasks();
575 T1 = get_nsecs();
576
577 delta = T1 - T0;
578 sum_runtime += delta;
579 nr_runs++;
580
581 avg_delta = sum_runtime / nr_runs;
582 if (delta < avg_delta)
583 fluct = avg_delta - delta;
584 else
585 fluct = delta - avg_delta;
586 sum_fluct += fluct;
587 std_dev = sum_fluct / nr_runs / sqrt(nr_runs);
588 if (!run_avg)
589 run_avg = delta;
590 run_avg = (run_avg*9 + delta)/10;
591
592 printf("#%-3ld: %0.3f, ",
593 nr_runs, (double)delta/1000000.0);
594
595 printf("ravg: %0.2f, ",
596 (double)run_avg/1e6);
597
598 printf("cpu: %0.2f / %0.2f",
599 (double)cpu_usage/1e6, (double)runavg_cpu_usage/1e6);
600
601#if 0
602 /*
603 * rusage statistics done by the parent, these are less
604 * accurate than the sum_exec_runtime based statistics:
605 */
606 printf(" [%0.2f / %0.2f]",
607 (double)parent_cpu_usage/1e6,
608 (double)runavg_parent_cpu_usage/1e6);
609#endif
610
611 printf("\n");
612
613 if (nr_sleep_corrections)
614 printf(" (%ld sleep corrections)\n", nr_sleep_corrections);
615 nr_sleep_corrections = 0;
616}
617
618static void test_calibrations(void)
619{
620 u64 T0, T1;
621
622 T0 = get_nsecs();
623 burn_nsecs(1e6);
624 T1 = get_nsecs();
625
626 printf("the run test took %Ld nsecs\n", T1-T0);
627
628 T0 = get_nsecs();
629 sleep_nsecs(1e6);
630 T1 = get_nsecs();
631
632 printf("the sleep test took %Ld nsecs\n", T1-T0);
633}
634
635static int
636process_comm_event(event_t *event, unsigned long offset, unsigned long head)
637{
638 struct thread *thread;
639
640 thread = threads__findnew(event->comm.pid, &threads, &last_match);
641
642 dump_printf("%p [%p]: perf_event_comm: %s:%d\n",
643 (void *)(offset + head),
644 (void *)(long)(event->header.size),
645 event->comm.comm, event->comm.pid);
646
647 if (thread == NULL ||
648 thread__set_comm(thread, event->comm.comm)) {
649 dump_printf("problem processing perf_event_comm, skipping event.\n");
650 return -1;
651 }
652 total_comm++;
653
654 return 0;
655}
656
657
658struct raw_event_sample {
659 u32 size;
660 char data[0];
661};
662
663#define FILL_FIELD(ptr, field, event, data) \
664 ptr.field = (typeof(ptr.field)) raw_field_value(event, #field, data)
665
666#define FILL_ARRAY(ptr, array, event, data) \
667do { \
668 void *__array = raw_field_ptr(event, #array, data); \
669 memcpy(ptr.array, __array, sizeof(ptr.array)); \
670} while(0)
671
672#define FILL_COMMON_FIELDS(ptr, event, data) \
673do { \
674 FILL_FIELD(ptr, common_type, event, data); \
675 FILL_FIELD(ptr, common_flags, event, data); \
676 FILL_FIELD(ptr, common_preempt_count, event, data); \
677 FILL_FIELD(ptr, common_pid, event, data); \
678 FILL_FIELD(ptr, common_tgid, event, data); \
679} while (0)
680
681
682
683struct trace_switch_event {
684 u32 size;
685
686 u16 common_type;
687 u8 common_flags;
688 u8 common_preempt_count;
689 u32 common_pid;
690 u32 common_tgid;
691
692 char prev_comm[16];
693 u32 prev_pid;
694 u32 prev_prio;
695 u64 prev_state;
696 char next_comm[16];
697 u32 next_pid;
698 u32 next_prio;
699};
700
701struct trace_runtime_event {
702 u32 size;
703
704 u16 common_type;
705 u8 common_flags;
706 u8 common_preempt_count;
707 u32 common_pid;
708 u32 common_tgid;
709
710 char comm[16];
711 u32 pid;
712 u64 runtime;
713 u64 vruntime;
714};
715
716struct trace_wakeup_event {
717 u32 size;
718
719 u16 common_type;
720 u8 common_flags;
721 u8 common_preempt_count;
722 u32 common_pid;
723 u32 common_tgid;
724
725 char comm[16];
726 u32 pid;
727
728 u32 prio;
729 u32 success;
730 u32 cpu;
731};
732
733struct trace_fork_event {
734 u32 size;
735
736 u16 common_type;
737 u8 common_flags;
738 u8 common_preempt_count;
739 u32 common_pid;
740 u32 common_tgid;
741
742 char parent_comm[16];
743 u32 parent_pid;
744 char child_comm[16];
745 u32 child_pid;
746};
747
748struct trace_sched_handler {
749 void (*switch_event)(struct trace_switch_event *,
750 struct event *,
751 int cpu,
752 u64 timestamp,
753 struct thread *thread);
754
755 void (*runtime_event)(struct trace_runtime_event *,
756 struct event *,
757 int cpu,
758 u64 timestamp,
759 struct thread *thread);
760
761 void (*wakeup_event)(struct trace_wakeup_event *,
762 struct event *,
763 int cpu,
764 u64 timestamp,
765 struct thread *thread);
766
767 void (*fork_event)(struct trace_fork_event *,
768 struct event *,
769 int cpu,
770 u64 timestamp,
771 struct thread *thread);
772};
773
774
775static void
776replay_wakeup_event(struct trace_wakeup_event *wakeup_event,
777 struct event *event,
778 int cpu __used,
779 u64 timestamp __used,
780 struct thread *thread __used)
781{
782 struct task_desc *waker, *wakee;
783
784 if (verbose) {
785 printf("sched_wakeup event %p\n", event);
786
787 printf(" ... pid %d woke up %s/%d\n",
788 wakeup_event->common_pid,
789 wakeup_event->comm,
790 wakeup_event->pid);
791 }
792
793 waker = register_pid(wakeup_event->common_pid, "<unknown>");
794 wakee = register_pid(wakeup_event->pid, wakeup_event->comm);
795
796 add_sched_event_wakeup(waker, timestamp, wakee);
797}
798
799static u64 cpu_last_switched[MAX_CPUS];
800
801static void
802replay_switch_event(struct trace_switch_event *switch_event,
803 struct event *event,
804 int cpu,
805 u64 timestamp,
806 struct thread *thread __used)
807{
808 struct task_desc *prev, *next;
809 u64 timestamp0;
810 s64 delta;
811
812 if (verbose)
813 printf("sched_switch event %p\n", event);
814
815 if (cpu >= MAX_CPUS || cpu < 0)
816 return;
817
818 timestamp0 = cpu_last_switched[cpu];
819 if (timestamp0)
820 delta = timestamp - timestamp0;
821 else
822 delta = 0;
823
824 if (delta < 0)
825 die("hm, delta: %Ld < 0 ?\n", delta);
826
827 if (verbose) {
828 printf(" ... switch from %s/%d to %s/%d [ran %Ld nsecs]\n",
829 switch_event->prev_comm, switch_event->prev_pid,
830 switch_event->next_comm, switch_event->next_pid,
831 delta);
832 }
833
834 prev = register_pid(switch_event->prev_pid, switch_event->prev_comm);
835 next = register_pid(switch_event->next_pid, switch_event->next_comm);
836
837 cpu_last_switched[cpu] = timestamp;
838
839 add_sched_event_run(prev, timestamp, delta);
840 add_sched_event_sleep(prev, timestamp, switch_event->prev_state);
841}
842
843
844static void
845replay_fork_event(struct trace_fork_event *fork_event,
846 struct event *event,
847 int cpu __used,
848 u64 timestamp __used,
849 struct thread *thread __used)
850{
851 if (verbose) {
852 printf("sched_fork event %p\n", event);
853 printf("... parent: %s/%d\n", fork_event->parent_comm, fork_event->parent_pid);
854 printf("... child: %s/%d\n", fork_event->child_comm, fork_event->child_pid);
855 }
856 register_pid(fork_event->parent_pid, fork_event->parent_comm);
857 register_pid(fork_event->child_pid, fork_event->child_comm);
858}
859
860static struct trace_sched_handler replay_ops = {
861 .wakeup_event = replay_wakeup_event,
862 .switch_event = replay_switch_event,
863 .fork_event = replay_fork_event,
864};
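replay_ops is one concrete filling of the trace_sched_handler table defined
above; lat_ops further down is another. The dispatch side lives later in
builtin-sched.c, beyond this excerpt; a minimal sketch of that pattern, with
trace_handler assumed to point at the active table:

static struct trace_sched_handler *trace_handler;

static void
process_sched_switch_event(struct trace_switch_event *switch_event,
			   struct event *event, int cpu, u64 timestamp,
			   struct thread *thread)
{
	if (trace_handler->switch_event)
		trace_handler->switch_event(switch_event, event, cpu,
					    timestamp, thread);
}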
865
866struct sort_dimension {
867 const char *name;
868 sort_fn_t cmp;
869 struct list_head list;
870};
871
872static LIST_HEAD(cmp_pid);
873
874static int
875thread_lat_cmp(struct list_head *list, struct work_atoms *l, struct work_atoms *r)
876{
877 struct sort_dimension *sort;
878 int ret = 0;
879
880 BUG_ON(list_empty(list));
881
882 list_for_each_entry(sort, list, list) {
883 ret = sort->cmp(l, r);
884 if (ret)
885 return ret;
886 }
887
888 return ret;
889}
890
891static struct work_atoms *
892thread_atoms_search(struct rb_root *root, struct thread *thread,
893 struct list_head *sort_list)
894{
895 struct rb_node *node = root->rb_node;
896 struct work_atoms key = { .thread = thread };
897
898 while (node) {
899 struct work_atoms *atoms;
900 int cmp;
901
902 atoms = container_of(node, struct work_atoms, node);
903
904 cmp = thread_lat_cmp(sort_list, &key, atoms);
905 if (cmp > 0)
906 node = node->rb_left;
907 else if (cmp < 0)
908 node = node->rb_right;
909 else {
910 BUG_ON(thread != atoms->thread);
911 return atoms;
912 }
913 }
914 return NULL;
915}
916
917static void
918__thread_latency_insert(struct rb_root *root, struct work_atoms *data,
919 struct list_head *sort_list)
920{
921 struct rb_node **new = &(root->rb_node), *parent = NULL;
922
923 while (*new) {
924 struct work_atoms *this;
925 int cmp;
926
927 this = container_of(*new, struct work_atoms, node);
928 parent = *new;
929
930 cmp = thread_lat_cmp(sort_list, data, this);
931
932 if (cmp > 0)
933 new = &((*new)->rb_left);
934 else
935 new = &((*new)->rb_right);
936 }
937
938 rb_link_node(&data->node, parent, new);
939 rb_insert_color(&data->node, root);
940}
941
942static void thread_atoms_insert(struct thread *thread)
943{
944 struct work_atoms *atoms;
945
946 atoms = calloc(sizeof(*atoms), 1);
947 if (!atoms)
948 die("No memory");
949
950 atoms->thread = thread;
951 INIT_LIST_HEAD(&atoms->work_list);
952 __thread_latency_insert(&atom_root, atoms, &cmp_pid);
953}
954
955static void
956latency_fork_event(struct trace_fork_event *fork_event __used,
957 struct event *event __used,
958 int cpu __used,
959 u64 timestamp __used,
960 struct thread *thread __used)
961{
962 /* should insert the newcomer */
963}
964
965__used
966static char sched_out_state(struct trace_switch_event *switch_event)
967{
968 const char *str = TASK_STATE_TO_CHAR_STR;
969
970 return str[switch_event->prev_state];
971}
972
973static void
974add_sched_out_event(struct work_atoms *atoms,
975 char run_state,
976 u64 timestamp)
977{
978 struct work_atom *atom;
979
980 atom = calloc(sizeof(*atom), 1);
981 if (!atom)
982 die("Non memory");
983
984 atom->sched_out_time = timestamp;
985
986 if (run_state == 'R') {
987 atom->state = THREAD_WAIT_CPU;
988 atom->wake_up_time = atom->sched_out_time;
989 }
990
991 list_add_tail(&atom->list, &atoms->work_list);
992}
993
994static void
995add_runtime_event(struct work_atoms *atoms, u64 delta, u64 timestamp __used)
996{
997 struct work_atom *atom;
998
999 BUG_ON(list_empty(&atoms->work_list));
1000
1001 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1002
1003 atom->runtime += delta;
1004 atoms->total_runtime += delta;
1005}
1006
1007static void
1008add_sched_in_event(struct work_atoms *atoms, u64 timestamp)
1009{
1010 struct work_atom *atom;
1011 u64 delta;
1012
1013 if (list_empty(&atoms->work_list))
1014 return;
1015
1016 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1017
1018 if (atom->state != THREAD_WAIT_CPU)
1019 return;
1020
1021 if (timestamp < atom->wake_up_time) {
1022 atom->state = THREAD_IGNORE;
1023 return;
1024 }
1025
1026 atom->state = THREAD_SCHED_IN;
1027 atom->sched_in_time = timestamp;
1028
1029 delta = atom->sched_in_time - atom->wake_up_time;
1030 atoms->total_lat += delta;
1031 if (delta > atoms->max_lat)
1032 atoms->max_lat = delta;
1033 atoms->nb_atoms++;
1034}
1035
1036static void
1037latency_switch_event(struct trace_switch_event *switch_event,
1038 struct event *event __used,
1039 int cpu,
1040 u64 timestamp,
1041 struct thread *thread __used)
1042{
1043 struct work_atoms *out_events, *in_events;
1044 struct thread *sched_out, *sched_in;
1045 u64 timestamp0;
1046 s64 delta;
1047
1048 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1049
1050 timestamp0 = cpu_last_switched[cpu];
1051 cpu_last_switched[cpu] = timestamp;
1052 if (timestamp0)
1053 delta = timestamp - timestamp0;
1054 else
1055 delta = 0;
1056
1057 if (delta < 0)
1058 die("hm, delta: %Ld < 0 ?\n", delta);
1059
1060
1061 sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
1062 sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
1063
1064 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1065 if (!out_events) {
1066 thread_atoms_insert(sched_out);
1067 out_events = thread_atoms_search(&atom_root, sched_out, &cmp_pid);
1068 if (!out_events)
1069 die("out-event: Internal tree error");
1070 }
1071 add_sched_out_event(out_events, sched_out_state(switch_event), timestamp);
1072
1073 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1074 if (!in_events) {
1075 thread_atoms_insert(sched_in);
1076 in_events = thread_atoms_search(&atom_root, sched_in, &cmp_pid);
1077 if (!in_events)
1078 die("in-event: Internal tree error");
1079 /*
1080 * A task came in that we have not heard about yet;
1081 * add an initial atom in runnable state:
1082 */
1083 add_sched_out_event(in_events, 'R', timestamp);
1084 }
1085 add_sched_in_event(in_events, timestamp);
1086}
1087
1088static void
1089latency_runtime_event(struct trace_runtime_event *runtime_event,
1090 struct event *event __used,
1091 int cpu,
1092 u64 timestamp,
1093 struct thread *this_thread __used)
1094{
1095 struct work_atoms *atoms;
1096 struct thread *thread;
1097
1098 BUG_ON(cpu >= MAX_CPUS || cpu < 0);
1099
1100 thread = threads__findnew(runtime_event->pid, &threads, &last_match);
1101 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1102 if (!atoms) {
1103 thread_atoms_insert(thread);
1104 atoms = thread_atoms_search(&atom_root, thread, &cmp_pid);
1105 if (!atoms)
1106 die("in-event: Internal tree error");
1107 add_sched_out_event(atoms, 'R', timestamp);
1108 }
1109
1110 add_runtime_event(atoms, runtime_event->runtime, timestamp);
1111}
1112
1113static void
1114latency_wakeup_event(struct trace_wakeup_event *wakeup_event,
1115 struct event *__event __used,
1116 int cpu __used,
1117 u64 timestamp,
1118 struct thread *thread __used)
1119{
1120 struct work_atoms *atoms;
1121 struct work_atom *atom;
1122 struct thread *wakee;
1123
1124 /* Note for later, it may be interesting to observe the failing cases */
1125 if (!wakeup_event->success)
1126 return;
1127
1128 wakee = threads__findnew(wakeup_event->pid, &threads, &last_match);
1129 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1130 if (!atoms) {
1131 thread_atoms_insert(wakee);
1132 atoms = thread_atoms_search(&atom_root, wakee, &cmp_pid);
1133 if (!atoms)
1134 die("wakeup-event: Internal tree error");
1135 add_sched_out_event(atoms, 'S', timestamp);
1136 }
1137
1138 BUG_ON(list_empty(&atoms->work_list));
1139
1140 atom = list_entry(atoms->work_list.prev, struct work_atom, list);
1141
1142 if (atom->state != THREAD_SLEEPING)
1143 nr_state_machine_bugs++;
1144
1145 nr_timestamps++;
1146 if (atom->sched_out_time > timestamp) {
1147 nr_unordered_timestamps++;
1148 return;
1149 }
1150
1151 atom->state = THREAD_WAIT_CPU;
1152 atom->wake_up_time = timestamp;
1153}
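/*
 * A rough sketch of the atom life cycle that the three handlers above
 * drive (state names as used elsewhere in this file):
 *
 *   sched-out, state 'S'  ->  THREAD_SLEEPING  --wakeup-->  THREAD_WAIT_CPU
 *   sched-out, state 'R'  ->  runnable, waiting for a CPU right away
 *   sched-in              ->  latency sample: sched_in_time - wake_up_time
 */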
1154
1155static struct trace_sched_handler lat_ops = {
1156 .wakeup_event = latency_wakeup_event,
1157 .switch_event = latency_switch_event,
1158 .runtime_event = latency_runtime_event,
1159 .fork_event = latency_fork_event,
1160};
1161
1162static void output_lat_thread(struct work_atoms *work_list)
1163{
1164 int i;
1165 int ret;
1166 u64 avg;
1167
1168 if (!work_list->nb_atoms)
1169 return;
1170 /*
1171 * Ignore idle threads:
1172 */
1173 if (!strcmp(work_list->thread->comm, "swapper"))
1174 return;
1175
1176 all_runtime += work_list->total_runtime;
1177 all_count += work_list->nb_atoms;
1178
1179 ret = printf(" %s:%d ", work_list->thread->comm, work_list->thread->pid);
1180
1181 for (i = 0; i < 24 - ret; i++)
1182 printf(" ");
1183
1184 avg = work_list->total_lat / work_list->nb_atoms;
1185
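	/* all latencies are in nanoseconds; divide by 1e6 to print ms */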
1186 printf("|%11.3f ms |%9llu | avg:%9.3f ms | max:%9.3f ms |\n",
1187 (double)work_list->total_runtime / 1e6,
1188 work_list->nb_atoms, (double)avg / 1e6,
1189 (double)work_list->max_lat / 1e6);
1190}
1191
1192static int pid_cmp(struct work_atoms *l, struct work_atoms *r)
1193{
1194 if (l->thread->pid < r->thread->pid)
1195 return -1;
1196 if (l->thread->pid > r->thread->pid)
1197 return 1;
1198
1199 return 0;
1200}
1201
1202static struct sort_dimension pid_sort_dimension = {
1203 .name = "pid",
1204 .cmp = pid_cmp,
1205};
1206
1207static int avg_cmp(struct work_atoms *l, struct work_atoms *r)
1208{
1209 u64 avgl, avgr;
1210
1211 if (!l->nb_atoms)
1212 return -1;
1213
1214 if (!r->nb_atoms)
1215 return 1;
1216
1217 avgl = l->total_lat / l->nb_atoms;
1218 avgr = r->total_lat / r->nb_atoms;
1219
1220 if (avgl < avgr)
1221 return -1;
1222 if (avgl > avgr)
1223 return 1;
1224
1225 return 0;
1226}
1227
1228static struct sort_dimension avg_sort_dimension = {
1229 .name = "avg",
1230 .cmp = avg_cmp,
1231};
1232
1233static int max_cmp(struct work_atoms *l, struct work_atoms *r)
1234{
1235 if (l->max_lat < r->max_lat)
1236 return -1;
1237 if (l->max_lat > r->max_lat)
1238 return 1;
1239
1240 return 0;
1241}
1242
1243static struct sort_dimension max_sort_dimension = {
1244 .name = "max",
1245 .cmp = max_cmp,
1246};
1247
1248static int switch_cmp(struct work_atoms *l, struct work_atoms *r)
1249{
1250 if (l->nb_atoms < r->nb_atoms)
1251 return -1;
1252 if (l->nb_atoms > r->nb_atoms)
1253 return 1;
1254
1255 return 0;
1256}
1257
1258static struct sort_dimension switch_sort_dimension = {
1259 .name = "switch",
1260 .cmp = switch_cmp,
1261};
1262
1263static int runtime_cmp(struct work_atoms *l, struct work_atoms *r)
1264{
1265 if (l->total_runtime < r->total_runtime)
1266 return -1;
1267 if (l->total_runtime > r->total_runtime)
1268 return 1;
1269
1270 return 0;
1271}
1272
1273static struct sort_dimension runtime_sort_dimension = {
1274 .name = "runtime",
1275 .cmp = runtime_cmp,
1276};
1277
1278static struct sort_dimension *available_sorts[] = {
1279 &pid_sort_dimension,
1280 &avg_sort_dimension,
1281 &max_sort_dimension,
1282 &switch_sort_dimension,
1283 &runtime_sort_dimension,
1284};
1285
1286#define NB_AVAILABLE_SORTS (int)(sizeof(available_sorts) / sizeof(struct sort_dimension *))
1287
1288static LIST_HEAD(sort_list);
1289
1290static int sort_dimension__add(char *tok, struct list_head *list)
1291{
1292 int i;
1293
1294 for (i = 0; i < NB_AVAILABLE_SORTS; i++) {
1295 if (!strcmp(available_sorts[i]->name, tok)) {
1296 list_add_tail(&available_sorts[i]->list, list);
1297
1298 return 0;
1299 }
1300 }
1301
1302 return -1;
1303}
1304
1305static void setup_sorting(void);
1306
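/*
 * Drain the pid-keyed tree and re-insert every entry into a second tree
 * that is ordered by the user-selected sort keys in sort_list:
 */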
1307static void sort_lat(void)
1308{
1309 struct rb_node *node;
1310
1311 for (;;) {
1312 struct work_atoms *data;
1313 node = rb_first(&atom_root);
1314 if (!node)
1315 break;
1316
1317 rb_erase(node, &atom_root);
1318 data = rb_entry(node, struct work_atoms, node);
1319 __thread_latency_insert(&sorted_atom_root, data, &sort_list);
1320 }
1321}
1322
1323static struct trace_sched_handler *trace_handler;
1324
1325static void
1326process_sched_wakeup_event(struct raw_event_sample *raw,
1327 struct event *event,
1328 int cpu __used,
1329 u64 timestamp __used,
1330 struct thread *thread __used)
1331{
1332 struct trace_wakeup_event wakeup_event;
1333
1334 FILL_COMMON_FIELDS(wakeup_event, event, raw->data);
1335
1336 FILL_ARRAY(wakeup_event, comm, event, raw->data);
1337 FILL_FIELD(wakeup_event, pid, event, raw->data);
1338 FILL_FIELD(wakeup_event, prio, event, raw->data);
1339 FILL_FIELD(wakeup_event, success, event, raw->data);
1340 FILL_FIELD(wakeup_event, cpu, event, raw->data);
1341
1342 if (trace_handler->wakeup_event)
1343 trace_handler->wakeup_event(&wakeup_event, event, cpu, timestamp, thread);
1344}
1345
1346/*
1347 * Track the current task - that way we can detect weird events,
1348 * such as a task being switched away that is not the current one.
1349 */
1350static int max_cpu;
1351
1352static u32 curr_pid[MAX_CPUS] = { [0 ... MAX_CPUS-1] = -1 };
1353
1354static struct thread *curr_thread[MAX_CPUS];
1355
1356static char next_shortname1 = 'A';
1357static char next_shortname2 = '0';
1358
1359static void
1360map_switch_event(struct trace_switch_event *switch_event,
1361 struct event *event __used,
1362 int this_cpu,
1363 u64 timestamp,
1364 struct thread *thread __used)
1365{
1366 struct thread *sched_out, *sched_in;
1367 int new_shortname;
1368 u64 timestamp0;
1369 s64 delta;
1370 int cpu;
1371
1372 BUG_ON(this_cpu >= MAX_CPUS || this_cpu < 0);
1373
1374 if (this_cpu > max_cpu)
1375 max_cpu = this_cpu;
1376
1377 timestamp0 = cpu_last_switched[this_cpu];
1378 cpu_last_switched[this_cpu] = timestamp;
1379 if (timestamp0)
1380 delta = timestamp - timestamp0;
1381 else
1382 delta = 0;
1383
1384 if (delta < 0)
1385 die("hm, delta: %Ld < 0 ?\n", delta);
1386
1387
1388 sched_out = threads__findnew(switch_event->prev_pid, &threads, &last_match);
1389 sched_in = threads__findnew(switch_event->next_pid, &threads, &last_match);
1390
1391 curr_thread[this_cpu] = sched_in;
1392
1393 printf(" ");
1394
1395 new_shortname = 0;
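	/*
	 * First sighting of this task: hand out the next free two-character
	 * shortname (A0, B0, ... Z0, then A1, ... wrapping around after Z9).
	 */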
1396 if (!sched_in->shortname[0]) {
1397 sched_in->shortname[0] = next_shortname1;
1398 sched_in->shortname[1] = next_shortname2;
1399
1400 if (next_shortname1 < 'Z') {
1401 next_shortname1++;
1402 } else {
1403 next_shortname1='A';
1404 if (next_shortname2 < '9') {
1405 next_shortname2++;
1406 } else {
1407 next_shortname2='0';
1408 }
1409 }
1410 new_shortname = 1;
1411 }
1412
1413 for (cpu = 0; cpu <= max_cpu; cpu++) {
1414 if (cpu != this_cpu)
1415 printf(" ");
1416 else
1417 printf("*");
1418
1419 if (curr_thread[cpu]) {
1420 if (curr_thread[cpu]->pid)
1421 printf("%2s ", curr_thread[cpu]->shortname);
1422 else
1423 printf(". ");
1424 } else
1425 printf(" ");
1426 }
1427
1428 printf(" %12.6f secs ", (double)timestamp/1e9);
1429 if (new_shortname) {
1430 printf("%s => %s:%d\n",
1431 sched_in->shortname, sched_in->comm, sched_in->pid);
1432 } else {
1433 printf("\n");
1434 }
1435}
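/*
 * The resulting map looks roughly like this (values illustrative only),
 * with '*' marking the CPU that the switch happened on:
 *
 *   *A0           308.566099 secs A0 => perf:4128
 *    A0  *B0      308.566340 secs B0 => migration/1:5
 */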
1436
1437
1438static void
1439process_sched_switch_event(struct raw_event_sample *raw,
1440 struct event *event,
1441 int this_cpu,
1442 u64 timestamp __used,
1443 struct thread *thread __used)
1444{
1445 struct trace_switch_event switch_event;
1446
1447 FILL_COMMON_FIELDS(switch_event, event, raw->data);
1448
1449 FILL_ARRAY(switch_event, prev_comm, event, raw->data);
1450 FILL_FIELD(switch_event, prev_pid, event, raw->data);
1451 FILL_FIELD(switch_event, prev_prio, event, raw->data);
1452 FILL_FIELD(switch_event, prev_state, event, raw->data);
1453 FILL_ARRAY(switch_event, next_comm, event, raw->data);
1454 FILL_FIELD(switch_event, next_pid, event, raw->data);
1455 FILL_FIELD(switch_event, next_prio, event, raw->data);
1456
1457 if (curr_pid[this_cpu] != (u32)-1) {
1458 /*
1459 * Are we trying to switch away a PID that is
1460 * not current?
1461 */
1462 if (curr_pid[this_cpu] != switch_event.prev_pid)
1463 nr_context_switch_bugs++;
1464 }
1465 if (trace_handler->switch_event)
1466 trace_handler->switch_event(&switch_event, event, this_cpu, timestamp, thread);
1467
1468 curr_pid[this_cpu] = switch_event.next_pid;
1469}
1470
1471static void
1472process_sched_runtime_event(struct raw_event_sample *raw,
1473 struct event *event,
1474 int cpu __used,
1475 u64 timestamp __used,
1476 struct thread *thread __used)
1477{
1478 struct trace_runtime_event runtime_event;
1479
1480 FILL_ARRAY(runtime_event, comm, event, raw->data);
1481 FILL_FIELD(runtime_event, pid, event, raw->data);
1482 FILL_FIELD(runtime_event, runtime, event, raw->data);
1483 FILL_FIELD(runtime_event, vruntime, event, raw->data);
1484
1485 if (trace_handler->runtime_event)
1486 trace_handler->runtime_event(&runtime_event, event, cpu, timestamp, thread);
1487}
1488
1489static void
1490process_sched_fork_event(struct raw_event_sample *raw,
1491 struct event *event,
1492 int cpu __used,
1493 u64 timestamp __used,
1494 struct thread *thread __used)
1495{
1496 struct trace_fork_event fork_event;
1497
1498 FILL_COMMON_FIELDS(fork_event, event, raw->data);
1499
1500 FILL_ARRAY(fork_event, parent_comm, event, raw->data);
1501 FILL_FIELD(fork_event, parent_pid, event, raw->data);
1502 FILL_ARRAY(fork_event, child_comm, event, raw->data);
1503 FILL_FIELD(fork_event, child_pid, event, raw->data);
1504
1505 if (trace_handler->fork_event)
1506 trace_handler->fork_event(&fork_event, event, cpu, timestamp, thread);
1507}
1508
1509static void
1510process_sched_exit_event(struct event *event,
1511 int cpu __used,
1512 u64 timestamp __used,
1513 struct thread *thread __used)
1514{
1515 if (verbose)
1516 printf("sched_exit event %p\n", event);
1517}
1518
1519static void
1520process_raw_event(event_t *raw_event __used, void *more_data,
1521 int cpu, u64 timestamp, struct thread *thread)
1522{
1523 struct raw_event_sample *raw = more_data;
1524 struct event *event;
1525 int type;
1526
1527 type = trace_parse_common_type(raw->data);
1528 event = trace_find_event(type);
1529
1530 if (!strcmp(event->name, "sched_switch"))
1531 process_sched_switch_event(raw, event, cpu, timestamp, thread);
1532 if (!strcmp(event->name, "sched_stat_runtime"))
1533 process_sched_runtime_event(raw, event, cpu, timestamp, thread);
1534 if (!strcmp(event->name, "sched_wakeup"))
1535 process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
1536 if (!strcmp(event->name, "sched_wakeup_new"))
1537 process_sched_wakeup_event(raw, event, cpu, timestamp, thread);
1538 if (!strcmp(event->name, "sched_process_fork"))
1539 process_sched_fork_event(raw, event, cpu, timestamp, thread);
1540 if (!strcmp(event->name, "sched_process_exit"))
1541 process_sched_exit_event(event, cpu, timestamp, thread);
1542}
1543
1544static int
1545process_sample_event(event_t *event, unsigned long offset, unsigned long head)
1546{
1547 char level;
1548 int show = 0;
1549 struct dso *dso = NULL;
1550 struct thread *thread;
1551 u64 ip = event->ip.ip;
1552 u64 timestamp = -1;
1553 u32 cpu = -1;
1554 u64 period = 1;
1555 void *more_data = event->ip.__more_data;
1556 int cpumode;
1557
1558 thread = threads__findnew(event->ip.pid, &threads, &last_match);
1559
1560 if (sample_type & PERF_SAMPLE_TIME) {
1561 timestamp = *(u64 *)more_data;
1562 more_data += sizeof(u64);
1563 }
1564
1565 if (sample_type & PERF_SAMPLE_CPU) {
1566 cpu = *(u32 *)more_data;
1567 more_data += sizeof(u32);
1568 more_data += sizeof(u32); /* reserved */
1569 }
1570
1571 if (sample_type & PERF_SAMPLE_PERIOD) {
1572 period = *(u64 *)more_data;
1573 more_data += sizeof(u64);
1574 }
1575
1576 dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
1577 (void *)(offset + head),
1578 (void *)(long)(event->header.size),
1579 event->header.misc,
1580 event->ip.pid, event->ip.tid,
1581 (void *)(long)ip,
1582 (long long)period);
1583
1584	if (thread == NULL) {
1585		eprintf("problem processing %d event, skipping it.\n",
1586			event->header.type);
1587		return -1;
1588	}
1589
1590	dump_printf(" ... thread: %s:%d\n", thread->comm, thread->pid);
1591
1592 cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
1593
1594 if (cpumode == PERF_RECORD_MISC_KERNEL) {
1595 show = SHOW_KERNEL;
1596 level = 'k';
1597
1598 dso = kernel_dso;
1599
1600 dump_printf(" ...... dso: %s\n", dso->name);
1601
1602 } else if (cpumode == PERF_RECORD_MISC_USER) {
1603
1604 show = SHOW_USER;
1605 level = '.';
1606
1607 } else {
1608 show = SHOW_HV;
1609 level = 'H';
1610
1611 dso = hypervisor_dso;
1612
1613 dump_printf(" ...... dso: [hypervisor]\n");
1614 }
1615
1616 if (sample_type & PERF_SAMPLE_RAW)
1617 process_raw_event(event, more_data, cpu, timestamp, thread);
1618
1619 return 0;
1620}
1621
1622static int
1623process_event(event_t *event, unsigned long offset, unsigned long head)
1624{
1625 trace_event(event);
1626
1627 nr_events++;
1628 switch (event->header.type) {
1629 case PERF_RECORD_MMAP:
1630 return 0;
1631 case PERF_RECORD_LOST:
1632 nr_lost_chunks++;
1633 nr_lost_events += event->lost.lost;
1634 return 0;
1635
1636 case PERF_RECORD_COMM:
1637 return process_comm_event(event, offset, head);
1638
1639 case PERF_RECORD_EXIT ... PERF_RECORD_READ:
1640 return 0;
1641
1642 case PERF_RECORD_SAMPLE:
1643 return process_sample_event(event, offset, head);
1644
1645 case PERF_RECORD_MAX:
1646 default:
1647 return -1;
1648 }
1649
1650 return 0;
1651}
1652
1653static int read_events(void)
1654{
1655 int ret, rc = EXIT_FAILURE;
1656 unsigned long offset = 0;
1657 unsigned long head = 0;
1658 struct stat perf_stat;
1659 event_t *event;
1660 uint32_t size;
1661 char *buf;
1662
1663 trace_report();
1664 register_idle_thread(&threads, &last_match);
1665
1666 input = open(input_name, O_RDONLY);
1667 if (input < 0) {
1668 perror("failed to open file");
1669 exit(-1);
1670 }
1671
1672 ret = fstat(input, &perf_stat);
1673 if (ret < 0) {
1674 perror("failed to stat file");
1675 exit(-1);
1676 }
1677
1678 if (!perf_stat.st_size) {
1679 fprintf(stderr, "zero-sized file, nothing to do!\n");
1680 exit(0);
1681 }
1682 header = perf_header__read(input);
1683 head = header->data_offset;
1684 sample_type = perf_header__sample_type(header);
1685
1686 if (!(sample_type & PERF_SAMPLE_RAW))
1687 die("No trace sample to read. Did you call perf record "
1688 "without -R?");
1689
1690 if (load_kernel() < 0) {
1691 perror("failed to load kernel symbols");
1692 return EXIT_FAILURE;
1693 }
1694
1695remap:
1696 buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
1697 MAP_SHARED, input, offset);
1698 if (buf == MAP_FAILED) {
1699 perror("failed to mmap file");
1700 exit(-1);
1701 }
1702
1703more:
1704 event = (event_t *)(buf + head);
1705
1706 size = event->header.size;
1707 if (!size)
1708 size = 8;
1709
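	/*
	 * If this record would cross the end of the current mmap window,
	 * slide the window forward by a page-aligned amount and remap,
	 * keeping 'head' relative to the new window start:
	 */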
1710 if (head + event->header.size >= page_size * mmap_window) {
1711 unsigned long shift = page_size * (head / page_size);
1712 int res;
1713
1714 res = munmap(buf, page_size * mmap_window);
1715 assert(res == 0);
1716
1717 offset += shift;
1718 head -= shift;
1719 goto remap;
1720 }
1721
1722 size = event->header.size;
1723
1724
1725 if (!size || process_event(event, offset, head) < 0) {
1726
1727 /*
1728 * assume we lost track of the stream, check alignment, and
1729	 * increment a single u64 in the hope of catching on again 'soon'.
1730 */
1731
1732 if (unlikely(head & 7))
1733 head &= ~7ULL;
1734
1735 size = 8;
1736 }
1737
1738 head += size;
1739
1740 if (offset + head < (unsigned long)perf_stat.st_size)
1741 goto more;
1742
1743 rc = EXIT_SUCCESS;
1744 close(input);
1745
1746 return rc;
1747}
1748
1749static void print_bad_events(void)
1750{
1751 if (nr_unordered_timestamps && nr_timestamps) {
1752 printf(" INFO: %.3f%% unordered timestamps (%ld out of %ld)\n",
1753 (double)nr_unordered_timestamps/(double)nr_timestamps*100.0,
1754 nr_unordered_timestamps, nr_timestamps);
1755 }
1756 if (nr_lost_events && nr_events) {
1757 printf(" INFO: %.3f%% lost events (%ld out of %ld, in %ld chunks)\n",
1758 (double)nr_lost_events/(double)nr_events*100.0,
1759 nr_lost_events, nr_events, nr_lost_chunks);
1760 }
1761 if (nr_state_machine_bugs && nr_timestamps) {
1762 printf(" INFO: %.3f%% state machine bugs (%ld out of %ld)",
1763 (double)nr_state_machine_bugs/(double)nr_timestamps*100.0,
1764 nr_state_machine_bugs, nr_timestamps);
1765 if (nr_lost_events)
1766 printf(" (due to lost events?)");
1767 printf("\n");
1768 }
1769 if (nr_context_switch_bugs && nr_timestamps) {
1770 printf(" INFO: %.3f%% context switch bugs (%ld out of %ld)",
1771 (double)nr_context_switch_bugs/(double)nr_timestamps*100.0,
1772 nr_context_switch_bugs, nr_timestamps);
1773 if (nr_lost_events)
1774 printf(" (due to lost events?)");
1775 printf("\n");
1776 }
1777}
1778
1779static void __cmd_lat(void)
1780{
1781 struct rb_node *next;
1782
1783 setup_pager();
1784 read_events();
1785 sort_lat();
1786
1787 printf("\n -----------------------------------------------------------------------------------------\n");
1788 printf(" Task | Runtime ms | Switches | Average delay ms | Maximum delay ms |\n");
1789 printf(" -----------------------------------------------------------------------------------------\n");
1790
1791 next = rb_first(&sorted_atom_root);
1792
1793 while (next) {
1794 struct work_atoms *work_list;
1795
1796 work_list = rb_entry(next, struct work_atoms, node);
1797 output_lat_thread(work_list);
1798 next = rb_next(next);
1799 }
1800
1801 printf(" -----------------------------------------------------------------------------------------\n");
1802 printf(" TOTAL: |%11.3f ms |%9Ld |\n",
1803 (double)all_runtime/1e6, all_count);
1804
1805 printf(" ---------------------------------------------------\n");
1806
1807 print_bad_events();
1808 printf("\n");
1809
1810}
1811
1812static struct trace_sched_handler map_ops = {
1813 .wakeup_event = NULL,
1814 .switch_event = map_switch_event,
1815 .runtime_event = NULL,
1816 .fork_event = NULL,
1817};
1818
1819static void __cmd_map(void)
1820{
1821 max_cpu = sysconf(_SC_NPROCESSORS_CONF);
1822
1823 setup_pager();
1824 read_events();
1825 print_bad_events();
1826}
1827
1828static void __cmd_replay(void)
1829{
1830 unsigned long i;
1831
1832 calibrate_run_measurement_overhead();
1833 calibrate_sleep_measurement_overhead();
1834
1835 test_calibrations();
1836
1837 read_events();
1838
1839 printf("nr_run_events: %ld\n", nr_run_events);
1840 printf("nr_sleep_events: %ld\n", nr_sleep_events);
1841 printf("nr_wakeup_events: %ld\n", nr_wakeup_events);
1842
1843 if (targetless_wakeups)
1844 printf("target-less wakeups: %ld\n", targetless_wakeups);
1845 if (multitarget_wakeups)
1846 printf("multi-target wakeups: %ld\n", multitarget_wakeups);
1847 if (nr_run_events_optimized)
1848 printf("run atoms optimized: %ld\n",
1849 nr_run_events_optimized);
1850
1851 print_task_traces();
1852 add_cross_task_wakeups();
1853
1854 create_tasks();
1855 printf("------------------------------------------------------------\n");
1856 for (i = 0; i < replay_repeat; i++)
1857 run_one_test();
1858}
1859
1860
1861static const char * const sched_usage[] = {
1862 "perf sched [<options>] {record|latency|map|replay|trace}",
1863 NULL
1864};
1865
1866static const struct option sched_options[] = {
1867 OPT_STRING('i', "input", &input_name, "file",
1868 "input file name"),
1869 OPT_BOOLEAN('v', "verbose", &verbose,
1870 "be more verbose (show symbol address, etc)"),
1871 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1872 "dump raw trace in ASCII"),
1873 OPT_END()
1874};
1875
1876static const char * const latency_usage[] = {
1877 "perf sched latency [<options>]",
1878 NULL
1879};
1880
1881static const struct option latency_options[] = {
1882 OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
1883 "sort by key(s): runtime, switch, avg, max"),
1884 OPT_BOOLEAN('v', "verbose", &verbose,
1885 "be more verbose (show symbol address, etc)"),
1886 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1887 "dump raw trace in ASCII"),
1888 OPT_END()
1889};
1890
1891static const char * const replay_usage[] = {
1892 "perf sched replay [<options>]",
1893 NULL
1894};
1895
1896static const struct option replay_options[] = {
1897 OPT_INTEGER('r', "repeat", &replay_repeat,
1898 "repeat the workload replay N times (-1: infinite)"),
1899 OPT_BOOLEAN('v', "verbose", &verbose,
1900 "be more verbose (show symbol address, etc)"),
1901 OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
1902 "dump raw trace in ASCII"),
1903 OPT_END()
1904};
1905
1906static void setup_sorting(void)
1907{
1908 char *tmp, *tok, *str = strdup(sort_order);
1909
1910 for (tok = strtok_r(str, ", ", &tmp);
1911 tok; tok = strtok_r(NULL, ", ", &tmp)) {
1912 if (sort_dimension__add(tok, &sort_list) < 0) {
1913 error("Unknown --sort key: `%s'", tok);
1914 usage_with_options(latency_usage, latency_options);
1915 }
1916 }
1917
1918 free(str);
1919
1920 sort_dimension__add((char *)"pid", &cmp_pid);
1921}
1922
1923static const char *record_args[] = {
1924 "record",
1925 "-a",
1926 "-R",
1927 "-M",
1928 "-f",
1929 "-m", "1024",
1930 "-c", "1",
1931 "-e", "sched:sched_switch:r",
1932 "-e", "sched:sched_stat_wait:r",
1933 "-e", "sched:sched_stat_sleep:r",
1934 "-e", "sched:sched_stat_iowait:r",
1935 "-e", "sched:sched_stat_runtime:r",
1936 "-e", "sched:sched_process_exit:r",
1937 "-e", "sched:sched_process_fork:r",
1938 "-e", "sched:sched_wakeup:r",
1939 "-e", "sched:sched_migrate_task:r",
1940};
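/*
 * Illustratively, 'perf sched record sleep 1' therefore ends up running
 * something like:
 *
 *   perf record -a -R -M -f -m 1024 -c 1 -e sched:sched_switch:r ... sleep 1
 */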
1941
1942static int __cmd_record(int argc, const char **argv)
1943{
1944 unsigned int rec_argc, i, j;
1945 const char **rec_argv;
1946
1947 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1948 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1949
1950 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1951 rec_argv[i] = strdup(record_args[i]);
1952
1953 for (j = 1; j < (unsigned int)argc; j++, i++)
1954 rec_argv[i] = argv[j];
1955
1956 BUG_ON(i != rec_argc);
1957
1958 return cmd_record(i, rec_argv, NULL);
1959}
1960
1961int cmd_sched(int argc, const char **argv, const char *prefix __used)
1962{
1963 symbol__init();
1964 page_size = getpagesize();
1965
1966 argc = parse_options(argc, argv, sched_options, sched_usage,
1967 PARSE_OPT_STOP_AT_NON_OPTION);
1968 if (!argc)
1969 usage_with_options(sched_usage, sched_options);
1970
1971 if (!strncmp(argv[0], "rec", 3)) {
1972 return __cmd_record(argc, argv);
1973 } else if (!strncmp(argv[0], "lat", 3)) {
1974 trace_handler = &lat_ops;
1975 if (argc > 1) {
1976 argc = parse_options(argc, argv, latency_options, latency_usage, 0);
1977 if (argc)
1978 usage_with_options(latency_usage, latency_options);
1979 }
1980 setup_sorting();
1981 __cmd_lat();
1982 } else if (!strcmp(argv[0], "map")) {
1983 trace_handler = &map_ops;
1984 setup_sorting();
1985 __cmd_map();
1986 } else if (!strncmp(argv[0], "rep", 3)) {
1987 trace_handler = &replay_ops;
1988 if (argc) {
1989 argc = parse_options(argc, argv, replay_options, replay_usage, 0);
1990 if (argc)
1991 usage_with_options(replay_usage, replay_options);
1992 }
1993 __cmd_replay();
1994 } else if (!strcmp(argv[0], "trace")) {
1995 /*
1996 * Aliased to 'perf trace' for now:
1997 */
1998 return cmd_trace(argc, argv, prefix);
1999 } else {
2000 usage_with_options(sched_usage, sched_options);
2001 }
2002
2003 return 0;
2004}
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 61b828236c11..16af2d82e858 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -48,7 +48,7 @@
 #include <sys/prctl.h>
 #include <math.h>
 
-static struct perf_counter_attr default_attrs[] = {
+static struct perf_event_attr default_attrs[] = {
 
   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
   { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES},
@@ -130,11 +130,11 @@ struct stats runtime_cycles_stats;
	 attrs[counter].config == PERF_COUNT_##c)
 
 #define ERR_PERF_OPEN \
-"Error: counter %d, sys_perf_counter_open() syscall returned with %d (%s)\n"
+"Error: counter %d, sys_perf_event_open() syscall returned with %d (%s)\n"
 
 static void create_perf_stat_counter(int counter, int pid)
 {
-	struct perf_counter_attr *attr = attrs + counter;
+	struct perf_event_attr *attr = attrs + counter;
 
 	if (scale)
 		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
@@ -144,7 +144,7 @@ static void create_perf_stat_counter(int counter, int pid)
 		unsigned int cpu;
 
 		for (cpu = 0; cpu < nr_cpus; cpu++) {
-			fd[cpu][counter] = sys_perf_counter_open(attr, -1, cpu, -1, 0);
+			fd[cpu][counter] = sys_perf_event_open(attr, -1, cpu, -1, 0);
 			if (fd[cpu][counter] < 0 && verbose)
 				fprintf(stderr, ERR_PERF_OPEN, counter,
 					fd[cpu][counter], strerror(errno));
@@ -154,7 +154,7 @@ static void create_perf_stat_counter(int counter, int pid)
 		attr->disabled = 1;
 		attr->enable_on_exec = 1;
 
-		fd[0][counter] = sys_perf_counter_open(attr, pid, -1, -1, 0);
+		fd[0][counter] = sys_perf_event_open(attr, pid, -1, -1, 0);
 		if (fd[0][counter] < 0 && verbose)
 			fprintf(stderr, ERR_PERF_OPEN, counter,
 				fd[0][counter], strerror(errno));
diff --git a/tools/perf/builtin-timechart.c b/tools/perf/builtin-timechart.c
new file mode 100644
index 000000000000..4405681b3134
--- /dev/null
+++ b/tools/perf/builtin-timechart.c
@@ -0,0 +1,1158 @@
1/*
2 * builtin-timechart.c - make an svg timechart of system activity
3 *
4 * (C) Copyright 2009 Intel Corporation
5 *
6 * Authors:
7 * Arjan van de Ven <arjan@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; version 2
12 * of the License.
13 */
14
15#include "builtin.h"
16
17#include "util/util.h"
18
19#include "util/color.h"
20#include <linux/list.h>
21#include "util/cache.h"
22#include <linux/rbtree.h>
23#include "util/symbol.h"
24#include "util/string.h"
25#include "util/callchain.h"
26#include "util/strlist.h"
27
28#include "perf.h"
29#include "util/header.h"
30#include "util/parse-options.h"
31#include "util/parse-events.h"
32#include "util/svghelper.h"
33
34static char const *input_name = "perf.data";
35static char const *output_name = "output.svg";
36
37
38static unsigned long page_size;
39static unsigned long mmap_window = 32;
40static u64 sample_type;
41
42static unsigned int numcpus;
43static u64 min_freq; /* Lowest CPU frequency seen */
44static u64 max_freq; /* Highest CPU frequency seen */
45static u64 turbo_frequency;
46
47static u64 first_time, last_time;
48
49
50static struct perf_header *header;
51
52struct per_pid;
53struct per_pidcomm;
54
55struct cpu_sample;
56struct power_event;
57struct wake_event;
58
59struct sample_wrapper;
60
61/*
62 * Datastructure layout:
63 * We keep a list of "pid"s, matching the kernel's notion of a task struct.
64 * Each "pid" entry has a list of "comm"s.
65 * This is because we want to track different programs differently, while
66 * exec will reuse the original pid (by design).
67 * Each comm has a list of samples that will be used to draw
68 * the final graph.
69 */
70
71struct per_pid {
72 struct per_pid *next;
73
74 int pid;
75 int ppid;
76
77 u64 start_time;
78 u64 end_time;
79 u64 total_time;
80 int display;
81
82 struct per_pidcomm *all;
83 struct per_pidcomm *current;
84
85 int painted;
86};
87
88
89struct per_pidcomm {
90 struct per_pidcomm *next;
91
92 u64 start_time;
93 u64 end_time;
94 u64 total_time;
95
96 int Y;
97 int display;
98
99 long state;
100 u64 state_since;
101
102 char *comm;
103
104 struct cpu_sample *samples;
105};
106
107struct sample_wrapper {
108 struct sample_wrapper *next;
109
110 u64 timestamp;
111 unsigned char data[0];
112};
113
114#define TYPE_NONE 0
115#define TYPE_RUNNING 1
116#define TYPE_WAITING 2
117#define TYPE_BLOCKED 3
118
119struct cpu_sample {
120 struct cpu_sample *next;
121
122 u64 start_time;
123 u64 end_time;
124 int type;
125 int cpu;
126};
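/*
 * An illustrative walk of the structure described above - the drawing
 * code later follows exactly this nesting:
 *
 *	struct per_pid *p;
 *	struct per_pidcomm *c;
 *	struct cpu_sample *s;
 *
 *	for (p = all_data; p; p = p->next)
 *		for (c = p->all; c; c = c->next)
 *			for (s = c->samples; s; s = s->next)
 *				handle_sample(s);	(hypothetical helper)
 */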
127
128static struct per_pid *all_data;
129
130#define CSTATE 1
131#define PSTATE 2
132
133struct power_event {
134 struct power_event *next;
135 int type;
136 int state;
137 u64 start_time;
138 u64 end_time;
139 int cpu;
140};
141
142struct wake_event {
143 struct wake_event *next;
144 int waker;
145 int wakee;
146 u64 time;
147};
148
149static struct power_event *power_events;
150static struct wake_event *wake_events;
151
152struct sample_wrapper *all_samples;
153
154static struct per_pid *find_create_pid(int pid)
155{
156 struct per_pid *cursor = all_data;
157
158 while (cursor) {
159 if (cursor->pid == pid)
160 return cursor;
161 cursor = cursor->next;
162 }
163 cursor = malloc(sizeof(struct per_pid));
164 assert(cursor != NULL);
165 memset(cursor, 0, sizeof(struct per_pid));
166 cursor->pid = pid;
167 cursor->next = all_data;
168 all_data = cursor;
169 return cursor;
170}
171
172static void pid_set_comm(int pid, char *comm)
173{
174 struct per_pid *p;
175 struct per_pidcomm *c;
176 p = find_create_pid(pid);
177 c = p->all;
178 while (c) {
179 if (c->comm && strcmp(c->comm, comm) == 0) {
180 p->current = c;
181 return;
182 }
183 if (!c->comm) {
184 c->comm = strdup(comm);
185 p->current = c;
186 return;
187 }
188 c = c->next;
189 }
190 c = malloc(sizeof(struct per_pidcomm));
191 assert(c != NULL);
192 memset(c, 0, sizeof(struct per_pidcomm));
193 c->comm = strdup(comm);
194 p->current = c;
195 c->next = p->all;
196 p->all = c;
197}
198
199static void pid_fork(int pid, int ppid, u64 timestamp)
200{
201 struct per_pid *p, *pp;
202 p = find_create_pid(pid);
203 pp = find_create_pid(ppid);
204 p->ppid = ppid;
205 if (pp->current && pp->current->comm && !p->current)
206 pid_set_comm(pid, pp->current->comm);
207
208 p->start_time = timestamp;
209 if (p->current) {
210 p->current->start_time = timestamp;
211 p->current->state_since = timestamp;
212 }
213}
214
215static void pid_exit(int pid, u64 timestamp)
216{
217 struct per_pid *p;
218 p = find_create_pid(pid);
219 p->end_time = timestamp;
220 if (p->current)
221 p->current->end_time = timestamp;
222}
223
224static void
225pid_put_sample(int pid, int type, unsigned int cpu, u64 start, u64 end)
226{
227 struct per_pid *p;
228 struct per_pidcomm *c;
229 struct cpu_sample *sample;
230
231 p = find_create_pid(pid);
232 c = p->current;
233 if (!c) {
234 c = malloc(sizeof(struct per_pidcomm));
235 assert(c != NULL);
236 memset(c, 0, sizeof(struct per_pidcomm));
237 p->current = c;
238 c->next = p->all;
239 p->all = c;
240 }
241
242 sample = malloc(sizeof(struct cpu_sample));
243 assert(sample != NULL);
244 memset(sample, 0, sizeof(struct cpu_sample));
245 sample->start_time = start;
246 sample->end_time = end;
247 sample->type = type;
248 sample->next = c->samples;
249 sample->cpu = cpu;
250 c->samples = sample;
251
252 if (sample->type == TYPE_RUNNING && end > start && start > 0) {
253 c->total_time += (end-start);
254 p->total_time += (end-start);
255 }
256
257 if (c->start_time == 0 || c->start_time > start)
258 c->start_time = start;
259 if (p->start_time == 0 || p->start_time > start)
260 p->start_time = start;
261
262 if (cpu > numcpus)
263 numcpus = cpu;
264}
265
266#define MAX_CPUS 4096
267
268static u64 cpus_cstate_start_times[MAX_CPUS];
269static int cpus_cstate_state[MAX_CPUS];
270static u64 cpus_pstate_start_times[MAX_CPUS];
271static u64 cpus_pstate_state[MAX_CPUS];
272
273static int
274process_comm_event(event_t *event)
275{
276 pid_set_comm(event->comm.pid, event->comm.comm);
277 return 0;
278}
279static int
280process_fork_event(event_t *event)
281{
282 pid_fork(event->fork.pid, event->fork.ppid, event->fork.time);
283 return 0;
284}
285
286static int
287process_exit_event(event_t *event)
288{
289 pid_exit(event->fork.pid, event->fork.time);
290 return 0;
291}
292
293struct trace_entry {
294 u32 size;
295 unsigned short type;
296 unsigned char flags;
297 unsigned char preempt_count;
298 int pid;
299 int tgid;
300};
301
302struct power_entry {
303 struct trace_entry te;
304 s64 type;
305 s64 value;
306};
307
308#define TASK_COMM_LEN 16
309struct wakeup_entry {
310 struct trace_entry te;
311 char comm[TASK_COMM_LEN];
312 int pid;
313 int prio;
314 int success;
315};
316
317/*
318 * trace_flag_type is an enumeration that holds different
319 * states when a trace occurs. These are:
320 * IRQS_OFF - interrupts were disabled
321 * IRQS_NOSUPPORT - arch does not support irqs_disabled_flags
322 * NEED_RESCED - reschedule is requested
323 * HARDIRQ - inside an interrupt handler
324 * SOFTIRQ - inside a softirq handler
325 */
326enum trace_flag_type {
327 TRACE_FLAG_IRQS_OFF = 0x01,
328 TRACE_FLAG_IRQS_NOSUPPORT = 0x02,
329 TRACE_FLAG_NEED_RESCHED = 0x04,
330 TRACE_FLAG_HARDIRQ = 0x08,
331 TRACE_FLAG_SOFTIRQ = 0x10,
332};
333
334
335
336struct sched_switch {
337 struct trace_entry te;
338 char prev_comm[TASK_COMM_LEN];
339 int prev_pid;
340 int prev_prio;
341 long prev_state; /* Arjan weeps. */
342 char next_comm[TASK_COMM_LEN];
343 int next_pid;
344 int next_prio;
345};
346
347static void c_state_start(int cpu, u64 timestamp, int state)
348{
349 cpus_cstate_start_times[cpu] = timestamp;
350 cpus_cstate_state[cpu] = state;
351}
352
353static void c_state_end(int cpu, u64 timestamp)
354{
355 struct power_event *pwr;
356 pwr = malloc(sizeof(struct power_event));
357 if (!pwr)
358 return;
359 memset(pwr, 0, sizeof(struct power_event));
360
361 pwr->state = cpus_cstate_state[cpu];
362 pwr->start_time = cpus_cstate_start_times[cpu];
363 pwr->end_time = timestamp;
364 pwr->cpu = cpu;
365 pwr->type = CSTATE;
366 pwr->next = power_events;
367
368 power_events = pwr;
369}
370
371static void p_state_change(int cpu, u64 timestamp, u64 new_freq)
372{
373 struct power_event *pwr;
374	if (new_freq > 8000000) /* detect invalid data */
375		return;
376
377	pwr = malloc(sizeof(struct power_event));
378	if (!pwr)
379		return;
380
381 memset(pwr, 0, sizeof(struct power_event));
382
383 pwr->state = cpus_pstate_state[cpu];
384 pwr->start_time = cpus_pstate_start_times[cpu];
385 pwr->end_time = timestamp;
386 pwr->cpu = cpu;
387 pwr->type = PSTATE;
388 pwr->next = power_events;
389
390 if (!pwr->start_time)
391 pwr->start_time = first_time;
392
393 power_events = pwr;
394
395 cpus_pstate_state[cpu] = new_freq;
396 cpus_pstate_start_times[cpu] = timestamp;
397
398 if ((u64)new_freq > max_freq)
399 max_freq = new_freq;
400
401 if (new_freq < min_freq || min_freq == 0)
402 min_freq = new_freq;
403
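	/*
	 * cpufreq reports frequencies in kHz; Intel systems typically
	 * advertise turbo mode as the highest non-turbo frequency plus
	 * 1000 kHz, which is the pattern the check below keys on:
	 */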
404 if (new_freq == max_freq - 1000)
405 turbo_frequency = max_freq;
406}
407
408static void
409sched_wakeup(int cpu, u64 timestamp, int pid, struct trace_entry *te)
410{
411 struct wake_event *we;
412 struct per_pid *p;
413 struct wakeup_entry *wake = (void *)te;
414
415 we = malloc(sizeof(struct wake_event));
416 if (!we)
417 return;
418
419 memset(we, 0, sizeof(struct wake_event));
420 we->time = timestamp;
421 we->waker = pid;
422
423 if ((te->flags & TRACE_FLAG_HARDIRQ) || (te->flags & TRACE_FLAG_SOFTIRQ))
424 we->waker = -1;
425
426 we->wakee = wake->pid;
427 we->next = wake_events;
428 wake_events = we;
429 p = find_create_pid(we->wakee);
430
431 if (p && p->current && p->current->state == TYPE_NONE) {
432 p->current->state_since = timestamp;
433 p->current->state = TYPE_WAITING;
434 }
435 if (p && p->current && p->current->state == TYPE_BLOCKED) {
436 pid_put_sample(p->pid, p->current->state, cpu, p->current->state_since, timestamp);
437 p->current->state_since = timestamp;
438 p->current->state = TYPE_WAITING;
439 }
440}
441
442static void sched_switch(int cpu, u64 timestamp, struct trace_entry *te)
443{
444 struct per_pid *p = NULL, *prev_p;
445 struct sched_switch *sw = (void *)te;
446
447
448 prev_p = find_create_pid(sw->prev_pid);
449
450 p = find_create_pid(sw->next_pid);
451
452 if (prev_p->current && prev_p->current->state != TYPE_NONE)
453 pid_put_sample(sw->prev_pid, TYPE_RUNNING, cpu, prev_p->current->state_since, timestamp);
454 if (p && p->current) {
455 if (p->current->state != TYPE_NONE)
456 pid_put_sample(sw->next_pid, p->current->state, cpu, p->current->state_since, timestamp);
457
458 p->current->state_since = timestamp;
459 p->current->state = TYPE_RUNNING;
460 }
461
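	/*
	 * prev_state follows the kernel task state bits: 0 is TASK_RUNNING
	 * (the task was preempted and is still waiting for a CPU), and the
	 * 0x2 bit is TASK_UNINTERRUPTIBLE (the task really blocked):
	 */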
462 if (prev_p->current) {
463 prev_p->current->state = TYPE_NONE;
464 prev_p->current->state_since = timestamp;
465 if (sw->prev_state & 2)
466 prev_p->current->state = TYPE_BLOCKED;
467 if (sw->prev_state == 0)
468 prev_p->current->state = TYPE_WAITING;
469 }
470}
471
472
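/*
 * A sketch of the PERF_SAMPLE layout parsed below: each enabled sample
 * type contributes one u64 to event->sample.array[], in this fixed order:
 *
 *   IP | TID (pid << 32 | tid) | TIME | ADDR | ID | STREAM_ID | CPU | PERIOD
 *
 * followed by the raw tracepoint payload when PERF_SAMPLE_RAW is set.
 */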
473static int
474process_sample_event(event_t *event)
475{
476 int cursor = 0;
477 u64 addr = 0;
478 u64 stamp = 0;
479 u32 cpu = 0;
480 u32 pid = 0;
481 struct trace_entry *te;
482
483 if (sample_type & PERF_SAMPLE_IP)
484 cursor++;
485
486 if (sample_type & PERF_SAMPLE_TID) {
487 pid = event->sample.array[cursor]>>32;
488 cursor++;
489 }
490 if (sample_type & PERF_SAMPLE_TIME) {
491 stamp = event->sample.array[cursor++];
492
493 if (!first_time || first_time > stamp)
494 first_time = stamp;
495 if (last_time < stamp)
496 last_time = stamp;
497
498 }
499 if (sample_type & PERF_SAMPLE_ADDR)
500 addr = event->sample.array[cursor++];
501 if (sample_type & PERF_SAMPLE_ID)
502 cursor++;
503 if (sample_type & PERF_SAMPLE_STREAM_ID)
504 cursor++;
505 if (sample_type & PERF_SAMPLE_CPU)
506 cpu = event->sample.array[cursor++] & 0xFFFFFFFF;
507 if (sample_type & PERF_SAMPLE_PERIOD)
508 cursor++;
509
510 te = (void *)&event->sample.array[cursor];
511
512 if (sample_type & PERF_SAMPLE_RAW && te->size > 0) {
513 char *event_str;
514 struct power_entry *pe;
515
516 pe = (void *)te;
517
518 event_str = perf_header__find_event(te->type);
519
520 if (!event_str)
521 return 0;
522
523 if (strcmp(event_str, "power:power_start") == 0)
524 c_state_start(cpu, stamp, pe->value);
525
526 if (strcmp(event_str, "power:power_end") == 0)
527 c_state_end(cpu, stamp);
528
529 if (strcmp(event_str, "power:power_frequency") == 0)
530 p_state_change(cpu, stamp, pe->value);
531
532 if (strcmp(event_str, "sched:sched_wakeup") == 0)
533 sched_wakeup(cpu, stamp, pid, te);
534
535 if (strcmp(event_str, "sched:sched_switch") == 0)
536 sched_switch(cpu, stamp, te);
537 }
538 return 0;
539}
540
541/*
542 * After the last sample we need to wrap up the current C/P state
543 * and close out each CPU for these.
544 */
545static void end_sample_processing(void)
546{
547 u64 cpu;
548 struct power_event *pwr;
549
550 for (cpu = 0; cpu < numcpus; cpu++) {
551 pwr = malloc(sizeof(struct power_event));
552 if (!pwr)
553 return;
554 memset(pwr, 0, sizeof(struct power_event));
555
556 /* C state */
557#if 0
558 pwr->state = cpus_cstate_state[cpu];
559 pwr->start_time = cpus_cstate_start_times[cpu];
560 pwr->end_time = last_time;
561 pwr->cpu = cpu;
562 pwr->type = CSTATE;
563 pwr->next = power_events;
564
565 power_events = pwr;
566#endif
567 /* P state */
568
569 pwr = malloc(sizeof(struct power_event));
570 if (!pwr)
571 return;
572 memset(pwr, 0, sizeof(struct power_event));
573
574 pwr->state = cpus_pstate_state[cpu];
575 pwr->start_time = cpus_pstate_start_times[cpu];
576 pwr->end_time = last_time;
577 pwr->cpu = cpu;
578 pwr->type = PSTATE;
579 pwr->next = power_events;
580
581 if (!pwr->start_time)
582 pwr->start_time = first_time;
583 if (!pwr->state)
584 pwr->state = min_freq;
585 power_events = pwr;
586 }
587}
588
589static u64 sample_time(event_t *event)
590{
591 int cursor;
592
593 cursor = 0;
594 if (sample_type & PERF_SAMPLE_IP)
595 cursor++;
596 if (sample_type & PERF_SAMPLE_TID)
597 cursor++;
598 if (sample_type & PERF_SAMPLE_TIME)
599 return event->sample.array[cursor];
600 return 0;
601}
602
603
604/*
605 * We first queue all events, inserting each one so that the list stays
606 * in reverse time order; the order will get flipped later.
607 */
608static int
609queue_sample_event(event_t *event)
610{
611 struct sample_wrapper *copy, *prev;
612 int size;
613
614 size = event->sample.header.size + sizeof(struct sample_wrapper) + 8;
615
616 copy = malloc(size);
617 if (!copy)
618 return 1;
619
620 memset(copy, 0, size);
621
622 copy->next = NULL;
623 copy->timestamp = sample_time(event);
624
625 memcpy(&copy->data, event, event->sample.header.size);
626
627 /* insert in the right place in the list */
628
629 if (!all_samples) {
630 /* first sample ever */
631 all_samples = copy;
632 return 0;
633 }
634
635 if (all_samples->timestamp < copy->timestamp) {
636 /* insert at the head of the list */
637 copy->next = all_samples;
638 all_samples = copy;
639 return 0;
640 }
641
642 prev = all_samples;
643 while (prev->next) {
644 if (prev->next->timestamp < copy->timestamp) {
645 copy->next = prev->next;
646 prev->next = copy;
647 return 0;
648 }
649 prev = prev->next;
650 }
651 /* insert at the end of the list */
652 prev->next = copy;
653
654 return 0;
655}
656
657static void sort_queued_samples(void)
658{
659 struct sample_wrapper *cursor, *next;
660
661 cursor = all_samples;
662 all_samples = NULL;
663
664 while (cursor) {
665 next = cursor->next;
666 cursor->next = all_samples;
667 all_samples = cursor;
668 cursor = next;
669 }
670}
671
672/*
673 * Sort the pid datastructure
674 */
675static void sort_pids(void)
676{
677 struct per_pid *new_list, *p, *cursor, *prev;
678 /* sort by ppid first, then by pid, lowest to highest */
679
680 new_list = NULL;
681
682 while (all_data) {
683 p = all_data;
684 all_data = p->next;
685 p->next = NULL;
686
687 if (new_list == NULL) {
688 new_list = p;
689 p->next = NULL;
690 continue;
691 }
692 prev = NULL;
693 cursor = new_list;
694 while (cursor) {
695 if (cursor->ppid > p->ppid ||
696 (cursor->ppid == p->ppid && cursor->pid > p->pid)) {
697 /* must insert before */
698 if (prev) {
699 p->next = prev->next;
700 prev->next = p;
701 cursor = NULL;
702 continue;
703 } else {
704 p->next = new_list;
705 new_list = p;
706 cursor = NULL;
707 continue;
708 }
709 }
710
711 prev = cursor;
712 cursor = cursor->next;
713 if (!cursor)
714 prev->next = p;
715 }
716 }
717 all_data = new_list;
718}
719
720
721static void draw_c_p_states(void)
722{
723 struct power_event *pwr;
724 pwr = power_events;
725
726 /*
727 * two pass drawing so that the P state bars are on top of the C state blocks
728 */
729 while (pwr) {
730 if (pwr->type == CSTATE)
731 svg_cstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
732 pwr = pwr->next;
733 }
734
735 pwr = power_events;
736 while (pwr) {
737 if (pwr->type == PSTATE) {
738 if (!pwr->state)
739 pwr->state = min_freq;
740 svg_pstate(pwr->cpu, pwr->start_time, pwr->end_time, pwr->state);
741 }
742 pwr = pwr->next;
743 }
744}
745
746static void draw_wakeups(void)
747{
748 struct wake_event *we;
749 struct per_pid *p;
750 struct per_pidcomm *c;
751
752 we = wake_events;
753 while (we) {
754 int from = 0, to = 0;
755 char *task_from = NULL, *task_to = NULL;
756
757 /* locate the column of the waker and wakee */
758 p = all_data;
759 while (p) {
760 if (p->pid == we->waker || p->pid == we->wakee) {
761 c = p->all;
762 while (c) {
763 if (c->Y && c->start_time <= we->time && c->end_time >= we->time) {
764 if (p->pid == we->waker) {
765 from = c->Y;
766 task_from = c->comm;
767 }
768 if (p->pid == we->wakee) {
769 to = c->Y;
770 task_to = c->comm;
771 }
772 }
773 c = c->next;
774 }
775 }
776 p = p->next;
777 }
778
779 if (we->waker == -1)
780 svg_interrupt(we->time, to);
781 else if (from && to && abs(from - to) == 1)
782 svg_wakeline(we->time, from, to);
783 else
784 svg_partial_wakeline(we->time, from, task_from, to, task_to);
785 we = we->next;
786 }
787}
788
789static void draw_cpu_usage(void)
790{
791 struct per_pid *p;
792 struct per_pidcomm *c;
793 struct cpu_sample *sample;
794 p = all_data;
795 while (p) {
796 c = p->all;
797 while (c) {
798 sample = c->samples;
799 while (sample) {
800 if (sample->type == TYPE_RUNNING)
801 svg_process(sample->cpu, sample->start_time, sample->end_time, "sample", c->comm);
802
803 sample = sample->next;
804 }
805 c = c->next;
806 }
807 p = p->next;
808 }
809}
810
811static void draw_process_bars(void)
812{
813 struct per_pid *p;
814 struct per_pidcomm *c;
815 struct cpu_sample *sample;
816 int Y = 0;
817
818 Y = 2 * numcpus + 2;
819
820 p = all_data;
821 while (p) {
822 c = p->all;
823 while (c) {
824 if (!c->display) {
825 c->Y = 0;
826 c = c->next;
827 continue;
828 }
829
830 svg_box(Y, c->start_time, c->end_time, "process");
831 sample = c->samples;
832 while (sample) {
833 if (sample->type == TYPE_RUNNING)
834 svg_sample(Y, sample->cpu, sample->start_time, sample->end_time);
835 if (sample->type == TYPE_BLOCKED)
836 svg_box(Y, sample->start_time, sample->end_time, "blocked");
837 if (sample->type == TYPE_WAITING)
838 svg_waiting(Y, sample->start_time, sample->end_time);
839 sample = sample->next;
840 }
841
842 if (c->comm) {
843 char comm[256];
844 if (c->total_time > 5000000000) /* 5 seconds */
845 sprintf(comm, "%s:%i (%2.2fs)", c->comm, p->pid, c->total_time / 1000000000.0);
846 else
847 sprintf(comm, "%s:%i (%3.1fms)", c->comm, p->pid, c->total_time / 1000000.0);
848
849 svg_text(Y, c->start_time, comm);
850 }
851 c->Y = Y;
852 Y++;
853 c = c->next;
854 }
855 p = p->next;
856 }
857}
858
859static int determine_display_tasks(u64 threshold)
860{
861 struct per_pid *p;
862 struct per_pidcomm *c;
863 int count = 0;
864
865 p = all_data;
866 while (p) {
867 p->display = 0;
868 if (p->start_time == 1)
869 p->start_time = first_time;
870
871 /* no exit marker, task kept running to the end */
872 if (p->end_time == 0)
873 p->end_time = last_time;
874 if (p->total_time >= threshold)
875 p->display = 1;
876
877 c = p->all;
878
879 while (c) {
880 c->display = 0;
881
882 if (c->start_time == 1)
883 c->start_time = first_time;
884
885 if (c->total_time >= threshold) {
886 c->display = 1;
887 count++;
888 }
889
890 if (c->end_time == 0)
891 c->end_time = last_time;
892
893 c = c->next;
894 }
895 p = p->next;
896 }
897 return count;
898}
899
900
901
902#define TIME_THRESH 10000000
903
904static void write_svg_file(const char *filename)
905{
906 u64 i;
907 int count;
908
909 numcpus++;
910
911
912 count = determine_display_tasks(TIME_THRESH);
913
914 /* We'd like to show at least 15 tasks; be less picky if we have fewer */
915 if (count < 15)
916 count = determine_display_tasks(TIME_THRESH / 10);
917
918 open_svg(filename, numcpus, count, first_time, last_time);
919
920 svg_time_grid();
921 svg_legenda();
922
923 for (i = 0; i < numcpus; i++)
924 svg_cpu_box(i, max_freq, turbo_frequency);
925
926 draw_cpu_usage();
927 draw_process_bars();
928 draw_c_p_states();
929 draw_wakeups();
930
931 svg_close();
932}
933
934static int
935process_event(event_t *event)
936{
937
938 switch (event->header.type) {
939
940 case PERF_RECORD_COMM:
941 return process_comm_event(event);
942 case PERF_RECORD_FORK:
943 return process_fork_event(event);
944 case PERF_RECORD_EXIT:
945 return process_exit_event(event);
946 case PERF_RECORD_SAMPLE:
947 return queue_sample_event(event);
948
949 /*
950	 * We don't process these right now, but they are fine:
951 */
952 case PERF_RECORD_MMAP:
953 case PERF_RECORD_THROTTLE:
954 case PERF_RECORD_UNTHROTTLE:
955 return 0;
956
957 default:
958 return -1;
959 }
960
961 return 0;
962}
963
964static void process_samples(void)
965{
966 struct sample_wrapper *cursor;
967 event_t *event;
968
969 sort_queued_samples();
970
971 cursor = all_samples;
972 while (cursor) {
973 event = (void *)&cursor->data;
974 cursor = cursor->next;
975 process_sample_event(event);
976 }
977}
978
979
980static int __cmd_timechart(void)
981{
982 int ret, rc = EXIT_FAILURE;
983 unsigned long offset = 0;
984 unsigned long head, shift;
985 struct stat statbuf;
986 event_t *event;
987 uint32_t size;
988 char *buf;
989 int input;
990
991 input = open(input_name, O_RDONLY);
992 if (input < 0) {
993 fprintf(stderr, " failed to open file: %s", input_name);
994 if (!strcmp(input_name, "perf.data"))
995 fprintf(stderr, " (try 'perf record' first)");
996 fprintf(stderr, "\n");
997 exit(-1);
998 }
999
1000 ret = fstat(input, &statbuf);
1001 if (ret < 0) {
1002 perror("failed to stat file");
1003 exit(-1);
1004 }
1005
1006 if (!statbuf.st_size) {
1007 fprintf(stderr, "zero-sized file, nothing to do!\n");
1008 exit(0);
1009 }
1010
1011 header = perf_header__read(input);
1012 head = header->data_offset;
1013
1014 sample_type = perf_header__sample_type(header);
1015
1016 shift = page_size * (head / page_size);
1017 offset += shift;
1018 head -= shift;
1019
1020remap:
1021 buf = (char *)mmap(NULL, page_size * mmap_window, PROT_READ,
1022 MAP_SHARED, input, offset);
1023 if (buf == MAP_FAILED) {
1024 perror("failed to mmap file");
1025 exit(-1);
1026 }
1027
1028more:
1029 event = (event_t *)(buf + head);
1030
1031 size = event->header.size;
1032 if (!size)
1033 size = 8;
1034
1035 if (head + event->header.size >= page_size * mmap_window) {
1036 int ret2;
1037
1038 shift = page_size * (head / page_size);
1039
1040 ret2 = munmap(buf, page_size * mmap_window);
1041 assert(ret2 == 0);
1042
1043 offset += shift;
1044 head -= shift;
1045 goto remap;
1046 }
1047
1048 size = event->header.size;
1049
1050 if (!size || process_event(event) < 0) {
1051
1052 printf("%p [%p]: skipping unknown header type: %d\n",
1053 (void *)(offset + head),
1054 (void *)(long)(event->header.size),
1055 event->header.type);
1056
1057 /*
1058 * assume we lost track of the stream, check alignment, and
1059	 * increment a single u64 in the hope of catching on again 'soon'.
1060 */
1061
1062 if (unlikely(head & 7))
1063 head &= ~7ULL;
1064
1065 size = 8;
1066 }
1067
1068 head += size;
1069
1070 if (offset + head >= header->data_offset + header->data_size)
1071 goto done;
1072
1073 if (offset + head < (unsigned long)statbuf.st_size)
1074 goto more;
1075
1076done:
1077 rc = EXIT_SUCCESS;
1078 close(input);
1079
1080
1081 process_samples();
1082
1083 end_sample_processing();
1084
1085 sort_pids();
1086
1087 write_svg_file(output_name);
1088
1089 printf("Written %2.1f seconds of trace to %s.\n", (last_time - first_time) / 1000000000.0, output_name);
1090
1091 return rc;
1092}
1093
1094static const char * const timechart_usage[] = {
1095 "perf timechart [<options>] {record}",
1096 NULL
1097};
1098
1099static const char *record_args[] = {
1100 "record",
1101 "-a",
1102 "-R",
1103 "-M",
1104 "-f",
1105 "-c", "1",
1106 "-e", "power:power_start",
1107 "-e", "power:power_end",
1108 "-e", "power:power_frequency",
1109 "-e", "sched:sched_wakeup",
1110 "-e", "sched:sched_switch",
1111};
1112
1113static int __cmd_record(int argc, const char **argv)
1114{
1115 unsigned int rec_argc, i, j;
1116 const char **rec_argv;
1117
1118 rec_argc = ARRAY_SIZE(record_args) + argc - 1;
1119 rec_argv = calloc(rec_argc + 1, sizeof(char *));
1120
1121 for (i = 0; i < ARRAY_SIZE(record_args); i++)
1122 rec_argv[i] = strdup(record_args[i]);
1123
1124 for (j = 1; j < (unsigned int)argc; j++, i++)
1125 rec_argv[i] = argv[j];
1126
1127 return cmd_record(i, rec_argv, NULL);
1128}
1129
1130static const struct option options[] = {
1131 OPT_STRING('i', "input", &input_name, "file",
1132 "input file name"),
1133 OPT_STRING('o', "output", &output_name, "file",
1134 "output file name"),
1135 OPT_INTEGER('w', "width", &svg_page_width,
1136 "page width"),
1137 OPT_END()
1138};
1139
1140
1141int cmd_timechart(int argc, const char **argv, const char *prefix __used)
1142{
1143 symbol__init();
1144
1145 page_size = getpagesize();
1146
1147 argc = parse_options(argc, argv, options, timechart_usage,
1148 PARSE_OPT_STOP_AT_NON_OPTION);
1149
1150 if (argc && !strncmp(argv[0], "rec", 3))
1151 return __cmd_record(argc, argv);
1152 else if (argc)
1153 usage_with_options(timechart_usage, options);
1154
1155 setup_pager();
1156
1157 return __cmd_timechart();
1158}
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 4002ccb36750..1ca88896eee4 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -901,7 +901,7 @@ struct mmap_data {
 
 static unsigned int mmap_read_head(struct mmap_data *md)
 {
-	struct perf_counter_mmap_page *pc = md->base;
+	struct perf_event_mmap_page *pc = md->base;
 	int head;
 
 	head = pc->data_head;
@@ -977,9 +977,9 @@ static void mmap_read_counter(struct mmap_data *md)
 
 		old += size;
 
-		if (event->header.type == PERF_EVENT_SAMPLE) {
+		if (event->header.type == PERF_RECORD_SAMPLE) {
 			int user =
-	(event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK) == PERF_EVENT_MISC_USER;
+	(event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK) == PERF_RECORD_MISC_USER;
 			process_event(event->ip.ip, md->counter, user);
 		}
 	}
@@ -1005,7 +1005,7 @@ int group_fd;
 
 static void start_counter(int i, int counter)
 {
-	struct perf_counter_attr *attr;
+	struct perf_event_attr *attr;
 	int cpu;
 
 	cpu = profile_cpu;
@@ -1019,7 +1019,7 @@ static void start_counter(int i, int counter)
 	attr->inherit = (cpu < 0) && inherit;
 
 try_again:
-	fd[i][counter] = sys_perf_counter_open(attr, target_pid, cpu, group_fd, 0);
+	fd[i][counter] = sys_perf_event_open(attr, target_pid, cpu, group_fd, 0);
 
 	if (fd[i][counter] < 0) {
 		int err = errno;
@@ -1044,7 +1044,7 @@ try_again:
 			printf("\n");
 			error("perfcounter syscall returned with %d (%s)\n",
 				fd[i][counter], strerror(err));
-			die("No CONFIG_PERF_COUNTERS=y kernel support configured?\n");
+			die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
 			exit(-1);
 		}
 		assert(fd[i][counter] >= 0);
diff --git a/tools/perf/builtin-trace.c b/tools/perf/builtin-trace.c
index 914ab366e369..e9d256e2f47d 100644
--- a/tools/perf/builtin-trace.c
+++ b/tools/perf/builtin-trace.c
@@ -35,14 +35,14 @@ process_comm_event(event_t *event, unsigned long offset, unsigned long head)
 
 	thread = threads__findnew(event->comm.pid, &threads, &last_match);
 
-	dump_printf("%p [%p]: PERF_EVENT_COMM: %s:%d\n",
+	dump_printf("%p [%p]: PERF_RECORD_COMM: %s:%d\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->comm.comm, event->comm.pid);
 
 	if (thread == NULL ||
 	    thread__set_comm(thread, event->comm.comm)) {
-		dump_printf("problem processing PERF_EVENT_COMM, skipping event.\n");
+		dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
 		return -1;
 	}
 	total_comm++;
@@ -82,7 +82,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 		more_data += sizeof(u64);
 	}
 
-	dump_printf("%p [%p]: PERF_EVENT_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
+	dump_printf("%p [%p]: PERF_RECORD_SAMPLE (IP, %d): %d/%d: %p period: %Ld\n",
 		(void *)(offset + head),
 		(void *)(long)(event->header.size),
 		event->header.misc,
@@ -98,9 +98,9 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 		return -1;
 	}
 
-	cpumode = event->header.misc & PERF_EVENT_MISC_CPUMODE_MASK;
+	cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-	if (cpumode == PERF_EVENT_MISC_KERNEL) {
+	if (cpumode == PERF_RECORD_MISC_KERNEL) {
 		show = SHOW_KERNEL;
 		level = 'k';
 
@@ -108,7 +108,7 @@ process_sample_event(event_t *event, unsigned long offset, unsigned long head)
 
 		dump_printf(" ...... dso: %s\n", dso->name);
 
-	} else if (cpumode == PERF_EVENT_MISC_USER) {
+	} else if (cpumode == PERF_RECORD_MISC_USER) {
 
 		show = SHOW_USER;
 		level = '.';
@@ -146,19 +146,19 @@ process_event(event_t *event, unsigned long offset, unsigned long head)
 	trace_event(event);
 
 	switch (event->header.type) {
-	case PERF_EVENT_MMAP ... PERF_EVENT_LOST:
+	case PERF_RECORD_MMAP ... PERF_RECORD_LOST:
 		return 0;
 
-	case PERF_EVENT_COMM:
+	case PERF_RECORD_COMM:
 		return process_comm_event(event, offset, head);
 
-	case PERF_EVENT_EXIT ... PERF_EVENT_READ:
+	case PERF_RECORD_EXIT ... PERF_RECORD_READ:
 		return 0;
 
-	case PERF_EVENT_SAMPLE:
+	case PERF_RECORD_SAMPLE:
 		return process_sample_event(event, offset, head);
 
-	case PERF_EVENT_MAX:
+	case PERF_RECORD_MAX:
 	default:
 		return -1;
 	}
diff --git a/tools/perf/builtin.h b/tools/perf/builtin.h
index 3a63e41fb44e..e11d8d231c3b 100644
--- a/tools/perf/builtin.h
+++ b/tools/perf/builtin.h
@@ -16,12 +16,14 @@ extern int check_pager_config(const char *cmd);
16 16
17extern int cmd_annotate(int argc, const char **argv, const char *prefix); 17extern int cmd_annotate(int argc, const char **argv, const char *prefix);
18extern int cmd_help(int argc, const char **argv, const char *prefix); 18extern int cmd_help(int argc, const char **argv, const char *prefix);
19extern int cmd_sched(int argc, const char **argv, const char *prefix);
20extern int cmd_list(int argc, const char **argv, const char *prefix);
19extern int cmd_record(int argc, const char **argv, const char *prefix); 21extern int cmd_record(int argc, const char **argv, const char *prefix);
20extern int cmd_report(int argc, const char **argv, const char *prefix); 22extern int cmd_report(int argc, const char **argv, const char *prefix);
21extern int cmd_stat(int argc, const char **argv, const char *prefix); 23extern int cmd_stat(int argc, const char **argv, const char *prefix);
24extern int cmd_timechart(int argc, const char **argv, const char *prefix);
22extern int cmd_top(int argc, const char **argv, const char *prefix); 25extern int cmd_top(int argc, const char **argv, const char *prefix);
23extern int cmd_version(int argc, const char **argv, const char *prefix);
24extern int cmd_list(int argc, const char **argv, const char *prefix);
25extern int cmd_trace(int argc, const char **argv, const char *prefix); 26extern int cmd_trace(int argc, const char **argv, const char *prefix);
27extern int cmd_version(int argc, const char **argv, const char *prefix);
26 28
27#endif 29#endif
diff --git a/tools/perf/command-list.txt b/tools/perf/command-list.txt
index eebce30afbc0..00326e230d87 100644
--- a/tools/perf/command-list.txt
+++ b/tools/perf/command-list.txt
@@ -4,7 +4,10 @@
4# 4#
5perf-annotate mainporcelain common 5perf-annotate mainporcelain common
6perf-list mainporcelain common 6perf-list mainporcelain common
7perf-sched mainporcelain common
7perf-record mainporcelain common 8perf-record mainporcelain common
8perf-report mainporcelain common 9perf-report mainporcelain common
9perf-stat mainporcelain common 10perf-stat mainporcelain common
11perf-timechart mainporcelain common
10perf-top mainporcelain common 12perf-top mainporcelain common
13perf-trace mainporcelain common
diff --git a/tools/perf/design.txt b/tools/perf/design.txt
index f71e0d245cba..f1946d107b10 100644
--- a/tools/perf/design.txt
+++ b/tools/perf/design.txt
@@ -18,10 +18,10 @@ underlying hardware counters.
18Performance counters are accessed via special file descriptors. 18Performance counters are accessed via special file descriptors.
19There's one file descriptor per virtual counter used. 19There's one file descriptor per virtual counter used.
20 20
21The special file descriptor is opened via the perf_counter_open() 21The special file descriptor is opened via the perf_event_open()
22system call: 22system call:
23 23
24 int sys_perf_counter_open(struct perf_counter_hw_event *hw_event_uptr, 24 int sys_perf_event_open(struct perf_event_hw_event *hw_event_uptr,
25 pid_t pid, int cpu, int group_fd, 25 pid_t pid, int cpu, int group_fd,
26 unsigned long flags); 26 unsigned long flags);
27 27
@@ -32,9 +32,9 @@ can be used to set the blocking mode, etc.
32Multiple counters can be kept open at a time, and the counters 32Multiple counters can be kept open at a time, and the counters
33can be poll()ed. 33can be poll()ed.
34 34
35When creating a new counter fd, 'perf_counter_hw_event' is: 35When creating a new counter fd, 'perf_event_hw_event' is:
36 36
37struct perf_counter_hw_event { 37struct perf_event_hw_event {
38 /* 38 /*
39 * The MSB of the config word signifies if the rest contains cpu 39 * The MSB of the config word signifies if the rest contains cpu
40 * specific (raw) counter configuration data, if unset, the next 40 * specific (raw) counter configuration data, if unset, the next
@@ -93,7 +93,7 @@ specified by 'event_id':
93 93
94/* 94/*
95 * Generalized performance counter event types, used by the hw_event.event_id 95 * Generalized performance counter event types, used by the hw_event.event_id
96 * parameter of the sys_perf_counter_open() syscall: 96 * parameter of the sys_perf_event_open() syscall:
97 */ 97 */
98enum hw_event_ids { 98enum hw_event_ids {
99 /* 99 /*
@@ -159,7 +159,7 @@ in size.
159 * reads on the counter should return the indicated quantities, 159 * reads on the counter should return the indicated quantities,
160 * in increasing order of bit value, after the counter value. 160 * in increasing order of bit value, after the counter value.
161 */ 161 */
162enum perf_counter_read_format { 162enum perf_event_read_format {
163 PERF_FORMAT_TOTAL_TIME_ENABLED = 1, 163 PERF_FORMAT_TOTAL_TIME_ENABLED = 1,
164 PERF_FORMAT_TOTAL_TIME_RUNNING = 2, 164 PERF_FORMAT_TOTAL_TIME_RUNNING = 2,
165}; 165};
@@ -178,7 +178,7 @@ interrupt:
178 * Bits that can be set in hw_event.record_type to request information 178 * Bits that can be set in hw_event.record_type to request information
179 * in the overflow packets. 179 * in the overflow packets.
180 */ 180 */
181enum perf_counter_record_format { 181enum perf_event_record_format {
182 PERF_RECORD_IP = 1U << 0, 182 PERF_RECORD_IP = 1U << 0,
183 PERF_RECORD_TID = 1U << 1, 183 PERF_RECORD_TID = 1U << 1,
184 PERF_RECORD_TIME = 1U << 2, 184 PERF_RECORD_TIME = 1U << 2,
@@ -228,7 +228,7 @@ these events are recorded in the ring-buffer (see below).
228The 'comm' bit allows tracking of process comm data on process creation. 228The 'comm' bit allows tracking of process comm data on process creation.
229This too is recorded in the ring-buffer (see below). 229This too is recorded in the ring-buffer (see below).
230 230
231The 'pid' parameter to the perf_counter_open() system call allows the 231The 'pid' parameter to the perf_event_open() system call allows the
232counter to be specific to a task: 232counter to be specific to a task:
233 233
234 pid == 0: if the pid parameter is zero, the counter is attached to the 234 pid == 0: if the pid parameter is zero, the counter is attached to the
@@ -258,7 +258,7 @@ The 'flags' parameter is currently unused and must be zero.
258 258
259The 'group_fd' parameter allows counter "groups" to be set up. A 259The 'group_fd' parameter allows counter "groups" to be set up. A
260counter group has one counter which is the group "leader". The leader 260counter group has one counter which is the group "leader". The leader
261is created first, with group_fd = -1 in the perf_counter_open call 261is created first, with group_fd = -1 in the perf_event_open call
262that creates it. The rest of the group members are created 262that creates it. The rest of the group members are created
263subsequently, with group_fd giving the fd of the group leader. 263subsequently, with group_fd giving the fd of the group leader.
264(A single counter on its own is created with group_fd = -1 and is 264(A single counter on its own is created with group_fd = -1 and is
@@ -277,13 +277,13 @@ tracking are logged into a ring-buffer. This ring-buffer is created and
277accessed through mmap(). 277accessed through mmap().
278 278
279The mmap size should be 1+2^n pages, where the first page is a meta-data page 279The mmap size should be 1+2^n pages, where the first page is a meta-data page
280(struct perf_counter_mmap_page) that contains various bits of information such 280(struct perf_event_mmap_page) that contains various bits of information such
281as where the ring-buffer head is. 281as where the ring-buffer head is.
282 282
283/* 283/*
284 * Structure of the page that can be mapped via mmap 284 * Structure of the page that can be mapped via mmap
285 */ 285 */
286struct perf_counter_mmap_page { 286struct perf_event_mmap_page {
287 __u32 version; /* version number of this structure */ 287 __u32 version; /* version number of this structure */
288 __u32 compat_version; /* lowest version this is compat with */ 288 __u32 compat_version; /* lowest version this is compat with */
289 289
@@ -317,7 +317,7 @@ struct perf_counter_mmap_page {
317 * Control data for the mmap() data buffer. 317 * Control data for the mmap() data buffer.
318 * 318 *
319 * User-space reading this value should issue an rmb(), on SMP capable 319 * User-space reading this value should issue an rmb(), on SMP capable
320 * platforms, after reading this value -- see perf_counter_wakeup(). 320 * platforms, after reading this value -- see perf_event_wakeup().
321 */ 321 */
322 __u32 data_head; /* head in the data section */ 322 __u32 data_head; /* head in the data section */
323}; 323};
@@ -327,9 +327,9 @@ NOTE: the hw-counter userspace bits are arch specific and are currently only
327 327
328The following 2^n pages are the ring-buffer which contains events of the form: 328The following 2^n pages are the ring-buffer which contains events of the form:
329 329
330#define PERF_EVENT_MISC_KERNEL (1 << 0) 330#define PERF_RECORD_MISC_KERNEL (1 << 0)
331#define PERF_EVENT_MISC_USER (1 << 1) 331#define PERF_RECORD_MISC_USER (1 << 1)
332#define PERF_EVENT_MISC_OVERFLOW (1 << 2) 332#define PERF_RECORD_MISC_OVERFLOW (1 << 2)
333 333
334struct perf_event_header { 334struct perf_event_header {
335 __u32 type; 335 __u32 type;
@@ -353,8 +353,8 @@ enum perf_event_type {
353 * char filename[]; 353 * char filename[];
354 * }; 354 * };
355 */ 355 */
356 PERF_EVENT_MMAP = 1, 356 PERF_RECORD_MMAP = 1,
357 PERF_EVENT_MUNMAP = 2, 357 PERF_RECORD_MUNMAP = 2,
358 358
359 /* 359 /*
360 * struct { 360 * struct {
@@ -364,10 +364,10 @@ enum perf_event_type {
364 * char comm[]; 364 * char comm[];
365 * }; 365 * };
366 */ 366 */
367 PERF_EVENT_COMM = 3, 367 PERF_RECORD_COMM = 3,
368 368
369 /* 369 /*
370 * When header.misc & PERF_EVENT_MISC_OVERFLOW the event_type field 370 * When header.misc & PERF_RECORD_MISC_OVERFLOW the event_type field
371 * will be PERF_RECORD_* 371 * will be PERF_RECORD_*
372 * 372 *
373 * struct { 373 * struct {
@@ -397,7 +397,7 @@ Notification of new events is possible through poll()/select()/epoll() and
397fcntl() managing signals. 397fcntl() managing signals.
398 398
399Normally a notification is generated for every page filled, however one can 399Normally a notification is generated for every page filled, however one can
400additionally set perf_counter_hw_event.wakeup_events to generate one every 400additionally set perf_event_hw_event.wakeup_events to generate one every
401so many counter overflow events. 401so many counter overflow events.
402 402
403Future work will include a splice() interface to the ring-buffer. 403Future work will include a splice() interface to the ring-buffer.
@@ -409,11 +409,11 @@ events but does continue to exist and maintain its count value.
409 409
410An individual counter or counter group can be enabled with 410An individual counter or counter group can be enabled with
411 411
412 ioctl(fd, PERF_COUNTER_IOC_ENABLE); 412 ioctl(fd, PERF_EVENT_IOC_ENABLE);
413 413
414or disabled with 414or disabled with
415 415
416 ioctl(fd, PERF_COUNTER_IOC_DISABLE); 416 ioctl(fd, PERF_EVENT_IOC_DISABLE);
417 417
418Enabling or disabling the leader of a group enables or disables the 418Enabling or disabling the leader of a group enables or disables the
419whole group; that is, while the group leader is disabled, none of the 419whole group; that is, while the group leader is disabled, none of the
@@ -424,16 +424,16 @@ other counter.
424 424
425Additionally, non-inherited overflow counters can use 425Additionally, non-inherited overflow counters can use
426 426
427 ioctl(fd, PERF_COUNTER_IOC_REFRESH, nr); 427 ioctl(fd, PERF_EVENT_IOC_REFRESH, nr);
428 428
429to enable a counter for 'nr' events, after which it gets disabled again. 429to enable a counter for 'nr' events, after which it gets disabled again.
430 430
431A process can enable or disable all the counter groups that are 431A process can enable or disable all the counter groups that are
432attached to it, using prctl: 432attached to it, using prctl:
433 433
434 prctl(PR_TASK_PERF_COUNTERS_ENABLE); 434 prctl(PR_TASK_PERF_EVENTS_ENABLE);
435 435
436 prctl(PR_TASK_PERF_COUNTERS_DISABLE); 436 prctl(PR_TASK_PERF_EVENTS_DISABLE);
437 437
438This applies to all counters on the current process, whether created 438This applies to all counters on the current process, whether created
439by this process or by another, and doesn't affect any counters that 439by this process or by another, and doesn't affect any counters that
@@ -447,11 +447,11 @@ Arch requirements
447If your architecture does not have hardware performance metrics, you can 447If your architecture does not have hardware performance metrics, you can
448still use the generic software counters based on hrtimers for sampling. 448still use the generic software counters based on hrtimers for sampling.
449 449
450So to start with, in order to add HAVE_PERF_COUNTERS to your Kconfig, you 450So to start with, in order to add HAVE_PERF_EVENTS to your Kconfig, you
451will need at least this: 451will need at least this:
452 - asm/perf_counter.h - a basic stub will suffice at first 452 - asm/perf_event.h - a basic stub will suffice at first
453 - support for atomic64 types (and associated helper functions) 453 - support for atomic64 types (and associated helper functions)
454 - set_perf_counter_pending() implemented 454 - set_perf_event_pending() implemented
455 455
456If your architecture does have hardware capabilities, you can override the 456If your architecture does have hardware capabilities, you can override the
457weak stub hw_perf_counter_init() to register hardware counters. 457weak stub hw_perf_event_init() to register hardware counters.
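Pulling the design.txt pieces together, here is a minimal self-contained sketch of opening a two-counter group with the renamed API and toggling it through the group leader. Only <linux/perf_event.h> and the raw syscall are assumed, since glibc ships no wrapper:

	#include <stdio.h>
	#include <string.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <sys/syscall.h>
	#include <linux/perf_event.h>

	static int perf_event_open(struct perf_event_attr *attr, pid_t pid,
				   int cpu, int group_fd, unsigned long flags)
	{
		return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
	}

	int main(void)
	{
		struct perf_event_attr attr;
		long long count;
		int leader, member;

		memset(&attr, 0, sizeof(attr));
		attr.size = sizeof(attr);
		attr.type = PERF_TYPE_HARDWARE;
		attr.config = PERF_COUNT_HW_CPU_CYCLES;
		attr.disabled = 1;

		/* the group leader is created with group_fd = -1 ... */
		leader = perf_event_open(&attr, 0, -1, -1, 0);

		/* ... and members pass the leader's fd */
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.disabled = 0;
		member = perf_event_open(&attr, 0, -1, leader, 0);

		ioctl(leader, PERF_EVENT_IOC_ENABLE, 0);   /* enables the whole group */
		/* ... workload ... */
		ioctl(leader, PERF_EVENT_IOC_DISABLE, 0);

		read(leader, &count, sizeof(count));
		printf("cycles:       %lld\n", count);
		read(member, &count, sizeof(count));
		printf("instructions: %lld\n", count);
		return 0;
	}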
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index fe4589dde950..19fc7feb9d59 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -289,10 +289,12 @@ static void handle_internal_command(int argc, const char **argv)
289 { "record", cmd_record, 0 }, 289 { "record", cmd_record, 0 },
290 { "report", cmd_report, 0 }, 290 { "report", cmd_report, 0 },
291 { "stat", cmd_stat, 0 }, 291 { "stat", cmd_stat, 0 },
292 { "timechart", cmd_timechart, 0 },
292 { "top", cmd_top, 0 }, 293 { "top", cmd_top, 0 },
293 { "annotate", cmd_annotate, 0 }, 294 { "annotate", cmd_annotate, 0 },
294 { "version", cmd_version, 0 }, 295 { "version", cmd_version, 0 },
295 { "trace", cmd_trace, 0 }, 296 { "trace", cmd_trace, 0 },
297 { "sched", cmd_sched, 0 },
296 }; 298 };
297 unsigned int i; 299 unsigned int i;
298 static const char ext[] = STRIP_EXTENSION; 300 static const char ext[] = STRIP_EXTENSION;
diff --git a/tools/perf/perf.h b/tools/perf/perf.h
index 2abeb20d0bf3..8cc4623afd6f 100644
--- a/tools/perf/perf.h
+++ b/tools/perf/perf.h
@@ -52,15 +52,15 @@
52#include <sys/types.h> 52#include <sys/types.h>
53#include <sys/syscall.h> 53#include <sys/syscall.h>
54 54
55#include "../../include/linux/perf_counter.h" 55#include "../../include/linux/perf_event.h"
56#include "util/types.h" 56#include "util/types.h"
57 57
58/* 58/*
59 * prctl(PR_TASK_PERF_COUNTERS_DISABLE) will (cheaply) disable all 59 * prctl(PR_TASK_PERF_EVENTS_DISABLE) will (cheaply) disable all
60 * counters in the current task. 60 * counters in the current task.
61 */ 61 */
62#define PR_TASK_PERF_COUNTERS_DISABLE 31 62#define PR_TASK_PERF_EVENTS_DISABLE 31
63#define PR_TASK_PERF_COUNTERS_ENABLE 32 63#define PR_TASK_PERF_EVENTS_ENABLE 32
64 64
65#ifndef NSEC_PER_SEC 65#ifndef NSEC_PER_SEC
66# define NSEC_PER_SEC 1000000000ULL 66# define NSEC_PER_SEC 1000000000ULL
@@ -90,12 +90,12 @@ static inline unsigned long long rdclock(void)
90 _min1 < _min2 ? _min1 : _min2; }) 90 _min1 < _min2 ? _min1 : _min2; })
91 91
92static inline int 92static inline int
93sys_perf_counter_open(struct perf_counter_attr *attr, 93sys_perf_event_open(struct perf_event_attr *attr,
94 pid_t pid, int cpu, int group_fd, 94 pid_t pid, int cpu, int group_fd,
95 unsigned long flags) 95 unsigned long flags)
96{ 96{
97 attr->size = sizeof(*attr); 97 attr->size = sizeof(*attr);
98 return syscall(__NR_perf_counter_open, attr, pid, cpu, 98 return syscall(__NR_perf_event_open, attr, pid, cpu,
99 group_fd, flags); 99 group_fd, flags);
100} 100}
101 101
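The inline wrapper above (which also fills in attr->size) is all a perf tool needs for a simple counting use. A sketch, assuming the usual perf.h/util context for u64 and die():

	struct perf_event_attr attr;
	u64 count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_SOFTWARE;
	attr.config = PERF_COUNT_SW_TASK_CLOCK;

	/* pid 0: this task, cpu -1: any cpu, no group, no flags */
	fd = sys_perf_event_open(&attr, 0, -1, -1, 0);
	if (fd < 0)
		die("sys_perf_event_open failed\n");

	/* ... measured work ... */

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("task-clock: %llu ns\n", (unsigned long long)count);
	close(fd);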
diff --git a/tools/perf/util/event.h b/tools/perf/util/event.h
index fa2d4e91d329..2c9c26d6ded0 100644
--- a/tools/perf/util/event.h
+++ b/tools/perf/util/event.h
@@ -1,5 +1,5 @@
1#ifndef __PERF_EVENT_H 1#ifndef __PERF_RECORD_H
2#define __PERF_EVENT_H 2#define __PERF_RECORD_H
3#include "../perf.h" 3#include "../perf.h"
4#include "util.h" 4#include "util.h"
5#include <linux/list.h> 5#include <linux/list.h>
@@ -39,6 +39,7 @@ struct fork_event {
39 struct perf_event_header header; 39 struct perf_event_header header;
40 u32 pid, ppid; 40 u32 pid, ppid;
41 u32 tid, ptid; 41 u32 tid, ptid;
42 u64 time;
42}; 43};
43 44
44struct lost_event { 45struct lost_event {
@@ -52,13 +53,19 @@ struct lost_event {
52 */ 53 */
53struct read_event { 54struct read_event {
54 struct perf_event_header header; 55 struct perf_event_header header;
55 u32 pid,tid; 56 u32 pid, tid;
56 u64 value; 57 u64 value;
57 u64 time_enabled; 58 u64 time_enabled;
58 u64 time_running; 59 u64 time_running;
59 u64 id; 60 u64 id;
60}; 61};
61 62
 63struct sample_event {
64 struct perf_event_header header;
65 u64 array[];
66};
67
68
62typedef union event_union { 69typedef union event_union {
63 struct perf_event_header header; 70 struct perf_event_header header;
64 struct ip_event ip; 71 struct ip_event ip;
@@ -67,6 +74,7 @@ typedef union event_union {
67 struct fork_event fork; 74 struct fork_event fork;
68 struct lost_event lost; 75 struct lost_event lost;
69 struct read_event read; 76 struct read_event read;
77 struct sample_event sample;
70} event_t; 78} event_t;
71 79
72struct map { 80struct map {
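The new sample_event member exposes the variable-length sample body as a bare u64 array; which entries are present, and in what order, is dictated by the sample_type bits the event was opened with. A sketch of how a consumer peels off the leading fields, assuming IP, TID and TIME were requested:

	u64 *array = event->sample.array;
	u64 ip = 0, time = 0;
	u32 pid = 0, tid = 0;

	if (sample_type & PERF_SAMPLE_IP)
		ip = *array++;

	if (sample_type & PERF_SAMPLE_TID) {
		u32 *p = (u32 *)array;	/* pid and tid share one u64 slot */
		pid = p[0];
		tid = p[1];
		array++;
	}

	if (sample_type & PERF_SAMPLE_TIME)
		time = *array++;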
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index ec4d4c2f9522..e306857b2c2b 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -7,10 +7,9 @@
7#include "header.h" 7#include "header.h"
8 8
9/* 9/*
10 * 10 * Create new perf.data header attribute:
11 */ 11 */
12 12struct perf_header_attr *perf_header_attr__new(struct perf_event_attr *attr)
13struct perf_header_attr *perf_header_attr__new(struct perf_counter_attr *attr)
14{ 13{
15 struct perf_header_attr *self = malloc(sizeof(*self)); 14 struct perf_header_attr *self = malloc(sizeof(*self));
16 15
@@ -43,9 +42,8 @@ void perf_header_attr__add_id(struct perf_header_attr *self, u64 id)
43} 42}
44 43
45/* 44/*
46 * 45 * Create new perf.data header:
47 */ 46 */
48
49struct perf_header *perf_header__new(void) 47struct perf_header *perf_header__new(void)
50{ 48{
51 struct perf_header *self = malloc(sizeof(*self)); 49 struct perf_header *self = malloc(sizeof(*self));
@@ -86,6 +84,46 @@ void perf_header__add_attr(struct perf_header *self,
86 self->attr[pos] = attr; 84 self->attr[pos] = attr;
87} 85}
88 86
87#define MAX_EVENT_NAME 64
88
89struct perf_trace_event_type {
90 u64 event_id;
91 char name[MAX_EVENT_NAME];
92};
93
94static int event_count;
95static struct perf_trace_event_type *events;
96
97void perf_header__push_event(u64 id, const char *name)
98{
 99 if (strlen(name) >= MAX_EVENT_NAME)
100 printf("Event %s will be truncated\n", name);
101
102 if (!events) {
103 events = malloc(sizeof(struct perf_trace_event_type));
104 if (!events)
105 die("nomem");
106 } else {
107 events = realloc(events, (event_count + 1) * sizeof(struct perf_trace_event_type));
108 if (!events)
109 die("nomem");
110 }
111 memset(&events[event_count], 0, sizeof(struct perf_trace_event_type));
112 events[event_count].event_id = id;
113 strncpy(events[event_count].name, name, MAX_EVENT_NAME - 1);
114 event_count++;
115}
116
117char *perf_header__find_event(u64 id)
118{
119 int i;
120 for (i = 0 ; i < event_count; i++) {
121 if (events[i].event_id == id)
122 return events[i].name;
123 }
124 return NULL;
125}
126
89static const char *__perf_magic = "PERFFILE"; 127static const char *__perf_magic = "PERFFILE";
90 128
91#define PERF_MAGIC (*(u64 *)__perf_magic) 129#define PERF_MAGIC (*(u64 *)__perf_magic)
@@ -96,7 +134,7 @@ struct perf_file_section {
96}; 134};
97 135
98struct perf_file_attr { 136struct perf_file_attr {
99 struct perf_counter_attr attr; 137 struct perf_event_attr attr;
100 struct perf_file_section ids; 138 struct perf_file_section ids;
101}; 139};
102 140
@@ -106,6 +144,7 @@ struct perf_file_header {
106 u64 attr_size; 144 u64 attr_size;
107 struct perf_file_section attrs; 145 struct perf_file_section attrs;
108 struct perf_file_section data; 146 struct perf_file_section data;
147 struct perf_file_section event_types;
109}; 148};
110 149
111static void do_write(int fd, void *buf, size_t size) 150static void do_write(int fd, void *buf, size_t size)
@@ -154,6 +193,11 @@ void perf_header__write(struct perf_header *self, int fd)
154 do_write(fd, &f_attr, sizeof(f_attr)); 193 do_write(fd, &f_attr, sizeof(f_attr));
155 } 194 }
156 195
196 self->event_offset = lseek(fd, 0, SEEK_CUR);
197 self->event_size = event_count * sizeof(struct perf_trace_event_type);
198 if (events)
199 do_write(fd, events, self->event_size);
200
157 201
158 self->data_offset = lseek(fd, 0, SEEK_CUR); 202 self->data_offset = lseek(fd, 0, SEEK_CUR);
159 203
@@ -169,6 +213,10 @@ void perf_header__write(struct perf_header *self, int fd)
169 .offset = self->data_offset, 213 .offset = self->data_offset,
170 .size = self->data_size, 214 .size = self->data_size,
171 }, 215 },
216 .event_types = {
217 .offset = self->event_offset,
218 .size = self->event_size,
219 },
172 }; 220 };
173 221
174 lseek(fd, 0, SEEK_SET); 222 lseek(fd, 0, SEEK_SET);
@@ -234,6 +282,17 @@ struct perf_header *perf_header__read(int fd)
234 lseek(fd, tmp, SEEK_SET); 282 lseek(fd, tmp, SEEK_SET);
235 } 283 }
236 284
285 if (f_header.event_types.size) {
286 lseek(fd, f_header.event_types.offset, SEEK_SET);
287 events = malloc(f_header.event_types.size);
288 if (!events)
289 die("nomem");
290 do_read(fd, events, f_header.event_types.size);
291 event_count = f_header.event_types.size / sizeof(struct perf_trace_event_type);
292 }
293 self->event_offset = f_header.event_types.offset;
294 self->event_size = f_header.event_types.size;
295
237 self->data_offset = f_header.data.offset; 296 self->data_offset = f_header.data.offset;
238 self->data_size = f_header.data.size; 297 self->data_size = f_header.data.size;
239 298
@@ -261,7 +320,7 @@ u64 perf_header__sample_type(struct perf_header *header)
261 return type; 320 return type;
262} 321}
263 322
264struct perf_counter_attr * 323struct perf_event_attr *
265perf_header__find_attr(u64 id, struct perf_header *header) 324perf_header__find_attr(u64 id, struct perf_header *header)
266{ 325{
267 int i; 326 int i;
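The table written here is what lets the reporting side resolve a recorded tracepoint id back to the name it was recorded under: perf_header__push_event() is fed at record time (see store_event_type() in parse-events.c below), and perf_header__find_event() answers lookups after perf_header__read(). A report-side sketch:

	char *name = perf_header__find_event(attr->config);

	if (name)
		printf("event: %s\n", name);
	else
		printf("unknown event id %llu\n",
		       (unsigned long long)attr->config);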
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 5d0a72ecc919..a0761bc7863c 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -1,12 +1,12 @@
1#ifndef _PERF_HEADER_H 1#ifndef _PERF_HEADER_H
2#define _PERF_HEADER_H 2#define _PERF_HEADER_H
3 3
4#include "../../../include/linux/perf_counter.h" 4#include "../../../include/linux/perf_event.h"
5#include <sys/types.h> 5#include <sys/types.h>
6#include "types.h" 6#include "types.h"
7 7
8struct perf_header_attr { 8struct perf_header_attr {
9 struct perf_counter_attr attr; 9 struct perf_event_attr attr;
10 int ids, size; 10 int ids, size;
11 u64 *id; 11 u64 *id;
12 off_t id_offset; 12 off_t id_offset;
@@ -19,6 +19,8 @@ struct perf_header {
19 s64 attr_offset; 19 s64 attr_offset;
20 u64 data_offset; 20 u64 data_offset;
21 u64 data_size; 21 u64 data_size;
22 u64 event_offset;
23 u64 event_size;
22}; 24};
23 25
24struct perf_header *perf_header__read(int fd); 26struct perf_header *perf_header__read(int fd);
@@ -27,12 +29,16 @@ void perf_header__write(struct perf_header *self, int fd);
27void perf_header__add_attr(struct perf_header *self, 29void perf_header__add_attr(struct perf_header *self,
28 struct perf_header_attr *attr); 30 struct perf_header_attr *attr);
29 31
32void perf_header__push_event(u64 id, const char *name);
33char *perf_header__find_event(u64 id);
34
35
30struct perf_header_attr * 36struct perf_header_attr *
31perf_header_attr__new(struct perf_counter_attr *attr); 37perf_header_attr__new(struct perf_event_attr *attr);
32void perf_header_attr__add_id(struct perf_header_attr *self, u64 id); 38void perf_header_attr__add_id(struct perf_header_attr *self, u64 id);
33 39
34u64 perf_header__sample_type(struct perf_header *header); 40u64 perf_header__sample_type(struct perf_header *header);
35struct perf_counter_attr * 41struct perf_event_attr *
36perf_header__find_attr(u64 id, struct perf_header *header); 42perf_header__find_attr(u64 id, struct perf_header *header);
37 43
38 44
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index a587d41ae3c9..13ab4b842d49 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -6,10 +6,11 @@
6#include "exec_cmd.h" 6#include "exec_cmd.h"
7#include "string.h" 7#include "string.h"
8#include "cache.h" 8#include "cache.h"
9#include "header.h"
9 10
10int nr_counters; 11int nr_counters;
11 12
12struct perf_counter_attr attrs[MAX_COUNTERS]; 13struct perf_event_attr attrs[MAX_COUNTERS];
13 14
14struct event_symbol { 15struct event_symbol {
15 u8 type; 16 u8 type;
@@ -18,6 +19,12 @@ struct event_symbol {
18 const char *alias; 19 const char *alias;
19}; 20};
20 21
22enum event_result {
23 EVT_FAILED,
24 EVT_HANDLED,
25 EVT_HANDLED_ALL
26};
27
21char debugfs_path[MAXPATHLEN]; 28char debugfs_path[MAXPATHLEN];
22 29
23#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x 30#define CHW(x) .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_##x
@@ -41,13 +48,13 @@ static struct event_symbol event_symbols[] = {
41 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" }, 48 { CSW(CPU_MIGRATIONS), "cpu-migrations", "migrations" },
42}; 49};
43 50
44#define __PERF_COUNTER_FIELD(config, name) \ 51#define __PERF_EVENT_FIELD(config, name) \
45 ((config & PERF_COUNTER_##name##_MASK) >> PERF_COUNTER_##name##_SHIFT) 52 ((config & PERF_EVENT_##name##_MASK) >> PERF_EVENT_##name##_SHIFT)
46 53
47#define PERF_COUNTER_RAW(config) __PERF_COUNTER_FIELD(config, RAW) 54#define PERF_EVENT_RAW(config) __PERF_EVENT_FIELD(config, RAW)
48#define PERF_COUNTER_CONFIG(config) __PERF_COUNTER_FIELD(config, CONFIG) 55#define PERF_EVENT_CONFIG(config) __PERF_EVENT_FIELD(config, CONFIG)
49#define PERF_COUNTER_TYPE(config) __PERF_COUNTER_FIELD(config, TYPE) 56#define PERF_EVENT_TYPE(config) __PERF_EVENT_FIELD(config, TYPE)
50#define PERF_COUNTER_ID(config) __PERF_COUNTER_FIELD(config, EVENT) 57#define PERF_EVENT_ID(config) __PERF_EVENT_FIELD(config, EVENT)
51 58
52static const char *hw_event_names[] = { 59static const char *hw_event_names[] = {
53 "cycles", 60 "cycles",
@@ -139,7 +146,7 @@ static int tp_event_has_id(struct dirent *sys_dir, struct dirent *evt_dir)
139 (strcmp(evt_dirent.d_name, "..")) && \ 146 (strcmp(evt_dirent.d_name, "..")) && \
140 (!tp_event_has_id(&sys_dirent, &evt_dirent))) 147 (!tp_event_has_id(&sys_dirent, &evt_dirent)))
141 148
142#define MAX_EVENT_LENGTH 30 149#define MAX_EVENT_LENGTH 512
143 150
144int valid_debugfs_mount(const char *debugfs) 151int valid_debugfs_mount(const char *debugfs)
145{ 152{
@@ -344,8 +351,8 @@ static int parse_aliases(const char **str, const char *names[][MAX_ALIASES], int
344 return -1; 351 return -1;
345} 352}
346 353
347static int 354static enum event_result
348parse_generic_hw_event(const char **str, struct perf_counter_attr *attr) 355parse_generic_hw_event(const char **str, struct perf_event_attr *attr)
349{ 356{
350 const char *s = *str; 357 const char *s = *str;
351 int cache_type = -1, cache_op = -1, cache_result = -1; 358 int cache_type = -1, cache_op = -1, cache_result = -1;
@@ -356,7 +363,7 @@ parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
356 * then bail out: 363 * then bail out:
357 */ 364 */
358 if (cache_type == -1) 365 if (cache_type == -1)
359 return 0; 366 return EVT_FAILED;
360 367
361 while ((cache_op == -1 || cache_result == -1) && *s == '-') { 368 while ((cache_op == -1 || cache_result == -1) && *s == '-') {
362 ++s; 369 ++s;
@@ -402,27 +409,115 @@ parse_generic_hw_event(const char **str, struct perf_counter_attr *attr)
402 attr->type = PERF_TYPE_HW_CACHE; 409 attr->type = PERF_TYPE_HW_CACHE;
403 410
404 *str = s; 411 *str = s;
405 return 1; 412 return EVT_HANDLED;
413}
414
415static enum event_result
416parse_single_tracepoint_event(char *sys_name,
417 const char *evt_name,
418 unsigned int evt_length,
419 char *flags,
420 struct perf_event_attr *attr,
421 const char **strp)
422{
423 char evt_path[MAXPATHLEN];
424 char id_buf[4];
425 u64 id;
426 int fd;
427
428 if (flags) {
429 if (!strncmp(flags, "record", strlen(flags))) {
430 attr->sample_type |= PERF_SAMPLE_RAW;
431 attr->sample_type |= PERF_SAMPLE_TIME;
432 attr->sample_type |= PERF_SAMPLE_CPU;
433 }
434 }
435
436 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
437 sys_name, evt_name);
438
439 fd = open(evt_path, O_RDONLY);
440 if (fd < 0)
441 return EVT_FAILED;
442
443 if (read(fd, id_buf, sizeof(id_buf)) < 0) {
444 close(fd);
445 return EVT_FAILED;
446 }
447
448 close(fd);
449 id = atoll(id_buf);
450 attr->config = id;
451 attr->type = PERF_TYPE_TRACEPOINT;
452 *strp = evt_name + evt_length;
453
454 return EVT_HANDLED;
455}
456
 457/* sys + ':' + event + ':' + flags */
458#define MAX_EVOPT_LEN (MAX_EVENT_LENGTH * 2 + 2 + 128)
459static enum event_result
460parse_subsystem_tracepoint_event(char *sys_name, char *flags)
461{
462 char evt_path[MAXPATHLEN];
463 struct dirent *evt_ent;
464 DIR *evt_dir;
465
466 snprintf(evt_path, MAXPATHLEN, "%s/%s", debugfs_path, sys_name);
467 evt_dir = opendir(evt_path);
468
469 if (!evt_dir) {
470 perror("Can't open event dir");
471 return EVT_FAILED;
472 }
473
474 while ((evt_ent = readdir(evt_dir))) {
475 char event_opt[MAX_EVOPT_LEN + 1];
476 int len;
477 unsigned int rem = MAX_EVOPT_LEN;
478
479 if (!strcmp(evt_ent->d_name, ".")
480 || !strcmp(evt_ent->d_name, "..")
481 || !strcmp(evt_ent->d_name, "enable")
482 || !strcmp(evt_ent->d_name, "filter"))
483 continue;
484
485 len = snprintf(event_opt, MAX_EVOPT_LEN, "%s:%s", sys_name,
486 evt_ent->d_name);
487 if (len < 0)
488 return EVT_FAILED;
489
490 rem -= len;
491 if (flags) {
492 if (rem < strlen(flags) + 1)
493 return EVT_FAILED;
494
495 strcat(event_opt, ":");
496 strcat(event_opt, flags);
497 }
498
499 if (parse_events(NULL, event_opt, 0))
500 return EVT_FAILED;
501 }
502
503 return EVT_HANDLED_ALL;
406} 504}
407 505
408static int parse_tracepoint_event(const char **strp, 506
409 struct perf_counter_attr *attr) 507static enum event_result parse_tracepoint_event(const char **strp,
508 struct perf_event_attr *attr)
410{ 509{
411 const char *evt_name; 510 const char *evt_name;
412 char *flags; 511 char *flags;
413 char sys_name[MAX_EVENT_LENGTH]; 512 char sys_name[MAX_EVENT_LENGTH];
414 char id_buf[4];
415 int fd;
416 unsigned int sys_length, evt_length; 513 unsigned int sys_length, evt_length;
417 u64 id;
418 char evt_path[MAXPATHLEN];
419 514
420 if (valid_debugfs_mount(debugfs_path)) 515 if (valid_debugfs_mount(debugfs_path))
421 return 0; 516 return 0;
422 517
423 evt_name = strchr(*strp, ':'); 518 evt_name = strchr(*strp, ':');
424 if (!evt_name) 519 if (!evt_name)
425 return 0; 520 return EVT_FAILED;
426 521
427 sys_length = evt_name - *strp; 522 sys_length = evt_name - *strp;
428 if (sys_length >= MAX_EVENT_LENGTH) 523 if (sys_length >= MAX_EVENT_LENGTH)
@@ -434,32 +529,22 @@ static int parse_tracepoint_event(const char **strp,
434 529
435 flags = strchr(evt_name, ':'); 530 flags = strchr(evt_name, ':');
436 if (flags) { 531 if (flags) {
437 *flags = '\0'; 532 /* split it out: */
533 evt_name = strndup(evt_name, flags - evt_name);
438 flags++; 534 flags++;
439 if (!strncmp(flags, "record", strlen(flags)))
440 attr->sample_type |= PERF_SAMPLE_RAW;
441 } 535 }
442 536
443 evt_length = strlen(evt_name); 537 evt_length = strlen(evt_name);
444 if (evt_length >= MAX_EVENT_LENGTH) 538 if (evt_length >= MAX_EVENT_LENGTH)
445 return 0; 539 return EVT_FAILED;
446
447 snprintf(evt_path, MAXPATHLEN, "%s/%s/%s/id", debugfs_path,
448 sys_name, evt_name);
449 fd = open(evt_path, O_RDONLY);
450 if (fd < 0)
451 return 0;
452 540
453 if (read(fd, id_buf, sizeof(id_buf)) < 0) { 541 if (!strcmp(evt_name, "*")) {
454 close(fd); 542 *strp = evt_name + evt_length;
455 return 0; 543 return parse_subsystem_tracepoint_event(sys_name, flags);
456 } 544 } else
457 close(fd); 545 return parse_single_tracepoint_event(sys_name, evt_name,
458 id = atoll(id_buf); 546 evt_length, flags,
459 attr->config = id; 547 attr, strp);
460 attr->type = PERF_TYPE_TRACEPOINT;
461 *strp = evt_name + evt_length;
462 return 1;
463} 548}
464 549
465static int check_events(const char *str, unsigned int i) 550static int check_events(const char *str, unsigned int i)
@@ -477,8 +562,8 @@ static int check_events(const char *str, unsigned int i)
477 return 0; 562 return 0;
478} 563}
479 564
480static int 565static enum event_result
481parse_symbolic_event(const char **strp, struct perf_counter_attr *attr) 566parse_symbolic_event(const char **strp, struct perf_event_attr *attr)
482{ 567{
483 const char *str = *strp; 568 const char *str = *strp;
484 unsigned int i; 569 unsigned int i;
@@ -490,32 +575,33 @@ parse_symbolic_event(const char **strp, struct perf_counter_attr *attr)
490 attr->type = event_symbols[i].type; 575 attr->type = event_symbols[i].type;
491 attr->config = event_symbols[i].config; 576 attr->config = event_symbols[i].config;
492 *strp = str + n; 577 *strp = str + n;
493 return 1; 578 return EVT_HANDLED;
494 } 579 }
495 } 580 }
496 return 0; 581 return EVT_FAILED;
497} 582}
498 583
499static int parse_raw_event(const char **strp, struct perf_counter_attr *attr) 584static enum event_result
585parse_raw_event(const char **strp, struct perf_event_attr *attr)
500{ 586{
501 const char *str = *strp; 587 const char *str = *strp;
502 u64 config; 588 u64 config;
503 int n; 589 int n;
504 590
505 if (*str != 'r') 591 if (*str != 'r')
506 return 0; 592 return EVT_FAILED;
507 n = hex2u64(str + 1, &config); 593 n = hex2u64(str + 1, &config);
508 if (n > 0) { 594 if (n > 0) {
509 *strp = str + n + 1; 595 *strp = str + n + 1;
510 attr->type = PERF_TYPE_RAW; 596 attr->type = PERF_TYPE_RAW;
511 attr->config = config; 597 attr->config = config;
512 return 1; 598 return EVT_HANDLED;
513 } 599 }
514 return 0; 600 return EVT_FAILED;
515} 601}
516 602
517static int 603static enum event_result
518parse_numeric_event(const char **strp, struct perf_counter_attr *attr) 604parse_numeric_event(const char **strp, struct perf_event_attr *attr)
519{ 605{
520 const char *str = *strp; 606 const char *str = *strp;
521 char *endp; 607 char *endp;
@@ -530,14 +616,14 @@ parse_numeric_event(const char **strp, struct perf_counter_attr *attr)
530 attr->type = type; 616 attr->type = type;
531 attr->config = config; 617 attr->config = config;
532 *strp = endp; 618 *strp = endp;
533 return 1; 619 return EVT_HANDLED;
534 } 620 }
535 } 621 }
536 return 0; 622 return EVT_FAILED;
537} 623}
538 624
539static int 625static enum event_result
540parse_event_modifier(const char **strp, struct perf_counter_attr *attr) 626parse_event_modifier(const char **strp, struct perf_event_attr *attr)
541{ 627{
542 const char *str = *strp; 628 const char *str = *strp;
543 int eu = 1, ek = 1, eh = 1; 629 int eu = 1, ek = 1, eh = 1;
@@ -569,37 +655,84 @@ parse_event_modifier(const char **strp, struct perf_counter_attr *attr)
569 * Each event can have multiple symbolic names. 655 * Each event can have multiple symbolic names.
570 * Symbolic names are (almost) exactly matched. 656 * Symbolic names are (almost) exactly matched.
571 */ 657 */
572static int parse_event_symbols(const char **str, struct perf_counter_attr *attr) 658static enum event_result
659parse_event_symbols(const char **str, struct perf_event_attr *attr)
573{ 660{
574 if (!(parse_tracepoint_event(str, attr) || 661 enum event_result ret;
575 parse_raw_event(str, attr) || 662
576 parse_numeric_event(str, attr) || 663 ret = parse_tracepoint_event(str, attr);
577 parse_symbolic_event(str, attr) || 664 if (ret != EVT_FAILED)
578 parse_generic_hw_event(str, attr))) 665 goto modifier;
579 return 0; 666
667 ret = parse_raw_event(str, attr);
668 if (ret != EVT_FAILED)
669 goto modifier;
670
671 ret = parse_numeric_event(str, attr);
672 if (ret != EVT_FAILED)
673 goto modifier;
674
675 ret = parse_symbolic_event(str, attr);
676 if (ret != EVT_FAILED)
677 goto modifier;
580 678
679 ret = parse_generic_hw_event(str, attr);
680 if (ret != EVT_FAILED)
681 goto modifier;
682
683 return EVT_FAILED;
684
685modifier:
581 parse_event_modifier(str, attr); 686 parse_event_modifier(str, attr);
582 687
583 return 1; 688 return ret;
584} 689}
585 690
691static void store_event_type(const char *orgname)
692{
693 char filename[PATH_MAX], *c;
694 FILE *file;
695 int id;
696
697 sprintf(filename, "/sys/kernel/debug/tracing/events/%s/id", orgname);
698 c = strchr(filename, ':');
699 if (c)
700 *c = '/';
701
702 file = fopen(filename, "r");
703 if (!file)
704 return;
705 if (fscanf(file, "%i", &id) < 1)
706 die("cannot store event ID");
707 fclose(file);
708 perf_header__push_event(id, orgname);
709}
710
711
586int parse_events(const struct option *opt __used, const char *str, int unset __used) 712int parse_events(const struct option *opt __used, const char *str, int unset __used)
587{ 713{
588 struct perf_counter_attr attr; 714 struct perf_event_attr attr;
715 enum event_result ret;
716
717 if (strchr(str, ':'))
718 store_event_type(str);
589 719
590 for (;;) { 720 for (;;) {
591 if (nr_counters == MAX_COUNTERS) 721 if (nr_counters == MAX_COUNTERS)
592 return -1; 722 return -1;
593 723
594 memset(&attr, 0, sizeof(attr)); 724 memset(&attr, 0, sizeof(attr));
595 if (!parse_event_symbols(&str, &attr)) 725 ret = parse_event_symbols(&str, &attr);
726 if (ret == EVT_FAILED)
596 return -1; 727 return -1;
597 728
598 if (!(*str == 0 || *str == ',' || isspace(*str))) 729 if (!(*str == 0 || *str == ',' || isspace(*str)))
599 return -1; 730 return -1;
600 731
601 attrs[nr_counters] = attr; 732 if (ret != EVT_HANDLED_ALL) {
602 nr_counters++; 733 attrs[nr_counters] = attr;
734 nr_counters++;
735 }
603 736
604 if (*str == 0) 737 if (*str == 0)
605 break; 738 break;
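The EVT_HANDLED_ALL result is what makes subsystem wildcards work: for 'sys:*', parse_subsystem_tracepoint_event() walks the tracepoint directory and feeds each 'sys:event[:flags]' string back through parse_events(), so the wildcard itself never lands in attrs[]. Both call styles go through the same entry point the recursion uses:

	/* one tracepoint; the "record" flag switches on
	 * PERF_SAMPLE_RAW/TIME/CPU sampling: */
	parse_events(NULL, "sched:sched_switch:record", 0);

	/* every event in the subsystem; expands via
	 * parse_subsystem_tracepoint_event() -> EVT_HANDLED_ALL: */
	parse_events(NULL, "sched:*", 0);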
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index 60704c15961f..30c608112845 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -16,7 +16,7 @@ extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
16 16
17extern int nr_counters; 17extern int nr_counters;
18 18
19extern struct perf_counter_attr attrs[MAX_COUNTERS]; 19extern struct perf_event_attr attrs[MAX_COUNTERS];
20 20
21extern const char *event_name(int ctr); 21extern const char *event_name(int ctr);
22extern const char *__event_name(int type, u64 config); 22extern const char *__event_name(int type, u64 config);
diff --git a/tools/perf/util/parse-options.h b/tools/perf/util/parse-options.h
index 8aa3464c7090..2ee248ff27e5 100644
--- a/tools/perf/util/parse-options.h
+++ b/tools/perf/util/parse-options.h
@@ -104,6 +104,8 @@ struct option {
104 { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb } 104 { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
105#define OPT_CALLBACK(s, l, v, a, h, f) \ 105#define OPT_CALLBACK(s, l, v, a, h, f) \
106 { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f) } 106 { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f) }
107#define OPT_CALLBACK_NOOPT(s, l, v, a, h, f) \
108 { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .flags = PARSE_OPT_NOARG }
107#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \ 109#define OPT_CALLBACK_DEFAULT(s, l, v, a, h, f, d) \
108 { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT } 110 { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), (a), .help = (h), .callback = (f), .defval = (intptr_t)d, .flags = PARSE_OPT_LASTARG_DEFAULT }
109 111
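OPT_CALLBACK_NOOPT registers a callback option that takes no argument (PARSE_OPT_NOARG), useful when a bare flag needs side effects beyond setting an int. A hypothetical table entry; the option name and callback below are illustrative, not taken from this diff:

	OPT_CALLBACK_NOOPT('P', "power-only", NULL, NULL,
			   "output power events only",
			   parse_power_only),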
diff --git a/tools/perf/util/svghelper.c b/tools/perf/util/svghelper.c
new file mode 100644
index 000000000000..a778fd0f4ae4
--- /dev/null
+++ b/tools/perf/util/svghelper.c
@@ -0,0 +1,488 @@
1/*
2 * svghelper.c - helper functions for outputting svg
3 *
4 * (C) Copyright 2009 Intel Corporation
5 *
6 * Authors:
7 * Arjan van de Ven <arjan@linux.intel.com>
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License
11 * as published by the Free Software Foundation; version 2
12 * of the License.
13 */
14
15#include <stdio.h>
16#include <stdlib.h>
17#include <unistd.h>
18#include <string.h>
19
20#include "svghelper.h"
21
22static u64 first_time, last_time;
23static u64 turbo_frequency, max_freq;
24
25
26#define SLOT_MULT 30.0
27#define SLOT_HEIGHT 25.0
28
29int svg_page_width = 1000;
30
31#define MIN_TEXT_SIZE 0.001
32
33static u64 total_height;
34static FILE *svgfile;
35
36static double cpu2slot(int cpu)
37{
38 return 2 * cpu + 1;
39}
40
41static double cpu2y(int cpu)
42{
43 return cpu2slot(cpu) * SLOT_MULT;
44}
45
46static double time2pixels(u64 time)
47{
48 double X;
49
50 X = 1.0 * svg_page_width * (time - first_time) / (last_time - first_time);
51 return X;
52}
53
54/*
55 * Round text sizes so that the svg viewer only needs a discrete
56 * number of renderings of the font
57 */
58static double round_text_size(double size)
59{
60 int loop = 100;
61 double target = 10.0;
62
63 if (size >= 10.0)
64 return size;
65 while (loop--) {
66 if (size >= target)
67 return target;
68 target = target / 2.0;
69 }
70 return size;
71}
72
73void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end)
74{
75 int new_width;
76
77 svgfile = fopen(filename, "w");
78 if (!svgfile) {
79 fprintf(stderr, "Cannot open %s for output\n", filename);
80 return;
81 }
82 first_time = start;
83 first_time = first_time / 100000000 * 100000000;
84 last_time = end;
85
86 /*
87 * if the recording is short, we default to a width of 1000, but
88 * for longer recordings we want at least 200 units of width per second
89 */
90 new_width = (last_time - first_time) / 5000000;
91
92 if (new_width > svg_page_width)
93 svg_page_width = new_width;
94
95 total_height = (1 + rows + cpu2slot(cpus)) * SLOT_MULT;
96 fprintf(svgfile, "<?xml version=\"1.0\" standalone=\"no\"?> \n");
97 fprintf(svgfile, "<svg width=\"%i\" height=\"%llu\" version=\"1.1\" xmlns=\"http://www.w3.org/2000/svg\">\n", svg_page_width, total_height);
98
99 fprintf(svgfile, "<defs>\n <style type=\"text/css\">\n <![CDATA[\n");
100
101 fprintf(svgfile, " rect { stroke-width: 1; }\n");
102 fprintf(svgfile, " rect.process { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:1; stroke:rgb( 0, 0, 0); } \n");
103 fprintf(svgfile, " rect.process2 { fill:rgb(180,180,180); fill-opacity:0.9; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
104 fprintf(svgfile, " rect.sample { fill:rgb( 0, 0,255); fill-opacity:0.8; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
105 fprintf(svgfile, " rect.blocked { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
106 fprintf(svgfile, " rect.waiting { fill:rgb(214,214, 0); fill-opacity:0.3; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
107 fprintf(svgfile, " rect.WAITING { fill:rgb(255,214, 48); fill-opacity:0.6; stroke-width:0; stroke:rgb( 0, 0, 0); } \n");
108 fprintf(svgfile, " rect.cpu { fill:rgb(192,192,192); fill-opacity:0.2; stroke-width:0.5; stroke:rgb(128,128,128); } \n");
109 fprintf(svgfile, " rect.pstate { fill:rgb(128,128,128); fill-opacity:0.8; stroke-width:0; } \n");
110 fprintf(svgfile, " rect.c1 { fill:rgb(255,214,214); fill-opacity:0.5; stroke-width:0; } \n");
111 fprintf(svgfile, " rect.c2 { fill:rgb(255,172,172); fill-opacity:0.5; stroke-width:0; } \n");
112 fprintf(svgfile, " rect.c3 { fill:rgb(255,130,130); fill-opacity:0.5; stroke-width:0; } \n");
113 fprintf(svgfile, " rect.c4 { fill:rgb(255, 88, 88); fill-opacity:0.5; stroke-width:0; } \n");
114 fprintf(svgfile, " rect.c5 { fill:rgb(255, 44, 44); fill-opacity:0.5; stroke-width:0; } \n");
115 fprintf(svgfile, " rect.c6 { fill:rgb(255, 0, 0); fill-opacity:0.5; stroke-width:0; } \n");
116 fprintf(svgfile, " line.pstate { stroke:rgb(255,255, 0); stroke-opacity:0.8; stroke-width:2; } \n");
117
118 fprintf(svgfile, " ]]>\n </style>\n</defs>\n");
119}
120
121void svg_box(int Yslot, u64 start, u64 end, const char *type)
122{
123 if (!svgfile)
124 return;
125
126 fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"%s\"/>\n",
127 time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT, type);
128}
129
130void svg_sample(int Yslot, int cpu, u64 start, u64 end)
131{
132 double text_size;
133 if (!svgfile)
134 return;
135
136 fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"sample\"/>\n",
137 time2pixels(start), time2pixels(end)-time2pixels(start), Yslot * SLOT_MULT, SLOT_HEIGHT);
138
139 text_size = (time2pixels(end)-time2pixels(start));
140 if (cpu > 9)
141 text_size = text_size/2;
142 if (text_size > 1.25)
143 text_size = 1.25;
144 text_size = round_text_size(text_size);
145
146 if (text_size > MIN_TEXT_SIZE)
147 fprintf(svgfile, "<text x=\"%1.8f\" y=\"%1.8f\" font-size=\"%1.8fpt\">%i</text>\n",
148 time2pixels(start), Yslot * SLOT_MULT + SLOT_HEIGHT - 1, text_size, cpu + 1);
149
150}
151
152static char *time_to_string(u64 duration)
153{
154 static char text[80];
155
156 text[0] = 0;
157
158 if (duration < 1000) /* less than 1 usec */
159 return text;
160
161 if (duration < 1000 * 1000) { /* less than 1 msec */
162 sprintf(text, "%4.1f us", duration / 1000.0);
163 return text;
164 }
165 sprintf(text, "%4.1f ms", duration / 1000.0 / 1000);
166
167 return text;
168}
169
170void svg_waiting(int Yslot, u64 start, u64 end)
171{
172 char *text;
173 const char *style;
174 double font_size;
175
176 if (!svgfile)
177 return;
178
179 style = "waiting";
180
181 if (end-start > 10 * 1000000) /* 10 msec */
182 style = "WAITING";
183
184 text = time_to_string(end-start);
185
186 font_size = 1.0 * (time2pixels(end)-time2pixels(start));
187
188 if (font_size > 3)
189 font_size = 3;
190
191 font_size = round_text_size(font_size);
192
193 fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), Yslot * SLOT_MULT);
194 fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
195 time2pixels(end)-time2pixels(start), SLOT_HEIGHT, style);
196 if (font_size > MIN_TEXT_SIZE)
197 fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%1.8fpt\"> %s</text>\n",
198 font_size, text);
199 fprintf(svgfile, "</g>\n");
200}
201
202static char *cpu_model(void)
203{
204 static char cpu_m[255];
205 char buf[256];
206 FILE *file;
207
208 cpu_m[0] = 0;
209 /* CPU type */
210 file = fopen("/proc/cpuinfo", "r");
211 if (file) {
212 while (fgets(buf, 255, file)) {
213 if (strstr(buf, "model name")) {
214 strncpy(cpu_m, &buf[13], 255);
215 break;
216 }
217 }
218 fclose(file);
219 }
220 return cpu_m;
221}
222
223void svg_cpu_box(int cpu, u64 __max_freq, u64 __turbo_freq)
224{
225 char cpu_string[80];
226 if (!svgfile)
227 return;
228
229 max_freq = __max_freq;
230 turbo_frequency = __turbo_freq;
231
232 fprintf(svgfile, "<rect x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\" class=\"cpu\"/>\n",
233 time2pixels(first_time),
234 time2pixels(last_time)-time2pixels(first_time),
235 cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
236
237 sprintf(cpu_string, "CPU %i", (int)cpu+1);
238 fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
239 10+time2pixels(first_time), cpu2y(cpu) + SLOT_HEIGHT/2, cpu_string);
240
241 fprintf(svgfile, "<text transform=\"translate(%4.8f,%4.8f)\" font-size=\"1.25pt\">%s</text>\n",
242 10+time2pixels(first_time), cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - 4, cpu_model());
243}
244
245void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name)
246{
247 double width;
248
249 if (!svgfile)
250 return;
251
252
253 fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\">\n", time2pixels(start), cpu2y(cpu));
254 fprintf(svgfile, "<rect x=\"0\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
255 time2pixels(end)-time2pixels(start), SLOT_MULT+SLOT_HEIGHT, type);
256 width = time2pixels(end)-time2pixels(start);
257 if (width > 6)
258 width = 6;
259
260 width = round_text_size(width);
261
262 if (width > MIN_TEXT_SIZE)
263 fprintf(svgfile, "<text transform=\"rotate(90)\" font-size=\"%3.8fpt\">%s</text>\n",
264 width, name);
265
266 fprintf(svgfile, "</g>\n");
267}
268
269void svg_cstate(int cpu, u64 start, u64 end, int type)
270{
271 double width;
272 char style[128];
273
274 if (!svgfile)
275 return;
276
277
278 if (type > 6)
279 type = 6;
280 sprintf(style, "c%i", type);
281
282 fprintf(svgfile, "<rect class=\"%s\" x=\"%4.8f\" width=\"%4.8f\" y=\"%4.1f\" height=\"%4.1f\"/>\n",
283 style,
284 time2pixels(start), time2pixels(end)-time2pixels(start),
285 cpu2y(cpu), SLOT_MULT+SLOT_HEIGHT);
286
287 width = (time2pixels(end)-time2pixels(start))/2.0;
288 if (width > 6)
289 width = 6;
290
291 width = round_text_size(width);
292
293 if (width > MIN_TEXT_SIZE)
294 fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"%3.8fpt\">C%i</text>\n",
295 time2pixels(start), cpu2y(cpu)+width, width, type);
296}
297
298static char *HzToHuman(unsigned long hz)
299{
300 static char buffer[1024];
301 unsigned long long Hz;
302
303 memset(buffer, 0, 1024);
304
305 Hz = hz;
306
307 /* default: just put the Number in */
308 sprintf(buffer, "%9lli", Hz);
309
310 if (Hz > 1000)
311 sprintf(buffer, " %6lli Mhz", (Hz+500)/1000);
312
313 if (Hz > 1500000)
314 sprintf(buffer, " %6.2f Ghz", (Hz+5000.0)/1000000);
315
316 if (Hz == turbo_frequency)
317 sprintf(buffer, "Turbo");
318
319 return buffer;
320}
321
322void svg_pstate(int cpu, u64 start, u64 end, u64 freq)
323{
324 double height = 0;
325
326 if (!svgfile)
327 return;
328
329 if (max_freq)
330 height = freq * 1.0 / max_freq * (SLOT_HEIGHT + SLOT_MULT);
331 height = 1 + cpu2y(cpu) + SLOT_MULT + SLOT_HEIGHT - height;
332 fprintf(svgfile, "<line x1=\"%4.8f\" x2=\"%4.8f\" y1=\"%4.1f\" y2=\"%4.1f\" class=\"pstate\"/>\n",
333 time2pixels(start), time2pixels(end), height, height);
334 fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\" font-size=\"0.25pt\">%s</text>\n",
335 time2pixels(start), height+0.9, HzToHuman(freq));
336
337}
338
339
340void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2)
341{
342 double height;
343
344 if (!svgfile)
345 return;
346
347
348 if (row1 < row2) {
349 if (row1) {
350 fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
351 time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
352 if (desc2)
353 fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
354 time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT + SLOT_HEIGHT/48, desc2);
355 }
356 if (row2) {
357 fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
358 time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row2 * SLOT_MULT);
359 if (desc1)
360 fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &gt;</text></g>\n",
361 time2pixels(start), row2 * SLOT_MULT - SLOT_MULT/32, desc1);
362 }
363 } else {
364 if (row2) {
365 fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
366 time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/32);
367 if (desc1)
368 fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
369 time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT + SLOT_MULT/48, desc1);
370 }
371 if (row1) {
372 fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
373 time2pixels(start), row1 * SLOT_MULT - SLOT_MULT/32, time2pixels(start), row1 * SLOT_MULT);
374 if (desc2)
375 fprintf(svgfile, "<g transform=\"translate(%4.8f,%4.8f)\"><text transform=\"rotate(90)\" font-size=\"0.02pt\">%s &lt;</text></g>\n",
376 time2pixels(start), row1 * SLOT_MULT - SLOT_HEIGHT/32, desc2);
377 }
378 }
379 height = row1 * SLOT_MULT;
380 if (row2 > row1)
381 height += SLOT_HEIGHT;
382 if (row1)
383 fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
384 time2pixels(start), height);
385}
386
387void svg_wakeline(u64 start, int row1, int row2)
388{
389 double height;
390
391 if (!svgfile)
392 return;
393
394
395 if (row1 < row2)
396 fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
397 time2pixels(start), row1 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row2 * SLOT_MULT);
398 else
399 fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%4.2f\" style=\"stroke:rgb(32,255,32);stroke-width:0.009\"/>\n",
400 time2pixels(start), row2 * SLOT_MULT + SLOT_HEIGHT, time2pixels(start), row1 * SLOT_MULT);
401
402 height = row1 * SLOT_MULT;
403 if (row2 > row1)
404 height += SLOT_HEIGHT;
405 fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(32,255,32)\"/>\n",
406 time2pixels(start), height);
407}
408
409void svg_interrupt(u64 start, int row)
410{
411 if (!svgfile)
412 return;
413
414 fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
415 time2pixels(start), row * SLOT_MULT);
416 fprintf(svgfile, "<circle cx=\"%4.8f\" cy=\"%4.2f\" r = \"0.01\" style=\"fill:rgb(255,128,128)\"/>\n",
417 time2pixels(start), row * SLOT_MULT + SLOT_HEIGHT);
418}
419
420void svg_text(int Yslot, u64 start, const char *text)
421{
422 if (!svgfile)
423 return;
424
425 fprintf(svgfile, "<text x=\"%4.8f\" y=\"%4.8f\">%s</text>\n",
426 time2pixels(start), Yslot * SLOT_MULT+SLOT_HEIGHT/2, text);
427}
428
429static void svg_legenda_box(int X, const char *text, const char *style)
430{
431 double boxsize;
432 boxsize = SLOT_HEIGHT / 2;
433
434 fprintf(svgfile, "<rect x=\"%i\" width=\"%4.8f\" y=\"0\" height=\"%4.1f\" class=\"%s\"/>\n",
435 X, boxsize, boxsize, style);
436 fprintf(svgfile, "<text transform=\"translate(%4.8f, %4.8f)\" font-size=\"%4.8fpt\">%s</text>\n",
437 X + boxsize + 5, boxsize, 0.8 * boxsize, text);
438}
439
440void svg_legenda(void)
441{
442 if (!svgfile)
443 return;
444
445 svg_legenda_box(0, "Running", "sample");
 446 svg_legenda_box(100, "Idle", "rect.c1");
447 svg_legenda_box(200, "Deeper Idle", "rect.c3");
448 svg_legenda_box(350, "Deepest Idle", "rect.c6");
449 svg_legenda_box(550, "Sleeping", "process2");
450 svg_legenda_box(650, "Waiting for cpu", "waiting");
451 svg_legenda_box(800, "Blocked on IO", "blocked");
452}
453
454void svg_time_grid(void)
455{
456 u64 i;
457
458 if (!svgfile)
459 return;
460
461 i = first_time;
462 while (i < last_time) {
463 int color = 220;
464 double thickness = 0.075;
465 if ((i % 100000000) == 0) {
466 thickness = 0.5;
467 color = 192;
468 }
469 if ((i % 1000000000) == 0) {
470 thickness = 2.0;
471 color = 128;
472 }
473
474 fprintf(svgfile, "<line x1=\"%4.8f\" y1=\"%4.2f\" x2=\"%4.8f\" y2=\"%llu\" style=\"stroke:rgb(%i,%i,%i);stroke-width:%1.3f\"/>\n",
475 time2pixels(i), SLOT_MULT/2, time2pixels(i), total_height, color, color, color, thickness);
476
477 i += 10000000;
478 }
479}
480
481void svg_close(void)
482{
483 if (svgfile) {
484 fprintf(svgfile, "</svg>\n");
485 fclose(svgfile);
486 svgfile = NULL;
487 }
488}
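
The grid loop in svg_time_grid() above works in nanoseconds: it steps in 10,000,000 ns (10 ms) increments, thickens and darkens the line on 100 ms boundaries, and draws the heaviest line on whole seconds. A minimal sketch of that tick-weight selection, factored into a helper for clarity (the helper name is invented here, not part of the file):

        /* Hypothetical helper mirroring the tick-weight logic of svg_time_grid(). */
        static void grid_line_style(u64 ns, int *color, double *thickness)
        {
                *color = 220;                    /* light: 10 ms minor tick */
                *thickness = 0.075;
                if ((ns % 100000000ULL) == 0) {  /* 100 ms boundary */
                        *color = 192;
                        *thickness = 0.5;
                }
                if ((ns % 1000000000ULL) == 0) { /* whole second */
                        *color = 128;
                        *thickness = 2.0;
                }
        }
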
diff --git a/tools/perf/util/svghelper.h b/tools/perf/util/svghelper.h
new file mode 100644
index 000000000000..cd93195aedb3
--- /dev/null
+++ b/tools/perf/util/svghelper.h
@@ -0,0 +1,28 @@
1#ifndef _INCLUDE_GUARD_SVG_HELPER_
2#define _INCLUDE_GUARD_SVG_HELPER_
3
4#include "types.h"
5
6extern void open_svg(const char *filename, int cpus, int rows, u64 start, u64 end);
7extern void svg_box(int Yslot, u64 start, u64 end, const char *type);
8extern void svg_sample(int Yslot, int cpu, u64 start, u64 end);
9extern void svg_waiting(int Yslot, u64 start, u64 end);
10extern void svg_cpu_box(int cpu, u64 max_frequency, u64 turbo_frequency);
11
12
13extern void svg_process(int cpu, u64 start, u64 end, const char *type, const char *name);
14extern void svg_cstate(int cpu, u64 start, u64 end, int type);
15extern void svg_pstate(int cpu, u64 start, u64 end, u64 freq);
16
17
18extern void svg_time_grid(void);
19extern void svg_legenda(void);
20extern void svg_wakeline(u64 start, int row1, int row2);
21extern void svg_partial_wakeline(u64 start, int row1, char *desc1, int row2, char *desc2);
22extern void svg_interrupt(u64 start, int row);
23extern void svg_text(int Yslot, u64 start, const char *text);
24extern void svg_close(void);
25
26extern int svg_page_width;
27
28#endif
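
The header collects the whole drawing API in one place, which suggests the intended call order: open_svg() first to set up the canvas and time range, the per-row and per-cpu drawing calls next, then the overlays and svg_close(). A minimal driver sketch under that assumption; every filename, timestamp, row and frequency below is invented:

        #include "util/svghelper.h"     /* include path is illustrative */

        /* Hypothetical driver; the values are for illustration only. */
        static void write_demo_chart(void)
        {
                open_svg("demo.svg", 1 /* cpus */, 2 /* rows */,
                         1000000000ULL, 2000000000ULL);   /* 1 s .. 2 s, in ns */

                svg_cpu_box(0, 3000000000ULL, 3400000000ULL); /* max/turbo freq */
                svg_sample(0 /* Yslot */, 0 /* cpu */,
                           1100000000ULL, 1200000000ULL);     /* running */
                svg_process(0, 1200000000ULL, 1500000000ULL, "sample", "bash");
                svg_wakeline(1300000000ULL, 0, 1);

                svg_time_grid();
                svg_legenda();
                svg_close();
        }
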
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 7635928ca278..45efb5db0d19 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -8,7 +8,7 @@
 
 static struct thread *thread__new(pid_t pid)
 {
-	struct thread *self = malloc(sizeof(*self));
+	struct thread *self = calloc(1, sizeof(*self));
 
 	if (self != NULL) {
 		self->pid = pid;
@@ -85,7 +85,7 @@ register_idle_thread(struct rb_root *threads, struct thread **last_match)
 {
 	struct thread *thread = threads__findnew(0, threads, last_match);
 
-	if (!thread || thread__set_comm(thread, "[init]")) {
+	if (!thread || thread__set_comm(thread, "swapper")) {
 		fprintf(stderr, "problem inserting idle task.\n");
 		exit(-1);
 	}
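
The allocator switch above is not cosmetic: calloc() zero-fills the structure, so every field of a new struct thread starts in a defined state without field-by-field initialization. That matters once thread.h (next hunk) grows a shortname[3] array. A sketch of the difference, for illustration only:

        struct thread *t;

        t = malloc(sizeof(*t));    /* t->shortname contents are undefined */
        t = calloc(1, sizeof(*t)); /* zero-filled: shortname is "" and comm is NULL */

The "[init]" to "swapper" rename matches what the kernel itself calls PID 0, so the idle task now lines up with the comm that appears in the recorded events.
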
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 634f2809a342..32aea3c1c2ad 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -4,10 +4,11 @@
 #include "symbol.h"
 
 struct thread {
 	struct rb_node		rb_node;
 	struct list_head	maps;
 	pid_t			pid;
+	char			shortname[3];
 	char			*comm;
 };
 
 int thread__set_comm(struct thread *self, const char *comm);
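
shortname[3] leaves room for two printable characters plus the terminating NUL, enough for a compact per-task label in a dense view. A hedged sketch of filling it; the labelling scheme here is an assumption for illustration, not taken from this diff:

        #include <stdio.h>

        /* Hypothetical: derive a two-character label from a running counter n. */
        static void thread__set_shortname(struct thread *self, int n)
        {
                snprintf(self->shortname, sizeof(self->shortname), "%c%c",
                         'A' + (n / 10) % 26, '0' + (n % 10));
        }

Because thread__new() now calloc()s the structure, the field reads as an empty string until something fills it in.
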
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index 6c9302a7274c..af4b0573b37f 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -458,7 +458,7 @@ static void read_proc_kallsyms(void)
 static void read_ftrace_printk(void)
 {
 	unsigned int size, check_size;
-	const char *path;
+	char *path;
 	struct stat st;
 	int ret;
 
@@ -468,23 +468,24 @@ static void read_ftrace_printk(void)
 		/* not found */
 		size = 0;
 		write_or_die(&size, 4);
-		return;
+		goto out;
 	}
 	size = get_size(path);
 	write_or_die(&size, 4);
 	check_size = copy_file(path);
 	if (size != check_size)
 		die("error in size of file '%s'", path);
-
+out:
+	put_tracing_file(path);
 }
 
 static struct tracepoint_path *
-get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters)
+get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
 {
 	struct tracepoint_path path, *ppath = &path;
 	int i;
 
-	for (i = 0; i < nb_counters; i++) {
+	for (i = 0; i < nb_events; i++) {
 		if (pattrs[i].type != PERF_TYPE_TRACEPOINT)
 			continue;
 		ppath->next = tracepoint_id_to_path(pattrs[i].config);
@@ -495,7 +496,7 @@ get_tracepoints_path(struct perf_counter_attr *pattrs, int nb_counters)
 
 	return path.next;
 }
-void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters)
+void read_tracing_data(struct perf_event_attr *pattrs, int nb_events)
 {
 	char buf[BUFSIZ];
 	struct tracepoint_path *tps;
@@ -529,7 +530,7 @@ void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters)
 	page_size = getpagesize();
 	write_or_die(&page_size, 4);
 
-	tps = get_tracepoints_path(pattrs, nb_counters);
+	tps = get_tracepoints_path(pattrs, nb_events);
 
 	read_header_files();
 	read_ftrace_files(tps);
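
Two things change together in read_ftrace_printk(): path loses its const qualifier because it is now treated as an owned string, and the early return becomes a goto so that put_tracing_file() runs on every exit path instead of leaking the buffer on the "not found" branch. Condensed, the resulting shape is the usual single-release pattern; the sketch below assumes, as elsewhere in this file, that the path comes from get_tracing_file():

        /* Hypothetical condensed form of the fixed function. */
        static void read_one_tracing_file(void)
        {
                char *path = get_tracing_file("printk_formats"); /* allocated */
                unsigned int size;
                struct stat st;

                if (stat(path, &st) < 0) {
                        size = 0;               /* not found: record a zero size */
                        write_or_die(&size, 4);
                        goto out;               /* was "return;", which leaked path */
                }
                size = get_size(path);
                write_or_die(&size, 4);
                if (size != copy_file(path))
                        die("error in size of file '%s'", path);
        out:
                put_tracing_file(path);         /* single release point */
        }

The perf_counter_attr/nb_counters to perf_event_attr/nb_events renames in the remaining hunks track the tree-wide "perf counters" to "perf events" rename; the matching prototype update appears in trace-event.h at the end of this diff.
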
diff --git a/tools/perf/util/trace-event-parse.c b/tools/perf/util/trace-event-parse.c
index 629e602d9405..f6a8437141c8 100644
--- a/tools/perf/util/trace-event-parse.c
+++ b/tools/perf/util/trace-event-parse.c
@@ -1776,6 +1776,29 @@ static unsigned long long read_size(void *ptr, int size)
 	}
 }
 
+unsigned long long
+raw_field_value(struct event *event, const char *name, void *data)
+{
+	struct format_field *field;
+
+	field = find_any_field(event, name);
+	if (!field)
+		return 0ULL;
+
+	return read_size(data + field->offset, field->size);
+}
+
+void *raw_field_ptr(struct event *event, const char *name, void *data)
+{
+	struct format_field *field;
+
+	field = find_any_field(event, name);
+	if (!field)
+		return NULL;
+
+	return data + field->offset;
+}
+
 static int get_common_info(const char *type, int *offset, int *size)
 {
 	struct event *event;
@@ -1799,7 +1822,7 @@ static int get_common_info(const char *type, int *offset, int *size)
 	return 0;
 }
 
-static int parse_common_type(void *data)
+int trace_parse_common_type(void *data)
 {
 	static int type_offset;
 	static int type_size;
@@ -1832,7 +1855,7 @@ static int parse_common_pid(void *data)
 	return read_size(data + pid_offset, pid_size);
 }
 
-static struct event *find_event(int id)
+struct event *trace_find_event(int id)
 {
 	struct event *event;
 
@@ -2420,8 +2443,8 @@ get_return_for_leaf(int cpu, int cur_pid, unsigned long long cur_func,
 	int type;
 	int pid;
 
-	type = parse_common_type(next->data);
-	event = find_event(type);
+	type = trace_parse_common_type(next->data);
+	event = trace_find_event(type);
 	if (!event)
 		return NULL;
 
@@ -2502,8 +2525,8 @@ print_graph_entry_leaf(struct event *event, void *data, struct record *ret_rec)
 	int type;
 	int i;
 
-	type = parse_common_type(ret_rec->data);
-	ret_event = find_event(type);
+	type = trace_parse_common_type(ret_rec->data);
+	ret_event = trace_find_event(type);
 
 	field = find_field(ret_event, "rettime");
 	if (!field)
@@ -2696,11 +2719,13 @@ void print_event(int cpu, void *data, int size, unsigned long long nsecs,
 	nsecs -= secs * NSECS_PER_SEC;
 	usecs = nsecs / NSECS_PER_USEC;
 
-	type = parse_common_type(data);
+	type = trace_parse_common_type(data);
 
-	event = find_event(type);
-	if (!event)
-		die("ug! no event found for type %d", type);
+	event = trace_find_event(type);
+	if (!event) {
+		printf("ug! no event found for type %d\n", type);
+		return;
+	}
 
 	pid = parse_common_pid(data);
 
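
The pattern behind these hunks: helpers that were file-local statics (parse_common_type(), find_event()) are renamed with a trace_ prefix and exported, and the two new accessors raw_field_value() and raw_field_ptr() let other tools pull fields out of a raw tracepoint record by name instead of hard-coding offsets. A minimal consumer sketch; the "comm" and "pid" field names are assumptions about a wakeup-style event format, not something this diff guarantees:

        #include <stdio.h>

        /* Hypothetical handler for one raw tracepoint record. */
        static void handle_record(void *data)
        {
                int type = trace_parse_common_type(data);
                struct event *event = trace_find_event(type);
                char *comm;

                if (!event)
                        return;
                comm = raw_field_ptr(event, "comm", data);
                if (comm)
                        printf("%s woke pid %llu\n", comm,
                               raw_field_value(event, "pid", data));
        }

Note the related hardening in print_event(): an unknown type id is now a printed warning plus an early return rather than a die(), so one malformed record no longer aborts the whole report.
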
diff --git a/tools/perf/util/trace-event-read.c b/tools/perf/util/trace-event-read.c
index a1217a10632f..1b5c847d2c22 100644
--- a/tools/perf/util/trace-event-read.c
+++ b/tools/perf/util/trace-event-read.c
@@ -458,12 +458,13 @@ struct record *trace_read_data(int cpu)
 	return data;
 }
 
-void trace_report (void)
+void trace_report(void)
 {
 	const char *input_file = "trace.info";
 	char buf[BUFSIZ];
 	char test[] = { 23, 8, 68 };
 	char *version;
+	int show_version = 0;
 	int show_funcs = 0;
 	int show_printk = 0;
 
@@ -480,7 +481,8 @@ void trace_report (void)
 		die("not a trace file (missing tracing)");
 
 	version = read_string();
-	printf("version = %s\n", version);
+	if (show_version)
+		printf("version = %s\n", version);
 	free(version);
 
 	read_or_die(buf, 1);
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index 420294a5773e..693f815c9429 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -234,7 +234,12 @@ extern int header_page_data_offset;
 extern int header_page_data_size;
 
 int parse_header_page(char *buf, unsigned long size);
+int trace_parse_common_type(void *data);
+struct event *trace_find_event(int id);
+unsigned long long
+raw_field_value(struct event *event, const char *name, void *data);
+void *raw_field_ptr(struct event *event, const char *name, void *data);
 
-void read_tracing_data(struct perf_counter_attr *pattrs, int nb_counters);
+void read_tracing_data(struct perf_event_attr *pattrs, int nb_events);
 
 #endif /* _TRACE_EVENTS_H */