diff options
author | Arnaldo Carvalho de Melo <acme@redhat.com> | 2011-01-03 13:39:04 -0500 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2011-01-03 13:39:04 -0500 |
commit | 69aad6f1ee69546dea8535ab8f3da9f445d57328 (patch) | |
tree | b328ec140a6a90703a049fcc661d623025d7e81f /tools/perf | |
parent | 56f4c400349157289b474a3fd49ee96acab0a4d7 (diff) |
perf tools: Introduce event selectors
Out of ad-hoc code and global arrays with hard coded sizes.
This is the first step on having a library that will be first
used on regression tests in the 'perf test' tool.
[acme@felicio linux]$ size /tmp/perf.before
text data bss dec hex filename
1273776 97384 5104416 6475576 62cf38 /tmp/perf.before
[acme@felicio linux]$ size /tmp/perf.new
text data bss dec hex filename
1275422 97416 1392416 2765254 2a31c6 /tmp/perf.new
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools/perf')
-rw-r--r-- | tools/perf/Makefile | 4 | ||||
-rw-r--r-- | tools/perf/builtin-record.c | 113 | ||||
-rw-r--r-- | tools/perf/builtin-stat.c | 175 | ||||
-rw-r--r-- | tools/perf/builtin-top.c | 176 | ||||
-rw-r--r-- | tools/perf/util/evsel.c | 35 | ||||
-rw-r--r-- | tools/perf/util/evsel.h | 24 | ||||
-rw-r--r-- | tools/perf/util/header.c | 9 | ||||
-rw-r--r-- | tools/perf/util/header.h | 3 | ||||
-rw-r--r-- | tools/perf/util/parse-events.c | 47 | ||||
-rw-r--r-- | tools/perf/util/parse-events.h | 17 | ||||
-rw-r--r-- | tools/perf/util/trace-event-info.c | 30 | ||||
-rw-r--r-- | tools/perf/util/trace-event.h | 5 | ||||
-rw-r--r-- | tools/perf/util/xyarray.c | 20 | ||||
-rw-r--r-- | tools/perf/util/xyarray.h | 20 |
14 files changed, 433 insertions, 245 deletions
diff --git a/tools/perf/Makefile b/tools/perf/Makefile index ac6692cf5508..1b9b13ee2a72 100644 --- a/tools/perf/Makefile +++ b/tools/perf/Makefile | |||
@@ -396,6 +396,7 @@ LIB_H += util/build-id.h | |||
396 | LIB_H += util/debug.h | 396 | LIB_H += util/debug.h |
397 | LIB_H += util/debugfs.h | 397 | LIB_H += util/debugfs.h |
398 | LIB_H += util/event.h | 398 | LIB_H += util/event.h |
399 | LIB_H += util/evsel.h | ||
399 | LIB_H += util/exec_cmd.h | 400 | LIB_H += util/exec_cmd.h |
400 | LIB_H += util/types.h | 401 | LIB_H += util/types.h |
401 | LIB_H += util/levenshtein.h | 402 | LIB_H += util/levenshtein.h |
@@ -404,6 +405,7 @@ LIB_H += util/parse-options.h | |||
404 | LIB_H += util/parse-events.h | 405 | LIB_H += util/parse-events.h |
405 | LIB_H += util/quote.h | 406 | LIB_H += util/quote.h |
406 | LIB_H += util/util.h | 407 | LIB_H += util/util.h |
408 | LIB_H += util/xyarray.h | ||
407 | LIB_H += util/header.h | 409 | LIB_H += util/header.h |
408 | LIB_H += util/help.h | 410 | LIB_H += util/help.h |
409 | LIB_H += util/session.h | 411 | LIB_H += util/session.h |
@@ -433,6 +435,7 @@ LIB_OBJS += $(OUTPUT)util/ctype.o | |||
433 | LIB_OBJS += $(OUTPUT)util/debugfs.o | 435 | LIB_OBJS += $(OUTPUT)util/debugfs.o |
434 | LIB_OBJS += $(OUTPUT)util/environment.o | 436 | LIB_OBJS += $(OUTPUT)util/environment.o |
435 | LIB_OBJS += $(OUTPUT)util/event.o | 437 | LIB_OBJS += $(OUTPUT)util/event.o |
438 | LIB_OBJS += $(OUTPUT)util/evsel.o | ||
436 | LIB_OBJS += $(OUTPUT)util/exec_cmd.o | 439 | LIB_OBJS += $(OUTPUT)util/exec_cmd.o |
437 | LIB_OBJS += $(OUTPUT)util/help.o | 440 | LIB_OBJS += $(OUTPUT)util/help.o |
438 | LIB_OBJS += $(OUTPUT)util/levenshtein.o | 441 | LIB_OBJS += $(OUTPUT)util/levenshtein.o |
@@ -470,6 +473,7 @@ LIB_OBJS += $(OUTPUT)util/sort.o | |||
470 | LIB_OBJS += $(OUTPUT)util/hist.o | 473 | LIB_OBJS += $(OUTPUT)util/hist.o |
471 | LIB_OBJS += $(OUTPUT)util/probe-event.o | 474 | LIB_OBJS += $(OUTPUT)util/probe-event.o |
472 | LIB_OBJS += $(OUTPUT)util/util.o | 475 | LIB_OBJS += $(OUTPUT)util/util.o |
476 | LIB_OBJS += $(OUTPUT)util/xyarray.o | ||
473 | LIB_OBJS += $(OUTPUT)util/cpumap.o | 477 | LIB_OBJS += $(OUTPUT)util/cpumap.o |
474 | 478 | ||
475 | BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o | 479 | BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o |
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c index 50efbd509b8f..e68aee33bc19 100644 --- a/tools/perf/builtin-record.c +++ b/tools/perf/builtin-record.c | |||
@@ -18,6 +18,7 @@ | |||
18 | 18 | ||
19 | #include "util/header.h" | 19 | #include "util/header.h" |
20 | #include "util/event.h" | 20 | #include "util/event.h" |
21 | #include "util/evsel.h" | ||
21 | #include "util/debug.h" | 22 | #include "util/debug.h" |
22 | #include "util/session.h" | 23 | #include "util/session.h" |
23 | #include "util/symbol.h" | 24 | #include "util/symbol.h" |
@@ -27,13 +28,13 @@ | |||
27 | #include <sched.h> | 28 | #include <sched.h> |
28 | #include <sys/mman.h> | 29 | #include <sys/mman.h> |
29 | 30 | ||
31 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) | ||
32 | |||
30 | enum write_mode_t { | 33 | enum write_mode_t { |
31 | WRITE_FORCE, | 34 | WRITE_FORCE, |
32 | WRITE_APPEND | 35 | WRITE_APPEND |
33 | }; | 36 | }; |
34 | 37 | ||
35 | static int *fd[MAX_NR_CPUS][MAX_COUNTERS]; | ||
36 | |||
37 | static u64 user_interval = ULLONG_MAX; | 38 | static u64 user_interval = ULLONG_MAX; |
38 | static u64 default_interval = 0; | 39 | static u64 default_interval = 0; |
39 | static u64 sample_type; | 40 | static u64 sample_type; |
@@ -81,7 +82,6 @@ static struct perf_session *session; | |||
81 | static const char *cpu_list; | 82 | static const char *cpu_list; |
82 | 83 | ||
83 | struct mmap_data { | 84 | struct mmap_data { |
84 | int counter; | ||
85 | void *base; | 85 | void *base; |
86 | unsigned int mask; | 86 | unsigned int mask; |
87 | unsigned int prev; | 87 | unsigned int prev; |
@@ -229,12 +229,12 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n | |||
229 | return h_attr; | 229 | return h_attr; |
230 | } | 230 | } |
231 | 231 | ||
232 | static void create_counter(int counter, int cpu) | 232 | static void create_counter(struct perf_evsel *evsel, int cpu) |
233 | { | 233 | { |
234 | char *filter = filters[counter]; | 234 | char *filter = evsel->filter; |
235 | struct perf_event_attr *attr = attrs + counter; | 235 | struct perf_event_attr *attr = &evsel->attr; |
236 | struct perf_header_attr *h_attr; | 236 | struct perf_header_attr *h_attr; |
237 | int track = !counter; /* only the first counter needs these */ | 237 | int track = !evsel->idx; /* only the first counter needs these */ |
238 | int thread_index; | 238 | int thread_index; |
239 | int ret; | 239 | int ret; |
240 | struct { | 240 | struct { |
@@ -320,10 +320,9 @@ retry_sample_id: | |||
320 | 320 | ||
321 | for (thread_index = 0; thread_index < thread_num; thread_index++) { | 321 | for (thread_index = 0; thread_index < thread_num; thread_index++) { |
322 | try_again: | 322 | try_again: |
323 | fd[nr_cpu][counter][thread_index] = sys_perf_event_open(attr, | 323 | FD(evsel, nr_cpu, thread_index) = sys_perf_event_open(attr, all_tids[thread_index], cpu, group_fd, 0); |
324 | all_tids[thread_index], cpu, group_fd, 0); | ||
325 | 324 | ||
326 | if (fd[nr_cpu][counter][thread_index] < 0) { | 325 | if (FD(evsel, nr_cpu, thread_index) < 0) { |
327 | int err = errno; | 326 | int err = errno; |
328 | 327 | ||
329 | if (err == EPERM || err == EACCES) | 328 | if (err == EPERM || err == EACCES) |
@@ -360,7 +359,7 @@ try_again: | |||
360 | } | 359 | } |
361 | printf("\n"); | 360 | printf("\n"); |
362 | error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n", | 361 | error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n", |
363 | fd[nr_cpu][counter][thread_index], strerror(err)); | 362 | FD(evsel, nr_cpu, thread_index), strerror(err)); |
364 | 363 | ||
365 | #if defined(__i386__) || defined(__x86_64__) | 364 | #if defined(__i386__) || defined(__x86_64__) |
366 | if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP) | 365 | if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP) |
@@ -374,7 +373,7 @@ try_again: | |||
374 | exit(-1); | 373 | exit(-1); |
375 | } | 374 | } |
376 | 375 | ||
377 | h_attr = get_header_attr(attr, counter); | 376 | h_attr = get_header_attr(attr, evsel->idx); |
378 | if (h_attr == NULL) | 377 | if (h_attr == NULL) |
379 | die("nomem\n"); | 378 | die("nomem\n"); |
380 | 379 | ||
@@ -385,7 +384,7 @@ try_again: | |||
385 | } | 384 | } |
386 | } | 385 | } |
387 | 386 | ||
388 | if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) { | 387 | if (read(FD(evsel, nr_cpu, thread_index), &read_data, sizeof(read_data)) == -1) { |
389 | perror("Unable to read perf file descriptor"); | 388 | perror("Unable to read perf file descriptor"); |
390 | exit(-1); | 389 | exit(-1); |
391 | } | 390 | } |
@@ -395,43 +394,44 @@ try_again: | |||
395 | exit(-1); | 394 | exit(-1); |
396 | } | 395 | } |
397 | 396 | ||
398 | assert(fd[nr_cpu][counter][thread_index] >= 0); | 397 | assert(FD(evsel, nr_cpu, thread_index) >= 0); |
399 | fcntl(fd[nr_cpu][counter][thread_index], F_SETFL, O_NONBLOCK); | 398 | fcntl(FD(evsel, nr_cpu, thread_index), F_SETFL, O_NONBLOCK); |
400 | 399 | ||
401 | /* | 400 | /* |
402 | * First counter acts as the group leader: | 401 | * First counter acts as the group leader: |
403 | */ | 402 | */ |
404 | if (group && group_fd == -1) | 403 | if (group && group_fd == -1) |
405 | group_fd = fd[nr_cpu][counter][thread_index]; | 404 | group_fd = FD(evsel, nr_cpu, thread_index); |
406 | 405 | ||
407 | if (counter || thread_index) { | 406 | if (evsel->idx || thread_index) { |
408 | ret = ioctl(fd[nr_cpu][counter][thread_index], | 407 | struct perf_evsel *first; |
409 | PERF_EVENT_IOC_SET_OUTPUT, | 408 | first = list_entry(evsel_list.next, struct perf_evsel, node); |
410 | fd[nr_cpu][0][0]); | 409 | ret = ioctl(FD(evsel, nr_cpu, thread_index), |
410 | PERF_EVENT_IOC_SET_OUTPUT, | ||
411 | FD(first, nr_cpu, 0)); | ||
411 | if (ret) { | 412 | if (ret) { |
412 | error("failed to set output: %d (%s)\n", errno, | 413 | error("failed to set output: %d (%s)\n", errno, |
413 | strerror(errno)); | 414 | strerror(errno)); |
414 | exit(-1); | 415 | exit(-1); |
415 | } | 416 | } |
416 | } else { | 417 | } else { |
417 | mmap_array[nr_cpu].counter = counter; | ||
418 | mmap_array[nr_cpu].prev = 0; | 418 | mmap_array[nr_cpu].prev = 0; |
419 | mmap_array[nr_cpu].mask = mmap_pages*page_size - 1; | 419 | mmap_array[nr_cpu].mask = mmap_pages*page_size - 1; |
420 | mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size, | 420 | mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size, |
421 | PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter][thread_index], 0); | 421 | PROT_READ | PROT_WRITE, MAP_SHARED, FD(evsel, nr_cpu, thread_index), 0); |
422 | if (mmap_array[nr_cpu].base == MAP_FAILED) { | 422 | if (mmap_array[nr_cpu].base == MAP_FAILED) { |
423 | error("failed to mmap with %d (%s)\n", errno, strerror(errno)); | 423 | error("failed to mmap with %d (%s)\n", errno, strerror(errno)); |
424 | exit(-1); | 424 | exit(-1); |
425 | } | 425 | } |
426 | 426 | ||
427 | event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index]; | 427 | event_array[nr_poll].fd = FD(evsel, nr_cpu, thread_index); |
428 | event_array[nr_poll].events = POLLIN; | 428 | event_array[nr_poll].events = POLLIN; |
429 | nr_poll++; | 429 | nr_poll++; |
430 | } | 430 | } |
431 | 431 | ||
432 | if (filter != NULL) { | 432 | if (filter != NULL) { |
433 | ret = ioctl(fd[nr_cpu][counter][thread_index], | 433 | ret = ioctl(FD(evsel, nr_cpu, thread_index), |
434 | PERF_EVENT_IOC_SET_FILTER, filter); | 434 | PERF_EVENT_IOC_SET_FILTER, filter); |
435 | if (ret) { | 435 | if (ret) { |
436 | error("failed to set filter with %d (%s)\n", errno, | 436 | error("failed to set filter with %d (%s)\n", errno, |
437 | strerror(errno)); | 437 | strerror(errno)); |
@@ -446,11 +446,12 @@ try_again: | |||
446 | 446 | ||
447 | static void open_counters(int cpu) | 447 | static void open_counters(int cpu) |
448 | { | 448 | { |
449 | int counter; | 449 | struct perf_evsel *pos; |
450 | 450 | ||
451 | group_fd = -1; | 451 | group_fd = -1; |
452 | for (counter = 0; counter < nr_counters; counter++) | 452 | |
453 | create_counter(counter, cpu); | 453 | list_for_each_entry(pos, &evsel_list, node) |
454 | create_counter(pos, cpu); | ||
454 | 455 | ||
455 | nr_cpu++; | 456 | nr_cpu++; |
456 | } | 457 | } |
@@ -537,7 +538,7 @@ static void mmap_read_all(void) | |||
537 | 538 | ||
538 | static int __cmd_record(int argc, const char **argv) | 539 | static int __cmd_record(int argc, const char **argv) |
539 | { | 540 | { |
540 | int i, counter; | 541 | int i; |
541 | struct stat st; | 542 | struct stat st; |
542 | int flags; | 543 | int flags; |
543 | int err; | 544 | int err; |
@@ -604,7 +605,7 @@ static int __cmd_record(int argc, const char **argv) | |||
604 | goto out_delete_session; | 605 | goto out_delete_session; |
605 | } | 606 | } |
606 | 607 | ||
607 | if (have_tracepoints(attrs, nr_counters)) | 608 | if (have_tracepoints(&evsel_list)) |
608 | perf_header__set_feat(&session->header, HEADER_TRACE_INFO); | 609 | perf_header__set_feat(&session->header, HEADER_TRACE_INFO); |
609 | 610 | ||
610 | /* | 611 | /* |
@@ -666,12 +667,6 @@ static int __cmd_record(int argc, const char **argv) | |||
666 | close(child_ready_pipe[0]); | 667 | close(child_ready_pipe[0]); |
667 | } | 668 | } |
668 | 669 | ||
669 | nr_cpus = read_cpu_map(cpu_list); | ||
670 | if (nr_cpus < 1) { | ||
671 | perror("failed to collect number of CPUs"); | ||
672 | return -1; | ||
673 | } | ||
674 | |||
675 | if (!system_wide && no_inherit && !cpu_list) { | 670 | if (!system_wide && no_inherit && !cpu_list) { |
676 | open_counters(-1); | 671 | open_counters(-1); |
677 | } else { | 672 | } else { |
@@ -711,7 +706,7 @@ static int __cmd_record(int argc, const char **argv) | |||
711 | return err; | 706 | return err; |
712 | } | 707 | } |
713 | 708 | ||
714 | if (have_tracepoints(attrs, nr_counters)) { | 709 | if (have_tracepoints(&evsel_list)) { |
715 | /* | 710 | /* |
716 | * FIXME err <= 0 here actually means that | 711 | * FIXME err <= 0 here actually means that |
717 | * there were no tracepoints so its not really | 712 | * there were no tracepoints so its not really |
@@ -720,8 +715,7 @@ static int __cmd_record(int argc, const char **argv) | |||
720 | * return this more properly and also | 715 | * return this more properly and also |
721 | * propagate errors that now are calling die() | 716 | * propagate errors that now are calling die() |
722 | */ | 717 | */ |
723 | err = event__synthesize_tracing_data(output, attrs, | 718 | err = event__synthesize_tracing_data(output, &evsel_list, |
724 | nr_counters, | ||
725 | process_synthesized_event, | 719 | process_synthesized_event, |
726 | session); | 720 | session); |
727 | if (err <= 0) { | 721 | if (err <= 0) { |
@@ -795,13 +789,13 @@ static int __cmd_record(int argc, const char **argv) | |||
795 | 789 | ||
796 | if (done) { | 790 | if (done) { |
797 | for (i = 0; i < nr_cpu; i++) { | 791 | for (i = 0; i < nr_cpu; i++) { |
798 | for (counter = 0; | 792 | struct perf_evsel *pos; |
799 | counter < nr_counters; | 793 | |
800 | counter++) { | 794 | list_for_each_entry(pos, &evsel_list, node) { |
801 | for (thread = 0; | 795 | for (thread = 0; |
802 | thread < thread_num; | 796 | thread < thread_num; |
803 | thread++) | 797 | thread++) |
804 | ioctl(fd[i][counter][thread], | 798 | ioctl(FD(pos, i, thread), |
805 | PERF_EVENT_IOC_DISABLE); | 799 | PERF_EVENT_IOC_DISABLE); |
806 | } | 800 | } |
807 | } | 801 | } |
@@ -887,7 +881,8 @@ const struct option record_options[] = { | |||
887 | 881 | ||
888 | int cmd_record(int argc, const char **argv, const char *prefix __used) | 882 | int cmd_record(int argc, const char **argv, const char *prefix __used) |
889 | { | 883 | { |
890 | int i, j, err = -ENOMEM; | 884 | int err = -ENOMEM; |
885 | struct perf_evsel *pos; | ||
891 | 886 | ||
892 | argc = parse_options(argc, argv, record_options, record_usage, | 887 | argc = parse_options(argc, argv, record_options, record_usage, |
893 | PARSE_OPT_STOP_AT_NON_OPTION); | 888 | PARSE_OPT_STOP_AT_NON_OPTION); |
@@ -910,10 +905,9 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) | |||
910 | if (no_buildid_cache || no_buildid) | 905 | if (no_buildid_cache || no_buildid) |
911 | disable_buildid_cache(); | 906 | disable_buildid_cache(); |
912 | 907 | ||
913 | if (!nr_counters) { | 908 | if (list_empty(&evsel_list) && perf_evsel_list__create_default() < 0) { |
914 | nr_counters = 1; | 909 | pr_err("Not enough memory for event selector list\n"); |
915 | attrs[0].type = PERF_TYPE_HARDWARE; | 910 | goto out_symbol_exit; |
916 | attrs[0].config = PERF_COUNT_HW_CPU_CYCLES; | ||
917 | } | 911 | } |
918 | 912 | ||
919 | if (target_pid != -1) { | 913 | if (target_pid != -1) { |
@@ -933,12 +927,15 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) | |||
933 | thread_num = 1; | 927 | thread_num = 1; |
934 | } | 928 | } |
935 | 929 | ||
936 | for (i = 0; i < MAX_NR_CPUS; i++) { | 930 | nr_cpus = read_cpu_map(cpu_list); |
937 | for (j = 0; j < MAX_COUNTERS; j++) { | 931 | if (nr_cpus < 1) { |
938 | fd[i][j] = malloc(sizeof(int)*thread_num); | 932 | perror("failed to collect number of CPUs"); |
939 | if (!fd[i][j]) | 933 | return -1; |
940 | goto out_free_fd; | 934 | } |
941 | } | 935 | |
936 | list_for_each_entry(pos, &evsel_list, node) { | ||
937 | if (perf_evsel__alloc_fd(pos, nr_cpus, thread_num) < 0) | ||
938 | goto out_free_fd; | ||
942 | } | 939 | } |
943 | event_array = malloc( | 940 | event_array = malloc( |
944 | sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num); | 941 | sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num); |
@@ -968,10 +965,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used) | |||
968 | out_free_event_array: | 965 | out_free_event_array: |
969 | free(event_array); | 966 | free(event_array); |
970 | out_free_fd: | 967 | out_free_fd: |
971 | for (i = 0; i < MAX_NR_CPUS; i++) { | 968 | list_for_each_entry(pos, &evsel_list, node) |
972 | for (j = 0; j < MAX_COUNTERS; j++) | 969 | perf_evsel__free_fd(pos); |
973 | free(fd[i][j]); | ||
974 | } | ||
975 | free(all_tids); | 970 | free(all_tids); |
976 | all_tids = NULL; | 971 | all_tids = NULL; |
977 | out_symbol_exit: | 972 | out_symbol_exit: |
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c index 7ff746da7e6c..511ebaff9a66 100644 --- a/tools/perf/builtin-stat.c +++ b/tools/perf/builtin-stat.c | |||
@@ -43,6 +43,7 @@ | |||
43 | #include "util/parse-options.h" | 43 | #include "util/parse-options.h" |
44 | #include "util/parse-events.h" | 44 | #include "util/parse-events.h" |
45 | #include "util/event.h" | 45 | #include "util/event.h" |
46 | #include "util/evsel.h" | ||
46 | #include "util/debug.h" | 47 | #include "util/debug.h" |
47 | #include "util/header.h" | 48 | #include "util/header.h" |
48 | #include "util/cpumap.h" | 49 | #include "util/cpumap.h" |
@@ -52,6 +53,8 @@ | |||
52 | #include <math.h> | 53 | #include <math.h> |
53 | #include <locale.h> | 54 | #include <locale.h> |
54 | 55 | ||
56 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) | ||
57 | |||
55 | #define DEFAULT_SEPARATOR " " | 58 | #define DEFAULT_SEPARATOR " " |
56 | 59 | ||
57 | static struct perf_event_attr default_attrs[] = { | 60 | static struct perf_event_attr default_attrs[] = { |
@@ -90,16 +93,11 @@ static const char *cpu_list; | |||
90 | static const char *csv_sep = NULL; | 93 | static const char *csv_sep = NULL; |
91 | static bool csv_output = false; | 94 | static bool csv_output = false; |
92 | 95 | ||
93 | 96 | struct cpu_counts { | |
94 | static int *fd[MAX_NR_CPUS][MAX_COUNTERS]; | ||
95 | |||
96 | static int event_scaled[MAX_COUNTERS]; | ||
97 | |||
98 | static struct { | ||
99 | u64 val; | 97 | u64 val; |
100 | u64 ena; | 98 | u64 ena; |
101 | u64 run; | 99 | u64 run; |
102 | } cpu_counts[MAX_NR_CPUS][MAX_COUNTERS]; | 100 | }; |
103 | 101 | ||
104 | static volatile int done = 0; | 102 | static volatile int done = 0; |
105 | 103 | ||
@@ -108,6 +106,26 @@ struct stats | |||
108 | double n, mean, M2; | 106 | double n, mean, M2; |
109 | }; | 107 | }; |
110 | 108 | ||
109 | struct perf_stat { | ||
110 | struct stats res_stats[3]; | ||
111 | int scaled; | ||
112 | struct cpu_counts cpu_counts[]; | ||
113 | }; | ||
114 | |||
115 | static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel, int ncpus) | ||
116 | { | ||
117 | size_t priv_size = (sizeof(struct perf_stat) + | ||
118 | (ncpus * sizeof(struct cpu_counts))); | ||
119 | evsel->priv = zalloc(priv_size); | ||
120 | return evsel->priv == NULL ? -ENOMEM : 0; | ||
121 | } | ||
122 | |||
123 | static void perf_evsel__free_stat_priv(struct perf_evsel *evsel) | ||
124 | { | ||
125 | free(evsel->priv); | ||
126 | evsel->priv = NULL; | ||
127 | } | ||
128 | |||
111 | static void update_stats(struct stats *stats, u64 val) | 129 | static void update_stats(struct stats *stats, u64 val) |
112 | { | 130 | { |
113 | double delta; | 131 | double delta; |
@@ -147,22 +165,21 @@ static double stddev_stats(struct stats *stats) | |||
147 | return sqrt(variance_mean); | 165 | return sqrt(variance_mean); |
148 | } | 166 | } |
149 | 167 | ||
150 | struct stats event_res_stats[MAX_COUNTERS][3]; | ||
151 | struct stats runtime_nsecs_stats[MAX_NR_CPUS]; | 168 | struct stats runtime_nsecs_stats[MAX_NR_CPUS]; |
152 | struct stats runtime_cycles_stats[MAX_NR_CPUS]; | 169 | struct stats runtime_cycles_stats[MAX_NR_CPUS]; |
153 | struct stats runtime_branches_stats[MAX_NR_CPUS]; | 170 | struct stats runtime_branches_stats[MAX_NR_CPUS]; |
154 | struct stats walltime_nsecs_stats; | 171 | struct stats walltime_nsecs_stats; |
155 | 172 | ||
156 | #define MATCH_EVENT(t, c, counter) \ | 173 | #define MATCH_EVENT(t, c, evsel) \ |
157 | (attrs[counter].type == PERF_TYPE_##t && \ | 174 | (evsel->attr.type == PERF_TYPE_##t && \ |
158 | attrs[counter].config == PERF_COUNT_##c) | 175 | evsel->attr.config == PERF_COUNT_##c) |
159 | 176 | ||
160 | #define ERR_PERF_OPEN \ | 177 | #define ERR_PERF_OPEN \ |
161 | "counter %d, sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information." | 178 | "counter %d, sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information." |
162 | 179 | ||
163 | static int create_perf_stat_counter(int counter, bool *perm_err) | 180 | static int create_perf_stat_counter(struct perf_evsel *evsel, bool *perm_err) |
164 | { | 181 | { |
165 | struct perf_event_attr *attr = attrs + counter; | 182 | struct perf_event_attr *attr = &evsel->attr; |
166 | int thread; | 183 | int thread; |
167 | int ncreated = 0; | 184 | int ncreated = 0; |
168 | 185 | ||
@@ -174,13 +191,13 @@ static int create_perf_stat_counter(int counter, bool *perm_err) | |||
174 | int cpu; | 191 | int cpu; |
175 | 192 | ||
176 | for (cpu = 0; cpu < nr_cpus; cpu++) { | 193 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
177 | fd[cpu][counter][0] = sys_perf_event_open(attr, | 194 | FD(evsel, cpu, 0) = sys_perf_event_open(attr, |
178 | -1, cpumap[cpu], -1, 0); | 195 | -1, cpumap[cpu], -1, 0); |
179 | if (fd[cpu][counter][0] < 0) { | 196 | if (FD(evsel, cpu, 0) < 0) { |
180 | if (errno == EPERM || errno == EACCES) | 197 | if (errno == EPERM || errno == EACCES) |
181 | *perm_err = true; | 198 | *perm_err = true; |
182 | error(ERR_PERF_OPEN, counter, | 199 | error(ERR_PERF_OPEN, evsel->idx, |
183 | fd[cpu][counter][0], strerror(errno)); | 200 | FD(evsel, cpu, 0), strerror(errno)); |
184 | } else { | 201 | } else { |
185 | ++ncreated; | 202 | ++ncreated; |
186 | } | 203 | } |
@@ -192,13 +209,13 @@ static int create_perf_stat_counter(int counter, bool *perm_err) | |||
192 | attr->enable_on_exec = 1; | 209 | attr->enable_on_exec = 1; |
193 | } | 210 | } |
194 | for (thread = 0; thread < thread_num; thread++) { | 211 | for (thread = 0; thread < thread_num; thread++) { |
195 | fd[0][counter][thread] = sys_perf_event_open(attr, | 212 | FD(evsel, 0, thread) = sys_perf_event_open(attr, |
196 | all_tids[thread], -1, -1, 0); | 213 | all_tids[thread], -1, -1, 0); |
197 | if (fd[0][counter][thread] < 0) { | 214 | if (FD(evsel, 0, thread) < 0) { |
198 | if (errno == EPERM || errno == EACCES) | 215 | if (errno == EPERM || errno == EACCES) |
199 | *perm_err = true; | 216 | *perm_err = true; |
200 | error(ERR_PERF_OPEN, counter, | 217 | error(ERR_PERF_OPEN, evsel->idx, |
201 | fd[0][counter][thread], | 218 | FD(evsel, 0, thread), |
202 | strerror(errno)); | 219 | strerror(errno)); |
203 | } else { | 220 | } else { |
204 | ++ncreated; | 221 | ++ncreated; |
@@ -212,7 +229,7 @@ static int create_perf_stat_counter(int counter, bool *perm_err) | |||
212 | /* | 229 | /* |
213 | * Does the counter have nsecs as a unit? | 230 | * Does the counter have nsecs as a unit? |
214 | */ | 231 | */ |
215 | static inline int nsec_counter(int counter) | 232 | static inline int nsec_counter(struct perf_evsel *counter) |
216 | { | 233 | { |
217 | if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) || | 234 | if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) || |
218 | MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) | 235 | MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) |
@@ -225,8 +242,9 @@ static inline int nsec_counter(int counter) | |||
225 | * Read out the results of a single counter: | 242 | * Read out the results of a single counter: |
226 | * aggregate counts across CPUs in system-wide mode | 243 | * aggregate counts across CPUs in system-wide mode |
227 | */ | 244 | */ |
228 | static void read_counter_aggr(int counter) | 245 | static void read_counter_aggr(struct perf_evsel *counter) |
229 | { | 246 | { |
247 | struct perf_stat *ps = counter->priv; | ||
230 | u64 count[3], single_count[3]; | 248 | u64 count[3], single_count[3]; |
231 | int cpu; | 249 | int cpu; |
232 | size_t res, nv; | 250 | size_t res, nv; |
@@ -238,15 +256,15 @@ static void read_counter_aggr(int counter) | |||
238 | nv = scale ? 3 : 1; | 256 | nv = scale ? 3 : 1; |
239 | for (cpu = 0; cpu < nr_cpus; cpu++) { | 257 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
240 | for (thread = 0; thread < thread_num; thread++) { | 258 | for (thread = 0; thread < thread_num; thread++) { |
241 | if (fd[cpu][counter][thread] < 0) | 259 | if (FD(counter, cpu, thread) < 0) |
242 | continue; | 260 | continue; |
243 | 261 | ||
244 | res = read(fd[cpu][counter][thread], | 262 | res = read(FD(counter, cpu, thread), |
245 | single_count, nv * sizeof(u64)); | 263 | single_count, nv * sizeof(u64)); |
246 | assert(res == nv * sizeof(u64)); | 264 | assert(res == nv * sizeof(u64)); |
247 | 265 | ||
248 | close(fd[cpu][counter][thread]); | 266 | close(FD(counter, cpu, thread)); |
249 | fd[cpu][counter][thread] = -1; | 267 | FD(counter, cpu, thread) = -1; |
250 | 268 | ||
251 | count[0] += single_count[0]; | 269 | count[0] += single_count[0]; |
252 | if (scale) { | 270 | if (scale) { |
@@ -259,20 +277,20 @@ static void read_counter_aggr(int counter) | |||
259 | scaled = 0; | 277 | scaled = 0; |
260 | if (scale) { | 278 | if (scale) { |
261 | if (count[2] == 0) { | 279 | if (count[2] == 0) { |
262 | event_scaled[counter] = -1; | 280 | ps->scaled = -1; |
263 | count[0] = 0; | 281 | count[0] = 0; |
264 | return; | 282 | return; |
265 | } | 283 | } |
266 | 284 | ||
267 | if (count[2] < count[1]) { | 285 | if (count[2] < count[1]) { |
268 | event_scaled[counter] = 1; | 286 | ps->scaled = 1; |
269 | count[0] = (unsigned long long) | 287 | count[0] = (unsigned long long) |
270 | ((double)count[0] * count[1] / count[2] + 0.5); | 288 | ((double)count[0] * count[1] / count[2] + 0.5); |
271 | } | 289 | } |
272 | } | 290 | } |
273 | 291 | ||
274 | for (i = 0; i < 3; i++) | 292 | for (i = 0; i < 3; i++) |
275 | update_stats(&event_res_stats[counter][i], count[i]); | 293 | update_stats(&ps->res_stats[i], count[i]); |
276 | 294 | ||
277 | if (verbose) { | 295 | if (verbose) { |
278 | fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter), | 296 | fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter), |
@@ -294,8 +312,9 @@ static void read_counter_aggr(int counter) | |||
294 | * Read out the results of a single counter: | 312 | * Read out the results of a single counter: |
295 | * do not aggregate counts across CPUs in system-wide mode | 313 | * do not aggregate counts across CPUs in system-wide mode |
296 | */ | 314 | */ |
297 | static void read_counter(int counter) | 315 | static void read_counter(struct perf_evsel *counter) |
298 | { | 316 | { |
317 | struct cpu_counts *cpu_counts = counter->priv; | ||
299 | u64 count[3]; | 318 | u64 count[3]; |
300 | int cpu; | 319 | int cpu; |
301 | size_t res, nv; | 320 | size_t res, nv; |
@@ -306,15 +325,15 @@ static void read_counter(int counter) | |||
306 | 325 | ||
307 | for (cpu = 0; cpu < nr_cpus; cpu++) { | 326 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
308 | 327 | ||
309 | if (fd[cpu][counter][0] < 0) | 328 | if (FD(counter, cpu, 0) < 0) |
310 | continue; | 329 | continue; |
311 | 330 | ||
312 | res = read(fd[cpu][counter][0], count, nv * sizeof(u64)); | 331 | res = read(FD(counter, cpu, 0), count, nv * sizeof(u64)); |
313 | 332 | ||
314 | assert(res == nv * sizeof(u64)); | 333 | assert(res == nv * sizeof(u64)); |
315 | 334 | ||
316 | close(fd[cpu][counter][0]); | 335 | close(FD(counter, cpu, 0)); |
317 | fd[cpu][counter][0] = -1; | 336 | FD(counter, cpu, 0) = -1; |
318 | 337 | ||
319 | if (scale) { | 338 | if (scale) { |
320 | if (count[2] == 0) { | 339 | if (count[2] == 0) { |
@@ -324,9 +343,9 @@ static void read_counter(int counter) | |||
324 | ((double)count[0] * count[1] / count[2] + 0.5); | 343 | ((double)count[0] * count[1] / count[2] + 0.5); |
325 | } | 344 | } |
326 | } | 345 | } |
327 | cpu_counts[cpu][counter].val = count[0]; /* scaled count */ | 346 | cpu_counts[cpu].val = count[0]; /* scaled count */ |
328 | cpu_counts[cpu][counter].ena = count[1]; | 347 | cpu_counts[cpu].ena = count[1]; |
329 | cpu_counts[cpu][counter].run = count[2]; | 348 | cpu_counts[cpu].run = count[2]; |
330 | 349 | ||
331 | if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) | 350 | if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) |
332 | update_stats(&runtime_nsecs_stats[cpu], count[0]); | 351 | update_stats(&runtime_nsecs_stats[cpu], count[0]); |
@@ -340,8 +359,9 @@ static void read_counter(int counter) | |||
340 | static int run_perf_stat(int argc __used, const char **argv) | 359 | static int run_perf_stat(int argc __used, const char **argv) |
341 | { | 360 | { |
342 | unsigned long long t0, t1; | 361 | unsigned long long t0, t1; |
362 | struct perf_evsel *counter; | ||
343 | int status = 0; | 363 | int status = 0; |
344 | int counter, ncreated = 0; | 364 | int ncreated = 0; |
345 | int child_ready_pipe[2], go_pipe[2]; | 365 | int child_ready_pipe[2], go_pipe[2]; |
346 | bool perm_err = false; | 366 | bool perm_err = false; |
347 | const bool forks = (argc > 0); | 367 | const bool forks = (argc > 0); |
@@ -401,7 +421,7 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
401 | close(child_ready_pipe[0]); | 421 | close(child_ready_pipe[0]); |
402 | } | 422 | } |
403 | 423 | ||
404 | for (counter = 0; counter < nr_counters; counter++) | 424 | list_for_each_entry(counter, &evsel_list, node) |
405 | ncreated += create_perf_stat_counter(counter, &perm_err); | 425 | ncreated += create_perf_stat_counter(counter, &perm_err); |
406 | 426 | ||
407 | if (ncreated < nr_counters) { | 427 | if (ncreated < nr_counters) { |
@@ -433,25 +453,28 @@ static int run_perf_stat(int argc __used, const char **argv) | |||
433 | update_stats(&walltime_nsecs_stats, t1 - t0); | 453 | update_stats(&walltime_nsecs_stats, t1 - t0); |
434 | 454 | ||
435 | if (no_aggr) { | 455 | if (no_aggr) { |
436 | for (counter = 0; counter < nr_counters; counter++) | 456 | list_for_each_entry(counter, &evsel_list, node) |
437 | read_counter(counter); | 457 | read_counter(counter); |
438 | } else { | 458 | } else { |
439 | for (counter = 0; counter < nr_counters; counter++) | 459 | list_for_each_entry(counter, &evsel_list, node) |
440 | read_counter_aggr(counter); | 460 | read_counter_aggr(counter); |
441 | } | 461 | } |
442 | return WEXITSTATUS(status); | 462 | return WEXITSTATUS(status); |
443 | } | 463 | } |
444 | 464 | ||
445 | static void print_noise(int counter, double avg) | 465 | static void print_noise(struct perf_evsel *evsel, double avg) |
446 | { | 466 | { |
467 | struct perf_stat *ps; | ||
468 | |||
447 | if (run_count == 1) | 469 | if (run_count == 1) |
448 | return; | 470 | return; |
449 | 471 | ||
472 | ps = evsel->priv; | ||
450 | fprintf(stderr, " ( +- %7.3f%% )", | 473 | fprintf(stderr, " ( +- %7.3f%% )", |
451 | 100 * stddev_stats(&event_res_stats[counter][0]) / avg); | 474 | 100 * stddev_stats(&ps->res_stats[0]) / avg); |
452 | } | 475 | } |
453 | 476 | ||
454 | static void nsec_printout(int cpu, int counter, double avg) | 477 | static void nsec_printout(int cpu, struct perf_evsel *counter, double avg) |
455 | { | 478 | { |
456 | double msecs = avg / 1e6; | 479 | double msecs = avg / 1e6; |
457 | char cpustr[16] = { '\0', }; | 480 | char cpustr[16] = { '\0', }; |
@@ -473,7 +496,7 @@ static void nsec_printout(int cpu, int counter, double avg) | |||
473 | } | 496 | } |
474 | } | 497 | } |
475 | 498 | ||
476 | static void abs_printout(int cpu, int counter, double avg) | 499 | static void abs_printout(int cpu, struct perf_evsel *counter, double avg) |
477 | { | 500 | { |
478 | double total, ratio = 0.0; | 501 | double total, ratio = 0.0; |
479 | char cpustr[16] = { '\0', }; | 502 | char cpustr[16] = { '\0', }; |
@@ -528,10 +551,11 @@ static void abs_printout(int cpu, int counter, double avg) | |||
528 | * Print out the results of a single counter: | 551 | * Print out the results of a single counter: |
529 | * aggregated counts in system-wide mode | 552 | * aggregated counts in system-wide mode |
530 | */ | 553 | */ |
531 | static void print_counter_aggr(int counter) | 554 | static void print_counter_aggr(struct perf_evsel *counter) |
532 | { | 555 | { |
533 | double avg = avg_stats(&event_res_stats[counter][0]); | 556 | struct perf_stat *ps = counter->priv; |
534 | int scaled = event_scaled[counter]; | 557 | double avg = avg_stats(&ps->res_stats[0]); |
558 | int scaled = ps->scaled; | ||
535 | 559 | ||
536 | if (scaled == -1) { | 560 | if (scaled == -1) { |
537 | fprintf(stderr, "%*s%s%-24s\n", | 561 | fprintf(stderr, "%*s%s%-24s\n", |
@@ -555,8 +579,8 @@ static void print_counter_aggr(int counter) | |||
555 | if (scaled) { | 579 | if (scaled) { |
556 | double avg_enabled, avg_running; | 580 | double avg_enabled, avg_running; |
557 | 581 | ||
558 | avg_enabled = avg_stats(&event_res_stats[counter][1]); | 582 | avg_enabled = avg_stats(&ps->res_stats[1]); |
559 | avg_running = avg_stats(&event_res_stats[counter][2]); | 583 | avg_running = avg_stats(&ps->res_stats[2]); |
560 | 584 | ||
561 | fprintf(stderr, " (scaled from %.2f%%)", | 585 | fprintf(stderr, " (scaled from %.2f%%)", |
562 | 100 * avg_running / avg_enabled); | 586 | 100 * avg_running / avg_enabled); |
@@ -569,15 +593,16 @@ static void print_counter_aggr(int counter) | |||
569 | * Print out the results of a single counter: | 593 | * Print out the results of a single counter: |
570 | * does not use aggregated count in system-wide | 594 | * does not use aggregated count in system-wide |
571 | */ | 595 | */ |
572 | static void print_counter(int counter) | 596 | static void print_counter(struct perf_evsel *counter) |
573 | { | 597 | { |
598 | struct perf_stat *ps = counter->priv; | ||
574 | u64 ena, run, val; | 599 | u64 ena, run, val; |
575 | int cpu; | 600 | int cpu; |
576 | 601 | ||
577 | for (cpu = 0; cpu < nr_cpus; cpu++) { | 602 | for (cpu = 0; cpu < nr_cpus; cpu++) { |
578 | val = cpu_counts[cpu][counter].val; | 603 | val = ps->cpu_counts[cpu].val; |
579 | ena = cpu_counts[cpu][counter].ena; | 604 | ena = ps->cpu_counts[cpu].ena; |
580 | run = cpu_counts[cpu][counter].run; | 605 | run = ps->cpu_counts[cpu].run; |
581 | if (run == 0 || ena == 0) { | 606 | if (run == 0 || ena == 0) { |
582 | fprintf(stderr, "CPU%*d%s%*s%s%-24s", | 607 | fprintf(stderr, "CPU%*d%s%*s%s%-24s", |
583 | csv_output ? 0 : -4, | 608 | csv_output ? 0 : -4, |
@@ -609,7 +634,8 @@ static void print_counter(int counter) | |||
609 | 634 | ||
610 | static void print_stat(int argc, const char **argv) | 635 | static void print_stat(int argc, const char **argv) |
611 | { | 636 | { |
612 | int i, counter; | 637 | struct perf_evsel *counter; |
638 | int i; | ||
613 | 639 | ||
614 | fflush(stdout); | 640 | fflush(stdout); |
615 | 641 | ||
@@ -632,10 +658,10 @@ static void print_stat(int argc, const char **argv) | |||
632 | } | 658 | } |
633 | 659 | ||
634 | if (no_aggr) { | 660 | if (no_aggr) { |
635 | for (counter = 0; counter < nr_counters; counter++) | 661 | list_for_each_entry(counter, &evsel_list, node) |
636 | print_counter(counter); | 662 | print_counter(counter); |
637 | } else { | 663 | } else { |
638 | for (counter = 0; counter < nr_counters; counter++) | 664 | list_for_each_entry(counter, &evsel_list, node) |
639 | print_counter_aggr(counter); | 665 | print_counter_aggr(counter); |
640 | } | 666 | } |
641 | 667 | ||
@@ -720,8 +746,8 @@ static const struct option options[] = { | |||
720 | 746 | ||
721 | int cmd_stat(int argc, const char **argv, const char *prefix __used) | 747 | int cmd_stat(int argc, const char **argv, const char *prefix __used) |
722 | { | 748 | { |
723 | int status; | 749 | struct perf_evsel *pos; |
724 | int i,j; | 750 | int status = -ENOMEM; |
725 | 751 | ||
726 | setlocale(LC_ALL, ""); | 752 | setlocale(LC_ALL, ""); |
727 | 753 | ||
@@ -757,8 +783,18 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
757 | 783 | ||
758 | /* Set attrs and nr_counters if no event is selected and !null_run */ | 784 | /* Set attrs and nr_counters if no event is selected and !null_run */ |
759 | if (!null_run && !nr_counters) { | 785 | if (!null_run && !nr_counters) { |
760 | memcpy(attrs, default_attrs, sizeof(default_attrs)); | 786 | size_t c; |
787 | |||
761 | nr_counters = ARRAY_SIZE(default_attrs); | 788 | nr_counters = ARRAY_SIZE(default_attrs); |
789 | |||
790 | for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) { | ||
791 | pos = perf_evsel__new(default_attrs[c].type, | ||
792 | default_attrs[c].config, | ||
793 | nr_counters); | ||
794 | if (pos == NULL) | ||
795 | goto out; | ||
796 | list_add(&pos->node, &evsel_list); | ||
797 | } | ||
762 | } | 798 | } |
763 | 799 | ||
764 | if (system_wide) | 800 | if (system_wide) |
@@ -786,12 +822,10 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
786 | thread_num = 1; | 822 | thread_num = 1; |
787 | } | 823 | } |
788 | 824 | ||
789 | for (i = 0; i < MAX_NR_CPUS; i++) { | 825 | list_for_each_entry(pos, &evsel_list, node) { |
790 | for (j = 0; j < MAX_COUNTERS; j++) { | 826 | if (perf_evsel__alloc_stat_priv(pos, nr_cpus) < 0 || |
791 | fd[i][j] = malloc(sizeof(int)*thread_num); | 827 | perf_evsel__alloc_fd(pos, nr_cpus, thread_num) < 0) |
792 | if (!fd[i][j]) | 828 | goto out_free_fd; |
793 | return -ENOMEM; | ||
794 | } | ||
795 | } | 829 | } |
796 | 830 | ||
797 | /* | 831 | /* |
@@ -814,6 +848,11 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used) | |||
814 | 848 | ||
815 | if (status != -1) | 849 | if (status != -1) |
816 | print_stat(argc, argv); | 850 | print_stat(argc, argv); |
817 | 851 | out_free_fd: | |
852 | list_for_each_entry(pos, &evsel_list, node) { | ||
853 | perf_evsel__free_fd(pos); | ||
854 | perf_evsel__free_stat_priv(pos); | ||
855 | } | ||
856 | out: | ||
818 | return status; | 857 | return status; |
819 | } | 858 | } |
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c index ae15f046c405..13a836efa1e1 100644 --- a/tools/perf/builtin-top.c +++ b/tools/perf/builtin-top.c | |||
@@ -21,6 +21,7 @@ | |||
21 | #include "perf.h" | 21 | #include "perf.h" |
22 | 22 | ||
23 | #include "util/color.h" | 23 | #include "util/color.h" |
24 | #include "util/evsel.h" | ||
24 | #include "util/session.h" | 25 | #include "util/session.h" |
25 | #include "util/symbol.h" | 26 | #include "util/symbol.h" |
26 | #include "util/thread.h" | 27 | #include "util/thread.h" |
@@ -29,6 +30,7 @@ | |||
29 | #include "util/parse-options.h" | 30 | #include "util/parse-options.h" |
30 | #include "util/parse-events.h" | 31 | #include "util/parse-events.h" |
31 | #include "util/cpumap.h" | 32 | #include "util/cpumap.h" |
33 | #include "util/xyarray.h" | ||
32 | 34 | ||
33 | #include "util/debug.h" | 35 | #include "util/debug.h" |
34 | 36 | ||
@@ -55,7 +57,7 @@ | |||
55 | #include <linux/unistd.h> | 57 | #include <linux/unistd.h> |
56 | #include <linux/types.h> | 58 | #include <linux/types.h> |
57 | 59 | ||
58 | static int *fd[MAX_NR_CPUS][MAX_COUNTERS]; | 60 | #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) |
59 | 61 | ||
60 | static bool system_wide = false; | 62 | static bool system_wide = false; |
61 | 63 | ||
@@ -100,6 +102,7 @@ struct sym_entry *sym_filter_entry = NULL; | |||
100 | struct sym_entry *sym_filter_entry_sched = NULL; | 102 | struct sym_entry *sym_filter_entry_sched = NULL; |
101 | static int sym_pcnt_filter = 5; | 103 | static int sym_pcnt_filter = 5; |
102 | static int sym_counter = 0; | 104 | static int sym_counter = 0; |
105 | static struct perf_evsel *sym_evsel = NULL; | ||
103 | static int display_weighted = -1; | 106 | static int display_weighted = -1; |
104 | static const char *cpu_list; | 107 | static const char *cpu_list; |
105 | 108 | ||
@@ -353,7 +356,7 @@ static void show_details(struct sym_entry *syme) | |||
353 | return; | 356 | return; |
354 | 357 | ||
355 | symbol = sym_entry__symbol(syme); | 358 | symbol = sym_entry__symbol(syme); |
356 | printf("Showing %s for %s\n", event_name(sym_counter), symbol->name); | 359 | printf("Showing %s for %s\n", event_name(sym_evsel), symbol->name); |
357 | printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter); | 360 | printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter); |
358 | 361 | ||
359 | pthread_mutex_lock(&syme->src->lock); | 362 | pthread_mutex_lock(&syme->src->lock); |
@@ -460,7 +463,8 @@ static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se) | |||
460 | static void print_sym_table(void) | 463 | static void print_sym_table(void) |
461 | { | 464 | { |
462 | int printed = 0, j; | 465 | int printed = 0, j; |
463 | int counter, snap = !display_weighted ? sym_counter : 0; | 466 | struct perf_evsel *counter; |
467 | int snap = !display_weighted ? sym_counter : 0; | ||
464 | float samples_per_sec = samples/delay_secs; | 468 | float samples_per_sec = samples/delay_secs; |
465 | float ksamples_per_sec = kernel_samples/delay_secs; | 469 | float ksamples_per_sec = kernel_samples/delay_secs; |
466 | float us_samples_per_sec = (us_samples)/delay_secs; | 470 | float us_samples_per_sec = (us_samples)/delay_secs; |
@@ -532,7 +536,9 @@ static void print_sym_table(void) | |||
532 | } | 536 | } |
533 | 537 | ||
534 | if (nr_counters == 1 || !display_weighted) { | 538 | if (nr_counters == 1 || !display_weighted) { |
535 | printf("%Ld", (u64)attrs[0].sample_period); | 539 | struct perf_evsel *first; |
540 | first = list_entry(evsel_list.next, struct perf_evsel, node); | ||
541 | printf("%Ld", first->attr.sample_period); | ||
536 | if (freq) | 542 | if (freq) |
537 | printf("Hz "); | 543 | printf("Hz "); |
538 | else | 544 | else |
@@ -540,9 +546,9 @@ static void print_sym_table(void) | |||
540 | } | 546 | } |
541 | 547 | ||
542 | if (!display_weighted) | 548 | if (!display_weighted) |
543 | printf("%s", event_name(sym_counter)); | 549 | printf("%s", event_name(sym_evsel)); |
544 | else for (counter = 0; counter < nr_counters; counter++) { | 550 | else list_for_each_entry(counter, &evsel_list, node) { |
545 | if (counter) | 551 | if (counter->idx) |
546 | printf("/"); | 552 | printf("/"); |
547 | 553 | ||
548 | printf("%s", event_name(counter)); | 554 | printf("%s", event_name(counter)); |
@@ -739,7 +745,7 @@ static void print_mapped_keys(void) | |||
739 | fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", print_entries); | 745 | fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", print_entries); |
740 | 746 | ||
741 | if (nr_counters > 1) | 747 | if (nr_counters > 1) |
742 | fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_counter)); | 748 | fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_evsel)); |
743 | 749 | ||
744 | fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter); | 750 | fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter); |
745 | 751 | ||
@@ -826,19 +832,23 @@ static void handle_keypress(struct perf_session *session, int c) | |||
826 | break; | 832 | break; |
827 | case 'E': | 833 | case 'E': |
828 | if (nr_counters > 1) { | 834 | if (nr_counters > 1) { |
829 | int i; | ||
830 | |||
831 | fprintf(stderr, "\nAvailable events:"); | 835 | fprintf(stderr, "\nAvailable events:"); |
832 | for (i = 0; i < nr_counters; i++) | 836 | |
833 | fprintf(stderr, "\n\t%d %s", i, event_name(i)); | 837 | list_for_each_entry(sym_evsel, &evsel_list, node) |
838 | fprintf(stderr, "\n\t%d %s", sym_evsel->idx, event_name(sym_evsel)); | ||
834 | 839 | ||
835 | prompt_integer(&sym_counter, "Enter details event counter"); | 840 | prompt_integer(&sym_counter, "Enter details event counter"); |
836 | 841 | ||
837 | if (sym_counter >= nr_counters) { | 842 | if (sym_counter >= nr_counters) { |
838 | fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0)); | 843 | sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node); |
839 | sym_counter = 0; | 844 | sym_counter = 0; |
845 | fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(sym_evsel)); | ||
840 | sleep(1); | 846 | sleep(1); |
847 | break; | ||
841 | } | 848 | } |
849 | list_for_each_entry(sym_evsel, &evsel_list, node) | ||
850 | if (sym_evsel->idx == sym_counter) | ||
851 | break; | ||
842 | } else sym_counter = 0; | 852 | } else sym_counter = 0; |
843 | break; | 853 | break; |
844 | case 'f': | 854 | case 'f': |
@@ -978,7 +988,8 @@ static int symbol_filter(struct map *map, struct symbol *sym) | |||
978 | 988 | ||
979 | static void event__process_sample(const event_t *self, | 989 | static void event__process_sample(const event_t *self, |
980 | struct sample_data *sample, | 990 | struct sample_data *sample, |
981 | struct perf_session *session, int counter) | 991 | struct perf_session *session, |
992 | struct perf_evsel *evsel) | ||
982 | { | 993 | { |
983 | u64 ip = self->ip.ip; | 994 | u64 ip = self->ip.ip; |
984 | struct sym_entry *syme; | 995 | struct sym_entry *syme; |
@@ -1071,9 +1082,9 @@ static void event__process_sample(const event_t *self, | |||
1071 | 1082 | ||
1072 | syme = symbol__priv(al.sym); | 1083 | syme = symbol__priv(al.sym); |
1073 | if (!syme->skip) { | 1084 | if (!syme->skip) { |
1074 | syme->count[counter]++; | 1085 | syme->count[evsel->idx]++; |
1075 | syme->origin = origin; | 1086 | syme->origin = origin; |
1076 | record_precise_ip(syme, counter, ip); | 1087 | record_precise_ip(syme, evsel->idx, ip); |
1077 | pthread_mutex_lock(&active_symbols_lock); | 1088 | pthread_mutex_lock(&active_symbols_lock); |
1078 | if (list_empty(&syme->node) || !syme->node.next) | 1089 | if (list_empty(&syme->node) || !syme->node.next) |
1079 | __list_insert_active_sym(syme); | 1090 | __list_insert_active_sym(syme); |
@@ -1082,12 +1093,24 @@ static void event__process_sample(const event_t *self, | |||
1082 | } | 1093 | } |
1083 | 1094 | ||
1084 | struct mmap_data { | 1095 | struct mmap_data { |
1085 | int counter; | ||
1086 | void *base; | 1096 | void *base; |
1087 | int mask; | 1097 | int mask; |
1088 | unsigned int prev; | 1098 | unsigned int prev; |
1089 | }; | 1099 | }; |
1090 | 1100 | ||
1101 | static int perf_evsel__alloc_mmap_per_thread(struct perf_evsel *evsel, | ||
1102 | int ncpus, int nthreads) | ||
1103 | { | ||
1104 | evsel->priv = xyarray__new(ncpus, nthreads, sizeof(struct mmap_data)); | ||
1105 | return evsel->priv != NULL ? 0 : -ENOMEM; | ||
1106 | } | ||
1107 | |||
1108 | static void perf_evsel__free_mmap(struct perf_evsel *evsel) | ||
1109 | { | ||
1110 | xyarray__delete(evsel->priv); | ||
1111 | evsel->priv = NULL; | ||
1112 | } | ||
1113 | |||
1091 | static unsigned int mmap_read_head(struct mmap_data *md) | 1114 | static unsigned int mmap_read_head(struct mmap_data *md) |
1092 | { | 1115 | { |
1093 | struct perf_event_mmap_page *pc = md->base; | 1116 | struct perf_event_mmap_page *pc = md->base; |
@@ -1100,8 +1123,11 @@ static unsigned int mmap_read_head(struct mmap_data *md) | |||
1100 | } | 1123 | } |
1101 | 1124 | ||
1102 | static void perf_session__mmap_read_counter(struct perf_session *self, | 1125 | static void perf_session__mmap_read_counter(struct perf_session *self, |
1103 | struct mmap_data *md) | 1126 | struct perf_evsel *evsel, |
1127 | int cpu, int thread_idx) | ||
1104 | { | 1128 | { |
1129 | struct xyarray *mmap_array = evsel->priv; | ||
1130 | struct mmap_data *md = xyarray__entry(mmap_array, cpu, thread_idx); | ||
1105 | unsigned int head = mmap_read_head(md); | 1131 | unsigned int head = mmap_read_head(md); |
1106 | unsigned int old = md->prev; | 1132 | unsigned int old = md->prev; |
1107 | unsigned char *data = md->base + page_size; | 1133 | unsigned char *data = md->base + page_size; |
@@ -1155,7 +1181,7 @@ static void perf_session__mmap_read_counter(struct perf_session *self, | |||
1155 | 1181 | ||
1156 | event__parse_sample(event, self, &sample); | 1182 | event__parse_sample(event, self, &sample); |
1157 | if (event->header.type == PERF_RECORD_SAMPLE) | 1183 | if (event->header.type == PERF_RECORD_SAMPLE) |
1158 | event__process_sample(event, &sample, self, md->counter); | 1184 | event__process_sample(event, &sample, self, evsel); |
1159 | else | 1185 | else |
1160 | event__process(event, &sample, self); | 1186 | event__process(event, &sample, self); |
1161 | old += size; | 1187 | old += size; |
@@ -1165,28 +1191,31 @@ static void perf_session__mmap_read_counter(struct perf_session *self, | |||
1165 | } | 1191 | } |
1166 | 1192 | ||
1167 | static struct pollfd *event_array; | 1193 | static struct pollfd *event_array; |
1168 | static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS]; | ||
1169 | 1194 | ||
1170 | static void perf_session__mmap_read(struct perf_session *self) | 1195 | static void perf_session__mmap_read(struct perf_session *self) |
1171 | { | 1196 | { |
1172 | int i, counter, thread_index; | 1197 | struct perf_evsel *counter; |
1198 | int i, thread_index; | ||
1173 | 1199 | ||
1174 | for (i = 0; i < nr_cpus; i++) { | 1200 | for (i = 0; i < nr_cpus; i++) { |
1175 | for (counter = 0; counter < nr_counters; counter++) | 1201 | list_for_each_entry(counter, &evsel_list, node) { |
1176 | for (thread_index = 0; | 1202 | for (thread_index = 0; |
1177 | thread_index < thread_num; | 1203 | thread_index < thread_num; |
1178 | thread_index++) { | 1204 | thread_index++) { |
1179 | perf_session__mmap_read_counter(self, | 1205 | perf_session__mmap_read_counter(self, |
1180 | &mmap_array[i][counter][thread_index]); | 1206 | counter, i, thread_index); |
1181 | } | 1207 | } |
1208 | } | ||
1182 | } | 1209 | } |
1183 | } | 1210 | } |
1184 | 1211 | ||
1185 | int nr_poll; | 1212 | int nr_poll; |
1186 | int group_fd; | 1213 | int group_fd; |
1187 | 1214 | ||
1188 | static void start_counter(int i, int counter) | 1215 | static void start_counter(int i, struct perf_evsel *evsel) |
1189 | { | 1216 | { |
1217 | struct xyarray *mmap_array = evsel->priv; | ||
1218 | struct mmap_data *mm; | ||
1190 | struct perf_event_attr *attr; | 1219 | struct perf_event_attr *attr; |
1191 | int cpu = -1; | 1220 | int cpu = -1; |
1192 | int thread_index; | 1221 | int thread_index; |
@@ -1194,7 +1223,7 @@ static void start_counter(int i, int counter) | |||
1194 | if (target_tid == -1) | 1223 | if (target_tid == -1) |
1195 | cpu = cpumap[i]; | 1224 | cpu = cpumap[i]; |
1196 | 1225 | ||
1197 | attr = attrs + counter; | 1226 | attr = &evsel->attr; |
1198 | 1227 | ||
1199 | attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; | 1228 | attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID; |
1200 | 1229 | ||
@@ -1209,10 +1238,10 @@ static void start_counter(int i, int counter) | |||
1209 | 1238 | ||
1210 | for (thread_index = 0; thread_index < thread_num; thread_index++) { | 1239 | for (thread_index = 0; thread_index < thread_num; thread_index++) { |
1211 | try_again: | 1240 | try_again: |
1212 | fd[i][counter][thread_index] = sys_perf_event_open(attr, | 1241 | FD(evsel, i, thread_index) = sys_perf_event_open(attr, |
1213 | all_tids[thread_index], cpu, group_fd, 0); | 1242 | all_tids[thread_index], cpu, group_fd, 0); |
1214 | 1243 | ||
1215 | if (fd[i][counter][thread_index] < 0) { | 1244 | if (FD(evsel, i, thread_index) < 0) { |
1216 | int err = errno; | 1245 | int err = errno; |
1217 | 1246 | ||
1218 | if (err == EPERM || err == EACCES) | 1247 | if (err == EPERM || err == EACCES) |
@@ -1236,29 +1265,29 @@ try_again: | |||
1236 | } | 1265 | } |
1237 | printf("\n"); | 1266 | printf("\n"); |
1238 | error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n", | 1267 | error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n", |
1239 | fd[i][counter][thread_index], strerror(err)); | 1268 | FD(evsel, i, thread_index), strerror(err)); |
1240 | die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); | 1269 | die("No CONFIG_PERF_EVENTS=y kernel support configured?\n"); |
1241 | exit(-1); | 1270 | exit(-1); |
1242 | } | 1271 | } |
1243 | assert(fd[i][counter][thread_index] >= 0); | 1272 | assert(FD(evsel, i, thread_index) >= 0); |
1244 | fcntl(fd[i][counter][thread_index], F_SETFL, O_NONBLOCK); | 1273 | fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK); |
1245 | 1274 | ||
1246 | /* | 1275 | /* |
1247 | * First counter acts as the group leader: | 1276 | * First counter acts as the group leader: |
1248 | */ | 1277 | */ |
1249 | if (group && group_fd == -1) | 1278 | if (group && group_fd == -1) |
1250 | group_fd = fd[i][counter][thread_index]; | 1279 | group_fd = FD(evsel, i, thread_index); |
1251 | 1280 | ||
1252 | event_array[nr_poll].fd = fd[i][counter][thread_index]; | 1281 | event_array[nr_poll].fd = FD(evsel, i, thread_index); |
1253 | event_array[nr_poll].events = POLLIN; | 1282 | event_array[nr_poll].events = POLLIN; |
1254 | nr_poll++; | 1283 | nr_poll++; |
1255 | 1284 | ||
1256 | mmap_array[i][counter][thread_index].counter = counter; | 1285 | mm = xyarray__entry(mmap_array, i, thread_index); |
1257 | mmap_array[i][counter][thread_index].prev = 0; | 1286 | mm->prev = 0; |
1258 | mmap_array[i][counter][thread_index].mask = mmap_pages*page_size - 1; | 1287 | mm->mask = mmap_pages*page_size - 1; |
1259 | mmap_array[i][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size, | 1288 | mm->base = mmap(NULL, (mmap_pages+1)*page_size, |
1260 | PROT_READ, MAP_SHARED, fd[i][counter][thread_index], 0); | 1289 | PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0); |
1261 | if (mmap_array[i][counter][thread_index].base == MAP_FAILED) | 1290 | if (mm->base == MAP_FAILED) |
1262 | die("failed to mmap with %d (%s)\n", errno, strerror(errno)); | 1291 | die("failed to mmap with %d (%s)\n", errno, strerror(errno)); |
1263 | } | 1292 | } |
1264 | } | 1293 | } |
@@ -1266,8 +1295,8 @@ try_again: | |||
1266 | static int __cmd_top(void) | 1295 | static int __cmd_top(void) |
1267 | { | 1296 | { |
1268 | pthread_t thread; | 1297 | pthread_t thread; |
1269 | int i, counter; | 1298 | struct perf_evsel *counter; |
1270 | int ret; | 1299 | int i, ret; |
1271 | /* | 1300 | /* |
1272 | * FIXME: perf_session__new should allow passing a O_MMAP, so that all this | 1301 | * FIXME: perf_session__new should allow passing a O_MMAP, so that all this |
1273 | * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. | 1302 | * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. |
@@ -1283,7 +1312,7 @@ static int __cmd_top(void) | |||
1283 | 1312 | ||
1284 | for (i = 0; i < nr_cpus; i++) { | 1313 | for (i = 0; i < nr_cpus; i++) { |
1285 | group_fd = -1; | 1314 | group_fd = -1; |
1286 | for (counter = 0; counter < nr_counters; counter++) | 1315 | list_for_each_entry(counter, &evsel_list, node) |
1287 | start_counter(i, counter); | 1316 | start_counter(i, counter); |
1288 | } | 1317 | } |
1289 | 1318 | ||
@@ -1372,8 +1401,8 @@ static const struct option options[] = { | |||
1372 | 1401 | ||
1373 | int cmd_top(int argc, const char **argv, const char *prefix __used) | 1402 | int cmd_top(int argc, const char **argv, const char *prefix __used) |
1374 | { | 1403 | { |
1375 | int counter; | 1404 | struct perf_evsel *pos; |
1376 | int i,j; | 1405 | int status = -ENOMEM; |
1377 | 1406 | ||
1378 | page_size = sysconf(_SC_PAGE_SIZE); | 1407 | page_size = sysconf(_SC_PAGE_SIZE); |
1379 | 1408 | ||
@@ -1398,15 +1427,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
1398 | thread_num = 1; | 1427 | thread_num = 1; |
1399 | } | 1428 | } |
1400 | 1429 | ||
1401 | for (i = 0; i < MAX_NR_CPUS; i++) { | ||
1402 | for (j = 0; j < MAX_COUNTERS; j++) { | ||
1403 | fd[i][j] = malloc(sizeof(int)*thread_num); | ||
1404 | mmap_array[i][j] = zalloc( | ||
1405 | sizeof(struct mmap_data)*thread_num); | ||
1406 | if (!fd[i][j] || !mmap_array[i][j]) | ||
1407 | return -ENOMEM; | ||
1408 | } | ||
1409 | } | ||
1410 | event_array = malloc( | 1430 | event_array = malloc( |
1411 | sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num); | 1431 | sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num); |
1412 | if (!event_array) | 1432 | if (!event_array) |
@@ -1419,15 +1439,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
1419 | cpu_list = NULL; | 1439 | cpu_list = NULL; |
1420 | } | 1440 | } |
1421 | 1441 | ||
1422 | if (!nr_counters) | 1442 | if (!nr_counters && perf_evsel_list__create_default() < 0) { |
1423 | nr_counters = 1; | 1443 | pr_err("Not enough memory for event selector list\n"); |
1424 | 1444 | return -ENOMEM; | |
1425 | symbol_conf.priv_size = (sizeof(struct sym_entry) + | 1445 | } |
1426 | (nr_counters + 1) * sizeof(unsigned long)); | ||
1427 | |||
1428 | symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); | ||
1429 | if (symbol__init() < 0) | ||
1430 | return -1; | ||
1431 | 1446 | ||
1432 | if (delay_secs < 1) | 1447 | if (delay_secs < 1) |
1433 | delay_secs = 1; | 1448 | delay_secs = 1; |
@@ -1444,16 +1459,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
1444 | exit(EXIT_FAILURE); | 1459 | exit(EXIT_FAILURE); |
1445 | } | 1460 | } |
1446 | 1461 | ||
1447 | /* | ||
1448 | * Fill in the ones not specifically initialized via -c: | ||
1449 | */ | ||
1450 | for (counter = 0; counter < nr_counters; counter++) { | ||
1451 | if (attrs[counter].sample_period) | ||
1452 | continue; | ||
1453 | |||
1454 | attrs[counter].sample_period = default_interval; | ||
1455 | } | ||
1456 | |||
1457 | if (target_tid != -1) | 1462 | if (target_tid != -1) |
1458 | nr_cpus = 1; | 1463 | nr_cpus = 1; |
1459 | else | 1464 | else |
@@ -1462,11 +1467,38 @@ int cmd_top(int argc, const char **argv, const char *prefix __used) | |||
1462 | if (nr_cpus < 1) | 1467 | if (nr_cpus < 1) |
1463 | usage_with_options(top_usage, options); | 1468 | usage_with_options(top_usage, options); |
1464 | 1469 | ||
1470 | list_for_each_entry(pos, &evsel_list, node) { | ||
1471 | if (perf_evsel__alloc_mmap_per_thread(pos, nr_cpus, thread_num) < 0 || | ||
1472 | perf_evsel__alloc_fd(pos, nr_cpus, thread_num) < 0) | ||
1473 | goto out_free_fd; | ||
1474 | /* | ||
1475 | * Fill in the ones not specifically initialized via -c: | ||
1476 | */ | ||
1477 | if (pos->attr.sample_period) | ||
1478 | continue; | ||
1479 | |||
1480 | pos->attr.sample_period = default_interval; | ||
1481 | } | ||
1482 | |||
1483 | symbol_conf.priv_size = (sizeof(struct sym_entry) + | ||
1484 | (nr_counters + 1) * sizeof(unsigned long)); | ||
1485 | |||
1486 | symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL); | ||
1487 | if (symbol__init() < 0) | ||
1488 | return -1; | ||
1489 | |||
1465 | get_term_dimensions(&winsize); | 1490 | get_term_dimensions(&winsize); |
1466 | if (print_entries == 0) { | 1491 | if (print_entries == 0) { |
1467 | update_print_entries(&winsize); | 1492 | update_print_entries(&winsize); |
1468 | signal(SIGWINCH, sig_winch_handler); | 1493 | signal(SIGWINCH, sig_winch_handler); |
1469 | } | 1494 | } |
1470 | 1495 | ||
1471 | return __cmd_top(); | 1496 | status = __cmd_top(); |
1497 | out_free_fd: | ||
1498 | list_for_each_entry(pos, &evsel_list, node) { | ||
1499 | perf_evsel__free_fd(pos); | ||
1500 | perf_evsel__free_mmap(pos); | ||
1501 | } | ||
1502 | |||
1503 | return status; | ||
1472 | } | 1504 | } |
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c new file mode 100644 index 000000000000..6539ec912c70 --- /dev/null +++ b/tools/perf/util/evsel.c | |||
@@ -0,0 +1,35 @@ | |||
1 | #include "evsel.h" | ||
2 | #include "util.h" | ||
3 | |||
4 | struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx) | ||
5 | { | ||
6 | struct perf_evsel *evsel = zalloc(sizeof(*evsel)); | ||
7 | |||
8 | if (evsel != NULL) { | ||
9 | evsel->idx = idx; | ||
10 | evsel->attr.type = type; | ||
11 | evsel->attr.config = config; | ||
12 | INIT_LIST_HEAD(&evsel->node); | ||
13 | } | ||
14 | |||
15 | return evsel; | ||
16 | } | ||
17 | |||
18 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads) | ||
19 | { | ||
20 | evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int)); | ||
21 | return evsel->fd != NULL ? 0 : -ENOMEM; | ||
22 | } | ||
23 | |||
24 | void perf_evsel__free_fd(struct perf_evsel *evsel) | ||
25 | { | ||
26 | xyarray__delete(evsel->fd); | ||
27 | evsel->fd = NULL; | ||
28 | } | ||
29 | |||
30 | void perf_evsel__delete(struct perf_evsel *evsel) | ||
31 | { | ||
32 | assert(list_empty(&evsel->node)); | ||
33 | xyarray__delete(evsel->fd); | ||
34 | free(evsel); | ||
35 | } | ||
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h new file mode 100644 index 000000000000..3eb3989a2110 --- /dev/null +++ b/tools/perf/util/evsel.h | |||
@@ -0,0 +1,24 @@ | |||
1 | #ifndef __PERF_EVSEL_H | ||
2 | #define __PERF_EVSEL_H 1 | ||
3 | |||
4 | #include <linux/list.h> | ||
5 | #include <linux/perf_event.h> | ||
6 | #include "types.h" | ||
7 | #include "xyarray.h" | ||
8 | |||
9 | struct perf_evsel { | ||
10 | struct list_head node; | ||
11 | struct perf_event_attr attr; | ||
12 | char *filter; | ||
13 | struct xyarray *fd; | ||
14 | int idx; | ||
15 | void *priv; | ||
16 | }; | ||
17 | |||
18 | struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx); | ||
19 | void perf_evsel__delete(struct perf_evsel *evsel); | ||
20 | |||
21 | int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads); | ||
22 | void perf_evsel__free_fd(struct perf_evsel *evsel); | ||
23 | |||
24 | #endif /* __PERF_EVSEL_H */ | ||
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c index 16a16021eaa6..ecb5a8444f42 100644 --- a/tools/perf/util/header.c +++ b/tools/perf/util/header.c | |||
@@ -461,7 +461,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd) | |||
461 | 461 | ||
462 | /* Write trace info */ | 462 | /* Write trace info */ |
463 | trace_sec->offset = lseek(fd, 0, SEEK_CUR); | 463 | trace_sec->offset = lseek(fd, 0, SEEK_CUR); |
464 | read_tracing_data(fd, attrs, nr_counters); | 464 | read_tracing_data(fd, &evsel_list); |
465 | trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; | 465 | trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; |
466 | } | 466 | } |
467 | 467 | ||
@@ -1131,8 +1131,7 @@ int event__process_event_type(event_t *self, | |||
1131 | return 0; | 1131 | return 0; |
1132 | } | 1132 | } |
1133 | 1133 | ||
1134 | int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, | 1134 | int event__synthesize_tracing_data(int fd, struct list_head *pattrs, |
1135 | int nb_events, | ||
1136 | event__handler_t process, | 1135 | event__handler_t process, |
1137 | struct perf_session *session __unused) | 1136 | struct perf_session *session __unused) |
1138 | { | 1137 | { |
@@ -1143,7 +1142,7 @@ int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, | |||
1143 | memset(&ev, 0, sizeof(ev)); | 1142 | memset(&ev, 0, sizeof(ev)); |
1144 | 1143 | ||
1145 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; | 1144 | ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; |
1146 | size = read_tracing_data_size(fd, pattrs, nb_events); | 1145 | size = read_tracing_data_size(fd, pattrs); |
1147 | if (size <= 0) | 1146 | if (size <= 0) |
1148 | return size; | 1147 | return size; |
1149 | aligned_size = ALIGN(size, sizeof(u64)); | 1148 | aligned_size = ALIGN(size, sizeof(u64)); |
@@ -1153,7 +1152,7 @@ int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, | |||
1153 | 1152 | ||
1154 | process(&ev, NULL, session); | 1153 | process(&ev, NULL, session); |
1155 | 1154 | ||
1156 | err = read_tracing_data(fd, pattrs, nb_events); | 1155 | err = read_tracing_data(fd, pattrs); |
1157 | write_padded(fd, NULL, 0, padding); | 1156 | write_padded(fd, NULL, 0, padding); |
1158 | 1157 | ||
1159 | return aligned_size; | 1158 | return aligned_size; |
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h index 6335965e1f93..33f16be7b72f 100644 --- a/tools/perf/util/header.h +++ b/tools/perf/util/header.h | |||
@@ -113,8 +113,7 @@ int event__synthesize_event_types(event__handler_t process, | |||
113 | int event__process_event_type(event_t *self, | 113 | int event__process_event_type(event_t *self, |
114 | struct perf_session *session); | 114 | struct perf_session *session); |
115 | 115 | ||
116 | int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, | 116 | int event__synthesize_tracing_data(int fd, struct list_head *pattrs, |
117 | int nb_events, | ||
118 | event__handler_t process, | 117 | event__handler_t process, |
119 | struct perf_session *session); | 118 | struct perf_session *session); |
120 | int event__process_tracing_data(event_t *self, | 119 | int event__process_tracing_data(event_t *self, |
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c index c305305a3884..2d948ad471f4 100644 --- a/tools/perf/util/parse-events.c +++ b/tools/perf/util/parse-events.c | |||
@@ -1,6 +1,7 @@ | |||
1 | #include "../../../include/linux/hw_breakpoint.h" | 1 | #include "../../../include/linux/hw_breakpoint.h" |
2 | #include "util.h" | 2 | #include "util.h" |
3 | #include "../perf.h" | 3 | #include "../perf.h" |
4 | #include "evsel.h" | ||
4 | #include "parse-options.h" | 5 | #include "parse-options.h" |
5 | #include "parse-events.h" | 6 | #include "parse-events.h" |
6 | #include "exec_cmd.h" | 7 | #include "exec_cmd.h" |
@@ -12,8 +13,7 @@ | |||
12 | 13 | ||
13 | int nr_counters; | 14 | int nr_counters; |
14 | 15 | ||
15 | struct perf_event_attr attrs[MAX_COUNTERS]; | 16 | LIST_HEAD(evsel_list); |
16 | char *filters[MAX_COUNTERS]; | ||
17 | 17 | ||
18 | struct event_symbol { | 18 | struct event_symbol { |
19 | u8 type; | 19 | u8 type; |
@@ -266,10 +266,10 @@ static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result) | |||
266 | return name; | 266 | return name; |
267 | } | 267 | } |
268 | 268 | ||
269 | const char *event_name(int counter) | 269 | const char *event_name(struct perf_evsel *evsel) |
270 | { | 270 | { |
271 | u64 config = attrs[counter].config; | 271 | u64 config = evsel->attr.config; |
272 | int type = attrs[counter].type; | 272 | int type = evsel->attr.type; |
273 | 273 | ||
274 | return __event_name(type, config); | 274 | return __event_name(type, config); |
275 | } | 275 | } |
@@ -814,9 +814,6 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
814 | return -1; | 814 | return -1; |
815 | 815 | ||
816 | for (;;) { | 816 | for (;;) { |
817 | if (nr_counters == MAX_COUNTERS) | ||
818 | return -1; | ||
819 | |||
820 | memset(&attr, 0, sizeof(attr)); | 817 | memset(&attr, 0, sizeof(attr)); |
821 | ret = parse_event_symbols(&str, &attr); | 818 | ret = parse_event_symbols(&str, &attr); |
822 | if (ret == EVT_FAILED) | 819 | if (ret == EVT_FAILED) |
@@ -826,8 +823,13 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
826 | return -1; | 823 | return -1; |
827 | 824 | ||
828 | if (ret != EVT_HANDLED_ALL) { | 825 | if (ret != EVT_HANDLED_ALL) { |
829 | attrs[nr_counters] = attr; | 826 | struct perf_evsel *evsel; |
830 | nr_counters++; | 827 | evsel = perf_evsel__new(attr.type, attr.config, |
828 | nr_counters); | ||
829 | if (evsel == NULL) | ||
830 | return -1; | ||
831 | list_add_tail(&evsel->node, &evsel_list); | ||
832 | ++nr_counters; | ||
831 | } | 833 | } |
832 | 834 | ||
833 | if (*str == 0) | 835 | if (*str == 0) |
@@ -844,21 +846,22 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u | |||
844 | int parse_filter(const struct option *opt __used, const char *str, | 846 | int parse_filter(const struct option *opt __used, const char *str, |
845 | int unset __used) | 847 | int unset __used) |
846 | { | 848 | { |
847 | int i = nr_counters - 1; | 849 | struct perf_evsel *last = NULL; |
848 | int len = strlen(str); | ||
849 | 850 | ||
850 | if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { | 851 | if (!list_empty(&evsel_list)) |
852 | last = list_entry(evsel_list.prev, struct perf_evsel, node); | ||
853 | |||
854 | if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) { | ||
851 | fprintf(stderr, | 855 | fprintf(stderr, |
852 | "-F option should follow a -e tracepoint option\n"); | 856 | "-F option should follow a -e tracepoint option\n"); |
853 | return -1; | 857 | return -1; |
854 | } | 858 | } |
855 | 859 | ||
856 | filters[i] = malloc(len + 1); | 860 | last->filter = strdup(str); |
857 | if (!filters[i]) { | 861 | if (last->filter == NULL) { |
858 | fprintf(stderr, "not enough memory to hold filter string\n"); | 862 | fprintf(stderr, "not enough memory to hold filter string\n"); |
859 | return -1; | 863 | return -1; |
860 | } | 864 | } |
861 | strcpy(filters[i], str); | ||
862 | 865 | ||
863 | return 0; | 866 | return 0; |
864 | } | 867 | } |
@@ -967,3 +970,15 @@ void print_events(void) | |||
967 | 970 | ||
968 | exit(129); | 971 | exit(129); |
969 | } | 972 | } |
973 | |||
974 | int perf_evsel_list__create_default(void) | ||
975 | { | ||
976 | struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE, | ||
977 | PERF_COUNT_HW_CPU_CYCLES, 0); | ||
978 | if (evsel == NULL) | ||
979 | return -ENOMEM; | ||
980 | |||
981 | list_add(&evsel->node, &evsel_list); | ||
982 | ++nr_counters; | ||
983 | return 0; | ||
984 | } | ||
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h index fc4ab3fe877a..0f915a01a3f7 100644 --- a/tools/perf/util/parse-events.h +++ b/tools/perf/util/parse-events.h | |||
@@ -4,6 +4,15 @@ | |||
4 | * Parse symbolic events/counts passed in as options: | 4 | * Parse symbolic events/counts passed in as options: |
5 | */ | 5 | */ |
6 | 6 | ||
7 | #include <linux/perf_event.h> | ||
8 | |||
9 | struct list_head; | ||
10 | struct perf_evsel; | ||
11 | |||
12 | extern struct list_head evsel_list; | ||
13 | |||
14 | int perf_evsel_list__create_default(void); | ||
15 | |||
7 | struct option; | 16 | struct option; |
8 | 17 | ||
9 | struct tracepoint_path { | 18 | struct tracepoint_path { |
@@ -13,14 +22,11 @@ struct tracepoint_path { | |||
13 | }; | 22 | }; |
14 | 23 | ||
15 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); | 24 | extern struct tracepoint_path *tracepoint_id_to_path(u64 config); |
16 | extern bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events); | 25 | extern bool have_tracepoints(struct list_head *evsel_list); |
17 | 26 | ||
18 | extern int nr_counters; | 27 | extern int nr_counters; |
19 | 28 | ||
20 | extern struct perf_event_attr attrs[MAX_COUNTERS]; | 29 | const char *event_name(struct perf_evsel *event); |
21 | extern char *filters[MAX_COUNTERS]; | ||
22 | |||
23 | extern const char *event_name(int ctr); | ||
24 | extern const char *__event_name(int type, u64 config); | 30 | extern const char *__event_name(int type, u64 config); |
25 | 31 | ||
26 | extern int parse_events(const struct option *opt, const char *str, int unset); | 32 | extern int parse_events(const struct option *opt, const char *str, int unset); |
@@ -33,5 +39,4 @@ extern void print_events(void); | |||
33 | extern char debugfs_path[]; | 39 | extern char debugfs_path[]; |
34 | extern int valid_debugfs_mount(const char *debugfs); | 40 | extern int valid_debugfs_mount(const char *debugfs); |
35 | 41 | ||
36 | |||
37 | #endif /* __PERF_PARSE_EVENTS_H */ | 42 | #endif /* __PERF_PARSE_EVENTS_H */ |
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c index b1572601286c..35729f4c40cb 100644 --- a/tools/perf/util/trace-event-info.c +++ b/tools/perf/util/trace-event-info.c | |||
@@ -34,11 +34,13 @@ | |||
34 | #include <ctype.h> | 34 | #include <ctype.h> |
35 | #include <errno.h> | 35 | #include <errno.h> |
36 | #include <stdbool.h> | 36 | #include <stdbool.h> |
37 | #include <linux/list.h> | ||
37 | #include <linux/kernel.h> | 38 | #include <linux/kernel.h> |
38 | 39 | ||
39 | #include "../perf.h" | 40 | #include "../perf.h" |
40 | #include "trace-event.h" | 41 | #include "trace-event.h" |
41 | #include "debugfs.h" | 42 | #include "debugfs.h" |
43 | #include "evsel.h" | ||
42 | 44 | ||
43 | #define VERSION "0.5" | 45 | #define VERSION "0.5" |
44 | 46 | ||
@@ -469,16 +471,17 @@ out: | |||
469 | } | 471 | } |
470 | 472 | ||
471 | static struct tracepoint_path * | 473 | static struct tracepoint_path * |
472 | get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) | 474 | get_tracepoints_path(struct list_head *pattrs) |
473 | { | 475 | { |
474 | struct tracepoint_path path, *ppath = &path; | 476 | struct tracepoint_path path, *ppath = &path; |
475 | int i, nr_tracepoints = 0; | 477 | struct perf_evsel *pos; |
478 | int nr_tracepoints = 0; | ||
476 | 479 | ||
477 | for (i = 0; i < nb_events; i++) { | 480 | list_for_each_entry(pos, pattrs, node) { |
478 | if (pattrs[i].type != PERF_TYPE_TRACEPOINT) | 481 | if (pos->attr.type != PERF_TYPE_TRACEPOINT) |
479 | continue; | 482 | continue; |
480 | ++nr_tracepoints; | 483 | ++nr_tracepoints; |
481 | ppath->next = tracepoint_id_to_path(pattrs[i].config); | 484 | ppath->next = tracepoint_id_to_path(pos->attr.config); |
482 | if (!ppath->next) | 485 | if (!ppath->next) |
483 | die("%s\n", "No memory to alloc tracepoints list"); | 486 | die("%s\n", "No memory to alloc tracepoints list"); |
484 | ppath = ppath->next; | 487 | ppath = ppath->next; |
@@ -487,21 +490,21 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) | |||
487 | return nr_tracepoints > 0 ? path.next : NULL; | 490 | return nr_tracepoints > 0 ? path.next : NULL; |
488 | } | 491 | } |
489 | 492 | ||
490 | bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events) | 493 | bool have_tracepoints(struct list_head *pattrs) |
491 | { | 494 | { |
492 | int i; | 495 | struct perf_evsel *pos; |
493 | 496 | ||
494 | for (i = 0; i < nb_events; i++) | 497 | list_for_each_entry(pos, pattrs, node) |
495 | if (pattrs[i].type == PERF_TYPE_TRACEPOINT) | 498 | if (pos->attr.type == PERF_TYPE_TRACEPOINT) |
496 | return true; | 499 | return true; |
497 | 500 | ||
498 | return false; | 501 | return false; |
499 | } | 502 | } |
500 | 503 | ||
501 | int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) | 504 | int read_tracing_data(int fd, struct list_head *pattrs) |
502 | { | 505 | { |
503 | char buf[BUFSIZ]; | 506 | char buf[BUFSIZ]; |
504 | struct tracepoint_path *tps = get_tracepoints_path(pattrs, nb_events); | 507 | struct tracepoint_path *tps = get_tracepoints_path(pattrs); |
505 | 508 | ||
506 | /* | 509 | /* |
507 | * What? No tracepoints? No sense writing anything here, bail out. | 510 | * What? No tracepoints? No sense writing anything here, bail out. |
@@ -545,14 +548,13 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) | |||
545 | return 0; | 548 | return 0; |
546 | } | 549 | } |
547 | 550 | ||
548 | ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs, | 551 | ssize_t read_tracing_data_size(int fd, struct list_head *pattrs) |
549 | int nb_events) | ||
550 | { | 552 | { |
551 | ssize_t size; | 553 | ssize_t size; |
552 | int err = 0; | 554 | int err = 0; |
553 | 555 | ||
554 | calc_data_size = 1; | 556 | calc_data_size = 1; |
555 | err = read_tracing_data(fd, pattrs, nb_events); | 557 | err = read_tracing_data(fd, pattrs); |
556 | size = calc_data_size - 1; | 558 | size = calc_data_size - 1; |
557 | calc_data_size = 0; | 559 | calc_data_size = 0; |
558 | 560 | ||
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h index b3e86b1e4444..b5f12ca24d99 100644 --- a/tools/perf/util/trace-event.h +++ b/tools/perf/util/trace-event.h | |||
@@ -262,9 +262,8 @@ raw_field_value(struct event *event, const char *name, void *data); | |||
262 | void *raw_field_ptr(struct event *event, const char *name, void *data); | 262 | void *raw_field_ptr(struct event *event, const char *name, void *data); |
263 | unsigned long long eval_flag(const char *flag); | 263 | unsigned long long eval_flag(const char *flag); |
264 | 264 | ||
265 | int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); | 265 | int read_tracing_data(int fd, struct list_head *pattrs); |
266 | ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs, | 266 | ssize_t read_tracing_data_size(int fd, struct list_head *pattrs); |
267 | int nb_events); | ||
268 | 267 | ||
269 | /* taken from kernel/trace/trace.h */ | 268 | /* taken from kernel/trace/trace.h */ |
270 | enum trace_flag_type { | 269 | enum trace_flag_type { |
diff --git a/tools/perf/util/xyarray.c b/tools/perf/util/xyarray.c new file mode 100644 index 000000000000..22afbf6c536a --- /dev/null +++ b/tools/perf/util/xyarray.c | |||
@@ -0,0 +1,20 @@ | |||
#include <stdint.h>

#include "xyarray.h"
#include "util.h"
3 | |||
4 | struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size) | ||
5 | { | ||
6 | size_t row_size = ylen * entry_size; | ||
7 | struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size); | ||
8 | |||
9 | if (xy != NULL) { | ||
10 | xy->entry_size = entry_size; | ||
11 | xy->row_size = row_size; | ||
12 | } | ||
13 | |||
14 | return xy; | ||
15 | } | ||
16 | |||
/* Free a matrix allocated by xyarray__new(); free(NULL) is a no-op. */
void xyarray__delete(struct xyarray *xy)
{
	free(xy);
}
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h new file mode 100644 index 000000000000..c488a07275dd --- /dev/null +++ b/tools/perf/util/xyarray.h | |||
@@ -0,0 +1,20 @@ | |||
#ifndef _PERF_XYARRAY_H_
#define _PERF_XYARRAY_H_ 1

#include <sys/types.h>

/*
 * A 2-D array of fixed-size entries stored inline after the header:
 * entry (x, y) lives at contents[x * row_size + y * entry_size].
 */
struct xyarray {
	size_t row_size;	/* bytes per row: ylen * entry_size */
	size_t entry_size;	/* bytes per entry */
	char contents[];	/* flexible array member holding the data */
};

/* Allocate a zeroed xlen x ylen matrix; NULL on failure. */
struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
void xyarray__delete(struct xyarray *xy);

/* Address of entry (x, y); no bounds checking is performed. */
static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
{
	return &xy->contents[x * xy->row_size + y * xy->entry_size];
}

#endif /* _PERF_XYARRAY_H_ */