Diffstat (limited to 'tools/perf')
-rw-r--r--  tools/perf/Makefile                  |    4
-rw-r--r--  tools/perf/builtin-record.c          |  152
-rw-r--r--  tools/perf/builtin-stat.c            |  368
-rw-r--r--  tools/perf/builtin-test.c            |   83
-rw-r--r--  tools/perf/builtin-top.c             |  221
-rw-r--r--  tools/perf/perf.c                    |    2
-rw-r--r--  tools/perf/util/cpumap.c             |  123
-rw-r--r--  tools/perf/util/cpumap.h             |   10
-rw-r--r--  tools/perf/util/evsel.c              |  186
-rw-r--r--  tools/perf/util/evsel.h              |  115
-rw-r--r--  tools/perf/util/header.c             |   15
-rw-r--r--  tools/perf/util/header.h             |    3
-rw-r--r--  tools/perf/util/parse-events.c       |   58
-rw-r--r--  tools/perf/util/parse-events.h       |   18
-rw-r--r--  tools/perf/util/session.c            |   22
-rw-r--r--  tools/perf/util/session.h            |    1
-rw-r--r--  tools/perf/util/thread.c             |   43
-rw-r--r--  tools/perf/util/thread.h             |   15
-rw-r--r--  tools/perf/util/trace-event-info.c   |   30
-rw-r--r--  tools/perf/util/trace-event.h        |    5
-rw-r--r--  tools/perf/util/util.c               |   17
-rw-r--r--  tools/perf/util/util.h               |    1
-rw-r--r--  tools/perf/util/xyarray.c            |   20
-rw-r--r--  tools/perf/util/xyarray.h            |   20
24 files changed, 1013 insertions(+), 519 deletions(-)
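For orientation before the hunks: each builtin previously kept fixed-size tables such as static int *fd[MAX_NR_CPUS][MAX_COUNTERS], plus parallel arrays for counts and scaling state. This series replaces them with one struct perf_evsel per event, linked on evsel_list, whose per-cpu/per-thread file descriptors live in a dynamically sized two-dimensional xyarray. util/xyarray.[ch] and util/evsel.h appear in the diffstat but are not shown on this page, so the following is a sketch inferred from the call sites below; the field names are assumptions, not the verbatim header:

/* Sketch of the 2D array behind the FD() accessor; assumed layout,
 * see util/xyarray.h for the real definition. */
#include <stdlib.h>

struct xyarray {
	size_t row_size;	/* bytes per x row */
	size_t entry_size;	/* bytes per (x, y) entry */
	char contents[];	/* xlen rows of ylen entries */
};

static struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
{
	size_t row_size = ylen * entry_size;
	struct xyarray *xy = calloc(1, sizeof(*xy) + xlen * row_size);

	if (xy != NULL) {
		xy->entry_size = entry_size;
		xy->row_size = row_size;
	}
	return xy;
}

static void *xyarray__entry(struct xyarray *xy, int x, int y)
{
	return &xy->contents[x * xy->row_size + y * xy->entry_size];
}

This accessor is what makes the FD(evsel, cpu, thread) macro used by builtin-record.c and builtin-top.c below possible: it is just xyarray__entry() cast to an int lvalue.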
diff --git a/tools/perf/Makefile b/tools/perf/Makefile
index ac6692cf5508..1b9b13ee2a72 100644
--- a/tools/perf/Makefile
+++ b/tools/perf/Makefile
@@ -396,6 +396,7 @@ LIB_H += util/build-id.h
 LIB_H += util/debug.h
 LIB_H += util/debugfs.h
 LIB_H += util/event.h
+LIB_H += util/evsel.h
 LIB_H += util/exec_cmd.h
 LIB_H += util/types.h
 LIB_H += util/levenshtein.h
@@ -404,6 +405,7 @@ LIB_H += util/parse-options.h
 LIB_H += util/parse-events.h
 LIB_H += util/quote.h
 LIB_H += util/util.h
+LIB_H += util/xyarray.h
 LIB_H += util/header.h
 LIB_H += util/help.h
 LIB_H += util/session.h
@@ -433,6 +435,7 @@ LIB_OBJS += $(OUTPUT)util/ctype.o
 LIB_OBJS += $(OUTPUT)util/debugfs.o
 LIB_OBJS += $(OUTPUT)util/environment.o
 LIB_OBJS += $(OUTPUT)util/event.o
+LIB_OBJS += $(OUTPUT)util/evsel.o
 LIB_OBJS += $(OUTPUT)util/exec_cmd.o
 LIB_OBJS += $(OUTPUT)util/help.o
 LIB_OBJS += $(OUTPUT)util/levenshtein.o
@@ -470,6 +473,7 @@ LIB_OBJS += $(OUTPUT)util/sort.o
 LIB_OBJS += $(OUTPUT)util/hist.o
 LIB_OBJS += $(OUTPUT)util/probe-event.o
 LIB_OBJS += $(OUTPUT)util/util.o
+LIB_OBJS += $(OUTPUT)util/xyarray.o
 LIB_OBJS += $(OUTPUT)util/cpumap.o
 
 BUILTIN_OBJS += $(OUTPUT)builtin-annotate.o
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index 50efbd509b8f..7bc049035484 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -18,6 +18,7 @@
 
 #include "util/header.h"
 #include "util/event.h"
+#include "util/evsel.h"
 #include "util/debug.h"
 #include "util/session.h"
 #include "util/symbol.h"
@@ -27,18 +28,18 @@
 #include <sched.h>
 #include <sys/mman.h>
 
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+
 enum write_mode_t {
 	WRITE_FORCE,
 	WRITE_APPEND
 };
 
-static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
-
 static u64 user_interval = ULLONG_MAX;
 static u64 default_interval = 0;
 static u64 sample_type;
 
-static int nr_cpus = 0;
+static struct cpu_map *cpus;
 static unsigned int page_size;
 static unsigned int mmap_pages = 128;
 static unsigned int user_freq = UINT_MAX;
@@ -53,8 +54,7 @@ static bool sample_id_all_avail = true;
 static bool system_wide = false;
 static pid_t target_pid = -1;
 static pid_t target_tid = -1;
-static pid_t *all_tids = NULL;
-static int thread_num = 0;
+static struct thread_map *threads;
 static pid_t child_pid = -1;
 static bool no_inherit = false;
 static enum write_mode_t write_mode = WRITE_FORCE;
@@ -81,7 +81,6 @@ static struct perf_session *session;
 static const char *cpu_list;
 
 struct mmap_data {
-	int counter;
 	void *base;
 	unsigned int mask;
 	unsigned int prev;
@@ -229,12 +228,12 @@ static struct perf_header_attr *get_header_attr(struct perf_event_attr *a, int n
 	return h_attr;
 }
 
-static void create_counter(int counter, int cpu)
+static void create_counter(struct perf_evsel *evsel, int cpu)
 {
-	char *filter = filters[counter];
-	struct perf_event_attr *attr = attrs + counter;
+	char *filter = evsel->filter;
+	struct perf_event_attr *attr = &evsel->attr;
 	struct perf_header_attr *h_attr;
-	int track = !counter; /* only the first counter needs these */
+	int track = !evsel->idx; /* only the first counter needs these */
 	int thread_index;
 	int ret;
 	struct {
@@ -318,12 +317,11 @@ static void create_counter(int counter, int cpu)
 retry_sample_id:
 	attr->sample_id_all = sample_id_all_avail ? 1 : 0;
 
-	for (thread_index = 0; thread_index < thread_num; thread_index++) {
+	for (thread_index = 0; thread_index < threads->nr; thread_index++) {
 try_again:
-		fd[nr_cpu][counter][thread_index] = sys_perf_event_open(attr,
-				all_tids[thread_index], cpu, group_fd, 0);
+		FD(evsel, nr_cpu, thread_index) = sys_perf_event_open(attr, threads->map[thread_index], cpu, group_fd, 0);
 
-		if (fd[nr_cpu][counter][thread_index] < 0) {
+		if (FD(evsel, nr_cpu, thread_index) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES)
@@ -360,7 +358,7 @@ try_again:
 		}
 		printf("\n");
 		error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
-		      fd[nr_cpu][counter][thread_index], strerror(err));
+		      FD(evsel, nr_cpu, thread_index), strerror(err));
 
 #if defined(__i386__) || defined(__x86_64__)
 		if (attr->type == PERF_TYPE_HARDWARE && err == EOPNOTSUPP)
@@ -374,7 +372,7 @@ try_again:
 			exit(-1);
 		}
 
-		h_attr = get_header_attr(attr, counter);
+		h_attr = get_header_attr(attr, evsel->idx);
 		if (h_attr == NULL)
 			die("nomem\n");
 
@@ -385,7 +383,7 @@ try_again:
 			}
 		}
 
-		if (read(fd[nr_cpu][counter][thread_index], &read_data, sizeof(read_data)) == -1) {
+		if (read(FD(evsel, nr_cpu, thread_index), &read_data, sizeof(read_data)) == -1) {
 			perror("Unable to read perf file descriptor");
 			exit(-1);
 		}
@@ -395,43 +393,44 @@ try_again:
 			exit(-1);
 		}
 
-		assert(fd[nr_cpu][counter][thread_index] >= 0);
-		fcntl(fd[nr_cpu][counter][thread_index], F_SETFL, O_NONBLOCK);
+		assert(FD(evsel, nr_cpu, thread_index) >= 0);
+		fcntl(FD(evsel, nr_cpu, thread_index), F_SETFL, O_NONBLOCK);
 
 		/*
 		 * First counter acts as the group leader:
 		 */
 		if (group && group_fd == -1)
-			group_fd = fd[nr_cpu][counter][thread_index];
+			group_fd = FD(evsel, nr_cpu, thread_index);
 
-		if (counter || thread_index) {
-			ret = ioctl(fd[nr_cpu][counter][thread_index],
-				    PERF_EVENT_IOC_SET_OUTPUT,
-				    fd[nr_cpu][0][0]);
+		if (evsel->idx || thread_index) {
+			struct perf_evsel *first;
+			first = list_entry(evsel_list.next, struct perf_evsel, node);
+			ret = ioctl(FD(evsel, nr_cpu, thread_index),
+				    PERF_EVENT_IOC_SET_OUTPUT,
+				    FD(first, nr_cpu, 0));
 			if (ret) {
 				error("failed to set output: %d (%s)\n", errno,
 				      strerror(errno));
 				exit(-1);
 			}
 		} else {
-			mmap_array[nr_cpu].counter = counter;
 			mmap_array[nr_cpu].prev = 0;
 			mmap_array[nr_cpu].mask = mmap_pages*page_size - 1;
 			mmap_array[nr_cpu].base = mmap(NULL, (mmap_pages+1)*page_size,
-				PROT_READ|PROT_WRITE, MAP_SHARED, fd[nr_cpu][counter][thread_index], 0);
+				PROT_READ | PROT_WRITE, MAP_SHARED, FD(evsel, nr_cpu, thread_index), 0);
 			if (mmap_array[nr_cpu].base == MAP_FAILED) {
 				error("failed to mmap with %d (%s)\n", errno, strerror(errno));
 				exit(-1);
 			}
 
-			event_array[nr_poll].fd = fd[nr_cpu][counter][thread_index];
+			event_array[nr_poll].fd = FD(evsel, nr_cpu, thread_index);
 			event_array[nr_poll].events = POLLIN;
 			nr_poll++;
 		}
 
 		if (filter != NULL) {
-			ret = ioctl(fd[nr_cpu][counter][thread_index],
+			ret = ioctl(FD(evsel, nr_cpu, thread_index),
 				    PERF_EVENT_IOC_SET_FILTER, filter);
 			if (ret) {
 				error("failed to set filter with %d (%s)\n", errno,
 				      strerror(errno));
@@ -446,11 +445,12 @@ try_again:
 
 static void open_counters(int cpu)
 {
-	int counter;
+	struct perf_evsel *pos;
 
 	group_fd = -1;
-	for (counter = 0; counter < nr_counters; counter++)
-		create_counter(counter, cpu);
+
+	list_for_each_entry(pos, &evsel_list, node)
+		create_counter(pos, cpu);
 
 	nr_cpu++;
 }
@@ -537,7 +537,7 @@ static void mmap_read_all(void)
 
 static int __cmd_record(int argc, const char **argv)
 {
-	int i, counter;
+	int i;
 	struct stat st;
 	int flags;
 	int err;
@@ -604,7 +604,7 @@ static int __cmd_record(int argc, const char **argv)
 		goto out_delete_session;
 	}
 
-	if (have_tracepoints(attrs, nr_counters))
+	if (have_tracepoints(&evsel_list))
 		perf_header__set_feat(&session->header, HEADER_TRACE_INFO);
 
 	/*
@@ -652,7 +652,7 @@ static int __cmd_record(int argc, const char **argv)
 	}
 
 	if (!system_wide && target_tid == -1 && target_pid == -1)
-		all_tids[0] = child_pid;
+		threads->map[0] = child_pid;
 
 	close(child_ready_pipe[1]);
 	close(go_pipe[0]);
@@ -666,17 +666,11 @@ static int __cmd_record(int argc, const char **argv)
 		close(child_ready_pipe[0]);
 	}
 
-	nr_cpus = read_cpu_map(cpu_list);
-	if (nr_cpus < 1) {
-		perror("failed to collect number of CPUs");
-		return -1;
-	}
-
 	if (!system_wide && no_inherit && !cpu_list) {
 		open_counters(-1);
 	} else {
-		for (i = 0; i < nr_cpus; i++)
-			open_counters(cpumap[i]);
+		for (i = 0; i < cpus->nr; i++)
+			open_counters(cpus->map[i]);
 	}
 
 	perf_session__set_sample_type(session, sample_type);
@@ -711,7 +705,7 @@ static int __cmd_record(int argc, const char **argv)
 		return err;
 	}
 
-	if (have_tracepoints(attrs, nr_counters)) {
+	if (have_tracepoints(&evsel_list)) {
 		/*
 		 * FIXME err <= 0 here actually means that
 		 * there were no tracepoints so its not really
@@ -720,8 +714,7 @@ static int __cmd_record(int argc, const char **argv)
 		 * return this more properly and also
 		 * propagate errors that now are calling die()
 		 */
-		err = event__synthesize_tracing_data(output, attrs,
-						     nr_counters,
+		err = event__synthesize_tracing_data(output, &evsel_list,
 						     process_synthesized_event,
 						     session);
 		if (err <= 0) {
@@ -795,13 +788,13 @@ static int __cmd_record(int argc, const char **argv)
 
 		if (done) {
 			for (i = 0; i < nr_cpu; i++) {
-				for (counter = 0;
-					counter < nr_counters;
-					counter++) {
+				struct perf_evsel *pos;
+
+				list_for_each_entry(pos, &evsel_list, node) {
 					for (thread = 0;
-						thread < thread_num;
+						thread < threads->nr;
 						thread++)
-						ioctl(fd[i][counter][thread],
+						ioctl(FD(pos, i, thread),
 							PERF_EVENT_IOC_DISABLE);
 				}
 			}
@@ -887,7 +880,8 @@ const struct option record_options[] = {
 
 int cmd_record(int argc, const char **argv, const char *prefix __used)
 {
-	int i, j, err = -ENOMEM;
+	int err = -ENOMEM;
+	struct perf_evsel *pos;
 
 	argc = parse_options(argc, argv, record_options, record_usage,
 			    PARSE_OPT_STOP_AT_NON_OPTION);
@@ -910,38 +904,32 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 	if (no_buildid_cache || no_buildid)
 		disable_buildid_cache();
 
-	if (!nr_counters) {
-		nr_counters = 1;
-		attrs[0].type = PERF_TYPE_HARDWARE;
-		attrs[0].config = PERF_COUNT_HW_CPU_CYCLES;
+	if (list_empty(&evsel_list) && perf_evsel_list__create_default() < 0) {
+		pr_err("Not enough memory for event selector list\n");
+		goto out_symbol_exit;
 	}
 
-	if (target_pid != -1) {
+	if (target_pid != -1)
 		target_tid = target_pid;
-		thread_num = find_all_tid(target_pid, &all_tids);
-		if (thread_num <= 0) {
-			fprintf(stderr, "Can't find all threads of pid %d\n",
-				target_pid);
-			usage_with_options(record_usage, record_options);
-		}
-	} else {
-		all_tids=malloc(sizeof(pid_t));
-		if (!all_tids)
-			goto out_symbol_exit;
 
-		all_tids[0] = target_tid;
-		thread_num = 1;
+	threads = thread_map__new(target_pid, target_tid);
+	if (threads == NULL) {
+		pr_err("Problems finding threads of monitor\n");
+		usage_with_options(record_usage, record_options);
 	}
 
-	for (i = 0; i < MAX_NR_CPUS; i++) {
-		for (j = 0; j < MAX_COUNTERS; j++) {
-			fd[i][j] = malloc(sizeof(int)*thread_num);
-			if (!fd[i][j])
-				goto out_free_fd;
-		}
+	cpus = cpu_map__new(cpu_list);
+	if (cpus == NULL) {
+		perror("failed to parse CPUs map");
+		return -1;
+	}
+
+	list_for_each_entry(pos, &evsel_list, node) {
+		if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+			goto out_free_fd;
 	}
-	event_array = malloc(
-		sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
+	event_array = malloc((sizeof(struct pollfd) * MAX_NR_CPUS *
+			      MAX_COUNTERS * threads->nr));
 	if (!event_array)
 		goto out_free_fd;
 
@@ -968,12 +956,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
 out_free_event_array:
 	free(event_array);
 out_free_fd:
-	for (i = 0; i < MAX_NR_CPUS; i++) {
-		for (j = 0; j < MAX_COUNTERS; j++)
-			free(fd[i][j]);
-	}
-	free(all_tids);
-	all_tids = NULL;
+	thread_map__delete(threads);
+	threads = NULL;
 out_symbol_exit:
 	symbol__exit();
 	return err;
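A note on the allocation pattern above: cmd_record() now calls perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) instead of the old MAX_NR_CPUS x MAX_COUNTERS malloc loops, so storage tracks what was actually requested rather than the compile-time maxima. util/evsel.c is not shown on this page; a plausible implementation, consistent with that call site and with the FD() macro at the top of the file (an assumption, not the verbatim source):

/* Assumed sketch of the util/evsel.c helper used by cmd_record();
 * one int per (cpu, thread) cell in the evsel's xyarray. */
static int perf_evsel__alloc_fd(struct perf_evsel *evsel,
				int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}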
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 7ff746da7e6c..02b2d8013a61 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -43,6 +43,7 @@
 #include "util/parse-options.h"
 #include "util/parse-events.h"
 #include "util/event.h"
+#include "util/evsel.h"
 #include "util/debug.h"
 #include "util/header.h"
 #include "util/cpumap.h"
@@ -71,7 +72,7 @@ static struct perf_event_attr default_attrs[] = {
 };
 
 static bool system_wide = false;
-static int nr_cpus = 0;
+static struct cpu_map *cpus;
 static int run_idx = 0;
 
 static int run_count = 1;
@@ -80,8 +81,7 @@ static bool scale = true;
 static bool no_aggr = false;
 static pid_t target_pid = -1;
 static pid_t target_tid = -1;
-static pid_t *all_tids = NULL;
-static int thread_num = 0;
+static struct thread_map *threads;
 static pid_t child_pid = -1;
 static bool null_run = false;
 static bool big_num = true;
@@ -90,17 +90,6 @@ static const char *cpu_list;
 static const char *csv_sep = NULL;
 static bool csv_output = false;
 
-
-static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
-
-static int event_scaled[MAX_COUNTERS];
-
-static struct {
-	u64 val;
-	u64 ena;
-	u64 run;
-} cpu_counts[MAX_NR_CPUS][MAX_COUNTERS];
-
 static volatile int done = 0;
 
 struct stats
@@ -108,6 +97,22 @@ struct stats
 	double n, mean, M2;
 };
 
+struct perf_stat {
+	struct stats res_stats[3];
+};
+
+static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
+{
+	evsel->priv = zalloc(sizeof(struct perf_stat));
+	return evsel->priv == NULL ? -ENOMEM : 0;
+}
+
+static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
+{
+	free(evsel->priv);
+	evsel->priv = NULL;
+}
+
 static void update_stats(struct stats *stats, u64 val)
 {
 	double delta;
@@ -147,75 +152,38 @@ static double stddev_stats(struct stats *stats)
 	return sqrt(variance_mean);
 }
 
-struct stats event_res_stats[MAX_COUNTERS][3];
 struct stats runtime_nsecs_stats[MAX_NR_CPUS];
 struct stats runtime_cycles_stats[MAX_NR_CPUS];
 struct stats runtime_branches_stats[MAX_NR_CPUS];
 struct stats walltime_nsecs_stats;
 
-#define MATCH_EVENT(t, c, counter) \
-	(attrs[counter].type == PERF_TYPE_##t && \
-	 attrs[counter].config == PERF_COUNT_##c)
-
-#define ERR_PERF_OPEN \
-"counter %d, sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information."
-
-static int create_perf_stat_counter(int counter, bool *perm_err)
+static int create_perf_stat_counter(struct perf_evsel *evsel)
 {
-	struct perf_event_attr *attr = attrs + counter;
-	int thread;
-	int ncreated = 0;
+	struct perf_event_attr *attr = &evsel->attr;
 
 	if (scale)
 		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 				    PERF_FORMAT_TOTAL_TIME_RUNNING;
 
-	if (system_wide) {
-		int cpu;
+	if (system_wide)
+		return perf_evsel__open_per_cpu(evsel, cpus);
 
-		for (cpu = 0; cpu < nr_cpus; cpu++) {
-			fd[cpu][counter][0] = sys_perf_event_open(attr,
-					-1, cpumap[cpu], -1, 0);
-			if (fd[cpu][counter][0] < 0) {
-				if (errno == EPERM || errno == EACCES)
-					*perm_err = true;
-				error(ERR_PERF_OPEN, counter,
-					fd[cpu][counter][0], strerror(errno));
-			} else {
-				++ncreated;
-			}
-		}
-	} else {
-		attr->inherit = !no_inherit;
-		if (target_pid == -1 && target_tid == -1) {
-			attr->disabled = 1;
-			attr->enable_on_exec = 1;
-		}
-		for (thread = 0; thread < thread_num; thread++) {
-			fd[0][counter][thread] = sys_perf_event_open(attr,
-				all_tids[thread], -1, -1, 0);
-			if (fd[0][counter][thread] < 0) {
-				if (errno == EPERM || errno == EACCES)
-					*perm_err = true;
-				error(ERR_PERF_OPEN, counter,
-					fd[0][counter][thread],
-					strerror(errno));
-			} else {
-				++ncreated;
-			}
-		}
+	attr->inherit = !no_inherit;
+	if (target_pid == -1 && target_tid == -1) {
+		attr->disabled = 1;
+		attr->enable_on_exec = 1;
 	}
 
-	return ncreated;
+	return perf_evsel__open_per_thread(evsel, threads);
 }
 
 /*
  * Does the counter have nsecs as a unit?
  */
-static inline int nsec_counter(int counter)
+static inline int nsec_counter(struct perf_evsel *evsel)
 {
-	if (MATCH_EVENT(SOFTWARE, SW_CPU_CLOCK, counter) ||
-	    MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
+	if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
+	    perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
 		return 1;
 
 	return 0;
@@ -225,54 +193,17 @@ static inline int nsec_counter(int counter)
  * Read out the results of a single counter:
  * aggregate counts across CPUs in system-wide mode
  */
-static void read_counter_aggr(int counter)
+static int read_counter_aggr(struct perf_evsel *counter)
 {
-	u64 count[3], single_count[3];
-	int cpu;
-	size_t res, nv;
-	int scaled;
-	int i, thread;
+	struct perf_stat *ps = counter->priv;
+	u64 *count = counter->counts->aggr.values;
+	int i;
 
-	count[0] = count[1] = count[2] = 0;
-
-	nv = scale ? 3 : 1;
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		for (thread = 0; thread < thread_num; thread++) {
-			if (fd[cpu][counter][thread] < 0)
-				continue;
-
-			res = read(fd[cpu][counter][thread],
-					single_count, nv * sizeof(u64));
-			assert(res == nv * sizeof(u64));
-
-			close(fd[cpu][counter][thread]);
-			fd[cpu][counter][thread] = -1;
-
-			count[0] += single_count[0];
-			if (scale) {
-				count[1] += single_count[1];
-				count[2] += single_count[2];
-			}
-		}
-	}
-
-	scaled = 0;
-	if (scale) {
-		if (count[2] == 0) {
-			event_scaled[counter] = -1;
-			count[0] = 0;
-			return;
-		}
-
-		if (count[2] < count[1]) {
-			event_scaled[counter] = 1;
-			count[0] = (unsigned long long)
-				((double)count[0] * count[1] / count[2] + 0.5);
-		}
-	}
+	if (__perf_evsel__read(counter, cpus->nr, threads->nr, scale) < 0)
+		return -1;
 
 	for (i = 0; i < 3; i++)
-		update_stats(&event_res_stats[counter][i], count[i]);
+		update_stats(&ps->res_stats[i], count[i]);
 
 	if (verbose) {
 		fprintf(stderr, "%s: %Ld %Ld %Ld\n", event_name(counter),
@@ -282,74 +213,51 @@ static void read_counter_aggr(int counter)
 	/*
 	 * Save the full runtime - to allow normalization during printout:
 	 */
-	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
+	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
 		update_stats(&runtime_nsecs_stats[0], count[0]);
-	if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
+	if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
 		update_stats(&runtime_cycles_stats[0], count[0]);
-	if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
+	if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
 		update_stats(&runtime_branches_stats[0], count[0]);
+
+	return 0;
 }
 
 /*
  * Read out the results of a single counter:
  * do not aggregate counts across CPUs in system-wide mode
  */
-static void read_counter(int counter)
+static int read_counter(struct perf_evsel *counter)
 {
-	u64 count[3];
+	u64 *count;
 	int cpu;
-	size_t res, nv;
-
-	count[0] = count[1] = count[2] = 0;
-
-	nv = scale ? 3 : 1;
-
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-
-		if (fd[cpu][counter][0] < 0)
-			continue;
-
-		res = read(fd[cpu][counter][0], count, nv * sizeof(u64));
-
-		assert(res == nv * sizeof(u64));
 
-		close(fd[cpu][counter][0]);
-		fd[cpu][counter][0] = -1;
+	for (cpu = 0; cpu < cpus->nr; cpu++) {
+		if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
+			return -1;
 
-		if (scale) {
-			if (count[2] == 0) {
-				count[0] = 0;
-			} else if (count[2] < count[1]) {
-				count[0] = (unsigned long long)
-					((double)count[0] * count[1] / count[2] + 0.5);
-			}
-		}
-		cpu_counts[cpu][counter].val = count[0]; /* scaled count */
-		cpu_counts[cpu][counter].ena = count[1];
-		cpu_counts[cpu][counter].run = count[2];
+		count = counter->counts->cpu[cpu].values;
 
-		if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter))
+		if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
 			update_stats(&runtime_nsecs_stats[cpu], count[0]);
-		if (MATCH_EVENT(HARDWARE, HW_CPU_CYCLES, counter))
+		if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
 			update_stats(&runtime_cycles_stats[cpu], count[0]);
-		if (MATCH_EVENT(HARDWARE, HW_BRANCH_INSTRUCTIONS, counter))
+		if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
 			update_stats(&runtime_branches_stats[cpu], count[0]);
 	}
+
+	return 0;
 }
 
 static int run_perf_stat(int argc __used, const char **argv)
 {
 	unsigned long long t0, t1;
+	struct perf_evsel *counter;
 	int status = 0;
-	int counter, ncreated = 0;
 	int child_ready_pipe[2], go_pipe[2];
-	bool perm_err = false;
 	const bool forks = (argc > 0);
 	char buf;
 
-	if (!system_wide)
-		nr_cpus = 1;
-
 	if (forks && (pipe(child_ready_pipe) < 0 || pipe(go_pipe) < 0)) {
 		perror("failed to create pipes");
 		exit(1);
@@ -389,7 +297,7 @@ static int run_perf_stat(int argc __used, const char **argv)
 	}
 
 	if (target_tid == -1 && target_pid == -1 && !system_wide)
-		all_tids[0] = child_pid;
+		threads->map[0] = child_pid;
 
 	/*
 	 * Wait for the child to be ready to exec.
@@ -401,19 +309,23 @@ static int run_perf_stat(int argc __used, const char **argv)
 		close(child_ready_pipe[0]);
 	}
 
-	for (counter = 0; counter < nr_counters; counter++)
-		ncreated += create_perf_stat_counter(counter, &perm_err);
-
-	if (ncreated < nr_counters) {
-		if (perm_err)
-			error("You may not have permission to collect %sstats.\n"
-			      "\t Consider tweaking"
-			      " /proc/sys/kernel/perf_event_paranoid or running as root.",
-			      system_wide ? "system-wide " : "");
-		die("Not all events could be opened.\n");
-		if (child_pid != -1)
-			kill(child_pid, SIGTERM);
-		return -1;
+	list_for_each_entry(counter, &evsel_list, node) {
+		if (create_perf_stat_counter(counter) < 0) {
+			if (errno == -EPERM || errno == -EACCES) {
+				error("You may not have permission to collect %sstats.\n"
+				      "\t Consider tweaking"
+				      " /proc/sys/kernel/perf_event_paranoid or running as root.",
+				      system_wide ? "system-wide " : "");
+			} else {
+				error("open_counter returned with %d (%s). "
+				      "/bin/dmesg may provide additional information.\n",
+				      errno, strerror(errno));
+			}
+			if (child_pid != -1)
+				kill(child_pid, SIGTERM);
+			die("Not all events could be opened.\n");
+			return -1;
+		}
 	}
 
 	/*
@@ -433,25 +345,33 @@ static int run_perf_stat(int argc __used, const char **argv)
 	update_stats(&walltime_nsecs_stats, t1 - t0);
 
 	if (no_aggr) {
-		for (counter = 0; counter < nr_counters; counter++)
+		list_for_each_entry(counter, &evsel_list, node) {
 			read_counter(counter);
+			perf_evsel__close_fd(counter, cpus->nr, 1);
+		}
 	} else {
-		for (counter = 0; counter < nr_counters; counter++)
+		list_for_each_entry(counter, &evsel_list, node) {
 			read_counter_aggr(counter);
+			perf_evsel__close_fd(counter, cpus->nr, threads->nr);
+		}
 	}
+
 	return WEXITSTATUS(status);
 }
 
-static void print_noise(int counter, double avg)
+static void print_noise(struct perf_evsel *evsel, double avg)
 {
+	struct perf_stat *ps;
+
 	if (run_count == 1)
 		return;
 
+	ps = evsel->priv;
 	fprintf(stderr, " ( +- %7.3f%% )",
-			100 * stddev_stats(&event_res_stats[counter][0]) / avg);
+			100 * stddev_stats(&ps->res_stats[0]) / avg);
 }
 
-static void nsec_printout(int cpu, int counter, double avg)
+static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
 {
 	double msecs = avg / 1e6;
 	char cpustr[16] = { '\0', };
@@ -460,20 +380,19 @@ static void nsec_printout(int cpu, int counter, double avg)
 	if (no_aggr)
 		sprintf(cpustr, "CPU%*d%s",
 			csv_output ? 0 : -4,
-			cpumap[cpu], csv_sep);
+			cpus->map[cpu], csv_sep);
 
-	fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(counter));
+	fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel));
 
 	if (csv_output)
 		return;
 
-	if (MATCH_EVENT(SOFTWARE, SW_TASK_CLOCK, counter)) {
+	if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
 		fprintf(stderr, " # %10.3f CPUs ",
 			avg / avg_stats(&walltime_nsecs_stats));
-	}
 }
 
-static void abs_printout(int cpu, int counter, double avg)
+static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
 {
 	double total, ratio = 0.0;
 	char cpustr[16] = { '\0', };
@@ -489,23 +408,23 @@ static void abs_printout(int cpu, int counter, double avg)
 	if (no_aggr)
 		sprintf(cpustr, "CPU%*d%s",
 			csv_output ? 0 : -4,
-			cpumap[cpu], csv_sep);
+			cpus->map[cpu], csv_sep);
 	else
 		cpu = 0;
 
-	fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(counter));
+	fprintf(stderr, fmt, cpustr, avg, csv_sep, event_name(evsel));
 
 	if (csv_output)
 		return;
 
-	if (MATCH_EVENT(HARDWARE, HW_INSTRUCTIONS, counter)) {
+	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
 		total = avg_stats(&runtime_cycles_stats[cpu]);
 
 		if (total)
 			ratio = avg / total;
 
 		fprintf(stderr, " # %10.3f IPC ", ratio);
-	} else if (MATCH_EVENT(HARDWARE, HW_BRANCH_MISSES, counter) &&
+	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
 			runtime_branches_stats[cpu].n != 0) {
 		total = avg_stats(&runtime_branches_stats[cpu]);
 
@@ -528,10 +447,11 @@ static void abs_printout(int cpu, int counter, double avg)
  * Print out the results of a single counter:
  * aggregated counts in system-wide mode
  */
-static void print_counter_aggr(int counter)
+static void print_counter_aggr(struct perf_evsel *counter)
 {
-	double avg = avg_stats(&event_res_stats[counter][0]);
-	int scaled = event_scaled[counter];
+	struct perf_stat *ps = counter->priv;
+	double avg = avg_stats(&ps->res_stats[0]);
+	int scaled = counter->counts->scaled;
 
 	if (scaled == -1) {
 		fprintf(stderr, "%*s%s%-24s\n",
@@ -555,8 +475,8 @@ static void print_counter_aggr(int counter)
 	if (scaled) {
 		double avg_enabled, avg_running;
 
-		avg_enabled = avg_stats(&event_res_stats[counter][1]);
-		avg_running = avg_stats(&event_res_stats[counter][2]);
+		avg_enabled = avg_stats(&ps->res_stats[1]);
+		avg_running = avg_stats(&ps->res_stats[2]);
 
 		fprintf(stderr, " (scaled from %.2f%%)",
 			100 * avg_running / avg_enabled);
@@ -569,19 +489,19 @@ static void print_counter_aggr(int counter)
  * Print out the results of a single counter:
  * does not use aggregated count in system-wide
  */
-static void print_counter(int counter)
+static void print_counter(struct perf_evsel *counter)
 {
 	u64 ena, run, val;
 	int cpu;
 
-	for (cpu = 0; cpu < nr_cpus; cpu++) {
-		val = cpu_counts[cpu][counter].val;
-		ena = cpu_counts[cpu][counter].ena;
-		run = cpu_counts[cpu][counter].run;
+	for (cpu = 0; cpu < cpus->nr; cpu++) {
+		val = counter->counts->cpu[cpu].val;
+		ena = counter->counts->cpu[cpu].ena;
+		run = counter->counts->cpu[cpu].run;
 		if (run == 0 || ena == 0) {
 			fprintf(stderr, "CPU%*d%s%*s%s%-24s",
 				csv_output ? 0 : -4,
-				cpumap[cpu], csv_sep,
+				cpus->map[cpu], csv_sep,
 				csv_output ? 0 : 18,
 				"<not counted>", csv_sep,
 				event_name(counter));
@@ -609,7 +529,8 @@ static void print_counter(int counter)
 
 static void print_stat(int argc, const char **argv)
 {
-	int i, counter;
+	struct perf_evsel *counter;
+	int i;
 
 	fflush(stdout);
 
@@ -632,10 +553,10 @@ static void print_stat(int argc, const char **argv)
 	}
 
 	if (no_aggr) {
-		for (counter = 0; counter < nr_counters; counter++)
+		list_for_each_entry(counter, &evsel_list, node)
 			print_counter(counter);
 	} else {
-		for (counter = 0; counter < nr_counters; counter++)
+		list_for_each_entry(counter, &evsel_list, node)
 			print_counter_aggr(counter);
 	}
 
@@ -720,8 +641,8 @@ static const struct option options[] = {
 
 int cmd_stat(int argc, const char **argv, const char *prefix __used)
 {
-	int status;
-	int i,j;
+	struct perf_evsel *pos;
+	int status = -ENOMEM;
 
 	setlocale(LC_ALL, "");
 
@@ -757,41 +678,45 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 
 	/* Set attrs and nr_counters if no event is selected and !null_run */
 	if (!null_run && !nr_counters) {
-		memcpy(attrs, default_attrs, sizeof(default_attrs));
+		size_t c;
+
 		nr_counters = ARRAY_SIZE(default_attrs);
+
+		for (c = 0; c < ARRAY_SIZE(default_attrs); ++c) {
+			pos = perf_evsel__new(default_attrs[c].type,
+					      default_attrs[c].config,
+					      nr_counters);
+			if (pos == NULL)
+				goto out;
+			list_add(&pos->node, &evsel_list);
+		}
 	}
 
-	if (system_wide)
-		nr_cpus = read_cpu_map(cpu_list);
-	else
-		nr_cpus = 1;
+	if (target_pid != -1)
+		target_tid = target_pid;
 
-	if (nr_cpus < 1)
+	threads = thread_map__new(target_pid, target_tid);
+	if (threads == NULL) {
+		pr_err("Problems finding threads of monitor\n");
 		usage_with_options(stat_usage, options);
+	}
 
-	if (target_pid != -1) {
-		target_tid = target_pid;
-		thread_num = find_all_tid(target_pid, &all_tids);
-		if (thread_num <= 0) {
-			fprintf(stderr, "Can't find all threads of pid %d\n",
-				target_pid);
-			usage_with_options(stat_usage, options);
-		}
-	} else {
-		all_tids=malloc(sizeof(pid_t));
-		if (!all_tids)
-			return -ENOMEM;
+	if (system_wide)
+		cpus = cpu_map__new(cpu_list);
+	else
+		cpus = cpu_map__dummy_new();
 
-		all_tids[0] = target_tid;
-		thread_num = 1;
+	if (cpus == NULL) {
+		perror("failed to parse CPUs map");
+		usage_with_options(stat_usage, options);
+		return -1;
 	}
 
-	for (i = 0; i < MAX_NR_CPUS; i++) {
-		for (j = 0; j < MAX_COUNTERS; j++) {
-			fd[i][j] = malloc(sizeof(int)*thread_num);
-			if (!fd[i][j])
-				return -ENOMEM;
-		}
+	list_for_each_entry(pos, &evsel_list, node) {
+		if (perf_evsel__alloc_stat_priv(pos) < 0 ||
+		    perf_evsel__alloc_counts(pos, cpus->nr) < 0 ||
+		    perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
+			goto out_free_fd;
 	}
 
 	/*
@@ -814,6 +739,11 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
 
 	if (status != -1)
 		print_stat(argc, argv);
-
+out_free_fd:
+	list_for_each_entry(pos, &evsel_list, node)
+		perf_evsel__free_stat_priv(pos);
+out:
+	thread_map__delete(threads);
+	threads = NULL;
 	return status;
 }
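Two pieces of the new util/evsel.h that builtin-stat.c now leans on are not shown on this page. Their shape can be inferred from the call sites above; the following is a hedged sketch, not the verbatim header:

typedef unsigned long long u64;	/* stand-in for the util/types.h typedef */

/* The old MATCH_EVENT() indexed the global attrs[] array; its per-evsel
 * replacement presumably compares the selector's own attr: */
#define perf_evsel__match(evsel, t, c)			\
	((evsel)->attr.type   == PERF_TYPE_##t &&	\
	 (evsel)->attr.config == PERF_COUNT_##c)

/* read_counter() uses counter->counts->cpu[cpu].val/.ena/.run while
 * read_counter_aggr() walks ...->aggr.values[0..2], which suggests each
 * counts slot is a three-u64 union: */
struct perf_counts_values {
	union {
		struct { u64 val, ena, run; };
		u64 values[3];
	};
};

With that layout, __perf_evsel__read() can aggregate the per-cpu/per-thread reads into counts->aggr and fold the enabled/running scaling that each builtin previously reimplemented by hand.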
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index e0c3f471f22d..6c991529f62b 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -234,6 +234,85 @@ out:
 	return err;
 }
 
+#include "util/evsel.h"
+#include <sys/types.h>
+
+static int trace_event__id(const char *event_name)
+{
+	char *filename;
+	int err = -1, fd;
+
+	if (asprintf(&filename,
+		     "/sys/kernel/debug/tracing/events/syscalls/%s/id",
+		     event_name) < 0)
+		return -1;
+
+	fd = open(filename, O_RDONLY);
+	if (fd >= 0) {
+		char id[16];
+		if (read(fd, id, sizeof(id)) > 0)
+			err = atoi(id);
+		close(fd);
+	}
+
+	free(filename);
+	return err;
+}
+
+static int test__open_syscall_event(void)
+{
+	int err = -1, fd;
+	struct thread_map *threads;
+	struct perf_evsel *evsel;
+	unsigned int nr_open_calls = 111, i;
+	int id = trace_event__id("sys_enter_open");
+
+	if (id < 0) {
+		pr_debug("trace_event__id(\"sys_enter_open\") ");
+		return -1;
+	}
+
+	threads = thread_map__new(-1, getpid());
+	if (threads == NULL) {
+		pr_debug("thread_map__new ");
+		return -1;
+	}
+
+	evsel = perf_evsel__new(PERF_TYPE_TRACEPOINT, id, 0);
+	if (evsel == NULL) {
+		pr_debug("perf_evsel__new ");
+		goto out_thread_map_delete;
+	}
+
+	if (perf_evsel__open_per_thread(evsel, threads) < 0) {
+		pr_debug("perf_evsel__open_per_thread ");
+		goto out_evsel_delete;
+	}
+
+	for (i = 0; i < nr_open_calls; ++i) {
+		fd = open("/etc/passwd", O_RDONLY);
+		close(fd);
+	}
+
+	if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
+		pr_debug("perf_evsel__open_read_on_cpu ");
+		goto out_close_fd;
+	}
+
+	if (evsel->counts->cpu[0].val != nr_open_calls)
+		pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %Ld ",
+			 nr_open_calls, evsel->counts->cpu[0].val);
+
+	err = 0;
+out_close_fd:
+	perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+	perf_evsel__delete(evsel);
+out_thread_map_delete:
+	thread_map__delete(threads);
+	return err;
+}
+
 static struct test {
 	const char *desc;
 	int (*func)(void);
@@ -243,6 +322,10 @@ static struct test {
 		.func = test__vmlinux_matches_kallsyms,
 	},
 	{
+		.desc = "detect open syscall event",
+		.func = test__open_syscall_event,
+	},
+	{
 		.func = NULL,
 	},
 };
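The new test doubles as a compact usage example for the perf_evsel API: construct, open per thread, read on cpu 0, close, delete. Reduced to a skeleton under the assumption that the open/read helpers return 0 on success as the test's error checks imply (error paths elided; requires the util/evsel.h and util/thread.h introduced by this series):

/* Assumed-lifecycle sketch; identifiers as used in the test above. */
#include <unistd.h>

static int evsel_lifecycle_sketch(int tracepoint_id)
{
	struct thread_map *threads = thread_map__new(-1, getpid());
	struct perf_evsel *evsel;
	int err = -1;

	if (threads == NULL)
		return -1;

	evsel = perf_evsel__new(PERF_TYPE_TRACEPOINT, tracepoint_id, 0);
	if (evsel != NULL && perf_evsel__open_per_thread(evsel, threads) == 0) {
		/* ... run the workload to be counted here ... */
		err = perf_evsel__read_on_cpu(evsel, 0, 0);	/* fills evsel->counts->cpu[0] */
		perf_evsel__close_fd(evsel, 1, threads->nr);
	}
	if (evsel != NULL)
		perf_evsel__delete(evsel);
	thread_map__delete(threads);
	return err;
}

After this patch, running perf test should list "detect open syscall event" alongside the existing vmlinux/kallsyms test.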
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index ae15f046c405..1e67ab9c7ebc 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -21,6 +21,7 @@
 #include "perf.h"
 
 #include "util/color.h"
+#include "util/evsel.h"
 #include "util/session.h"
 #include "util/symbol.h"
 #include "util/thread.h"
@@ -29,6 +30,7 @@
 #include "util/parse-options.h"
 #include "util/parse-events.h"
 #include "util/cpumap.h"
+#include "util/xyarray.h"
 
 #include "util/debug.h"
 
@@ -55,7 +57,7 @@
 #include <linux/unistd.h>
 #include <linux/types.h>
 
-static int *fd[MAX_NR_CPUS][MAX_COUNTERS];
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 
 static bool system_wide = false;
 
@@ -66,10 +68,9 @@ static int print_entries;
 
 static int target_pid = -1;
 static int target_tid = -1;
-static pid_t *all_tids = NULL;
-static int thread_num = 0;
+static struct thread_map *threads;
 static bool inherit = false;
-static int nr_cpus = 0;
+static struct cpu_map *cpus;
 static int realtime_prio = 0;
 static bool group = false;
 static unsigned int page_size;
@@ -100,6 +101,7 @@ struct sym_entry *sym_filter_entry = NULL;
 struct sym_entry *sym_filter_entry_sched = NULL;
 static int sym_pcnt_filter = 5;
 static int sym_counter = 0;
+static struct perf_evsel *sym_evsel = NULL;
 static int display_weighted = -1;
 static const char *cpu_list;
 
@@ -353,7 +355,7 @@ static void show_details(struct sym_entry *syme)
 		return;
 
 	symbol = sym_entry__symbol(syme);
-	printf("Showing %s for %s\n", event_name(sym_counter), symbol->name);
+	printf("Showing %s for %s\n", event_name(sym_evsel), symbol->name);
 	printf(" Events Pcnt (>=%d%%)\n", sym_pcnt_filter);
 
 	pthread_mutex_lock(&syme->src->lock);
@@ -460,7 +462,8 @@ static void rb_insert_active_sym(struct rb_root *tree, struct sym_entry *se)
 static void print_sym_table(void)
 {
 	int printed = 0, j;
-	int counter, snap = !display_weighted ? sym_counter : 0;
+	struct perf_evsel *counter;
+	int snap = !display_weighted ? sym_counter : 0;
 	float samples_per_sec = samples/delay_secs;
 	float ksamples_per_sec = kernel_samples/delay_secs;
 	float us_samples_per_sec = (us_samples)/delay_secs;
@@ -532,7 +535,9 @@ static void print_sym_table(void)
 	}
 
 	if (nr_counters == 1 || !display_weighted) {
-		printf("%Ld", (u64)attrs[0].sample_period);
+		struct perf_evsel *first;
+		first = list_entry(evsel_list.next, struct perf_evsel, node);
+		printf("%Ld", first->attr.sample_period);
 		if (freq)
 			printf("Hz ");
 		else
@@ -540,9 +545,9 @@ static void print_sym_table(void)
540 } 545 }
541 546
542 if (!display_weighted) 547 if (!display_weighted)
543 printf("%s", event_name(sym_counter)); 548 printf("%s", event_name(sym_evsel));
544 else for (counter = 0; counter < nr_counters; counter++) { 549 else list_for_each_entry(counter, &evsel_list, node) {
545 if (counter) 550 if (counter->idx)
546 printf("/"); 551 printf("/");
547 552
548 printf("%s", event_name(counter)); 553 printf("%s", event_name(counter));
@@ -558,12 +563,12 @@ static void print_sym_table(void)
 		printf(" (all");
 
 	if (cpu_list)
-		printf(", CPU%s: %s)\n", nr_cpus > 1 ? "s" : "", cpu_list);
+		printf(", CPU%s: %s)\n", cpus->nr > 1 ? "s" : "", cpu_list);
 	else {
 		if (target_tid != -1)
 			printf(")\n");
 		else
-			printf(", %d CPU%s)\n", nr_cpus, nr_cpus > 1 ? "s" : "");
+			printf(", %d CPU%s)\n", cpus->nr, cpus->nr > 1 ? "s" : "");
 	}
 
 	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
@@ -739,7 +744,7 @@ static void print_mapped_keys(void)
 	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", print_entries);
 
 	if (nr_counters > 1)
-		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_counter));
+		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", event_name(sym_evsel));
 
 	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", count_filter);
 
@@ -826,19 +831,23 @@ static void handle_keypress(struct perf_session *session, int c)
 			break;
 		case 'E':
 			if (nr_counters > 1) {
-				int i;
-
 				fprintf(stderr, "\nAvailable events:");
-				for (i = 0; i < nr_counters; i++)
-					fprintf(stderr, "\n\t%d %s", i, event_name(i));
+
+				list_for_each_entry(sym_evsel, &evsel_list, node)
+					fprintf(stderr, "\n\t%d %s", sym_evsel->idx, event_name(sym_evsel));
 
 				prompt_integer(&sym_counter, "Enter details event counter");
 
 				if (sym_counter >= nr_counters) {
-					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(0));
+					sym_evsel = list_entry(evsel_list.next, struct perf_evsel, node);
 					sym_counter = 0;
+					fprintf(stderr, "Sorry, no such event, using %s.\n", event_name(sym_evsel));
 					sleep(1);
+					break;
 				}
+				list_for_each_entry(sym_evsel, &evsel_list, node)
+					if (sym_evsel->idx == sym_counter)
+						break;
 			} else sym_counter = 0;
 			break;
844 case 'f': 853 case 'f':
@@ -978,7 +987,8 @@ static int symbol_filter(struct map *map, struct symbol *sym)
978 987
979static void event__process_sample(const event_t *self, 988static void event__process_sample(const event_t *self,
980 struct sample_data *sample, 989 struct sample_data *sample,
981 struct perf_session *session, int counter) 990 struct perf_session *session,
991 struct perf_evsel *evsel)
982{ 992{
983 u64 ip = self->ip.ip; 993 u64 ip = self->ip.ip;
984 struct sym_entry *syme; 994 struct sym_entry *syme;
@@ -1071,9 +1081,9 @@ static void event__process_sample(const event_t *self,
 
 	syme = symbol__priv(al.sym);
 	if (!syme->skip) {
-		syme->count[counter]++;
+		syme->count[evsel->idx]++;
 		syme->origin = origin;
-		record_precise_ip(syme, counter, ip);
+		record_precise_ip(syme, evsel->idx, ip);
 		pthread_mutex_lock(&active_symbols_lock);
 		if (list_empty(&syme->node) || !syme->node.next)
 			__list_insert_active_sym(syme);
@@ -1082,12 +1092,24 @@ static void event__process_sample(const event_t *self,
 }
 
 struct mmap_data {
-	int counter;
 	void *base;
 	int mask;
 	unsigned int prev;
 };
 
+static int perf_evsel__alloc_mmap_per_thread(struct perf_evsel *evsel,
+					     int ncpus, int nthreads)
+{
+	evsel->priv = xyarray__new(ncpus, nthreads, sizeof(struct mmap_data));
+	return evsel->priv != NULL ? 0 : -ENOMEM;
+}
+
+static void perf_evsel__free_mmap(struct perf_evsel *evsel)
+{
+	xyarray__delete(evsel->priv);
+	evsel->priv = NULL;
+}
+
 static unsigned int mmap_read_head(struct mmap_data *md)
 {
 	struct perf_event_mmap_page *pc = md->base;
@@ -1100,8 +1122,11 @@ static unsigned int mmap_read_head(struct mmap_data *md)
 }
 
 static void perf_session__mmap_read_counter(struct perf_session *self,
-					    struct mmap_data *md)
+					    struct perf_evsel *evsel,
+					    int cpu, int thread_idx)
 {
+	struct xyarray *mmap_array = evsel->priv;
+	struct mmap_data *md = xyarray__entry(mmap_array, cpu, thread_idx);
 	unsigned int head = mmap_read_head(md);
 	unsigned int old = md->prev;
 	unsigned char *data = md->base + page_size;
@@ -1155,7 +1180,7 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
 
 		event__parse_sample(event, self, &sample);
 		if (event->header.type == PERF_RECORD_SAMPLE)
-			event__process_sample(event, &sample, self, md->counter);
+			event__process_sample(event, &sample, self, evsel);
 		else
 			event__process(event, &sample, self);
 		old += size;
@@ -1165,36 +1190,39 @@ static void perf_session__mmap_read_counter(struct perf_session *self,
 }
 
 static struct pollfd *event_array;
-static struct mmap_data *mmap_array[MAX_NR_CPUS][MAX_COUNTERS];
 
 static void perf_session__mmap_read(struct perf_session *self)
 {
-	int i, counter, thread_index;
+	struct perf_evsel *counter;
+	int i, thread_index;
 
-	for (i = 0; i < nr_cpus; i++) {
-		for (counter = 0; counter < nr_counters; counter++)
-			for (thread_index = 0;
-			     thread_index < thread_num;
-			     thread_index++) {
-				perf_session__mmap_read_counter(self,
-					&mmap_array[i][counter][thread_index]);
-			}
+	for (i = 0; i < cpus->nr; i++) {
+		list_for_each_entry(counter, &evsel_list, node) {
+			for (thread_index = 0;
+			     thread_index < threads->nr;
+			     thread_index++) {
+				perf_session__mmap_read_counter(self,
+					counter, i, thread_index);
+			}
+		}
 	}
 }
 
 int nr_poll;
 int group_fd;
 
-static void start_counter(int i, int counter)
+static void start_counter(int i, struct perf_evsel *evsel)
 {
+	struct xyarray *mmap_array = evsel->priv;
+	struct mmap_data *mm;
 	struct perf_event_attr *attr;
 	int cpu = -1;
 	int thread_index;
 
 	if (target_tid == -1)
-		cpu = cpumap[i];
+		cpu = cpus->map[i];
 
-	attr = attrs + counter;
+	attr = &evsel->attr;
 
 	attr->sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID;
 
@@ -1207,12 +1235,12 @@ static void start_counter(int i, int counter)
 	attr->inherit = (cpu < 0) && inherit;
 	attr->mmap = 1;
 
-	for (thread_index = 0; thread_index < thread_num; thread_index++) {
+	for (thread_index = 0; thread_index < threads->nr; thread_index++) {
 try_again:
-		fd[i][counter][thread_index] = sys_perf_event_open(attr,
-				all_tids[thread_index], cpu, group_fd, 0);
+		FD(evsel, i, thread_index) = sys_perf_event_open(attr,
+				threads->map[thread_index], cpu, group_fd, 0);
 
-		if (fd[i][counter][thread_index] < 0) {
+		if (FD(evsel, i, thread_index) < 0) {
 			int err = errno;
 
 			if (err == EPERM || err == EACCES)
@@ -1236,29 +1264,29 @@ try_again:
 		}
 		printf("\n");
 		error("sys_perf_event_open() syscall returned with %d (%s). /bin/dmesg may provide additional information.\n",
-		      fd[i][counter][thread_index], strerror(err));
+		      FD(evsel, i, thread_index), strerror(err));
 		die("No CONFIG_PERF_EVENTS=y kernel support configured?\n");
 		exit(-1);
 		}
-		assert(fd[i][counter][thread_index] >= 0);
-		fcntl(fd[i][counter][thread_index], F_SETFL, O_NONBLOCK);
+		assert(FD(evsel, i, thread_index) >= 0);
+		fcntl(FD(evsel, i, thread_index), F_SETFL, O_NONBLOCK);
 
 		/*
 		 * First counter acts as the group leader:
 		 */
 		if (group && group_fd == -1)
-			group_fd = fd[i][counter][thread_index];
+			group_fd = FD(evsel, i, thread_index);
 
-		event_array[nr_poll].fd = fd[i][counter][thread_index];
+		event_array[nr_poll].fd = FD(evsel, i, thread_index);
 		event_array[nr_poll].events = POLLIN;
 		nr_poll++;
 
-		mmap_array[i][counter][thread_index].counter = counter;
-		mmap_array[i][counter][thread_index].prev = 0;
-		mmap_array[i][counter][thread_index].mask = mmap_pages*page_size - 1;
-		mmap_array[i][counter][thread_index].base = mmap(NULL, (mmap_pages+1)*page_size,
-			PROT_READ, MAP_SHARED, fd[i][counter][thread_index], 0);
-		if (mmap_array[i][counter][thread_index].base == MAP_FAILED)
+		mm = xyarray__entry(mmap_array, i, thread_index);
+		mm->prev = 0;
+		mm->mask = mmap_pages*page_size - 1;
+		mm->base = mmap(NULL, (mmap_pages+1)*page_size,
+				PROT_READ, MAP_SHARED, FD(evsel, i, thread_index), 0);
+		if (mm->base == MAP_FAILED)
 			die("failed to mmap with %d (%s)\n", errno, strerror(errno));
 	}
 }
@@ -1266,8 +1294,8 @@ try_again:
1266static int __cmd_top(void) 1294static int __cmd_top(void)
1267{ 1295{
1268 pthread_t thread; 1296 pthread_t thread;
1269 int i, counter; 1297 struct perf_evsel *counter;
1270 int ret; 1298 int i, ret;
1271 /* 1299 /*
1272 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this 1300 * FIXME: perf_session__new should allow passing a O_MMAP, so that all this
1273 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now. 1301 * mmap reading, etc is encapsulated in it. Use O_WRONLY for now.
@@ -1281,9 +1309,9 @@ static int __cmd_top(void)
1281 else 1309 else
1282 event__synthesize_threads(event__process, session); 1310 event__synthesize_threads(event__process, session);
1283 1311
1284 for (i = 0; i < nr_cpus; i++) { 1312 for (i = 0; i < cpus->nr; i++) {
1285 group_fd = -1; 1313 group_fd = -1;
1286 for (counter = 0; counter < nr_counters; counter++) 1314 list_for_each_entry(counter, &evsel_list, node)
1287 start_counter(i, counter); 1315 start_counter(i, counter);
1288 } 1316 }
1289 1317
@@ -1372,8 +1400,8 @@ static const struct option options[] = {
1372 1400
1373int cmd_top(int argc, const char **argv, const char *prefix __used) 1401int cmd_top(int argc, const char **argv, const char *prefix __used)
1374{ 1402{
1375 int counter; 1403 struct perf_evsel *pos;
1376 int i,j; 1404 int status = -ENOMEM;
1377 1405
1378 page_size = sysconf(_SC_PAGE_SIZE); 1406 page_size = sysconf(_SC_PAGE_SIZE);
1379 1407
@@ -1381,34 +1409,17 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1381 if (argc) 1409 if (argc)
1382 usage_with_options(top_usage, options); 1410 usage_with_options(top_usage, options);
1383 1411
1384 if (target_pid != -1) { 1412 if (target_pid != -1)
1385 target_tid = target_pid; 1413 target_tid = target_pid;
1386 thread_num = find_all_tid(target_pid, &all_tids);
1387 if (thread_num <= 0) {
1388 fprintf(stderr, "Can't find all threads of pid %d\n",
1389 target_pid);
1390 usage_with_options(top_usage, options);
1391 }
1392 } else {
1393 all_tids=malloc(sizeof(pid_t));
1394 if (!all_tids)
1395 return -ENOMEM;
1396 1414
1397 all_tids[0] = target_tid; 1415 threads = thread_map__new(target_pid, target_tid);
1398 thread_num = 1; 1416 if (threads == NULL) {
1417 pr_err("Problems finding threads of monitor\n");
1418 usage_with_options(top_usage, options);
1399 } 1419 }
1400 1420
1401 for (i = 0; i < MAX_NR_CPUS; i++) { 1421 event_array = malloc((sizeof(struct pollfd) *
1402 for (j = 0; j < MAX_COUNTERS; j++) { 1422 MAX_NR_CPUS * MAX_COUNTERS * threads->nr));
1403 fd[i][j] = malloc(sizeof(int)*thread_num);
1404 mmap_array[i][j] = zalloc(
1405 sizeof(struct mmap_data)*thread_num);
1406 if (!fd[i][j] || !mmap_array[i][j])
1407 return -ENOMEM;
1408 }
1409 }
1410 event_array = malloc(
1411 sizeof(struct pollfd)*MAX_NR_CPUS*MAX_COUNTERS*thread_num);
1412 if (!event_array) 1423 if (!event_array)
1413 return -ENOMEM; 1424 return -ENOMEM;
1414 1425
@@ -1419,15 +1430,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1419 cpu_list = NULL; 1430 cpu_list = NULL;
1420 } 1431 }
1421 1432
1422 if (!nr_counters) 1433 if (!nr_counters && perf_evsel_list__create_default() < 0) {
1423 nr_counters = 1; 1434 pr_err("Not enough memory for event selector list\n");
1424 1435 return -ENOMEM;
1425 symbol_conf.priv_size = (sizeof(struct sym_entry) + 1436 }
1426 (nr_counters + 1) * sizeof(unsigned long));
1427
1428 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
1429 if (symbol__init() < 0)
1430 return -1;
1431 1437
1432 if (delay_secs < 1) 1438 if (delay_secs < 1)
1433 delay_secs = 1; 1439 delay_secs = 1;
@@ -1444,23 +1450,33 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1444 exit(EXIT_FAILURE); 1450 exit(EXIT_FAILURE);
1445 } 1451 }
1446 1452
1447 /* 1453 if (target_tid != -1)
1448 * Fill in the ones not specifically initialized via -c: 1454 cpus = cpu_map__dummy_new();
1449 */ 1455 else
1450 for (counter = 0; counter < nr_counters; counter++) { 1456 cpus = cpu_map__new(cpu_list);
1451 if (attrs[counter].sample_period) 1457
1458 if (cpus == NULL)
1459 usage_with_options(top_usage, options);
1460
1461 list_for_each_entry(pos, &evsel_list, node) {
1462 if (perf_evsel__alloc_mmap_per_thread(pos, cpus->nr, threads->nr) < 0 ||
1463 perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0)
1464 goto out_free_fd;
1465 /*
1466 * Fill in the ones not specifically initialized via -c:
1467 */
1468 if (pos->attr.sample_period)
1452 continue; 1469 continue;
1453 1470
1454 attrs[counter].sample_period = default_interval; 1471 pos->attr.sample_period = default_interval;
1455 } 1472 }
1456 1473
1457 if (target_tid != -1) 1474 symbol_conf.priv_size = (sizeof(struct sym_entry) +
1458 nr_cpus = 1; 1475 (nr_counters + 1) * sizeof(unsigned long));
1459 else
1460 nr_cpus = read_cpu_map(cpu_list);
1461 1476
1462 if (nr_cpus < 1) 1477 symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
1463 usage_with_options(top_usage, options); 1478 if (symbol__init() < 0)
1479 return -1;
1464 1480
1465 get_term_dimensions(&winsize); 1481 get_term_dimensions(&winsize);
1466 if (print_entries == 0) { 1482 if (print_entries == 0) {
@@ -1468,5 +1484,10 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1468 signal(SIGWINCH, sig_winch_handler); 1484 signal(SIGWINCH, sig_winch_handler);
1469 } 1485 }
1470 1486
1471 return __cmd_top(); 1487 status = __cmd_top();
1488out_free_fd:
1489 list_for_each_entry(pos, &evsel_list, node)
1490 perf_evsel__free_mmap(pos);
1491
1492 return status;
1472} 1493}
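
The top rewrite above replaces the static fd[cpu][counter][thread] cube with one xyarray per event selector, addressed through the FD() macro. Below is a minimal sketch of that pattern, assuming the perf-internal headers and sys_perf_event_open() from perf.h; open_counter() is a hypothetical helper, not part of this patch, and error-path cleanup is omitted for brevity:

	#include <errno.h>
	#include "perf.h"
	#include "util/evsel.h"
	#include "util/cpumap.h"
	#include "util/thread.h"
	#include "util/xyarray.h"

	/* Same accessor as util/evsel.c: one int slot per (cpu, thread). */
	#define FD(e, x, y) (*(int *)xyarray__entry((e)->fd, x, y))

	/* Hypothetical helper: open one counter on every (cpu, thread) pair. */
	static int open_counter(struct perf_evsel *evsel,
				struct cpu_map *cpus, struct thread_map *threads)
	{
		int cpu, thread;

		if (perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;

		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				FD(evsel, cpu, thread) =
					sys_perf_event_open(&evsel->attr,
							    threads->map[thread],
							    cpus->map[cpu], -1, 0);
				if (FD(evsel, cpu, thread) < 0)
					return -errno;
			}
		}
		return 0;
	}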
diff --git a/tools/perf/perf.c b/tools/perf/perf.c
index 595d0f4a7103..5b1ecd66bb36 100644
--- a/tools/perf/perf.c
+++ b/tools/perf/perf.c
@@ -286,6 +286,8 @@ static int run_builtin(struct cmd_struct *p, int argc, const char **argv)
286 status = p->fn(argc, argv, prefix); 286 status = p->fn(argc, argv, prefix);
287 exit_browser(status); 287 exit_browser(status);
288 288
289 perf_evsel_list__delete();
290
289 if (status) 291 if (status)
290 return status & 0xff; 292 return status & 0xff;
291 293
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 0f9b8d7a7d7e..3ccaa1043383 100644
--- a/tools/perf/util/cpumap.c
+++ b/tools/perf/util/cpumap.c
@@ -4,32 +4,53 @@
4#include <assert.h> 4#include <assert.h>
5#include <stdio.h> 5#include <stdio.h>
6 6
7int cpumap[MAX_NR_CPUS]; 7static struct cpu_map *cpu_map__default_new(void)
8
9static int default_cpu_map(void)
10{ 8{
11 int nr_cpus, i; 9 struct cpu_map *cpus;
10 int nr_cpus;
12 11
13 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN); 12 nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
14 assert(nr_cpus <= MAX_NR_CPUS); 13 if (nr_cpus < 0)
15 assert((int)nr_cpus >= 0); 14 return NULL;
15
16 cpus = malloc(sizeof(*cpus) + nr_cpus * sizeof(int));
17 if (cpus != NULL) {
18 int i;
19 for (i = 0; i < nr_cpus; ++i)
20 cpus->map[i] = i;
16 21
17 for (i = 0; i < nr_cpus; ++i) 22 cpus->nr = nr_cpus;
18 cpumap[i] = i; 23 }
19 24
20 return nr_cpus; 25 return cpus;
21} 26}
22 27
23static int read_all_cpu_map(void) 28static struct cpu_map *cpu_map__trim_new(int nr_cpus, int *tmp_cpus)
24{ 29{
30 size_t payload_size = nr_cpus * sizeof(int);
31 struct cpu_map *cpus = malloc(sizeof(*cpus) + payload_size);
32
33 if (cpus != NULL) {
34 cpus->nr = nr_cpus;
35 memcpy(cpus->map, tmp_cpus, payload_size);
36 }
37
38 return cpus;
39}
40
41static struct cpu_map *cpu_map__read_all_cpu_map(void)
42{
43 struct cpu_map *cpus = NULL;
25 FILE *onlnf; 44 FILE *onlnf;
26 int nr_cpus = 0; 45 int nr_cpus = 0;
46 int *tmp_cpus = NULL, *tmp;
47 int max_entries = 0;
27 int n, cpu, prev; 48 int n, cpu, prev;
28 char sep; 49 char sep;
29 50
30 onlnf = fopen("/sys/devices/system/cpu/online", "r"); 51 onlnf = fopen("/sys/devices/system/cpu/online", "r");
31 if (!onlnf) 52 if (!onlnf)
32 return default_cpu_map(); 53 return cpu_map__default_new();
33 54
34 sep = 0; 55 sep = 0;
35 prev = -1; 56 prev = -1;
@@ -38,12 +59,28 @@ static int read_all_cpu_map(void)
38 if (n <= 0) 59 if (n <= 0)
39 break; 60 break;
40 if (prev >= 0) { 61 if (prev >= 0) {
41 assert(nr_cpus + cpu - prev - 1 < MAX_NR_CPUS); 62 int new_max = nr_cpus + cpu - prev - 1;
63
64 if (new_max >= max_entries) {
65 max_entries = new_max + MAX_NR_CPUS / 2;
66 tmp = realloc(tmp_cpus, max_entries * sizeof(int));
67 if (tmp == NULL)
68 goto out_free_tmp;
69 tmp_cpus = tmp;
70 }
71
42 while (++prev < cpu) 72 while (++prev < cpu)
43 cpumap[nr_cpus++] = prev; 73 tmp_cpus[nr_cpus++] = prev;
74 }
75 if (nr_cpus == max_entries) {
76 max_entries += MAX_NR_CPUS;
77 tmp = realloc(tmp_cpus, max_entries * sizeof(int));
78 if (tmp == NULL)
79 goto out_free_tmp;
80 tmp_cpus = tmp;
44 } 81 }
45 assert (nr_cpus < MAX_NR_CPUS); 82
46 cpumap[nr_cpus++] = cpu; 83 tmp_cpus[nr_cpus++] = cpu;
47 if (n == 2 && sep == '-') 84 if (n == 2 && sep == '-')
48 prev = cpu; 85 prev = cpu;
49 else 86 else
@@ -51,24 +88,31 @@ static int read_all_cpu_map(void)
51 if (n == 1 || sep == '\n') 88 if (n == 1 || sep == '\n')
52 break; 89 break;
53 } 90 }
54 fclose(onlnf);
55 if (nr_cpus > 0)
56 return nr_cpus;
57 91
58 return default_cpu_map(); 92 if (nr_cpus > 0)
93 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
94 else
95 cpus = cpu_map__default_new();
96out_free_tmp:
97 free(tmp_cpus);
98 fclose(onlnf);
99 return cpus;
59} 100}
60 101
61int read_cpu_map(const char *cpu_list) 102struct cpu_map *cpu_map__new(const char *cpu_list)
62{ 103{
104 struct cpu_map *cpus = NULL;
63 unsigned long start_cpu, end_cpu = 0; 105 unsigned long start_cpu, end_cpu = 0;
64 char *p = NULL; 106 char *p = NULL;
65 int i, nr_cpus = 0; 107 int i, nr_cpus = 0;
108 int *tmp_cpus = NULL, *tmp;
109 int max_entries = 0;
66 110
67 if (!cpu_list) 111 if (!cpu_list)
68 return read_all_cpu_map(); 112 return cpu_map__read_all_cpu_map();
69 113
70 if (!isdigit(*cpu_list)) 114 if (!isdigit(*cpu_list))
71 goto invalid; 115 goto out;
72 116
73 while (isdigit(*cpu_list)) { 117 while (isdigit(*cpu_list)) {
74 p = NULL; 118 p = NULL;
@@ -94,21 +138,42 @@ int read_cpu_map(const char *cpu_list)
94 for (; start_cpu <= end_cpu; start_cpu++) { 138 for (; start_cpu <= end_cpu; start_cpu++) {
95 /* check for duplicates */ 139 /* check for duplicates */
96 for (i = 0; i < nr_cpus; i++) 140 for (i = 0; i < nr_cpus; i++)
97 if (cpumap[i] == (int)start_cpu) 141 if (tmp_cpus[i] == (int)start_cpu)
98 goto invalid; 142 goto invalid;
99 143
100 assert(nr_cpus < MAX_NR_CPUS); 144 if (nr_cpus == max_entries) {
101 cpumap[nr_cpus++] = (int)start_cpu; 145 max_entries += MAX_NR_CPUS;
146 tmp = realloc(tmp_cpus, max_entries * sizeof(int));
147 if (tmp == NULL)
148 goto invalid;
149 tmp_cpus = tmp;
150 }
151 tmp_cpus[nr_cpus++] = (int)start_cpu;
102 } 152 }
103 if (*p) 153 if (*p)
104 ++p; 154 ++p;
105 155
106 cpu_list = p; 156 cpu_list = p;
107 } 157 }
108 if (nr_cpus > 0)
109 return nr_cpus;
110 158
111 return default_cpu_map(); 159 if (nr_cpus > 0)
160 cpus = cpu_map__trim_new(nr_cpus, tmp_cpus);
161 else
162 cpus = cpu_map__default_new();
112invalid: 163invalid:
113 return -1; 164 free(tmp_cpus);
165out:
166 return cpus;
167}
168
169struct cpu_map *cpu_map__dummy_new(void)
170{
171 struct cpu_map *cpus = malloc(sizeof(*cpus) + sizeof(int));
172
173 if (cpus != NULL) {
174 cpus->nr = 1;
175 cpus->map[0] = -1;
176 }
177
178 return cpus;
114} 179}
diff --git a/tools/perf/util/cpumap.h b/tools/perf/util/cpumap.h
index 3e60f56e490e..f7a4f42f6307 100644
--- a/tools/perf/util/cpumap.h
+++ b/tools/perf/util/cpumap.h
@@ -1,7 +1,13 @@
1#ifndef __PERF_CPUMAP_H 1#ifndef __PERF_CPUMAP_H
2#define __PERF_CPUMAP_H 2#define __PERF_CPUMAP_H
3 3
4extern int read_cpu_map(const char *cpu_list); 4struct cpu_map {
5extern int cpumap[]; 5 int nr;
6 int map[];
7};
8
9struct cpu_map *cpu_map__new(const char *cpu_list);
10struct cpu_map *cpu_map__dummy_new(void);
11void cpu_map__delete(struct cpu_map *map);
6 12
7#endif /* __PERF_CPUMAP_H */ 13#endif /* __PERF_CPUMAP_H */
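
With cpu_map, the fixed cpumap[MAX_NR_CPUS] global becomes a single heap allocation whose flexible map[] array is sized to what was actually parsed. A hedged usage sketch (standalone for illustration; cpu_map__delete() is only declared by this patch, so plain free() matches the one malloc()):

	#include <stdio.h>
	#include <stdlib.h>
	#include "util/cpumap.h"

	static void list_cpus(void)
	{
		/* NULL means all online CPUs; "0,2-4"-style lists also parse. */
		struct cpu_map *cpus = cpu_map__new(NULL);
		int i;

		if (cpus == NULL)
			return;

		for (i = 0; i < cpus->nr; i++)
			printf("cpu %d\n", cpus->map[i]);

		free(cpus);
	}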
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
new file mode 100644
index 000000000000..c95267e63c5b
--- /dev/null
+++ b/tools/perf/util/evsel.c
@@ -0,0 +1,186 @@
1#include "evsel.h"
2#include "../perf.h"
3#include "util.h"
4#include "cpumap.h"
5#include "thread.h"
6
7#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
8
9struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
10{
11 struct perf_evsel *evsel = zalloc(sizeof(*evsel));
12
13 if (evsel != NULL) {
14 evsel->idx = idx;
15 evsel->attr.type = type;
16 evsel->attr.config = config;
17 INIT_LIST_HEAD(&evsel->node);
18 }
19
20 return evsel;
21}
22
23int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
24{
25 evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
26 return evsel->fd != NULL ? 0 : -ENOMEM;
27}
28
29int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
30{
31 evsel->counts = zalloc((sizeof(*evsel->counts) +
32 (ncpus * sizeof(struct perf_counts_values))));
33 return evsel->counts != NULL ? 0 : -ENOMEM;
34}
35
36void perf_evsel__free_fd(struct perf_evsel *evsel)
37{
38 xyarray__delete(evsel->fd);
39 evsel->fd = NULL;
40}
41
42void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
43{
44 int cpu, thread;
45
46 for (cpu = 0; cpu < ncpus; cpu++)
47 for (thread = 0; thread < nthreads; ++thread) {
48 close(FD(evsel, cpu, thread));
49 FD(evsel, cpu, thread) = -1;
50 }
51}
52
53void perf_evsel__delete(struct perf_evsel *evsel)
54{
55 assert(list_empty(&evsel->node));
56 xyarray__delete(evsel->fd);
57 free(evsel);
58}
59
60int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
61 int cpu, int thread, bool scale)
62{
63 struct perf_counts_values count;
64 size_t nv = scale ? 3 : 1;
65
66 if (FD(evsel, cpu, thread) < 0)
67 return -EINVAL;
68
69 if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
70 return -ENOMEM;
71
72 if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
73 return -errno;
74
75 if (scale) {
76 if (count.run == 0)
77 count.val = 0;
78 else if (count.run < count.ena)
79 count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
80 } else
81 count.ena = count.run = 0;
82
83 evsel->counts->cpu[cpu] = count;
84 return 0;
85}
86
87int __perf_evsel__read(struct perf_evsel *evsel,
88 int ncpus, int nthreads, bool scale)
89{
90 size_t nv = scale ? 3 : 1;
91 int cpu, thread;
92 struct perf_counts_values *aggr = &evsel->counts->aggr, count;
93
94 aggr->val = 0;
95
96 for (cpu = 0; cpu < ncpus; cpu++) {
97 for (thread = 0; thread < nthreads; thread++) {
98 if (FD(evsel, cpu, thread) < 0)
99 continue;
100
101 if (readn(FD(evsel, cpu, thread),
102 &count, nv * sizeof(u64)) < 0)
103 return -errno;
104
105 aggr->val += count.val;
106 if (scale) {
107 aggr->ena += count.ena;
108 aggr->run += count.run;
109 }
110 }
111 }
112
113 evsel->counts->scaled = 0;
114 if (scale) {
115 if (aggr->run == 0) {
116 evsel->counts->scaled = -1;
117 aggr->val = 0;
118 return 0;
119 }
120
121 if (aggr->run < aggr->ena) {
122 evsel->counts->scaled = 1;
123 aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
124 }
125 } else
126 aggr->ena = aggr->run = 0;
127
128 return 0;
129}
130
131int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
132{
133 int cpu;
134
135 if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
136 return -1;
137
138 for (cpu = 0; cpu < cpus->nr; cpu++) {
139 FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
140 cpus->map[cpu], -1, 0);
141 if (FD(evsel, cpu, 0) < 0)
142 goto out_close;
143 }
144
145 return 0;
146
147out_close:
148 while (--cpu >= 0) {
149 close(FD(evsel, cpu, 0));
150 FD(evsel, cpu, 0) = -1;
151 }
152 return -1;
153}
154
155int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
156{
157 int thread;
158
159 if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr) < 0)
160 return -1;
161
162 for (thread = 0; thread < threads->nr; thread++) {
163 FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
164 threads->map[thread], -1, -1, 0);
165 if (FD(evsel, 0, thread) < 0)
166 goto out_close;
167 }
168
169 return 0;
170
171out_close:
172 while (--thread >= 0) {
173 close(FD(evsel, 0, thread));
174 FD(evsel, 0, thread) = -1;
175 }
176 return -1;
177}
178
179int perf_evsel__open(struct perf_evsel *evsel,
180 struct cpu_map *cpus, struct thread_map *threads)
181{
182 if (threads == NULL)
183 return perf_evsel__open_per_cpu(evsel, cpus);
184
185 return perf_evsel__open_per_thread(evsel, threads);
186}
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
new file mode 100644
index 000000000000..863d78d5ef1a
--- /dev/null
+++ b/tools/perf/util/evsel.h
@@ -0,0 +1,115 @@
1#ifndef __PERF_EVSEL_H
2#define __PERF_EVSEL_H 1
3
4#include <linux/list.h>
5#include <stdbool.h>
6#include <linux/perf_event.h>
7#include "types.h"
8#include "xyarray.h"
9
10struct perf_counts_values {
11 union {
12 struct {
13 u64 val;
14 u64 ena;
15 u64 run;
16 };
17 u64 values[3];
18 };
19};
20
21struct perf_counts {
22 s8 scaled;
23 struct perf_counts_values aggr;
24 struct perf_counts_values cpu[];
25};
26
27struct perf_evsel {
28 struct list_head node;
29 struct perf_event_attr attr;
30 char *filter;
31 struct xyarray *fd;
32 struct perf_counts *counts;
33 int idx;
34 void *priv;
35};
36
37struct cpu_map;
38struct thread_map;
39
40struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx);
41void perf_evsel__delete(struct perf_evsel *evsel);
42
43int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
44int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
45void perf_evsel__free_fd(struct perf_evsel *evsel);
46void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
47
48int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus);
49int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads);
50int perf_evsel__open(struct perf_evsel *evsel,
51 struct cpu_map *cpus, struct thread_map *threads);
52
53#define perf_evsel__match(evsel, t, c) \
54 (evsel->attr.type == PERF_TYPE_##t && \
55 evsel->attr.config == PERF_COUNT_##c)
56
57int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
58 int cpu, int thread, bool scale);
59
60/**
61 * perf_evsel__read_on_cpu - Read out the results on a CPU and thread
62 *
63 * @evsel - event selector to read value
64 * @cpu - CPU of interest
65 * @thread - thread of interest
66 */
67static inline int perf_evsel__read_on_cpu(struct perf_evsel *evsel,
68 int cpu, int thread)
69{
70 return __perf_evsel__read_on_cpu(evsel, cpu, thread, false);
71}
72
73/**
74 * perf_evsel__read_on_cpu_scaled - Read out the results on a CPU and thread, scaled
75 *
76 * @evsel - event selector to read value
77 * @cpu - CPU of interest
78 * @thread - thread of interest
79 */
80static inline int perf_evsel__read_on_cpu_scaled(struct perf_evsel *evsel,
81 int cpu, int thread)
82{
83 return __perf_evsel__read_on_cpu(evsel, cpu, thread, true);
84}
85
86int __perf_evsel__read(struct perf_evsel *evsel, int ncpus, int nthreads,
87 bool scale);
88
89/**
90 * perf_evsel__read - Read the aggregate results on all CPUs
91 *
92 * @evsel - event selector to read value
93 * @ncpus - Number of cpus affected, from zero
94 * @nthreads - Number of threads affected, from zero
95 */
96static inline int perf_evsel__read(struct perf_evsel *evsel,
97 int ncpus, int nthreads)
98{
99 return __perf_evsel__read(evsel, ncpus, nthreads, false);
100}
101
102/**
103 * perf_evsel__read_scaled - Read the aggregate results on all CPUs, scaled
104 *
105 * @evsel - event selector to read value
106 * @ncpus - Number of cpus affected, from zero
107 * @nthreads - Number of threads affected, from zero
108 */
109static inline int perf_evsel__read_scaled(struct perf_evsel *evsel,
110 int ncpus, int nthreads)
111{
112 return __perf_evsel__read(evsel, ncpus, nthreads, true);
113}
114
115#endif /* __PERF_EVSEL_H */
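
perf_counts_values mirrors the kernel's read format: val plus the TIME_ENABLED/TIME_RUNNING pair, and the scaled readers compensate for multiplexing by val * ena / run (a counter that ran 5 of the 10 time units it was enabled reports twice its raw value). A hedged sketch of counting cycles for the current thread with the API above; the scaled variants additionally require attr.read_format to request the two timestamps before opening, which the sketch avoids by reading unscaled. Error-path cleanup is omitted for brevity:

	#include <stdio.h>
	#include <unistd.h>
	#include <inttypes.h>
	#include "util/evsel.h"
	#include "util/thread.h"

	static int count_own_cycles(void)
	{
		struct thread_map *threads = thread_map__new(-1, getpid());
		struct perf_evsel *evsel =
			perf_evsel__new(PERF_TYPE_HARDWARE,
					PERF_COUNT_HW_CPU_CYCLES, 0);

		if (threads == NULL || evsel == NULL ||
		    perf_evsel__open_per_thread(evsel, threads) < 0)
			return -1;

		/* ... the workload to be measured runs here ... */

		if (perf_evsel__read_on_cpu(evsel, 0, 0) == 0)	/* unscaled */
			printf("cycles: %" PRIu64 "\n",
			       (uint64_t)evsel->counts->cpu[0].val);

		perf_evsel__close_fd(evsel, 1, threads->nr);
		perf_evsel__delete(evsel);
		thread_map__delete(threads);
		return 0;
	}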
diff --git a/tools/perf/util/header.c b/tools/perf/util/header.c
index 4b8c8397a947..989fa2dee2fd 100644
--- a/tools/perf/util/header.c
+++ b/tools/perf/util/header.c
@@ -463,7 +463,7 @@ static int perf_header__adds_write(struct perf_header *self, int fd)
463 463
464 /* Write trace info */ 464 /* Write trace info */
465 trace_sec->offset = lseek(fd, 0, SEEK_CUR); 465 trace_sec->offset = lseek(fd, 0, SEEK_CUR);
466 read_tracing_data(fd, attrs, nr_counters); 466 read_tracing_data(fd, &evsel_list);
467 trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset; 467 trace_sec->size = lseek(fd, 0, SEEK_CUR) - trace_sec->offset;
468 } 468 }
469 469
@@ -606,7 +606,7 @@ int perf_header__write(struct perf_header *self, int fd, bool at_exit)
606static int perf_header__getbuffer64(struct perf_header *self, 606static int perf_header__getbuffer64(struct perf_header *self,
607 int fd, void *buf, size_t size) 607 int fd, void *buf, size_t size)
608{ 608{
609 if (do_read(fd, buf, size) <= 0) 609 if (readn(fd, buf, size) <= 0)
610 return -1; 610 return -1;
611 611
612 if (self->needs_swap) 612 if (self->needs_swap)
@@ -662,7 +662,7 @@ int perf_file_header__read(struct perf_file_header *self,
662{ 662{
663 lseek(fd, 0, SEEK_SET); 663 lseek(fd, 0, SEEK_SET);
664 664
665 if (do_read(fd, self, sizeof(*self)) <= 0 || 665 if (readn(fd, self, sizeof(*self)) <= 0 ||
666 memcmp(&self->magic, __perf_magic, sizeof(self->magic))) 666 memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
667 return -1; 667 return -1;
668 668
@@ -823,7 +823,7 @@ static int perf_file_header__read_pipe(struct perf_pipe_file_header *self,
823 struct perf_header *ph, int fd, 823 struct perf_header *ph, int fd,
824 bool repipe) 824 bool repipe)
825{ 825{
826 if (do_read(fd, self, sizeof(*self)) <= 0 || 826 if (readn(fd, self, sizeof(*self)) <= 0 ||
827 memcmp(&self->magic, __perf_magic, sizeof(self->magic))) 827 memcmp(&self->magic, __perf_magic, sizeof(self->magic)))
828 return -1; 828 return -1;
829 829
@@ -1133,8 +1133,7 @@ int event__process_event_type(event_t *self,
1133 return 0; 1133 return 0;
1134} 1134}
1135 1135
1136int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, 1136int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
1137 int nb_events,
1138 event__handler_t process, 1137 event__handler_t process,
1139 struct perf_session *session __unused) 1138 struct perf_session *session __unused)
1140{ 1139{
@@ -1145,7 +1144,7 @@ int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
1145 memset(&ev, 0, sizeof(ev)); 1144 memset(&ev, 0, sizeof(ev));
1146 1145
1147 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA; 1146 ev.tracing_data.header.type = PERF_RECORD_HEADER_TRACING_DATA;
1148 size = read_tracing_data_size(fd, pattrs, nb_events); 1147 size = read_tracing_data_size(fd, pattrs);
1149 if (size <= 0) 1148 if (size <= 0)
1150 return size; 1149 return size;
1151 aligned_size = ALIGN(size, sizeof(u64)); 1150 aligned_size = ALIGN(size, sizeof(u64));
@@ -1155,7 +1154,7 @@ int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs,
1155 1154
1156 process(&ev, NULL, session); 1155 process(&ev, NULL, session);
1157 1156
1158 err = read_tracing_data(fd, pattrs, nb_events); 1157 err = read_tracing_data(fd, pattrs);
1159 write_padded(fd, NULL, 0, padding); 1158 write_padded(fd, NULL, 0, padding);
1160 1159
1161 return aligned_size; 1160 return aligned_size;
diff --git a/tools/perf/util/header.h b/tools/perf/util/header.h
index 6335965e1f93..33f16be7b72f 100644
--- a/tools/perf/util/header.h
+++ b/tools/perf/util/header.h
@@ -113,8 +113,7 @@ int event__synthesize_event_types(event__handler_t process,
113int event__process_event_type(event_t *self, 113int event__process_event_type(event_t *self,
114 struct perf_session *session); 114 struct perf_session *session);
115 115
116int event__synthesize_tracing_data(int fd, struct perf_event_attr *pattrs, 116int event__synthesize_tracing_data(int fd, struct list_head *pattrs,
117 int nb_events,
118 event__handler_t process, 117 event__handler_t process,
119 struct perf_session *session); 118 struct perf_session *session);
120int event__process_tracing_data(event_t *self, 119int event__process_tracing_data(event_t *self,
diff --git a/tools/perf/util/parse-events.c b/tools/perf/util/parse-events.c
index c305305a3884..3a142e90d609 100644
--- a/tools/perf/util/parse-events.c
+++ b/tools/perf/util/parse-events.c
@@ -1,6 +1,7 @@
1#include "../../../include/linux/hw_breakpoint.h" 1#include "../../../include/linux/hw_breakpoint.h"
2#include "util.h" 2#include "util.h"
3#include "../perf.h" 3#include "../perf.h"
4#include "evsel.h"
4#include "parse-options.h" 5#include "parse-options.h"
5#include "parse-events.h" 6#include "parse-events.h"
6#include "exec_cmd.h" 7#include "exec_cmd.h"
@@ -12,8 +13,7 @@
12 13
13int nr_counters; 14int nr_counters;
14 15
15struct perf_event_attr attrs[MAX_COUNTERS]; 16LIST_HEAD(evsel_list);
16char *filters[MAX_COUNTERS];
17 17
18struct event_symbol { 18struct event_symbol {
19 u8 type; 19 u8 type;
@@ -266,10 +266,10 @@ static char *event_cache_name(u8 cache_type, u8 cache_op, u8 cache_result)
266 return name; 266 return name;
267} 267}
268 268
269const char *event_name(int counter) 269const char *event_name(struct perf_evsel *evsel)
270{ 270{
271 u64 config = attrs[counter].config; 271 u64 config = evsel->attr.config;
272 int type = attrs[counter].type; 272 int type = evsel->attr.type;
273 273
274 return __event_name(type, config); 274 return __event_name(type, config);
275} 275}
@@ -814,9 +814,6 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
814 return -1; 814 return -1;
815 815
816 for (;;) { 816 for (;;) {
817 if (nr_counters == MAX_COUNTERS)
818 return -1;
819
820 memset(&attr, 0, sizeof(attr)); 817 memset(&attr, 0, sizeof(attr));
821 ret = parse_event_symbols(&str, &attr); 818 ret = parse_event_symbols(&str, &attr);
822 if (ret == EVT_FAILED) 819 if (ret == EVT_FAILED)
@@ -826,8 +823,13 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
826 return -1; 823 return -1;
827 824
828 if (ret != EVT_HANDLED_ALL) { 825 if (ret != EVT_HANDLED_ALL) {
829 attrs[nr_counters] = attr; 826 struct perf_evsel *evsel;
830 nr_counters++; 827 evsel = perf_evsel__new(attr.type, attr.config,
828 nr_counters);
829 if (evsel == NULL)
830 return -1;
831 list_add_tail(&evsel->node, &evsel_list);
832 ++nr_counters;
831 } 833 }
832 834
833 if (*str == 0) 835 if (*str == 0)
@@ -844,21 +846,22 @@ int parse_events(const struct option *opt __used, const char *str, int unset __u
844int parse_filter(const struct option *opt __used, const char *str, 846int parse_filter(const struct option *opt __used, const char *str,
845 int unset __used) 847 int unset __used)
846{ 848{
847 int i = nr_counters - 1; 849 struct perf_evsel *last = NULL;
848 int len = strlen(str);
849 850
850 if (i < 0 || attrs[i].type != PERF_TYPE_TRACEPOINT) { 851 if (!list_empty(&evsel_list))
852 last = list_entry(evsel_list.prev, struct perf_evsel, node);
853
854 if (last == NULL || last->attr.type != PERF_TYPE_TRACEPOINT) {
851 fprintf(stderr, 855 fprintf(stderr,
852 "-F option should follow a -e tracepoint option\n"); 856 "-F option should follow a -e tracepoint option\n");
853 return -1; 857 return -1;
854 } 858 }
855 859
856 filters[i] = malloc(len + 1); 860 last->filter = strdup(str);
857 if (!filters[i]) { 861 if (last->filter == NULL) {
858 fprintf(stderr, "not enough memory to hold filter string\n"); 862 fprintf(stderr, "not enough memory to hold filter string\n");
859 return -1; 863 return -1;
860 } 864 }
861 strcpy(filters[i], str);
862 865
863 return 0; 866 return 0;
864} 867}
@@ -967,3 +970,26 @@ void print_events(void)
967 970
968 exit(129); 971 exit(129);
969} 972}
973
974int perf_evsel_list__create_default(void)
975{
976 struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE,
977 PERF_COUNT_HW_CPU_CYCLES, 0);
978 if (evsel == NULL)
979 return -ENOMEM;
980
981 list_add(&evsel->node, &evsel_list);
982 ++nr_counters;
983 return 0;
984}
985
986void perf_evsel_list__delete(void)
987{
988 struct perf_evsel *pos, *n;
989
990 list_for_each_entry_safe(pos, n, &evsel_list, node) {
991 list_del_init(&pos->node);
992 perf_evsel__delete(pos);
993 }
994 nr_counters = 0;
995}
diff --git a/tools/perf/util/parse-events.h b/tools/perf/util/parse-events.h
index fc4ab3fe877a..0a0abc1d10eb 100644
--- a/tools/perf/util/parse-events.h
+++ b/tools/perf/util/parse-events.h
@@ -4,6 +4,16 @@
4 * Parse symbolic events/counts passed in as options: 4 * Parse symbolic events/counts passed in as options:
5 */ 5 */
6 6
7#include <linux/perf_event.h>
8
9struct list_head;
10struct perf_evsel;
11
12extern struct list_head evsel_list;
13
14int perf_evsel_list__create_default(void);
15void perf_evsel_list__delete(void);
16
7struct option; 17struct option;
8 18
9struct tracepoint_path { 19struct tracepoint_path {
@@ -13,14 +23,11 @@ struct tracepoint_path {
13}; 23};
14 24
15extern struct tracepoint_path *tracepoint_id_to_path(u64 config); 25extern struct tracepoint_path *tracepoint_id_to_path(u64 config);
16extern bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events); 26extern bool have_tracepoints(struct list_head *evsel_list);
17 27
18extern int nr_counters; 28extern int nr_counters;
19 29
20extern struct perf_event_attr attrs[MAX_COUNTERS]; 30const char *event_name(struct perf_evsel *event);
21extern char *filters[MAX_COUNTERS];
22
23extern const char *event_name(int ctr);
24extern const char *__event_name(int type, u64 config); 31extern const char *__event_name(int type, u64 config);
25 32
26extern int parse_events(const struct option *opt, const char *str, int unset); 33extern int parse_events(const struct option *opt, const char *str, int unset);
@@ -33,5 +40,4 @@ extern void print_events(void);
33extern char debugfs_path[]; 40extern char debugfs_path[];
34extern int valid_debugfs_mount(const char *debugfs); 41extern int valid_debugfs_mount(const char *debugfs);
35 42
36
37#endif /* __PERF_PARSE_EVENTS_H */ 43#endif /* __PERF_PARSE_EVENTS_H */
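
parse_events() now appends one perf_evsel per parsed -e argument to the global evsel_list, and perf.c (above) tears the list down after each builtin returns. A sketch of the lifecycle a builtin sees, using only the declarations above; walk_events() is a hypothetical caller:

	#include <stdio.h>
	#include "util/evsel.h"
	#include "util/parse-events.h"

	static void walk_events(void)
	{
		struct perf_evsel *pos;

		/* Same fallback builtin-top uses when no -e was given:
		 * a single hardware cycles counter. */
		if (!nr_counters && perf_evsel_list__create_default() < 0)
			return;

		list_for_each_entry(pos, &evsel_list, node)
			printf("%s\n", event_name(pos));

		perf_evsel_list__delete();	/* also resets nr_counters */
	}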
diff --git a/tools/perf/util/session.c b/tools/perf/util/session.c
index 0f7e544544f5..b163dfd6cbc5 100644
--- a/tools/perf/util/session.c
+++ b/tools/perf/util/session.c
@@ -838,23 +838,6 @@ static struct thread *perf_session__register_idle_thread(struct perf_session *se
838 return thread; 838 return thread;
839} 839}
840 840
841int do_read(int fd, void *buf, size_t size)
842{
843 void *buf_start = buf;
844
845 while (size) {
846 int ret = read(fd, buf, size);
847
848 if (ret <= 0)
849 return ret;
850
851 size -= ret;
852 buf += ret;
853 }
854
855 return buf - buf_start;
856}
857
858#define session_done() (*(volatile int *)(&session_done)) 841#define session_done() (*(volatile int *)(&session_done))
859volatile int session_done; 842volatile int session_done;
860 843
@@ -872,7 +855,7 @@ static int __perf_session__process_pipe_events(struct perf_session *self,
872 855
873 head = 0; 856 head = 0;
874more: 857more:
875 err = do_read(self->fd, &event, sizeof(struct perf_event_header)); 858 err = readn(self->fd, &event, sizeof(struct perf_event_header));
876 if (err <= 0) { 859 if (err <= 0) {
877 if (err == 0) 860 if (err == 0)
878 goto done; 861 goto done;
@@ -892,8 +875,7 @@ more:
892 p += sizeof(struct perf_event_header); 875 p += sizeof(struct perf_event_header);
893 876
894 if (size - sizeof(struct perf_event_header)) { 877 if (size - sizeof(struct perf_event_header)) {
895 err = do_read(self->fd, p, 878 err = readn(self->fd, p, size - sizeof(struct perf_event_header));
896 size - sizeof(struct perf_event_header));
897 if (err <= 0) { 879 if (err <= 0) {
898 if (err == 0) { 880 if (err == 0) {
899 pr_err("unexpected end of event stream\n"); 881 pr_err("unexpected end of event stream\n");
diff --git a/tools/perf/util/session.h b/tools/perf/util/session.h
index ffe4b98db8f0..decd83f274fd 100644
--- a/tools/perf/util/session.h
+++ b/tools/perf/util/session.h
@@ -109,7 +109,6 @@ void mem_bswap_64(void *src, int byte_size);
109 109
110int perf_session__create_kernel_maps(struct perf_session *self); 110int perf_session__create_kernel_maps(struct perf_session *self);
111 111
112int do_read(int fd, void *buf, size_t size);
113void perf_session__update_sample_type(struct perf_session *self); 112void perf_session__update_sample_type(struct perf_session *self);
114void perf_session__set_sample_id_all(struct perf_session *session, bool value); 113void perf_session__set_sample_id_all(struct perf_session *session, bool value);
115void perf_session__set_sample_type(struct perf_session *session, u64 type); 114void perf_session__set_sample_type(struct perf_session *session, u64 type);
diff --git a/tools/perf/util/thread.c b/tools/perf/util/thread.c
index 8c72d888e449..00f4eade2e3e 100644
--- a/tools/perf/util/thread.c
+++ b/tools/perf/util/thread.c
@@ -16,35 +16,50 @@ static int filter(const struct dirent *dir)
16 return 1; 16 return 1;
17} 17}
18 18
19int find_all_tid(int pid, pid_t ** all_tid) 19struct thread_map *thread_map__new_by_pid(pid_t pid)
20{ 20{
21 struct thread_map *threads;
21 char name[256]; 22 char name[256];
22 int items; 23 int items;
23 struct dirent **namelist = NULL; 24 struct dirent **namelist = NULL;
24 int ret = 0;
25 int i; 25 int i;
26 26
27 sprintf(name, "/proc/%d/task", pid); 27 sprintf(name, "/proc/%d/task", pid);
28 items = scandir(name, &namelist, filter, NULL); 28 items = scandir(name, &namelist, filter, NULL);
29 if (items <= 0) 29 if (items <= 0)
30 return -ENOENT; 30 return NULL;
31 *all_tid = malloc(sizeof(pid_t) * items);
32 if (!*all_tid) {
33 ret = -ENOMEM;
34 goto failure;
35 }
36
37 for (i = 0; i < items; i++)
38 (*all_tid)[i] = atoi(namelist[i]->d_name);
39 31
40 ret = items; 32 threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
33 if (threads != NULL) {
34 for (i = 0; i < items; i++)
35 threads->map[i] = atoi(namelist[i]->d_name);
36 threads->nr = items;
37 }
41 38
42failure:
43 for (i=0; i<items; i++) 39 for (i=0; i<items; i++)
44 free(namelist[i]); 40 free(namelist[i]);
45 free(namelist); 41 free(namelist);
46 42
47 return ret; 43 return threads;
44}
45
46struct thread_map *thread_map__new_by_tid(pid_t tid)
47{
48 struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
49
50 if (threads != NULL) {
51 threads->map[0] = tid;
52 threads->nr = 1;
53 }
54
55 return threads;
56}
57
58struct thread_map *thread_map__new(pid_t pid, pid_t tid)
59{
60 if (pid != -1)
61 return thread_map__new_by_pid(pid);
62 return thread_map__new_by_tid(tid);
48} 63}
49 64
50static struct thread *thread__new(pid_t pid) 65static struct thread *thread__new(pid_t pid)
diff --git a/tools/perf/util/thread.h b/tools/perf/util/thread.h
index 688500ff826f..d7574101054a 100644
--- a/tools/perf/util/thread.h
+++ b/tools/perf/util/thread.h
@@ -18,11 +18,24 @@ struct thread {
18 int comm_len; 18 int comm_len;
19}; 19};
20 20
21struct thread_map {
22 int nr;
23 int map[];
24};
25
21struct perf_session; 26struct perf_session;
22 27
23void thread__delete(struct thread *self); 28void thread__delete(struct thread *self);
24 29
25int find_all_tid(int pid, pid_t ** all_tid); 30struct thread_map *thread_map__new_by_pid(pid_t pid);
31struct thread_map *thread_map__new_by_tid(pid_t tid);
32struct thread_map *thread_map__new(pid_t pid, pid_t tid);
33
34static inline void thread_map__delete(struct thread_map *threads)
35{
36 free(threads);
37}
38
26int thread__set_comm(struct thread *self, const char *comm); 39int thread__set_comm(struct thread *self, const char *comm);
27int thread__comm_len(struct thread *self); 40int thread__comm_len(struct thread *self);
28struct thread *perf_session__findnew(struct perf_session *self, pid_t pid); 41struct thread *perf_session__findnew(struct perf_session *self, pid_t pid);
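
thread_map follows the same shape as cpu_map, folding find_all_tid()'s out-parameter and count into one allocation. A hedged sketch:

	#include <stdio.h>
	#include <sys/types.h>
	#include "util/thread.h"

	static void list_tasks(pid_t pid)
	{
		/* pid != -1 scans /proc/<pid>/task; pid == -1 yields a
		 * one-entry map wrapping the tid (here unused, so -1). */
		struct thread_map *threads = thread_map__new(pid, -1);
		int i;

		if (threads == NULL)
			return;

		for (i = 0; i < threads->nr; i++)
			printf("tid %d\n", threads->map[i]);

		thread_map__delete(threads);
	}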
diff --git a/tools/perf/util/trace-event-info.c b/tools/perf/util/trace-event-info.c
index b1572601286c..35729f4c40cb 100644
--- a/tools/perf/util/trace-event-info.c
+++ b/tools/perf/util/trace-event-info.c
@@ -34,11 +34,13 @@
34#include <ctype.h> 34#include <ctype.h>
35#include <errno.h> 35#include <errno.h>
36#include <stdbool.h> 36#include <stdbool.h>
37#include <linux/list.h>
37#include <linux/kernel.h> 38#include <linux/kernel.h>
38 39
39#include "../perf.h" 40#include "../perf.h"
40#include "trace-event.h" 41#include "trace-event.h"
41#include "debugfs.h" 42#include "debugfs.h"
43#include "evsel.h"
42 44
43#define VERSION "0.5" 45#define VERSION "0.5"
44 46
@@ -469,16 +471,17 @@ out:
469} 471}
470 472
471static struct tracepoint_path * 473static struct tracepoint_path *
472get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events) 474get_tracepoints_path(struct list_head *pattrs)
473{ 475{
474 struct tracepoint_path path, *ppath = &path; 476 struct tracepoint_path path, *ppath = &path;
475 int i, nr_tracepoints = 0; 477 struct perf_evsel *pos;
478 int nr_tracepoints = 0;
476 479
477 for (i = 0; i < nb_events; i++) { 480 list_for_each_entry(pos, pattrs, node) {
478 if (pattrs[i].type != PERF_TYPE_TRACEPOINT) 481 if (pos->attr.type != PERF_TYPE_TRACEPOINT)
479 continue; 482 continue;
480 ++nr_tracepoints; 483 ++nr_tracepoints;
481 ppath->next = tracepoint_id_to_path(pattrs[i].config); 484 ppath->next = tracepoint_id_to_path(pos->attr.config);
482 if (!ppath->next) 485 if (!ppath->next)
483 die("%s\n", "No memory to alloc tracepoints list"); 486 die("%s\n", "No memory to alloc tracepoints list");
484 ppath = ppath->next; 487 ppath = ppath->next;
@@ -487,21 +490,21 @@ get_tracepoints_path(struct perf_event_attr *pattrs, int nb_events)
487 return nr_tracepoints > 0 ? path.next : NULL; 490 return nr_tracepoints > 0 ? path.next : NULL;
488} 491}
489 492
490bool have_tracepoints(struct perf_event_attr *pattrs, int nb_events) 493bool have_tracepoints(struct list_head *pattrs)
491{ 494{
492 int i; 495 struct perf_evsel *pos;
493 496
494 for (i = 0; i < nb_events; i++) 497 list_for_each_entry(pos, pattrs, node)
495 if (pattrs[i].type == PERF_TYPE_TRACEPOINT) 498 if (pos->attr.type == PERF_TYPE_TRACEPOINT)
496 return true; 499 return true;
497 500
498 return false; 501 return false;
499} 502}
500 503
501int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events) 504int read_tracing_data(int fd, struct list_head *pattrs)
502{ 505{
503 char buf[BUFSIZ]; 506 char buf[BUFSIZ];
504 struct tracepoint_path *tps = get_tracepoints_path(pattrs, nb_events); 507 struct tracepoint_path *tps = get_tracepoints_path(pattrs);
505 508
506 /* 509 /*
507 * What? No tracepoints? No sense writing anything here, bail out. 510 * What? No tracepoints? No sense writing anything here, bail out.
@@ -545,14 +548,13 @@ int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events)
545 return 0; 548 return 0;
546} 549}
547 550
548ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs, 551ssize_t read_tracing_data_size(int fd, struct list_head *pattrs)
549 int nb_events)
550{ 552{
551 ssize_t size; 553 ssize_t size;
552 int err = 0; 554 int err = 0;
553 555
554 calc_data_size = 1; 556 calc_data_size = 1;
555 err = read_tracing_data(fd, pattrs, nb_events); 557 err = read_tracing_data(fd, pattrs);
556 size = calc_data_size - 1; 558 size = calc_data_size - 1;
557 calc_data_size = 0; 559 calc_data_size = 0;
558 560
diff --git a/tools/perf/util/trace-event.h b/tools/perf/util/trace-event.h
index b3e86b1e4444..b5f12ca24d99 100644
--- a/tools/perf/util/trace-event.h
+++ b/tools/perf/util/trace-event.h
@@ -262,9 +262,8 @@ raw_field_value(struct event *event, const char *name, void *data);
262void *raw_field_ptr(struct event *event, const char *name, void *data); 262void *raw_field_ptr(struct event *event, const char *name, void *data);
263unsigned long long eval_flag(const char *flag); 263unsigned long long eval_flag(const char *flag);
264 264
265int read_tracing_data(int fd, struct perf_event_attr *pattrs, int nb_events); 265int read_tracing_data(int fd, struct list_head *pattrs);
266ssize_t read_tracing_data_size(int fd, struct perf_event_attr *pattrs, 266ssize_t read_tracing_data_size(int fd, struct list_head *pattrs);
267 int nb_events);
268 267
269/* taken from kernel/trace/trace.h */ 268/* taken from kernel/trace/trace.h */
270enum trace_flag_type { 269enum trace_flag_type {
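
With the list-based signatures, callers pass the evsel list itself; only PERF_TYPE_TRACEPOINT entries contribute tracing data. A sketch of the guard-then-write pattern header.c follows; maybe_write_tracing_data() is hypothetical:

	#include "util/parse-events.h"
	#include "util/trace-event.h"

	static void maybe_write_tracing_data(int fd)
	{
		/* Skip the section entirely unless some -e event
		 * is a tracepoint. */
		if (have_tracepoints(&evsel_list))
			read_tracing_data(fd, &evsel_list);
	}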
diff --git a/tools/perf/util/util.c b/tools/perf/util/util.c
index 214265674ddd..5b3ea49aa63e 100644
--- a/tools/perf/util/util.c
+++ b/tools/perf/util/util.c
@@ -114,3 +114,20 @@ unsigned long convert_unit(unsigned long value, char *unit)
114 114
115 return value; 115 return value;
116} 116}
117
118int readn(int fd, void *buf, size_t n)
119{
120 void *buf_start = buf;
121
122 while (n) {
123 int ret = read(fd, buf, n);
124
125 if (ret <= 0)
126 return ret;
127
128 n -= ret;
129 buf += ret;
130 }
131
132 return buf - buf_start;
133}
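
readn() is do_read() moved out of the session code under its conventional name: it retries short reads until all n bytes arrive, returning n on success, 0 on clean end-of-file (even after a partial read), and read()'s negative return with errno set on error, which is why every converted caller above checks <= 0. A hedged caller sketch; read_exactly() is hypothetical:

	#include <stdio.h>
	#include "util/util.h"

	static int read_exactly(int fd, void *buf, size_t size)
	{
		int err = readn(fd, buf, size);

		if (err < 0)
			return err;		/* read() failed, errno set */
		if (err == 0) {
			fprintf(stderr, "unexpected end of file\n");
			return -1;
		}
		return 0;			/* all 'size' bytes arrived */
	}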
diff --git a/tools/perf/util/util.h b/tools/perf/util/util.h
index 7562707ddd1c..e833f26f3bfc 100644
--- a/tools/perf/util/util.h
+++ b/tools/perf/util/util.h
@@ -265,6 +265,7 @@ void argv_free(char **argv);
265bool strglobmatch(const char *str, const char *pat); 265bool strglobmatch(const char *str, const char *pat);
266bool strlazymatch(const char *str, const char *pat); 266bool strlazymatch(const char *str, const char *pat);
267unsigned long convert_unit(unsigned long value, char *unit); 267unsigned long convert_unit(unsigned long value, char *unit);
268int readn(int fd, void *buf, size_t size);
268 269
269#define _STR(x) #x 270#define _STR(x) #x
270#define STR(x) _STR(x) 271#define STR(x) _STR(x)
diff --git a/tools/perf/util/xyarray.c b/tools/perf/util/xyarray.c
new file mode 100644
index 000000000000..22afbf6c536a
--- /dev/null
+++ b/tools/perf/util/xyarray.c
@@ -0,0 +1,20 @@
1#include "xyarray.h"
2#include "util.h"
3
4struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
5{
6 size_t row_size = ylen * entry_size;
7 struct xyarray *xy = zalloc(sizeof(*xy) + xlen * row_size);
8
9 if (xy != NULL) {
10 xy->entry_size = entry_size;
11 xy->row_size = row_size;
12 }
13
14 return xy;
15}
16
17void xyarray__delete(struct xyarray *xy)
18{
19 free(xy);
20}
diff --git a/tools/perf/util/xyarray.h b/tools/perf/util/xyarray.h
new file mode 100644
index 000000000000..c488a07275dd
--- /dev/null
+++ b/tools/perf/util/xyarray.h
@@ -0,0 +1,20 @@
1#ifndef _PERF_XYARRAY_H_
2#define _PERF_XYARRAY_H_ 1
3
4#include <sys/types.h>
5
6struct xyarray {
7 size_t row_size;
8 size_t entry_size;
9 char contents[];
10};
11
12struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
13void xyarray__delete(struct xyarray *xy);
14
15static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
16{
17 return &xy->contents[x * xy->row_size + y * xy->entry_size];
18}
19
20#endif /* _PERF_XYARRAY_H_ */
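
xyarray packs a 2-D grid of fixed-size entries into one zalloc()ed block; the stored row_size lets xyarray__entry() index it without per-lookup dimension arguments. evsel.c layers its FD() macro on top of it, and builtin-top stores its struct mmap_data entries the same way. A minimal sketch:

	#include <assert.h>
	#include "util/xyarray.h"

	static void xyarray_demo(void)
	{
		/* 4 x 2 grid of int slots, zero-filled by zalloc(). */
		struct xyarray *fds = xyarray__new(4, 2, sizeof(int));

		assert(fds != NULL);
		*(int *)xyarray__entry(fds, 3, 1) = 42;	/* last cell */
		assert(*(int *)xyarray__entry(fds, 3, 1) == 42);
		xyarray__delete(fds);
	}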