aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorArnaldo Carvalho de Melo <acme@redhat.com>2011-01-30 08:59:43 -0500
committerArnaldo Carvalho de Melo <acme@redhat.com>2011-01-31 09:40:52 -0500
commit7e2ed097538c57ff5268e9a6bced7c0b885809c8 (patch)
tree44f9998cc6054d5bef07d6c2979afb0e81ddf13c
parentf8a9530939ed87b9a1b1a038b90e355098b679a2 (diff)
perf evlist: Store pointer to the cpu and thread maps
So that we don't have to pass it around to the several methods that need it, simplifying usage. There is one case where we don't have the thread/cpu map in advance, which is in the parsing routines used by top, stat, record, that we have to wait till all options are parsed to know if a cpu or thread list was passed to then create those maps. For that case consolidate the cpu and thread map creation via perf_evlist__create_maps() out of the code in top and record, while also providing a perf_evlist__set_maps() for cases where multiple evlists share maps or for when maps that represent CPU sockets, for instance, get crafted out of topology information or subsets of threads in a particular application are to be monitored, providing more granularity in specifying which cpus and threads to monitor. Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Ingo Molnar <mingo@elte.hu> Cc: Mike Galbraith <efault@gmx.de> Cc: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Tom Zanussi <tzanussi@gmail.com> LKML-Reference: <new-submission> Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r--tools/perf/builtin-record.c44
-rw-r--r--tools/perf/builtin-stat.c45
-rw-r--r--tools/perf/builtin-test.c6
-rw-r--r--tools/perf/builtin-top.c47
-rwxr-xr-xtools/perf/python/twatch.py4
-rw-r--r--tools/perf/util/evlist.c67
-rw-r--r--tools/perf/util/evlist.h29
-rw-r--r--tools/perf/util/python.c25
8 files changed, 148 insertions, 119 deletions
diff --git a/tools/perf/builtin-record.c b/tools/perf/builtin-record.c
index edc3555098c8..07f8d6d852c2 100644
--- a/tools/perf/builtin-record.c
+++ b/tools/perf/builtin-record.c
@@ -42,7 +42,6 @@ static u64 user_interval = ULLONG_MAX;
42static u64 default_interval = 0; 42static u64 default_interval = 0;
43static u64 sample_type; 43static u64 sample_type;
44 44
45static struct cpu_map *cpus;
46static unsigned int page_size; 45static unsigned int page_size;
47static unsigned int mmap_pages = 128; 46static unsigned int mmap_pages = 128;
48static unsigned int user_freq = UINT_MAX; 47static unsigned int user_freq = UINT_MAX;
@@ -58,7 +57,6 @@ static bool sample_id_all_avail = true;
58static bool system_wide = false; 57static bool system_wide = false;
59static pid_t target_pid = -1; 58static pid_t target_pid = -1;
60static pid_t target_tid = -1; 59static pid_t target_tid = -1;
61static struct thread_map *threads;
62static pid_t child_pid = -1; 60static pid_t child_pid = -1;
63static bool no_inherit = false; 61static bool no_inherit = false;
64static enum write_mode_t write_mode = WRITE_FORCE; 62static enum write_mode_t write_mode = WRITE_FORCE;
@@ -189,7 +187,7 @@ static void create_counter(struct perf_evsel *evsel, int cpu)
189 int thread_index; 187 int thread_index;
190 int ret; 188 int ret;
191 189
192 for (thread_index = 0; thread_index < threads->nr; thread_index++) { 190 for (thread_index = 0; thread_index < evsel_list->threads->nr; thread_index++) {
193 h_attr = get_header_attr(attr, evsel->idx); 191 h_attr = get_header_attr(attr, evsel->idx);
194 if (h_attr == NULL) 192 if (h_attr == NULL)
195 die("nomem\n"); 193 die("nomem\n");
@@ -317,7 +315,8 @@ static void open_counters(struct perf_evlist *evlist)
317retry_sample_id: 315retry_sample_id:
318 attr->sample_id_all = sample_id_all_avail ? 1 : 0; 316 attr->sample_id_all = sample_id_all_avail ? 1 : 0;
319try_again: 317try_again:
320 if (perf_evsel__open(pos, cpus, threads, group, !no_inherit) < 0) { 318 if (perf_evsel__open(pos, evlist->cpus, evlist->threads, group,
319 !no_inherit) < 0) {
321 int err = errno; 320 int err = errno;
322 321
323 if (err == EPERM || err == EACCES) 322 if (err == EPERM || err == EACCES)
@@ -368,10 +367,10 @@ try_again:
368 } 367 }
369 } 368 }
370 369
371 if (perf_evlist__mmap(evlist, cpus, threads, mmap_pages, false) < 0) 370 if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
372 die("failed to mmap with %d (%s)\n", errno, strerror(errno)); 371 die("failed to mmap with %d (%s)\n", errno, strerror(errno));
373 372
374 for (cpu = 0; cpu < cpus->nr; ++cpu) { 373 for (cpu = 0; cpu < evsel_list->cpus->nr; ++cpu) {
375 list_for_each_entry(pos, &evlist->entries, node) 374 list_for_each_entry(pos, &evlist->entries, node)
376 create_counter(pos, cpu); 375 create_counter(pos, cpu);
377 } 376 }
@@ -450,7 +449,7 @@ static void mmap_read_all(void)
450{ 449{
451 int i; 450 int i;
452 451
453 for (i = 0; i < cpus->nr; i++) { 452 for (i = 0; i < evsel_list->cpus->nr; i++) {
454 if (evsel_list->mmap[i].base) 453 if (evsel_list->mmap[i].base)
455 mmap_read(&evsel_list->mmap[i]); 454 mmap_read(&evsel_list->mmap[i]);
456 } 455 }
@@ -584,7 +583,7 @@ static int __cmd_record(int argc, const char **argv)
584 } 583 }
585 584
586 if (!system_wide && target_tid == -1 && target_pid == -1) 585 if (!system_wide && target_tid == -1 && target_pid == -1)
587 threads->map[0] = child_pid; 586 evsel_list->threads->map[0] = child_pid;
588 587
589 close(child_ready_pipe[1]); 588 close(child_ready_pipe[1]);
590 close(go_pipe[0]); 589 close(go_pipe[0]);
@@ -718,12 +717,12 @@ static int __cmd_record(int argc, const char **argv)
718 } 717 }
719 718
720 if (done) { 719 if (done) {
721 for (i = 0; i < cpus->nr; i++) { 720 for (i = 0; i < evsel_list->cpus->nr; i++) {
722 struct perf_evsel *pos; 721 struct perf_evsel *pos;
723 722
724 list_for_each_entry(pos, &evsel_list->entries, node) { 723 list_for_each_entry(pos, &evsel_list->entries, node) {
725 for (thread = 0; 724 for (thread = 0;
726 thread < threads->nr; 725 thread < evsel_list->threads->nr;
727 thread++) 726 thread++)
728 ioctl(FD(pos, i, thread), 727 ioctl(FD(pos, i, thread),
729 PERF_EVENT_IOC_DISABLE); 728 PERF_EVENT_IOC_DISABLE);
@@ -816,7 +815,7 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
816 int err = -ENOMEM; 815 int err = -ENOMEM;
817 struct perf_evsel *pos; 816 struct perf_evsel *pos;
818 817
819 evsel_list = perf_evlist__new(); 818 evsel_list = perf_evlist__new(NULL, NULL);
820 if (evsel_list == NULL) 819 if (evsel_list == NULL)
821 return -ENOMEM; 820 return -ENOMEM;
822 821
@@ -850,28 +849,19 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
850 if (target_pid != -1) 849 if (target_pid != -1)
851 target_tid = target_pid; 850 target_tid = target_pid;
852 851
853 threads = thread_map__new(target_pid, target_tid); 852 if (perf_evlist__create_maps(evsel_list, target_pid,
854 if (threads == NULL) { 853 target_tid, cpu_list) < 0)
855 pr_err("Problems finding threads of monitor\n");
856 usage_with_options(record_usage, record_options);
857 }
858
859 if (target_tid != -1)
860 cpus = cpu_map__dummy_new();
861 else
862 cpus = cpu_map__new(cpu_list);
863
864 if (cpus == NULL)
865 usage_with_options(record_usage, record_options); 854 usage_with_options(record_usage, record_options);
866 855
867 list_for_each_entry(pos, &evsel_list->entries, node) { 856 list_for_each_entry(pos, &evsel_list->entries, node) {
868 if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0) 857 if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
858 evsel_list->threads->nr) < 0)
869 goto out_free_fd; 859 goto out_free_fd;
870 if (perf_header__push_event(pos->attr.config, event_name(pos))) 860 if (perf_header__push_event(pos->attr.config, event_name(pos)))
871 goto out_free_fd; 861 goto out_free_fd;
872 } 862 }
873 863
874 if (perf_evlist__alloc_pollfd(evsel_list, cpus->nr, threads->nr) < 0) 864 if (perf_evlist__alloc_pollfd(evsel_list) < 0)
875 goto out_free_fd; 865 goto out_free_fd;
876 866
877 if (user_interval != ULLONG_MAX) 867 if (user_interval != ULLONG_MAX)
@@ -893,10 +883,8 @@ int cmd_record(int argc, const char **argv, const char *prefix __used)
893 } 883 }
894 884
895 err = __cmd_record(argc, argv); 885 err = __cmd_record(argc, argv);
896
897out_free_fd: 886out_free_fd:
898 thread_map__delete(threads); 887 perf_evlist__delete_maps(evsel_list);
899 threads = NULL;
900out_symbol_exit: 888out_symbol_exit:
901 symbol__exit(); 889 symbol__exit();
902 return err; 890 return err;
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 8906adfdbd8e..e0f95755361b 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -76,7 +76,6 @@ static struct perf_event_attr default_attrs[] = {
76struct perf_evlist *evsel_list; 76struct perf_evlist *evsel_list;
77 77
78static bool system_wide = false; 78static bool system_wide = false;
79static struct cpu_map *cpus;
80static int run_idx = 0; 79static int run_idx = 0;
81 80
82static int run_count = 1; 81static int run_count = 1;
@@ -85,7 +84,6 @@ static bool scale = true;
85static bool no_aggr = false; 84static bool no_aggr = false;
86static pid_t target_pid = -1; 85static pid_t target_pid = -1;
87static pid_t target_tid = -1; 86static pid_t target_tid = -1;
88static struct thread_map *threads;
89static pid_t child_pid = -1; 87static pid_t child_pid = -1;
90static bool null_run = false; 88static bool null_run = false;
91static bool big_num = true; 89static bool big_num = true;
@@ -170,7 +168,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
170 PERF_FORMAT_TOTAL_TIME_RUNNING; 168 PERF_FORMAT_TOTAL_TIME_RUNNING;
171 169
172 if (system_wide) 170 if (system_wide)
173 return perf_evsel__open_per_cpu(evsel, cpus, false, false); 171 return perf_evsel__open_per_cpu(evsel, evsel_list->cpus, false, false);
174 172
175 attr->inherit = !no_inherit; 173 attr->inherit = !no_inherit;
176 if (target_pid == -1 && target_tid == -1) { 174 if (target_pid == -1 && target_tid == -1) {
@@ -178,7 +176,7 @@ static int create_perf_stat_counter(struct perf_evsel *evsel)
178 attr->enable_on_exec = 1; 176 attr->enable_on_exec = 1;
179 } 177 }
180 178
181 return perf_evsel__open_per_thread(evsel, threads, false, false); 179 return perf_evsel__open_per_thread(evsel, evsel_list->threads, false, false);
182} 180}
183 181
184/* 182/*
@@ -203,7 +201,8 @@ static int read_counter_aggr(struct perf_evsel *counter)
203 u64 *count = counter->counts->aggr.values; 201 u64 *count = counter->counts->aggr.values;
204 int i; 202 int i;
205 203
206 if (__perf_evsel__read(counter, cpus->nr, threads->nr, scale) < 0) 204 if (__perf_evsel__read(counter, evsel_list->cpus->nr,
205 evsel_list->threads->nr, scale) < 0)
207 return -1; 206 return -1;
208 207
209 for (i = 0; i < 3; i++) 208 for (i = 0; i < 3; i++)
@@ -236,7 +235,7 @@ static int read_counter(struct perf_evsel *counter)
236 u64 *count; 235 u64 *count;
237 int cpu; 236 int cpu;
238 237
239 for (cpu = 0; cpu < cpus->nr; cpu++) { 238 for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
240 if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0) 239 if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
241 return -1; 240 return -1;
242 241
@@ -301,7 +300,7 @@ static int run_perf_stat(int argc __used, const char **argv)
301 } 300 }
302 301
303 if (target_tid == -1 && target_pid == -1 && !system_wide) 302 if (target_tid == -1 && target_pid == -1 && !system_wide)
304 threads->map[0] = child_pid; 303 evsel_list->threads->map[0] = child_pid;
305 304
306 /* 305 /*
307 * Wait for the child to be ready to exec. 306 * Wait for the child to be ready to exec.
@@ -353,12 +352,13 @@ static int run_perf_stat(int argc __used, const char **argv)
353 if (no_aggr) { 352 if (no_aggr) {
354 list_for_each_entry(counter, &evsel_list->entries, node) { 353 list_for_each_entry(counter, &evsel_list->entries, node) {
355 read_counter(counter); 354 read_counter(counter);
356 perf_evsel__close_fd(counter, cpus->nr, 1); 355 perf_evsel__close_fd(counter, evsel_list->cpus->nr, 1);
357 } 356 }
358 } else { 357 } else {
359 list_for_each_entry(counter, &evsel_list->entries, node) { 358 list_for_each_entry(counter, &evsel_list->entries, node) {
360 read_counter_aggr(counter); 359 read_counter_aggr(counter);
361 perf_evsel__close_fd(counter, cpus->nr, threads->nr); 360 perf_evsel__close_fd(counter, evsel_list->cpus->nr,
361 evsel_list->threads->nr);
362 } 362 }
363 } 363 }
364 364
@@ -386,7 +386,7 @@ static void nsec_printout(int cpu, struct perf_evsel *evsel, double avg)
386 if (no_aggr) 386 if (no_aggr)
387 sprintf(cpustr, "CPU%*d%s", 387 sprintf(cpustr, "CPU%*d%s",
388 csv_output ? 0 : -4, 388 csv_output ? 0 : -4,
389 cpus->map[cpu], csv_sep); 389 evsel_list->cpus->map[cpu], csv_sep);
390 390
391 fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel)); 391 fprintf(stderr, fmt, cpustr, msecs, csv_sep, event_name(evsel));
392 392
@@ -414,7 +414,7 @@ static void abs_printout(int cpu, struct perf_evsel *evsel, double avg)
414 if (no_aggr) 414 if (no_aggr)
415 sprintf(cpustr, "CPU%*d%s", 415 sprintf(cpustr, "CPU%*d%s",
416 csv_output ? 0 : -4, 416 csv_output ? 0 : -4,
417 cpus->map[cpu], csv_sep); 417 evsel_list->cpus->map[cpu], csv_sep);
418 else 418 else
419 cpu = 0; 419 cpu = 0;
420 420
@@ -500,14 +500,14 @@ static void print_counter(struct perf_evsel *counter)
500 u64 ena, run, val; 500 u64 ena, run, val;
501 int cpu; 501 int cpu;
502 502
503 for (cpu = 0; cpu < cpus->nr; cpu++) { 503 for (cpu = 0; cpu < evsel_list->cpus->nr; cpu++) {
504 val = counter->counts->cpu[cpu].val; 504 val = counter->counts->cpu[cpu].val;
505 ena = counter->counts->cpu[cpu].ena; 505 ena = counter->counts->cpu[cpu].ena;
506 run = counter->counts->cpu[cpu].run; 506 run = counter->counts->cpu[cpu].run;
507 if (run == 0 || ena == 0) { 507 if (run == 0 || ena == 0) {
508 fprintf(stderr, "CPU%*d%s%*s%s%-24s", 508 fprintf(stderr, "CPU%*d%s%*s%s%-24s",
509 csv_output ? 0 : -4, 509 csv_output ? 0 : -4,
510 cpus->map[cpu], csv_sep, 510 evsel_list->cpus->map[cpu], csv_sep,
511 csv_output ? 0 : 18, 511 csv_output ? 0 : 18,
512 "<not counted>", csv_sep, 512 "<not counted>", csv_sep,
513 event_name(counter)); 513 event_name(counter));
@@ -652,7 +652,7 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
652 652
653 setlocale(LC_ALL, ""); 653 setlocale(LC_ALL, "");
654 654
655 evsel_list = perf_evlist__new(); 655 evsel_list = perf_evlist__new(NULL, NULL);
656 if (evsel_list == NULL) 656 if (evsel_list == NULL)
657 return -ENOMEM; 657 return -ENOMEM;
658 658
@@ -701,18 +701,18 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
701 if (target_pid != -1) 701 if (target_pid != -1)
702 target_tid = target_pid; 702 target_tid = target_pid;
703 703
704 threads = thread_map__new(target_pid, target_tid); 704 evsel_list->threads = thread_map__new(target_pid, target_tid);
705 if (threads == NULL) { 705 if (evsel_list->threads == NULL) {
706 pr_err("Problems finding threads of monitor\n"); 706 pr_err("Problems finding threads of monitor\n");
707 usage_with_options(stat_usage, options); 707 usage_with_options(stat_usage, options);
708 } 708 }
709 709
710 if (system_wide) 710 if (system_wide)
711 cpus = cpu_map__new(cpu_list); 711 evsel_list->cpus = cpu_map__new(cpu_list);
712 else 712 else
713 cpus = cpu_map__dummy_new(); 713 evsel_list->cpus = cpu_map__dummy_new();
714 714
715 if (cpus == NULL) { 715 if (evsel_list->cpus == NULL) {
716 perror("failed to parse CPUs map"); 716 perror("failed to parse CPUs map");
717 usage_with_options(stat_usage, options); 717 usage_with_options(stat_usage, options);
718 return -1; 718 return -1;
@@ -720,8 +720,8 @@ int cmd_stat(int argc, const char **argv, const char *prefix __used)
720 720
721 list_for_each_entry(pos, &evsel_list->entries, node) { 721 list_for_each_entry(pos, &evsel_list->entries, node) {
722 if (perf_evsel__alloc_stat_priv(pos) < 0 || 722 if (perf_evsel__alloc_stat_priv(pos) < 0 ||
723 perf_evsel__alloc_counts(pos, cpus->nr) < 0 || 723 perf_evsel__alloc_counts(pos, evsel_list->cpus->nr) < 0 ||
724 perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0) 724 perf_evsel__alloc_fd(pos, evsel_list->cpus->nr, evsel_list->threads->nr) < 0)
725 goto out_free_fd; 725 goto out_free_fd;
726 } 726 }
727 727
@@ -750,7 +750,6 @@ out_free_fd:
750 perf_evsel__free_stat_priv(pos); 750 perf_evsel__free_stat_priv(pos);
751 perf_evlist__delete(evsel_list); 751 perf_evlist__delete(evsel_list);
752out: 752out:
753 thread_map__delete(threads); 753 perf_evlist__delete_maps(evsel_list);
754 threads = NULL;
755 return status; 754 return status;
756} 755}
diff --git a/tools/perf/builtin-test.c b/tools/perf/builtin-test.c
index 845b9bd54ed4..1b2106c58f66 100644
--- a/tools/perf/builtin-test.c
+++ b/tools/perf/builtin-test.c
@@ -509,7 +509,7 @@ static int test__basic_mmap(void)
509 goto out_free_cpus; 509 goto out_free_cpus;
510 } 510 }
511 511
512 evlist = perf_evlist__new(); 512 evlist = perf_evlist__new(cpus, threads);
513 if (evlist == NULL) { 513 if (evlist == NULL) {
514 pr_debug("perf_evlist__new\n"); 514 pr_debug("perf_evlist__new\n");
515 goto out_free_cpus; 515 goto out_free_cpus;
@@ -537,7 +537,7 @@ static int test__basic_mmap(void)
537 } 537 }
538 } 538 }
539 539
540 if (perf_evlist__mmap(evlist, cpus, threads, 128, true) < 0) { 540 if (perf_evlist__mmap(evlist, 128, true) < 0) {
541 pr_debug("failed to mmap events: %d (%s)\n", errno, 541 pr_debug("failed to mmap events: %d (%s)\n", errno,
542 strerror(errno)); 542 strerror(errno));
543 goto out_close_fd; 543 goto out_close_fd;
@@ -579,7 +579,7 @@ static int test__basic_mmap(void)
579 579
580 err = 0; 580 err = 0;
581out_munmap: 581out_munmap:
582 perf_evlist__munmap(evlist, 1); 582 perf_evlist__munmap(evlist);
583out_close_fd: 583out_close_fd:
584 for (i = 0; i < nsyscalls; ++i) 584 for (i = 0; i < nsyscalls; ++i)
585 perf_evsel__close_fd(evsels[i], 1, threads->nr); 585 perf_evsel__close_fd(evsels[i], 1, threads->nr);
diff --git a/tools/perf/builtin-top.c b/tools/perf/builtin-top.c
index 2f4d1f244be1..599036b06730 100644
--- a/tools/perf/builtin-top.c
+++ b/tools/perf/builtin-top.c
@@ -73,9 +73,7 @@ static int print_entries;
73 73
74static int target_pid = -1; 74static int target_pid = -1;
75static int target_tid = -1; 75static int target_tid = -1;
76static struct thread_map *threads;
77static bool inherit = false; 76static bool inherit = false;
78static struct cpu_map *cpus;
79static int realtime_prio = 0; 77static int realtime_prio = 0;
80static bool group = false; 78static bool group = false;
81static unsigned int page_size; 79static unsigned int page_size;
@@ -567,12 +565,13 @@ static void print_sym_table(struct perf_session *session)
567 printf(" (all"); 565 printf(" (all");
568 566
569 if (cpu_list) 567 if (cpu_list)
570 printf(", CPU%s: %s)\n", cpus->nr > 1 ? "s" : "", cpu_list); 568 printf(", CPU%s: %s)\n", evsel_list->cpus->nr > 1 ? "s" : "", cpu_list);
571 else { 569 else {
572 if (target_tid != -1) 570 if (target_tid != -1)
573 printf(")\n"); 571 printf(")\n");
574 else 572 else
575 printf(", %d CPU%s)\n", cpus->nr, cpus->nr > 1 ? "s" : ""); 573 printf(", %d CPU%s)\n", evsel_list->cpus->nr,
574 evsel_list->cpus->nr > 1 ? "s" : "");
576 } 575 }
577 576
578 printf("%-*.*s\n", win_width, win_width, graph_dotted_line); 577 printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
@@ -1124,7 +1123,7 @@ static void perf_session__mmap_read(struct perf_session *self)
1124{ 1123{
1125 int i; 1124 int i;
1126 1125
1127 for (i = 0; i < cpus->nr; i++) 1126 for (i = 0; i < evsel_list->cpus->nr; i++)
1128 perf_session__mmap_read_cpu(self, i); 1127 perf_session__mmap_read_cpu(self, i);
1129} 1128}
1130 1129
@@ -1150,7 +1149,8 @@ static void start_counters(struct perf_evlist *evlist)
1150 1149
1151 attr->mmap = 1; 1150 attr->mmap = 1;
1152try_again: 1151try_again:
1153 if (perf_evsel__open(counter, cpus, threads, group, inherit) < 0) { 1152 if (perf_evsel__open(counter, evsel_list->cpus,
1153 evsel_list->threads, group, inherit) < 0) {
1154 int err = errno; 1154 int err = errno;
1155 1155
1156 if (err == EPERM || err == EACCES) 1156 if (err == EPERM || err == EACCES)
@@ -1181,7 +1181,7 @@ try_again:
1181 } 1181 }
1182 } 1182 }
1183 1183
1184 if (perf_evlist__mmap(evlist, cpus, threads, mmap_pages, false) < 0) 1184 if (perf_evlist__mmap(evlist, mmap_pages, false) < 0)
1185 die("failed to mmap with %d (%s)\n", errno, strerror(errno)); 1185 die("failed to mmap with %d (%s)\n", errno, strerror(errno));
1186} 1186}
1187 1187
@@ -1296,7 +1296,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1296 struct perf_evsel *pos; 1296 struct perf_evsel *pos;
1297 int status = -ENOMEM; 1297 int status = -ENOMEM;
1298 1298
1299 evsel_list = perf_evlist__new(); 1299 evsel_list = perf_evlist__new(NULL, NULL);
1300 if (evsel_list == NULL) 1300 if (evsel_list == NULL)
1301 return -ENOMEM; 1301 return -ENOMEM;
1302 1302
@@ -1306,15 +1306,6 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1306 if (argc) 1306 if (argc)
1307 usage_with_options(top_usage, options); 1307 usage_with_options(top_usage, options);
1308 1308
1309 if (target_pid != -1)
1310 target_tid = target_pid;
1311
1312 threads = thread_map__new(target_pid, target_tid);
1313 if (threads == NULL) {
1314 pr_err("Problems finding threads of monitor\n");
1315 usage_with_options(top_usage, options);
1316 }
1317
1318 /* CPU and PID are mutually exclusive */ 1309 /* CPU and PID are mutually exclusive */
1319 if (target_tid > 0 && cpu_list) { 1310 if (target_tid > 0 && cpu_list) {
1320 printf("WARNING: PID switch overriding CPU\n"); 1311 printf("WARNING: PID switch overriding CPU\n");
@@ -1322,6 +1313,13 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1322 cpu_list = NULL; 1313 cpu_list = NULL;
1323 } 1314 }
1324 1315
1316 if (target_pid != -1)
1317 target_tid = target_pid;
1318
1319 if (perf_evlist__create_maps(evsel_list, target_pid,
1320 target_tid, cpu_list) < 0)
1321 usage_with_options(top_usage, options);
1322
1325 if (!evsel_list->nr_entries && 1323 if (!evsel_list->nr_entries &&
1326 perf_evlist__add_default(evsel_list) < 0) { 1324 perf_evlist__add_default(evsel_list) < 0) {
1327 pr_err("Not enough memory for event selector list\n"); 1325 pr_err("Not enough memory for event selector list\n");
@@ -1343,16 +1341,9 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1343 exit(EXIT_FAILURE); 1341 exit(EXIT_FAILURE);
1344 } 1342 }
1345 1343
1346 if (target_tid != -1)
1347 cpus = cpu_map__dummy_new();
1348 else
1349 cpus = cpu_map__new(cpu_list);
1350
1351 if (cpus == NULL)
1352 usage_with_options(top_usage, options);
1353
1354 list_for_each_entry(pos, &evsel_list->entries, node) { 1344 list_for_each_entry(pos, &evsel_list->entries, node) {
1355 if (perf_evsel__alloc_fd(pos, cpus->nr, threads->nr) < 0) 1345 if (perf_evsel__alloc_fd(pos, evsel_list->cpus->nr,
1346 evsel_list->threads->nr) < 0)
1356 goto out_free_fd; 1347 goto out_free_fd;
1357 /* 1348 /*
1358 * Fill in the ones not specifically initialized via -c: 1349 * Fill in the ones not specifically initialized via -c:
@@ -1363,8 +1354,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __used)
1363 pos->attr.sample_period = default_interval; 1354 pos->attr.sample_period = default_interval;
1364 } 1355 }
1365 1356
1366 if (perf_evlist__alloc_pollfd(evsel_list, cpus->nr, threads->nr) < 0 || 1357 if (perf_evlist__alloc_pollfd(evsel_list) < 0 ||
1367 perf_evlist__alloc_mmap(evsel_list, cpus->nr) < 0) 1358 perf_evlist__alloc_mmap(evsel_list) < 0)
1368 goto out_free_fd; 1359 goto out_free_fd;
1369 1360
1370 sym_evsel = list_entry(evsel_list->entries.next, struct perf_evsel, node); 1361 sym_evsel = list_entry(evsel_list->entries.next, struct perf_evsel, node);
diff --git a/tools/perf/python/twatch.py b/tools/perf/python/twatch.py
index 5e9f3b7b7ee8..df638c438a9f 100755
--- a/tools/perf/python/twatch.py
+++ b/tools/perf/python/twatch.py
@@ -23,9 +23,9 @@ def main():
23 sample_id_all = 1, 23 sample_id_all = 1,
24 sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID) 24 sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
25 evsel.open(cpus = cpus, threads = threads); 25 evsel.open(cpus = cpus, threads = threads);
26 evlist = perf.evlist() 26 evlist = perf.evlist(cpus, threads)
27 evlist.add(evsel) 27 evlist.add(evsel)
28 evlist.mmap(cpus = cpus, threads = threads) 28 evlist.mmap()
29 while True: 29 while True:
30 evlist.poll(timeout = -1) 30 evlist.poll(timeout = -1)
31 for cpu in cpus: 31 for cpu in cpus:
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index dcd59328bb49..95b21fece2ce 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -21,21 +21,24 @@
21#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 21#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
22#define SID(e, x, y) xyarray__entry(e->id, x, y) 22#define SID(e, x, y) xyarray__entry(e->id, x, y)
23 23
24void perf_evlist__init(struct perf_evlist *evlist) 24void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
25 struct thread_map *threads)
25{ 26{
26 int i; 27 int i;
27 28
28 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i) 29 for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
29 INIT_HLIST_HEAD(&evlist->heads[i]); 30 INIT_HLIST_HEAD(&evlist->heads[i]);
30 INIT_LIST_HEAD(&evlist->entries); 31 INIT_LIST_HEAD(&evlist->entries);
32 perf_evlist__set_maps(evlist, cpus, threads);
31} 33}
32 34
33struct perf_evlist *perf_evlist__new(void) 35struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
36 struct thread_map *threads)
34{ 37{
35 struct perf_evlist *evlist = zalloc(sizeof(*evlist)); 38 struct perf_evlist *evlist = zalloc(sizeof(*evlist));
36 39
37 if (evlist != NULL) 40 if (evlist != NULL)
38 perf_evlist__init(evlist); 41 perf_evlist__init(evlist, cpus, threads);
39 42
40 return evlist; 43 return evlist;
41} 44}
@@ -88,9 +91,9 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
88 return 0; 91 return 0;
89} 92}
90 93
91int perf_evlist__alloc_pollfd(struct perf_evlist *evlist, int ncpus, int nthreads) 94int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
92{ 95{
93 int nfds = ncpus * nthreads * evlist->nr_entries; 96 int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
94 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds); 97 evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
95 return evlist->pollfd != NULL ? 0 : -ENOMEM; 98 return evlist->pollfd != NULL ? 0 : -ENOMEM;
96} 99}
@@ -213,11 +216,11 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
213 return event; 216 return event;
214} 217}
215 218
216void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus) 219void perf_evlist__munmap(struct perf_evlist *evlist)
217{ 220{
218 int cpu; 221 int cpu;
219 222
220 for (cpu = 0; cpu < ncpus; cpu++) { 223 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
221 if (evlist->mmap[cpu].base != NULL) { 224 if (evlist->mmap[cpu].base != NULL) {
222 munmap(evlist->mmap[cpu].base, evlist->mmap_len); 225 munmap(evlist->mmap[cpu].base, evlist->mmap_len);
223 evlist->mmap[cpu].base = NULL; 226 evlist->mmap[cpu].base = NULL;
@@ -225,9 +228,9 @@ void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus)
225 } 228 }
226} 229}
227 230
228int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus) 231int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
229{ 232{
230 evlist->mmap = zalloc(ncpus * sizeof(struct perf_mmap)); 233 evlist->mmap = zalloc(evlist->cpus->nr * sizeof(struct perf_mmap));
231 return evlist->mmap != NULL ? 0 : -ENOMEM; 234 return evlist->mmap != NULL ? 0 : -ENOMEM;
232} 235}
233 236
@@ -248,8 +251,6 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
248/** perf_evlist__mmap - Create per cpu maps to receive events 251/** perf_evlist__mmap - Create per cpu maps to receive events
249 * 252 *
250 * @evlist - list of events 253 * @evlist - list of events
251 * @cpus - cpu map being monitored
252 * @threads - threads map being monitored
253 * @pages - map length in pages 254 * @pages - map length in pages
254 * @overwrite - overwrite older events? 255 * @overwrite - overwrite older events?
255 * 256 *
@@ -259,21 +260,22 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
259 * unsigned int head = perf_mmap__read_head(m); 260 * unsigned int head = perf_mmap__read_head(m);
260 * 261 *
261 * perf_mmap__write_tail(m, head) 262 * perf_mmap__write_tail(m, head)
263 *
264 * Using perf_evlist__read_on_cpu does this automatically.
262 */ 265 */
263int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus, 266int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite)
264 struct thread_map *threads, int pages, bool overwrite)
265{ 267{
266 unsigned int page_size = sysconf(_SC_PAGE_SIZE); 268 unsigned int page_size = sysconf(_SC_PAGE_SIZE);
267 int mask = pages * page_size - 1, cpu; 269 int mask = pages * page_size - 1, cpu;
268 struct perf_evsel *first_evsel, *evsel; 270 struct perf_evsel *first_evsel, *evsel;
271 const struct cpu_map *cpus = evlist->cpus;
272 const struct thread_map *threads = evlist->threads;
269 int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE); 273 int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
270 274
271 if (evlist->mmap == NULL && 275 if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
272 perf_evlist__alloc_mmap(evlist, cpus->nr) < 0)
273 return -ENOMEM; 276 return -ENOMEM;
274 277
275 if (evlist->pollfd == NULL && 278 if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
276 perf_evlist__alloc_pollfd(evlist, cpus->nr, threads->nr) < 0)
277 return -ENOMEM; 279 return -ENOMEM;
278 280
279 evlist->overwrite = overwrite; 281 evlist->overwrite = overwrite;
@@ -315,3 +317,34 @@ out_unmap:
315 } 317 }
316 return -1; 318 return -1;
317} 319}
320
321int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
322 pid_t target_tid, const char *cpu_list)
323{
324 evlist->threads = thread_map__new(target_pid, target_tid);
325
326 if (evlist->threads == NULL)
327 return -1;
328
329 if (target_tid != -1)
330 evlist->cpus = cpu_map__dummy_new();
331 else
332 evlist->cpus = cpu_map__new(cpu_list);
333
334 if (evlist->cpus == NULL)
335 goto out_delete_threads;
336
337 return 0;
338
339out_delete_threads:
340 thread_map__delete(evlist->threads);
341 return -1;
342}
343
344void perf_evlist__delete_maps(struct perf_evlist *evlist)
345{
346 cpu_map__delete(evlist->cpus);
347 thread_map__delete(evlist->threads);
348 evlist->cpus = NULL;
349 evlist->threads = NULL;
350}
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 85aca6eba16b..c9884056097c 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -22,28 +22,43 @@ struct perf_evlist {
22 union perf_event event_copy; 22 union perf_event event_copy;
23 struct perf_mmap *mmap; 23 struct perf_mmap *mmap;
24 struct pollfd *pollfd; 24 struct pollfd *pollfd;
25 struct thread_map *threads;
26 struct cpu_map *cpus;
25}; 27};
26 28
27struct perf_evsel; 29struct perf_evsel;
28 30
29struct perf_evlist *perf_evlist__new(void); 31struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
30void perf_evlist__init(struct perf_evlist *evlist); 32 struct thread_map *threads);
33void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
34 struct thread_map *threads);
31void perf_evlist__exit(struct perf_evlist *evlist); 35void perf_evlist__exit(struct perf_evlist *evlist);
32void perf_evlist__delete(struct perf_evlist *evlist); 36void perf_evlist__delete(struct perf_evlist *evlist);
33 37
34void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry); 38void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry);
35int perf_evlist__add_default(struct perf_evlist *evlist); 39int perf_evlist__add_default(struct perf_evlist *evlist);
36 40
37int perf_evlist__alloc_pollfd(struct perf_evlist *evlist, int ncpus, int nthreads); 41int perf_evlist__alloc_pollfd(struct perf_evlist *evlist);
38void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd); 42void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd);
39 43
40struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id); 44struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
41 45
42union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu); 46union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
43 47
44int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus); 48int perf_evlist__alloc_mmap(struct perf_evlist *evlist);
45int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus, 49int perf_evlist__mmap(struct perf_evlist *evlist, int pages, bool overwrite);
46 struct thread_map *threads, int pages, bool overwrite); 50void perf_evlist__munmap(struct perf_evlist *evlist);
47void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus); 51
52static inline void perf_evlist__set_maps(struct perf_evlist *evlist,
53 struct cpu_map *cpus,
54 struct thread_map *threads)
55{
56 evlist->cpus = cpus;
57 evlist->threads = threads;
58}
59
60int perf_evlist__create_maps(struct perf_evlist *evlist, pid_t target_pid,
61 pid_t target_tid, const char *cpu_list);
62void perf_evlist__delete_maps(struct perf_evlist *evlist);
48 63
49#endif /* __PERF_EVLIST_H */ 64#endif /* __PERF_EVLIST_H */
diff --git a/tools/perf/util/python.c b/tools/perf/util/python.c
index 88d47895a79c..d2d52175362e 100644
--- a/tools/perf/util/python.c
+++ b/tools/perf/util/python.c
@@ -553,7 +553,16 @@ struct pyrf_evlist {
553static int pyrf_evlist__init(struct pyrf_evlist *pevlist, 553static int pyrf_evlist__init(struct pyrf_evlist *pevlist,
554 PyObject *args, PyObject *kwargs) 554 PyObject *args, PyObject *kwargs)
555{ 555{
556 perf_evlist__init(&pevlist->evlist); 556 PyObject *pcpus = NULL, *pthreads = NULL;
557 struct cpu_map *cpus;
558 struct thread_map *threads;
559
560 if (!PyArg_ParseTuple(args, "OO", &pcpus, &pthreads))
561 return -1;
562
563 threads = ((struct pyrf_thread_map *)pthreads)->threads;
564 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
565 perf_evlist__init(&pevlist->evlist, cpus, threads);
557 return 0; 566 return 0;
558} 567}
559 568
@@ -567,21 +576,15 @@ static PyObject *pyrf_evlist__mmap(struct pyrf_evlist *pevlist,
567 PyObject *args, PyObject *kwargs) 576 PyObject *args, PyObject *kwargs)
568{ 577{
569 struct perf_evlist *evlist = &pevlist->evlist; 578 struct perf_evlist *evlist = &pevlist->evlist;
570 PyObject *pcpus = NULL, *pthreads = NULL; 579 static char *kwlist[] = {"pages", "overwrite",
571 struct cpu_map *cpus = NULL;
572 struct thread_map *threads = NULL;
573 static char *kwlist[] = {"cpus", "threads", "pages", "overwrite",
574 NULL, NULL}; 580 NULL, NULL};
575 int pages = 128, overwrite = false; 581 int pages = 128, overwrite = false;
576 582
577 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "OO|ii", kwlist, 583 if (!PyArg_ParseTupleAndKeywords(args, kwargs, "|ii", kwlist,
578 &pcpus, &pthreads, &pages, &overwrite)) 584 &pages, &overwrite))
579 return NULL; 585 return NULL;
580 586
581 threads = ((struct pyrf_thread_map *)pthreads)->threads; 587 if (perf_evlist__mmap(evlist, pages, overwrite) < 0) {
582 cpus = ((struct pyrf_cpu_map *)pcpus)->cpus;
583
584 if (perf_evlist__mmap(evlist, cpus, threads, pages, overwrite) < 0) {
585 PyErr_SetFromErrno(PyExc_OSError); 588 PyErr_SetFromErrno(PyExc_OSError);
586 return NULL; 589 return NULL;
587 } 590 }