Diffstat (limited to 'tools')
-rw-r--r--  tools/perf/builtin-stat.c     4
-rw-r--r--  tools/perf/util/evlist.c     37
-rw-r--r--  tools/perf/util/thread_map.h  5
3 files changed, 30 insertions(+), 16 deletions(-)
diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 020329dca005..20ffaf98782e 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -249,7 +249,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
 	int i;
 
 	if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter),
-			       evsel_list->threads->nr, scale) < 0)
+			       thread_map__nr(evsel_list->threads), scale) < 0)
 		return -1;
 
 	for (i = 0; i < 3; i++)
@@ -488,7 +488,7 @@ static int __run_perf_stat(int argc __maybe_unused, const char **argv)
 		list_for_each_entry(counter, &evsel_list->entries, node) {
 			read_counter_aggr(counter);
 			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
-					     evsel_list->threads->nr);
+					     thread_map__nr(evsel_list->threads));
 		}
 	}
 
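
Note: the two builtin-stat.c hunks above replace a direct dereference of evsel_list->threads->nr with the thread_map__nr() accessor added to thread_map.h at the end of this patch, which tolerates a NULL thread map by falling back to a count of 1. A minimal, self-contained sketch of that guard (the struct layout here is abbreviated to the nr field purely for illustration):

#include <stdio.h>

/* abbreviated stand-in for the real struct in tools/perf/util/thread_map.h */
struct thread_map { int nr; };

/* same shape as the accessor introduced by this patch */
static inline int thread_map__nr(struct thread_map *threads)
{
	return threads ? threads->nr : 1;
}

int main(void)
{
	struct thread_map per_task = { .nr = 4 };

	/* with a real map, the thread count is passed through unchanged */
	printf("%d\n", thread_map__nr(&per_task));	/* 4 */

	/* with no map (e.g. a system-wide/per-CPU session), fall back to 1
	 * instead of dereferencing a NULL pointer */
	printf("%d\n", thread_map__nr(NULL));		/* 1 */
	return 0;
}
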
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index a482547495b6..7d71a691b864 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -227,12 +227,14 @@ void perf_evlist__disable(struct perf_evlist *evlist)
 {
 	int cpu, thread;
 	struct perf_evsel *pos;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);
 
-	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		list_for_each_entry(pos, &evlist->entries, node) {
 			if (!perf_evsel__is_group_leader(pos))
 				continue;
-			for (thread = 0; thread < evlist->threads->nr; thread++)
+			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_DISABLE, 0);
 		}
@@ -243,12 +245,14 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 {
 	int cpu, thread;
 	struct perf_evsel *pos;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);
 
-	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		list_for_each_entry(pos, &evlist->entries, node) {
 			if (!perf_evsel__is_group_leader(pos))
 				continue;
-			for (thread = 0; thread < evlist->threads->nr; thread++)
+			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_ENABLE, 0);
 		}
@@ -257,7 +261,9 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 
 static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
-	int nfds = cpu_map__nr(evlist->cpus) * evlist->threads->nr * evlist->nr_entries;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);
+	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
 	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
 	return evlist->pollfd != NULL ? 0 : -ENOMEM;
 }
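
The alloc_pollfd hunk above folds the same accessors into the pollfd sizing, so a missing thread map still yields one slot per cpu per event rather than a NULL dereference. A self-contained sketch of that sizing arithmetic (struct layouts abbreviated, and nr_entries is a hypothetical stand-in for the evlist's event count):

#include <stdio.h>

/* abbreviated stand-ins; the real helpers live in cpu_map.h / thread_map.h */
struct cpu_map    { int nr; };
struct thread_map { int nr; };

static inline int cpu_map__nr(struct cpu_map *cpus)          { return cpus ? cpus->nr : 1; }
static inline int thread_map__nr(struct thread_map *threads) { return threads ? threads->nr : 1; }

int main(void)
{
	struct cpu_map cpus = { .nr = 8 };
	int nr_entries = 3;	/* e.g. three events in the evlist */

	/* same arithmetic as perf_evlist__alloc_pollfd() after this patch:
	 * with no thread map, nr_threads falls back to 1, so nfds = 8 * 1 * 3 */
	int nr_cpus = cpu_map__nr(&cpus);
	int nr_threads = thread_map__nr(NULL);
	int nfds = nr_cpus * nr_threads * nr_entries;

	printf("nfds = %d\n", nfds);	/* 24 */
	return 0;
}
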
@@ -417,7 +423,7 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 {
 	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
 	if (cpu_map__all(evlist->cpus))
-		evlist->nr_mmaps = evlist->threads->nr;
+		evlist->nr_mmaps = thread_map__nr(evlist->threads);
 	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
 	return evlist->mmap != NULL ? 0 : -ENOMEM;
 }
@@ -442,11 +448,13 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
 {
 	struct perf_evsel *evsel;
 	int cpu, thread;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = thread_map__nr(evlist->threads);
 
-	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		int output = -1;
 
-		for (thread = 0; thread < evlist->threads->nr; thread++) {
+		for (thread = 0; thread < nr_threads; thread++) {
 			list_for_each_entry(evsel, &evlist->entries, node) {
 				int fd = FD(evsel, cpu, thread);
 
@@ -470,7 +478,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
 	return 0;
 
 out_unmap:
-	for (cpu = 0; cpu < cpu_map__nr(evlist->cpus); cpu++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		if (evlist->mmap[cpu].base != NULL) {
 			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
 			evlist->mmap[cpu].base = NULL;
@@ -483,8 +491,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
 {
 	struct perf_evsel *evsel;
 	int thread;
+	int nr_threads = thread_map__nr(evlist->threads);
 
-	for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (thread = 0; thread < nr_threads; thread++) {
 		int output = -1;
 
 		list_for_each_entry(evsel, &evlist->entries, node) {
@@ -509,7 +518,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
 	return 0;
 
 out_unmap:
-	for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (thread = 0; thread < nr_threads; thread++) {
 		if (evlist->mmap[thread].base != NULL) {
 			munmap(evlist->mmap[thread].base, evlist->mmap_len);
 			evlist->mmap[thread].base = NULL;
@@ -610,7 +619,7 @@ int perf_evlist__apply_filters(struct perf_evlist *evlist)
 	struct perf_evsel *evsel;
 	int err = 0;
 	const int ncpus = cpu_map__nr(evlist->cpus),
-		  nthreads = evlist->threads->nr;
+		  nthreads = thread_map__nr(evlist->threads);
 
 	list_for_each_entry(evsel, &evlist->entries, node) {
 		if (evsel->filter == NULL)
@@ -629,7 +638,7 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
 	struct perf_evsel *evsel;
 	int err = 0;
 	const int ncpus = cpu_map__nr(evlist->cpus),
-		  nthreads = evlist->threads->nr;
+		  nthreads = thread_map__nr(evlist->threads);
 
 	list_for_each_entry(evsel, &evlist->entries, node) {
 		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
@@ -726,7 +735,7 @@ int perf_evlist__open(struct perf_evlist *evlist)
 	return 0;
 out_err:
 	ncpus = cpu_map__nr(evlist->cpus);
-	nthreads = evlist->threads ? evlist->threads->nr : 1;
+	nthreads = thread_map__nr(evlist->threads);
 
 	list_for_each_entry_reverse(evsel, &evlist->entries, node)
 		perf_evsel__close(evsel, ncpus, nthreads);
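
Across the evlist.c hunks, the map sizes are also hoisted into nr_cpus/nr_threads locals before the loops, so each iteration compares against a plain int instead of re-reading the maps. A minimal, self-contained sketch of that loop shape under the same assumptions as above (abbreviated structs, hypothetical walk() helper):

#include <stdio.h>

/* abbreviated stand-ins; the real helpers live in cpu_map.h / thread_map.h */
struct cpu_map    { int nr; };
struct thread_map { int nr; };

static inline int cpu_map__nr(struct cpu_map *cpus)          { return cpus ? cpus->nr : 1; }
static inline int thread_map__nr(struct thread_map *threads) { return threads ? threads->nr : 1; }

/* hypothetical walk over every (cpu, thread) slot, mirroring the hoisted
 * locals used in perf_evlist__enable()/__disable() after this patch */
static void walk(struct cpu_map *cpus, struct thread_map *threads)
{
	int nr_cpus = cpu_map__nr(cpus);		/* evaluated once, before the loops */
	int nr_threads = thread_map__nr(threads);	/* safe even if threads == NULL */

	for (int cpu = 0; cpu < nr_cpus; cpu++)
		for (int thread = 0; thread < nr_threads; thread++)
			printf("cpu %d, thread %d\n", cpu, thread);
}

int main(void)
{
	struct cpu_map cpus = { .nr = 2 };

	walk(&cpus, NULL);	/* no thread map: one slot per cpu */
	return 0;
}
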
diff --git a/tools/perf/util/thread_map.h b/tools/perf/util/thread_map.h
index f718df8a3c59..0cd8b3108084 100644
--- a/tools/perf/util/thread_map.h
+++ b/tools/perf/util/thread_map.h
@@ -21,4 +21,9 @@ void thread_map__delete(struct thread_map *threads);
 
 size_t thread_map__fprintf(struct thread_map *threads, FILE *fp);
 
+static inline int thread_map__nr(struct thread_map *threads)
+{
+	return threads ? threads->nr : 1;
+}
+
 #endif /* __PERF_THREAD_MAP_H */