aboutsummaryrefslogtreecommitdiffstats
path: root/tools/perf/util/evlist.c
diff options
context:
space:
mode:
Diffstat (limited to 'tools/perf/util/evlist.c')
-rw-r--r--  tools/perf/util/evlist.c  307
1 file changed, 250 insertions, 57 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 8065ce8fa9a5..f9f77bee0b1b 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -14,6 +14,7 @@
14#include "target.h" 14#include "target.h"
15#include "evlist.h" 15#include "evlist.h"
16#include "evsel.h" 16#include "evsel.h"
17#include "debug.h"
17#include <unistd.h> 18#include <unistd.h>
18 19
19#include "parse-events.h" 20#include "parse-events.h"
@@ -48,26 +49,29 @@ struct perf_evlist *perf_evlist__new(void)
48 return evlist; 49 return evlist;
49} 50}
50 51
51void perf_evlist__config(struct perf_evlist *evlist, 52/**
52 struct perf_record_opts *opts) 53 * perf_evlist__set_id_pos - set the positions of event ids.
54 * @evlist: selected event list
55 *
56 * Events with compatible sample types all have the same id_pos
57 * and is_pos. For convenience, put a copy on evlist.
58 */
59void perf_evlist__set_id_pos(struct perf_evlist *evlist)
53{ 60{
54 struct perf_evsel *evsel; 61 struct perf_evsel *first = perf_evlist__first(evlist);
55 /*
56 * Set the evsel leader links before we configure attributes,
57 * since some might depend on this info.
58 */
59 if (opts->group)
60 perf_evlist__set_leader(evlist);
61 62
62 if (evlist->cpus->map[0] < 0) 63 evlist->id_pos = first->id_pos;
63 opts->no_inherit = true; 64 evlist->is_pos = first->is_pos;
65}
64 66
65 list_for_each_entry(evsel, &evlist->entries, node) { 67static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
66 perf_evsel__config(evsel, opts); 68{
69 struct perf_evsel *evsel;
67 70
68 if (evlist->nr_entries > 1) 71 list_for_each_entry(evsel, &evlist->entries, node)
69 perf_evsel__set_sample_id(evsel); 72 perf_evsel__calc_id_pos(evsel);
70 } 73
74 perf_evlist__set_id_pos(evlist);
71} 75}
72 76
73static void perf_evlist__purge(struct perf_evlist *evlist) 77static void perf_evlist__purge(struct perf_evlist *evlist)
@@ -100,15 +104,20 @@ void perf_evlist__delete(struct perf_evlist *evlist)
100void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry) 104void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
101{ 105{
102 list_add_tail(&entry->node, &evlist->entries); 106 list_add_tail(&entry->node, &evlist->entries);
103 ++evlist->nr_entries; 107 if (!evlist->nr_entries++)
108 perf_evlist__set_id_pos(evlist);
104} 109}
105 110
106void perf_evlist__splice_list_tail(struct perf_evlist *evlist, 111void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
107 struct list_head *list, 112 struct list_head *list,
108 int nr_entries) 113 int nr_entries)
109{ 114{
115 bool set_id_pos = !evlist->nr_entries;
116
110 list_splice_tail(list, &evlist->entries); 117 list_splice_tail(list, &evlist->entries);
111 evlist->nr_entries += nr_entries; 118 evlist->nr_entries += nr_entries;
119 if (set_id_pos)
120 perf_evlist__set_id_pos(evlist);
112} 121}
113 122
114void __perf_evlist__set_leader(struct list_head *list) 123void __perf_evlist__set_leader(struct list_head *list)
@@ -209,6 +218,21 @@ perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
209 return NULL; 218 return NULL;
210} 219}
211 220
221struct perf_evsel *
222perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
223 const char *name)
224{
225 struct perf_evsel *evsel;
226
227 list_for_each_entry(evsel, &evlist->entries, node) {
228 if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
229 (strcmp(evsel->name, name) == 0))
230 return evsel;
231 }
232
233 return NULL;
234}
235
212int perf_evlist__add_newtp(struct perf_evlist *evlist, 236int perf_evlist__add_newtp(struct perf_evlist *evlist,
213 const char *sys, const char *name, void *handler) 237 const char *sys, const char *name, void *handler)
214{ 238{
@@ -232,7 +256,7 @@ void perf_evlist__disable(struct perf_evlist *evlist)
232 256
233 for (cpu = 0; cpu < nr_cpus; cpu++) { 257 for (cpu = 0; cpu < nr_cpus; cpu++) {
234 list_for_each_entry(pos, &evlist->entries, node) { 258 list_for_each_entry(pos, &evlist->entries, node) {
235 if (!perf_evsel__is_group_leader(pos)) 259 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
236 continue; 260 continue;
237 for (thread = 0; thread < nr_threads; thread++) 261 for (thread = 0; thread < nr_threads; thread++)
238 ioctl(FD(pos, cpu, thread), 262 ioctl(FD(pos, cpu, thread),
@@ -250,7 +274,7 @@ void perf_evlist__enable(struct perf_evlist *evlist)
250 274
251 for (cpu = 0; cpu < nr_cpus; cpu++) { 275 for (cpu = 0; cpu < nr_cpus; cpu++) {
252 list_for_each_entry(pos, &evlist->entries, node) { 276 list_for_each_entry(pos, &evlist->entries, node) {
253 if (!perf_evsel__is_group_leader(pos)) 277 if (!perf_evsel__is_group_leader(pos) || !pos->fd)
254 continue; 278 continue;
255 for (thread = 0; thread < nr_threads; thread++) 279 for (thread = 0; thread < nr_threads; thread++)
256 ioctl(FD(pos, cpu, thread), 280 ioctl(FD(pos, cpu, thread),
@@ -259,6 +283,44 @@ void perf_evlist__enable(struct perf_evlist *evlist)
259 } 283 }
260} 284}
261 285
286int perf_evlist__disable_event(struct perf_evlist *evlist,
287 struct perf_evsel *evsel)
288{
289 int cpu, thread, err;
290
291 if (!evsel->fd)
292 return 0;
293
294 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
295 for (thread = 0; thread < evlist->threads->nr; thread++) {
296 err = ioctl(FD(evsel, cpu, thread),
297 PERF_EVENT_IOC_DISABLE, 0);
298 if (err)
299 return err;
300 }
301 }
302 return 0;
303}
304
305int perf_evlist__enable_event(struct perf_evlist *evlist,
306 struct perf_evsel *evsel)
307{
308 int cpu, thread, err;
309
310 if (!evsel->fd)
311 return -EINVAL;
312
313 for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
314 for (thread = 0; thread < evlist->threads->nr; thread++) {
315 err = ioctl(FD(evsel, cpu, thread),
316 PERF_EVENT_IOC_ENABLE, 0);
317 if (err)
318 return err;
319 }
320 }
321 return 0;
322}
323
262static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist) 324static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
263{ 325{
264 int nr_cpus = cpu_map__nr(evlist->cpus); 326 int nr_cpus = cpu_map__nr(evlist->cpus);
@@ -302,6 +364,24 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
302{ 364{
303 u64 read_data[4] = { 0, }; 365 u64 read_data[4] = { 0, };
304 int id_idx = 1; /* The first entry is the counter value */ 366 int id_idx = 1; /* The first entry is the counter value */
367 u64 id;
368 int ret;
369
370 ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
371 if (!ret)
372 goto add;
373
374 if (errno != ENOTTY)
375 return -1;
376
377 /* Legacy way to get event id.. All hail to old kernels! */
378
379 /*
380 * This way does not work with group format read, so bail
381 * out in that case.
382 */
383 if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
384 return -1;
305 385
306 if (!(evsel->attr.read_format & PERF_FORMAT_ID) || 386 if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
307 read(fd, &read_data, sizeof(read_data)) == -1) 387 read(fd, &read_data, sizeof(read_data)) == -1)
@@ -312,25 +392,39 @@ static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
312 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) 392 if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
313 ++id_idx; 393 ++id_idx;
314 394
315 perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]); 395 id = read_data[id_idx];
396
397 add:
398 perf_evlist__id_add(evlist, evsel, cpu, thread, id);
316 return 0; 399 return 0;
317} 400}
318 401
319struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id) 402struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
320{ 403{
321 struct hlist_head *head; 404 struct hlist_head *head;
322 struct perf_sample_id *sid; 405 struct perf_sample_id *sid;
323 int hash; 406 int hash;
324 407
325 if (evlist->nr_entries == 1)
326 return perf_evlist__first(evlist);
327
328 hash = hash_64(id, PERF_EVLIST__HLIST_BITS); 408 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
329 head = &evlist->heads[hash]; 409 head = &evlist->heads[hash];
330 410
331 hlist_for_each_entry(sid, head, node) 411 hlist_for_each_entry(sid, head, node)
332 if (sid->id == id) 412 if (sid->id == id)
333 return sid->evsel; 413 return sid;
414
415 return NULL;
416}
417
418struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
419{
420 struct perf_sample_id *sid;
421
422 if (evlist->nr_entries == 1)
423 return perf_evlist__first(evlist);
424
425 sid = perf_evlist__id2sid(evlist, id);
426 if (sid)
427 return sid->evsel;
334 428
335 if (!perf_evlist__sample_id_all(evlist)) 429 if (!perf_evlist__sample_id_all(evlist))
336 return perf_evlist__first(evlist); 430 return perf_evlist__first(evlist);
@@ -338,6 +432,60 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
338 return NULL; 432 return NULL;
339} 433}
340 434
435static int perf_evlist__event2id(struct perf_evlist *evlist,
436 union perf_event *event, u64 *id)
437{
438 const u64 *array = event->sample.array;
439 ssize_t n;
440
441 n = (event->header.size - sizeof(event->header)) >> 3;
442
443 if (event->header.type == PERF_RECORD_SAMPLE) {
444 if (evlist->id_pos >= n)
445 return -1;
446 *id = array[evlist->id_pos];
447 } else {
448 if (evlist->is_pos > n)
449 return -1;
450 n -= evlist->is_pos;
451 *id = array[n];
452 }
453 return 0;
454}
455
456static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
457 union perf_event *event)
458{
459 struct perf_evsel *first = perf_evlist__first(evlist);
460 struct hlist_head *head;
461 struct perf_sample_id *sid;
462 int hash;
463 u64 id;
464
465 if (evlist->nr_entries == 1)
466 return first;
467
468 if (!first->attr.sample_id_all &&
469 event->header.type != PERF_RECORD_SAMPLE)
470 return first;
471
472 if (perf_evlist__event2id(evlist, event, &id))
473 return NULL;
474
475 /* Synthesized events have an id of zero */
476 if (!id)
477 return first;
478
479 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
480 head = &evlist->heads[hash];
481
482 hlist_for_each_entry(sid, head, node) {
483 if (sid->id == id)
484 return sid->evsel;
485 }
486 return NULL;
487}
488
341union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx) 489union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
342{ 490{
343 struct perf_mmap *md = &evlist->mmap[idx]; 491 struct perf_mmap *md = &evlist->mmap[idx];
@@ -403,16 +551,20 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
403 return event; 551 return event;
404} 552}
405 553
554static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
555{
556 if (evlist->mmap[idx].base != NULL) {
557 munmap(evlist->mmap[idx].base, evlist->mmap_len);
558 evlist->mmap[idx].base = NULL;
559 }
560}
561
406void perf_evlist__munmap(struct perf_evlist *evlist) 562void perf_evlist__munmap(struct perf_evlist *evlist)
407{ 563{
408 int i; 564 int i;
409 565
410 for (i = 0; i < evlist->nr_mmaps; i++) { 566 for (i = 0; i < evlist->nr_mmaps; i++)
411 if (evlist->mmap[i].base != NULL) { 567 __perf_evlist__munmap(evlist, i);
412 munmap(evlist->mmap[i].base, evlist->mmap_len);
413 evlist->mmap[i].base = NULL;
414 }
415 }
416 568
417 free(evlist->mmap); 569 free(evlist->mmap);
418 evlist->mmap = NULL; 570 evlist->mmap = NULL;
@@ -421,7 +573,7 @@ void perf_evlist__munmap(struct perf_evlist *evlist)
421static int perf_evlist__alloc_mmap(struct perf_evlist *evlist) 573static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
422{ 574{
423 evlist->nr_mmaps = cpu_map__nr(evlist->cpus); 575 evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
424 if (cpu_map__all(evlist->cpus)) 576 if (cpu_map__empty(evlist->cpus))
425 evlist->nr_mmaps = thread_map__nr(evlist->threads); 577 evlist->nr_mmaps = thread_map__nr(evlist->threads);
426 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap)); 578 evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
427 return evlist->mmap != NULL ? 0 : -ENOMEM; 579 return evlist->mmap != NULL ? 0 : -ENOMEM;
@@ -450,6 +602,7 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
450 int nr_cpus = cpu_map__nr(evlist->cpus); 602 int nr_cpus = cpu_map__nr(evlist->cpus);
451 int nr_threads = thread_map__nr(evlist->threads); 603 int nr_threads = thread_map__nr(evlist->threads);
452 604
605 pr_debug2("perf event ring buffer mmapped per cpu\n");
453 for (cpu = 0; cpu < nr_cpus; cpu++) { 606 for (cpu = 0; cpu < nr_cpus; cpu++) {
454 int output = -1; 607 int output = -1;
455 608
@@ -477,12 +630,8 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int m
477 return 0; 630 return 0;
478 631
479out_unmap: 632out_unmap:
480 for (cpu = 0; cpu < nr_cpus; cpu++) { 633 for (cpu = 0; cpu < nr_cpus; cpu++)
481 if (evlist->mmap[cpu].base != NULL) { 634 __perf_evlist__munmap(evlist, cpu);
482 munmap(evlist->mmap[cpu].base, evlist->mmap_len);
483 evlist->mmap[cpu].base = NULL;
484 }
485 }
486 return -1; 635 return -1;
487} 636}
488 637
@@ -492,6 +641,7 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
492 int thread; 641 int thread;
493 int nr_threads = thread_map__nr(evlist->threads); 642 int nr_threads = thread_map__nr(evlist->threads);
494 643
644 pr_debug2("perf event ring buffer mmapped per thread\n");
495 for (thread = 0; thread < nr_threads; thread++) { 645 for (thread = 0; thread < nr_threads; thread++) {
496 int output = -1; 646 int output = -1;
497 647
@@ -517,12 +667,8 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, in
517 return 0; 667 return 0;
518 668
519out_unmap: 669out_unmap:
520 for (thread = 0; thread < nr_threads; thread++) { 670 for (thread = 0; thread < nr_threads; thread++)
521 if (evlist->mmap[thread].base != NULL) { 671 __perf_evlist__munmap(evlist, thread);
522 munmap(evlist->mmap[thread].base, evlist->mmap_len);
523 evlist->mmap[thread].base = NULL;
524 }
525 }
526 return -1; 672 return -1;
527} 673}
528 674
@@ -573,7 +719,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
573 return -ENOMEM; 719 return -ENOMEM;
574 } 720 }
575 721
576 if (cpu_map__all(cpus)) 722 if (cpu_map__empty(cpus))
577 return perf_evlist__mmap_per_thread(evlist, prot, mask); 723 return perf_evlist__mmap_per_thread(evlist, prot, mask);
578 724
579 return perf_evlist__mmap_per_cpu(evlist, prot, mask); 725 return perf_evlist__mmap_per_cpu(evlist, prot, mask);
@@ -650,20 +796,66 @@ int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
650 796
651bool perf_evlist__valid_sample_type(struct perf_evlist *evlist) 797bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
652{ 798{
799 struct perf_evsel *pos;
800
801 if (evlist->nr_entries == 1)
802 return true;
803
804 if (evlist->id_pos < 0 || evlist->is_pos < 0)
805 return false;
806
807 list_for_each_entry(pos, &evlist->entries, node) {
808 if (pos->id_pos != evlist->id_pos ||
809 pos->is_pos != evlist->is_pos)
810 return false;
811 }
812
813 return true;
814}
815
816u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
817{
818 struct perf_evsel *evsel;
819
820 if (evlist->combined_sample_type)
821 return evlist->combined_sample_type;
822
823 list_for_each_entry(evsel, &evlist->entries, node)
824 evlist->combined_sample_type |= evsel->attr.sample_type;
825
826 return evlist->combined_sample_type;
827}
828
829u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
830{
831 evlist->combined_sample_type = 0;
832 return __perf_evlist__combined_sample_type(evlist);
833}
834
835bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
836{
653 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first; 837 struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
838 u64 read_format = first->attr.read_format;
839 u64 sample_type = first->attr.sample_type;
654 840
655 list_for_each_entry_continue(pos, &evlist->entries, node) { 841 list_for_each_entry_continue(pos, &evlist->entries, node) {
656 if (first->attr.sample_type != pos->attr.sample_type) 842 if (read_format != pos->attr.read_format)
657 return false; 843 return false;
658 } 844 }
659 845
846 /* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
847 if ((sample_type & PERF_SAMPLE_READ) &&
848 !(read_format & PERF_FORMAT_ID)) {
849 return false;
850 }
851
660 return true; 852 return true;
661} 853}
662 854
663u64 perf_evlist__sample_type(struct perf_evlist *evlist) 855u64 perf_evlist__read_format(struct perf_evlist *evlist)
664{ 856{
665 struct perf_evsel *first = perf_evlist__first(evlist); 857 struct perf_evsel *first = perf_evlist__first(evlist);
666 return first->attr.sample_type; 858 return first->attr.read_format;
667} 859}
668 860
669u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist) 861u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
@@ -692,6 +884,9 @@ u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
692 884
693 if (sample_type & PERF_SAMPLE_CPU) 885 if (sample_type & PERF_SAMPLE_CPU)
694 size += sizeof(data->cpu) * 2; 886 size += sizeof(data->cpu) * 2;
887
888 if (sample_type & PERF_SAMPLE_IDENTIFIER)
889 size += sizeof(data->id);
695out: 890out:
696 return size; 891 return size;
697} 892}
@@ -735,6 +930,8 @@ int perf_evlist__open(struct perf_evlist *evlist)
735 struct perf_evsel *evsel; 930 struct perf_evsel *evsel;
736 int err; 931 int err;
737 932
933 perf_evlist__update_id_pos(evlist);
934
738 list_for_each_entry(evsel, &evlist->entries, node) { 935 list_for_each_entry(evsel, &evlist->entries, node) {
739 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads); 936 err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
740 if (err < 0) 937 if (err < 0)
@@ -783,13 +980,6 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
783 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC); 980 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
784 981
785 /* 982 /*
786 * Do a dummy execvp to get the PLT entry resolved,
787 * so we avoid the resolver overhead on the real
788 * execvp call.
789 */
790 execvp("", (char **)argv);
791
792 /*
793 * Tell the parent we're ready to go 983 * Tell the parent we're ready to go
794 */ 984 */
795 close(child_ready_pipe[1]); 985 close(child_ready_pipe[1]);
@@ -838,7 +1028,7 @@ out_close_ready_pipe:
838int perf_evlist__start_workload(struct perf_evlist *evlist) 1028int perf_evlist__start_workload(struct perf_evlist *evlist)
839{ 1029{
840 if (evlist->workload.cork_fd > 0) { 1030 if (evlist->workload.cork_fd > 0) {
841 char bf; 1031 char bf = 0;
842 int ret; 1032 int ret;
843 /* 1033 /*
844 * Remove the cork, let it rip! 1034 * Remove the cork, let it rip!
@@ -857,7 +1047,10 @@ int perf_evlist__start_workload(struct perf_evlist *evlist)
857int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event, 1047int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
858 struct perf_sample *sample) 1048 struct perf_sample *sample)
859{ 1049{
860 struct perf_evsel *evsel = perf_evlist__first(evlist); 1050 struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);
1051
1052 if (!evsel)
1053 return -EFAULT;
861 return perf_evsel__parse_sample(evsel, event, sample); 1054 return perf_evsel__parse_sample(evsel, event, sample);
862} 1055}
863 1056