Diffstat (limited to 'tools/perf/util/evlist.c')
 tools/perf/util/evlist.c | 301 ++++++++++++++++++++++++++++++++-----------
 1 file changed, 225 insertions(+), 76 deletions(-)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index f9f77bee0b1b..bbc746aa5716 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -18,6 +18,7 @@
 #include <unistd.h>
 
 #include "parse-events.h"
+#include "parse-options.h"
 
 #include <sys/mman.h>
 
@@ -49,6 +50,18 @@ struct perf_evlist *perf_evlist__new(void)
 	return evlist;
 }
 
+struct perf_evlist *perf_evlist__new_default(void)
+{
+	struct perf_evlist *evlist = perf_evlist__new();
+
+	if (evlist && perf_evlist__add_default(evlist)) {
+		perf_evlist__delete(evlist);
+		evlist = NULL;
+	}
+
+	return evlist;
+}
+
 /**
  * perf_evlist__set_id_pos - set the positions of event ids.
  * @evlist: selected event list
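Note: perf_evlist__new_default() folds the common "allocate an evlist, add the default cycles event" sequence into one call. A minimal caller sketch (a hedged illustration, assuming the usual tools/perf internal headers; it is not part of the patch):

    #include "util/evlist.h"	/* tools/perf internal header */

    struct perf_evlist *evlist = perf_evlist__new_default();

    if (evlist == NULL)
            return -ENOMEM;	/* allocation or default-event setup failed */

    /* ... perf_evlist__open(), perf_evlist__mmap(), read loop ... */

    perf_evlist__delete(evlist);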
@@ -104,6 +117,8 @@ void perf_evlist__delete(struct perf_evlist *evlist)
 void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 {
 	list_add_tail(&entry->node, &evlist->entries);
+	entry->idx = evlist->nr_entries;
+
 	if (!evlist->nr_entries++)
 		perf_evlist__set_id_pos(evlist);
 }
@@ -152,7 +167,7 @@ int perf_evlist__add_default(struct perf_evlist *evlist)
 
 	event_attr_init(&attr);
 
-	evsel = perf_evsel__new(&attr, 0);
+	evsel = perf_evsel__new(&attr);
 	if (evsel == NULL)
 		goto error;
 
@@ -177,7 +192,7 @@ static int perf_evlist__add_attrs(struct perf_evlist *evlist,
 	size_t i;
 
 	for (i = 0; i < nr_attrs; i++) {
-		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
+		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
 		if (evsel == NULL)
 			goto out_delete_partial_list;
 		list_add_tail(&evsel->node, &head);
@@ -236,13 +251,12 @@ perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
 int perf_evlist__add_newtp(struct perf_evlist *evlist,
 			   const char *sys, const char *name, void *handler)
 {
-	struct perf_evsel *evsel;
+	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);
 
-	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
 	if (evsel == NULL)
 		return -1;
 
-	evsel->handler.func = handler;
+	evsel->handler = handler;
 	perf_evlist__add(evlist, evsel);
 	return 0;
 }
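Note: two changes meet in this hunk. perf_evsel__newtp() loses its idx argument because perf_evlist__add() now assigns entry->idx itself (see the perf_evlist__add hunk above), and evsel->handler becomes a plain void pointer rather than a struct with a .func member. A hedged usage sketch (tracepoint name and handler are illustrative, not from the patch):

    /* The handler is stored as an opaque pointer; the calling tool
     * casts it back to its own callback type when handling samples. */
    if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
                               my_sched_switch_handler) < 0)
            pr_err("could not set up sched:sched_switch\n");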
@@ -527,7 +541,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 	if ((old & md->mask) + size != ((old + size) & md->mask)) {
 		unsigned int offset = old;
 		unsigned int len = min(sizeof(*event), size), cpy;
-		void *dst = &md->event_copy;
+		void *dst = md->event_copy;
 
 		do {
 			cpy = min(md->mask + 1 - (offset & md->mask), len);
@@ -537,7 +551,7 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 			len -= cpy;
 		} while (len);
 
-		event = &md->event_copy;
+		event = (union perf_event *) md->event_copy;
 	}
 
 	old += size;
@@ -545,12 +559,19 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 
 	md->prev = old;
 
-	if (!evlist->overwrite)
-		perf_mmap__write_tail(md, old);
-
 	return event;
 }
 
+void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
+{
+	if (!evlist->overwrite) {
+		struct perf_mmap *md = &evlist->mmap[idx];
+		unsigned int old = md->prev;
+
+		perf_mmap__write_tail(md, old);
+	}
+}
+
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 {
 	if (evlist->mmap[idx].base != NULL) {
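Note: perf_evlist__mmap_read() no longer advances the ring-buffer tail. Consumers now acknowledge each event explicitly with perf_evlist__mmap_consume(), so the event payload stays valid until the caller is done with it. A sketch of the resulting read loop (the idx variable is illustrative):

    union perf_event *event;

    while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
            /* process the event while the kernel cannot overwrite it */
            perf_evlist__mmap_consume(evlist, idx);
    }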
@@ -587,6 +608,8 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist,
 	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
 				      MAP_SHARED, fd, 0);
 	if (evlist->mmap[idx].base == MAP_FAILED) {
+		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
+			  errno);
 		evlist->mmap[idx].base = NULL;
 		return -1;
 	}
@@ -595,9 +618,36 @@
 	return 0;
 }
 
-static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
+static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
+				       int prot, int mask, int cpu, int thread,
+				       int *output)
 {
 	struct perf_evsel *evsel;
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		int fd = FD(evsel, cpu, thread);
+
+		if (*output == -1) {
+			*output = fd;
+			if (__perf_evlist__mmap(evlist, idx, prot, mask,
+						*output) < 0)
+				return -1;
+		} else {
+			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
+				return -1;
+		}
+
+		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
+			return -1;
+	}
+
+	return 0;
+}
+
+static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot,
+				     int mask)
+{
 	int cpu, thread;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
 	int nr_threads = thread_map__nr(evlist->threads);
@@ -607,23 +657,9 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
 		int output = -1;
 
 		for (thread = 0; thread < nr_threads; thread++) {
-			list_for_each_entry(evsel, &evlist->entries, node) {
-				int fd = FD(evsel, cpu, thread);
-
-				if (output == -1) {
-					output = fd;
-					if (__perf_evlist__mmap(evlist, cpu,
-								prot, mask, output) < 0)
-						goto out_unmap;
-				} else {
-					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
-						goto out_unmap;
-				}
-
-				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
-					goto out_unmap;
-			}
+			if (perf_evlist__mmap_per_evsel(evlist, cpu, prot, mask,
+							cpu, thread, &output))
+				goto out_unmap;
 		}
 	}
 
@@ -635,9 +671,9 @@ out_unmap:
 	return -1;
 }
 
-static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
+static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot,
+					int mask)
 {
-	struct perf_evsel *evsel;
 	int thread;
 	int nr_threads = thread_map__nr(evlist->threads);
 
@@ -645,23 +681,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
 	for (thread = 0; thread < nr_threads; thread++) {
 		int output = -1;
 
-		list_for_each_entry(evsel, &evlist->entries, node) {
-			int fd = FD(evsel, 0, thread);
-
-			if (output == -1) {
-				output = fd;
-				if (__perf_evlist__mmap(evlist, thread,
-							prot, mask, output) < 0)
-					goto out_unmap;
-			} else {
-				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
-					goto out_unmap;
-			}
-
-			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
-				goto out_unmap;
-		}
+		if (perf_evlist__mmap_per_evsel(evlist, thread, prot, mask, 0,
+						thread, &output))
+			goto out_unmap;
 	}
 
 	return 0;
@@ -672,20 +694,90 @@ out_unmap:
 	return -1;
 }
 
-/** perf_evlist__mmap - Create per cpu maps to receive events
- *
- * @evlist - list of events
- * @pages - map length in pages
- * @overwrite - overwrite older events?
- *
- * If overwrite is false the user needs to signal event consuption using:
- *
- *	struct perf_mmap *m = &evlist->mmap[cpu];
- *	unsigned int head = perf_mmap__read_head(m);
+static size_t perf_evlist__mmap_size(unsigned long pages)
+{
+	/* 512 kiB: default amount of unprivileged mlocked memory */
+	if (pages == UINT_MAX)
+		pages = (512 * 1024) / page_size;
+	else if (!is_power_of_2(pages))
+		return 0;
+
+	return (pages + 1) * page_size;
+}
+
+static long parse_pages_arg(const char *str, unsigned long min,
+			    unsigned long max)
+{
+	unsigned long pages, val;
+	static struct parse_tag tags[] = {
+		{ .tag = 'B', .mult = 1       },
+		{ .tag = 'K', .mult = 1 << 10 },
+		{ .tag = 'M', .mult = 1 << 20 },
+		{ .tag = 'G', .mult = 1 << 30 },
+		{ .tag = 0 },
+	};
+
+	if (str == NULL)
+		return -EINVAL;
+
+	val = parse_tag_value(str, tags);
+	if (val != (unsigned long) -1) {
+		/* we got file size value */
+		pages = PERF_ALIGN(val, page_size) / page_size;
+	} else {
+		/* we got pages count value */
+		char *eptr;
+		pages = strtoul(str, &eptr, 10);
+		if (*eptr != '\0')
+			return -EINVAL;
+	}
+
+	if ((pages == 0) && (min == 0)) {
+		/* leave number of pages at 0 */
+	} else if (pages < (1UL << 31) && !is_power_of_2(pages)) {
+		/* round pages up to next power of 2 */
+		pages = next_pow2(pages);
+		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
+			pages * page_size, pages);
+	}
+
+	if (pages > max)
+		return -EINVAL;
+
+	return pages;
+}
+
+int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
+				  int unset __maybe_unused)
+{
+	unsigned int *mmap_pages = opt->value;
+	unsigned long max = UINT_MAX;
+	long pages;
+
+	if (max > SIZE_MAX / page_size)
+		max = SIZE_MAX / page_size;
+
+	pages = parse_pages_arg(str, 1, max);
+	if (pages < 0) {
+		pr_err("Invalid argument for --mmap_pages/-m\n");
+		return -1;
+	}
+
+	*mmap_pages = pages;
+	return 0;
+}
+
+/**
+ * perf_evlist__mmap - Create mmaps to receive events.
+ * @evlist: list of events
+ * @pages: map length in pages
+ * @overwrite: overwrite older events?
  *
- *	perf_mmap__write_tail(m, head)
+ * If @overwrite is %false the user needs to signal event consumption using
+ * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
+ * automatically.
  *
- * Using perf_evlist__read_on_cpu does this automatically.
+ * Return: %0 on success, negative error code otherwise.
  */
 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
 		      bool overwrite)
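Note: perf_evlist__mmap_size() returns (pages + 1) * page_size because the kernel places one control page (holding the head and tail pointers) in front of the power-of-2 data area, and parse_pages_arg() accepts either a raw page count or a B/K/M/G-suffixed size. Wiring the new parser into a tool's option table looks roughly like this (a sketch with illustrative variable names; perf record does essentially this for its -m/--mmap-pages option):

    OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
                 "number of mmap data pages",
                 perf_evlist__parse_mmap_pages),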
@@ -695,14 +787,6 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
 	const struct thread_map *threads = evlist->threads;
 	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;
 
-	/* 512 kiB: default amount of unprivileged mlocked memory */
-	if (pages == UINT_MAX)
-		pages = (512 * 1024) / page_size;
-	else if (!is_power_of_2(pages))
-		return -EINVAL;
-
-	mask = pages * page_size - 1;
-
 	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
 		return -ENOMEM;
 
@@ -710,7 +794,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
 		return -ENOMEM;
 
 	evlist->overwrite = overwrite;
-	evlist->mmap_len = (pages + 1) * page_size;
+	evlist->mmap_len = perf_evlist__mmap_size(pages);
+	pr_debug("mmap size %zuB\n", evlist->mmap_len);
+	mask = evlist->mmap_len - page_size - 1;
 
 	list_for_each_entry(evsel, &evlist->entries, node) {
 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -725,8 +811,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
 	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
 }
 
-int perf_evlist__create_maps(struct perf_evlist *evlist,
-			     struct perf_target *target)
+int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
 	evlist->threads = thread_map__new_str(target->pid, target->tid,
 					      target->uid);
@@ -734,9 +819,11 @@ int perf_evlist__create_maps(struct perf_evlist *evlist,
 	if (evlist->threads == NULL)
 		return -1;
 
-	if (perf_target__has_task(target))
+	if (target->force_per_cpu)
+		evlist->cpus = cpu_map__new(target->cpu_list);
+	else if (target__has_task(target))
 		evlist->cpus = cpu_map__dummy_new();
-	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
+	else if (!target__has_cpu(target) && !target->uses_mmap)
 		evlist->cpus = cpu_map__dummy_new();
 	else
 		evlist->cpus = cpu_map__new(target->cpu_list);
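Note: this hunk picks up the renamed target helpers (perf_target__* becomes target__*) and a new force_per_cpu flag that requests real per-cpu maps even when tracing specific tasks. A hedged caller sketch (field values are illustrative, not from the patch):

    struct target target = {
            .pid           = "1234",	/* trace an existing task... */
            .force_per_cpu = true,	/* ...but mmap per cpu anyway */
    };

    if (perf_evlist__create_maps(evlist, &target) < 0)
            return -1;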
@@ -945,8 +1032,7 @@ out_err:
 	return err;
 }
 
-int perf_evlist__prepare_workload(struct perf_evlist *evlist,
-				  struct perf_target *target,
+int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
 				  const char *argv[], bool pipe_output,
 				  bool want_signal)
 {
@@ -998,7 +1084,7 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist,
 		exit(-1);
 	}
 
-	if (perf_target__none(target))
+	if (target__none(target))
 		evlist->threads->map[0] = evlist->workload.pid;
 
 	close(child_ready_pipe[1]);
@@ -1064,5 +1150,68 @@ size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
 			   perf_evsel__name(evsel));
 	}
 
-	return printed + fprintf(fp, "\n");;
+	return printed + fprintf(fp, "\n");
+}
+
+int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
+			     int err, char *buf, size_t size)
+{
+	char sbuf[128];
+
+	switch (err) {
+	case ENOENT:
+		scnprintf(buf, size, "%s",
+			  "Error:\tUnable to find debugfs\n"
1165 "Hint:\tWas your kernel was compiled with debugfs support?\n"
1166 "Hint:\tIs the debugfs filesystem mounted?\n"
1167 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
1168 break;
1169 case EACCES:
1170 scnprintf(buf, size,
1171 "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
1172 "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
1173 debugfs_mountpoint, debugfs_mountpoint);
1174 break;
1175 default:
1176 scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
1177 break;
1178 }
1179
1180 return 0;
1181}
1182
1183int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
1184 int err, char *buf, size_t size)
1185{
1186 int printed, value;
1187 char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
1188
1189 switch (err) {
1190 case EACCES:
1191 case EPERM:
1192 printed = scnprintf(buf, size,
1193 "Error:\t%s.\n"
1194 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1195
1196 if (filename__read_int("/proc/sys/kernel/perf_event_paranoid", &value))
1197 break;
1198
1199 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1200
1201 if (value >= 2) {
1202 printed += scnprintf(buf + printed, size - printed,
1203 "For your workloads it needs to be <= 1\nHint:\t");
1204 }
1205 printed += scnprintf(buf + printed, size - printed,
1206 "For system wide tracing it needs to be set to -1");
1207
1208 printed += scnprintf(buf + printed, size - printed,
1209 ".\nHint:\tThe current value is %d.", value);
1210 break;
1211 default:
1212 scnprintf(buf, size, "%s", emsg);
1213 break;
1214 }
1215
1216 return 0;
1068} 1217}
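Note: the two strerror helpers turn raw errno values into actionable hints for the user. A sketch of the intended call site (buffer size illustrative; builtin-trace follows the same pattern after a failed open):

    char errbuf[BUFSIZ];

    if (perf_evlist__open(evlist) < 0) {
            perf_evlist__strerror_open(evlist, errno, errbuf, sizeof(errbuf));
            pr_err("%s\n", errbuf);
    }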