Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--  tools/perf/util/evsel.c  67
1 file changed, 50 insertions(+), 17 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index 21a373ebea22..e0868a901c4a 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -162,6 +162,7 @@ void perf_evsel__init(struct perf_evsel *evsel,
                       struct perf_event_attr *attr, int idx)
 {
         evsel->idx = idx;
+        evsel->tracking = !idx;
         evsel->attr = *attr;
         evsel->leader = evsel;
         evsel->unit = "";
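The new `tracking` member decouples "which evsel carries the side-band tracking events (mmap, comm, task)" from "whichever evsel happens to be first in the list", while keeping the old behaviour as the default (`!idx`). A minimal compilable sketch of the idea; the struct and function names are illustrative stand-ins, not the real tools/perf types:

/* Sketch: why a dedicated 'tracking' flag beats 'idx == 0'. */
#include <stdbool.h>
#include <stdio.h>

struct sketch_evsel {
        int idx;
        bool tracking;  /* this evsel carries mmap/comm/task side-band */
};

static void sketch_config(const struct sketch_evsel *evsel)
{
        /* Before: int track = !evsel->idx; tied tracking to list position.
         * After: any evsel can be elected, e.g. one whose PMU stays
         * enabled while the others are being multiplexed. */
        int track = evsel->tracking;

        printf("evsel %d: mmap/comm tracking %s\n",
               evsel->idx, track ? "on" : "off");
}

int main(void)
{
        struct sketch_evsel a = { .idx = 0, .tracking = false };
        struct sketch_evsel b = { .idx = 1, .tracking = true };

        sketch_config(&a);      /* first, but not the tracking evsel */
        sketch_config(&b);      /* elected tracking evsel */
        return 0;
}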
@@ -502,20 +503,19 @@ int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
 }
 
 static void
-perf_evsel__config_callgraph(struct perf_evsel *evsel,
-                             struct record_opts *opts)
+perf_evsel__config_callgraph(struct perf_evsel *evsel)
 {
         bool function = perf_evsel__is_function_event(evsel);
         struct perf_event_attr *attr = &evsel->attr;
 
         perf_evsel__set_sample_bit(evsel, CALLCHAIN);
 
-        if (opts->call_graph == CALLCHAIN_DWARF) {
+        if (callchain_param.record_mode == CALLCHAIN_DWARF) {
                 if (!function) {
                         perf_evsel__set_sample_bit(evsel, REGS_USER);
                         perf_evsel__set_sample_bit(evsel, STACK_USER);
                         attr->sample_regs_user = PERF_REGS_MASK;
-                        attr->sample_stack_user = opts->stack_dump_size;
+                        attr->sample_stack_user = callchain_param.dump_size;
                         attr->exclude_callchain_user = 1;
                 } else {
                         pr_info("Cannot use DWARF unwind for function trace event,"
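Reading `callchain_param` instead of `record_opts` moves the call-graph settings into one global parameter block, so `perf_evsel__config_callgraph()` no longer needs an `opts` argument. A compilable sketch of that refactor, using stand-in types (the real `struct callchain_param` in perf has more fields and lives elsewhere):

/* Sketch: per-call options replaced by a single global parameter block. */
#include <stdint.h>
#include <stdio.h>

enum sketch_chain_mode { CHAIN_NONE, CHAIN_FP, CHAIN_DWARF };

struct sketch_callchain_param {
        int enabled;
        enum sketch_chain_mode record_mode;
        uint32_t dump_size;     /* user-stack bytes for DWARF unwind */
};

/* One global, initialized once from the command line. */
static struct sketch_callchain_param callchain_param = {
        .enabled = 1,
        .record_mode = CHAIN_DWARF,
        .dump_size = 8192,
};

static void config_callgraph(void)      /* note: no opts argument needed */
{
        if (callchain_param.record_mode == CHAIN_DWARF)
                printf("DWARF unwind, %u byte stack dumps\n",
                       (unsigned int)callchain_param.dump_size);
}

int main(void)
{
        if (callchain_param.enabled)
                config_callgraph();
        return 0;
}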
@@ -561,7 +561,7 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
 {
         struct perf_evsel *leader = evsel->leader;
         struct perf_event_attr *attr = &evsel->attr;
-        int track = !evsel->idx; /* only the first counter needs these */
+        int track = evsel->tracking;
         bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;
 
         attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
@@ -624,8 +624,8 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
                 attr->mmap_data = track;
         }
 
-        if (opts->call_graph_enabled && !evsel->no_aux_samples)
-                perf_evsel__config_callgraph(evsel, opts);
+        if (callchain_param.enabled && !evsel->no_aux_samples)
+                perf_evsel__config_callgraph(evsel);
 
         if (target__has_cpu(&opts->target))
                 perf_evsel__set_sample_bit(evsel, CPU);
@@ -633,9 +633,12 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
         if (opts->period)
                 perf_evsel__set_sample_bit(evsel, PERIOD);
 
-        if (!perf_missing_features.sample_id_all &&
-            (opts->sample_time || !opts->no_inherit ||
-             target__has_cpu(&opts->target) || per_cpu))
+        /*
+         * When the user explicitly disabled time don't force it here.
+         */
+        if (opts->sample_time &&
+            (!perf_missing_features.sample_id_all &&
+             (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu)))
                 perf_evsel__set_sample_bit(evsel, TIME);
 
         if (opts->raw_samples && !evsel->no_aux_samples) {
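The reworked condition makes `opts->sample_time` a hard gate: if the user disabled timestamps, nothing else (per-cpu targets, inheritance) can force PERF_SAMPLE_TIME back on. A sketch of the predicate before and after, with the option flags flattened into plain booleans for illustration:

/* Sketch: the old predicate could set TIME even when sample_time was
 * false (e.g. for any per-cpu target); the new one cannot. */
#include <stdbool.h>
#include <stdio.h>

static bool old_time(bool sample_time, bool no_inherit,
                     bool has_cpu, bool per_cpu, bool missing_id_all)
{
        return !missing_id_all &&
               (sample_time || !no_inherit || has_cpu || per_cpu);
}

static bool new_time(bool sample_time, bool no_inherit,
                     bool has_cpu, bool per_cpu, bool missing_id_all)
{
        return sample_time &&
               (!missing_id_all &&
                (!no_inherit || has_cpu || per_cpu));
}

int main(void)
{
        /* User said "no timestamps" but records a per-cpu target: */
        printf("old: %d\n", old_time(false, true, true, false, false)); /* 1 */
        printf("new: %d\n", new_time(false, true, true, false, false)); /* 0 */
        return 0;
}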
@@ -692,6 +695,10 @@ void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
         int cpu, thread;
+
+        if (evsel->system_wide)
+                nthreads = 1;
+
         evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
 
         if (evsel->fd) {
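A system-wide evsel opens one FD per CPU rather than one per (CPU, thread), so every helper that sizes or walks the per-thread dimension first clamps `nthreads` to 1; the same three-line guard repeats in the ioctl, id-allocation, close and read paths below. A sketch of the sizing effect, assuming a plain flat array stands in for perf's xyarray:

/* Sketch: FD table shape for per-thread vs system-wide evsels. */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

static int *alloc_fd_grid(int ncpus, int *nthreads, bool system_wide)
{
        if (system_wide)
                *nthreads = 1;  /* one column: FDs are per-CPU only */
        return calloc((size_t)ncpus * (size_t)*nthreads, sizeof(int));
}

int main(void)
{
        int ncpus = 4, nthreads = 8;
        int *grid = alloc_fd_grid(ncpus, &nthreads, true);

        if (!grid)
                return 1;
        /* 4 CPUs x 1 "thread" slot instead of 4 x 8. */
        printf("allocated %d fd slots\n", ncpus * nthreads);
        free(grid);
        return 0;
}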
@@ -710,6 +717,9 @@ static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthrea
 {
         int cpu, thread;
 
+        if (evsel->system_wide)
+                nthreads = 1;
+
         for (cpu = 0; cpu < ncpus; cpu++) {
                 for (thread = 0; thread < nthreads; thread++) {
                         int fd = FD(evsel, cpu, thread),
@@ -740,6 +750,9 @@ int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
 
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
+        if (evsel->system_wide)
+                nthreads = 1;
+
         evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
         if (evsel->sample_id == NULL)
                 return -ENOMEM;
@@ -784,6 +797,9 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
 {
         int cpu, thread;
 
+        if (evsel->system_wide)
+                nthreads = 1;
+
         for (cpu = 0; cpu < ncpus; cpu++)
                 for (thread = 0; thread < nthreads; ++thread) {
                         close(FD(evsel, cpu, thread));
@@ -872,6 +888,9 @@ int __perf_evsel__read(struct perf_evsel *evsel,
         int cpu, thread;
         struct perf_counts_values *aggr = &evsel->counts->aggr, count;
 
+        if (evsel->system_wide)
+                nthreads = 1;
+
         aggr->val = aggr->ena = aggr->run = 0;
 
         for (cpu = 0; cpu < ncpus; cpu++) {
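`__perf_evsel__read()` sums the per-(cpu, thread) values into one aggregate, and with `system_wide` set it now walks a single thread column. A sketch of that aggregation, using a plain struct for the {value, enabled-time, running-time} triple perf keeps per FD; the types here are illustrative only:

/* Sketch: summing per-fd counter triples into one aggregate, with the
 * system-wide clamp applied first. */
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct counts { uint64_t val, ena, run; };

static struct counts aggregate(const struct counts *per_fd,
                               int ncpus, int nthreads, bool system_wide)
{
        struct counts aggr = { 0, 0, 0 };

        if (system_wide)
                nthreads = 1;   /* one fd per cpu, no thread columns */

        for (int cpu = 0; cpu < ncpus; cpu++)
                for (int thread = 0; thread < nthreads; thread++) {
                        const struct counts *c = &per_fd[cpu * nthreads + thread];

                        aggr.val += c->val;
                        aggr.ena += c->ena;
                        aggr.run += c->run;
                }
        return aggr;
}

int main(void)
{
        struct counts per_fd[2] = { { 100, 10, 10 }, { 200, 10, 8 } };
        struct counts a = aggregate(per_fd, 2, 1, true);

        printf("val=%llu ena=%llu run=%llu\n",
               (unsigned long long)a.val,
               (unsigned long long)a.ena,
               (unsigned long long)a.run);
        return 0;
}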
@@ -994,13 +1013,18 @@ static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
 static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                               struct thread_map *threads)
 {
-        int cpu, thread;
+        int cpu, thread, nthreads;
         unsigned long flags = PERF_FLAG_FD_CLOEXEC;
         int pid = -1, err;
         enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;
 
+        if (evsel->system_wide)
+                nthreads = 1;
+        else
+                nthreads = threads->nr;
+
         if (evsel->fd == NULL &&
-            perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
+            perf_evsel__alloc_fd(evsel, cpus->nr, nthreads) < 0)
                 return -ENOMEM;
 
         if (evsel->cgrp) {
@@ -1024,10 +1048,10 @@ retry_sample_id:
 
         for (cpu = 0; cpu < cpus->nr; cpu++) {
 
-                for (thread = 0; thread < threads->nr; thread++) {
+                for (thread = 0; thread < nthreads; thread++) {
                         int group_fd;
 
-                        if (!evsel->cgrp)
+                        if (!evsel->cgrp && !evsel->system_wide)
                                 pid = threads->map[thread];
 
                         group_fd = get_group_fd(evsel, cpu, thread);
@@ -1100,7 +1124,7 @@ out_close:
                         close(FD(evsel, cpu, thread));
                         FD(evsel, cpu, thread) = -1;
                 }
-                thread = threads->nr;
+                thread = nthreads;
         } while (--cpu >= 0);
         return err;
 }
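Because a system-wide evsel never sets a per-thread pid, the open loop keeps `pid` at its initial -1, which is the kernel's "all processes on this CPU" convention for perf_event_open(2) (pid == -1 requires cpu >= 0). A minimal standalone sketch of that call shape on Linux; error handling trimmed, and it may need root or a lowered perf_event_paranoid to succeed:

/* Sketch: pid/cpu combinations for perf_event_open(2).
 * pid == -1, cpu >= 0  -> measure everything on that CPU (system-wide)
 * pid >= 0,  cpu == -1 -> measure one thread on any CPU */
#include <linux/perf_event.h>
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static long perf_event_open(struct perf_event_attr *attr, pid_t pid,
                            int cpu, int group_fd, unsigned long flags)
{
        /* glibc has no wrapper; go through syscall(2). */
        return syscall(__NR_perf_event_open, attr, pid, cpu, group_fd, flags);
}

int main(void)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_CPU_CLOCK;

        /* System-wide on CPU 0: pid = -1, as in the loop above. */
        long fd = perf_event_open(&attr, -1, 0, -1, 0);

        if (fd < 0)
                perror("perf_event_open");
        else
                close((int)fd);
        return 0;
}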
@@ -2002,6 +2026,8 @@ bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
 int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
                               int err, char *msg, size_t size)
 {
+        char sbuf[STRERR_BUFSIZE];
+
         switch (err) {
         case EPERM:
         case EACCES:
@@ -2036,13 +2062,20 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
         "No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
 #endif
                 break;
+        case EBUSY:
+                if (find_process("oprofiled"))
+                        return scnprintf(msg, size,
+        "The PMU counters are busy/taken by another profiler.\n"
+        "We found oprofile daemon running, please stop it and try again.");
+                break;
         default:
                 break;
         }
 
         return scnprintf(msg, size,
-        "The sys_perf_event_open() syscall returned with %d (%s) for event (%s). \n"
+        "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
         "/bin/dmesg may provide additional information.\n"
         "No CONFIG_PERF_EVENTS=y kernel support configured?\n",
-                         err, strerror(err), perf_evsel__name(evsel));
+                         err, strerror_r(err, sbuf, sizeof(sbuf)),
+                         perf_evsel__name(evsel));
 }
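Switching to `strerror_r()` with a caller-owned `sbuf` avoids `strerror()`'s shared static buffer, which is not thread-safe. One wrinkle worth noting: glibc ships two variants, the XSI one returning `int` (available with `-D_POSIX_C_SOURCE=200112L` and no `_GNU_SOURCE`) and a GNU one returning `char *`. The sketch below assumes the XSI variant, and 128 stands in for perf's own STRERR_BUFSIZE constant:

/* Sketch: thread-safe errno formatting with the XSI strerror_r().
 * Build without _GNU_SOURCE (e.g. -std=c11 -D_POSIX_C_SOURCE=200112L)
 * so strerror_r returns int, as assumed here. */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define SBUF_SIZE 128   /* stands in for perf's STRERR_BUFSIZE */

int main(void)
{
        char sbuf[SBUF_SIZE];

        /* Each thread formats into its own buffer, no shared state. */
        if (strerror_r(EBUSY, sbuf, sizeof(sbuf)) != 0)
                snprintf(sbuf, sizeof(sbuf), "errno %d", EBUSY);

        printf("EBUSY -> %s\n", sbuf);
        return 0;
}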