Diffstat (limited to 'tools/perf/util/evsel.c')
-rw-r--r--	tools/perf/util/evsel.c	186
1 file changed, 186 insertions(+), 0 deletions(-)
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
new file mode 100644
index 00000000000..c95267e63c5
--- /dev/null
+++ b/tools/perf/util/evsel.c
@@ -0,0 +1,186 @@
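/*
 * Event selector (evsel) routines: allocating the per-event state,
 * fd bookkeeping, reading counter values (optionally scaled to
 * compensate for multiplexing), and opening the event per cpu
 * and/or per thread via sys_perf_event_open().
 */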
#include "evsel.h"
#include "../perf.h"
#include "util.h"
#include "cpumap.h"
#include "thread.h"

/* Accessor for the fd slot of the (cpu, thread) instance of the event. */
#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

struct perf_evsel *perf_evsel__new(u32 type, u64 config, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		evsel->idx = idx;
		evsel->attr.type = type;
		evsel->attr.config = config;
		INIT_LIST_HEAD(&evsel->node);
	}

	return evsel;
}

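/*
 * The fd table is an ncpus x nthreads matrix of ints, one file
 * descriptor per (cpu, thread) instance of the event, addressed
 * through the FD() accessor above.
 */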
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));
	return evsel->fd != NULL ? 0 : -ENOMEM;
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; thread++) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	xyarray__delete(evsel->fd);
	free(evsel->counts);	/* may have been allocated by a read */
	free(evsel);
}

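/*
 * Read the counter for a single (cpu, thread) instance.  With scale,
 * the kernel is expected to return three u64s -- value, time_enabled
 * and time_running, i.e. the event was opened with
 * PERF_FORMAT_TOTAL_TIME_ENABLED | PERF_FORMAT_TOTAL_TIME_RUNNING --
 * and the value is extrapolated by ena/run to compensate for time the
 * counter spent multiplexed off the PMU.
 */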
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

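/*
 * Aggregate the counter across all (cpu, thread) instances.  On
 * return, counts->scaled is -1 if the counter never ran, 1 if the
 * value was extrapolated because run < ena, and 0 otherwise.
 */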
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	/*
	 * Reset ena/run as well as val, so that repeated reads don't
	 * accumulate stale enabled/running times.
	 */
	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

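/*
 * Open one instance of the event on each cpu in the map.  pid == -1
 * with a valid cpu means: count every task that runs on that cpu.
 */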
int perf_evsel__open_per_cpu(struct perf_evsel *evsel, struct cpu_map *cpus)
{
	int cpu;

	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, cpus->nr, 1) < 0)
		return -1;

	for (cpu = 0; cpu < cpus->nr; cpu++) {
		FD(evsel, cpu, 0) = sys_perf_event_open(&evsel->attr, -1,
							cpus->map[cpu], -1, 0);
		if (FD(evsel, cpu, 0) < 0)
			goto out_close;
	}

	return 0;

out_close:
	while (--cpu >= 0) {
		close(FD(evsel, cpu, 0));
		FD(evsel, cpu, 0) = -1;
	}
	return -1;
}

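/*
 * Open one instance of the event per thread in the map.  cpu == -1
 * means: follow the thread on whatever cpu it gets scheduled on.
 */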
int perf_evsel__open_per_thread(struct perf_evsel *evsel, struct thread_map *threads)
{
	int thread;

	if (evsel->fd == NULL && perf_evsel__alloc_fd(evsel, 1, threads->nr) < 0)
		return -1;

	for (thread = 0; thread < threads->nr; thread++) {
		FD(evsel, 0, thread) = sys_perf_event_open(&evsel->attr,
							   threads->map[thread], -1, -1, 0);
		if (FD(evsel, 0, thread) < 0)
			goto out_close;
	}

	return 0;

out_close:
	while (--thread >= 0) {
		close(FD(evsel, 0, thread));
		FD(evsel, 0, thread) = -1;
	}
	return -1;
}

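/*
 * Convenience wrapper: a NULL thread map means system-wide counting,
 * so fall back to opening per cpu; otherwise open per thread.
 */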
int perf_evsel__open(struct perf_evsel *evsel,
		     struct cpu_map *cpus, struct thread_map *threads)
{
	if (threads == NULL)
		return perf_evsel__open_per_cpu(evsel, cpus);

	return perf_evsel__open_per_thread(evsel, threads);
}
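
/*
 * Usage sketch -- a hypothetical caller, not part of this file.  It
 * assumes cpu_map__new() from cpumap.h is available in the surrounding
 * tree, plus the PERF_TYPE_* / PERF_COUNT_* / PERF_FORMAT_* constants
 * from the perf_event ABI:
 *
 *	struct cpu_map *cpus = cpu_map__new(NULL);	// all online cpus
 *	struct perf_evsel *evsel = perf_evsel__new(PERF_TYPE_HARDWARE,
 *						   PERF_COUNT_HW_CPU_CYCLES, 0);
 *
 *	if (evsel != NULL && cpus != NULL) {
 *		// Ask for enabled/running times so the read can be scaled.
 *		evsel->attr.read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
 *					  PERF_FORMAT_TOTAL_TIME_RUNNING;
 *
 *		if (perf_evsel__open_per_cpu(evsel, cpus) == 0) {
 *			sleep(1);
 *			if (perf_evsel__alloc_counts(evsel, cpus->nr) == 0 &&
 *			    __perf_evsel__read(evsel, cpus->nr, 1, true) == 0)
 *				printf("cycles: %" PRIu64 "\n",
 *				       evsel->counts->aggr.val);
 *			perf_evsel__close_fd(evsel, cpus->nr, 1);
 *		}
 *		perf_evsel__delete(evsel);
 *	}
 */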