author     Arnaldo Carvalho de Melo <acme@redhat.com>    2011-01-30 07:46:46 -0500
committer  Arnaldo Carvalho de Melo <acme@redhat.com>    2011-01-30 08:41:13 -0500
commit     f8a9530939ed87b9a1b1a038b90e355098b679a2 (patch)
tree       a24954b748120ae6f83f30c00af747c22acfb89e /tools
parent     877108e42b1b9ba64857c4030cf356ecc120fd18 (diff)
perf evlist: Move evlist methods to evlist.c
They were in evsel.c because they came from refactoring existing evsel
methods, so, to make reviewing those changes easier, I kept them there;
now it's a plain move.
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Tom Zanussi <tzanussi@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools')

 -rw-r--r--  tools/perf/util/evlist.c | 142
 -rw-r--r--  tools/perf/util/evlist.h |   7
 -rw-r--r--  tools/perf/util/evsel.c  | 144
 -rw-r--r--  tools/perf/util/evsel.h  |   4

 4 files changed, 158 insertions, 139 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 917fc18d0bed..dcd59328bb49 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -1,11 +1,26 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
 #include <poll.h>
+#include "cpumap.h"
+#include "thread_map.h"
 #include "evlist.h"
 #include "evsel.h"
 #include "util.h"
 
+#include <sys/mman.h>
+
 #include <linux/bitops.h>
 #include <linux/hash.h>
 
+#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
+#define SID(e, x, y) xyarray__entry(e->id, x, y)
+
 void perf_evlist__init(struct perf_evlist *evlist)
 {
         int i;
@@ -88,6 +103,30 @@ void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
         evlist->nr_fds++;
 }
 
+static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
+                                int cpu, int thread, int fd)
+{
+        struct perf_sample_id *sid;
+        u64 read_data[4] = { 0, };
+        int hash, id_idx = 1; /* The first entry is the counter value */
+
+        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
+            read(fd, &read_data, sizeof(read_data)) == -1)
+                return -1;
+
+        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
+                ++id_idx;
+        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
+                ++id_idx;
+
+        sid = SID(evsel, cpu, thread);
+        sid->id = read_data[id_idx];
+        sid->evsel = evsel;
+        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
+        hlist_add_head(&sid->node, &evlist->heads[hash]);
+        return 0;
+}
+
 struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
 {
         struct hlist_head *head;
@@ -173,3 +212,106 @@ union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *evlist, int cpu)
 
         return event;
 }
+
+void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus)
+{
+        int cpu;
+
+        for (cpu = 0; cpu < ncpus; cpu++) {
+                if (evlist->mmap[cpu].base != NULL) {
+                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+                        evlist->mmap[cpu].base = NULL;
+                }
+        }
+}
+
+int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus)
+{
+        evlist->mmap = zalloc(ncpus * sizeof(struct perf_mmap));
+        return evlist->mmap != NULL ? 0 : -ENOMEM;
+}
+
+static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
+                               int mask, int fd)
+{
+        evlist->mmap[cpu].prev = 0;
+        evlist->mmap[cpu].mask = mask;
+        evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
+                                      MAP_SHARED, fd, 0);
+        if (evlist->mmap[cpu].base == MAP_FAILED)
+                return -1;
+
+        perf_evlist__add_pollfd(evlist, fd);
+        return 0;
+}
+
+/** perf_evlist__mmap - Create per cpu maps to receive events
+ *
+ * @evlist - list of events
+ * @cpus - cpu map being monitored
+ * @threads - threads map being monitored
+ * @pages - map length in pages
+ * @overwrite - overwrite older events?
+ *
+ * If overwrite is false the user needs to signal event consuption using:
+ *
+ * struct perf_mmap *m = &evlist->mmap[cpu];
+ * unsigned int head = perf_mmap__read_head(m);
+ *
+ * perf_mmap__write_tail(m, head)
+ */
+int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
+                      struct thread_map *threads, int pages, bool overwrite)
+{
+        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
+        int mask = pages * page_size - 1, cpu;
+        struct perf_evsel *first_evsel, *evsel;
+        int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
+
+        if (evlist->mmap == NULL &&
+            perf_evlist__alloc_mmap(evlist, cpus->nr) < 0)
+                return -ENOMEM;
+
+        if (evlist->pollfd == NULL &&
+            perf_evlist__alloc_pollfd(evlist, cpus->nr, threads->nr) < 0)
+                return -ENOMEM;
+
+        evlist->overwrite = overwrite;
+        evlist->mmap_len = (pages + 1) * page_size;
+        first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
+
+        list_for_each_entry(evsel, &evlist->entries, node) {
+                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+                    evsel->id == NULL &&
+                    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
+                        return -ENOMEM;
+
+                for (cpu = 0; cpu < cpus->nr; cpu++) {
+                        for (thread = 0; thread < threads->nr; thread++) {
+                                int fd = FD(evsel, cpu, thread);
+
+                                if (evsel->idx || thread) {
+                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
+                                                  FD(first_evsel, cpu, 0)) != 0)
+                                                goto out_unmap;
+                                } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
+                                        goto out_unmap;
+
+                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
+                                    perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
+                                        goto out_unmap;
+                        }
+                }
+        }
+
+        return 0;
+
+out_unmap:
+        for (cpu = 0; cpu < cpus->nr; cpu++) {
+                if (evlist->mmap[cpu].base != NULL) {
+                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
+                        evlist->mmap[cpu].base = NULL;
+                }
+        }
+        return -1;
+}
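
[Editor's note] The comment on perf_evlist__mmap() above spells out the consumption protocol a tool must follow when overwrite is false. The sketch below is not part of the commit; it is a minimal illustration of that protocol, assuming the perf_mmap helpers named in that comment (perf_mmap__read_head()/perf_mmap__write_tail()) and the evlist API from this file are available, and handle_event() is a hypothetical consumer supplied by the tool.

/*
 * Hedged sketch, not from this commit: drain one per-cpu ring and
 * acknowledge consumption when the evlist was mmapped with
 * overwrite == false, following the perf_evlist__mmap() comment above.
 */
static void drain_one_cpu(struct perf_evlist *evlist, int cpu)
{
        struct perf_mmap *md = &evlist->mmap[cpu];
        union perf_event *event;
        unsigned int head;

        while ((event = perf_evlist__read_on_cpu(evlist, cpu)) != NULL)
                handle_event(event);    /* hypothetical consumer */

        /* Tell the kernel everything up to the current head was consumed. */
        head = perf_mmap__read_head(md);
        perf_mmap__write_tail(md, head);
}

Writing the tail back matters in this mode because the ring is mapped writable (PROT_READ | PROT_WRITE above): the kernel uses the tail to know how much of the buffer it may reuse without overwriting unread events.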
diff --git a/tools/perf/util/evlist.h b/tools/perf/util/evlist.h
index 022ae404b908..85aca6eba16b 100644
--- a/tools/perf/util/evlist.h
+++ b/tools/perf/util/evlist.h
@@ -6,6 +6,8 @@
 #include "event.h"
 
 struct pollfd;
+struct thread_map;
+struct cpu_map;
 
 #define PERF_EVLIST__HLIST_BITS 8
 #define PERF_EVLIST__HLIST_SIZE (1 << PERF_EVLIST__HLIST_BITS)
@@ -39,4 +41,9 @@ struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id);
 
 union perf_event *perf_evlist__read_on_cpu(struct perf_evlist *self, int cpu);
 
+int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus);
+int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
+                      struct thread_map *threads, int pages, bool overwrite);
+void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus);
+
 #endif /* __PERF_EVLIST_H */
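
[Editor's note] Only the mmap lifecycle is exported here; the id hashing stays private to evlist.c. As a hedged aside (not part of the commit), the helper below mirrors the id_idx logic of perf_evlist__id_hash() above: read(2) on an event fd returns the counter value first, then the enabled/running times if requested, then the PERF_FORMAT_ID value, which is what perf_evlist__id2evsel() later resolves back to an evsel.

#include <stdint.h>
#include <linux/perf_event.h>

/*
 * Hedged sketch, not from this commit: index of the PERF_FORMAT_ID field
 * in the u64 array returned by read(2), mirroring perf_evlist__id_hash().
 */
static int read_format_id_index(uint64_t read_format)
{
        int id_idx = 1; /* read_data[0] is the counter value */

        if (!(read_format & PERF_FORMAT_ID))
                return -1;      /* no id field was requested */
        if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
                ++id_idx;
        if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
                ++id_idx;
        return id_idx;
}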
diff --git a/tools/perf/util/evsel.c b/tools/perf/util/evsel.c
index fddeb08f48a7..2720bc1d578b 100644
--- a/tools/perf/util/evsel.c
+++ b/tools/perf/util/evsel.c
@@ -1,18 +1,19 @@
+/*
+ * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
+ *
+ * Parts came from builtin-{top,stat,record}.c, see those files for further
+ * copyright notes.
+ *
+ * Released under the GPL v2. (and only v2, not any later version)
+ */
+
 #include "evsel.h"
 #include "evlist.h"
-#include "../perf.h"
 #include "util.h"
 #include "cpumap.h"
 #include "thread_map.h"
 
-#include <unistd.h>
-#include <sys/mman.h>
-
-#include <linux/bitops.h>
-#include <linux/hash.h>
-
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
-#define SID(e, x, y) xyarray__entry(e->id, x, y)
 
 void perf_evsel__init(struct perf_evsel *evsel,
                       struct perf_event_attr *attr, int idx)
@@ -74,24 +75,6 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
                 }
 }
 
-void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus)
-{
-        int cpu;
-
-        for (cpu = 0; cpu < ncpus; cpu++) {
-                if (evlist->mmap[cpu].base != NULL) {
-                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-                        evlist->mmap[cpu].base = NULL;
-                }
-        }
-}
-
-int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus)
-{
-        evlist->mmap = zalloc(ncpus * sizeof(struct perf_mmap));
-        return evlist->mmap != NULL ? 0 : -ENOMEM;
-}
-
 void perf_evsel__exit(struct perf_evsel *evsel)
 {
         assert(list_empty(&evsel->node));
@@ -258,115 +241,6 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel,
         return __perf_evsel__open(evsel, &empty_cpu_map.map, threads, group, inherit);
 }
 
-static int __perf_evlist__mmap(struct perf_evlist *evlist, int cpu, int prot,
-                               int mask, int fd)
-{
-        evlist->mmap[cpu].prev = 0;
-        evlist->mmap[cpu].mask = mask;
-        evlist->mmap[cpu].base = mmap(NULL, evlist->mmap_len, prot,
-                                      MAP_SHARED, fd, 0);
-        if (evlist->mmap[cpu].base == MAP_FAILED)
-                return -1;
-
-        perf_evlist__add_pollfd(evlist, fd);
-        return 0;
-}
-
-static int perf_evlist__id_hash(struct perf_evlist *evlist, struct perf_evsel *evsel,
-                                int cpu, int thread, int fd)
-{
-        struct perf_sample_id *sid;
-        u64 read_data[4] = { 0, };
-        int hash, id_idx = 1; /* The first entry is the counter value */
-
-        if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
-            read(fd, &read_data, sizeof(read_data)) == -1)
-                return -1;
-
-        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
-                ++id_idx;
-        if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
-                ++id_idx;
-
-        sid = SID(evsel, cpu, thread);
-        sid->id = read_data[id_idx];
-        sid->evsel = evsel;
-        hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
-        hlist_add_head(&sid->node, &evlist->heads[hash]);
-        return 0;
-}
-
-/** perf_evlist__mmap - Create per cpu maps to receive events
- *
- * @evlist - list of events
- * @cpus - cpu map being monitored
- * @threads - threads map being monitored
- * @pages - map length in pages
- * @overwrite - overwrite older events?
- *
- * If overwrite is false the user needs to signal event consuption using:
- *
- * struct perf_mmap *m = &evlist->mmap[cpu];
- * unsigned int head = perf_mmap__read_head(m);
- *
- * perf_mmap__write_tail(m, head)
- */
-int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
-                      struct thread_map *threads, int pages, bool overwrite)
-{
-        unsigned int page_size = sysconf(_SC_PAGE_SIZE);
-        int mask = pages * page_size - 1, cpu;
-        struct perf_evsel *first_evsel, *evsel;
-        int thread, prot = PROT_READ | (overwrite ? 0 : PROT_WRITE);
-
-        if (evlist->mmap == NULL &&
-            perf_evlist__alloc_mmap(evlist, cpus->nr) < 0)
-                return -ENOMEM;
-
-        if (evlist->pollfd == NULL &&
-            perf_evlist__alloc_pollfd(evlist, cpus->nr, threads->nr) < 0)
-                return -ENOMEM;
-
-        evlist->overwrite = overwrite;
-        evlist->mmap_len = (pages + 1) * page_size;
-        first_evsel = list_entry(evlist->entries.next, struct perf_evsel, node);
-
-        list_for_each_entry(evsel, &evlist->entries, node) {
-                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-                    evsel->id == NULL &&
-                    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
-                        return -ENOMEM;
-
-                for (cpu = 0; cpu < cpus->nr; cpu++) {
-                        for (thread = 0; thread < threads->nr; thread++) {
-                                int fd = FD(evsel, cpu, thread);
-
-                                if (evsel->idx || thread) {
-                                        if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT,
-                                                  FD(first_evsel, cpu, 0)) != 0)
-                                                goto out_unmap;
-                                } else if (__perf_evlist__mmap(evlist, cpu, prot, mask, fd) < 0)
-                                        goto out_unmap;
-
-                                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
-                                    perf_evlist__id_hash(evlist, evsel, cpu, thread, fd) < 0)
-                                        goto out_unmap;
-                        }
-                }
-        }
-
-        return 0;
-
-out_unmap:
-        for (cpu = 0; cpu < cpus->nr; cpu++) {
-                if (evlist->mmap[cpu].base != NULL) {
-                        munmap(evlist->mmap[cpu].base, evlist->mmap_len);
-                        evlist->mmap[cpu].base = NULL;
-                }
-        }
-        return -1;
-}
-
 static int perf_event__parse_id_sample(const union perf_event *event, u64 type,
                                        struct perf_sample *sample)
 {
diff --git a/tools/perf/util/evsel.h b/tools/perf/util/evsel.h
index 7962e7587dea..eecdc3aabc14 100644
--- a/tools/perf/util/evsel.h
+++ b/tools/perf/util/evsel.h
@@ -60,7 +60,6 @@ void perf_evsel__delete(struct perf_evsel *evsel);
 int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
 int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
-int perf_evlist__alloc_mmap(struct perf_evlist *evlist, int ncpus);
 void perf_evsel__free_fd(struct perf_evsel *evsel);
 void perf_evsel__free_id(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
@@ -71,9 +70,6 @@ int perf_evsel__open_per_thread(struct perf_evsel *evsel,
                                 struct thread_map *threads, bool group, bool inherit);
 int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
                      struct thread_map *threads, bool group, bool inherit);
-int perf_evlist__mmap(struct perf_evlist *evlist, struct cpu_map *cpus,
-                      struct thread_map *threads, int pages, bool overwrite);
-void perf_evlist__munmap(struct perf_evlist *evlist, int ncpus);
 
 #define perf_evsel__match(evsel, t, c) \
         (evsel->attr.type == PERF_TYPE_##t && \