Diffstat (limited to 'tools/perf/util/evlist.c'):
 -rw-r--r--  tools/perf/util/evlist.c | 236
 1 file changed, 213 insertions(+), 23 deletions(-)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 814e954c1318..3cebc9a8d52e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,6 +25,9 @@
 #include <linux/bitops.h>
 #include <linux/hash.h>
 
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
+static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
+
 #define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
 #define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
 
@@ -37,6 +40,7 @@ void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
 		INIT_HLIST_HEAD(&evlist->heads[i]);
 	INIT_LIST_HEAD(&evlist->entries);
 	perf_evlist__set_maps(evlist, cpus, threads);
+	fdarray__init(&evlist->pollfd, 64);
 	evlist->workload.pid = -1;
 }
 
@@ -102,7 +106,7 @@ static void perf_evlist__purge(struct perf_evlist *evlist)
 void perf_evlist__exit(struct perf_evlist *evlist)
 {
 	zfree(&evlist->mmap);
-	zfree(&evlist->pollfd);
+	fdarray__exit(&evlist->pollfd);
 }
 
 void perf_evlist__delete(struct perf_evlist *evlist)
@@ -122,6 +126,7 @@ void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
 {
 	list_add_tail(&entry->node, &evlist->entries);
 	entry->idx = evlist->nr_entries;
+	entry->tracking = !entry->idx;
 
 	if (!evlist->nr_entries++)
 		perf_evlist__set_id_pos(evlist);
@@ -265,17 +270,27 @@ int perf_evlist__add_newtp(struct perf_evlist *evlist,
 	return 0;
 }
 
+static int perf_evlist__nr_threads(struct perf_evlist *evlist,
+				   struct perf_evsel *evsel)
+{
+	if (evsel->system_wide)
+		return 1;
+	else
+		return thread_map__nr(evlist->threads);
+}
+
 void perf_evlist__disable(struct perf_evlist *evlist)
 {
 	int cpu, thread;
 	struct perf_evsel *pos;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	int nr_threads;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		evlist__for_each(evlist, pos) {
 			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 				continue;
+			nr_threads = perf_evlist__nr_threads(evlist, pos);
 			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_DISABLE, 0);
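The rule perf_evlist__nr_threads() encodes: a system-wide evsel is opened once per CPU, so its FD matrix has a single thread column and only thread index 0 is valid. A minimal illustration of that indexing invariant (evsel_fd_index_valid() is a hypothetical helper, not part of this diff):

/* Hypothetical helper: which (cpu, thread) cells of the FD matrix exist. */
static bool evsel_fd_index_valid(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread)
{
	return cpu >= 0 && cpu < cpu_map__nr(evlist->cpus) &&
	       thread >= 0 && thread < perf_evlist__nr_threads(evlist, evsel);
}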
@@ -288,12 +303,13 @@ void perf_evlist__enable(struct perf_evlist *evlist)
 	int cpu, thread;
 	struct perf_evsel *pos;
 	int nr_cpus = cpu_map__nr(evlist->cpus);
-	int nr_threads = thread_map__nr(evlist->threads);
+	int nr_threads;
 
 	for (cpu = 0; cpu < nr_cpus; cpu++) {
 		evlist__for_each(evlist, pos) {
 			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
 				continue;
+			nr_threads = perf_evlist__nr_threads(evlist, pos);
 			for (thread = 0; thread < nr_threads; thread++)
 				ioctl(FD(pos, cpu, thread),
 				      PERF_EVENT_IOC_ENABLE, 0);
@@ -305,12 +321,14 @@ int perf_evlist__disable_event(struct perf_evlist *evlist,
 			       struct perf_evsel *evsel)
 {
 	int cpu, thread, err;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
 	if (!evsel->fd)
 		return 0;
 
-	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
-		for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		for (thread = 0; thread < nr_threads; thread++) {
 			err = ioctl(FD(evsel, cpu, thread),
 				    PERF_EVENT_IOC_DISABLE, 0);
 			if (err)
@@ -324,12 +342,14 @@ int perf_evlist__enable_event(struct perf_evlist *evlist,
 			      struct perf_evsel *evsel)
 {
 	int cpu, thread, err;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
 
 	if (!evsel->fd)
 		return -EINVAL;
 
-	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
-		for (thread = 0; thread < evlist->threads->nr; thread++) {
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		for (thread = 0; thread < nr_threads; thread++) {
 			err = ioctl(FD(evsel, cpu, thread),
 				    PERF_EVENT_IOC_ENABLE, 0);
 			if (err)
@@ -339,21 +359,111 @@ int perf_evlist__enable_event(struct perf_evlist *evlist,
 	return 0;
 }
 
-static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
+static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
+					 struct perf_evsel *evsel, int cpu)
+{
+	int thread, err;
+	int nr_threads = perf_evlist__nr_threads(evlist, evsel);
+
+	if (!evsel->fd)
+		return -EINVAL;
+
+	for (thread = 0; thread < nr_threads; thread++) {
+		err = ioctl(FD(evsel, cpu, thread),
+			    PERF_EVENT_IOC_ENABLE, 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
+					    struct perf_evsel *evsel,
+					    int thread)
+{
+	int cpu, err;
+	int nr_cpus = cpu_map__nr(evlist->cpus);
+
+	if (!evsel->fd)
+		return -EINVAL;
+
+	for (cpu = 0; cpu < nr_cpus; cpu++) {
+		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
+		if (err)
+			return err;
+	}
+	return 0;
+}
+
+int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
+				  struct perf_evsel *evsel, int idx)
+{
+	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);
+
+	if (per_cpu_mmaps)
+		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
+	else
+		return perf_evlist__enable_event_thread(evlist, evsel, idx);
+}
+
+int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
 {
 	int nr_cpus = cpu_map__nr(evlist->cpus);
 	int nr_threads = thread_map__nr(evlist->threads);
-	int nfds = nr_cpus * nr_threads * evlist->nr_entries;
-	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
-	return evlist->pollfd != NULL ? 0 : -ENOMEM;
+	int nfds = 0;
+	struct perf_evsel *evsel;
+
+	list_for_each_entry(evsel, &evlist->entries, node) {
+		if (evsel->system_wide)
+			nfds += nr_cpus;
+		else
+			nfds += nr_cpus * nr_threads;
+	}
+
+	if (fdarray__available_entries(&evlist->pollfd) < nfds &&
+	    fdarray__grow(&evlist->pollfd, nfds) < 0)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
+{
+	int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
+	/*
+	 * Save the idx so that when we filter out fds POLLHUP'ed we can
+	 * close the associated evlist->mmap[] entry.
+	 */
+	if (pos >= 0) {
+		evlist->pollfd.priv[pos].idx = idx;
+
+		fcntl(fd, F_SETFL, O_NONBLOCK);
+	}
+
+	return pos;
+}
+
+int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+{
+	return __perf_evlist__add_pollfd(evlist, fd, -1);
+}
+
+static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
+{
+	struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
+
+	perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
+}
+
+int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
+{
+	return fdarray__filter(&evlist->pollfd, revents_and_mask,
+			       perf_evlist__munmap_filtered);
 }
 
-void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
+int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
 {
-	fcntl(fd, F_SETFL, O_NONBLOCK);
-	evlist->pollfd[evlist->nr_fds].fd = fd;
-	evlist->pollfd[evlist->nr_fds].events = POLLIN;
-	evlist->nr_fds++;
+	return fdarray__poll(&evlist->pollfd, timeout);
 }
 
 static void perf_evlist__id_hash(struct perf_evlist *evlist,
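Taken together, the new entry points support an event loop of this shape in tools (a sketch; drain_all() is a hypothetical helper that reads and consumes every mmap'ed ring, and the errno handling assumes fdarray__poll() forwards poll(2) semantics):

static int event_loop(struct perf_evlist *evlist, volatile sig_atomic_t *done)
{
	while (!*done) {
		drain_all(evlist);	/* perf_evlist__mmap_read()/consume() per ring */

		if (perf_evlist__poll(evlist, -1) < 0 && errno != EINTR)
			return -errno;

		/*
		 * Drop fds that POLLHUP'ed or POLLERR'ed (e.g. the monitored
		 * tasks exited); fdarray__filter() calls back into
		 * perf_evlist__munmap_filtered() for each dropped fd, putting
		 * the mmap refcount. No fds left means nothing to wait for.
		 */
		if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
			break;
	}
	return 0;
}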
@@ -566,14 +676,36 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 	return event;
 }
 
+static bool perf_mmap__empty(struct perf_mmap *md)
+{
+	return perf_mmap__read_head(md) == md->prev;
+}
+
+static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
+{
+	++evlist->mmap[idx].refcnt;
+}
+
+static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
+{
+	BUG_ON(evlist->mmap[idx].refcnt == 0);
+
+	if (--evlist->mmap[idx].refcnt == 0)
+		__perf_evlist__munmap(evlist, idx);
+}
+
 void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
 {
+	struct perf_mmap *md = &evlist->mmap[idx];
+
 	if (!evlist->overwrite) {
-		struct perf_mmap *md = &evlist->mmap[idx];
 		unsigned int old = md->prev;
 
 		perf_mmap__write_tail(md, old);
 	}
+
+	if (md->refcnt == 1 && perf_mmap__empty(md))
+		perf_evlist__mmap_put(evlist, idx);
 }
 
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
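With this refcounting, the final reference on a ring is dropped inside perf_evlist__mmap_consume() itself, and only once the ring is empty, so a reader can still drain events whose fd already POLLHUP'ed. A minimal per-ring drain loop, built only from functions in this file (a sketch):

static void drain_mmap(struct perf_evlist *evlist, int idx)
{
	union perf_event *event;

	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
		/* ... deliver the event to the tool ... */
		perf_evlist__mmap_consume(evlist, idx);
	}
	/*
	 * If the fds feeding this ring are gone (refcnt fell to 1), the
	 * perf_evlist__mmap_consume() call that sees the ring empty puts
	 * the last reference and __perf_evlist__munmap() runs.
	 */
}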
@@ -581,6 +713,7 @@ static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 	if (evlist->mmap[idx].base != NULL) {
 		munmap(evlist->mmap[idx].base, evlist->mmap_len);
 		evlist->mmap[idx].base = NULL;
+		evlist->mmap[idx].refcnt = 0;
 	}
 }
 
@@ -614,6 +747,20 @@ struct mmap_params {
 static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 			       struct mmap_params *mp, int fd)
 {
+	/*
+	 * The last one will be done at perf_evlist__mmap_consume(), so that we
+	 * make sure we don't prevent tools from consuming every last event in
+	 * the ring buffer.
+	 *
+	 * I.e. we can get the POLLHUP meaning that the fd doesn't exist
+	 * anymore, but the last events for it are still in the ring buffer,
+	 * waiting to be consumed.
+	 *
+	 * Tools can choose to ignore this at their own discretion, but the
+	 * evlist layer can't just drop it when filtering events in
+	 * perf_evlist__filter_pollfd().
+	 */
+	evlist->mmap[idx].refcnt = 2;
 	evlist->mmap[idx].prev = 0;
 	evlist->mmap[idx].mask = mp->mask;
 	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
@@ -625,7 +772,6 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
 		return -1;
 	}
 
-	perf_evlist__add_pollfd(evlist, fd);
 	return 0;
 }
 
@@ -636,7 +782,12 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 	struct perf_evsel *evsel;
 
 	evlist__for_each(evlist, evsel) {
-		int fd = FD(evsel, cpu, thread);
+		int fd;
+
+		if (evsel->system_wide && thread)
+			continue;
+
+		fd = FD(evsel, cpu, thread);
 
 		if (*output == -1) {
 			*output = fd;
@@ -645,6 +796,13 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
 		} else {
 			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
 				return -1;
+
+			perf_evlist__mmap_get(evlist, idx);
+		}
+
+		if (__perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
+			perf_evlist__mmap_put(evlist, idx);
+			return -1;
 		}
 
 		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
@@ -804,7 +962,7 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
 	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
 		return -ENOMEM;
 
-	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
+	if (evlist->pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
 		return -ENOMEM;
 
 	evlist->overwrite = overwrite;
@@ -1061,6 +1219,8 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
 	}
 
 	if (!evlist->workload.pid) {
+		int ret;
+
 		if (pipe_output)
 			dup2(2, 1);
 
@@ -1078,8 +1238,22 @@ int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *tar
 		/*
 		 * Wait until the parent tells us to go.
 		 */
-		if (read(go_pipe[0], &bf, 1) == -1)
-			perror("unable to read pipe");
+		ret = read(go_pipe[0], &bf, 1);
+		/*
+		 * The parent will ask for the execvp() to be performed by
+		 * writing exactly one byte, in workload.cork_fd, usually via
+		 * perf_evlist__start_workload().
+		 *
+		 * For cancelling the workload without actually running it,
+		 * the parent will just close workload.cork_fd, without writing
+		 * anything, i.e. read will return zero and we just exit()
+		 * here.
+		 */
+		if (ret != 1) {
+			if (ret == -1)
+				perror("unable to read pipe");
+			exit(ret);
+		}
 
 		execvp(argv[0], (char **)argv);
 
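For reference, the parent side of this handshake: perf_evlist__start_workload() (elsewhere in this file) pops the cork by writing exactly one byte, and a tool cancels by closing workload.cork_fd without writing. A sketch of both paths (start_or_cancel_workload() is a hypothetical wrapper, not the real helper):

static int start_or_cancel_workload(struct perf_evlist *evlist, bool cancel)
{
	int ret = 0;

	if (evlist->workload.cork_fd > 0) {
		char bf = 0;

		/* write 1 byte -> child's read() returns 1 and it execvp()s */
		if (!cancel && write(evlist->workload.cork_fd, &bf, 1) < 0) {
			perror("unable to write to pipe");
			ret = -1;
		}
		/* a plain close without a write -> child's read() returns 0 */
		close(evlist->workload.cork_fd);
	}
	return ret;
}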
@@ -1202,7 +1376,7 @@ int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
 			       int err, char *buf, size_t size)
 {
 	int printed, value;
-	char sbuf[128], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
+	char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
 
 	switch (err) {
 	case EACCES:
@@ -1250,3 +1424,19 @@ void perf_evlist__to_front(struct perf_evlist *evlist,
 
 	list_splice(&move, &evlist->entries);
 }
+
+void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
+				     struct perf_evsel *tracking_evsel)
+{
+	struct perf_evsel *evsel;
+
+	if (tracking_evsel->tracking)
+		return;
+
+	evlist__for_each(evlist, evsel) {
+		if (evsel != tracking_evsel)
+			evsel->tracking = false;
+	}
+
+	tracking_evsel->tracking = true;
+}
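Usage note: exactly one evsel carries the side-band tracking events (mmap, comm, task); perf_evlist__add() above defaults that to the first entry, and this new helper moves it. A sketch of a caller (pick_tracking_evsel() is a hypothetical chooser):

static void setup_tracking(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel = pick_tracking_evsel(evlist);

	if (evsel != NULL)
		perf_evlist__set_tracking_event(evlist, evsel);
}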