aboutsummaryrefslogtreecommitdiffstats
path: root/tools
diff options
context:
space:
mode:
authorArnaldo Carvalho de Melo <acme@redhat.com>2014-09-08 10:27:49 -0400
committerArnaldo Carvalho de Melo <acme@redhat.com>2014-09-25 15:46:55 -0400
commite4b356b56cfe77b800a9bc2e6efefa6a069b8a78 (patch)
treea028e761082e6979dfff390b37cfd6a5fb8a7e2b /tools
parent2171a9256862ec139a042832a9ae737b942ca882 (diff)
perf evlist: Unmap when all refcounts to fd are gone and events drained
As noticed by receiving a POLLHUP for all its pollfd entries. That will remove the refcount taken in perf_evlist__mmap_per_evsel(), and when all events are consumed via perf_evlist__mmap_read() + perf_evlist__mmap_consume(), the ring buffer will be unmap'ed. Thanks to Jiri Olsa for pointing out that we must wait till all events are consumed, not being ok to unmap just when receiving all the POLLHUPs. Cc: Adrian Hunter <adrian.hunter@intel.com> Cc: Borislav Petkov <bp@suse.de> Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com> Cc: David Ahern <dsahern@gmail.com> Cc: Frederic Weisbecker <fweisbec@gmail.com> Cc: Ingo Molnar <mingo@kernel.org> Cc: Jean Pihet <jean.pihet@linaro.org> Cc: Jiri Olsa <jolsa@kernel.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Paul Mackerras <paulus@samba.org> Cc: Peter Zijlstra <a.p.zijlstra@chello.nl> Link: http://lkml.kernel.org/n/tip-t10w1xk4myp7ca7m9fvip6a0@git.kernel.org Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
Diffstat (limited to 'tools')
-rw-r--r--tools/perf/util/evlist.c35
1 file changed, 29 insertions, 6 deletions
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 61d18dc83e8e..3cebc9a8d52e 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -25,11 +25,12 @@
25#include <linux/bitops.h> 25#include <linux/bitops.h>
26#include <linux/hash.h> 26#include <linux/hash.h>
27 27
28static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx);
29static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
30
28#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y)) 31#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
29#define SID(e, x, y) xyarray__entry(e->sample_id, x, y) 32#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
30 33
31static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx);
32
33void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus, 34void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
34 struct thread_map *threads) 35 struct thread_map *threads)
35{ 36{
@@ -426,16 +427,38 @@ int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
426 return 0; 427 return 0;
427} 428}
428 429
430static int __perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd, int idx)
431{
432 int pos = fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP);
433 /*
434 * Save the idx so that when we filter out fds POLLHUP'ed we can
435 * close the associated evlist->mmap[] entry.
436 */
437 if (pos >= 0) {
438 evlist->pollfd.priv[pos].idx = idx;
439
440 fcntl(fd, F_SETFL, O_NONBLOCK);
441 }
442
443 return pos;
444}
445
429int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd) 446int perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
430{ 447{
431 fcntl(fd, F_SETFL, O_NONBLOCK); 448 return __perf_evlist__add_pollfd(evlist, fd, -1);
449}
450
451static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd)
452{
453 struct perf_evlist *evlist = container_of(fda, struct perf_evlist, pollfd);
432 454
433 return fdarray__add(&evlist->pollfd, fd, POLLIN | POLLERR | POLLHUP); 455 perf_evlist__mmap_put(evlist, fda->priv[fd].idx);
434} 456}
435 457
436int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask) 458int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
437{ 459{
438 return fdarray__filter(&evlist->pollfd, revents_and_mask, NULL); 460 return fdarray__filter(&evlist->pollfd, revents_and_mask,
461 perf_evlist__munmap_filtered);
439} 462}
440 463
441int perf_evlist__poll(struct perf_evlist *evlist, int timeout) 464int perf_evlist__poll(struct perf_evlist *evlist, int timeout)
@@ -777,7 +800,7 @@ static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
777 perf_evlist__mmap_get(evlist, idx); 800 perf_evlist__mmap_get(evlist, idx);
778 } 801 }
779 802
780 if (perf_evlist__add_pollfd(evlist, fd) < 0) { 803 if (__perf_evlist__add_pollfd(evlist, fd, idx) < 0) {
781 perf_evlist__mmap_put(evlist, idx); 804 perf_evlist__mmap_put(evlist, idx);
782 return -1; 805 return -1;
783 } 806 }