diff options
author | Arnaldo Carvalho de Melo <acme@redhat.com> | 2018-03-26 10:42:15 -0400 |
---|---|---|
committer | Arnaldo Carvalho de Melo <acme@redhat.com> | 2018-03-27 12:13:38 -0400 |
commit | 895e3b06fc2ce438adc62cb13d31ea001dcfda16 (patch) | |
tree | 15651e1f12cc6aff270f86f2da3a23c8ae4bc673 | |
parent | f58385f629c87a9e210108b39c1f4950d0363ad2 (diff) |
perf mmap: Be consistent when checking for an unmapped ring buffer
The previous patch is insufficient to cure the reported 'perf trace'
segfault, as it only cures the perf_mmap__read_done() case, moving the
segfault to the perf_mmap__read_init() function. Fix it by doing the same
refcount check there.
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Kan Liang <kan.liang@intel.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Wang Nan <wangnan0@huawei.com>
Fixes: 8872481bd048 ("perf mmap: Introduce perf_mmap__read_init()")
Link: https://lkml.kernel.org/r/20180326144127.GF18897@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
-rw-r--r-- | tools/perf/util/mmap.c | 13 |
1 files changed, 12 insertions, 1 deletions
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c index f6cfc52ff1fe..fc832676a798 100644 --- a/tools/perf/util/mmap.c +++ b/tools/perf/util/mmap.c | |||
@@ -234,7 +234,7 @@ static int overwrite_rb_find_range(void *buf, int mask, u64 *start, u64 *end) | |||
234 | /* | 234 | /* |
235 | * Report the start and end of the available data in ringbuffer | 235 | * Report the start and end of the available data in ringbuffer |
236 | */ | 236 | */ |
237 | int perf_mmap__read_init(struct perf_mmap *md) | 237 | static int __perf_mmap__read_init(struct perf_mmap *md) |
238 | { | 238 | { |
239 | u64 head = perf_mmap__read_head(md); | 239 | u64 head = perf_mmap__read_head(md); |
240 | u64 old = md->prev; | 240 | u64 old = md->prev; |
@@ -268,6 +268,17 @@ int perf_mmap__read_init(struct perf_mmap *md) | |||
268 | return 0; | 268 | return 0; |
269 | } | 269 | } |
270 | 270 | ||
271 | int perf_mmap__read_init(struct perf_mmap *map) | ||
272 | { | ||
273 | /* | ||
274 | * Check if event was unmapped due to a POLLHUP/POLLERR. | ||
275 | */ | ||
276 | if (!refcount_read(&map->refcnt)) | ||
277 | return -ENOENT; | ||
278 | |||
279 | return __perf_mmap__read_init(map); | ||
280 | } | ||
281 | |||
271 | int perf_mmap__push(struct perf_mmap *md, void *to, | 282 | int perf_mmap__push(struct perf_mmap *md, void *to, |
272 | int push(void *to, void *buf, size_t size)) | 283 | int push(void *to, void *buf, size_t size)) |
273 | { | 284 | { |