commit     6050cb0b0b366092d1383bc23d7b16cd26db00f0 (patch)
author     Frederic Weisbecker <fweisbec@gmail.com>  2013-07-22 20:30:59 -0400
committer  Ingo Molnar <mingo@kernel.org>            2013-07-30 16:22:58 -0400
tree       e29edecc20a04e17aa4c7f83d21a140d3e377356 /kernel
parent     7d9ffa8961482232d964173cccba6e14d2d543b2 (diff)
perf: Fix branch stack refcount leak on callchain init failure
On callchain buffers allocation failure, free_event() is
called and all the accounting performed in perf_event_alloc()
for that event is cancelled.
But if the event uses branch stack sampling, free_event() also decrements
the branch stack sampling events refcounts. This is a bug because that
accounting is only performed after the callchain buffer allocation, so on
this failure path the increment has not happened yet. As a result, the
branch stack sampling events refcount can become negative.
To fix this, move the branch stack event accounting before the
callchain buffer allocation.
Reported-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-2-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel')
-rw-r--r--  kernel/events/core.c | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 127411400116..f35aa7e69e2d 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6567,6 +6567,12 @@ done:
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
+	if (has_branch_stack(event)) {
+		static_key_slow_inc(&perf_sched_events.key);
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events,
+					    event->cpu));
+	}
 	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
 		err = get_callchain_buffers();
 		if (err) {
@@ -6574,12 +6580,6 @@ done:
 			return ERR_PTR(err);
 		}
 	}
-	if (has_branch_stack(event)) {
-		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
 	}
 
 	return event;