aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/events/core.c
diff options
context:
space:
mode:
authorFrederic Weisbecker <fweisbec@gmail.com>2013-07-22 20:31:00 -0400
committerIngo Molnar <mingo@kernel.org>2013-07-30 16:29:12 -0400
commit90983b16078ab0fdc58f0dab3e8e3da79c9579a2 (patch)
tree7cae7a4dfc4f86014446e300e600ce666fbd012a /kernel/events/core.c
parent6050cb0b0b366092d1383bc23d7b16cd26db00f0 (diff)
perf: Sanitize get_callchain_buffer()
In case of allocation failure, get_callchain_buffer() keeps the refcount incremented for the current event. As a result, when get_callchain_buffers() returns an error, we must cleanup what it did by cancelling its last refcount with a call to put_callchain_buffers(). This is a hack in order to be able to call free_event() after that failure. The original purpose of that was to simplify the failure path. But this error handling is actually counter intuitive, ugly and not very easy to follow because one expect to see the resources used to perform a service to be cleaned by the callee if case of failure, not by the caller. So lets clean this up by cancelling the refcount from get_callchain_buffer() in case of failure. And correctly free the event accordingly in perf_event_alloc(). Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: Stephane Eranian <eranian@google.com> Signed-off-by: Peter Zijlstra <peterz@infradead.org> Link: http://lkml.kernel.org/r/1374539466-4799-3-git-send-email-fweisbec@gmail.com Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events/core.c')
-rw-r--r--kernel/events/core.c41
1 file changed, 21 insertions, 20 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f35aa7e69e2d..3b998626b7a0 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6457,7 +6457,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
6457 struct pmu *pmu; 6457 struct pmu *pmu;
6458 struct perf_event *event; 6458 struct perf_event *event;
6459 struct hw_perf_event *hwc; 6459 struct hw_perf_event *hwc;
6460 long err; 6460 long err = -EINVAL;
6461 6461
6462 if ((unsigned)cpu >= nr_cpu_ids) { 6462 if ((unsigned)cpu >= nr_cpu_ids) {
6463 if (!task || cpu != -1) 6463 if (!task || cpu != -1)
@@ -6540,25 +6540,23 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
6540 * we currently do not support PERF_FORMAT_GROUP on inherited events 6540 * we currently do not support PERF_FORMAT_GROUP on inherited events
6541 */ 6541 */
6542 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP)) 6542 if (attr->inherit && (attr->read_format & PERF_FORMAT_GROUP))
6543 goto done; 6543 goto err_ns;
6544 6544
6545 pmu = perf_init_event(event); 6545 pmu = perf_init_event(event);
6546
6547done:
6548 err = 0;
6549 if (!pmu) 6546 if (!pmu)
6550 err = -EINVAL; 6547 goto err_ns;
6551 else if (IS_ERR(pmu)) 6548 else if (IS_ERR(pmu)) {
6552 err = PTR_ERR(pmu); 6549 err = PTR_ERR(pmu);
6553 6550 goto err_ns;
6554 if (err) {
6555 if (event->ns)
6556 put_pid_ns(event->ns);
6557 kfree(event);
6558 return ERR_PTR(err);
6559 } 6551 }
6560 6552
6561 if (!event->parent) { 6553 if (!event->parent) {
6554 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6555 err = get_callchain_buffers();
6556 if (err)
6557 goto err_pmu;
6558 }
6559
6562 if (event->attach_state & PERF_ATTACH_TASK) 6560 if (event->attach_state & PERF_ATTACH_TASK)
6563 static_key_slow_inc(&perf_sched_events.key); 6561 static_key_slow_inc(&perf_sched_events.key);
6564 if (event->attr.mmap || event->attr.mmap_data) 6562 if (event->attr.mmap || event->attr.mmap_data)
@@ -6573,16 +6571,19 @@ done:
6573 atomic_inc(&per_cpu(perf_branch_stack_events, 6571 atomic_inc(&per_cpu(perf_branch_stack_events,
6574 event->cpu)); 6572 event->cpu));
6575 } 6573 }
6576 if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN) {
6577 err = get_callchain_buffers();
6578 if (err) {
6579 free_event(event);
6580 return ERR_PTR(err);
6581 }
6582 }
6583 } 6574 }
6584 6575
6585 return event; 6576 return event;
6577
6578err_pmu:
6579 if (event->destroy)
6580 event->destroy(event);
6581err_ns:
6582 if (event->ns)
6583 put_pid_ns(event->ns);
6584 kfree(event);
6585
6586 return ERR_PTR(err);
6586} 6587}
6587 6588
6588static int perf_copy_attr(struct perf_event_attr __user *uattr, 6589static int perf_copy_attr(struct perf_event_attr __user *uattr,