author	Peter Zijlstra <peterz@infradead.org>	2016-01-26 06:14:40 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-01-29 02:35:25 -0500
commit	a0733e695b83a9c31f779e41dcaec8ef924716b5 (patch)
tree	556587ac9903d9ad306e1079df72b75e53503f3c
parent	e03e7ee34fdd1c3ef494949a75cb8c61c7265fa9 (diff)
perf: Remove __free_event()
There is but a single caller; remove the function - we already have
_free_event(), the extra indirection is nonsensical.

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: David Ahern <dsahern@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	kernel/events/core.c	45
1 file changed, 20 insertions(+), 25 deletions(-)
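
The patch is a plain fold of a single-caller static helper into its caller. Purely for illustration, the following is a minimal, self-contained C sketch of the same transformation; struct event, release_parts() and free_event() here are made-up names for the example, not the kernel code:

#include <stdlib.h>

struct event {
	void (*destroy)(struct event *);
	void *ctx;
};

/*
 * "Before" shape: a static helper with exactly one caller,
 * analogous to what __free_event() was to _free_event().
 */
static void release_parts(struct event *e)
{
	if (e->destroy)
		e->destroy(e);
	free(e->ctx);
}

/*
 * "After" shape: the helper's body lives directly in its only
 * caller; behaviour is unchanged, one level of indirection is gone.
 */
static void free_event(struct event *e)
{
	if (e->destroy)
		e->destroy(e);
	free(e->ctx);
	free(e);
}

int main(void)
{
	struct event *e = calloc(1, sizeof(*e));

	e->ctx = malloc(16);
	free_event(e);
	(void)release_parts;	/* the "before" variant is kept only for comparison */
	return 0;
}
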
diff --git a/kernel/events/core.c b/kernel/events/core.c
index eb44730afea5..024adf0e34eb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3590,7 +3590,7 @@ static void unaccount_event(struct perf_event *event)
  * 3) two matching events on the same context.
  *
  * The former two cases are handled in the allocation path (perf_event_alloc(),
- * __free_event()), the latter -- before the first perf_install_in_context().
+ * _free_event()), the latter -- before the first perf_install_in_context().
  */
 static int exclusive_event_init(struct perf_event *event)
 {
@@ -3665,29 +3665,6 @@ static bool exclusive_event_installable(struct perf_event *event,
 	return true;
 }
 
-static void __free_event(struct perf_event *event)
-{
-	if (!event->parent) {
-		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-			put_callchain_buffers();
-	}
-
-	perf_event_free_bpf_prog(event);
-
-	if (event->destroy)
-		event->destroy(event);
-
-	if (event->ctx)
-		put_ctx(event->ctx);
-
-	if (event->pmu) {
-		exclusive_event_destroy(event);
-		module_put(event->pmu->module);
-	}
-
-	call_rcu(&event->rcu_head, free_event_rcu);
-}
-
 static void _free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
@@ -3709,7 +3686,25 @@ static void _free_event(struct perf_event *event)
 	if (is_cgroup_event(event))
 		perf_detach_cgroup(event);
 
-	__free_event(event);
+	if (!event->parent) {
+		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+			put_callchain_buffers();
+	}
+
+	perf_event_free_bpf_prog(event);
+
+	if (event->destroy)
+		event->destroy(event);
+
+	if (event->ctx)
+		put_ctx(event->ctx);
+
+	if (event->pmu) {
+		exclusive_event_destroy(event);
+		module_put(event->pmu->module);
+	}
+
+	call_rcu(&event->rcu_head, free_event_rcu);
 }
 
 /*