author     Frederic Weisbecker <fweisbec@gmail.com>   2013-07-22 20:31:01 -0400
committer  Ingo Molnar <mingo@kernel.org>             2013-07-30 16:29:12 -0400
commit     766d6c076928191d75ad5b0d0f58f52b1e7682d8
tree       147cb729a6f11ef96cc11bf404a1484fcd129303 /kernel/events
parent     90983b16078ab0fdc58f0dab3e8e3da79c9579a2
perf: Factor out event accounting code to account_event()/__free_event()
Gather all the event accounting code into a single place,
run once all the prerequisites have completed. This simplifies
the refcounting.
Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-4-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
 kernel/events/core.c | 79
 1 file changed, 47 insertions(+), 32 deletions(-)
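In outline, the change pairs one accounting step at creation time (account_event()) with one resource-release step at teardown (__free_event()); full teardown of an accounted event, free_event(), becomes "undo the accounting, then release". The sketch below is a minimal userspace analogue of that split, assuming simplified stand-ins for the event struct and the global counters; it is illustrative only, not kernel code:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

/* Stand-ins for the global bookkeeping the patch gathers together. */
static atomic_int nr_mmap_events;
static atomic_int nr_task_events;

struct event {
	bool mmap;
	bool task;
	void *buf;	/* stands in for ctx, ring buffer, etc. */
};

/* account_event(): bump every global counter in one place, called
 * only once all construction steps have succeeded. */
static void account_event(struct event *e)
{
	if (e->mmap)
		atomic_fetch_add(&nr_mmap_events, 1);
	if (e->task)
		atomic_fetch_add(&nr_task_events, 1);
}

/* __free_event(): release the event's own resources; touches no
 * global accounting, so it is safe on a not-yet-accounted event. */
static void __free_event(struct event *e)
{
	free(e->buf);
	free(e);
}

/* free_event(): teardown of an accounted event is simply
 * "unaccount, then release". */
static void free_event(struct event *e)
{
	if (e->mmap)
		atomic_fetch_sub(&nr_mmap_events, 1);
	if (e->task)
		atomic_fetch_sub(&nr_task_events, 1);
	__free_event(e);
}

The point of the split shows in the last two functions: an error path that runs before account_event() can call __free_event() directly, and no counter is ever decremented without a matching increment.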
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 3b998626b7a0..158fd5789e58 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3128,6 +3128,21 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
+static void __free_event(struct perf_event *event)
+{
+	if (!event->parent) {
+		if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
+			put_callchain_buffers();
+	}
+
+	if (event->destroy)
+		event->destroy(event);
+
+	if (event->ctx)
+		put_ctx(event->ctx);
+
+	call_rcu(&event->rcu_head, free_event_rcu);
+}
 static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
@@ -3141,8 +3156,6 @@ static void free_event(struct perf_event *event)
 		atomic_dec(&nr_comm_events);
 	if (event->attr.task)
 		atomic_dec(&nr_task_events);
-	if (event->attr.sample_type & PERF_SAMPLE_CALLCHAIN)
-		put_callchain_buffers();
 	if (is_cgroup_event(event)) {
 		atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
 		static_key_slow_dec_deferred(&perf_sched_events);
@@ -3180,13 +3193,8 @@ static void free_event(struct perf_event *event)
 	if (is_cgroup_event(event))
 		perf_detach_cgroup(event);
 
-	if (event->destroy)
-		event->destroy(event);
-
-	if (event->ctx)
-		put_ctx(event->ctx);
 
-	call_rcu(&event->rcu_head, free_event_rcu);
+	__free_event(event);
 }
 
 int perf_event_release_kernel(struct perf_event *event)
@@ -6443,6 +6451,29 @@ unlock:
 	return pmu;
 }
 
+static void account_event(struct perf_event *event)
+{
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_inc(&perf_sched_events.key);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_inc(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_inc(&nr_comm_events);
+	if (event->attr.task)
+		atomic_inc(&nr_task_events);
+	if (has_branch_stack(event)) {
+		static_key_slow_inc(&perf_sched_events.key);
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events,
+					    event->cpu));
+	}
+
+	if (is_cgroup_event(event)) {
+		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+		static_key_slow_inc(&perf_sched_events.key);
+	}
+}
+
 /*
  * Allocate and initialize a event structure
  */
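Note that account_event() may take perf_sched_events more than once for a single event: a task-attached event with a branch stack that also sits in a cgroup increments the key several times, and the decrements in free_event() are symmetric. That works because a static key is a reference-counted switch, not a boolean. As a rough userspace stand-in for that mechanism, with an atomic refcount in place of the kernel's jump-label patching (all names here are hypothetical):

#include <stdatomic.h>
#include <stdbool.h>

/* Reference-counted on/off switch in the spirit of a static key:
 * the slow path stays enabled while at least one user holds a ref. */
static atomic_int sched_events_key;

static void key_slow_inc(void) { atomic_fetch_add(&sched_events_key, 1); }
static void key_slow_dec(void) { atomic_fetch_sub(&sched_events_key, 1); }

static bool sched_events_wanted(void)
{
	/* In the kernel this test is patched into the code itself;
	 * here it is an ordinary atomic load. */
	return atomic_load(&sched_events_key) > 0;
}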
@@ -6556,21 +6587,6 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
 		if (err)
 			goto err_pmu;
 	}
-
-	if (event->attach_state & PERF_ATTACH_TASK)
-		static_key_slow_inc(&perf_sched_events.key);
-	if (event->attr.mmap || event->attr.mmap_data)
-		atomic_inc(&nr_mmap_events);
-	if (event->attr.comm)
-		atomic_inc(&nr_comm_events);
-	if (event->attr.task)
-		atomic_inc(&nr_task_events);
-	if (has_branch_stack(event)) {
-		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
 	}
 
 	return event;
@@ -6865,17 +6881,14 @@ SYSCALL_DEFINE5(perf_event_open,
 
 	if (flags & PERF_FLAG_PID_CGROUP) {
 		err = perf_cgroup_connect(pid, event, &attr, group_leader);
-		if (err)
-			goto err_alloc;
-		/*
-		 * one more event:
-		 * - that has cgroup constraint on event->cpu
-		 * - that may need work on context switch
-		 */
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
-		static_key_slow_inc(&perf_sched_events.key);
+		if (err) {
+			__free_event(event);
+			goto err_task;
+		}
 	}
 
+	account_event(event);
+
 	/*
 	 * Special case software events and allow them to be part of
 	 * any hardware group.
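This reworked error path is where the factoring pays off: perf_cgroup_connect() now fails before account_event() has run, so cleanup must use __free_event(), which releases resources without touching the counters. Continuing the hypothetical userspace sketch from after the diffstat, a creation path with the same ordering, construct first and account last:

/* Hypothetical analogue of perf_event_open()'s ordering; reuses
 * struct event, __free_event() and account_event() from the
 * earlier sketch. */
struct event *create_event(bool mmap, bool task)
{
	struct event *e = calloc(1, sizeof(*e));
	if (!e)
		return NULL;

	e->mmap = mmap;
	e->task = task;
	e->buf = malloc(64);
	if (!e->buf) {
		/* Not yet accounted: release only. free_event() here
		 * would decrement counters never incremented. */
		__free_event(e);
		return NULL;
	}

	account_event(e);	/* no failure points after this */
	return e;
}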
@@ -7071,6 +7084,8 @@ perf_event_create_kernel_counter(struct perf_event_attr *attr, int cpu,
 		goto err;
 	}
 
+	account_event(event);
+
 	ctx = find_get_context(event->pmu, task, cpu);
 	if (IS_ERR(ctx)) {
 		err = PTR_ERR(ctx);
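To close out the hypothetical sketch, a caller exercising both halves: a successfully created, and therefore accounted, event is always torn down with the full free_event(), so every increment is paired with exactly one decrement:

#include <assert.h>

int main(void)
{
	struct event *e = create_event(true, false);

	if (e) {
		assert(atomic_load(&nr_mmap_events) == 1);
		free_event(e);	/* accounted: unaccount + release */
	}
	assert(atomic_load(&nr_mmap_events) == 0);
	return 0;
}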