author		Frederic Weisbecker <fweisbec@gmail.com>	2013-07-22 20:31:02 -0400
committer	Ingo Molnar <mingo@kernel.org>			2013-07-30 16:29:13 -0400
commit		4beb31f3657348a8b702dd014d01c520e522012f (patch)
tree		cd233359c759608a0680a5738956cc79144afc93
parent		766d6c076928191d75ad5b0d0f58f52b1e7682d8 (diff)
perf: Split the per-cpu accounting part of the event accounting code
This way we can use the per-cpu handling separately.
This is going to be used to fix the event migration
code accounting.
Original-patch-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1374539466-4799-5-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--	kernel/events/core.c	87
1 file changed, 55 insertions(+), 32 deletions(-)
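The stated motivation (fixing event migration accounting) suggests how the new helpers pair up. A minimal sketch, not part of this patch and with a hypothetical function name: when an event moves between CPUs, only the per-cpu accounting has to follow it, while the global static keys and counters stay balanced.

	/* Sketch only: hypothetical migration helper built on the new split. */
	static void sketch_migrate_event_cpu(struct perf_event *event, int dst_cpu)
	{
		unaccount_event_cpu(event, event->cpu);	/* drop the old CPU's counts */
		event->cpu = dst_cpu;
		account_event_cpu(event, dst_cpu);	/* raise the new CPU's counts */
	}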
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 158fd5789e58..3a4b73aebc42 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3128,6 +3128,40 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
+static void unaccount_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void unaccount_event(struct perf_event *event)
+{
+	if (event->parent)
+		return;
+
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_dec(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_dec(&nr_comm_events);
+	if (event->attr.task)
+		atomic_dec(&nr_task_events);
+	if (is_cgroup_event(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (has_branch_stack(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+
+	unaccount_event_cpu(event, event->cpu);
+}
+
 static void __free_event(struct perf_event *event)
 {
 	if (!event->parent) {
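For reference, the per-cpu counters and the static key the new helpers touch are declared elsewhere in kernel/events/core.c; in kernels of this era the declarations look roughly like the following (quoted from the surrounding source as I recall it, not from this patch):

	static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
	static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
	struct static_key_deferred perf_sched_events __read_mostly;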
@@ -3147,29 +3181,7 @@ static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
 
-	if (!event->parent) {
-		if (event->attach_state & PERF_ATTACH_TASK)
-			static_key_slow_dec_deferred(&perf_sched_events);
-		if (event->attr.mmap || event->attr.mmap_data)
-			atomic_dec(&nr_mmap_events);
-		if (event->attr.comm)
-			atomic_dec(&nr_comm_events);
-		if (event->attr.task)
-			atomic_dec(&nr_task_events);
-		if (is_cgroup_event(event)) {
-			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			static_key_slow_dec_deferred(&perf_sched_events);
-		}
-
-		if (has_branch_stack(event)) {
-			static_key_slow_dec_deferred(&perf_sched_events);
-			/* is system-wide event */
-			if (!(event->attach_state & PERF_ATTACH_TASK)) {
-				atomic_dec(&per_cpu(perf_branch_stack_events,
-						    event->cpu));
-			}
-		}
-	}
+	unaccount_event(event);
 
 	if (event->rb) {
 		struct ring_buffer *rb;
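With free_event() reduced to a single unaccount_event() call, accounting becomes symmetric across the event's lifetime. A sketch of the resulting call pairing (account_event() is invoked from perf_event_alloc() as of the parent commit):

	/*
	 * perf_event_alloc()
	 *   account_event()           - global counters and static keys
	 *     account_event_cpu()     - per-cpu counters on event->cpu
	 * ...
	 * free_event()
	 *   unaccount_event()         - undoes the global accounting
	 *     unaccount_event_cpu()   - undoes the per-cpu accounting
	 */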
@@ -6451,8 +6463,24 @@ unlock:
 	return pmu;
 }
 
+static void account_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+}
+
 static void account_event(struct perf_event *event)
 {
+	if (event->parent)
+		return;
+
 	if (event->attach_state & PERF_ATTACH_TASK)
 		static_key_slow_inc(&perf_sched_events.key);
 	if (event->attr.mmap || event->attr.mmap_data)
@@ -6461,17 +6489,12 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (has_branch_stack(event)) {
+	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
-
-	if (is_cgroup_event(event)) {
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+	if (is_cgroup_event(event))
 		static_key_slow_inc(&perf_sched_events.key);
-	}
+
+	account_event_cpu(event, event->cpu);
 }
 
 /*