-rw-r--r--	kernel/events/core.c	87
1 file changed, 55 insertions(+), 32 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 158fd5789e58..3a4b73aebc42 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3128,6 +3128,40 @@ static void free_event_rcu(struct rcu_head *head)
 static void ring_buffer_put(struct ring_buffer *rb);
 static void ring_buffer_detach(struct perf_event *event, struct ring_buffer *rb);
 
+static void unaccount_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_dec(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
+}
+
+static void unaccount_event(struct perf_event *event)
+{
+	if (event->parent)
+		return;
+
+	if (event->attach_state & PERF_ATTACH_TASK)
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (event->attr.mmap || event->attr.mmap_data)
+		atomic_dec(&nr_mmap_events);
+	if (event->attr.comm)
+		atomic_dec(&nr_comm_events);
+	if (event->attr.task)
+		atomic_dec(&nr_task_events);
+	if (is_cgroup_event(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+	if (has_branch_stack(event))
+		static_key_slow_dec_deferred(&perf_sched_events);
+
+	unaccount_event_cpu(event, event->cpu);
+}
+
 static void __free_event(struct perf_event *event)
 {
 	if (!event->parent) {
@@ -3147,29 +3181,7 @@ static void free_event(struct perf_event *event)
 {
 	irq_work_sync(&event->pending);
 
-	if (!event->parent) {
-		if (event->attach_state & PERF_ATTACH_TASK)
-			static_key_slow_dec_deferred(&perf_sched_events);
-		if (event->attr.mmap || event->attr.mmap_data)
-			atomic_dec(&nr_mmap_events);
-		if (event->attr.comm)
-			atomic_dec(&nr_comm_events);
-		if (event->attr.task)
-			atomic_dec(&nr_task_events);
-		if (is_cgroup_event(event)) {
-			atomic_dec(&per_cpu(perf_cgroup_events, event->cpu));
-			static_key_slow_dec_deferred(&perf_sched_events);
-		}
-
-		if (has_branch_stack(event)) {
-			static_key_slow_dec_deferred(&perf_sched_events);
-			/* is system-wide event */
-			if (!(event->attach_state & PERF_ATTACH_TASK)) {
-				atomic_dec(&per_cpu(perf_branch_stack_events,
-						    event->cpu));
-			}
-		}
-	}
+	unaccount_event(event);
 
 	if (event->rb) {
 		struct ring_buffer *rb;
@@ -6451,8 +6463,24 @@ unlock:
 	return pmu;
 }
 
+static void account_event_cpu(struct perf_event *event, int cpu)
+{
+	if (event->parent)
+		return;
+
+	if (has_branch_stack(event)) {
+		if (!(event->attach_state & PERF_ATTACH_TASK))
+			atomic_inc(&per_cpu(perf_branch_stack_events, cpu));
+	}
+	if (is_cgroup_event(event))
+		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
+}
+
 static void account_event(struct perf_event *event)
 {
+	if (event->parent)
+		return;
+
 	if (event->attach_state & PERF_ATTACH_TASK)
 		static_key_slow_inc(&perf_sched_events.key);
 	if (event->attr.mmap || event->attr.mmap_data)
@@ -6461,17 +6489,12 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
-	if (has_branch_stack(event)) {
+	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
-		if (!(event->attach_state & PERF_ATTACH_TASK))
-			atomic_inc(&per_cpu(perf_branch_stack_events,
-					    event->cpu));
-	}
-
-	if (is_cgroup_event(event)) {
-		atomic_inc(&per_cpu(perf_cgroup_events, event->cpu));
+	if (is_cgroup_event(event))
 		static_key_slow_inc(&perf_sched_events.key);
-	}
+
+	account_event_cpu(event, event->cpu);
 }
 
 /*
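
For readers outside the kernel tree, here is a minimal user-space sketch of the pattern this patch establishes: account_event()/unaccount_event() own the global counters and delegate the per-CPU side to account_event_cpu()/unaccount_event_cpu(), so teardown mirrors setup step for step. The sketch substitutes C11 atomics and a fixed array for the kernel's atomic_t, per_cpu() and static keys; struct event and its fields are invented stand-ins for struct perf_event, not the kernel's definitions.

/*
 * Illustrative sketch only -- not kernel code. It mimics the split
 * introduced above: the "event" functions manage global counters and
 * call the "_cpu" helpers for the per-CPU counters, and each
 * unaccount path exactly reverses its account counterpart.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 4

static atomic_int nr_mmap_events;               /* global counter */
static atomic_int branch_stack_events[NR_CPUS]; /* per-CPU counters */

struct event {
	int cpu;          /* CPU the event is bound to */
	int mmap;         /* wants mmap records */
	int branch_stack; /* uses the branch stack */
};

/* Per-CPU side only, like account_event_cpu() in the patch. */
static void account_event_cpu(struct event *e, int cpu)
{
	if (e->branch_stack)
		atomic_fetch_add(&branch_stack_events[cpu], 1);
}

/* Global side first, then delegate, like account_event(). */
static void account_event(struct event *e)
{
	if (e->mmap)
		atomic_fetch_add(&nr_mmap_events, 1);
	account_event_cpu(e, e->cpu);
}

/* The unaccount pair undoes the above with the same structure. */
static void unaccount_event_cpu(struct event *e, int cpu)
{
	if (e->branch_stack)
		atomic_fetch_sub(&branch_stack_events[cpu], 1);
}

static void unaccount_event(struct event *e)
{
	if (e->mmap)
		atomic_fetch_sub(&nr_mmap_events, 1);
	unaccount_event_cpu(e, e->cpu);
}

int main(void)
{
	struct event e = { .cpu = 1, .mmap = 1, .branch_stack = 1 };

	account_event(&e);
	printf("accounted:   mmap=%d cpu1=%d\n",
	       atomic_load(&nr_mmap_events),
	       atomic_load(&branch_stack_events[1]));

	unaccount_event(&e);
	printf("unaccounted: mmap=%d cpu1=%d\n",
	       atomic_load(&nr_mmap_events),
	       atomic_load(&branch_stack_events[1]));
	return 0;
}

Built with cc -std=c11, this prints counts of 1 after accounting and 0 after unaccounting. Keeping the two sides structurally identical is what lets free_event() above collapse its open-coded counter updates into a single unaccount_event() call.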