about summary refs log tree commit diff stats
path: root/kernel/events
diff options
context:
space:
mode:
author	Peter Zijlstra <peterz@infradead.org>	2016-01-08 05:05:09 -0500
committer	Ingo Molnar <mingo@kernel.org>	2016-01-21 12:54:22 -0500
commit25432ae96a9889774a05bf5f0f6fd8dbcdec5e72 (patch)
treeb089019db7903744581c3440c25da35a38afab7b /kernel/events
parentaee7dbc45f8aa976913de9b352fa6da816f1f3cd (diff)
perf: Optimize perf_sched_events() usage
It doesn't make sense to take up-to _4_ references on perf_sched_events() per event, avoid doing this. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: David Ahern <dsahern@gmail.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vince Weaver <vincent.weaver@maine.edu> Signed-off-by: Ingo Molnar <mingo@kernel.org>
Diffstat (limited to 'kernel/events')
-rw-r--r--kernel/events/core.c22
1 file changed, 16 insertions, 6 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 079eb9fcaaa8..935aefd16354 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3491,11 +3491,13 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
3491 3491
3492static void unaccount_event(struct perf_event *event) 3492static void unaccount_event(struct perf_event *event)
3493{ 3493{
3494 bool dec = false;
3495
3494 if (event->parent) 3496 if (event->parent)
3495 return; 3497 return;
3496 3498
3497 if (event->attach_state & PERF_ATTACH_TASK) 3499 if (event->attach_state & PERF_ATTACH_TASK)
3498 static_key_slow_dec_deferred(&perf_sched_events); 3500 dec = true;
3499 if (event->attr.mmap || event->attr.mmap_data) 3501 if (event->attr.mmap || event->attr.mmap_data)
3500 atomic_dec(&nr_mmap_events); 3502 atomic_dec(&nr_mmap_events);
3501 if (event->attr.comm) 3503 if (event->attr.comm)
@@ -3505,12 +3507,15 @@ static void unaccount_event(struct perf_event *event)
3505 if (event->attr.freq) 3507 if (event->attr.freq)
3506 atomic_dec(&nr_freq_events); 3508 atomic_dec(&nr_freq_events);
3507 if (event->attr.context_switch) { 3509 if (event->attr.context_switch) {
3508 static_key_slow_dec_deferred(&perf_sched_events); 3510 dec = true;
3509 atomic_dec(&nr_switch_events); 3511 atomic_dec(&nr_switch_events);
3510 } 3512 }
3511 if (is_cgroup_event(event)) 3513 if (is_cgroup_event(event))
3512 static_key_slow_dec_deferred(&perf_sched_events); 3514 dec = true;
3513 if (has_branch_stack(event)) 3515 if (has_branch_stack(event))
3516 dec = true;
3517
3518 if (dec)
3514 static_key_slow_dec_deferred(&perf_sched_events); 3519 static_key_slow_dec_deferred(&perf_sched_events);
3515 3520
3516 unaccount_event_cpu(event, event->cpu); 3521 unaccount_event_cpu(event, event->cpu);
@@ -7723,11 +7728,13 @@ static void account_event_cpu(struct perf_event *event, int cpu)
7723 7728
7724static void account_event(struct perf_event *event) 7729static void account_event(struct perf_event *event)
7725{ 7730{
7731 bool inc = false;
7732
7726 if (event->parent) 7733 if (event->parent)
7727 return; 7734 return;
7728 7735
7729 if (event->attach_state & PERF_ATTACH_TASK) 7736 if (event->attach_state & PERF_ATTACH_TASK)
7730 static_key_slow_inc(&perf_sched_events.key); 7737 inc = true;
7731 if (event->attr.mmap || event->attr.mmap_data) 7738 if (event->attr.mmap || event->attr.mmap_data)
7732 atomic_inc(&nr_mmap_events); 7739 atomic_inc(&nr_mmap_events);
7733 if (event->attr.comm) 7740 if (event->attr.comm)
@@ -7740,11 +7747,14 @@ static void account_event(struct perf_event *event)
7740 } 7747 }
7741 if (event->attr.context_switch) { 7748 if (event->attr.context_switch) {
7742 atomic_inc(&nr_switch_events); 7749 atomic_inc(&nr_switch_events);
7743 static_key_slow_inc(&perf_sched_events.key); 7750 inc = true;
7744 } 7751 }
7745 if (has_branch_stack(event)) 7752 if (has_branch_stack(event))
7746 static_key_slow_inc(&perf_sched_events.key); 7753 inc = true;
7747 if (is_cgroup_event(event)) 7754 if (is_cgroup_event(event))
7755 inc = true;
7756
7757 if (inc)
7748 static_key_slow_inc(&perf_sched_events.key); 7758 static_key_slow_inc(&perf_sched_events.key);
7749 7759
7750 account_event_cpu(event, event->cpu); 7760 account_event_cpu(event, event->cpu);