author     Frederic Weisbecker <fweisbec@gmail.com>  2013-08-02 12:29:55 -0400
committer  Ingo Molnar <mingo@kernel.org>            2013-08-16 11:55:51 -0400
commit     948b26b6ddd08a57cb95ebb0dc96fde2edd5c383 (patch)
tree       7b358b7d8ab23ffadd170aba686768a74d7b8b01 /kernel
parent     fc3b86d673e41ac66b4ba5b75a90c2fcafb90089 (diff)
perf: Account freq events globally
Freq events may not always be affine to a particular CPU. As such,
account_event_cpu() may crash if we account a freq event per CPU
when it has event->cpu == -1.

To solve this, let's account freq events globally. In practice this
doesn't change the picture much, because perf tools create per-task
perf events with one event per CPU by default. Profiling a single CPU
is usually a corner case, so there is not much point in optimizing
for it.

Reported-by: Jiri Olsa <jolsa@redhat.com>
Suggested-by: Peter Zijlstra <peterz@infradead.org>
Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Tested-by: Jiri Olsa <jolsa@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Stephane Eranian <eranian@google.com>
Signed-off-by: Peter Zijlstra <peterz@infradead.org>
Link: http://lkml.kernel.org/r/1375460996-16329-3-git-send-email-fweisbec@gmail.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
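To make the failure mode concrete, the following is a minimal userspace
C sketch of the bug pattern, not the kernel code itself: struct event,
per_cpu_freq_events and NR_CPUS below are simplified stand-ins for the
kernel's perf_event / DEFINE_PER_CPU() / atomic_t machinery. A per-task
freq event carries cpu == -1, so indexing per-CPU storage with it is out
of bounds, while a single global counter is valid for any affinity.

/*
 * Illustration only -- NOT kernel code.  All names here are
 * simplified stand-ins for the kernel's per-CPU/atomic machinery.
 */
#include <stdatomic.h>
#include <stdio.h>

#define NR_CPUS 8

struct event {
	int cpu;	/* -1 means "not bound to any CPU" (per-task) */
	int freq;	/* frequency-based sampling requested */
};

static atomic_int per_cpu_freq_events[NR_CPUS];	/* old per-CPU scheme */
static atomic_int nr_freq_events;		/* new global scheme */

/* Old scheme: indexing per-CPU storage with cpu == -1 is out of bounds. */
static void account_event_cpu_old(struct event *e)
{
	if (e->freq)
		atomic_fetch_add(&per_cpu_freq_events[e->cpu], 1);
}

/* New scheme: one global counter works for any CPU affinity. */
static void account_event_new(struct event *e)
{
	if (e->freq)
		atomic_fetch_add(&nr_freq_events, 1);
}

int main(void)
{
	struct event e = { .cpu = -1, .freq = 1 };	/* per-task freq event */

	account_event_new(&e);		/* safe regardless of e.cpu */
	printf("nr_freq_events = %d\n", atomic_load(&nr_freq_events));

	/* account_event_cpu_old(&e) would access index -1: undefined behavior */
	return 0;
}

The global counter trades a little per-CPU locality for correctness; since
freq events are rare and the counter is only touched at event create/destroy
time, the contention cost is negligible.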
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c	19
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index e82e70025d42..2e675e830976 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -141,11 +141,11 @@ enum event_type_t {
 struct static_key_deferred perf_sched_events __read_mostly;
 static DEFINE_PER_CPU(atomic_t, perf_cgroup_events);
 static DEFINE_PER_CPU(atomic_t, perf_branch_stack_events);
-static DEFINE_PER_CPU(atomic_t, perf_freq_events);
 
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
+static atomic_t nr_freq_events __read_mostly;
 
 static LIST_HEAD(pmus);
 static DEFINE_MUTEX(pmus_lock);
@@ -1871,9 +1871,6 @@ static int __perf_install_in_context(void *info)
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
 
-	if (atomic_read(&__get_cpu_var(perf_freq_events)))
-		tick_nohz_full_kick();
-
 	return 0;
 }
 
@@ -2811,7 +2808,7 @@ done:
 #ifdef CONFIG_NO_HZ_FULL
 bool perf_event_can_stop_tick(void)
 {
-	if (atomic_read(&__get_cpu_var(perf_freq_events)) ||
+	if (atomic_read(&nr_freq_events) ||
 	    __this_cpu_read(perf_throttled_count))
 		return false;
 	else
@@ -3140,9 +3137,6 @@ static void unaccount_event_cpu(struct perf_event *event, int cpu)
 	}
 	if (is_cgroup_event(event))
 		atomic_dec(&per_cpu(perf_cgroup_events, cpu));
-
-	if (event->attr.freq)
-		atomic_dec(&per_cpu(perf_freq_events, cpu));
 }
 
 static void unaccount_event(struct perf_event *event)
@@ -3158,6 +3152,8 @@ static void unaccount_event(struct perf_event *event)
 		atomic_dec(&nr_comm_events);
 	if (event->attr.task)
 		atomic_dec(&nr_task_events);
+	if (event->attr.freq)
+		atomic_dec(&nr_freq_events);
 	if (is_cgroup_event(event))
 		static_key_slow_dec_deferred(&perf_sched_events);
 	if (has_branch_stack(event))
@@ -6489,9 +6485,6 @@ static void account_event_cpu(struct perf_event *event, int cpu)
 	}
 	if (is_cgroup_event(event))
 		atomic_inc(&per_cpu(perf_cgroup_events, cpu));
-
-	if (event->attr.freq)
-		atomic_inc(&per_cpu(perf_freq_events, cpu));
 }
 
 static void account_event(struct perf_event *event)
@@ -6507,6 +6500,10 @@ static void account_event(struct perf_event *event)
 		atomic_inc(&nr_comm_events);
 	if (event->attr.task)
 		atomic_inc(&nr_task_events);
+	if (event->attr.freq) {
+		if (atomic_inc_return(&nr_freq_events) == 1)
+			tick_nohz_full_kick_all();
+	}
 	if (has_branch_stack(event))
 		static_key_slow_inc(&perf_sched_events.key);
 	if (is_cgroup_event(event))