author	Peter Zijlstra <peterz@infradead.org>	2016-08-16 07:33:26 -0400
committer	Ingo Molnar <mingo@kernel.org>	2016-08-18 04:35:49 -0400
commit	cca2094605efe6ccf43ff2876dd5bccc799202d8 (patch)
tree	602620076d8e75ee2cf71154fbafe6243caa405f /kernel
parent	6c4687cc17a788a6dd8de3e27dbeabb7cbd3e066 (diff)
perf/core: Fix event_function_local()
Vincent reported triggering the WARN_ON_ONCE() in event_function_local().

While thinking through cases I noticed that by using event_function()
directly, we miss the inactive case usually handled by
event_function_call().

Therefore construct a blend of event_function_call() and
event_function() that handles the cases relevant to
event_function_local().

Reported-by: Vince Weaver <vincent.weaver@maine.edu>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Stephane Eranian <eranian@google.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: stable@vger.kernel.org # 4.5+
Fixes: fae3fde65138 ("perf: Collapse and fix event_function_call() users")
Signed-off-by: Ingo Molnar <mingo@kernel.org>
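The heart of the rewritten helper is a lockless-peek-then-recheck pattern on ctx->task: the pointer can become TASK_TOMBSTONE at any moment until ctx->lock is held, so it is read once without the lock for a cheap early-out and then re-checked after perf_ctx_lock(). Below is a minimal, hypothetical userspace sketch of that pattern; struct ctx, TOMBSTONE and function_local() are stand-in names for illustration, not kernel API.

/* Hypothetical userspace sketch of the double-check pattern; not kernel code. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define TOMBSTONE ((void *)-1L)			/* stands in for TASK_TOMBSTONE */

struct ctx {
	pthread_mutex_t lock;			/* stands in for ctx->lock */
	_Atomic(void *) task;			/* may become TOMBSTONE concurrently */
};

static void function_local(struct ctx *ctx)
{
	/* lockless peek, like READ_ONCE(ctx->task): cheap early-out only */
	void *task = atomic_load(&ctx->task);

	if (task == TOMBSTONE)
		return;				/* context already dead */

	pthread_mutex_lock(&ctx->lock);

	/* re-check under the lock; it may have died before we acquired it */
	task = atomic_load(&ctx->task);
	if (task == TOMBSTONE)
		goto unlock;

	printf("ctx %p alive, safe to run func()\n", (void *)ctx);
unlock:
	pthread_mutex_unlock(&ctx->lock);
}

int main(void)
{
	struct ctx c = { .lock = PTHREAD_MUTEX_INITIALIZER };

	atomic_store(&c.task, (void *)&c);	/* pretend a live task */
	function_local(&c);			/* passes both checks */

	atomic_store(&c.task, TOMBSTONE);	/* context torn down */
	function_local(&c);			/* bails out early */
	return 0;
}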
Diffstat (limited to 'kernel')
-rw-r--r--	kernel/events/core.c	60
1 file changed, 48 insertions(+), 12 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 1903b8f3a705..6e454bfd514f 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -242,18 +242,6 @@ unlock:
 	return ret;
 }
 
-static void event_function_local(struct perf_event *event, event_f func, void *data)
-{
-	struct event_function_struct efs = {
-		.event = event,
-		.func = func,
-		.data = data,
-	};
-
-	int ret = event_function(&efs);
-	WARN_ON_ONCE(ret);
-}
-
 static void event_function_call(struct perf_event *event, event_f func, void *data)
 {
 	struct perf_event_context *ctx = event->ctx;
@@ -303,6 +291,54 @@ again:
 	raw_spin_unlock_irq(&ctx->lock);
 }
 
+/*
+ * Similar to event_function_call() + event_function(), but hard assumes IRQs
+ * are already disabled and we're on the right CPU.
+ */
+static void event_function_local(struct perf_event *event, event_f func, void *data)
+{
+	struct perf_event_context *ctx = event->ctx;
+	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
+	struct task_struct *task = READ_ONCE(ctx->task);
+	struct perf_event_context *task_ctx = NULL;
+
+	WARN_ON_ONCE(!irqs_disabled());
+
+	if (task) {
+		if (task == TASK_TOMBSTONE)
+			return;
+
+		task_ctx = ctx;
+	}
+
+	perf_ctx_lock(cpuctx, task_ctx);
+
+	task = ctx->task;
+	if (task == TASK_TOMBSTONE)
+		goto unlock;
+
+	if (task) {
+		/*
+		 * We must be either inactive or active and the right task,
+		 * otherwise we're screwed, since we cannot IPI to somewhere
+		 * else.
+		 */
+		if (ctx->is_active) {
+			if (WARN_ON_ONCE(task != current))
+				goto unlock;
+
+			if (WARN_ON_ONCE(cpuctx->task_ctx != ctx))
+				goto unlock;
+		}
+	} else {
+		WARN_ON_ONCE(&cpuctx->ctx != ctx);
+	}
+
+	func(event, cpuctx, ctx, data);
+unlock:
+	perf_ctx_unlock(cpuctx, task_ctx);
+}
+
 #define PERF_FLAG_ALL	(PERF_FLAG_FD_NO_GROUP |\
 			 PERF_FLAG_FD_OUTPUT  |\
 			 PERF_FLAG_PID_CGROUP |\
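For context on why the "we cannot IPI to somewhere else" comment matters: this helper is meant for callers that already run with IRQs disabled (for instance from the PMU interrupt path), so unlike event_function_call() it has no fallback of shipping the work to the event's CPU. From memory, the in-tree caller in this same file looks roughly like the sketch below; consult the actual tree for the authoritative version.

/* Approximate quotation of the caller in kernel/events/core.c (this era). */
void perf_event_disable_local(struct perf_event *event)
{
	event_function_local(event, __perf_event_disable, NULL);
}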