aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorPeter Zijlstra <peterz@infradead.org>2016-01-08 05:39:10 -0500
committerIngo Molnar <mingo@kernel.org>2016-01-21 12:54:23 -0500
commit63e30d3e52d4d85854ce6c761ffc6ab55209a630 (patch)
treef0833f9173a358737aadfefe56e7e60bb741b337
parent25432ae96a9889774a05bf5f0f6fd8dbcdec5e72 (diff)
perf: Make ctx->is_active and cpuctx->task_ctx consistent
For no apparent reason and to great confusion the rules for ctx->is_active and cpuctx->task_ctx are different. This means that it's not always possible to find all active (task) contexts. Fix this such that if ctx->is_active gets set, we also set (or verify) cpuctx->task_ctx. Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org> Cc: Arnaldo Carvalho de Melo <acme@redhat.com> Cc: David Ahern <dsahern@gmail.com> Cc: Jiri Olsa <jolsa@redhat.com> Cc: Linus Torvalds <torvalds@linux-foundation.org> Cc: Namhyung Kim <namhyung@kernel.org> Cc: Peter Zijlstra <peterz@infradead.org> Cc: Stephane Eranian <eranian@google.com> Cc: Thomas Gleixner <tglx@linutronix.de> Cc: Vince Weaver <vincent.weaver@maine.edu> Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--kernel/events/core.c21
1 file changed, 14 insertions, 7 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 935aefd16354..89b47050a2e8 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2329,6 +2329,12 @@ static void ctx_sched_out(struct perf_event_context *ctx,
2329 lockdep_assert_held(&ctx->lock); 2329 lockdep_assert_held(&ctx->lock);
2330 2330
2331 ctx->is_active &= ~event_type; 2331 ctx->is_active &= ~event_type;
2332 if (ctx->task) {
2333 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2334 if (!ctx->is_active)
2335 cpuctx->task_ctx = NULL;
2336 }
2337
2332 if (likely(!ctx->nr_events)) 2338 if (likely(!ctx->nr_events))
2333 return; 2339 return;
2334 2340
@@ -2629,7 +2635,6 @@ static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
2629 return; 2635 return;
2630 2636
2631 ctx_sched_out(ctx, cpuctx, EVENT_ALL); 2637 ctx_sched_out(ctx, cpuctx, EVENT_ALL);
2632 cpuctx->task_ctx = NULL;
2633} 2638}
2634 2639
2635/* 2640/*
@@ -2712,6 +2717,13 @@ ctx_sched_in(struct perf_event_context *ctx,
2712 lockdep_assert_held(&ctx->lock); 2717 lockdep_assert_held(&ctx->lock);
2713 2718
2714 ctx->is_active |= event_type; 2719 ctx->is_active |= event_type;
2720 if (ctx->task) {
2721 if (!is_active)
2722 cpuctx->task_ctx = ctx;
2723 else
2724 WARN_ON_ONCE(cpuctx->task_ctx != ctx);
2725 }
2726
2715 if (likely(!ctx->nr_events)) 2727 if (likely(!ctx->nr_events))
2716 return; 2728 return;
2717 2729
@@ -2756,12 +2768,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
2756 * cpu flexible, task flexible. 2768 * cpu flexible, task flexible.
2757 */ 2769 */
2758 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE); 2770 cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
2759 2771 perf_event_sched_in(cpuctx, ctx, task);
2760 if (ctx->nr_events)
2761 cpuctx->task_ctx = ctx;
2762
2763 perf_event_sched_in(cpuctx, cpuctx->task_ctx, task);
2764
2765 perf_pmu_enable(ctx->pmu); 2772 perf_pmu_enable(ctx->pmu);
2766 perf_ctx_unlock(cpuctx, ctx); 2773 perf_ctx_unlock(cpuctx, ctx);
2767} 2774}