author    Peter Zijlstra <peterz@infradead.org>  2016-02-24 12:45:49 -0500
committer Ingo Molnar <mingo@kernel.org>         2016-02-25 02:44:19 -0500
commit    bd2afa49d194c6412c333e9fdd48bc5d06bb465d
tree      4568fb1b834f6b70d067157d4f6134b9e3667b6e
parent    7fce250915efca0f8f51dddee3ae89bf30d86ca5
perf: Fix scaling vs. perf_event_enable()
Similar to the perf_enable_on_exec(), ensure that event timings are
consistent across perf_event_enable().

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Jiri Olsa <jolsa@redhat.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: dvyukov@google.com
Cc: eranian@google.com
Cc: oleg@redhat.com
Cc: panand@redhat.com
Cc: sasha.levin@oracle.com
Cc: vince@deater.net
Link: http://lkml.kernel.org/r/20160224174948.218288698@infradead.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
-rw-r--r--  kernel/events/core.c | 42
1 file changed, 23 insertions(+), 19 deletions(-)
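The commit message above is terse, so here is a minimal userspace model of the invariant the patch restores: when an event is enabled in a context that is already running, the context time must be brought up to date first and the event's timestamps re-based against that single snapshot, otherwise the time_enabled/time_running pair used to scale counts drifts apart. This is purely an illustrative sketch, not kernel code; the struct and function names (toy_ctx, toy_event, event_enable, update_event_times) are made up for the example.

/* Illustrative model only -- not the kernel implementation. */
#include <stdio.h>
#include <stdint.h>

struct toy_ctx {
        uint64_t time;                  /* context clock, advanced by hand */
};

struct toy_event {
        uint64_t tstamp_enabled;        /* when the event became enabled */
        uint64_t tstamp_running;        /* when the event last started counting */
        uint64_t total_time_enabled;
        uint64_t total_time_running;
        int enabled;
};

/* Rough analogue of scheduling EVENT_TIME out/in: fold elapsed time into
 * the totals against one snapshot of ctx->time. */
static void update_event_times(struct toy_ctx *ctx, struct toy_event *e)
{
        if (!e->enabled)
                return;
        e->total_time_enabled = ctx->time - e->tstamp_enabled;
        e->total_time_running = ctx->time - e->tstamp_running;
}

/* Rough analogue of the enable path: take the time snapshot first, then
 * mark the event enabled relative to that same snapshot. */
static void event_enable(struct toy_ctx *ctx, struct toy_event *e)
{
        update_event_times(ctx, e);
        e->enabled = 1;
        e->tstamp_enabled = ctx->time - e->total_time_enabled;
        e->tstamp_running = ctx->time - e->total_time_running;
}

int main(void)
{
        struct toy_ctx ctx = { .time = 0 };
        struct toy_event ev = { 0 };

        ctx.time = 100;                 /* context has been active for 100 units */
        event_enable(&ctx, &ev);        /* enable late: totals stay at 0 */
        ctx.time = 150;                 /* run for another 50 units */
        update_event_times(&ctx, &ev);

        /* Both totals come out as 50, so the scaling ratio stays 1.0. */
        printf("enabled=%llu running=%llu\n",
               (unsigned long long)ev.total_time_enabled,
               (unsigned long long)ev.total_time_running);
        return 0;
}

In the patch itself the same re-basing is done by scheduling the context's EVENT_TIME component out before __perf_event_mark_enabled() and scheduling it back in on the early-return paths, so every timestamp the enable path touches is taken against one freshly updated context time.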
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d0030886c402..57c25faecfa5 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -2069,14 +2069,27 @@ static void add_event_to_ctx(struct perf_event *event,
 	event->tstamp_stopped = tstamp;
 }
 
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-			       struct perf_event_context *ctx);
+static void ctx_sched_out(struct perf_event_context *ctx,
+			  struct perf_cpu_context *cpuctx,
+			  enum event_type_t event_type);
 static void
 ctx_sched_in(struct perf_event_context *ctx,
 	     struct perf_cpu_context *cpuctx,
 	     enum event_type_t event_type,
 	     struct task_struct *task);
 
+static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
+			       struct perf_event_context *ctx)
+{
+	if (!cpuctx->task_ctx)
+		return;
+
+	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
+		return;
+
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
+}
+
 static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
 				struct perf_event_context *ctx,
 				struct task_struct *task)
@@ -2227,17 +2240,18 @@ static void __perf_event_enable(struct perf_event *event,
 	    event->state <= PERF_EVENT_STATE_ERROR)
 		return;
 
-	update_context_time(ctx);
+	if (ctx->is_active)
+		ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+
 	__perf_event_mark_enabled(event);
 
 	if (!ctx->is_active)
 		return;
 
 	if (!event_filter_match(event)) {
-		if (is_cgroup_event(event)) {
-			perf_cgroup_set_timestamp(current, ctx); // XXX ?
+		if (is_cgroup_event(event))
 			perf_cgroup_defer_enabled(event);
-		}
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
 		return;
 	}
 
@@ -2245,8 +2259,10 @@ static void __perf_event_enable(struct perf_event *event,
 	 * If the event is in a group and isn't the group leader,
 	 * then don't put it on unless the group is on.
 	 */
-	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE)
+	if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
+		ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
 		return;
+	}
 
 	task_ctx = cpuctx->task_ctx;
 	if (ctx->task)
@@ -2658,18 +2674,6 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	perf_cgroup_sched_out(task, next);
 }
 
-static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
-			       struct perf_event_context *ctx)
-{
-	if (!cpuctx->task_ctx)
-		return;
-
-	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
-		return;
-
-	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
-}
-
 /*
  * Called with IRQs disabled
  */