path: root/kernel/events
author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-09 15:17:43 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-05-28 12:01:14 -0400
commit	04dc2dbbfe1c6f81b996d4dab255da75f9efbb4a (patch)
tree	a99e0c849f61d5bf7f3d9777f0e5c4bf69c61d9a /kernel/events
parent	facc43071cc0d4821c176d7d34570714eb348df9 (diff)
perf: Remove task_ctx_sched_in()
Make task_ctx_sched_*() imply EVENT_ALL, since anything less will not actually have scheduled the task in/out at all.

Since there is no site that schedules all of a task in (due to the interleave with flexible cpuctx), we can remove this function.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.817893268@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
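For background, perf_event_context_sched_in() interleaves the CPU and task contexts, so no single call site ever schedules in all of a task context at once. A rough sketch of that ordering, assembled from the helpers and event types visible in this diff rather than the verbatim upstream body:

	/*
	 * Sketch of the sched-in ordering, not the exact upstream code:
	 * cpu pinned (left in place), task pinned, cpu flexible, task flexible.
	 */
	static void perf_event_context_sched_in(struct perf_event_context *ctx,
						struct task_struct *task)
	{
		struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);

		if (cpuctx->task_ctx == ctx)
			return;

		/* flexible CPU events may have to yield to pinned task events */
		cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);

		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);	/* task pinned */
		cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);	/* cpu flexible */
		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);	/* task flexible */

		cpuctx->task_ctx = ctx;
	}

Because a task context is only ever scheduled in piecemeal like this (pinned and flexible groups in separate calls, with the cpuctx flexible groups in between), a task_ctx_sched_in() that implies EVENT_ALL has no caller, and the EVENT_FLEXIBLE-only user in perf_rotate_context() can call ctx_sched_in() directly.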
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	26
1 file changed, 6 insertions(+), 20 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index d243af954dcc..66b3dd809409 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1979,8 +1979,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	perf_cgroup_sched_out(task);
 }
 
-static void task_ctx_sched_out(struct perf_event_context *ctx,
-			       enum event_type_t event_type)
+static void task_ctx_sched_out(struct perf_event_context *ctx)
 {
 	struct perf_cpu_context *cpuctx = __get_cpu_context(ctx);
 
@@ -1990,7 +1989,7 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 	if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
 		return;
 
-	ctx_sched_out(ctx, cpuctx, event_type);
+	ctx_sched_out(ctx, cpuctx, EVENT_ALL);
 	cpuctx->task_ctx = NULL;
 }
 
@@ -2098,19 +2097,6 @@ static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
 	ctx_sched_in(ctx, cpuctx, event_type, task);
 }
 
-static void task_ctx_sched_in(struct perf_event_context *ctx,
-			      enum event_type_t event_type)
-{
-	struct perf_cpu_context *cpuctx;
-
-	cpuctx = __get_cpu_context(ctx);
-	if (cpuctx->task_ctx == ctx)
-		return;
-
-	ctx_sched_in(ctx, cpuctx, event_type, NULL);
-	cpuctx->task_ctx = ctx;
-}
-
 static void perf_event_context_sched_in(struct perf_event_context *ctx,
 					struct task_struct *task)
 {
@@ -2363,7 +2349,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
-		task_ctx_sched_out(ctx, EVENT_FLEXIBLE);
+		ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
 
 	rotate_ctx(&cpuctx->ctx);
 	if (ctx)
@@ -2371,7 +2357,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
 	if (ctx)
-		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
 
 done:
 	if (remove)
@@ -2435,7 +2421,7 @@ static void perf_event_enable_on_exec(struct perf_event_context *ctx)
 	perf_cgroup_sched_out(current);
 
 	raw_spin_lock(&ctx->lock);
-	task_ctx_sched_out(ctx, EVENT_ALL);
+	task_ctx_sched_out(ctx);
 
 	list_for_each_entry(event, &ctx->pinned_groups, group_entry) {
 		ret = event_enable_on_exec(event, ctx);
@@ -6794,7 +6780,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
 	 * incremented the context's refcount before we do put_ctx below.
 	 */
 	raw_spin_lock(&child_ctx->lock);
-	task_ctx_sched_out(child_ctx, EVENT_ALL);
+	task_ctx_sched_out(child_ctx);
 	child->perf_event_ctxp[ctxn] = NULL;
 	/*
 	 * If this context is a clone; unclone it so it can't get