author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2011-04-09 15:17:46 -0400
committer	Ingo Molnar <mingo@elte.hu>	2011-05-28 12:01:19 -0400
commit	dce5855bba5df9e87bb04584d505c1f1b103c652 (patch)
tree	167312131a85a176ec71775fa81ddbf14a33dcb6 /kernel/events
parent	db24d33e08b88e990991760a44d72006a5dc6102 (diff)
perf: Collect the schedule-in rules in one function
This was scattered out - refactor it into a single function.
No change in functionality.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20110409192141.979862055@chello.nl
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/events')
-rw-r--r--	kernel/events/core.c	27
1 file changed, 15 insertions(+), 12 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 71c2d44ff95d..802f3b24eeef 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -1476,6 +1476,18 @@ ctx_sched_in(struct perf_event_context *ctx,
 	     enum event_type_t event_type,
 	     struct task_struct *task);
 
+static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
+				struct perf_event_context *ctx,
+				struct task_struct *task)
+{
+	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+	if (ctx)
+		ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+	if (ctx)
+		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+}
+
 /*
  * Cross CPU call to install and enable a performance event
  *
@@ -1523,12 +1535,7 @@ static int __perf_install_in_context(void *info)
 	/*
 	 * Schedule everything back in
 	 */
-	cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
-	if (task_ctx)
-		ctx_sched_in(task_ctx, cpuctx, EVENT_PINNED, task);
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
-	if (task_ctx)
-		ctx_sched_in(task_ctx, cpuctx, EVENT_FLEXIBLE, task);
+	perf_event_sched_in(cpuctx, task_ctx, task);
 
 	perf_pmu_enable(cpuctx->ctx.pmu);
 	perf_ctx_unlock(cpuctx, task_ctx);
@@ -2107,9 +2114,7 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
 	 */
 	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
 
-	ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
-	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+	perf_event_sched_in(cpuctx, ctx, task);
 
 	cpuctx->task_ctx = ctx;
 
@@ -2347,9 +2352,7 @@ static void perf_rotate_context(struct perf_cpu_context *cpuctx)
 	if (ctx)
 		rotate_ctx(ctx);
 
-	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, current);
-	if (ctx)
-		ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, current);
+	perf_event_sched_in(cpuctx, ctx, current);
 
 done:
 	if (remove)
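
For readers outside the kernel tree, the rule the new helper centralizes can be shown with a small stand-alone sketch. Everything below (struct ctx, sched_in(), event_sched_in()) is a hypothetical user-space stand-in for the cpuctx/ctx machinery above, not kernel code; it only demonstrates the ordering perf_event_sched_in() enforces - pinned events in both contexts are scheduled in before any flexible events - and that the task context may be absent (the NULL checks mirror the if (ctx) tests in the diff).

/*
 * Illustrative user-space sketch, not kernel code: all names here are
 * hypothetical stand-ins for the perf machinery in the diff above.
 */
#include <stdio.h>

enum event_type { EVENT_PINNED, EVENT_FLEXIBLE };

struct ctx { const char *name; };

/* stand-in for cpu_ctx_sched_in()/ctx_sched_in() */
static void sched_in(struct ctx *c, enum event_type t)
{
	printf("%s: schedule in %s events\n", c->name,
	       t == EVENT_PINNED ? "pinned" : "flexible");
}

/*
 * Mirrors the shape of perf_event_sched_in(): pinned events in both
 * contexts go first, then flexible events fill what is left. The task
 * context may be NULL, as on the rotation path above.
 */
static void event_sched_in(struct ctx *cpu, struct ctx *task)
{
	sched_in(cpu, EVENT_PINNED);
	if (task)
		sched_in(task, EVENT_PINNED);
	sched_in(cpu, EVENT_FLEXIBLE);
	if (task)
		sched_in(task, EVENT_FLEXIBLE);
}

int main(void)
{
	struct ctx cpu = { "cpu" }, task = { "task" };

	event_sched_in(&cpu, &task);	/* install path: both contexts */
	event_sched_in(&cpu, NULL);	/* rotate path: no task context */
	return 0;
}

The design point the helper captures is ordering: pinned events must claim hardware counters before flexible ones compete for the remainder, and after this patch every caller inherits that rule from one place instead of restating it.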