path: root/kernel/perf_event.c
author	Frederic Weisbecker <fweisbec@gmail.com>	2010-01-17 06:56:05 -0500
committer	Frederic Weisbecker <fweisbec@gmail.com>	2010-01-17 07:11:05 -0500
commit	329c0e012b99fa2325a0be205c052e4aba690f16 (patch)
tree	07a0ae5bf264c622525d315d87b95ad5f6a22044 /kernel/perf_event.c
parent	7defb0f879bbcfe29e3c6f29d685d4f29b7a0700 (diff)
perf: Better order flexible and pinned scheduling
When a task gets scheduled in, we don't touch the cpu bound events, so
the priority order becomes: cpu pinned, cpu flexible, task pinned, task
flexible.

So schedule out the cpu flexible groups when a new task context comes
in, and schedule the groups back in the correct order: task pinned,
cpu flexible, task flexible. Cpu pinned groups don't need to be touched
at this point.

Signed-off-by: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Ingo Molnar <mingo@elte.hu>
Cc: Arnaldo Carvalho de Melo <acme@infradead.org>
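As a schematic recap (using only the helpers that appear in the diff
below; cpuctx and ctx are the locals set up in
perf_event_task_sched_in()), the schedule-in path changes roughly as
follows:

	/* Before: task pinned and task flexible groups went in together,
	 * behind whatever cpu flexible groups already held the PMU. */
	task_ctx_sched_in(task, EVENT_ALL);

	/* After: free the cpu flexible slots first, then fill the PMU
	 * in priority order. */
	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);	/* evict cpu flexible groups */
	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);	/* task pinned goes in first */
	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);	/* cpu flexible comes back */
	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);	/* task flexible goes in last */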
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	34
1 file changed, 27 insertions(+), 7 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index a90ae694cbc1..edc46b92b508 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1362,6 +1362,14 @@ ctx_sched_in(struct perf_event_context *ctx,
 	raw_spin_unlock(&ctx->lock);
 }
 
+static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
+			     enum event_type_t event_type)
+{
+	struct perf_event_context *ctx = &cpuctx->ctx;
+
+	ctx_sched_in(ctx, cpuctx, event_type);
+}
+
 static void task_ctx_sched_in(struct task_struct *task,
 			      enum event_type_t event_type)
 {
@@ -1388,15 +1396,27 @@ static void task_ctx_sched_in(struct task_struct *task,
  */
 void perf_event_task_sched_in(struct task_struct *task)
 {
-	task_ctx_sched_in(task, EVENT_ALL);
-}
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
+	struct perf_event_context *ctx = task->perf_event_ctxp;
 
-static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
-			     enum event_type_t event_type)
-{
-	struct perf_event_context *ctx = &cpuctx->ctx;
+	if (likely(!ctx))
+		return;
 
-	ctx_sched_in(ctx, cpuctx, event_type);
+	if (cpuctx->task_ctx == ctx)
+		return;
+
+	/*
+	 * We want to keep the following priority order:
+	 * cpu pinned (that don't need to move), task pinned,
+	 * cpu flexible, task flexible.
+	 */
+	cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+
+	ctx_sched_in(ctx, cpuctx, EVENT_PINNED);
+	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
+	ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE);
+
+	cpuctx->task_ctx = ctx;
 }
 
 #define MAX_INTERRUPTS (~0ULL)