aboutsummaryrefslogtreecommitdiffstats
path: root/kernel/perf_event.c
diff options
context:
space:
mode:
authorPeter Zijlstra <a.p.zijlstra@chello.nl>2009-12-27 05:51:52 -0500
committerIngo Molnar <mingo@elte.hu>2009-12-28 03:21:33 -0500
commit49f474331e563a6ecf3b1e87ec27ec5482b3e4f1 (patch)
treee3e4b5c718fd3d35ce01d4fe015886d9f3336760 /kernel/perf_event.c
parent4cf40131a5cf4918e83b3756e58a1fc9e984f8ef (diff)
perf events: Remove arg from perf sched hooks
Since we only ever schedule the local cpu, there is no need to pass the
cpu number to the perf sched hooks. This micro-optimizes things a bit.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--kernel/perf_event.c27
1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 03cc061398d1..099bd662daa6 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1170,9 +1170,9 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
  * not restart the event.
  */
-void perf_event_task_sched_out(struct task_struct *task,
-			       struct task_struct *next, int cpu)
+void perf_event_task_sched_out(struct task_struct *task,
+			       struct task_struct *next)
 {
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
@@ -1252,8 +1252,9 @@ static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
 
 static void
 __perf_event_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx, int cpu)
+		      struct perf_cpu_context *cpuctx)
 {
+	int cpu = smp_processor_id();
 	struct perf_event *event;
 	int can_add_hw = 1;
 
@@ -1326,24 +1327,24 @@ __perf_event_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task, int cpu)
+void perf_event_task_sched_in(struct task_struct *task)
 {
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 
 	if (likely(!ctx))
 		return;
 	if (cpuctx->task_ctx == ctx)
 		return;
-	__perf_event_sched_in(ctx, cpuctx, cpu);
+	__perf_event_sched_in(ctx, cpuctx);
 	cpuctx->task_ctx = ctx;
 }
 
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
+static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
 {
 	struct perf_event_context *ctx = &cpuctx->ctx;
 
-	__perf_event_sched_in(ctx, cpuctx, cpu);
+	__perf_event_sched_in(ctx, cpuctx);
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1461,7 +1462,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	raw_spin_unlock(&ctx->lock);
 }
 
-void perf_event_task_tick(struct task_struct *curr, int cpu)
+void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
@@ -1469,7 +1470,7 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
 	if (!atomic_read(&nr_events))
 		return;
 
-	cpuctx = &per_cpu(perf_cpu_context, cpu);
+	cpuctx = &__get_cpu_var(perf_cpu_context);
 	ctx = curr->perf_event_ctxp;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
@@ -1484,9 +1485,9 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
 	if (ctx)
 		rotate_ctx(ctx);
 
-	perf_event_cpu_sched_in(cpuctx, cpu);
+	perf_event_cpu_sched_in(cpuctx);
 	if (ctx)
-		perf_event_task_sched_in(curr, cpu);
+		perf_event_task_sched_in(curr);
 }
 
 /*
@@ -1527,7 +1528,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
 	raw_spin_unlock(&ctx->lock);
 
-	perf_event_task_sched_in(task, smp_processor_id());
+	perf_event_task_sched_in(task);
  out:
 	local_irq_restore(flags);
 }