author     Peter Zijlstra <a.p.zijlstra@chello.nl>   2009-12-27 05:51:52 -0500
committer  Ingo Molnar <mingo@elte.hu>               2009-12-28 03:21:33 -0500
commit     49f474331e563a6ecf3b1e87ec27ec5482b3e4f1 (patch)
tree       e3e4b5c718fd3d35ce01d4fe015886d9f3336760
parent     4cf40131a5cf4918e83b3756e58a1fc9e984f8ef (diff)
perf events: Remove arg from perf sched hooks
Since we only ever schedule the local cpu, there is no need to pass the
cpu number to the perf sched hooks. This micro-optimizes things a bit.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
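For context, the core of the change is switching from the per_cpu() accessor, which needs an explicit cpu argument, to __get_cpu_var(), which resolves the current CPU's instance directly. Below is a minimal sketch of that pattern, not taken from the patch itself: my_cpu_state, hook_old() and hook_new() are hypothetical names used only for illustration, and the snippet is kernel-style code rather than a standalone buildable module.

/*
 * Illustrative sketch only (kernel-style, not a buildable module).
 * "my_cpu_state", "hook_old" and "hook_new" are hypothetical names.
 */
#include <linux/percpu.h>
#include <linux/smp.h>

struct my_cpu_state {
	int nr_events;
};

static DEFINE_PER_CPU(struct my_cpu_state, my_cpu_state);

/* Before: every caller had to look up and pass the cpu number. */
static void hook_old(int cpu)
{
	struct my_cpu_state *st = &per_cpu(my_cpu_state, cpu);

	st->nr_events++;
}

/*
 * After: the hook only ever runs on the local cpu (with preemption
 * disabled, as the scheduler hooks are), so it resolves its own
 * per-CPU data and the argument disappears from every call site.
 */
static void hook_new(void)
{
	struct my_cpu_state *st = &__get_cpu_var(my_cpu_state);

	st->nr_events++;
}

A call site that previously read hook_old(smp_processor_id()) simply becomes hook_new(), which is the same shape as the kernel/sched.c changes in the diff below.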
-rw-r--r--  include/linux/perf_event.h | 12
-rw-r--r--  kernel/perf_event.c        | 27
-rw-r--r--  kernel/sched.c             |  6
3 files changed, 23 insertions(+), 22 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index c66b34f75eea..a494e7501292 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -746,10 +746,10 @@ extern int perf_max_events;
 
 extern const struct pmu *hw_perf_event_init(struct perf_event *event);
 
-extern void perf_event_task_sched_in(struct task_struct *task, int cpu);
+extern void perf_event_task_sched_in(struct task_struct *task);
 extern void perf_event_task_sched_out(struct task_struct *task,
-				      struct task_struct *next, int cpu);
-extern void perf_event_task_tick(struct task_struct *task, int cpu);
+				      struct task_struct *next);
+extern void perf_event_task_tick(struct task_struct *task);
 extern int perf_event_init_task(struct task_struct *child);
 extern void perf_event_exit_task(struct task_struct *child);
 extern void perf_event_free_task(struct task_struct *task);
@@ -870,12 +870,12 @@ extern void perf_event_enable(struct perf_event *event);
 extern void perf_event_disable(struct perf_event *event);
 #else
 static inline void
-perf_event_task_sched_in(struct task_struct *task, int cpu) { }
+perf_event_task_sched_in(struct task_struct *task) { }
 static inline void
 perf_event_task_sched_out(struct task_struct *task,
-			  struct task_struct *next, int cpu) { }
+			  struct task_struct *next) { }
 static inline void
-perf_event_task_tick(struct task_struct *task, int cpu) { }
+perf_event_task_tick(struct task_struct *task) { }
 static inline int perf_event_init_task(struct task_struct *child) { return 0; }
 static inline void perf_event_exit_task(struct task_struct *child) { }
 static inline void perf_event_free_task(struct task_struct *task) { }
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 03cc061398d1..099bd662daa6 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1170,9 +1170,9 @@ static void perf_event_sync_stat(struct perf_event_context *ctx,
  * not restart the event.
  */
 void perf_event_task_sched_out(struct task_struct *task,
-			       struct task_struct *next, int cpu)
+			       struct task_struct *next)
 {
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 	struct perf_event_context *next_ctx;
 	struct perf_event_context *parent;
@@ -1252,8 +1252,9 @@ static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
 
 static void
 __perf_event_sched_in(struct perf_event_context *ctx,
-		      struct perf_cpu_context *cpuctx, int cpu)
+		      struct perf_cpu_context *cpuctx)
 {
+	int cpu = smp_processor_id();
 	struct perf_event *event;
 	int can_add_hw = 1;
 
@@ -1326,24 +1327,24 @@ __perf_event_sched_in(struct perf_event_context *ctx,
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task, int cpu)
+void perf_event_task_sched_in(struct task_struct *task)
 {
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
+	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_event_context *ctx = task->perf_event_ctxp;
 
 	if (likely(!ctx))
 		return;
 	if (cpuctx->task_ctx == ctx)
 		return;
-	__perf_event_sched_in(ctx, cpuctx, cpu);
+	__perf_event_sched_in(ctx, cpuctx);
 	cpuctx->task_ctx = ctx;
 }
 
-static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx, int cpu)
+static void perf_event_cpu_sched_in(struct perf_cpu_context *cpuctx)
 {
 	struct perf_event_context *ctx = &cpuctx->ctx;
 
-	__perf_event_sched_in(ctx, cpuctx, cpu);
+	__perf_event_sched_in(ctx, cpuctx);
 }
 
 #define MAX_INTERRUPTS (~0ULL)
@@ -1461,7 +1462,7 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	raw_spin_unlock(&ctx->lock);
 }
 
-void perf_event_task_tick(struct task_struct *curr, int cpu)
+void perf_event_task_tick(struct task_struct *curr)
 {
 	struct perf_cpu_context *cpuctx;
 	struct perf_event_context *ctx;
@@ -1469,7 +1470,7 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
 	if (!atomic_read(&nr_events))
 		return;
 
-	cpuctx = &per_cpu(perf_cpu_context, cpu);
+	cpuctx = &__get_cpu_var(perf_cpu_context);
 	ctx = curr->perf_event_ctxp;
 
 	perf_ctx_adjust_freq(&cpuctx->ctx);
@@ -1484,9 +1485,9 @@ void perf_event_task_tick(struct task_struct *curr, int cpu)
 	if (ctx)
 		rotate_ctx(ctx);
 
-	perf_event_cpu_sched_in(cpuctx, cpu);
+	perf_event_cpu_sched_in(cpuctx);
 	if (ctx)
-		perf_event_task_sched_in(curr, cpu);
+		perf_event_task_sched_in(curr);
 }
 
 /*
@@ -1527,7 +1528,7 @@ static void perf_event_enable_on_exec(struct task_struct *task)
 
 	raw_spin_unlock(&ctx->lock);
 
-	perf_event_task_sched_in(task, smp_processor_id());
+	perf_event_task_sched_in(task);
  out:
 	local_irq_restore(flags);
 }
diff --git a/kernel/sched.c b/kernel/sched.c
index 18cceeecce35..d6527ac0f6e7 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -2752,7 +2752,7 @@ static void finish_task_switch(struct rq *rq, struct task_struct *prev)
 	 */
 	prev_state = prev->state;
 	finish_arch_switch(prev);
-	perf_event_task_sched_in(current, cpu_of(rq));
+	perf_event_task_sched_in(current);
 	finish_lock_switch(rq, prev);
 
 	fire_sched_in_preempt_notifiers(current);
@@ -5266,7 +5266,7 @@ void scheduler_tick(void)
 	curr->sched_class->task_tick(rq, curr, 0);
 	raw_spin_unlock(&rq->lock);
 
-	perf_event_task_tick(curr, cpu);
+	perf_event_task_tick(curr);
 
 #ifdef CONFIG_SMP
 	rq->idle_at_tick = idle_cpu(cpu);
@@ -5480,7 +5480,7 @@ need_resched_nonpreemptible:
 
 	if (likely(prev != next)) {
 		sched_info_switch(prev, next);
-		perf_event_task_sched_out(prev, next, cpu);
+		perf_event_task_sched_out(prev, next);
 
 		rq->nr_switches++;
 		rq->curr = next;