path: root/kernel/perf_event.c
author    Peter Zijlstra <a.p.zijlstra@chello.nl>    2010-10-14 11:57:23 -0400
committer Ingo Molnar <mingo@elte.hu>                2010-10-18 13:58:58 -0400
commit    82cd6def9806dcb6a325fb6abbc1d61388a15f6a (patch)
tree      569ce440f8a0619adb8c3791d6b68eecdb2fd29c /kernel/perf_event.c
parent    8b92538d84e50062560ba33adbaed7887b6e4a42 (diff)
perf: Use jump_labels to optimize the scheduler hooks
Trades a call + conditional + ret for an unconditional jmp.

Acked-by: Frederic Weisbecker <fweisbec@gmail.com>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
LKML-Reference: <20101014203625.501657727@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
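The change in this file only renames the hooks to __perf_event_task_sched_in()/__perf_event_task_sched_out() and swaps the old nr_events count for jump_label_inc()/jump_label_dec() on perf_task_events; the scheduler-facing wrappers live in a header outside this file (the diffstat below is limited to kernel/perf_event.c). As a rough sketch of the pattern, assuming the 2010-era JUMP_LABEL(key, label) interface with an atomic_t key (an illustration, not the actual header hunk):

    extern atomic_t perf_task_events;

    /*
     * Sketch only: with jump labels, the JUMP_LABEL() site compiles to a
     * nop that is live-patched into an unconditional jmp once
     * jump_label_inc() makes perf_task_events non-zero; the generic
     * fallback tests the atomic_t instead. Either way, the common
     * no-events path in the scheduler no longer pays a function call.
     */
    static inline void perf_event_task_sched_out(struct task_struct *task,
                                                 struct task_struct *next)
    {
            JUMP_LABEL(&perf_task_events, have_events);
            return;

    have_events:
            __perf_event_task_sched_out(task, next);
    }

The inc/dec pairing visible in the diff acts as a reference count: the branch stays patched in only while at least one PERF_ATTACH_TASK event exists.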
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--    kernel/perf_event.c    24
1 file changed, 9 insertions(+), 15 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 856e20baf13f..f7febb02ab97 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -34,7 +34,7 @@
 
 #include <asm/irq_regs.h>
 
-static atomic_t nr_events __read_mostly;
+atomic_t perf_task_events __read_mostly;
 static atomic_t nr_mmap_events __read_mostly;
 static atomic_t nr_comm_events __read_mostly;
 static atomic_t nr_task_events __read_mostly;
@@ -1311,8 +1311,8 @@ void perf_event_context_sched_out(struct task_struct *task, int ctxn,
  * accessing the event control register. If a NMI hits, then it will
  * not restart the event.
  */
-void perf_event_task_sched_out(struct task_struct *task,
-                               struct task_struct *next)
+void __perf_event_task_sched_out(struct task_struct *task,
+                                 struct task_struct *next)
 {
         int ctxn;
 
@@ -1340,14 +1340,6 @@ static void task_ctx_sched_out(struct perf_event_context *ctx,
 /*
  * Called with IRQs disabled
  */
-static void __perf_event_task_sched_out(struct perf_event_context *ctx)
-{
-        task_ctx_sched_out(ctx, EVENT_ALL);
-}
-
-/*
- * Called with IRQs disabled
- */
 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
                               enum event_type_t event_type)
 {
@@ -1494,7 +1486,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
  * accessing the event control register. If a NMI hits, then it will
  * keep the event running.
  */
-void perf_event_task_sched_in(struct task_struct *task)
+void __perf_event_task_sched_in(struct task_struct *task)
 {
         struct perf_event_context *ctx;
         int ctxn;
@@ -2216,7 +2208,8 @@ static void free_event(struct perf_event *event)
         irq_work_sync(&event->pending);
 
         if (!event->parent) {
-                atomic_dec(&nr_events);
+                if (event->attach_state & PERF_ATTACH_TASK)
+                        jump_label_dec(&perf_task_events);
                 if (event->attr.mmap || event->attr.mmap_data)
                         atomic_dec(&nr_mmap_events);
                 if (event->attr.comm)
@@ -5354,7 +5347,8 @@ done:
         event->pmu = pmu;
 
         if (!event->parent) {
-                atomic_inc(&nr_events);
+                if (event->attach_state & PERF_ATTACH_TASK)
+                        jump_label_inc(&perf_task_events);
                 if (event->attr.mmap || event->attr.mmap_data)
                         atomic_inc(&nr_mmap_events);
                 if (event->attr.comm)
@@ -5849,7 +5843,7 @@ static void perf_event_exit_task_context(struct task_struct *child, int ctxn)
          * our context.
          */
         child_ctx = child->perf_event_ctxp[ctxn];
-        __perf_event_task_sched_out(child_ctx);
+        task_ctx_sched_out(child_ctx, EVENT_ALL);
 
         /*
          * Take the context lock here so that if find_get_context is