author		Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-01-28 07:57:44 -0500
committer	Ingo Molnar <mingo@elte.hu>			2010-02-04 03:59:49 -0500
commit		9717e6cd3db22eade7dbae0fc9235c66325a7132 (patch)
tree		7227e92b67e91d6b7b8270e397cdf9e1e245a789 /kernel/perf_event.c
parent		f24bb999d2b9f2950e5cac5b69bffedf73c24ea4 (diff)
perf_events: Optimize perf_event_task_tick()
Pretty much all of the calls do perf_disable()/perf_enable() cycles; pull
that out into perf_event_task_tick() to cut back on hardware programming.
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Mike Galbraith <efault@gmx.de>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	8
1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 40f8b07c5601..087025fe3ba1 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1573,12 +1573,8 @@ static void rotate_ctx(struct perf_event_context *ctx)
 	raw_spin_lock(&ctx->lock);
 
 	/* Rotate the first entry last of non-pinned groups */
-	perf_disable();
-
 	list_rotate_left(&ctx->flexible_groups);
 
-	perf_enable();
-
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -1593,6 +1589,8 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpuctx = &__get_cpu_var(perf_cpu_context);
 	ctx = curr->perf_event_ctxp;
 
+	perf_disable();
+
 	perf_ctx_adjust_freq(&cpuctx->ctx);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx);
@@ -1608,6 +1606,8 @@ void perf_event_task_tick(struct task_struct *curr)
 	cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE);
 	if (ctx)
 		task_ctx_sched_in(curr, EVENT_FLEXIBLE);
+
+	perf_enable();
 }
 
 static int event_enable_on_exec(struct perf_event *event,
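
The change follows a common hoisting pattern: instead of each callee bracketing its own work with perf_disable()/perf_enable(), each of which ends up reprogramming the PMU hardware, the caller takes a single disable/enable pair around everything done in one tick. Below is a minimal user-space sketch of the idea; pmu_disable()/pmu_enable() and the callees are hypothetical stand-ins, not the kernel's API, and the counter simply makes the saved hardware writes visible.

/* hoist_demo.c -- illustrative sketch only, not kernel code.
 * pmu_disable()/pmu_enable() stand in for perf_disable()/perf_enable();
 * the callees stand in for the work done under perf_event_task_tick().
 */
#include <stdio.h>

static int pmu_writes;			/* counts simulated PMU reprogrammings */

static void pmu_disable(void) { pmu_writes++; }
static void pmu_enable(void)  { pmu_writes++; }

/* Before: each callee brackets its own work, so a tick that makes
 * four such calls costs eight PMU writes. */
static void callee_before(void)
{
	pmu_disable();
	/* ... touch event state ... */
	pmu_enable();
}

/* After: the callee assumes the caller already disabled the PMU. */
static void callee_after(void)
{
	/* ... touch event state, PMU is already quiesced ... */
}

/* The caller hoists one disable/enable pair around all the work,
 * so the same tick costs two PMU writes. */
static void task_tick_after(void)
{
	pmu_disable();
	for (int i = 0; i < 4; i++)
		callee_after();
	pmu_enable();
}

int main(void)
{
	for (int i = 0; i < 4; i++)
		callee_before();
	printf("before: %d PMU writes\n", pmu_writes);	/* 8 */

	pmu_writes = 0;
	task_tick_after();
	printf("after:  %d PMU writes\n", pmu_writes);	/* 2 */
	return 0;
}

With four callees per tick, the hoisted version does two PMU writes instead of eight; on real hardware each disable/enable is a costly PMU programming operation, which is what the commit message means by "cut back on hardware programming".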