author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2010-09-07 12:32:22 -0400
committer	Ingo Molnar <mingo@elte.hu>	2010-09-09 14:46:34 -0400
commit	1b9a644fece117cfa5474a2388d6b89d1baf8ddf
tree	c89be6de269578501a365f7526a81fb64cb6ba93	/kernel/perf_event.c
parent	89a1e18731959e9953fae15ddc1a983eb15a4f19
perf: Optimize context ops
Assuming we don't mix events of different pmus onto a single context (with
the exception of software events inside a hardware group), we can now assume
that all events on a particular context belong to the same pmu, hence we can
disable the pmu across entire context operations. This reduces the number of
hardware writes.

The exception for swevents comes from the fact that the sw pmu disable is a
nop.

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: paulus <paulus@samba.org>
Cc: stephane eranian <eranian@googlemail.com>
Cc: Robert Richter <robert.richter@amd.com>
Cc: Frederic Weisbecker <fweisbec@gmail.com>
Cc: Lin Ming <ming.m.lin@intel.com>
Cc: Yanmin <yanmin_zhang@linux.intel.com>
LKML-Reference: <new-submission>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
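
For illustration only, a minimal userspace sketch (not kernel code; the names
fake_pmu and sched_out_one_event are invented here) of why the outer
disable/enable bracket helps: perf_pmu_disable()/perf_pmu_enable() are
reference counted, so the per-event pairs nested inside one outer pair no
longer touch the hardware, leaving a single disable and a single re-enable per
context operation. For the software pmu the disable callback is a nop, which
is why software events inside a hardware group don't break the assumption.

/*
 * Illustration only -- not kernel code.  Models the reference-counted
 * disable/enable used by perf_pmu_disable()/perf_pmu_enable(): only the
 * outermost disable and the matching outermost enable reach the hardware,
 * so one bracket around a whole context operation absorbs the per-event
 * brackets nested inside it.
 */
#include <stdio.h>

struct fake_pmu {			/* invented for this sketch */
	int disable_count;		/* nesting depth (per-cpu in the kernel) */
	int hw_writes;			/* simulated hardware accesses */
};

static void fake_pmu_disable(struct fake_pmu *pmu)
{
	if (!pmu->disable_count++)	/* only the outermost call writes hw */
		pmu->hw_writes++;
}

static void fake_pmu_enable(struct fake_pmu *pmu)
{
	if (!--pmu->disable_count)	/* only the outermost enable writes hw */
		pmu->hw_writes++;
}

static void sched_out_one_event(struct fake_pmu *pmu)
{
	fake_pmu_disable(pmu);		/* per-event bracket, now nested */
	/* ... reprogram the counter for this event ... */
	fake_pmu_enable(pmu);
}

int main(void)
{
	struct fake_pmu pmu = { 0, 0 };
	int i;

	fake_pmu_disable(&pmu);		/* the outer bracket this patch adds */
	for (i = 0; i < 3; i++)
		sched_out_one_event(&pmu);
	fake_pmu_enable(&pmu);

	printf("hardware writes: %d\n", pmu.hw_writes);	/* prints 2, not 6 */
	return 0;
}

Without the outer pair, the same three events would cost six simulated
hardware writes instead of two.
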
Diffstat (limited to 'kernel/perf_event.c')
-rw-r--r--	kernel/perf_event.c	6
1 file changed, 6 insertions, 0 deletions
diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 357ee8d5e8ae..9819a69a61a1 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1065,6 +1065,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 	struct perf_event *event;
 
 	raw_spin_lock(&ctx->lock);
+	perf_pmu_disable(ctx->pmu);
 	ctx->is_active = 0;
 	if (likely(!ctx->nr_events))
 		goto out;
@@ -1083,6 +1084,7 @@ static void ctx_sched_out(struct perf_event_context *ctx,
 			group_sched_out(event, cpuctx, ctx);
 	}
 out:
+	perf_pmu_enable(ctx->pmu);
 	raw_spin_unlock(&ctx->lock);
 }
 
@@ -1400,6 +1402,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
 	if (cpuctx->task_ctx == ctx)
 		return;
 
+	perf_pmu_disable(ctx->pmu);
 	/*
 	 * We want to keep the following priority order:
 	 * cpu pinned (that don't need to move), task pinned,
@@ -1418,6 +1421,7 @@ void perf_event_context_sched_in(struct perf_event_context *ctx)
 	 * cpu-context we got scheduled on is actually rotating.
 	 */
 	perf_pmu_rotate_start(ctx->pmu);
+	perf_pmu_enable(ctx->pmu);
 }
 
 /*
@@ -1629,6 +1633,7 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
 			rotate = 1;
 	}
 
+	perf_pmu_disable(cpuctx->ctx.pmu);
 	perf_ctx_adjust_freq(&cpuctx->ctx, cpuctx->timer_interval);
 	if (ctx)
 		perf_ctx_adjust_freq(ctx, cpuctx->timer_interval);
@@ -1649,6 +1654,7 @@ static enum hrtimer_restart perf_event_context_tick(struct hrtimer *timer)
 		task_ctx_sched_in(ctx, EVENT_FLEXIBLE);
 
 done:
+	perf_pmu_enable(cpuctx->ctx.pmu);
 	hrtimer_forward_now(timer, ns_to_ktime(cpuctx->timer_interval));
 
 	return restart;