author     Peter Zijlstra <a.p.zijlstra@chello.nl>    2009-05-08 12:52:21 -0400
committer  Ingo Molnar <mingo@elte.hu>                2009-05-08 14:36:57 -0400
commit     7fc23a5380797012e92a9633169440f2f4a21253 (patch)
tree       b136302efccd407ef4ee608de40042bedd0ab660 /kernel/perf_counter.c
parent     3611dfb8eda847c1c8e1a052f57206f7fddc6a7c (diff)
perf_counter: optimize perf_counter_task_tick()
perf_counter_task_tick() does way too much work to find out there's
nothing to do. Provide an easy short-circuit for the normal case where
there are no counters on the system.

[ Impact: micro-optimization ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <20090508170028.750619201@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
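
As an aside for readers following the logic rather than the kernel source itself, here is a minimal userspace sketch of the pattern the patch applies: one global atomic counter, bumped when a perf counter is created, dropped when it is freed, and read at the top of the tick path so the common "no counters anywhere" case returns immediately. The demo_* names and the use of C11 <stdatomic.h> are illustrative stand-ins for the kernel's atomic_t / nr_counters machinery, not code taken from the patch.

#include <stdatomic.h>

/* Hypothetical stand-in for the kernel's nr_counters bookkeeping. */
static atomic_int demo_nr_counters;

/* Mirrors the atomic_inc() the patch adds where a counter is set up. */
static void demo_counter_created(void)
{
	atomic_fetch_add(&demo_nr_counters, 1);
}

/* Mirrors the atomic_dec() the patch adds in free_counter(). */
static void demo_counter_freed(void)
{
	atomic_fetch_sub(&demo_nr_counters, 1);
}

/* Per-tick work: bail out early when no counters exist anywhere. */
static void demo_task_tick(void)
{
	if (atomic_load(&demo_nr_counters) == 0)
		return;	/* fast path: nothing to sched out, rotate, or sched in */

	/* ... the expensive sched-out / rotate / sched-in work goes here ... */
}
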
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 13 +++++++++++--
 1 file changed, 11 insertions(+), 2 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 60e55f0b48f4..fdb0d2421276 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -39,6 +39,7 @@ int perf_max_counters __read_mostly = 1;
 static int perf_reserved_percpu __read_mostly;
 static int perf_overcommit __read_mostly = 1;
 
+static atomic_t nr_counters __read_mostly;
 static atomic_t nr_mmap_tracking __read_mostly;
 static atomic_t nr_munmap_tracking __read_mostly;
 static atomic_t nr_comm_tracking __read_mostly;
@@ -1076,8 +1077,14 @@ static void rotate_ctx(struct perf_counter_context *ctx)
 
 void perf_counter_task_tick(struct task_struct *curr, int cpu)
 {
-	struct perf_cpu_context *cpuctx = &per_cpu(perf_cpu_context, cpu);
-	struct perf_counter_context *ctx = &curr->perf_counter_ctx;
+	struct perf_cpu_context *cpuctx;
+	struct perf_counter_context *ctx;
+
+	if (!atomic_read(&nr_counters))
+		return;
+
+	cpuctx = &per_cpu(perf_cpu_context, cpu);
+	ctx = &curr->perf_counter_ctx;
 
 	perf_counter_cpu_sched_out(cpuctx);
 	perf_counter_task_sched_out(curr, cpu);
@@ -1197,6 +1204,7 @@ static void free_counter(struct perf_counter *counter)
 {
 	perf_pending_sync(counter);
 
+	atomic_dec(&nr_counters);
 	if (counter->hw_event.mmap)
 		atomic_dec(&nr_mmap_tracking);
 	if (counter->hw_event.munmap)
@@ -2861,6 +2869,7 @@ done:
 
 	counter->pmu = pmu;
 
+	atomic_inc(&nr_counters);
 	if (counter->hw_event.mmap)
 		atomic_inc(&nr_mmap_tracking);
 	if (counter->hw_event.munmap)