author    Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-05-20 06:21:22 -0400
committer Ingo Molnar <mingo@elte.hu>              2009-05-20 06:43:34 -0400
commit    afedadf23a2c90f3ba0d963282cbe6a6be129494
tree      3fa284b19482158c0a8dab8fa79bf41180ebd256
parent    b986d7ec0f8b7ea3cc7366d80a137fbe839df227
perf_counter: Optimize sched in/out of counters
Avoid a function call for !group counters by directly calling the
counter function.

[ Impact: micro-optimize the code ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.511933670@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
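For illustration, a minimal standalone C sketch of the dispatch this patch
introduces; the struct layout, the helper bodies, and the name sched_out_one
are simplified stand-ins, not the kernel's actual definitions:

#include <stdio.h>

/* Simplified stand-ins for the kernel's perf_counter structures. */
struct counter {
	struct counter *group_leader;	/* points back to self for a leader */
	const char *name;
};

static void counter_sched_out(struct counter *c)
{
	printf("counter_sched_out(%s)\n", c->name);
}

static void group_sched_out(struct counter *leader)
{
	/* the real function also walks leader's sibling list */
	printf("group_sched_out(%s)\n", leader->name);
}

/* The dispatch introduced by the patch: siblings skip the group path. */
static void sched_out_one(struct counter *c)
{
	if (c != c->group_leader)
		counter_sched_out(c);
	else
		group_sched_out(c);
}

int main(void)
{
	struct counter leader  = { .group_leader = &leader, .name = "leader" };
	struct counter sibling = { .group_leader = &leader, .name = "sibling" };

	sched_out_one(&leader);		/* group path */
	sched_out_one(&sibling);	/* direct path, one call frame saved */
	return 0;
}

A counter that is not its own group leader is scheduled with one direct call
instead of going through the group function, which is the call the changelog
says is avoided.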
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	25
1 file changed, 19 insertions(+), 6 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 473ed2cafbfc..69d4de815963 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -826,8 +826,12 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
 
 	perf_disable();
 	if (ctx->nr_active) {
-		list_for_each_entry(counter, &ctx->counter_list, list_entry)
-			group_sched_out(counter, cpuctx, ctx);
+		list_for_each_entry(counter, &ctx->counter_list, list_entry) {
+			if (counter != counter->group_leader)
+				counter_sched_out(counter, cpuctx, ctx);
+			else
+				group_sched_out(counter, cpuctx, ctx);
+		}
 	}
 	perf_enable();
  out:
@@ -903,8 +907,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 		if (counter->cpu != -1 && counter->cpu != cpu)
 			continue;
 
-		if (group_can_go_on(counter, cpuctx, 1))
-			group_sched_in(counter, cpuctx, ctx, cpu);
+		if (counter != counter->group_leader)
+			counter_sched_in(counter, cpuctx, ctx, cpu);
+		else {
+			if (group_can_go_on(counter, cpuctx, 1))
+				group_sched_in(counter, cpuctx, ctx, cpu);
+		}
 
 		/*
 		 * If this pinned group hasn't been scheduled,
@@ -932,9 +940,14 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 		if (counter->cpu != -1 && counter->cpu != cpu)
 			continue;
 
-		if (group_can_go_on(counter, cpuctx, can_add_hw)) {
-			if (group_sched_in(counter, cpuctx, ctx, cpu))
-				can_add_hw = 0;
+		if (counter != counter->group_leader) {
+			if (counter_sched_in(counter, cpuctx, ctx, cpu))
+				can_add_hw = 0;
+		} else {
+			if (group_can_go_on(counter, cpuctx, can_add_hw)) {
+				if (group_sched_in(counter, cpuctx, ctx, cpu))
+					can_add_hw = 0;
+			}
 		}
 	}
 	perf_enable();
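
For the flexible-counter loop in the last hunk, a hypothetical self-contained
sketch of the same dispatch threaded through the can_add_hw backoff; every
type and helper below is a simplified stand-in, not the kernel's actual code:

#include <stdio.h>

struct counter {
	struct counter *group_leader;
	int cpu;			/* -1 means "any CPU" */
	const char *name;
};

static int hw_slots = 1;		/* pretend PMU with one free slot */

static int counter_sched_in(struct counter *c)
{
	if (hw_slots == 0)
		return -1;		/* PMU full */
	hw_slots--;
	printf("scheduled in: %s\n", c->name);
	return 0;
}

static int group_sched_in(struct counter *leader)
{
	/* the real function also schedules the leader's siblings */
	return counter_sched_in(leader);
}

static int group_can_go_on(struct counter *c, int can_add_hw)
{
	(void)c;	/* the real check also considers pinned/exclusive state */
	return can_add_hw;
}

static void sched_in_flexible(struct counter **list, int n, int cpu)
{
	int can_add_hw = 1;

	for (int i = 0; i < n; i++) {
		struct counter *c = list[i];

		if (c->cpu != -1 && c->cpu != cpu)
			continue;	/* bound to another CPU */

		if (c != c->group_leader) {
			/* sibling: direct call, no group dispatch */
			if (counter_sched_in(c))
				can_add_hw = 0;
		} else if (group_can_go_on(c, can_add_hw)) {
			if (group_sched_in(c))
				can_add_hw = 0;
		}
	}
}

int main(void)
{
	struct counter a = { .group_leader = &a, .cpu = -1, .name = "a" };
	struct counter b = { .group_leader = &b, .cpu = -1, .name = "b" };
	struct counter *list[] = { &a, &b };

	sched_in_flexible(list, 2, 0);	/* "a" fits; "b" trips can_add_hw */
	return 0;
}

A failed hardware schedule clears can_add_hw, so later group leaders are not
even attempted on the PMU; only the leader path consults group_can_go_on(),
matching the shape of the patched loop.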