author	Paul Mackerras <paulus@samba.org>	2009-05-12 07:59:01 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-12 09:31:06 -0400
commit	e758a33d6fc5b9d6a3ae489863d04fcecad8120b (patch)
tree	3345d35fd5c9ee41a2f5a22fc5795672c0db7c2b	/kernel/perf_counter.c
parent	615a3f1e055ac9b0ae74e1f935a12ea2cfe2a2ad (diff)
perf_counter: call hw_perf_save_disable/restore around group_sched_in
I noticed that when enabling a group via the PERF_COUNTER_IOC_ENABLE ioctl on the group leader, the counters weren't enabled and counting immediately on return from the ioctl, but did start counting a little while later (presumably after a context switch).

The reason was that __perf_counter_enable calls group_sched_in, which calls hw_perf_group_sched_in, which on powerpc assumes that the caller has called hw_perf_save_disable already. Until commit 46d686c6 ("perf_counter: put whole group on when enabling group leader") it was true that all callers of group_sched_in had called hw_perf_save_disable first, and the powerpc hw_perf_group_sched_in relies on that (there isn't an x86 version).

This fixes the problem by putting calls to hw_perf_save_disable / hw_perf_restore around the calls to group_sched_in and counter_sched_in in __perf_counter_enable. Having the calls to hw_perf_save_disable/restore around the counter_sched_in call is harmless and makes this call consistent with the other call sites of counter_sched_in, which have all called hw_perf_save_disable first.

[ Impact: more precise counter group disable/enable functionality ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <18953.25733.53359.147452@cargo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
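For context, the pattern the patch applies can be sketched as follows. This is an illustrative sketch only, not code taken from the patch: the function name enable_group_atomically is hypothetical, and it assumes the 2009-era kernel/perf_counter.c interfaces named above (hw_perf_save_disable, hw_perf_restore, group_sched_in, counter_sched_in). The point is that the PMU is quiesced before the group is programmed, so every counter in the group starts counting together when the PMU is re-enabled, which is what the powerpc hw_perf_group_sched_in expects of its caller.

/* Hypothetical sketch of the disable/program/restore pattern (not from the patch). */
static int enable_group_atomically(struct perf_counter *counter,
				   struct perf_cpu_context *cpuctx,
				   struct perf_counter_context *ctx)
{
	unsigned long pmuflags;
	int err;

	pmuflags = hw_perf_save_disable();	/* quiesce the PMU first */
	if (counter == counter->group_leader)
		err = group_sched_in(counter, cpuctx, ctx, smp_processor_id());
	else
		err = counter_sched_in(counter, cpuctx, ctx, smp_processor_id());
	hw_perf_restore(pmuflags);		/* all programmed counters start here */

	return err;
}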
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	19
1 file changed, 12 insertions, 7 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 5ea0240adab2..ff166c11b69a 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -663,6 +663,7 @@ static void __perf_counter_enable(void *info)
 	struct perf_cpu_context *cpuctx = &__get_cpu_var(perf_cpu_context);
 	struct perf_counter_context *ctx = counter->ctx;
 	struct perf_counter *leader = counter->group_leader;
+	unsigned long pmuflags;
 	unsigned long flags;
 	int err;
 
@@ -689,14 +690,18 @@ static void __perf_counter_enable(void *info)
 	if (leader != counter && leader->state != PERF_COUNTER_STATE_ACTIVE)
 		goto unlock;
 
-	if (!group_can_go_on(counter, cpuctx, 1))
+	if (!group_can_go_on(counter, cpuctx, 1)) {
 		err = -EEXIST;
-	else if (counter == leader)
-		err = group_sched_in(counter, cpuctx, ctx,
-				     smp_processor_id());
-	else
-		err = counter_sched_in(counter, cpuctx, ctx,
-				       smp_processor_id());
+	} else {
+		pmuflags = hw_perf_save_disable();
+		if (counter == leader)
+			err = group_sched_in(counter, cpuctx, ctx,
+					     smp_processor_id());
+		else
+			err = counter_sched_in(counter, cpuctx, ctx,
+					       smp_processor_id());
+		hw_perf_restore(pmuflags);
+	}
 
 	if (err) {
 		/*