path: root/kernel/perf_counter.c
author	Paul Mackerras <paulus@samba.org>	2009-05-10 22:08:02 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-11 06:10:53 -0400
commit	6751b71ea2c7ab8c0d65f01973a3fc8ea16992f4 (patch)
tree	d4342bd89a10391caf648828ddea7550de6fc82d	/kernel/perf_counter.c
parent	8823392360dc4992f87bf4c623834d315f297493 (diff)
perf_counter: Put whole group on when enabling group leader
Currently, if you have a group where the leader is disabled and there are siblings that are enabled, and then you enable the leader, we only put the leader on the PMU, and not its enabled siblings. This is incorrect, since the enabled group members should be all on or all off at any given point.

This fixes it by adding a call to group_sched_in in __perf_counter_enable in the case where we're enabling a group leader.

To avoid the need for a forward declaration this also moves group_sched_in up before __perf_counter_enable. The actual content of group_sched_in is unchanged by this patch.

[ Impact: fix bug in counter enable code ]

Signed-off-by: Paul Mackerras <paulus@samba.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
LKML-Reference: <18951.34946.451546.691693@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
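The heart of the change is the dispatch in __perf_counter_enable: when the counter being enabled is its own group leader, the whole group is now scheduled in via group_sched_in rather than just the single counter. In abridged form (the surrounding locking and error handling in __perf_counter_enable are omitted here for illustration), the enable path after this patch reads:

	if (!group_can_go_on(counter, cpuctx, 1))
		err = -EEXIST;
	else if (counter == leader)
		/* Enabling the leader: put the whole group on the PMU at once. */
		err = group_sched_in(counter, cpuctx, ctx, smp_processor_id());
	else
		/* Enabling a sibling: scheduling in just this counter suffices. */
		err = counter_sched_in(counter, cpuctx, ctx, smp_processor_id());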
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	99
1 files changed, 51 insertions, 48 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index d850a1fb8d4c..a5bdc93ac477 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -419,6 +419,54 @@ counter_sched_in(struct perf_counter *counter,
 	return 0;
 }
 
+static int
+group_sched_in(struct perf_counter *group_counter,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_counter_context *ctx,
+	       int cpu)
+{
+	struct perf_counter *counter, *partial_group;
+	int ret;
+
+	if (group_counter->state == PERF_COUNTER_STATE_OFF)
+		return 0;
+
+	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+	if (ret)
+		return ret < 0 ? ret : 0;
+
+	group_counter->prev_state = group_counter->state;
+	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
+		return -EAGAIN;
+
+	/*
+	 * Schedule in siblings as one group (if any):
+	 */
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		counter->prev_state = counter->state;
+		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
+			partial_group = counter;
+			goto group_error;
+		}
+	}
+
+	return 0;
+
+group_error:
+	/*
+	 * Groups can be scheduled in as one unit only, so undo any
+	 * partial group before returning:
+	 */
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
+		if (counter == partial_group)
+			break;
+		counter_sched_out(counter, cpuctx, ctx);
+	}
+	counter_sched_out(group_counter, cpuctx, ctx);
+
+	return -EAGAIN;
+}
+
 /*
  * Return 1 for a group consisting entirely of software counters,
  * 0 if the group contains any hardware counters.
@@ -643,6 +691,9 @@ static void __perf_counter_enable(void *info)
 
 	if (!group_can_go_on(counter, cpuctx, 1))
 		err = -EEXIST;
+	else if (counter == leader)
+		err = group_sched_in(counter, cpuctx, ctx,
+				     smp_processor_id());
 	else
 		err = counter_sched_in(counter, cpuctx, ctx,
 				       smp_processor_id());
@@ -791,54 +842,6 @@ static void perf_counter_cpu_sched_out(struct perf_cpu_context *cpuctx)
 	__perf_counter_sched_out(&cpuctx->ctx, cpuctx);
 }
 
-static int
-group_sched_in(struct perf_counter *group_counter,
-	       struct perf_cpu_context *cpuctx,
-	       struct perf_counter_context *ctx,
-	       int cpu)
-{
-	struct perf_counter *counter, *partial_group;
-	int ret;
-
-	if (group_counter->state == PERF_COUNTER_STATE_OFF)
-		return 0;
-
-	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
-	if (ret)
-		return ret < 0 ? ret : 0;
-
-	group_counter->prev_state = group_counter->state;
-	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
-		return -EAGAIN;
-
-	/*
-	 * Schedule in siblings as one group (if any):
-	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-		counter->prev_state = counter->state;
-		if (counter_sched_in(counter, cpuctx, ctx, cpu)) {
-			partial_group = counter;
-			goto group_error;
-		}
-	}
-
-	return 0;
-
-group_error:
-	/*
-	 * Groups can be scheduled in as one unit only, so undo any
-	 * partial group before returning:
-	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
-		if (counter == partial_group)
-			break;
-		counter_sched_out(counter, cpuctx, ctx);
-	}
-	counter_sched_out(group_counter, cpuctx, ctx);
-
-	return -EAGAIN;
-}
-
 static void
 __perf_counter_sched_in(struct perf_counter_context *ctx,
 			struct perf_cpu_context *cpuctx, int cpu)