author     Paul Mackerras <paulus@samba.org>    2009-08-25 01:17:20 -0400
committer  Ingo Molnar <mingo@elte.hu>          2009-08-25 03:34:38 -0400
commit     fa289beca9de9119c7760bd984f3640da21bc94c
tree       bb952b339092ac18dd1057e1edc89ead56777a98  /kernel/perf_counter.c
parent     96d6e48bc6b38342a59ccd23e25907d12caaeaf8
perf_counter: Start counting time enabled when group leader gets enabled
Currently, if a group is created where the group leader is
initially disabled but a non-leader member is initially
enabled, and then the leader is subsequently enabled some time
later, the time_enabled for the non-leader member will reflect
the whole time since it was created, not just the time since
the leader was enabled.
This is incorrect, because all of the members are effectively
disabled while the leader is disabled, since none of the
members can go on the PMU if the leader can't.
Thus we have to update the ->tstamp_enabled for all the enabled
group members when a group leader is enabled, so that the
time_enabled computation only counts the time since the leader
was enabled.
Similarly, when disabling a group leader we have to update the
time_enabled and time_running for all of the group members.
Also, in update_counter_times, we have to treat a counter whose
group leader is disabled as being disabled.
Reported-by: Stephane Eranian <eranian@googlemail.com>
Signed-off-by: Paul Mackerras <paulus@samba.org>
Acked-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: <stable@kernel.org>
LKML-Reference: <19091.29664.342227.445006@drongo.ozlabs.ibm.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
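To make the accounting concrete, here is a minimal user-space sketch in plain C (illustrative only: the toy_counter struct and the mark_enabled()/update_times() helpers are simplified stand-ins for the kernel's perf_counter fields and functions, not real kernel APIs). It replays the scenario from the changelog: a group member created at t=0 under a leader that is only enabled at t=200, then read at t=300.

/* Toy model of the tstamp_enabled / total_time_enabled arithmetic. */
#include <stdio.h>

typedef unsigned long long u64;

struct toy_counter {
        u64 tstamp_enabled;     /* "ctx->time" when the counter became (effectively) enabled */
        u64 total_time_enabled; /* accumulated enabled time */
};

/* Loosely mirrors the new __perf_counter_mark_enabled(): restamp at time "now". */
static void mark_enabled(struct toy_counter *c, u64 now)
{
        c->tstamp_enabled = now - c->total_time_enabled;
}

/* Loosely mirrors update_counter_times(): fold elapsed time into the total. */
static void update_times(struct toy_counter *c, u64 now)
{
        c->total_time_enabled = now - c->tstamp_enabled;
}

int main(void)
{
        struct toy_counter member = { 0, 0 };   /* created at t=0, leader still disabled */

        /* The leader is enabled at t=200; with the fix the member is restamped here. */
        mark_enabled(&member, 200);

        /* A read at t=300 now reports 100 units of enabled time. */
        update_times(&member, 300);
        printf("time_enabled = %llu\n", member.total_time_enabled);
        return 0;
}

Skipping the mark_enabled() call reproduces the bug described above: tstamp_enabled keeps its creation-time value of 0, so the same read reports 300 units instead of 100.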
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 43 ++++++++++++++++++++++++++++++++++-------------
 1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f274e1959885..06bf6a4f2608 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -469,7 +469,8 @@ static void update_counter_times(struct perf_counter *counter)
 	struct perf_counter_context *ctx = counter->ctx;
 	u64 run_end;
 
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
+	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
+	    counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
 		return;
 
 	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
@@ -518,7 +519,7 @@ static void __perf_counter_disable(void *info)
 	 */
 	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
 		update_context_time(ctx);
-		update_counter_times(counter);
+		update_group_times(counter);
 		if (counter == counter->group_leader)
 			group_sched_out(counter, cpuctx, ctx);
 		else
@@ -573,7 +574,7 @@ static void perf_counter_disable(struct perf_counter *counter)
 	 * in, so we can change the state safely.
 	 */
 	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
-		update_counter_times(counter);
+		update_group_times(counter);
 		counter->state = PERF_COUNTER_STATE_OFF;
 	}
 
@@ -851,6 +852,27 @@ retry:
 }
 
 /*
+ * Put a counter into inactive state and update time fields.
+ * Enabling the leader of a group effectively enables all
+ * the group members that aren't explicitly disabled, so we
+ * have to update their ->tstamp_enabled also.
+ * Note: this works for group members as well as group leaders
+ * since the non-leader members' sibling_lists will be empty.
+ */
+static void __perf_counter_mark_enabled(struct perf_counter *counter,
+					struct perf_counter_context *ctx)
+{
+	struct perf_counter *sub;
+
+	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
+	list_for_each_entry(sub, &counter->sibling_list, list_entry)
+		if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
+			sub->tstamp_enabled =
+				ctx->time - sub->total_time_enabled;
+}
+
+/*
  * Cross CPU call to enable a performance counter
  */
 static void __perf_counter_enable(void *info)
@@ -877,8 +899,7 @@ static void __perf_counter_enable(void *info)
 
 	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
 		goto unlock;
-	counter->state = PERF_COUNTER_STATE_INACTIVE;
-	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
+	__perf_counter_mark_enabled(counter, ctx);
 
 	/*
 	 * If the counter is in a group and isn't the group leader,
@@ -971,11 +992,9 @@ static void perf_counter_enable(struct perf_counter *counter)
 	 * Since we have the lock this context can't be scheduled
 	 * in, so we can change the state safely.
 	 */
-	if (counter->state == PERF_COUNTER_STATE_OFF) {
-		counter->state = PERF_COUNTER_STATE_INACTIVE;
-		counter->tstamp_enabled =
-			ctx->time - counter->total_time_enabled;
-	}
+	if (counter->state == PERF_COUNTER_STATE_OFF)
+		__perf_counter_mark_enabled(counter, ctx);
+
 out:
 	spin_unlock_irq(&ctx->lock);
 }
@@ -1479,9 +1498,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 		counter->attr.enable_on_exec = 0;
 		if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
 			continue;
-		counter->state = PERF_COUNTER_STATE_INACTIVE;
-		counter->tstamp_enabled =
-			ctx->time - counter->total_time_enabled;
+		__perf_counter_mark_enabled(counter, ctx);
 		enabled = 1;
 	}
 