Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--  kernel/perf_counter.c | 43 ++++++++++++++++++++++++++++++-------------
1 file changed, 30 insertions(+), 13 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index f274e1959885..06bf6a4f2608 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -469,7 +469,8 @@ static void update_counter_times(struct perf_counter *counter)
 	struct perf_counter_context *ctx = counter->ctx;
 	u64 run_end;
 
-	if (counter->state < PERF_COUNTER_STATE_INACTIVE)
+	if (counter->state < PERF_COUNTER_STATE_INACTIVE ||
+	    counter->group_leader->state < PERF_COUNTER_STATE_INACTIVE)
 		return;
 
 	counter->total_time_enabled = ctx->time - counter->tstamp_enabled;
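Why the extra check matters: a counter's enabled time is derived lazily as ctx->time - tstamp_enabled, so a group member whose leader has been disabled would otherwise keep accruing enabled time it can never use. Freezing the member is implemented simply by refusing to update it. A minimal userspace model of that behaviour (illustrative names and values, not the kernel code):

	#include <stdio.h>

	enum state { STATE_OFF = -1, STATE_INACTIVE = 0, STATE_ACTIVE = 1 };

	struct counter {
		enum state state;
		struct counter *group_leader;
		unsigned long long total_time_enabled;
		unsigned long long tstamp_enabled;
	};

	/* Models the patched update_counter_times(): skip the update when
	 * the counter, or now also its group leader, is below INACTIVE. */
	static void update_times(struct counter *c, unsigned long long ctx_time)
	{
		if (c->state < STATE_INACTIVE ||
		    c->group_leader->state < STATE_INACTIVE)
			return;
		c->total_time_enabled = ctx_time - c->tstamp_enabled;
	}

	int main(void)
	{
		struct counter leader = { STATE_ACTIVE, &leader, 0, 100 };
		struct counter member = { STATE_ACTIVE, &leader, 0, 100 };

		update_times(&member, 250);	/* 250 - 100 = 150 units enabled */
		leader.state = STATE_OFF;	/* leader disabled: group frozen */
		update_times(&member, 900);	/* early return: stays at 150 */
		printf("member total_time_enabled = %llu\n",
		       member.total_time_enabled);
		return 0;
	}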
@@ -518,7 +519,7 @@ static void __perf_counter_disable(void *info)
 	 */
 	if (counter->state >= PERF_COUNTER_STATE_INACTIVE) {
 		update_context_time(ctx);
-		update_counter_times(counter);
+		update_group_times(counter);
 		if (counter == counter->group_leader)
 			group_sched_out(counter, cpuctx, ctx);
 		else
@@ -573,7 +574,7 @@ static void perf_counter_disable(struct perf_counter *counter)
 	 * in, so we can change the state safely.
 	 */
 	if (counter->state == PERF_COUNTER_STATE_INACTIVE) {
-		update_counter_times(counter);
+		update_group_times(counter);
 		counter->state = PERF_COUNTER_STATE_OFF;
 	}
 
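Both disable paths switch from update_counter_times() to update_group_times(), so a leader's siblings have their times frozen at the same instant as the leader itself rather than drifting until their own next update. For orientation, the shape of that existing helper, defined earlier in this same file, is roughly:

	static void update_group_times(struct perf_counter *leader)
	{
		struct perf_counter *counter;

		update_counter_times(leader);
		list_for_each_entry(counter, &leader->sibling_list, list_entry)
			update_counter_times(counter);
	}

That is, it updates the leader and then every sibling; combined with the new leader-state check in update_counter_times(), later calls on a disabled group become no-ops, which is exactly the freeze this patch wants.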
@@ -851,6 +852,27 @@ retry:
 }
 
 /*
+ * Put a counter into inactive state and update time fields.
+ * Enabling the leader of a group effectively enables all
+ * the group members that aren't explicitly disabled, so we
+ * have to update their ->tstamp_enabled also.
+ * Note: this works for group members as well as group leaders
+ * since the non-leader members' sibling_lists will be empty.
+ */
+static void __perf_counter_mark_enabled(struct perf_counter *counter,
+					struct perf_counter_context *ctx)
+{
+	struct perf_counter *sub;
+
+	counter->state = PERF_COUNTER_STATE_INACTIVE;
+	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
+	list_for_each_entry(sub, &counter->sibling_list, list_entry)
+		if (sub->state >= PERF_COUNTER_STATE_INACTIVE)
+			sub->tstamp_enabled =
+				ctx->time - sub->total_time_enabled;
+}
+
+/*
  * Cross CPU call to enable a performance counter
  */
 static void __perf_counter_enable(void *info)
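The one non-obvious line in the new helper is tstamp_enabled = ctx->time - total_time_enabled. Re-arming the timestamp this way means the next update_counter_times() call, which computes ctx->time - tstamp_enabled, yields the old total plus the time elapsed since re-enable. Worked through with hypothetical numbers in plain C:

	#include <assert.h>

	int main(void)
	{
		unsigned long long ctx_time, tstamp, total;

		total = 150;			/* accumulated before disable */
		ctx_time = 400;			/* ctx->time at re-enable */
		tstamp = ctx_time - total;	/* mark_enabled step: 250 */

		ctx_time = 500;			/* 100 more units pass enabled */
		total = ctx_time - tstamp;	/* update step: 500 - 250 */
		assert(total == 250);		/* old 150 + new 100 */
		return 0;
	}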
@@ -877,8 +899,7 @@ static void __perf_counter_enable(void *info)
 
 	if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
 		goto unlock;
-	counter->state = PERF_COUNTER_STATE_INACTIVE;
-	counter->tstamp_enabled = ctx->time - counter->total_time_enabled;
+	__perf_counter_mark_enabled(counter, ctx);
 
 	/*
 	 * If the counter is in a group and isn't the group leader,
@@ -971,11 +992,9 @@ static void perf_counter_enable(struct perf_counter *counter)
 	 * Since we have the lock this context can't be scheduled
 	 * in, so we can change the state safely.
 	 */
-	if (counter->state == PERF_COUNTER_STATE_OFF) {
-		counter->state = PERF_COUNTER_STATE_INACTIVE;
-		counter->tstamp_enabled =
-			ctx->time - counter->total_time_enabled;
-	}
+	if (counter->state == PERF_COUNTER_STATE_OFF)
+		__perf_counter_mark_enabled(counter, ctx);
+
 out:
 	spin_unlock_irq(&ctx->lock);
 }
@@ -1479,9 +1498,7 @@ static void perf_counter_enable_on_exec(struct task_struct *task)
 		counter->attr.enable_on_exec = 0;
 		if (counter->state >= PERF_COUNTER_STATE_INACTIVE)
 			continue;
-		counter->state = PERF_COUNTER_STATE_INACTIVE;
-		counter->tstamp_enabled =
-			ctx->time - counter->total_time_enabled;
+		__perf_counter_mark_enabled(counter, ctx);
 		enabled = 1;
 	}
 
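This last hunk has perf_counter_enable_on_exec() call the helper on every eligible counter, group members included. That is safe because of the property the helper's comment notes: the kernel's circular intrusive lists iterate an empty list zero times, so for a non-leader the sibling walk does nothing and the helper reduces to the two assignments. A standalone illustration of that property (simplified stand-in, not <linux/list.h>):

	#include <stdio.h>

	struct node { struct node *next; };

	/* Simplified analogue of list_for_each_entry(): walk a circular
	 * list until we come back around to the head. */
	#define list_for_each(pos, head) \
		for ((pos) = (head)->next; (pos) != (head); (pos) = (pos)->next)

	int main(void)
	{
		struct node empty = { &empty };	/* empty list: head points at itself */
		struct node *pos;
		int visits = 0;

		list_for_each(pos, &empty)
			visits++;
		printf("siblings visited: %d\n", visits);	/* prints 0 */
		return 0;
	}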