Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	32
1 file changed, 15 insertions, 17 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 57840a94b163..59a926d04baf 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -115,6 +115,7 @@ list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 	}
 
 	list_add_rcu(&counter->event_entry, &ctx->event_list);
+	ctx->nr_counters++;
 }
 
 static void
@@ -122,6 +123,8 @@ list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
 {
 	struct perf_counter *sibling, *tmp;
 
+	ctx->nr_counters--;
+
 	list_del_init(&counter->list_entry);
 	list_del_rcu(&counter->event_entry);
 
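With these first two hunks, ctx->nr_counters is adjusted in exactly one place per direction, inside the list helpers themselves, so no call site can forget or double-apply the accounting. A minimal sketch of the helpers after the change, with the group-leader and sibling bookkeeping elided since this patch does not touch it:

static void
list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	/* ... group-leader bookkeeping elided ... */
	list_add_rcu(&counter->event_entry, &ctx->event_list);
	ctx->nr_counters++;	/* accounting now sits next to the list op */
}

static void
list_del_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
{
	struct perf_counter *sibling, *tmp;

	ctx->nr_counters--;

	list_del_init(&counter->list_entry);
	list_del_rcu(&counter->event_entry);
	/* ... sibling promotion elided (see the note after the last hunk) ... */
}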
@@ -209,7 +212,6 @@ static void __perf_counter_remove_from_context(void *info)
 	counter_sched_out(counter, cpuctx, ctx);
 
 	counter->task = NULL;
-	ctx->nr_counters--;
 
 	/*
 	 * Protect the list operation against NMI by disabling the
@@ -276,7 +278,6 @@ retry:
 	 * succeed.
 	 */
 	if (!list_empty(&counter->list_entry)) {
-		ctx->nr_counters--;
 		list_del_counter(counter, ctx);
 		counter->task = NULL;
 	}
@@ -544,7 +545,6 @@ static void add_counter_to_ctx(struct perf_counter *counter,
 			       struct perf_counter_context *ctx)
 {
 	list_add_counter(counter, ctx);
-	ctx->nr_counters++;
 	counter->prev_state = PERF_COUNTER_STATE_OFF;
 	counter->tstamp_enabled = ctx->time;
 	counter->tstamp_running = ctx->time;
@@ -3206,9 +3206,8 @@ static int inherit_group(struct perf_counter *parent_counter,
 static void sync_child_counter(struct perf_counter *child_counter,
 			       struct perf_counter *parent_counter)
 {
-	u64 parent_val, child_val;
+	u64 child_val;
 
-	parent_val = atomic64_read(&parent_counter->count);
 	child_val = atomic64_read(&child_counter->count);
 
 	/*
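The deleted parent_val was a dead read: nothing in sync_child_counter() consumed it, since the function only needs the child's count to fold into the parent. A sketch of the resulting function, assuming the atomic64_add() accumulation that the comment opening at the end of this hunk introduces in the full source (that part is not shown in this diff):

static void sync_child_counter(struct perf_counter *child_counter,
			       struct perf_counter *parent_counter)
{
	u64 child_val;

	child_val = atomic64_read(&child_counter->count);

	/* assumed from the surrounding file: fold the child's count back
	 * into the parent's count */
	atomic64_add(child_val, &parent_counter->count);
	/* ... remainder of the sync elided ... */
}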
@@ -3240,7 +3239,6 @@ __perf_counter_exit_task(struct task_struct *child,
 			 struct perf_counter_context *child_ctx)
 {
 	struct perf_counter *parent_counter;
-	struct perf_counter *sub, *tmp;
 
 	/*
 	 * If we do not self-reap then we have to wait for the
@@ -3252,8 +3250,8 @@ __perf_counter_exit_task(struct task_struct *child,
 	 */
 	if (child != current) {
 		wait_task_inactive(child, 0);
-		list_del_init(&child_counter->list_entry);
 		update_counter_times(child_counter);
+		list_del_counter(child_counter, child_ctx);
 	} else {
 		struct perf_cpu_context *cpuctx;
 		unsigned long flags;
@@ -3272,9 +3270,7 @@ __perf_counter_exit_task(struct task_struct *child,
 	group_sched_out(child_counter, cpuctx, child_ctx);
 	update_counter_times(child_counter);
 
-	list_del_init(&child_counter->list_entry);
-
-	child_ctx->nr_counters--;
+	list_del_counter(child_counter, child_ctx);
 
 	perf_enable();
 	local_irq_restore(flags);
@@ -3288,13 +3284,6 @@ __perf_counter_exit_task(struct task_struct *child,
 	 */
 	if (parent_counter) {
 		sync_child_counter(child_counter, parent_counter);
-		list_for_each_entry_safe(sub, tmp, &child_counter->sibling_list,
-					 list_entry) {
-			if (sub->parent) {
-				sync_child_counter(sub, sub->parent);
-				free_counter(sub);
-			}
-		}
 		free_counter(child_counter);
 	}
 }
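The dropped sibling loop is not lost functionality: __perf_counter_exit_task() now removes counters with list_del_counter(), and, per the retry comment added in the next hunk, deleting a group leader appends its siblings onto the context's counter_list, where the caller's loop reaps and syncs each of them individually. A sketch of that promotion step inside list_del_counter(), not shown in this diff and inferred from that comment, with illustrative names:

	/*
	 * Upgrade the siblings of a departing group leader to singleton
	 * counters by moving them onto the context list (sketch).
	 */
	list_for_each_entry_safe(sibling, tmp, &counter->sibling_list,
				 list_entry) {
		list_move_tail(&sibling->list_entry, &ctx->counter_list);
		sibling->group_leader = sibling;
	}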
@@ -3315,9 +3304,18 @@ void perf_counter_exit_task(struct task_struct *child)
 	if (likely(!child_ctx->nr_counters))
 		return;
 
+again:
 	list_for_each_entry_safe(child_counter, tmp, &child_ctx->counter_list,
 				 list_entry)
 		__perf_counter_exit_task(child, child_counter, child_ctx);
+
+	/*
+	 * If the last counter was a group counter, it will have appended all
+	 * its siblings to the list, but we obtained 'tmp' before that which
+	 * will still point to the list head terminating the iteration.
+	 */
+	if (!list_empty(&child_ctx->counter_list))
+		goto again;
 }
 
 /*
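The comment added here documents a real list_for_each_entry_safe() subtlety: the "safe" iterator caches only the next pointer before the loop body runs, so entries spliced onto the list after that point are skipped, and the goto again pass is what eventually reaps the promoted siblings. A self-contained user-space sketch of the effect, using mock list helpers and illustrative names (not kernel code):

#include <stdio.h>
#include <stddef.h>

/* Tiny mock of the kernel's circular doubly-linked list. */
struct list_head { struct list_head *next, *prev; };

static void list_add_tail(struct list_head *node, struct list_head *head)
{
	node->prev = head->prev;
	node->next = head;
	head->prev->next = node;
	head->prev = node;
}

static void list_del(struct list_head *entry)
{
	entry->prev->next = entry->next;
	entry->next->prev = entry->prev;
}

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct counter {
	struct list_head list_entry;	/* on the ctx list or a sibling list */
	struct list_head sibling_list;	/* this counter's group members */
	const char *name;
};

/* Like list_del_counter(): removing a group leader promotes its
 * siblings onto the context list, after the caller cached 'tmp'. */
static void del_counter(struct counter *c, struct list_head *ctx_list)
{
	list_del(&c->list_entry);
	while (c->sibling_list.next != &c->sibling_list) {
		struct list_head *s = c->sibling_list.next;
		list_del(s);
		list_add_tail(s, ctx_list);
	}
}

int main(void)
{
	struct list_head ctx = { &ctx, &ctx };
	struct counter leader = { .name = "leader" };
	struct counter sib    = { .name = "sibling" };

	leader.sibling_list.next = leader.sibling_list.prev = &leader.sibling_list;
	sib.sibling_list.next = sib.sibling_list.prev = &sib.sibling_list;

	list_add_tail(&leader.list_entry, &ctx);		/* ctx: [leader] */
	list_add_tail(&sib.list_entry, &leader.sibling_list);	/* group: [sib] */

again:
	/* Open-coded list_for_each_entry_safe(): 'tmp' is cached before the
	 * body runs, so on the first pass it already points at the head. */
	for (struct list_head *p = ctx.next, *tmp = p->next;
	     p != &ctx; p = tmp, tmp = p->next) {
		struct counter *c = container_of(p, struct counter, list_entry);
		printf("reaping %s\n", c->name);
		del_counter(c, &ctx);	/* splices 'sibling' in behind 'tmp' */
	}
	/* The pass above missed the promoted sibling; retry, as the patch does. */
	if (ctx.next != &ctx)
		goto again;
	return 0;
}

Built with any C99 compiler, this prints "reaping leader" on the first pass and "reaping sibling" only after the retry, which is exactly why the patch re-checks list_empty() instead of trusting a single walk.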