author		Ingo Molnar <mingo@elte.hu>	2009-05-17 05:24:08 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-19 18:22:30 -0400
commit		c44d70a340554a33071339064a303ac0f1a31623
tree		a620d5b82a07b2e2c8c7c57d2e81d5bfdcdd4c25
parent		33b2fb303fe7f6b08bbb32f708e67b96eaa94a7a
perf_counter: fix counter inheritance race
Context rotation should not occur while we are in the middle of
walking the counter list to inherit counters ...
[ Impact: fix occasionally incorrect perf stat results ]
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
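
For context on the mechanism the patch introduces: rotation is gated on a
per-context rr_allowed flag, which the inheritance path clears before walking
the counter list and restores once the walk is done. The following is a
minimal, stand-alone C sketch of that gate pattern only; the struct layout,
the tick()/inherit_counters() helpers and the four-entry counter array are
illustrative stand-ins and not the kernel code.

/*
 * Minimal user-space sketch of the rr_allowed gate, assuming a
 * simplified context with a hypothetical fixed-size counter list.
 */
#include <stdio.h>

/* stand-in for the kernel's barrier(): compiler-only memory barrier */
#define barrier()	__asm__ __volatile__("" ::: "memory")

struct ctx {
	int rr_allowed;		/* 0 while an inheritance walk is in progress */
	int counters[4];	/* hypothetical counter list */
	int head;		/* current rotation position */
};

/* tick path: rotate the context only when rotation is allowed */
static void tick(struct ctx *ctx)
{
	if (ctx->rr_allowed)
		ctx->head = (ctx->head + 1) % 4;
}

/* inheritance path: fence off rotation for the duration of the list walk */
static void inherit_counters(struct ctx *ctx)
{
	ctx->rr_allowed = 0;
	barrier();		/* keep the store ordered before the walk */

	for (int i = 0; i < 4; i++)
		printf("inherit counter %d\n",
		       ctx->counters[(ctx->head + i) % 4]);

	barrier();		/* keep the walk ordered before re-enabling */
	ctx->rr_allowed = 1;
}

int main(void)
{
	struct ctx ctx = { .rr_allowed = 1, .counters = { 10, 11, 12, 13 } };

	tick(&ctx);		/* rotation happens here */
	inherit_counters(&ctx);	/* a concurrent tick would skip rotation */
	tick(&ctx);		/* rotation allowed again */
	return 0;
}
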
 include/linux/perf_counter.h |  1 +
 kernel/perf_counter.c        | 10 +++++++++-
 2 files changed, 10 insertions(+), 1 deletion(-)
diff --git a/include/linux/perf_counter.h b/include/linux/perf_counter.h
index c8c1dfc22c93..13cb2fbbf334 100644
--- a/include/linux/perf_counter.h
+++ b/include/linux/perf_counter.h
@@ -508,6 +508,7 @@ struct perf_counter_context {
 	int			nr_counters;
 	int			nr_active;
 	int			is_active;
+	int			rr_allowed;
 	struct task_struct	*task;
 
 	/*
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 7af16d1c480f..4d8f97375f3a 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1120,7 +1120,8 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 	__perf_counter_task_sched_out(ctx);
 
 	rotate_ctx(&cpuctx->ctx);
-	rotate_ctx(ctx);
+	if (ctx->rr_allowed)
+		rotate_ctx(ctx);
 
 	perf_counter_cpu_sched_in(cpuctx, cpu);
 	perf_counter_task_sched_in(curr, cpu);
@@ -3108,6 +3109,7 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->counter_list);
 	INIT_LIST_HEAD(&ctx->event_list);
+	ctx->rr_allowed = 1;
 	ctx->task = task;
 }
 
@@ -3348,6 +3350,9 @@ void perf_counter_init_task(struct task_struct *child)
 	 */
 	mutex_lock(&parent_ctx->mutex);
 
+	parent_ctx->rr_allowed = 0;
+	barrier(); /* irqs */
+
 	/*
 	 * We dont have to disable NMIs - we are only looking at
 	 * the list, not manipulating it:
@@ -3361,6 +3366,9 @@ void perf_counter_init_task(struct task_struct *child)
 			break;
 	}
 
+	barrier(); /* irqs */
+	parent_ctx->rr_allowed = 1;
+
 	mutex_unlock(&parent_ctx->mutex);
 }
 