author	Peter Zijlstra <a.p.zijlstra@chello.nl>	2009-05-20 06:21:19 -0400
committer	Ingo Molnar <mingo@elte.hu>	2009-05-20 06:43:32 -0400
commit	d7b629a34fc4134a43c730b5f0197855dc4948d0 (patch)
tree	040157a5fa338216644f6866e20b3994bd4eaf01 /kernel/perf_counter.c
parent	c44d70a340554a33071339064a303ac0f1a31623 (diff)
perf_counter: Solve the rotate_ctx vs inherit race differently
Instead of disabling RR scheduling of the counters, use a different list
that does not get rotated to iterate the counters on inheritance.

[ Impact: cleanup, optimization ]

Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Corey Ashford <cjashfor@linux.vnet.ibm.com>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Cc: Arnaldo Carvalho de Melo <acme@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Marcelo Tosatti <mtosatti@redhat.com>
Cc: John Kacur <jkacur@redhat.com>
LKML-Reference: <20090520102553.237504544@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
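The idea behind the patch, shown below as a minimal sketch rather than the kernel source itself (the structure layout and the helper name inherit_counters() are simplified for illustration): each counter is linked into two lists. counter_list is the one the tick rotates for round-robin scheduling; event_list is never rotated, so the inherit path can walk it under RCU while rotation keeps running, which is why the rr_allowed flag can go away.

#include <linux/list.h>
#include <linux/rculist.h>

struct perf_counter_context {
	struct list_head	counter_list;	/* rotated each tick (RR) */
	struct list_head	event_list;	/* never rotated */
};

struct perf_counter {
	struct list_head	list_entry;	/* on ctx->counter_list */
	struct list_head	event_entry;	/* on ctx->event_list */
	struct perf_counter	*group_leader;
	/* ... */
};

/* Tick path: rotate only the scheduling list. */
static void rotate_ctx(struct perf_counter_context *ctx)
{
	struct perf_counter *first;

	if (list_empty(&ctx->counter_list))
		return;

	first = list_first_entry(&ctx->counter_list,
				 struct perf_counter, list_entry);
	list_move_tail(&first->list_entry, &ctx->counter_list);
}

/* Inherit path: walk the list that never rotates, so there is no race
 * with rotate_ctx() and no need to switch rotation off. */
static void inherit_counters(struct perf_counter_context *parent_ctx)
{
	struct perf_counter *counter;

	list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
		if (counter != counter->group_leader)
			continue;
		/* ... inherit this counter group into the child ... */
	}
}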
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--	kernel/perf_counter.c	15
1 file changed, 5 insertions(+), 10 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4d8f97375f3..64113e6d194 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1120,8 +1120,7 @@ void perf_counter_task_tick(struct task_struct *curr, int cpu)
 	__perf_counter_task_sched_out(ctx);
 
 	rotate_ctx(&cpuctx->ctx);
-	if (ctx->rr_allowed)
-		rotate_ctx(ctx);
+	rotate_ctx(ctx);
 
 	perf_counter_cpu_sched_in(cpuctx, cpu);
 	perf_counter_task_sched_in(curr, cpu);
@@ -3109,7 +3108,6 @@ __perf_counter_init_context(struct perf_counter_context *ctx,
 	mutex_init(&ctx->mutex);
 	INIT_LIST_HEAD(&ctx->counter_list);
 	INIT_LIST_HEAD(&ctx->event_list);
-	ctx->rr_allowed = 1;
 	ctx->task = task;
 }
 
@@ -3350,14 +3348,14 @@ void perf_counter_init_task(struct task_struct *child)
 	 */
 	mutex_lock(&parent_ctx->mutex);
 
-	parent_ctx->rr_allowed = 0;
-	barrier(); /* irqs */
-
 	/*
 	 * We dont have to disable NMIs - we are only looking at
 	 * the list, not manipulating it:
 	 */
-	list_for_each_entry(counter, &parent_ctx->counter_list, list_entry) {
+	list_for_each_entry_rcu(counter, &parent_ctx->event_list, event_entry) {
+		if (counter != counter->group_leader)
+			continue;
+
 		if (!counter->hw_event.inherit)
 			continue;
 
@@ -3366,9 +3364,6 @@ void perf_counter_init_task(struct task_struct *child)
 			break;
 	}
 
-	barrier(); /* irqs */
-	parent_ctx->rr_allowed = 1;
-
 	mutex_unlock(&parent_ctx->mutex);
 }
 