author    Ingo Molnar <mingo@elte.hu>    2008-12-17 02:54:56 -0500
committer Ingo Molnar <mingo@elte.hu>    2008-12-23 06:45:09 -0500
commit    7995888fcb0246543ee8027bf2835a250ba8c925
tree      ef7a2f699d99c3613ba7559d3b9772fa00014718
parent    8fb9331391af95ca1f4e5c0a0da8120b13cbae01
perfcounters: tweak group scheduling
Impact: schedule in groups atomically

If there are multiple groups in a task, make sure they are
scheduled in and out atomically.

Signed-off-by: Ingo Molnar <mingo@elte.hu>
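For illustration, a minimal self-contained C sketch of the scheme this
patch introduces. The types and helpers below (struct counter, the flat
leaders array, the NULL-terminated sibling chain) are simplified,
hypothetical stand-ins, not the kernel's real perf_counter machinery:
the group leader and all of its siblings are scheduled onto the PMU
together, and the return value tells the caller whether a whole group
went on, so it can stop there.

#include <stddef.h>

/* Hypothetical stand-in for the kernel's struct perf_counter. */
struct counter {
	struct counter *next_sibling;	/* NULL-terminated sibling chain */
};

/* Program one hardware counter (placeholder for the real work). */
static void counter_sched_in(struct counter *c)
{
	(void)c;
}

/*
 * Schedule a leader and all of its siblings in as one unit.
 * Returns nonzero when siblings were present, i.e. when a whole
 * group (not just a lone counter) now occupies the PMU.
 */
static int group_sched_in(struct counter *leader)
{
	struct counter *c;
	int was_group = 0;

	counter_sched_in(leader);

	for (c = leader->next_sibling; c != NULL; c = c->next_sibling) {
		counter_sched_in(c);
		was_group = 1;
	}

	return was_group;
}

/* Caller: stop once a full group has been scheduled in atomically. */
static void task_sched_in(struct counter **leaders, int n)
{
	int i;

	for (i = 0; i < n; i++) {
		if (group_sched_in(leaders[i]))
			break;
	}
}

Changing group_sched_in() from void to int is what lets the caller tell
a lone counter apart from a full group and break out of its loop, which
is how the patch keeps multi-counter groups on the PMU as one atomic,
exclusive unit.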
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 16 +++++++++++++---
 1 file changed, 13 insertions(+), 3 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index aab6c123b02c..f8a4d9a5d5d3 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -367,21 +367,26 @@ counter_sched_in(struct perf_counter *counter,
 	ctx->nr_active++;
 }
 
-static void
+static int
 group_sched_in(struct perf_counter *group_counter,
 	       struct perf_cpu_context *cpuctx,
 	       struct perf_counter_context *ctx,
 	       int cpu)
 {
 	struct perf_counter *counter;
+	int was_group = 0;
 
 	counter_sched_in(group_counter, cpuctx, ctx, cpu);
 
 	/*
 	 * Schedule in siblings as one group (if any):
 	 */
-	list_for_each_entry(counter, &group_counter->sibling_list, list_entry)
+	list_for_each_entry(counter, &group_counter->sibling_list, list_entry) {
 		counter_sched_in(counter, cpuctx, ctx, cpu);
+		was_group = 1;
+	}
+
+	return was_group;
 }
 
 /*
@@ -416,7 +421,12 @@ void perf_counter_task_sched_in(struct task_struct *task, int cpu)
 		if (counter->cpu != -1 && counter->cpu != cpu)
 			continue;
 
-		group_sched_in(counter, cpuctx, ctx, cpu);
+		/*
+		 * If we scheduled in a group atomically and
+		 * exclusively, break out:
+		 */
+		if (group_sched_in(counter, cpuctx, ctx, cpu))
+			break;
 	}
 	spin_unlock(&ctx->lock);
 