author    Paul Mackerras <paulus@samba.org>    2009-01-09 00:43:42 -0500
committer Paul Mackerras <paulus@samba.org>    2009-01-09 00:43:42 -0500
commit    3cbed429a9ccdb7a243f733b1056fe5c39e9004c
tree      1b41a966a1f2598257b8d51299d180bce33b835e /kernel/perf_counter.c
parent    9abf8a08bc8f18a3b125f834f00e2e71b49c15d2
perf_counter: Add optional hw_perf_group_sched_in arch function
Impact: extend perf_counter infrastructure

This adds an optional hw_perf_group_sched_in() arch function that enables a whole group of counters in one go. It returns 1 if it added the group successfully, 0 if it did nothing (and therefore the core needs to add the counters individually), or a negative number if an error occurred. It should either add all the counters and enable any software counters in the group, or else add none of them and return an error.

There are a couple of related changes/improvements in the group handling here:

* As an optimization, group_sched_out() and group_sched_in() now check the state of the group leader, and do nothing if the leader is not active or is disabled.

* We now call hw_perf_save_disable/hw_perf_restore around the complete set of counter enable/disable calls in __perf_counter_sched_in/out, to give the arch code the opportunity to defer updating the hardware state until the hw_perf_restore call if it wants to.

* We no longer stop adding groups after we get to a group that has more than one counter. We will ultimately add an option for a group to be exclusive. The current code doesn't really implement exclusive groups anyway, since a group could end up going on with other counters that get added before it.

Signed-off-by: Paul Mackerras <paulus@samba.org>
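For illustration only (this is not part of the commit), an architecture back-end might implement the new hook roughly as sketched below. The arch_try_add_counter() and arch_remove_counter() helpers are hypothetical stand-ins for whatever placement logic the PMU code actually has, and the software-counter handling is omitted; only the return-value contract follows the description above: add the whole group or add nothing, returning 1 on success, 0 to make the core fall back to per-counter scheduling, or a negative error. A real back-end would do considerably more bookkeeping.

/*
 * Hypothetical sketch only -- arch_try_add_counter() and
 * arch_remove_counter() do not exist in the tree; they stand in
 * for the architecture's own PMU placement helpers.
 */
int hw_perf_group_sched_in(struct perf_counter *group_leader,
			   struct perf_cpu_context *cpuctx,
			   struct perf_counter_context *ctx, int cpu)
{
	struct perf_counter *counter, *failed = NULL;

	/* Try to place the leader and every sibling on the PMU. */
	if (arch_try_add_counter(group_leader, cpu))
		return 0;	/* did nothing: core adds counters one by one */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
		if (arch_try_add_counter(counter, cpu)) {
			failed = counter;
			goto undo;
		}
	}
	return 1;		/* whole group scheduled in one go */

undo:
	/* All or nothing: back out everything added so far. */
	list_for_each_entry(counter, &group_leader->sibling_list, list_entry) {
		if (counter == failed)
			break;
		arch_remove_counter(counter, cpu);
	}
	arch_remove_counter(group_leader, cpu);
	return -EAGAIN;
}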
Diffstat (limited to 'kernel/perf_counter.c')
 kernel/perf_counter.c | 31 ++++++++++++++++++++++++++-----
 1 file changed, 26 insertions(+), 5 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index b7a027a2ef02..9ad11e44d9ab 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -47,6 +47,12 @@ hw_perf_counter_init(struct perf_counter *counter)
 u64 __weak hw_perf_save_disable(void)		{ return 0; }
 void __weak hw_perf_restore(u64 ctrl)		{ barrier(); }
 void __weak hw_perf_counter_setup(void)	{ barrier(); }
+int __weak hw_perf_group_sched_in(struct perf_counter *group_leader,
+	       struct perf_cpu_context *cpuctx,
+	       struct perf_counter_context *ctx, int cpu)
+{
+	return 0;
+}
 
 static void
 list_add_counter(struct perf_counter *counter, struct perf_counter_context *ctx)
@@ -341,6 +347,9 @@ group_sched_out(struct perf_counter *group_counter,
 {
 	struct perf_counter *counter;
 
+	if (group_counter->state != PERF_COUNTER_STATE_ACTIVE)
+		return;
+
 	counter_sched_out(group_counter, cpuctx, ctx);
 
 	/*
@@ -354,15 +363,18 @@ void __perf_counter_sched_out(struct perf_counter_context *ctx,
 			      struct perf_cpu_context *cpuctx)
 {
 	struct perf_counter *counter;
+	u64 flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
 
 	spin_lock(&ctx->lock);
+	flags = hw_perf_save_disable();
 	if (ctx->nr_active) {
 		list_for_each_entry(counter, &ctx->counter_list, list_entry)
 			group_sched_out(counter, cpuctx, ctx);
 	}
+	hw_perf_restore(flags);
 	spin_unlock(&ctx->lock);
 }
 
@@ -402,7 +414,14 @@ group_sched_in(struct perf_counter *group_counter,
 	       int cpu)
 {
 	struct perf_counter *counter, *partial_group;
-	int ret = 0;
+	int ret;
+
+	if (group_counter->state == PERF_COUNTER_STATE_OFF)
+		return 0;
+
+	ret = hw_perf_group_sched_in(group_counter, cpuctx, ctx, cpu);
+	if (ret)
+		return ret < 0 ? ret : 0;
 
 	if (counter_sched_in(group_counter, cpuctx, ctx, cpu))
 		return -EAGAIN;
@@ -415,10 +434,9 @@ group_sched_in(struct perf_counter *group_counter,
 			partial_group = counter;
 			goto group_error;
 		}
-		ret = -EAGAIN;
 	}
 
-	return ret;
+	return 0;
 
 group_error:
 	/*
@@ -440,11 +458,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 			struct perf_cpu_context *cpuctx, int cpu)
 {
 	struct perf_counter *counter;
+	u64 flags;
 
 	if (likely(!ctx->nr_counters))
 		return;
 
 	spin_lock(&ctx->lock);
+	flags = hw_perf_save_disable();
 	list_for_each_entry(counter, &ctx->counter_list, list_entry) {
 		/*
 		 * Listen to the 'cpu' scheduling filter constraint
@@ -454,12 +474,13 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 			continue;
 
 		/*
-		 * If we scheduled in a group atomically and
-		 * exclusively, break out:
+		 * If we scheduled in a group atomically and exclusively,
+		 * or if this group can't go on, break out:
 		 */
 		if (group_sched_in(counter, cpuctx, ctx, cpu))
 			break;
 	}
+	hw_perf_restore(flags);
 	spin_unlock(&ctx->lock);
 }
 
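The hw_perf_save_disable()/hw_perf_restore() bracketing added above is what gives an architecture the chance to defer its hardware updates. As a purely hypothetical sketch (again, not from this commit), a back-end could keep per-cpu state, stop the PMU in hw_perf_save_disable(), queue counter changes while disabled, and write everything out in one go from hw_perf_restore(). The cpu_hw_counters layout and the arch_pmu_*() helpers below are invented for illustration.

/*
 * Hypothetical sketch: how an arch back-end might use the
 * save_disable/restore bracketing to batch PMU register writes.
 * cpu_hw_counters and the arch_pmu_*() helpers are made up.
 */
struct cpu_hw_counters {
	u64	saved_ctrl;	/* global PMU enable state at disable time */
	int	disabled;	/* inside a save_disable/restore section */
	int	n_added;	/* counters queued while disabled */
};
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters);

u64 hw_perf_save_disable(void)
{
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);

	cpuhw->disabled = 1;
	cpuhw->saved_ctrl = arch_pmu_stop();	/* globally stop counting */
	return cpuhw->saved_ctrl;
}

void hw_perf_restore(u64 ctrl)
{
	struct cpu_hw_counters *cpuhw = &__get_cpu_var(cpu_hw_counters);

	if (cpuhw->n_added) {
		arch_pmu_reprogram(cpuhw);	/* write queued counters at once */
		cpuhw->n_added = 0;
	}
	cpuhw->disabled = 0;
	arch_pmu_start(ctrl);			/* re-enable with the saved state */
}

Between those two calls the per-counter enable/disable paths would only update cpuhw instead of touching the hardware directly, which is exactly the opportunity the second bullet of the commit message describes.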