author     Paul Mackerras <paulus@samba.org>   2009-01-11 23:11:00 -0500
committer  Paul Mackerras <paulus@samba.org>   2009-01-11 23:12:50 -0500
commit     dd0e6ba22ea21bcc2c420b385a170593c58f4c08 (patch)
tree       a6b1b30cc7e873615d24c4eef04ef61b2ccb3ebe /kernel/perf_counter.c
parent     c0d362a832ee70435fc4555a64f820893b1da0bd (diff)
perf_counter: Always schedule all software counters in
Software counters aren't subject to the limitations imposed by the fixed number of hardware counter registers, so there is no reason not to enable them all in __perf_counter_sched_in. Previously we used to break out of the loop when we got to a group that wouldn't fit on the PMU; with this we continue through the list but only schedule in software counters (or groups containing only software counters) from there on.

Signed-off-by: Paul Mackerras <paulus@samba.org>
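As a rough illustration of the new behaviour, here is a minimal, standalone sketch (not kernel code; struct toy_group, fits_on_pmu and sched_in_all are invented for this example). It models the loop in __perf_counter_sched_in() after the patch: once a hardware group fails to fit on the PMU, the walk continues, but only software-only groups are scheduled in from that point on.

#include <stdbool.h>
#include <stdio.h>

/*
 * Toy model of the scheduling loop.  Only the can_add_hw logic mirrors
 * the patch; the types and helpers are made up for illustration.
 */
struct toy_group {
        const char *name;
        bool software_only;     /* no hardware counters in the group */
        bool fits_on_pmu;       /* would group_sched_in() succeed?   */
};

static void sched_in_all(struct toy_group *grp, int n)
{
        int can_add_hw = 1;

        for (int i = 0; i < n; i++) {
                /*
                 * Old behaviour: break out of the loop on the first
                 * failure.  New behaviour: note the failure and keep
                 * walking the list, but from then on only groups made
                 * up entirely of software counters are scheduled in.
                 */
                if (can_add_hw || grp[i].software_only) {
                        if (grp[i].fits_on_pmu)
                                printf("scheduled in %s\n", grp[i].name);
                        else
                                can_add_hw = 0;
                }
        }
}

int main(void)
{
        struct toy_group list[] = {
                { "hw-group-A", false, true  },
                { "hw-group-B", false, false }, /* PMU is now full          */
                { "sw-group-C", true,  true  }, /* still scheduled in       */
                { "hw-group-D", false, true  }, /* skipped: can_add_hw == 0 */
        };

        sched_in_all(list, sizeof(list) / sizeof(list[0]));
        return 0;
}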
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r--    kernel/perf_counter.c    33
1 file changed, 30 insertions(+), 3 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 4c0dccb756ad..3aef3062ff78 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -455,12 +455,37 @@ group_error:
 	return -EAGAIN;
 }
 
+/*
+ * Return 1 for a software counter, 0 for a hardware counter
+ */
+static inline int is_software_counter(struct perf_counter *counter)
+{
+	return !counter->hw_event.raw && counter->hw_event.type < 0;
+}
+
+/*
+ * Return 1 for a group consisting entirely of software counters,
+ * 0 if the group contains any hardware counters.
+ */
+static int is_software_only_group(struct perf_counter *leader)
+{
+	struct perf_counter *counter;
+
+	if (!is_software_counter(leader))
+		return 0;
+	list_for_each_entry(counter, &leader->sibling_list, list_entry)
+		if (!is_software_counter(counter))
+			return 0;
+	return 1;
+}
+
 static void
 __perf_counter_sched_in(struct perf_counter_context *ctx,
 			struct perf_cpu_context *cpuctx, int cpu)
 {
 	struct perf_counter *counter;
 	u64 flags;
+	int can_add_hw = 1;
 
 	if (likely(!ctx->nr_counters))
 		return;
@@ -477,10 +502,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx,
 
 	/*
 	 * If we scheduled in a group atomically and exclusively,
-	 * or if this group can't go on, break out:
+	 * or if this group can't go on, don't add any more
+	 * hardware counters.
 	 */
-	if (group_sched_in(counter, cpuctx, ctx, cpu))
-		break;
+	if (can_add_hw || is_software_only_group(counter))
+		if (group_sched_in(counter, cpuctx, ctx, cpu))
+			can_add_hw = 0;
 	}
 	hw_perf_restore(flags);
 	spin_unlock(&ctx->lock);