diff options
Diffstat (limited to 'kernel/perf_counter.c')
-rw-r--r-- | kernel/perf_counter.c | 33 |
1 file changed, 30 insertions, 3 deletions
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c index 4c0dccb756ad..3aef3062ff78 100644 --- a/kernel/perf_counter.c +++ b/kernel/perf_counter.c | |||
@@ -455,12 +455,37 @@ group_error: | |||
455 | return -EAGAIN; | 455 | return -EAGAIN; |
456 | } | 456 | } |
457 | 457 | ||
458 | /* | ||
459 | * Return 1 for a software counter, 0 for a hardware counter | ||
460 | */ | ||
461 | static inline int is_software_counter(struct perf_counter *counter) | ||
462 | { | ||
463 | return !counter->hw_event.raw && counter->hw_event.type < 0; | ||
464 | } | ||
465 | |||
466 | /* | ||
467 | * Return 1 for a group consisting entirely of software counters, | ||
468 | * 0 if the group contains any hardware counters. | ||
469 | */ | ||
470 | static int is_software_only_group(struct perf_counter *leader) | ||
471 | { | ||
472 | struct perf_counter *counter; | ||
473 | |||
474 | if (!is_software_counter(leader)) | ||
475 | return 0; | ||
476 | list_for_each_entry(counter, &leader->sibling_list, list_entry) | ||
477 | if (!is_software_counter(counter)) | ||
478 | return 0; | ||
479 | return 1; | ||
480 | } | ||
481 | |||
458 | static void | 482 | static void |
459 | __perf_counter_sched_in(struct perf_counter_context *ctx, | 483 | __perf_counter_sched_in(struct perf_counter_context *ctx, |
460 | struct perf_cpu_context *cpuctx, int cpu) | 484 | struct perf_cpu_context *cpuctx, int cpu) |
461 | { | 485 | { |
462 | struct perf_counter *counter; | 486 | struct perf_counter *counter; |
463 | u64 flags; | 487 | u64 flags; |
488 | int can_add_hw = 1; | ||
464 | 489 | ||
465 | if (likely(!ctx->nr_counters)) | 490 | if (likely(!ctx->nr_counters)) |
466 | return; | 491 | return; |
@@ -477,10 +502,12 @@ __perf_counter_sched_in(struct perf_counter_context *ctx, | |||
477 | 502 | ||
478 | /* | 503 | /* |
479 | * If we scheduled in a group atomically and exclusively, | 504 | * If we scheduled in a group atomically and exclusively, |
480 | * or if this group can't go on, break out: | 505 | * or if this group can't go on, don't add any more |
506 | * hardware counters. | ||
481 | */ | 507 | */ |
482 | if (group_sched_in(counter, cpuctx, ctx, cpu)) | 508 | if (can_add_hw || is_software_only_group(counter)) |
483 | break; | 509 | if (group_sched_in(counter, cpuctx, ctx, cpu)) |
510 | can_add_hw = 0; | ||
484 | } | 511 | } |
485 | hw_perf_restore(flags); | 512 | hw_perf_restore(flags); |
486 | spin_unlock(&ctx->lock); | 513 | spin_unlock(&ctx->lock); |