author     Peter Zijlstra <a.p.zijlstra@chello.nl>  2009-06-22 07:58:35 -0400
committer  Ingo Molnar <mingo@elte.hu>              2009-06-23 05:42:46 -0400
commit     f344011ccb85469445369153c3d27c4ee4bc2ac8 (patch)
tree       66f041a826d09c690a5e6bb3d091494d52bc8565 /kernel
parent     b84fbc9fb1d943e2c5f4efe52ed0e3c93a4bdb6a (diff)
perf_counter: Optimize perf_counter_alloc()'s inherit case
We don't need to take the usage counts for the swcounter and
attr usage models for inherited counters, since the parent
counter always holds one, and that single reference suffices
to generate the needed output.

This avoids up to 3 global atomic increments per inherited
counter.
LKML-Reference: <new-submission>
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
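
In short, global bookkeeping becomes the top-level counter's job alone: a
counter with a non-NULL ->parent neither takes nor drops the global usage
counts. A minimal, distilled sketch of the pattern follows (kernel context
assumed; the struct and helper names are illustrative, not the real
perf_counter API — only the ->parent test mirrors the patch):

    #include <linux/atomic.h>

    /* Hypothetical global usage count, standing in for nr_counters etc. */
    static atomic_t nr_counters;

    struct counter {
            struct counter *parent;   /* NULL for a top-level counter */
    };

    /* Only a top-level counter takes the global reference ... */
    static void counter_init(struct counter *c, struct counter *parent)
    {
            c->parent = parent;
            if (!c->parent)
                    atomic_inc(&nr_counters);
    }

    /* ... so only a top-level counter drops it. Inherited children are
     * covered by their parent's single reference and cost no atomics. */
    static void counter_free(struct counter *c)
    {
            if (!c->parent)
                    atomic_dec(&nr_counters);
    }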
Diffstat (limited to 'kernel')
 kernel/perf_counter.c | 32 ++++++++++++++++++++------------
 1 file changed, 20 insertions(+), 12 deletions(-)
diff --git a/kernel/perf_counter.c b/kernel/perf_counter.c
index 0a45490f4029..c2b19c111718 100644
--- a/kernel/perf_counter.c
+++ b/kernel/perf_counter.c
@@ -1508,11 +1508,13 @@ static void free_counter(struct perf_counter *counter)
 {
 	perf_pending_sync(counter);
 
-	atomic_dec(&nr_counters);
-	if (counter->attr.mmap)
-		atomic_dec(&nr_mmap_counters);
-	if (counter->attr.comm)
-		atomic_dec(&nr_comm_counters);
+	if (!counter->parent) {
+		atomic_dec(&nr_counters);
+		if (counter->attr.mmap)
+			atomic_dec(&nr_mmap_counters);
+		if (counter->attr.comm)
+			atomic_dec(&nr_comm_counters);
+	}
 
 	if (counter->destroy)
 		counter->destroy(counter);
@@ -3515,6 +3517,8 @@ static void sw_perf_counter_destroy(struct perf_counter *counter)
 {
 	u64 event = counter->attr.config;
 
+	WARN_ON(counter->parent);
+
 	atomic_dec(&perf_swcounter_enabled[event]);
 }
 
@@ -3551,8 +3555,10 @@ static const struct pmu *sw_perf_counter_init(struct perf_counter *counter)
 	case PERF_COUNT_SW_PAGE_FAULTS_MAJ:
 	case PERF_COUNT_SW_CONTEXT_SWITCHES:
 	case PERF_COUNT_SW_CPU_MIGRATIONS:
-		atomic_inc(&perf_swcounter_enabled[event]);
-		counter->destroy = sw_perf_counter_destroy;
+		if (!counter->parent) {
+			atomic_inc(&perf_swcounter_enabled[event]);
+			counter->destroy = sw_perf_counter_destroy;
+		}
 		pmu = &perf_ops_generic;
 		break;
 	}
@@ -3663,11 +3669,13 @@ done:
 
 	counter->pmu = pmu;
 
-	atomic_inc(&nr_counters);
-	if (counter->attr.mmap)
-		atomic_inc(&nr_mmap_counters);
-	if (counter->attr.comm)
-		atomic_inc(&nr_comm_counters);
+	if (!counter->parent) {
+		atomic_inc(&nr_counters);
+		if (counter->attr.mmap)
+			atomic_inc(&nr_mmap_counters);
+		if (counter->attr.comm)
+			atomic_inc(&nr_comm_counters);
+	}
 
 	return counter;
 }
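
A note on the WARN_ON(counter->parent) added to sw_perf_counter_destroy():
since sw_perf_counter_init() now installs the destroy callback only for
top-level counters, the destructor should never see an inherited counter,
which is what keeps its unconditional atomic_dec() of
perf_swcounter_enabled[] correct.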