author     Mark Rutland <mark.rutland@arm.com>   2011-06-24 06:30:59 -0400
committer  Will Deacon <will.deacon@arm.com>     2011-08-31 05:50:11 -0400
commit     3fc2c83087717dc88003428245d97b9d432fff2d
tree       5dc3f1633c40df8a0b52f046754013355a9f65c1   /arch/arm/kernel/perf_event.c
parent     8a16b34e21199eb5fcf2c5050d3bc414fc5d6563
ARM: perf: remove event limit from pmu_hw_events
Currently the event accounting data in pmu_hw_events is stored in
fixed-sized arrays within the structure.
This patch refactors the accounting data to allow any number of events
to be managed.
Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Jamie Iles <jamie@jamieiles.com>
Reviewed-by: Ashwin Chaugule <ashwinc@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r--   arch/arm/kernel/perf_event.c   9
1 file changed, 7 insertions(+), 2 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 7f31eff00b80..e1db55500784 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -42,13 +42,13 @@ struct cpu_hw_events {
 	/*
 	 * The events that are active on the CPU for the given index.
 	 */
-	struct perf_event	*events[ARMPMU_MAX_HWEVENTS];
+	struct perf_event	**events;
 
 	/*
 	 * A 1 bit for an index indicates that the counter is being used for
 	 * an event. A 0 means that the counter can be used.
 	 */
-	unsigned long		used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
+	unsigned long		*used_mask;
 
 	/*
 	 * Hardware lock to serialize accesses to PMU registers. Needed for the
@@ -56,6 +56,9 @@ struct cpu_hw_events {
 	 */
 	raw_spinlock_t		pmu_lock;
 };
+
+static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
+static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
 
 struct arm_pmu {
@@ -714,6 +717,8 @@ static void __init cpu_pmu_init(struct arm_pmu *armpmu)
 	int cpu;
 	for_each_possible_cpu(cpu) {
 		struct cpu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
+		events->events = per_cpu(hw_events, cpu);
+		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
 	}
 	armpmu->get_hw_events = armpmu_get_cpu_events;
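The refactoring pattern above can be illustrated outside of the kernel. Below is a minimal, self-contained userspace C sketch of the same idea (the names hw_events_state, event_slots, used_masks and init_cpu_state are hypothetical stand-ins, not kernel APIs): the accounting struct keeps only pointers, the fixed-size backing storage is declared separately per CPU, and an init loop wires the two together, much as cpu_pmu_init() does with per_cpu(hw_events, cpu) and per_cpu(used_mask, cpu).

#include <stdio.h>

/* Stand-ins for the real constants and types; purely illustrative. */
#define NR_CPUS            2
#define MAX_HWEVENTS       4
#define BITS_PER_LONG      (8 * sizeof(unsigned long))
#define BITS_TO_LONGS(n)   (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

struct hw_event;                        /* opaque, plays the role of struct perf_event */

/*
 * The accounting struct holds only pointers: its layout no longer
 * dictates how many events can be tracked.
 */
struct hw_events_state {
        struct hw_event **events;       /* one slot per counter index         */
        unsigned long    *used_mask;    /* bit set => counter index is in use */
};

/*
 * Fixed-size backing storage lives outside the struct, one copy per CPU
 * (the patch uses DEFINE_PER_CPU for this instead of plain 2-D arrays).
 */
static struct hw_event *event_slots[NR_CPUS][MAX_HWEVENTS];
static unsigned long    used_masks[NR_CPUS][BITS_TO_LONGS(MAX_HWEVENTS)];
static struct hw_events_state cpu_state[NR_CPUS];

/* Mirrors cpu_pmu_init(): point each CPU's state at its own storage. */
static void init_cpu_state(void)
{
        for (int cpu = 0; cpu < NR_CPUS; cpu++) {
                cpu_state[cpu].events    = event_slots[cpu];
                cpu_state[cpu].used_mask = used_masks[cpu];
        }
}

int main(void)
{
        init_cpu_state();
        /* Mark counter 0 on CPU 1 as used, roughly what event scheduling does. */
        cpu_state[1].used_mask[0] |= 1UL << 0;
        printf("cpu1 used_mask[0] = %lx\n", cpu_state[1].used_mask[0]);
        return 0;
}

Since only the backing definitions mention the size constant, the struct itself no longer imposes a limit on how many events can be managed; a different PMU implementation could point the same struct at larger (or dynamically allocated) storage, which is the flexibility the commit message describes.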