aboutsummaryrefslogtreecommitdiffstats
path: root/arch/arm/kernel/perf_event_cpu.c
diff options
context:
space:
mode:
authorMark Rutland <mark.rutland@arm.com>2014-05-13 14:08:19 -0400
committerWill Deacon <will.deacon@arm.com>2014-10-30 08:16:59 -0400
commita4560846eba60830a444d9e336c8a18f92e099ee (patch)
treec1f488dbcf522fd3a561f8fadf226d190d1c739d /arch/arm/kernel/perf_event_cpu.c
parent67b4305aab0fa993d91fa4c6ea2169cfb3f41c93 (diff)
arm: perf: limit size of accounting data
Commit 3fc2c83087 (ARM: perf: remove event limit from pmu_hw_events) got rid of the upper limit on the number of events an arm_pmu could handle, but introduced additional complexity and places a burden on each PMU driver to allocate accounting data somehow. So far this has not generally been useful as the only users of arm_pmu are the CPU backend and the CCI driver.

Now that the CCI driver plugs into the perf subsystem directly, we can remove some of the complexities that get in the way of supporting heterogeneous CPU PMUs.

This patch restores the original limits on pmu_hw_events fields such that the pmu_hw_events data can be allocated as a contiguous block. This will simplify dynamic pmu_hw_events allocation in later patches.

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
Reviewed-by: Stephen Boyd <sboyd@codeaurora.org>
Tested-by: Stephen Boyd <sboyd@codeaurora.org>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm/kernel/perf_event_cpu.c')
-rw-r--r--arch/arm/kernel/perf_event_cpu.c4
1 file changed, 0 insertions(+), 4 deletions(-)
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 7677d73cccc8..28d04642fa33 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -36,8 +36,6 @@
 static struct arm_pmu *cpu_pmu;
 
 static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
-static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
-static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 
 /*
@@ -172,8 +170,6 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	int cpu;
 	for_each_possible_cpu(cpu) {
 		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
-		events->events = per_cpu(hw_events, cpu);
-		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
 		per_cpu(percpu_pmu, cpu) = cpu_pmu;
 	}