diff options

 arch/arm/include/asm/pmu.h       | 4 ++--
 arch/arm/kernel/perf_event.c     | 4 +---
 arch/arm/kernel/perf_event_cpu.c | 4 +---
 3 files changed, 3 insertions(+), 9 deletions(-)
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
index ff39290965af..3d7e30bc9ffb 100644
--- a/arch/arm/include/asm/pmu.h
+++ b/arch/arm/include/asm/pmu.h
@@ -68,13 +68,13 @@ struct pmu_hw_events {
 	/*
 	 * The events that are active on the PMU for the given index.
 	 */
-	struct perf_event **events;
+	struct perf_event *events[ARMPMU_MAX_HWEVENTS];
 
 	/*
 	 * A 1 bit for an index indicates that the counter is being used for
 	 * an event. A 0 means that the counter can be used.
 	 */
-	unsigned long *used_mask;
+	DECLARE_BITMAP(used_mask, ARMPMU_MAX_HWEVENTS);
 
 	/*
 	 * Hardware lock to serialize accesses to PMU registers. Needed for the
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 7ffb267fb628..864810713cfc 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -275,14 +275,12 @@ validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
 	struct pmu_hw_events fake_pmu;
-	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);
 
 	/*
 	 * Initialise the fake PMU. We only need to populate the
 	 * used_mask for the purposes of validation.
 	 */
-	memset(fake_used_mask, 0, sizeof(fake_used_mask));
-	fake_pmu.used_mask = fake_used_mask;
+	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
 
 	if (!validate_event(&fake_pmu, leader))
 		return -EINVAL;
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 7677d73cccc8..28d04642fa33 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -36,8 +36,6 @@
 static struct arm_pmu *cpu_pmu;
 
 static DEFINE_PER_CPU(struct arm_pmu *, percpu_pmu);
-static DEFINE_PER_CPU(struct perf_event * [ARMPMU_MAX_HWEVENTS], hw_events);
-static DEFINE_PER_CPU(unsigned long [BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)], used_mask);
 static DEFINE_PER_CPU(struct pmu_hw_events, cpu_hw_events);
 
 /*
@@ -172,8 +170,6 @@ static void cpu_pmu_init(struct arm_pmu *cpu_pmu)
 	int cpu;
 	for_each_possible_cpu(cpu) {
 		struct pmu_hw_events *events = &per_cpu(cpu_hw_events, cpu);
-		events->events = per_cpu(hw_events, cpu);
-		events->used_mask = per_cpu(used_mask, cpu);
 		raw_spin_lock_init(&events->pmu_lock);
 		per_cpu(percpu_pmu, cpu) = cpu_pmu;
 	}
