author     Suzuki K Poulose <suzuki.poulose@arm.com>  2018-07-10 04:58:00 -0400
committer  Will Deacon <will.deacon@arm.com>          2018-07-10 13:19:02 -0400
commit     e2da97d328d4951d25f6634eda7213f7257417b6 (patch)
tree       720f1eb257839024079bd868eea080cc895be84e /drivers/perf
parent     3a95200d3f89afd8b67f39d88d36cc7ec96ce385 (diff)
arm_pmu: Add support for 64bit event counters
Each PMU has a set of 32-bit event counters, but in some special
cases events can be counted using counters that are effectively
64 bits wide. For example, Arm v8 PMUv3 has a 64-bit cycle counter
which can count only CPU cycles, and the PMU can also chain event
counters to effectively count as a single 64-bit counter.

Add support for tracking events that use 64-bit counters. This only
affects the periods set for each counter in the core driver.
Cc: Will Deacon <will.deacon@arm.com>
Reviewed-by: Julien Thierry <julien.thierry@arm.com>
Acked-by: Mark Rutland <mark.rutland@arm.com>
Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'drivers/perf')
-rw-r--r--  drivers/perf/arm_pmu.c | 16 ++++++++++------
1 file changed, 10 insertions(+), 6 deletions(-)
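
The key arithmetic in the patch below is that the value programmed into a
counter is (u64)(-left) masked to that event's maximum period, so the counter
overflows after 'left' increments whether it is 32 or 64 bits wide. As a
standalone illustration of that masking (not part of the patch; GENMASK_ULL is
redefined here only so the snippet builds in userspace):

#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Userspace stand-in for the kernel's GENMASK_ULL(h, l), illustration only. */
#define GENMASK_ULL(h, l) \
        ((~0ULL << (l)) & (~0ULL >> (63 - (h))))

int main(void)
{
        uint64_t left  = 1000;                  /* events until the next overflow */
        uint64_t max32 = GENMASK_ULL(31, 0);    /* 32-bit event counter */
        uint64_t max64 = GENMASK_ULL(63, 0);    /* 64-bit cycle/chained counter */

        /*
         * The counter is programmed with -left truncated to its width, so it
         * overflows after 'left' increments regardless of the counter size.
         */
        printf("32-bit start value: %#018" PRIx64 "\n", -left & max32);
        printf("64-bit start value: %#018" PRIx64 "\n", -left & max64);
        return 0;
}

With left = 1000, the 32-bit start value is 0xfffffc18 and the 64-bit one is
0xfffffffffffffc18; both wrap to zero after exactly 1000 increments.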
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 6ddc00da5373..8cad6b535a2c 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -28,9 +28,12 @@
 static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
 static DEFINE_PER_CPU(int, cpu_irq);
 
-static inline u64 arm_pmu_max_period(void)
+static inline u64 arm_pmu_event_max_period(struct perf_event *event)
 {
-        return (1ULL << 32) - 1;
+        if (event->hw.flags & ARMPMU_EVT_64BIT)
+                return GENMASK_ULL(63, 0);
+        else
+                return GENMASK_ULL(31, 0);
 }
 
 static int
@@ -122,7 +125,7 @@ int armpmu_event_set_period(struct perf_event *event)
         u64 max_period;
         int ret = 0;
 
-        max_period = arm_pmu_max_period();
+        max_period = arm_pmu_event_max_period(event);
         if (unlikely(left <= -period)) {
                 left = period;
                 local64_set(&hwc->period_left, left);
@@ -148,7 +151,7 @@ int armpmu_event_set_period(struct perf_event *event)
 
         local64_set(&hwc->prev_count, (u64)-left);
 
-        armpmu->write_counter(event, (u64)(-left) & 0xffffffff);
+        armpmu->write_counter(event, (u64)(-left) & max_period);
 
         perf_event_update_userpage(event);
 
@@ -160,7 +163,7 @@ u64 armpmu_event_update(struct perf_event *event)
         struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
         struct hw_perf_event *hwc = &event->hw;
         u64 delta, prev_raw_count, new_raw_count;
-        u64 max_period = arm_pmu_max_period();
+        u64 max_period = arm_pmu_event_max_period(event);
 
 again:
         prev_raw_count = local64_read(&hwc->prev_count);
@@ -368,6 +371,7 @@ __hw_perf_event_init(struct perf_event *event)
         struct hw_perf_event *hwc = &event->hw;
         int mapping;
 
+        hwc->flags = 0;
         mapping = armpmu->map_event(event);
 
         if (mapping < 0) {
@@ -410,7 +414,7 @@ __hw_perf_event_init(struct perf_event *event)
          * is far less likely to overtake the previous one unless
          * you have some serious IRQ latency issues.
          */
-        hwc->sample_period  = arm_pmu_max_period() >> 1;
+        hwc->sample_period  = arm_pmu_event_max_period(event) >> 1;
         hwc->last_period    = hwc->sample_period;
         local64_set(&hwc->period_left, hwc->sample_period);
 }
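
The diff only shows the core driver side. As a hedged sketch of how a PMU
back end might opt an event into the 64-bit path (the my_pmu_* names and
MY_PMU_CYCLE_COUNTER_EVENT are invented for illustration; only the
ARMPMU_EVT_64BIT flag and the map_event/write_counter callbacks come from
this patch and its series):

#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>         /* struct arm_pmu, ARMPMU_EVT_64BIT */

/*
 * Hypothetical back-end sketch, not part of this patch: the driver marks an
 * event as 64-bit from its ->map_event() callback, and the core driver then
 * picks the wider maximum period for it.
 */
static int my_pmu_map_event(struct perf_event *event)
{
        int hw_event = my_pmu_decode_config(event);    /* invented helper */

        /* Assumption: this PMU's CPU cycle counter is a full 64 bits wide. */
        if (hw_event == MY_PMU_CYCLE_COUNTER_EVENT)
                event->hw.flags |= ARMPMU_EVT_64BIT;

        return hw_event;
}

static void my_pmu_write_counter(struct perf_event *event, u64 value)
{
        /*
         * The core masks 'value' with arm_pmu_event_max_period(event) before
         * calling this, so a 32-bit event arrives already truncated and a
         * 64-bit event keeps its full value.
         */
        my_pmu_write_hw_counter(event->hw.idx, value);  /* invented helper */
}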
