about summary refs log tree commit diff stats
diff options
context:
space:
mode:
authorSuzuki K Poulose <suzuki.poulose@arm.com>2018-07-10 04:57:58 -0400
committerWill Deacon <will.deacon@arm.com>2018-07-10 13:19:02 -0400
commit8d3e994241e6bcc7ead2b918c4f15b7683afa90a (patch)
treefb2ec1748c8748c7f38b0e077daa1c3649de31c3
parent64b2f025715a68bed49fb14588c2d893dfbd00a8 (diff)
arm_pmu: Clean up maximum period handling
Each PMU defines its max_period of the counter as the maximum value that can be counted. Since all the PMU backends support 32-bit counters by default, let us remove the redundant field. No functional changes. Cc: Will Deacon <will.deacon@arm.com> Acked-by: Mark Rutland <mark.rutland@arm.com> Reviewed-by: Julien Thierry <julien.thierry@arm.com> Signed-off-by: Suzuki K Poulose <suzuki.poulose@arm.com> Signed-off-by: Will Deacon <will.deacon@arm.com>
-rw-r--r--arch/arm/kernel/perf_event_v6.c2
-rw-r--r--arch/arm/kernel/perf_event_v7.c1
-rw-r--r--arch/arm/kernel/perf_event_xscale.c2
-rw-r--r--arch/arm64/kernel/perf_event.c1
-rw-r--r--drivers/perf/arm_pmu.c16
-rw-r--r--include/linux/perf/arm_pmu.h1
6 files changed, 12 insertions, 11 deletions
diff --git a/arch/arm/kernel/perf_event_v6.c b/arch/arm/kernel/perf_event_v6.c
index be42c4f66a40..f64a6bfebcec 100644
--- a/arch/arm/kernel/perf_event_v6.c
+++ b/arch/arm/kernel/perf_event_v6.c
@@ -495,7 +495,6 @@ static void armv6pmu_init(struct arm_pmu *cpu_pmu)
495 cpu_pmu->stop = armv6pmu_stop; 495 cpu_pmu->stop = armv6pmu_stop;
496 cpu_pmu->map_event = armv6_map_event; 496 cpu_pmu->map_event = armv6_map_event;
497 cpu_pmu->num_events = 3; 497 cpu_pmu->num_events = 3;
498 cpu_pmu->max_period = (1LLU << 32) - 1;
499} 498}
500 499
501static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu) 500static int armv6_1136_pmu_init(struct arm_pmu *cpu_pmu)
@@ -546,7 +545,6 @@ static int armv6mpcore_pmu_init(struct arm_pmu *cpu_pmu)
546 cpu_pmu->stop = armv6pmu_stop; 545 cpu_pmu->stop = armv6pmu_stop;
547 cpu_pmu->map_event = armv6mpcore_map_event; 546 cpu_pmu->map_event = armv6mpcore_map_event;
548 cpu_pmu->num_events = 3; 547 cpu_pmu->num_events = 3;
549 cpu_pmu->max_period = (1LLU << 32) - 1;
550 548
551 return 0; 549 return 0;
552} 550}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 5a5116794440..2cf1ca2925c8 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -1170,7 +1170,6 @@ static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1170 cpu_pmu->start = armv7pmu_start; 1170 cpu_pmu->start = armv7pmu_start;
1171 cpu_pmu->stop = armv7pmu_stop; 1171 cpu_pmu->stop = armv7pmu_stop;
1172 cpu_pmu->reset = armv7pmu_reset; 1172 cpu_pmu->reset = armv7pmu_reset;
1173 cpu_pmu->max_period = (1LLU << 32) - 1;
1174}; 1173};
1175 1174
1176static void armv7_read_num_pmnc_events(void *info) 1175static void armv7_read_num_pmnc_events(void *info)
diff --git a/arch/arm/kernel/perf_event_xscale.c b/arch/arm/kernel/perf_event_xscale.c
index 88d1a76f5367..c4f029458b52 100644
--- a/arch/arm/kernel/perf_event_xscale.c
+++ b/arch/arm/kernel/perf_event_xscale.c
@@ -374,7 +374,6 @@ static int xscale1pmu_init(struct arm_pmu *cpu_pmu)
374 cpu_pmu->stop = xscale1pmu_stop; 374 cpu_pmu->stop = xscale1pmu_stop;
375 cpu_pmu->map_event = xscale_map_event; 375 cpu_pmu->map_event = xscale_map_event;
376 cpu_pmu->num_events = 3; 376 cpu_pmu->num_events = 3;
377 cpu_pmu->max_period = (1LLU << 32) - 1;
378 377
379 return 0; 378 return 0;
380} 379}
@@ -743,7 +742,6 @@ static int xscale2pmu_init(struct arm_pmu *cpu_pmu)
743 cpu_pmu->stop = xscale2pmu_stop; 742 cpu_pmu->stop = xscale2pmu_stop;
744 cpu_pmu->map_event = xscale_map_event; 743 cpu_pmu->map_event = xscale_map_event;
745 cpu_pmu->num_events = 5; 744 cpu_pmu->num_events = 5;
746 cpu_pmu->max_period = (1LLU << 32) - 1;
747 745
748 return 0; 746 return 0;
749} 747}
diff --git a/arch/arm64/kernel/perf_event.c b/arch/arm64/kernel/perf_event.c
index 33147aacdafd..678ecffd3724 100644
--- a/arch/arm64/kernel/perf_event.c
+++ b/arch/arm64/kernel/perf_event.c
@@ -960,7 +960,6 @@ static int armv8_pmu_init(struct arm_pmu *cpu_pmu)
960 cpu_pmu->start = armv8pmu_start, 960 cpu_pmu->start = armv8pmu_start,
961 cpu_pmu->stop = armv8pmu_stop, 961 cpu_pmu->stop = armv8pmu_stop,
962 cpu_pmu->reset = armv8pmu_reset, 962 cpu_pmu->reset = armv8pmu_reset,
963 cpu_pmu->max_period = (1LLU << 32) - 1,
964 cpu_pmu->set_event_filter = armv8pmu_set_event_filter; 963 cpu_pmu->set_event_filter = armv8pmu_set_event_filter;
965 964
966 return 0; 965 return 0;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index a6347d487635..6ddc00da5373 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -28,6 +28,11 @@
28static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu); 28static DEFINE_PER_CPU(struct arm_pmu *, cpu_armpmu);
29static DEFINE_PER_CPU(int, cpu_irq); 29static DEFINE_PER_CPU(int, cpu_irq);
30 30
31static inline u64 arm_pmu_max_period(void)
32{
33 return (1ULL << 32) - 1;
34}
35
31static int 36static int
32armpmu_map_cache_event(const unsigned (*cache_map) 37armpmu_map_cache_event(const unsigned (*cache_map)
33 [PERF_COUNT_HW_CACHE_MAX] 38 [PERF_COUNT_HW_CACHE_MAX]
@@ -114,8 +119,10 @@ int armpmu_event_set_period(struct perf_event *event)
114 struct hw_perf_event *hwc = &event->hw; 119 struct hw_perf_event *hwc = &event->hw;
115 s64 left = local64_read(&hwc->period_left); 120 s64 left = local64_read(&hwc->period_left);
116 s64 period = hwc->sample_period; 121 s64 period = hwc->sample_period;
122 u64 max_period;
117 int ret = 0; 123 int ret = 0;
118 124
125 max_period = arm_pmu_max_period();
119 if (unlikely(left <= -period)) { 126 if (unlikely(left <= -period)) {
120 left = period; 127 left = period;
121 local64_set(&hwc->period_left, left); 128 local64_set(&hwc->period_left, left);
@@ -136,8 +143,8 @@ int armpmu_event_set_period(struct perf_event *event)
136 * effect we are reducing max_period to account for 143 * effect we are reducing max_period to account for
137 * interrupt latency (and we are being very conservative). 144 * interrupt latency (and we are being very conservative).
138 */ 145 */
139 if (left > (armpmu->max_period >> 1)) 146 if (left > (max_period >> 1))
140 left = armpmu->max_period >> 1; 147 left = (max_period >> 1);
141 148
142 local64_set(&hwc->prev_count, (u64)-left); 149 local64_set(&hwc->prev_count, (u64)-left);
143 150
@@ -153,6 +160,7 @@ u64 armpmu_event_update(struct perf_event *event)
153 struct arm_pmu *armpmu = to_arm_pmu(event->pmu); 160 struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
154 struct hw_perf_event *hwc = &event->hw; 161 struct hw_perf_event *hwc = &event->hw;
155 u64 delta, prev_raw_count, new_raw_count; 162 u64 delta, prev_raw_count, new_raw_count;
163 u64 max_period = arm_pmu_max_period();
156 164
157again: 165again:
158 prev_raw_count = local64_read(&hwc->prev_count); 166 prev_raw_count = local64_read(&hwc->prev_count);
@@ -162,7 +170,7 @@ again:
162 new_raw_count) != prev_raw_count) 170 new_raw_count) != prev_raw_count)
163 goto again; 171 goto again;
164 172
165 delta = (new_raw_count - prev_raw_count) & armpmu->max_period; 173 delta = (new_raw_count - prev_raw_count) & max_period;
166 174
167 local64_add(delta, &event->count); 175 local64_add(delta, &event->count);
168 local64_sub(delta, &hwc->period_left); 176 local64_sub(delta, &hwc->period_left);
@@ -402,7 +410,7 @@ __hw_perf_event_init(struct perf_event *event)
402 * is far less likely to overtake the previous one unless 410 * is far less likely to overtake the previous one unless
403 * you have some serious IRQ latency issues. 411 * you have some serious IRQ latency issues.
404 */ 412 */
405 hwc->sample_period = armpmu->max_period >> 1; 413 hwc->sample_period = arm_pmu_max_period() >> 1;
406 hwc->last_period = hwc->sample_period; 414 hwc->last_period = hwc->sample_period;
407 local64_set(&hwc->period_left, hwc->sample_period); 415 local64_set(&hwc->period_left, hwc->sample_period);
408 } 416 }
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index ad5444491975..12c30a22fc8d 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -94,7 +94,6 @@ struct arm_pmu {
94 void (*reset)(void *); 94 void (*reset)(void *);
95 int (*map_event)(struct perf_event *event); 95 int (*map_event)(struct perf_event *event);
96 int num_events; 96 int num_events;
97 u64 max_period;
98 bool secure_access; /* 32-bit ARM only */ 97 bool secure_access; /* 32-bit ARM only */
99#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40 98#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
100 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS); 99 DECLARE_BITMAP(pmceid_bitmap, ARMV8_PMUV3_MAX_COMMON_EVENTS);