diff options
author | Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com> | 2012-07-30 07:00:02 -0400 |
---|---|---|
committer | Will Deacon <will.deacon@arm.com> | 2012-11-09 06:37:25 -0500 |
commit | ed6f2a522398c26559f4da23a80aa6195e6284c7 (patch) | |
tree | f07a2bb16e7d5b121820256b51cf22c3be9bc352 /arch/arm/kernel/perf_event.c | |
parent | 513c99ce4e64245be1f83f56039ec4891b451955 (diff) |
ARM: perf: consistently use struct perf_event in arm_pmu functions
The arm_pmu functions have wildly varied parameters which can often be
derived from struct perf_event.
This patch changes the arm_pmu function prototypes so that struct
perf_event pointers are passed in preference to fields that can be
derived from the event.
Signed-off-by: Sudeep KarkadaNagesha <Sudeep.KarkadaNagesha@arm.com>
Signed-off-by: Will Deacon <will.deacon@arm.com>
Diffstat (limited to 'arch/arm/kernel/perf_event.c')
-rw-r--r-- | arch/arm/kernel/perf_event.c | 44 |
1 file changed, 19 insertions(+), 25 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index f8406af03279..1cfa3f35713e 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c | |||
@@ -86,12 +86,10 @@ armpmu_map_event(struct perf_event *event, | |||
86 | return -ENOENT; | 86 | return -ENOENT; |
87 | } | 87 | } |
88 | 88 | ||
89 | int | 89 | int armpmu_event_set_period(struct perf_event *event) |
90 | armpmu_event_set_period(struct perf_event *event, | ||
91 | struct hw_perf_event *hwc, | ||
92 | int idx) | ||
93 | { | 90 | { |
94 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 91 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
92 | struct hw_perf_event *hwc = &event->hw; | ||
95 | s64 left = local64_read(&hwc->period_left); | 93 | s64 left = local64_read(&hwc->period_left); |
96 | s64 period = hwc->sample_period; | 94 | s64 period = hwc->sample_period; |
97 | int ret = 0; | 95 | int ret = 0; |
@@ -119,24 +117,22 @@ armpmu_event_set_period(struct perf_event *event, | |||
119 | 117 | ||
120 | local64_set(&hwc->prev_count, (u64)-left); | 118 | local64_set(&hwc->prev_count, (u64)-left); |
121 | 119 | ||
122 | armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); | 120 | armpmu->write_counter(event, (u64)(-left) & 0xffffffff); |
123 | 121 | ||
124 | perf_event_update_userpage(event); | 122 | perf_event_update_userpage(event); |
125 | 123 | ||
126 | return ret; | 124 | return ret; |
127 | } | 125 | } |
128 | 126 | ||
129 | u64 | 127 | u64 armpmu_event_update(struct perf_event *event) |
130 | armpmu_event_update(struct perf_event *event, | ||
131 | struct hw_perf_event *hwc, | ||
132 | int idx) | ||
133 | { | 128 | { |
134 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 129 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
130 | struct hw_perf_event *hwc = &event->hw; | ||
135 | u64 delta, prev_raw_count, new_raw_count; | 131 | u64 delta, prev_raw_count, new_raw_count; |
136 | 132 | ||
137 | again: | 133 | again: |
138 | prev_raw_count = local64_read(&hwc->prev_count); | 134 | prev_raw_count = local64_read(&hwc->prev_count); |
139 | new_raw_count = armpmu->read_counter(idx); | 135 | new_raw_count = armpmu->read_counter(event); |
140 | 136 | ||
141 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, | 137 | if (local64_cmpxchg(&hwc->prev_count, prev_raw_count, |
142 | new_raw_count) != prev_raw_count) | 138 | new_raw_count) != prev_raw_count) |
@@ -159,7 +155,7 @@ armpmu_read(struct perf_event *event) | |||
159 | if (hwc->idx < 0) | 155 | if (hwc->idx < 0) |
160 | return; | 156 | return; |
161 | 157 | ||
162 | armpmu_event_update(event, hwc, hwc->idx); | 158 | armpmu_event_update(event); |
163 | } | 159 | } |
164 | 160 | ||
165 | static void | 161 | static void |
@@ -173,14 +169,13 @@ armpmu_stop(struct perf_event *event, int flags) | |||
173 | * PERF_EF_UPDATE, see comments in armpmu_start(). | 169 | * PERF_EF_UPDATE, see comments in armpmu_start(). |
174 | */ | 170 | */ |
175 | if (!(hwc->state & PERF_HES_STOPPED)) { | 171 | if (!(hwc->state & PERF_HES_STOPPED)) { |
176 | armpmu->disable(hwc, hwc->idx); | 172 | armpmu->disable(event); |
177 | armpmu_event_update(event, hwc, hwc->idx); | 173 | armpmu_event_update(event); |
178 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | 174 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; |
179 | } | 175 | } |
180 | } | 176 | } |
181 | 177 | ||
182 | static void | 178 | static void armpmu_start(struct perf_event *event, int flags) |
183 | armpmu_start(struct perf_event *event, int flags) | ||
184 | { | 179 | { |
185 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 180 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
186 | struct hw_perf_event *hwc = &event->hw; | 181 | struct hw_perf_event *hwc = &event->hw; |
@@ -200,8 +195,8 @@ armpmu_start(struct perf_event *event, int flags) | |||
200 | * get an interrupt too soon or *way* too late if the overflow has | 195 | * get an interrupt too soon or *way* too late if the overflow has |
201 | * happened since disabling. | 196 | * happened since disabling. |
202 | */ | 197 | */ |
203 | armpmu_event_set_period(event, hwc, hwc->idx); | 198 | armpmu_event_set_period(event); |
204 | armpmu->enable(hwc, hwc->idx); | 199 | armpmu->enable(event); |
205 | } | 200 | } |
206 | 201 | ||
207 | static void | 202 | static void |
@@ -233,7 +228,7 @@ armpmu_add(struct perf_event *event, int flags) | |||
233 | perf_pmu_disable(event->pmu); | 228 | perf_pmu_disable(event->pmu); |
234 | 229 | ||
235 | /* If we don't have a space for the counter then finish early. */ | 230 | /* If we don't have a space for the counter then finish early. */ |
236 | idx = armpmu->get_event_idx(hw_events, hwc); | 231 | idx = armpmu->get_event_idx(hw_events, event); |
237 | if (idx < 0) { | 232 | if (idx < 0) { |
238 | err = idx; | 233 | err = idx; |
239 | goto out; | 234 | goto out; |
@@ -244,7 +239,7 @@ armpmu_add(struct perf_event *event, int flags) | |||
244 | * sure it is disabled. | 239 | * sure it is disabled. |
245 | */ | 240 | */ |
246 | event->hw.idx = idx; | 241 | event->hw.idx = idx; |
247 | armpmu->disable(hwc, idx); | 242 | armpmu->disable(event); |
248 | hw_events->events[idx] = event; | 243 | hw_events->events[idx] = event; |
249 | 244 | ||
250 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; | 245 | hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE; |
@@ -264,13 +259,12 @@ validate_event(struct pmu_hw_events *hw_events, | |||
264 | struct perf_event *event) | 259 | struct perf_event *event) |
265 | { | 260 | { |
266 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); | 261 | struct arm_pmu *armpmu = to_arm_pmu(event->pmu); |
267 | struct hw_perf_event fake_event = event->hw; | ||
268 | struct pmu *leader_pmu = event->group_leader->pmu; | 262 | struct pmu *leader_pmu = event->group_leader->pmu; |
269 | 263 | ||
270 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) | 264 | if (event->pmu != leader_pmu || event->state <= PERF_EVENT_STATE_OFF) |
271 | return 1; | 265 | return 1; |
272 | 266 | ||
273 | return armpmu->get_event_idx(hw_events, &fake_event) >= 0; | 267 | return armpmu->get_event_idx(hw_events, event) >= 0; |
274 | } | 268 | } |
275 | 269 | ||
276 | static int | 270 | static int |
@@ -316,7 +310,7 @@ static irqreturn_t armpmu_dispatch_irq(int irq, void *dev) | |||
316 | static void | 310 | static void |
317 | armpmu_release_hardware(struct arm_pmu *armpmu) | 311 | armpmu_release_hardware(struct arm_pmu *armpmu) |
318 | { | 312 | { |
319 | armpmu->free_irq(); | 313 | armpmu->free_irq(armpmu); |
320 | pm_runtime_put_sync(&armpmu->plat_device->dev); | 314 | pm_runtime_put_sync(&armpmu->plat_device->dev); |
321 | } | 315 | } |
322 | 316 | ||
@@ -330,7 +324,7 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu) | |||
330 | return -ENODEV; | 324 | return -ENODEV; |
331 | 325 | ||
332 | pm_runtime_get_sync(&pmu_device->dev); | 326 | pm_runtime_get_sync(&pmu_device->dev); |
333 | err = armpmu->request_irq(armpmu_dispatch_irq); | 327 | err = armpmu->request_irq(armpmu, armpmu_dispatch_irq); |
334 | if (err) { | 328 | if (err) { |
335 | armpmu_release_hardware(armpmu); | 329 | armpmu_release_hardware(armpmu); |
336 | return err; | 330 | return err; |
@@ -465,13 +459,13 @@ static void armpmu_enable(struct pmu *pmu) | |||
465 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); | 459 | int enabled = bitmap_weight(hw_events->used_mask, armpmu->num_events); |
466 | 460 | ||
467 | if (enabled) | 461 | if (enabled) |
468 | armpmu->start(); | 462 | armpmu->start(armpmu); |
469 | } | 463 | } |
470 | 464 | ||
471 | static void armpmu_disable(struct pmu *pmu) | 465 | static void armpmu_disable(struct pmu *pmu) |
472 | { | 466 | { |
473 | struct arm_pmu *armpmu = to_arm_pmu(pmu); | 467 | struct arm_pmu *armpmu = to_arm_pmu(pmu); |
474 | armpmu->stop(); | 468 | armpmu->stop(armpmu); |
475 | } | 469 | } |
476 | 470 | ||
477 | #ifdef CONFIG_PM_RUNTIME | 471 | #ifdef CONFIG_PM_RUNTIME |