Diffstat (limited to 'arch/sh/kernel/perf_event.c')
 arch/sh/kernel/perf_event.c | 159 ++++++++++++++++++++++++++++++-------------
 1 file changed, 112 insertions(+), 47 deletions(-)
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7a3dc3567258..5a4b33435650 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -59,6 +59,24 @@ static inline int sh_pmu_initialized(void)
 	return !!sh_pmu;
 }
 
+const char *perf_pmu_name(void)
+{
+	if (!sh_pmu)
+		return NULL;
+
+	return sh_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	if (!sh_pmu)
+		return 0;
+
+	return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 /*
  * Release the PMU if this is the last perf_event.
  */
@@ -206,50 +224,80 @@ again:
 	local64_add(delta, &event->count);
 }
 
-static void sh_pmu_disable(struct perf_event *event)
+static void sh_pmu_stop(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
-	clear_bit(idx, cpuc->active_mask);
-	sh_pmu->disable(hwc, idx);
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		sh_pmu->disable(hwc, idx);
+		cpuc->events[idx] = NULL;
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+		sh_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
+
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (WARN_ON_ONCE(idx == -1))
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
 
-	barrier();
+	cpuc->events[idx] = event;
+	event->hw.state = 0;
+	sh_pmu->enable(hwc, idx);
+}
 
-	sh_perf_event_update(event, &event->hw, idx);
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	sh_pmu_stop(event, PERF_EF_UPDATE);
+	__clear_bit(event->hw.idx, cpuc->used_mask);
 
 	perf_event_update_userpage(event);
 }
 
-static int sh_pmu_enable(struct perf_event *event)
+static int sh_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	int ret = -EAGAIN;
+
+	perf_pmu_disable(event->pmu);
 
-	if (test_and_set_bit(idx, cpuc->used_mask)) {
+	if (__test_and_set_bit(idx, cpuc->used_mask)) {
 		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
 		if (idx == sh_pmu->num_events)
-			return -EAGAIN;
+			goto out;
 
-		set_bit(idx, cpuc->used_mask);
+		__set_bit(idx, cpuc->used_mask);
 		hwc->idx = idx;
 	}
 
 	sh_pmu->disable(hwc, idx);
 
-	cpuc->events[idx] = event;
-	set_bit(idx, cpuc->active_mask);
-
-	sh_pmu->enable(hwc, idx);
+	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+	if (flags & PERF_EF_START)
+		sh_pmu_start(event, PERF_EF_RELOAD);
 
 	perf_event_update_userpage(event);
-
-	return 0;
+	ret = 0;
+out:
+	perf_pmu_enable(event->pmu);
+	return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
@@ -257,24 +305,56 @@ static void sh_pmu_read(struct perf_event *event)
 	sh_perf_event_update(event, &event->hw, event->hw.idx);
 }
 
-static const struct pmu pmu = {
-	.enable = sh_pmu_enable,
-	.disable = sh_pmu_disable,
-	.read = sh_pmu_read,
-};
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int sh_pmu_event_init(struct perf_event *event)
 {
-	int err = __hw_perf_event_init(event);
+	int err;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HW_CACHE:
+	case PERF_TYPE_HARDWARE:
+		err = __hw_perf_event_init(event);
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (unlikely(err)) {
 		if (event->destroy)
 			event->destroy(event);
-		return ERR_PTR(err);
 	}
 
-	return &pmu;
+	return err;
+}
+
+static void sh_pmu_enable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->enable_all();
+}
+
+static void sh_pmu_disable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->disable_all();
 }
 
+static struct pmu pmu = {
+	.pmu_enable = sh_pmu_enable,
+	.pmu_disable = sh_pmu_disable,
+	.event_init = sh_pmu_event_init,
+	.add = sh_pmu_add,
+	.del = sh_pmu_del,
+	.start = sh_pmu_start,
+	.stop = sh_pmu_stop,
+	.read = sh_pmu_read,
+};
+
 static void sh_pmu_setup(int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -299,32 +379,17 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-void hw_perf_enable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->enable_all();
-}
-
-void hw_perf_disable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->disable_all();
-}
-
-int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
+int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
 {
 	if (sh_pmu)
 		return -EBUSY;
-	sh_pmu = pmu;
+	sh_pmu = _pmu;
 
-	pr_info("Performance Events: %s support registered\n", pmu->name);
+	pr_info("Performance Events: %s support registered\n", _pmu->name);
 
-	WARN_ON(pmu->num_events > MAX_HWEVENTS);
+	WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
+	perf_pmu_register(&pmu);
 	perf_cpu_notifier(sh_pmu_notifier);
 	return 0;
 }
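
For orientation: after this conversion the core perf layer, not arch code, sequences the callbacks registered in struct pmu above. The sketch below is illustrative only, an assumption about the caller rather than part of this patch; it shows the call order implied by the PERF_EF_* flag handling in sh_pmu_add() and sh_pmu_del(). The example_* names are hypothetical.

/*
 * Illustrative sketch, not from this patch: how a caller such as the
 * core perf layer would drive the reworked callbacks for one event.
 */
static int example_sched_in(struct perf_event *event)
{
	struct pmu *p = event->pmu;
	int ret;

	perf_pmu_disable(p);	/* reaches sh_pmu_disable(): disable_all() */
	ret = p->add(event, PERF_EF_START); /* sh_pmu_add(): claim idx, then sh_pmu_start() */
	perf_pmu_enable(p);	/* reaches sh_pmu_enable(): enable_all() */

	return ret;
}

static void example_sched_out(struct perf_event *event)
{
	struct pmu *p = event->pmu;

	perf_pmu_disable(p);
	p->del(event, 0);	/* sh_pmu_del(): stop with PERF_EF_UPDATE, free idx */
	perf_pmu_enable(p);
}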