Diffstat (limited to 'arch/sh/kernel')
-rw-r--r--  arch/sh/kernel/perf_callchain.c |  50
-rw-r--r--  arch/sh/kernel/perf_event.c     | 159
2 files changed, 116 insertions, 93 deletions
diff --git a/arch/sh/kernel/perf_callchain.c b/arch/sh/kernel/perf_callchain.c
index a9dd3abde28e..d5ca1ef50fa9 100644
--- a/arch/sh/kernel/perf_callchain.c
+++ b/arch/sh/kernel/perf_callchain.c
@@ -14,11 +14,6 @@
 #include <asm/unwinder.h>
 #include <asm/ptrace.h>
 
-static inline void callchain_store(struct perf_callchain_entry *entry, u64 ip)
-{
-	if (entry->nr < PERF_MAX_STACK_DEPTH)
-		entry->ip[entry->nr++] = ip;
-}
 
 static void callchain_warning(void *data, char *msg)
 {
@@ -39,7 +34,7 @@ static void callchain_address(void *data, unsigned long addr, int reliable)
 	struct perf_callchain_entry *entry = data;
 
 	if (reliable)
-		callchain_store(entry, addr);
+		perf_callchain_store(entry, addr);
 }
 
 static const struct stacktrace_ops callchain_ops = {
@@ -49,47 +44,10 @@ static const struct stacktrace_ops callchain_ops = {
 	.address	= callchain_address,
 };
 
-static void
-perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
+void
+perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
 {
-	callchain_store(entry, PERF_CONTEXT_KERNEL);
-	callchain_store(entry, regs->pc);
+	perf_callchain_store(entry, regs->pc);
 
 	unwind_stack(NULL, regs, NULL, &callchain_ops, entry);
 }
-
-static void
-perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
-{
-	int is_user;
-
-	if (!regs)
-		return;
-
-	is_user = user_mode(regs);
-
-	if (is_user && current->state != TASK_RUNNING)
-		return;
-
-	/*
-	 * Only the kernel side is implemented for now.
-	 */
-	if (!is_user)
-		perf_callchain_kernel(regs, entry);
-}
-
-/*
- * No need for separate IRQ and NMI entries.
- */
-static DEFINE_PER_CPU(struct perf_callchain_entry, callchain);
-
-struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
-{
-	struct perf_callchain_entry *entry = &__get_cpu_var(callchain);
-
-	entry->nr = 0;
-
-	perf_do_callchain(regs, entry);
-
-	return entry;
-}
diff --git a/arch/sh/kernel/perf_event.c b/arch/sh/kernel/perf_event.c
index 7a3dc3567258..5a4b33435650 100644
--- a/arch/sh/kernel/perf_event.c
+++ b/arch/sh/kernel/perf_event.c
@@ -59,6 +59,24 @@ static inline int sh_pmu_initialized(void)
 	return !!sh_pmu;
 }
 
+const char *perf_pmu_name(void)
+{
+	if (!sh_pmu)
+		return NULL;
+
+	return sh_pmu->name;
+}
+EXPORT_SYMBOL_GPL(perf_pmu_name);
+
+int perf_num_counters(void)
+{
+	if (!sh_pmu)
+		return 0;
+
+	return sh_pmu->num_events;
+}
+EXPORT_SYMBOL_GPL(perf_num_counters);
+
 /*
  * Release the PMU if this is the last perf_event.
  */
@@ -206,50 +224,80 @@ again:
 	local64_add(delta, &event->count);
 }
 
-static void sh_pmu_disable(struct perf_event *event)
+static void sh_pmu_stop(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
 
-	clear_bit(idx, cpuc->active_mask);
-	sh_pmu->disable(hwc, idx);
+	if (!(event->hw.state & PERF_HES_STOPPED)) {
+		sh_pmu->disable(hwc, idx);
+		cpuc->events[idx] = NULL;
+		event->hw.state |= PERF_HES_STOPPED;
+	}
+
+	if ((flags & PERF_EF_UPDATE) && !(event->hw.state & PERF_HES_UPTODATE)) {
+		sh_perf_event_update(event, &event->hw, idx);
+		event->hw.state |= PERF_HES_UPTODATE;
+	}
+}
+
+static void sh_pmu_start(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+
+	if (WARN_ON_ONCE(idx == -1))
+		return;
+
+	if (flags & PERF_EF_RELOAD)
+		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
 
-	barrier();
+	cpuc->events[idx] = event;
+	event->hw.state = 0;
+	sh_pmu->enable(hwc, idx);
+}
 
-	sh_perf_event_update(event, &event->hw, idx);
+static void sh_pmu_del(struct perf_event *event, int flags)
+{
+	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 
-	cpuc->events[idx] = NULL;
-	clear_bit(idx, cpuc->used_mask);
+	sh_pmu_stop(event, PERF_EF_UPDATE);
+	__clear_bit(event->hw.idx, cpuc->used_mask);
 
 	perf_event_update_userpage(event);
 }
 
-static int sh_pmu_enable(struct perf_event *event)
+static int sh_pmu_add(struct perf_event *event, int flags)
 {
 	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
 	struct hw_perf_event *hwc = &event->hw;
 	int idx = hwc->idx;
+	int ret = -EAGAIN;
+
+	perf_pmu_disable(event->pmu);
 
-	if (test_and_set_bit(idx, cpuc->used_mask)) {
+	if (__test_and_set_bit(idx, cpuc->used_mask)) {
 		idx = find_first_zero_bit(cpuc->used_mask, sh_pmu->num_events);
 		if (idx == sh_pmu->num_events)
-			return -EAGAIN;
+			goto out;
 
-		set_bit(idx, cpuc->used_mask);
+		__set_bit(idx, cpuc->used_mask);
 		hwc->idx = idx;
 	}
 
 	sh_pmu->disable(hwc, idx);
 
-	cpuc->events[idx] = event;
-	set_bit(idx, cpuc->active_mask);
-
-	sh_pmu->enable(hwc, idx);
+	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+	if (flags & PERF_EF_START)
+		sh_pmu_start(event, PERF_EF_RELOAD);
 
 	perf_event_update_userpage(event);
-
-	return 0;
+	ret = 0;
+out:
+	perf_pmu_enable(event->pmu);
+	return ret;
 }
 
 static void sh_pmu_read(struct perf_event *event)
@@ -257,24 +305,56 @@ static void sh_pmu_read(struct perf_event *event)
 	sh_perf_event_update(event, &event->hw, event->hw.idx);
 }
 
-static const struct pmu pmu = {
-	.enable		= sh_pmu_enable,
-	.disable	= sh_pmu_disable,
-	.read		= sh_pmu_read,
-};
-
-const struct pmu *hw_perf_event_init(struct perf_event *event)
+static int sh_pmu_event_init(struct perf_event *event)
 {
-	int err = __hw_perf_event_init(event);
+	int err;
+
+	switch (event->attr.type) {
+	case PERF_TYPE_RAW:
+	case PERF_TYPE_HW_CACHE:
+	case PERF_TYPE_HARDWARE:
+		err = __hw_perf_event_init(event);
+		break;
+
+	default:
+		return -ENOENT;
+	}
+
 	if (unlikely(err)) {
 		if (event->destroy)
 			event->destroy(event);
-		return ERR_PTR(err);
 	}
 
-	return &pmu;
+	return err;
+}
+
+static void sh_pmu_enable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->enable_all();
+}
+
+static void sh_pmu_disable(struct pmu *pmu)
+{
+	if (!sh_pmu_initialized())
+		return;
+
+	sh_pmu->disable_all();
 }
 
+static struct pmu pmu = {
+	.pmu_enable	= sh_pmu_enable,
+	.pmu_disable	= sh_pmu_disable,
+	.event_init	= sh_pmu_event_init,
+	.add		= sh_pmu_add,
+	.del		= sh_pmu_del,
+	.start		= sh_pmu_start,
+	.stop		= sh_pmu_stop,
+	.read		= sh_pmu_read,
+};
+
 static void sh_pmu_setup(int cpu)
 {
 	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
@@ -299,32 +379,17 @@ sh_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
 	return NOTIFY_OK;
 }
 
-void hw_perf_enable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->enable_all();
-}
-
-void hw_perf_disable(void)
-{
-	if (!sh_pmu_initialized())
-		return;
-
-	sh_pmu->disable_all();
-}
-
-int __cpuinit register_sh_pmu(struct sh_pmu *pmu)
+int __cpuinit register_sh_pmu(struct sh_pmu *_pmu)
 {
 	if (sh_pmu)
 		return -EBUSY;
-	sh_pmu = pmu;
+	sh_pmu = _pmu;
 
-	pr_info("Performance Events: %s support registered\n", pmu->name);
+	pr_info("Performance Events: %s support registered\n", _pmu->name);
 
-	WARN_ON(pmu->num_events > MAX_HWEVENTS);
+	WARN_ON(_pmu->num_events > MAX_HWEVENTS);
 
+	perf_pmu_register(&pmu);
 	perf_cpu_notifier(sh_pmu_notifier);
 	return 0;
 }