author     Linus Torvalds <torvalds@linux-foundation.org>  2010-10-29 14:13:10 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2010-10-29 14:13:10 -0400
commit     b77d643ced576bdd1e918aebda869de74696cde9 (patch)
tree       bb5d708f4bbb48461923a28e63a8cf8bdd477dd7 /arch/mips/kernel/perf_event.c
parent     b4020c1b198c0f0c0b0ff0cfdd824a26b93edd6f (diff)
parent     64575f918f3279d8487cf670dbefa956ce16a526 (diff)
Merge branch 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus
* 'upstream' of git://git.linux-mips.org/pub/scm/upstream-linus: (46 commits)
ftrace/MIPS: Enable C Version of recordmcount
ftrace/MIPS: Add module support for C version of recordmcount
ftrace/MIPS: Add MIPS64 support for C version of recordmcount
MIPS: Make TASK_SIZE reflect proper size for both 32 and 64 bit processes.
MIPS: Allow UserLocal on MIPS_R1 processors
MIPS: Honor L2 bypass bit
MIPS: Add BMIPS CP0 register definitions
MIPS: Add BMIPS processor types to Kconfig
MIPS: Decouple BMIPS CPU support from bcm47xx/bcm63xx SoC code
MIPS: Add support for hardware performance events (mipsxx)
MIPS: Perf-events: Add callchain support
MIPS: add support for hardware performance events (skeleton)
MIPS: add support for software performance events
MIPS: define local_xchg from xchg_local to atomic_long_xchg
MIPS: AR7: Add support for Titan (TNETV10xx) SoC variant
MIPS: AR7: Initialize GPIO earlier
MIPS: Add platform device and Kconfig for Octeon USB EHCI / OHCI
USB: Add EHCI and OHCI glue for OCTEON II SOCs.
MIPS: Octeon: Add register definitions for EHCI / OHCI USB glue logic.
MIPS: Octeon: Apply CN63XXP1 errata workarounds.
...
Diffstat (limited to 'arch/mips/kernel/perf_event.c')
-rw-r--r--  arch/mips/kernel/perf_event.c  601
1 file changed, 601 insertions, 0 deletions
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 000000000000..2b7f3f703b83
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,601 @@
/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code, and the callchain
 * support is based on the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

/* These are for 32-bit counters. For 64-bit ones, define them accordingly. */
#define MAX_PERIOD ((1ULL << 32) - 1)
#define VALID_COUNT 0x7fffffff
#define TOTAL_BITS 32
#define HIGHEST_BIT 31

#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
        /* Array of events on this cpu. */
        struct perf_event *events[MIPS_MAX_HWEVENTS];

        /*
         * Each bit (indexed by the counter number) is set when the
         * counter is used for an event.
         */
        unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

        /*
         * The borrowed MSB for the performance counter. A MIPS performance
         * counter uses its bit 31 (for 32-bit counters) or bit 63 (for
         * 64-bit counters) to decide whether a counter overflow should be
         * signaled. So here we keep a separate software MSB for each
         * counter to make things easy.
         */
        unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

        /*
         * Software copy of the control register for each performance
         * counter. MIPS CPUs vary in their performance counters; they use
         * this differently, and some may not use it at all.
         */
        unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
        .saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
        unsigned int event_id;
        /*
         * MIPS performance counters are indexed starting from 0.
         * CNTR_EVEN indicates that the counters to be used for this
         * event have even indexes.
         */
        unsigned int cntr_mask;
#define CNTR_EVEN 0x55555555
#define CNTR_ODD 0xaaaaaaaa
#ifdef CONFIG_MIPS_MT_SMP
        enum {
                T = 0,
                V = 1,
                P = 2,
        } range;
#else
#define T
#define V
#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
        const char *name;
        int irq;
        irqreturn_t (*handle_irq)(int irq, void *dev);
        int (*handle_shared_irq)(void);
        void (*start)(void);
        void (*stop)(void);
        int (*alloc_counter)(struct cpu_hw_events *cpuc,
                        struct hw_perf_event *hwc);
        u64 (*read_counter)(unsigned int idx);
        void (*write_counter)(unsigned int idx, u64 val);
        void (*enable_event)(struct hw_perf_event *evt, int idx);
        void (*disable_event)(int idx);
        const struct mips_perf_event *(*map_raw_event)(u64 config);
        const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
        const struct mips_perf_event (*cache_event_map)
                                [PERF_COUNT_HW_CACHE_MAX]
                                [PERF_COUNT_HW_CACHE_OP_MAX]
                                [PERF_COUNT_HW_CACHE_RESULT_MAX];
        unsigned int num_counters;
};

static const struct mips_pmu *mipspmu;

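/*
 * Arm a counter so that it overflows after (left) more events. A MIPS
 * performance counter raises its interrupt while bit 31 (for 32-bit
 * counters) is set, so only the low 31 bits of the start value are
 * written to hardware and the "virtual" bit 31 is kept in cpuc->msbs.
 * For example, with a sample period of 0x1000 the 32-bit start value
 * is 0xfffff000: the counter is written with 0x7ffff000 and the
 * borrowed MSB is recorded in msbs, to be merged back in by
 * mipspmu_event_update().
 */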
static int
mipspmu_event_set_period(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;
        u64 uleft;
        unsigned long flags;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left > (s64)MAX_PERIOD)
                left = MAX_PERIOD;

        local64_set(&hwc->prev_count, (u64)-left);

        local_irq_save(flags);
        uleft = (u64)(-left) & MAX_PERIOD;
        uleft > VALID_COUNT ?
                set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
        mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
        local_irq_restore(flags);

        perf_event_update_userpage(event);

        return ret;
}

static int mipspmu_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        /* Look for a free counter for this event. */
        idx = mipspmu->alloc_counter(cpuc, hwc);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then
         * make sure it is disabled.
         */
        event->hw.idx = idx;
        mipspmu->disable_event(idx);
        cpuc->events[idx] = event;

        /* Set the period for the event. */
        mipspmu_event_set_period(event, hwc, idx);

        /* Enable the event. */
        mipspmu->enable_event(hwc, idx);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        return err;
}

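/*
 * Fold the current hardware count into event->count. The shift pair
 * below (shift = 64 - TOTAL_BITS = 32) sign-extends the 32-bit raw
 * values to 64 bits, so the subtraction yields the number of events
 * counted since the last update even if the counter wrapped in
 * between.
 */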
static void mipspmu_event_update(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        unsigned long flags;
        int shift = 64 - TOTAL_BITS;
        s64 prev_raw_count, new_raw_count;
        s64 delta;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        local_irq_save(flags);
        /* Make the counter value a "real" one by merging in the soft MSB. */
        new_raw_count = mipspmu->read_counter(idx);
        if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
                new_raw_count &= VALID_COUNT;
                clear_bit(idx, cpuc->msbs);
        } else
                new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
        local_irq_restore(flags);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                                new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return;
}

static void mipspmu_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        WARN_ON(idx < 0 || idx >= mipspmu->num_counters);

        /* We are working on a local event. */
        mipspmu->disable_event(idx);

        barrier();

        mipspmu_event_update(event, hwc, idx);
        cpuc->events[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);

        perf_event_update_userpage(event);
}

static void mipspmu_unthrottle(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        mipspmu->enable_event(hwc, hwc->idx);
}

static void mipspmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Don't read disabled counters! */
        if (hwc->idx < 0)
                return;

        mipspmu_event_update(event, hwc, hwc->idx);
}

static struct pmu pmu = {
        .enable		= mipspmu_enable,
        .disable	= mipspmu_disable,
        .unthrottle	= mipspmu_unthrottle,
        .read		= mipspmu_read,
};

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

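/*
 * Hook up the counter overflow interrupt. Three cases: the PMU has
 * its own interrupt line (request it), counter overflow is routed to
 * the same line as the CP0 timer (borrow the perf_irq hook and
 * restore it in mipspmu_free_irq()), or the platform never told us
 * the routing (fail with -ENOENT).
 */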
static int mipspmu_get_irq(void)
{
        int err;

        if (mipspmu->irq >= 0) {
                /* Request my own irq handler. */
                err = request_irq(mipspmu->irq, mipspmu->handle_irq,
                        IRQF_DISABLED | IRQF_NOBALANCING,
                        "mips_perf_pmu", NULL);
                if (err) {
                        pr_warning("Unable to request IRQ%d for MIPS "
                           "performance counters!\n", mipspmu->irq);
                }
        } else if (cp0_perfcount_irq < 0) {
                /*
                 * We are sharing the irq number with the timer interrupt.
                 */
                save_perf_irq = perf_irq;
                perf_irq = mipspmu->handle_shared_irq;
                err = 0;
        } else {
                pr_warning("The platform hasn't properly defined its "
                        "interrupt controller.\n");
                err = -ENOENT;
        }

        return err;
}

static void mipspmu_free_irq(void)
{
        if (mipspmu->irq >= 0)
                free_irq(mipspmu->irq, NULL);
        else if (cp0_perfcount_irq < 0)
                perf_irq = save_perf_irq;
}

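/*
 * Pack a mips_perf_event into a single config word: range in the top
 * byte (on CONFIG_MIPS_MT_SMP kernels), cntr_mask bits in the middle,
 * event_id in the lowest byte. For example, an event with event_id
 * 0x02 restricted to even counters encodes to
 * (0x55555555 & 0xffff00) | 0x02 = 0x555502.
 */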
static inline unsigned int
mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
        return ((unsigned int)pev->range << 24) |
                (pev->cntr_mask & 0xffff00) |
                (pev->event_id & 0xff);
#else
        return (pev->cntr_mask & 0xffff00) |
                (pev->event_id & 0xff);
#endif
}

static const struct mips_perf_event *
mipspmu_map_general_event(int idx)
{
        const struct mips_perf_event *pev;

        pev = ((*mipspmu->general_event_map)[idx].event_id ==
                UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
                &(*mipspmu->general_event_map)[idx]);

        return pev;
}

static const struct mips_perf_event *
mipspmu_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result;
        const struct mips_perf_event *pev;

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return ERR_PTR(-EINVAL);

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return ERR_PTR(-EINVAL);

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return ERR_PTR(-EINVAL);

        pev = &((*mipspmu->cache_event_map)
                [cache_type]
                [cache_op]
                [cache_result]);

        if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
                return ERR_PTR(-EOPNOTSUPP);

        return pev;
}

static int validate_event(struct cpu_hw_events *cpuc,
                        struct perf_event *event)
{
        struct hw_perf_event fake_hwc = event->hw;

        if (event->pmu && event->pmu != &pmu)
                return 0;

        return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
}

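/*
 * Dry-run the counter allocator against a scratch cpu_hw_events to
 * check that a whole event group can go on the PMU at once: each
 * member claims a counter in fake_cpuc, so a group needing more
 * counters than the hardware has is rejected with -ENOSPC.
 */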
static int validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct cpu_hw_events fake_cpuc;

        memset(&fake_cpuc, 0, sizeof(fake_cpuc));

        if (!validate_event(&fake_cpuc, leader))
                return -ENOSPC;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_cpuc, sibling))
                        return -ENOSPC;
        }

        if (!validate_event(&fake_cpuc, event))
                return -ENOSPC;

        return 0;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters; they each
 * have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events,
                                &pmu_reserve_mutex)) {
                /*
                 * We must not call the destroy function with interrupts
                 * disabled.
                 */
                on_each_cpu(reset_counters,
                        (void *)(long)mipspmu->num_counters, 1);
                mipspmu_free_irq();
                mutex_unlock(&pmu_reserve_mutex);
        }
}

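/*
 * active_events counts the events currently using the PMU. The first
 * event to arrive claims the overflow interrupt under
 * pmu_reserve_mutex; the last one to leave releases it again via
 * atomic_dec_and_mutex_lock() in hw_perf_event_destroy().
 */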
const struct pmu *hw_perf_event_init(struct perf_event *event)
{
        int err = 0;

        if (!mipspmu || event->cpu >= nr_cpumask_bits ||
                (event->cpu >= 0 && !cpu_online(event->cpu)))
                return ERR_PTR(-ENODEV);

        if (!atomic_inc_not_zero(&active_events)) {
                if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
                        atomic_dec(&active_events);
                        return ERR_PTR(-ENOSPC);
                }

                mutex_lock(&pmu_reserve_mutex);
                if (atomic_read(&active_events) == 0)
                        err = mipspmu_get_irq();

                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmu_reserve_mutex);
        }

        if (err)
                return ERR_PTR(err);

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err ? ERR_PTR(err) : &pmu;
}

void hw_perf_enable(void)
{
        if (mipspmu)
                mipspmu->start();
}

void hw_perf_disable(void)
{
        if (mipspmu)
                mipspmu->stop();
}

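/*
 * Overflow handling: fold in the final count for the expired period,
 * program the next sample period, then hand the sample to the perf
 * core, which may ask us to throttle the event by disabling its
 * counter.
 */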
/* This is needed by specific irq handlers in perf_event_*.c */
static void
handle_associated_event(struct cpu_hw_events *cpuc,
        int idx, struct perf_sample_data *data, struct pt_regs *regs)
{
        struct perf_event *event = cpuc->events[idx];
        struct hw_perf_event *hwc = &event->hw;

        mipspmu_event_update(event, hwc, idx);
        data->period = event->hw.last_period;
        if (!mipspmu_event_set_period(event, hwc, idx))
                return;

        if (perf_event_overflow(event, 0, data, regs))
                mipspmu->disable_event(idx);
}

#include "perf_event_mipsxx.c"

/* Callchain handling code. */
static inline void
callchain_store(struct perf_callchain_entry *entry,
                u64 ip)
{
        if (entry->nr < PERF_MAX_STACK_DEPTH)
                entry->ip[entry->nr++] = ip;
}

/*
 * Leave the userspace callchain empty for now. When we find a way to
 * trace the user stack callchains, we will add it here.
 */
static void
perf_callchain_user(struct pt_regs *regs,
                struct perf_callchain_entry *entry)
{
}

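/*
 * Fallback unwinder: scan the raw kernel stack upward from reg29
 * ($sp) and record every word that looks like a kernel text address.
 * This needs no frame information, but it can over-report, since
 * stale return addresses still sitting on the stack are picked up
 * too.
 */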
static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
        unsigned long reg29)
{
        unsigned long *sp = (unsigned long *)reg29;
        unsigned long addr;

        while (!kstack_end(sp)) {
                addr = *sp++;
                if (__kernel_text_address(addr)) {
                        callchain_store(entry, addr);
                        if (entry->nr >= PERF_MAX_STACK_DEPTH)
                                break;
                }
        }
}

static void
perf_callchain_kernel(struct pt_regs *regs,
                struct perf_callchain_entry *entry)
{
        unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
        unsigned long ra = regs->regs[31];
        unsigned long pc = regs->cp0_epc;

        callchain_store(entry, PERF_CONTEXT_KERNEL);
        if (raw_show_trace || !__kernel_text_address(pc)) {
                unsigned long stack_page =
                        (unsigned long)task_stack_page(current);
                if (stack_page && sp >= stack_page &&
                        sp <= stack_page + THREAD_SIZE - 32)
                        save_raw_perf_callchain(entry, sp);
                return;
        }
        do {
                callchain_store(entry, pc);
                if (entry->nr >= PERF_MAX_STACK_DEPTH)
                        break;
                pc = unwind_stack(current, &sp, pc, &ra);
        } while (pc);
#else
        callchain_store(entry, PERF_CONTEXT_KERNEL);
        save_raw_perf_callchain(entry, sp);
#endif
}

static void
perf_do_callchain(struct pt_regs *regs,
                struct perf_callchain_entry *entry)
{
        int is_user;

        if (!regs)
                return;

        is_user = user_mode(regs);

        if (!current || !current->pid)
                return;

        if (is_user && current->state != TASK_RUNNING)
                return;

        if (!is_user) {
                perf_callchain_kernel(regs, entry);
                if (current->mm)
                        regs = task_pt_regs(current);
                else
                        regs = NULL;
        }
        if (regs)
                perf_callchain_user(regs, entry);
}

static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);

struct perf_callchain_entry *
perf_callchain(struct pt_regs *regs)
{
        struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry);

        entry->nr = 0;
        perf_do_callchain(regs, entry);
        return entry;
}
