 arch/arm/Kconfig                        |   15
 arch/arm/include/asm/perf_event.h       |   31
 arch/arm/include/asm/pmu.h              |   75
 arch/arm/kernel/Makefile                |    2
 arch/arm/kernel/perf_event.c            | 2276
 arch/arm/kernel/pmu.c                   |  103
 arch/arm/mm/fault.c                     |    7
 arch/arm/oprofile/op_model_arm11_core.c |    4
 arch/arm/oprofile/op_model_arm11_core.h |    4
 arch/arm/oprofile/op_model_mpcore.c     |   42
 arch/arm/oprofile/op_model_v6.c         |   30
 arch/arm/oprofile/op_model_v7.c         |   30
 arch/arm/oprofile/op_model_v7.h         |    4
 arch/arm/oprofile/op_model_xscale.c     |   35
 14 files changed, 2594 insertions(+), 64 deletions(-)
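
Before the per-file diffs, a consumer's-eye view may help. The program below is not part of the patch: it is a minimal, hypothetical userspace sketch using the generic perf_event_open() syscall (invoked via syscall(2), since libc provides no wrapper) to count CPU cycles. With CONFIG_HW_PERF_EVENTS=y this series backs the count with the hardware cycle counter; with it disabled, perf falls back to software events.

/* Hypothetical userspace example -- not part of this patch. */
#include <linux/perf_event.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;		/* mapped by armpmu->event_map() */
	attr.config = PERF_COUNT_HW_CPU_CYCLES;	/* the cycle counter (CCNT) */
	attr.disabled = 1;			/* no exclude_* bits: ARM rejects them */

	/* measure this process on any CPU; no group leader, no flags */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");
		return 1;
	}

	ioctl(fd, PERF_EVENT_IOC_ENABLE, 0);
	/* ... workload under test ... */
	ioctl(fd, PERF_EVENT_IOC_DISABLE, 0);

	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("cycles: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}
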
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 7afc6150ac71..100b90f3778a 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -20,6 +20,8 @@ config ARM
 	select HAVE_GENERIC_DMA_COHERENT
 	select HAVE_KERNEL_GZIP
 	select HAVE_KERNEL_LZO
+	select HAVE_PERF_EVENTS
+	select PERF_USE_VMALLOC
 	help
 	  The ARM series is a line of low-power-consumption RISC chip designs
 	  licensed by ARM Ltd and targeted at embedded applications and
@@ -877,6 +879,11 @@ config XSCALE_PMU
 	depends on CPU_XSCALE && !XSCALE_PMU_TIMER
 	default y
 
+config CPU_HAS_PMU
+	depends on CPU_V6 || CPU_V7 || XSCALE_PMU
+	default y
+	bool
+
 if !MMU
 source "arch/arm/Kconfig-nommu"
 endif
@@ -1181,6 +1188,14 @@ config HIGHPTE
 	depends on HIGHMEM
 	depends on !OUTER_CACHE
 
+config HW_PERF_EVENTS
+	bool "Enable hardware performance counter support for perf events"
+	depends on PERF_EVENTS && CPU_HAS_PMU && (CPU_V6 || CPU_V7)
+	default y
+	help
+	  Enable hardware performance counter support for perf events. If
+	  disabled, perf events will use software events only.
+
 source "mm/Kconfig"
 
 config LEDS
diff --git a/arch/arm/include/asm/perf_event.h b/arch/arm/include/asm/perf_event.h
new file mode 100644
index 000000000000..49e3049aba32
--- /dev/null
+++ b/arch/arm/include/asm/perf_event.h
@@ -0,0 +1,31 @@
+/*
+ * linux/arch/arm/include/asm/perf_event.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PERF_EVENT_H__
+#define __ARM_PERF_EVENT_H__
+
+/*
+ * NOP: on *most* (read: all supported) ARM platforms, the performance
+ * counter interrupts are regular interrupts and not an NMI. This
+ * means that when we receive the interrupt we can call
+ * perf_event_do_pending() that handles all of the work with
+ * interrupts enabled.
+ */
+static inline void
+set_perf_event_pending(void)
+{
+}
+
+/* ARM performance counters start from 1 (in the cp15 accesses) so use the
+ * same indexes here for consistency. */
+#define PERF_EVENT_INDEX_OFFSET 1
+
+#endif /* __ARM_PERF_EVENT_H__ */
diff --git a/arch/arm/include/asm/pmu.h b/arch/arm/include/asm/pmu.h
new file mode 100644
index 000000000000..2829b9f981a1
--- /dev/null
+++ b/arch/arm/include/asm/pmu.h
@@ -0,0 +1,75 @@
+/*
+ * linux/arch/arm/include/asm/pmu.h
+ *
+ * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#ifndef __ARM_PMU_H__
+#define __ARM_PMU_H__
+
+#ifdef CONFIG_CPU_HAS_PMU
+
+struct pmu_irqs {
+	const int	*irqs;
+	int		num_irqs;
+};
+
+/**
+ * reserve_pmu() - reserve the hardware performance counters
+ *
+ * Reserve the hardware performance counters in the system for exclusive use.
+ * The 'struct pmu_irqs' for the system is returned on success, ERR_PTR()
+ * encoded error on failure.
+ */
+extern const struct pmu_irqs *
+reserve_pmu(void);
+
+/**
+ * release_pmu() - Relinquish control of the performance counters
+ *
+ * Release the performance counters and allow someone else to use them.
+ * Callers must have disabled the counters and released IRQs before calling
+ * this. The 'struct pmu_irqs' returned from reserve_pmu() must be passed as
+ * a cookie.
+ */
+extern int
+release_pmu(const struct pmu_irqs *irqs);
+
+/**
+ * init_pmu() - Initialise the PMU.
+ *
+ * Initialise the system ready for PMU enabling. This should typically set the
+ * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
+ * the actual hardware initialisation.
+ */
+extern int
+init_pmu(void);
+
+#else /* CONFIG_CPU_HAS_PMU */
+
+static inline const struct pmu_irqs *
+reserve_pmu(void)
+{
+	return ERR_PTR(-ENODEV);
+}
+
+static inline int
+release_pmu(const struct pmu_irqs *irqs)
+{
+	return -ENODEV;
+}
+
+static inline int
+init_pmu(void)
+{
+	return -ENODEV;
+}
+
+#endif /* CONFIG_CPU_HAS_PMU */
+
+#endif /* __ARM_PMU_H__ */
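
As a usage note (not part of the patch): the contract in the kerneldoc above mirrors what arch/arm/kernel/perf_event.c does further down. A hypothetical in-kernel client would claim the counters roughly as sketched below; the function and device names are illustrative only.

/* Hypothetical in-kernel user of the pmu.h interface -- not part of this patch. */
static const struct pmu_irqs *irqs;

static int example_claim_pmu(irq_handler_t handler)
{
	int i, err;

	irqs = reserve_pmu();		/* exclusive claim; fails if e.g. oprofile owns it */
	if (IS_ERR(irqs))
		return PTR_ERR(irqs);

	init_pmu();			/* sets IRQ affinity only; no hardware setup */

	for (i = 0; i < irqs->num_irqs; ++i) {
		err = request_irq(irqs->irqs[i], handler, IRQF_DISABLED,
				  "example-pmu", NULL);
		if (err)
			goto out_free;
	}
	return 0;

out_free:
	while (--i >= 0)
		free_irq(irqs->irqs[i], NULL);
	release_pmu(irqs);		/* hand back the cookie from reserve_pmu() */
	return err;
}
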
diff --git a/arch/arm/kernel/Makefile b/arch/arm/kernel/Makefile
index dd00f747e2ad..c76e6d2679b8 100644
--- a/arch/arm/kernel/Makefile
+++ b/arch/arm/kernel/Makefile
@@ -46,6 +46,8 @@ obj-$(CONFIG_CPU_XSCALE)	+= xscale-cp0.o
 obj-$(CONFIG_CPU_XSC3)		+= xscale-cp0.o
 obj-$(CONFIG_CPU_MOHAWK)	+= xscale-cp0.o
 obj-$(CONFIG_IWMMXT)		+= iwmmxt.o
+obj-$(CONFIG_CPU_HAS_PMU)	+= pmu.o
+obj-$(CONFIG_HW_PERF_EVENTS)	+= perf_event.o
 AFLAGS_iwmmxt.o			:= -Wa,-mcpu=iwmmxt
 
 ifneq ($(CONFIG_ARCH_EBSA110),y)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c new file mode 100644 index 000000000000..c54ceb3d1f97 --- /dev/null +++ b/arch/arm/kernel/perf_event.c | |||
| @@ -0,0 +1,2276 @@ | |||
| 1 | #undef DEBUG | ||
| 2 | |||
| 3 | /* | ||
| 4 | * ARM performance counter support. | ||
| 5 | * | ||
| 6 | * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles | ||
| 7 | * | ||
| 8 | * ARMv7 support: Jean Pihet <jpihet@mvista.com> | ||
| 9 | * 2010 (c) MontaVista Software, LLC. | ||
| 10 | * | ||
| 11 | * This code is based on the sparc64 perf event code, which is in turn based | ||
| 12 | * on the x86 code. Callchain code is based on the ARM OProfile backtrace | ||
| 13 | * code. | ||
| 14 | */ | ||
| 15 | #define pr_fmt(fmt) "hw perfevents: " fmt | ||
| 16 | |||
| 17 | #include <linux/interrupt.h> | ||
| 18 | #include <linux/kernel.h> | ||
| 19 | #include <linux/perf_event.h> | ||
| 20 | #include <linux/spinlock.h> | ||
| 21 | #include <linux/uaccess.h> | ||
| 22 | |||
| 23 | #include <asm/cputype.h> | ||
| 24 | #include <asm/irq.h> | ||
| 25 | #include <asm/irq_regs.h> | ||
| 26 | #include <asm/pmu.h> | ||
| 27 | #include <asm/stacktrace.h> | ||
| 28 | |||
| 29 | static const struct pmu_irqs *pmu_irqs; | ||
| 30 | |||
| 31 | /* | ||
| 32 | * Hardware lock to serialize accesses to PMU registers. Needed for the | ||
| 33 | * read/modify/write sequences. | ||
| 34 | */ | ||
| 35 | DEFINE_SPINLOCK(pmu_lock); | ||
| 36 | |||
| 37 | /* | ||
| 38 | * ARMv6 supports a maximum of 3 events, starting from index 1. If we add | ||
| 39 | * another platform that supports more, we need to increase this to be the | ||
| 40 | * largest of all platforms. | ||
| 41 | * | ||
| 42 | * ARMv7 supports up to 32 events: | ||
| 43 | * cycle counter CCNT + 31 events counters CNT0..30. | ||
| 44 | * Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters. | ||
| 45 | */ | ||
| 46 | #define ARMPMU_MAX_HWEVENTS 33 | ||
| 47 | |||
| 48 | /* The events for a given CPU. */ | ||
| 49 | struct cpu_hw_events { | ||
| 50 | /* | ||
| 51 | * The events that are active on the CPU for the given index. Index 0 | ||
| 52 | * is reserved. | ||
| 53 | */ | ||
| 54 | struct perf_event *events[ARMPMU_MAX_HWEVENTS]; | ||
| 55 | |||
| 56 | /* | ||
| 57 | * A 1 bit for an index indicates that the counter is being used for | ||
| 58 | * an event. A 0 means that the counter can be used. | ||
| 59 | */ | ||
| 60 | unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; | ||
| 61 | |||
| 62 | /* | ||
| 63 | * A 1 bit for an index indicates that the counter is actively being | ||
| 64 | * used. | ||
| 65 | */ | ||
| 66 | unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)]; | ||
| 67 | }; | ||
| 68 | DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events); | ||
| 69 | |||
| 70 | struct arm_pmu { | ||
| 71 | char *name; | ||
| 72 | irqreturn_t (*handle_irq)(int irq_num, void *dev); | ||
| 73 | void (*enable)(struct hw_perf_event *evt, int idx); | ||
| 74 | void (*disable)(struct hw_perf_event *evt, int idx); | ||
| 75 | int (*event_map)(int evt); | ||
| 76 | u64 (*raw_event)(u64); | ||
| 77 | int (*get_event_idx)(struct cpu_hw_events *cpuc, | ||
| 78 | struct hw_perf_event *hwc); | ||
| 79 | u32 (*read_counter)(int idx); | ||
| 80 | void (*write_counter)(int idx, u32 val); | ||
| 81 | void (*start)(void); | ||
| 82 | void (*stop)(void); | ||
| 83 | int num_events; | ||
| 84 | u64 max_period; | ||
| 85 | }; | ||
| 86 | |||
| 87 | /* Set at runtime when we know what CPU type we are. */ | ||
| 88 | static const struct arm_pmu *armpmu; | ||
| 89 | |||
| 90 | #define HW_OP_UNSUPPORTED 0xFFFF | ||
| 91 | |||
| 92 | #define C(_x) \ | ||
| 93 | PERF_COUNT_HW_CACHE_##_x | ||
| 94 | |||
| 95 | #define CACHE_OP_UNSUPPORTED 0xFFFF | ||
| 96 | |||
| 97 | static unsigned armpmu_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
| 98 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 99 | [PERF_COUNT_HW_CACHE_RESULT_MAX]; | ||
| 100 | |||
| 101 | static int | ||
| 102 | armpmu_map_cache_event(u64 config) | ||
| 103 | { | ||
| 104 | unsigned int cache_type, cache_op, cache_result, ret; | ||
| 105 | |||
| 106 | cache_type = (config >> 0) & 0xff; | ||
| 107 | if (cache_type >= PERF_COUNT_HW_CACHE_MAX) | ||
| 108 | return -EINVAL; | ||
| 109 | |||
| 110 | cache_op = (config >> 8) & 0xff; | ||
| 111 | if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX) | ||
| 112 | return -EINVAL; | ||
| 113 | |||
| 114 | cache_result = (config >> 16) & 0xff; | ||
| 115 | if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX) | ||
| 116 | return -EINVAL; | ||
| 117 | |||
| 118 | ret = (int)armpmu_perf_cache_map[cache_type][cache_op][cache_result]; | ||
| 119 | |||
| 120 | if (ret == CACHE_OP_UNSUPPORTED) | ||
| 121 | return -ENOENT; | ||
| 122 | |||
| 123 | return ret; | ||
| 124 | } | ||
| 125 | |||
| 126 | static int | ||
| 127 | armpmu_event_set_period(struct perf_event *event, | ||
| 128 | struct hw_perf_event *hwc, | ||
| 129 | int idx) | ||
| 130 | { | ||
| 131 | s64 left = atomic64_read(&hwc->period_left); | ||
| 132 | s64 period = hwc->sample_period; | ||
| 133 | int ret = 0; | ||
| 134 | |||
| 135 | if (unlikely(left <= -period)) { | ||
| 136 | left = period; | ||
| 137 | atomic64_set(&hwc->period_left, left); | ||
| 138 | hwc->last_period = period; | ||
| 139 | ret = 1; | ||
| 140 | } | ||
| 141 | |||
| 142 | if (unlikely(left <= 0)) { | ||
| 143 | left += period; | ||
| 144 | atomic64_set(&hwc->period_left, left); | ||
| 145 | hwc->last_period = period; | ||
| 146 | ret = 1; | ||
| 147 | } | ||
| 148 | |||
| 149 | if (left > (s64)armpmu->max_period) | ||
| 150 | left = armpmu->max_period; | ||
| 151 | |||
| 152 | atomic64_set(&hwc->prev_count, (u64)-left); | ||
| 153 | |||
| 154 | armpmu->write_counter(idx, (u64)(-left) & 0xffffffff); | ||
| 155 | |||
| 156 | perf_event_update_userpage(event); | ||
| 157 | |||
| 158 | return ret; | ||
| 159 | } | ||
| 160 | |||
| 161 | static u64 | ||
| 162 | armpmu_event_update(struct perf_event *event, | ||
| 163 | struct hw_perf_event *hwc, | ||
| 164 | int idx) | ||
| 165 | { | ||
| 166 | int shift = 64 - 32; | ||
| 167 | s64 prev_raw_count, new_raw_count; | ||
| 168 | s64 delta; | ||
| 169 | |||
| 170 | again: | ||
| 171 | prev_raw_count = atomic64_read(&hwc->prev_count); | ||
| 172 | new_raw_count = armpmu->read_counter(idx); | ||
| 173 | |||
| 174 | if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count, | ||
| 175 | new_raw_count) != prev_raw_count) | ||
| 176 | goto again; | ||
| 177 | |||
| 178 | delta = (new_raw_count << shift) - (prev_raw_count << shift); | ||
| 179 | delta >>= shift; | ||
| 180 | |||
| 181 | atomic64_add(delta, &event->count); | ||
| 182 | atomic64_sub(delta, &hwc->period_left); | ||
| 183 | |||
| 184 | return new_raw_count; | ||
| 185 | } | ||
| 186 | |||
| 187 | static void | ||
| 188 | armpmu_disable(struct perf_event *event) | ||
| 189 | { | ||
| 190 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
| 191 | struct hw_perf_event *hwc = &event->hw; | ||
| 192 | int idx = hwc->idx; | ||
| 193 | |||
| 194 | WARN_ON(idx < 0); | ||
| 195 | |||
| 196 | clear_bit(idx, cpuc->active_mask); | ||
| 197 | armpmu->disable(hwc, idx); | ||
| 198 | |||
| 199 | barrier(); | ||
| 200 | |||
| 201 | armpmu_event_update(event, hwc, idx); | ||
| 202 | cpuc->events[idx] = NULL; | ||
| 203 | clear_bit(idx, cpuc->used_mask); | ||
| 204 | |||
| 205 | perf_event_update_userpage(event); | ||
| 206 | } | ||
| 207 | |||
| 208 | static void | ||
| 209 | armpmu_read(struct perf_event *event) | ||
| 210 | { | ||
| 211 | struct hw_perf_event *hwc = &event->hw; | ||
| 212 | |||
| 213 | /* Don't read disabled counters! */ | ||
| 214 | if (hwc->idx < 0) | ||
| 215 | return; | ||
| 216 | |||
| 217 | armpmu_event_update(event, hwc, hwc->idx); | ||
| 218 | } | ||
| 219 | |||
| 220 | static void | ||
| 221 | armpmu_unthrottle(struct perf_event *event) | ||
| 222 | { | ||
| 223 | struct hw_perf_event *hwc = &event->hw; | ||
| 224 | |||
| 225 | /* | ||
| 226 | * Set the period again. Some counters can't be stopped, so when we | ||
| 227 | * were throttled we simply disabled the IRQ source and the counter | ||
| 228 | * may have been left counting. If we don't do this step then we may | ||
| 229 | * get an interrupt too soon or *way* too late if the overflow has | ||
| 230 | * happened since disabling. | ||
| 231 | */ | ||
| 232 | armpmu_event_set_period(event, hwc, hwc->idx); | ||
| 233 | armpmu->enable(hwc, hwc->idx); | ||
| 234 | } | ||
| 235 | |||
| 236 | static int | ||
| 237 | armpmu_enable(struct perf_event *event) | ||
| 238 | { | ||
| 239 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
| 240 | struct hw_perf_event *hwc = &event->hw; | ||
| 241 | int idx; | ||
| 242 | int err = 0; | ||
| 243 | |||
| 244 | /* If we don't have a space for the counter then finish early. */ | ||
| 245 | idx = armpmu->get_event_idx(cpuc, hwc); | ||
| 246 | if (idx < 0) { | ||
| 247 | err = idx; | ||
| 248 | goto out; | ||
| 249 | } | ||
| 250 | |||
| 251 | /* | ||
| 252 | * If there is an event in the counter we are going to use then make | ||
| 253 | * sure it is disabled. | ||
| 254 | */ | ||
| 255 | event->hw.idx = idx; | ||
| 256 | armpmu->disable(hwc, idx); | ||
| 257 | cpuc->events[idx] = event; | ||
| 258 | set_bit(idx, cpuc->active_mask); | ||
| 259 | |||
| 260 | /* Set the period for the event. */ | ||
| 261 | armpmu_event_set_period(event, hwc, idx); | ||
| 262 | |||
| 263 | /* Enable the event. */ | ||
| 264 | armpmu->enable(hwc, idx); | ||
| 265 | |||
| 266 | /* Propagate our changes to the userspace mapping. */ | ||
| 267 | perf_event_update_userpage(event); | ||
| 268 | |||
| 269 | out: | ||
| 270 | return err; | ||
| 271 | } | ||
| 272 | |||
| 273 | static struct pmu pmu = { | ||
| 274 | .enable = armpmu_enable, | ||
| 275 | .disable = armpmu_disable, | ||
| 276 | .unthrottle = armpmu_unthrottle, | ||
| 277 | .read = armpmu_read, | ||
| 278 | }; | ||
| 279 | |||
| 280 | static int | ||
| 281 | validate_event(struct cpu_hw_events *cpuc, | ||
| 282 | struct perf_event *event) | ||
| 283 | { | ||
| 284 | struct hw_perf_event fake_event = event->hw; | ||
| 285 | |||
| 286 | if (event->pmu && event->pmu != &pmu) | ||
| 287 | return 0; | ||
| 288 | |||
| 289 | return armpmu->get_event_idx(cpuc, &fake_event) >= 0; | ||
| 290 | } | ||
| 291 | |||
| 292 | static int | ||
| 293 | validate_group(struct perf_event *event) | ||
| 294 | { | ||
| 295 | struct perf_event *sibling, *leader = event->group_leader; | ||
| 296 | struct cpu_hw_events fake_pmu; | ||
| 297 | |||
| 298 | memset(&fake_pmu, 0, sizeof(fake_pmu)); | ||
| 299 | |||
| 300 | if (!validate_event(&fake_pmu, leader)) | ||
| 301 | return -ENOSPC; | ||
| 302 | |||
| 303 | list_for_each_entry(sibling, &leader->sibling_list, group_entry) { | ||
| 304 | if (!validate_event(&fake_pmu, sibling)) | ||
| 305 | return -ENOSPC; | ||
| 306 | } | ||
| 307 | |||
| 308 | if (!validate_event(&fake_pmu, event)) | ||
| 309 | return -ENOSPC; | ||
| 310 | |||
| 311 | return 0; | ||
| 312 | } | ||
| 313 | |||
| 314 | static int | ||
| 315 | armpmu_reserve_hardware(void) | ||
| 316 | { | ||
| 317 | int i; | ||
| 318 | int err; | ||
| 319 | |||
| 320 | pmu_irqs = reserve_pmu(); | ||
| 321 | if (IS_ERR(pmu_irqs)) { | ||
| 322 | pr_warning("unable to reserve pmu\n"); | ||
| 323 | return PTR_ERR(pmu_irqs); | ||
| 324 | } | ||
| 325 | |||
| 326 | init_pmu(); | ||
| 327 | |||
| 328 | if (pmu_irqs->num_irqs < 1) { | ||
| 329 | pr_err("no irqs for PMUs defined\n"); | ||
| 330 | return -ENODEV; | ||
| 331 | } | ||
| 332 | |||
| 333 | for (i = 0; i < pmu_irqs->num_irqs; ++i) { | ||
| 334 | err = request_irq(pmu_irqs->irqs[i], armpmu->handle_irq, | ||
| 335 | IRQF_DISABLED, "armpmu", NULL); | ||
| 336 | if (err) { | ||
| 337 | pr_warning("unable to request IRQ%d for ARM " | ||
| 338 | "perf counters\n", pmu_irqs->irqs[i]); | ||
| 339 | break; | ||
| 340 | } | ||
| 341 | } | ||
| 342 | |||
| 343 | if (err) { | ||
| 344 | for (i = i - 1; i >= 0; --i) | ||
| 345 | free_irq(pmu_irqs->irqs[i], NULL); | ||
| 346 | release_pmu(pmu_irqs); | ||
| 347 | pmu_irqs = NULL; | ||
| 348 | } | ||
| 349 | |||
| 350 | return err; | ||
| 351 | } | ||
| 352 | |||
| 353 | static void | ||
| 354 | armpmu_release_hardware(void) | ||
| 355 | { | ||
| 356 | int i; | ||
| 357 | |||
| 358 | for (i = pmu_irqs->num_irqs - 1; i >= 0; --i) | ||
| 359 | free_irq(pmu_irqs->irqs[i], NULL); | ||
| 360 | armpmu->stop(); | ||
| 361 | |||
| 362 | release_pmu(pmu_irqs); | ||
| 363 | pmu_irqs = NULL; | ||
| 364 | } | ||
| 365 | |||
| 366 | static atomic_t active_events = ATOMIC_INIT(0); | ||
| 367 | static DEFINE_MUTEX(pmu_reserve_mutex); | ||
| 368 | |||
| 369 | static void | ||
| 370 | hw_perf_event_destroy(struct perf_event *event) | ||
| 371 | { | ||
| 372 | if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) { | ||
| 373 | armpmu_release_hardware(); | ||
| 374 | mutex_unlock(&pmu_reserve_mutex); | ||
| 375 | } | ||
| 376 | } | ||
| 377 | |||
| 378 | static int | ||
| 379 | __hw_perf_event_init(struct perf_event *event) | ||
| 380 | { | ||
| 381 | struct hw_perf_event *hwc = &event->hw; | ||
| 382 | int mapping, err; | ||
| 383 | |||
| 384 | /* Decode the generic type into an ARM event identifier. */ | ||
| 385 | if (PERF_TYPE_HARDWARE == event->attr.type) { | ||
| 386 | mapping = armpmu->event_map(event->attr.config); | ||
| 387 | } else if (PERF_TYPE_HW_CACHE == event->attr.type) { | ||
| 388 | mapping = armpmu_map_cache_event(event->attr.config); | ||
| 389 | } else if (PERF_TYPE_RAW == event->attr.type) { | ||
| 390 | mapping = armpmu->raw_event(event->attr.config); | ||
| 391 | } else { | ||
| 392 | pr_debug("event type %x not supported\n", event->attr.type); | ||
| 393 | return -EOPNOTSUPP; | ||
| 394 | } | ||
| 395 | |||
| 396 | if (mapping < 0) { | ||
| 397 | pr_debug("event %x:%llx not supported\n", event->attr.type, | ||
| 398 | event->attr.config); | ||
| 399 | return mapping; | ||
| 400 | } | ||
| 401 | |||
| 402 | /* | ||
| 403 | * Check whether we need to exclude the counter from certain modes. | ||
| 404 | * The ARM performance counters are on all of the time so if someone | ||
| 405 | * has asked us for some excludes then we have to fail. | ||
| 406 | */ | ||
| 407 | if (event->attr.exclude_kernel || event->attr.exclude_user || | ||
| 408 | event->attr.exclude_hv || event->attr.exclude_idle) { | ||
| 409 | pr_debug("ARM performance counters do not support " | ||
| 410 | "mode exclusion\n"); | ||
| 411 | return -EPERM; | ||
| 412 | } | ||
| 413 | |||
| 414 | /* | ||
| 415 | * We don't assign an index until we actually place the event onto | ||
| 416 | * hardware. Use -1 to signify that we haven't decided where to put it | ||
| 417 | * yet. For SMP systems, each core has it's own PMU so we can't do any | ||
| 418 | * clever allocation or constraints checking at this point. | ||
| 419 | */ | ||
| 420 | hwc->idx = -1; | ||
| 421 | |||
| 422 | /* | ||
| 423 | * Store the event encoding into the config_base field. config and | ||
| 424 | * event_base are unused as the only 2 things we need to know are | ||
| 425 | * the event mapping and the counter to use. The counter to use is | ||
| 426 | * also the indx and the config_base is the event type. | ||
| 427 | */ | ||
| 428 | hwc->config_base = (unsigned long)mapping; | ||
| 429 | hwc->config = 0; | ||
| 430 | hwc->event_base = 0; | ||
| 431 | |||
| 432 | if (!hwc->sample_period) { | ||
| 433 | hwc->sample_period = armpmu->max_period; | ||
| 434 | hwc->last_period = hwc->sample_period; | ||
| 435 | atomic64_set(&hwc->period_left, hwc->sample_period); | ||
| 436 | } | ||
| 437 | |||
| 438 | err = 0; | ||
| 439 | if (event->group_leader != event) { | ||
| 440 | err = validate_group(event); | ||
| 441 | if (err) | ||
| 442 | return -EINVAL; | ||
| 443 | } | ||
| 444 | |||
| 445 | return err; | ||
| 446 | } | ||
| 447 | |||
| 448 | const struct pmu * | ||
| 449 | hw_perf_event_init(struct perf_event *event) | ||
| 450 | { | ||
| 451 | int err = 0; | ||
| 452 | |||
| 453 | if (!armpmu) | ||
| 454 | return ERR_PTR(-ENODEV); | ||
| 455 | |||
| 456 | event->destroy = hw_perf_event_destroy; | ||
| 457 | |||
| 458 | if (!atomic_inc_not_zero(&active_events)) { | ||
| 459 | if (atomic_read(&active_events) > perf_max_events) { | ||
| 460 | atomic_dec(&active_events); | ||
| 461 | return ERR_PTR(-ENOSPC); | ||
| 462 | } | ||
| 463 | |||
| 464 | mutex_lock(&pmu_reserve_mutex); | ||
| 465 | if (atomic_read(&active_events) == 0) { | ||
| 466 | err = armpmu_reserve_hardware(); | ||
| 467 | } | ||
| 468 | |||
| 469 | if (!err) | ||
| 470 | atomic_inc(&active_events); | ||
| 471 | mutex_unlock(&pmu_reserve_mutex); | ||
| 472 | } | ||
| 473 | |||
| 474 | if (err) | ||
| 475 | return ERR_PTR(err); | ||
| 476 | |||
| 477 | err = __hw_perf_event_init(event); | ||
| 478 | if (err) | ||
| 479 | hw_perf_event_destroy(event); | ||
| 480 | |||
| 481 | return err ? ERR_PTR(err) : &pmu; | ||
| 482 | } | ||
| 483 | |||
| 484 | void | ||
| 485 | hw_perf_enable(void) | ||
| 486 | { | ||
| 487 | /* Enable all of the perf events on hardware. */ | ||
| 488 | int idx; | ||
| 489 | struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events); | ||
| 490 | |||
| 491 | if (!armpmu) | ||
| 492 | return; | ||
| 493 | |||
| 494 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
| 495 | struct perf_event *event = cpuc->events[idx]; | ||
| 496 | |||
| 497 | if (!event) | ||
| 498 | continue; | ||
| 499 | |||
| 500 | armpmu->enable(&event->hw, idx); | ||
| 501 | } | ||
| 502 | |||
| 503 | armpmu->start(); | ||
| 504 | } | ||
| 505 | |||
| 506 | void | ||
| 507 | hw_perf_disable(void) | ||
| 508 | { | ||
| 509 | if (armpmu) | ||
| 510 | armpmu->stop(); | ||
| 511 | } | ||
| 512 | |||
| 513 | /* | ||
| 514 | * ARMv6 Performance counter handling code. | ||
| 515 | * | ||
| 516 | * ARMv6 has 2 configurable performance counters and a single cycle counter. | ||
| 517 | * They all share a single reset bit but can be written to zero so we can use | ||
| 518 | * that for a reset. | ||
| 519 | * | ||
| 520 | * The counters can't be individually enabled or disabled so when we remove | ||
| 521 | * one event and replace it with another we could get spurious counts from the | ||
| 522 | * wrong event. However, we can take advantage of the fact that the | ||
| 523 | * performance counters can export events to the event bus, and the event bus | ||
| 524 | * itself can be monitored. This requires that we *don't* export the events to | ||
| 525 | * the event bus. The procedure for disabling a configurable counter is: | ||
| 526 | * - change the counter to count the ETMEXTOUT[0] signal (0x20). This | ||
| 527 | * effectively stops the counter from counting. | ||
| 528 | * - disable the counter's interrupt generation (each counter has it's | ||
| 529 | * own interrupt enable bit). | ||
| 530 | * Once stopped, the counter value can be written as 0 to reset. | ||
| 531 | * | ||
| 532 | * To enable a counter: | ||
| 533 | * - enable the counter's interrupt generation. | ||
| 534 | * - set the new event type. | ||
| 535 | * | ||
| 536 | * Note: the dedicated cycle counter only counts cycles and can't be | ||
| 537 | * enabled/disabled independently of the others. When we want to disable the | ||
| 538 | * cycle counter, we have to just disable the interrupt reporting and start | ||
| 539 | * ignoring that counter. When re-enabling, we have to reset the value and | ||
| 540 | * enable the interrupt. | ||
| 541 | */ | ||
| 542 | |||
| 543 | enum armv6_perf_types { | ||
| 544 | ARMV6_PERFCTR_ICACHE_MISS = 0x0, | ||
| 545 | ARMV6_PERFCTR_IBUF_STALL = 0x1, | ||
| 546 | ARMV6_PERFCTR_DDEP_STALL = 0x2, | ||
| 547 | ARMV6_PERFCTR_ITLB_MISS = 0x3, | ||
| 548 | ARMV6_PERFCTR_DTLB_MISS = 0x4, | ||
| 549 | ARMV6_PERFCTR_BR_EXEC = 0x5, | ||
| 550 | ARMV6_PERFCTR_BR_MISPREDICT = 0x6, | ||
| 551 | ARMV6_PERFCTR_INSTR_EXEC = 0x7, | ||
| 552 | ARMV6_PERFCTR_DCACHE_HIT = 0x9, | ||
| 553 | ARMV6_PERFCTR_DCACHE_ACCESS = 0xA, | ||
| 554 | ARMV6_PERFCTR_DCACHE_MISS = 0xB, | ||
| 555 | ARMV6_PERFCTR_DCACHE_WBACK = 0xC, | ||
| 556 | ARMV6_PERFCTR_SW_PC_CHANGE = 0xD, | ||
| 557 | ARMV6_PERFCTR_MAIN_TLB_MISS = 0xF, | ||
| 558 | ARMV6_PERFCTR_EXPL_D_ACCESS = 0x10, | ||
| 559 | ARMV6_PERFCTR_LSU_FULL_STALL = 0x11, | ||
| 560 | ARMV6_PERFCTR_WBUF_DRAINED = 0x12, | ||
| 561 | ARMV6_PERFCTR_CPU_CYCLES = 0xFF, | ||
| 562 | ARMV6_PERFCTR_NOP = 0x20, | ||
| 563 | }; | ||
| 564 | |||
| 565 | enum armv6_counters { | ||
| 566 | ARMV6_CYCLE_COUNTER = 1, | ||
| 567 | ARMV6_COUNTER0, | ||
| 568 | ARMV6_COUNTER1, | ||
| 569 | }; | ||
| 570 | |||
| 571 | /* | ||
| 572 | * The hardware events that we support. We do support cache operations but | ||
| 573 | * we have harvard caches and no way to combine instruction and data | ||
| 574 | * accesses/misses in hardware. | ||
| 575 | */ | ||
| 576 | static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = { | ||
| 577 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV6_PERFCTR_CPU_CYCLES, | ||
| 578 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6_PERFCTR_INSTR_EXEC, | ||
| 579 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
| 580 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
| 581 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC, | ||
| 582 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6_PERFCTR_BR_MISPREDICT, | ||
| 583 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
| 584 | }; | ||
| 585 | |||
| 586 | static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
| 587 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 588 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
| 589 | [C(L1D)] = { | ||
| 590 | /* | ||
| 591 | * The performance counters don't differentiate between read | ||
| 592 | * and write accesses/misses so this isn't strictly correct, | ||
| 593 | * but it's the best we can do. Writes and reads get | ||
| 594 | * combined. | ||
| 595 | */ | ||
| 596 | [C(OP_READ)] = { | ||
| 597 | [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, | ||
| 598 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, | ||
| 599 | }, | ||
| 600 | [C(OP_WRITE)] = { | ||
| 601 | [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS, | ||
| 602 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DCACHE_MISS, | ||
| 603 | }, | ||
| 604 | [C(OP_PREFETCH)] = { | ||
| 605 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 606 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 607 | }, | ||
| 608 | }, | ||
| 609 | [C(L1I)] = { | ||
| 610 | [C(OP_READ)] = { | ||
| 611 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 612 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, | ||
| 613 | }, | ||
| 614 | [C(OP_WRITE)] = { | ||
| 615 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 616 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ICACHE_MISS, | ||
| 617 | }, | ||
| 618 | [C(OP_PREFETCH)] = { | ||
| 619 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 620 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 621 | }, | ||
| 622 | }, | ||
| 623 | [C(LL)] = { | ||
| 624 | [C(OP_READ)] = { | ||
| 625 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 626 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 627 | }, | ||
| 628 | [C(OP_WRITE)] = { | ||
| 629 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 630 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 631 | }, | ||
| 632 | [C(OP_PREFETCH)] = { | ||
| 633 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 634 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 635 | }, | ||
| 636 | }, | ||
| 637 | [C(DTLB)] = { | ||
| 638 | /* | ||
| 639 | * The ARM performance counters can count micro DTLB misses, | ||
| 640 | * micro ITLB misses and main TLB misses. There isn't an event | ||
| 641 | * for TLB misses, so use the micro misses here and if users | ||
| 642 | * want the main TLB misses they can use a raw counter. | ||
| 643 | */ | ||
| 644 | [C(OP_READ)] = { | ||
| 645 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 646 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, | ||
| 647 | }, | ||
| 648 | [C(OP_WRITE)] = { | ||
| 649 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 650 | [C(RESULT_MISS)] = ARMV6_PERFCTR_DTLB_MISS, | ||
| 651 | }, | ||
| 652 | [C(OP_PREFETCH)] = { | ||
| 653 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 654 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 655 | }, | ||
| 656 | }, | ||
| 657 | [C(ITLB)] = { | ||
| 658 | [C(OP_READ)] = { | ||
| 659 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 660 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, | ||
| 661 | }, | ||
| 662 | [C(OP_WRITE)] = { | ||
| 663 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 664 | [C(RESULT_MISS)] = ARMV6_PERFCTR_ITLB_MISS, | ||
| 665 | }, | ||
| 666 | [C(OP_PREFETCH)] = { | ||
| 667 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 668 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 669 | }, | ||
| 670 | }, | ||
| 671 | [C(BPU)] = { | ||
| 672 | [C(OP_READ)] = { | ||
| 673 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 674 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 675 | }, | ||
| 676 | [C(OP_WRITE)] = { | ||
| 677 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 678 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 679 | }, | ||
| 680 | [C(OP_PREFETCH)] = { | ||
| 681 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 682 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 683 | }, | ||
| 684 | }, | ||
| 685 | }; | ||
| 686 | |||
| 687 | enum armv6mpcore_perf_types { | ||
| 688 | ARMV6MPCORE_PERFCTR_ICACHE_MISS = 0x0, | ||
| 689 | ARMV6MPCORE_PERFCTR_IBUF_STALL = 0x1, | ||
| 690 | ARMV6MPCORE_PERFCTR_DDEP_STALL = 0x2, | ||
| 691 | ARMV6MPCORE_PERFCTR_ITLB_MISS = 0x3, | ||
| 692 | ARMV6MPCORE_PERFCTR_DTLB_MISS = 0x4, | ||
| 693 | ARMV6MPCORE_PERFCTR_BR_EXEC = 0x5, | ||
| 694 | ARMV6MPCORE_PERFCTR_BR_NOTPREDICT = 0x6, | ||
| 695 | ARMV6MPCORE_PERFCTR_BR_MISPREDICT = 0x7, | ||
| 696 | ARMV6MPCORE_PERFCTR_INSTR_EXEC = 0x8, | ||
| 697 | ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA, | ||
| 698 | ARMV6MPCORE_PERFCTR_DCACHE_RDMISS = 0xB, | ||
| 699 | ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC, | ||
| 700 | ARMV6MPCORE_PERFCTR_DCACHE_WRMISS = 0xD, | ||
| 701 | ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE, | ||
| 702 | ARMV6MPCORE_PERFCTR_SW_PC_CHANGE = 0xF, | ||
| 703 | ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS = 0x10, | ||
| 704 | ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11, | ||
| 705 | ARMV6MPCORE_PERFCTR_LSU_FULL_STALL = 0x12, | ||
| 706 | ARMV6MPCORE_PERFCTR_WBUF_DRAINED = 0x13, | ||
| 707 | ARMV6MPCORE_PERFCTR_CPU_CYCLES = 0xFF, | ||
| 708 | }; | ||
| 709 | |||
| 710 | /* | ||
| 711 | * The hardware events that we support. We do support cache operations but | ||
| 712 | * we have harvard caches and no way to combine instruction and data | ||
| 713 | * accesses/misses in hardware. | ||
| 714 | */ | ||
| 715 | static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = { | ||
| 716 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV6MPCORE_PERFCTR_CPU_CYCLES, | ||
| 717 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_INSTR_EXEC, | ||
| 718 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
| 719 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
| 720 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC, | ||
| 721 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV6MPCORE_PERFCTR_BR_MISPREDICT, | ||
| 722 | [PERF_COUNT_HW_BUS_CYCLES] = HW_OP_UNSUPPORTED, | ||
| 723 | }; | ||
| 724 | |||
| 725 | static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
| 726 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 727 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
| 728 | [C(L1D)] = { | ||
| 729 | [C(OP_READ)] = { | ||
| 730 | [C(RESULT_ACCESS)] = | ||
| 731 | ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS, | ||
| 732 | [C(RESULT_MISS)] = | ||
| 733 | ARMV6MPCORE_PERFCTR_DCACHE_RDMISS, | ||
| 734 | }, | ||
| 735 | [C(OP_WRITE)] = { | ||
| 736 | [C(RESULT_ACCESS)] = | ||
| 737 | ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS, | ||
| 738 | [C(RESULT_MISS)] = | ||
| 739 | ARMV6MPCORE_PERFCTR_DCACHE_WRMISS, | ||
| 740 | }, | ||
| 741 | [C(OP_PREFETCH)] = { | ||
| 742 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 743 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 744 | }, | ||
| 745 | }, | ||
| 746 | [C(L1I)] = { | ||
| 747 | [C(OP_READ)] = { | ||
| 748 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 749 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, | ||
| 750 | }, | ||
| 751 | [C(OP_WRITE)] = { | ||
| 752 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 753 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ICACHE_MISS, | ||
| 754 | }, | ||
| 755 | [C(OP_PREFETCH)] = { | ||
| 756 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 757 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 758 | }, | ||
| 759 | }, | ||
| 760 | [C(LL)] = { | ||
| 761 | [C(OP_READ)] = { | ||
| 762 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 763 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 764 | }, | ||
| 765 | [C(OP_WRITE)] = { | ||
| 766 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 767 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 768 | }, | ||
| 769 | [C(OP_PREFETCH)] = { | ||
| 770 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 771 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 772 | }, | ||
| 773 | }, | ||
| 774 | [C(DTLB)] = { | ||
| 775 | /* | ||
| 776 | * The ARM performance counters can count micro DTLB misses, | ||
| 777 | * micro ITLB misses and main TLB misses. There isn't an event | ||
| 778 | * for TLB misses, so use the micro misses here and if users | ||
| 779 | * want the main TLB misses they can use a raw counter. | ||
| 780 | */ | ||
| 781 | [C(OP_READ)] = { | ||
| 782 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 783 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, | ||
| 784 | }, | ||
| 785 | [C(OP_WRITE)] = { | ||
| 786 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 787 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_DTLB_MISS, | ||
| 788 | }, | ||
| 789 | [C(OP_PREFETCH)] = { | ||
| 790 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 791 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 792 | }, | ||
| 793 | }, | ||
| 794 | [C(ITLB)] = { | ||
| 795 | [C(OP_READ)] = { | ||
| 796 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 797 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, | ||
| 798 | }, | ||
| 799 | [C(OP_WRITE)] = { | ||
| 800 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 801 | [C(RESULT_MISS)] = ARMV6MPCORE_PERFCTR_ITLB_MISS, | ||
| 802 | }, | ||
| 803 | [C(OP_PREFETCH)] = { | ||
| 804 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 805 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 806 | }, | ||
| 807 | }, | ||
| 808 | [C(BPU)] = { | ||
| 809 | [C(OP_READ)] = { | ||
| 810 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 811 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 812 | }, | ||
| 813 | [C(OP_WRITE)] = { | ||
| 814 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 815 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 816 | }, | ||
| 817 | [C(OP_PREFETCH)] = { | ||
| 818 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 819 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 820 | }, | ||
| 821 | }, | ||
| 822 | }; | ||
| 823 | |||
| 824 | static inline unsigned long | ||
| 825 | armv6_pmcr_read(void) | ||
| 826 | { | ||
| 827 | u32 val; | ||
| 828 | asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val)); | ||
| 829 | return val; | ||
| 830 | } | ||
| 831 | |||
| 832 | static inline void | ||
| 833 | armv6_pmcr_write(unsigned long val) | ||
| 834 | { | ||
| 835 | asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val)); | ||
| 836 | } | ||
| 837 | |||
| 838 | #define ARMV6_PMCR_ENABLE (1 << 0) | ||
| 839 | #define ARMV6_PMCR_CTR01_RESET (1 << 1) | ||
| 840 | #define ARMV6_PMCR_CCOUNT_RESET (1 << 2) | ||
| 841 | #define ARMV6_PMCR_CCOUNT_DIV (1 << 3) | ||
| 842 | #define ARMV6_PMCR_COUNT0_IEN (1 << 4) | ||
| 843 | #define ARMV6_PMCR_COUNT1_IEN (1 << 5) | ||
| 844 | #define ARMV6_PMCR_CCOUNT_IEN (1 << 6) | ||
| 845 | #define ARMV6_PMCR_COUNT0_OVERFLOW (1 << 8) | ||
| 846 | #define ARMV6_PMCR_COUNT1_OVERFLOW (1 << 9) | ||
| 847 | #define ARMV6_PMCR_CCOUNT_OVERFLOW (1 << 10) | ||
| 848 | #define ARMV6_PMCR_EVT_COUNT0_SHIFT 20 | ||
| 849 | #define ARMV6_PMCR_EVT_COUNT0_MASK (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT) | ||
| 850 | #define ARMV6_PMCR_EVT_COUNT1_SHIFT 12 | ||
| 851 | #define ARMV6_PMCR_EVT_COUNT1_MASK (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT) | ||
| 852 | |||
| 853 | #define ARMV6_PMCR_OVERFLOWED_MASK \ | ||
| 854 | (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \ | ||
| 855 | ARMV6_PMCR_CCOUNT_OVERFLOW) | ||
| 856 | |||
| 857 | static inline int | ||
| 858 | armv6_pmcr_has_overflowed(unsigned long pmcr) | ||
| 859 | { | ||
| 860 | return (pmcr & ARMV6_PMCR_OVERFLOWED_MASK); | ||
| 861 | } | ||
| 862 | |||
| 863 | static inline int | ||
| 864 | armv6_pmcr_counter_has_overflowed(unsigned long pmcr, | ||
| 865 | enum armv6_counters counter) | ||
| 866 | { | ||
| 867 | int ret = 0; | ||
| 868 | |||
| 869 | if (ARMV6_CYCLE_COUNTER == counter) | ||
| 870 | ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW; | ||
| 871 | else if (ARMV6_COUNTER0 == counter) | ||
| 872 | ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW; | ||
| 873 | else if (ARMV6_COUNTER1 == counter) | ||
| 874 | ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW; | ||
| 875 | else | ||
| 876 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
| 877 | |||
| 878 | return ret; | ||
| 879 | } | ||
| 880 | |||
| 881 | static inline u32 | ||
| 882 | armv6pmu_read_counter(int counter) | ||
| 883 | { | ||
| 884 | unsigned long value = 0; | ||
| 885 | |||
| 886 | if (ARMV6_CYCLE_COUNTER == counter) | ||
| 887 | asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value)); | ||
| 888 | else if (ARMV6_COUNTER0 == counter) | ||
| 889 | asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value)); | ||
| 890 | else if (ARMV6_COUNTER1 == counter) | ||
| 891 | asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value)); | ||
| 892 | else | ||
| 893 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
| 894 | |||
| 895 | return value; | ||
| 896 | } | ||
| 897 | |||
| 898 | static inline void | ||
| 899 | armv6pmu_write_counter(int counter, | ||
| 900 | u32 value) | ||
| 901 | { | ||
| 902 | if (ARMV6_CYCLE_COUNTER == counter) | ||
| 903 | asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value)); | ||
| 904 | else if (ARMV6_COUNTER0 == counter) | ||
| 905 | asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value)); | ||
| 906 | else if (ARMV6_COUNTER1 == counter) | ||
| 907 | asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value)); | ||
| 908 | else | ||
| 909 | WARN_ONCE(1, "invalid counter number (%d)\n", counter); | ||
| 910 | } | ||
| 911 | |||
| 912 | void | ||
| 913 | armv6pmu_enable_event(struct hw_perf_event *hwc, | ||
| 914 | int idx) | ||
| 915 | { | ||
| 916 | unsigned long val, mask, evt, flags; | ||
| 917 | |||
| 918 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
| 919 | mask = 0; | ||
| 920 | evt = ARMV6_PMCR_CCOUNT_IEN; | ||
| 921 | } else if (ARMV6_COUNTER0 == idx) { | ||
| 922 | mask = ARMV6_PMCR_EVT_COUNT0_MASK; | ||
| 923 | evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) | | ||
| 924 | ARMV6_PMCR_COUNT0_IEN; | ||
| 925 | } else if (ARMV6_COUNTER1 == idx) { | ||
| 926 | mask = ARMV6_PMCR_EVT_COUNT1_MASK; | ||
| 927 | evt = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) | | ||
| 928 | ARMV6_PMCR_COUNT1_IEN; | ||
| 929 | } else { | ||
| 930 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
| 931 | return; | ||
| 932 | } | ||
| 933 | |||
| 934 | /* | ||
| 935 | * Mask out the current event and set the counter to count the event | ||
| 936 | * that we're interested in. | ||
| 937 | */ | ||
| 938 | spin_lock_irqsave(&pmu_lock, flags); | ||
| 939 | val = armv6_pmcr_read(); | ||
| 940 | val &= ~mask; | ||
| 941 | val |= evt; | ||
| 942 | armv6_pmcr_write(val); | ||
| 943 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
| 944 | } | ||
| 945 | |||
| 946 | static irqreturn_t | ||
| 947 | armv6pmu_handle_irq(int irq_num, | ||
| 948 | void *dev) | ||
| 949 | { | ||
| 950 | unsigned long pmcr = armv6_pmcr_read(); | ||
| 951 | struct perf_sample_data data; | ||
| 952 | struct cpu_hw_events *cpuc; | ||
| 953 | struct pt_regs *regs; | ||
| 954 | int idx; | ||
| 955 | |||
| 956 | if (!armv6_pmcr_has_overflowed(pmcr)) | ||
| 957 | return IRQ_NONE; | ||
| 958 | |||
| 959 | regs = get_irq_regs(); | ||
| 960 | |||
| 961 | /* | ||
| 962 | * The interrupts are cleared by writing the overflow flags back to | ||
| 963 | * the control register. All of the other bits don't have any effect | ||
| 964 | * if they are rewritten, so write the whole value back. | ||
| 965 | */ | ||
| 966 | armv6_pmcr_write(pmcr); | ||
| 967 | |||
| 968 | data.addr = 0; | ||
| 969 | |||
| 970 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
| 971 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
| 972 | struct perf_event *event = cpuc->events[idx]; | ||
| 973 | struct hw_perf_event *hwc; | ||
| 974 | |||
| 975 | if (!test_bit(idx, cpuc->active_mask)) | ||
| 976 | continue; | ||
| 977 | |||
| 978 | /* | ||
| 979 | * We have a single interrupt for all counters. Check that | ||
| 980 | * each counter has overflowed before we process it. | ||
| 981 | */ | ||
| 982 | if (!armv6_pmcr_counter_has_overflowed(pmcr, idx)) | ||
| 983 | continue; | ||
| 984 | |||
| 985 | hwc = &event->hw; | ||
| 986 | armpmu_event_update(event, hwc, idx); | ||
| 987 | data.period = event->hw.last_period; | ||
| 988 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
| 989 | continue; | ||
| 990 | |||
| 991 | if (perf_event_overflow(event, 0, &data, regs)) | ||
| 992 | armpmu->disable(hwc, idx); | ||
| 993 | } | ||
| 994 | |||
| 995 | /* | ||
| 996 | * Handle the pending perf events. | ||
| 997 | * | ||
| 998 | * Note: this call *must* be run with interrupts enabled. For | ||
| 999 | * platforms that can have the PMU interrupts raised as a PMI, this | ||
| 1000 | * will not work. | ||
| 1001 | */ | ||
| 1002 | perf_event_do_pending(); | ||
| 1003 | |||
| 1004 | return IRQ_HANDLED; | ||
| 1005 | } | ||
| 1006 | |||
| 1007 | static void | ||
| 1008 | armv6pmu_start(void) | ||
| 1009 | { | ||
| 1010 | unsigned long flags, val; | ||
| 1011 | |||
| 1012 | spin_lock_irqsave(&pmu_lock, flags); | ||
| 1013 | val = armv6_pmcr_read(); | ||
| 1014 | val |= ARMV6_PMCR_ENABLE; | ||
| 1015 | armv6_pmcr_write(val); | ||
| 1016 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
| 1017 | } | ||
| 1018 | |||
| 1019 | void | ||
| 1020 | armv6pmu_stop(void) | ||
| 1021 | { | ||
| 1022 | unsigned long flags, val; | ||
| 1023 | |||
| 1024 | spin_lock_irqsave(&pmu_lock, flags); | ||
| 1025 | val = armv6_pmcr_read(); | ||
| 1026 | val &= ~ARMV6_PMCR_ENABLE; | ||
| 1027 | armv6_pmcr_write(val); | ||
| 1028 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
| 1029 | } | ||
| 1030 | |||
| 1031 | static inline int | ||
| 1032 | armv6pmu_event_map(int config) | ||
| 1033 | { | ||
| 1034 | int mapping = armv6_perf_map[config]; | ||
| 1035 | if (HW_OP_UNSUPPORTED == mapping) | ||
| 1036 | mapping = -EOPNOTSUPP; | ||
| 1037 | return mapping; | ||
| 1038 | } | ||
| 1039 | |||
| 1040 | static inline int | ||
| 1041 | armv6mpcore_pmu_event_map(int config) | ||
| 1042 | { | ||
| 1043 | int mapping = armv6mpcore_perf_map[config]; | ||
| 1044 | if (HW_OP_UNSUPPORTED == mapping) | ||
| 1045 | mapping = -EOPNOTSUPP; | ||
| 1046 | return mapping; | ||
| 1047 | } | ||
| 1048 | |||
| 1049 | static u64 | ||
| 1050 | armv6pmu_raw_event(u64 config) | ||
| 1051 | { | ||
| 1052 | return config & 0xff; | ||
| 1053 | } | ||
| 1054 | |||
| 1055 | static int | ||
| 1056 | armv6pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
| 1057 | struct hw_perf_event *event) | ||
| 1058 | { | ||
| 1059 | /* Always place a cycle counter into the cycle counter. */ | ||
| 1060 | if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) { | ||
| 1061 | if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask)) | ||
| 1062 | return -EAGAIN; | ||
| 1063 | |||
| 1064 | return ARMV6_CYCLE_COUNTER; | ||
| 1065 | } else { | ||
| 1066 | /* | ||
| 1067 | * For anything other than a cycle counter, try and use | ||
| 1068 | * counter0 and counter1. | ||
| 1069 | */ | ||
| 1070 | if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask)) { | ||
| 1071 | return ARMV6_COUNTER1; | ||
| 1072 | } | ||
| 1073 | |||
| 1074 | if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask)) { | ||
| 1075 | return ARMV6_COUNTER0; | ||
| 1076 | } | ||
| 1077 | |||
| 1078 | /* The counters are all in use. */ | ||
| 1079 | return -EAGAIN; | ||
| 1080 | } | ||
| 1081 | } | ||
| 1082 | |||
| 1083 | static void | ||
| 1084 | armv6pmu_disable_event(struct hw_perf_event *hwc, | ||
| 1085 | int idx) | ||
| 1086 | { | ||
| 1087 | unsigned long val, mask, evt, flags; | ||
| 1088 | |||
| 1089 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
| 1090 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
| 1091 | evt = 0; | ||
| 1092 | } else if (ARMV6_COUNTER0 == idx) { | ||
| 1093 | mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK; | ||
| 1094 | evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT; | ||
| 1095 | } else if (ARMV6_COUNTER1 == idx) { | ||
| 1096 | mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK; | ||
| 1097 | evt = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT; | ||
| 1098 | } else { | ||
| 1099 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
| 1100 | return; | ||
| 1101 | } | ||
| 1102 | |||
| 1103 | /* | ||
| 1104 | * Mask out the current event and set the counter to count the number | ||
| 1105 | * of ETM bus signal assertion cycles. The external reporting should | ||
| 1106 | * be disabled and so this should never increment. | ||
| 1107 | */ | ||
| 1108 | spin_lock_irqsave(&pmu_lock, flags); | ||
| 1109 | val = armv6_pmcr_read(); | ||
| 1110 | val &= ~mask; | ||
| 1111 | val |= evt; | ||
| 1112 | armv6_pmcr_write(val); | ||
| 1113 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
| 1114 | } | ||
| 1115 | |||
| 1116 | static void | ||
| 1117 | armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc, | ||
| 1118 | int idx) | ||
| 1119 | { | ||
| 1120 | unsigned long val, mask, flags, evt = 0; | ||
| 1121 | |||
| 1122 | if (ARMV6_CYCLE_COUNTER == idx) { | ||
| 1123 | mask = ARMV6_PMCR_CCOUNT_IEN; | ||
| 1124 | } else if (ARMV6_COUNTER0 == idx) { | ||
| 1125 | mask = ARMV6_PMCR_COUNT0_IEN; | ||
| 1126 | } else if (ARMV6_COUNTER1 == idx) { | ||
| 1127 | mask = ARMV6_PMCR_COUNT1_IEN; | ||
| 1128 | } else { | ||
| 1129 | WARN_ONCE(1, "invalid counter number (%d)\n", idx); | ||
| 1130 | return; | ||
| 1131 | } | ||
| 1132 | |||
| 1133 | /* | ||
| 1134 | * Unlike UP ARMv6, we don't have a way of stopping the counters. We | ||
| 1135 | * simply disable the interrupt reporting. | ||
| 1136 | */ | ||
| 1137 | spin_lock_irqsave(&pmu_lock, flags); | ||
| 1138 | val = armv6_pmcr_read(); | ||
| 1139 | val &= ~mask; | ||
| 1140 | val |= evt; | ||
| 1141 | armv6_pmcr_write(val); | ||
| 1142 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
| 1143 | } | ||
| 1144 | |||
| 1145 | static const struct arm_pmu armv6pmu = { | ||
| 1146 | .name = "v6", | ||
| 1147 | .handle_irq = armv6pmu_handle_irq, | ||
| 1148 | .enable = armv6pmu_enable_event, | ||
| 1149 | .disable = armv6pmu_disable_event, | ||
| 1150 | .event_map = armv6pmu_event_map, | ||
| 1151 | .raw_event = armv6pmu_raw_event, | ||
| 1152 | .read_counter = armv6pmu_read_counter, | ||
| 1153 | .write_counter = armv6pmu_write_counter, | ||
| 1154 | .get_event_idx = armv6pmu_get_event_idx, | ||
| 1155 | .start = armv6pmu_start, | ||
| 1156 | .stop = armv6pmu_stop, | ||
| 1157 | .num_events = 3, | ||
| 1158 | .max_period = (1LLU << 32) - 1, | ||
| 1159 | }; | ||
| 1160 | |||
| 1161 | /* | ||
| 1162 | * ARMv6mpcore is almost identical to single core ARMv6 with the exception | ||
| 1163 | * that some of the events have different enumerations and that there is no | ||
| 1164 | * *hack* to stop the programmable counters. To stop the counters we simply | ||
| 1165 | * disable the interrupt reporting and update the event. When unthrottling we | ||
| 1166 | * reset the period and enable the interrupt reporting. | ||
| 1167 | */ | ||
| 1168 | static const struct arm_pmu armv6mpcore_pmu = { | ||
| 1169 | .name = "v6mpcore", | ||
| 1170 | .handle_irq = armv6pmu_handle_irq, | ||
| 1171 | .enable = armv6pmu_enable_event, | ||
| 1172 | .disable = armv6mpcore_pmu_disable_event, | ||
| 1173 | .event_map = armv6mpcore_pmu_event_map, | ||
| 1174 | .raw_event = armv6pmu_raw_event, | ||
| 1175 | .read_counter = armv6pmu_read_counter, | ||
| 1176 | .write_counter = armv6pmu_write_counter, | ||
| 1177 | .get_event_idx = armv6pmu_get_event_idx, | ||
| 1178 | .start = armv6pmu_start, | ||
| 1179 | .stop = armv6pmu_stop, | ||
| 1180 | .num_events = 3, | ||
| 1181 | .max_period = (1LLU << 32) - 1, | ||
| 1182 | }; | ||
| 1183 | |||
| 1184 | /* | ||
| 1185 | * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code. | ||
| 1186 | * | ||
| 1187 | * Copied from ARMv6 code, with the low level code inspired | ||
| 1188 | * by the ARMv7 Oprofile code. | ||
| 1189 | * | ||
| 1190 | * Cortex-A8 has up to 4 configurable performance counters and | ||
| 1191 | * a single cycle counter. | ||
| 1192 | * Cortex-A9 has up to 31 configurable performance counters and | ||
| 1193 | * a single cycle counter. | ||
| 1194 | * | ||
| 1195 | * All counters can be enabled/disabled and IRQ masked separately. The cycle | ||
| 1196 | * counter and all 4 performance counters together can be reset separately. | ||
| 1197 | */ | ||
| 1198 | |||
| 1199 | #define ARMV7_PMU_CORTEX_A8_NAME "ARMv7 Cortex-A8" | ||
| 1200 | |||
| 1201 | #define ARMV7_PMU_CORTEX_A9_NAME "ARMv7 Cortex-A9" | ||
| 1202 | |||
| 1203 | /* Common ARMv7 event types */ | ||
| 1204 | enum armv7_perf_types { | ||
| 1205 | ARMV7_PERFCTR_PMNC_SW_INCR = 0x00, | ||
| 1206 | ARMV7_PERFCTR_IFETCH_MISS = 0x01, | ||
| 1207 | ARMV7_PERFCTR_ITLB_MISS = 0x02, | ||
| 1208 | ARMV7_PERFCTR_DCACHE_REFILL = 0x03, | ||
| 1209 | ARMV7_PERFCTR_DCACHE_ACCESS = 0x04, | ||
| 1210 | ARMV7_PERFCTR_DTLB_REFILL = 0x05, | ||
| 1211 | ARMV7_PERFCTR_DREAD = 0x06, | ||
| 1212 | ARMV7_PERFCTR_DWRITE = 0x07, | ||
| 1213 | |||
| 1214 | ARMV7_PERFCTR_EXC_TAKEN = 0x09, | ||
| 1215 | ARMV7_PERFCTR_EXC_EXECUTED = 0x0A, | ||
| 1216 | ARMV7_PERFCTR_CID_WRITE = 0x0B, | ||
| 1217 | /* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS. | ||
| 1218 | * It counts: | ||
| 1219 | * - all branch instructions, | ||
| 1220 | * - instructions that explicitly write the PC, | ||
| 1221 | * - exception generating instructions. | ||
| 1222 | */ | ||
| 1223 | ARMV7_PERFCTR_PC_WRITE = 0x0C, | ||
| 1224 | ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D, | ||
| 1225 | ARMV7_PERFCTR_UNALIGNED_ACCESS = 0x0F, | ||
| 1226 | ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10, | ||
| 1227 | ARMV7_PERFCTR_CLOCK_CYCLES = 0x11, | ||
| 1228 | |||
| 1229 | ARMV7_PERFCTR_PC_BRANCH_MIS_USED = 0x12, | ||
| 1230 | |||
| 1231 | ARMV7_PERFCTR_CPU_CYCLES = 0xFF | ||
| 1232 | }; | ||
| 1233 | |||
| 1234 | /* ARMv7 Cortex-A8 specific event types */ | ||
| 1235 | enum armv7_a8_perf_types { | ||
| 1236 | ARMV7_PERFCTR_INSTR_EXECUTED = 0x08, | ||
| 1237 | |||
| 1238 | ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E, | ||
| 1239 | |||
| 1240 | ARMV7_PERFCTR_WRITE_BUFFER_FULL = 0x40, | ||
| 1241 | ARMV7_PERFCTR_L2_STORE_MERGED = 0x41, | ||
| 1242 | ARMV7_PERFCTR_L2_STORE_BUFF = 0x42, | ||
| 1243 | ARMV7_PERFCTR_L2_ACCESS = 0x43, | ||
| 1244 | ARMV7_PERFCTR_L2_CACH_MISS = 0x44, | ||
| 1245 | ARMV7_PERFCTR_AXI_READ_CYCLES = 0x45, | ||
| 1246 | ARMV7_PERFCTR_AXI_WRITE_CYCLES = 0x46, | ||
| 1247 | ARMV7_PERFCTR_MEMORY_REPLAY = 0x47, | ||
| 1248 | ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY = 0x48, | ||
| 1249 | ARMV7_PERFCTR_L1_DATA_MISS = 0x49, | ||
| 1250 | ARMV7_PERFCTR_L1_INST_MISS = 0x4A, | ||
| 1251 | ARMV7_PERFCTR_L1_DATA_COLORING = 0x4B, | ||
| 1252 | ARMV7_PERFCTR_L1_NEON_DATA = 0x4C, | ||
| 1253 | ARMV7_PERFCTR_L1_NEON_CACH_DATA = 0x4D, | ||
| 1254 | ARMV7_PERFCTR_L2_NEON = 0x4E, | ||
| 1255 | ARMV7_PERFCTR_L2_NEON_HIT = 0x4F, | ||
| 1256 | ARMV7_PERFCTR_L1_INST = 0x50, | ||
| 1257 | ARMV7_PERFCTR_PC_RETURN_MIS_PRED = 0x51, | ||
| 1258 | ARMV7_PERFCTR_PC_BRANCH_FAILED = 0x52, | ||
| 1259 | ARMV7_PERFCTR_PC_BRANCH_TAKEN = 0x53, | ||
| 1260 | ARMV7_PERFCTR_PC_BRANCH_EXECUTED = 0x54, | ||
| 1261 | ARMV7_PERFCTR_OP_EXECUTED = 0x55, | ||
| 1262 | ARMV7_PERFCTR_CYCLES_INST_STALL = 0x56, | ||
| 1263 | ARMV7_PERFCTR_CYCLES_INST = 0x57, | ||
| 1264 | ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL = 0x58, | ||
| 1265 | ARMV7_PERFCTR_CYCLES_NEON_INST_STALL = 0x59, | ||
| 1266 | ARMV7_PERFCTR_NEON_CYCLES = 0x5A, | ||
| 1267 | |||
| 1268 | ARMV7_PERFCTR_PMU0_EVENTS = 0x70, | ||
| 1269 | ARMV7_PERFCTR_PMU1_EVENTS = 0x71, | ||
| 1270 | ARMV7_PERFCTR_PMU_EVENTS = 0x72, | ||
| 1271 | }; | ||
| 1272 | |||
| 1273 | /* ARMv7 Cortex-A9 specific event types */ | ||
| 1274 | enum armv7_a9_perf_types { | ||
| 1275 | ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC = 0x40, | ||
| 1276 | ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC = 0x41, | ||
| 1277 | ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC = 0x42, | ||
| 1278 | |||
| 1279 | ARMV7_PERFCTR_COHERENT_LINE_MISS = 0x50, | ||
| 1280 | ARMV7_PERFCTR_COHERENT_LINE_HIT = 0x51, | ||
| 1281 | |||
| 1282 | ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES = 0x60, | ||
| 1283 | ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES = 0x61, | ||
| 1284 | ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES = 0x62, | ||
| 1285 | ARMV7_PERFCTR_STREX_EXECUTED_PASSED = 0x63, | ||
| 1286 | ARMV7_PERFCTR_STREX_EXECUTED_FAILED = 0x64, | ||
| 1287 | ARMV7_PERFCTR_DATA_EVICTION = 0x65, | ||
| 1288 | ARMV7_PERFCTR_ISSUE_STAGE_NO_INST = 0x66, | ||
| 1289 | ARMV7_PERFCTR_ISSUE_STAGE_EMPTY = 0x67, | ||
| 1290 | ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE = 0x68, | ||
| 1291 | |||
| 1292 | ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS = 0x6E, | ||
| 1293 | |||
| 1294 | ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST = 0x70, | ||
| 1295 | ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST = 0x71, | ||
| 1296 | ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST = 0x72, | ||
| 1297 | ARMV7_PERFCTR_FP_EXECUTED_INST = 0x73, | ||
| 1298 | ARMV7_PERFCTR_NEON_EXECUTED_INST = 0x74, | ||
| 1299 | |||
| 1300 | ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES = 0x80, | ||
| 1301 | ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES = 0x81, | ||
| 1302 | ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES = 0x82, | ||
| 1303 | ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES = 0x83, | ||
| 1304 | ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES = 0x84, | ||
| 1305 | ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES = 0x85, | ||
| 1306 | ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES = 0x86, | ||
| 1307 | |||
| 1308 | ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES = 0x8A, | ||
| 1309 | ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES = 0x8B, | ||
| 1310 | |||
| 1311 | ARMV7_PERFCTR_ISB_INST = 0x90, | ||
| 1312 | ARMV7_PERFCTR_DSB_INST = 0x91, | ||
| 1313 | ARMV7_PERFCTR_DMB_INST = 0x92, | ||
| 1314 | ARMV7_PERFCTR_EXT_INTERRUPTS = 0x93, | ||
| 1315 | |||
| 1316 | ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED = 0xA0, | ||
| 1317 | ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED = 0xA1, | ||
| 1318 | ARMV7_PERFCTR_PLE_FIFO_FLUSH = 0xA2, | ||
| 1319 | ARMV7_PERFCTR_PLE_RQST_COMPLETED = 0xA3, | ||
| 1320 | ARMV7_PERFCTR_PLE_FIFO_OVERFLOW = 0xA4, | ||
| 1321 | ARMV7_PERFCTR_PLE_RQST_PROG = 0xA5 | ||
| 1322 | }; | ||
| 1323 | |||
| 1324 | /* | ||
| 1325 | * Cortex-A8 HW events mapping | ||
| 1326 | * | ||
| 1327 | * The hardware events that we support. We do support cache operations but | ||
| 1328 | * we have Harvard caches and no way to combine instruction and data | ||
| 1329 | * accesses/misses in hardware. | ||
| 1330 | */ | ||
| 1331 | static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = { | ||
| 1332 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
| 1333 | [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED, | ||
| 1334 | [PERF_COUNT_HW_CACHE_REFERENCES] = HW_OP_UNSUPPORTED, | ||
| 1335 | [PERF_COUNT_HW_CACHE_MISSES] = HW_OP_UNSUPPORTED, | ||
| 1336 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
| 1337 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
| 1338 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | ||
| 1339 | }; | ||
| 1340 | |||
| 1341 | static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
| 1342 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 1343 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
| 1344 | [C(L1D)] = { | ||
| 1345 | /* | ||
| 1346 | * The performance counters don't differentiate between read | ||
| 1347 | * and write accesses/misses so this isn't strictly correct, | ||
| 1348 | * but it's the best we can do. Writes and reads get | ||
| 1349 | * combined. | ||
| 1350 | */ | ||
| 1351 | [C(OP_READ)] = { | ||
| 1352 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
| 1353 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
| 1354 | }, | ||
| 1355 | [C(OP_WRITE)] = { | ||
| 1356 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
| 1357 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
| 1358 | }, | ||
| 1359 | [C(OP_PREFETCH)] = { | ||
| 1360 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1361 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1362 | }, | ||
| 1363 | }, | ||
| 1364 | [C(L1I)] = { | ||
| 1365 | [C(OP_READ)] = { | ||
| 1366 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, | ||
| 1367 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, | ||
| 1368 | }, | ||
| 1369 | [C(OP_WRITE)] = { | ||
| 1370 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_INST, | ||
| 1371 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L1_INST_MISS, | ||
| 1372 | }, | ||
| 1373 | [C(OP_PREFETCH)] = { | ||
| 1374 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1375 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1376 | }, | ||
| 1377 | }, | ||
| 1378 | [C(LL)] = { | ||
| 1379 | [C(OP_READ)] = { | ||
| 1380 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, | ||
| 1381 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, | ||
| 1382 | }, | ||
| 1383 | [C(OP_WRITE)] = { | ||
| 1384 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_ACCESS, | ||
| 1385 | [C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACH_MISS, | ||
| 1386 | }, | ||
| 1387 | [C(OP_PREFETCH)] = { | ||
| 1388 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1389 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1390 | }, | ||
| 1391 | }, | ||
| 1392 | [C(DTLB)] = { | ||
| 1393 | /* | ||
| 1394 | * Only ITLB misses and DTLB refills are supported. | ||
| 1395 | * If users want the DTLB refill misses, a raw counter | ||
| 1396 | * must be used. | ||
| 1397 | */ | ||
| 1398 | [C(OP_READ)] = { | ||
| 1399 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1400 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
| 1401 | }, | ||
| 1402 | [C(OP_WRITE)] = { | ||
| 1403 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1404 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
| 1405 | }, | ||
| 1406 | [C(OP_PREFETCH)] = { | ||
| 1407 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1408 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1409 | }, | ||
| 1410 | }, | ||
| 1411 | [C(ITLB)] = { | ||
| 1412 | [C(OP_READ)] = { | ||
| 1413 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1414 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
| 1415 | }, | ||
| 1416 | [C(OP_WRITE)] = { | ||
| 1417 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1418 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
| 1419 | }, | ||
| 1420 | [C(OP_PREFETCH)] = { | ||
| 1421 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1422 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1423 | }, | ||
| 1424 | }, | ||
| 1425 | [C(BPU)] = { | ||
| 1426 | [C(OP_READ)] = { | ||
| 1427 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
| 1428 | [C(RESULT_MISS)] | ||
| 1429 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
| 1430 | }, | ||
| 1431 | [C(OP_WRITE)] = { | ||
| 1432 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
| 1433 | [C(RESULT_MISS)] | ||
| 1434 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
| 1435 | }, | ||
| 1436 | [C(OP_PREFETCH)] = { | ||
| 1437 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1438 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1439 | }, | ||
| 1440 | }, | ||
| 1441 | }; | ||
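
The triple-indexed table above is keyed exactly the way the perf ABI packs PERF_TYPE_HW_CACHE configs: one byte each for cache level, operation and result. A minimal sketch of that decode, assuming the standard packing (the helper name is hypothetical, not from this patch):

	static int sketch_map_cache_event(u64 config)
	{
		unsigned int cache  = (config >>  0) & 0xff;
		unsigned int op     = (config >>  8) & 0xff;
		unsigned int result = (config >> 16) & 0xff;
		int ev;

		if (cache  >= PERF_COUNT_HW_CACHE_MAX ||
		    op     >= PERF_COUNT_HW_CACHE_OP_MAX ||
		    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
			return -EINVAL;

		/* e.g. (L1D, OP_READ, RESULT_MISS) -> ARMV7_PERFCTR_DCACHE_REFILL */
		ev = armv7_a8_perf_cache_map[cache][op][result];
		return ev == CACHE_OP_UNSUPPORTED ? -EOPNOTSUPP : ev;
	}
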
| 1442 | |||
| 1443 | /* | ||
| 1444 | * Cortex-A9 HW events mapping | ||
| 1445 | */ | ||
| 1446 | static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = { | ||
| 1447 | [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES, | ||
| 1448 | [PERF_COUNT_HW_INSTRUCTIONS] = | ||
| 1449 | ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE, | ||
| 1450 | [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_COHERENT_LINE_HIT, | ||
| 1451 | [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_COHERENT_LINE_MISS, | ||
| 1452 | [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE, | ||
| 1453 | [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
| 1454 | [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES, | ||
| 1455 | }; | ||
| 1456 | |||
| 1457 | static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX] | ||
| 1458 | [PERF_COUNT_HW_CACHE_OP_MAX] | ||
| 1459 | [PERF_COUNT_HW_CACHE_RESULT_MAX] = { | ||
| 1460 | [C(L1D)] = { | ||
| 1461 | /* | ||
| 1462 | * The performance counters don't differentiate between read | ||
| 1463 | * and write accesses/misses so this isn't strictly correct, | ||
| 1464 | * but it's the best we can do. Writes and reads get | ||
| 1465 | * combined. | ||
| 1466 | */ | ||
| 1467 | [C(OP_READ)] = { | ||
| 1468 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
| 1469 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
| 1470 | }, | ||
| 1471 | [C(OP_WRITE)] = { | ||
| 1472 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_DCACHE_ACCESS, | ||
| 1473 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DCACHE_REFILL, | ||
| 1474 | }, | ||
| 1475 | [C(OP_PREFETCH)] = { | ||
| 1476 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1477 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1478 | }, | ||
| 1479 | }, | ||
| 1480 | [C(L1I)] = { | ||
| 1481 | [C(OP_READ)] = { | ||
| 1482 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1483 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
| 1484 | }, | ||
| 1485 | [C(OP_WRITE)] = { | ||
| 1486 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1487 | [C(RESULT_MISS)] = ARMV7_PERFCTR_IFETCH_MISS, | ||
| 1488 | }, | ||
| 1489 | [C(OP_PREFETCH)] = { | ||
| 1490 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1491 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1492 | }, | ||
| 1493 | }, | ||
| 1494 | [C(LL)] = { | ||
| 1495 | [C(OP_READ)] = { | ||
| 1496 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1497 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1498 | }, | ||
| 1499 | [C(OP_WRITE)] = { | ||
| 1500 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1501 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1502 | }, | ||
| 1503 | [C(OP_PREFETCH)] = { | ||
| 1504 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1505 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1506 | }, | ||
| 1507 | }, | ||
| 1508 | [C(DTLB)] = { | ||
| 1509 | /* | ||
| 1510 | * Only ITLB misses and DTLB refills are supported. | ||
| 1511 | * If users want the DTLB refill misses, a raw counter | ||
| 1512 | * must be used. | ||
| 1513 | */ | ||
| 1514 | [C(OP_READ)] = { | ||
| 1515 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1516 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
| 1517 | }, | ||
| 1518 | [C(OP_WRITE)] = { | ||
| 1519 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1520 | [C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL, | ||
| 1521 | }, | ||
| 1522 | [C(OP_PREFETCH)] = { | ||
| 1523 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1524 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1525 | }, | ||
| 1526 | }, | ||
| 1527 | [C(ITLB)] = { | ||
| 1528 | [C(OP_READ)] = { | ||
| 1529 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1530 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
| 1531 | }, | ||
| 1532 | [C(OP_WRITE)] = { | ||
| 1533 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1534 | [C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_MISS, | ||
| 1535 | }, | ||
| 1536 | [C(OP_PREFETCH)] = { | ||
| 1537 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1538 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1539 | }, | ||
| 1540 | }, | ||
| 1541 | [C(BPU)] = { | ||
| 1542 | [C(OP_READ)] = { | ||
| 1543 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
| 1544 | [C(RESULT_MISS)] | ||
| 1545 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
| 1546 | }, | ||
| 1547 | [C(OP_WRITE)] = { | ||
| 1548 | [C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_WRITE, | ||
| 1549 | [C(RESULT_MISS)] | ||
| 1550 | = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED, | ||
| 1551 | }, | ||
| 1552 | [C(OP_PREFETCH)] = { | ||
| 1553 | [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED, | ||
| 1554 | [C(RESULT_MISS)] = CACHE_OP_UNSUPPORTED, | ||
| 1555 | }, | ||
| 1556 | }, | ||
| 1557 | }; | ||
| 1558 | |||
| 1559 | /* | ||
| 1560 | * Perf Event counters | ||
| 1561 | */ | ||
| 1562 | enum armv7_counters { | ||
| 1563 | ARMV7_CYCLE_COUNTER = 1, /* Cycle counter */ | ||
| 1564 | ARMV7_COUNTER0 = 2, /* First event counter */ | ||
| 1565 | }; | ||
| 1566 | |||
| 1567 | /* | ||
| 1568 | * The cycle counter is ARMV7_CYCLE_COUNTER. | ||
| 1569 | * The first event counter is ARMV7_COUNTER0. | ||
| 1570 | * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1). | ||
| 1571 | */ | ||
| 1572 | #define ARMV7_COUNTER_LAST (ARMV7_COUNTER0 + armpmu->num_events - 1) | ||
| 1573 | |||
| 1574 | /* | ||
| 1575 | * ARMv7 low level PMNC access | ||
| 1576 | */ | ||
| 1577 | |||
| 1578 | /* | ||
| 1579 | * Per-CPU PMNC: config reg | ||
| 1580 | */ | ||
| 1581 | #define ARMV7_PMNC_E (1 << 0) /* Enable all counters */ | ||
| 1582 | #define ARMV7_PMNC_P (1 << 1) /* Reset all counters */ | ||
| 1583 | #define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */ | ||
| 1584 | #define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th CPU cycle */ | ||
| 1585 | #define ARMV7_PMNC_X (1 << 4) /* Export to ETM */ | ||
| 1586 | #define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */ | ||
| 1587 | #define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */ | ||
| 1588 | #define ARMV7_PMNC_N_MASK 0x1f | ||
| 1589 | #define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */ | ||
| 1590 | |||
| 1591 | /* | ||
| 1592 | * Available counters | ||
| 1593 | */ | ||
| 1594 | #define ARMV7_CNT0 0 /* First event counter */ | ||
| 1595 | #define ARMV7_CCNT 31 /* Cycle counter */ | ||
| 1596 | |||
| 1597 | /* Perf Event to low level counters mapping */ | ||
| 1598 | #define ARMV7_EVENT_CNT_TO_CNTx (ARMV7_COUNTER0 - ARMV7_CNT0) | ||
| 1599 | |||
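The macro above captures the offset between perf's counter indices and the hardware counter numbers: counter ARMV7_COUNTER0 (index 2) is hardware CNT0. A one-line hypothetical helper makes the translation explicit:

	/* Hypothetical helper: perf counter index -> hardware CNTx number. */
	static inline unsigned int sketch_idx_to_cntx(unsigned int idx)
	{
		/* e.g. idx == ARMV7_COUNTER0 (2) maps to CNT0 (0) */
		return idx - ARMV7_EVENT_CNT_TO_CNTx;
	}
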
| 1600 | /* | ||
| 1601 | * CNTENS: counters enable reg | ||
| 1602 | */ | ||
| 1603 | #define ARMV7_CNTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
| 1604 | #define ARMV7_CNTENS_C (1 << ARMV7_CCNT) | ||
| 1605 | |||
| 1606 | /* | ||
| 1607 | * CNTENC: counters disable reg | ||
| 1608 | */ | ||
| 1609 | #define ARMV7_CNTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
| 1610 | #define ARMV7_CNTENC_C (1 << ARMV7_CCNT) | ||
| 1611 | |||
| 1612 | /* | ||
| 1613 | * INTENS: counters overflow interrupt enable reg | ||
| 1614 | */ | ||
| 1615 | #define ARMV7_INTENS_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
| 1616 | #define ARMV7_INTENS_C (1 << ARMV7_CCNT) | ||
| 1617 | |||
| 1618 | /* | ||
| 1619 | * INTENC: counters overflow interrupt disable reg | ||
| 1620 | */ | ||
| 1621 | #define ARMV7_INTENC_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
| 1622 | #define ARMV7_INTENC_C (1 << ARMV7_CCNT) | ||
| 1623 | |||
| 1624 | /* | ||
| 1625 | * EVTSEL: Event selection reg | ||
| 1626 | */ | ||
| 1627 | #define ARMV7_EVTSEL_MASK 0x7f /* Mask for writable bits */ | ||
| 1628 | |||
| 1629 | /* | ||
| 1630 | * SELECT: Counter selection reg | ||
| 1631 | */ | ||
| 1632 | #define ARMV7_SELECT_MASK 0x1f /* Mask for writable bits */ | ||
| 1633 | |||
| 1634 | /* | ||
| 1635 | * FLAG: counters overflow flag status reg | ||
| 1636 | */ | ||
| 1637 | #define ARMV7_FLAG_P(idx) (1 << (idx - ARMV7_EVENT_CNT_TO_CNTx)) | ||
| 1638 | #define ARMV7_FLAG_C (1 << ARMV7_CCNT) | ||
| 1639 | #define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */ | ||
| 1640 | #define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK | ||
| 1641 | |||
| 1642 | static inline unsigned long armv7_pmnc_read(void) | ||
| 1643 | { | ||
| 1644 | u32 val; | ||
| 1645 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val)); | ||
| 1646 | return val; | ||
| 1647 | } | ||
| 1648 | |||
| 1649 | static inline void armv7_pmnc_write(unsigned long val) | ||
| 1650 | { | ||
| 1651 | val &= ARMV7_PMNC_MASK; | ||
| 1652 | asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val)); | ||
| 1653 | } | ||
| 1654 | |||
| 1655 | static inline int armv7_pmnc_has_overflowed(unsigned long pmnc) | ||
| 1656 | { | ||
| 1657 | return pmnc & ARMV7_OVERFLOWED_MASK; | ||
| 1658 | } | ||
| 1659 | |||
| 1660 | static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc, | ||
| 1661 | enum armv7_counters counter) | ||
| 1662 | { | ||
| 1663 | int ret = 0; | ||
| 1664 | |||
| 1665 | if (counter == ARMV7_CYCLE_COUNTER) | ||
| 1666 | ret = pmnc & ARMV7_FLAG_C; | ||
| 1667 | else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST)) | ||
| 1668 | ret = pmnc & ARMV7_FLAG_P(counter); | ||
| 1669 | else | ||
| 1670 | pr_err("CPU%u checking wrong counter %d overflow status\n", | ||
| 1671 | smp_processor_id(), counter); | ||
| 1672 | |||
| 1673 | return ret; | ||
| 1674 | } | ||
| 1675 | |||
| 1676 | static inline int armv7_pmnc_select_counter(unsigned int idx) | ||
| 1677 | { | ||
| 1678 | u32 val; | ||
| 1679 | |||
| 1680 | if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) { | ||
| 1681 | pr_err("CPU%u selecting wrong PMNC counter" | ||
| 1682 | " %d\n", smp_processor_id(), idx); | ||
| 1683 | return -1; | ||
| 1684 | } | ||
| 1685 | |||
| 1686 | val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK; | ||
| 1687 | asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val)); | ||
| 1688 | |||
| 1689 | return idx; | ||
| 1690 | } | ||
| 1691 | |||
| 1692 | static inline u32 armv7pmu_read_counter(int idx) | ||
| 1693 | { | ||
| 1694 | unsigned long value = 0; | ||
| 1695 | |||
| 1696 | if (idx == ARMV7_CYCLE_COUNTER) | ||
| 1697 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value)); | ||
| 1698 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
| 1699 | if (armv7_pmnc_select_counter(idx) == idx) | ||
| 1700 | asm volatile("mrc p15, 0, %0, c9, c13, 2" | ||
| 1701 | : "=r" (value)); | ||
| 1702 | } else | ||
| 1703 | pr_err("CPU%u reading wrong counter %d\n", | ||
| 1704 | smp_processor_id(), idx); | ||
| 1705 | |||
| 1706 | return value; | ||
| 1707 | } | ||
| 1708 | |||
| 1709 | static inline void armv7pmu_write_counter(int idx, u32 value) | ||
| 1710 | { | ||
| 1711 | if (idx == ARMV7_CYCLE_COUNTER) | ||
| 1712 | asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value)); | ||
| 1713 | else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) { | ||
| 1714 | if (armv7_pmnc_select_counter(idx) == idx) | ||
| 1715 | asm volatile("mcr p15, 0, %0, c9, c13, 2" | ||
| 1716 | : : "r" (value)); | ||
| 1717 | } else | ||
| 1718 | pr_err("CPU%u writing wrong counter %d\n", | ||
| 1719 | smp_processor_id(), idx); | ||
| 1720 | } | ||
| 1721 | |||
| 1722 | static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val) | ||
| 1723 | { | ||
| 1724 | if (armv7_pmnc_select_counter(idx) == idx) { | ||
| 1725 | val &= ARMV7_EVTSEL_MASK; | ||
| 1726 | asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val)); | ||
| 1727 | } | ||
| 1728 | } | ||
| 1729 | |||
| 1730 | static inline u32 armv7_pmnc_enable_counter(unsigned int idx) | ||
| 1731 | { | ||
| 1732 | u32 val; | ||
| 1733 | |||
| 1734 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
| 1735 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
| 1736 | pr_err("CPU%u enabling wrong PMNC counter" | ||
| 1737 | " %d\n", smp_processor_id(), idx); | ||
| 1738 | return -1; | ||
| 1739 | } | ||
| 1740 | |||
| 1741 | if (idx == ARMV7_CYCLE_COUNTER) | ||
| 1742 | val = ARMV7_CNTENS_C; | ||
| 1743 | else | ||
| 1744 | val = ARMV7_CNTENS_P(idx); | ||
| 1745 | |||
| 1746 | asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val)); | ||
| 1747 | |||
| 1748 | return idx; | ||
| 1749 | } | ||
| 1750 | |||
| 1751 | static inline u32 armv7_pmnc_disable_counter(unsigned int idx) | ||
| 1752 | { | ||
| 1753 | u32 val; | ||
| 1754 | |||
| 1756 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
| 1757 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
| 1758 | pr_err("CPU%u disabling wrong PMNC counter" | ||
| 1759 | " %d\n", smp_processor_id(), idx); | ||
| 1760 | return -1; | ||
| 1761 | } | ||
| 1762 | |||
| 1763 | if (idx == ARMV7_CYCLE_COUNTER) | ||
| 1764 | val = ARMV7_CNTENC_C; | ||
| 1765 | else | ||
| 1766 | val = ARMV7_CNTENC_P(idx); | ||
| 1767 | |||
| 1768 | asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val)); | ||
| 1769 | |||
| 1770 | return idx; | ||
| 1771 | } | ||
| 1772 | |||
| 1773 | static inline u32 armv7_pmnc_enable_intens(unsigned int idx) | ||
| 1774 | { | ||
| 1775 | u32 val; | ||
| 1776 | |||
| 1777 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
| 1778 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
| 1779 | pr_err("CPU%u enabling wrong PMNC counter" | ||
| 1780 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
| 1781 | return -1; | ||
| 1782 | } | ||
| 1783 | |||
| 1784 | if (idx == ARMV7_CYCLE_COUNTER) | ||
| 1785 | val = ARMV7_INTENS_C; | ||
| 1786 | else | ||
| 1787 | val = ARMV7_INTENS_P(idx); | ||
| 1788 | |||
| 1789 | asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val)); | ||
| 1790 | |||
| 1791 | return idx; | ||
| 1792 | } | ||
| 1793 | |||
| 1794 | static inline u32 armv7_pmnc_disable_intens(unsigned int idx) | ||
| 1795 | { | ||
| 1796 | u32 val; | ||
| 1797 | |||
| 1798 | if ((idx != ARMV7_CYCLE_COUNTER) && | ||
| 1799 | ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) { | ||
| 1800 | pr_err("CPU%u disabling wrong PMNC counter" | ||
| 1801 | " interrupt enable %d\n", smp_processor_id(), idx); | ||
| 1802 | return -1; | ||
| 1803 | } | ||
| 1804 | |||
| 1805 | if (idx == ARMV7_CYCLE_COUNTER) | ||
| 1806 | val = ARMV7_INTENC_C; | ||
| 1807 | else | ||
| 1808 | val = ARMV7_INTENC_P(idx); | ||
| 1809 | |||
| 1810 | asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val)); | ||
| 1811 | |||
| 1812 | return idx; | ||
| 1813 | } | ||
| 1814 | |||
| 1815 | static inline u32 armv7_pmnc_getreset_flags(void) | ||
| 1816 | { | ||
| 1817 | u32 val; | ||
| 1818 | |||
| 1819 | /* Read */ | ||
| 1820 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
| 1821 | |||
| 1822 | /* Write to clear flags */ | ||
| 1823 | val &= ARMV7_FLAG_MASK; | ||
| 1824 | asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val)); | ||
| 1825 | |||
| 1826 | return val; | ||
| 1827 | } | ||
| 1828 | |||
| 1829 | #ifdef DEBUG | ||
| 1830 | static void armv7_pmnc_dump_regs(void) | ||
| 1831 | { | ||
| 1832 | u32 val; | ||
| 1833 | unsigned int cnt; | ||
| 1834 | |||
| 1835 | printk(KERN_INFO "PMNC registers dump:\n"); | ||
| 1836 | |||
| 1837 | asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val)); | ||
| 1838 | printk(KERN_INFO "PMNC =0x%08x\n", val); | ||
| 1839 | |||
| 1840 | asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val)); | ||
| 1841 | printk(KERN_INFO "CNTENS=0x%08x\n", val); | ||
| 1842 | |||
| 1843 | asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val)); | ||
| 1844 | printk(KERN_INFO "INTENS=0x%08x\n", val); | ||
| 1845 | |||
| 1846 | asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val)); | ||
| 1847 | printk(KERN_INFO "FLAGS =0x%08x\n", val); | ||
| 1848 | |||
| 1849 | asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val)); | ||
| 1850 | printk(KERN_INFO "SELECT=0x%08x\n", val); | ||
| 1851 | |||
| 1852 | asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val)); | ||
| 1853 | printk(KERN_INFO "CCNT =0x%08x\n", val); | ||
| 1854 | |||
| 1855 | for (cnt = ARMV7_COUNTER0; cnt <= ARMV7_COUNTER_LAST; cnt++) { | ||
| 1856 | armv7_pmnc_select_counter(cnt); | ||
| 1857 | asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val)); | ||
| 1858 | printk(KERN_INFO "CNT[%d] count =0x%08x\n", | ||
| 1859 | cnt - ARMV7_EVENT_CNT_TO_CNTx, val); | ||
| 1860 | asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val)); | ||
| 1861 | printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n", | ||
| 1862 | cnt - ARMV7_EVENT_CNT_TO_CNTx, val); | ||
| 1863 | } | ||
| 1864 | } | ||
| 1865 | #endif | ||
| 1866 | |||
| 1867 | void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx) | ||
| 1868 | { | ||
| 1869 | unsigned long flags; | ||
| 1870 | |||
| 1871 | /* | ||
| 1872 | * Enable counter and interrupt, and set the counter to count | ||
| 1873 | * the event that we're interested in. | ||
| 1874 | */ | ||
| 1875 | spin_lock_irqsave(&pmu_lock, flags); | ||
| 1876 | |||
| 1877 | /* | ||
| 1878 | * Disable counter | ||
| 1879 | */ | ||
| 1880 | armv7_pmnc_disable_counter(idx); | ||
| 1881 | |||
| 1882 | /* | ||
| 1883 | * Set event (if destined for PMNx counters) | ||
| 1884 | * We don't need to set the event if it's a cycle count | ||
| 1885 | */ | ||
| 1886 | if (idx != ARMV7_CYCLE_COUNTER) | ||
| 1887 | armv7_pmnc_write_evtsel(idx, hwc->config_base); | ||
| 1888 | |||
| 1889 | /* | ||
| 1890 | * Enable interrupt for this counter | ||
| 1891 | */ | ||
| 1892 | armv7_pmnc_enable_intens(idx); | ||
| 1893 | |||
| 1894 | /* | ||
| 1895 | * Enable counter | ||
| 1896 | */ | ||
| 1897 | armv7_pmnc_enable_counter(idx); | ||
| 1898 | |||
| 1899 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
| 1900 | } | ||
| 1901 | |||
| 1902 | static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx) | ||
| 1903 | { | ||
| 1904 | unsigned long flags; | ||
| 1905 | |||
| 1906 | /* | ||
| 1907 | * Disable counter and interrupt | ||
| 1908 | */ | ||
| 1909 | spin_lock_irqsave(&pmu_lock, flags); | ||
| 1910 | |||
| 1911 | /* | ||
| 1912 | * Disable counter | ||
| 1913 | */ | ||
| 1914 | armv7_pmnc_disable_counter(idx); | ||
| 1915 | |||
| 1916 | /* | ||
| 1917 | * Disable interrupt for this counter | ||
| 1918 | */ | ||
| 1919 | armv7_pmnc_disable_intens(idx); | ||
| 1920 | |||
| 1921 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
| 1922 | } | ||
| 1923 | |||
| 1924 | static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev) | ||
| 1925 | { | ||
| 1926 | unsigned long pmnc; | ||
| 1927 | struct perf_sample_data data; | ||
| 1928 | struct cpu_hw_events *cpuc; | ||
| 1929 | struct pt_regs *regs; | ||
| 1930 | int idx; | ||
| 1931 | |||
| 1932 | /* | ||
| 1933 | * Get and reset the IRQ flags | ||
| 1934 | */ | ||
| 1935 | pmnc = armv7_pmnc_getreset_flags(); | ||
| 1936 | |||
| 1937 | /* | ||
| 1938 | * Did an overflow occur? | ||
| 1939 | */ | ||
| 1940 | if (!armv7_pmnc_has_overflowed(pmnc)) | ||
| 1941 | return IRQ_NONE; | ||
| 1942 | |||
| 1943 | /* | ||
| 1944 | * Handle the counter(s) overflow(s) | ||
| 1945 | */ | ||
| 1946 | regs = get_irq_regs(); | ||
| 1947 | |||
| 1948 | data.addr = 0; | ||
| 1949 | |||
| 1950 | cpuc = &__get_cpu_var(cpu_hw_events); | ||
| 1951 | for (idx = 0; idx <= armpmu->num_events; ++idx) { | ||
| 1952 | struct perf_event *event = cpuc->events[idx]; | ||
| 1953 | struct hw_perf_event *hwc; | ||
| 1954 | |||
| 1955 | if (!test_bit(idx, cpuc->active_mask)) | ||
| 1956 | continue; | ||
| 1957 | |||
| 1958 | /* | ||
| 1959 | * We have a single interrupt for all counters. Check that | ||
| 1960 | * each counter has overflowed before we process it. | ||
| 1961 | */ | ||
| 1962 | if (!armv7_pmnc_counter_has_overflowed(pmnc, idx)) | ||
| 1963 | continue; | ||
| 1964 | |||
| 1965 | hwc = &event->hw; | ||
| 1966 | armpmu_event_update(event, hwc, idx); | ||
| 1967 | data.period = event->hw.last_period; | ||
| 1968 | if (!armpmu_event_set_period(event, hwc, idx)) | ||
| 1969 | continue; | ||
| 1970 | |||
| 1971 | if (perf_event_overflow(event, 0, &data, regs)) | ||
| 1972 | armpmu->disable(hwc, idx); | ||
| 1973 | } | ||
| 1974 | |||
| 1975 | /* | ||
| 1976 | * Handle the pending perf events. | ||
| 1977 | * | ||
| 1978 | * Note: this call *must* be run with interrupts enabled. For | ||
| 1979 | * platforms that can have the PMU interrupts raised as a PMI, this | ||
| 1980 | * will not work. | ||
| 1981 | */ | ||
| 1982 | perf_event_do_pending(); | ||
| 1983 | |||
| 1984 | return IRQ_HANDLED; | ||
| 1985 | } | ||
| 1986 | |||
| 1987 | static void armv7pmu_start(void) | ||
| 1988 | { | ||
| 1989 | unsigned long flags; | ||
| 1990 | |||
| 1991 | spin_lock_irqsave(&pmu_lock, flags); | ||
| 1992 | /* Enable all counters */ | ||
| 1993 | armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E); | ||
| 1994 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
| 1995 | } | ||
| 1996 | |||
| 1997 | static void armv7pmu_stop(void) | ||
| 1998 | { | ||
| 1999 | unsigned long flags; | ||
| 2000 | |||
| 2001 | spin_lock_irqsave(&pmu_lock, flags); | ||
| 2002 | /* Disable all counters */ | ||
| 2003 | armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E); | ||
| 2004 | spin_unlock_irqrestore(&pmu_lock, flags); | ||
| 2005 | } | ||
| 2006 | |||
| 2007 | static inline int armv7_a8_pmu_event_map(int config) | ||
| 2008 | { | ||
| 2009 | int mapping = armv7_a8_perf_map[config]; | ||
| 2010 | if (HW_OP_UNSUPPORTED == mapping) | ||
| 2011 | mapping = -EOPNOTSUPP; | ||
| 2012 | return mapping; | ||
| 2013 | } | ||
| 2014 | |||
| 2015 | static inline int armv7_a9_pmu_event_map(int config) | ||
| 2016 | { | ||
| 2017 | int mapping = armv7_a9_perf_map[config]; | ||
| 2018 | if (HW_OP_UNSUPPORTED == mapping) | ||
| 2019 | mapping = -EOPNOTSUPP; | ||
| 2020 | return mapping; | ||
| 2021 | } | ||
| 2022 | |||
| 2023 | static u64 armv7pmu_raw_event(u64 config) | ||
| 2024 | { | ||
| 2025 | return config & 0xff; | ||
| 2026 | } | ||
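
Because armv7pmu_raw_event() keeps only the low byte of the config, every event number in the enums above is reachable from userspace as a raw event. A hedged userspace sketch using the standard perf_event_open ABI (event 0x44, ARMV7_PERFCTR_L2_CACH_MISS, is Cortex-A8 specific):

	#include <linux/perf_event.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Hypothetical example: open a raw counter for Cortex-A8 L2 misses. */
	static int open_raw_l2_miss(void)
	{
		struct perf_event_attr attr;

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);
		attr.type   = PERF_TYPE_RAW;
		attr.config = 0x44;	/* ARMV7_PERFCTR_L2_CACH_MISS */

		/* pid 0 = this task, cpu -1 = any, no group leader, no flags */
		return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	}
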
| 2027 | |||
| 2028 | static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc, | ||
| 2029 | struct hw_perf_event *event) | ||
| 2030 | { | ||
| 2031 | int idx; | ||
| 2032 | |||
| 2033 | /* Always place a cycle-count event in the cycle counter. */ | ||
| 2034 | if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) { | ||
| 2035 | if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask)) | ||
| 2036 | return -EAGAIN; | ||
| 2037 | |||
| 2038 | return ARMV7_CYCLE_COUNTER; | ||
| 2039 | } else { | ||
| 2040 | /* | ||
| 2041 | * For anything other than a cycle counter, try to use | ||
| 2042 | * the event counters. | ||
| 2043 | */ | ||
| 2044 | for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) { | ||
| 2045 | if (!test_and_set_bit(idx, cpuc->used_mask)) | ||
| 2046 | return idx; | ||
| 2047 | } | ||
| 2048 | |||
| 2049 | /* The counters are all in use. */ | ||
| 2050 | return -EAGAIN; | ||
| 2051 | } | ||
| 2052 | } | ||
| 2053 | |||
| 2054 | static struct arm_pmu armv7pmu = { | ||
| 2055 | .handle_irq = armv7pmu_handle_irq, | ||
| 2056 | .enable = armv7pmu_enable_event, | ||
| 2057 | .disable = armv7pmu_disable_event, | ||
| 2058 | .raw_event = armv7pmu_raw_event, | ||
| 2059 | .read_counter = armv7pmu_read_counter, | ||
| 2060 | .write_counter = armv7pmu_write_counter, | ||
| 2061 | .get_event_idx = armv7pmu_get_event_idx, | ||
| 2062 | .start = armv7pmu_start, | ||
| 2063 | .stop = armv7pmu_stop, | ||
| 2064 | .max_period = (1LLU << 32) - 1, | ||
| 2065 | }; | ||
| 2066 | |||
| 2067 | static u32 __init armv7_reset_read_pmnc(void) | ||
| 2068 | { | ||
| 2069 | u32 nb_cnt; | ||
| 2070 | |||
| 2071 | /* Initialize & Reset PMNC: C and P bits */ | ||
| 2072 | armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C); | ||
| 2073 | |||
| 2074 | /* Read the number of CNTx counters supported from PMNC */ | ||
| 2075 | nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK; | ||
| 2076 | |||
| 2077 | /* Add the CPU cycles counter and return */ | ||
| 2078 | return nb_cnt + 1; | ||
| 2079 | } | ||
| 2080 | |||
| 2081 | static int __init | ||
| 2082 | init_hw_perf_events(void) | ||
| 2083 | { | ||
| 2084 | unsigned long cpuid = read_cpuid_id(); | ||
| 2085 | unsigned long implementor = (cpuid & 0xFF000000) >> 24; | ||
| 2086 | unsigned long part_number = (cpuid & 0xFFF0); | ||
| 2087 | |||
| 2088 | /* We only support ARM CPUs implemented by ARM at the moment. */ | ||
| 2089 | if (0x41 == implementor) { | ||
| 2090 | switch (part_number) { | ||
| 2091 | case 0xB360: /* ARM1136 */ | ||
| 2092 | case 0xB560: /* ARM1156 */ | ||
| 2093 | case 0xB760: /* ARM1176 */ | ||
| 2094 | armpmu = &armv6pmu; | ||
| 2095 | memcpy(armpmu_perf_cache_map, armv6_perf_cache_map, | ||
| 2096 | sizeof(armv6_perf_cache_map)); | ||
| 2097 | perf_max_events = armv6pmu.num_events; | ||
| 2098 | break; | ||
| 2099 | case 0xB020: /* ARM11mpcore */ | ||
| 2100 | armpmu = &armv6mpcore_pmu; | ||
| 2101 | memcpy(armpmu_perf_cache_map, | ||
| 2102 | armv6mpcore_perf_cache_map, | ||
| 2103 | sizeof(armv6mpcore_perf_cache_map)); | ||
| 2104 | perf_max_events = armv6mpcore_pmu.num_events; | ||
| 2105 | break; | ||
| 2106 | case 0xC080: /* Cortex-A8 */ | ||
| 2107 | armv7pmu.name = ARMV7_PMU_CORTEX_A8_NAME; | ||
| 2108 | memcpy(armpmu_perf_cache_map, armv7_a8_perf_cache_map, | ||
| 2109 | sizeof(armv7_a8_perf_cache_map)); | ||
| 2110 | armv7pmu.event_map = armv7_a8_pmu_event_map; | ||
| 2111 | armpmu = &armv7pmu; | ||
| 2112 | |||
| 2113 | /* Reset PMNC and read the number of CNTx | ||
| 2114 | counters supported */ | ||
| 2115 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
| 2116 | perf_max_events = armv7pmu.num_events; | ||
| 2117 | break; | ||
| 2118 | case 0xC090: /* Cortex-A9 */ | ||
| 2119 | armv7pmu.name = ARMV7_PMU_CORTEX_A9_NAME; | ||
| 2120 | memcpy(armpmu_perf_cache_map, armv7_a9_perf_cache_map, | ||
| 2121 | sizeof(armv7_a9_perf_cache_map)); | ||
| 2122 | armv7pmu.event_map = armv7_a9_pmu_event_map; | ||
| 2123 | armpmu = &armv7pmu; | ||
| 2124 | |||
| 2125 | /* Reset PMNC and read the number of CNTx | ||
| 2126 | counters supported */ | ||
| 2127 | armv7pmu.num_events = armv7_reset_read_pmnc(); | ||
| 2128 | perf_max_events = armv7pmu.num_events; | ||
| 2129 | break; | ||
| 2130 | default: | ||
| 2131 | pr_info("no hardware support available\n"); | ||
| 2132 | perf_max_events = -1; | ||
| 2133 | } | ||
| 2134 | } | ||
| 2135 | |||
| 2136 | if (armpmu) | ||
| 2137 | pr_info("enabled with %s PMU driver, %d counters available\n", | ||
| 2138 | armpmu->name, armpmu->num_events); | ||
| 2139 | |||
| 2140 | return 0; | ||
| 2141 | } | ||
| 2142 | arch_initcall(init_hw_perf_events); | ||
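
The CPUID tests above follow the MIDR layout: bits [31:24] hold the implementor (0x41, ASCII 'A', for ARM Ltd) and bits [15:4] the part number. A small sketch of the same decode (helper name hypothetical):

	/* Hypothetical sketch of the MIDR decode used by init_hw_perf_events(). */
	static inline int midr_is_cortex_a8(unsigned long midr)
	{
		return ((midr >> 24) & 0xff) == 0x41 &&	/* implementor: ARM */
		       (midr & 0xfff0) == 0xc080;	/* part: Cortex-A8 */
	}
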
| 2143 | |||
| 2144 | /* | ||
| 2145 | * Callchain handling code. | ||
| 2146 | */ | ||
| 2147 | static inline void | ||
| 2148 | callchain_store(struct perf_callchain_entry *entry, | ||
| 2149 | u64 ip) | ||
| 2150 | { | ||
| 2151 | if (entry->nr < PERF_MAX_STACK_DEPTH) | ||
| 2152 | entry->ip[entry->nr++] = ip; | ||
| 2153 | } | ||
| 2154 | |||
| 2155 | /* | ||
| 2156 | * The registers we're interested in are at the end of the variable | ||
| 2157 | * length saved register structure. The fp points at the end of this | ||
| 2158 | * structure so the address of this struct is: | ||
| 2159 | * (struct frame_tail *)(xxx->fp)-1 | ||
| 2160 | * | ||
| 2161 | * This code has been adapted from the ARM OProfile support. | ||
| 2162 | */ | ||
| 2163 | struct frame_tail { | ||
| 2164 | struct frame_tail *fp; | ||
| 2165 | unsigned long sp; | ||
| 2166 | unsigned long lr; | ||
| 2167 | } __attribute__((packed)); | ||
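
Concretely, the saved frame pointer points one word past the saved lr, so the record of interest starts one struct frame_tail below it (a sketch of the assumed APCS layout):

	higher addresses
	               <- regs->ARM_fp (one word past the saved lr)
	+------+
	|  lr  |
	|  sp  |
	|  fp  |       <- tail = (struct frame_tail *)regs->ARM_fp - 1
	+------+
	lower addresses
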
| 2168 | |||
| 2169 | /* | ||
| 2170 | * Get the return address for a single stackframe and return a pointer to the | ||
| 2171 | * next frame tail. | ||
| 2172 | */ | ||
| 2173 | static struct frame_tail * | ||
| 2174 | user_backtrace(struct frame_tail *tail, | ||
| 2175 | struct perf_callchain_entry *entry) | ||
| 2176 | { | ||
| 2177 | struct frame_tail buftail; | ||
| 2178 | |||
| 2179 | /* Also check accessibility of one struct frame_tail beyond */ | ||
| 2180 | if (!access_ok(VERIFY_READ, tail, sizeof(buftail))) | ||
| 2181 | return NULL; | ||
| 2182 | if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail))) | ||
| 2183 | return NULL; | ||
| 2184 | |||
| 2185 | callchain_store(entry, buftail.lr); | ||
| 2186 | |||
| 2187 | /* | ||
| 2188 | * Frame pointers should strictly progress back up the stack | ||
| 2189 | * (towards higher addresses). | ||
| 2190 | */ | ||
| 2191 | if (tail >= buftail.fp) | ||
| 2192 | return NULL; | ||
| 2193 | |||
| 2194 | return buftail.fp - 1; | ||
| 2195 | } | ||
| 2196 | |||
| 2197 | static void | ||
| 2198 | perf_callchain_user(struct pt_regs *regs, | ||
| 2199 | struct perf_callchain_entry *entry) | ||
| 2200 | { | ||
| 2201 | struct frame_tail *tail; | ||
| 2202 | |||
| 2203 | callchain_store(entry, PERF_CONTEXT_USER); | ||
| 2204 | |||
| 2205 | if (!user_mode(regs)) | ||
| 2206 | regs = task_pt_regs(current); | ||
| 2207 | |||
| 2208 | tail = (struct frame_tail *)regs->ARM_fp - 1; | ||
| 2209 | |||
| 2210 | while (tail && !((unsigned long)tail & 0x3)) | ||
| 2211 | tail = user_backtrace(tail, entry); | ||
| 2212 | } | ||
| 2213 | |||
| 2214 | /* | ||
| 2215 | * Gets called by walk_stackframe() for every stackframe. This will be called | ||
| 2216 | * whilst unwinding the stackframe and is like a subroutine return so we use | ||
| 2217 | * the PC. | ||
| 2218 | */ | ||
| 2219 | static int | ||
| 2220 | callchain_trace(struct stackframe *fr, | ||
| 2221 | void *data) | ||
| 2222 | { | ||
| 2223 | struct perf_callchain_entry *entry = data; | ||
| 2224 | callchain_store(entry, fr->pc); | ||
| 2225 | return 0; | ||
| 2226 | } | ||
| 2227 | |||
| 2228 | static void | ||
| 2229 | perf_callchain_kernel(struct pt_regs *regs, | ||
| 2230 | struct perf_callchain_entry *entry) | ||
| 2231 | { | ||
| 2232 | struct stackframe fr; | ||
| 2233 | |||
| 2234 | callchain_store(entry, PERF_CONTEXT_KERNEL); | ||
| 2235 | fr.fp = regs->ARM_fp; | ||
| 2236 | fr.sp = regs->ARM_sp; | ||
| 2237 | fr.lr = regs->ARM_lr; | ||
| 2238 | fr.pc = regs->ARM_pc; | ||
| 2239 | walk_stackframe(&fr, callchain_trace, entry); | ||
| 2240 | } | ||
| 2241 | |||
| 2242 | static void | ||
| 2243 | perf_do_callchain(struct pt_regs *regs, | ||
| 2244 | struct perf_callchain_entry *entry) | ||
| 2245 | { | ||
| 2246 | int is_user; | ||
| 2247 | |||
| 2248 | if (!regs) | ||
| 2249 | return; | ||
| 2250 | |||
| 2251 | is_user = user_mode(regs); | ||
| 2252 | |||
| 2253 | if (!current || !current->pid) | ||
| 2254 | return; | ||
| 2255 | |||
| 2256 | if (is_user && current->state != TASK_RUNNING) | ||
| 2257 | return; | ||
| 2258 | |||
| 2259 | if (!is_user) | ||
| 2260 | perf_callchain_kernel(regs, entry); | ||
| 2261 | |||
| 2262 | if (current->mm) | ||
| 2263 | perf_callchain_user(regs, entry); | ||
| 2264 | } | ||
| 2265 | |||
| 2266 | static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry); | ||
| 2267 | |||
| 2268 | struct perf_callchain_entry * | ||
| 2269 | perf_callchain(struct pt_regs *regs) | ||
| 2270 | { | ||
| 2271 | struct perf_callchain_entry *entry = &__get_cpu_var(pmc_irq_entry); | ||
| 2272 | |||
| 2273 | entry->nr = 0; | ||
| 2274 | perf_do_callchain(regs, entry); | ||
| 2275 | return entry; | ||
| 2276 | } | ||
diff --git a/arch/arm/kernel/pmu.c b/arch/arm/kernel/pmu.c new file mode 100644 index 000000000000..a124312e343f --- /dev/null +++ b/arch/arm/kernel/pmu.c | |||
| @@ -0,0 +1,103 @@ | |||
| 1 | /* | ||
| 2 | * linux/arch/arm/kernel/pmu.c | ||
| 3 | * | ||
| 4 | * Copyright (C) 2009 picoChip Designs Ltd, Jamie Iles | ||
| 5 | * | ||
| 6 | * This program is free software; you can redistribute it and/or modify | ||
| 7 | * it under the terms of the GNU General Public License version 2 as | ||
| 8 | * published by the Free Software Foundation. | ||
| 9 | * | ||
| 10 | */ | ||
| 11 | |||
| 12 | #include <linux/cpumask.h> | ||
| 13 | #include <linux/err.h> | ||
| 14 | #include <linux/interrupt.h> | ||
| 15 | #include <linux/kernel.h> | ||
| 16 | #include <linux/module.h> | ||
| 17 | |||
| 18 | #include <asm/pmu.h> | ||
| 19 | |||
| 20 | /* | ||
| 21 | * Define the IRQs for the system. We could use something like a platform | ||
| 22 | * device but that seems fairly heavyweight for this. Also, the performance | ||
| 23 | * counters can't be removed or hotplugged. | ||
| 24 | * | ||
| 25 | * Ordering is important: init_pmu() will use the ordering to set the affinity | ||
| 26 | * to the corresponding core, e.g. the first interrupt goes to CPU 0, the | ||
| 27 | * second to CPU 1, etc. | ||
| 28 | */ | ||
| 29 | static const int irqs[] = { | ||
| 30 | #if defined(CONFIG_ARCH_OMAP2) | ||
| 31 | 3, | ||
| 32 | #elif defined(CONFIG_ARCH_BCMRING) | ||
| 33 | IRQ_PMUIRQ, | ||
| 34 | #elif defined(CONFIG_MACH_REALVIEW_EB) | ||
| 35 | IRQ_EB11MP_PMU_CPU0, | ||
| 36 | IRQ_EB11MP_PMU_CPU1, | ||
| 37 | IRQ_EB11MP_PMU_CPU2, | ||
| 38 | IRQ_EB11MP_PMU_CPU3, | ||
| 39 | #elif defined(CONFIG_ARCH_OMAP3) | ||
| 40 | INT_34XX_BENCH_MPU_EMUL, | ||
| 41 | #elif defined(CONFIG_ARCH_IOP32X) | ||
| 42 | IRQ_IOP32X_CORE_PMU, | ||
| 43 | #elif defined(CONFIG_ARCH_IOP33X) | ||
| 44 | IRQ_IOP33X_CORE_PMU, | ||
| 45 | #elif defined(CONFIG_ARCH_PXA) | ||
| 46 | IRQ_PMU, | ||
| 47 | #endif | ||
| 48 | }; | ||
| 49 | |||
| 50 | static const struct pmu_irqs pmu_irqs = { | ||
| 51 | .irqs = irqs, | ||
| 52 | .num_irqs = ARRAY_SIZE(irqs), | ||
| 53 | }; | ||
| 54 | |||
| 55 | static volatile long pmu_lock; | ||
| 56 | |||
| 57 | const struct pmu_irqs * | ||
| 58 | reserve_pmu(void) | ||
| 59 | { | ||
| 60 | return test_and_set_bit_lock(0, &pmu_lock) ? ERR_PTR(-EBUSY) : | ||
| 61 | &pmu_irqs; | ||
| 62 | } | ||
| 63 | EXPORT_SYMBOL_GPL(reserve_pmu); | ||
| 64 | |||
| 65 | int | ||
| 66 | release_pmu(const struct pmu_irqs *irqs) | ||
| 67 | { | ||
| 68 | if (WARN_ON(irqs != &pmu_irqs)) | ||
| 69 | return -EINVAL; | ||
| 70 | clear_bit_unlock(0, &pmu_lock); | ||
| 71 | return 0; | ||
| 72 | } | ||
| 73 | EXPORT_SYMBOL_GPL(release_pmu); | ||
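
The intended calling pattern, mirrored by the oprofile conversions later in this patch, is reserve, claim the interrupts, and release again on any failure. A condensed hypothetical client (handler and naming are illustrative only):

	/* Hypothetical client of reserve_pmu()/release_pmu(). */
	static const struct pmu_irqs *irqs;

	static int sketch_claim_pmu(irq_handler_t handler)
	{
		int i, err = 0;

		irqs = reserve_pmu();
		if (IS_ERR(irqs))
			return PTR_ERR(irqs);

		for (i = 0; i < irqs->num_irqs; ++i) {
			err = request_irq(irqs->irqs[i], handler,
					  IRQF_DISABLED, "pmu", NULL);
			if (err)
				break;
		}

		if (err) {
			/* unwind the IRQs we did get, then give the PMU back */
			while (--i >= 0)
				free_irq(irqs->irqs[i], NULL);
			release_pmu(irqs);
			irqs = NULL;
		}

		return err;
	}
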
| 74 | |||
| 75 | static int | ||
| 76 | set_irq_affinity(int irq, | ||
| 77 | unsigned int cpu) | ||
| 78 | { | ||
| 79 | #ifdef CONFIG_SMP | ||
| 80 | int err = irq_set_affinity(irq, cpumask_of(cpu)); | ||
| 81 | if (err) | ||
| 82 | pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n", | ||
| 83 | irq, cpu); | ||
| 84 | return err; | ||
| 85 | #else | ||
| 86 | return 0; | ||
| 87 | #endif | ||
| 88 | } | ||
| 89 | |||
| 90 | int | ||
| 91 | init_pmu(void) | ||
| 92 | { | ||
| 93 | int i, err = 0; | ||
| 94 | |||
| 95 | for (i = 0; i < pmu_irqs.num_irqs; ++i) { | ||
| 96 | err = set_irq_affinity(pmu_irqs.irqs[i], i); | ||
| 97 | if (err) | ||
| 98 | break; | ||
| 99 | } | ||
| 100 | |||
| 101 | return err; | ||
| 102 | } | ||
| 103 | EXPORT_SYMBOL_GPL(init_pmu); | ||
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c index 10e06801afb3..9d40c341e07e 100644 --- a/arch/arm/mm/fault.c +++ b/arch/arm/mm/fault.c | |||
| @@ -18,6 +18,7 @@ | |||
| 18 | #include <linux/page-flags.h> | 18 | #include <linux/page-flags.h> |
| 19 | #include <linux/sched.h> | 19 | #include <linux/sched.h> |
| 20 | #include <linux/highmem.h> | 20 | #include <linux/highmem.h> |
| 21 | #include <linux/perf_event.h> | ||
| 21 | 22 | ||
| 22 | #include <asm/system.h> | 23 | #include <asm/system.h> |
| 23 | #include <asm/pgtable.h> | 24 | #include <asm/pgtable.h> |
| @@ -302,6 +303,12 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs) | |||
| 302 | fault = __do_page_fault(mm, addr, fsr, tsk); | 303 | fault = __do_page_fault(mm, addr, fsr, tsk); |
| 303 | up_read(&mm->mmap_sem); | 304 | up_read(&mm->mmap_sem); |
| 304 | 305 | ||
| 306 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, 0, regs, addr); | ||
| 307 | if (fault & VM_FAULT_MAJOR) | ||
| 308 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MAJ, 1, 0, regs, addr); | ||
| 309 | else if (fault & VM_FAULT_MINOR) | ||
| 310 | perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS_MIN, 1, 0, regs, addr); | ||
| 311 | |||
| 305 | /* | 312 | /* |
| 306 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR | 313 | * Handle the "normal" case first - VM_FAULT_MAJOR / VM_FAULT_MINOR |
| 307 | */ | 314 | */ |
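
With these hooks in place, the generic software page-fault events become usable on ARM. A minimal userspace sketch via the standard perf_event_open ABI (helper name hypothetical):

	#include <linux/perf_event.h>
	#include <stdint.h>
	#include <string.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	/* Hypothetical example: count this task's page faults. */
	static long long count_page_faults(void (*workload)(void))
	{
		struct perf_event_attr attr;
		uint64_t count = 0;
		int fd;

		memset(&attr, 0, sizeof(attr));
		attr.size   = sizeof(attr);
		attr.type   = PERF_TYPE_SOFTWARE;
		attr.config = PERF_COUNT_SW_PAGE_FAULTS;	/* or _MAJ / _MIN */

		fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
		if (fd < 0)
			return -1;

		workload();	/* counting is live while the fd is open */

		if (read(fd, &count, sizeof(count)) != sizeof(count))
			count = 0;
		close(fd);
		return (long long)count;
	}
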
diff --git a/arch/arm/oprofile/op_model_arm11_core.c b/arch/arm/oprofile/op_model_arm11_core.c index ad80752cb9fb..ef3e2653b90c 100644 --- a/arch/arm/oprofile/op_model_arm11_core.c +++ b/arch/arm/oprofile/op_model_arm11_core.c | |||
| @@ -132,7 +132,7 @@ static irqreturn_t arm11_pmu_interrupt(int irq, void *arg) | |||
| 132 | return IRQ_HANDLED; | 132 | return IRQ_HANDLED; |
| 133 | } | 133 | } |
| 134 | 134 | ||
| 135 | int arm11_request_interrupts(int *irqs, int nr) | 135 | int arm11_request_interrupts(const int *irqs, int nr) |
| 136 | { | 136 | { |
| 137 | unsigned int i; | 137 | unsigned int i; |
| 138 | int ret = 0; | 138 | int ret = 0; |
| @@ -153,7 +153,7 @@ int arm11_request_interrupts(int *irqs, int nr) | |||
| 153 | return ret; | 153 | return ret; |
| 154 | } | 154 | } |
| 155 | 155 | ||
| 156 | void arm11_release_interrupts(int *irqs, int nr) | 156 | void arm11_release_interrupts(const int *irqs, int nr) |
| 157 | { | 157 | { |
| 158 | unsigned int i; | 158 | unsigned int i; |
| 159 | 159 | ||
diff --git a/arch/arm/oprofile/op_model_arm11_core.h b/arch/arm/oprofile/op_model_arm11_core.h index 6f8538e5a960..1902b99d9dfd 100644 --- a/arch/arm/oprofile/op_model_arm11_core.h +++ b/arch/arm/oprofile/op_model_arm11_core.h | |||
| @@ -39,7 +39,7 @@ | |||
| 39 | int arm11_setup_pmu(void); | 39 | int arm11_setup_pmu(void); |
| 40 | int arm11_start_pmu(void); | 40 | int arm11_start_pmu(void); |
| 41 | int arm11_stop_pmu(void); | 41 | int arm11_stop_pmu(void); |
| 42 | int arm11_request_interrupts(int *, int); | 42 | int arm11_request_interrupts(const int *, int); |
| 43 | void arm11_release_interrupts(int *, int); | 43 | void arm11_release_interrupts(const int *, int); |
| 44 | 44 | ||
| 45 | #endif | 45 | #endif |
diff --git a/arch/arm/oprofile/op_model_mpcore.c b/arch/arm/oprofile/op_model_mpcore.c index 4ce0f9801e2e..f73ce875a395 100644 --- a/arch/arm/oprofile/op_model_mpcore.c +++ b/arch/arm/oprofile/op_model_mpcore.c | |||
| @@ -32,6 +32,7 @@ | |||
| 32 | /* #define DEBUG */ | 32 | /* #define DEBUG */ |
| 33 | #include <linux/types.h> | 33 | #include <linux/types.h> |
| 34 | #include <linux/errno.h> | 34 | #include <linux/errno.h> |
| 35 | #include <linux/err.h> | ||
| 35 | #include <linux/sched.h> | 36 | #include <linux/sched.h> |
| 36 | #include <linux/oprofile.h> | 37 | #include <linux/oprofile.h> |
| 37 | #include <linux/interrupt.h> | 38 | #include <linux/interrupt.h> |
| @@ -43,6 +44,7 @@ | |||
| 43 | #include <mach/hardware.h> | 44 | #include <mach/hardware.h> |
| 44 | #include <mach/board-eb.h> | 45 | #include <mach/board-eb.h> |
| 45 | #include <asm/system.h> | 46 | #include <asm/system.h> |
| 47 | #include <asm/pmu.h> | ||
| 46 | 48 | ||
| 47 | #include "op_counter.h" | 49 | #include "op_counter.h" |
| 48 | #include "op_arm_model.h" | 50 | #include "op_arm_model.h" |
| @@ -58,6 +60,7 @@ | |||
| 58 | * Bitmask of used SCU counters | 60 | * Bitmask of used SCU counters |
| 59 | */ | 61 | */ |
| 60 | static unsigned int scu_em_used; | 62 | static unsigned int scu_em_used; |
| 63 | static const struct pmu_irqs *pmu_irqs; | ||
| 61 | 64 | ||
| 62 | /* | 65 | /* |
| 63 | * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number) | 66 | * 2 helper fns take a counter number from 0-7 (not the userspace-visible counter number) |
| @@ -225,33 +228,40 @@ static int em_setup_ctrs(void) | |||
| 225 | return 0; | 228 | return 0; |
| 226 | } | 229 | } |
| 227 | 230 | ||
| 228 | static int arm11_irqs[] = { | ||
| 229 | [0] = IRQ_EB11MP_PMU_CPU0, | ||
| 230 | [1] = IRQ_EB11MP_PMU_CPU1, | ||
| 231 | [2] = IRQ_EB11MP_PMU_CPU2, | ||
| 232 | [3] = IRQ_EB11MP_PMU_CPU3 | ||
| 233 | }; | ||
| 234 | |||
| 235 | static int em_start(void) | 231 | static int em_start(void) |
| 236 | { | 232 | { |
| 237 | int ret; | 233 | int ret; |
| 238 | 234 | ||
| 239 | ret = arm11_request_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs)); | 235 | pmu_irqs = reserve_pmu(); |
| 236 | if (IS_ERR(pmu_irqs)) { | ||
| 237 | ret = PTR_ERR(pmu_irqs); | ||
| 238 | goto out; | ||
| 239 | } | ||
| 240 | |||
| 241 | ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs); | ||
| 240 | if (ret == 0) { | 242 | if (ret == 0) { |
| 241 | em_call_function(arm11_start_pmu); | 243 | em_call_function(arm11_start_pmu); |
| 242 | 244 | ||
| 243 | ret = scu_start(); | 245 | ret = scu_start(); |
| 244 | if (ret) | 246 | if (ret) { |
| 245 | arm11_release_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs)); | 247 | arm11_release_interrupts(pmu_irqs->irqs, |
| 248 | pmu_irqs->num_irqs); | ||
| 249 | } else { | ||
| 250 | release_pmu(pmu_irqs); | ||
| 251 | pmu_irqs = NULL; | ||
| 252 | } | ||
| 246 | } | 253 | } |
| 254 | |||
| 255 | out: | ||
| 247 | return ret; | 256 | return ret; |
| 248 | } | 257 | } |
| 249 | 258 | ||
| 250 | static void em_stop(void) | 259 | static void em_stop(void) |
| 251 | { | 260 | { |
| 252 | em_call_function(arm11_stop_pmu); | 261 | em_call_function(arm11_stop_pmu); |
| 253 | arm11_release_interrupts(arm11_irqs, ARRAY_SIZE(arm11_irqs)); | 262 | arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs); |
| 254 | scu_stop(); | 263 | scu_stop(); |
| 264 | release_pmu(pmu_irqs); | ||
| 255 | } | 265 | } |
| 256 | 266 | ||
| 257 | /* | 267 | /* |
| @@ -283,15 +293,7 @@ static int em_setup(void) | |||
| 283 | em_route_irq(IRQ_EB11MP_PMU_SCU6, 3); | 293 | em_route_irq(IRQ_EB11MP_PMU_SCU6, 3); |
| 284 | em_route_irq(IRQ_EB11MP_PMU_SCU7, 3); | 294 | em_route_irq(IRQ_EB11MP_PMU_SCU7, 3); |
| 285 | 295 | ||
| 286 | /* | 296 | return init_pmu(); |
| 287 | * Send CP15 PMU interrupts to the owner CPU. | ||
| 288 | */ | ||
| 289 | em_route_irq(IRQ_EB11MP_PMU_CPU0, 0); | ||
| 290 | em_route_irq(IRQ_EB11MP_PMU_CPU1, 1); | ||
| 291 | em_route_irq(IRQ_EB11MP_PMU_CPU2, 2); | ||
| 292 | em_route_irq(IRQ_EB11MP_PMU_CPU3, 3); | ||
| 293 | |||
| 294 | return 0; | ||
| 295 | } | 297 | } |
| 296 | 298 | ||
| 297 | struct op_arm_model_spec op_mpcore_spec = { | 299 | struct op_arm_model_spec op_mpcore_spec = { |
diff --git a/arch/arm/oprofile/op_model_v6.c b/arch/arm/oprofile/op_model_v6.c index f7d2ec5ee9a1..a22357a2fd08 100644 --- a/arch/arm/oprofile/op_model_v6.c +++ b/arch/arm/oprofile/op_model_v6.c | |||
| @@ -19,39 +19,47 @@ | |||
| 19 | /* #define DEBUG */ | 19 | /* #define DEBUG */ |
| 20 | #include <linux/types.h> | 20 | #include <linux/types.h> |
| 21 | #include <linux/errno.h> | 21 | #include <linux/errno.h> |
| 22 | #include <linux/err.h> | ||
| 22 | #include <linux/sched.h> | 23 | #include <linux/sched.h> |
| 23 | #include <linux/oprofile.h> | 24 | #include <linux/oprofile.h> |
| 24 | #include <linux/interrupt.h> | 25 | #include <linux/interrupt.h> |
| 25 | #include <asm/irq.h> | 26 | #include <asm/irq.h> |
| 26 | #include <asm/system.h> | 27 | #include <asm/system.h> |
| 28 | #include <asm/pmu.h> | ||
| 27 | 29 | ||
| 28 | #include "op_counter.h" | 30 | #include "op_counter.h" |
| 29 | #include "op_arm_model.h" | 31 | #include "op_arm_model.h" |
| 30 | #include "op_model_arm11_core.h" | 32 | #include "op_model_arm11_core.h" |
| 31 | 33 | ||
| 32 | static int irqs[] = { | 34 | static const struct pmu_irqs *pmu_irqs; |
| 33 | #ifdef CONFIG_ARCH_OMAP2 | ||
| 34 | 3, | ||
| 35 | #endif | ||
| 36 | #ifdef CONFIG_ARCH_BCMRING | ||
| 37 | IRQ_PMUIRQ, /* for BCMRING, ARM PMU interrupt is 43 */ | ||
| 38 | #endif | ||
| 39 | }; | ||
| 40 | 35 | ||
| 41 | static void armv6_pmu_stop(void) | 36 | static void armv6_pmu_stop(void) |
| 42 | { | 37 | { |
| 43 | arm11_stop_pmu(); | 38 | arm11_stop_pmu(); |
| 44 | arm11_release_interrupts(irqs, ARRAY_SIZE(irqs)); | 39 | arm11_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs); |
| 40 | release_pmu(pmu_irqs); | ||
| 41 | pmu_irqs = NULL; | ||
| 45 | } | 42 | } |
| 46 | 43 | ||
| 47 | static int armv6_pmu_start(void) | 44 | static int armv6_pmu_start(void) |
| 48 | { | 45 | { |
| 49 | int ret; | 46 | int ret; |
| 50 | 47 | ||
| 51 | ret = arm11_request_interrupts(irqs, ARRAY_SIZE(irqs)); | 48 | pmu_irqs = reserve_pmu(); |
| 52 | if (ret >= 0) | 49 | if (IS_ERR(pmu_irqs)) { |
| 50 | ret = PTR_ERR(pmu_irqs); | ||
| 51 | goto out; | ||
| 52 | } | ||
| 53 | |||
| 54 | ret = arm11_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs); | ||
| 55 | if (ret >= 0) { | ||
| 53 | ret = arm11_start_pmu(); | 56 | ret = arm11_start_pmu(); |
| 57 | } else { | ||
| 58 | release_pmu(pmu_irqs); | ||
| 59 | pmu_irqs = NULL; | ||
| 60 | } | ||
| 54 | 61 | ||
| 62 | out: | ||
| 55 | return ret; | 63 | return ret; |
| 56 | } | 64 | } |
| 57 | 65 | ||
diff --git a/arch/arm/oprofile/op_model_v7.c b/arch/arm/oprofile/op_model_v7.c index 2088a6c0cc0e..8642d0891ae1 100644 --- a/arch/arm/oprofile/op_model_v7.c +++ b/arch/arm/oprofile/op_model_v7.c | |||
| @@ -11,11 +11,14 @@ | |||
| 11 | */ | 11 | */ |
| 12 | #include <linux/types.h> | 12 | #include <linux/types.h> |
| 13 | #include <linux/errno.h> | 13 | #include <linux/errno.h> |
| 14 | #include <linux/err.h> | ||
| 14 | #include <linux/oprofile.h> | 15 | #include <linux/oprofile.h> |
| 15 | #include <linux/interrupt.h> | 16 | #include <linux/interrupt.h> |
| 16 | #include <linux/irq.h> | 17 | #include <linux/irq.h> |
| 17 | #include <linux/smp.h> | 18 | #include <linux/smp.h> |
| 18 | 19 | ||
| 20 | #include <asm/pmu.h> | ||
| 21 | |||
| 19 | #include "op_counter.h" | 22 | #include "op_counter.h" |
| 20 | #include "op_arm_model.h" | 23 | #include "op_arm_model.h" |
| 21 | #include "op_model_v7.h" | 24 | #include "op_model_v7.h" |
| @@ -295,7 +298,7 @@ static irqreturn_t armv7_pmnc_interrupt(int irq, void *arg) | |||
| 295 | return IRQ_HANDLED; | 298 | return IRQ_HANDLED; |
| 296 | } | 299 | } |
| 297 | 300 | ||
| 298 | int armv7_request_interrupts(int *irqs, int nr) | 301 | int armv7_request_interrupts(const int *irqs, int nr) |
| 299 | { | 302 | { |
| 300 | unsigned int i; | 303 | unsigned int i; |
| 301 | int ret = 0; | 304 | int ret = 0; |
| @@ -318,7 +321,7 @@ int armv7_request_interrupts(int *irqs, int nr) | |||
| 318 | return ret; | 321 | return ret; |
| 319 | } | 322 | } |
| 320 | 323 | ||
| 321 | void armv7_release_interrupts(int *irqs, int nr) | 324 | void armv7_release_interrupts(const int *irqs, int nr) |
| 322 | { | 325 | { |
| 323 | unsigned int i; | 326 | unsigned int i; |
| 324 | 327 | ||
| @@ -362,12 +365,7 @@ static void armv7_pmnc_dump_regs(void) | |||
| 362 | } | 365 | } |
| 363 | #endif | 366 | #endif |
| 364 | 367 | ||
| 365 | 368 | static const struct pmu_irqs *pmu_irqs; | |
| 366 | static int irqs[] = { | ||
| 367 | #ifdef CONFIG_ARCH_OMAP3 | ||
| 368 | INT_34XX_BENCH_MPU_EMUL, | ||
| 369 | #endif | ||
| 370 | }; | ||
| 371 | 369 | ||
| 372 | static void armv7_pmnc_stop(void) | 370 | static void armv7_pmnc_stop(void) |
| 373 | { | 371 | { |
| @@ -375,19 +373,29 @@ static void armv7_pmnc_stop(void) | |||
| 375 | armv7_pmnc_dump_regs(); | 373 | armv7_pmnc_dump_regs(); |
| 376 | #endif | 374 | #endif |
| 377 | armv7_stop_pmnc(); | 375 | armv7_stop_pmnc(); |
| 378 | armv7_release_interrupts(irqs, ARRAY_SIZE(irqs)); | 376 | armv7_release_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs); |
| 377 | release_pmu(pmu_irqs); | ||
| 378 | pmu_irqs = NULL; | ||
| 379 | } | 379 | } |
| 380 | 380 | ||
| 381 | static int armv7_pmnc_start(void) | 381 | static int armv7_pmnc_start(void) |
| 382 | { | 382 | { |
| 383 | int ret; | 383 | int ret; |
| 384 | 384 | ||
| 385 | pmu_irqs = reserve_pmu(); | ||
| 386 | if (IS_ERR(pmu_irqs)) | ||
| 387 | return PTR_ERR(pmu_irqs); | ||
| 388 | |||
| 385 | #ifdef DEBUG | 389 | #ifdef DEBUG |
| 386 | armv7_pmnc_dump_regs(); | 390 | armv7_pmnc_dump_regs(); |
| 387 | #endif | 391 | #endif |
| 388 | ret = armv7_request_interrupts(irqs, ARRAY_SIZE(irqs)); | 392 | ret = armv7_request_interrupts(pmu_irqs->irqs, pmu_irqs->num_irqs); |
| 389 | if (ret >= 0) | 393 | if (ret >= 0) { |
| 390 | armv7_start_pmnc(); | 394 | armv7_start_pmnc(); |
| 395 | } else { | ||
| 396 | release_pmu(pmu_irqs); | ||
| 397 | pmu_irqs = NULL; | ||
| 398 | } | ||
| 391 | 399 | ||
| 392 | return ret; | 400 | return ret; |
| 393 | } | 401 | } |
diff --git a/arch/arm/oprofile/op_model_v7.h b/arch/arm/oprofile/op_model_v7.h index 0e19bcc2e100..9ca334b39c75 100644 --- a/arch/arm/oprofile/op_model_v7.h +++ b/arch/arm/oprofile/op_model_v7.h | |||
| @@ -97,7 +97,7 @@ | |||
| 97 | int armv7_setup_pmu(void); | 97 | int armv7_setup_pmu(void); |
| 98 | int armv7_start_pmu(void); | 98 | int armv7_start_pmu(void); |
| 99 | int armv7_stop_pmu(void); | 99 | int armv7_stop_pmu(void); |
| 100 | int armv7_request_interrupts(int *, int); | 100 | int armv7_request_interrupts(const int *, int); |
| 101 | void armv7_release_interrupts(int *, int); | 101 | void armv7_release_interrupts(const int *, int); |
| 102 | 102 | ||
| 103 | #endif | 103 | #endif |
diff --git a/arch/arm/oprofile/op_model_xscale.c b/arch/arm/oprofile/op_model_xscale.c index 724ab9ce2526..1d34a02048bd 100644 --- a/arch/arm/oprofile/op_model_xscale.c +++ b/arch/arm/oprofile/op_model_xscale.c | |||
| @@ -17,12 +17,14 @@ | |||
| 17 | /* #define DEBUG */ | 17 | /* #define DEBUG */ |
| 18 | #include <linux/types.h> | 18 | #include <linux/types.h> |
| 19 | #include <linux/errno.h> | 19 | #include <linux/errno.h> |
| 20 | #include <linux/err.h> | ||
| 20 | #include <linux/sched.h> | 21 | #include <linux/sched.h> |
| 21 | #include <linux/oprofile.h> | 22 | #include <linux/oprofile.h> |
| 22 | #include <linux/interrupt.h> | 23 | #include <linux/interrupt.h> |
| 23 | #include <linux/irq.h> | 24 | #include <linux/irq.h> |
| 24 | 25 | ||
| 25 | #include <asm/cputype.h> | 26 | #include <asm/cputype.h> |
| 27 | #include <asm/pmu.h> | ||
| 26 | 28 | ||
| 27 | #include "op_counter.h" | 29 | #include "op_counter.h" |
| 28 | #include "op_arm_model.h" | 30 | #include "op_arm_model.h" |
| @@ -33,17 +35,6 @@ | |||
| 33 | #define PMU_RESET (CCNT_RESET | PMN_RESET) | 35 | #define PMU_RESET (CCNT_RESET | PMN_RESET) |
| 34 | #define PMU_CNT64 0x008 /* Make CCNT count every 64th cycle */ | 36 | #define PMU_CNT64 0x008 /* Make CCNT count every 64th cycle */ |
| 35 | 37 | ||
| 36 | /* TODO do runtime detection */ | ||
| 37 | #ifdef CONFIG_ARCH_IOP32X | ||
| 38 | #define XSCALE_PMU_IRQ IRQ_IOP32X_CORE_PMU | ||
| 39 | #endif | ||
| 40 | #ifdef CONFIG_ARCH_IOP33X | ||
| 41 | #define XSCALE_PMU_IRQ IRQ_IOP33X_CORE_PMU | ||
| 42 | #endif | ||
| 43 | #ifdef CONFIG_ARCH_PXA | ||
| 44 | #define XSCALE_PMU_IRQ IRQ_PMU | ||
| 45 | #endif | ||
| 46 | |||
| 47 | /* | 38 | /* |
| 48 | * Different types of events that can be counted by the XScale PMU | 39 | * Different types of events that can be counted by the XScale PMU |
| 49 | * as used by Oprofile userspace. Here primarily for documentation | 40 | * as used by Oprofile userspace. Here primarily for documentation |
| @@ -367,6 +358,8 @@ static irqreturn_t xscale_pmu_interrupt(int irq, void *arg) | |||
| 367 | return IRQ_HANDLED; | 358 | return IRQ_HANDLED; |
| 368 | } | 359 | } |
| 369 | 360 | ||
| 361 | static const struct pmu_irqs *pmu_irqs; | ||
| 362 | |||
| 370 | static void xscale_pmu_stop(void) | 363 | static void xscale_pmu_stop(void) |
| 371 | { | 364 | { |
| 372 | u32 pmnc = read_pmnc(); | 365 | u32 pmnc = read_pmnc(); |
| @@ -374,20 +367,30 @@ static void xscale_pmu_stop(void) | |||
| 374 | pmnc &= ~PMU_ENABLE; | 367 | pmnc &= ~PMU_ENABLE; |
| 375 | write_pmnc(pmnc); | 368 | write_pmnc(pmnc); |
| 376 | 369 | ||
| 377 | free_irq(XSCALE_PMU_IRQ, results); | 370 | free_irq(pmu_irqs->irqs[0], results); |
| 371 | release_pmu(pmu_irqs); | ||
| 372 | pmu_irqs = NULL; | ||
| 378 | } | 373 | } |
| 379 | 374 | ||
| 380 | static int xscale_pmu_start(void) | 375 | static int xscale_pmu_start(void) |
| 381 | { | 376 | { |
| 382 | int ret; | 377 | int ret; |
| 383 | u32 pmnc = read_pmnc(); | 378 | u32 pmnc; |
| 379 | |||
| 380 | pmu_irqs = reserve_pmu(); | ||
| 381 | if (IS_ERR(pmu_irqs)) | ||
| 382 | return PTR_ERR(pmu_irqs); | ||
| 383 | |||
| 384 | pmnc = read_pmnc(); | ||
| 384 | 385 | ||
| 385 | ret = request_irq(XSCALE_PMU_IRQ, xscale_pmu_interrupt, IRQF_DISABLED, | 386 | ret = request_irq(pmu_irqs->irqs[0], xscale_pmu_interrupt, |
| 386 | "XScale PMU", (void *)results); | 387 | IRQF_DISABLED, "XScale PMU", (void *)results); |
| 387 | 388 | ||
| 388 | if (ret < 0) { | 389 | if (ret < 0) { |
| 389 | printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n", | 390 | printk(KERN_ERR "oprofile: unable to request IRQ%d for XScale PMU\n", |
| 390 | XSCALE_PMU_IRQ); | 391 | pmu_irqs->irqs[0]); |
| 392 | release_pmu(pmu_irqs); | ||
| 393 | pmu_irqs = NULL; | ||
| 391 | return ret; | 394 | return ret; |
| 392 | } | 395 | } |
| 393 | 396 | ||
