Diffstat (limited to 'arch/arm/kernel/perf_event_v7.c')
 -rw-r--r--  arch/arm/kernel/perf_event_v7.c  906
 1 file changed, 906 insertions, 0 deletions
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
new file mode 100644
index 000000000000..2e1402556fa0
--- /dev/null
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -0,0 +1,906 @@
/*
 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
 *
 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
 * 2010 (c) MontaVista Software, LLC.
 *
 * Copied from ARMv6 code, with the low level code inspired
 * by the ARMv7 Oprofile code.
 *
 * Cortex-A8 has up to 4 configurable performance counters and
 * a single cycle counter.
 * Cortex-A9 has up to 31 configurable performance counters and
 * a single cycle counter.
 *
 * All counters can be enabled/disabled and IRQ masked separately.  The
 * cycle counter can be reset on its own; the event counters are reset
 * together, separately from the cycle counter.
 */

#ifdef CONFIG_CPU_V7
/* Common ARMv7 event types */
enum armv7_perf_types {
	ARMV7_PERFCTR_PMNC_SW_INCR		= 0x00,
	ARMV7_PERFCTR_IFETCH_MISS		= 0x01,
	ARMV7_PERFCTR_ITLB_MISS			= 0x02,
	ARMV7_PERFCTR_DCACHE_REFILL		= 0x03,
	ARMV7_PERFCTR_DCACHE_ACCESS		= 0x04,
	ARMV7_PERFCTR_DTLB_REFILL		= 0x05,
	ARMV7_PERFCTR_DREAD			= 0x06,
	ARMV7_PERFCTR_DWRITE			= 0x07,

	ARMV7_PERFCTR_EXC_TAKEN			= 0x09,
	ARMV7_PERFCTR_EXC_EXECUTED		= 0x0A,
	ARMV7_PERFCTR_CID_WRITE			= 0x0B,
	/* ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
	 * It counts:
	 *  - all branch instructions,
	 *  - instructions that explicitly write the PC,
	 *  - exception generating instructions.
	 */
	ARMV7_PERFCTR_PC_WRITE			= 0x0C,
	ARMV7_PERFCTR_PC_IMM_BRANCH		= 0x0D,
	ARMV7_PERFCTR_UNALIGNED_ACCESS		= 0x0F,
	ARMV7_PERFCTR_PC_BRANCH_MIS_PRED	= 0x10,
	ARMV7_PERFCTR_CLOCK_CYCLES		= 0x11,

	ARMV7_PERFCTR_PC_BRANCH_MIS_USED	= 0x12,

	ARMV7_PERFCTR_CPU_CYCLES		= 0xFF
};
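
/*
 * The event numbers above (and the CPU specific ones below) are the
 * encodings programmed into the event select register, so any of them
 * can also be requested through the perf raw event interface without
 * going via the mapping tables that follow.  An illustrative invocation,
 * assuming a perf userspace tool is available on the target:
 *
 *	perf stat -e r003 -e r005 <workload>
 *
 * counts D-cache refills (0x03) and D-TLB refills (0x05) directly.
 */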

/* ARMv7 Cortex-A8 specific event types */
enum armv7_a8_perf_types {
	ARMV7_PERFCTR_INSTR_EXECUTED		= 0x08,

	ARMV7_PERFCTR_PC_PROC_RETURN		= 0x0E,

	ARMV7_PERFCTR_WRITE_BUFFER_FULL		= 0x40,
	ARMV7_PERFCTR_L2_STORE_MERGED		= 0x41,
	ARMV7_PERFCTR_L2_STORE_BUFF		= 0x42,
	ARMV7_PERFCTR_L2_ACCESS			= 0x43,
	ARMV7_PERFCTR_L2_CACH_MISS		= 0x44,
	ARMV7_PERFCTR_AXI_READ_CYCLES		= 0x45,
	ARMV7_PERFCTR_AXI_WRITE_CYCLES		= 0x46,
	ARMV7_PERFCTR_MEMORY_REPLAY		= 0x47,
	ARMV7_PERFCTR_UNALIGNED_ACCESS_REPLAY	= 0x48,
	ARMV7_PERFCTR_L1_DATA_MISS		= 0x49,
	ARMV7_PERFCTR_L1_INST_MISS		= 0x4A,
	ARMV7_PERFCTR_L1_DATA_COLORING		= 0x4B,
	ARMV7_PERFCTR_L1_NEON_DATA		= 0x4C,
	ARMV7_PERFCTR_L1_NEON_CACH_DATA		= 0x4D,
	ARMV7_PERFCTR_L2_NEON			= 0x4E,
	ARMV7_PERFCTR_L2_NEON_HIT		= 0x4F,
	ARMV7_PERFCTR_L1_INST			= 0x50,
	ARMV7_PERFCTR_PC_RETURN_MIS_PRED	= 0x51,
	ARMV7_PERFCTR_PC_BRANCH_FAILED		= 0x52,
	ARMV7_PERFCTR_PC_BRANCH_TAKEN		= 0x53,
	ARMV7_PERFCTR_PC_BRANCH_EXECUTED	= 0x54,
	ARMV7_PERFCTR_OP_EXECUTED		= 0x55,
	ARMV7_PERFCTR_CYCLES_INST_STALL		= 0x56,
	ARMV7_PERFCTR_CYCLES_INST		= 0x57,
	ARMV7_PERFCTR_CYCLES_NEON_DATA_STALL	= 0x58,
	ARMV7_PERFCTR_CYCLES_NEON_INST_STALL	= 0x59,
	ARMV7_PERFCTR_NEON_CYCLES		= 0x5A,

	ARMV7_PERFCTR_PMU0_EVENTS		= 0x70,
	ARMV7_PERFCTR_PMU1_EVENTS		= 0x71,
	ARMV7_PERFCTR_PMU_EVENTS		= 0x72,
};

/* ARMv7 Cortex-A9 specific event types */
enum armv7_a9_perf_types {
	ARMV7_PERFCTR_JAVA_HW_BYTECODE_EXEC	= 0x40,
	ARMV7_PERFCTR_JAVA_SW_BYTECODE_EXEC	= 0x41,
	ARMV7_PERFCTR_JAZELLE_BRANCH_EXEC	= 0x42,

	ARMV7_PERFCTR_COHERENT_LINE_MISS	= 0x50,
	ARMV7_PERFCTR_COHERENT_LINE_HIT		= 0x51,

	ARMV7_PERFCTR_ICACHE_DEP_STALL_CYCLES	= 0x60,
	ARMV7_PERFCTR_DCACHE_DEP_STALL_CYCLES	= 0x61,
	ARMV7_PERFCTR_TLB_MISS_DEP_STALL_CYCLES	= 0x62,
	ARMV7_PERFCTR_STREX_EXECUTED_PASSED	= 0x63,
	ARMV7_PERFCTR_STREX_EXECUTED_FAILED	= 0x64,
	ARMV7_PERFCTR_DATA_EVICTION		= 0x65,
	ARMV7_PERFCTR_ISSUE_STAGE_NO_INST	= 0x66,
	ARMV7_PERFCTR_ISSUE_STAGE_EMPTY		= 0x67,
	ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE	= 0x68,

	ARMV7_PERFCTR_PREDICTABLE_FUNCT_RETURNS	= 0x6E,

	ARMV7_PERFCTR_MAIN_UNIT_EXECUTED_INST	= 0x70,
	ARMV7_PERFCTR_SECOND_UNIT_EXECUTED_INST	= 0x71,
	ARMV7_PERFCTR_LD_ST_UNIT_EXECUTED_INST	= 0x72,
	ARMV7_PERFCTR_FP_EXECUTED_INST		= 0x73,
	ARMV7_PERFCTR_NEON_EXECUTED_INST	= 0x74,

	ARMV7_PERFCTR_PLD_FULL_DEP_STALL_CYCLES		= 0x80,
	ARMV7_PERFCTR_DATA_WR_DEP_STALL_CYCLES		= 0x81,
	ARMV7_PERFCTR_ITLB_MISS_DEP_STALL_CYCLES	= 0x82,
	ARMV7_PERFCTR_DTLB_MISS_DEP_STALL_CYCLES	= 0x83,
	ARMV7_PERFCTR_MICRO_ITLB_MISS_DEP_STALL_CYCLES	= 0x84,
	ARMV7_PERFCTR_MICRO_DTLB_MISS_DEP_STALL_CYCLES	= 0x85,
	ARMV7_PERFCTR_DMB_DEP_STALL_CYCLES		= 0x86,

	ARMV7_PERFCTR_INTGR_CLK_ENABLED_CYCLES	= 0x8A,
	ARMV7_PERFCTR_DATA_ENGINE_CLK_EN_CYCLES	= 0x8B,

	ARMV7_PERFCTR_ISB_INST			= 0x90,
	ARMV7_PERFCTR_DSB_INST			= 0x91,
	ARMV7_PERFCTR_DMB_INST			= 0x92,
	ARMV7_PERFCTR_EXT_INTERRUPTS		= 0x93,

	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_COMPLETED	= 0xA0,
	ARMV7_PERFCTR_PLE_CACHE_LINE_RQST_SKIPPED	= 0xA1,
	ARMV7_PERFCTR_PLE_FIFO_FLUSH		= 0xA2,
	ARMV7_PERFCTR_PLE_RQST_COMPLETED	= 0xA3,
	ARMV7_PERFCTR_PLE_FIFO_OVERFLOW		= 0xA4,
	ARMV7_PERFCTR_PLE_RQST_PROG		= 0xA5
};

/*
 * Cortex-A8 HW events mapping
 *
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INSTR_EXECUTED,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_CACHE_MISSES]	    = HW_OP_UNSUPPORTED,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};
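
/*
 * A minimal sketch of how the core ARM perf code is expected to consume
 * this table (the function name and error handling below are illustrative
 * assumptions, not part of this file):
 *
 *	static int armpmu_map_event(u64 config)
 *	{
 *		int mapping = (*armpmu->event_map)[config];
 *
 *		return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
 *	}
 *
 * Entries marked HW_OP_UNSUPPORTED thus make the corresponding generic
 * event fail at open time instead of silently counting the wrong thing.
 */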

static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_INST,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_INST_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L2_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_L2_CACH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.  If
		 * users want anything beyond those (e.g. DTLB accesses),
		 * a raw counter must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Cortex-A9 HW events mapping
 */
static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES]	    = ARMV7_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]	    = ARMV7_PERFCTR_INST_OUT_OF_RENAME_STAGE,
	[PERF_COUNT_HW_CACHE_REFERENCES]    = ARMV7_PERFCTR_COHERENT_LINE_HIT,
	[PERF_COUNT_HW_CACHE_MISSES]	    = ARMV7_PERFCTR_COHERENT_LINE_MISS,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
	[PERF_COUNT_HW_BRANCH_MISSES]	    = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]	    = ARMV7_PERFCTR_CLOCK_CYCLES,
};

static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					  [PERF_COUNT_HW_CACHE_OP_MAX]
					  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	[C(L1D)] = {
		/*
		 * The performance counters don't differentiate between read
		 * and write accesses/misses so this isn't strictly correct,
		 * but it's the best we can do. Writes and reads get
		 * combined.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_DCACHE_ACCESS,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DCACHE_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(L1I)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_IFETCH_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(LL)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(DTLB)] = {
		/*
		 * Only ITLB misses and DTLB refills are supported.  If
		 * users want anything beyond those (e.g. DTLB accesses),
		 * a raw counter must be used.
		 */
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_DTLB_REFILL,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(ITLB)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_ITLB_MISS,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
	[C(BPU)] = {
		[C(OP_READ)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_WRITE)] = {
			[C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_WRITE,
			[C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
		},
		[C(OP_PREFETCH)] = {
			[C(RESULT_ACCESS)]	= CACHE_OP_UNSUPPORTED,
			[C(RESULT_MISS)]	= CACHE_OP_UNSUPPORTED,
		},
	},
};

/*
 * Perf Events counters
 */
enum armv7_counters {
	ARMV7_CYCLE_COUNTER	= 1,	/* Cycle counter */
	ARMV7_COUNTER0		= 2,	/* First event counter */
};

/*
 * The cycle counter is ARMV7_CYCLE_COUNTER.
 * The first event counter is ARMV7_COUNTER0.
 * The last event counter is (ARMV7_COUNTER0 + armpmu->num_events - 1).
 */
#define ARMV7_COUNTER_LAST	(ARMV7_COUNTER0 + armpmu->num_events - 1)

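/*
 * Worked example, assuming a Cortex-A8 with 4 event counters (so
 * armpmu->num_events = 5, including the cycle counter):
 *
 *	perf idx 1	-> CCNT (the cycle counter)
 *	perf idx 2..5	-> hardware event counters CNT0..CNT3
 *
 * i.e. an event counter's hardware number is its perf index minus
 * ARMV7_EVENT_CNT_TO_CNTx (defined below), which evaluates to 2.
 */
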
/*
 * ARMv7 low level PMNC access
 */

/*
 * Per-CPU PMNC: config reg
 */
#define ARMV7_PMNC_E		(1 << 0) /* Enable all counters */
#define ARMV7_PMNC_P		(1 << 1) /* Reset all counters */
#define ARMV7_PMNC_C		(1 << 2) /* Cycle counter reset */
#define ARMV7_PMNC_D		(1 << 3) /* CCNT counts every 64th cpu cycle */
#define ARMV7_PMNC_X		(1 << 4) /* Export to ETM */
#define ARMV7_PMNC_DP		(1 << 5) /* Disable CCNT if non-invasive debug */
#define ARMV7_PMNC_N_SHIFT	11	 /* Number of counters supported */
#define ARMV7_PMNC_N_MASK	0x1f
#define ARMV7_PMNC_MASK		0x3f	 /* Mask for writable bits */

/*
 * Available counters
 */
#define ARMV7_CNT0		0	/* First event counter */
#define ARMV7_CCNT		31	/* Cycle counter */

/* Perf Event to low level counters mapping */
#define ARMV7_EVENT_CNT_TO_CNTx	(ARMV7_COUNTER0 - ARMV7_CNT0)

/*
 * CNTENS: counters enable reg
 */
#define ARMV7_CNTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENS_C		(1 << ARMV7_CCNT)

/*
 * CNTENC: counters disable reg
 */
#define ARMV7_CNTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_CNTENC_C		(1 << ARMV7_CCNT)

/*
 * INTENS: counters overflow interrupt enable reg
 */
#define ARMV7_INTENS_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENS_C		(1 << ARMV7_CCNT)

/*
 * INTENC: counters overflow interrupt disable reg
 */
#define ARMV7_INTENC_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_INTENC_C		(1 << ARMV7_CCNT)

/*
 * EVTSEL: Event selection reg
 */
#define ARMV7_EVTSEL_MASK	0xff	/* Mask for writable bits */

/*
 * SELECT: Counter selection reg
 */
#define ARMV7_SELECT_MASK	0x1f	/* Mask for writable bits */

/*
 * FLAG: counters overflow flag status reg
 */
#define ARMV7_FLAG_P(idx)	(1 << (idx - ARMV7_EVENT_CNT_TO_CNTx))
#define ARMV7_FLAG_C		(1 << ARMV7_CCNT)
#define ARMV7_FLAG_MASK		0xffffffff	/* Mask for writable bits */
#define ARMV7_OVERFLOWED_MASK	ARMV7_FLAG_MASK

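/*
 * For reference, the cp15 c9 register encodings used by the accessors
 * below (names follow the debug dump in armv7_pmnc_dump_regs):
 *
 *	PMNC	c9, c12, 0	control register
 *	CNTENS	c9, c12, 1	counter enable set
 *	CNTENC	c9, c12, 2	counter enable clear
 *	FLAGS	c9, c12, 3	overflow flag status
 *	SELECT	c9, c12, 5	counter select
 *	CCNT	c9, c13, 0	cycle counter value
 *	EVTSEL	c9, c13, 1	event select for the selected counter
 *	COUNT	c9, c13, 2	value of the selected counter
 *	INTENS	c9, c14, 1	overflow interrupt enable set
 *	INTENC	c9, c14, 2	overflow interrupt enable clear
 */
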
static inline unsigned long armv7_pmnc_read(void)
{
	u32 val;
	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
	return val;
}

static inline void armv7_pmnc_write(unsigned long val)
{
	val &= ARMV7_PMNC_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
}

static inline int armv7_pmnc_has_overflowed(unsigned long pmnc)
{
	return pmnc & ARMV7_OVERFLOWED_MASK;
}

static inline int armv7_pmnc_counter_has_overflowed(unsigned long pmnc,
					enum armv7_counters counter)
{
	int ret = 0;

	if (counter == ARMV7_CYCLE_COUNTER)
		ret = pmnc & ARMV7_FLAG_C;
	else if ((counter >= ARMV7_COUNTER0) && (counter <= ARMV7_COUNTER_LAST))
		ret = pmnc & ARMV7_FLAG_P(counter);
	else
		pr_err("CPU%u checking wrong counter %d overflow status\n",
			smp_processor_id(), counter);

	return ret;
}

static inline int armv7_pmnc_select_counter(unsigned int idx)
{
	u32 val;

	if ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST)) {
		pr_err("CPU%u selecting wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	val = (idx - ARMV7_EVENT_CNT_TO_CNTx) & ARMV7_SELECT_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (val));

	return idx;
}

static inline u32 armv7pmu_read_counter(int idx)
{
	unsigned long value = 0;

	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mrc p15, 0, %0, c9, c13, 2"
				     : "=r" (value));
	} else
		pr_err("CPU%u reading wrong counter %d\n",
			smp_processor_id(), idx);

	return value;
}

static inline void armv7pmu_write_counter(int idx, u32 value)
{
	if (idx == ARMV7_CYCLE_COUNTER)
		asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
	else if ((idx >= ARMV7_COUNTER0) && (idx <= ARMV7_COUNTER_LAST)) {
		if (armv7_pmnc_select_counter(idx) == idx)
			asm volatile("mcr p15, 0, %0, c9, c13, 2"
				     : : "r" (value));
	} else
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
}

static inline void armv7_pmnc_write_evtsel(unsigned int idx, u32 val)
{
	if (armv7_pmnc_select_counter(idx) == idx) {
		val &= ARMV7_EVTSEL_MASK;
		asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
	}
}

static inline u32 armv7_pmnc_enable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENS_C;
	else
		val = ARMV7_CNTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_disable_counter(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_CNTENC_C;
	else
		val = ARMV7_CNTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_enable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u enabling wrong PMNC counter interrupt enable %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENS_C;
	else
		val = ARMV7_INTENS_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_disable_intens(unsigned int idx)
{
	u32 val;

	if ((idx != ARMV7_CYCLE_COUNTER) &&
	    ((idx < ARMV7_COUNTER0) || (idx > ARMV7_COUNTER_LAST))) {
		pr_err("CPU%u disabling wrong PMNC counter interrupt enable %d\n",
			smp_processor_id(), idx);
		return -1;
	}

	if (idx == ARMV7_CYCLE_COUNTER)
		val = ARMV7_INTENC_C;
	else
		val = ARMV7_INTENC_P(idx);

	asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (val));

	return idx;
}

static inline u32 armv7_pmnc_getreset_flags(void)
{
	u32 val;

	/* Read */
	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));

	/* Write to clear flags */
	val &= ARMV7_FLAG_MASK;
	asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));

	return val;
}
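
/*
 * The overflow flag register is write-one-to-clear, so writing back the
 * masked value we just read acknowledges exactly the flags that were
 * set; an overflow that arrives between the read and the write keeps
 * its flag pending for a later interrupt.
 */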

#ifdef DEBUG
static void armv7_pmnc_dump_regs(void)
{
	u32 val;
	unsigned int cnt;

	printk(KERN_INFO "PMNC registers dump:\n");

	asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
	printk(KERN_INFO "PMNC  =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
	printk(KERN_INFO "CNTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
	printk(KERN_INFO "INTENS=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
	printk(KERN_INFO "FLAGS =0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
	printk(KERN_INFO "SELECT=0x%08x\n", val);

	asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
	printk(KERN_INFO "CCNT  =0x%08x\n", val);

	for (cnt = ARMV7_COUNTER0; cnt < ARMV7_COUNTER_LAST; cnt++) {
		armv7_pmnc_select_counter(cnt);
		asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
		printk(KERN_INFO "CNT[%d] count =0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
		asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
		printk(KERN_INFO "CNT[%d] evtsel=0x%08x\n",
			cnt - ARMV7_EVENT_CNT_TO_CNTx, val);
	}
}
#endif

static void armv7pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Set event (if destined for PMNx counters)
	 * We don't need to set the event if it's a cycle count
	 */
	if (idx != ARMV7_CYCLE_COUNTER)
		armv7_pmnc_write_evtsel(idx, hwc->config_base);

	/*
	 * Enable interrupt for this counter
	 */
	armv7_pmnc_enable_intens(idx);

	/*
	 * Enable counter
	 */
	armv7_pmnc_enable_counter(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	unsigned long flags;

	/*
	 * Disable counter and interrupt
	 */
	raw_spin_lock_irqsave(&pmu_lock, flags);

	/*
	 * Disable counter
	 */
	armv7_pmnc_disable_counter(idx);

	/*
	 * Disable interrupt for this counter
	 */
	armv7_pmnc_disable_intens(idx);

	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long pmnc;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmnc = armv7_pmnc_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv7_pmnc_has_overflowed(pmnc))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx <= armpmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;
		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, 0, &data, regs))
			armpmu->disable(hwc, idx);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}

static void armv7pmu_start(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Enable all counters */
	armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static void armv7pmu_stop(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&pmu_lock, flags);
	/* Disable all counters */
	armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
	raw_spin_unlock_irqrestore(&pmu_lock, flags);
}

static int armv7pmu_get_event_idx(struct cpu_hw_events *cpuc,
				  struct hw_perf_event *event)
{
	int idx;

	/* Always place a cycle counter into the cycle counter. */
	if (event->config_base == ARMV7_PERFCTR_CPU_CYCLES) {
		if (test_and_set_bit(ARMV7_CYCLE_COUNTER, cpuc->used_mask))
			return -EAGAIN;

		return ARMV7_CYCLE_COUNTER;
	} else {
		/*
		 * For anything other than a cycle counter, try and use
		 * the event counters
		 */
		for (idx = ARMV7_COUNTER0; idx <= armpmu->num_events; ++idx) {
			if (!test_and_set_bit(idx, cpuc->used_mask))
				return idx;
		}

		/* The counters are all in use. */
		return -EAGAIN;
	}
}
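
/*
 * Example placement on an otherwise idle PMU: a CPU-cycles event takes
 * ARMV7_CYCLE_COUNTER (idx 1); a D-cache access event then takes the
 * first free event counter, idx 2 (hardware CNT0).  A second CPU-cycles
 * event gets -EAGAIN rather than being scheduled onto an event counter.
 */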

static struct arm_pmu armv7pmu = {
	.handle_irq		= armv7pmu_handle_irq,
	.enable			= armv7pmu_enable_event,
	.disable		= armv7pmu_disable_event,
	.read_counter		= armv7pmu_read_counter,
	.write_counter		= armv7pmu_write_counter,
	.get_event_idx		= armv7pmu_get_event_idx,
	.start			= armv7pmu_start,
	.stop			= armv7pmu_stop,
	.raw_event_mask		= 0xFF,
	.max_period		= (1LLU << 32) - 1,
};

static u32 __init armv7_reset_read_pmnc(void)
{
	u32 nb_cnt;

	/* Initialize & Reset PMNC: C and P bits */
	armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);

	/* Read the nb of CNTx counters supported from PMNC */
	nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;

	/* Add the CPU cycles counter and return */
	return nb_cnt + 1;
}
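
/*
 * Worked example with a hypothetical register value: if PMNC reads back
 * as 0x00002000, the N field (bits [15:11]) is (0x2000 >> 11) & 0x1f = 4,
 * so this returns 4 + 1 = 5 counters: CNT0..CNT3 plus the cycle counter.
 */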

static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA8;
	armv7pmu.name		= "ARMv7 Cortex-A8";
	armv7pmu.cache_map	= &armv7_a8_perf_cache_map;
	armv7pmu.event_map	= &armv7_a8_perf_map;
	armv7pmu.num_events	= armv7_reset_read_pmnc();
	return &armv7pmu;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	armv7pmu.id		= ARM_PERF_PMU_ID_CA9;
	armv7pmu.name		= "ARMv7 Cortex-A9";
	armv7pmu.cache_map	= &armv7_a9_perf_cache_map;
	armv7pmu.event_map	= &armv7_a9_perf_map;
	armv7pmu.num_events	= armv7_reset_read_pmnc();
	return &armv7pmu;
}
#else
static const struct arm_pmu *__init armv7_a8_pmu_init(void)
{
	return NULL;
}

static const struct arm_pmu *__init armv7_a9_pmu_init(void)
{
	return NULL;
}
#endif	/* CONFIG_CPU_V7 */
