commit 4b2f8838479eb2abe042e094f7d2cced6d5ea772
tree   5ef3236b354a494c8d71a572896283e44989c696
parent c848791f0336914a3081ea3fe029cf177d81de81
parent 9fd85eb502a78bd812db58bd1f668b2a06ee30a5
author    Russell King <rmk+kernel@arm.linux.org.uk>  2015-04-14 17:28:32 -0400
committer Russell King <rmk+kernel@arm.linux.org.uk>  2015-04-14 17:28:32 -0400

    Merge branch 'devel-stable' into for-next
Diffstat (limited to 'arch/arm/kernel'):

 arch/arm/kernel/perf_event.c     |  21 +-
 arch/arm/kernel/perf_event_cpu.c |  71 ++++-
 arch/arm/kernel/perf_event_v7.c  | 525 ++++++++++++++++++++++++++++-
 3 files changed, 545 insertions(+), 72 deletions(-)
diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c
index 557e128e4df0..4a86a0133ac3 100644
--- a/arch/arm/kernel/perf_event.c
+++ b/arch/arm/kernel/perf_event.c
@@ -259,20 +259,29 @@ out:
 }
 
 static int
-validate_event(struct pmu_hw_events *hw_events,
+validate_event(struct pmu *pmu, struct pmu_hw_events *hw_events,
 	       struct perf_event *event)
 {
-	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);
+	struct arm_pmu *armpmu;
 
 	if (is_software_event(event))
 		return 1;
 
+	/*
+	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
+	 * core perf code won't check that the pmu->ctx == leader->ctx
+	 * until after pmu->event_init(event).
+	 */
+	if (event->pmu != pmu)
+		return 0;
+
 	if (event->state < PERF_EVENT_STATE_OFF)
 		return 1;
 
 	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
 		return 1;
 
+	armpmu = to_arm_pmu(event->pmu);
 	return armpmu->get_event_idx(hw_events, event) >= 0;
 }
 
@@ -288,15 +297,15 @@ validate_group(struct perf_event *event)
 	 */
 	memset(&fake_pmu.used_mask, 0, sizeof(fake_pmu.used_mask));
 
-	if (!validate_event(&fake_pmu, leader))
+	if (!validate_event(event->pmu, &fake_pmu, leader))
 		return -EINVAL;
 
 	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
-		if (!validate_event(&fake_pmu, sibling))
+		if (!validate_event(event->pmu, &fake_pmu, sibling))
 			return -EINVAL;
 	}
 
-	if (!validate_event(&fake_pmu, event))
+	if (!validate_event(event->pmu, &fake_pmu, event))
 		return -EINVAL;
 
 	return 0;
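
Note: the validate_event()/validate_group() change above closes a window where a perf event group could mix events from different hardware PMUs (for example a CPU PMU event grouped under a CCI PMU leader); the core only checks pmu->ctx == leader->ctx after pmu->event_init(). What follows is a minimal standalone model of that walk, with simplified stand-in types rather than the kernel's struct perf_event/struct pmu, and with the counter-allocation half of validate_event() omitted:

#include <stdio.h>

struct pmu { const char *name; };

struct perf_event {
	struct pmu *pmu;
	struct perf_event *sibling;	/* next group member, NULL-terminated */
};

/* Mirrors the new check: an event owned by another PMU fails validation. */
static int validate_event(struct pmu *pmu, struct perf_event *event)
{
	if (event->pmu != pmu)
		return 0;
	return 1;	/* get_event_idx() counter check omitted in this model */
}

static int validate_group(struct perf_event *leader)
{
	struct perf_event *sibling;

	if (!validate_event(leader->pmu, leader))
		return -22;	/* -EINVAL */
	for (sibling = leader->sibling; sibling; sibling = sibling->sibling)
		if (!validate_event(leader->pmu, sibling))
			return -22;
	return 0;
}

int main(void)
{
	struct pmu cpu_pmu = { "armv7_cortex_a15" }, cci_pmu = { "cci_pmu" };
	struct perf_event cci_event = { &cci_pmu, NULL };
	struct perf_event leader = { &cpu_pmu, &cci_event };

	printf("mixed group -> %d\n", validate_group(&leader));	/* prints -22 */
	return 0;
}

In the kernel the walk runs over a scratch fake_pmu used_mask and also validates the event being opened, but the rejection path for cross-PMU groups is the same.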
diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
index 61b53c46edfa..91c7ba182dcd 100644
--- a/arch/arm/kernel/perf_event_cpu.c
+++ b/arch/arm/kernel/perf_event_cpu.c
@@ -92,11 +92,16 @@ static void cpu_pmu_free_irq(struct arm_pmu *cpu_pmu)
 		free_percpu_irq(irq, &hw_events->percpu_pmu);
 	} else {
 		for (i = 0; i < irqs; ++i) {
-			if (!cpumask_test_and_clear_cpu(i, &cpu_pmu->active_irqs))
+			int cpu = i;
+
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
+			if (!cpumask_test_and_clear_cpu(cpu, &cpu_pmu->active_irqs))
 				continue;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq >= 0)
-				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, i));
+				free_irq(irq, per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 		}
 	}
 }
@@ -128,32 +133,37 @@ static int cpu_pmu_request_irq(struct arm_pmu *cpu_pmu, irq_handler_t handler)
 		on_each_cpu(cpu_pmu_enable_percpu_irq, &irq, 1);
 	} else {
 		for (i = 0; i < irqs; ++i) {
+			int cpu = i;
+
 			err = 0;
 			irq = platform_get_irq(pmu_device, i);
 			if (irq < 0)
 				continue;
 
+			if (cpu_pmu->irq_affinity)
+				cpu = cpu_pmu->irq_affinity[i];
+
 			/*
 			 * If we have a single PMU interrupt that we can't shift,
 			 * assume that we're running on a uniprocessor machine and
 			 * continue. Otherwise, continue without this interrupt.
 			 */
-			if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
+			if (irq_set_affinity(irq, cpumask_of(cpu)) && irqs > 1) {
 				pr_warn("unable to set irq affinity (irq=%d, cpu=%u)\n",
-					irq, i);
+					irq, cpu);
 				continue;
 			}
 
 			err = request_irq(irq, handler,
 					  IRQF_NOBALANCING | IRQF_NO_THREAD, "arm-pmu",
-					  per_cpu_ptr(&hw_events->percpu_pmu, i));
+					  per_cpu_ptr(&hw_events->percpu_pmu, cpu));
 			if (err) {
 				pr_err("unable to request IRQ%d for ARM PMU counters\n",
 					irq);
 				return err;
 			}
 
-			cpumask_set_cpu(i, &cpu_pmu->active_irqs);
+			cpumask_set_cpu(cpu, &cpu_pmu->active_irqs);
 		}
 	}
 
@@ -243,6 +253,8 @@ static const struct of_device_id cpu_pmu_of_device_ids[] = {
 	{.compatible = "arm,arm1176-pmu", .data = armv6_1176_pmu_init},
 	{.compatible = "arm,arm1136-pmu", .data = armv6_1136_pmu_init},
 	{.compatible = "qcom,krait-pmu", .data = krait_pmu_init},
+	{.compatible = "qcom,scorpion-pmu", .data = scorpion_pmu_init},
+	{.compatible = "qcom,scorpion-mp-pmu", .data = scorpion_mp_pmu_init},
 	{},
 };
 
@@ -289,6 +301,48 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 	return ret;
 }
 
+static int of_pmu_irq_cfg(struct platform_device *pdev)
+{
+	int i;
+	int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+
+	if (!irqs)
+		return -ENOMEM;
+
+	for (i = 0; i < pdev->num_resources; ++i) {
+		struct device_node *dn;
+		int cpu;
+
+		dn = of_parse_phandle(pdev->dev.of_node, "interrupt-affinity",
+				      i);
+		if (!dn) {
+			pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
+				of_node_full_name(dn), i);
+			break;
+		}
+
+		for_each_possible_cpu(cpu)
+			if (arch_find_n_match_cpu_physical_id(dn, cpu, NULL))
+				break;
+
+		of_node_put(dn);
+		if (cpu >= nr_cpu_ids) {
+			pr_warn("Failed to find logical CPU for %s\n",
+				dn->name);
+			break;
+		}
+
+		irqs[i] = cpu;
+	}
+
+	if (i == pdev->num_resources)
+		cpu_pmu->irq_affinity = irqs;
+	else
+		kfree(irqs);
+
+	return 0;
+}
+
 static int cpu_pmu_device_probe(struct platform_device *pdev)
 {
 	const struct of_device_id *of_id;
@@ -313,7 +367,10 @@ static int cpu_pmu_device_probe(struct platform_device *pdev)
 
 	if (node && (of_id = of_match_node(cpu_pmu_of_device_ids, pdev->dev.of_node))) {
 		init_fn = of_id->data;
-		ret = init_fn(pmu);
+
+		ret = of_pmu_irq_cfg(pdev);
+		if (!ret)
+			ret = init_fn(pmu);
 	} else {
 		ret = probe_current_pmu(pmu);
 	}
diff --git a/arch/arm/kernel/perf_event_v7.c b/arch/arm/kernel/perf_event_v7.c
index 8993770c47de..f4207a4dcb01 100644
--- a/arch/arm/kernel/perf_event_v7.c
+++ b/arch/arm/kernel/perf_event_v7.c
@@ -140,6 +140,23 @@ enum krait_perf_types {
 	KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210,
 };
 
+/* ARMv7 Scorpion specific event types */
+enum scorpion_perf_types {
+	SCORPION_LPM0_GROUP0 = 0x4c,
+	SCORPION_LPM1_GROUP0 = 0x50,
+	SCORPION_LPM2_GROUP0 = 0x54,
+	SCORPION_L2LPM_GROUP0 = 0x58,
+	SCORPION_VLPM_GROUP0 = 0x5c,
+
+	SCORPION_ICACHE_ACCESS = 0x10053,
+	SCORPION_ICACHE_MISS = 0x10052,
+
+	SCORPION_DTLB_ACCESS = 0x12013,
+	SCORPION_DTLB_MISS = 0x12012,
+
+	SCORPION_ITLB_MISS = 0x12021,
+};
+
 /*
  * Cortex-A8 HW events mapping
  *
@@ -482,6 +499,49 @@ static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
 };
 
 /*
+ * Scorpion HW events mapping
+ */
+static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
+	PERF_MAP_ALL_UNSUPPORTED,
+	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV7_PERFCTR_CPU_CYCLES,
+	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV7_PERFCTR_INSTR_EXECUTED,
+	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV7_PERFCTR_PC_WRITE,
+	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV7_PERFCTR_CLOCK_CYCLES,
+};
+
+static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
+					    [PERF_COUNT_HW_CACHE_OP_MAX]
+					    [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+	PERF_CACHE_MAP_ALL_UNSUPPORTED,
+	/*
+	 * The performance counters don't differentiate between read and write
+	 * accesses/misses so this isn't strictly correct, but it's the best we
+	 * can do. Writes and reads get combined.
+	 */
+	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_L1_DCACHE_ACCESS,
+	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_L1_DCACHE_REFILL,
+	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= SCORPION_ICACHE_ACCESS,
+	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= SCORPION_ICACHE_MISS,
+	/*
+	 * Only ITLB misses and DTLB refills are supported. If users want the
+	 * DTLB refills misses a raw counter must be used.
+	 */
+	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= SCORPION_DTLB_ACCESS,
+	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= SCORPION_DTLB_MISS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
+	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= SCORPION_DTLB_MISS,
+	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= SCORPION_ITLB_MISS,
+	[C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)]	= SCORPION_ITLB_MISS,
+	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV7_PERFCTR_PC_BRANCH_PRED,
+	[C(BPU)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
+};
+
+/*
  * Perf Events' indices
  */
 #define ARMV7_IDX_CYCLE_COUNTER	0
@@ -976,6 +1036,12 @@ static int krait_map_event_no_branch(struct perf_event *event)
 			      &krait_perf_cache_map, 0xFFFFF);
 }
 
+static int scorpion_map_event(struct perf_event *event)
+{
+	return armpmu_map_event(event, &scorpion_perf_map,
+				&scorpion_perf_cache_map, 0xFFFFF);
+}
+
 static void armv7pmu_init(struct arm_pmu *cpu_pmu)
 {
 	cpu_pmu->handle_irq = armv7pmu_handle_irq;
@@ -1103,6 +1169,12 @@ static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
 #define KRAIT_EVENT_MASK	(KRAIT_EVENT | VENUM_EVENT)
 #define PMRESRn_EN		BIT(31)
 
+#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
+#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
+#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
+#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
+#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */
+
 static u32 krait_read_pmresrn(int n)
 {
 	u32 val;
@@ -1141,19 +1213,19 @@ static void krait_write_pmresrn(int n, u32 val)
 	}
 }
 
-static u32 krait_read_vpmresr0(void)
+static u32 venum_read_pmresr(void)
 {
 	u32 val;
 	asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
 	return val;
 }
 
-static void krait_write_vpmresr0(u32 val)
+static void venum_write_pmresr(u32 val)
 {
 	asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
 }
 
-static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
+static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
 {
 	u32 venum_new_val;
 	u32 fp_new_val;
@@ -1170,7 +1242,7 @@ static void krait_pre_vpmresr0(u32 *venum_orig_val, u32 *fp_orig_val)
 	fmxr(FPEXC, fp_new_val);
 }
 
-static void krait_post_vpmresr0(u32 venum_orig_val, u32 fp_orig_val)
+static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
 {
 	BUG_ON(preemptible());
 	/* Restore FPEXC */
@@ -1193,16 +1265,11 @@ static void krait_evt_setup(int idx, u32 config_base)
 	u32 val;
 	u32 mask;
 	u32 vval, fval;
-	unsigned int region;
-	unsigned int group;
-	unsigned int code;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	unsigned int code = EVENT_CODE(config_base);
 	unsigned int group_shift;
-	bool venum_event;
-
-	venum_event = !!(config_base & VENUM_EVENT);
-	region = (config_base >> 12) & 0xf;
-	code = (config_base >> 4) & 0xff;
-	group = (config_base >> 0) & 0xf;
+	bool venum_event = EVENT_VENUM(config_base);
 
 	group_shift = group * 8;
 	mask = 0xff << group_shift;
@@ -1217,16 +1284,14 @@ static void krait_evt_setup(int idx, u32 config_base)
 	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
 	armv7_pmnc_write_evtsel(idx, val);
 
-	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
-
 	if (venum_event) {
-		krait_pre_vpmresr0(&vval, &fval);
-		val = krait_read_vpmresr0();
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
 		val &= ~mask;
 		val |= code << group_shift;
 		val |= PMRESRn_EN;
-		krait_write_vpmresr0(val);
-		krait_post_vpmresr0(vval, fval);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
 	} else {
 		val = krait_read_pmresrn(region);
 		val &= ~mask;
@@ -1236,7 +1301,7 @@ static void krait_evt_setup(int idx, u32 config_base)
 	}
 }
 
-static u32 krait_clear_pmresrn_group(u32 val, int group)
+static u32 clear_pmresrn_group(u32 val, int group)
 {
 	u32 mask;
 	int group_shift;
@@ -1256,23 +1321,19 @@ static void krait_clearpmu(u32 config_base)
 {
 	u32 val;
 	u32 vval, fval;
-	unsigned int region;
-	unsigned int group;
-	bool venum_event;
-
-	venum_event = !!(config_base & VENUM_EVENT);
-	region = (config_base >> 12) & 0xf;
-	group = (config_base >> 0) & 0xf;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	bool venum_event = EVENT_VENUM(config_base);
 
 	if (venum_event) {
-		krait_pre_vpmresr0(&vval, &fval);
-		val = krait_read_vpmresr0();
-		val = krait_clear_pmresrn_group(val, group);
-		krait_write_vpmresr0(val);
-		krait_post_vpmresr0(vval, fval);
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val = clear_pmresrn_group(val, group);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
 	} else {
 		val = krait_read_pmresrn(region);
-		val = krait_clear_pmresrn_group(val, group);
+		val = clear_pmresrn_group(val, group);
 		krait_write_pmresrn(region, val);
 	}
 }
@@ -1342,6 +1403,8 @@ static void krait_pmu_enable_event(struct perf_event *event)
 static void krait_pmu_reset(void *info)
 {
 	u32 vval, fval;
+	struct arm_pmu *cpu_pmu = info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
 
 	armv7pmu_reset(info);
 
@@ -1350,9 +1413,16 @@ static void krait_pmu_reset(void *info)
 	krait_write_pmresrn(1, 0);
 	krait_write_pmresrn(2, 0);
 
-	krait_pre_vpmresr0(&vval, &fval);
-	krait_write_vpmresr0(0);
-	krait_post_vpmresr0(vval, fval);
+	venum_pre_pmresr(&vval, &fval);
+	venum_write_pmresr(0);
+	venum_post_pmresr(vval, fval);
+
+	/* Reset PMxEVNCTCR to sane default */
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv7_pmnc_select_counter(idx);
+		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+	}
+
 }
 
 static int krait_event_to_bit(struct perf_event *event, unsigned int region,
@@ -1386,26 +1456,18 @@ static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
 {
 	int idx;
 	int bit = -1;
-	unsigned int prefix;
-	unsigned int region;
-	unsigned int code;
-	unsigned int group;
-	bool krait_event;
 	struct hw_perf_event *hwc = &event->hw;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int code = EVENT_CODE(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool krait_event = EVENT_CPU(hwc->config_base);
 
-	region = (hwc->config_base >> 12) & 0xf;
-	code = (hwc->config_base >> 4) & 0xff;
-	group = (hwc->config_base >> 0) & 0xf;
-	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
-
-	if (krait_event) {
+	if (venum_event || krait_event) {
 		/* Ignore invalid events */
 		if (group > 3 || region > 2)
 			return -EINVAL;
-		prefix = hwc->config_base & KRAIT_EVENT_MASK;
-		if (prefix != KRAIT_EVENT && prefix != VENUM_EVENT)
-			return -EINVAL;
-		if (prefix == VENUM_EVENT && (code & 0xe0))
+		if (venum_event && (code & 0xe0))
 			return -EINVAL;
 
 		bit = krait_event_to_bit(event, region, group);
@@ -1425,15 +1487,12 @@ static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
 {
 	int bit;
 	struct hw_perf_event *hwc = &event->hw;
-	unsigned int region;
-	unsigned int group;
-	bool krait_event;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool krait_event = EVENT_CPU(hwc->config_base);
 
-	region = (hwc->config_base >> 12) & 0xf;
-	group = (hwc->config_base >> 0) & 0xf;
-	krait_event = !!(hwc->config_base & KRAIT_EVENT_MASK);
-
-	if (krait_event) {
+	if (venum_event || krait_event) {
 		bit = krait_event_to_bit(event, region, group);
 		clear_bit(bit, cpuc->used_mask);
 	}
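
Note: the two hunks above rework krait_pmu_get_event_idx()/krait_pmu_clear_event_idx() around the decoded EVENT_* fields; the used_mask bit bookkeeping they share is what enforces column exclusion (two events may not claim the same group within one PMRESR register). A simplified standalone model of that bookkeeping follows; the counter count and the bit layout are illustrative stand-ins, not the kernel's exact arithmetic:

#include <stdio.h>

#define NUM_COUNTERS	5	/* e.g. cycle counter + 4 event counters */

static unsigned long used_mask;

/* One bit per (region, group), placed above the per-counter bits. */
static int event_to_bit(unsigned int region, unsigned int group)
{
	return NUM_COUNTERS + region * 4 + group;
}

static int try_take(unsigned int region, unsigned int group)
{
	int bit = event_to_bit(region, group);

	if (used_mask & (1UL << bit))
		return -11;	/* -EAGAIN: group already claimed */
	used_mask |= 1UL << bit;
	return bit;
}

int main(void)
{
	printf("first  (r=2,g=1): %d\n", try_take(2, 1));	/* bit index */
	printf("second (r=2,g=1): %d\n", try_take(2, 1));	/* -11, collides */
	return 0;
}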
@@ -1458,6 +1517,344 @@ static int krait_pmu_init(struct arm_pmu *cpu_pmu)
 	cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
 	return 0;
 }
+
+/*
+ * Scorpion Local Performance Monitor Register (LPMn)
+ *
+ *            31   30     24     16     8      0
+ *            +--------------------------------+
+ *  LPM0      | EN |  CC  |  CC  |  CC  |  CC  |	N = 1, R = 0
+ *            +--------------------------------+
+ *  LPM1      | EN |  CC  |  CC  |  CC  |  CC  |	N = 1, R = 1
+ *            +--------------------------------+
+ *  LPM2      | EN |  CC  |  CC  |  CC  |  CC  |	N = 1, R = 2
+ *            +--------------------------------+
+ *  L2LPM     | EN |  CC  |  CC  |  CC  |  CC  |	N = 1, R = 3
+ *            +--------------------------------+
+ *  VLPM      | EN |  CC  |  CC  |  CC  |  CC  |	N = 2, R = ?
+ *            +--------------------------------+
+ *              EN | G=3  | G=2  | G=1  | G=0
+ *
+ *
+ *  Event Encoding:
+ *
+ *      hwc->config_base = 0xNRCCG
+ *
+ *      N  = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
+ *      R  = region register
+ *      CC = class of events the group G is choosing from
+ *      G  = group or particular event
+ *
+ *  Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
+ *
+ *  A region (R) corresponds to a piece of the CPU (execution unit, instruction
+ *  unit, etc.) while the event code (CC) corresponds to a particular class of
+ *  events (interrupts for example). An event code is broken down into
+ *  groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
+ *  example).
+ */
+
+static u32 scorpion_read_pmresrn(int n)
+{
+	u32 val;
+
+	switch (n) {
+	case 0:
+		asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
+		break;
+	case 1:
+		asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
+		break;
+	case 2:
+		asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
+		break;
+	case 3:
+		asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
+		break;
+	default:
+		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+	}
+
+	return val;
+}
+
+static void scorpion_write_pmresrn(int n, u32 val)
+{
+	switch (n) {
+	case 0:
+		asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
+		break;
+	case 1:
+		asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
+		break;
+	case 2:
+		asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
+		break;
+	case 3:
+		asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
+		break;
+	default:
+		BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
+	}
+}
+
+static u32 scorpion_get_pmresrn_event(unsigned int region)
+{
+	static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
+					     SCORPION_LPM1_GROUP0,
+					     SCORPION_LPM2_GROUP0,
+					     SCORPION_L2LPM_GROUP0 };
+	return pmresrn_table[region];
+}
+
+static void scorpion_evt_setup(int idx, u32 config_base)
+{
+	u32 val;
+	u32 mask;
+	u32 vval, fval;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	unsigned int code = EVENT_CODE(config_base);
+	unsigned int group_shift;
+	bool venum_event = EVENT_VENUM(config_base);
+
+	group_shift = group * 8;
+	mask = 0xff << group_shift;
+
+	/* Configure evtsel for the region and group */
+	if (venum_event)
+		val = SCORPION_VLPM_GROUP0;
+	else
+		val = scorpion_get_pmresrn_event(region);
+	val += group;
+	/* Mix in mode-exclusion bits */
+	val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
+	armv7_pmnc_write_evtsel(idx, val);
+
+	asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+
+	if (venum_event) {
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val &= ~mask;
+		val |= code << group_shift;
+		val |= PMRESRn_EN;
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
+	} else {
+		val = scorpion_read_pmresrn(region);
+		val &= ~mask;
+		val |= code << group_shift;
+		val |= PMRESRn_EN;
+		scorpion_write_pmresrn(region, val);
+	}
+}
+
+static void scorpion_clearpmu(u32 config_base)
+{
+	u32 val;
+	u32 vval, fval;
+	unsigned int region = EVENT_REGION(config_base);
+	unsigned int group = EVENT_GROUP(config_base);
+	bool venum_event = EVENT_VENUM(config_base);
+
+	if (venum_event) {
+		venum_pre_pmresr(&vval, &fval);
+		val = venum_read_pmresr();
+		val = clear_pmresrn_group(val, group);
+		venum_write_pmresr(val);
+		venum_post_pmresr(vval, fval);
+	} else {
+		val = scorpion_read_pmresrn(region);
+		val = clear_pmresrn_group(val, group);
+		scorpion_write_pmresrn(region, val);
+	}
+}
+
+static void scorpion_pmu_disable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/* Disable counter and interrupt */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable counter */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Clear pmresr code (if destined for PMNx counters)
+	 */
+	if (hwc->config_base & KRAIT_EVENT_MASK)
+		scorpion_clearpmu(hwc->config_base);
+
+	/* Disable interrupt for this counter */
+	armv7_pmnc_disable_intens(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void scorpion_pmu_enable_event(struct perf_event *event)
+{
+	unsigned long flags;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = hwc->idx;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+	struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
+
+	/*
+	 * Enable counter and interrupt, and set the counter to count
+	 * the event that we're interested in.
+	 */
+	raw_spin_lock_irqsave(&events->pmu_lock, flags);
+
+	/* Disable counter */
+	armv7_pmnc_disable_counter(idx);
+
+	/*
+	 * Set event (if destined for PMNx counters)
+	 * We don't set the event for the cycle counter because we
+	 * don't have the ability to perform event filtering.
+	 */
+	if (hwc->config_base & KRAIT_EVENT_MASK)
+		scorpion_evt_setup(idx, hwc->config_base);
+	else if (idx != ARMV7_IDX_CYCLE_COUNTER)
+		armv7_pmnc_write_evtsel(idx, hwc->config_base);
+
+	/* Enable interrupt for this counter */
+	armv7_pmnc_enable_intens(idx);
+
+	/* Enable counter */
+	armv7_pmnc_enable_counter(idx);
+
+	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
+}
+
+static void scorpion_pmu_reset(void *info)
+{
+	u32 vval, fval;
+	struct arm_pmu *cpu_pmu = info;
+	u32 idx, nb_cnt = cpu_pmu->num_events;
+
+	armv7pmu_reset(info);
+
+	/* Clear all pmresrs */
+	scorpion_write_pmresrn(0, 0);
+	scorpion_write_pmresrn(1, 0);
+	scorpion_write_pmresrn(2, 0);
+	scorpion_write_pmresrn(3, 0);
+
+	venum_pre_pmresr(&vval, &fval);
+	venum_write_pmresr(0);
+	venum_post_pmresr(vval, fval);
+
+	/* Reset PMxEVNCTCR to sane default */
+	for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
+		armv7_pmnc_select_counter(idx);
+		asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
+	}
+}
+
+static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
+				 unsigned int group)
+{
+	int bit;
+	struct hw_perf_event *hwc = &event->hw;
+	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
+
+	if (hwc->config_base & VENUM_EVENT)
+		bit = SCORPION_VLPM_GROUP0;
+	else
+		bit = scorpion_get_pmresrn_event(region);
+	bit -= scorpion_get_pmresrn_event(0);
+	bit += group;
+	/*
+	 * Lower bits are reserved for use by the counters (see
+	 * armv7pmu_get_event_idx() for more info)
+	 */
+	bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
+
+	return bit;
+}
+
+/*
+ * We check for column exclusion constraints here.
+ * Two events cant use the same group within a pmresr register.
+ */
+static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
+				      struct perf_event *event)
+{
+	int idx;
+	int bit = -1;
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+	if (venum_event || scorpion_event) {
+		/* Ignore invalid events */
+		if (group > 3 || region > 3)
+			return -EINVAL;
+
+		bit = scorpion_event_to_bit(event, region, group);
+		if (test_and_set_bit(bit, cpuc->used_mask))
+			return -EAGAIN;
+	}
+
+	idx = armv7pmu_get_event_idx(cpuc, event);
+	if (idx < 0 && bit >= 0)
+		clear_bit(bit, cpuc->used_mask);
+
+	return idx;
+}
+
+static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
+					 struct perf_event *event)
+{
+	int bit;
+	struct hw_perf_event *hwc = &event->hw;
+	unsigned int region = EVENT_REGION(hwc->config_base);
+	unsigned int group = EVENT_GROUP(hwc->config_base);
+	bool venum_event = EVENT_VENUM(hwc->config_base);
+	bool scorpion_event = EVENT_CPU(hwc->config_base);
+
+	if (venum_event || scorpion_event) {
+		bit = scorpion_event_to_bit(event, region, group);
+		clear_bit(bit, cpuc->used_mask);
+	}
+}
+
+static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "armv7_scorpion";
+	cpu_pmu->map_event	= scorpion_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	cpu_pmu->reset		= scorpion_pmu_reset;
+	cpu_pmu->enable		= scorpion_pmu_enable_event;
+	cpu_pmu->disable	= scorpion_pmu_disable_event;
+	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+	return 0;
+}
+
+static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7pmu_init(cpu_pmu);
+	cpu_pmu->name		= "armv7_scorpion_mp";
+	cpu_pmu->map_event	= scorpion_map_event;
+	cpu_pmu->num_events	= armv7_read_num_pmnc_events();
+	cpu_pmu->reset		= scorpion_pmu_reset;
+	cpu_pmu->enable		= scorpion_pmu_enable_event;
+	cpu_pmu->disable	= scorpion_pmu_disable_event;
+	cpu_pmu->get_event_idx	= scorpion_pmu_get_event_idx;
+	cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
+	return 0;
+}
 #else
 static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
 {
@@ -1498,4 +1895,14 @@ static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
 {
 	return -ENODEV;
 }
+
+static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
+
+static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
 #endif /* CONFIG_CPU_V7 */
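
Note: the 0xNRCCG event encoding documented in the new Scorpion comment block can be checked against the EVENT_* helpers added for Krait earlier in this diff. Below is a standalone check; the macros are copied from the diff, while the KRAIT_EVENT/VENUM_EVENT values are taken as 1 << 16 and 2 << 16, their definitions in perf_event_v7.c being outside this excerpt:

#include <stdio.h>

#define KRAIT_EVENT	0x10000		/* N = 1: Scorpion/Krait CPU event */
#define VENUM_EVENT	0x20000		/* N = 2: Venum VFP event */

#define EVENT_REGION(event)	(((event) >> 12) & 0xf)		/* R */
#define EVENT_GROUP(event)	((event) & 0xf)			/* G */
#define EVENT_CODE(event)	(((event) >> 4) & 0xff)		/* CC */
#define EVENT_VENUM(event)	(!!(event & VENUM_EVENT))	/* N=2 */
#define EVENT_CPU(event)	(!!(event & KRAIT_EVENT))	/* N=1 */

int main(void)
{
	unsigned int ev = 0x12021;	/* SCORPION_ITLB_MISS */

	/* Prints: region=2 code=2 group=1 cpu=1 venum=0 */
	printf("region=%u code=%u group=%u cpu=%d venum=%d\n",
	       EVENT_REGION(ev), EVENT_CODE(ev), EVENT_GROUP(ev),
	       EVENT_CPU(ev), EVENT_VENUM(ev));
	return 0;
}

The decoded fields match the comment's worked example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2.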
