path: root/arch/arc/kernel/perf_event.c
Diffstat (limited to 'arch/arc/kernel/perf_event.c')
-rw-r--r--  arch/arc/kernel/perf_event.c  241
1 file changed, 182 insertions(+), 59 deletions(-)
diff --git a/arch/arc/kernel/perf_event.c b/arch/arc/kernel/perf_event.c
index 8aec462d90fb..861a8aea51f9 100644
--- a/arch/arc/kernel/perf_event.c
+++ b/arch/arc/kernel/perf_event.c
@@ -1,15 +1,10 @@
-/*
- * Linux performance counter support for ARC700 series
- *
- * Copyright (C) 2013-2015 Synopsys, Inc. (www.synopsys.com)
- *
- * This code is inspired by the perf support of various other architectures.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- */
+// SPDX-License-Identifier: GPL-2.0+
+//
+// Linux performance counter support for ARC CPUs.
+// This code is inspired by the perf support of various other architectures.
+//
+// Copyright (C) 2013-2018 Synopsys, Inc. (www.synopsys.com)
+
 #include <linux/errno.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
@@ -19,12 +14,31 @@
 #include <asm/arcregs.h>
 #include <asm/stacktrace.h>
 
+/* HW holds 8 symbols + one for null terminator */
+#define ARCPMU_EVENT_NAME_LEN	9
+
+enum arc_pmu_attr_groups {
+	ARCPMU_ATTR_GR_EVENTS,
+	ARCPMU_ATTR_GR_FORMATS,
+	ARCPMU_NR_ATTR_GR
+};
+
+struct arc_pmu_raw_event_entry {
+	char name[ARCPMU_EVENT_NAME_LEN];
+};
+
 struct arc_pmu {
 	struct pmu pmu;
 	unsigned int irq;
 	int n_counters;
+	int n_events;
 	u64 max_period;
 	int ev_hw_idx[PERF_COUNT_ARC_HW_MAX];
+
+	struct arc_pmu_raw_event_entry *raw_entry;
+	struct attribute **attrs;
+	struct perf_pmu_events_attr *attr;
+	const struct attribute_group *attr_groups[ARCPMU_NR_ATTR_GR + 1];
 };
 
 struct arc_pmu_cpu {
@@ -49,6 +63,7 @@ static int callchain_trace(unsigned int addr, void *data)
 {
 	struct arc_callchain_trace *ctrl = data;
 	struct perf_callchain_entry_ctx *entry = ctrl->perf_stuff;
+
 	perf_callchain_store(entry, addr);
 
 	if (ctrl->depth++ < 3)
@@ -57,8 +72,8 @@ static int callchain_trace(unsigned int addr, void *data)
 	return -1;
 }
 
-void
-perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+			   struct pt_regs *regs)
 {
 	struct arc_callchain_trace ctrl = {
 		.depth = 0,
@@ -68,8 +83,8 @@ perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *re
 	arc_unwind_core(NULL, regs, callchain_trace, &ctrl);
 }
 
-void
-perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+			 struct pt_regs *regs)
 {
 	/*
 	 * User stack can't be unwound trivially with kernel dwarf unwinder
@@ -82,10 +97,10 @@ static struct arc_pmu *arc_pmu;
 static DEFINE_PER_CPU(struct arc_pmu_cpu, arc_pmu_cpu);
 
 /* read counter #idx; note that counter# != event# on ARC! */
-static uint64_t arc_pmu_read_counter(int idx)
+static u64 arc_pmu_read_counter(int idx)
 {
-	uint32_t tmp;
-	uint64_t result;
+	u32 tmp;
+	u64 result;
 
 	/*
 	 * ARC supports making 'snapshots' of the counters, so we don't
@@ -94,7 +109,7 @@ static uint64_t arc_pmu_read_counter(int idx)
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, tmp | ARC_REG_PCT_CONTROL_SN);
-	result = (uint64_t) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
+	result = (u64) (read_aux_reg(ARC_REG_PCT_SNAPH)) << 32;
 	result |= read_aux_reg(ARC_REG_PCT_SNAPL);
 
 	return result;
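[The snapshot dance above is what makes this 64-bit read atomic: setting ARC_REG_PCT_CONTROL_SN latches both counter halves into SNAPL/SNAPH in one operation. Without such hardware support, a counter split across two 32-bit registers must be read with a retry loop, or a carry from the low into the high half between the two reads can tear the value. A minimal sketch of that fallback, with hypothetical read_high()/read_low() helpers standing in for real registers:

    static u64 read_split_counter(void)
    {
            u32 hi, lo;

            /* retry until the high half is stable across the low-half read */
            do {
                    hi = read_high();
                    lo = read_low();
            } while (hi != read_high());

            return ((u64)hi << 32) | lo;
    }
]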
@@ -103,9 +118,9 @@ static uint64_t arc_pmu_read_counter(int idx)
 static void arc_perf_event_update(struct perf_event *event,
 				  struct hw_perf_event *hwc, int idx)
 {
-	uint64_t prev_raw_count = local64_read(&hwc->prev_count);
-	uint64_t new_raw_count = arc_pmu_read_counter(idx);
-	int64_t delta = new_raw_count - prev_raw_count;
+	u64 prev_raw_count = local64_read(&hwc->prev_count);
+	u64 new_raw_count = arc_pmu_read_counter(idx);
+	s64 delta = new_raw_count - prev_raw_count;
 
 	/*
 	 * We aren't afraid of hwc->prev_count changing beneath our feet
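[The type switch here is cosmetic, but the unsigned subtraction feeding `delta` is load-bearing: computed modulo 2^64, `new - prev` stays correct even if the counter wrapped between reads. A worked example:

    u64 prev  = 0xffffffffffffff00ULL;  /* last observed value */
    u64 now   = 0x0000000000000100ULL;  /* counter wrapped past zero */
    s64 delta = now - prev;             /* 0x200: exactly the events that occurred */
]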
@@ -155,7 +170,7 @@ static int arc_pmu_event_init(struct perf_event *event)
 	int ret;
 
 	if (!is_sampling_event(event)) {
-		hwc->sample_period  = arc_pmu->max_period;
+		hwc->sample_period = arc_pmu->max_period;
 		hwc->last_period = hwc->sample_period;
 		local64_set(&hwc->period_left, hwc->sample_period);
 	}
@@ -192,6 +207,18 @@ static int arc_pmu_event_init(struct perf_event *event)
 		pr_debug("init cache event with h/w %08x \'%s\'\n",
 			 (int)hwc->config, arc_pmu_ev_hw_map[ret]);
 		return 0;
+
+	case PERF_TYPE_RAW:
+		if (event->attr.config >= arc_pmu->n_events)
+			return -ENOENT;
+
+		hwc->config |= event->attr.config;
+		pr_debug("init raw event with idx %lld \'%s\'\n",
+			 event->attr.config,
+			 arc_pmu->raw_entry[event->attr.config].name);
+
+		return 0;
+
 	default:
 		return -ENOENT;
 	}
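[The new PERF_TYPE_RAW case accepts any condition index the hardware reported, so userspace can count conditions that have no generic perf mapping. A hedged userspace sketch (the helper name and the index passed in are illustrative; which condition a given index names is machine-specific and discoverable via the sysfs events directory added later in this diff):

    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <string.h>
    #include <unistd.h>

    static int open_arc_raw_event(unsigned long long idx)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_RAW;  /* the type this PMU registers with */
            attr.config = idx;          /* raw condition index, < n_events */

            /* perf_event_open() has no glibc wrapper */
            return syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
    }
]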
@@ -200,7 +227,7 @@ static int arc_pmu_event_init(struct perf_event *event)
 /* starts all counters */
 static void arc_pmu_enable(struct pmu *pmu)
 {
-	uint32_t tmp;
+	u32 tmp;
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x1);
 }
@@ -208,7 +235,7 @@ static void arc_pmu_enable(struct pmu *pmu)
 /* stops all counters */
 static void arc_pmu_disable(struct pmu *pmu)
 {
-	uint32_t tmp;
+	u32 tmp;
 	tmp = read_aux_reg(ARC_REG_PCT_CONTROL);
 	write_aux_reg(ARC_REG_PCT_CONTROL, (tmp & 0xffff0000) | 0x0);
 }
@@ -228,7 +255,7 @@ static int arc_pmu_event_set_period(struct perf_event *event)
 		local64_set(&hwc->period_left, left);
 		hwc->last_period = period;
 		overflow = 1;
-	} else	if (unlikely(left <= 0)) {
+	} else if (unlikely(left <= 0)) {
 		/* left underflowed by less than period. */
 		left += period;
 		local64_set(&hwc->period_left, left);
@@ -246,8 +273,8 @@ static int arc_pmu_event_set_period(struct perf_event *event)
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);
 
 	/* Write value */
-	write_aux_reg(ARC_REG_PCT_COUNTL, (u32)value);
-	write_aux_reg(ARC_REG_PCT_COUNTH, (value >> 32));
+	write_aux_reg(ARC_REG_PCT_COUNTL, lower_32_bits(value));
+	write_aux_reg(ARC_REG_PCT_COUNTH, upper_32_bits(value));
 
 	perf_event_update_userpage(event);
 
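[lower_32_bits()/upper_32_bits() (from linux/kernel.h) and BIT() (from linux/bits.h) replace the open-coded casts and shifts throughout this patch. Their definitions at the time were essentially:

    #define BIT(nr)              (1UL << (nr))
    #define lower_32_bits(n)     ((u32)((n) & 0xffffffff))
    /* the split shift avoids undefined behaviour when n is a 32-bit type */
    #define upper_32_bits(n)     ((u32)(((n) >> 16) >> 16))
]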
@@ -277,7 +304,7 @@ static void arc_pmu_start(struct perf_event *event, int flags)
 	/* Enable interrupt for this counter */
 	if (is_sampling_event(event))
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
 	/* enable ARC pmu here */
 	write_aux_reg(ARC_REG_PCT_INDEX, idx);		/* counter # */
@@ -295,9 +322,9 @@ static void arc_pmu_stop(struct perf_event *event, int flags)
 		 * Reset interrupt flag by writing of 1. This is required
 		 * to make sure pending interrupt was not left.
 		 */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~(1 << idx));
+			      read_aux_reg(ARC_REG_PCT_INT_CTRL) & ~BIT(idx));
 	}
 
 	if (!(event->hw.state & PERF_HES_STOPPED)) {
@@ -349,9 +376,10 @@ static int arc_pmu_add(struct perf_event *event, int flags)
 
 	if (is_sampling_event(event)) {
 		/* Mimic full counter overflow as other arches do */
-		write_aux_reg(ARC_REG_PCT_INT_CNTL, (u32)arc_pmu->max_period);
+		write_aux_reg(ARC_REG_PCT_INT_CNTL,
+			      lower_32_bits(arc_pmu->max_period));
 		write_aux_reg(ARC_REG_PCT_INT_CNTH,
-			      (arc_pmu->max_period >> 32));
+			      upper_32_bits(arc_pmu->max_period));
 	}
 
 	write_aux_reg(ARC_REG_PCT_CONFIG, 0);
@@ -392,7 +420,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 		idx = __ffs(active_ints);
 
 		/* Reset interrupt flag by writing of 1 */
-		write_aux_reg(ARC_REG_PCT_INT_ACT, 1 << idx);
+		write_aux_reg(ARC_REG_PCT_INT_ACT, BIT(idx));
 
 		/*
 		 * On reset of "interrupt active" bit corresponding
@@ -400,7 +428,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 		 * Now we need to re-enable interrupt for the counter.
 		 */
 		write_aux_reg(ARC_REG_PCT_INT_CTRL,
-			read_aux_reg(ARC_REG_PCT_INT_CTRL) | (1 << idx));
+			read_aux_reg(ARC_REG_PCT_INT_CTRL) | BIT(idx));
 
 		event = pmu_cpu->act_counter[idx];
 		hwc = &event->hw;
@@ -414,7 +442,7 @@ static irqreturn_t arc_pmu_intr(int irq, void *dev)
 			arc_pmu_stop(event, 0);
 		}
 
-		active_ints &= ~(1U << idx);
+		active_ints &= ~BIT(idx);
 	} while (active_ints);
 
 done:
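[This handler walks active_ints with the standard iterate-over-set-bits idiom: __ffs() yields the index of the least-significant set bit (it is undefined for 0, which the loop condition excludes), and clearing BIT(idx) retires that counter before the next pass. Condensed to its shape (a sketch, not the exact control flow of the handler):

    while (active_ints) {
            idx = __ffs(active_ints);   /* lowest pending counter */
            /* ... acknowledge and service counter idx ... */
            active_ints &= ~BIT(idx);   /* mark it handled */
    }
]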
@@ -441,19 +469,108 @@ static void arc_cpu_pmu_irq_init(void *data)
 	write_aux_reg(ARC_REG_PCT_INT_ACT, 0xffffffff);
 }
 
+/* Event field occupies the bottom 15 bits of our config field */
+PMU_FORMAT_ATTR(event, "config:0-14");
+static struct attribute *arc_pmu_format_attrs[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static struct attribute_group arc_pmu_format_attr_gr = {
+	.name = "format",
+	.attrs = arc_pmu_format_attrs,
+};
+
+static ssize_t arc_pmu_events_sysfs_show(struct device *dev,
+					 struct device_attribute *attr,
+					 char *page)
+{
+	struct perf_pmu_events_attr *pmu_attr;
+
+	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
+	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
+}
+
+/*
+ * We don't add attrs here as we don't have pre-defined list of perf events.
+ * We will generate and add attrs dynamically in probe() after we read HW
+ * configuration.
+ */
+static struct attribute_group arc_pmu_events_attr_gr = {
+	.name = "events",
+};
+
+static void arc_pmu_add_raw_event_attr(int j, char *str)
+{
+	memmove(arc_pmu->raw_entry[j].name, str, ARCPMU_EVENT_NAME_LEN - 1);
+	arc_pmu->attr[j].attr.attr.name = arc_pmu->raw_entry[j].name;
+	arc_pmu->attr[j].attr.attr.mode = VERIFY_OCTAL_PERMISSIONS(0444);
+	arc_pmu->attr[j].attr.show = arc_pmu_events_sysfs_show;
+	arc_pmu->attr[j].id = j;
+	arc_pmu->attrs[j] = &(arc_pmu->attr[j].attr.attr);
+}
+
+static int arc_pmu_raw_alloc(struct device *dev)
+{
+	arc_pmu->attr = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+		sizeof(*arc_pmu->attr), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->attr)
+		return -ENOMEM;
+
+	arc_pmu->attrs = devm_kmalloc_array(dev, arc_pmu->n_events + 1,
+		sizeof(*arc_pmu->attrs), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->attrs)
+		return -ENOMEM;
+
+	arc_pmu->raw_entry = devm_kmalloc_array(dev, arc_pmu->n_events,
+		sizeof(*arc_pmu->raw_entry), GFP_KERNEL | __GFP_ZERO);
+	if (!arc_pmu->raw_entry)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static inline bool event_in_hw_event_map(int i, char *name)
+{
+	if (!arc_pmu_ev_hw_map[i])
+		return false;
+
+	if (!strlen(arc_pmu_ev_hw_map[i]))
+		return false;
+
+	if (strcmp(arc_pmu_ev_hw_map[i], name))
+		return false;
+
+	return true;
+}
+
+static void arc_pmu_map_hw_event(int j, char *str)
+{
+	int i;
+
+	/* See if HW condition has been mapped to a perf event_id */
+	for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
+		if (event_in_hw_event_map(i, str)) {
+			pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
+				 i, str, j);
+			arc_pmu->ev_hw_idx[i] = j;
+		}
+	}
+}
+
 static int arc_pmu_device_probe(struct platform_device *pdev)
 {
 	struct arc_reg_pct_build pct_bcr;
 	struct arc_reg_cc_build cc_bcr;
-	int i, j, has_interrupts;
+	int i, has_interrupts;
 	int counter_size;	/* in bits */
 
 	union cc_name {
 		struct {
-			uint32_t word0, word1;
+			u32 word0, word1;
 			char sentinel;
 		} indiv;
-		char str[9];
+		char str[ARCPMU_EVENT_NAME_LEN];
 	} cc_name;
 
 
@@ -463,15 +580,22 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		return -ENODEV;
 	}
 	BUILD_BUG_ON(ARC_PERF_MAX_COUNTERS > 32);
-	BUG_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS);
+	if (WARN_ON(pct_bcr.c > ARC_PERF_MAX_COUNTERS))
+		return -EINVAL;
 
 	READ_BCR(ARC_REG_CC_BUILD, cc_bcr);
-	BUG_ON(!cc_bcr.v); /* Counters exist but No countable conditions ? */
+	if (WARN(!cc_bcr.v, "Counters exist but No countable conditions?"))
+		return -EINVAL;
 
 	arc_pmu = devm_kzalloc(&pdev->dev, sizeof(struct arc_pmu), GFP_KERNEL);
 	if (!arc_pmu)
 		return -ENOMEM;
 
+	arc_pmu->n_events = cc_bcr.c;
+
+	if (arc_pmu_raw_alloc(&pdev->dev))
+		return -ENOMEM;
+
 	has_interrupts = is_isa_arcv2() ? pct_bcr.i : 0;
 
 	arc_pmu->n_counters = pct_bcr.c;
@@ -481,30 +605,26 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 
 	pr_info("ARC perf\t: %d counters (%d bits), %d conditions%s\n",
 		arc_pmu->n_counters, counter_size, cc_bcr.c,
-		has_interrupts ? ", [overflow IRQ support]":"");
+		has_interrupts ? ", [overflow IRQ support]" : "");
 
-	cc_name.str[8] = 0;
+	cc_name.str[ARCPMU_EVENT_NAME_LEN - 1] = 0;
 	for (i = 0; i < PERF_COUNT_ARC_HW_MAX; i++)
 		arc_pmu->ev_hw_idx[i] = -1;
 
 	/* loop thru all available h/w condition indexes */
-	for (j = 0; j < cc_bcr.c; j++) {
-		write_aux_reg(ARC_REG_CC_INDEX, j);
+	for (i = 0; i < cc_bcr.c; i++) {
+		write_aux_reg(ARC_REG_CC_INDEX, i);
 		cc_name.indiv.word0 = read_aux_reg(ARC_REG_CC_NAME0);
 		cc_name.indiv.word1 = read_aux_reg(ARC_REG_CC_NAME1);
 
-		/* See if it has been mapped to a perf event_id */
-		for (i = 0; i < ARRAY_SIZE(arc_pmu_ev_hw_map); i++) {
-			if (arc_pmu_ev_hw_map[i] &&
-			    !strcmp(arc_pmu_ev_hw_map[i], cc_name.str) &&
-			    strlen(arc_pmu_ev_hw_map[i])) {
-				pr_debug("mapping perf event %2d to h/w event \'%8s\' (idx %d)\n",
-					 i, cc_name.str, j);
-				arc_pmu->ev_hw_idx[i] = j;
-			}
-		}
+		arc_pmu_map_hw_event(i, cc_name.str);
+		arc_pmu_add_raw_event_attr(i, cc_name.str);
 	}
 
+	arc_pmu_events_attr_gr.attrs = arc_pmu->attrs;
+	arc_pmu->attr_groups[ARCPMU_ATTR_GR_EVENTS] = &arc_pmu_events_attr_gr;
+	arc_pmu->attr_groups[ARCPMU_ATTR_GR_FORMATS] = &arc_pmu_format_attr_gr;
+
 	arc_pmu->pmu = (struct pmu) {
 		.pmu_enable = arc_pmu_enable,
 		.pmu_disable = arc_pmu_disable,
@@ -514,6 +634,7 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 		.start = arc_pmu_start,
 		.stop = arc_pmu_stop,
 		.read = arc_pmu_read,
+		.attr_groups = arc_pmu->attr_groups,
 	};
 
 	if (has_interrupts) {
@@ -535,17 +656,19 @@ static int arc_pmu_device_probe(struct platform_device *pdev)
 	} else
 		arc_pmu->pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
 
-	return perf_pmu_register(&arc_pmu->pmu, pdev->name, PERF_TYPE_RAW);
+	/*
+	 * perf parser doesn't really like '-' symbol in events name, so let's
+	 * use '_' in arc pct name as it goes to kernel PMU event prefix.
+	 */
+	return perf_pmu_register(&arc_pmu->pmu, "arc_pct", PERF_TYPE_RAW);
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id arc_pmu_match[] = {
 	{ .compatible = "snps,arc700-pct" },
 	{ .compatible = "snps,archs-pct" },
 	{},
 };
 MODULE_DEVICE_TABLE(of, arc_pmu_match);
-#endif
 
 static struct platform_driver arc_pmu_driver = {
 	.driver = {