author		Pawel Moll <pawel.moll@arm.com>	2014-07-22 13:32:59 -0400
committer	Arnd Bergmann <arnd@arndb.de>	2014-07-23 16:14:43 -0400
commit		a33b0daab73a0e08cc04459dd44b0121a8e8f81b (patch)
tree		e7c432add19822871a057313264825c85e9cdf80
parent		779ae55bd8ee63f2ba35a0ec15f033e512e706ee (diff)
bus: ARM CCN PMU driver
Driver providing perf backend for ARM Cache Coherent Network
interconnect. Supports counting all hardware events and crosspoint
watchpoints. Currently works with CCN-504 only, although there should
be no changes required for CCN-508 (just impossible to test it now).

Signed-off-by: Pawel Moll <pawel.moll@arm.com>
Signed-off-by: Arnd Bergmann <arnd@arndb.de>
-rw-r--r--	Documentation/arm/CCN.txt			52
-rw-r--r--	Documentation/devicetree/bindings/arm/ccn.txt	21
-rw-r--r--	drivers/bus/Kconfig				7
-rw-r--r--	drivers/bus/Makefile				4
-rw-r--r--	drivers/bus/arm-ccn.c				1390
5 files changed, 1473 insertions(+), 1 deletion(-)
diff --git a/Documentation/arm/CCN.txt b/Documentation/arm/CCN.txt
new file mode 100644
index 000000000000..0632b3aad83e
--- /dev/null
+++ b/Documentation/arm/CCN.txt
@@ -0,0 +1,52 @@
ARM Cache Coherent Network
==========================

CCN-504 is a ring-bus interconnect consisting of 11 crosspoints
(XPs), with each crosspoint supporting up to two device ports,
so nodes (devices) 0 and 1 are connected to crosspoint 0,
nodes 2 and 3 to crosspoint 1 etc.

PMU (perf) driver
-----------------

The CCN driver registers a perf PMU driver, which provides
descriptions of the available events and configuration options
in sysfs; see /sys/bus/event_source/devices/ccn*.

The "format" directory describes the format of the config, config1
and config2 fields of the perf_event_attr structure. The "events"
directory provides configuration templates for all documented
events, which can be used with the perf tool. For example,
"xp_valid_flit" is equivalent to "type=0x8,event=0x4". Other
parameters must be explicitly specified. For events originating from
a device, "node" defines its index. All crosspoint events require
"xp" (crosspoint index), "port" (device port number), "vc" (virtual
channel ID) and "dir" (direction). Watchpoints (special "event"
value 0xfe) also require comparator values ("cmp_l" and "cmp_h")
and "mask", which is an index of the comparator mask.

Masks are defined separately from the event description
(due to the limited number of the config values) in the "cmp_mask"
directory, with the first 8 configurable by the user and an
additional 4 hardcoded for the most frequent use cases.

The cycle counter is described by a "type" value of 0xff and does
not require any other settings.

Example of perf tool use:

/ # perf list | grep ccn
  ccn/cycles/                                        [Kernel PMU event]
<...>
  ccn/xp_valid_flit/                                 [Kernel PMU event]
<...>

/ # perf stat -C 0 -e ccn/cycles/,ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/ \
       sleep 1

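A watchpoint can be used to count only flits matching a given
pattern. The values below are purely illustrative and assume that,
as with the predefined "any" mask, set bits of a comparator mask
mark flit bits to be ignored - here user mask 0 is programmed to
compare just the bottom four bits of the first flit word:

/ # echo 0xfffffffffffffff0 > /sys/bus/event_source/devices/ccn/cmp_mask/0l
/ # echo 0xffffffffffffffff > /sys/bus/event_source/devices/ccn/cmp_mask/0h
/ # perf stat -C 0 -e \
       ccn/xp_watchpoint,xp=1,port=0,vc=1,dir=1,cmp_l=0x5,cmp_h=0,mask=0x0/ \
       sleep 1
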
The driver does not support sampling, therefore "perf record" will
not work. Also notice that only a single CPU is being selected
("-C 0") - this is because the perf framework does not support
"non-CPU related" counters (yet?), so a system-wide session ("-a")
would try (and in most cases fail) to set up the same event
for each CPU.
diff --git a/Documentation/devicetree/bindings/arm/ccn.txt b/Documentation/devicetree/bindings/arm/ccn.txt
new file mode 100644
index 000000000000..b100d3847d88
--- /dev/null
+++ b/Documentation/devicetree/bindings/arm/ccn.txt
@@ -0,0 +1,21 @@
* ARM CCN (Cache Coherent Network)

Required properties:

- compatible: (standard compatible string) should be one of:
	"arm,ccn-504"
	"arm,ccn-508"

- reg: (standard registers property) physical address and size
	(16MB) of the configuration registers block

- interrupts: (standard interrupt property) single interrupt
	generated by the control block

Example:

	ccn@0x2000000000 {
		compatible = "arm,ccn-504";
		reg = <0x20 0x00000000 0 0x1000000>;
		interrupts = <0 181 4>;
	};
diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig
index 1f37d9870e7a..5c0c2764839f 100644
--- a/drivers/bus/Kconfig
+++ b/drivers/bus/Kconfig
@@ -50,6 +50,13 @@ config ARM_CCI
 	  Driver supporting the CCI cache coherent interconnect for ARM
 	  platforms.
 
+config ARM_CCN
+	bool "ARM CCN driver support"
+	depends on ARM || ARM64
+	help
+	  PMU (perf) driver supporting the ARM CCN (Cache Coherent Network)
+	  interconnect.
+
 config VEXPRESS_CONFIG
 	bool "Versatile Express configuration bus"
 	default y if ARCH_VEXPRESS
diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile
index 6a4ea7e4af1a..2973c18cbcc2 100644
--- a/drivers/bus/Makefile
+++ b/drivers/bus/Makefile
@@ -9,7 +9,9 @@ obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o
 
 # Interconnect bus driver for OMAP SoCs.
 obj-$(CONFIG_OMAP_INTERCONNECT)	+= omap_l3_smx.o omap_l3_noc.o
-# CCI cache coherent interconnect for ARM platforms
+
+# Interconnect bus drivers for ARM platforms
 obj-$(CONFIG_ARM_CCI)		+= arm-cci.o
+obj-$(CONFIG_ARM_CCN)		+= arm-ccn.o
 
 obj-$(CONFIG_VEXPRESS_CONFIG)	+= vexpress-config.o
diff --git a/drivers/bus/arm-ccn.c b/drivers/bus/arm-ccn.c
new file mode 100644
index 000000000000..4f86bbb2fac5
--- /dev/null
+++ b/drivers/bus/arm-ccn.c
@@ -0,0 +1,1390 @@
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright (C) 2014 ARM Limited
 */

#include <linux/ctype.h>
#include <linux/hrtimer.h>
#include <linux/idr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

#define CCN_NUM_XP_PORTS	2
#define CCN_NUM_VCS		4
#define CCN_NUM_REGIONS		256
#define CCN_REGION_SIZE		0x10000

#define CCN_ALL_OLY_ID			0xff00
#define CCN_ALL_OLY_ID__OLY_ID__SHIFT	0
#define CCN_ALL_OLY_ID__OLY_ID__MASK	0x1f
#define CCN_ALL_OLY_ID__NODE_ID__SHIFT	8
#define CCN_ALL_OLY_ID__NODE_ID__MASK	0x3f

#define CCN_MN_ERRINT_STATUS	0x0008
#define CCN_MN_ERRINT_STATUS__INTREQ__DEASSERT		0x11
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__ENABLE	0x02
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLED	0x20
#define CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE	0x22
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_ENABLE	0x04
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLED	0x40
#define CCN_MN_ERRINT_STATUS__CORRECTED_ERRORS_DISABLE	0x44
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE	0x08
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED	0x80
#define CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE	0x88
#define CCN_MN_OLY_COMP_LIST_63_0	0x01e0
#define CCN_MN_ERR_SIG_VAL_63_0		0x0300
#define CCN_MN_ERR_SIG_VAL_63_0__DT	(1 << 1)

#define CCN_DT_ACTIVE_DSM		0x0000
#define CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(n)	((n) * 8)
#define CCN_DT_ACTIVE_DSM__DSM_ID__MASK		0xff
#define CCN_DT_CTL			0x0028
#define CCN_DT_CTL__DT_EN		(1 << 0)
#define CCN_DT_PMEVCNT(n)		(0x0100 + (n) * 0x8)
#define CCN_DT_PMCCNTR			0x0140
#define CCN_DT_PMCCNTRSR		0x0190
#define CCN_DT_PMOVSR			0x0198
#define CCN_DT_PMOVSR_CLR		0x01a0
#define CCN_DT_PMCR			0x01a8
#define CCN_DT_PMCR__OVFL_INTR_EN	(1 << 6)
#define CCN_DT_PMCR__PMU_EN		(1 << 0)
#define CCN_DT_PMSR			0x01b0
#define CCN_DT_PMSR_REQ			0x01b8
#define CCN_DT_PMSR_CLR			0x01c0

#define CCN_HNF_PMU_EVENT_SEL		0x0600
#define CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(n)	((n) * 4)
#define CCN_HNF_PMU_EVENT_SEL__ID__MASK		0xf

#define CCN_XP_DT_CONFIG		0x0300
#define CCN_XP_DT_CONFIG__DT_CFG__SHIFT(n)	((n) * 4)
#define CCN_XP_DT_CONFIG__DT_CFG__MASK		0xf
#define CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH	0x0
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT_0_OR_1	0x1
#define CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(n)	(0x2 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(n)	(0x4 + (n))
#define CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(d, n) (0x8 + (d) * 4 + (n))
#define CCN_XP_DT_INTERFACE_SEL		0x0308
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(n)	(0 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK	0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(n)	(1 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK	0x1
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(n)	(2 + (n) * 8)
#define CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK	0x3
#define CCN_XP_DT_CMP_VAL_L(n)		(0x0310 + (n) * 0x40)
#define CCN_XP_DT_CMP_VAL_H(n)		(0x0318 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_L(n)		(0x0320 + (n) * 0x40)
#define CCN_XP_DT_CMP_MASK_H(n)		(0x0328 + (n) * 0x40)
#define CCN_XP_DT_CONTROL		0x0370
#define CCN_XP_DT_CONTROL__DT_ENABLE	(1 << 0)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(n)	(12 + (n) * 4)
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__MASK	0xf
#define CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS	0xf
#define CCN_XP_PMU_EVENT_SEL		0x0600
#define CCN_XP_PMU_EVENT_SEL__ID__SHIFT(n)	((n) * 7)
#define CCN_XP_PMU_EVENT_SEL__ID__MASK		0x3f

#define CCN_SBAS_PMU_EVENT_SEL		0x0600
#define CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(n)	((n) * 4)
#define CCN_SBAS_PMU_EVENT_SEL__ID__MASK	0xf

#define CCN_RNI_PMU_EVENT_SEL		0x0600
#define CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(n)	((n) * 4)
#define CCN_RNI_PMU_EVENT_SEL__ID__MASK		0xf

#define CCN_TYPE_MN	0x01
#define CCN_TYPE_DT	0x02
#define CCN_TYPE_HNF	0x04
#define CCN_TYPE_HNI	0x05
#define CCN_TYPE_XP	0x08
#define CCN_TYPE_SBSX	0x0c
#define CCN_TYPE_SBAS	0x10
#define CCN_TYPE_RNI_1P	0x14
#define CCN_TYPE_RNI_2P	0x15
#define CCN_TYPE_RNI_3P	0x16
#define CCN_TYPE_RND_1P	0x18	/* RN-D = RN-I + DVM */
#define CCN_TYPE_RND_2P	0x19
#define CCN_TYPE_RND_3P	0x1a
#define CCN_TYPE_CYCLES	0xff	/* Pseudotype */

#define CCN_EVENT_WATCHPOINT	0xfe	/* Pseudoevent */

#define CCN_NUM_PMU_EVENTS		4
#define CCN_NUM_XP_WATCHPOINTS		2	/* See DT.dbg_id.num_watchpoints */
#define CCN_NUM_PMU_EVENT_COUNTERS	8	/* See DT.dbg_id.num_pmucntr */
#define CCN_IDX_PMU_CYCLE_COUNTER	CCN_NUM_PMU_EVENT_COUNTERS

#define CCN_NUM_PREDEFINED_MASKS	4
#define CCN_IDX_MASK_ANY	(CCN_NUM_PMU_EVENT_COUNTERS + 0)
#define CCN_IDX_MASK_EXACT	(CCN_NUM_PMU_EVENT_COUNTERS + 1)
#define CCN_IDX_MASK_ORDER	(CCN_NUM_PMU_EVENT_COUNTERS + 2)
#define CCN_IDX_MASK_OPCODE	(CCN_NUM_PMU_EVENT_COUNTERS + 3)

struct arm_ccn_component {
	void __iomem *base;
	u32 type;

	DECLARE_BITMAP(pmu_events_mask, CCN_NUM_PMU_EVENTS);
	union {
		struct {
			DECLARE_BITMAP(dt_cmp_mask, CCN_NUM_XP_WATCHPOINTS);
		} xp;
	};
};

#define pmu_to_arm_ccn(_pmu) container_of(container_of(_pmu, \
		struct arm_ccn_dt, pmu), struct arm_ccn, dt)

struct arm_ccn_dt {
	int id;
	void __iomem *base;

	spinlock_t config_lock;

	DECLARE_BITMAP(pmu_counters_mask, CCN_NUM_PMU_EVENT_COUNTERS + 1);
	struct {
		struct arm_ccn_component *source;
		struct perf_event *event;
	} pmu_counters[CCN_NUM_PMU_EVENT_COUNTERS + 1];

	struct {
		u64 l, h;
	} cmp_mask[CCN_NUM_PMU_EVENT_COUNTERS + CCN_NUM_PREDEFINED_MASKS];

	struct hrtimer hrtimer;

	struct pmu pmu;
};

struct arm_ccn {
	struct device *dev;
	void __iomem *base;
	unsigned irq_used:1;
	unsigned sbas_present:1;
	unsigned sbsx_present:1;

	int num_nodes;
	struct arm_ccn_component *node;

	int num_xps;
	struct arm_ccn_component *xp;

	struct arm_ccn_dt dt;
};


static int arm_ccn_node_to_xp(int node)
{
	return node / CCN_NUM_XP_PORTS;
}

static int arm_ccn_node_to_xp_port(int node)
{
	return node % CCN_NUM_XP_PORTS;
}


/*
 * Bit shifts and masks in these defines must be kept in sync with
 * arm_ccn_pmu_config_set() and CCN_FORMAT_ATTRs below!
 */
#define CCN_CONFIG_NODE(_config)	(((_config) >> 0) & 0xff)
#define CCN_CONFIG_XP(_config)		(((_config) >> 0) & 0xff)
#define CCN_CONFIG_TYPE(_config)	(((_config) >> 8) & 0xff)
#define CCN_CONFIG_EVENT(_config)	(((_config) >> 16) & 0xff)
#define CCN_CONFIG_PORT(_config)	(((_config) >> 24) & 0x3)
#define CCN_CONFIG_VC(_config)		(((_config) >> 26) & 0x7)
#define CCN_CONFIG_DIR(_config)		(((_config) >> 29) & 0x1)
#define CCN_CONFIG_MASK(_config)	(((_config) >> 30) & 0xf)

static void arm_ccn_pmu_config_set(u64 *config, u32 node_xp, u32 type, u32 port)
{
	*config &= ~((0xff << 0) | (0xff << 8) | (0xff << 24));
	*config |= (node_xp << 0) | (type << 8) | (port << 24);
}

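/*
 * Worked example of the layout above: the documentation's
 * "ccn/xp_valid_flit,xp=1,port=0,vc=1,dir=1/" request encodes as:
 *
 *	(0x01 << 0)	xp = 1
 *	(0x08 << 8)	type = CCN_TYPE_XP
 *	(0x04 << 16)	event = valid flit
 *	(0x0 << 24)	port = 0
 *	(0x1 << 26)	vc = 1
 *	(0x1 << 29)	dir = 1 (TX)
 *
 * i.e. config = 0x24040801, from which CCN_CONFIG_XP() extracts 1,
 * CCN_CONFIG_EVENT() 0x4 and so on.
 */
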
static ssize_t arm_ccn_pmu_format_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *ea = container_of(attr,
			struct dev_ext_attribute, attr);

	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)ea->var);
}

#define CCN_FORMAT_ATTR(_name, _config) \
	struct dev_ext_attribute arm_ccn_pmu_format_attr_##_name = \
			{ __ATTR(_name, S_IRUGO, arm_ccn_pmu_format_show, \
			NULL), _config }

static CCN_FORMAT_ATTR(node, "config:0-7");
static CCN_FORMAT_ATTR(xp, "config:0-7");
static CCN_FORMAT_ATTR(type, "config:8-15");
static CCN_FORMAT_ATTR(event, "config:16-23");
static CCN_FORMAT_ATTR(port, "config:24-25");
static CCN_FORMAT_ATTR(vc, "config:26-28");
static CCN_FORMAT_ATTR(dir, "config:29-29");
static CCN_FORMAT_ATTR(mask, "config:30-33");
static CCN_FORMAT_ATTR(cmp_l, "config1:0-62");
static CCN_FORMAT_ATTR(cmp_h, "config2:0-59");

static struct attribute *arm_ccn_pmu_format_attrs[] = {
	&arm_ccn_pmu_format_attr_node.attr.attr,
	&arm_ccn_pmu_format_attr_xp.attr.attr,
	&arm_ccn_pmu_format_attr_type.attr.attr,
	&arm_ccn_pmu_format_attr_event.attr.attr,
	&arm_ccn_pmu_format_attr_port.attr.attr,
	&arm_ccn_pmu_format_attr_vc.attr.attr,
	&arm_ccn_pmu_format_attr_dir.attr.attr,
	&arm_ccn_pmu_format_attr_mask.attr.attr,
	&arm_ccn_pmu_format_attr_cmp_l.attr.attr,
	&arm_ccn_pmu_format_attr_cmp_h.attr.attr,
	NULL
};

static struct attribute_group arm_ccn_pmu_format_attr_group = {
	.name = "format",
	.attrs = arm_ccn_pmu_format_attrs,
};


struct arm_ccn_pmu_event {
	struct device_attribute attr;
	u32 type;
	u32 event;
	int num_ports;
	int num_vcs;
	const char *def;
	int mask;
};

#define CCN_EVENT_ATTR(_name) \
	__ATTR(_name, S_IRUGO, arm_ccn_pmu_event_show, NULL)

/*
 * Events defined in TRM for MN, HN-I and SBSX are actually watchpoints set on
 * their ports in XP they are connected to. For the sake of usability they are
 * explicitly defined here (and translated into a relevant watchpoint in
 * arm_ccn_pmu_event_init()) so the user can easily request them without deep
 * knowledge of the flit format.
 */

#define CCN_EVENT_MN(_name, _def, _mask) { .attr = CCN_EVENT_ATTR(mn_##_name), \
		.type = CCN_TYPE_MN, .event = CCN_EVENT_WATCHPOINT, \
		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, \
		.def = _def, .mask = _mask, }

#define CCN_EVENT_HNI(_name, _def, _mask) { \
		.attr = CCN_EVENT_ATTR(hni_##_name), .type = CCN_TYPE_HNI, \
		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }

#define CCN_EVENT_SBSX(_name, _def, _mask) { \
		.attr = CCN_EVENT_ATTR(sbsx_##_name), .type = CCN_TYPE_SBSX, \
		.event = CCN_EVENT_WATCHPOINT, .num_ports = CCN_NUM_XP_PORTS, \
		.num_vcs = CCN_NUM_VCS, .def = _def, .mask = _mask, }

#define CCN_EVENT_HNF(_name, _event) { .attr = CCN_EVENT_ATTR(hnf_##_name), \
		.type = CCN_TYPE_HNF, .event = _event, }

#define CCN_EVENT_XP(_name, _event) { .attr = CCN_EVENT_ATTR(xp_##_name), \
		.type = CCN_TYPE_XP, .event = _event, \
		.num_ports = CCN_NUM_XP_PORTS, .num_vcs = CCN_NUM_VCS, }

/*
 * RN-I & RN-D (RN-D = RN-I + DVM) nodes have different type ID depending
 * on configuration. One of them is picked to represent the whole group,
 * as they all share the same event types.
 */
#define CCN_EVENT_RNI(_name, _event) { .attr = CCN_EVENT_ATTR(rni_##_name), \
		.type = CCN_TYPE_RNI_3P, .event = _event, }

#define CCN_EVENT_SBAS(_name, _event) { .attr = CCN_EVENT_ATTR(sbas_##_name), \
		.type = CCN_TYPE_SBAS, .event = _event, }

#define CCN_EVENT_CYCLES(_name) { .attr = CCN_EVENT_ATTR(_name), \
		.type = CCN_TYPE_CYCLES }


static ssize_t arm_ccn_pmu_event_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct arm_ccn_pmu_event *event = container_of(attr,
			struct arm_ccn_pmu_event, attr);
	ssize_t res;

	res = snprintf(buf, PAGE_SIZE, "type=0x%x", event->type);
	if (event->event)
		res += snprintf(buf + res, PAGE_SIZE - res, ",event=0x%x",
				event->event);
	if (event->def)
		res += snprintf(buf + res, PAGE_SIZE - res, ",%s",
				event->def);
	if (event->mask)
		res += snprintf(buf + res, PAGE_SIZE - res, ",mask=0x%x",
				event->mask);
	res += snprintf(buf + res, PAGE_SIZE - res, "\n");

	return res;
}

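/*
 * For example, for the "rxreqflits_order" HN-I entry in the table
 * below, the callback above produces the template:
 *
 *	type=0x5,event=0xfe,dir=0,vc=0,cmp_h=0x8000,mask=0xa
 *
 * (0xa being CCN_IDX_MASK_ORDER: 8 user masks + hardcoded mask 2),
 * which is exactly what the perf tool consumes.
 */
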
static umode_t arm_ccn_pmu_events_is_visible(struct kobject *kobj,
		struct attribute *attr, int index)
{
	struct device *dev = kobj_to_dev(kobj);
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	struct device_attribute *dev_attr = container_of(attr,
			struct device_attribute, attr);
	struct arm_ccn_pmu_event *event = container_of(dev_attr,
			struct arm_ccn_pmu_event, attr);

	if (event->type == CCN_TYPE_SBAS && !ccn->sbas_present)
		return 0;
	if (event->type == CCN_TYPE_SBSX && !ccn->sbsx_present)
		return 0;

	return attr->mode;
}

static struct arm_ccn_pmu_event arm_ccn_pmu_events[] = {
	CCN_EVENT_MN(eobarrier, "dir=0,vc=0,cmp_h=0x1c00", CCN_IDX_MASK_OPCODE),
	CCN_EVENT_MN(ecbarrier, "dir=0,vc=0,cmp_h=0x1e00", CCN_IDX_MASK_OPCODE),
	CCN_EVENT_MN(dvmop, "dir=0,vc=0,cmp_h=0x2800", CCN_IDX_MASK_OPCODE),
	CCN_EVENT_HNI(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_HNI(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
			CCN_IDX_MASK_ORDER),
	CCN_EVENT_SBSX(txdatflits, "dir=1,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(rxdatflits, "dir=0,vc=3", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(txreqflits, "dir=1,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(rxreqflits, "dir=0,vc=0", CCN_IDX_MASK_ANY),
	CCN_EVENT_SBSX(rxreqflits_order, "dir=0,vc=0,cmp_h=0x8000",
			CCN_IDX_MASK_ORDER),
	CCN_EVENT_HNF(cache_miss, 0x1),
	CCN_EVENT_HNF(l3_sf_cache_access, 0x02),
	CCN_EVENT_HNF(cache_fill, 0x3),
	CCN_EVENT_HNF(pocq_retry, 0x4),
	CCN_EVENT_HNF(pocq_reqs_recvd, 0x5),
	CCN_EVENT_HNF(sf_hit, 0x6),
	CCN_EVENT_HNF(sf_evictions, 0x7),
	CCN_EVENT_HNF(snoops_sent, 0x8),
	CCN_EVENT_HNF(snoops_broadcast, 0x9),
	CCN_EVENT_HNF(l3_eviction, 0xa),
	CCN_EVENT_HNF(l3_fill_invalid_way, 0xb),
	CCN_EVENT_HNF(mc_retries, 0xc),
	CCN_EVENT_HNF(mc_reqs, 0xd),
	CCN_EVENT_HNF(qos_hh_retry, 0xe),
	CCN_EVENT_RNI(rdata_beats_p0, 0x1),
	CCN_EVENT_RNI(rdata_beats_p1, 0x2),
	CCN_EVENT_RNI(rdata_beats_p2, 0x3),
	CCN_EVENT_RNI(rxdat_flits, 0x4),
	CCN_EVENT_RNI(txdat_flits, 0x5),
	CCN_EVENT_RNI(txreq_flits, 0x6),
	CCN_EVENT_RNI(txreq_flits_retried, 0x7),
	CCN_EVENT_RNI(rrt_full, 0x8),
	CCN_EVENT_RNI(wrt_full, 0x9),
	CCN_EVENT_RNI(txreq_flits_replayed, 0xa),
	CCN_EVENT_XP(upload_starvation, 0x1),
	CCN_EVENT_XP(download_starvation, 0x2),
	CCN_EVENT_XP(respin, 0x3),
	CCN_EVENT_XP(valid_flit, 0x4),
	CCN_EVENT_XP(watchpoint, CCN_EVENT_WATCHPOINT),
	CCN_EVENT_SBAS(rdata_beats_p0, 0x1),
	CCN_EVENT_SBAS(rxdat_flits, 0x4),
	CCN_EVENT_SBAS(txdat_flits, 0x5),
	CCN_EVENT_SBAS(txreq_flits, 0x6),
	CCN_EVENT_SBAS(txreq_flits_retried, 0x7),
	CCN_EVENT_SBAS(rrt_full, 0x8),
	CCN_EVENT_SBAS(wrt_full, 0x9),
	CCN_EVENT_SBAS(txreq_flits_replayed, 0xa),
	CCN_EVENT_CYCLES(cycles),
};

/* Populated in arm_ccn_init() */
static struct attribute
		*arm_ccn_pmu_events_attrs[ARRAY_SIZE(arm_ccn_pmu_events) + 1];

static struct attribute_group arm_ccn_pmu_events_attr_group = {
	.name = "events",
	.is_visible = arm_ccn_pmu_events_is_visible,
	.attrs = arm_ccn_pmu_events_attrs,
};


static u64 *arm_ccn_pmu_get_cmp_mask(struct arm_ccn *ccn, const char *name)
{
	unsigned long i;

	if (WARN_ON(!name || !name[0] || !isxdigit(name[0]) || !name[1]))
		return NULL;
	i = isdigit(name[0]) ? name[0] - '0' : 0xa + tolower(name[0]) - 'a';

	switch (name[1]) {
	case 'l':
		return &ccn->dt.cmp_mask[i].l;
	case 'h':
		return &ccn->dt.cmp_mask[i].h;
	default:
		return NULL;
	}
}

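/*
 * For example, the attribute name "7l" makes the function above
 * return &cmp_mask[7].l (the last user-programmable mask), while
 * "ah" yields &cmp_mask[0xa].h, i.e. the hardcoded
 * CCN_IDX_MASK_ORDER mask.
 */
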
static ssize_t arm_ccn_pmu_cmp_mask_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);

	return mask ? snprintf(buf, PAGE_SIZE, "0x%016llx\n", *mask) : -EINVAL;
}

static ssize_t arm_ccn_pmu_cmp_mask_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t count)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(dev_get_drvdata(dev));
	u64 *mask = arm_ccn_pmu_get_cmp_mask(ccn, attr->attr.name);
	int err = -EINVAL;

	if (mask)
		err = kstrtoull(buf, 0, mask);

	return err ? err : count;
}

#define CCN_CMP_MASK_ATTR(_name) \
	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
			__ATTR(_name, S_IRUGO | S_IWUSR, \
			arm_ccn_pmu_cmp_mask_show, arm_ccn_pmu_cmp_mask_store)

#define CCN_CMP_MASK_ATTR_RO(_name) \
	struct device_attribute arm_ccn_pmu_cmp_mask_attr_##_name = \
			__ATTR(_name, S_IRUGO, arm_ccn_pmu_cmp_mask_show, NULL)

static CCN_CMP_MASK_ATTR(0l);
static CCN_CMP_MASK_ATTR(0h);
static CCN_CMP_MASK_ATTR(1l);
static CCN_CMP_MASK_ATTR(1h);
static CCN_CMP_MASK_ATTR(2l);
static CCN_CMP_MASK_ATTR(2h);
static CCN_CMP_MASK_ATTR(3l);
static CCN_CMP_MASK_ATTR(3h);
static CCN_CMP_MASK_ATTR(4l);
static CCN_CMP_MASK_ATTR(4h);
static CCN_CMP_MASK_ATTR(5l);
static CCN_CMP_MASK_ATTR(5h);
static CCN_CMP_MASK_ATTR(6l);
static CCN_CMP_MASK_ATTR(6h);
static CCN_CMP_MASK_ATTR(7l);
static CCN_CMP_MASK_ATTR(7h);
static CCN_CMP_MASK_ATTR_RO(8l);
static CCN_CMP_MASK_ATTR_RO(8h);
static CCN_CMP_MASK_ATTR_RO(9l);
static CCN_CMP_MASK_ATTR_RO(9h);
static CCN_CMP_MASK_ATTR_RO(al);
static CCN_CMP_MASK_ATTR_RO(ah);
static CCN_CMP_MASK_ATTR_RO(bl);
static CCN_CMP_MASK_ATTR_RO(bh);

static struct attribute *arm_ccn_pmu_cmp_mask_attrs[] = {
	&arm_ccn_pmu_cmp_mask_attr_0l.attr, &arm_ccn_pmu_cmp_mask_attr_0h.attr,
	&arm_ccn_pmu_cmp_mask_attr_1l.attr, &arm_ccn_pmu_cmp_mask_attr_1h.attr,
	&arm_ccn_pmu_cmp_mask_attr_2l.attr, &arm_ccn_pmu_cmp_mask_attr_2h.attr,
	&arm_ccn_pmu_cmp_mask_attr_3l.attr, &arm_ccn_pmu_cmp_mask_attr_3h.attr,
	&arm_ccn_pmu_cmp_mask_attr_4l.attr, &arm_ccn_pmu_cmp_mask_attr_4h.attr,
	&arm_ccn_pmu_cmp_mask_attr_5l.attr, &arm_ccn_pmu_cmp_mask_attr_5h.attr,
	&arm_ccn_pmu_cmp_mask_attr_6l.attr, &arm_ccn_pmu_cmp_mask_attr_6h.attr,
	&arm_ccn_pmu_cmp_mask_attr_7l.attr, &arm_ccn_pmu_cmp_mask_attr_7h.attr,
	&arm_ccn_pmu_cmp_mask_attr_8l.attr, &arm_ccn_pmu_cmp_mask_attr_8h.attr,
	&arm_ccn_pmu_cmp_mask_attr_9l.attr, &arm_ccn_pmu_cmp_mask_attr_9h.attr,
	&arm_ccn_pmu_cmp_mask_attr_al.attr, &arm_ccn_pmu_cmp_mask_attr_ah.attr,
	&arm_ccn_pmu_cmp_mask_attr_bl.attr, &arm_ccn_pmu_cmp_mask_attr_bh.attr,
	NULL
};

static struct attribute_group arm_ccn_pmu_cmp_mask_attr_group = {
	.name = "cmp_mask",
	.attrs = arm_ccn_pmu_cmp_mask_attrs,
};


/*
 * Default poll period is 10ms, which is way over the top anyway,
 * as in the worst case scenario (an event every cycle), with 1GHz
 * clocked bus, the smallest, 32 bit counter will overflow in
 * more than 4s.
 */
static unsigned int arm_ccn_pmu_poll_period_us = 10000;
module_param_named(pmu_poll_period_us, arm_ccn_pmu_poll_period_us, uint,
		S_IRUGO | S_IWUSR);

static ktime_t arm_ccn_pmu_timer_period(void)
{
	return ns_to_ktime((u64)arm_ccn_pmu_poll_period_us * 1000);
}


static const struct attribute_group *arm_ccn_pmu_attr_groups[] = {
	&arm_ccn_pmu_events_attr_group,
	&arm_ccn_pmu_format_attr_group,
	&arm_ccn_pmu_cmp_mask_attr_group,
	NULL
};

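/*
 * Claim a free bit, retrying after a lost race: e.g. with a bitmap
 * of 0b0101, two racing callers may both see bit 1 as free, but
 * test_and_set_bit() lets only one of them claim it; the loser
 * loops and takes bit 3 instead.
 */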
static int arm_ccn_pmu_alloc_bit(unsigned long *bitmap, unsigned long size)
{
	int bit;

	do {
		bit = find_first_zero_bit(bitmap, size);
		if (bit >= size)
			return -EAGAIN;
	} while (test_and_set_bit(bit, bitmap));

	return bit;
}

/* All RN-I and RN-D nodes have identical PMUs */
static int arm_ccn_pmu_type_eq(u32 a, u32 b)
{
	if (a == b)
		return 1;

	switch (a) {
	case CCN_TYPE_RNI_1P:
	case CCN_TYPE_RNI_2P:
	case CCN_TYPE_RNI_3P:
	case CCN_TYPE_RND_1P:
	case CCN_TYPE_RND_2P:
	case CCN_TYPE_RND_3P:
		switch (b) {
		case CCN_TYPE_RNI_1P:
		case CCN_TYPE_RNI_2P:
		case CCN_TYPE_RNI_3P:
		case CCN_TYPE_RND_1P:
		case CCN_TYPE_RND_2P:
		case CCN_TYPE_RND_3P:
			return 1;
		}
		break;
	}

	return 0;
}

static int arm_ccn_pmu_event_init(struct perf_event *event)
{
	struct arm_ccn *ccn;
	struct hw_perf_event *hw = &event->hw;
	u32 node_xp, type, event_id;
	int valid;
	struct arm_ccn_component *source;
	int i;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	ccn = pmu_to_arm_ccn(event->pmu);

	if (hw->sample_period) {
		dev_warn(ccn->dev, "Sampling not supported!\n");
		return -EOPNOTSUPP;
	}

	if (has_branch_stack(event) || event->attr.exclude_user ||
			event->attr.exclude_kernel || event->attr.exclude_hv ||
			event->attr.exclude_idle) {
		dev_warn(ccn->dev, "Can't exclude execution levels!\n");
		return -EOPNOTSUPP;
	}

	if (event->cpu < 0) {
		dev_warn(ccn->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	node_xp = CCN_CONFIG_NODE(event->attr.config);
	type = CCN_CONFIG_TYPE(event->attr.config);
	event_id = CCN_CONFIG_EVENT(event->attr.config);

	/* Validate node/xp vs topology */
	switch (type) {
	case CCN_TYPE_XP:
		if (node_xp >= ccn->num_xps) {
			dev_warn(ccn->dev, "Invalid XP ID %d!\n", node_xp);
			return -EINVAL;
		}
		break;
	case CCN_TYPE_CYCLES:
		break;
	default:
		if (node_xp >= ccn->num_nodes) {
			dev_warn(ccn->dev, "Invalid node ID %d!\n", node_xp);
			return -EINVAL;
		}
		if (!arm_ccn_pmu_type_eq(type, ccn->node[node_xp].type)) {
			dev_warn(ccn->dev, "Invalid type 0x%x for node %d!\n",
					type, node_xp);
			return -EINVAL;
		}
		break;
	}

	/* Validate event ID vs available for the type */
	for (i = 0, valid = 0; i < ARRAY_SIZE(arm_ccn_pmu_events) && !valid;
			i++) {
		struct arm_ccn_pmu_event *e = &arm_ccn_pmu_events[i];
		u32 port = CCN_CONFIG_PORT(event->attr.config);
		u32 vc = CCN_CONFIG_VC(event->attr.config);

		if (!arm_ccn_pmu_type_eq(type, e->type))
			continue;
		if (event_id != e->event)
			continue;
		if (e->num_ports && port >= e->num_ports) {
			dev_warn(ccn->dev, "Invalid port %d for node/XP %d!\n",
					port, node_xp);
			return -EINVAL;
		}
		if (e->num_vcs && vc >= e->num_vcs) {
			dev_warn(ccn->dev, "Invalid vc %d for node/XP %d!\n",
					vc, node_xp);
			return -EINVAL;
		}
		valid = 1;
	}
	if (!valid) {
		dev_warn(ccn->dev, "Invalid event 0x%x for node/XP %d!\n",
				event_id, node_xp);
		return -EINVAL;
	}

	/* Watchpoint-based event for a node is actually set on XP */
	if (event_id == CCN_EVENT_WATCHPOINT && type != CCN_TYPE_XP) {
		u32 port;

		type = CCN_TYPE_XP;
		port = arm_ccn_node_to_xp_port(node_xp);
		node_xp = arm_ccn_node_to_xp(node_xp);

		arm_ccn_pmu_config_set(&event->attr.config,
				node_xp, type, port);
	}

	/* Allocate the cycle counter */
	if (type == CCN_TYPE_CYCLES) {
		if (test_and_set_bit(CCN_IDX_PMU_CYCLE_COUNTER,
				ccn->dt.pmu_counters_mask))
			return -EAGAIN;

		hw->idx = CCN_IDX_PMU_CYCLE_COUNTER;
		ccn->dt.pmu_counters[CCN_IDX_PMU_CYCLE_COUNTER].event = event;

		return 0;
	}

	/* Allocate an event counter */
	hw->idx = arm_ccn_pmu_alloc_bit(ccn->dt.pmu_counters_mask,
			CCN_NUM_PMU_EVENT_COUNTERS);
	if (hw->idx < 0) {
		dev_warn(ccn->dev, "No more counters available!\n");
		return -EAGAIN;
	}

	if (type == CCN_TYPE_XP)
		source = &ccn->xp[node_xp];
	else
		source = &ccn->node[node_xp];
	ccn->dt.pmu_counters[hw->idx].source = source;

	/* Allocate an event source or a watchpoint */
	if (type == CCN_TYPE_XP && event_id == CCN_EVENT_WATCHPOINT)
		hw->config_base = arm_ccn_pmu_alloc_bit(source->xp.dt_cmp_mask,
				CCN_NUM_XP_WATCHPOINTS);
	else
		hw->config_base = arm_ccn_pmu_alloc_bit(source->pmu_events_mask,
				CCN_NUM_PMU_EVENTS);
	if (hw->config_base < 0) {
		dev_warn(ccn->dev, "No more event sources/watchpoints on node/XP %d!\n",
				node_xp);
		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
		return -EAGAIN;
	}

	ccn->dt.pmu_counters[hw->idx].event = event;

	return 0;
}

static void arm_ccn_pmu_event_free(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER) {
		clear_bit(CCN_IDX_PMU_CYCLE_COUNTER, ccn->dt.pmu_counters_mask);
	} else {
		struct arm_ccn_component *source =
				ccn->dt.pmu_counters[hw->idx].source;

		if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP &&
				CCN_CONFIG_EVENT(event->attr.config) ==
				CCN_EVENT_WATCHPOINT)
			clear_bit(hw->config_base, source->xp.dt_cmp_mask);
		else
			clear_bit(hw->config_base, source->pmu_events_mask);
		clear_bit(hw->idx, ccn->dt.pmu_counters_mask);
	}

	ccn->dt.pmu_counters[hw->idx].source = NULL;
	ccn->dt.pmu_counters[hw->idx].event = NULL;
}

static u64 arm_ccn_pmu_read_counter(struct arm_ccn *ccn, int idx)
{
	u64 res;

	if (idx == CCN_IDX_PMU_CYCLE_COUNTER) {
#ifdef readq
		res = readq(ccn->dt.base + CCN_DT_PMCCNTR);
#else
		/* 40 bit counter, can do snapshot and read in two parts */
		writel(0x1, ccn->dt.base + CCN_DT_PMSR_REQ);
		while (!(readl(ccn->dt.base + CCN_DT_PMSR) & 0x1))
			;
		writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
		res = readl(ccn->dt.base + CCN_DT_PMCCNTRSR + 4) & 0xff;
		res <<= 32;
		res |= readl(ccn->dt.base + CCN_DT_PMCCNTRSR);
#endif
	} else {
		res = readl(ccn->dt.base + CCN_DT_PMEVCNT(idx));
	}

	return res;
}

static void arm_ccn_pmu_event_update(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u64 prev_count, new_count, mask;

	do {
		prev_count = local64_read(&hw->prev_count);
		new_count = arm_ccn_pmu_read_counter(ccn, hw->idx);
	} while (local64_xchg(&hw->prev_count, new_count) != prev_count);

	mask = (1LLU << (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER ? 40 : 32)) - 1;

	local64_add((new_count - prev_count) & mask, &event->count);
}

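/*
 * Note on the update above: the delta is taken modulo the counter
 * width, so a counter wrap is handled naturally. For a 32 bit
 * event counter, prev_count = 0xfffffff0 and new_count = 0x10
 * gives (new_count - prev_count) & 0xffffffff = 0x20.
 */
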
static void arm_ccn_pmu_xp_dt_config(struct perf_event *event, int enable)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *xp;
	u32 val, dt_cfg;

	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
		xp = &ccn->xp[CCN_CONFIG_XP(event->attr.config)];
	else
		xp = &ccn->xp[arm_ccn_node_to_xp(
				CCN_CONFIG_NODE(event->attr.config))];

	if (enable)
		dt_cfg = hw->event_base;
	else
		dt_cfg = CCN_XP_DT_CONFIG__DT_CFG__PASS_THROUGH;

	spin_lock(&ccn->dt.config_lock);

	val = readl(xp->base + CCN_XP_DT_CONFIG);
	val &= ~(CCN_XP_DT_CONFIG__DT_CFG__MASK <<
			CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx));
	val |= dt_cfg << CCN_XP_DT_CONFIG__DT_CFG__SHIFT(hw->idx);
	writel(val, xp->base + CCN_XP_DT_CONFIG);

	spin_unlock(&ccn->dt.config_lock);
}

static void arm_ccn_pmu_event_start(struct perf_event *event, int flags)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;

	local64_set(&event->hw.prev_count,
			arm_ccn_pmu_read_counter(ccn, hw->idx));
	hw->state = 0;

	if (!ccn->irq_used)
		hrtimer_start(&ccn->dt.hrtimer, arm_ccn_pmu_timer_period(),
				HRTIMER_MODE_REL);

	/* Set the DT bus input, engaging the counter */
	arm_ccn_pmu_xp_dt_config(event, 1);
}

static void arm_ccn_pmu_event_stop(struct perf_event *event, int flags)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u64 timeout;

	/* Disable counting, setting the DT bus to pass-through mode */
	arm_ccn_pmu_xp_dt_config(event, 0);

	if (!ccn->irq_used)
		hrtimer_cancel(&ccn->dt.hrtimer);

	/* Let the DT bus drain */
	timeout = arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) +
			ccn->num_xps;
	while (arm_ccn_pmu_read_counter(ccn, CCN_IDX_PMU_CYCLE_COUNTER) <
			timeout)
		cpu_relax();

	if (flags & PERF_EF_UPDATE)
		arm_ccn_pmu_event_update(event);

	hw->state |= PERF_HES_STOPPED;
}

static void arm_ccn_pmu_xp_watchpoint_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *source =
			ccn->dt.pmu_counters[hw->idx].source;
	unsigned long wp = hw->config_base;
	u32 val;
	u64 cmp_l = event->attr.config1;
	u64 cmp_h = event->attr.config2;
	u64 mask_l = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].l;
	u64 mask_h = ccn->dt.cmp_mask[CCN_CONFIG_MASK(event->attr.config)].h;

	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__WATCHPOINT(wp);

	/* Direction (RX/TX), device (port) & virtual channel */
	val = readl(source->base + CCN_XP_DT_INTERFACE_SEL);
	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__MASK <<
			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp));
	val |= CCN_CONFIG_DIR(event->attr.config) <<
			CCN_XP_DT_INTERFACE_SEL__DT_IO_SEL__SHIFT(wp);
	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__MASK <<
			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp));
	val |= CCN_CONFIG_PORT(event->attr.config) <<
			CCN_XP_DT_INTERFACE_SEL__DT_DEV_SEL__SHIFT(wp);
	val &= ~(CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__MASK <<
			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp));
	val |= CCN_CONFIG_VC(event->attr.config) <<
			CCN_XP_DT_INTERFACE_SEL__DT_VC_SEL__SHIFT(wp);
	writel(val, source->base + CCN_XP_DT_INTERFACE_SEL);

	/* Comparison values */
	writel(cmp_l & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_L(wp));
	writel((cmp_l >> 32) & 0xefffffff,
			source->base + CCN_XP_DT_CMP_VAL_L(wp) + 4);
	writel(cmp_h & 0xffffffff, source->base + CCN_XP_DT_CMP_VAL_H(wp));
	writel((cmp_h >> 32) & 0x0fffffff,
			source->base + CCN_XP_DT_CMP_VAL_H(wp) + 4);

	/* Mask */
	writel(mask_l & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_L(wp));
	writel((mask_l >> 32) & 0xefffffff,
			source->base + CCN_XP_DT_CMP_MASK_L(wp) + 4);
	writel(mask_h & 0xffffffff, source->base + CCN_XP_DT_CMP_MASK_H(wp));
	writel((mask_h >> 32) & 0x0fffffff,
			source->base + CCN_XP_DT_CMP_MASK_H(wp) + 4);
}

static void arm_ccn_pmu_xp_event_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *source =
			ccn->dt.pmu_counters[hw->idx].source;
	u32 val, id;

	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__XP_PMU_EVENT(hw->config_base);

	id = (CCN_CONFIG_VC(event->attr.config) << 4) |
			(CCN_CONFIG_PORT(event->attr.config) << 3) |
			(CCN_CONFIG_EVENT(event->attr.config) << 0);

	val = readl(source->base + CCN_XP_PMU_EVENT_SEL);
	val &= ~(CCN_XP_PMU_EVENT_SEL__ID__MASK <<
			CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
	val |= id << CCN_XP_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
	writel(val, source->base + CCN_XP_PMU_EVENT_SEL);
}

static void arm_ccn_pmu_node_event_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	struct arm_ccn_component *source =
			ccn->dt.pmu_counters[hw->idx].source;
	u32 type = CCN_CONFIG_TYPE(event->attr.config);
	u32 val, port;

	port = arm_ccn_node_to_xp_port(CCN_CONFIG_NODE(event->attr.config));
	hw->event_base = CCN_XP_DT_CONFIG__DT_CFG__DEVICE_PMU_EVENT(port,
			hw->config_base);

	/* These *_event_sel regs should be identical, but let's make sure... */
	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL != CCN_SBAS_PMU_EVENT_SEL);
	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL != CCN_RNI_PMU_EVENT_SEL);
	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(1) !=
			CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1));
	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__SHIFT(1) !=
			CCN_RNI_PMU_EVENT_SEL__ID__SHIFT(1));
	BUILD_BUG_ON(CCN_HNF_PMU_EVENT_SEL__ID__MASK !=
			CCN_SBAS_PMU_EVENT_SEL__ID__MASK);
	BUILD_BUG_ON(CCN_SBAS_PMU_EVENT_SEL__ID__MASK !=
			CCN_RNI_PMU_EVENT_SEL__ID__MASK);
	if (WARN_ON(type != CCN_TYPE_HNF && type != CCN_TYPE_SBAS &&
			!arm_ccn_pmu_type_eq(type, CCN_TYPE_RNI_3P)))
		return;

	/* Set the event id for the pre-allocated counter */
	val = readl(source->base + CCN_HNF_PMU_EVENT_SEL);
	val &= ~(CCN_HNF_PMU_EVENT_SEL__ID__MASK <<
			CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base));
	val |= CCN_CONFIG_EVENT(event->attr.config) <<
			CCN_HNF_PMU_EVENT_SEL__ID__SHIFT(hw->config_base);
	writel(val, source->base + CCN_HNF_PMU_EVENT_SEL);
}

static void arm_ccn_pmu_event_config(struct perf_event *event)
{
	struct arm_ccn *ccn = pmu_to_arm_ccn(event->pmu);
	struct hw_perf_event *hw = &event->hw;
	u32 xp, offset, val;

	/* Cycle counter requires no setup */
	if (hw->idx == CCN_IDX_PMU_CYCLE_COUNTER)
		return;

	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP)
		xp = CCN_CONFIG_XP(event->attr.config);
	else
		xp = arm_ccn_node_to_xp(CCN_CONFIG_NODE(event->attr.config));

	spin_lock(&ccn->dt.config_lock);

	/* Set the DT bus "distance" register */
	offset = (hw->idx / 4) * 4;
	val = readl(ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);
	val &= ~(CCN_DT_ACTIVE_DSM__DSM_ID__MASK <<
			CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4));
	val |= xp << CCN_DT_ACTIVE_DSM__DSM_ID__SHIFT(hw->idx % 4);
	writel(val, ccn->dt.base + CCN_DT_ACTIVE_DSM + offset);

	if (CCN_CONFIG_TYPE(event->attr.config) == CCN_TYPE_XP) {
		if (CCN_CONFIG_EVENT(event->attr.config) ==
				CCN_EVENT_WATCHPOINT)
			arm_ccn_pmu_xp_watchpoint_config(event);
		else
			arm_ccn_pmu_xp_event_config(event);
	} else {
		arm_ccn_pmu_node_event_config(event);
	}

	spin_unlock(&ccn->dt.config_lock);
}

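/*
 * Example of the ACTIVE_DSM packing used above: four 8 bit XP
 * ("distance") IDs are packed per 32 bit register, so counter 5 is
 * programmed in the second register (offset (5 / 4) * 4 = 4),
 * bits 15:8 ((5 % 4) * 8 = 8).
 */
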
static int arm_ccn_pmu_event_add(struct perf_event *event, int flags)
{
	struct hw_perf_event *hw = &event->hw;

	arm_ccn_pmu_event_config(event);

	hw->state = PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		arm_ccn_pmu_event_start(event, PERF_EF_UPDATE);

	return 0;
}

static void arm_ccn_pmu_event_del(struct perf_event *event, int flags)
{
	arm_ccn_pmu_event_stop(event, PERF_EF_UPDATE);

	arm_ccn_pmu_event_free(event);
}

static void arm_ccn_pmu_event_read(struct perf_event *event)
{
	arm_ccn_pmu_event_update(event);
}

static irqreturn_t arm_ccn_pmu_overflow_handler(struct arm_ccn_dt *dt)
{
	u32 pmovsr = readl(dt->base + CCN_DT_PMOVSR);
	int idx;

	if (!pmovsr)
		return IRQ_NONE;

	writel(pmovsr, dt->base + CCN_DT_PMOVSR_CLR);

	BUILD_BUG_ON(CCN_IDX_PMU_CYCLE_COUNTER != CCN_NUM_PMU_EVENT_COUNTERS);

	for (idx = 0; idx < CCN_NUM_PMU_EVENT_COUNTERS + 1; idx++) {
		struct perf_event *event = dt->pmu_counters[idx].event;
		int overflowed = pmovsr & BIT(idx);

		WARN_ON_ONCE(overflowed && !event);

		if (!event || !overflowed)
			continue;

		arm_ccn_pmu_event_update(event);
	}

	return IRQ_HANDLED;
}

static enum hrtimer_restart arm_ccn_pmu_timer_handler(struct hrtimer *hrtimer)
{
	struct arm_ccn_dt *dt = container_of(hrtimer, struct arm_ccn_dt,
			hrtimer);
	unsigned long flags;

	local_irq_save(flags);
	arm_ccn_pmu_overflow_handler(dt);
	local_irq_restore(flags);

	hrtimer_forward_now(hrtimer, arm_ccn_pmu_timer_period());
	return HRTIMER_RESTART;
}


static DEFINE_IDA(arm_ccn_pmu_ida);

static int arm_ccn_pmu_init(struct arm_ccn *ccn)
{
	int i;
	char *name;

	/* Initialize DT subsystem */
	ccn->dt.base = ccn->base + CCN_REGION_SIZE;
	spin_lock_init(&ccn->dt.config_lock);
	writel(CCN_DT_CTL__DT_EN, ccn->dt.base + CCN_DT_CTL);
	writel(CCN_DT_PMCR__OVFL_INTR_EN | CCN_DT_PMCR__PMU_EN,
			ccn->dt.base + CCN_DT_PMCR);
	writel(0x1, ccn->dt.base + CCN_DT_PMSR_CLR);
	for (i = 0; i < ccn->num_xps; i++) {
		writel(0, ccn->xp[i].base + CCN_XP_DT_CONFIG);
		writel((CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(0)) |
				(CCN_XP_DT_CONTROL__WP_ARM_SEL__ALWAYS <<
				CCN_XP_DT_CONTROL__WP_ARM_SEL__SHIFT(1)) |
				CCN_XP_DT_CONTROL__DT_ENABLE,
				ccn->xp[i].base + CCN_XP_DT_CONTROL);
	}
	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].l = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_ANY].h = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].l = 0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_EXACT].h = 0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].l = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_ORDER].h = ~(0x1 << 15);
	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].l = ~0;
	ccn->dt.cmp_mask[CCN_IDX_MASK_OPCODE].h = ~(0x1f << 9);

	/* Get a convenient /sys/event_source/devices/ name */
	ccn->dt.id = ida_simple_get(&arm_ccn_pmu_ida, 0, 0, GFP_KERNEL);
	if (ccn->dt.id == 0) {
		name = "ccn";
	} else {
		int len = snprintf(NULL, 0, "ccn_%d", ccn->dt.id);

		name = devm_kzalloc(ccn->dev, len + 1, GFP_KERNEL);
		snprintf(name, len + 1, "ccn_%d", ccn->dt.id);
	}

	/* Perf driver registration */
	ccn->dt.pmu = (struct pmu) {
		.attr_groups = arm_ccn_pmu_attr_groups,
		.task_ctx_nr = perf_invalid_context,
		.event_init = arm_ccn_pmu_event_init,
		.add = arm_ccn_pmu_event_add,
		.del = arm_ccn_pmu_event_del,
		.start = arm_ccn_pmu_event_start,
		.stop = arm_ccn_pmu_event_stop,
		.read = arm_ccn_pmu_event_read,
	};

	/* No overflow interrupt? Have to use a timer instead. */
	if (!ccn->irq_used) {
		dev_info(ccn->dev, "No access to interrupts, using timer.\n");
		hrtimer_init(&ccn->dt.hrtimer, CLOCK_MONOTONIC,
				HRTIMER_MODE_REL);
		ccn->dt.hrtimer.function = arm_ccn_pmu_timer_handler;
	}

	return perf_pmu_register(&ccn->dt.pmu, name, -1);
}

static void arm_ccn_pmu_cleanup(struct arm_ccn *ccn)
{
	int i;

	for (i = 0; i < ccn->num_xps; i++)
		writel(0, ccn->xp[i].base + CCN_XP_DT_CONTROL);
	writel(0, ccn->dt.base + CCN_DT_PMCR);
	perf_pmu_unregister(&ccn->dt.pmu);
	ida_simple_remove(&arm_ccn_pmu_ida, ccn->dt.id);
}


static int arm_ccn_for_each_valid_region(struct arm_ccn *ccn,
		int (*callback)(struct arm_ccn *ccn, int region,
		void __iomem *base, u32 type, u32 id))
{
	int region;

	for (region = 0; region < CCN_NUM_REGIONS; region++) {
		u32 val, type, id;
		void __iomem *base;
		int err;

		val = readl(ccn->base + CCN_MN_OLY_COMP_LIST_63_0 +
				4 * (region / 32));
		if (!(val & (1 << (region % 32))))
			continue;

		base = ccn->base + region * CCN_REGION_SIZE;
		val = readl(base + CCN_ALL_OLY_ID);
		type = (val >> CCN_ALL_OLY_ID__OLY_ID__SHIFT) &
				CCN_ALL_OLY_ID__OLY_ID__MASK;
		id = (val >> CCN_ALL_OLY_ID__NODE_ID__SHIFT) &
				CCN_ALL_OLY_ID__NODE_ID__MASK;

		err = callback(ccn, region, base, type, id);
		if (err)
			return err;
	}

	return 0;
}

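/*
 * For example, a component in region 37 is flagged by bit 5 (37 % 32)
 * of the second component list word (offset 4 * (37 / 32) = 4), and
 * its registers start 37 * 64kB (0x250000) into the configuration
 * space.
 */
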
static int arm_ccn_get_nodes_num(struct arm_ccn *ccn, int region,
		void __iomem *base, u32 type, u32 id)
{

	if (type == CCN_TYPE_XP && id >= ccn->num_xps)
		ccn->num_xps = id + 1;
	else if (id >= ccn->num_nodes)
		ccn->num_nodes = id + 1;

	return 0;
}

static int arm_ccn_init_nodes(struct arm_ccn *ccn, int region,
		void __iomem *base, u32 type, u32 id)
{
	struct arm_ccn_component *component;

	dev_dbg(ccn->dev, "Region %d: id=%u, type=0x%02x\n", region, id, type);

	switch (type) {
	case CCN_TYPE_MN:
	case CCN_TYPE_DT:
		return 0;
	case CCN_TYPE_XP:
		component = &ccn->xp[id];
		break;
	case CCN_TYPE_SBSX:
		ccn->sbsx_present = 1;
		component = &ccn->node[id];
		break;
	case CCN_TYPE_SBAS:
		ccn->sbas_present = 1;
		/* Fall-through */
	default:
		component = &ccn->node[id];
		break;
	}

	component->base = base;
	component->type = type;

	return 0;
}


static irqreturn_t arm_ccn_error_handler(struct arm_ccn *ccn,
		const u32 *err_sig_val)
{
	/* This should be really handled by firmware... */
	dev_err(ccn->dev, "Error reported in %08x%08x%08x%08x%08x%08x.\n",
			err_sig_val[5], err_sig_val[4], err_sig_val[3],
			err_sig_val[2], err_sig_val[1], err_sig_val[0]);
	dev_err(ccn->dev, "Disabling interrupt generation for all errors.\n");
	writel(CCN_MN_ERRINT_STATUS__ALL_ERRORS__DISABLE,
			ccn->base + CCN_MN_ERRINT_STATUS);

	return IRQ_HANDLED;
}


static irqreturn_t arm_ccn_irq_handler(int irq, void *dev_id)
{
	irqreturn_t res = IRQ_NONE;
	struct arm_ccn *ccn = dev_id;
	u32 err_sig_val[6];
	u32 err_or;
	int i;

	/* PMU overflow is a special case */
	err_or = err_sig_val[0] = readl(ccn->base + CCN_MN_ERR_SIG_VAL_63_0);
	if (err_or & CCN_MN_ERR_SIG_VAL_63_0__DT) {
		err_or &= ~CCN_MN_ERR_SIG_VAL_63_0__DT;
		res = arm_ccn_pmu_overflow_handler(&ccn->dt);
	}

	/* Have to read all err_sig_vals to clear them */
	for (i = 1; i < ARRAY_SIZE(err_sig_val); i++) {
		err_sig_val[i] = readl(ccn->base +
				CCN_MN_ERR_SIG_VAL_63_0 + i * 4);
		err_or |= err_sig_val[i];
	}
	if (err_or)
		res |= arm_ccn_error_handler(ccn, err_sig_val);

	if (res != IRQ_NONE)
		writel(CCN_MN_ERRINT_STATUS__INTREQ__DEASSERT,
				ccn->base + CCN_MN_ERRINT_STATUS);
1276
1277 return res;
1278}
1279
1280
1281static int arm_ccn_probe(struct platform_device *pdev)
1282{
1283 struct arm_ccn *ccn;
1284 struct resource *res;
1285 int err;
1286
1287 ccn = devm_kzalloc(&pdev->dev, sizeof(*ccn), GFP_KERNEL);
1288 if (!ccn)
1289 return -ENOMEM;
1290 ccn->dev = &pdev->dev;
1291 platform_set_drvdata(pdev, ccn);
1292
1293 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1294 if (!res)
1295 return -EINVAL;
1296
1297 if (!devm_request_mem_region(ccn->dev, res->start,
1298 resource_size(res), pdev->name))
1299 return -EBUSY;
1300
1301 ccn->base = devm_ioremap(ccn->dev, res->start,
1302 resource_size(res));
1303 if (!ccn->base)
1304 return -EFAULT;
1305
1306 res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
1307 if (!res)
1308 return -EINVAL;
1309
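	/*
	 * The "disable" value written below sets the "disabled" status
	 * bit; if that bit reads back as set, this agent is able to
	 * write ERRINT_STATUS (and so to acknowledge the interrupt).
	 * Otherwise the driver falls back to hrtimer-based polling.
	 */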
	/* Check if we can use the interrupt */
	writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLE,
			ccn->base + CCN_MN_ERRINT_STATUS);
	if (readl(ccn->base + CCN_MN_ERRINT_STATUS) &
			CCN_MN_ERRINT_STATUS__PMU_EVENTS__DISABLED) {
		/* Can set 'disable' bits, so can acknowledge interrupts */
		writel(CCN_MN_ERRINT_STATUS__PMU_EVENTS__ENABLE,
				ccn->base + CCN_MN_ERRINT_STATUS);
		err = devm_request_irq(ccn->dev, res->start,
				arm_ccn_irq_handler, 0, dev_name(ccn->dev),
				ccn);
		if (err)
			return err;

		ccn->irq_used = 1;
	}


	/* Build topology */

	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_get_nodes_num);
	if (err)
		return err;

	ccn->node = devm_kzalloc(ccn->dev, sizeof(*ccn->node) * ccn->num_nodes,
			GFP_KERNEL);
	ccn->xp = devm_kzalloc(ccn->dev, sizeof(*ccn->xp) * ccn->num_xps,
			GFP_KERNEL);
	if (!ccn->node || !ccn->xp)
		return -ENOMEM;

	err = arm_ccn_for_each_valid_region(ccn, arm_ccn_init_nodes);
	if (err)
		return err;

	return arm_ccn_pmu_init(ccn);
}

static int arm_ccn_remove(struct platform_device *pdev)
{
	struct arm_ccn *ccn = platform_get_drvdata(pdev);

	arm_ccn_pmu_cleanup(ccn);

	return 0;
}

static const struct of_device_id arm_ccn_match[] = {
	{ .compatible = "arm,ccn-504", },
	{},
};

static struct platform_driver arm_ccn_driver = {
	.driver = {
		.name = "arm-ccn",
		.of_match_table = arm_ccn_match,
	},
	.probe = arm_ccn_probe,
	.remove = arm_ccn_remove,
};

static int __init arm_ccn_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arm_ccn_pmu_events); i++)
		arm_ccn_pmu_events_attrs[i] = &arm_ccn_pmu_events[i].attr.attr;

	return platform_driver_register(&arm_ccn_driver);
}

static void __exit arm_ccn_exit(void)
{
	platform_driver_unregister(&arm_ccn_driver);
}

module_init(arm_ccn_init);
module_exit(arm_ccn_exit);

MODULE_AUTHOR("Pawel Moll <pawel.moll@arm.com>");
MODULE_LICENSE("GPL");