author		Kulkarni, Ganapatrao <Ganapatrao.Kulkarni@cavium.com>	2018-12-06 06:51:31 -0500
committer	Will Deacon <will.deacon@arm.com>			2018-12-06 08:03:17 -0500
commit		69c32972d59388c041268e8206e8eb1acff29b9a (patch)
tree		1d1e477db9250533e5f6cb26a9cb4c74097ff487
parent		d6310a3f3396e004bdb7a76787a2a3bbc643d0b7 (diff)
drivers/perf: Add Cavium ThunderX2 SoC UNCORE PMU driver
This patch adds a perf driver for the UNCORE PMU devices: the DDR4
Memory Controller (DMC) and the Level 3 Cache (L3C). Each PMU supports
up to 4 counters. All counters lack an overflow interrupt and are
sampled periodically.
Reviewed-by: Suzuki K Poulose <suzuki.poulose@arm.com>
Signed-off-by: Ganapatrao Kulkarni <ganapatrao.kulkarni@cavium.com>
[will: consistent enum cpuhp_state naming]
Signed-off-by: Will Deacon <will.deacon@arm.com>
 drivers/perf/Kconfig         |   9 +
 drivers/perf/Makefile        |   1 +
 drivers/perf/thunderx2_pmu.c | 861 +++++++++++++++++++++++++++++++++++++++++++
 include/linux/cpuhotplug.h   |   1 +
 4 files changed, 872 insertions(+), 0 deletions(-)
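
Because the counters are only 32 bits wide and have no overflow interrupt, the driver samples them from an hrtimer every 2 seconds and folds each sample into the event count using rollover-safe arithmetic. A minimal userspace sketch of that delta calculation, mirroring tx2_uncore_event_update() in the patch below (the sample values here are hypothetical):

#include <stdint.h>
#include <stdio.h>

/* Rollover-safe delta between two successive 32-bit samples, as in
 * tx2_uncore_event_update(): ((1UL << 32) - prev) + new, truncated to
 * 32 bits. 1ULL is used here so the sketch is also safe on 32-bit
 * longs; the kernel code runs on arm64, where long is 64 bits.
 */
static uint32_t counter_delta(uint32_t prev, uint32_t cur)
{
	return (uint32_t)(((1ULL << 32) - prev) + cur);
}

int main(void)
{
	/* counter wrapped between reads: 16 counts before, 16 after */
	printf("delta = %u\n", counter_delta(0xFFFFFFF0u, 0x10u));	/* 32 */
	/* no wrap: reduces to the plain difference */
	printf("delta = %u\n", counter_delta(10, 30));			/* 20 */
	return 0;
}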
diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig
index 08ebaf7cca8b..af9bc178495d 100644
--- a/drivers/perf/Kconfig
+++ b/drivers/perf/Kconfig
@@ -87,6 +87,15 @@ config QCOM_L3_PMU
 	  Adds the L3 cache PMU into the perf events subsystem for
 	  monitoring L3 cache events.
 
+config THUNDERX2_PMU
+	tristate "Cavium ThunderX2 SoC PMU UNCORE"
+	depends on ARCH_THUNDER2 && ARM64 && ACPI && NUMA
+	default m
+	help
+	   Provides support for ThunderX2 UNCORE events.
+	   The SoC has PMU support in its L3 cache controller (L3C) and
+	   in the DDR4 Memory Controller (DMC).
+
 config XGENE_PMU
 	depends on ARCH_XGENE
 	bool "APM X-Gene SoC PMU"
diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile
index b3902bd37d53..909f27fd9db3 100644
--- a/drivers/perf/Makefile
+++ b/drivers/perf/Makefile
@@ -7,5 +7,6 @@ obj-$(CONFIG_ARM_PMU_ACPI) += arm_pmu_acpi.o
 obj-$(CONFIG_HISI_PMU) += hisilicon/
 obj-$(CONFIG_QCOM_L2_PMU) += qcom_l2_pmu.o
 obj-$(CONFIG_QCOM_L3_PMU) += qcom_l3_pmu.o
+obj-$(CONFIG_THUNDERX2_PMU) += thunderx2_pmu.o
 obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o
 obj-$(CONFIG_ARM_SPE_PMU) += arm_spe_pmu.o
diff --git a/drivers/perf/thunderx2_pmu.c b/drivers/perf/thunderx2_pmu.c
new file mode 100644
index 000000000000..c9a1701d3e54
--- /dev/null
+++ b/drivers/perf/thunderx2_pmu.c
@@ -0,0 +1,861 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * CAVIUM THUNDERX2 SoC PMU UNCORE
+ * Copyright (C) 2018 Cavium Inc.
+ * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com>
+ */
+
+#include <linux/acpi.h>
+#include <linux/cpuhotplug.h>
+#include <linux/perf_event.h>
+#include <linux/platform_device.h>
+
+/* Each ThunderX2 (TX2) socket has an L3C and a DMC UNCORE PMU device.
+ * Each UNCORE PMU device consists of 4 independent programmable counters.
+ * Counters are 32 bit and do not support overflow interrupts,
+ * so they must be sampled before they overflow (i.e., every 2 seconds).
+ */
+
+#define TX2_PMU_MAX_COUNTERS		4
+#define TX2_PMU_DMC_CHANNELS		8
+#define TX2_PMU_L3_TILES		16
+
+#define TX2_PMU_HRTIMER_INTERVAL	(2 * NSEC_PER_SEC)
+#define GET_EVENTID(ev)			((ev->hw.config) & 0x1f)
+#define GET_COUNTERID(ev)		((ev->hw.idx) & 0x3)
+/* 1 byte per counter (4 counters).
+ * Event id is encoded in bits [5:1] of each byte.
+ */
+#define DMC_EVENT_CFG(idx, val)		((val) << (((idx) * 8) + 1))
+
+#define L3C_COUNTER_CTL			0xA8
+#define L3C_COUNTER_DATA		0xAC
+#define DMC_COUNTER_CTL			0x234
+#define DMC_COUNTER_DATA		0x240
+
+/* L3C event IDs */
+#define L3_EVENT_READ_REQ		0xD
+#define L3_EVENT_WRITEBACK_REQ		0xE
+#define L3_EVENT_INV_N_WRITE_REQ	0xF
+#define L3_EVENT_INV_REQ		0x10
+#define L3_EVENT_EVICT_REQ		0x13
+#define L3_EVENT_INV_N_WRITE_HIT	0x14
+#define L3_EVENT_INV_HIT		0x15
+#define L3_EVENT_READ_HIT		0x17
+#define L3_EVENT_MAX			0x18
+
+/* DMC event IDs */
+#define DMC_EVENT_COUNT_CYCLES		0x1
+#define DMC_EVENT_WRITE_TXNS		0xB
+#define DMC_EVENT_DATA_TRANSFERS	0xD
+#define DMC_EVENT_READ_TXNS		0xF
+#define DMC_EVENT_MAX			0x10
+
+enum tx2_uncore_type {
+	PMU_TYPE_L3C,
+	PMU_TYPE_DMC,
+	PMU_TYPE_INVALID,
+};
+
+/*
+ * The PMU on each socket has 2 uncore devices (DMC and L3C);
+ * each device has 4 counters.
+ */
+struct tx2_uncore_pmu {
+	struct hlist_node hpnode;
+	struct list_head  entry;
+	struct pmu pmu;
+	char *name;
+	int node;
+	int cpu;
+	u32 max_counters;
+	u32 prorate_factor;
+	u32 max_events;
+	u64 hrtimer_interval;
+	void __iomem *base;
+	DECLARE_BITMAP(active_counters, TX2_PMU_MAX_COUNTERS);
+	struct perf_event *events[TX2_PMU_MAX_COUNTERS];
+	struct device *dev;
+	struct hrtimer hrtimer;
+	const struct attribute_group **attr_groups;
+	enum tx2_uncore_type type;
+	void (*init_cntr_base)(struct perf_event *event,
+			struct tx2_uncore_pmu *tx2_pmu);
+	void (*stop_event)(struct perf_event *event);
+	void (*start_event)(struct perf_event *event, int flags);
+};
+
+static LIST_HEAD(tx2_pmus);
+
+static inline struct tx2_uncore_pmu *pmu_to_tx2_pmu(struct pmu *pmu)
+{
+	return container_of(pmu, struct tx2_uncore_pmu, pmu);
+}
+
+PMU_FORMAT_ATTR(event, "config:0-4");
+
+static struct attribute *l3c_pmu_format_attrs[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static struct attribute *dmc_pmu_format_attrs[] = {
+	&format_attr_event.attr,
+	NULL,
+};
+
+static const struct attribute_group l3c_pmu_format_attr_group = {
+	.name = "format",
+	.attrs = l3c_pmu_format_attrs,
+};
+
+static const struct attribute_group dmc_pmu_format_attr_group = {
+	.name = "format",
+	.attrs = dmc_pmu_format_attrs,
+};
+
+/*
+ * sysfs event attributes
+ */
+static ssize_t tx2_pmu_event_show(struct device *dev,
+				struct device_attribute *attr, char *buf)
+{
+	struct dev_ext_attribute *eattr;
+
+	eattr = container_of(attr, struct dev_ext_attribute, attr);
+	return sprintf(buf, "event=0x%lx\n", (unsigned long) eattr->var);
+}
+
+#define TX2_EVENT_ATTR(name, config) \
+	PMU_EVENT_ATTR(name, tx2_pmu_event_attr_##name, \
+			config, tx2_pmu_event_show)
+
+TX2_EVENT_ATTR(read_request, L3_EVENT_READ_REQ);
+TX2_EVENT_ATTR(writeback_request, L3_EVENT_WRITEBACK_REQ);
+TX2_EVENT_ATTR(inv_nwrite_request, L3_EVENT_INV_N_WRITE_REQ);
+TX2_EVENT_ATTR(inv_request, L3_EVENT_INV_REQ);
+TX2_EVENT_ATTR(evict_request, L3_EVENT_EVICT_REQ);
+TX2_EVENT_ATTR(inv_nwrite_hit, L3_EVENT_INV_N_WRITE_HIT);
+TX2_EVENT_ATTR(inv_hit, L3_EVENT_INV_HIT);
+TX2_EVENT_ATTR(read_hit, L3_EVENT_READ_HIT);
+
+static struct attribute *l3c_pmu_events_attrs[] = {
+	&tx2_pmu_event_attr_read_request.attr.attr,
+	&tx2_pmu_event_attr_writeback_request.attr.attr,
+	&tx2_pmu_event_attr_inv_nwrite_request.attr.attr,
+	&tx2_pmu_event_attr_inv_request.attr.attr,
+	&tx2_pmu_event_attr_evict_request.attr.attr,
+	&tx2_pmu_event_attr_inv_nwrite_hit.attr.attr,
+	&tx2_pmu_event_attr_inv_hit.attr.attr,
+	&tx2_pmu_event_attr_read_hit.attr.attr,
+	NULL,
+};
+
+TX2_EVENT_ATTR(cnt_cycles, DMC_EVENT_COUNT_CYCLES);
+TX2_EVENT_ATTR(write_txns, DMC_EVENT_WRITE_TXNS);
+TX2_EVENT_ATTR(data_transfers, DMC_EVENT_DATA_TRANSFERS);
+TX2_EVENT_ATTR(read_txns, DMC_EVENT_READ_TXNS);
+
+static struct attribute *dmc_pmu_events_attrs[] = {
+	&tx2_pmu_event_attr_cnt_cycles.attr.attr,
+	&tx2_pmu_event_attr_write_txns.attr.attr,
+	&tx2_pmu_event_attr_data_transfers.attr.attr,
+	&tx2_pmu_event_attr_read_txns.attr.attr,
+	NULL,
+};
+
+static const struct attribute_group l3c_pmu_events_attr_group = {
+	.name = "events",
+	.attrs = l3c_pmu_events_attrs,
+};
+
+static const struct attribute_group dmc_pmu_events_attr_group = {
+	.name = "events",
+	.attrs = dmc_pmu_events_attrs,
+};
+
+/*
+ * sysfs cpumask attributes
+ */
+static ssize_t cpumask_show(struct device *dev, struct device_attribute *attr,
+		char *buf)
+{
+	struct tx2_uncore_pmu *tx2_pmu;
+
+	tx2_pmu = pmu_to_tx2_pmu(dev_get_drvdata(dev));
+	return cpumap_print_to_pagebuf(true, buf, cpumask_of(tx2_pmu->cpu));
+}
+static DEVICE_ATTR_RO(cpumask);
+
+static struct attribute *tx2_pmu_cpumask_attrs[] = {
+	&dev_attr_cpumask.attr,
+	NULL,
+};
+
+static const struct attribute_group pmu_cpumask_attr_group = {
+	.attrs = tx2_pmu_cpumask_attrs,
+};
+
+/*
+ * Per PMU device attribute groups
+ */
+static const struct attribute_group *l3c_pmu_attr_groups[] = {
+	&l3c_pmu_format_attr_group,
+	&pmu_cpumask_attr_group,
+	&l3c_pmu_events_attr_group,
+	NULL
+};
+
+static const struct attribute_group *dmc_pmu_attr_groups[] = {
+	&dmc_pmu_format_attr_group,
+	&pmu_cpumask_attr_group,
+	&dmc_pmu_events_attr_group,
+	NULL
+};
+
+static inline u32 reg_readl(unsigned long addr)
+{
+	return readl((void __iomem *)addr);
+}
+
+static inline void reg_writel(u32 val, unsigned long addr)
+{
+	writel(val, (void __iomem *)addr);
+}
+
+static int alloc_counter(struct tx2_uncore_pmu *tx2_pmu)
+{
+	int counter;
+
+	counter = find_first_zero_bit(tx2_pmu->active_counters,
+				tx2_pmu->max_counters);
+	if (counter == tx2_pmu->max_counters)
+		return -ENOSPC;
+
+	set_bit(counter, tx2_pmu->active_counters);
+	return counter;
+}
+
+static inline void free_counter(struct tx2_uncore_pmu *tx2_pmu, int counter)
+{
+	clear_bit(counter, tx2_pmu->active_counters);
+}
+
+static void init_cntr_base_l3c(struct perf_event *event,
+		struct tx2_uncore_pmu *tx2_pmu)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	/* counter ctrl/data reg offset at 8 */
+	hwc->config_base = (unsigned long)tx2_pmu->base
+		+ L3C_COUNTER_CTL + (8 * GET_COUNTERID(event));
+	hwc->event_base = (unsigned long)tx2_pmu->base
+		+ L3C_COUNTER_DATA + (8 * GET_COUNTERID(event));
+}
+
+static void init_cntr_base_dmc(struct perf_event *event,
+		struct tx2_uncore_pmu *tx2_pmu)
+{
+	struct hw_perf_event *hwc = &event->hw;
+
+	hwc->config_base = (unsigned long)tx2_pmu->base
+		+ DMC_COUNTER_CTL;
+	/* counter data reg offset at 0xc */
+	hwc->event_base = (unsigned long)tx2_pmu->base
+		+ DMC_COUNTER_DATA + (0xc * GET_COUNTERID(event));
+}
+
+static void uncore_start_event_l3c(struct perf_event *event, int flags)
+{
+	u32 val;
+	struct hw_perf_event *hwc = &event->hw;
+
+	/* event id encoded in bits [07:03] */
+	val = GET_EVENTID(event) << 3;
+	reg_writel(val, hwc->config_base);
+	local64_set(&hwc->prev_count, 0);
+	reg_writel(0, hwc->event_base);
+}
+
+static inline void uncore_stop_event_l3c(struct perf_event *event)
+{
+	reg_writel(0, event->hw.config_base);
+}
+
+static void uncore_start_event_dmc(struct perf_event *event, int flags)
+{
+	u32 val;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = GET_COUNTERID(event);
+	int event_id = GET_EVENTID(event);
+
+	/* enable and start counters.
+	 * 8 bits for each counter; bits [05:01] of each counter's byte
+	 * select the event type.
+	 */
+	val = reg_readl(hwc->config_base);
+	val &= ~DMC_EVENT_CFG(idx, 0x1f);
+	val |= DMC_EVENT_CFG(idx, event_id);
+	reg_writel(val, hwc->config_base);
+	local64_set(&hwc->prev_count, 0);
+	reg_writel(0, hwc->event_base);
+}
+
+static void uncore_stop_event_dmc(struct perf_event *event)
+{
+	u32 val;
+	struct hw_perf_event *hwc = &event->hw;
+	int idx = GET_COUNTERID(event);
+
+	/* clear event type (bits [05:01]) to stop the counter */
+	val = reg_readl(hwc->config_base);
+	val &= ~DMC_EVENT_CFG(idx, 0x1f);
+	reg_writel(val, hwc->config_base);
+}
+
+static void tx2_uncore_event_update(struct perf_event *event)
+{
+	s64 prev, delta, new = 0;
+	struct hw_perf_event *hwc = &event->hw;
+	struct tx2_uncore_pmu *tx2_pmu;
+	enum tx2_uncore_type type;
+	u32 prorate_factor;
+
+	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+	type = tx2_pmu->type;
+	prorate_factor = tx2_pmu->prorate_factor;
+
+	new = reg_readl(hwc->event_base);
+	prev = local64_xchg(&hwc->prev_count, new);
+
+	/* handles rollover of 32 bit counter */
+	delta = (u32)(((1UL << 32) - prev) + new);
+
+	/* DMC event data_transfers granularity is 16 bytes; convert to 64-byte units */
+	if (type == PMU_TYPE_DMC &&
+		GET_EVENTID(event) == DMC_EVENT_DATA_TRANSFERS)
+		delta = delta/4;
+
+	/* L3C and DMC have 16 and 8 interleaved channels respectively.
+	 * The sampled value is for channel 0, so it is multiplied by
+	 * prorate_factor to estimate the count for the whole device.
+	 */
+	local64_add(delta * prorate_factor, &event->count);
+}
+
+static enum tx2_uncore_type get_tx2_pmu_type(struct acpi_device *adev)
+{
+	int i = 0;
+	struct acpi_tx2_pmu_device {
+		__u8 id[ACPI_ID_LEN];
+		enum tx2_uncore_type type;
+	} devices[] = {
+		{"CAV901D", PMU_TYPE_L3C},
+		{"CAV901F", PMU_TYPE_DMC},
+		{"", PMU_TYPE_INVALID}
+	};
+
+	while (devices[i].type != PMU_TYPE_INVALID) {
+		if (!strcmp(acpi_device_hid(adev), devices[i].id))
+			break;
+		i++;
+	}
+
+	return devices[i].type;
+}
+
+static bool tx2_uncore_validate_event(struct pmu *pmu,
+				  struct perf_event *event, int *counters)
+{
+	if (is_software_event(event))
+		return true;
+	/* Reject groups spanning multiple HW PMUs. */
+	if (event->pmu != pmu)
+		return false;
+
+	*counters = *counters + 1;
+	return true;
+}
+
+/*
+ * Make sure the group of events can be scheduled at once
+ * on the PMU.
+ */
+static bool tx2_uncore_validate_event_group(struct perf_event *event)
+{
+	struct perf_event *sibling, *leader = event->group_leader;
+	int counters = 0;
+
+	if (event->group_leader == event)
+		return true;
+
+	if (!tx2_uncore_validate_event(event->pmu, leader, &counters))
+		return false;
+
+	for_each_sibling_event(sibling, leader) {
+		if (!tx2_uncore_validate_event(event->pmu, sibling, &counters))
+			return false;
+	}
+
+	if (!tx2_uncore_validate_event(event->pmu, event, &counters))
+		return false;
+
+	/*
+	 * If the group requires more counters than the HW has,
+	 * it cannot ever be scheduled.
+	 */
+	return counters <= TX2_PMU_MAX_COUNTERS;
+}
+
+
+static int tx2_uncore_event_init(struct perf_event *event)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct tx2_uncore_pmu *tx2_pmu;
+
+	/* Test the event attr type for PMU enumeration */
+	if (event->attr.type != event->pmu->type)
+		return -ENOENT;
+
+	/*
+	 * SOC PMU counters are shared across all cores.
+	 * Therefore, it does not support per-process mode.
+	 * Also, it does not support event sampling mode.
+	 */
+	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
+		return -EINVAL;
+
+	/* We have no filtering of any kind */
+	if (event->attr.exclude_user	||
+	    event->attr.exclude_kernel	||
+	    event->attr.exclude_hv	||
+	    event->attr.exclude_idle	||
+	    event->attr.exclude_host	||
+	    event->attr.exclude_guest)
+		return -EINVAL;
+
+	if (event->cpu < 0)
+		return -EINVAL;
+
+	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+	if (tx2_pmu->cpu >= nr_cpu_ids)
+		return -EINVAL;
+	event->cpu = tx2_pmu->cpu;
+
+	if (event->attr.config >= tx2_pmu->max_events)
+		return -EINVAL;
+
+	/* store event id */
+	hwc->config = event->attr.config;
+
+	/* Validate the group */
+	if (!tx2_uncore_validate_event_group(event))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void tx2_uncore_event_start(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct tx2_uncore_pmu *tx2_pmu;
+
+	hwc->state = 0;
+	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+
+	tx2_pmu->start_event(event, flags);
+	perf_event_update_userpage(event);
+
+	/* Start timer for first event */
+	if (bitmap_weight(tx2_pmu->active_counters,
+				tx2_pmu->max_counters) == 1) {
+		hrtimer_start(&tx2_pmu->hrtimer,
+			ns_to_ktime(tx2_pmu->hrtimer_interval),
+			HRTIMER_MODE_REL_PINNED);
+	}
+}
+
+static void tx2_uncore_event_stop(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct tx2_uncore_pmu *tx2_pmu;
+
+	if (hwc->state & PERF_HES_UPTODATE)
+		return;
+
+	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+	tx2_pmu->stop_event(event);
+	WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+	hwc->state |= PERF_HES_STOPPED;
+	if (flags & PERF_EF_UPDATE) {
+		tx2_uncore_event_update(event);
+		hwc->state |= PERF_HES_UPTODATE;
+	}
+}
+
+static int tx2_uncore_event_add(struct perf_event *event, int flags)
+{
+	struct hw_perf_event *hwc = &event->hw;
+	struct tx2_uncore_pmu *tx2_pmu;
+
+	tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+
+	/* Allocate a free counter */
+	hwc->idx = alloc_counter(tx2_pmu);
+	if (hwc->idx < 0)
+		return -EAGAIN;
+
+	tx2_pmu->events[hwc->idx] = event;
+	/* set counter control and data registers base address */
+	tx2_pmu->init_cntr_base(event, tx2_pmu);
+
+	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+	if (flags & PERF_EF_START)
+		tx2_uncore_event_start(event, flags);
+
+	return 0;
+}
+
+static void tx2_uncore_event_del(struct perf_event *event, int flags)
+{
+	struct tx2_uncore_pmu *tx2_pmu = pmu_to_tx2_pmu(event->pmu);
+	struct hw_perf_event *hwc = &event->hw;
+
+	tx2_uncore_event_stop(event, PERF_EF_UPDATE);
+
+	/* clear the assigned counter */
+	free_counter(tx2_pmu, GET_COUNTERID(event));
+
+	perf_event_update_userpage(event);
+	tx2_pmu->events[hwc->idx] = NULL;
+	hwc->idx = -1;
+}
+
+static void tx2_uncore_event_read(struct perf_event *event)
+{
+	tx2_uncore_event_update(event);
+}
+
+static enum hrtimer_restart tx2_hrtimer_callback(struct hrtimer *timer)
+{
+	struct tx2_uncore_pmu *tx2_pmu;
+	int max_counters, idx;
+
+	tx2_pmu = container_of(timer, struct tx2_uncore_pmu, hrtimer);
+	max_counters = tx2_pmu->max_counters;
+
+	if (bitmap_empty(tx2_pmu->active_counters, max_counters))
+		return HRTIMER_NORESTART;
+
+	for_each_set_bit(idx, tx2_pmu->active_counters, max_counters) {
+		struct perf_event *event = tx2_pmu->events[idx];
+
+		tx2_uncore_event_update(event);
+	}
+	hrtimer_forward_now(timer, ns_to_ktime(tx2_pmu->hrtimer_interval));
+	return HRTIMER_RESTART;
+}
+
+static int tx2_uncore_pmu_register(
+		struct tx2_uncore_pmu *tx2_pmu)
+{
+	struct device *dev = tx2_pmu->dev;
+	char *name = tx2_pmu->name;
+
+	/* Perf event registration */
+	tx2_pmu->pmu = (struct pmu) {
+		.module		= THIS_MODULE,
+		.attr_groups	= tx2_pmu->attr_groups,
+		.task_ctx_nr	= perf_invalid_context,
+		.event_init	= tx2_uncore_event_init,
+		.add		= tx2_uncore_event_add,
+		.del		= tx2_uncore_event_del,
+		.start		= tx2_uncore_event_start,
+		.stop		= tx2_uncore_event_stop,
+		.read		= tx2_uncore_event_read,
+	};
+
+	tx2_pmu->pmu.name = devm_kasprintf(dev, GFP_KERNEL,
+			"%s", name);
+
+	return perf_pmu_register(&tx2_pmu->pmu, tx2_pmu->pmu.name, -1);
+}
+
+static int tx2_uncore_pmu_add_dev(struct tx2_uncore_pmu *tx2_pmu)
+{
+	int ret, cpu;
+
+	cpu = cpumask_any_and(cpumask_of_node(tx2_pmu->node),
+			cpu_online_mask);
+
+	tx2_pmu->cpu = cpu;
+	hrtimer_init(&tx2_pmu->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+	tx2_pmu->hrtimer.function = tx2_hrtimer_callback;
+
+	ret = tx2_uncore_pmu_register(tx2_pmu);
+	if (ret) {
+		dev_err(tx2_pmu->dev, "%s PMU: Failed to init driver\n",
+				tx2_pmu->name);
+		return -ENODEV;
+	}
+
+	/* register hotplug callback for the pmu */
+	ret = cpuhp_state_add_instance(
+			CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+			&tx2_pmu->hpnode);
+	if (ret) {
+		dev_err(tx2_pmu->dev, "Error %d registering hotplug", ret);
+		return ret;
+	}
+
+	/* Add to list */
+	list_add(&tx2_pmu->entry, &tx2_pmus);
+
+	dev_dbg(tx2_pmu->dev, "%s PMU UNCORE registered\n",
+			tx2_pmu->pmu.name);
+	return ret;
+}
+
+static struct tx2_uncore_pmu *tx2_uncore_pmu_init_dev(struct device *dev,
+		acpi_handle handle, struct acpi_device *adev, u32 type)
+{
+	struct tx2_uncore_pmu *tx2_pmu;
+	void __iomem *base;
+	struct resource res;
+	struct resource_entry *rentry;
+	struct list_head list;
+	int ret;
+
+	INIT_LIST_HEAD(&list);
+	ret = acpi_dev_get_resources(adev, &list, NULL, NULL);
+	if (ret <= 0) {
+		dev_err(dev, "failed to parse _CRS method, error %d\n", ret);
+		return NULL;
+	}
+
+	list_for_each_entry(rentry, &list, node) {
+		if (resource_type(rentry->res) == IORESOURCE_MEM) {
+			res = *rentry->res;
+			break;
+		}
+	}
+
+	if (!rentry->res)
+		return NULL;
+
+	acpi_dev_free_resource_list(&list);
+	base = devm_ioremap_resource(dev, &res);
+	if (IS_ERR(base)) {
+		dev_err(dev, "PMU type %d: Fail to map resource\n", type);
+		return NULL;
+	}
+
+	tx2_pmu = devm_kzalloc(dev, sizeof(*tx2_pmu), GFP_KERNEL);
+	if (!tx2_pmu)
+		return NULL;
+
+	tx2_pmu->dev = dev;
+	tx2_pmu->type = type;
+	tx2_pmu->base = base;
+	tx2_pmu->node = dev_to_node(dev);
+	INIT_LIST_HEAD(&tx2_pmu->entry);
+
+	switch (tx2_pmu->type) {
+	case PMU_TYPE_L3C:
+		tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
+		tx2_pmu->prorate_factor = TX2_PMU_L3_TILES;
+		tx2_pmu->max_events = L3_EVENT_MAX;
+		tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
+		tx2_pmu->attr_groups = l3c_pmu_attr_groups;
+		tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
+				"uncore_l3c_%d", tx2_pmu->node);
+		tx2_pmu->init_cntr_base = init_cntr_base_l3c;
+		tx2_pmu->start_event = uncore_start_event_l3c;
+		tx2_pmu->stop_event = uncore_stop_event_l3c;
+		break;
+	case PMU_TYPE_DMC:
+		tx2_pmu->max_counters = TX2_PMU_MAX_COUNTERS;
+		tx2_pmu->prorate_factor = TX2_PMU_DMC_CHANNELS;
+		tx2_pmu->max_events = DMC_EVENT_MAX;
+		tx2_pmu->hrtimer_interval = TX2_PMU_HRTIMER_INTERVAL;
+		tx2_pmu->attr_groups = dmc_pmu_attr_groups;
+		tx2_pmu->name = devm_kasprintf(dev, GFP_KERNEL,
+				"uncore_dmc_%d", tx2_pmu->node);
+		tx2_pmu->init_cntr_base = init_cntr_base_dmc;
+		tx2_pmu->start_event = uncore_start_event_dmc;
+		tx2_pmu->stop_event = uncore_stop_event_dmc;
+		break;
+	case PMU_TYPE_INVALID:
+		devm_kfree(dev, tx2_pmu);
+		return NULL;
+	}
+
+	return tx2_pmu;
+}
+
+static acpi_status tx2_uncore_pmu_add(acpi_handle handle, u32 level,
+				    void *data, void **return_value)
+{
+	struct tx2_uncore_pmu *tx2_pmu;
+	struct acpi_device *adev;
+	enum tx2_uncore_type type;
+
+	if (acpi_bus_get_device(handle, &adev))
+		return AE_OK;
+	if (acpi_bus_get_status(adev) || !adev->status.present)
+		return AE_OK;
+
+	type = get_tx2_pmu_type(adev);
+	if (type == PMU_TYPE_INVALID)
+		return AE_OK;
+
+	tx2_pmu = tx2_uncore_pmu_init_dev((struct device *)data,
+			handle, adev, type);
+
+	if (!tx2_pmu)
+		return AE_ERROR;
+
+	if (tx2_uncore_pmu_add_dev(tx2_pmu)) {
+		/* Can't add the PMU device, abort */
+		return AE_ERROR;
+	}
+	return AE_OK;
+}
+
+static int tx2_uncore_pmu_online_cpu(unsigned int cpu,
+		struct hlist_node *hpnode)
+{
+	struct tx2_uncore_pmu *tx2_pmu;
+
+	tx2_pmu = hlist_entry_safe(hpnode,
+			struct tx2_uncore_pmu, hpnode);
+
+	/* Pick this CPU if there is no CPU/PMU association yet and
+	 * both are from the same node.
+	 */
+	if ((tx2_pmu->cpu >= nr_cpu_ids) &&
+		(tx2_pmu->node == cpu_to_node(cpu)))
+		tx2_pmu->cpu = cpu;
+
+	return 0;
+}
+
+static int tx2_uncore_pmu_offline_cpu(unsigned int cpu,
+		struct hlist_node *hpnode)
+{
+	int new_cpu;
+	struct tx2_uncore_pmu *tx2_pmu;
+	struct cpumask cpu_online_mask_temp;
+
+	tx2_pmu = hlist_entry_safe(hpnode,
+			struct tx2_uncore_pmu, hpnode);
+
+	if (cpu != tx2_pmu->cpu)
+		return 0;
+
+	hrtimer_cancel(&tx2_pmu->hrtimer);
+	cpumask_copy(&cpu_online_mask_temp, cpu_online_mask);
+	cpumask_clear_cpu(cpu, &cpu_online_mask_temp);
+	new_cpu = cpumask_any_and(
+			cpumask_of_node(tx2_pmu->node),
+			&cpu_online_mask_temp);
+
+	tx2_pmu->cpu = new_cpu;
+	if (new_cpu >= nr_cpu_ids)
+		return 0;
+	perf_pmu_migrate_context(&tx2_pmu->pmu, cpu, new_cpu);
+
+	return 0;
+}
+
+static const struct acpi_device_id tx2_uncore_acpi_match[] = {
+	{"CAV901C", 0},
+	{},
+};
+MODULE_DEVICE_TABLE(acpi, tx2_uncore_acpi_match);
+
+static int tx2_uncore_probe(struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	acpi_handle handle;
+	acpi_status status;
+
+	set_dev_node(dev, acpi_get_node(ACPI_HANDLE(dev)));
+
+	if (!has_acpi_companion(dev))
+		return -ENODEV;
+
+	handle = ACPI_HANDLE(dev);
+	if (!handle)
+		return -EINVAL;
+
+	/* Walk through the tree for all PMU UNCORE devices */
+	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1,
+				     tx2_uncore_pmu_add,
+				     NULL, dev, NULL);
+	if (ACPI_FAILURE(status)) {
+		dev_err(dev, "failed to probe PMU devices\n");
+		return_ACPI_STATUS(status);
+	}
+
+	dev_info(dev, "node%d: pmu uncore registered\n", dev_to_node(dev));
+	return 0;
+}
+
+static int tx2_uncore_remove(struct platform_device *pdev)
+{
+	struct tx2_uncore_pmu *tx2_pmu, *temp;
+	struct device *dev = &pdev->dev;
+
+	if (!list_empty(&tx2_pmus)) {
+		list_for_each_entry_safe(tx2_pmu, temp, &tx2_pmus, entry) {
+			if (tx2_pmu->node == dev_to_node(dev)) {
+				cpuhp_state_remove_instance_nocalls(
+					CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+					&tx2_pmu->hpnode);
+				perf_pmu_unregister(&tx2_pmu->pmu);
+				list_del(&tx2_pmu->entry);
+			}
+		}
+	}
+	return 0;
+}
+
+static struct platform_driver tx2_uncore_driver = {
+	.driver = {
+		.name		= "tx2-uncore-pmu",
+		.acpi_match_table = ACPI_PTR(tx2_uncore_acpi_match),
+	},
+	.probe = tx2_uncore_probe,
+	.remove = tx2_uncore_remove,
+};
+
+static int __init tx2_uncore_driver_init(void)
+{
+	int ret;
+
+	ret = cpuhp_setup_state_multi(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
+				      "perf/tx2/uncore:online",
+				      tx2_uncore_pmu_online_cpu,
+				      tx2_uncore_pmu_offline_cpu);
+	if (ret) {
+		pr_err("TX2 PMU: setup hotplug failed(%d)\n", ret);
+		return ret;
+	}
+	ret = platform_driver_register(&tx2_uncore_driver);
+	if (ret)
+		cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
+
+	return ret;
+}
+module_init(tx2_uncore_driver_init);
+
+static void __exit tx2_uncore_driver_exit(void)
+{
+	platform_driver_unregister(&tx2_uncore_driver);
+	cpuhp_remove_multi_state(CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE);
+}
+module_exit(tx2_uncore_driver_exit);
+
+MODULE_DESCRIPTION("ThunderX2 UNCORE PMU driver");
+MODULE_LICENSE("GPL v2");
+MODULE_AUTHOR("Ganapatrao Kulkarni <gkulkarni@cavium.com>");
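
One detail worth calling out: the DMC control register packs one configuration byte per counter, with the event id in bits [5:1] of that byte (the DMC_EVENT_CFG() macro above). A small standalone sketch of the read-modify-write that uncore_start_event_dmc() performs, with a plain variable standing in for the MMIO register:

#include <stdint.h>
#include <stdio.h>

/* Mirrors DMC_EVENT_CFG(idx, val) from the patch: byte idx, bits [5:1] */
#define DMC_EVENT_CFG(idx, val)	((val) << (((idx) * 8) + 1))

int main(void)
{
	uint32_t ctl = 0;		/* stand-in for DMC_COUNTER_CTL */
	int idx = 2;			/* counter 2 */
	uint32_t event_id = 0xF;	/* DMC_EVENT_READ_TXNS */

	/* same read-modify-write sequence as uncore_start_event_dmc() */
	ctl &= ~DMC_EVENT_CFG(idx, 0x1f);	/* clear this counter's event */
	ctl |= DMC_EVENT_CFG(idx, event_id);	/* program the new event */

	/* counter 2 -> shift 17, so 0xF lands in bits [21:17] */
	printf("ctl = 0x%08x\n", ctl);		/* prints 0x001e0000 */
	return 0;
}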
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index d007a319dfd4..fd586d0301e7 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -165,6 +165,7 @@ enum cpuhp_state {
 	CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
 	CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
 	CPUHP_AP_PERF_ARM_APM_XGENE_ONLINE,
+	CPUHP_AP_PERF_ARM_CAVIUM_TX2_UNCORE_ONLINE,
 	CPUHP_AP_PERF_POWERPC_NEST_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_CORE_IMC_ONLINE,
 	CPUHP_AP_PERF_POWERPC_THREAD_IMC_ONLINE,
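
Once the driver is loaded, each PMU shows up under /sys/bus/event_source/devices/ as uncore_l3c_<node> or uncore_dmc_<node> and can be driven with the stock perf tool, e.g. perf stat -a -e uncore_l3c_0/read_request/ sleep 1. For completeness, a sketch of the same via the raw perf_event_open(2) syscall; it assumes node 0 exists and that CPU 0 is in the PMU's advertised cpumask:

#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	struct perf_event_attr attr;
	uint64_t count;
	int type, fd;
	FILE *f;

	/* perf core exports each PMU's dynamic type id via sysfs */
	f = fopen("/sys/bus/event_source/devices/uncore_l3c_0/type", "r");
	if (!f || fscanf(f, "%d", &type) != 1)
		return 1;
	fclose(f);

	memset(&attr, 0, sizeof(attr));
	attr.type = type;
	attr.size = sizeof(attr);
	attr.config = 0xD;	/* L3_EVENT_READ_REQ, per the event list above */

	/* uncore events must be system-wide: pid == -1, cpu >= 0 */
	fd = syscall(__NR_perf_event_open, &attr, -1, 0, -1, 0);
	if (fd < 0)
		return 1;

	sleep(1);
	if (read(fd, &count, sizeof(count)) == sizeof(count))
		printf("l3c read requests: %llu\n", (unsigned long long)count);
	close(fd);
	return 0;
}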