aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/hwtracing
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2016-05-21 00:20:31 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2016-05-21 00:20:31 -0400
commit5af2344013454640e0133bb62e8cf2e30190a472 (patch)
tree93495d1eb88d7498dac4747a3d28081c09a69a55 /drivers/hwtracing
parent19e36ad292ab24980db64a5ff17973d3118a8fb9 (diff)
parent725d0123dfff3d7b666cf57f5d29c50addfc99d3 (diff)
Merge tag 'char-misc-4.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char / misc driver updates from Greg KH: "Here's the big char and misc driver update for 4.7-rc1. Lots of different tiny driver subsystems have updates here with new drivers and functionality. Details in the shortlog. All have been in linux-next with no reported issues for a while" * tag 'char-misc-4.7-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (125 commits) mcb: Delete num_cells variable which is not required mcb: Fixed bar number assignment for the gdd mcb: Replace ioremap and request_region with the devm version mcb: Implement bus->dev.release callback mcb: export bus information via sysfs mcb: Correctly initialize the bus's device mei: bus: call mei_cl_read_start under device lock coresight: etb10: adjust read pointer only when needed coresight: configuring ETF in FIFO mode when acting as link coresight: tmc: implementing TMC-ETF AUX space API coresight: moving struct cs_buffers to header file coresight: tmc: keep track of memory width coresight: tmc: make sysFS and Perf mode mutually exclusive coresight: tmc: dump system memory content only when needed coresight: tmc: adding mode of operation for link/sinks coresight: tmc: getting rid of multiple read access coresight: tmc: allocating memory when needed coresight: tmc: making prepare/unprepare functions generic coresight: tmc: splitting driver in ETB/ETF and ETR components coresight: tmc: cleaning up header file ...
Diffstat (limited to 'drivers/hwtracing')
-rw-r--r--drivers/hwtracing/coresight/Kconfig11
-rw-r--r--drivers/hwtracing/coresight/Makefile13
-rw-r--r--drivers/hwtracing/coresight/coresight-etb10.c107
-rw-r--r--drivers/hwtracing/coresight/coresight-etm3x-sysfs.c33
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x-sysfs.c2126
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.c2402
-rw-r--r--drivers/hwtracing/coresight/coresight-etm4x.h222
-rw-r--r--drivers/hwtracing/coresight/coresight-funnel.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-priv.h30
-rw-r--r--drivers/hwtracing/coresight/coresight-replicator.c1
-rw-r--r--drivers/hwtracing/coresight/coresight-stm.c920
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etf.c604
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc-etr.c329
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.c604
-rw-r--r--drivers/hwtracing/coresight/coresight-tmc.h140
-rw-r--r--drivers/hwtracing/coresight/coresight-tpiu.c1
-rw-r--r--drivers/hwtracing/coresight/coresight.c142
-rw-r--r--drivers/hwtracing/intel_th/core.c29
-rw-r--r--drivers/hwtracing/intel_th/intel_th.h6
-rw-r--r--drivers/hwtracing/intel_th/msu.c116
-rw-r--r--drivers/hwtracing/intel_th/pci.c5
-rw-r--r--drivers/hwtracing/intel_th/pti.c6
-rw-r--r--drivers/hwtracing/stm/core.c36
-rw-r--r--drivers/hwtracing/stm/dummy_stm.c14
-rw-r--r--drivers/hwtracing/stm/heartbeat.c14
-rw-r--r--drivers/hwtracing/stm/policy.c5
26 files changed, 4986 insertions, 2931 deletions
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index db0541031c72..130cb2114059 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -78,4 +78,15 @@ config CORESIGHT_QCOM_REPLICATOR
78 programmable ATB replicator sends the ATB trace stream from the 78 programmable ATB replicator sends the ATB trace stream from the
79 ETB/ETF to the TPIUi and ETR. 79 ETB/ETF to the TPIUi and ETR.
80 80
81config CORESIGHT_STM
82 bool "CoreSight System Trace Macrocell driver"
83 depends on (ARM && !(CPU_32v3 || CPU_32v4 || CPU_32v4T)) || ARM64
84 select CORESIGHT_LINKS_AND_SINKS
85 select STM
86 help
87 This driver provides support for hardware assisted software
88 instrumentation based tracing. This is primarily used for
89 logging useful software events or data coming from various entities
90 in the system, possibly running different OSs
91
81endif 92endif
diff --git a/drivers/hwtracing/coresight/Makefile b/drivers/hwtracing/coresight/Makefile
index cf8c6d689747..af480d9c1441 100644
--- a/drivers/hwtracing/coresight/Makefile
+++ b/drivers/hwtracing/coresight/Makefile
@@ -1,15 +1,18 @@
1# 1#
2# Makefile for CoreSight drivers. 2# Makefile for CoreSight drivers.
3# 3#
4obj-$(CONFIG_CORESIGHT) += coresight.o 4obj-$(CONFIG_CORESIGHT) += coresight.o coresight-etm-perf.o
5obj-$(CONFIG_OF) += of_coresight.o 5obj-$(CONFIG_OF) += of_coresight.o
6obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o 6obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o \
7 coresight-tmc-etf.o \
8 coresight-tmc-etr.o
7obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o 9obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
8obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o 10obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
9obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \ 11obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
10 coresight-replicator.o 12 coresight-replicator.o
11obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \ 13obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o \
12 coresight-etm3x-sysfs.o \ 14 coresight-etm3x-sysfs.o
13 coresight-etm-perf.o 15obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o \
14obj-$(CONFIG_CORESIGHT_SOURCE_ETM4X) += coresight-etm4x.o 16 coresight-etm4x-sysfs.o
15obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o 17obj-$(CONFIG_CORESIGHT_QCOM_REPLICATOR) += coresight-replicator-qcom.o
18obj-$(CONFIG_CORESIGHT_STM) += coresight-stm.o
diff --git a/drivers/hwtracing/coresight/coresight-etb10.c b/drivers/hwtracing/coresight/coresight-etb10.c
index acbce79934d6..4d20b0be0c0b 100644
--- a/drivers/hwtracing/coresight/coresight-etb10.c
+++ b/drivers/hwtracing/coresight/coresight-etb10.c
@@ -71,26 +71,6 @@
71#define ETB_FRAME_SIZE_WORDS 4 71#define ETB_FRAME_SIZE_WORDS 4
72 72
73/** 73/**
74 * struct cs_buffer - keep track of a recording session' specifics
75 * @cur: index of the current buffer
76 * @nr_pages: max number of pages granted to us
77 * @offset: offset within the current buffer
78 * @data_size: how much we collected in this run
79 * @lost: other than zero if we had a HW buffer wrap around
80 * @snapshot: is this run in snapshot mode
81 * @data_pages: a handle the ring buffer
82 */
83struct cs_buffers {
84 unsigned int cur;
85 unsigned int nr_pages;
86 unsigned long offset;
87 local_t data_size;
88 local_t lost;
89 bool snapshot;
90 void **data_pages;
91};
92
93/**
94 * struct etb_drvdata - specifics associated to an ETB component 74 * struct etb_drvdata - specifics associated to an ETB component
95 * @base: memory mapped base address for this component. 75 * @base: memory mapped base address for this component.
96 * @dev: the device entity associated to this component. 76 * @dev: the device entity associated to this component.
@@ -440,7 +420,7 @@ static void etb_update_buffer(struct coresight_device *csdev,
440 u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1); 420 u32 mask = ~(ETB_FRAME_SIZE_WORDS - 1);
441 421
442 /* The new read pointer must be frame size aligned */ 422 /* The new read pointer must be frame size aligned */
443 to_read -= handle->size & mask; 423 to_read = handle->size & mask;
444 /* 424 /*
445 * Move the RAM read pointer up, keeping in mind that 425 * Move the RAM read pointer up, keeping in mind that
446 * everything is in frame size units. 426 * everything is in frame size units.
@@ -448,7 +428,8 @@ static void etb_update_buffer(struct coresight_device *csdev,
448 read_ptr = (write_ptr + drvdata->buffer_depth) - 428 read_ptr = (write_ptr + drvdata->buffer_depth) -
449 to_read / ETB_FRAME_SIZE_WORDS; 429 to_read / ETB_FRAME_SIZE_WORDS;
450 /* Wrap around if need be*/ 430 /* Wrap around if need be*/
451 read_ptr &= ~(drvdata->buffer_depth - 1); 431 if (read_ptr > (drvdata->buffer_depth - 1))
432 read_ptr -= drvdata->buffer_depth;
452 /* let the decoder know we've skipped ahead */ 433 /* let the decoder know we've skipped ahead */
453 local_inc(&buf->lost); 434 local_inc(&buf->lost);
454 } 435 }
@@ -579,47 +560,29 @@ static const struct file_operations etb_fops = {
579 .llseek = no_llseek, 560 .llseek = no_llseek,
580}; 561};
581 562
582static ssize_t status_show(struct device *dev, 563#define coresight_etb10_simple_func(name, offset) \
583 struct device_attribute *attr, char *buf) 564 coresight_simple_func(struct etb_drvdata, name, offset)
584{ 565
585 unsigned long flags; 566coresight_etb10_simple_func(rdp, ETB_RAM_DEPTH_REG);
586 u32 etb_rdr, etb_sr, etb_rrp, etb_rwp; 567coresight_etb10_simple_func(sts, ETB_STATUS_REG);
587 u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr; 568coresight_etb10_simple_func(rrp, ETB_RAM_READ_POINTER);
588 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent); 569coresight_etb10_simple_func(rwp, ETB_RAM_WRITE_POINTER);
589 570coresight_etb10_simple_func(trg, ETB_TRG);
590 pm_runtime_get_sync(drvdata->dev); 571coresight_etb10_simple_func(ctl, ETB_CTL_REG);
591 spin_lock_irqsave(&drvdata->spinlock, flags); 572coresight_etb10_simple_func(ffsr, ETB_FFSR);
592 CS_UNLOCK(drvdata->base); 573coresight_etb10_simple_func(ffcr, ETB_FFCR);
593 574
594 etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG); 575static struct attribute *coresight_etb_mgmt_attrs[] = {
595 etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG); 576 &dev_attr_rdp.attr,
596 etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER); 577 &dev_attr_sts.attr,
597 etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER); 578 &dev_attr_rrp.attr,
598 etb_trg = readl_relaxed(drvdata->base + ETB_TRG); 579 &dev_attr_rwp.attr,
599 etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG); 580 &dev_attr_trg.attr,
600 etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR); 581 &dev_attr_ctl.attr,
601 etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR); 582 &dev_attr_ffsr.attr,
602 583 &dev_attr_ffcr.attr,
603 CS_LOCK(drvdata->base); 584 NULL,
604 spin_unlock_irqrestore(&drvdata->spinlock, flags); 585};
605
606 pm_runtime_put(drvdata->dev);
607
608 return sprintf(buf,
609 "Depth:\t\t0x%x\n"
610 "Status:\t\t0x%x\n"
611 "RAM read ptr:\t0x%x\n"
612 "RAM wrt ptr:\t0x%x\n"
613 "Trigger cnt:\t0x%x\n"
614 "Control:\t0x%x\n"
615 "Flush status:\t0x%x\n"
616 "Flush ctrl:\t0x%x\n",
617 etb_rdr, etb_sr, etb_rrp, etb_rwp,
618 etb_trg, etb_cr, etb_ffsr, etb_ffcr);
619
620 return -EINVAL;
621}
622static DEVICE_ATTR_RO(status);
623 586
624static ssize_t trigger_cntr_show(struct device *dev, 587static ssize_t trigger_cntr_show(struct device *dev,
625 struct device_attribute *attr, char *buf) 588 struct device_attribute *attr, char *buf)
@@ -649,10 +612,23 @@ static DEVICE_ATTR_RW(trigger_cntr);
649 612
650static struct attribute *coresight_etb_attrs[] = { 613static struct attribute *coresight_etb_attrs[] = {
651 &dev_attr_trigger_cntr.attr, 614 &dev_attr_trigger_cntr.attr,
652 &dev_attr_status.attr,
653 NULL, 615 NULL,
654}; 616};
655ATTRIBUTE_GROUPS(coresight_etb); 617
618static const struct attribute_group coresight_etb_group = {
619 .attrs = coresight_etb_attrs,
620};
621
622static const struct attribute_group coresight_etb_mgmt_group = {
623 .attrs = coresight_etb_mgmt_attrs,
624 .name = "mgmt",
625};
626
627const struct attribute_group *coresight_etb_groups[] = {
628 &coresight_etb_group,
629 &coresight_etb_mgmt_group,
630 NULL,
631};
656 632
657static int etb_probe(struct amba_device *adev, const struct amba_id *id) 633static int etb_probe(struct amba_device *adev, const struct amba_id *id)
658{ 634{
@@ -729,7 +705,6 @@ static int etb_probe(struct amba_device *adev, const struct amba_id *id)
729 if (ret) 705 if (ret)
730 goto err_misc_register; 706 goto err_misc_register;
731 707
732 dev_info(dev, "ETB initialized\n");
733 return 0; 708 return 0;
734 709
735err_misc_register: 710err_misc_register:
diff --git a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
index cbb4046c1070..02d4b629891f 100644
--- a/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
+++ b/drivers/hwtracing/coresight/coresight-etm3x-sysfs.c
@@ -1221,26 +1221,19 @@ static struct attribute *coresight_etm_attrs[] = {
1221 NULL, 1221 NULL,
1222}; 1222};
1223 1223
1224#define coresight_simple_func(name, offset) \ 1224#define coresight_etm3x_simple_func(name, offset) \
1225static ssize_t name##_show(struct device *_dev, \ 1225 coresight_simple_func(struct etm_drvdata, name, offset)
1226 struct device_attribute *attr, char *buf) \ 1226
1227{ \ 1227coresight_etm3x_simple_func(etmccr, ETMCCR);
1228 struct etm_drvdata *drvdata = dev_get_drvdata(_dev->parent); \ 1228coresight_etm3x_simple_func(etmccer, ETMCCER);
1229 return scnprintf(buf, PAGE_SIZE, "0x%x\n", \ 1229coresight_etm3x_simple_func(etmscr, ETMSCR);
1230 readl_relaxed(drvdata->base + offset)); \ 1230coresight_etm3x_simple_func(etmidr, ETMIDR);
1231} \ 1231coresight_etm3x_simple_func(etmcr, ETMCR);
1232DEVICE_ATTR_RO(name) 1232coresight_etm3x_simple_func(etmtraceidr, ETMTRACEIDR);
1233 1233coresight_etm3x_simple_func(etmteevr, ETMTEEVR);
1234coresight_simple_func(etmccr, ETMCCR); 1234coresight_etm3x_simple_func(etmtssvr, ETMTSSCR);
1235coresight_simple_func(etmccer, ETMCCER); 1235coresight_etm3x_simple_func(etmtecr1, ETMTECR1);
1236coresight_simple_func(etmscr, ETMSCR); 1236coresight_etm3x_simple_func(etmtecr2, ETMTECR2);
1237coresight_simple_func(etmidr, ETMIDR);
1238coresight_simple_func(etmcr, ETMCR);
1239coresight_simple_func(etmtraceidr, ETMTRACEIDR);
1240coresight_simple_func(etmteevr, ETMTEEVR);
1241coresight_simple_func(etmtssvr, ETMTSSCR);
1242coresight_simple_func(etmtecr1, ETMTECR1);
1243coresight_simple_func(etmtecr2, ETMTECR2);
1244 1237
1245static struct attribute *coresight_etm_mgmt_attrs[] = { 1238static struct attribute *coresight_etm_mgmt_attrs[] = {
1246 &dev_attr_etmccr.attr, 1239 &dev_attr_etmccr.attr,
diff --git a/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
new file mode 100644
index 000000000000..7c84308c5564
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-etm4x-sysfs.c
@@ -0,0 +1,2126 @@
1/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/pm_runtime.h>
19#include <linux/sysfs.h>
20#include "coresight-etm4x.h"
21
22static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
23{
24 u8 idx;
25 struct etmv4_config *config = &drvdata->config;
26
27 idx = config->addr_idx;
28
29 /*
30 * TRCACATRn.TYPE bit[1:0]: type of comparison
31 * the trace unit performs
32 */
33 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
34 if (idx % 2 != 0)
35 return -EINVAL;
36
37 /*
38 * We are performing instruction address comparison. Set the
39 * relevant bit of ViewInst Include/Exclude Control register
40 * for corresponding address comparator pair.
41 */
42 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
43 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
44 return -EINVAL;
45
46 if (exclude == true) {
47 /*
48 * Set exclude bit and unset the include bit
49 * corresponding to comparator pair
50 */
51 config->viiectlr |= BIT(idx / 2 + 16);
52 config->viiectlr &= ~BIT(idx / 2);
53 } else {
54 /*
55 * Set include bit and unset exclude bit
56 * corresponding to comparator pair
57 */
58 config->viiectlr |= BIT(idx / 2);
59 config->viiectlr &= ~BIT(idx / 2 + 16);
60 }
61 }
62 return 0;
63}
64
65static ssize_t nr_pe_cmp_show(struct device *dev,
66 struct device_attribute *attr,
67 char *buf)
68{
69 unsigned long val;
70 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
71
72 val = drvdata->nr_pe_cmp;
73 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
74}
75static DEVICE_ATTR_RO(nr_pe_cmp);
76
77static ssize_t nr_addr_cmp_show(struct device *dev,
78 struct device_attribute *attr,
79 char *buf)
80{
81 unsigned long val;
82 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
83
84 val = drvdata->nr_addr_cmp;
85 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
86}
87static DEVICE_ATTR_RO(nr_addr_cmp);
88
89static ssize_t nr_cntr_show(struct device *dev,
90 struct device_attribute *attr,
91 char *buf)
92{
93 unsigned long val;
94 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
95
96 val = drvdata->nr_cntr;
97 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
98}
99static DEVICE_ATTR_RO(nr_cntr);
100
101static ssize_t nr_ext_inp_show(struct device *dev,
102 struct device_attribute *attr,
103 char *buf)
104{
105 unsigned long val;
106 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
107
108 val = drvdata->nr_ext_inp;
109 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
110}
111static DEVICE_ATTR_RO(nr_ext_inp);
112
113static ssize_t numcidc_show(struct device *dev,
114 struct device_attribute *attr,
115 char *buf)
116{
117 unsigned long val;
118 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
119
120 val = drvdata->numcidc;
121 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
122}
123static DEVICE_ATTR_RO(numcidc);
124
125static ssize_t numvmidc_show(struct device *dev,
126 struct device_attribute *attr,
127 char *buf)
128{
129 unsigned long val;
130 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
131
132 val = drvdata->numvmidc;
133 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
134}
135static DEVICE_ATTR_RO(numvmidc);
136
137static ssize_t nrseqstate_show(struct device *dev,
138 struct device_attribute *attr,
139 char *buf)
140{
141 unsigned long val;
142 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
143
144 val = drvdata->nrseqstate;
145 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
146}
147static DEVICE_ATTR_RO(nrseqstate);
148
149static ssize_t nr_resource_show(struct device *dev,
150 struct device_attribute *attr,
151 char *buf)
152{
153 unsigned long val;
154 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
155
156 val = drvdata->nr_resource;
157 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
158}
159static DEVICE_ATTR_RO(nr_resource);
160
161static ssize_t nr_ss_cmp_show(struct device *dev,
162 struct device_attribute *attr,
163 char *buf)
164{
165 unsigned long val;
166 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
167
168 val = drvdata->nr_ss_cmp;
169 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
170}
171static DEVICE_ATTR_RO(nr_ss_cmp);
172
173static ssize_t reset_store(struct device *dev,
174 struct device_attribute *attr,
175 const char *buf, size_t size)
176{
177 int i;
178 unsigned long val;
179 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
180 struct etmv4_config *config = &drvdata->config;
181
182 if (kstrtoul(buf, 16, &val))
183 return -EINVAL;
184
185 spin_lock(&drvdata->spinlock);
186 if (val)
187 config->mode = 0x0;
188
189 /* Disable data tracing: do not trace load and store data transfers */
190 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
191 config->cfg &= ~(BIT(1) | BIT(2));
192
193 /* Disable data value and data address tracing */
194 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
195 ETM_MODE_DATA_TRACE_VAL);
196 config->cfg &= ~(BIT(16) | BIT(17));
197
198 /* Disable all events tracing */
199 config->eventctrl0 = 0x0;
200 config->eventctrl1 = 0x0;
201
202 /* Disable timestamp event */
203 config->ts_ctrl = 0x0;
204
205 /* Disable stalling */
206 config->stall_ctrl = 0x0;
207
208 /* Reset trace synchronization period to 2^8 = 256 bytes*/
209 if (drvdata->syncpr == false)
210 config->syncfreq = 0x8;
211
212 /*
213 * Enable ViewInst to trace everything with start-stop logic in
214 * started state. ARM recommends start-stop logic is set before
215 * each trace run.
216 */
217 config->vinst_ctrl |= BIT(0);
218 if (drvdata->nr_addr_cmp == true) {
219 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
220 /* SSSTATUS, bit[9] */
221 config->vinst_ctrl |= BIT(9);
222 }
223
224 /* No address range filtering for ViewInst */
225 config->viiectlr = 0x0;
226
227 /* No start-stop filtering for ViewInst */
228 config->vissctlr = 0x0;
229
230 /* Disable seq events */
231 for (i = 0; i < drvdata->nrseqstate-1; i++)
232 config->seq_ctrl[i] = 0x0;
233 config->seq_rst = 0x0;
234 config->seq_state = 0x0;
235
236 /* Disable external input events */
237 config->ext_inp = 0x0;
238
239 config->cntr_idx = 0x0;
240 for (i = 0; i < drvdata->nr_cntr; i++) {
241 config->cntrldvr[i] = 0x0;
242 config->cntr_ctrl[i] = 0x0;
243 config->cntr_val[i] = 0x0;
244 }
245
246 config->res_idx = 0x0;
247 for (i = 0; i < drvdata->nr_resource; i++)
248 config->res_ctrl[i] = 0x0;
249
250 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
251 config->ss_ctrl[i] = 0x0;
252 config->ss_pe_cmp[i] = 0x0;
253 }
254
255 config->addr_idx = 0x0;
256 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
257 config->addr_val[i] = 0x0;
258 config->addr_acc[i] = 0x0;
259 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
260 }
261
262 config->ctxid_idx = 0x0;
263 for (i = 0; i < drvdata->numcidc; i++) {
264 config->ctxid_pid[i] = 0x0;
265 config->ctxid_vpid[i] = 0x0;
266 }
267
268 config->ctxid_mask0 = 0x0;
269 config->ctxid_mask1 = 0x0;
270
271 config->vmid_idx = 0x0;
272 for (i = 0; i < drvdata->numvmidc; i++)
273 config->vmid_val[i] = 0x0;
274 config->vmid_mask0 = 0x0;
275 config->vmid_mask1 = 0x0;
276
277 drvdata->trcid = drvdata->cpu + 1;
278
279 spin_unlock(&drvdata->spinlock);
280
281 return size;
282}
283static DEVICE_ATTR_WO(reset);
284
285static ssize_t mode_show(struct device *dev,
286 struct device_attribute *attr,
287 char *buf)
288{
289 unsigned long val;
290 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
291 struct etmv4_config *config = &drvdata->config;
292
293 val = config->mode;
294 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
295}
296
297static ssize_t mode_store(struct device *dev,
298 struct device_attribute *attr,
299 const char *buf, size_t size)
300{
301 unsigned long val, mode;
302 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
303 struct etmv4_config *config = &drvdata->config;
304
305 if (kstrtoul(buf, 16, &val))
306 return -EINVAL;
307
308 spin_lock(&drvdata->spinlock);
309 config->mode = val & ETMv4_MODE_ALL;
310
311 if (config->mode & ETM_MODE_EXCLUDE)
312 etm4_set_mode_exclude(drvdata, true);
313 else
314 etm4_set_mode_exclude(drvdata, false);
315
316 if (drvdata->instrp0 == true) {
317 /* start by clearing instruction P0 field */
318 config->cfg &= ~(BIT(1) | BIT(2));
319 if (config->mode & ETM_MODE_LOAD)
320 /* 0b01 Trace load instructions as P0 instructions */
321 config->cfg |= BIT(1);
322 if (config->mode & ETM_MODE_STORE)
323 /* 0b10 Trace store instructions as P0 instructions */
324 config->cfg |= BIT(2);
325 if (config->mode & ETM_MODE_LOAD_STORE)
326 /*
327 * 0b11 Trace load and store instructions
328 * as P0 instructions
329 */
330 config->cfg |= BIT(1) | BIT(2);
331 }
332
333 /* bit[3], Branch broadcast mode */
334 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
335 config->cfg |= BIT(3);
336 else
337 config->cfg &= ~BIT(3);
338
339 /* bit[4], Cycle counting instruction trace bit */
340 if ((config->mode & ETMv4_MODE_CYCACC) &&
341 (drvdata->trccci == true))
342 config->cfg |= BIT(4);
343 else
344 config->cfg &= ~BIT(4);
345
346 /* bit[6], Context ID tracing bit */
347 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
348 config->cfg |= BIT(6);
349 else
350 config->cfg &= ~BIT(6);
351
352 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
353 config->cfg |= BIT(7);
354 else
355 config->cfg &= ~BIT(7);
356
357 /* bits[10:8], Conditional instruction tracing bit */
358 mode = ETM_MODE_COND(config->mode);
359 if (drvdata->trccond == true) {
360 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
361 config->cfg |= mode << 8;
362 }
363
364 /* bit[11], Global timestamp tracing bit */
365 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
366 config->cfg |= BIT(11);
367 else
368 config->cfg &= ~BIT(11);
369
370 /* bit[12], Return stack enable bit */
371 if ((config->mode & ETM_MODE_RETURNSTACK) &&
372 (drvdata->retstack == true))
373 config->cfg |= BIT(12);
374 else
375 config->cfg &= ~BIT(12);
376
377 /* bits[14:13], Q element enable field */
378 mode = ETM_MODE_QELEM(config->mode);
379 /* start by clearing QE bits */
380 config->cfg &= ~(BIT(13) | BIT(14));
381 /* if supported, Q elements with instruction counts are enabled */
382 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
383 config->cfg |= BIT(13);
384 /*
385 * if supported, Q elements with and without instruction
386 * counts are enabled
387 */
388 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
389 config->cfg |= BIT(14);
390
391 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
392 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
393 (drvdata->atbtrig == true))
394 config->eventctrl1 |= BIT(11);
395 else
396 config->eventctrl1 &= ~BIT(11);
397
398 /* bit[12], Low-power state behavior override bit */
399 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
400 (drvdata->lpoverride == true))
401 config->eventctrl1 |= BIT(12);
402 else
403 config->eventctrl1 &= ~BIT(12);
404
405 /* bit[8], Instruction stall bit */
406 if (config->mode & ETM_MODE_ISTALL_EN)
407 config->stall_ctrl |= BIT(8);
408 else
409 config->stall_ctrl &= ~BIT(8);
410
411 /* bit[10], Prioritize instruction trace bit */
412 if (config->mode & ETM_MODE_INSTPRIO)
413 config->stall_ctrl |= BIT(10);
414 else
415 config->stall_ctrl &= ~BIT(10);
416
417 /* bit[13], Trace overflow prevention bit */
418 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
419 (drvdata->nooverflow == true))
420 config->stall_ctrl |= BIT(13);
421 else
422 config->stall_ctrl &= ~BIT(13);
423
424 /* bit[9] Start/stop logic control bit */
425 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
426 config->vinst_ctrl |= BIT(9);
427 else
428 config->vinst_ctrl &= ~BIT(9);
429
430 /* bit[10], Whether a trace unit must trace a Reset exception */
431 if (config->mode & ETM_MODE_TRACE_RESET)
432 config->vinst_ctrl |= BIT(10);
433 else
434 config->vinst_ctrl &= ~BIT(10);
435
436 /* bit[11], Whether a trace unit must trace a system error exception */
437 if ((config->mode & ETM_MODE_TRACE_ERR) &&
438 (drvdata->trc_error == true))
439 config->vinst_ctrl |= BIT(11);
440 else
441 config->vinst_ctrl &= ~BIT(11);
442
443 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
444 etm4_config_trace_mode(config);
445
446 spin_unlock(&drvdata->spinlock);
447
448 return size;
449}
450static DEVICE_ATTR_RW(mode);
451
452static ssize_t pe_show(struct device *dev,
453 struct device_attribute *attr,
454 char *buf)
455{
456 unsigned long val;
457 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
458 struct etmv4_config *config = &drvdata->config;
459
460 val = config->pe_sel;
461 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
462}
463
464static ssize_t pe_store(struct device *dev,
465 struct device_attribute *attr,
466 const char *buf, size_t size)
467{
468 unsigned long val;
469 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
470 struct etmv4_config *config = &drvdata->config;
471
472 if (kstrtoul(buf, 16, &val))
473 return -EINVAL;
474
475 spin_lock(&drvdata->spinlock);
476 if (val > drvdata->nr_pe) {
477 spin_unlock(&drvdata->spinlock);
478 return -EINVAL;
479 }
480
481 config->pe_sel = val;
482 spin_unlock(&drvdata->spinlock);
483 return size;
484}
485static DEVICE_ATTR_RW(pe);
486
487static ssize_t event_show(struct device *dev,
488 struct device_attribute *attr,
489 char *buf)
490{
491 unsigned long val;
492 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
493 struct etmv4_config *config = &drvdata->config;
494
495 val = config->eventctrl0;
496 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
497}
498
499static ssize_t event_store(struct device *dev,
500 struct device_attribute *attr,
501 const char *buf, size_t size)
502{
503 unsigned long val;
504 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
505 struct etmv4_config *config = &drvdata->config;
506
507 if (kstrtoul(buf, 16, &val))
508 return -EINVAL;
509
510 spin_lock(&drvdata->spinlock);
511 switch (drvdata->nr_event) {
512 case 0x0:
513 /* EVENT0, bits[7:0] */
514 config->eventctrl0 = val & 0xFF;
515 break;
516 case 0x1:
517 /* EVENT1, bits[15:8] */
518 config->eventctrl0 = val & 0xFFFF;
519 break;
520 case 0x2:
521 /* EVENT2, bits[23:16] */
522 config->eventctrl0 = val & 0xFFFFFF;
523 break;
524 case 0x3:
525 /* EVENT3, bits[31:24] */
526 config->eventctrl0 = val;
527 break;
528 default:
529 break;
530 }
531 spin_unlock(&drvdata->spinlock);
532 return size;
533}
534static DEVICE_ATTR_RW(event);
535
536static ssize_t event_instren_show(struct device *dev,
537 struct device_attribute *attr,
538 char *buf)
539{
540 unsigned long val;
541 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
542 struct etmv4_config *config = &drvdata->config;
543
544 val = BMVAL(config->eventctrl1, 0, 3);
545 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
546}
547
548static ssize_t event_instren_store(struct device *dev,
549 struct device_attribute *attr,
550 const char *buf, size_t size)
551{
552 unsigned long val;
553 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
554 struct etmv4_config *config = &drvdata->config;
555
556 if (kstrtoul(buf, 16, &val))
557 return -EINVAL;
558
559 spin_lock(&drvdata->spinlock);
560 /* start by clearing all instruction event enable bits */
561 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
562 switch (drvdata->nr_event) {
563 case 0x0:
564 /* generate Event element for event 1 */
565 config->eventctrl1 |= val & BIT(1);
566 break;
567 case 0x1:
568 /* generate Event element for event 1 and 2 */
569 config->eventctrl1 |= val & (BIT(0) | BIT(1));
570 break;
571 case 0x2:
572 /* generate Event element for event 1, 2 and 3 */
573 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
574 break;
575 case 0x3:
576 /* generate Event element for all 4 events */
577 config->eventctrl1 |= val & 0xF;
578 break;
579 default:
580 break;
581 }
582 spin_unlock(&drvdata->spinlock);
583 return size;
584}
585static DEVICE_ATTR_RW(event_instren);
586
587static ssize_t event_ts_show(struct device *dev,
588 struct device_attribute *attr,
589 char *buf)
590{
591 unsigned long val;
592 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
593 struct etmv4_config *config = &drvdata->config;
594
595 val = config->ts_ctrl;
596 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
597}
598
599static ssize_t event_ts_store(struct device *dev,
600 struct device_attribute *attr,
601 const char *buf, size_t size)
602{
603 unsigned long val;
604 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
605 struct etmv4_config *config = &drvdata->config;
606
607 if (kstrtoul(buf, 16, &val))
608 return -EINVAL;
609 if (!drvdata->ts_size)
610 return -EINVAL;
611
612 config->ts_ctrl = val & ETMv4_EVENT_MASK;
613 return size;
614}
615static DEVICE_ATTR_RW(event_ts);
616
617static ssize_t syncfreq_show(struct device *dev,
618 struct device_attribute *attr,
619 char *buf)
620{
621 unsigned long val;
622 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
623 struct etmv4_config *config = &drvdata->config;
624
625 val = config->syncfreq;
626 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
627}
628
629static ssize_t syncfreq_store(struct device *dev,
630 struct device_attribute *attr,
631 const char *buf, size_t size)
632{
633 unsigned long val;
634 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
635 struct etmv4_config *config = &drvdata->config;
636
637 if (kstrtoul(buf, 16, &val))
638 return -EINVAL;
639 if (drvdata->syncpr == true)
640 return -EINVAL;
641
642 config->syncfreq = val & ETMv4_SYNC_MASK;
643 return size;
644}
645static DEVICE_ATTR_RW(syncfreq);
646
/*
 * "cyc_threshold" - minimum interval between cycle count trace packets
 * (TRCCCCTLR).  Values below the implementation minimum are rejected.
 */
static ssize_t cyc_threshold_show(struct device *dev,
				  struct device_attribute *attr,
				  char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ccctlr;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cyc_threshold_store(struct device *dev,
				   struct device_attribute *attr,
				   const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* hardware enforces a minimum threshold (TRCIDR3.CCITMIN) */
	if (val < drvdata->ccitmin)
		return -EINVAL;

	config->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
	return size;
}
static DEVICE_ATTR_RW(cyc_threshold);
676
677static ssize_t bb_ctrl_show(struct device *dev,
678 struct device_attribute *attr,
679 char *buf)
680{
681 unsigned long val;
682 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
683 struct etmv4_config *config = &drvdata->config;
684
685 val = config->bb_ctrl;
686 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
687}
688
689static ssize_t bb_ctrl_store(struct device *dev,
690 struct device_attribute *attr,
691 const char *buf, size_t size)
692{
693 unsigned long val;
694 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
695 struct etmv4_config *config = &drvdata->config;
696
697 if (kstrtoul(buf, 16, &val))
698 return -EINVAL;
699 if (drvdata->trcbb == false)
700 return -EINVAL;
701 if (!drvdata->nr_addr_cmp)
702 return -EINVAL;
703 /*
704 * Bit[7:0] selects which address range comparator is used for
705 * branch broadcast control.
706 */
707 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
708 return -EINVAL;
709
710 config->bb_ctrl = val;
711 return size;
712}
713static DEVICE_ATTR_RW(bb_ctrl);
714
/*
 * "event_vinst" - event selector for the ViewInst function
 * (TRCVICTLR, EVENT field, bits[7:0]).
 */
static ssize_t event_vinst_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->vinst_ctrl & ETMv4_EVENT_MASK;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t event_vinst_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	/* read-modify-write of vinst_ctrl must be atomic wrt other stores */
	spin_lock(&drvdata->spinlock);
	val &= ETMv4_EVENT_MASK;
	config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
	config->vinst_ctrl |= val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(event_vinst);
746
/*
 * "s_exlevel_vinst" - Secure state exception levels where instruction
 * tracing is inhibited (TRCVICTLR.EXLEVEL_S, bits[19:16]).
 */
static ssize_t s_exlevel_vinst_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = BMVAL(config->vinst_ctrl, 16, 19);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t s_exlevel_vinst_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear all EXLEVEL_S bits (bit[18] is never implemented) */
	config->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->s_ex_level;
	config->vinst_ctrl |= (val << 16);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(s_exlevel_vinst);
780
/*
 * "ns_exlevel_vinst" - Non-secure state exception levels where instruction
 * tracing is inhibited (TRCVICTLR.EXLEVEL_NS, bits[23:20]).
 */
static ssize_t ns_exlevel_vinst_show(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* EXLEVEL_NS, bits[23:20] */
	val = BMVAL(config->vinst_ctrl, 20, 23);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ns_exlevel_vinst_store(struct device *dev,
				      struct device_attribute *attr,
				      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	/* clear EXLEVEL_NS bits (bit[23] is never implemented) */
	config->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
	/* enable instruction tracing for corresponding exception level */
	val &= drvdata->ns_ex_level;
	config->vinst_ctrl |= (val << 20);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ns_exlevel_vinst);
815
/*
 * "addr_idx" - selects which address comparator the other addr_* sysfs
 * files operate on.  Valid range is [0, nr_addr_cmp * 2) since comparators
 * come in pairs.
 */
static ssize_t addr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->addr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* each comparator pair provides two single comparators */
	if (val >= drvdata->nr_addr_cmp * 2)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->addr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_idx);
851
/*
 * "addr_instdatatype" - access type of the selected address comparator
 * (TRCACATRn.TYPE, bits[1:0]): instruction, data load, data store or both.
 * Only "instr" can be written; data tracing types are not configurable here.
 */
static ssize_t addr_instdatatype_show(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	ssize_t len;
	u8 val, idx;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	val = BMVAL(config->addr_acc[idx], 0, 1);
	len = scnprintf(buf, PAGE_SIZE, "%s\n",
			val == ETM_INSTR_ADDR ? "instr" :
			(val == ETM_DATA_LOAD_ADDR ? "data_load" :
			(val == ETM_DATA_STORE_ADDR ? "data_store" :
			"data_load_store")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_instdatatype_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t size)
{
	u8 idx;
	char str[20] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* bound the input before parsing into the fixed-size buffer */
	if (strlen(buf) >= 20)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "instr"))
		/* TYPE, bits[1:0] */
		config->addr_acc[idx] &= ~(BIT(0) | BIT(1));

	/* any other keyword is silently ignored (size is still consumed) */
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_instdatatype);
897
898static ssize_t addr_single_show(struct device *dev,
899 struct device_attribute *attr,
900 char *buf)
901{
902 u8 idx;
903 unsigned long val;
904 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
905 struct etmv4_config *config = &drvdata->config;
906
907 idx = config->addr_idx;
908 spin_lock(&drvdata->spinlock);
909 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
910 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
911 spin_unlock(&drvdata->spinlock);
912 return -EPERM;
913 }
914 val = (unsigned long)config->addr_val[idx];
915 spin_unlock(&drvdata->spinlock);
916 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
917}
918
919static ssize_t addr_single_store(struct device *dev,
920 struct device_attribute *attr,
921 const char *buf, size_t size)
922{
923 u8 idx;
924 unsigned long val;
925 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
926 struct etmv4_config *config = &drvdata->config;
927
928 if (kstrtoul(buf, 16, &val))
929 return -EINVAL;
930
931 spin_lock(&drvdata->spinlock);
932 idx = config->addr_idx;
933 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
934 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
935 spin_unlock(&drvdata->spinlock);
936 return -EPERM;
937 }
938
939 config->addr_val[idx] = (u64)val;
940 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
941 spin_unlock(&drvdata->spinlock);
942 return size;
943}
944static DEVICE_ATTR_RW(addr_single);
945
/*
 * "addr_range" - configure the selected comparator pair as an address
 * range.  The index must be even (ranges use pairs {idx, idx+1}) and both
 * comparators must be unconfigured or already of type RANGE.
 */
static ssize_t addr_range_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* ranges are anchored on the even comparator of a pair */
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}
	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val1 = (unsigned long)config->addr_val[idx];
	val2 = (unsigned long)config->addr_val[idx + 1];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
}

static ssize_t addr_range_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val1, val2;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* expects two hex values: "<start> <end>" */
	if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
		return -EINVAL;
	/* lower address comparator cannot have a higher address value */
	if (val1 > val2)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (idx % 2 != 0) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
	      (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
	       config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val1;
	config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
	config->addr_val[idx + 1] = (u64)val2;
	config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
	/*
	 * Program include or exclude control bits for vinst or vdata
	 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
	 */
	if (config->mode & ETM_MODE_EXCLUDE)
		etm4_set_mode_exclude(drvdata, true);
	else
		etm4_set_mode_exclude(drvdata, false);

	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_range);
1022
/*
 * "addr_start" - use the selected comparator as a start address for the
 * ViewInst start/stop logic (TRCVISSCTLR start bits, one per comparator).
 */
static ssize_t addr_start_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	/* comparator must be unconfigured or already a START comparator */
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_start_store(struct device *dev,
				struct device_attribute *attr,
				const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_START;
	/* start bits occupy the low half of TRCVISSCTLR */
	config->vissctlr |= BIT(idx);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_start);
1079
/*
 * "addr_stop" - use the selected comparator as a stop address for the
 * ViewInst start/stop logic (TRCVISSCTLR stop bits, upper half).
 */
static ssize_t addr_stop_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;

	/* comparator must be unconfigured or already a STOP comparator */
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	val = (unsigned long)config->addr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_stop_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!drvdata->nr_addr_cmp) {
		spin_unlock(&drvdata->spinlock);
		return -EINVAL;
	}
	if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
	      config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
		spin_unlock(&drvdata->spinlock);
		return -EPERM;
	}

	config->addr_val[idx] = (u64)val;
	config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
	/* stop bits start at bit 16 of TRCVISSCTLR */
	config->vissctlr |= BIT(idx + 16);
	/* SSSTATUS, bit[9] - turn on start/stop logic */
	config->vinst_ctrl |= BIT(9);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_stop);
1136
/*
 * "addr_ctxtype" - context comparison performed by the selected address
 * comparator (TRCACATRn.CONTEXTTYPE, bits[3:2]): none, ctxid, vmid or all.
 */
static ssize_t addr_ctxtype_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	ssize_t len;
	u8 idx, val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* CONTEXTTYPE, bits[3:2] */
	val = BMVAL(config->addr_acc[idx], 2, 3);
	len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
			(val == ETM_CTX_CTXID ? "ctxid" :
			(val == ETM_CTX_VMID ? "vmid" : "all")));
	spin_unlock(&drvdata->spinlock);
	return len;
}

static ssize_t addr_ctxtype_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	char str[10] = "";
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* bound the input before parsing into the fixed-size buffer */
	if (strlen(buf) >= 10)
		return -EINVAL;
	if (sscanf(buf, "%s", str) != 1)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	if (!strcmp(str, "none"))
		/* start by clearing context type bits */
		config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
	else if (!strcmp(str, "ctxid")) {
		/* 0b01 The trace unit performs a Context ID */
		if (drvdata->numcidc) {
			config->addr_acc[idx] |= BIT(2);
			config->addr_acc[idx] &= ~BIT(3);
		}
	} else if (!strcmp(str, "vmid")) {
		/* 0b10 The trace unit performs a VMID */
		if (drvdata->numvmidc) {
			config->addr_acc[idx] &= ~BIT(2);
			config->addr_acc[idx] |= BIT(3);
		}
	} else if (!strcmp(str, "all")) {
		/*
		 * 0b11 The trace unit performs a Context ID
		 * comparison and a VMID
		 */
		if (drvdata->numcidc)
			config->addr_acc[idx] |= BIT(2);
		if (drvdata->numvmidc)
			config->addr_acc[idx] |= BIT(3);
	}
	/* unrecognised keywords leave the configuration unchanged */
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_ctxtype);
1202
/*
 * "addr_context" - which context ID / VMID comparator the selected address
 * comparator is linked to (TRCACATRn, bits[6:4]).
 */
static ssize_t addr_context_show(struct device *dev,
				 struct device_attribute *attr,
				 char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* context ID comparator bits[6:4] */
	val = BMVAL(config->addr_acc[idx], 4, 6);
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t addr_context_store(struct device *dev,
				  struct device_attribute *attr,
				  const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* need more than one ctxid or vmid comparator for this to matter */
	if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
		return -EINVAL;
	/* index must fit the larger of the two comparator counts */
	if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
		    drvdata->numcidc : drvdata->numvmidc))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->addr_idx;
	/* clear context ID comparator bits[6:4] */
	config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
	config->addr_acc[idx] |= (val << 4);
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(addr_context);
1246
/*
 * "seq_idx" - selects which sequencer state transition register the
 * seq_event sysfs file operates on.  Valid range is [0, nrseqstate - 1).
 */
static ssize_t seq_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* there is one transition register fewer than there are states */
	if (val >= drvdata->nrseqstate - 1)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->seq_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_idx);
1282
1283static ssize_t seq_state_show(struct device *dev,
1284 struct device_attribute *attr,
1285 char *buf)
1286{
1287 unsigned long val;
1288 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1289 struct etmv4_config *config = &drvdata->config;
1290
1291 val = config->seq_state;
1292 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1293}
1294
1295static ssize_t seq_state_store(struct device *dev,
1296 struct device_attribute *attr,
1297 const char *buf, size_t size)
1298{
1299 unsigned long val;
1300 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1301 struct etmv4_config *config = &drvdata->config;
1302
1303 if (kstrtoul(buf, 16, &val))
1304 return -EINVAL;
1305 if (val >= drvdata->nrseqstate)
1306 return -EINVAL;
1307
1308 config->seq_state = val;
1309 return size;
1310}
1311static DEVICE_ATTR_RW(seq_state);
1312
/*
 * "seq_event" - event controlling the transition out of the sequencer
 * state selected by seq_idx (TRCSEQEVRn).
 */
static ssize_t seq_event_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* lock so seq_idx cannot change between the two reads */
	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	val = config->seq_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_event_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->seq_idx;
	/* RST, bits[7:0] */
	config->seq_ctrl[idx] = val & 0xFF;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(seq_event);
1349
/*
 * "seq_reset_event" - event that moves the sequencer back to state 0
 * (TRCSEQRSTEVR).  Only valid when the unit implements a sequencer.
 */
static ssize_t seq_reset_event_show(struct device *dev,
				    struct device_attribute *attr,
				    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->seq_rst;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t seq_reset_event_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* no sequencer states implies no reset event register */
	if (!(drvdata->nrseqstate))
		return -EINVAL;

	config->seq_rst = val & ETMv4_EVENT_MASK;
	return size;
}
static DEVICE_ATTR_RW(seq_reset_event);
1379
/*
 * "cntr_idx" - selects which counter the cntrldvr/cntr_val/cntr_ctrl
 * sysfs files operate on.  Valid range is [0, nr_cntr).
 */
static ssize_t cntr_idx_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->cntr_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_idx_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->nr_cntr)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->cntr_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_idx);
1415
/*
 * "cntrldvr" - reload value of the counter selected by cntr_idx
 * (TRCCNTRLDVRn).  Capped at ETM_CNTR_MAX_VAL.
 */
static ssize_t cntrldvr_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* lock so cntr_idx cannot change between the two reads */
	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntrldvr[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntrldvr_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntrldvr[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntrldvr);
1453
/*
 * "cntr_val" - current value of the counter selected by cntr_idx
 * (TRCCNTVRn).  Capped at ETM_CNTR_MAX_VAL.
 */
static ssize_t cntr_val_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* lock so cntr_idx cannot change between the two reads */
	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_val[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_val_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val > ETM_CNTR_MAX_VAL)
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_val[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_val);
1491
/*
 * "cntr_ctrl" - control register of the counter selected by cntr_idx
 * (TRCCNTCTLRn): count/reload events and self-reload behaviour.
 */
static ssize_t cntr_ctrl_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* lock so cntr_idx cannot change between the two reads */
	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	val = config->cntr_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t cntr_ctrl_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->cntr_idx;
	config->cntr_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(cntr_ctrl);
1527
/*
 * "res_idx" - selects which resource selection register the res_ctrl
 * sysfs file operates on.  Selector 0 is reserved, so valid values are
 * [1, nr_resource).
 */
static ssize_t res_idx_show(struct device *dev,
			    struct device_attribute *attr,
			    char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->res_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_idx_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	/* Resource selector pair 0 is always implemented and reserved */
	if ((val == 0) || (val >= drvdata->nr_resource))
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->res_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_idx);
1564
/*
 * "res_ctrl" - resource selection control register selected by res_idx
 * (TRCRSCTLRn).
 */
static ssize_t res_ctrl_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* lock so res_idx cannot change between the two reads */
	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	val = config->res_ctrl[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t res_ctrl_store(struct device *dev,
			      struct device_attribute *attr,
			      const char *buf, size_t size)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;

	spin_lock(&drvdata->spinlock);
	idx = config->res_idx;
	/* For odd idx pair inversal bit is RES0 */
	if (idx % 2 != 0)
		/* PAIRINV, bit[21] */
		val &= ~BIT(21);
	config->res_ctrl[idx] = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(res_ctrl);
1604
/*
 * "ctxid_idx" - selects which context ID comparator the ctxid_pid sysfs
 * file operates on.  Valid range is [0, numcidc).
 */
static ssize_t ctxid_idx_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	val = config->ctxid_idx;
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_idx_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	if (kstrtoul(buf, 16, &val))
		return -EINVAL;
	if (val >= drvdata->numcidc)
		return -EINVAL;

	/*
	 * Use spinlock to ensure index doesn't change while it gets
	 * dereferenced multiple times within a spinlock block elsewhere.
	 */
	spin_lock(&drvdata->spinlock);
	config->ctxid_idx = val;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_idx);
1640
/*
 * "ctxid_pid" - PID programmed in the context ID comparator selected by
 * ctxid_idx.  The value written is a virtual PID; it is translated to the
 * kernel-view PID before being stored (both values are kept).
 */
static ssize_t ctxid_pid_show(struct device *dev,
			      struct device_attribute *attr,
			      char *buf)
{
	u8 idx;
	unsigned long val;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/* lock so ctxid_idx cannot change between the two reads */
	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	/* report the virtual (namespace-view) PID, as written */
	val = (unsigned long)config->ctxid_vpid[idx];
	spin_unlock(&drvdata->spinlock);
	return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
}

static ssize_t ctxid_pid_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t size)
{
	u8 idx;
	unsigned long vpid, pid;
	struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
	struct etmv4_config *config = &drvdata->config;

	/*
	 * only implemented when ctxid tracing is enabled, i.e. at least one
	 * ctxid comparator is implemented and ctxid is greater than 0 bits
	 * in length
	 */
	if (!drvdata->ctxid_size || !drvdata->numcidc)
		return -EINVAL;
	if (kstrtoul(buf, 16, &vpid))
		return -EINVAL;

	/* translate namespace-view PID to the kernel-view PID */
	pid = coresight_vpid_to_pid(vpid);

	spin_lock(&drvdata->spinlock);
	idx = config->ctxid_idx;
	config->ctxid_pid[idx] = (u64)pid;
	config->ctxid_vpid[idx] = (u64)vpid;
	spin_unlock(&drvdata->spinlock);
	return size;
}
static DEVICE_ATTR_RW(ctxid_pid);
1686
1687static ssize_t ctxid_masks_show(struct device *dev,
1688 struct device_attribute *attr,
1689 char *buf)
1690{
1691 unsigned long val1, val2;
1692 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1693 struct etmv4_config *config = &drvdata->config;
1694
1695 spin_lock(&drvdata->spinlock);
1696 val1 = config->ctxid_mask0;
1697 val2 = config->ctxid_mask1;
1698 spin_unlock(&drvdata->spinlock);
1699 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1700}
1701
1702static ssize_t ctxid_masks_store(struct device *dev,
1703 struct device_attribute *attr,
1704 const char *buf, size_t size)
1705{
1706 u8 i, j, maskbyte;
1707 unsigned long val1, val2, mask;
1708 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1709 struct etmv4_config *config = &drvdata->config;
1710
1711 /*
1712 * only implemented when ctxid tracing is enabled, i.e. at least one
1713 * ctxid comparator is implemented and ctxid is greater than 0 bits
1714 * in length
1715 */
1716 if (!drvdata->ctxid_size || !drvdata->numcidc)
1717 return -EINVAL;
1718 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1719 return -EINVAL;
1720
1721 spin_lock(&drvdata->spinlock);
1722 /*
1723 * each byte[0..3] controls mask value applied to ctxid
1724 * comparator[0..3]
1725 */
1726 switch (drvdata->numcidc) {
1727 case 0x1:
1728 /* COMP0, bits[7:0] */
1729 config->ctxid_mask0 = val1 & 0xFF;
1730 break;
1731 case 0x2:
1732 /* COMP1, bits[15:8] */
1733 config->ctxid_mask0 = val1 & 0xFFFF;
1734 break;
1735 case 0x3:
1736 /* COMP2, bits[23:16] */
1737 config->ctxid_mask0 = val1 & 0xFFFFFF;
1738 break;
1739 case 0x4:
1740 /* COMP3, bits[31:24] */
1741 config->ctxid_mask0 = val1;
1742 break;
1743 case 0x5:
1744 /* COMP4, bits[7:0] */
1745 config->ctxid_mask0 = val1;
1746 config->ctxid_mask1 = val2 & 0xFF;
1747 break;
1748 case 0x6:
1749 /* COMP5, bits[15:8] */
1750 config->ctxid_mask0 = val1;
1751 config->ctxid_mask1 = val2 & 0xFFFF;
1752 break;
1753 case 0x7:
1754 /* COMP6, bits[23:16] */
1755 config->ctxid_mask0 = val1;
1756 config->ctxid_mask1 = val2 & 0xFFFFFF;
1757 break;
1758 case 0x8:
1759 /* COMP7, bits[31:24] */
1760 config->ctxid_mask0 = val1;
1761 config->ctxid_mask1 = val2;
1762 break;
1763 default:
1764 break;
1765 }
1766 /*
1767 * If software sets a mask bit to 1, it must program relevant byte
1768 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
1769 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1770 * of ctxid comparator0 value (corresponding to byte 0) register.
1771 */
1772 mask = config->ctxid_mask0;
1773 for (i = 0; i < drvdata->numcidc; i++) {
1774 /* mask value of corresponding ctxid comparator */
1775 maskbyte = mask & ETMv4_EVENT_MASK;
1776 /*
1777 * each bit corresponds to a byte of respective ctxid comparator
1778 * value register
1779 */
1780 for (j = 0; j < 8; j++) {
1781 if (maskbyte & 1)
1782 config->ctxid_pid[i] &= ~(0xFF << (j * 8));
1783 maskbyte >>= 1;
1784 }
1785 /* Select the next ctxid comparator mask value */
1786 if (i == 3)
1787 /* ctxid comparators[4-7] */
1788 mask = config->ctxid_mask1;
1789 else
1790 mask >>= 0x8;
1791 }
1792
1793 spin_unlock(&drvdata->spinlock);
1794 return size;
1795}
1796static DEVICE_ATTR_RW(ctxid_masks);
1797
1798static ssize_t vmid_idx_show(struct device *dev,
1799 struct device_attribute *attr,
1800 char *buf)
1801{
1802 unsigned long val;
1803 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1804 struct etmv4_config *config = &drvdata->config;
1805
1806 val = config->vmid_idx;
1807 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1808}
1809
1810static ssize_t vmid_idx_store(struct device *dev,
1811 struct device_attribute *attr,
1812 const char *buf, size_t size)
1813{
1814 unsigned long val;
1815 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1816 struct etmv4_config *config = &drvdata->config;
1817
1818 if (kstrtoul(buf, 16, &val))
1819 return -EINVAL;
1820 if (val >= drvdata->numvmidc)
1821 return -EINVAL;
1822
1823 /*
1824 * Use spinlock to ensure index doesn't change while it gets
1825 * dereferenced multiple times within a spinlock block elsewhere.
1826 */
1827 spin_lock(&drvdata->spinlock);
1828 config->vmid_idx = val;
1829 spin_unlock(&drvdata->spinlock);
1830 return size;
1831}
1832static DEVICE_ATTR_RW(vmid_idx);
1833
1834static ssize_t vmid_val_show(struct device *dev,
1835 struct device_attribute *attr,
1836 char *buf)
1837{
1838 unsigned long val;
1839 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1840 struct etmv4_config *config = &drvdata->config;
1841
1842 val = (unsigned long)config->vmid_val[config->vmid_idx];
1843 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1844}
1845
1846static ssize_t vmid_val_store(struct device *dev,
1847 struct device_attribute *attr,
1848 const char *buf, size_t size)
1849{
1850 unsigned long val;
1851 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1852 struct etmv4_config *config = &drvdata->config;
1853
1854 /*
1855 * only implemented when vmid tracing is enabled, i.e. at least one
1856 * vmid comparator is implemented and at least 8 bit vmid size
1857 */
1858 if (!drvdata->vmid_size || !drvdata->numvmidc)
1859 return -EINVAL;
1860 if (kstrtoul(buf, 16, &val))
1861 return -EINVAL;
1862
1863 spin_lock(&drvdata->spinlock);
1864 config->vmid_val[config->vmid_idx] = (u64)val;
1865 spin_unlock(&drvdata->spinlock);
1866 return size;
1867}
1868static DEVICE_ATTR_RW(vmid_val);
1869
1870static ssize_t vmid_masks_show(struct device *dev,
1871 struct device_attribute *attr, char *buf)
1872{
1873 unsigned long val1, val2;
1874 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1875 struct etmv4_config *config = &drvdata->config;
1876
1877 spin_lock(&drvdata->spinlock);
1878 val1 = config->vmid_mask0;
1879 val2 = config->vmid_mask1;
1880 spin_unlock(&drvdata->spinlock);
1881 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1882}
1883
1884static ssize_t vmid_masks_store(struct device *dev,
1885 struct device_attribute *attr,
1886 const char *buf, size_t size)
1887{
1888 u8 i, j, maskbyte;
1889 unsigned long val1, val2, mask;
1890 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1891 struct etmv4_config *config = &drvdata->config;
1892
1893 /*
1894 * only implemented when vmid tracing is enabled, i.e. at least one
1895 * vmid comparator is implemented and at least 8 bit vmid size
1896 */
1897 if (!drvdata->vmid_size || !drvdata->numvmidc)
1898 return -EINVAL;
1899 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1900 return -EINVAL;
1901
1902 spin_lock(&drvdata->spinlock);
1903
1904 /*
1905 * each byte[0..3] controls mask value applied to vmid
1906 * comparator[0..3]
1907 */
1908 switch (drvdata->numvmidc) {
1909 case 0x1:
1910 /* COMP0, bits[7:0] */
1911 config->vmid_mask0 = val1 & 0xFF;
1912 break;
1913 case 0x2:
1914 /* COMP1, bits[15:8] */
1915 config->vmid_mask0 = val1 & 0xFFFF;
1916 break;
1917 case 0x3:
1918 /* COMP2, bits[23:16] */
1919 config->vmid_mask0 = val1 & 0xFFFFFF;
1920 break;
1921 case 0x4:
1922 /* COMP3, bits[31:24] */
1923 config->vmid_mask0 = val1;
1924 break;
1925 case 0x5:
1926 /* COMP4, bits[7:0] */
1927 config->vmid_mask0 = val1;
1928 config->vmid_mask1 = val2 & 0xFF;
1929 break;
1930 case 0x6:
1931 /* COMP5, bits[15:8] */
1932 config->vmid_mask0 = val1;
1933 config->vmid_mask1 = val2 & 0xFFFF;
1934 break;
1935 case 0x7:
1936 /* COMP6, bits[23:16] */
1937 config->vmid_mask0 = val1;
1938 config->vmid_mask1 = val2 & 0xFFFFFF;
1939 break;
1940 case 0x8:
1941 /* COMP7, bits[31:24] */
1942 config->vmid_mask0 = val1;
1943 config->vmid_mask1 = val2;
1944 break;
1945 default:
1946 break;
1947 }
1948
1949 /*
1950 * If software sets a mask bit to 1, it must program relevant byte
1951 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
1952 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
1953 * of vmid comparator0 value (corresponding to byte 0) register.
1954 */
1955 mask = config->vmid_mask0;
1956 for (i = 0; i < drvdata->numvmidc; i++) {
1957 /* mask value of corresponding vmid comparator */
1958 maskbyte = mask & ETMv4_EVENT_MASK;
1959 /*
1960 * each bit corresponds to a byte of respective vmid comparator
1961 * value register
1962 */
1963 for (j = 0; j < 8; j++) {
1964 if (maskbyte & 1)
1965 config->vmid_val[i] &= ~(0xFF << (j * 8));
1966 maskbyte >>= 1;
1967 }
1968 /* Select the next vmid comparator mask value */
1969 if (i == 3)
1970 /* vmid comparators[4-7] */
1971 mask = config->vmid_mask1;
1972 else
1973 mask >>= 0x8;
1974 }
1975 spin_unlock(&drvdata->spinlock);
1976 return size;
1977}
1978static DEVICE_ATTR_RW(vmid_masks);
1979
1980static ssize_t cpu_show(struct device *dev,
1981 struct device_attribute *attr, char *buf)
1982{
1983 int val;
1984 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1985
1986 val = drvdata->cpu;
1987 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
1988
1989}
1990static DEVICE_ATTR_RO(cpu);
1991
/*
 * Configuration attributes exposed at the top level of the coresight
 * device's sysfs directory: implemented-resource counts (nr_*, num*),
 * the reset/mode controls and the per-resource programming knobs
 * (address, sequencer, counter, ctxid and vmid comparators).
 */
static struct attribute *coresight_etmv4_attrs[] = {
	&dev_attr_nr_pe_cmp.attr,
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ext_inp.attr,
	&dev_attr_numcidc.attr,
	&dev_attr_numvmidc.attr,
	&dev_attr_nrseqstate.attr,
	&dev_attr_nr_resource.attr,
	&dev_attr_nr_ss_cmp.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_pe.attr,
	&dev_attr_event.attr,
	&dev_attr_event_instren.attr,
	&dev_attr_event_ts.attr,
	&dev_attr_syncfreq.attr,
	&dev_attr_cyc_threshold.attr,
	&dev_attr_bb_ctrl.attr,
	&dev_attr_event_vinst.attr,
	&dev_attr_s_exlevel_vinst.attr,
	&dev_attr_ns_exlevel_vinst.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_instdatatype.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_ctxtype.attr,
	&dev_attr_addr_context.attr,
	&dev_attr_seq_idx.attr,
	&dev_attr_seq_state.attr,
	&dev_attr_seq_event.attr,
	&dev_attr_seq_reset_event.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntrldvr.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_cntr_ctrl.attr,
	&dev_attr_res_idx.attr,
	&dev_attr_res_ctrl.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_pid.attr,
	&dev_attr_ctxid_masks.attr,
	&dev_attr_vmid_idx.attr,
	&dev_attr_vmid_val.attr,
	&dev_attr_vmid_masks.attr,
	&dev_attr_cpu.attr,
	NULL,
};
2041
/*
 * Generate a read-only sysfs "show" handler (and matching DEVICE_ATTR)
 * that dumps the ETMv4 register at @offset; delegates to the generic
 * coresight_simple_func() helper with this driver's drvdata type.
 */
#define coresight_etm4x_simple_func(name, offset)			\
	coresight_simple_func(struct etmv4_drvdata, name, offset)

/* Management/status registers: OS lock, powerdown, authentication, IDs */
coresight_etm4x_simple_func(trcoslsr, TRCOSLSR);
coresight_etm4x_simple_func(trcpdcr, TRCPDCR);
coresight_etm4x_simple_func(trcpdsr, TRCPDSR);
coresight_etm4x_simple_func(trclsr, TRCLSR);
coresight_etm4x_simple_func(trcconfig, TRCCONFIGR);
coresight_etm4x_simple_func(trctraceid, TRCTRACEIDR);
coresight_etm4x_simple_func(trcauthstatus, TRCAUTHSTATUS);
coresight_etm4x_simple_func(trcdevid, TRCDEVID);
coresight_etm4x_simple_func(trcdevtype, TRCDEVTYPE);
coresight_etm4x_simple_func(trcpidr0, TRCPIDR0);
coresight_etm4x_simple_func(trcpidr1, TRCPIDR1);
coresight_etm4x_simple_func(trcpidr2, TRCPIDR2);
coresight_etm4x_simple_func(trcpidr3, TRCPIDR3);
2058
/* Read-only management register dumps, grouped under the "mgmt" directory */
static struct attribute *coresight_etmv4_mgmt_attrs[] = {
	&dev_attr_trcoslsr.attr,
	&dev_attr_trcpdcr.attr,
	&dev_attr_trcpdsr.attr,
	&dev_attr_trclsr.attr,
	&dev_attr_trcconfig.attr,
	&dev_attr_trctraceid.attr,
	&dev_attr_trcauthstatus.attr,
	&dev_attr_trcdevid.attr,
	&dev_attr_trcdevtype.attr,
	&dev_attr_trcpidr0.attr,
	&dev_attr_trcpidr1.attr,
	&dev_attr_trcpidr2.attr,
	&dev_attr_trcpidr3.attr,
	NULL,
};
2075
/* ID registers describing the implemented trace unit features */
coresight_etm4x_simple_func(trcidr0, TRCIDR0);
coresight_etm4x_simple_func(trcidr1, TRCIDR1);
coresight_etm4x_simple_func(trcidr2, TRCIDR2);
coresight_etm4x_simple_func(trcidr3, TRCIDR3);
coresight_etm4x_simple_func(trcidr4, TRCIDR4);
coresight_etm4x_simple_func(trcidr5, TRCIDR5);
/* trcidr[6,7] are reserved */
coresight_etm4x_simple_func(trcidr8, TRCIDR8);
coresight_etm4x_simple_func(trcidr9, TRCIDR9);
coresight_etm4x_simple_func(trcidr10, TRCIDR10);
coresight_etm4x_simple_func(trcidr11, TRCIDR11);
coresight_etm4x_simple_func(trcidr12, TRCIDR12);
coresight_etm4x_simple_func(trcidr13, TRCIDR13);
2089
/* Read-only ID register dumps, grouped under the "trcidr" directory */
static struct attribute *coresight_etmv4_trcidr_attrs[] = {
	&dev_attr_trcidr0.attr,
	&dev_attr_trcidr1.attr,
	&dev_attr_trcidr2.attr,
	&dev_attr_trcidr3.attr,
	&dev_attr_trcidr4.attr,
	&dev_attr_trcidr5.attr,
	/* trcidr[6,7] are reserved */
	&dev_attr_trcidr8.attr,
	&dev_attr_trcidr9.attr,
	&dev_attr_trcidr10.attr,
	&dev_attr_trcidr11.attr,
	&dev_attr_trcidr12.attr,
	&dev_attr_trcidr13.attr,
	NULL,
};
2106
/* Default (unnamed) group: configuration knobs at the device's top level */
static const struct attribute_group coresight_etmv4_group = {
	.attrs = coresight_etmv4_attrs,
};

/* "mgmt" subdirectory: management/status register dumps */
static const struct attribute_group coresight_etmv4_mgmt_group = {
	.attrs = coresight_etmv4_mgmt_attrs,
	.name = "mgmt",
};

/* "trcidr" subdirectory: ID register dumps */
static const struct attribute_group coresight_etmv4_trcidr_group = {
	.attrs = coresight_etmv4_trcidr_attrs,
	.name = "trcidr",
};

/* NULL-terminated list handed to the coresight core at registration */
const struct attribute_group *coresight_etmv4_groups[] = {
	&coresight_etmv4_group,
	&coresight_etmv4_mgmt_group,
	&coresight_etmv4_trcidr_group,
	NULL,
};
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 1c59bd36834c..462f0dc15757 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -26,15 +26,19 @@
26#include <linux/clk.h> 26#include <linux/clk.h>
27#include <linux/cpu.h> 27#include <linux/cpu.h>
28#include <linux/coresight.h> 28#include <linux/coresight.h>
29#include <linux/coresight-pmu.h>
29#include <linux/pm_wakeup.h> 30#include <linux/pm_wakeup.h>
30#include <linux/amba/bus.h> 31#include <linux/amba/bus.h>
31#include <linux/seq_file.h> 32#include <linux/seq_file.h>
32#include <linux/uaccess.h> 33#include <linux/uaccess.h>
34#include <linux/perf_event.h>
33#include <linux/pm_runtime.h> 35#include <linux/pm_runtime.h>
34#include <linux/perf_event.h> 36#include <linux/perf_event.h>
35#include <asm/sections.h> 37#include <asm/sections.h>
38#include <asm/local.h>
36 39
37#include "coresight-etm4x.h" 40#include "coresight-etm4x.h"
41#include "coresight-etm-perf.h"
38 42
39static int boot_enable; 43static int boot_enable;
40module_param_named(boot_enable, boot_enable, int, S_IRUGO); 44module_param_named(boot_enable, boot_enable, int, S_IRUGO);
@@ -42,13 +46,13 @@ module_param_named(boot_enable, boot_enable, int, S_IRUGO);
42/* The number of ETMv4 currently registered */ 46/* The number of ETMv4 currently registered */
43static int etm4_count; 47static int etm4_count;
44static struct etmv4_drvdata *etmdrvdata[NR_CPUS]; 48static struct etmv4_drvdata *etmdrvdata[NR_CPUS];
49static void etm4_set_default(struct etmv4_config *config);
45 50
46static void etm4_os_unlock(void *info) 51static void etm4_os_unlock(struct etmv4_drvdata *drvdata)
47{ 52{
48 struct etmv4_drvdata *drvdata = (struct etmv4_drvdata *)info;
49
50 /* Writing any value to ETMOSLAR unlocks the trace registers */ 53 /* Writing any value to ETMOSLAR unlocks the trace registers */
51 writel_relaxed(0x0, drvdata->base + TRCOSLAR); 54 writel_relaxed(0x0, drvdata->base + TRCOSLAR);
55 drvdata->os_unlock = true;
52 isb(); 56 isb();
53} 57}
54 58
@@ -76,7 +80,7 @@ static int etm4_trace_id(struct coresight_device *csdev)
76 unsigned long flags; 80 unsigned long flags;
77 int trace_id = -1; 81 int trace_id = -1;
78 82
79 if (!drvdata->enable) 83 if (!local_read(&drvdata->mode))
80 return drvdata->trcid; 84 return drvdata->trcid;
81 85
82 spin_lock_irqsave(&drvdata->spinlock, flags); 86 spin_lock_irqsave(&drvdata->spinlock, flags);
@@ -95,6 +99,7 @@ static void etm4_enable_hw(void *info)
95{ 99{
96 int i; 100 int i;
97 struct etmv4_drvdata *drvdata = info; 101 struct etmv4_drvdata *drvdata = info;
102 struct etmv4_config *config = &drvdata->config;
98 103
99 CS_UNLOCK(drvdata->base); 104 CS_UNLOCK(drvdata->base);
100 105
@@ -109,69 +114,69 @@ static void etm4_enable_hw(void *info)
109 "timeout observed when probing at offset %#x\n", 114 "timeout observed when probing at offset %#x\n",
110 TRCSTATR); 115 TRCSTATR);
111 116
112 writel_relaxed(drvdata->pe_sel, drvdata->base + TRCPROCSELR); 117 writel_relaxed(config->pe_sel, drvdata->base + TRCPROCSELR);
113 writel_relaxed(drvdata->cfg, drvdata->base + TRCCONFIGR); 118 writel_relaxed(config->cfg, drvdata->base + TRCCONFIGR);
114 /* nothing specific implemented */ 119 /* nothing specific implemented */
115 writel_relaxed(0x0, drvdata->base + TRCAUXCTLR); 120 writel_relaxed(0x0, drvdata->base + TRCAUXCTLR);
116 writel_relaxed(drvdata->eventctrl0, drvdata->base + TRCEVENTCTL0R); 121 writel_relaxed(config->eventctrl0, drvdata->base + TRCEVENTCTL0R);
117 writel_relaxed(drvdata->eventctrl1, drvdata->base + TRCEVENTCTL1R); 122 writel_relaxed(config->eventctrl1, drvdata->base + TRCEVENTCTL1R);
118 writel_relaxed(drvdata->stall_ctrl, drvdata->base + TRCSTALLCTLR); 123 writel_relaxed(config->stall_ctrl, drvdata->base + TRCSTALLCTLR);
119 writel_relaxed(drvdata->ts_ctrl, drvdata->base + TRCTSCTLR); 124 writel_relaxed(config->ts_ctrl, drvdata->base + TRCTSCTLR);
120 writel_relaxed(drvdata->syncfreq, drvdata->base + TRCSYNCPR); 125 writel_relaxed(config->syncfreq, drvdata->base + TRCSYNCPR);
121 writel_relaxed(drvdata->ccctlr, drvdata->base + TRCCCCTLR); 126 writel_relaxed(config->ccctlr, drvdata->base + TRCCCCTLR);
122 writel_relaxed(drvdata->bb_ctrl, drvdata->base + TRCBBCTLR); 127 writel_relaxed(config->bb_ctrl, drvdata->base + TRCBBCTLR);
123 writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR); 128 writel_relaxed(drvdata->trcid, drvdata->base + TRCTRACEIDR);
124 writel_relaxed(drvdata->vinst_ctrl, drvdata->base + TRCVICTLR); 129 writel_relaxed(config->vinst_ctrl, drvdata->base + TRCVICTLR);
125 writel_relaxed(drvdata->viiectlr, drvdata->base + TRCVIIECTLR); 130 writel_relaxed(config->viiectlr, drvdata->base + TRCVIIECTLR);
126 writel_relaxed(drvdata->vissctlr, 131 writel_relaxed(config->vissctlr,
127 drvdata->base + TRCVISSCTLR); 132 drvdata->base + TRCVISSCTLR);
128 writel_relaxed(drvdata->vipcssctlr, 133 writel_relaxed(config->vipcssctlr,
129 drvdata->base + TRCVIPCSSCTLR); 134 drvdata->base + TRCVIPCSSCTLR);
130 for (i = 0; i < drvdata->nrseqstate - 1; i++) 135 for (i = 0; i < drvdata->nrseqstate - 1; i++)
131 writel_relaxed(drvdata->seq_ctrl[i], 136 writel_relaxed(config->seq_ctrl[i],
132 drvdata->base + TRCSEQEVRn(i)); 137 drvdata->base + TRCSEQEVRn(i));
133 writel_relaxed(drvdata->seq_rst, drvdata->base + TRCSEQRSTEVR); 138 writel_relaxed(config->seq_rst, drvdata->base + TRCSEQRSTEVR);
134 writel_relaxed(drvdata->seq_state, drvdata->base + TRCSEQSTR); 139 writel_relaxed(config->seq_state, drvdata->base + TRCSEQSTR);
135 writel_relaxed(drvdata->ext_inp, drvdata->base + TRCEXTINSELR); 140 writel_relaxed(config->ext_inp, drvdata->base + TRCEXTINSELR);
136 for (i = 0; i < drvdata->nr_cntr; i++) { 141 for (i = 0; i < drvdata->nr_cntr; i++) {
137 writel_relaxed(drvdata->cntrldvr[i], 142 writel_relaxed(config->cntrldvr[i],
138 drvdata->base + TRCCNTRLDVRn(i)); 143 drvdata->base + TRCCNTRLDVRn(i));
139 writel_relaxed(drvdata->cntr_ctrl[i], 144 writel_relaxed(config->cntr_ctrl[i],
140 drvdata->base + TRCCNTCTLRn(i)); 145 drvdata->base + TRCCNTCTLRn(i));
141 writel_relaxed(drvdata->cntr_val[i], 146 writel_relaxed(config->cntr_val[i],
142 drvdata->base + TRCCNTVRn(i)); 147 drvdata->base + TRCCNTVRn(i));
143 } 148 }
144 149
145 /* Resource selector pair 0 is always implemented and reserved */ 150 /* Resource selector pair 0 is always implemented and reserved */
146 for (i = 2; i < drvdata->nr_resource * 2; i++) 151 for (i = 0; i < drvdata->nr_resource * 2; i++)
147 writel_relaxed(drvdata->res_ctrl[i], 152 writel_relaxed(config->res_ctrl[i],
148 drvdata->base + TRCRSCTLRn(i)); 153 drvdata->base + TRCRSCTLRn(i));
149 154
150 for (i = 0; i < drvdata->nr_ss_cmp; i++) { 155 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
151 writel_relaxed(drvdata->ss_ctrl[i], 156 writel_relaxed(config->ss_ctrl[i],
152 drvdata->base + TRCSSCCRn(i)); 157 drvdata->base + TRCSSCCRn(i));
153 writel_relaxed(drvdata->ss_status[i], 158 writel_relaxed(config->ss_status[i],
154 drvdata->base + TRCSSCSRn(i)); 159 drvdata->base + TRCSSCSRn(i));
155 writel_relaxed(drvdata->ss_pe_cmp[i], 160 writel_relaxed(config->ss_pe_cmp[i],
156 drvdata->base + TRCSSPCICRn(i)); 161 drvdata->base + TRCSSPCICRn(i));
157 } 162 }
158 for (i = 0; i < drvdata->nr_addr_cmp; i++) { 163 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
159 writeq_relaxed(drvdata->addr_val[i], 164 writeq_relaxed(config->addr_val[i],
160 drvdata->base + TRCACVRn(i)); 165 drvdata->base + TRCACVRn(i));
161 writeq_relaxed(drvdata->addr_acc[i], 166 writeq_relaxed(config->addr_acc[i],
162 drvdata->base + TRCACATRn(i)); 167 drvdata->base + TRCACATRn(i));
163 } 168 }
164 for (i = 0; i < drvdata->numcidc; i++) 169 for (i = 0; i < drvdata->numcidc; i++)
165 writeq_relaxed(drvdata->ctxid_pid[i], 170 writeq_relaxed(config->ctxid_pid[i],
166 drvdata->base + TRCCIDCVRn(i)); 171 drvdata->base + TRCCIDCVRn(i));
167 writel_relaxed(drvdata->ctxid_mask0, drvdata->base + TRCCIDCCTLR0); 172 writel_relaxed(config->ctxid_mask0, drvdata->base + TRCCIDCCTLR0);
168 writel_relaxed(drvdata->ctxid_mask1, drvdata->base + TRCCIDCCTLR1); 173 writel_relaxed(config->ctxid_mask1, drvdata->base + TRCCIDCCTLR1);
169 174
170 for (i = 0; i < drvdata->numvmidc; i++) 175 for (i = 0; i < drvdata->numvmidc; i++)
171 writeq_relaxed(drvdata->vmid_val[i], 176 writeq_relaxed(config->vmid_val[i],
172 drvdata->base + TRCVMIDCVRn(i)); 177 drvdata->base + TRCVMIDCVRn(i));
173 writel_relaxed(drvdata->vmid_mask0, drvdata->base + TRCVMIDCCTLR0); 178 writel_relaxed(config->vmid_mask0, drvdata->base + TRCVMIDCCTLR0);
174 writel_relaxed(drvdata->vmid_mask1, drvdata->base + TRCVMIDCCTLR1); 179 writel_relaxed(config->vmid_mask1, drvdata->base + TRCVMIDCCTLR1);
175 180
176 /* Enable the trace unit */ 181 /* Enable the trace unit */
177 writel_relaxed(1, drvdata->base + TRCPRGCTLR); 182 writel_relaxed(1, drvdata->base + TRCPRGCTLR);
@@ -187,2120 +192,210 @@ static void etm4_enable_hw(void *info)
187 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu); 192 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
188} 193}
189 194
190static int etm4_enable(struct coresight_device *csdev, 195static int etm4_parse_event_config(struct etmv4_drvdata *drvdata,
191 struct perf_event_attr *attr, u32 mode) 196 struct perf_event_attr *attr)
192{ 197{
193 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 198 struct etmv4_config *config = &drvdata->config;
194 int ret;
195
196 spin_lock(&drvdata->spinlock);
197
198 /*
199 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
200 * ensures that register writes occur when cpu is powered.
201 */
202 ret = smp_call_function_single(drvdata->cpu,
203 etm4_enable_hw, drvdata, 1);
204 if (ret)
205 goto err;
206 drvdata->enable = true;
207 drvdata->sticky_enable = true;
208
209 spin_unlock(&drvdata->spinlock);
210 199
211 dev_info(drvdata->dev, "ETM tracing enabled\n"); 200 if (!attr)
212 return 0; 201 return -EINVAL;
213err:
214 spin_unlock(&drvdata->spinlock);
215 return ret;
216}
217 202
218static void etm4_disable_hw(void *info) 203 /* Clear configuration from previous run */
219{ 204 memset(config, 0, sizeof(struct etmv4_config));
220 u32 control;
221 struct etmv4_drvdata *drvdata = info;
222 205
223 CS_UNLOCK(drvdata->base); 206 if (attr->exclude_kernel)
207 config->mode = ETM_MODE_EXCL_KERN;
224 208
225 control = readl_relaxed(drvdata->base + TRCPRGCTLR); 209 if (attr->exclude_user)
210 config->mode = ETM_MODE_EXCL_USER;
226 211
227 /* EN, bit[0] Trace unit enable bit */ 212 /* Always start from the default config */
228 control &= ~0x1; 213 etm4_set_default(config);
229
230 /* make sure everything completes before disabling */
231 mb();
232 isb();
233 writel_relaxed(control, drvdata->base + TRCPRGCTLR);
234
235 CS_LOCK(drvdata->base);
236
237 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
238}
239
240static void etm4_disable(struct coresight_device *csdev)
241{
242 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
243 214
244 /* 215 /*
245 * Taking hotplug lock here protects from clocks getting disabled 216 * By default the tracers are configured to trace the whole address
246 * with tracing being left on (crash scenario) if user disable occurs 217 * range. Narrow the field only if requested by user space.
247 * after cpu online mask indicates the cpu is offline but before the
248 * DYING hotplug callback is serviced by the ETM driver.
249 */ 218 */
250 get_online_cpus(); 219 if (config->mode)
251 spin_lock(&drvdata->spinlock); 220 etm4_config_trace_mode(config);
252
253 /*
254 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
255 * ensures that register writes occur when cpu is powered.
256 */
257 smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
258 drvdata->enable = false;
259
260 spin_unlock(&drvdata->spinlock);
261 put_online_cpus();
262
263 dev_info(drvdata->dev, "ETM tracing disabled\n");
264}
265
266static const struct coresight_ops_source etm4_source_ops = {
267 .cpu_id = etm4_cpu_id,
268 .trace_id = etm4_trace_id,
269 .enable = etm4_enable,
270 .disable = etm4_disable,
271};
272
273static const struct coresight_ops etm4_cs_ops = {
274 .source_ops = &etm4_source_ops,
275};
276 221
277static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude) 222 /* Go from generic option to ETMv4 specifics */
278{ 223 if (attr->config & BIT(ETM_OPT_CYCACC))
279 u8 idx = drvdata->addr_idx; 224 config->cfg |= ETMv4_MODE_CYCACC;
225 if (attr->config & BIT(ETM_OPT_TS))
226 config->cfg |= ETMv4_MODE_TIMESTAMP;
280 227
281 /*
282 * TRCACATRn.TYPE bit[1:0]: type of comparison
283 * the trace unit performs
284 */
285 if (BMVAL(drvdata->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
286 if (idx % 2 != 0)
287 return -EINVAL;
288
289 /*
290 * We are performing instruction address comparison. Set the
291 * relevant bit of ViewInst Include/Exclude Control register
292 * for corresponding address comparator pair.
293 */
294 if (drvdata->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
295 drvdata->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
296 return -EINVAL;
297
298 if (exclude == true) {
299 /*
300 * Set exclude bit and unset the include bit
301 * corresponding to comparator pair
302 */
303 drvdata->viiectlr |= BIT(idx / 2 + 16);
304 drvdata->viiectlr &= ~BIT(idx / 2);
305 } else {
306 /*
307 * Set include bit and unset exclude bit
308 * corresponding to comparator pair
309 */
310 drvdata->viiectlr |= BIT(idx / 2);
311 drvdata->viiectlr &= ~BIT(idx / 2 + 16);
312 }
313 }
314 return 0; 228 return 0;
315} 229}
316 230
317static ssize_t nr_pe_cmp_show(struct device *dev, 231static int etm4_enable_perf(struct coresight_device *csdev,
318 struct device_attribute *attr, 232 struct perf_event_attr *attr)
319 char *buf)
320{ 233{
321 unsigned long val; 234 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
322 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
323
324 val = drvdata->nr_pe_cmp;
325 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
326}
327static DEVICE_ATTR_RO(nr_pe_cmp);
328
329static ssize_t nr_addr_cmp_show(struct device *dev,
330 struct device_attribute *attr,
331 char *buf)
332{
333 unsigned long val;
334 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
335
336 val = drvdata->nr_addr_cmp;
337 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
338}
339static DEVICE_ATTR_RO(nr_addr_cmp);
340
341static ssize_t nr_cntr_show(struct device *dev,
342 struct device_attribute *attr,
343 char *buf)
344{
345 unsigned long val;
346 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
347
348 val = drvdata->nr_cntr;
349 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
350}
351static DEVICE_ATTR_RO(nr_cntr);
352
353static ssize_t nr_ext_inp_show(struct device *dev,
354 struct device_attribute *attr,
355 char *buf)
356{
357 unsigned long val;
358 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
359
360 val = drvdata->nr_ext_inp;
361 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
362}
363static DEVICE_ATTR_RO(nr_ext_inp);
364
365static ssize_t numcidc_show(struct device *dev,
366 struct device_attribute *attr,
367 char *buf)
368{
369 unsigned long val;
370 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
371
372 val = drvdata->numcidc;
373 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
374}
375static DEVICE_ATTR_RO(numcidc);
376
377static ssize_t numvmidc_show(struct device *dev,
378 struct device_attribute *attr,
379 char *buf)
380{
381 unsigned long val;
382 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
383
384 val = drvdata->numvmidc;
385 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
386}
387static DEVICE_ATTR_RO(numvmidc);
388
389static ssize_t nrseqstate_show(struct device *dev,
390 struct device_attribute *attr,
391 char *buf)
392{
393 unsigned long val;
394 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
395
396 val = drvdata->nrseqstate;
397 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
398}
399static DEVICE_ATTR_RO(nrseqstate);
400
401static ssize_t nr_resource_show(struct device *dev,
402 struct device_attribute *attr,
403 char *buf)
404{
405 unsigned long val;
406 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
407
408 val = drvdata->nr_resource;
409 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
410}
411static DEVICE_ATTR_RO(nr_resource);
412
413static ssize_t nr_ss_cmp_show(struct device *dev,
414 struct device_attribute *attr,
415 char *buf)
416{
417 unsigned long val;
418 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
419
420 val = drvdata->nr_ss_cmp;
421 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
422}
423static DEVICE_ATTR_RO(nr_ss_cmp);
424
425static ssize_t reset_store(struct device *dev,
426 struct device_attribute *attr,
427 const char *buf, size_t size)
428{
429 int i;
430 unsigned long val;
431 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
432 235
433 if (kstrtoul(buf, 16, &val)) 236 if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
434 return -EINVAL; 237 return -EINVAL;
435 238
436 spin_lock(&drvdata->spinlock); 239 /* Configure the tracer based on the session's specifics */
437 if (val) 240 etm4_parse_event_config(drvdata, attr);
438 drvdata->mode = 0x0; 241 /* And enable it */
439 242 etm4_enable_hw(drvdata);
440 /* Disable data tracing: do not trace load and store data transfers */
441 drvdata->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
442 drvdata->cfg &= ~(BIT(1) | BIT(2));
443
444 /* Disable data value and data address tracing */
445 drvdata->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
446 ETM_MODE_DATA_TRACE_VAL);
447 drvdata->cfg &= ~(BIT(16) | BIT(17));
448
449 /* Disable all events tracing */
450 drvdata->eventctrl0 = 0x0;
451 drvdata->eventctrl1 = 0x0;
452
453 /* Disable timestamp event */
454 drvdata->ts_ctrl = 0x0;
455
456 /* Disable stalling */
457 drvdata->stall_ctrl = 0x0;
458
459 /* Reset trace synchronization period to 2^8 = 256 bytes*/
460 if (drvdata->syncpr == false)
461 drvdata->syncfreq = 0x8;
462
463 /*
464 * Enable ViewInst to trace everything with start-stop logic in
465 * started state. ARM recommends start-stop logic is set before
466 * each trace run.
467 */
468 drvdata->vinst_ctrl |= BIT(0);
469 if (drvdata->nr_addr_cmp == true) {
470 drvdata->mode |= ETM_MODE_VIEWINST_STARTSTOP;
471 /* SSSTATUS, bit[9] */
472 drvdata->vinst_ctrl |= BIT(9);
473 }
474
475 /* No address range filtering for ViewInst */
476 drvdata->viiectlr = 0x0;
477
478 /* No start-stop filtering for ViewInst */
479 drvdata->vissctlr = 0x0;
480
481 /* Disable seq events */
482 for (i = 0; i < drvdata->nrseqstate-1; i++)
483 drvdata->seq_ctrl[i] = 0x0;
484 drvdata->seq_rst = 0x0;
485 drvdata->seq_state = 0x0;
486
487 /* Disable external input events */
488 drvdata->ext_inp = 0x0;
489
490 drvdata->cntr_idx = 0x0;
491 for (i = 0; i < drvdata->nr_cntr; i++) {
492 drvdata->cntrldvr[i] = 0x0;
493 drvdata->cntr_ctrl[i] = 0x0;
494 drvdata->cntr_val[i] = 0x0;
495 }
496
497 /* Resource selector pair 0 is always implemented and reserved */
498 drvdata->res_idx = 0x2;
499 for (i = 2; i < drvdata->nr_resource * 2; i++)
500 drvdata->res_ctrl[i] = 0x0;
501
502 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
503 drvdata->ss_ctrl[i] = 0x0;
504 drvdata->ss_pe_cmp[i] = 0x0;
505 }
506
507 drvdata->addr_idx = 0x0;
508 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
509 drvdata->addr_val[i] = 0x0;
510 drvdata->addr_acc[i] = 0x0;
511 drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
512 }
513
514 drvdata->ctxid_idx = 0x0;
515 for (i = 0; i < drvdata->numcidc; i++) {
516 drvdata->ctxid_pid[i] = 0x0;
517 drvdata->ctxid_vpid[i] = 0x0;
518 }
519
520 drvdata->ctxid_mask0 = 0x0;
521 drvdata->ctxid_mask1 = 0x0;
522
523 drvdata->vmid_idx = 0x0;
524 for (i = 0; i < drvdata->numvmidc; i++)
525 drvdata->vmid_val[i] = 0x0;
526 drvdata->vmid_mask0 = 0x0;
527 drvdata->vmid_mask1 = 0x0;
528
529 drvdata->trcid = drvdata->cpu + 1;
530 spin_unlock(&drvdata->spinlock);
531 return size;
532}
533static DEVICE_ATTR_WO(reset);
534 243
535static ssize_t mode_show(struct device *dev, 244 return 0;
536 struct device_attribute *attr,
537 char *buf)
538{
539 unsigned long val;
540 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
541
542 val = drvdata->mode;
543 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
544} 245}
545 246
546static ssize_t mode_store(struct device *dev, 247static int etm4_enable_sysfs(struct coresight_device *csdev)
547 struct device_attribute *attr,
548 const char *buf, size_t size)
549{ 248{
550 unsigned long val, mode; 249 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
551 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 250 int ret;
552
553 if (kstrtoul(buf, 16, &val))
554 return -EINVAL;
555 251
556 spin_lock(&drvdata->spinlock); 252 spin_lock(&drvdata->spinlock);
557 drvdata->mode = val & ETMv4_MODE_ALL;
558
559 if (drvdata->mode & ETM_MODE_EXCLUDE)
560 etm4_set_mode_exclude(drvdata, true);
561 else
562 etm4_set_mode_exclude(drvdata, false);
563
564 if (drvdata->instrp0 == true) {
565 /* start by clearing instruction P0 field */
566 drvdata->cfg &= ~(BIT(1) | BIT(2));
567 if (drvdata->mode & ETM_MODE_LOAD)
568 /* 0b01 Trace load instructions as P0 instructions */
569 drvdata->cfg |= BIT(1);
570 if (drvdata->mode & ETM_MODE_STORE)
571 /* 0b10 Trace store instructions as P0 instructions */
572 drvdata->cfg |= BIT(2);
573 if (drvdata->mode & ETM_MODE_LOAD_STORE)
574 /*
575 * 0b11 Trace load and store instructions
576 * as P0 instructions
577 */
578 drvdata->cfg |= BIT(1) | BIT(2);
579 }
580
581 /* bit[3], Branch broadcast mode */
582 if ((drvdata->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
583 drvdata->cfg |= BIT(3);
584 else
585 drvdata->cfg &= ~BIT(3);
586
587 /* bit[4], Cycle counting instruction trace bit */
588 if ((drvdata->mode & ETMv4_MODE_CYCACC) &&
589 (drvdata->trccci == true))
590 drvdata->cfg |= BIT(4);
591 else
592 drvdata->cfg &= ~BIT(4);
593
594 /* bit[6], Context ID tracing bit */
595 if ((drvdata->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
596 drvdata->cfg |= BIT(6);
597 else
598 drvdata->cfg &= ~BIT(6);
599
600 if ((drvdata->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
601 drvdata->cfg |= BIT(7);
602 else
603 drvdata->cfg &= ~BIT(7);
604
605 /* bits[10:8], Conditional instruction tracing bit */
606 mode = ETM_MODE_COND(drvdata->mode);
607 if (drvdata->trccond == true) {
608 drvdata->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
609 drvdata->cfg |= mode << 8;
610 }
611
612 /* bit[11], Global timestamp tracing bit */
613 if ((drvdata->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
614 drvdata->cfg |= BIT(11);
615 else
616 drvdata->cfg &= ~BIT(11);
617 253
618 /* bit[12], Return stack enable bit */
619 if ((drvdata->mode & ETM_MODE_RETURNSTACK) &&
620 (drvdata->retstack == true))
621 drvdata->cfg |= BIT(12);
622 else
623 drvdata->cfg &= ~BIT(12);
624
625 /* bits[14:13], Q element enable field */
626 mode = ETM_MODE_QELEM(drvdata->mode);
627 /* start by clearing QE bits */
628 drvdata->cfg &= ~(BIT(13) | BIT(14));
629 /* if supported, Q elements with instruction counts are enabled */
630 if ((mode & BIT(0)) && (drvdata->q_support & BIT(0)))
631 drvdata->cfg |= BIT(13);
632 /* 254 /*
633 * if supported, Q elements with and without instruction 255 * Executing etm4_enable_hw on the cpu whose ETM is being enabled
634 * counts are enabled 256 * ensures that register writes occur when cpu is powered.
635 */ 257 */
636 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1))) 258 ret = smp_call_function_single(drvdata->cpu,
637 drvdata->cfg |= BIT(14); 259 etm4_enable_hw, drvdata, 1);
638 260 if (ret)
639 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */ 261 goto err;
640 if ((drvdata->mode & ETM_MODE_ATB_TRIGGER) &&
641 (drvdata->atbtrig == true))
642 drvdata->eventctrl1 |= BIT(11);
643 else
644 drvdata->eventctrl1 &= ~BIT(11);
645
646 /* bit[12], Low-power state behavior override bit */
647 if ((drvdata->mode & ETM_MODE_LPOVERRIDE) &&
648 (drvdata->lpoverride == true))
649 drvdata->eventctrl1 |= BIT(12);
650 else
651 drvdata->eventctrl1 &= ~BIT(12);
652
653 /* bit[8], Instruction stall bit */
654 if (drvdata->mode & ETM_MODE_ISTALL_EN)
655 drvdata->stall_ctrl |= BIT(8);
656 else
657 drvdata->stall_ctrl &= ~BIT(8);
658
659 /* bit[10], Prioritize instruction trace bit */
660 if (drvdata->mode & ETM_MODE_INSTPRIO)
661 drvdata->stall_ctrl |= BIT(10);
662 else
663 drvdata->stall_ctrl &= ~BIT(10);
664
665 /* bit[13], Trace overflow prevention bit */
666 if ((drvdata->mode & ETM_MODE_NOOVERFLOW) &&
667 (drvdata->nooverflow == true))
668 drvdata->stall_ctrl |= BIT(13);
669 else
670 drvdata->stall_ctrl &= ~BIT(13);
671
672 /* bit[9] Start/stop logic control bit */
673 if (drvdata->mode & ETM_MODE_VIEWINST_STARTSTOP)
674 drvdata->vinst_ctrl |= BIT(9);
675 else
676 drvdata->vinst_ctrl &= ~BIT(9);
677
678 /* bit[10], Whether a trace unit must trace a Reset exception */
679 if (drvdata->mode & ETM_MODE_TRACE_RESET)
680 drvdata->vinst_ctrl |= BIT(10);
681 else
682 drvdata->vinst_ctrl &= ~BIT(10);
683
684 /* bit[11], Whether a trace unit must trace a system error exception */
685 if ((drvdata->mode & ETM_MODE_TRACE_ERR) &&
686 (drvdata->trc_error == true))
687 drvdata->vinst_ctrl |= BIT(11);
688 else
689 drvdata->vinst_ctrl &= ~BIT(11);
690
691 spin_unlock(&drvdata->spinlock);
692 return size;
693}
694static DEVICE_ATTR_RW(mode);
695
696static ssize_t pe_show(struct device *dev,
697 struct device_attribute *attr,
698 char *buf)
699{
700 unsigned long val;
701 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
702
703 val = drvdata->pe_sel;
704 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
705}
706
707static ssize_t pe_store(struct device *dev,
708 struct device_attribute *attr,
709 const char *buf, size_t size)
710{
711 unsigned long val;
712 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
713
714 if (kstrtoul(buf, 16, &val))
715 return -EINVAL;
716
717 spin_lock(&drvdata->spinlock);
718 if (val > drvdata->nr_pe) {
719 spin_unlock(&drvdata->spinlock);
720 return -EINVAL;
721 }
722 262
723 drvdata->pe_sel = val; 263 drvdata->sticky_enable = true;
724 spin_unlock(&drvdata->spinlock); 264 spin_unlock(&drvdata->spinlock);
725 return size;
726}
727static DEVICE_ATTR_RW(pe);
728
729static ssize_t event_show(struct device *dev,
730 struct device_attribute *attr,
731 char *buf)
732{
733 unsigned long val;
734 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
735
736 val = drvdata->eventctrl0;
737 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
738}
739 265
740static ssize_t event_store(struct device *dev, 266 dev_info(drvdata->dev, "ETM tracing enabled\n");
741 struct device_attribute *attr, 267 return 0;
742 const char *buf, size_t size)
743{
744 unsigned long val;
745 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
746
747 if (kstrtoul(buf, 16, &val))
748 return -EINVAL;
749 268
750 spin_lock(&drvdata->spinlock); 269err:
751 switch (drvdata->nr_event) {
752 case 0x0:
753 /* EVENT0, bits[7:0] */
754 drvdata->eventctrl0 = val & 0xFF;
755 break;
756 case 0x1:
757 /* EVENT1, bits[15:8] */
758 drvdata->eventctrl0 = val & 0xFFFF;
759 break;
760 case 0x2:
761 /* EVENT2, bits[23:16] */
762 drvdata->eventctrl0 = val & 0xFFFFFF;
763 break;
764 case 0x3:
765 /* EVENT3, bits[31:24] */
766 drvdata->eventctrl0 = val;
767 break;
768 default:
769 break;
770 }
771 spin_unlock(&drvdata->spinlock); 270 spin_unlock(&drvdata->spinlock);
772 return size; 271 return ret;
773} 272}
774static DEVICE_ATTR_RW(event);
775 273
776static ssize_t event_instren_show(struct device *dev, 274static int etm4_enable(struct coresight_device *csdev,
777 struct device_attribute *attr, 275 struct perf_event_attr *attr, u32 mode)
778 char *buf)
779{ 276{
780 unsigned long val; 277 int ret;
781 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 278 u32 val;
782 279 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
783 val = BMVAL(drvdata->eventctrl1, 0, 3);
784 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
785}
786 280
787static ssize_t event_instren_store(struct device *dev, 281 val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
788 struct device_attribute *attr,
789 const char *buf, size_t size)
790{
791 unsigned long val;
792 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
793 282
794 if (kstrtoul(buf, 16, &val)) 283 /* Someone is already using the tracer */
795 return -EINVAL; 284 if (val)
285 return -EBUSY;
796 286
797 spin_lock(&drvdata->spinlock); 287 switch (mode) {
798 /* start by clearing all instruction event enable bits */ 288 case CS_MODE_SYSFS:
799 drvdata->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3)); 289 ret = etm4_enable_sysfs(csdev);
800 switch (drvdata->nr_event) {
801 case 0x0:
802 /* generate Event element for event 1 */
803 drvdata->eventctrl1 |= val & BIT(1);
804 break; 290 break;
805 case 0x1: 291 case CS_MODE_PERF:
806 /* generate Event element for event 1 and 2 */ 292 ret = etm4_enable_perf(csdev, attr);
807 drvdata->eventctrl1 |= val & (BIT(0) | BIT(1));
808 break;
809 case 0x2:
810 /* generate Event element for event 1, 2 and 3 */
811 drvdata->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
812 break;
813 case 0x3:
814 /* generate Event element for all 4 events */
815 drvdata->eventctrl1 |= val & 0xF;
816 break; 293 break;
817 default: 294 default:
818 break; 295 ret = -EINVAL;
819 }
820 spin_unlock(&drvdata->spinlock);
821 return size;
822}
823static DEVICE_ATTR_RW(event_instren);
824
825static ssize_t event_ts_show(struct device *dev,
826 struct device_attribute *attr,
827 char *buf)
828{
829 unsigned long val;
830 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
831
832 val = drvdata->ts_ctrl;
833 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
834}
835
836static ssize_t event_ts_store(struct device *dev,
837 struct device_attribute *attr,
838 const char *buf, size_t size)
839{
840 unsigned long val;
841 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
842
843 if (kstrtoul(buf, 16, &val))
844 return -EINVAL;
845 if (!drvdata->ts_size)
846 return -EINVAL;
847
848 drvdata->ts_ctrl = val & ETMv4_EVENT_MASK;
849 return size;
850}
851static DEVICE_ATTR_RW(event_ts);
852
853static ssize_t syncfreq_show(struct device *dev,
854 struct device_attribute *attr,
855 char *buf)
856{
857 unsigned long val;
858 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
859
860 val = drvdata->syncfreq;
861 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
862}
863
864static ssize_t syncfreq_store(struct device *dev,
865 struct device_attribute *attr,
866 const char *buf, size_t size)
867{
868 unsigned long val;
869 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
870
871 if (kstrtoul(buf, 16, &val))
872 return -EINVAL;
873 if (drvdata->syncpr == true)
874 return -EINVAL;
875
876 drvdata->syncfreq = val & ETMv4_SYNC_MASK;
877 return size;
878}
879static DEVICE_ATTR_RW(syncfreq);
880
881static ssize_t cyc_threshold_show(struct device *dev,
882 struct device_attribute *attr,
883 char *buf)
884{
885 unsigned long val;
886 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
887
888 val = drvdata->ccctlr;
889 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
890}
891
892static ssize_t cyc_threshold_store(struct device *dev,
893 struct device_attribute *attr,
894 const char *buf, size_t size)
895{
896 unsigned long val;
897 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
898
899 if (kstrtoul(buf, 16, &val))
900 return -EINVAL;
901 if (val < drvdata->ccitmin)
902 return -EINVAL;
903
904 drvdata->ccctlr = val & ETM_CYC_THRESHOLD_MASK;
905 return size;
906}
907static DEVICE_ATTR_RW(cyc_threshold);
908
909static ssize_t bb_ctrl_show(struct device *dev,
910 struct device_attribute *attr,
911 char *buf)
912{
913 unsigned long val;
914 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
915
916 val = drvdata->bb_ctrl;
917 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
918}
919
920static ssize_t bb_ctrl_store(struct device *dev,
921 struct device_attribute *attr,
922 const char *buf, size_t size)
923{
924 unsigned long val;
925 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
926
927 if (kstrtoul(buf, 16, &val))
928 return -EINVAL;
929 if (drvdata->trcbb == false)
930 return -EINVAL;
931 if (!drvdata->nr_addr_cmp)
932 return -EINVAL;
933 /*
934 * Bit[7:0] selects which address range comparator is used for
935 * branch broadcast control.
936 */
937 if (BMVAL(val, 0, 7) > drvdata->nr_addr_cmp)
938 return -EINVAL;
939
940 drvdata->bb_ctrl = val;
941 return size;
942}
943static DEVICE_ATTR_RW(bb_ctrl);
944
945static ssize_t event_vinst_show(struct device *dev,
946 struct device_attribute *attr,
947 char *buf)
948{
949 unsigned long val;
950 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
951
952 val = drvdata->vinst_ctrl & ETMv4_EVENT_MASK;
953 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
954}
955
956static ssize_t event_vinst_store(struct device *dev,
957 struct device_attribute *attr,
958 const char *buf, size_t size)
959{
960 unsigned long val;
961 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
962
963 if (kstrtoul(buf, 16, &val))
964 return -EINVAL;
965
966 spin_lock(&drvdata->spinlock);
967 val &= ETMv4_EVENT_MASK;
968 drvdata->vinst_ctrl &= ~ETMv4_EVENT_MASK;
969 drvdata->vinst_ctrl |= val;
970 spin_unlock(&drvdata->spinlock);
971 return size;
972}
973static DEVICE_ATTR_RW(event_vinst);
974
975static ssize_t s_exlevel_vinst_show(struct device *dev,
976 struct device_attribute *attr,
977 char *buf)
978{
979 unsigned long val;
980 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
981
982 val = BMVAL(drvdata->vinst_ctrl, 16, 19);
983 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
984}
985
986static ssize_t s_exlevel_vinst_store(struct device *dev,
987 struct device_attribute *attr,
988 const char *buf, size_t size)
989{
990 unsigned long val;
991 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
992
993 if (kstrtoul(buf, 16, &val))
994 return -EINVAL;
995
996 spin_lock(&drvdata->spinlock);
997 /* clear all EXLEVEL_S bits (bit[18] is never implemented) */
998 drvdata->vinst_ctrl &= ~(BIT(16) | BIT(17) | BIT(19));
999 /* enable instruction tracing for corresponding exception level */
1000 val &= drvdata->s_ex_level;
1001 drvdata->vinst_ctrl |= (val << 16);
1002 spin_unlock(&drvdata->spinlock);
1003 return size;
1004}
1005static DEVICE_ATTR_RW(s_exlevel_vinst);
1006
1007static ssize_t ns_exlevel_vinst_show(struct device *dev,
1008 struct device_attribute *attr,
1009 char *buf)
1010{
1011 unsigned long val;
1012 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1013
1014 /* EXLEVEL_NS, bits[23:20] */
1015 val = BMVAL(drvdata->vinst_ctrl, 20, 23);
1016 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1017}
1018
1019static ssize_t ns_exlevel_vinst_store(struct device *dev,
1020 struct device_attribute *attr,
1021 const char *buf, size_t size)
1022{
1023 unsigned long val;
1024 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1025
1026 if (kstrtoul(buf, 16, &val))
1027 return -EINVAL;
1028
1029 spin_lock(&drvdata->spinlock);
1030 /* clear EXLEVEL_NS bits (bit[23] is never implemented */
1031 drvdata->vinst_ctrl &= ~(BIT(20) | BIT(21) | BIT(22));
1032 /* enable instruction tracing for corresponding exception level */
1033 val &= drvdata->ns_ex_level;
1034 drvdata->vinst_ctrl |= (val << 20);
1035 spin_unlock(&drvdata->spinlock);
1036 return size;
1037}
1038static DEVICE_ATTR_RW(ns_exlevel_vinst);
1039
1040static ssize_t addr_idx_show(struct device *dev,
1041 struct device_attribute *attr,
1042 char *buf)
1043{
1044 unsigned long val;
1045 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1046
1047 val = drvdata->addr_idx;
1048 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1049}
1050
1051static ssize_t addr_idx_store(struct device *dev,
1052 struct device_attribute *attr,
1053 const char *buf, size_t size)
1054{
1055 unsigned long val;
1056 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1057
1058 if (kstrtoul(buf, 16, &val))
1059 return -EINVAL;
1060 if (val >= drvdata->nr_addr_cmp * 2)
1061 return -EINVAL;
1062
1063 /*
1064 * Use spinlock to ensure index doesn't change while it gets
1065 * dereferenced multiple times within a spinlock block elsewhere.
1066 */
1067 spin_lock(&drvdata->spinlock);
1068 drvdata->addr_idx = val;
1069 spin_unlock(&drvdata->spinlock);
1070 return size;
1071}
1072static DEVICE_ATTR_RW(addr_idx);
1073
1074static ssize_t addr_instdatatype_show(struct device *dev,
1075 struct device_attribute *attr,
1076 char *buf)
1077{
1078 ssize_t len;
1079 u8 val, idx;
1080 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1081
1082 spin_lock(&drvdata->spinlock);
1083 idx = drvdata->addr_idx;
1084 val = BMVAL(drvdata->addr_acc[idx], 0, 1);
1085 len = scnprintf(buf, PAGE_SIZE, "%s\n",
1086 val == ETM_INSTR_ADDR ? "instr" :
1087 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
1088 (val == ETM_DATA_STORE_ADDR ? "data_store" :
1089 "data_load_store")));
1090 spin_unlock(&drvdata->spinlock);
1091 return len;
1092}
1093
1094static ssize_t addr_instdatatype_store(struct device *dev,
1095 struct device_attribute *attr,
1096 const char *buf, size_t size)
1097{
1098 u8 idx;
1099 char str[20] = "";
1100 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1101
1102 if (strlen(buf) >= 20)
1103 return -EINVAL;
1104 if (sscanf(buf, "%s", str) != 1)
1105 return -EINVAL;
1106
1107 spin_lock(&drvdata->spinlock);
1108 idx = drvdata->addr_idx;
1109 if (!strcmp(str, "instr"))
1110 /* TYPE, bits[1:0] */
1111 drvdata->addr_acc[idx] &= ~(BIT(0) | BIT(1));
1112
1113 spin_unlock(&drvdata->spinlock);
1114 return size;
1115}
1116static DEVICE_ATTR_RW(addr_instdatatype);
1117
1118static ssize_t addr_single_show(struct device *dev,
1119 struct device_attribute *attr,
1120 char *buf)
1121{
1122 u8 idx;
1123 unsigned long val;
1124 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1125
1126 idx = drvdata->addr_idx;
1127 spin_lock(&drvdata->spinlock);
1128 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1129 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1130 spin_unlock(&drvdata->spinlock);
1131 return -EPERM;
1132 }
1133 val = (unsigned long)drvdata->addr_val[idx];
1134 spin_unlock(&drvdata->spinlock);
1135 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1136}
1137
1138static ssize_t addr_single_store(struct device *dev,
1139 struct device_attribute *attr,
1140 const char *buf, size_t size)
1141{
1142 u8 idx;
1143 unsigned long val;
1144 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1145
1146 if (kstrtoul(buf, 16, &val))
1147 return -EINVAL;
1148
1149 spin_lock(&drvdata->spinlock);
1150 idx = drvdata->addr_idx;
1151 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1152 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
1153 spin_unlock(&drvdata->spinlock);
1154 return -EPERM;
1155 }
1156
1157 drvdata->addr_val[idx] = (u64)val;
1158 drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
1159 spin_unlock(&drvdata->spinlock);
1160 return size;
1161}
1162static DEVICE_ATTR_RW(addr_single);
1163
1164static ssize_t addr_range_show(struct device *dev,
1165 struct device_attribute *attr,
1166 char *buf)
1167{
1168 u8 idx;
1169 unsigned long val1, val2;
1170 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1171
1172 spin_lock(&drvdata->spinlock);
1173 idx = drvdata->addr_idx;
1174 if (idx % 2 != 0) {
1175 spin_unlock(&drvdata->spinlock);
1176 return -EPERM;
1177 }
1178 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1179 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1180 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1181 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1182 spin_unlock(&drvdata->spinlock);
1183 return -EPERM;
1184 }
1185
1186 val1 = (unsigned long)drvdata->addr_val[idx];
1187 val2 = (unsigned long)drvdata->addr_val[idx + 1];
1188 spin_unlock(&drvdata->spinlock);
1189 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1190}
1191
1192static ssize_t addr_range_store(struct device *dev,
1193 struct device_attribute *attr,
1194 const char *buf, size_t size)
1195{
1196 u8 idx;
1197 unsigned long val1, val2;
1198 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1199
1200 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1201 return -EINVAL;
1202 /* lower address comparator cannot have a higher address value */
1203 if (val1 > val2)
1204 return -EINVAL;
1205
1206 spin_lock(&drvdata->spinlock);
1207 idx = drvdata->addr_idx;
1208 if (idx % 2 != 0) {
1209 spin_unlock(&drvdata->spinlock);
1210 return -EPERM;
1211 }
1212
1213 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1214 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1215 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1216 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1217 spin_unlock(&drvdata->spinlock);
1218 return -EPERM;
1219 }
1220
1221 drvdata->addr_val[idx] = (u64)val1;
1222 drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1223 drvdata->addr_val[idx + 1] = (u64)val2;
1224 drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1225 /*
1226 * Program include or exclude control bits for vinst or vdata
1227 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1228 */
1229 if (drvdata->mode & ETM_MODE_EXCLUDE)
1230 etm4_set_mode_exclude(drvdata, true);
1231 else
1232 etm4_set_mode_exclude(drvdata, false);
1233
1234 spin_unlock(&drvdata->spinlock);
1235 return size;
1236}
1237static DEVICE_ATTR_RW(addr_range);
1238
1239static ssize_t addr_start_show(struct device *dev,
1240 struct device_attribute *attr,
1241 char *buf)
1242{
1243 u8 idx;
1244 unsigned long val;
1245 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1246
1247 spin_lock(&drvdata->spinlock);
1248 idx = drvdata->addr_idx;
1249
1250 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1251 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1252 spin_unlock(&drvdata->spinlock);
1253 return -EPERM;
1254 }
1255
1256 val = (unsigned long)drvdata->addr_val[idx];
1257 spin_unlock(&drvdata->spinlock);
1258 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1259}
1260
1261static ssize_t addr_start_store(struct device *dev,
1262 struct device_attribute *attr,
1263 const char *buf, size_t size)
1264{
1265 u8 idx;
1266 unsigned long val;
1267 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1268
1269 if (kstrtoul(buf, 16, &val))
1270 return -EINVAL;
1271
1272 spin_lock(&drvdata->spinlock);
1273 idx = drvdata->addr_idx;
1274 if (!drvdata->nr_addr_cmp) {
1275 spin_unlock(&drvdata->spinlock);
1276 return -EINVAL;
1277 }
1278 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1279 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1280 spin_unlock(&drvdata->spinlock);
1281 return -EPERM;
1282 }
1283
1284 drvdata->addr_val[idx] = (u64)val;
1285 drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
1286 drvdata->vissctlr |= BIT(idx);
1287 /* SSSTATUS, bit[9] - turn on start/stop logic */
1288 drvdata->vinst_ctrl |= BIT(9);
1289 spin_unlock(&drvdata->spinlock);
1290 return size;
1291}
1292static DEVICE_ATTR_RW(addr_start);
1293
1294static ssize_t addr_stop_show(struct device *dev,
1295 struct device_attribute *attr,
1296 char *buf)
1297{
1298 u8 idx;
1299 unsigned long val;
1300 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1301
1302 spin_lock(&drvdata->spinlock);
1303 idx = drvdata->addr_idx;
1304
1305 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1306 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1307 spin_unlock(&drvdata->spinlock);
1308 return -EPERM;
1309 }
1310
1311 val = (unsigned long)drvdata->addr_val[idx];
1312 spin_unlock(&drvdata->spinlock);
1313 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1314}
1315
1316static ssize_t addr_stop_store(struct device *dev,
1317 struct device_attribute *attr,
1318 const char *buf, size_t size)
1319{
1320 u8 idx;
1321 unsigned long val;
1322 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1323
1324 if (kstrtoul(buf, 16, &val))
1325 return -EINVAL;
1326
1327 spin_lock(&drvdata->spinlock);
1328 idx = drvdata->addr_idx;
1329 if (!drvdata->nr_addr_cmp) {
1330 spin_unlock(&drvdata->spinlock);
1331 return -EINVAL;
1332 }
1333 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1334 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1335 spin_unlock(&drvdata->spinlock);
1336 return -EPERM;
1337 }
1338
1339 drvdata->addr_val[idx] = (u64)val;
1340 drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1341 drvdata->vissctlr |= BIT(idx + 16);
1342 /* SSSTATUS, bit[9] - turn on start/stop logic */
1343 drvdata->vinst_ctrl |= BIT(9);
1344 spin_unlock(&drvdata->spinlock);
1345 return size;
1346}
1347static DEVICE_ATTR_RW(addr_stop);
1348
1349static ssize_t addr_ctxtype_show(struct device *dev,
1350 struct device_attribute *attr,
1351 char *buf)
1352{
1353 ssize_t len;
1354 u8 idx, val;
1355 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1356
1357 spin_lock(&drvdata->spinlock);
1358 idx = drvdata->addr_idx;
1359 /* CONTEXTTYPE, bits[3:2] */
1360 val = BMVAL(drvdata->addr_acc[idx], 2, 3);
1361 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1362 (val == ETM_CTX_CTXID ? "ctxid" :
1363 (val == ETM_CTX_VMID ? "vmid" : "all")));
1364 spin_unlock(&drvdata->spinlock);
1365 return len;
1366}
1367
1368static ssize_t addr_ctxtype_store(struct device *dev,
1369 struct device_attribute *attr,
1370 const char *buf, size_t size)
1371{
1372 u8 idx;
1373 char str[10] = "";
1374 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1375
1376 if (strlen(buf) >= 10)
1377 return -EINVAL;
1378 if (sscanf(buf, "%s", str) != 1)
1379 return -EINVAL;
1380
1381 spin_lock(&drvdata->spinlock);
1382 idx = drvdata->addr_idx;
1383 if (!strcmp(str, "none"))
1384 /* start by clearing context type bits */
1385 drvdata->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1386 else if (!strcmp(str, "ctxid")) {
1387 /* 0b01 The trace unit performs a Context ID */
1388 if (drvdata->numcidc) {
1389 drvdata->addr_acc[idx] |= BIT(2);
1390 drvdata->addr_acc[idx] &= ~BIT(3);
1391 }
1392 } else if (!strcmp(str, "vmid")) {
1393 /* 0b10 The trace unit performs a VMID */
1394 if (drvdata->numvmidc) {
1395 drvdata->addr_acc[idx] &= ~BIT(2);
1396 drvdata->addr_acc[idx] |= BIT(3);
1397 }
1398 } else if (!strcmp(str, "all")) {
1399 /*
1400 * 0b11 The trace unit performs a Context ID
1401 * comparison and a VMID
1402 */
1403 if (drvdata->numcidc)
1404 drvdata->addr_acc[idx] |= BIT(2);
1405 if (drvdata->numvmidc)
1406 drvdata->addr_acc[idx] |= BIT(3);
1407 } 296 }
1408 spin_unlock(&drvdata->spinlock);
1409 return size;
1410}
1411static DEVICE_ATTR_RW(addr_ctxtype);
1412
1413static ssize_t addr_context_show(struct device *dev,
1414 struct device_attribute *attr,
1415 char *buf)
1416{
1417 u8 idx;
1418 unsigned long val;
1419 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1420
1421 spin_lock(&drvdata->spinlock);
1422 idx = drvdata->addr_idx;
1423 /* context ID comparator bits[6:4] */
1424 val = BMVAL(drvdata->addr_acc[idx], 4, 6);
1425 spin_unlock(&drvdata->spinlock);
1426 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1427}
1428
1429static ssize_t addr_context_store(struct device *dev,
1430 struct device_attribute *attr,
1431 const char *buf, size_t size)
1432{
1433 u8 idx;
1434 unsigned long val;
1435 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1436
1437 if (kstrtoul(buf, 16, &val))
1438 return -EINVAL;
1439 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1440 return -EINVAL;
1441 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1442 drvdata->numcidc : drvdata->numvmidc))
1443 return -EINVAL;
1444
1445 spin_lock(&drvdata->spinlock);
1446 idx = drvdata->addr_idx;
1447 /* clear context ID comparator bits[6:4] */
1448 drvdata->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1449 drvdata->addr_acc[idx] |= (val << 4);
1450 spin_unlock(&drvdata->spinlock);
1451 return size;
1452}
1453static DEVICE_ATTR_RW(addr_context);
1454
1455static ssize_t seq_idx_show(struct device *dev,
1456 struct device_attribute *attr,
1457 char *buf)
1458{
1459 unsigned long val;
1460 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1461
1462 val = drvdata->seq_idx;
1463 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1464}
1465
1466static ssize_t seq_idx_store(struct device *dev,
1467 struct device_attribute *attr,
1468 const char *buf, size_t size)
1469{
1470 unsigned long val;
1471 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1472
1473 if (kstrtoul(buf, 16, &val))
1474 return -EINVAL;
1475 if (val >= drvdata->nrseqstate - 1)
1476 return -EINVAL;
1477
1478 /*
1479 * Use spinlock to ensure index doesn't change while it gets
1480 * dereferenced multiple times within a spinlock block elsewhere.
1481 */
1482 spin_lock(&drvdata->spinlock);
1483 drvdata->seq_idx = val;
1484 spin_unlock(&drvdata->spinlock);
1485 return size;
1486}
1487static DEVICE_ATTR_RW(seq_idx);
1488
1489static ssize_t seq_state_show(struct device *dev,
1490 struct device_attribute *attr,
1491 char *buf)
1492{
1493 unsigned long val;
1494 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1495 297
1496 val = drvdata->seq_state; 298 /* The tracer didn't start */
1497 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); 299 if (ret)
1498} 300 local_set(&drvdata->mode, CS_MODE_DISABLED);
1499
1500static ssize_t seq_state_store(struct device *dev,
1501 struct device_attribute *attr,
1502 const char *buf, size_t size)
1503{
1504 unsigned long val;
1505 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1506
1507 if (kstrtoul(buf, 16, &val))
1508 return -EINVAL;
1509 if (val >= drvdata->nrseqstate)
1510 return -EINVAL;
1511
1512 drvdata->seq_state = val;
1513 return size;
1514}
1515static DEVICE_ATTR_RW(seq_state);
1516
1517static ssize_t seq_event_show(struct device *dev,
1518 struct device_attribute *attr,
1519 char *buf)
1520{
1521 u8 idx;
1522 unsigned long val;
1523 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1524
1525 spin_lock(&drvdata->spinlock);
1526 idx = drvdata->seq_idx;
1527 val = drvdata->seq_ctrl[idx];
1528 spin_unlock(&drvdata->spinlock);
1529 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1530}
1531
1532static ssize_t seq_event_store(struct device *dev,
1533 struct device_attribute *attr,
1534 const char *buf, size_t size)
1535{
1536 u8 idx;
1537 unsigned long val;
1538 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1539
1540 if (kstrtoul(buf, 16, &val))
1541 return -EINVAL;
1542
1543 spin_lock(&drvdata->spinlock);
1544 idx = drvdata->seq_idx;
1545 /* RST, bits[7:0] */
1546 drvdata->seq_ctrl[idx] = val & 0xFF;
1547 spin_unlock(&drvdata->spinlock);
1548 return size;
1549}
1550static DEVICE_ATTR_RW(seq_event);
1551
1552static ssize_t seq_reset_event_show(struct device *dev,
1553 struct device_attribute *attr,
1554 char *buf)
1555{
1556 unsigned long val;
1557 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1558
1559 val = drvdata->seq_rst;
1560 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1561}
1562
1563static ssize_t seq_reset_event_store(struct device *dev,
1564 struct device_attribute *attr,
1565 const char *buf, size_t size)
1566{
1567 unsigned long val;
1568 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1569
1570 if (kstrtoul(buf, 16, &val))
1571 return -EINVAL;
1572 if (!(drvdata->nrseqstate))
1573 return -EINVAL;
1574
1575 drvdata->seq_rst = val & ETMv4_EVENT_MASK;
1576 return size;
1577}
1578static DEVICE_ATTR_RW(seq_reset_event);
1579
1580static ssize_t cntr_idx_show(struct device *dev,
1581 struct device_attribute *attr,
1582 char *buf)
1583{
1584 unsigned long val;
1585 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1586
1587 val = drvdata->cntr_idx;
1588 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1589}
1590
1591static ssize_t cntr_idx_store(struct device *dev,
1592 struct device_attribute *attr,
1593 const char *buf, size_t size)
1594{
1595 unsigned long val;
1596 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1597
1598 if (kstrtoul(buf, 16, &val))
1599 return -EINVAL;
1600 if (val >= drvdata->nr_cntr)
1601 return -EINVAL;
1602
1603 /*
1604 * Use spinlock to ensure index doesn't change while it gets
1605 * dereferenced multiple times within a spinlock block elsewhere.
1606 */
1607 spin_lock(&drvdata->spinlock);
1608 drvdata->cntr_idx = val;
1609 spin_unlock(&drvdata->spinlock);
1610 return size;
1611}
1612static DEVICE_ATTR_RW(cntr_idx);
1613
1614static ssize_t cntrldvr_show(struct device *dev,
1615 struct device_attribute *attr,
1616 char *buf)
1617{
1618 u8 idx;
1619 unsigned long val;
1620 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1621
1622 spin_lock(&drvdata->spinlock);
1623 idx = drvdata->cntr_idx;
1624 val = drvdata->cntrldvr[idx];
1625 spin_unlock(&drvdata->spinlock);
1626 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1627}
1628
1629static ssize_t cntrldvr_store(struct device *dev,
1630 struct device_attribute *attr,
1631 const char *buf, size_t size)
1632{
1633 u8 idx;
1634 unsigned long val;
1635 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1636
1637 if (kstrtoul(buf, 16, &val))
1638 return -EINVAL;
1639 if (val > ETM_CNTR_MAX_VAL)
1640 return -EINVAL;
1641
1642 spin_lock(&drvdata->spinlock);
1643 idx = drvdata->cntr_idx;
1644 drvdata->cntrldvr[idx] = val;
1645 spin_unlock(&drvdata->spinlock);
1646 return size;
1647}
1648static DEVICE_ATTR_RW(cntrldvr);
1649
1650static ssize_t cntr_val_show(struct device *dev,
1651 struct device_attribute *attr,
1652 char *buf)
1653{
1654 u8 idx;
1655 unsigned long val;
1656 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1657
1658 spin_lock(&drvdata->spinlock);
1659 idx = drvdata->cntr_idx;
1660 val = drvdata->cntr_val[idx];
1661 spin_unlock(&drvdata->spinlock);
1662 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1663}
1664
1665static ssize_t cntr_val_store(struct device *dev,
1666 struct device_attribute *attr,
1667 const char *buf, size_t size)
1668{
1669 u8 idx;
1670 unsigned long val;
1671 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1672
1673 if (kstrtoul(buf, 16, &val))
1674 return -EINVAL;
1675 if (val > ETM_CNTR_MAX_VAL)
1676 return -EINVAL;
1677
1678 spin_lock(&drvdata->spinlock);
1679 idx = drvdata->cntr_idx;
1680 drvdata->cntr_val[idx] = val;
1681 spin_unlock(&drvdata->spinlock);
1682 return size;
1683}
1684static DEVICE_ATTR_RW(cntr_val);
1685
1686static ssize_t cntr_ctrl_show(struct device *dev,
1687 struct device_attribute *attr,
1688 char *buf)
1689{
1690 u8 idx;
1691 unsigned long val;
1692 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1693
1694 spin_lock(&drvdata->spinlock);
1695 idx = drvdata->cntr_idx;
1696 val = drvdata->cntr_ctrl[idx];
1697 spin_unlock(&drvdata->spinlock);
1698 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1699}
1700
1701static ssize_t cntr_ctrl_store(struct device *dev,
1702 struct device_attribute *attr,
1703 const char *buf, size_t size)
1704{
1705 u8 idx;
1706 unsigned long val;
1707 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1708
1709 if (kstrtoul(buf, 16, &val))
1710 return -EINVAL;
1711
1712 spin_lock(&drvdata->spinlock);
1713 idx = drvdata->cntr_idx;
1714 drvdata->cntr_ctrl[idx] = val;
1715 spin_unlock(&drvdata->spinlock);
1716 return size;
1717}
1718static DEVICE_ATTR_RW(cntr_ctrl);
1719
1720static ssize_t res_idx_show(struct device *dev,
1721 struct device_attribute *attr,
1722 char *buf)
1723{
1724 unsigned long val;
1725 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1726
1727 val = drvdata->res_idx;
1728 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1729}
1730
1731static ssize_t res_idx_store(struct device *dev,
1732 struct device_attribute *attr,
1733 const char *buf, size_t size)
1734{
1735 unsigned long val;
1736 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1737
1738 if (kstrtoul(buf, 16, &val))
1739 return -EINVAL;
1740 /* Resource selector pair 0 is always implemented and reserved */
1741 if (val < 2 || val >= drvdata->nr_resource * 2)
1742 return -EINVAL;
1743 301
1744 /* 302 return ret;
1745 * Use spinlock to ensure index doesn't change while it gets
1746 * dereferenced multiple times within a spinlock block elsewhere.
1747 */
1748 spin_lock(&drvdata->spinlock);
1749 drvdata->res_idx = val;
1750 spin_unlock(&drvdata->spinlock);
1751 return size;
1752} 303}
1753static DEVICE_ATTR_RW(res_idx);
1754 304
1755static ssize_t res_ctrl_show(struct device *dev, 305static void etm4_disable_hw(void *info)
1756 struct device_attribute *attr,
1757 char *buf)
1758{ 306{
1759 u8 idx; 307 u32 control;
1760 unsigned long val; 308 struct etmv4_drvdata *drvdata = info;
1761 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1762 309
1763 spin_lock(&drvdata->spinlock); 310 CS_UNLOCK(drvdata->base);
1764 idx = drvdata->res_idx;
1765 val = drvdata->res_ctrl[idx];
1766 spin_unlock(&drvdata->spinlock);
1767 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1768}
1769 311
1770static ssize_t res_ctrl_store(struct device *dev, 312 control = readl_relaxed(drvdata->base + TRCPRGCTLR);
1771 struct device_attribute *attr,
1772 const char *buf, size_t size)
1773{
1774 u8 idx;
1775 unsigned long val;
1776 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1777 313
1778 if (kstrtoul(buf, 16, &val)) 314 /* EN, bit[0] Trace unit enable bit */
1779 return -EINVAL; 315 control &= ~0x1;
1780 316
1781 spin_lock(&drvdata->spinlock); 317 /* make sure everything completes before disabling */
1782 idx = drvdata->res_idx; 318 mb();
1783 /* For odd idx pair inversal bit is RES0 */ 319 isb();
1784 if (idx % 2 != 0) 320 writel_relaxed(control, drvdata->base + TRCPRGCTLR);
1785 /* PAIRINV, bit[21] */
1786 val &= ~BIT(21);
1787 drvdata->res_ctrl[idx] = val;
1788 spin_unlock(&drvdata->spinlock);
1789 return size;
1790}
1791static DEVICE_ATTR_RW(res_ctrl);
1792 321
1793static ssize_t ctxid_idx_show(struct device *dev, 322 CS_LOCK(drvdata->base);
1794 struct device_attribute *attr,
1795 char *buf)
1796{
1797 unsigned long val;
1798 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1799 323
1800 val = drvdata->ctxid_idx; 324 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
1801 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1802} 325}
1803 326
1804static ssize_t ctxid_idx_store(struct device *dev, 327static int etm4_disable_perf(struct coresight_device *csdev)
1805 struct device_attribute *attr,
1806 const char *buf, size_t size)
1807{ 328{
1808 unsigned long val; 329 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1809 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1810 330
1811 if (kstrtoul(buf, 16, &val)) 331 if (WARN_ON_ONCE(drvdata->cpu != smp_processor_id()))
1812 return -EINVAL;
1813 if (val >= drvdata->numcidc)
1814 return -EINVAL; 332 return -EINVAL;
1815 333
1816 /* 334 etm4_disable_hw(drvdata);
1817 * Use spinlock to ensure index doesn't change while it gets 335 return 0;
1818 * dereferenced multiple times within a spinlock block elsewhere.
1819 */
1820 spin_lock(&drvdata->spinlock);
1821 drvdata->ctxid_idx = val;
1822 spin_unlock(&drvdata->spinlock);
1823 return size;
1824}
1825static DEVICE_ATTR_RW(ctxid_idx);
1826
1827static ssize_t ctxid_pid_show(struct device *dev,
1828 struct device_attribute *attr,
1829 char *buf)
1830{
1831 u8 idx;
1832 unsigned long val;
1833 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1834
1835 spin_lock(&drvdata->spinlock);
1836 idx = drvdata->ctxid_idx;
1837 val = (unsigned long)drvdata->ctxid_vpid[idx];
1838 spin_unlock(&drvdata->spinlock);
1839 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1840} 336}
1841 337
1842static ssize_t ctxid_pid_store(struct device *dev, 338static void etm4_disable_sysfs(struct coresight_device *csdev)
1843 struct device_attribute *attr,
1844 const char *buf, size_t size)
1845{ 339{
1846 u8 idx; 340 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
1847 unsigned long vpid, pid;
1848 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1849 341
1850 /* 342 /*
1851 * only implemented when ctxid tracing is enabled, i.e. at least one 343 * Taking hotplug lock here protects from clocks getting disabled
1852 * ctxid comparator is implemented and ctxid is greater than 0 bits 344 * with tracing being left on (crash scenario) if user disable occurs
1853 * in length 345 * after cpu online mask indicates the cpu is offline but before the
346 * DYING hotplug callback is serviced by the ETM driver.
1854 */ 347 */
1855 if (!drvdata->ctxid_size || !drvdata->numcidc) 348 get_online_cpus();
1856 return -EINVAL;
1857 if (kstrtoul(buf, 16, &vpid))
1858 return -EINVAL;
1859
1860 pid = coresight_vpid_to_pid(vpid);
1861
1862 spin_lock(&drvdata->spinlock);
1863 idx = drvdata->ctxid_idx;
1864 drvdata->ctxid_pid[idx] = (u64)pid;
1865 drvdata->ctxid_vpid[idx] = (u64)vpid;
1866 spin_unlock(&drvdata->spinlock);
1867 return size;
1868}
1869static DEVICE_ATTR_RW(ctxid_pid);
1870
1871static ssize_t ctxid_masks_show(struct device *dev,
1872 struct device_attribute *attr,
1873 char *buf)
1874{
1875 unsigned long val1, val2;
1876 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1877
1878 spin_lock(&drvdata->spinlock); 349 spin_lock(&drvdata->spinlock);
1879 val1 = drvdata->ctxid_mask0;
1880 val2 = drvdata->ctxid_mask1;
1881 spin_unlock(&drvdata->spinlock);
1882 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
1883}
1884 350
1885static ssize_t ctxid_masks_store(struct device *dev,
1886 struct device_attribute *attr,
1887 const char *buf, size_t size)
1888{
1889 u8 i, j, maskbyte;
1890 unsigned long val1, val2, mask;
1891 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1892
1893 /*
1894 * only implemented when ctxid tracing is enabled, i.e. at least one
1895 * ctxid comparator is implemented and ctxid is greater than 0 bits
1896 * in length
1897 */
1898 if (!drvdata->ctxid_size || !drvdata->numcidc)
1899 return -EINVAL;
1900 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
1901 return -EINVAL;
1902
1903 spin_lock(&drvdata->spinlock);
1904 /*
1905 * each byte[0..3] controls mask value applied to ctxid
1906 * comparator[0..3]
1907 */
1908 switch (drvdata->numcidc) {
1909 case 0x1:
1910 /* COMP0, bits[7:0] */
1911 drvdata->ctxid_mask0 = val1 & 0xFF;
1912 break;
1913 case 0x2:
1914 /* COMP1, bits[15:8] */
1915 drvdata->ctxid_mask0 = val1 & 0xFFFF;
1916 break;
1917 case 0x3:
1918 /* COMP2, bits[23:16] */
1919 drvdata->ctxid_mask0 = val1 & 0xFFFFFF;
1920 break;
1921 case 0x4:
1922 /* COMP3, bits[31:24] */
1923 drvdata->ctxid_mask0 = val1;
1924 break;
1925 case 0x5:
1926 /* COMP4, bits[7:0] */
1927 drvdata->ctxid_mask0 = val1;
1928 drvdata->ctxid_mask1 = val2 & 0xFF;
1929 break;
1930 case 0x6:
1931 /* COMP5, bits[15:8] */
1932 drvdata->ctxid_mask0 = val1;
1933 drvdata->ctxid_mask1 = val2 & 0xFFFF;
1934 break;
1935 case 0x7:
1936 /* COMP6, bits[23:16] */
1937 drvdata->ctxid_mask0 = val1;
1938 drvdata->ctxid_mask1 = val2 & 0xFFFFFF;
1939 break;
1940 case 0x8:
1941 /* COMP7, bits[31:24] */
1942 drvdata->ctxid_mask0 = val1;
1943 drvdata->ctxid_mask1 = val2;
1944 break;
1945 default:
1946 break;
1947 }
1948 /* 351 /*
1949 * If software sets a mask bit to 1, it must program relevant byte 352 * Executing etm4_disable_hw on the cpu whose ETM is being disabled
1950 * of ctxid comparator value 0x0, otherwise behavior is unpredictable. 353 * ensures that register writes occur when cpu is powered.
1951 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
1952 * of ctxid comparator0 value (corresponding to byte 0) register.
1953 */ 354 */
1954 mask = drvdata->ctxid_mask0; 355 smp_call_function_single(drvdata->cpu, etm4_disable_hw, drvdata, 1);
1955 for (i = 0; i < drvdata->numcidc; i++) {
1956 /* mask value of corresponding ctxid comparator */
1957 maskbyte = mask & ETMv4_EVENT_MASK;
1958 /*
1959 * each bit corresponds to a byte of respective ctxid comparator
1960 * value register
1961 */
1962 for (j = 0; j < 8; j++) {
1963 if (maskbyte & 1)
1964 drvdata->ctxid_pid[i] &= ~(0xFF << (j * 8));
1965 maskbyte >>= 1;
1966 }
1967 /* Select the next ctxid comparator mask value */
1968 if (i == 3)
1969 /* ctxid comparators[4-7] */
1970 mask = drvdata->ctxid_mask1;
1971 else
1972 mask >>= 0x8;
1973 }
1974 356
1975 spin_unlock(&drvdata->spinlock); 357 spin_unlock(&drvdata->spinlock);
1976 return size; 358 put_online_cpus();
1977}
1978static DEVICE_ATTR_RW(ctxid_masks);
1979
1980static ssize_t vmid_idx_show(struct device *dev,
1981 struct device_attribute *attr,
1982 char *buf)
1983{
1984 unsigned long val;
1985 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1986
1987 val = drvdata->vmid_idx;
1988 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1989}
1990
1991static ssize_t vmid_idx_store(struct device *dev,
1992 struct device_attribute *attr,
1993 const char *buf, size_t size)
1994{
1995 unsigned long val;
1996 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1997
1998 if (kstrtoul(buf, 16, &val))
1999 return -EINVAL;
2000 if (val >= drvdata->numvmidc)
2001 return -EINVAL;
2002
2003 /*
2004 * Use spinlock to ensure index doesn't change while it gets
2005 * dereferenced multiple times within a spinlock block elsewhere.
2006 */
2007 spin_lock(&drvdata->spinlock);
2008 drvdata->vmid_idx = val;
2009 spin_unlock(&drvdata->spinlock);
2010 return size;
2011}
2012static DEVICE_ATTR_RW(vmid_idx);
2013
2014static ssize_t vmid_val_show(struct device *dev,
2015 struct device_attribute *attr,
2016 char *buf)
2017{
2018 unsigned long val;
2019 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2020
2021 val = (unsigned long)drvdata->vmid_val[drvdata->vmid_idx];
2022 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2023}
2024
2025static ssize_t vmid_val_store(struct device *dev,
2026 struct device_attribute *attr,
2027 const char *buf, size_t size)
2028{
2029 unsigned long val;
2030 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2031
2032 /*
2033 * only implemented when vmid tracing is enabled, i.e. at least one
2034 * vmid comparator is implemented and at least 8 bit vmid size
2035 */
2036 if (!drvdata->vmid_size || !drvdata->numvmidc)
2037 return -EINVAL;
2038 if (kstrtoul(buf, 16, &val))
2039 return -EINVAL;
2040 359
2041 spin_lock(&drvdata->spinlock); 360 dev_info(drvdata->dev, "ETM tracing disabled\n");
2042 drvdata->vmid_val[drvdata->vmid_idx] = (u64)val;
2043 spin_unlock(&drvdata->spinlock);
2044 return size;
2045} 361}
2046static DEVICE_ATTR_RW(vmid_val);
2047 362
2048static ssize_t vmid_masks_show(struct device *dev, 363static void etm4_disable(struct coresight_device *csdev)
2049 struct device_attribute *attr, char *buf)
2050{ 364{
2051 unsigned long val1, val2; 365 u32 mode;
2052 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent); 366 struct etmv4_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
2053
2054 spin_lock(&drvdata->spinlock);
2055 val1 = drvdata->vmid_mask0;
2056 val2 = drvdata->vmid_mask1;
2057 spin_unlock(&drvdata->spinlock);
2058 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
2059}
2060 367
2061static ssize_t vmid_masks_store(struct device *dev,
2062 struct device_attribute *attr,
2063 const char *buf, size_t size)
2064{
2065 u8 i, j, maskbyte;
2066 unsigned long val1, val2, mask;
2067 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2068 /* 368 /*
2069 * only implemented when vmid tracing is enabled, i.e. at least one 369 * For as long as the tracer isn't disabled another entity can't
2070 * vmid comparator is implemented and at least 8 bit vmid size 370 * change its status. As such we can read the status here without
371 * fearing it will change under us.
2071 */ 372 */
2072 if (!drvdata->vmid_size || !drvdata->numvmidc) 373 mode = local_read(&drvdata->mode);
2073 return -EINVAL;
2074 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
2075 return -EINVAL;
2076
2077 spin_lock(&drvdata->spinlock);
2078 374
2079 /* 375 switch (mode) {
2080 * each byte[0..3] controls mask value applied to vmid 376 case CS_MODE_DISABLED:
2081 * comparator[0..3]
2082 */
2083 switch (drvdata->numvmidc) {
2084 case 0x1:
2085 /* COMP0, bits[7:0] */
2086 drvdata->vmid_mask0 = val1 & 0xFF;
2087 break;
2088 case 0x2:
2089 /* COMP1, bits[15:8] */
2090 drvdata->vmid_mask0 = val1 & 0xFFFF;
2091 break;
2092 case 0x3:
2093 /* COMP2, bits[23:16] */
2094 drvdata->vmid_mask0 = val1 & 0xFFFFFF;
2095 break; 377 break;
2096 case 0x4: 378 case CS_MODE_SYSFS:
2097 /* COMP3, bits[31:24] */ 379 etm4_disable_sysfs(csdev);
2098 drvdata->vmid_mask0 = val1;
2099 break; 380 break;
2100 case 0x5: 381 case CS_MODE_PERF:
2101 /* COMP4, bits[7:0] */ 382 etm4_disable_perf(csdev);
2102 drvdata->vmid_mask0 = val1;
2103 drvdata->vmid_mask1 = val2 & 0xFF;
2104 break;
2105 case 0x6:
2106 /* COMP5, bits[15:8] */
2107 drvdata->vmid_mask0 = val1;
2108 drvdata->vmid_mask1 = val2 & 0xFFFF;
2109 break;
2110 case 0x7:
2111 /* COMP6, bits[23:16] */
2112 drvdata->vmid_mask0 = val1;
2113 drvdata->vmid_mask1 = val2 & 0xFFFFFF;
2114 break;
2115 case 0x8:
2116 /* COMP7, bits[31:24] */
2117 drvdata->vmid_mask0 = val1;
2118 drvdata->vmid_mask1 = val2;
2119 break;
2120 default:
2121 break; 383 break;
2122 } 384 }
2123 385
2124 /* 386 if (mode)
2125 * If software sets a mask bit to 1, it must program relevant byte 387 local_set(&drvdata->mode, CS_MODE_DISABLED);
2126 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2127 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2128 * of vmid comparator0 value (corresponding to byte 0) register.
2129 */
2130 mask = drvdata->vmid_mask0;
2131 for (i = 0; i < drvdata->numvmidc; i++) {
2132 /* mask value of corresponding vmid comparator */
2133 maskbyte = mask & ETMv4_EVENT_MASK;
2134 /*
2135 * each bit corresponds to a byte of respective vmid comparator
2136 * value register
2137 */
2138 for (j = 0; j < 8; j++) {
2139 if (maskbyte & 1)
2140 drvdata->vmid_val[i] &= ~(0xFF << (j * 8));
2141 maskbyte >>= 1;
2142 }
2143 /* Select the next vmid comparator mask value */
2144 if (i == 3)
2145 /* vmid comparators[4-7] */
2146 mask = drvdata->vmid_mask1;
2147 else
2148 mask >>= 0x8;
2149 }
2150 spin_unlock(&drvdata->spinlock);
2151 return size;
2152}
2153static DEVICE_ATTR_RW(vmid_masks);
2154
2155static ssize_t cpu_show(struct device *dev,
2156 struct device_attribute *attr, char *buf)
2157{
2158 int val;
2159 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2160
2161 val = drvdata->cpu;
2162 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2163
2164} 388}
2165static DEVICE_ATTR_RO(cpu);
2166
2167static struct attribute *coresight_etmv4_attrs[] = {
2168 &dev_attr_nr_pe_cmp.attr,
2169 &dev_attr_nr_addr_cmp.attr,
2170 &dev_attr_nr_cntr.attr,
2171 &dev_attr_nr_ext_inp.attr,
2172 &dev_attr_numcidc.attr,
2173 &dev_attr_numvmidc.attr,
2174 &dev_attr_nrseqstate.attr,
2175 &dev_attr_nr_resource.attr,
2176 &dev_attr_nr_ss_cmp.attr,
2177 &dev_attr_reset.attr,
2178 &dev_attr_mode.attr,
2179 &dev_attr_pe.attr,
2180 &dev_attr_event.attr,
2181 &dev_attr_event_instren.attr,
2182 &dev_attr_event_ts.attr,
2183 &dev_attr_syncfreq.attr,
2184 &dev_attr_cyc_threshold.attr,
2185 &dev_attr_bb_ctrl.attr,
2186 &dev_attr_event_vinst.attr,
2187 &dev_attr_s_exlevel_vinst.attr,
2188 &dev_attr_ns_exlevel_vinst.attr,
2189 &dev_attr_addr_idx.attr,
2190 &dev_attr_addr_instdatatype.attr,
2191 &dev_attr_addr_single.attr,
2192 &dev_attr_addr_range.attr,
2193 &dev_attr_addr_start.attr,
2194 &dev_attr_addr_stop.attr,
2195 &dev_attr_addr_ctxtype.attr,
2196 &dev_attr_addr_context.attr,
2197 &dev_attr_seq_idx.attr,
2198 &dev_attr_seq_state.attr,
2199 &dev_attr_seq_event.attr,
2200 &dev_attr_seq_reset_event.attr,
2201 &dev_attr_cntr_idx.attr,
2202 &dev_attr_cntrldvr.attr,
2203 &dev_attr_cntr_val.attr,
2204 &dev_attr_cntr_ctrl.attr,
2205 &dev_attr_res_idx.attr,
2206 &dev_attr_res_ctrl.attr,
2207 &dev_attr_ctxid_idx.attr,
2208 &dev_attr_ctxid_pid.attr,
2209 &dev_attr_ctxid_masks.attr,
2210 &dev_attr_vmid_idx.attr,
2211 &dev_attr_vmid_val.attr,
2212 &dev_attr_vmid_masks.attr,
2213 &dev_attr_cpu.attr,
2214 NULL,
2215};
2216
2217#define coresight_simple_func(name, offset) \
2218static ssize_t name##_show(struct device *_dev, \
2219 struct device_attribute *attr, char *buf) \
2220{ \
2221 struct etmv4_drvdata *drvdata = dev_get_drvdata(_dev->parent); \
2222 return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
2223 readl_relaxed(drvdata->base + offset)); \
2224} \
2225static DEVICE_ATTR_RO(name)
2226
2227coresight_simple_func(trcoslsr, TRCOSLSR);
2228coresight_simple_func(trcpdcr, TRCPDCR);
2229coresight_simple_func(trcpdsr, TRCPDSR);
2230coresight_simple_func(trclsr, TRCLSR);
2231coresight_simple_func(trcauthstatus, TRCAUTHSTATUS);
2232coresight_simple_func(trcdevid, TRCDEVID);
2233coresight_simple_func(trcdevtype, TRCDEVTYPE);
2234coresight_simple_func(trcpidr0, TRCPIDR0);
2235coresight_simple_func(trcpidr1, TRCPIDR1);
2236coresight_simple_func(trcpidr2, TRCPIDR2);
2237coresight_simple_func(trcpidr3, TRCPIDR3);
2238
2239static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2240 &dev_attr_trcoslsr.attr,
2241 &dev_attr_trcpdcr.attr,
2242 &dev_attr_trcpdsr.attr,
2243 &dev_attr_trclsr.attr,
2244 &dev_attr_trcauthstatus.attr,
2245 &dev_attr_trcdevid.attr,
2246 &dev_attr_trcdevtype.attr,
2247 &dev_attr_trcpidr0.attr,
2248 &dev_attr_trcpidr1.attr,
2249 &dev_attr_trcpidr2.attr,
2250 &dev_attr_trcpidr3.attr,
2251 NULL,
2252};
2253 389
2254coresight_simple_func(trcidr0, TRCIDR0); 390static const struct coresight_ops_source etm4_source_ops = {
2255coresight_simple_func(trcidr1, TRCIDR1); 391 .cpu_id = etm4_cpu_id,
2256coresight_simple_func(trcidr2, TRCIDR2); 392 .trace_id = etm4_trace_id,
2257coresight_simple_func(trcidr3, TRCIDR3); 393 .enable = etm4_enable,
2258coresight_simple_func(trcidr4, TRCIDR4); 394 .disable = etm4_disable,
2259coresight_simple_func(trcidr5, TRCIDR5);
2260/* trcidr[6,7] are reserved */
2261coresight_simple_func(trcidr8, TRCIDR8);
2262coresight_simple_func(trcidr9, TRCIDR9);
2263coresight_simple_func(trcidr10, TRCIDR10);
2264coresight_simple_func(trcidr11, TRCIDR11);
2265coresight_simple_func(trcidr12, TRCIDR12);
2266coresight_simple_func(trcidr13, TRCIDR13);
2267
2268static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2269 &dev_attr_trcidr0.attr,
2270 &dev_attr_trcidr1.attr,
2271 &dev_attr_trcidr2.attr,
2272 &dev_attr_trcidr3.attr,
2273 &dev_attr_trcidr4.attr,
2274 &dev_attr_trcidr5.attr,
2275 /* trcidr[6,7] are reserved */
2276 &dev_attr_trcidr8.attr,
2277 &dev_attr_trcidr9.attr,
2278 &dev_attr_trcidr10.attr,
2279 &dev_attr_trcidr11.attr,
2280 &dev_attr_trcidr12.attr,
2281 &dev_attr_trcidr13.attr,
2282 NULL,
2283};
2284
2285static const struct attribute_group coresight_etmv4_group = {
2286 .attrs = coresight_etmv4_attrs,
2287};
2288
2289static const struct attribute_group coresight_etmv4_mgmt_group = {
2290 .attrs = coresight_etmv4_mgmt_attrs,
2291 .name = "mgmt",
2292};
2293
2294static const struct attribute_group coresight_etmv4_trcidr_group = {
2295 .attrs = coresight_etmv4_trcidr_attrs,
2296 .name = "trcidr",
2297}; 395};
2298 396
2299static const struct attribute_group *coresight_etmv4_groups[] = { 397static const struct coresight_ops etm4_cs_ops = {
2300 &coresight_etmv4_group, 398 .source_ops = &etm4_source_ops,
2301 &coresight_etmv4_mgmt_group,
2302 &coresight_etmv4_trcidr_group,
2303 NULL,
2304}; 399};
2305 400
2306static void etm4_init_arch_data(void *info) 401static void etm4_init_arch_data(void *info)
@@ -2313,6 +408,9 @@ static void etm4_init_arch_data(void *info)
2313 u32 etmidr5; 408 u32 etmidr5;
2314 struct etmv4_drvdata *drvdata = info; 409 struct etmv4_drvdata *drvdata = info;
2315 410
411 /* Make sure all registers are accessible */
412 etm4_os_unlock(drvdata);
413
2316 CS_UNLOCK(drvdata->base); 414 CS_UNLOCK(drvdata->base);
2317 415
2318 /* find all capabilities of the tracing unit */ 416 /* find all capabilities of the tracing unit */
@@ -2464,93 +562,115 @@ static void etm4_init_arch_data(void *info)
2464 CS_LOCK(drvdata->base); 562 CS_LOCK(drvdata->base);
2465} 563}
2466 564
2467static void etm4_init_default_data(struct etmv4_drvdata *drvdata) 565static void etm4_set_default(struct etmv4_config *config)
2468{ 566{
2469 int i; 567 if (WARN_ON_ONCE(!config))
568 return;
2470 569
2471 drvdata->pe_sel = 0x0; 570 /*
2472 drvdata->cfg = (ETMv4_MODE_CTXID | ETM_MODE_VMID | 571 * Make default initialisation trace everything
2473 ETMv4_MODE_TIMESTAMP | ETM_MODE_RETURNSTACK); 572 *
573 * Select the "always true" resource selector on the
574 * "Enablign Event" line and configure address range comparator
575 * '0' to trace all the possible address range. From there
576 * configure the "include/exclude" engine to include address
577 * range comparator '0'.
578 */
2474 579
2475 /* disable all events tracing */ 580 /* disable all events tracing */
2476 drvdata->eventctrl0 = 0x0; 581 config->eventctrl0 = 0x0;
2477 drvdata->eventctrl1 = 0x0; 582 config->eventctrl1 = 0x0;
2478 583
2479 /* disable stalling */ 584 /* disable stalling */
2480 drvdata->stall_ctrl = 0x0; 585 config->stall_ctrl = 0x0;
586
587 /* enable trace synchronization every 4096 bytes, if available */
588 config->syncfreq = 0xC;
2481 589
2482 /* disable timestamp event */ 590 /* disable timestamp event */
2483 drvdata->ts_ctrl = 0x0; 591 config->ts_ctrl = 0x0;
2484 592
2485 /* enable trace synchronization every 4096 bytes for trace */ 593 /* TRCVICTLR::EVENT = 0x01, select the always on logic */
2486 if (drvdata->syncpr == false) 594 config->vinst_ctrl |= BIT(0);
2487 drvdata->syncfreq = 0xC;
2488 595
2489 /* 596 /*
2490 * enable viewInst to trace everything with start-stop logic in 597 * TRCVICTLR::SSSTATUS == 1, the start-stop logic is
2491 * started state 598 * in the started state
2492 */ 599 */
2493 drvdata->vinst_ctrl |= BIT(0); 600 config->vinst_ctrl |= BIT(9);
2494 /* set initial state of start-stop logic */
2495 if (drvdata->nr_addr_cmp)
2496 drvdata->vinst_ctrl |= BIT(9);
2497 601
2498 /* no address range filtering for ViewInst */ 602 /*
2499 drvdata->viiectlr = 0x0; 603 * Configure address range comparator '0' to encompass all
2500 /* no start-stop filtering for ViewInst */ 604 * possible addresses.
2501 drvdata->vissctlr = 0x0; 605 */
2502 606
2503 /* disable seq events */ 607 /* First half of default address comparator: start at address 0 */
2504 for (i = 0; i < drvdata->nrseqstate-1; i++) 608 config->addr_val[ETM_DEFAULT_ADDR_COMP] = 0x0;
2505 drvdata->seq_ctrl[i] = 0x0; 609 /* trace instruction addresses */
2506 drvdata->seq_rst = 0x0; 610 config->addr_acc[ETM_DEFAULT_ADDR_COMP] &= ~(BIT(0) | BIT(1));
2507 drvdata->seq_state = 0x0; 611 /* EXLEVEL_NS, bits[12:15], only trace application and kernel space */
612 config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= ETM_EXLEVEL_NS_HYP;
613 /* EXLEVEL_S, bits[11:8], don't trace anything in secure state */
614 config->addr_acc[ETM_DEFAULT_ADDR_COMP] |= (ETM_EXLEVEL_S_APP |
615 ETM_EXLEVEL_S_OS |
616 ETM_EXLEVEL_S_HYP);
617 config->addr_type[ETM_DEFAULT_ADDR_COMP] = ETM_ADDR_TYPE_RANGE;
2508 618
2509 /* disable external input events */ 619 /*
2510 drvdata->ext_inp = 0x0; 620 * Second half of default address comparator: go all
621 * the way to the top.
622 */
623 config->addr_val[ETM_DEFAULT_ADDR_COMP + 1] = ~0x0;
624 /* trace instruction addresses */
625 config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] &= ~(BIT(0) | BIT(1));
626 /* Address comparator type must be equal for both halves */
627 config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] =
628 config->addr_acc[ETM_DEFAULT_ADDR_COMP];
629 config->addr_type[ETM_DEFAULT_ADDR_COMP + 1] = ETM_ADDR_TYPE_RANGE;
2511 630
2512 for (i = 0; i < drvdata->nr_cntr; i++) { 631 /*
2513 drvdata->cntrldvr[i] = 0x0; 632 * Configure the ViewInst function to filter on address range
2514 drvdata->cntr_ctrl[i] = 0x0; 633 * comparator '0'.
2515 drvdata->cntr_val[i] = 0x0; 634 */
2516 } 635 config->viiectlr = BIT(0);
2517 636
2518 /* Resource selector pair 0 is always implemented and reserved */ 637 /* no start-stop filtering for ViewInst */
2519 drvdata->res_idx = 0x2; 638 config->vissctlr = 0x0;
2520 for (i = 2; i < drvdata->nr_resource * 2; i++) 639}
2521 drvdata->res_ctrl[i] = 0x0;
2522 640
2523 for (i = 0; i < drvdata->nr_ss_cmp; i++) { 641void etm4_config_trace_mode(struct etmv4_config *config)
2524 drvdata->ss_ctrl[i] = 0x0; 642{
2525 drvdata->ss_pe_cmp[i] = 0x0; 643 u32 addr_acc, mode;
2526 }
2527 644
2528 if (drvdata->nr_addr_cmp >= 1) { 645 mode = config->mode;
2529 drvdata->addr_val[0] = (unsigned long)_stext; 646 mode &= (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER);
2530 drvdata->addr_val[1] = (unsigned long)_etext;
2531 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
2532 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
2533 }
2534 647
2535 for (i = 0; i < drvdata->numcidc; i++) { 648 /* excluding kernel AND user space doesn't make sense */
2536 drvdata->ctxid_pid[i] = 0x0; 649 WARN_ON_ONCE(mode == (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER));
2537 drvdata->ctxid_vpid[i] = 0x0;
2538 }
2539 650
2540 drvdata->ctxid_mask0 = 0x0; 651 /* nothing to do if neither flags are set */
2541 drvdata->ctxid_mask1 = 0x0; 652 if (!(mode & ETM_MODE_EXCL_KERN) && !(mode & ETM_MODE_EXCL_USER))
653 return;
2542 654
2543 for (i = 0; i < drvdata->numvmidc; i++) 655 addr_acc = config->addr_acc[ETM_DEFAULT_ADDR_COMP];
2544 drvdata->vmid_val[i] = 0x0; 656 /* clear default config */
2545 drvdata->vmid_mask0 = 0x0; 657 addr_acc &= ~(ETM_EXLEVEL_NS_APP | ETM_EXLEVEL_NS_OS);
2546 drvdata->vmid_mask1 = 0x0;
2547 658
2548 /* 659 /*
2549 * A trace ID value of 0 is invalid, so let's start at some 660 * EXLEVEL_NS, bits[15:12]
2550 * random value that fits in 7 bits. ETMv3.x has 0x10 so let's 661 * The Exception levels are:
2551 * start at 0x20. 662 * Bit[12] Exception level 0 - Application
663 * Bit[13] Exception level 1 - OS
664 * Bit[14] Exception level 2 - Hypervisor
665 * Bit[15] Never implemented
2552 */ 666 */
2553 drvdata->trcid = 0x20 + drvdata->cpu; 667 if (mode & ETM_MODE_EXCL_KERN)
668 addr_acc |= ETM_EXLEVEL_NS_OS;
669 else
670 addr_acc |= ETM_EXLEVEL_NS_APP;
671
672 config->addr_acc[ETM_DEFAULT_ADDR_COMP] = addr_acc;
673 config->addr_acc[ETM_DEFAULT_ADDR_COMP + 1] = addr_acc;
2554} 674}
2555 675
2556static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action, 676static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
@@ -2569,7 +689,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
2569 etmdrvdata[cpu]->os_unlock = true; 689 etmdrvdata[cpu]->os_unlock = true;
2570 } 690 }
2571 691
2572 if (etmdrvdata[cpu]->enable) 692 if (local_read(&etmdrvdata[cpu]->mode))
2573 etm4_enable_hw(etmdrvdata[cpu]); 693 etm4_enable_hw(etmdrvdata[cpu]);
2574 spin_unlock(&etmdrvdata[cpu]->spinlock); 694 spin_unlock(&etmdrvdata[cpu]->spinlock);
2575 break; 695 break;
@@ -2582,7 +702,7 @@ static int etm4_cpu_callback(struct notifier_block *nfb, unsigned long action,
2582 702
2583 case CPU_DYING: 703 case CPU_DYING:
2584 spin_lock(&etmdrvdata[cpu]->spinlock); 704 spin_lock(&etmdrvdata[cpu]->spinlock);
2585 if (etmdrvdata[cpu]->enable) 705 if (local_read(&etmdrvdata[cpu]->mode))
2586 etm4_disable_hw(etmdrvdata[cpu]); 706 etm4_disable_hw(etmdrvdata[cpu]);
2587 spin_unlock(&etmdrvdata[cpu]->spinlock); 707 spin_unlock(&etmdrvdata[cpu]->spinlock);
2588 break; 708 break;
@@ -2595,6 +715,11 @@ static struct notifier_block etm4_cpu_notifier = {
2595 .notifier_call = etm4_cpu_callback, 715 .notifier_call = etm4_cpu_callback,
2596}; 716};
2597 717
718static void etm4_init_trace_id(struct etmv4_drvdata *drvdata)
719{
720 drvdata->trcid = coresight_get_trace_id(drvdata->cpu);
721}
722
2598static int etm4_probe(struct amba_device *adev, const struct amba_id *id) 723static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2599{ 724{
2600 int ret; 725 int ret;
@@ -2638,9 +763,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2638 get_online_cpus(); 763 get_online_cpus();
2639 etmdrvdata[drvdata->cpu] = drvdata; 764 etmdrvdata[drvdata->cpu] = drvdata;
2640 765
2641 if (!smp_call_function_single(drvdata->cpu, etm4_os_unlock, drvdata, 1))
2642 drvdata->os_unlock = true;
2643
2644 if (smp_call_function_single(drvdata->cpu, 766 if (smp_call_function_single(drvdata->cpu,
2645 etm4_init_arch_data, drvdata, 1)) 767 etm4_init_arch_data, drvdata, 1))
2646 dev_err(dev, "ETM arch init failed\n"); 768 dev_err(dev, "ETM arch init failed\n");
@@ -2654,9 +776,9 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2654 ret = -EINVAL; 776 ret = -EINVAL;
2655 goto err_arch_supported; 777 goto err_arch_supported;
2656 } 778 }
2657 etm4_init_default_data(drvdata);
2658 779
2659 pm_runtime_put(&adev->dev); 780 etm4_init_trace_id(drvdata);
781 etm4_set_default(&drvdata->config);
2660 782
2661 desc->type = CORESIGHT_DEV_TYPE_SOURCE; 783 desc->type = CORESIGHT_DEV_TYPE_SOURCE;
2662 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC; 784 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
@@ -2667,9 +789,16 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2667 drvdata->csdev = coresight_register(desc); 789 drvdata->csdev = coresight_register(desc);
2668 if (IS_ERR(drvdata->csdev)) { 790 if (IS_ERR(drvdata->csdev)) {
2669 ret = PTR_ERR(drvdata->csdev); 791 ret = PTR_ERR(drvdata->csdev);
2670 goto err_coresight_register; 792 goto err_arch_supported;
2671 } 793 }
2672 794
795 ret = etm_perf_symlink(drvdata->csdev, true);
796 if (ret) {
797 coresight_unregister(drvdata->csdev);
798 goto err_arch_supported;
799 }
800
801 pm_runtime_put(&adev->dev);
2673 dev_info(dev, "%s initialized\n", (char *)id->data); 802 dev_info(dev, "%s initialized\n", (char *)id->data);
2674 803
2675 if (boot_enable) { 804 if (boot_enable) {
@@ -2680,8 +809,6 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
2680 return 0; 809 return 0;
2681 810
2682err_arch_supported: 811err_arch_supported:
2683 pm_runtime_put(&adev->dev);
2684err_coresight_register:
2685 if (--etm4_count == 0) 812 if (--etm4_count == 0)
2686 unregister_hotcpu_notifier(&etm4_cpu_notifier); 813 unregister_hotcpu_notifier(&etm4_cpu_notifier);
2687 return ret; 814 return ret;
@@ -2698,6 +825,11 @@ static struct amba_id etm4_ids[] = {
2698 .mask = 0x000fffff, 825 .mask = 0x000fffff,
2699 .data = "ETM 4.0", 826 .data = "ETM 4.0",
2700 }, 827 },
828 { /* ETM 4.0 - A72, Maia, HiSilicon */
829 .id = 0x000bb95a,
830 .mask = 0x000fffff,
831 .data = "ETM 4.0",
832 },
2701 { 0, 0}, 833 { 0, 0},
2702}; 834};
2703 835
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.h b/drivers/hwtracing/coresight/coresight-etm4x.h
index c34100205ca9..5359c5197c1d 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.h
+++ b/drivers/hwtracing/coresight/coresight-etm4x.h
@@ -13,6 +13,7 @@
13#ifndef _CORESIGHT_CORESIGHT_ETM_H 13#ifndef _CORESIGHT_CORESIGHT_ETM_H
14#define _CORESIGHT_CORESIGHT_ETM_H 14#define _CORESIGHT_CORESIGHT_ETM_H
15 15
16#include <asm/local.h>
16#include <linux/spinlock.h> 17#include <linux/spinlock.h>
17#include "coresight-priv.h" 18#include "coresight-priv.h"
18 19
@@ -175,71 +176,38 @@
175#define ETM_MODE_TRACE_RESET BIT(25) 176#define ETM_MODE_TRACE_RESET BIT(25)
176#define ETM_MODE_TRACE_ERR BIT(26) 177#define ETM_MODE_TRACE_ERR BIT(26)
177#define ETM_MODE_VIEWINST_STARTSTOP BIT(27) 178#define ETM_MODE_VIEWINST_STARTSTOP BIT(27)
178#define ETMv4_MODE_ALL 0xFFFFFFF 179#define ETMv4_MODE_ALL (GENMASK(27, 0) | \
180 ETM_MODE_EXCL_KERN | \
181 ETM_MODE_EXCL_USER)
179 182
180#define TRCSTATR_IDLE_BIT 0 183#define TRCSTATR_IDLE_BIT 0
184#define ETM_DEFAULT_ADDR_COMP 0
185
186/* secure state access levels */
187#define ETM_EXLEVEL_S_APP BIT(8)
188#define ETM_EXLEVEL_S_OS BIT(9)
189#define ETM_EXLEVEL_S_NA BIT(10)
190#define ETM_EXLEVEL_S_HYP BIT(11)
191/* non-secure state access levels */
192#define ETM_EXLEVEL_NS_APP BIT(12)
193#define ETM_EXLEVEL_NS_OS BIT(13)
194#define ETM_EXLEVEL_NS_HYP BIT(14)
195#define ETM_EXLEVEL_NS_NA BIT(15)
181 196
182/** 197/**
183 * struct etm4_drvdata - specifics associated to an ETM component 198 * struct etmv4_config - configuration information related to an ETMv4
184 * @base: Memory mapped base address for this component.
185 * @dev: The device entity associated to this component.
186 * @csdev: Component vitals needed by the framework.
187 * @spinlock: Only one at a time pls.
188 * @cpu: The cpu this component is affined to.
189 * @arch: ETM version number.
190 * @enable: Is this ETM currently tracing.
191 * @sticky_enable: true if ETM base configuration has been done.
192 * @boot_enable:True if we should start tracing at boot time.
193 * @os_unlock: True if access to management registers is allowed.
194 * @nr_pe: The number of processing entity available for tracing.
195 * @nr_pe_cmp: The number of processing entity comparator inputs that are
196 * available for tracing.
197 * @nr_addr_cmp:Number of pairs of address comparators available
198 * as found in ETMIDR4 0-3.
199 * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30.
200 * @nr_ext_inp: Number of external input.
201 * @numcidc: Number of contextID comparators.
202 * @numvmidc: Number of VMID comparators.
203 * @nrseqstate: The number of sequencer states that are implemented.
204 * @nr_event: Indicates how many events the trace unit support.
205 * @nr_resource:The number of resource selection pairs available for tracing.
206 * @nr_ss_cmp: Number of single-shot comparator controls that are available.
207 * @mode: Controls various modes supported by this ETM. 199 * @mode: Controls various modes supported by this ETM.
208 * @trcid: value of the current ID for this component.
209 * @trcid_size: Indicates the trace ID width.
210 * @instrp0: Tracing of load and store instructions
211 * as P0 elements is supported.
212 * @trccond: If the trace unit supports conditional
213 * instruction tracing.
214 * @retstack: Indicates if the implementation supports a return stack.
215 * @trc_error: Whether a trace unit can trace a system
216 * error exception.
217 * @atbtrig: If the implementation can support ATB triggers
218 * @lpoverride: If the implementation can support low-power state over.
219 * @pe_sel: Controls which PE to trace. 200 * @pe_sel: Controls which PE to trace.
220 * @cfg: Controls the tracing options. 201 * @cfg: Controls the tracing options.
221 * @eventctrl0: Controls the tracing of arbitrary events. 202 * @eventctrl0: Controls the tracing of arbitrary events.
222 * @eventctrl1: Controls the behavior of the events that @event_ctrl0 selects. 203 * @eventctrl1: Controls the behavior of the events that @event_ctrl0 selects.
223 * @stallctl: If functionality that prevents trace unit buffer overflows 204 * @stallctl: If functionality that prevents trace unit buffer overflows
224 * is available. 205 * is available.
225 * @sysstall: Does the system support stall control of the PE?
226 * @nooverflow: Indicate if overflow prevention is supported.
227 * @stall_ctrl: Enables trace unit functionality that prevents trace
228 * unit buffer overflows.
229 * @ts_size: Global timestamp size field.
230 * @ts_ctrl: Controls the insertion of global timestamps in the 206 * @ts_ctrl: Controls the insertion of global timestamps in the
231 * trace streams. 207 * trace streams.
232 * @syncpr: Indicates if an implementation has a fixed
233 * synchronization period.
234 * @syncfreq: Controls how often trace synchronization requests occur. 208 * @syncfreq: Controls how often trace synchronization requests occur.
235 * @trccci: Indicates if the trace unit supports cycle counting
236 * for instruction.
237 * @ccsize: Indicates the size of the cycle counter in bits.
238 * @ccitmin: minimum value that can be programmed in
239 * the TRCCCCTLR register. 209 * the TRCCCCTLR register.
240 * @ccctlr: Sets the threshold value for cycle counting. 210 * @ccctlr: Sets the threshold value for cycle counting.
241 * @trcbb: Indicates if the trace unit supports branch broadcast tracing.
242 * @q_support: Q element support characteristics.
243 * @vinst_ctrl: Controls instruction trace filtering. 211 * @vinst_ctrl: Controls instruction trace filtering.
244 * @viiectlr: Set or read, the address range comparators. 212 * @viiectlr: Set or read, the address range comparators.
245 * @vissctlr: Set, or read, the single address comparators that control the 213 * @vissctlr: Set, or read, the single address comparators that control the
@@ -264,73 +232,28 @@
264 * @addr_acc: Address comparator access type. 232 * @addr_acc: Address comparator access type.
265 * @addr_type: Current status of the comparator register. 233 * @addr_type: Current status of the comparator register.
266 * @ctxid_idx: Context ID index selector. 234 * @ctxid_idx: Context ID index selector.
267 * @ctxid_size: Size of the context ID field to consider.
268 * @ctxid_pid: Value of the context ID comparator. 235 * @ctxid_pid: Value of the context ID comparator.
269 * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise 236 * @ctxid_vpid: Virtual PID seen by users if PID namespace is enabled, otherwise
270 * the same value of ctxid_pid. 237 * the same value of ctxid_pid.
271 * @ctxid_mask0:Context ID comparator mask for comparator 0-3. 238 * @ctxid_mask0:Context ID comparator mask for comparator 0-3.
272 * @ctxid_mask1:Context ID comparator mask for comparator 4-7. 239 * @ctxid_mask1:Context ID comparator mask for comparator 4-7.
273 * @vmid_idx: VM ID index selector. 240 * @vmid_idx: VM ID index selector.
274 * @vmid_size: Size of the VM ID comparator to consider.
275 * @vmid_val: Value of the VM ID comparator. 241 * @vmid_val: Value of the VM ID comparator.
276 * @vmid_mask0: VM ID comparator mask for comparator 0-3. 242 * @vmid_mask0: VM ID comparator mask for comparator 0-3.
277 * @vmid_mask1: VM ID comparator mask for comparator 4-7. 243 * @vmid_mask1: VM ID comparator mask for comparator 4-7.
278 * @s_ex_level: In secure state, indicates whether instruction tracing is
279 * supported for the corresponding Exception level.
280 * @ns_ex_level:In non-secure state, indicates whether instruction tracing is
281 * supported for the corresponding Exception level.
282 * @ext_inp: External input selection. 244 * @ext_inp: External input selection.
283 */ 245 */
284struct etmv4_drvdata { 246struct etmv4_config {
285 void __iomem *base;
286 struct device *dev;
287 struct coresight_device *csdev;
288 spinlock_t spinlock;
289 int cpu;
290 u8 arch;
291 bool enable;
292 bool sticky_enable;
293 bool boot_enable;
294 bool os_unlock;
295 u8 nr_pe;
296 u8 nr_pe_cmp;
297 u8 nr_addr_cmp;
298 u8 nr_cntr;
299 u8 nr_ext_inp;
300 u8 numcidc;
301 u8 numvmidc;
302 u8 nrseqstate;
303 u8 nr_event;
304 u8 nr_resource;
305 u8 nr_ss_cmp;
306 u32 mode; 247 u32 mode;
307 u8 trcid;
308 u8 trcid_size;
309 bool instrp0;
310 bool trccond;
311 bool retstack;
312 bool trc_error;
313 bool atbtrig;
314 bool lpoverride;
315 u32 pe_sel; 248 u32 pe_sel;
316 u32 cfg; 249 u32 cfg;
317 u32 eventctrl0; 250 u32 eventctrl0;
318 u32 eventctrl1; 251 u32 eventctrl1;
319 bool stallctl;
320 bool sysstall;
321 bool nooverflow;
322 u32 stall_ctrl; 252 u32 stall_ctrl;
323 u8 ts_size;
324 u32 ts_ctrl; 253 u32 ts_ctrl;
325 bool syncpr;
326 u32 syncfreq; 254 u32 syncfreq;
327 bool trccci;
328 u8 ccsize;
329 u8 ccitmin;
330 u32 ccctlr; 255 u32 ccctlr;
331 bool trcbb;
332 u32 bb_ctrl; 256 u32 bb_ctrl;
333 bool q_support;
334 u32 vinst_ctrl; 257 u32 vinst_ctrl;
335 u32 viiectlr; 258 u32 viiectlr;
336 u32 vissctlr; 259 u32 vissctlr;
@@ -353,19 +276,119 @@ struct etmv4_drvdata {
353 u64 addr_acc[ETM_MAX_SINGLE_ADDR_CMP]; 276 u64 addr_acc[ETM_MAX_SINGLE_ADDR_CMP];
354 u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP]; 277 u8 addr_type[ETM_MAX_SINGLE_ADDR_CMP];
355 u8 ctxid_idx; 278 u8 ctxid_idx;
356 u8 ctxid_size;
357 u64 ctxid_pid[ETMv4_MAX_CTXID_CMP]; 279 u64 ctxid_pid[ETMv4_MAX_CTXID_CMP];
358 u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP]; 280 u64 ctxid_vpid[ETMv4_MAX_CTXID_CMP];
359 u32 ctxid_mask0; 281 u32 ctxid_mask0;
360 u32 ctxid_mask1; 282 u32 ctxid_mask1;
361 u8 vmid_idx; 283 u8 vmid_idx;
362 u8 vmid_size;
363 u64 vmid_val[ETM_MAX_VMID_CMP]; 284 u64 vmid_val[ETM_MAX_VMID_CMP];
364 u32 vmid_mask0; 285 u32 vmid_mask0;
365 u32 vmid_mask1; 286 u32 vmid_mask1;
287 u32 ext_inp;
288};
289
290/**
291 * struct etm4_drvdata - specifics associated to an ETM component
292 * @base: Memory mapped base address for this component.
293 * @dev: The device entity associated to this component.
294 * @csdev: Component vitals needed by the framework.
295 * @spinlock: Only one at a time pls.
296 * @mode: This tracer's mode, i.e sysFS, Perf or disabled.
297 * @cpu: The cpu this component is affined to.
298 * @arch: ETM version number.
299 * @nr_pe: The number of processing entity available for tracing.
300 * @nr_pe_cmp: The number of processing entity comparator inputs that are
301 * available for tracing.
302 * @nr_addr_cmp:Number of pairs of address comparators available
303 * as found in ETMIDR4 0-3.
304 * @nr_cntr: Number of counters as found in ETMIDR5 bit 28-30.
305 * @nr_ext_inp: Number of external input.
306 * @numcidc: Number of contextID comparators.
307 * @numvmidc: Number of VMID comparators.
308 * @nrseqstate: The number of sequencer states that are implemented.
309 * @nr_event: Indicates how many events the trace unit support.
310 * @nr_resource:The number of resource selection pairs available for tracing.
311 * @nr_ss_cmp: Number of single-shot comparator controls that are available.
312 * @trcid: value of the current ID for this component.
313 * @trcid_size: Indicates the trace ID width.
314 * @ts_size: Global timestamp size field.
315 * @ctxid_size: Size of the context ID field to consider.
316 * @vmid_size: Size of the VM ID comparator to consider.
317 * @ccsize: Indicates the size of the cycle counter in bits.
318 * @ccitmin: minimum value that can be programmed in
319 * @s_ex_level: In secure state, indicates whether instruction tracing is
320 * supported for the corresponding Exception level.
321 * @ns_ex_level:In non-secure state, indicates whether instruction tracing is
322 * supported for the corresponding Exception level.
323 * @sticky_enable: true if ETM base configuration has been done.
324 * @boot_enable:True if we should start tracing at boot time.
325 * @os_unlock: True if access to management registers is allowed.
326 * @instrp0: Tracing of load and store instructions
327 * as P0 elements is supported.
328 * @trcbb: Indicates if the trace unit supports branch broadcast tracing.
329 * @trccond: If the trace unit supports conditional
330 * instruction tracing.
331 * @retstack: Indicates if the implementation supports a return stack.
332 * @trccci: Indicates if the trace unit supports cycle counting
333 * for instruction.
334 * @q_support: Q element support characteristics.
335 * @trc_error: Whether a trace unit can trace a system
336 * error exception.
337 * @syncpr: Indicates if an implementation has a fixed
338 * synchronization period.
339 * @stall_ctrl: Enables trace unit functionality that prevents trace
340 * unit buffer overflows.
341 * @sysstall: Does the system support stall control of the PE?
342 * @nooverflow: Indicate if overflow prevention is supported.
343 * @atbtrig: If the implementation can support ATB triggers
344 * @lpoverride: If the implementation can support low-power state over.
345 * @config: structure holding configuration parameters.
346 */
347struct etmv4_drvdata {
348 void __iomem *base;
349 struct device *dev;
350 struct coresight_device *csdev;
351 spinlock_t spinlock;
352 local_t mode;
353 int cpu;
354 u8 arch;
355 u8 nr_pe;
356 u8 nr_pe_cmp;
357 u8 nr_addr_cmp;
358 u8 nr_cntr;
359 u8 nr_ext_inp;
360 u8 numcidc;
361 u8 numvmidc;
362 u8 nrseqstate;
363 u8 nr_event;
364 u8 nr_resource;
365 u8 nr_ss_cmp;
366 u8 trcid;
367 u8 trcid_size;
368 u8 ts_size;
369 u8 ctxid_size;
370 u8 vmid_size;
371 u8 ccsize;
372 u8 ccitmin;
366 u8 s_ex_level; 373 u8 s_ex_level;
367 u8 ns_ex_level; 374 u8 ns_ex_level;
368 u32 ext_inp; 375 u8 q_support;
376 bool sticky_enable;
377 bool boot_enable;
378 bool os_unlock;
379 bool instrp0;
380 bool trcbb;
381 bool trccond;
382 bool retstack;
383 bool trccci;
384 bool trc_error;
385 bool syncpr;
386 bool stallctl;
387 bool sysstall;
388 bool nooverflow;
389 bool atbtrig;
390 bool lpoverride;
391 struct etmv4_config config;
369}; 392};
370 393
371/* Address comparator access types */ 394/* Address comparator access types */
@@ -391,4 +414,7 @@ enum etm_addr_type {
391 ETM_ADDR_TYPE_START, 414 ETM_ADDR_TYPE_START,
392 ETM_ADDR_TYPE_STOP, 415 ETM_ADDR_TYPE_STOP,
393}; 416};
417
418extern const struct attribute_group *coresight_etmv4_groups[];
419void etm4_config_trace_mode(struct etmv4_config *config);
394#endif 420#endif
diff --git a/drivers/hwtracing/coresight/coresight-funnel.c b/drivers/hwtracing/coresight/coresight-funnel.c
index 0600ca30649d..05df789056cc 100644
--- a/drivers/hwtracing/coresight/coresight-funnel.c
+++ b/drivers/hwtracing/coresight/coresight-funnel.c
@@ -221,7 +221,6 @@ static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
221 if (IS_ERR(drvdata->csdev)) 221 if (IS_ERR(drvdata->csdev))
222 return PTR_ERR(drvdata->csdev); 222 return PTR_ERR(drvdata->csdev);
223 223
224 dev_info(dev, "FUNNEL initialized\n");
225 return 0; 224 return 0;
226} 225}
227 226
diff --git a/drivers/hwtracing/coresight/coresight-priv.h b/drivers/hwtracing/coresight/coresight-priv.h
index 333eddaed339..ad975c58080d 100644
--- a/drivers/hwtracing/coresight/coresight-priv.h
+++ b/drivers/hwtracing/coresight/coresight-priv.h
@@ -37,12 +37,42 @@
37#define ETM_MODE_EXCL_KERN BIT(30) 37#define ETM_MODE_EXCL_KERN BIT(30)
38#define ETM_MODE_EXCL_USER BIT(31) 38#define ETM_MODE_EXCL_USER BIT(31)
39 39
40#define coresight_simple_func(type, name, offset) \
41static ssize_t name##_show(struct device *_dev, \
42 struct device_attribute *attr, char *buf) \
43{ \
44 type *drvdata = dev_get_drvdata(_dev->parent); \
45 return scnprintf(buf, PAGE_SIZE, "0x%x\n", \
46 readl_relaxed(drvdata->base + offset)); \
47} \
48static DEVICE_ATTR_RO(name)
49
40enum cs_mode { 50enum cs_mode {
41 CS_MODE_DISABLED, 51 CS_MODE_DISABLED,
42 CS_MODE_SYSFS, 52 CS_MODE_SYSFS,
43 CS_MODE_PERF, 53 CS_MODE_PERF,
44}; 54};
45 55
56/**
57 * struct cs_buffer - keep track of a recording session' specifics
58 * @cur: index of the current buffer
59 * @nr_pages: max number of pages granted to us
60 * @offset: offset within the current buffer
61 * @data_size: how much we collected in this run
62 * @lost: other than zero if we had a HW buffer wrap around
63 * @snapshot: is this run in snapshot mode
64 * @data_pages: a handle the ring buffer
65 */
66struct cs_buffers {
67 unsigned int cur;
68 unsigned int nr_pages;
69 unsigned long offset;
70 local_t data_size;
71 local_t lost;
72 bool snapshot;
73 void **data_pages;
74};
75
46static inline void CS_LOCK(void __iomem *addr) 76static inline void CS_LOCK(void __iomem *addr)
47{ 77{
48 do { 78 do {
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index 4299c0569340..c6982e312e15 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -114,7 +114,6 @@ static int replicator_probe(struct platform_device *pdev)
114 114
115 pm_runtime_put(&pdev->dev); 115 pm_runtime_put(&pdev->dev);
116 116
117 dev_info(dev, "REPLICATOR initialized\n");
118 return 0; 117 return 0;
119 118
120out_disable_pm: 119out_disable_pm:
diff --git a/drivers/hwtracing/coresight/coresight-stm.c b/drivers/hwtracing/coresight/coresight-stm.c
new file mode 100644
index 000000000000..73be58a11e4f
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-stm.c
@@ -0,0 +1,920 @@
1/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
2 *
3 * Description: CoreSight System Trace Macrocell driver
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 and
7 * only version 2 as published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful,
10 * but WITHOUT ANY WARRANTY; without even the implied warranty of
11 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 * GNU General Public License for more details.
13 *
14 * Initial implementation by Pratik Patel
15 * (C) 2014-2015 Pratik Patel <pratikp@codeaurora.org>
16 *
17 * Serious refactoring, code cleanup and upgrading to the Coresight upstream
18 * framework by Mathieu Poirier
19 * (C) 2015-2016 Mathieu Poirier <mathieu.poirier@linaro.org>
20 *
21 * Guaranteed timing and support for various packet type coming from the
22 * generic STM API by Chunyan Zhang
23 * (C) 2015-2016 Chunyan Zhang <zhang.chunyan@linaro.org>
24 */
25#include <asm/local.h>
26#include <linux/amba/bus.h>
27#include <linux/bitmap.h>
28#include <linux/clk.h>
29#include <linux/coresight.h>
30#include <linux/coresight-stm.h>
31#include <linux/err.h>
32#include <linux/kernel.h>
33#include <linux/moduleparam.h>
34#include <linux/of_address.h>
35#include <linux/perf_event.h>
36#include <linux/pm_runtime.h>
37#include <linux/stm.h>
38
39#include "coresight-priv.h"
40
41#define STMDMASTARTR 0xc04
42#define STMDMASTOPR 0xc08
43#define STMDMASTATR 0xc0c
44#define STMDMACTLR 0xc10
45#define STMDMAIDR 0xcfc
46#define STMHEER 0xd00
47#define STMHETER 0xd20
48#define STMHEBSR 0xd60
49#define STMHEMCR 0xd64
50#define STMHEMASTR 0xdf4
51#define STMHEFEAT1R 0xdf8
52#define STMHEIDR 0xdfc
53#define STMSPER 0xe00
54#define STMSPTER 0xe20
55#define STMPRIVMASKR 0xe40
56#define STMSPSCR 0xe60
57#define STMSPMSCR 0xe64
58#define STMSPOVERRIDER 0xe68
59#define STMSPMOVERRIDER 0xe6c
60#define STMSPTRIGCSR 0xe70
61#define STMTCSR 0xe80
62#define STMTSSTIMR 0xe84
63#define STMTSFREQR 0xe8c
64#define STMSYNCR 0xe90
65#define STMAUXCR 0xe94
66#define STMSPFEAT1R 0xea0
67#define STMSPFEAT2R 0xea4
68#define STMSPFEAT3R 0xea8
69#define STMITTRIGGER 0xee8
70#define STMITATBDATA0 0xeec
71#define STMITATBCTR2 0xef0
72#define STMITATBID 0xef4
73#define STMITATBCTR0 0xef8
74
75#define STM_32_CHANNEL 32
76#define BYTES_PER_CHANNEL 256
77#define STM_TRACE_BUF_SIZE 4096
78#define STM_SW_MASTER_END 127
79
80/* Register bit definition */
81#define STMTCSR_BUSY_BIT 23
82/* Reserve the first 10 channels for kernel usage */
83#define STM_CHANNEL_OFFSET 0
84
85enum stm_pkt_type {
86 STM_PKT_TYPE_DATA = 0x98,
87 STM_PKT_TYPE_FLAG = 0xE8,
88 STM_PKT_TYPE_TRIG = 0xF8,
89};
90
91#define stm_channel_addr(drvdata, ch) (drvdata->chs.base + \
92 (ch * BYTES_PER_CHANNEL))
93#define stm_channel_off(type, opts) (type & ~opts)
94
95static int boot_nr_channel;
96
97/*
98 * Not really modular but using module_param is the easiest way to
99 * remain consistent with existing use cases for now.
100 */
101module_param_named(
102 boot_nr_channel, boot_nr_channel, int, S_IRUGO
103);
104
105/**
106 * struct channel_space - central management entity for extended ports
107 * @base: memory mapped base address where channels start.
108 * @guaraneed: is the channel delivery guaranteed.
109 */
110struct channel_space {
111 void __iomem *base;
112 unsigned long *guaranteed;
113};
114
115/**
116 * struct stm_drvdata - specifics associated to an STM component
117 * @base: memory mapped base address for this component.
118 * @dev: the device entity associated to this component.
119 * @atclk: optional clock for the core parts of the STM.
120 * @csdev: component vitals needed by the framework.
121 * @spinlock: only one at a time pls.
122 * @chs: the channels accociated to this STM.
123 * @stm: structure associated to the generic STM interface.
124 * @mode: this tracer's mode, i.e sysFS, or disabled.
125 * @traceid: value of the current ID for this component.
126 * @write_bytes: Maximus bytes this STM can write at a time.
127 * @stmsper: settings for register STMSPER.
128 * @stmspscr: settings for register STMSPSCR.
129 * @numsp: the total number of stimulus port support by this STM.
130 * @stmheer: settings for register STMHEER.
131 * @stmheter: settings for register STMHETER.
132 * @stmhebsr: settings for register STMHEBSR.
133 */
134struct stm_drvdata {
135 void __iomem *base;
136 struct device *dev;
137 struct clk *atclk;
138 struct coresight_device *csdev;
139 spinlock_t spinlock;
140 struct channel_space chs;
141 struct stm_data stm;
142 local_t mode;
143 u8 traceid;
144 u32 write_bytes;
145 u32 stmsper;
146 u32 stmspscr;
147 u32 numsp;
148 u32 stmheer;
149 u32 stmheter;
150 u32 stmhebsr;
151};
152
153static void stm_hwevent_enable_hw(struct stm_drvdata *drvdata)
154{
155 CS_UNLOCK(drvdata->base);
156
157 writel_relaxed(drvdata->stmhebsr, drvdata->base + STMHEBSR);
158 writel_relaxed(drvdata->stmheter, drvdata->base + STMHETER);
159 writel_relaxed(drvdata->stmheer, drvdata->base + STMHEER);
160 writel_relaxed(0x01 | /* Enable HW event tracing */
161 0x04, /* Error detection on event tracing */
162 drvdata->base + STMHEMCR);
163
164 CS_LOCK(drvdata->base);
165}
166
167static void stm_port_enable_hw(struct stm_drvdata *drvdata)
168{
169 CS_UNLOCK(drvdata->base);
170 /* ATB trigger enable on direct writes to TRIG locations */
171 writel_relaxed(0x10,
172 drvdata->base + STMSPTRIGCSR);
173 writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR);
174 writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
175
176 CS_LOCK(drvdata->base);
177}
178
179static void stm_enable_hw(struct stm_drvdata *drvdata)
180{
181 if (drvdata->stmheer)
182 stm_hwevent_enable_hw(drvdata);
183
184 stm_port_enable_hw(drvdata);
185
186 CS_UNLOCK(drvdata->base);
187
188 /* 4096 byte between synchronisation packets */
189 writel_relaxed(0xFFF, drvdata->base + STMSYNCR);
190 writel_relaxed((drvdata->traceid << 16 | /* trace id */
191 0x02 | /* timestamp enable */
192 0x01), /* global STM enable */
193 drvdata->base + STMTCSR);
194
195 CS_LOCK(drvdata->base);
196}
197
198static int stm_enable(struct coresight_device *csdev,
199 struct perf_event_attr *attr, u32 mode)
200{
201 u32 val;
202 struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
203
204 if (mode != CS_MODE_SYSFS)
205 return -EINVAL;
206
207 val = local_cmpxchg(&drvdata->mode, CS_MODE_DISABLED, mode);
208
209 /* Someone is already using the tracer */
210 if (val)
211 return -EBUSY;
212
213 pm_runtime_get_sync(drvdata->dev);
214
215 spin_lock(&drvdata->spinlock);
216 stm_enable_hw(drvdata);
217 spin_unlock(&drvdata->spinlock);
218
219 dev_info(drvdata->dev, "STM tracing enabled\n");
220 return 0;
221}
222
223static void stm_hwevent_disable_hw(struct stm_drvdata *drvdata)
224{
225 CS_UNLOCK(drvdata->base);
226
227 writel_relaxed(0x0, drvdata->base + STMHEMCR);
228 writel_relaxed(0x0, drvdata->base + STMHEER);
229 writel_relaxed(0x0, drvdata->base + STMHETER);
230
231 CS_LOCK(drvdata->base);
232}
233
234static void stm_port_disable_hw(struct stm_drvdata *drvdata)
235{
236 CS_UNLOCK(drvdata->base);
237
238 writel_relaxed(0x0, drvdata->base + STMSPER);
239 writel_relaxed(0x0, drvdata->base + STMSPTRIGCSR);
240
241 CS_LOCK(drvdata->base);
242}
243
244static void stm_disable_hw(struct stm_drvdata *drvdata)
245{
246 u32 val;
247
248 CS_UNLOCK(drvdata->base);
249
250 val = readl_relaxed(drvdata->base + STMTCSR);
251 val &= ~0x1; /* clear global STM enable [0] */
252 writel_relaxed(val, drvdata->base + STMTCSR);
253
254 CS_LOCK(drvdata->base);
255
256 stm_port_disable_hw(drvdata);
257 if (drvdata->stmheer)
258 stm_hwevent_disable_hw(drvdata);
259}
260
261static void stm_disable(struct coresight_device *csdev)
262{
263 struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
264
265 /*
266 * For as long as the tracer isn't disabled another entity can't
267 * change its status. As such we can read the status here without
268 * fearing it will change under us.
269 */
270 if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
271 spin_lock(&drvdata->spinlock);
272 stm_disable_hw(drvdata);
273 spin_unlock(&drvdata->spinlock);
274
275 /* Wait until the engine has completely stopped */
276 coresight_timeout(drvdata, STMTCSR, STMTCSR_BUSY_BIT, 0);
277
278 pm_runtime_put(drvdata->dev);
279
280 local_set(&drvdata->mode, CS_MODE_DISABLED);
281 dev_info(drvdata->dev, "STM tracing disabled\n");
282 }
283}
284
285static int stm_trace_id(struct coresight_device *csdev)
286{
287 struct stm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
288
289 return drvdata->traceid;
290}
291
292static const struct coresight_ops_source stm_source_ops = {
293 .trace_id = stm_trace_id,
294 .enable = stm_enable,
295 .disable = stm_disable,
296};
297
298static const struct coresight_ops stm_cs_ops = {
299 .source_ops = &stm_source_ops,
300};
301
302static inline bool stm_addr_unaligned(const void *addr, u8 write_bytes)
303{
304 return ((unsigned long)addr & (write_bytes - 1));
305}
306
307static void stm_send(void *addr, const void *data, u32 size, u8 write_bytes)
308{
309 u8 paload[8];
310
311 if (stm_addr_unaligned(data, write_bytes)) {
312 memcpy(paload, data, size);
313 data = paload;
314 }
315
316 /* now we are 64bit/32bit aligned */
317 switch (size) {
318#ifdef CONFIG_64BIT
319 case 8:
320 writeq_relaxed(*(u64 *)data, addr);
321 break;
322#endif
323 case 4:
324 writel_relaxed(*(u32 *)data, addr);
325 break;
326 case 2:
327 writew_relaxed(*(u16 *)data, addr);
328 break;
329 case 1:
330 writeb_relaxed(*(u8 *)data, addr);
331 break;
332 default:
333 break;
334 }
335}
336
337static int stm_generic_link(struct stm_data *stm_data,
338 unsigned int master, unsigned int channel)
339{
340 struct stm_drvdata *drvdata = container_of(stm_data,
341 struct stm_drvdata, stm);
342 if (!drvdata || !drvdata->csdev)
343 return -EINVAL;
344
345 return coresight_enable(drvdata->csdev);
346}
347
348static void stm_generic_unlink(struct stm_data *stm_data,
349 unsigned int master, unsigned int channel)
350{
351 struct stm_drvdata *drvdata = container_of(stm_data,
352 struct stm_drvdata, stm);
353 if (!drvdata || !drvdata->csdev)
354 return;
355
356 stm_disable(drvdata->csdev);
357}
358
359static long stm_generic_set_options(struct stm_data *stm_data,
360 unsigned int master,
361 unsigned int channel,
362 unsigned int nr_chans,
363 unsigned long options)
364{
365 struct stm_drvdata *drvdata = container_of(stm_data,
366 struct stm_drvdata, stm);
367 if (!(drvdata && local_read(&drvdata->mode)))
368 return -EINVAL;
369
370 if (channel >= drvdata->numsp)
371 return -EINVAL;
372
373 switch (options) {
374 case STM_OPTION_GUARANTEED:
375 set_bit(channel, drvdata->chs.guaranteed);
376 break;
377
378 case STM_OPTION_INVARIANT:
379 clear_bit(channel, drvdata->chs.guaranteed);
380 break;
381
382 default:
383 return -EINVAL;
384 }
385
386 return 0;
387}
388
/*
 * stm_generic_packet - stm_data::packet callback; emit one STP packet.
 *
 * Returns the number of payload bytes actually sent, 0 if the STM is
 * disabled or @channel is out of range, or -ENOTSUPP for packet types
 * other than FLAG and DATA.
 */
static ssize_t stm_generic_packet(struct stm_data *stm_data,
				  unsigned int master,
				  unsigned int channel,
				  unsigned int packet,
				  unsigned int flags,
				  unsigned int size,
				  const unsigned char *payload)
{
	unsigned long ch_addr;
	struct stm_drvdata *drvdata = container_of(stm_data,
						   struct stm_drvdata, stm);

	if (!(drvdata && local_read(&drvdata->mode)))
		return 0;

	if (channel >= drvdata->numsp)
		return 0;

	/* Base MMIO address of this channel's stimulus port */
	ch_addr = (unsigned long)stm_channel_addr(drvdata, channel);

	/* Map generic STP flags onto the STM's timestamp/guarantee bits */
	flags = (flags == STP_PACKET_TIMESTAMPED) ? STM_FLAG_TIMESTAMPED : 0;
	flags |= test_bit(channel, drvdata->chs.guaranteed) ?
			   STM_FLAG_GUARANTEED : 0;

	/*
	 * Clamp to the fundamental data size, and otherwise round down to a
	 * power of two since stm_send() only handles 1/2/4/8-byte writes.
	 */
	if (size > drvdata->write_bytes)
		size = drvdata->write_bytes;
	else
		size = rounddown_pow_of_two(size);

	switch (packet) {
	case STP_PACKET_FLAG:
		ch_addr |= stm_channel_off(STM_PKT_TYPE_FLAG, flags);

		/*
		 * The generic STM core sets a size of '0' on flag packets.
		 * As such send a flag packet of size '1' and tell the
		 * core we did so.
		 */
		stm_send((void *)ch_addr, payload, 1, drvdata->write_bytes);
		size = 1;
		break;

	case STP_PACKET_DATA:
		ch_addr |= stm_channel_off(STM_PKT_TYPE_DATA, flags);
		stm_send((void *)ch_addr, payload, size,
				drvdata->write_bytes);
		break;

	default:
		return -ENOTSUPP;
	}

	return size;
}
443
444static ssize_t hwevent_enable_show(struct device *dev,
445 struct device_attribute *attr, char *buf)
446{
447 struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
448 unsigned long val = drvdata->stmheer;
449
450 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
451}
452
453static ssize_t hwevent_enable_store(struct device *dev,
454 struct device_attribute *attr,
455 const char *buf, size_t size)
456{
457 struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
458 unsigned long val;
459 int ret = 0;
460
461 ret = kstrtoul(buf, 16, &val);
462 if (ret)
463 return -EINVAL;
464
465 drvdata->stmheer = val;
466 /* HW event enable and trigger go hand in hand */
467 drvdata->stmheter = val;
468
469 return size;
470}
471static DEVICE_ATTR_RW(hwevent_enable);
472
473static ssize_t hwevent_select_show(struct device *dev,
474 struct device_attribute *attr, char *buf)
475{
476 struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
477 unsigned long val = drvdata->stmhebsr;
478
479 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
480}
481
482static ssize_t hwevent_select_store(struct device *dev,
483 struct device_attribute *attr,
484 const char *buf, size_t size)
485{
486 struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
487 unsigned long val;
488 int ret = 0;
489
490 ret = kstrtoul(buf, 16, &val);
491 if (ret)
492 return -EINVAL;
493
494 drvdata->stmhebsr = val;
495
496 return size;
497}
498static DEVICE_ATTR_RW(hwevent_select);
499
500static ssize_t port_select_show(struct device *dev,
501 struct device_attribute *attr, char *buf)
502{
503 struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
504 unsigned long val;
505
506 if (!local_read(&drvdata->mode)) {
507 val = drvdata->stmspscr;
508 } else {
509 spin_lock(&drvdata->spinlock);
510 val = readl_relaxed(drvdata->base + STMSPSCR);
511 spin_unlock(&drvdata->spinlock);
512 }
513
514 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
515}
516
/*
 * sysfs write of the stimulus port select register.  The shadow is always
 * updated; the HW is only re-programmed if the STM is currently enabled.
 */
static ssize_t port_select_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val, stmsper;
	int ret = 0;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	drvdata->stmspscr = val;

	if (local_read(&drvdata->mode)) {
		CS_UNLOCK(drvdata->base);
		/*
		 * Process as per ARM's TRM recommendation: disable all ports
		 * (STMSPER = 0), change the selection, then restore STMSPER.
		 * The write order here is mandated by the TRM - do not reorder.
		 */
		stmsper = readl_relaxed(drvdata->base + STMSPER);
		writel_relaxed(0x0, drvdata->base + STMSPER);
		writel_relaxed(drvdata->stmspscr, drvdata->base + STMSPSCR);
		writel_relaxed(stmsper, drvdata->base + STMSPER);
		CS_LOCK(drvdata->base);
	}
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(port_select);
546
547static ssize_t port_enable_show(struct device *dev,
548 struct device_attribute *attr, char *buf)
549{
550 struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
551 unsigned long val;
552
553 if (!local_read(&drvdata->mode)) {
554 val = drvdata->stmsper;
555 } else {
556 spin_lock(&drvdata->spinlock);
557 val = readl_relaxed(drvdata->base + STMSPER);
558 spin_unlock(&drvdata->spinlock);
559 }
560
561 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
562}
563
/*
 * sysfs write of the stimulus port enable register.  The shadow is always
 * updated; the HW is only re-programmed if the STM is currently enabled.
 */
static ssize_t port_enable_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t size)
{
	struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
	unsigned long val;
	int ret = 0;

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	spin_lock(&drvdata->spinlock);
	drvdata->stmsper = val;

	if (local_read(&drvdata->mode)) {
		/* CS_UNLOCK/CS_LOCK bracket all coresight register writes */
		CS_UNLOCK(drvdata->base);
		writel_relaxed(drvdata->stmsper, drvdata->base + STMSPER);
		CS_LOCK(drvdata->base);
	}
	spin_unlock(&drvdata->spinlock);

	return size;
}
static DEVICE_ATTR_RW(port_enable);
589
590static ssize_t traceid_show(struct device *dev,
591 struct device_attribute *attr, char *buf)
592{
593 unsigned long val;
594 struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
595
596 val = drvdata->traceid;
597 return sprintf(buf, "%#lx\n", val);
598}
599
600static ssize_t traceid_store(struct device *dev,
601 struct device_attribute *attr,
602 const char *buf, size_t size)
603{
604 int ret;
605 unsigned long val;
606 struct stm_drvdata *drvdata = dev_get_drvdata(dev->parent);
607
608 ret = kstrtoul(buf, 16, &val);
609 if (ret)
610 return ret;
611
612 /* traceid field is 7bit wide on STM32 */
613 drvdata->traceid = val & 0x7f;
614 return size;
615}
616static DEVICE_ATTR_RW(traceid);
617
/*
 * Define a read-only sysfs attribute (dev_attr_<name>) that dumps the
 * management register at <offset> - see coresight_simple_func() in
 * coresight-priv.h.
 */
#define coresight_stm_simple_func(name, offset)	\
	coresight_simple_func(struct stm_drvdata, name, offset)

coresight_stm_simple_func(tcsr, STMTCSR);
coresight_stm_simple_func(tsfreqr, STMTSFREQR);
coresight_stm_simple_func(syncr, STMSYNCR);
coresight_stm_simple_func(sper, STMSPER);
coresight_stm_simple_func(spter, STMSPTER);
coresight_stm_simple_func(privmaskr, STMPRIVMASKR);
coresight_stm_simple_func(spscr, STMSPSCR);
coresight_stm_simple_func(spmscr, STMSPMSCR);
coresight_stm_simple_func(spfeat1r, STMSPFEAT1R);
coresight_stm_simple_func(spfeat2r, STMSPFEAT2R);
coresight_stm_simple_func(spfeat3r, STMSPFEAT3R);
coresight_stm_simple_func(devid, CORESIGHT_DEVID);
633
/* Configuration attributes, exposed at the root of the device's sysfs dir. */
static struct attribute *coresight_stm_attrs[] = {
	&dev_attr_hwevent_enable.attr,
	&dev_attr_hwevent_select.attr,
	&dev_attr_port_enable.attr,
	&dev_attr_port_select.attr,
	&dev_attr_traceid.attr,
	NULL,
};

/* Read-only register dumps, exposed under the "mgmt" sub-directory. */
static struct attribute *coresight_stm_mgmt_attrs[] = {
	&dev_attr_tcsr.attr,
	&dev_attr_tsfreqr.attr,
	&dev_attr_syncr.attr,
	&dev_attr_sper.attr,
	&dev_attr_spter.attr,
	&dev_attr_privmaskr.attr,
	&dev_attr_spscr.attr,
	&dev_attr_spmscr.attr,
	&dev_attr_spfeat1r.attr,
	&dev_attr_spfeat2r.attr,
	&dev_attr_spfeat3r.attr,
	&dev_attr_devid.attr,
	NULL,
};

static const struct attribute_group coresight_stm_group = {
	.attrs = coresight_stm_attrs,
};

static const struct attribute_group coresight_stm_mgmt_group = {
	.attrs = coresight_stm_mgmt_attrs,
	.name = "mgmt",
};

/* Handed to coresight_register() via coresight_desc::groups. */
static const struct attribute_group *coresight_stm_groups[] = {
	&coresight_stm_group,
	&coresight_stm_mgmt_group,
	NULL,
};
673
674static int stm_get_resource_byname(struct device_node *np,
675 char *ch_base, struct resource *res)
676{
677 const char *name = NULL;
678 int index = 0, found = 0;
679
680 while (!of_property_read_string_index(np, "reg-names", index, &name)) {
681 if (strcmp(ch_base, name)) {
682 index++;
683 continue;
684 }
685
686 /* We have a match and @index is where it's at */
687 found = 1;
688 break;
689 }
690
691 if (!found)
692 return -EINVAL;
693
694 return of_address_to_resource(np, index, res);
695}
696
697static u32 stm_fundamental_data_size(struct stm_drvdata *drvdata)
698{
699 u32 stmspfeat2r;
700
701 if (!IS_ENABLED(CONFIG_64BIT))
702 return 4;
703
704 stmspfeat2r = readl_relaxed(drvdata->base + STMSPFEAT2R);
705
706 /*
707 * bit[15:12] represents the fundamental data size
708 * 0 - 32-bit data
709 * 1 - 64-bit data
710 */
711 return BMVAL(stmspfeat2r, 12, 15) ? 8 : 4;
712}
713
714static u32 stm_num_stimulus_port(struct stm_drvdata *drvdata)
715{
716 u32 numsp;
717
718 numsp = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
719 /*
720 * NUMPS in STMDEVID is 17 bit long and if equal to 0x0,
721 * 32 stimulus ports are supported.
722 */
723 numsp &= 0x1ffff;
724 if (!numsp)
725 numsp = STM_32_CHANNEL;
726 return numsp;
727}
728
/* Establish the power-on software defaults for a freshly probed STM. */
static void stm_init_default_data(struct stm_drvdata *drvdata)
{
	/* Don't use port selection */
	drvdata->stmspscr = 0x0;
	/*
	 * Enable all channels regardless of their number.  When port
	 * selection isn't used (see above) STMSPER applies to all
	 * 32 channel groups available, hence setting all 32 bits to 1.
	 */
	drvdata->stmsper = ~0x0;

	/*
	 * The trace ID value for *ETM* tracers start at CPU_ID * 2 + 0x10 and
	 * anything equal to or higher than 0x70 is reserved.  Since 0x00 is
	 * also reserved the STM trace ID needs to be higher than 0x00 and
	 * lower than 0x10.
	 */
	drvdata->traceid = 0x1;

	/* Set invariant transaction timing on all channels */
	bitmap_clear(drvdata->chs.guaranteed, 0, drvdata->numsp);
}
751
/* Fill in the stm_data structure handed to the generic STM core. */
static void stm_init_generic_data(struct stm_drvdata *drvdata)
{
	drvdata->stm.name = dev_name(drvdata->dev);

	/*
	 * MasterIDs are assigned at HW design phase.  As such the core is
	 * using a single master for interaction with this device.
	 */
	drvdata->stm.sw_start = 1;
	drvdata->stm.sw_end = 1;
	/* HW decides the master ID; the core cannot override it */
	drvdata->stm.hw_override = true;
	drvdata->stm.sw_nchannels = drvdata->numsp;
	drvdata->stm.packet = stm_generic_packet;
	drvdata->stm.link = stm_generic_link;
	drvdata->stm.unlink = stm_generic_unlink;
	drvdata->stm.set_options = stm_generic_set_options;
}
769
/*
 * stm_probe - bind an STM AMBA device.
 *
 * Maps the APB programming interface and the extended stimulus port area,
 * probes the port count and fundamental data size, registers with the
 * generic STM core, then registers with the coresight framework.
 * All allocations are devm-managed; only the STM core registration needs
 * explicit unwinding on failure.
 */
static int stm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	unsigned long *guaranteed;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct stm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct resource ch_res;
	size_t res_size, bitmap_size;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);
		adev->dev.platform_data = pdata;
	}
	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	drvdata->dev = &adev->dev;
	drvdata->atclk = devm_clk_get(&adev->dev, "atclk"); /* optional */
	if (!IS_ERR(drvdata->atclk)) {
		ret = clk_prepare_enable(drvdata->atclk);
		if (ret)
			return ret;
	}
	dev_set_drvdata(dev, drvdata);

	/* APB programming interface */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	drvdata->base = base;

	/* Extended stimulus port area, named "stm-stimulus-base" in the DT */
	ret = stm_get_resource_byname(np, "stm-stimulus-base", &ch_res);
	if (ret)
		return ret;

	base = devm_ioremap_resource(dev, &ch_res);
	if (IS_ERR(base))
		return PTR_ERR(base);
	drvdata->chs.base = base;

	/* 4 or 8 bytes per stimulus write, from STMSPFEAT2R */
	drvdata->write_bytes = stm_fundamental_data_size(drvdata);

	/* A boot-time parameter overrides the HW-reported port count */
	if (boot_nr_channel) {
		drvdata->numsp = boot_nr_channel;
		res_size = min((resource_size_t)(boot_nr_channel *
				  BYTES_PER_CHANNEL), resource_size(res));
	} else {
		drvdata->numsp = stm_num_stimulus_port(drvdata);
		res_size = min((resource_size_t)(drvdata->numsp *
				 BYTES_PER_CHANNEL), resource_size(res));
	}
	/* NOTE(review): res_size is computed but never used below - confirm */
	bitmap_size = BITS_TO_LONGS(drvdata->numsp) * sizeof(long);

	/* One bit per stimulus port: guaranteed vs invariant timing */
	guaranteed = devm_kzalloc(dev, bitmap_size, GFP_KERNEL);
	if (!guaranteed)
		return -ENOMEM;
	drvdata->chs.guaranteed = guaranteed;

	spin_lock_init(&drvdata->spinlock);

	stm_init_default_data(drvdata);
	stm_init_generic_data(drvdata);

	if (stm_register_device(dev, &drvdata->stm, THIS_MODULE)) {
		dev_info(dev,
			 "stm_register_device failed, probing deffered\n");
		return -EPROBE_DEFER;
	}

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto stm_unregister;
	}

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE;
	desc->ops = &stm_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_stm_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto stm_unregister;
	}

	pm_runtime_put(&adev->dev);

	dev_info(dev, "%s initialized\n", (char *)id->data);
	return 0;

stm_unregister:
	stm_unregister_device(&drvdata->stm);
	return ret;
}
873
874#ifdef CONFIG_PM
875static int stm_runtime_suspend(struct device *dev)
876{
877 struct stm_drvdata *drvdata = dev_get_drvdata(dev);
878
879 if (drvdata && !IS_ERR(drvdata->atclk))
880 clk_disable_unprepare(drvdata->atclk);
881
882 return 0;
883}
884
885static int stm_runtime_resume(struct device *dev)
886{
887 struct stm_drvdata *drvdata = dev_get_drvdata(dev);
888
889 if (drvdata && !IS_ERR(drvdata->atclk))
890 clk_prepare_enable(drvdata->atclk);
891
892 return 0;
893}
894#endif
895
static const struct dev_pm_ops stm_dev_pm_ops = {
	SET_RUNTIME_PM_OPS(stm_runtime_suspend, stm_runtime_resume, NULL)
};

/* AMBA peripheral IDs this driver binds to; .data is the printable name. */
static struct amba_id stm_ids[] = {
	{
		.id     = 0x0003b962,
		.mask   = 0x0003ffff,
		.data	= "STM32",
	},
	{ 0, 0},
};
908
static struct amba_driver stm_driver = {
	.drv = {
		.name   = "coresight-stm",
		.owner	= THIS_MODULE,
		.pm	= &stm_dev_pm_ops,
		/* no remove() path: forbid manual unbind through sysfs */
		.suppress_bind_attrs = true,
	},
	.probe          = stm_probe,
	.id_table	= stm_ids,
};

builtin_amba_driver(stm_driver);
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etf.c b/drivers/hwtracing/coresight/coresight-tmc-etf.c
new file mode 100644
index 000000000000..466af86fd76f
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tmc-etf.c
@@ -0,0 +1,604 @@
1/*
2 * Copyright(C) 2016 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/circ_buf.h>
19#include <linux/coresight.h>
20#include <linux/perf_event.h>
21#include <linux/slab.h>
22#include "coresight-priv.h"
23#include "coresight-tmc.h"
24
/*
 * tmc_etb_enable_hw - program and start the TMC in circular buffer (ETB)
 * mode, with formatting, flush-on-FLIn/trigger and trigger insertion.
 */
void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
		       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
		       TMC_FFCR_TRIGON_TRIGIN,
		       drvdata->base + TMC_FFCR);

	writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
43
44static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
45{
46 char *bufp;
47 u32 read_data;
48 int i;
49
50 bufp = drvdata->buf;
51 while (1) {
52 for (i = 0; i < drvdata->memwidth; i++) {
53 read_data = readl_relaxed(drvdata->base + TMC_RRD);
54 if (read_data == 0xFFFFFFFF)
55 return;
56 memcpy(bufp, &read_data, 4);
57 bufp += 4;
58 }
59 }
60}
61
/*
 * tmc_etb_disable_hw - stop the TMC in ETB mode, first harvesting the
 * trace buffer into memory when operating from sysFS (in Perf mode the
 * buffer is synced by tmc_update_etf_buffer() instead).
 */
static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	/*
	 * When operating in sysFS mode the content of the buffer needs to be
	 * read before the TMC is disabled.
	 */
	if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
		tmc_etb_dump_hw(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
77
/*
 * tmc_etf_enable_hw - program and start the TMC in HW FIFO (link) mode:
 * formatting and trigger insertion on, watermark at zero.
 */
static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	/* Wait for TMCSReady bit to be set */
	tmc_wait_for_tmcready(drvdata);

	writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
	writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
		       drvdata->base + TMC_FFCR);
	writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
	tmc_enable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
93
/* tmc_etf_disable_hw - flush and stop the TMC operating in HW FIFO mode. */
static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);
	tmc_disable_hw(drvdata);

	CS_LOCK(drvdata->base);
}
103
104static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev, u32 mode)
105{
106 int ret = 0;
107 bool used = false;
108 char *buf = NULL;
109 long val;
110 unsigned long flags;
111 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
112
113 /* This shouldn't be happening */
114 if (WARN_ON(mode != CS_MODE_SYSFS))
115 return -EINVAL;
116
117 /*
118 * If we don't have a buffer release the lock and allocate memory.
119 * Otherwise keep the lock and move along.
120 */
121 spin_lock_irqsave(&drvdata->spinlock, flags);
122 if (!drvdata->buf) {
123 spin_unlock_irqrestore(&drvdata->spinlock, flags);
124
125 /* Allocating the memory here while outside of the spinlock */
126 buf = kzalloc(drvdata->size, GFP_KERNEL);
127 if (!buf)
128 return -ENOMEM;
129
130 /* Let's try again */
131 spin_lock_irqsave(&drvdata->spinlock, flags);
132 }
133
134 if (drvdata->reading) {
135 ret = -EBUSY;
136 goto out;
137 }
138
139 val = local_xchg(&drvdata->mode, mode);
140 /*
141 * In sysFS mode we can have multiple writers per sink. Since this
142 * sink is already enabled no memory is needed and the HW need not be
143 * touched.
144 */
145 if (val == CS_MODE_SYSFS)
146 goto out;
147
148 /*
149 * If drvdata::buf isn't NULL, memory was allocated for a previous
150 * trace run but wasn't read. If so simply zero-out the memory.
151 * Otherwise use the memory allocated above.
152 *
153 * The memory is freed when users read the buffer using the
154 * /dev/xyz.{etf|etb} interface. See tmc_read_unprepare_etf() for
155 * details.
156 */
157 if (drvdata->buf) {
158 memset(drvdata->buf, 0, drvdata->size);
159 } else {
160 used = true;
161 drvdata->buf = buf;
162 }
163
164 tmc_etb_enable_hw(drvdata);
165out:
166 spin_unlock_irqrestore(&drvdata->spinlock, flags);
167
168 /* Free memory outside the spinlock if need be */
169 if (!used && buf)
170 kfree(buf);
171
172 if (!ret)
173 dev_info(drvdata->dev, "TMC-ETB/ETF enabled\n");
174
175 return ret;
176}
177
178static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, u32 mode)
179{
180 int ret = 0;
181 long val;
182 unsigned long flags;
183 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
184
185 /* This shouldn't be happening */
186 if (WARN_ON(mode != CS_MODE_PERF))
187 return -EINVAL;
188
189 spin_lock_irqsave(&drvdata->spinlock, flags);
190 if (drvdata->reading) {
191 ret = -EINVAL;
192 goto out;
193 }
194
195 val = local_xchg(&drvdata->mode, mode);
196 /*
197 * In Perf mode there can be only one writer per sink. There
198 * is also no need to continue if the ETB/ETR is already operated
199 * from sysFS.
200 */
201 if (val != CS_MODE_DISABLED) {
202 ret = -EINVAL;
203 goto out;
204 }
205
206 tmc_etb_enable_hw(drvdata);
207out:
208 spin_unlock_irqrestore(&drvdata->spinlock, flags);
209
210 return ret;
211}
212
213static int tmc_enable_etf_sink(struct coresight_device *csdev, u32 mode)
214{
215 switch (mode) {
216 case CS_MODE_SYSFS:
217 return tmc_enable_etf_sink_sysfs(csdev, mode);
218 case CS_MODE_PERF:
219 return tmc_enable_etf_sink_perf(csdev, mode);
220 }
221
222 /* We shouldn't be here */
223 return -EINVAL;
224}
225
/*
 * tmc_disable_etf_sink - coresight sink disable callback for the ETB/ETF.
 *
 * A no-op while user space is reading the buffer; otherwise flips the
 * mode to CS_MODE_DISABLED and, if the TMC was actually running, stops
 * the hardware (harvesting the buffer first in sysFS mode).
 */
static void tmc_disable_etf_sink(struct coresight_device *csdev)
{
	long val;
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
	/* Disable the TMC only if it needs to */
	if (val != CS_MODE_DISABLED)
		tmc_etb_disable_hw(drvdata);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETB/ETF disabled\n");
}
247
/*
 * tmc_enable_etf_link - enable the TMC as a link (HW FIFO mode).
 *
 * Links are only driven from sysFS, hence the unconditional
 * CS_MODE_SYSFS.  Refused with -EBUSY while the buffer is being read.
 */
static int tmc_enable_etf_link(struct coresight_device *csdev,
			       int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return -EBUSY;
	}

	tmc_etf_enable_hw(drvdata);
	local_set(&drvdata->mode, CS_MODE_SYSFS);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC-ETF enabled\n");
	return 0;
}
267
/*
 * tmc_disable_etf_link - disable the TMC operating as a link.
 *
 * A no-op while user space is reading the buffer.
 */
static void tmc_disable_etf_link(struct coresight_device *csdev,
				 int inport, int outport)
{
	unsigned long flags;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	spin_lock_irqsave(&drvdata->spinlock, flags);
	if (drvdata->reading) {
		spin_unlock_irqrestore(&drvdata->spinlock, flags);
		return;
	}

	tmc_etf_disable_hw(drvdata);
	local_set(&drvdata->mode, CS_MODE_DISABLED);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);

	dev_info(drvdata->dev, "TMC disabled\n");
}
286
287static void *tmc_alloc_etf_buffer(struct coresight_device *csdev, int cpu,
288 void **pages, int nr_pages, bool overwrite)
289{
290 int node;
291 struct cs_buffers *buf;
292
293 if (cpu == -1)
294 cpu = smp_processor_id();
295 node = cpu_to_node(cpu);
296
297 /* Allocate memory structure for interaction with Perf */
298 buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
299 if (!buf)
300 return NULL;
301
302 buf->snapshot = overwrite;
303 buf->nr_pages = nr_pages;
304 buf->data_pages = pages;
305
306 return buf;
307}
308
/* AUX-space free_buffer callback: release the cs_buffers structure. */
static void tmc_free_etf_buffer(void *config)
{
	kfree(config);
}
315
316static int tmc_set_etf_buffer(struct coresight_device *csdev,
317 struct perf_output_handle *handle,
318 void *sink_config)
319{
320 int ret = 0;
321 unsigned long head;
322 struct cs_buffers *buf = sink_config;
323
324 /* wrap head around to the amount of space we have */
325 head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);
326
327 /* find the page to write to */
328 buf->cur = head / PAGE_SIZE;
329
330 /* and offset within that page */
331 buf->offset = head % PAGE_SIZE;
332
333 local_set(&buf->data_size, 0);
334
335 return ret;
336}
337
/*
 * tmc_reset_etf_buffer - AUX-space reset_buffer callback.
 *
 * Reports to the tracer PMU how much data the last run produced and
 * whether any was lost, clearing the per-run state in the process.
 */
static unsigned long tmc_reset_etf_buffer(struct coresight_device *csdev,
					  struct perf_output_handle *handle,
					  void *sink_config, bool *lost)
{
	long size = 0;
	struct cs_buffers *buf = sink_config;

	if (buf) {
		/*
		 * In snapshot mode ->data_size holds the new address of the
		 * ring buffer's head.  The size itself is the whole address
		 * range since we want the latest information.
		 */
		if (buf->snapshot)
			handle->head = local_xchg(&buf->data_size,
						  buf->nr_pages << PAGE_SHIFT);
		/*
		 * Tell the tracer PMU how much we got in this run and if
		 * something went wrong along the way.  Nobody else can use
		 * this cs_buffers instance until we are done.  As such
		 * resetting parameters here and squaring off with the ring
		 * buffer API in the tracer PMU is fine.
		 */
		*lost = !!local_xchg(&buf->lost, 0);
		size = local_xchg(&buf->data_size, 0);
	}

	return size;
}
367
/*
 * tmc_update_etf_buffer - AUX-space update_buffer callback.
 *
 * Stops the TMC, copies whatever trace data its RAM holds into the perf
 * ring buffer pages, and records the amount transferred (or, in snapshot
 * mode, the new head).  Only valid while operated from Perf.
 */
static void tmc_update_etf_buffer(struct coresight_device *csdev,
				  struct perf_output_handle *handle,
				  void *sink_config)
{
	int i, cur;
	u32 *buf_ptr;
	u32 read_ptr, write_ptr;
	u32 status, to_read;
	unsigned long offset;
	struct cs_buffers *buf = sink_config;
	struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	if (!buf)
		return;

	/* This shouldn't happen */
	if (WARN_ON_ONCE(local_read(&drvdata->mode) != CS_MODE_PERF))
		return;

	CS_UNLOCK(drvdata->base);

	tmc_flush_and_stop(drvdata);

	/* RAM read and write pointers, in bytes */
	read_ptr = readl_relaxed(drvdata->base + TMC_RRP);
	write_ptr = readl_relaxed(drvdata->base + TMC_RWP);

	/*
	 * Get a hold of the status register and see if a wrap around
	 * has occurred.  If so adjust things accordingly.
	 */
	status = readl_relaxed(drvdata->base + TMC_STS);
	if (status & TMC_STS_FULL) {
		local_inc(&buf->lost);
		to_read = drvdata->size;
	} else {
		to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
	}

	/*
	 * The TMC RAM buffer may be bigger than the space available in the
	 * perf ring buffer (handle->size).  If so advance the RRP so that we
	 * get the latest trace data.
	 */
	if (to_read > handle->size) {
		u32 mask = 0;

		/*
		 * The value written to RRP must be byte-address aligned to
		 * the width of the trace memory databus _and_ to a frame
		 * boundary (16 byte), whichever is the biggest.  For example,
		 * for 32-bit, 64-bit and 128-bit wide trace memory, the four
		 * LSBs must be 0s.  For 256-bit wide trace memory, the five
		 * LSBs must be 0s.
		 */
		switch (drvdata->memwidth) {
		case TMC_MEM_INTF_WIDTH_32BITS:
		case TMC_MEM_INTF_WIDTH_64BITS:
		case TMC_MEM_INTF_WIDTH_128BITS:
			mask = GENMASK(31, 5);
			break;
		case TMC_MEM_INTF_WIDTH_256BITS:
			mask = GENMASK(31, 6);
			break;
		}

		/*
		 * Make sure the new size is aligned in accordance with the
		 * requirement explained above.
		 */
		to_read = handle->size & mask;
		/* Move the RAM read pointer up */
		read_ptr = (write_ptr + drvdata->size) - to_read;
		/* Make sure we are still within our limits */
		if (read_ptr > (drvdata->size - 1))
			read_ptr -= drvdata->size;
		/* Tell the HW */
		writel_relaxed(read_ptr, drvdata->base + TMC_RRP);
		/* Skipping old data counts as a loss */
		local_inc(&buf->lost);
	}

	cur = buf->cur;
	offset = buf->offset;

	/* for every byte to read, 32 bits at a time */
	for (i = 0; i < to_read; i += 4) {
		buf_ptr = buf->data_pages[cur] + offset;
		*buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

		offset += 4;
		if (offset >= PAGE_SIZE) {
			offset = 0;
			cur++;
			/* wrap around at the end of the buffer */
			cur &= buf->nr_pages - 1;
		}
	}

	/*
	 * In snapshot mode all we have to do is communicate to
	 * perf_aux_output_end() the address of the current head.  In full
	 * trace mode the same function expects a size to move rb->aux_head
	 * forward.
	 */
	if (buf->snapshot)
		local_set(&buf->data_size, (cur * PAGE_SIZE) + offset);
	else
		local_add(to_read, &buf->data_size);

	CS_LOCK(drvdata->base);
}
478
/* Sink callbacks, shared by the ETB and ETF personalities of the TMC. */
static const struct coresight_ops_sink tmc_etf_sink_ops = {
	.enable		= tmc_enable_etf_sink,
	.disable	= tmc_disable_etf_sink,
	.alloc_buffer	= tmc_alloc_etf_buffer,
	.free_buffer	= tmc_free_etf_buffer,
	.set_buffer	= tmc_set_etf_buffer,
	.reset_buffer	= tmc_reset_etf_buffer,
	.update_buffer	= tmc_update_etf_buffer,
};

/* Link callbacks; only meaningful for the ETF, which can sit mid-path. */
static const struct coresight_ops_link tmc_etf_link_ops = {
	.enable		= tmc_enable_etf_link,
	.disable	= tmc_disable_etf_link,
};

/* An ETB is sink-only. */
const struct coresight_ops tmc_etb_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
};

/* An ETF can act as a sink or as a link. */
const struct coresight_ops tmc_etf_cs_ops = {
	.sink_ops	= &tmc_etf_sink_ops,
	.link_ops	= &tmc_etf_link_ops,
};
502
503int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
504{
505 long val;
506 enum tmc_mode mode;
507 int ret = 0;
508 unsigned long flags;
509
510 /* config types are set a boot time and never change */
511 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
512 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
513 return -EINVAL;
514
515 spin_lock_irqsave(&drvdata->spinlock, flags);
516
517 if (drvdata->reading) {
518 ret = -EBUSY;
519 goto out;
520 }
521
522 /* There is no point in reading a TMC in HW FIFO mode */
523 mode = readl_relaxed(drvdata->base + TMC_MODE);
524 if (mode != TMC_MODE_CIRCULAR_BUFFER) {
525 ret = -EINVAL;
526 goto out;
527 }
528
529 val = local_read(&drvdata->mode);
530 /* Don't interfere if operated from Perf */
531 if (val == CS_MODE_PERF) {
532 ret = -EINVAL;
533 goto out;
534 }
535
536 /* If drvdata::buf is NULL the trace data has been read already */
537 if (drvdata->buf == NULL) {
538 ret = -EINVAL;
539 goto out;
540 }
541
542 /* Disable the TMC if need be */
543 if (val == CS_MODE_SYSFS)
544 tmc_etb_disable_hw(drvdata);
545
546 drvdata->reading = true;
547out:
548 spin_unlock_irqrestore(&drvdata->spinlock, flags);
549
550 return ret;
551}
552
553int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
554{
555 char *buf = NULL;
556 enum tmc_mode mode;
557 unsigned long flags;
558
559 /* config types are set a boot time and never change */
560 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
561 drvdata->config_type != TMC_CONFIG_TYPE_ETF))
562 return -EINVAL;
563
564 spin_lock_irqsave(&drvdata->spinlock, flags);
565
566 /* There is no point in reading a TMC in HW FIFO mode */
567 mode = readl_relaxed(drvdata->base + TMC_MODE);
568 if (mode != TMC_MODE_CIRCULAR_BUFFER) {
569 spin_unlock_irqrestore(&drvdata->spinlock, flags);
570 return -EINVAL;
571 }
572
573 /* Re-enable the TMC if need be */
574 if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
575 /*
576 * The trace run will continue with the same allocated trace
577 * buffer. As such zero-out the buffer so that we don't end
578 * up with stale data.
579 *
580 * Since the tracer is still enabled drvdata::buf
581 * can't be NULL.
582 */
583 memset(drvdata->buf, 0, drvdata->size);
584 tmc_etb_enable_hw(drvdata);
585 } else {
586 /*
587 * The ETB/ETF is not tracing and the buffer was just read.
588 * As such prepare to free the trace buffer.
589 */
590 buf = drvdata->buf;
591 drvdata->buf = NULL;
592 }
593
594 drvdata->reading = false;
595 spin_unlock_irqrestore(&drvdata->spinlock, flags);
596
597 /*
598 * Free allocated memory outside of the spinlock. There is no need
599 * to assert the validity of 'buf' since calling kfree(NULL) is safe.
600 */
601 kfree(buf);
602
603 return 0;
604}
diff --git a/drivers/hwtracing/coresight/coresight-tmc-etr.c b/drivers/hwtracing/coresight/coresight-tmc-etr.c
new file mode 100644
index 000000000000..847d1b5f2c13
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tmc-etr.c
@@ -0,0 +1,329 @@
1/*
2 * Copyright(C) 2016 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#include <linux/coresight.h>
19#include <linux/dma-mapping.h>
20#include "coresight-priv.h"
21#include "coresight-tmc.h"
22
23void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
24{
25 u32 axictl;
26
27 /* Zero out the memory to help with debug */
28 memset(drvdata->vaddr, 0, drvdata->size);
29
30 CS_UNLOCK(drvdata->base);
31
32 /* Wait for TMCSReady bit to be set */
33 tmc_wait_for_tmcready(drvdata);
34
35 writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
36 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
37
38 axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
39 axictl |= TMC_AXICTL_WR_BURST_16;
40 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
41 axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
42 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
43 axictl = (axictl &
44 ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
45 TMC_AXICTL_PROT_CTL_B1;
46 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
47
48 writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
49 writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
50 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
51 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
52 TMC_FFCR_TRIGON_TRIGIN,
53 drvdata->base + TMC_FFCR);
54 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
55 tmc_enable_hw(drvdata);
56
57 CS_LOCK(drvdata->base);
58}
59
60static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
61{
62 u32 rwp, val;
63
64 rwp = readl_relaxed(drvdata->base + TMC_RWP);
65 val = readl_relaxed(drvdata->base + TMC_STS);
66
67 /* How much memory do we still have */
68 if (val & BIT(0))
69 drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
70 else
71 drvdata->buf = drvdata->vaddr;
72}
73
74static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
75{
76 CS_UNLOCK(drvdata->base);
77
78 tmc_flush_and_stop(drvdata);
79 /*
80 * When operating in sysFS mode the content of the buffer needs to be
81 * read before the TMC is disabled.
82 */
83 if (local_read(&drvdata->mode) == CS_MODE_SYSFS)
84 tmc_etr_dump_hw(drvdata);
85 tmc_disable_hw(drvdata);
86
87 CS_LOCK(drvdata->base);
88}
89
90static int tmc_enable_etr_sink_sysfs(struct coresight_device *csdev, u32 mode)
91{
92 int ret = 0;
93 bool used = false;
94 long val;
95 unsigned long flags;
96 void __iomem *vaddr = NULL;
97 dma_addr_t paddr;
98 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
99
100 /* This shouldn't be happening */
101 if (WARN_ON(mode != CS_MODE_SYSFS))
102 return -EINVAL;
103
104 /*
105 * If we don't have a buffer release the lock and allocate memory.
106 * Otherwise keep the lock and move along.
107 */
108 spin_lock_irqsave(&drvdata->spinlock, flags);
109 if (!drvdata->vaddr) {
110 spin_unlock_irqrestore(&drvdata->spinlock, flags);
111
112 /*
113 * Contiguous memory can't be allocated while a spinlock is
114 * held. As such allocate memory here and free it if a buffer
115 * has already been allocated (from a previous session).
116 */
117 vaddr = dma_alloc_coherent(drvdata->dev, drvdata->size,
118 &paddr, GFP_KERNEL);
119 if (!vaddr)
120 return -ENOMEM;
121
122 /* Let's try again */
123 spin_lock_irqsave(&drvdata->spinlock, flags);
124 }
125
126 if (drvdata->reading) {
127 ret = -EBUSY;
128 goto out;
129 }
130
131 val = local_xchg(&drvdata->mode, mode);
132 /*
133 * In sysFS mode we can have multiple writers per sink. Since this
134 * sink is already enabled no memory is needed and the HW need not be
135 * touched.
136 */
137 if (val == CS_MODE_SYSFS)
138 goto out;
139
140 /*
141 * If drvdata::buf == NULL, use the memory allocated above.
142 * Otherwise a buffer still exists from a previous session, so
143 * simply use that.
144 */
145 if (drvdata->buf == NULL) {
146 used = true;
147 drvdata->vaddr = vaddr;
148 drvdata->paddr = paddr;
149 drvdata->buf = drvdata->vaddr;
150 }
151
152 memset(drvdata->vaddr, 0, drvdata->size);
153
154 tmc_etr_enable_hw(drvdata);
155out:
156 spin_unlock_irqrestore(&drvdata->spinlock, flags);
157
158 /* Free memory outside the spinlock if need be */
159 if (!used && vaddr)
160 dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
161
162 if (!ret)
163 dev_info(drvdata->dev, "TMC-ETR enabled\n");
164
165 return ret;
166}
167
168static int tmc_enable_etr_sink_perf(struct coresight_device *csdev, u32 mode)
169{
170 int ret = 0;
171 long val;
172 unsigned long flags;
173 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
174
175 /* This shouldn't be happening */
176 if (WARN_ON(mode != CS_MODE_PERF))
177 return -EINVAL;
178
179 spin_lock_irqsave(&drvdata->spinlock, flags);
180 if (drvdata->reading) {
181 ret = -EINVAL;
182 goto out;
183 }
184
185 val = local_xchg(&drvdata->mode, mode);
186 /*
187 * In Perf mode there can be only one writer per sink. There
188 * is also no need to continue if the ETR is already operated
189 * from sysFS.
190 */
191 if (val != CS_MODE_DISABLED) {
192 ret = -EINVAL;
193 goto out;
194 }
195
196 tmc_etr_enable_hw(drvdata);
197out:
198 spin_unlock_irqrestore(&drvdata->spinlock, flags);
199
200 return ret;
201}
202
203static int tmc_enable_etr_sink(struct coresight_device *csdev, u32 mode)
204{
205 switch (mode) {
206 case CS_MODE_SYSFS:
207 return tmc_enable_etr_sink_sysfs(csdev, mode);
208 case CS_MODE_PERF:
209 return tmc_enable_etr_sink_perf(csdev, mode);
210 }
211
212 /* We shouldn't be here */
213 return -EINVAL;
214}
215
216static void tmc_disable_etr_sink(struct coresight_device *csdev)
217{
218 long val;
219 unsigned long flags;
220 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
221
222 spin_lock_irqsave(&drvdata->spinlock, flags);
223 if (drvdata->reading) {
224 spin_unlock_irqrestore(&drvdata->spinlock, flags);
225 return;
226 }
227
228 val = local_xchg(&drvdata->mode, CS_MODE_DISABLED);
229 /* Disable the TMC only if it needs to */
230 if (val != CS_MODE_DISABLED)
231 tmc_etr_disable_hw(drvdata);
232
233 spin_unlock_irqrestore(&drvdata->spinlock, flags);
234
235 dev_info(drvdata->dev, "TMC-ETR disabled\n");
236}
237
238static const struct coresight_ops_sink tmc_etr_sink_ops = {
239 .enable = tmc_enable_etr_sink,
240 .disable = tmc_disable_etr_sink,
241};
242
243const struct coresight_ops tmc_etr_cs_ops = {
244 .sink_ops = &tmc_etr_sink_ops,
245};
246
247int tmc_read_prepare_etr(struct tmc_drvdata *drvdata)
248{
249 int ret = 0;
250 long val;
251 unsigned long flags;
252
253 /* config types are set a boot time and never change */
254 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
255 return -EINVAL;
256
257 spin_lock_irqsave(&drvdata->spinlock, flags);
258 if (drvdata->reading) {
259 ret = -EBUSY;
260 goto out;
261 }
262
263 val = local_read(&drvdata->mode);
264 /* Don't interfere if operated from Perf */
265 if (val == CS_MODE_PERF) {
266 ret = -EINVAL;
267 goto out;
268 }
269
270 /* If drvdata::buf is NULL the trace data has been read already */
271 if (drvdata->buf == NULL) {
272 ret = -EINVAL;
273 goto out;
274 }
275
276 /* Disable the TMC if need be */
277 if (val == CS_MODE_SYSFS)
278 tmc_etr_disable_hw(drvdata);
279
280 drvdata->reading = true;
281out:
282 spin_unlock_irqrestore(&drvdata->spinlock, flags);
283
284 return ret;
285}
286
287int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata)
288{
289 unsigned long flags;
290 dma_addr_t paddr;
291 void __iomem *vaddr = NULL;
292
293 /* config types are set a boot time and never change */
294 if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETR))
295 return -EINVAL;
296
297 spin_lock_irqsave(&drvdata->spinlock, flags);
298
299 /* RE-enable the TMC if need be */
300 if (local_read(&drvdata->mode) == CS_MODE_SYSFS) {
301 /*
302 * The trace run will continue with the same allocated trace
303 * buffer. As such zero-out the buffer so that we don't end
304 * up with stale data.
305 *
306 * Since the tracer is still enabled drvdata::buf
307 * can't be NULL.
308 */
309 memset(drvdata->buf, 0, drvdata->size);
310 tmc_etr_enable_hw(drvdata);
311 } else {
312 /*
313 * The ETR is not tracing and the buffer was just read.
314 * As such prepare to free the trace buffer.
315 */
316 vaddr = drvdata->vaddr;
317 paddr = drvdata->paddr;
318 drvdata->buf = NULL;
319 }
320
321 drvdata->reading = false;
322 spin_unlock_irqrestore(&drvdata->spinlock, flags);
323
324 /* Free allocated memory out side of the spinlock */
325 if (vaddr)
326 dma_free_coherent(drvdata->dev, drvdata->size, vaddr, paddr);
327
328 return 0;
329}
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 1be191f5d39c..9e02ac963cd0 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -30,127 +30,27 @@
30#include <linux/amba/bus.h> 30#include <linux/amba/bus.h>
31 31
32#include "coresight-priv.h" 32#include "coresight-priv.h"
33#include "coresight-tmc.h"
33 34
34#define TMC_RSZ 0x004 35void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata)
35#define TMC_STS 0x00c
36#define TMC_RRD 0x010
37#define TMC_RRP 0x014
38#define TMC_RWP 0x018
39#define TMC_TRG 0x01c
40#define TMC_CTL 0x020
41#define TMC_RWD 0x024
42#define TMC_MODE 0x028
43#define TMC_LBUFLEVEL 0x02c
44#define TMC_CBUFLEVEL 0x030
45#define TMC_BUFWM 0x034
46#define TMC_RRPHI 0x038
47#define TMC_RWPHI 0x03c
48#define TMC_AXICTL 0x110
49#define TMC_DBALO 0x118
50#define TMC_DBAHI 0x11c
51#define TMC_FFSR 0x300
52#define TMC_FFCR 0x304
53#define TMC_PSCR 0x308
54#define TMC_ITMISCOP0 0xee0
55#define TMC_ITTRFLIN 0xee8
56#define TMC_ITATBDATA0 0xeec
57#define TMC_ITATBCTR2 0xef0
58#define TMC_ITATBCTR1 0xef4
59#define TMC_ITATBCTR0 0xef8
60
61/* register description */
62/* TMC_CTL - 0x020 */
63#define TMC_CTL_CAPT_EN BIT(0)
64/* TMC_STS - 0x00C */
65#define TMC_STS_TRIGGERED BIT(1)
66/* TMC_AXICTL - 0x110 */
67#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
68#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
69#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
70#define TMC_AXICTL_WR_BURST_LEN 0xF00
71/* TMC_FFCR - 0x304 */
72#define TMC_FFCR_EN_FMT BIT(0)
73#define TMC_FFCR_EN_TI BIT(1)
74#define TMC_FFCR_FON_FLIN BIT(4)
75#define TMC_FFCR_FON_TRIG_EVT BIT(5)
76#define TMC_FFCR_FLUSHMAN BIT(6)
77#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
78#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
79
80#define TMC_STS_TRIGGERED_BIT 2
81#define TMC_FFCR_FLUSHMAN_BIT 6
82
83enum tmc_config_type {
84 TMC_CONFIG_TYPE_ETB,
85 TMC_CONFIG_TYPE_ETR,
86 TMC_CONFIG_TYPE_ETF,
87};
88
89enum tmc_mode {
90 TMC_MODE_CIRCULAR_BUFFER,
91 TMC_MODE_SOFTWARE_FIFO,
92 TMC_MODE_HARDWARE_FIFO,
93};
94
95enum tmc_mem_intf_width {
96 TMC_MEM_INTF_WIDTH_32BITS = 0x2,
97 TMC_MEM_INTF_WIDTH_64BITS = 0x3,
98 TMC_MEM_INTF_WIDTH_128BITS = 0x4,
99 TMC_MEM_INTF_WIDTH_256BITS = 0x5,
100};
101
102/**
103 * struct tmc_drvdata - specifics associated to an TMC component
104 * @base: memory mapped base address for this component.
105 * @dev: the device entity associated to this component.
106 * @csdev: component vitals needed by the framework.
107 * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
108 * @spinlock: only one at a time pls.
109 * @read_count: manages preparation of buffer for reading.
110 * @buf: area of memory where trace data get sent.
111 * @paddr: DMA start location in RAM.
112 * @vaddr: virtual representation of @paddr.
113 * @size: @buf size.
114 * @enable: this TMC is being used.
115 * @config_type: TMC variant, must be of type @tmc_config_type.
116 * @trigger_cntr: amount of words to store after a trigger.
117 */
118struct tmc_drvdata {
119 void __iomem *base;
120 struct device *dev;
121 struct coresight_device *csdev;
122 struct miscdevice miscdev;
123 spinlock_t spinlock;
124 int read_count;
125 bool reading;
126 char *buf;
127 dma_addr_t paddr;
128 void *vaddr;
129 u32 size;
130 bool enable;
131 enum tmc_config_type config_type;
132 u32 trigger_cntr;
133};
134
135static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
136{ 36{
137 /* Ensure formatter, unformatter and hardware fifo are empty */ 37 /* Ensure formatter, unformatter and hardware fifo are empty */
138 if (coresight_timeout(drvdata->base, 38 if (coresight_timeout(drvdata->base,
139 TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) { 39 TMC_STS, TMC_STS_TMCREADY_BIT, 1)) {
140 dev_err(drvdata->dev, 40 dev_err(drvdata->dev,
141 "timeout observed when probing at offset %#x\n", 41 "timeout observed when probing at offset %#x\n",
142 TMC_STS); 42 TMC_STS);
143 } 43 }
144} 44}
145 45
146static void tmc_flush_and_stop(struct tmc_drvdata *drvdata) 46void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
147{ 47{
148 u32 ffcr; 48 u32 ffcr;
149 49
150 ffcr = readl_relaxed(drvdata->base + TMC_FFCR); 50 ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
151 ffcr |= TMC_FFCR_STOP_ON_FLUSH; 51 ffcr |= TMC_FFCR_STOP_ON_FLUSH;
152 writel_relaxed(ffcr, drvdata->base + TMC_FFCR); 52 writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
153 ffcr |= TMC_FFCR_FLUSHMAN; 53 ffcr |= BIT(TMC_FFCR_FLUSHMAN_BIT);
154 writel_relaxed(ffcr, drvdata->base + TMC_FFCR); 54 writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
155 /* Ensure flush completes */ 55 /* Ensure flush completes */
156 if (coresight_timeout(drvdata->base, 56 if (coresight_timeout(drvdata->base,
@@ -160,338 +60,73 @@ static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
160 TMC_FFCR); 60 TMC_FFCR);
161 } 61 }
162 62
163 tmc_wait_for_ready(drvdata); 63 tmc_wait_for_tmcready(drvdata);
164} 64}
165 65
166static void tmc_enable_hw(struct tmc_drvdata *drvdata) 66void tmc_enable_hw(struct tmc_drvdata *drvdata)
167{ 67{
168 writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL); 68 writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
169} 69}
170 70
171static void tmc_disable_hw(struct tmc_drvdata *drvdata) 71void tmc_disable_hw(struct tmc_drvdata *drvdata)
172{ 72{
173 writel_relaxed(0x0, drvdata->base + TMC_CTL); 73 writel_relaxed(0x0, drvdata->base + TMC_CTL);
174} 74}
175 75
176static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata) 76static int tmc_read_prepare(struct tmc_drvdata *drvdata)
177{
178 /* Zero out the memory to help with debug */
179 memset(drvdata->buf, 0, drvdata->size);
180
181 CS_UNLOCK(drvdata->base);
182
183 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
184 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
185 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
186 TMC_FFCR_TRIGON_TRIGIN,
187 drvdata->base + TMC_FFCR);
188
189 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
190 tmc_enable_hw(drvdata);
191
192 CS_LOCK(drvdata->base);
193}
194
195static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
196{
197 u32 axictl;
198
199 /* Zero out the memory to help with debug */
200 memset(drvdata->vaddr, 0, drvdata->size);
201
202 CS_UNLOCK(drvdata->base);
203
204 writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
205 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
206
207 axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
208 axictl |= TMC_AXICTL_WR_BURST_LEN;
209 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
210 axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
211 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
212 axictl = (axictl &
213 ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
214 TMC_AXICTL_PROT_CTL_B1;
215 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
216
217 writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
218 writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
219 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
220 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
221 TMC_FFCR_TRIGON_TRIGIN,
222 drvdata->base + TMC_FFCR);
223 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
224 tmc_enable_hw(drvdata);
225
226 CS_LOCK(drvdata->base);
227}
228
229static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
230{
231 CS_UNLOCK(drvdata->base);
232
233 writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
234 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
235 drvdata->base + TMC_FFCR);
236 writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
237 tmc_enable_hw(drvdata);
238
239 CS_LOCK(drvdata->base);
240}
241
242static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
243{
244 unsigned long flags;
245
246 spin_lock_irqsave(&drvdata->spinlock, flags);
247 if (drvdata->reading) {
248 spin_unlock_irqrestore(&drvdata->spinlock, flags);
249 return -EBUSY;
250 }
251
252 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
253 tmc_etb_enable_hw(drvdata);
254 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
255 tmc_etr_enable_hw(drvdata);
256 } else {
257 if (mode == TMC_MODE_CIRCULAR_BUFFER)
258 tmc_etb_enable_hw(drvdata);
259 else
260 tmc_etf_enable_hw(drvdata);
261 }
262 drvdata->enable = true;
263 spin_unlock_irqrestore(&drvdata->spinlock, flags);
264
265 dev_info(drvdata->dev, "TMC enabled\n");
266 return 0;
267}
268
269static int tmc_enable_sink(struct coresight_device *csdev, u32 mode)
270{
271 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
272
273 return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
274}
275
276static int tmc_enable_link(struct coresight_device *csdev, int inport,
277 int outport)
278{ 77{
279 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); 78 int ret = 0;
280
281 return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
282}
283 79
284static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata) 80 switch (drvdata->config_type) {
285{ 81 case TMC_CONFIG_TYPE_ETB:
286 enum tmc_mem_intf_width memwidth; 82 case TMC_CONFIG_TYPE_ETF:
287 u8 memwords; 83 ret = tmc_read_prepare_etb(drvdata);
288 char *bufp; 84 break;
289 u32 read_data; 85 case TMC_CONFIG_TYPE_ETR:
290 int i; 86 ret = tmc_read_prepare_etr(drvdata);
291 87 break;
292 memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10); 88 default:
293 if (memwidth == TMC_MEM_INTF_WIDTH_32BITS) 89 ret = -EINVAL;
294 memwords = 1;
295 else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
296 memwords = 2;
297 else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
298 memwords = 4;
299 else
300 memwords = 8;
301
302 bufp = drvdata->buf;
303 while (1) {
304 for (i = 0; i < memwords; i++) {
305 read_data = readl_relaxed(drvdata->base + TMC_RRD);
306 if (read_data == 0xFFFFFFFF)
307 return;
308 memcpy(bufp, &read_data, 4);
309 bufp += 4;
310 }
311 } 90 }
312}
313
314static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
315{
316 CS_UNLOCK(drvdata->base);
317
318 tmc_flush_and_stop(drvdata);
319 tmc_etb_dump_hw(drvdata);
320 tmc_disable_hw(drvdata);
321
322 CS_LOCK(drvdata->base);
323}
324
325static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
326{
327 u32 rwp, val;
328 91
329 rwp = readl_relaxed(drvdata->base + TMC_RWP); 92 if (!ret)
330 val = readl_relaxed(drvdata->base + TMC_STS); 93 dev_info(drvdata->dev, "TMC read start\n");
331
332 /* How much memory do we still have */
333 if (val & BIT(0))
334 drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
335 else
336 drvdata->buf = drvdata->vaddr;
337}
338 94
339static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata) 95 return ret;
340{
341 CS_UNLOCK(drvdata->base);
342
343 tmc_flush_and_stop(drvdata);
344 tmc_etr_dump_hw(drvdata);
345 tmc_disable_hw(drvdata);
346
347 CS_LOCK(drvdata->base);
348}
349
350static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
351{
352 CS_UNLOCK(drvdata->base);
353
354 tmc_flush_and_stop(drvdata);
355 tmc_disable_hw(drvdata);
356
357 CS_LOCK(drvdata->base);
358} 96}
359 97
360static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode) 98static int tmc_read_unprepare(struct tmc_drvdata *drvdata)
361{ 99{
362 unsigned long flags; 100 int ret = 0;
363
364 spin_lock_irqsave(&drvdata->spinlock, flags);
365 if (drvdata->reading)
366 goto out;
367 101
368 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { 102 switch (drvdata->config_type) {
369 tmc_etb_disable_hw(drvdata); 103 case TMC_CONFIG_TYPE_ETB:
370 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { 104 case TMC_CONFIG_TYPE_ETF:
371 tmc_etr_disable_hw(drvdata); 105 ret = tmc_read_unprepare_etb(drvdata);
372 } else { 106 break;
373 if (mode == TMC_MODE_CIRCULAR_BUFFER) 107 case TMC_CONFIG_TYPE_ETR:
374 tmc_etb_disable_hw(drvdata); 108 ret = tmc_read_unprepare_etr(drvdata);
375 else 109 break;
376 tmc_etf_disable_hw(drvdata); 110 default:
111 ret = -EINVAL;
377 } 112 }
378out:
379 drvdata->enable = false;
380 spin_unlock_irqrestore(&drvdata->spinlock, flags);
381
382 dev_info(drvdata->dev, "TMC disabled\n");
383}
384
385static void tmc_disable_sink(struct coresight_device *csdev)
386{
387 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
388
389 tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
390}
391
392static void tmc_disable_link(struct coresight_device *csdev, int inport,
393 int outport)
394{
395 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
396
397 tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
398}
399
400static const struct coresight_ops_sink tmc_sink_ops = {
401 .enable = tmc_enable_sink,
402 .disable = tmc_disable_sink,
403};
404
405static const struct coresight_ops_link tmc_link_ops = {
406 .enable = tmc_enable_link,
407 .disable = tmc_disable_link,
408};
409
410static const struct coresight_ops tmc_etb_cs_ops = {
411 .sink_ops = &tmc_sink_ops,
412};
413
414static const struct coresight_ops tmc_etr_cs_ops = {
415 .sink_ops = &tmc_sink_ops,
416};
417
418static const struct coresight_ops tmc_etf_cs_ops = {
419 .sink_ops = &tmc_sink_ops,
420 .link_ops = &tmc_link_ops,
421};
422
423static int tmc_read_prepare(struct tmc_drvdata *drvdata)
424{
425 int ret;
426 unsigned long flags;
427 enum tmc_mode mode;
428 113
429 spin_lock_irqsave(&drvdata->spinlock, flags); 114 if (!ret)
430 if (!drvdata->enable) 115 dev_info(drvdata->dev, "TMC read end\n");
431 goto out;
432 116
433 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
434 tmc_etb_disable_hw(drvdata);
435 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
436 tmc_etr_disable_hw(drvdata);
437 } else {
438 mode = readl_relaxed(drvdata->base + TMC_MODE);
439 if (mode == TMC_MODE_CIRCULAR_BUFFER) {
440 tmc_etb_disable_hw(drvdata);
441 } else {
442 ret = -ENODEV;
443 goto err;
444 }
445 }
446out:
447 drvdata->reading = true;
448 spin_unlock_irqrestore(&drvdata->spinlock, flags);
449
450 dev_info(drvdata->dev, "TMC read start\n");
451 return 0;
452err:
453 spin_unlock_irqrestore(&drvdata->spinlock, flags);
454 return ret; 117 return ret;
455} 118}
456 119
457static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
458{
459 unsigned long flags;
460 enum tmc_mode mode;
461
462 spin_lock_irqsave(&drvdata->spinlock, flags);
463 if (!drvdata->enable)
464 goto out;
465
466 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
467 tmc_etb_enable_hw(drvdata);
468 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
469 tmc_etr_enable_hw(drvdata);
470 } else {
471 mode = readl_relaxed(drvdata->base + TMC_MODE);
472 if (mode == TMC_MODE_CIRCULAR_BUFFER)
473 tmc_etb_enable_hw(drvdata);
474 }
475out:
476 drvdata->reading = false;
477 spin_unlock_irqrestore(&drvdata->spinlock, flags);
478
479 dev_info(drvdata->dev, "TMC read end\n");
480}
481
482static int tmc_open(struct inode *inode, struct file *file) 120static int tmc_open(struct inode *inode, struct file *file)
483{ 121{
122 int ret;
484 struct tmc_drvdata *drvdata = container_of(file->private_data, 123 struct tmc_drvdata *drvdata = container_of(file->private_data,
485 struct tmc_drvdata, miscdev); 124 struct tmc_drvdata, miscdev);
486 int ret = 0;
487
488 if (drvdata->read_count++)
489 goto out;
490 125
491 ret = tmc_read_prepare(drvdata); 126 ret = tmc_read_prepare(drvdata);
492 if (ret) 127 if (ret)
493 return ret; 128 return ret;
494out: 129
495 nonseekable_open(inode, file); 130 nonseekable_open(inode, file);
496 131
497 dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__); 132 dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
@@ -531,19 +166,14 @@ static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
531 166
532static int tmc_release(struct inode *inode, struct file *file) 167static int tmc_release(struct inode *inode, struct file *file)
533{ 168{
169 int ret;
534 struct tmc_drvdata *drvdata = container_of(file->private_data, 170 struct tmc_drvdata *drvdata = container_of(file->private_data,
535 struct tmc_drvdata, miscdev); 171 struct tmc_drvdata, miscdev);
536 172
537 if (--drvdata->read_count) { 173 ret = tmc_read_unprepare(drvdata);
538 if (drvdata->read_count < 0) { 174 if (ret)
539 dev_err(drvdata->dev, "mismatched close\n"); 175 return ret;
540 drvdata->read_count = 0;
541 }
542 goto out;
543 }
544 176
545 tmc_read_unprepare(drvdata);
546out:
547 dev_dbg(drvdata->dev, "%s: released\n", __func__); 177 dev_dbg(drvdata->dev, "%s: released\n", __func__);
548 return 0; 178 return 0;
549} 179}
@@ -556,56 +186,71 @@ static const struct file_operations tmc_fops = {
556 .llseek = no_llseek, 186 .llseek = no_llseek,
557}; 187};
558 188
559static ssize_t status_show(struct device *dev, 189static enum tmc_mem_intf_width tmc_get_memwidth(u32 devid)
560 struct device_attribute *attr, char *buf)
561{ 190{
562 unsigned long flags; 191 enum tmc_mem_intf_width memwidth;
563 u32 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg;
564 u32 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr;
565 u32 devid;
566 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
567 192
568 pm_runtime_get_sync(drvdata->dev); 193 /*
569 spin_lock_irqsave(&drvdata->spinlock, flags); 194 * Excerpt from the TRM:
570 CS_UNLOCK(drvdata->base); 195 *
571 196 * DEVID::MEMWIDTH[10:8]
572 tmc_rsz = readl_relaxed(drvdata->base + TMC_RSZ); 197 * 0x2 Memory interface databus is 32 bits wide.
573 tmc_sts = readl_relaxed(drvdata->base + TMC_STS); 198 * 0x3 Memory interface databus is 64 bits wide.
574 tmc_rrp = readl_relaxed(drvdata->base + TMC_RRP); 199 * 0x4 Memory interface databus is 128 bits wide.
575 tmc_rwp = readl_relaxed(drvdata->base + TMC_RWP); 200 * 0x5 Memory interface databus is 256 bits wide.
576 tmc_trg = readl_relaxed(drvdata->base + TMC_TRG); 201 */
577 tmc_ctl = readl_relaxed(drvdata->base + TMC_CTL); 202 switch (BMVAL(devid, 8, 10)) {
578 tmc_ffsr = readl_relaxed(drvdata->base + TMC_FFSR); 203 case 0x2:
579 tmc_ffcr = readl_relaxed(drvdata->base + TMC_FFCR); 204 memwidth = TMC_MEM_INTF_WIDTH_32BITS;
580 tmc_mode = readl_relaxed(drvdata->base + TMC_MODE); 205 break;
581 tmc_pscr = readl_relaxed(drvdata->base + TMC_PSCR); 206 case 0x3:
582 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); 207 memwidth = TMC_MEM_INTF_WIDTH_64BITS;
208 break;
209 case 0x4:
210 memwidth = TMC_MEM_INTF_WIDTH_128BITS;
211 break;
212 case 0x5:
213 memwidth = TMC_MEM_INTF_WIDTH_256BITS;
214 break;
215 default:
216 memwidth = 0;
217 }
583 218
584 CS_LOCK(drvdata->base); 219 return memwidth;
585 spin_unlock_irqrestore(&drvdata->spinlock, flags);
586 pm_runtime_put(drvdata->dev);
587
588 return sprintf(buf,
589 "Depth:\t\t0x%x\n"
590 "Status:\t\t0x%x\n"
591 "RAM read ptr:\t0x%x\n"
592 "RAM wrt ptr:\t0x%x\n"
593 "Trigger cnt:\t0x%x\n"
594 "Control:\t0x%x\n"
595 "Flush status:\t0x%x\n"
596 "Flush ctrl:\t0x%x\n"
597 "Mode:\t\t0x%x\n"
598 "PSRC:\t\t0x%x\n"
599 "DEVID:\t\t0x%x\n",
600 tmc_rsz, tmc_sts, tmc_rrp, tmc_rwp, tmc_trg,
601 tmc_ctl, tmc_ffsr, tmc_ffcr, tmc_mode, tmc_pscr, devid);
602
603 return -EINVAL;
604} 220}
605static DEVICE_ATTR_RO(status);
606 221
607static ssize_t trigger_cntr_show(struct device *dev, 222#define coresight_tmc_simple_func(name, offset) \
608 struct device_attribute *attr, char *buf) 223 coresight_simple_func(struct tmc_drvdata, name, offset)
224
225coresight_tmc_simple_func(rsz, TMC_RSZ);
226coresight_tmc_simple_func(sts, TMC_STS);
227coresight_tmc_simple_func(rrp, TMC_RRP);
228coresight_tmc_simple_func(rwp, TMC_RWP);
229coresight_tmc_simple_func(trg, TMC_TRG);
230coresight_tmc_simple_func(ctl, TMC_CTL);
231coresight_tmc_simple_func(ffsr, TMC_FFSR);
232coresight_tmc_simple_func(ffcr, TMC_FFCR);
233coresight_tmc_simple_func(mode, TMC_MODE);
234coresight_tmc_simple_func(pscr, TMC_PSCR);
235coresight_tmc_simple_func(devid, CORESIGHT_DEVID);
236
237static struct attribute *coresight_tmc_mgmt_attrs[] = {
238 &dev_attr_rsz.attr,
239 &dev_attr_sts.attr,
240 &dev_attr_rrp.attr,
241 &dev_attr_rwp.attr,
242 &dev_attr_trg.attr,
243 &dev_attr_ctl.attr,
244 &dev_attr_ffsr.attr,
245 &dev_attr_ffcr.attr,
246 &dev_attr_mode.attr,
247 &dev_attr_pscr.attr,
248 &dev_attr_devid.attr,
249 NULL,
250};
251
252ssize_t trigger_cntr_show(struct device *dev,
253 struct device_attribute *attr, char *buf)
609{ 254{
610 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent); 255 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
611 unsigned long val = drvdata->trigger_cntr; 256 unsigned long val = drvdata->trigger_cntr;
@@ -630,26 +275,25 @@ static ssize_t trigger_cntr_store(struct device *dev,
630} 275}
631static DEVICE_ATTR_RW(trigger_cntr); 276static DEVICE_ATTR_RW(trigger_cntr);
632 277
633static struct attribute *coresight_etb_attrs[] = { 278static struct attribute *coresight_tmc_attrs[] = {
634 &dev_attr_trigger_cntr.attr, 279 &dev_attr_trigger_cntr.attr,
635 &dev_attr_status.attr,
636 NULL, 280 NULL,
637}; 281};
638ATTRIBUTE_GROUPS(coresight_etb);
639 282
640static struct attribute *coresight_etr_attrs[] = { 283static const struct attribute_group coresight_tmc_group = {
641 &dev_attr_trigger_cntr.attr, 284 .attrs = coresight_tmc_attrs,
642 &dev_attr_status.attr,
643 NULL,
644}; 285};
645ATTRIBUTE_GROUPS(coresight_etr);
646 286
647static struct attribute *coresight_etf_attrs[] = { 287static const struct attribute_group coresight_tmc_mgmt_group = {
648 &dev_attr_trigger_cntr.attr, 288 .attrs = coresight_tmc_mgmt_attrs,
649 &dev_attr_status.attr, 289 .name = "mgmt",
290};
291
292const struct attribute_group *coresight_tmc_groups[] = {
293 &coresight_tmc_group,
294 &coresight_tmc_mgmt_group,
650 NULL, 295 NULL,
651}; 296};
652ATTRIBUTE_GROUPS(coresight_etf);
653 297
654static int tmc_probe(struct amba_device *adev, const struct amba_id *id) 298static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
655{ 299{
@@ -688,6 +332,7 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
688 332
689 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID); 333 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
690 drvdata->config_type = BMVAL(devid, 6, 7); 334 drvdata->config_type = BMVAL(devid, 6, 7);
335 drvdata->memwidth = tmc_get_memwidth(devid);
691 336
692 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { 337 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
693 if (np) 338 if (np)
@@ -702,20 +347,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
702 347
703 pm_runtime_put(&adev->dev); 348 pm_runtime_put(&adev->dev);
704 349
705 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
706 drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
707 &drvdata->paddr, GFP_KERNEL);
708 if (!drvdata->vaddr)
709 return -ENOMEM;
710
711 memset(drvdata->vaddr, 0, drvdata->size);
712 drvdata->buf = drvdata->vaddr;
713 } else {
714 drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
715 if (!drvdata->buf)
716 return -ENOMEM;
717 }
718
719 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); 350 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
720 if (!desc) { 351 if (!desc) {
721 ret = -ENOMEM; 352 ret = -ENOMEM;
@@ -725,20 +356,18 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
725 desc->pdata = pdata; 356 desc->pdata = pdata;
726 desc->dev = dev; 357 desc->dev = dev;
727 desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER; 358 desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
359 desc->groups = coresight_tmc_groups;
728 360
729 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) { 361 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
730 desc->type = CORESIGHT_DEV_TYPE_SINK; 362 desc->type = CORESIGHT_DEV_TYPE_SINK;
731 desc->ops = &tmc_etb_cs_ops; 363 desc->ops = &tmc_etb_cs_ops;
732 desc->groups = coresight_etb_groups;
733 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) { 364 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
734 desc->type = CORESIGHT_DEV_TYPE_SINK; 365 desc->type = CORESIGHT_DEV_TYPE_SINK;
735 desc->ops = &tmc_etr_cs_ops; 366 desc->ops = &tmc_etr_cs_ops;
736 desc->groups = coresight_etr_groups;
737 } else { 367 } else {
738 desc->type = CORESIGHT_DEV_TYPE_LINKSINK; 368 desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
739 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO; 369 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
740 desc->ops = &tmc_etf_cs_ops; 370 desc->ops = &tmc_etf_cs_ops;
741 desc->groups = coresight_etf_groups;
742 } 371 }
743 372
744 drvdata->csdev = coresight_register(desc); 373 drvdata->csdev = coresight_register(desc);
@@ -754,7 +383,6 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
754 if (ret) 383 if (ret)
755 goto err_misc_register; 384 goto err_misc_register;
756 385
757 dev_info(dev, "TMC initialized\n");
758 return 0; 386 return 0;
759 387
760err_misc_register: 388err_misc_register:
diff --git a/drivers/hwtracing/coresight/coresight-tmc.h b/drivers/hwtracing/coresight/coresight-tmc.h
new file mode 100644
index 000000000000..5c5fe2ad2ca7
--- /dev/null
+++ b/drivers/hwtracing/coresight/coresight-tmc.h
@@ -0,0 +1,140 @@
1/*
2 * Copyright(C) 2015 Linaro Limited. All rights reserved.
3 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published by
7 * the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program. If not, see <http://www.gnu.org/licenses/>.
16 */
17
18#ifndef _CORESIGHT_TMC_H
19#define _CORESIGHT_TMC_H
20
21#include <linux/miscdevice.h>
22
23#define TMC_RSZ 0x004
24#define TMC_STS 0x00c
25#define TMC_RRD 0x010
26#define TMC_RRP 0x014
27#define TMC_RWP 0x018
28#define TMC_TRG 0x01c
29#define TMC_CTL 0x020
30#define TMC_RWD 0x024
31#define TMC_MODE 0x028
32#define TMC_LBUFLEVEL 0x02c
33#define TMC_CBUFLEVEL 0x030
34#define TMC_BUFWM 0x034
35#define TMC_RRPHI 0x038
36#define TMC_RWPHI 0x03c
37#define TMC_AXICTL 0x110
38#define TMC_DBALO 0x118
39#define TMC_DBAHI 0x11c
40#define TMC_FFSR 0x300
41#define TMC_FFCR 0x304
42#define TMC_PSCR 0x308
43#define TMC_ITMISCOP0 0xee0
44#define TMC_ITTRFLIN 0xee8
45#define TMC_ITATBDATA0 0xeec
46#define TMC_ITATBCTR2 0xef0
47#define TMC_ITATBCTR1 0xef4
48#define TMC_ITATBCTR0 0xef8
49
50/* register description */
51/* TMC_CTL - 0x020 */
52#define TMC_CTL_CAPT_EN BIT(0)
53/* TMC_STS - 0x00C */
54#define TMC_STS_TMCREADY_BIT 2
55#define TMC_STS_FULL BIT(0)
56#define TMC_STS_TRIGGERED BIT(1)
57/* TMC_AXICTL - 0x110 */
58#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
59#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
60#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
61#define TMC_AXICTL_WR_BURST_16 0xF00
62/* TMC_FFCR - 0x304 */
63#define TMC_FFCR_FLUSHMAN_BIT 6
64#define TMC_FFCR_EN_FMT BIT(0)
65#define TMC_FFCR_EN_TI BIT(1)
66#define TMC_FFCR_FON_FLIN BIT(4)
67#define TMC_FFCR_FON_TRIG_EVT BIT(5)
68#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
69#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
70
71
72enum tmc_config_type {
73 TMC_CONFIG_TYPE_ETB,
74 TMC_CONFIG_TYPE_ETR,
75 TMC_CONFIG_TYPE_ETF,
76};
77
78enum tmc_mode {
79 TMC_MODE_CIRCULAR_BUFFER,
80 TMC_MODE_SOFTWARE_FIFO,
81 TMC_MODE_HARDWARE_FIFO,
82};
83
84enum tmc_mem_intf_width {
85 TMC_MEM_INTF_WIDTH_32BITS = 1,
86 TMC_MEM_INTF_WIDTH_64BITS = 2,
87 TMC_MEM_INTF_WIDTH_128BITS = 4,
88 TMC_MEM_INTF_WIDTH_256BITS = 8,
89};
90
91/**
92 * struct tmc_drvdata - specifics associated to an TMC component
93 * @base: memory mapped base address for this component.
94 * @dev: the device entity associated to this component.
95 * @csdev: component vitals needed by the framework.
96 * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
97 * @spinlock: only one at a time pls.
98 * @buf: area of memory where trace data get sent.
99 * @paddr: DMA start location in RAM.
100 * @vaddr: virtual representation of @paddr.
101 * @size: @buf size.
102 * @mode: how this TMC is being used.
103 * @config_type: TMC variant, must be of type @tmc_config_type.
104 * @memwidth: width of the memory interface databus, in bytes.
105 * @trigger_cntr: amount of words to store after a trigger.
106 */
107struct tmc_drvdata {
108 void __iomem *base;
109 struct device *dev;
110 struct coresight_device *csdev;
111 struct miscdevice miscdev;
112 spinlock_t spinlock;
113 bool reading;
114 char *buf;
115 dma_addr_t paddr;
116 void __iomem *vaddr;
117 u32 size;
118 local_t mode;
119 enum tmc_config_type config_type;
120 enum tmc_mem_intf_width memwidth;
121 u32 trigger_cntr;
122};
123
124/* Generic functions */
125void tmc_wait_for_tmcready(struct tmc_drvdata *drvdata);
126void tmc_flush_and_stop(struct tmc_drvdata *drvdata);
127void tmc_enable_hw(struct tmc_drvdata *drvdata);
128void tmc_disable_hw(struct tmc_drvdata *drvdata);
129
130/* ETB/ETF functions */
131int tmc_read_prepare_etb(struct tmc_drvdata *drvdata);
132int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata);
133extern const struct coresight_ops tmc_etb_cs_ops;
134extern const struct coresight_ops tmc_etf_cs_ops;
135
136/* ETR functions */
137int tmc_read_prepare_etr(struct tmc_drvdata *drvdata);
138int tmc_read_unprepare_etr(struct tmc_drvdata *drvdata);
139extern const struct coresight_ops tmc_etr_cs_ops;
140#endif
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 8fb09d9237ab..4e471e2e9d89 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -167,7 +167,6 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
167 if (IS_ERR(drvdata->csdev)) 167 if (IS_ERR(drvdata->csdev))
168 return PTR_ERR(drvdata->csdev); 168 return PTR_ERR(drvdata->csdev);
169 169
170 dev_info(dev, "TPIU initialized\n");
171 return 0; 170 return 0;
172} 171}
173 172
diff --git a/drivers/hwtracing/coresight/coresight.c b/drivers/hwtracing/coresight/coresight.c
index 2ea5961092c1..5443d03a1eec 100644
--- a/drivers/hwtracing/coresight/coresight.c
+++ b/drivers/hwtracing/coresight/coresight.c
@@ -43,7 +43,15 @@ struct coresight_node {
43 * When operating Coresight drivers from the sysFS interface, only a single 43 * When operating Coresight drivers from the sysFS interface, only a single
44 * path can exist from a tracer (associated to a CPU) to a sink. 44 * path can exist from a tracer (associated to a CPU) to a sink.
45 */ 45 */
46static DEFINE_PER_CPU(struct list_head *, sysfs_path); 46static DEFINE_PER_CPU(struct list_head *, tracer_path);
47
48/*
49 * As of this writing only a single STM can be found in CS topologies. Since
50 * there is no way to know if we'll ever see more and what kind of
51 * configuration they will enact, for the time being only define a single path
52 * for STM.
53 */
54static struct list_head *stm_path;
47 55
48static int coresight_id_match(struct device *dev, void *data) 56static int coresight_id_match(struct device *dev, void *data)
49{ 57{
@@ -257,15 +265,27 @@ static void coresight_disable_source(struct coresight_device *csdev)
257 265
258void coresight_disable_path(struct list_head *path) 266void coresight_disable_path(struct list_head *path)
259{ 267{
268 u32 type;
260 struct coresight_node *nd; 269 struct coresight_node *nd;
261 struct coresight_device *csdev, *parent, *child; 270 struct coresight_device *csdev, *parent, *child;
262 271
263 list_for_each_entry(nd, path, link) { 272 list_for_each_entry(nd, path, link) {
264 csdev = nd->csdev; 273 csdev = nd->csdev;
274 type = csdev->type;
275
276 /*
277 * ETF devices are tricky... They can be a link or a sink,
278 * depending on how they are configured. If an ETF has been
279 * "activated" it will be configured as a sink, otherwise
280 * go ahead with the link configuration.
281 */
282 if (type == CORESIGHT_DEV_TYPE_LINKSINK)
283 type = (csdev == coresight_get_sink(path)) ?
284 CORESIGHT_DEV_TYPE_SINK :
285 CORESIGHT_DEV_TYPE_LINK;
265 286
266 switch (csdev->type) { 287 switch (type) {
267 case CORESIGHT_DEV_TYPE_SINK: 288 case CORESIGHT_DEV_TYPE_SINK:
268 case CORESIGHT_DEV_TYPE_LINKSINK:
269 coresight_disable_sink(csdev); 289 coresight_disable_sink(csdev);
270 break; 290 break;
271 case CORESIGHT_DEV_TYPE_SOURCE: 291 case CORESIGHT_DEV_TYPE_SOURCE:
@@ -286,15 +306,27 @@ int coresight_enable_path(struct list_head *path, u32 mode)
286{ 306{
287 307
288 int ret = 0; 308 int ret = 0;
309 u32 type;
289 struct coresight_node *nd; 310 struct coresight_node *nd;
290 struct coresight_device *csdev, *parent, *child; 311 struct coresight_device *csdev, *parent, *child;
291 312
292 list_for_each_entry_reverse(nd, path, link) { 313 list_for_each_entry_reverse(nd, path, link) {
293 csdev = nd->csdev; 314 csdev = nd->csdev;
315 type = csdev->type;
294 316
295 switch (csdev->type) { 317 /*
318 * ETF devices are tricky... They can be a link or a sink,
319 * depending on how they are configured. If an ETF has been
320 * "activated" it will be configured as a sink, otherwise
321 * go ahead with the link configuration.
322 */
323 if (type == CORESIGHT_DEV_TYPE_LINKSINK)
324 type = (csdev == coresight_get_sink(path)) ?
325 CORESIGHT_DEV_TYPE_SINK :
326 CORESIGHT_DEV_TYPE_LINK;
327
328 switch (type) {
296 case CORESIGHT_DEV_TYPE_SINK: 329 case CORESIGHT_DEV_TYPE_SINK:
297 case CORESIGHT_DEV_TYPE_LINKSINK:
298 ret = coresight_enable_sink(csdev, mode); 330 ret = coresight_enable_sink(csdev, mode);
299 if (ret) 331 if (ret)
300 goto err; 332 goto err;
@@ -432,18 +464,45 @@ void coresight_release_path(struct list_head *path)
432 path = NULL; 464 path = NULL;
433} 465}
434 466
467/** coresight_validate_source - make sure a source has the right credentials
468 * @csdev: the device structure for a source.
469 * @function: the function this was called from.
470 *
471 * Assumes the coresight_mutex is held.
472 */
473static int coresight_validate_source(struct coresight_device *csdev,
474 const char *function)
475{
476 u32 type, subtype;
477
478 type = csdev->type;
479 subtype = csdev->subtype.source_subtype;
480
481 if (type != CORESIGHT_DEV_TYPE_SOURCE) {
482 dev_err(&csdev->dev, "wrong device type in %s\n", function);
483 return -EINVAL;
484 }
485
486 if (subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_PROC &&
487 subtype != CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE) {
488 dev_err(&csdev->dev, "wrong device subtype in %s\n", function);
489 return -EINVAL;
490 }
491
492 return 0;
493}
494
435int coresight_enable(struct coresight_device *csdev) 495int coresight_enable(struct coresight_device *csdev)
436{ 496{
437 int ret = 0; 497 int cpu, ret = 0;
438 int cpu;
439 struct list_head *path; 498 struct list_head *path;
440 499
441 mutex_lock(&coresight_mutex); 500 mutex_lock(&coresight_mutex);
442 if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { 501
443 ret = -EINVAL; 502 ret = coresight_validate_source(csdev, __func__);
444 dev_err(&csdev->dev, "wrong device type in %s\n", __func__); 503 if (ret)
445 goto out; 504 goto out;
446 } 505
447 if (csdev->enable) 506 if (csdev->enable)
448 goto out; 507 goto out;
449 508
@@ -461,15 +520,25 @@ int coresight_enable(struct coresight_device *csdev)
461 if (ret) 520 if (ret)
462 goto err_source; 521 goto err_source;
463 522
464 /* 523 switch (csdev->subtype.source_subtype) {
465 * When working from sysFS it is important to keep track 524 case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
466 * of the paths that were created so that they can be 525 /*
467 * undone in 'coresight_disable()'. Since there can only 526 * When working from sysFS it is important to keep track
468 * be a single session per tracer (when working from sysFS) 527 * of the paths that were created so that they can be
469 * a per-cpu variable will do just fine. 528 * undone in 'coresight_disable()'. Since there can only
470 */ 529 * be a single session per tracer (when working from sysFS)
471 cpu = source_ops(csdev)->cpu_id(csdev); 530 * a per-cpu variable will do just fine.
472 per_cpu(sysfs_path, cpu) = path; 531 */
532 cpu = source_ops(csdev)->cpu_id(csdev);
533 per_cpu(tracer_path, cpu) = path;
534 break;
535 case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
536 stm_path = path;
537 break;
538 default:
539 /* We can't be here */
540 break;
541 }
473 542
474out: 543out:
475 mutex_unlock(&coresight_mutex); 544 mutex_unlock(&coresight_mutex);
@@ -486,23 +555,36 @@ EXPORT_SYMBOL_GPL(coresight_enable);
486 555
487void coresight_disable(struct coresight_device *csdev) 556void coresight_disable(struct coresight_device *csdev)
488{ 557{
489 int cpu; 558 int cpu, ret;
490 struct list_head *path; 559 struct list_head *path = NULL;
491 560
492 mutex_lock(&coresight_mutex); 561 mutex_lock(&coresight_mutex);
493 if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) { 562
494 dev_err(&csdev->dev, "wrong device type in %s\n", __func__); 563 ret = coresight_validate_source(csdev, __func__);
564 if (ret)
495 goto out; 565 goto out;
496 } 566
497 if (!csdev->enable) 567 if (!csdev->enable)
498 goto out; 568 goto out;
499 569
500 cpu = source_ops(csdev)->cpu_id(csdev); 570 switch (csdev->subtype.source_subtype) {
501 path = per_cpu(sysfs_path, cpu); 571 case CORESIGHT_DEV_SUBTYPE_SOURCE_PROC:
572 cpu = source_ops(csdev)->cpu_id(csdev);
573 path = per_cpu(tracer_path, cpu);
574 per_cpu(tracer_path, cpu) = NULL;
575 break;
576 case CORESIGHT_DEV_SUBTYPE_SOURCE_SOFTWARE:
577 path = stm_path;
578 stm_path = NULL;
579 break;
580 default:
581 /* We can't be here */
582 break;
583 }
584
502 coresight_disable_source(csdev); 585 coresight_disable_source(csdev);
503 coresight_disable_path(path); 586 coresight_disable_path(path);
504 coresight_release_path(path); 587 coresight_release_path(path);
505 per_cpu(sysfs_path, cpu) = NULL;
506 588
507out: 589out:
508 mutex_unlock(&coresight_mutex); 590 mutex_unlock(&coresight_mutex);
@@ -514,7 +596,7 @@ static ssize_t enable_sink_show(struct device *dev,
514{ 596{
515 struct coresight_device *csdev = to_coresight_device(dev); 597 struct coresight_device *csdev = to_coresight_device(dev);
516 598
517 return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated); 599 return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->activated);
518} 600}
519 601
520static ssize_t enable_sink_store(struct device *dev, 602static ssize_t enable_sink_store(struct device *dev,
@@ -544,7 +626,7 @@ static ssize_t enable_source_show(struct device *dev,
544{ 626{
545 struct coresight_device *csdev = to_coresight_device(dev); 627 struct coresight_device *csdev = to_coresight_device(dev);
546 628
547 return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable); 629 return scnprintf(buf, PAGE_SIZE, "%u\n", csdev->enable);
548} 630}
549 631
550static ssize_t enable_source_store(struct device *dev, 632static ssize_t enable_source_store(struct device *dev,
diff --git a/drivers/hwtracing/intel_th/core.c b/drivers/hwtracing/intel_th/core.c
index 4272f2ce5f6e..1be543e8e42f 100644
--- a/drivers/hwtracing/intel_th/core.c
+++ b/drivers/hwtracing/intel_th/core.c
@@ -71,6 +71,15 @@ static int intel_th_probe(struct device *dev)
71 if (ret) 71 if (ret)
72 return ret; 72 return ret;
73 73
74 if (thdrv->attr_group) {
75 ret = sysfs_create_group(&thdev->dev.kobj, thdrv->attr_group);
76 if (ret) {
77 thdrv->remove(thdev);
78
79 return ret;
80 }
81 }
82
74 if (thdev->type == INTEL_TH_OUTPUT && 83 if (thdev->type == INTEL_TH_OUTPUT &&
75 !intel_th_output_assigned(thdev)) 84 !intel_th_output_assigned(thdev))
76 ret = hubdrv->assign(hub, thdev); 85 ret = hubdrv->assign(hub, thdev);
@@ -91,6 +100,9 @@ static int intel_th_remove(struct device *dev)
91 return err; 100 return err;
92 } 101 }
93 102
103 if (thdrv->attr_group)
104 sysfs_remove_group(&thdev->dev.kobj, thdrv->attr_group);
105
94 thdrv->remove(thdev); 106 thdrv->remove(thdev);
95 107
96 if (intel_th_output_assigned(thdev)) { 108 if (intel_th_output_assigned(thdev)) {
@@ -171,7 +183,14 @@ static DEVICE_ATTR_RO(port);
171 183
172static int intel_th_output_activate(struct intel_th_device *thdev) 184static int intel_th_output_activate(struct intel_th_device *thdev)
173{ 185{
174 struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver); 186 struct intel_th_driver *thdrv =
187 to_intel_th_driver_or_null(thdev->dev.driver);
188
189 if (!thdrv)
190 return -ENODEV;
191
192 if (!try_module_get(thdrv->driver.owner))
193 return -ENODEV;
175 194
176 if (thdrv->activate) 195 if (thdrv->activate)
177 return thdrv->activate(thdev); 196 return thdrv->activate(thdev);
@@ -183,12 +202,18 @@ static int intel_th_output_activate(struct intel_th_device *thdev)
183 202
184static void intel_th_output_deactivate(struct intel_th_device *thdev) 203static void intel_th_output_deactivate(struct intel_th_device *thdev)
185{ 204{
186 struct intel_th_driver *thdrv = to_intel_th_driver(thdev->dev.driver); 205 struct intel_th_driver *thdrv =
206 to_intel_th_driver_or_null(thdev->dev.driver);
207
208 if (!thdrv)
209 return;
187 210
188 if (thdrv->deactivate) 211 if (thdrv->deactivate)
189 thdrv->deactivate(thdev); 212 thdrv->deactivate(thdev);
190 else 213 else
191 intel_th_trace_disable(thdev); 214 intel_th_trace_disable(thdev);
215
216 module_put(thdrv->driver.owner);
192} 217}
193 218
194static ssize_t active_show(struct device *dev, struct device_attribute *attr, 219static ssize_t active_show(struct device *dev, struct device_attribute *attr,
diff --git a/drivers/hwtracing/intel_th/intel_th.h b/drivers/hwtracing/intel_th/intel_th.h
index eedd09332db6..0df22e30673d 100644
--- a/drivers/hwtracing/intel_th/intel_th.h
+++ b/drivers/hwtracing/intel_th/intel_th.h
@@ -115,6 +115,7 @@ intel_th_output_assigned(struct intel_th_device *thdev)
115 * @enable: enable tracing for a given output device 115 * @enable: enable tracing for a given output device
116 * @disable: disable tracing for a given output device 116 * @disable: disable tracing for a given output device
117 * @fops: file operations for device nodes 117 * @fops: file operations for device nodes
118 * @attr_group: attributes provided by the driver
118 * 119 *
119 * Callbacks @probe and @remove are required for all device types. 120 * Callbacks @probe and @remove are required for all device types.
120 * Switch device driver needs to fill in @assign, @enable and @disable 121 * Switch device driver needs to fill in @assign, @enable and @disable
@@ -139,6 +140,8 @@ struct intel_th_driver {
139 void (*deactivate)(struct intel_th_device *thdev); 140 void (*deactivate)(struct intel_th_device *thdev);
140 /* file_operations for those who want a device node */ 141 /* file_operations for those who want a device node */
141 const struct file_operations *fops; 142 const struct file_operations *fops;
143 /* optional attributes */
144 struct attribute_group *attr_group;
142 145
143 /* source ops */ 146 /* source ops */
144 int (*set_output)(struct intel_th_device *thdev, 147 int (*set_output)(struct intel_th_device *thdev,
@@ -148,6 +151,9 @@ struct intel_th_driver {
148#define to_intel_th_driver(_d) \ 151#define to_intel_th_driver(_d) \
149 container_of((_d), struct intel_th_driver, driver) 152 container_of((_d), struct intel_th_driver, driver)
150 153
154#define to_intel_th_driver_or_null(_d) \
155 ((_d) ? to_intel_th_driver(_d) : NULL)
156
151static inline struct intel_th_device * 157static inline struct intel_th_device *
152to_intel_th_hub(struct intel_th_device *thdev) 158to_intel_th_hub(struct intel_th_device *thdev)
153{ 159{
diff --git a/drivers/hwtracing/intel_th/msu.c b/drivers/hwtracing/intel_th/msu.c
index d2209147dc89..e8d55a153a65 100644
--- a/drivers/hwtracing/intel_th/msu.c
+++ b/drivers/hwtracing/intel_th/msu.c
@@ -122,7 +122,6 @@ struct msc {
122 atomic_t mmap_count; 122 atomic_t mmap_count;
123 struct mutex buf_mutex; 123 struct mutex buf_mutex;
124 124
125 struct mutex iter_mutex;
126 struct list_head iter_list; 125 struct list_head iter_list;
127 126
128 /* config */ 127 /* config */
@@ -257,23 +256,37 @@ static struct msc_iter *msc_iter_install(struct msc *msc)
257 256
258 iter = kzalloc(sizeof(*iter), GFP_KERNEL); 257 iter = kzalloc(sizeof(*iter), GFP_KERNEL);
259 if (!iter) 258 if (!iter)
260 return NULL; 259 return ERR_PTR(-ENOMEM);
260
261 mutex_lock(&msc->buf_mutex);
262
263 /*
264 * Reading and tracing are mutually exclusive; if msc is
265 * enabled, open() will fail; otherwise existing readers
266 * will prevent enabling the msc and the rest of fops don't
267 * need to worry about it.
268 */
269 if (msc->enabled) {
270 kfree(iter);
271 iter = ERR_PTR(-EBUSY);
272 goto unlock;
273 }
261 274
262 msc_iter_init(iter); 275 msc_iter_init(iter);
263 iter->msc = msc; 276 iter->msc = msc;
264 277
265 mutex_lock(&msc->iter_mutex);
266 list_add_tail(&iter->entry, &msc->iter_list); 278 list_add_tail(&iter->entry, &msc->iter_list);
267 mutex_unlock(&msc->iter_mutex); 279unlock:
280 mutex_unlock(&msc->buf_mutex);
268 281
269 return iter; 282 return iter;
270} 283}
271 284
272static void msc_iter_remove(struct msc_iter *iter, struct msc *msc) 285static void msc_iter_remove(struct msc_iter *iter, struct msc *msc)
273{ 286{
274 mutex_lock(&msc->iter_mutex); 287 mutex_lock(&msc->buf_mutex);
275 list_del(&iter->entry); 288 list_del(&iter->entry);
276 mutex_unlock(&msc->iter_mutex); 289 mutex_unlock(&msc->buf_mutex);
277 290
278 kfree(iter); 291 kfree(iter);
279} 292}
@@ -454,7 +467,6 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
454{ 467{
455 struct msc_window *win; 468 struct msc_window *win;
456 469
457 mutex_lock(&msc->buf_mutex);
458 list_for_each_entry(win, &msc->win_list, entry) { 470 list_for_each_entry(win, &msc->win_list, entry) {
459 unsigned int blk; 471 unsigned int blk;
460 size_t hw_sz = sizeof(struct msc_block_desc) - 472 size_t hw_sz = sizeof(struct msc_block_desc) -
@@ -466,7 +478,6 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
466 memset(&bdesc->hw_tag, 0, hw_sz); 478 memset(&bdesc->hw_tag, 0, hw_sz);
467 } 479 }
468 } 480 }
469 mutex_unlock(&msc->buf_mutex);
470} 481}
471 482
472/** 483/**
@@ -474,12 +485,15 @@ static void msc_buffer_clear_hw_header(struct msc *msc)
474 * @msc: the MSC device to configure 485 * @msc: the MSC device to configure
475 * 486 *
476 * Program storage mode, wrapping, burst length and trace buffer address 487 * Program storage mode, wrapping, burst length and trace buffer address
477 * into a given MSC. If msc::enabled is set, enable the trace, too. 488 * into a given MSC. Then, enable tracing and set msc::enabled.
489 * The latter is serialized on msc::buf_mutex, so make sure to hold it.
478 */ 490 */
479static int msc_configure(struct msc *msc) 491static int msc_configure(struct msc *msc)
480{ 492{
481 u32 reg; 493 u32 reg;
482 494
495 lockdep_assert_held(&msc->buf_mutex);
496
483 if (msc->mode > MSC_MODE_MULTI) 497 if (msc->mode > MSC_MODE_MULTI)
484 return -ENOTSUPP; 498 return -ENOTSUPP;
485 499
@@ -497,21 +511,19 @@ static int msc_configure(struct msc *msc)
497 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL); 511 reg = ioread32(msc->reg_base + REG_MSU_MSC0CTL);
498 reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD); 512 reg &= ~(MSC_MODE | MSC_WRAPEN | MSC_EN | MSC_RD_HDR_OVRD);
499 513
514 reg |= MSC_EN;
500 reg |= msc->mode << __ffs(MSC_MODE); 515 reg |= msc->mode << __ffs(MSC_MODE);
501 reg |= msc->burst_len << __ffs(MSC_LEN); 516 reg |= msc->burst_len << __ffs(MSC_LEN);
502 /*if (msc->mode == MSC_MODE_MULTI) 517
503 reg |= MSC_RD_HDR_OVRD; */
504 if (msc->wrap) 518 if (msc->wrap)
505 reg |= MSC_WRAPEN; 519 reg |= MSC_WRAPEN;
506 if (msc->enabled)
507 reg |= MSC_EN;
508 520
509 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL); 521 iowrite32(reg, msc->reg_base + REG_MSU_MSC0CTL);
510 522
511 if (msc->enabled) { 523 msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI;
512 msc->thdev->output.multiblock = msc->mode == MSC_MODE_MULTI; 524 intel_th_trace_enable(msc->thdev);
513 intel_th_trace_enable(msc->thdev); 525 msc->enabled = 1;
514 } 526
515 527
516 return 0; 528 return 0;
517} 529}
@@ -521,15 +533,14 @@ static int msc_configure(struct msc *msc)
521 * @msc: MSC device to disable 533 * @msc: MSC device to disable
522 * 534 *
523 * If @msc is enabled, disable tracing on the switch and then disable MSC 535 * If @msc is enabled, disable tracing on the switch and then disable MSC
524 * storage. 536 * storage. Caller must hold msc::buf_mutex.
525 */ 537 */
526static void msc_disable(struct msc *msc) 538static void msc_disable(struct msc *msc)
527{ 539{
528 unsigned long count; 540 unsigned long count;
529 u32 reg; 541 u32 reg;
530 542
531 if (!msc->enabled) 543 lockdep_assert_held(&msc->buf_mutex);
532 return;
533 544
534 intel_th_trace_disable(msc->thdev); 545 intel_th_trace_disable(msc->thdev);
535 546
@@ -569,33 +580,35 @@ static void msc_disable(struct msc *msc)
569static int intel_th_msc_activate(struct intel_th_device *thdev) 580static int intel_th_msc_activate(struct intel_th_device *thdev)
570{ 581{
571 struct msc *msc = dev_get_drvdata(&thdev->dev); 582 struct msc *msc = dev_get_drvdata(&thdev->dev);
572 int ret = 0; 583 int ret = -EBUSY;
573 584
574 if (!atomic_inc_unless_negative(&msc->user_count)) 585 if (!atomic_inc_unless_negative(&msc->user_count))
575 return -ENODEV; 586 return -ENODEV;
576 587
577 mutex_lock(&msc->iter_mutex); 588 mutex_lock(&msc->buf_mutex);
578 if (!list_empty(&msc->iter_list))
579 ret = -EBUSY;
580 mutex_unlock(&msc->iter_mutex);
581 589
582 if (ret) { 590 /* if there are readers, refuse */
583 atomic_dec(&msc->user_count); 591 if (list_empty(&msc->iter_list))
584 return ret; 592 ret = msc_configure(msc);
585 }
586 593
587 msc->enabled = 1; 594 mutex_unlock(&msc->buf_mutex);
595
596 if (ret)
597 atomic_dec(&msc->user_count);
588 598
589 return msc_configure(msc); 599 return ret;
590} 600}
591 601
592static void intel_th_msc_deactivate(struct intel_th_device *thdev) 602static void intel_th_msc_deactivate(struct intel_th_device *thdev)
593{ 603{
594 struct msc *msc = dev_get_drvdata(&thdev->dev); 604 struct msc *msc = dev_get_drvdata(&thdev->dev);
595 605
596 msc_disable(msc); 606 mutex_lock(&msc->buf_mutex);
597 607 if (msc->enabled) {
598 atomic_dec(&msc->user_count); 608 msc_disable(msc);
609 atomic_dec(&msc->user_count);
610 }
611 mutex_unlock(&msc->buf_mutex);
599} 612}
600 613
601/** 614/**
@@ -1035,8 +1048,8 @@ static int intel_th_msc_open(struct inode *inode, struct file *file)
1035 return -EPERM; 1048 return -EPERM;
1036 1049
1037 iter = msc_iter_install(msc); 1050 iter = msc_iter_install(msc);
1038 if (!iter) 1051 if (IS_ERR(iter))
1039 return -ENOMEM; 1052 return PTR_ERR(iter);
1040 1053
1041 file->private_data = iter; 1054 file->private_data = iter;
1042 1055
@@ -1101,11 +1114,6 @@ static ssize_t intel_th_msc_read(struct file *file, char __user *buf,
1101 if (!atomic_inc_unless_negative(&msc->user_count)) 1114 if (!atomic_inc_unless_negative(&msc->user_count))
1102 return 0; 1115 return 0;
1103 1116
1104 if (msc->enabled) {
1105 ret = -EBUSY;
1106 goto put_count;
1107 }
1108
1109 if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap) 1117 if (msc->mode == MSC_MODE_SINGLE && !msc->single_wrap)
1110 size = msc->single_sz; 1118 size = msc->single_sz;
1111 else 1119 else
@@ -1245,6 +1253,7 @@ static const struct file_operations intel_th_msc_fops = {
1245 .read = intel_th_msc_read, 1253 .read = intel_th_msc_read,
1246 .mmap = intel_th_msc_mmap, 1254 .mmap = intel_th_msc_mmap,
1247 .llseek = no_llseek, 1255 .llseek = no_llseek,
1256 .owner = THIS_MODULE,
1248}; 1257};
1249 1258
1250static int intel_th_msc_init(struct msc *msc) 1259static int intel_th_msc_init(struct msc *msc)
@@ -1254,8 +1263,6 @@ static int intel_th_msc_init(struct msc *msc)
1254 msc->mode = MSC_MODE_MULTI; 1263 msc->mode = MSC_MODE_MULTI;
1255 mutex_init(&msc->buf_mutex); 1264 mutex_init(&msc->buf_mutex);
1256 INIT_LIST_HEAD(&msc->win_list); 1265 INIT_LIST_HEAD(&msc->win_list);
1257
1258 mutex_init(&msc->iter_mutex);
1259 INIT_LIST_HEAD(&msc->iter_list); 1266 INIT_LIST_HEAD(&msc->iter_list);
1260 1267
1261 msc->burst_len = 1268 msc->burst_len =
@@ -1393,6 +1400,11 @@ nr_pages_store(struct device *dev, struct device_attribute *attr,
1393 do { 1400 do {
1394 end = memchr(p, ',', len); 1401 end = memchr(p, ',', len);
1395 s = kstrndup(p, end ? end - p : len, GFP_KERNEL); 1402 s = kstrndup(p, end ? end - p : len, GFP_KERNEL);
1403 if (!s) {
1404 ret = -ENOMEM;
1405 goto free_win;
1406 }
1407
1396 ret = kstrtoul(s, 10, &val); 1408 ret = kstrtoul(s, 10, &val);
1397 kfree(s); 1409 kfree(s);
1398 1410
@@ -1473,10 +1485,6 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
1473 if (err) 1485 if (err)
1474 return err; 1486 return err;
1475 1487
1476 err = sysfs_create_group(&dev->kobj, &msc_output_group);
1477 if (err)
1478 return err;
1479
1480 dev_set_drvdata(dev, msc); 1488 dev_set_drvdata(dev, msc);
1481 1489
1482 return 0; 1490 return 0;
@@ -1484,7 +1492,18 @@ static int intel_th_msc_probe(struct intel_th_device *thdev)
1484 1492
1485static void intel_th_msc_remove(struct intel_th_device *thdev) 1493static void intel_th_msc_remove(struct intel_th_device *thdev)
1486{ 1494{
1487 sysfs_remove_group(&thdev->dev.kobj, &msc_output_group); 1495 struct msc *msc = dev_get_drvdata(&thdev->dev);
1496 int ret;
1497
1498 intel_th_msc_deactivate(thdev);
1499
1500 /*
1501 * Buffers should not be used at this point except if the
1502 * output character device is still open and the parent
1503 * device gets detached from its bus, which is a FIXME.
1504 */
1505 ret = msc_buffer_free_unless_used(msc);
1506 WARN_ON_ONCE(ret);
1488} 1507}
1489 1508
1490static struct intel_th_driver intel_th_msc_driver = { 1509static struct intel_th_driver intel_th_msc_driver = {
@@ -1493,6 +1512,7 @@ static struct intel_th_driver intel_th_msc_driver = {
1493 .activate = intel_th_msc_activate, 1512 .activate = intel_th_msc_activate,
1494 .deactivate = intel_th_msc_deactivate, 1513 .deactivate = intel_th_msc_deactivate,
1495 .fops = &intel_th_msc_fops, 1514 .fops = &intel_th_msc_fops,
1515 .attr_group = &msc_output_group,
1496 .driver = { 1516 .driver = {
1497 .name = "msc", 1517 .name = "msc",
1498 .owner = THIS_MODULE, 1518 .owner = THIS_MODULE,
diff --git a/drivers/hwtracing/intel_th/pci.c b/drivers/hwtracing/intel_th/pci.c
index bca7a2ac00d6..5e25c7eb31d3 100644
--- a/drivers/hwtracing/intel_th/pci.c
+++ b/drivers/hwtracing/intel_th/pci.c
@@ -75,6 +75,11 @@ static const struct pci_device_id intel_th_pci_id_table[] = {
75 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80), 75 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x0a80),
76 .driver_data = (kernel_ulong_t)0, 76 .driver_data = (kernel_ulong_t)0,
77 }, 77 },
78 {
79 /* Broxton B-step */
80 PCI_DEVICE(PCI_VENDOR_ID_INTEL, 0x1a8e),
81 .driver_data = (kernel_ulong_t)0,
82 },
78 { 0 }, 83 { 0 },
79}; 84};
80 85
diff --git a/drivers/hwtracing/intel_th/pti.c b/drivers/hwtracing/intel_th/pti.c
index 57cbfdcc7ef0..35738b5bfccd 100644
--- a/drivers/hwtracing/intel_th/pti.c
+++ b/drivers/hwtracing/intel_th/pti.c
@@ -200,7 +200,6 @@ static int intel_th_pti_probe(struct intel_th_device *thdev)
200 struct resource *res; 200 struct resource *res;
201 struct pti_device *pti; 201 struct pti_device *pti;
202 void __iomem *base; 202 void __iomem *base;
203 int ret;
204 203
205 res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0); 204 res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
206 if (!res) 205 if (!res)
@@ -219,10 +218,6 @@ static int intel_th_pti_probe(struct intel_th_device *thdev)
219 218
220 read_hw_config(pti); 219 read_hw_config(pti);
221 220
222 ret = sysfs_create_group(&dev->kobj, &pti_output_group);
223 if (ret)
224 return ret;
225
226 dev_set_drvdata(dev, pti); 221 dev_set_drvdata(dev, pti);
227 222
228 return 0; 223 return 0;
@@ -237,6 +232,7 @@ static struct intel_th_driver intel_th_pti_driver = {
237 .remove = intel_th_pti_remove, 232 .remove = intel_th_pti_remove,
238 .activate = intel_th_pti_activate, 233 .activate = intel_th_pti_activate,
239 .deactivate = intel_th_pti_deactivate, 234 .deactivate = intel_th_pti_deactivate,
235 .attr_group = &pti_output_group,
240 .driver = { 236 .driver = {
241 .name = "pti", 237 .name = "pti",
242 .owner = THIS_MODULE, 238 .owner = THIS_MODULE,
diff --git a/drivers/hwtracing/stm/core.c b/drivers/hwtracing/stm/core.c
index de80d45d8df9..ff31108b066f 100644
--- a/drivers/hwtracing/stm/core.c
+++ b/drivers/hwtracing/stm/core.c
@@ -67,9 +67,24 @@ static ssize_t channels_show(struct device *dev,
67 67
68static DEVICE_ATTR_RO(channels); 68static DEVICE_ATTR_RO(channels);
69 69
70static ssize_t hw_override_show(struct device *dev,
71 struct device_attribute *attr,
72 char *buf)
73{
74 struct stm_device *stm = to_stm_device(dev);
75 int ret;
76
77 ret = sprintf(buf, "%u\n", stm->data->hw_override);
78
79 return ret;
80}
81
82static DEVICE_ATTR_RO(hw_override);
83
70static struct attribute *stm_attrs[] = { 84static struct attribute *stm_attrs[] = {
71 &dev_attr_masters.attr, 85 &dev_attr_masters.attr,
72 &dev_attr_channels.attr, 86 &dev_attr_channels.attr,
87 &dev_attr_hw_override.attr,
73 NULL, 88 NULL,
74}; 89};
75 90
@@ -546,8 +561,6 @@ static int stm_char_policy_set_ioctl(struct stm_file *stmf, void __user *arg)
546 if (ret) 561 if (ret)
547 goto err_free; 562 goto err_free;
548 563
549 ret = 0;
550
551 if (stm->data->link) 564 if (stm->data->link)
552 ret = stm->data->link(stm->data, stmf->output.master, 565 ret = stm->data->link(stm->data, stmf->output.master,
553 stmf->output.channel); 566 stmf->output.channel);
@@ -668,18 +681,11 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
668 stm->dev.parent = parent; 681 stm->dev.parent = parent;
669 stm->dev.release = stm_device_release; 682 stm->dev.release = stm_device_release;
670 683
671 err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
672 if (err)
673 goto err_device;
674
675 err = device_add(&stm->dev);
676 if (err)
677 goto err_device;
678
679 mutex_init(&stm->link_mutex); 684 mutex_init(&stm->link_mutex);
680 spin_lock_init(&stm->link_lock); 685 spin_lock_init(&stm->link_lock);
681 INIT_LIST_HEAD(&stm->link_list); 686 INIT_LIST_HEAD(&stm->link_list);
682 687
688 /* initialize the object before it is accessible via sysfs */
683 spin_lock_init(&stm->mc_lock); 689 spin_lock_init(&stm->mc_lock);
684 mutex_init(&stm->policy_mutex); 690 mutex_init(&stm->policy_mutex);
685 stm->sw_nmasters = nmasters; 691 stm->sw_nmasters = nmasters;
@@ -687,9 +693,19 @@ int stm_register_device(struct device *parent, struct stm_data *stm_data,
687 stm->data = stm_data; 693 stm->data = stm_data;
688 stm_data->stm = stm; 694 stm_data->stm = stm;
689 695
696 err = kobject_set_name(&stm->dev.kobj, "%s", stm_data->name);
697 if (err)
698 goto err_device;
699
700 err = device_add(&stm->dev);
701 if (err)
702 goto err_device;
703
690 return 0; 704 return 0;
691 705
692err_device: 706err_device:
707 unregister_chrdev(stm->major, stm_data->name);
708
693 /* matches device_initialize() above */ 709 /* matches device_initialize() above */
694 put_device(&stm->dev); 710 put_device(&stm->dev);
695err_free: 711err_free:
diff --git a/drivers/hwtracing/stm/dummy_stm.c b/drivers/hwtracing/stm/dummy_stm.c
index 310adf57e7a1..a86612d989f9 100644
--- a/drivers/hwtracing/stm/dummy_stm.c
+++ b/drivers/hwtracing/stm/dummy_stm.c
@@ -46,9 +46,7 @@ static struct stm_data dummy_stm[DUMMY_STM_MAX];
46 46
47static int nr_dummies = 4; 47static int nr_dummies = 4;
48 48
49module_param(nr_dummies, int, 0600); 49module_param(nr_dummies, int, 0400);
50
51static unsigned int dummy_stm_nr;
52 50
53static unsigned int fail_mode; 51static unsigned int fail_mode;
54 52
@@ -65,12 +63,12 @@ static int dummy_stm_link(struct stm_data *data, unsigned int master,
65 63
66static int dummy_stm_init(void) 64static int dummy_stm_init(void)
67{ 65{
68 int i, ret = -ENOMEM, __nr_dummies = ACCESS_ONCE(nr_dummies); 66 int i, ret = -ENOMEM;
69 67
70 if (__nr_dummies < 0 || __nr_dummies > DUMMY_STM_MAX) 68 if (nr_dummies < 0 || nr_dummies > DUMMY_STM_MAX)
71 return -EINVAL; 69 return -EINVAL;
72 70
73 for (i = 0; i < __nr_dummies; i++) { 71 for (i = 0; i < nr_dummies; i++) {
74 dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i); 72 dummy_stm[i].name = kasprintf(GFP_KERNEL, "dummy_stm.%d", i);
75 if (!dummy_stm[i].name) 73 if (!dummy_stm[i].name)
76 goto fail_unregister; 74 goto fail_unregister;
@@ -86,8 +84,6 @@ static int dummy_stm_init(void)
86 goto fail_free; 84 goto fail_free;
87 } 85 }
88 86
89 dummy_stm_nr = __nr_dummies;
90
91 return 0; 87 return 0;
92 88
93fail_unregister: 89fail_unregister:
@@ -105,7 +101,7 @@ static void dummy_stm_exit(void)
105{ 101{
106 int i; 102 int i;
107 103
108 for (i = 0; i < dummy_stm_nr; i++) { 104 for (i = 0; i < nr_dummies; i++) {
109 stm_unregister_device(&dummy_stm[i]); 105 stm_unregister_device(&dummy_stm[i]);
110 kfree(dummy_stm[i].name); 106 kfree(dummy_stm[i].name);
111 } 107 }
diff --git a/drivers/hwtracing/stm/heartbeat.c b/drivers/hwtracing/stm/heartbeat.c
index 0133571b506f..3da7b673aab2 100644
--- a/drivers/hwtracing/stm/heartbeat.c
+++ b/drivers/hwtracing/stm/heartbeat.c
@@ -26,7 +26,7 @@
26static int nr_devs = 4; 26static int nr_devs = 4;
27static int interval_ms = 10; 27static int interval_ms = 10;
28 28
29module_param(nr_devs, int, 0600); 29module_param(nr_devs, int, 0400);
30module_param(interval_ms, int, 0600); 30module_param(interval_ms, int, 0600);
31 31
32static struct stm_heartbeat { 32static struct stm_heartbeat {
@@ -35,8 +35,6 @@ static struct stm_heartbeat {
35 unsigned int active; 35 unsigned int active;
36} stm_heartbeat[STM_HEARTBEAT_MAX]; 36} stm_heartbeat[STM_HEARTBEAT_MAX];
37 37
38static unsigned int nr_instances;
39
40static const char str[] = "heartbeat stm source driver is here to serve you"; 38static const char str[] = "heartbeat stm source driver is here to serve you";
41 39
42static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr) 40static enum hrtimer_restart stm_heartbeat_hrtimer_handler(struct hrtimer *hr)
@@ -74,12 +72,12 @@ static void stm_heartbeat_unlink(struct stm_source_data *data)
74 72
75static int stm_heartbeat_init(void) 73static int stm_heartbeat_init(void)
76{ 74{
77 int i, ret = -ENOMEM, __nr_instances = ACCESS_ONCE(nr_devs); 75 int i, ret = -ENOMEM;
78 76
79 if (__nr_instances < 0 || __nr_instances > STM_HEARTBEAT_MAX) 77 if (nr_devs < 0 || nr_devs > STM_HEARTBEAT_MAX)
80 return -EINVAL; 78 return -EINVAL;
81 79
82 for (i = 0; i < __nr_instances; i++) { 80 for (i = 0; i < nr_devs; i++) {
83 stm_heartbeat[i].data.name = 81 stm_heartbeat[i].data.name =
84 kasprintf(GFP_KERNEL, "heartbeat.%d", i); 82 kasprintf(GFP_KERNEL, "heartbeat.%d", i);
85 if (!stm_heartbeat[i].data.name) 83 if (!stm_heartbeat[i].data.name)
@@ -98,8 +96,6 @@ static int stm_heartbeat_init(void)
98 goto fail_free; 96 goto fail_free;
99 } 97 }
100 98
101 nr_instances = __nr_instances;
102
103 return 0; 99 return 0;
104 100
105fail_unregister: 101fail_unregister:
@@ -116,7 +112,7 @@ static void stm_heartbeat_exit(void)
116{ 112{
117 int i; 113 int i;
118 114
119 for (i = 0; i < nr_instances; i++) { 115 for (i = 0; i < nr_devs; i++) {
120 stm_source_unregister_device(&stm_heartbeat[i].data); 116 stm_source_unregister_device(&stm_heartbeat[i].data);
121 kfree(stm_heartbeat[i].data.name); 117 kfree(stm_heartbeat[i].data.name);
122 } 118 }
diff --git a/drivers/hwtracing/stm/policy.c b/drivers/hwtracing/stm/policy.c
index 1db189657b2b..6c0ae2996326 100644
--- a/drivers/hwtracing/stm/policy.c
+++ b/drivers/hwtracing/stm/policy.c
@@ -107,8 +107,7 @@ stp_policy_node_masters_store(struct config_item *item, const char *page,
107 goto unlock; 107 goto unlock;
108 108
109 /* must be within [sw_start..sw_end], which is an inclusive range */ 109 /* must be within [sw_start..sw_end], which is an inclusive range */
110 if (first > INT_MAX || last > INT_MAX || first > last || 110 if (first > last || first < stm->data->sw_start ||
111 first < stm->data->sw_start ||
112 last > stm->data->sw_end) { 111 last > stm->data->sw_end) {
113 ret = -ERANGE; 112 ret = -ERANGE;
114 goto unlock; 113 goto unlock;
@@ -342,7 +341,7 @@ stp_policies_make(struct config_group *group, const char *name)
342 return ERR_PTR(-EINVAL); 341 return ERR_PTR(-EINVAL);
343 } 342 }
344 343
345 *p++ = '\0'; 344 *p = '\0';
346 345
347 stm = stm_find_device(devname); 346 stm = stm_find_device(devname);
348 kfree(devname); 347 kfree(devname);