author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-14 19:43:47 -0500
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-14 19:43:47 -0500
commit    6ae840e7cc4be0be3aa40d9f67c35c75cfc67d83 (patch)
tree      9c83c87a8670ef678d95f8d6f76a07f24a09a49f /drivers
parent    e6b5be2be4e30037eb551e0ed09dd97bd00d85d3 (diff)
parent    91905b6f4afe51e23a3f58df93e4cdc5e49cf40c (diff)
Merge tag 'char-misc-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc
Pull char/misc driver updates from Greg KH:
 "Here's the big char/misc driver update for 3.19-rc1.

  Lots of little things all over the place in different drivers, and a
  new subsystem, "coresight", has been added.  Full details are in the
  shortlog"

* tag 'char-misc-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/char-misc: (73 commits)
  parport: parport_pc, do not remove parent devices early
  spmi: Remove shutdown/suspend/resume kernel-doc
  carma-fpga-program: drop videobuf dependency
  carma-fpga: drop videobuf dependency
  carma-fpga-program.c: fix compile errors
  i8k: Fix temperature bug handling in i8k_get_temp()
  cxl: Name interrupts in /proc/interrupt
  CXL: Return error to PSL if IRQ demultiplexing fails & print clearer warning
  coresight-replicator: remove .owner field for driver
  coresight: fixed comments in coresight.h
  coresight: fix typo in comment in coresight-priv.h
  coresight: bindings for coresight drivers
  coresight: Adding ABI documentation
  w1: support auto-load of w1_bq27000 module.
  w1: avoid potential u16 overflow
  cn: verify msg->len before making callback
  mei: export fw status registers through sysfs
  mei: read and print all six FW status registers
  mei: txe: add cherrytrail device id
  mei: kill cached host and me csr values
  ...
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/Makefile | 1
-rw-r--r--  drivers/amba/bus.c | 2
-rw-r--r--  drivers/char/hangcheck-timer.c | 4
-rw-r--r--  drivers/char/i8k.c | 26
-rw-r--r--  drivers/connector/connector.c | 6
-rw-r--r--  drivers/coresight/Makefile | 11
-rw-r--r--  drivers/coresight/coresight-etb10.c | 537
-rw-r--r--  drivers/coresight/coresight-etm-cp14.c | 591
-rw-r--r--  drivers/coresight/coresight-etm.h | 251
-rw-r--r--  drivers/coresight/coresight-etm3x.c | 1928
-rw-r--r--  drivers/coresight/coresight-funnel.c | 268
-rw-r--r--  drivers/coresight/coresight-priv.h | 63
-rw-r--r--  drivers/coresight/coresight-replicator.c | 137
-rw-r--r--  drivers/coresight/coresight-tmc.c | 776
-rw-r--r--  drivers/coresight/coresight-tpiu.c | 217
-rw-r--r--  drivers/coresight/coresight.c | 717
-rw-r--r--  drivers/coresight/of_coresight.c | 204
-rw-r--r--  drivers/extcon/extcon-class.c | 14
-rw-r--r--  drivers/extcon/extcon-max14577.c | 2
-rw-r--r--  drivers/extcon/extcon-max77693.c | 12
-rw-r--r--  drivers/hv/channel_mgmt.c | 11
-rw-r--r--  drivers/hv/hv_balloon.c | 10
-rw-r--r--  drivers/hv/hv_kvp.c | 9
-rw-r--r--  drivers/hv/hv_snapshot.c | 28
-rw-r--r--  drivers/misc/atmel-ssc.c | 4
-rw-r--r--  drivers/misc/carma/Kconfig | 6
-rw-r--r--  drivers/misc/carma/carma-fpga-program.c | 104
-rw-r--r--  drivers/misc/carma/carma-fpga.c | 98
-rw-r--r--  drivers/misc/fuse/Makefile | 1
-rw-r--r--  drivers/misc/genwqe/card_utils.c | 2
-rw-r--r--  drivers/misc/mei/amthif.c | 34
-rw-r--r--  drivers/misc/mei/bus.c | 4
-rw-r--r--  drivers/misc/mei/client.c | 2
-rw-r--r--  drivers/misc/mei/debugfs.c | 8
-rw-r--r--  drivers/misc/mei/hbm.c | 23
-rw-r--r--  drivers/misc/mei/hbm.h | 4
-rw-r--r--  drivers/misc/mei/hw-me-regs.h | 12
-rw-r--r--  drivers/misc/mei/hw-me.c | 49
-rw-r--r--  drivers/misc/mei/hw-me.h | 10
-rw-r--r--  drivers/misc/mei/hw-txe.c | 7
-rw-r--r--  drivers/misc/mei/init.c | 38
-rw-r--r--  drivers/misc/mei/interrupt.c | 12
-rw-r--r--  drivers/misc/mei/main.c | 43
-rw-r--r--  drivers/misc/mei/mei_dev.h | 39
-rw-r--r--  drivers/misc/mei/nfc.c | 52
-rw-r--r--  drivers/misc/mei/pci-me.c | 12
-rw-r--r--  drivers/misc/mei/pci-txe.c | 1
-rw-r--r--  drivers/misc/mei/wd.c | 9
-rw-r--r--  drivers/misc/pch_phub.c | 2
-rw-r--r--  drivers/parport/parport_pc.c | 7
-rw-r--r--  drivers/pcmcia/sa1111_badge4.c | 2
-rw-r--r--  drivers/pcmcia/sa1111_generic.c | 4
-rw-r--r--  drivers/pcmcia/sa1111_generic.h | 4
-rw-r--r--  drivers/pcmcia/sa1111_jornada720.c | 3
-rw-r--r--  drivers/uio/uio.c | 4
-rw-r--r--  drivers/w1/masters/ds2490.c | 2
-rw-r--r--  drivers/w1/slaves/w1_bq27000.c | 4
-rw-r--r--  drivers/w1/w1.c | 2
-rw-r--r--  drivers/w1/w1_family.h | 1
-rw-r--r--  drivers/w1/w1_netlink.c | 2
60 files changed, 6225 insertions, 211 deletions
diff --git a/drivers/Makefile b/drivers/Makefile
index ebee55537a05..628b512b625b 100644
--- a/drivers/Makefile
+++ b/drivers/Makefile
@@ -161,3 +161,4 @@ obj-$(CONFIG_POWERCAP) += powercap/
 obj-$(CONFIG_MCB)		+= mcb/
 obj-$(CONFIG_RAS)		+= ras/
 obj-$(CONFIG_THUNDERBOLT)	+= thunderbolt/
+obj-$(CONFIG_CORESIGHT)		+= coresight/
diff --git a/drivers/amba/bus.c b/drivers/amba/bus.c
index 80f4de729a86..52ddd9fbb55e 100644
--- a/drivers/amba/bus.c
+++ b/drivers/amba/bus.c
@@ -343,7 +343,7 @@ int amba_device_add(struct amba_device *dev, struct resource *parent)
 
 	amba_put_disable_pclk(dev);
 
-	if (cid == AMBA_CID)
+	if (cid == AMBA_CID || cid == CORESIGHT_CID)
 		dev->periphid = pid;
 
 	if (!dev->periphid)
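The hunk above lets the AMBA core auto-detect the peripheral ID for components that identify themselves with the CoreSight component ID as well as the classic PrimeCell one. Below is a minimal sketch of how such a component ID is assembled, assuming the standard PrimeCell layout (four byte-wide CID registers in the last 16 bytes of the component's register window); the helper name is made up for illustration and is not part of this patch.

#include <linux/io.h>
#include <linux/types.h>
#include <linux/amba/bus.h>

/* Hypothetical helper: build the 32-bit component ID from CID0..CID3. */
static u32 sketch_read_cid(void __iomem *base, resource_size_t size)
{
	u32 cid = 0;
	int i;

	for (i = 0; i < 4; i++)
		cid |= (readl(base + size - 0x10 + 4 * i) & 0xff) << (i * 8);

	/*
	 * 0xb105f00d is AMBA_CID; CoreSight components differ only in the
	 * class field, which is what CORESIGHT_CID captures.
	 */
	return cid;
}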
diff --git a/drivers/char/hangcheck-timer.c b/drivers/char/hangcheck-timer.c
index ebc4c73d8ca4..a7c5c59675f0 100644
--- a/drivers/char/hangcheck-timer.c
+++ b/drivers/char/hangcheck-timer.c
@@ -168,8 +168,8 @@ static int __init hangcheck_init(void)
 	printk("Hangcheck: starting hangcheck timer %s (tick is %d seconds, margin is %d seconds).\n",
 	       VERSION_STR, hangcheck_tick, hangcheck_margin);
 	hangcheck_tsc_margin =
-		(unsigned long long)(hangcheck_margin + hangcheck_tick);
-	hangcheck_tsc_margin *= (unsigned long long)TIMER_FREQ;
+		(unsigned long long)hangcheck_margin + hangcheck_tick;
+	hangcheck_tsc_margin *= TIMER_FREQ;
 
 	hangcheck_tsc = ktime_get_ns();
 	mod_timer(&hangcheck_ticktock, jiffies + (hangcheck_tick*HZ));
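The cast is moved so that the margin/tick sum is computed in 64 bits before the multiply, and the now-redundant cast on TIMER_FREQ is dropped. A standalone userspace sketch, with contrived values, of why the placement of the cast matters:

#include <stdio.h>

int main(void)
{
	int margin = 2000000000, tick = 2000000000;	/* contrived, to expose the promotion */

	/*
	 * Cast applied to the result: the addition still happens in 32-bit
	 * int and can overflow (undefined behaviour) before the widening.
	 */
	unsigned long long cast_after = (unsigned long long)(margin + tick);
	/*
	 * Cast applied to one operand: the whole expression is evaluated as
	 * unsigned long long, so there is no intermediate overflow.
	 */
	unsigned long long cast_before = (unsigned long long)margin + tick;

	printf("%llu vs %llu\n", cast_after, cast_before);
	return 0;
}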
diff --git a/drivers/char/i8k.c b/drivers/char/i8k.c
index 34174d01462e..e34a019eb930 100644
--- a/drivers/char/i8k.c
+++ b/drivers/char/i8k.c
@@ -298,7 +298,7 @@ static int i8k_get_temp(int sensor)
 	int temp;
 
 #ifdef I8K_TEMPERATURE_BUG
-	static int prev[4];
+	static int prev[4] = { I8K_MAX_TEMP+1, I8K_MAX_TEMP+1, I8K_MAX_TEMP+1, I8K_MAX_TEMP+1 };
 #endif
 	regs.ebx = sensor & 0xff;
 	rc = i8k_smm(&regs);
@@ -317,10 +317,12 @@ static int i8k_get_temp(int sensor)
 	 */
 	if (temp > I8K_MAX_TEMP) {
 		temp = prev[sensor];
-		prev[sensor] = I8K_MAX_TEMP;
+		prev[sensor] = I8K_MAX_TEMP+1;
 	} else {
 		prev[sensor] = temp;
 	}
+	if (temp > I8K_MAX_TEMP)
+		return -ERANGE;
 #endif
 
 	return temp;
@@ -499,6 +501,8 @@ static ssize_t i8k_hwmon_show_temp(struct device *dev,
 	int temp;
 
 	temp = i8k_get_temp(index);
+	if (temp == -ERANGE)
+		return -EINVAL;
 	if (temp < 0)
 		return temp;
 	return sprintf(buf, "%d\n", temp * 1000);
@@ -610,17 +614,17 @@ static int __init i8k_init_hwmon(void)
 
 	/* CPU temperature attributes, if temperature reading is OK */
 	err = i8k_get_temp(0);
-	if (err >= 0)
+	if (err >= 0 || err == -ERANGE)
 		i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP1;
 	/* check for additional temperature sensors */
 	err = i8k_get_temp(1);
-	if (err >= 0)
+	if (err >= 0 || err == -ERANGE)
 		i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP2;
 	err = i8k_get_temp(2);
-	if (err >= 0)
+	if (err >= 0 || err == -ERANGE)
 		i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP3;
 	err = i8k_get_temp(3);
-	if (err >= 0)
+	if (err >= 0 || err == -ERANGE)
 		i8k_hwmon_flags |= I8K_HWMON_HAVE_TEMP4;
 
 	/* Left fan attributes, if left fan is present */
@@ -711,6 +715,14 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 		.driver_data = (void *)&i8k_config_data[DELL_LATITUDE_D520],
 	},
 	{
+		.ident = "Dell Latitude E6440",
+		.matches = {
+			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
+			DMI_MATCH(DMI_PRODUCT_NAME, "Latitude E6440"),
+		},
+		.driver_data = (void *)&i8k_config_data[DELL_LATITUDE_E6540],
+	},
+	{
 		.ident = "Dell Latitude E6540",
 		.matches = {
 			DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."),
@@ -788,6 +800,8 @@ static struct dmi_system_id i8k_dmi_table[] __initdata = {
 	{ }
 };
 
+MODULE_DEVICE_TABLE(dmi, i8k_dmi_table);
+
 /*
  * Probe for the presence of a supported laptop.
  */
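With the sentinel now being I8K_MAX_TEMP+1 and bogus readings reported as -ERANGE, callers can tell "sensor absent" apart from "sensor present but the current sample is junk". A hypothetical helper (not part of the patch) showing how the three outcomes are meant to be consumed, mirroring the i8k_init_hwmon() checks above:

/* Hypothetical consumer of i8k_get_temp() after this change. */
static int i8k_temp_sensor_usable(int sensor)
{
	int temp = i8k_get_temp(sensor);

	if (temp >= 0)
		return 1;	/* sensor present, reading valid */
	if (temp == -ERANGE)
		return 1;	/* sensor present, this sample out of range */
	return 0;		/* SMM call failed: treat sensor as absent */
}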
diff --git a/drivers/connector/connector.c b/drivers/connector/connector.c
index f612d68629dc..30f522848c73 100644
--- a/drivers/connector/connector.c
+++ b/drivers/connector/connector.c
@@ -141,12 +141,18 @@ EXPORT_SYMBOL_GPL(cn_netlink_send);
  */
 static int cn_call_callback(struct sk_buff *skb)
 {
+	struct nlmsghdr *nlh;
 	struct cn_callback_entry *i, *cbq = NULL;
 	struct cn_dev *dev = &cdev;
 	struct cn_msg *msg = nlmsg_data(nlmsg_hdr(skb));
 	struct netlink_skb_parms *nsp = &NETLINK_CB(skb);
 	int err = -ENODEV;
 
+	/* verify msg->len is within skb */
+	nlh = nlmsg_hdr(skb);
+	if (nlh->nlmsg_len < NLMSG_HDRLEN + sizeof(struct cn_msg) + msg->len)
+		return -EINVAL;
+
 	spin_lock_bh(&dev->cbdev->queue_lock);
 	list_for_each_entry(i, &dev->cbdev->queue_list, callback_entry) {
 		if (cn_cb_equal(&i->id.id, &msg->id)) {
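The new check rejects connector messages whose declared payload length would run past the data actually present in the netlink message. A minimal sketch of the bound being enforced, with the skb parsing factored out; the function name is illustrative only and does not exist in the patch:

#include <linux/types.h>
#include <linux/netlink.h>
#include <linux/connector.h>

/*
 * Illustrative only: the skb carries [nlmsghdr][cn_msg][payload], and the
 * payload claimed by msg->len must fit inside what nlmsg_len says exists.
 */
static bool cn_msg_len_ok(const struct nlmsghdr *nlh, const struct cn_msg *msg)
{
	return nlh->nlmsg_len >= NLMSG_HDRLEN + sizeof(*msg) + msg->len;
}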
diff --git a/drivers/coresight/Makefile b/drivers/coresight/Makefile
new file mode 100644
index 000000000000..4b4bec890ef5
--- /dev/null
+++ b/drivers/coresight/Makefile
@@ -0,0 +1,11 @@
+#
+# Makefile for CoreSight drivers.
+#
+obj-$(CONFIG_CORESIGHT) += coresight.o
+obj-$(CONFIG_OF) += of_coresight.o
+obj-$(CONFIG_CORESIGHT_LINK_AND_SINK_TMC) += coresight-tmc.o
+obj-$(CONFIG_CORESIGHT_SINK_TPIU) += coresight-tpiu.o
+obj-$(CONFIG_CORESIGHT_SINK_ETBV10) += coresight-etb10.o
+obj-$(CONFIG_CORESIGHT_LINKS_AND_SINKS) += coresight-funnel.o \
+					   coresight-replicator.o
+obj-$(CONFIG_CORESIGHT_SOURCE_ETM3X) += coresight-etm3x.o coresight-etm-cp14.o
diff --git a/drivers/coresight/coresight-etb10.c b/drivers/coresight/coresight-etb10.c
new file mode 100644
index 000000000000..c922d4aded8a
--- /dev/null
+++ b/drivers/coresight/coresight-etb10.c
@@ -0,0 +1,537 @@
1/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/io.h>
19#include <linux/err.h>
20#include <linux/fs.h>
21#include <linux/miscdevice.h>
22#include <linux/uaccess.h>
23#include <linux/slab.h>
24#include <linux/spinlock.h>
25#include <linux/clk.h>
26#include <linux/seq_file.h>
27#include <linux/coresight.h>
28#include <linux/amba/bus.h>
29
30#include "coresight-priv.h"
31
32#define ETB_RAM_DEPTH_REG 0x004
33#define ETB_STATUS_REG 0x00c
34#define ETB_RAM_READ_DATA_REG 0x010
35#define ETB_RAM_READ_POINTER 0x014
36#define ETB_RAM_WRITE_POINTER 0x018
37#define ETB_TRG 0x01c
38#define ETB_CTL_REG 0x020
39#define ETB_RWD_REG 0x024
40#define ETB_FFSR 0x300
41#define ETB_FFCR 0x304
42#define ETB_ITMISCOP0 0xee0
43#define ETB_ITTRFLINACK 0xee4
44#define ETB_ITTRFLIN 0xee8
45#define ETB_ITATBDATA0 0xeeC
46#define ETB_ITATBCTR2 0xef0
47#define ETB_ITATBCTR1 0xef4
48#define ETB_ITATBCTR0 0xef8
49
50/* register description */
51/* STS - 0x00C */
52#define ETB_STATUS_RAM_FULL BIT(0)
53/* CTL - 0x020 */
54#define ETB_CTL_CAPT_EN BIT(0)
55/* FFCR - 0x304 */
56#define ETB_FFCR_EN_FTC BIT(0)
57#define ETB_FFCR_FON_MAN BIT(6)
58#define ETB_FFCR_STOP_FI BIT(12)
59#define ETB_FFCR_STOP_TRIGGER BIT(13)
60
61#define ETB_FFCR_BIT 6
62#define ETB_FFSR_BIT 1
63#define ETB_FRAME_SIZE_WORDS 4
64
65/**
66 * struct etb_drvdata - specifics associated to an ETB component
67 * @base: memory mapped base address for this component.
68 * @dev: the device entity associated to this component.
69 * @csdev: component vitals needed by the framework.
70 * @miscdev: specifics to handle "/dev/xyz.etb" entry.
71 * @clk: the clock this component is associated to.
72 * @spinlock: only one at a time pls.
73 * @in_use: synchronise user space access to etb buffer.
74 * @buf: area of memory where ETB buffer content gets sent.
75 * @buffer_depth: size of @buf.
76 * @enable: this ETB is being used.
77 * @trigger_cntr: amount of words to store after a trigger.
78 */
79struct etb_drvdata {
80 void __iomem *base;
81 struct device *dev;
82 struct coresight_device *csdev;
83 struct miscdevice miscdev;
84 struct clk *clk;
85 spinlock_t spinlock;
86 atomic_t in_use;
87 u8 *buf;
88 u32 buffer_depth;
89 bool enable;
90 u32 trigger_cntr;
91};
92
93static unsigned int etb_get_buffer_depth(struct etb_drvdata *drvdata)
94{
95 int ret;
96 u32 depth = 0;
97
98 ret = clk_prepare_enable(drvdata->clk);
99 if (ret)
100 return ret;
101
102 /* RO registers don't need locking */
103 depth = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
104
105 clk_disable_unprepare(drvdata->clk);
106 return depth;
107}
108
109static void etb_enable_hw(struct etb_drvdata *drvdata)
110{
111 int i;
112 u32 depth;
113
114 CS_UNLOCK(drvdata->base);
115
116 depth = drvdata->buffer_depth;
117 /* reset write RAM pointer address */
118 writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
119 /* clear entire RAM buffer */
120 for (i = 0; i < depth; i++)
121 writel_relaxed(0x0, drvdata->base + ETB_RWD_REG);
122
123 /* reset write RAM pointer address */
124 writel_relaxed(0x0, drvdata->base + ETB_RAM_WRITE_POINTER);
125 /* reset read RAM pointer address */
126 writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
127
128 writel_relaxed(drvdata->trigger_cntr, drvdata->base + ETB_TRG);
129 writel_relaxed(ETB_FFCR_EN_FTC | ETB_FFCR_STOP_TRIGGER,
130 drvdata->base + ETB_FFCR);
131 /* ETB trace capture enable */
132 writel_relaxed(ETB_CTL_CAPT_EN, drvdata->base + ETB_CTL_REG);
133
134 CS_LOCK(drvdata->base);
135}
136
137static int etb_enable(struct coresight_device *csdev)
138{
139 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
140 int ret;
141 unsigned long flags;
142
143 ret = clk_prepare_enable(drvdata->clk);
144 if (ret)
145 return ret;
146
147 spin_lock_irqsave(&drvdata->spinlock, flags);
148 etb_enable_hw(drvdata);
149 drvdata->enable = true;
150 spin_unlock_irqrestore(&drvdata->spinlock, flags);
151
152 dev_info(drvdata->dev, "ETB enabled\n");
153 return 0;
154}
155
156static void etb_disable_hw(struct etb_drvdata *drvdata)
157{
158 u32 ffcr;
159
160 CS_UNLOCK(drvdata->base);
161
162 ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
163 /* stop formatter when a stop has completed */
164 ffcr |= ETB_FFCR_STOP_FI;
165 writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
166 /* manually generate a flush of the system */
167 ffcr |= ETB_FFCR_FON_MAN;
168 writel_relaxed(ffcr, drvdata->base + ETB_FFCR);
169
170 if (coresight_timeout(drvdata->base, ETB_FFCR, ETB_FFCR_BIT, 0)) {
171 dev_err(drvdata->dev,
172 "timeout observed when probing at offset %#x\n",
173 ETB_FFCR);
174 }
175
176 /* disable trace capture */
177 writel_relaxed(0x0, drvdata->base + ETB_CTL_REG);
178
179 if (coresight_timeout(drvdata->base, ETB_FFSR, ETB_FFSR_BIT, 1)) {
180 dev_err(drvdata->dev,
181 "timeout observed when probing at offset %#x\n",
182 ETB_FFCR);
183 }
184
185 CS_LOCK(drvdata->base);
186}
187
188static void etb_dump_hw(struct etb_drvdata *drvdata)
189{
190 int i;
191 u8 *buf_ptr;
192 u32 read_data, depth;
193 u32 read_ptr, write_ptr;
194 u32 frame_off, frame_endoff;
195
196 CS_UNLOCK(drvdata->base);
197
198 read_ptr = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
199 write_ptr = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
200
201 frame_off = write_ptr % ETB_FRAME_SIZE_WORDS;
202 frame_endoff = ETB_FRAME_SIZE_WORDS - frame_off;
203 if (frame_off) {
204 dev_err(drvdata->dev,
205 "write_ptr: %lu not aligned to formatter frame size\n",
206 (unsigned long)write_ptr);
207 dev_err(drvdata->dev, "frameoff: %lu, frame_endoff: %lu\n",
208 (unsigned long)frame_off, (unsigned long)frame_endoff);
209 write_ptr += frame_endoff;
210 }
211
212 if ((readl_relaxed(drvdata->base + ETB_STATUS_REG)
213 & ETB_STATUS_RAM_FULL) == 0)
214 writel_relaxed(0x0, drvdata->base + ETB_RAM_READ_POINTER);
215 else
216 writel_relaxed(write_ptr, drvdata->base + ETB_RAM_READ_POINTER);
217
218 depth = drvdata->buffer_depth;
219 buf_ptr = drvdata->buf;
220 for (i = 0; i < depth; i++) {
221 read_data = readl_relaxed(drvdata->base +
222 ETB_RAM_READ_DATA_REG);
223 *buf_ptr++ = read_data >> 0;
224 *buf_ptr++ = read_data >> 8;
225 *buf_ptr++ = read_data >> 16;
226 *buf_ptr++ = read_data >> 24;
227 }
228
229 if (frame_off) {
230 buf_ptr -= (frame_endoff * 4);
231 for (i = 0; i < frame_endoff; i++) {
232 *buf_ptr++ = 0x0;
233 *buf_ptr++ = 0x0;
234 *buf_ptr++ = 0x0;
235 *buf_ptr++ = 0x0;
236 }
237 }
238
239 writel_relaxed(read_ptr, drvdata->base + ETB_RAM_READ_POINTER);
240
241 CS_LOCK(drvdata->base);
242}
243
244static void etb_disable(struct coresight_device *csdev)
245{
246 struct etb_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
247 unsigned long flags;
248
249 spin_lock_irqsave(&drvdata->spinlock, flags);
250 etb_disable_hw(drvdata);
251 etb_dump_hw(drvdata);
252 drvdata->enable = false;
253 spin_unlock_irqrestore(&drvdata->spinlock, flags);
254
255 clk_disable_unprepare(drvdata->clk);
256
257 dev_info(drvdata->dev, "ETB disabled\n");
258}
259
260static const struct coresight_ops_sink etb_sink_ops = {
261 .enable = etb_enable,
262 .disable = etb_disable,
263};
264
265static const struct coresight_ops etb_cs_ops = {
266 .sink_ops = &etb_sink_ops,
267};
268
269static void etb_dump(struct etb_drvdata *drvdata)
270{
271 unsigned long flags;
272
273 spin_lock_irqsave(&drvdata->spinlock, flags);
274 if (drvdata->enable) {
275 etb_disable_hw(drvdata);
276 etb_dump_hw(drvdata);
277 etb_enable_hw(drvdata);
278 }
279 spin_unlock_irqrestore(&drvdata->spinlock, flags);
280
281 dev_info(drvdata->dev, "ETB dumped\n");
282}
283
284static int etb_open(struct inode *inode, struct file *file)
285{
286 struct etb_drvdata *drvdata = container_of(file->private_data,
287 struct etb_drvdata, miscdev);
288
289 if (atomic_cmpxchg(&drvdata->in_use, 0, 1))
290 return -EBUSY;
291
292 dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
293 return 0;
294}
295
296static ssize_t etb_read(struct file *file, char __user *data,
297 size_t len, loff_t *ppos)
298{
299 u32 depth;
300 struct etb_drvdata *drvdata = container_of(file->private_data,
301 struct etb_drvdata, miscdev);
302
303 etb_dump(drvdata);
304
305 depth = drvdata->buffer_depth;
306 if (*ppos + len > depth * 4)
307 len = depth * 4 - *ppos;
308
309 if (copy_to_user(data, drvdata->buf + *ppos, len)) {
310 dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
311 return -EFAULT;
312 }
313
314 *ppos += len;
315
316 dev_dbg(drvdata->dev, "%s: %d bytes copied, %d bytes left\n",
317 __func__, len, (int) (depth * 4 - *ppos));
318 return len;
319}
320
321static int etb_release(struct inode *inode, struct file *file)
322{
323 struct etb_drvdata *drvdata = container_of(file->private_data,
324 struct etb_drvdata, miscdev);
325 atomic_set(&drvdata->in_use, 0);
326
327 dev_dbg(drvdata->dev, "%s: released\n", __func__);
328 return 0;
329}
330
331static const struct file_operations etb_fops = {
332 .owner = THIS_MODULE,
333 .open = etb_open,
334 .read = etb_read,
335 .release = etb_release,
336 .llseek = no_llseek,
337};
338
339static ssize_t status_show(struct device *dev,
340 struct device_attribute *attr, char *buf)
341{
342 int ret;
343 unsigned long flags;
344 u32 etb_rdr, etb_sr, etb_rrp, etb_rwp;
345 u32 etb_trg, etb_cr, etb_ffsr, etb_ffcr;
346 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
347
348 ret = clk_prepare_enable(drvdata->clk);
349 if (ret)
350 goto out;
351
352 spin_lock_irqsave(&drvdata->spinlock, flags);
353 CS_UNLOCK(drvdata->base);
354
355 etb_rdr = readl_relaxed(drvdata->base + ETB_RAM_DEPTH_REG);
356 etb_sr = readl_relaxed(drvdata->base + ETB_STATUS_REG);
357 etb_rrp = readl_relaxed(drvdata->base + ETB_RAM_READ_POINTER);
358 etb_rwp = readl_relaxed(drvdata->base + ETB_RAM_WRITE_POINTER);
359 etb_trg = readl_relaxed(drvdata->base + ETB_TRG);
360 etb_cr = readl_relaxed(drvdata->base + ETB_CTL_REG);
361 etb_ffsr = readl_relaxed(drvdata->base + ETB_FFSR);
362 etb_ffcr = readl_relaxed(drvdata->base + ETB_FFCR);
363
364 CS_LOCK(drvdata->base);
365 spin_unlock_irqrestore(&drvdata->spinlock, flags);
366
367 clk_disable_unprepare(drvdata->clk);
368
369 return sprintf(buf,
370 "Depth:\t\t0x%x\n"
371 "Status:\t\t0x%x\n"
372 "RAM read ptr:\t0x%x\n"
373 "RAM wrt ptr:\t0x%x\n"
374 "Trigger cnt:\t0x%x\n"
375 "Control:\t0x%x\n"
376 "Flush status:\t0x%x\n"
377 "Flush ctrl:\t0x%x\n",
378 etb_rdr, etb_sr, etb_rrp, etb_rwp,
379 etb_trg, etb_cr, etb_ffsr, etb_ffcr);
380out:
381 return -EINVAL;
382}
383static DEVICE_ATTR_RO(status);
384
385static ssize_t trigger_cntr_show(struct device *dev,
386 struct device_attribute *attr, char *buf)
387{
388 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
389 unsigned long val = drvdata->trigger_cntr;
390
391 return sprintf(buf, "%#lx\n", val);
392}
393
394static ssize_t trigger_cntr_store(struct device *dev,
395 struct device_attribute *attr,
396 const char *buf, size_t size)
397{
398 int ret;
399 unsigned long val;
400 struct etb_drvdata *drvdata = dev_get_drvdata(dev->parent);
401
402 ret = kstrtoul(buf, 16, &val);
403 if (ret)
404 return ret;
405
406 drvdata->trigger_cntr = val;
407 return size;
408}
409static DEVICE_ATTR_RW(trigger_cntr);
410
411static struct attribute *coresight_etb_attrs[] = {
412 &dev_attr_trigger_cntr.attr,
413 &dev_attr_status.attr,
414 NULL,
415};
416ATTRIBUTE_GROUPS(coresight_etb);
417
418static int etb_probe(struct amba_device *adev, const struct amba_id *id)
419{
420 int ret;
421 void __iomem *base;
422 struct device *dev = &adev->dev;
423 struct coresight_platform_data *pdata = NULL;
424 struct etb_drvdata *drvdata;
425 struct resource *res = &adev->res;
426 struct coresight_desc *desc;
427 struct device_node *np = adev->dev.of_node;
428
429 if (np) {
430 pdata = of_get_coresight_platform_data(dev, np);
431 if (IS_ERR(pdata))
432 return PTR_ERR(pdata);
433 adev->dev.platform_data = pdata;
434 }
435
436 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
437 if (!drvdata)
438 return -ENOMEM;
439
440 drvdata->dev = &adev->dev;
441 dev_set_drvdata(dev, drvdata);
442
443 /* validity for the resource is already checked by the AMBA core */
444 base = devm_ioremap_resource(dev, res);
445 if (IS_ERR(base))
446 return PTR_ERR(base);
447
448 drvdata->base = base;
449
450 spin_lock_init(&drvdata->spinlock);
451
452 drvdata->clk = adev->pclk;
453 ret = clk_prepare_enable(drvdata->clk);
454 if (ret)
455 return ret;
456
457 drvdata->buffer_depth = etb_get_buffer_depth(drvdata);
458 clk_disable_unprepare(drvdata->clk);
459
460 if (drvdata->buffer_depth < 0)
461 return -EINVAL;
462
463 drvdata->buf = devm_kzalloc(dev,
464 drvdata->buffer_depth * 4, GFP_KERNEL);
465 if (!drvdata->buf)
466 return -ENOMEM;
467
468 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
469 if (!desc)
470 return -ENOMEM;
471
472 desc->type = CORESIGHT_DEV_TYPE_SINK;
473 desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
474 desc->ops = &etb_cs_ops;
475 desc->pdata = pdata;
476 desc->dev = dev;
477 desc->groups = coresight_etb_groups;
478 drvdata->csdev = coresight_register(desc);
479 if (IS_ERR(drvdata->csdev))
480 return PTR_ERR(drvdata->csdev);
481
482 drvdata->miscdev.name = pdata->name;
483 drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
484 drvdata->miscdev.fops = &etb_fops;
485 ret = misc_register(&drvdata->miscdev);
486 if (ret)
487 goto err_misc_register;
488
489 dev_info(dev, "ETB initialized\n");
490 return 0;
491
492err_misc_register:
493 coresight_unregister(drvdata->csdev);
494 return ret;
495}
496
497static int etb_remove(struct amba_device *adev)
498{
499 struct etb_drvdata *drvdata = amba_get_drvdata(adev);
500
501 misc_deregister(&drvdata->miscdev);
502 coresight_unregister(drvdata->csdev);
503 return 0;
504}
505
506static struct amba_id etb_ids[] = {
507 {
508 .id = 0x0003b907,
509 .mask = 0x0003ffff,
510 },
511 { 0, 0},
512};
513
514static struct amba_driver etb_driver = {
515 .drv = {
516 .name = "coresight-etb10",
517 .owner = THIS_MODULE,
518 },
519 .probe = etb_probe,
520 .remove = etb_remove,
521 .id_table = etb_ids,
522};
523
524static int __init etb_init(void)
525{
526 return amba_driver_register(&etb_driver);
527}
528module_init(etb_init);
529
530static void __exit etb_exit(void)
531{
532 amba_driver_unregister(&etb_driver);
533}
534module_exit(etb_exit);
535
536MODULE_LICENSE("GPL v2");
537MODULE_DESCRIPTION("CoreSight Embedded Trace Buffer driver");
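Because the driver registers a misc device backed by etb_read(), the captured trace can be pulled out from user space with a plain read loop. A sketch, assuming a hypothetical device name (the real name comes from the CoreSight platform data, typically the device-tree node name):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/20010000.etb", O_RDONLY);	/* hypothetical name */
	FILE *out = fopen("etb-trace.bin", "w");

	if (fd < 0 || !out)
		return 1;

	/*
	 * Each read calls etb_dump() in the driver and copies out buffer
	 * data until *ppos reaches buffer_depth * 4, when read returns 0.
	 */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, out);

	fclose(out);
	close(fd);
	return 0;
}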
diff --git a/drivers/coresight/coresight-etm-cp14.c b/drivers/coresight/coresight-etm-cp14.c
new file mode 100644
index 000000000000..12a220682117
--- /dev/null
+++ b/drivers/coresight/coresight-etm-cp14.c
@@ -0,0 +1,591 @@
1/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
14
15#include <linux/kernel.h>
16#include <linux/types.h>
17#include <linux/bug.h>
18#include <asm/hardware/cp14.h>
19
20#include "coresight-etm.h"
21
22int etm_readl_cp14(u32 reg, unsigned int *val)
23{
24 switch (reg) {
25 case ETMCR:
26 *val = etm_read(ETMCR);
27 return 0;
28 case ETMCCR:
29 *val = etm_read(ETMCCR);
30 return 0;
31 case ETMTRIGGER:
32 *val = etm_read(ETMTRIGGER);
33 return 0;
34 case ETMSR:
35 *val = etm_read(ETMSR);
36 return 0;
37 case ETMSCR:
38 *val = etm_read(ETMSCR);
39 return 0;
40 case ETMTSSCR:
41 *val = etm_read(ETMTSSCR);
42 return 0;
43 case ETMTEEVR:
44 *val = etm_read(ETMTEEVR);
45 return 0;
46 case ETMTECR1:
47 *val = etm_read(ETMTECR1);
48 return 0;
49 case ETMFFLR:
50 *val = etm_read(ETMFFLR);
51 return 0;
52 case ETMACVRn(0):
53 *val = etm_read(ETMACVR0);
54 return 0;
55 case ETMACVRn(1):
56 *val = etm_read(ETMACVR1);
57 return 0;
58 case ETMACVRn(2):
59 *val = etm_read(ETMACVR2);
60 return 0;
61 case ETMACVRn(3):
62 *val = etm_read(ETMACVR3);
63 return 0;
64 case ETMACVRn(4):
65 *val = etm_read(ETMACVR4);
66 return 0;
67 case ETMACVRn(5):
68 *val = etm_read(ETMACVR5);
69 return 0;
70 case ETMACVRn(6):
71 *val = etm_read(ETMACVR6);
72 return 0;
73 case ETMACVRn(7):
74 *val = etm_read(ETMACVR7);
75 return 0;
76 case ETMACVRn(8):
77 *val = etm_read(ETMACVR8);
78 return 0;
79 case ETMACVRn(9):
80 *val = etm_read(ETMACVR9);
81 return 0;
82 case ETMACVRn(10):
83 *val = etm_read(ETMACVR10);
84 return 0;
85 case ETMACVRn(11):
86 *val = etm_read(ETMACVR11);
87 return 0;
88 case ETMACVRn(12):
89 *val = etm_read(ETMACVR12);
90 return 0;
91 case ETMACVRn(13):
92 *val = etm_read(ETMACVR13);
93 return 0;
94 case ETMACVRn(14):
95 *val = etm_read(ETMACVR14);
96 return 0;
97 case ETMACVRn(15):
98 *val = etm_read(ETMACVR15);
99 return 0;
100 case ETMACTRn(0):
101 *val = etm_read(ETMACTR0);
102 return 0;
103 case ETMACTRn(1):
104 *val = etm_read(ETMACTR1);
105 return 0;
106 case ETMACTRn(2):
107 *val = etm_read(ETMACTR2);
108 return 0;
109 case ETMACTRn(3):
110 *val = etm_read(ETMACTR3);
111 return 0;
112 case ETMACTRn(4):
113 *val = etm_read(ETMACTR4);
114 return 0;
115 case ETMACTRn(5):
116 *val = etm_read(ETMACTR5);
117 return 0;
118 case ETMACTRn(6):
119 *val = etm_read(ETMACTR6);
120 return 0;
121 case ETMACTRn(7):
122 *val = etm_read(ETMACTR7);
123 return 0;
124 case ETMACTRn(8):
125 *val = etm_read(ETMACTR8);
126 return 0;
127 case ETMACTRn(9):
128 *val = etm_read(ETMACTR9);
129 return 0;
130 case ETMACTRn(10):
131 *val = etm_read(ETMACTR10);
132 return 0;
133 case ETMACTRn(11):
134 *val = etm_read(ETMACTR11);
135 return 0;
136 case ETMACTRn(12):
137 *val = etm_read(ETMACTR12);
138 return 0;
139 case ETMACTRn(13):
140 *val = etm_read(ETMACTR13);
141 return 0;
142 case ETMACTRn(14):
143 *val = etm_read(ETMACTR14);
144 return 0;
145 case ETMACTRn(15):
146 *val = etm_read(ETMACTR15);
147 return 0;
148 case ETMCNTRLDVRn(0):
149 *val = etm_read(ETMCNTRLDVR0);
150 return 0;
151 case ETMCNTRLDVRn(1):
152 *val = etm_read(ETMCNTRLDVR1);
153 return 0;
154 case ETMCNTRLDVRn(2):
155 *val = etm_read(ETMCNTRLDVR2);
156 return 0;
157 case ETMCNTRLDVRn(3):
158 *val = etm_read(ETMCNTRLDVR3);
159 return 0;
160 case ETMCNTENRn(0):
161 *val = etm_read(ETMCNTENR0);
162 return 0;
163 case ETMCNTENRn(1):
164 *val = etm_read(ETMCNTENR1);
165 return 0;
166 case ETMCNTENRn(2):
167 *val = etm_read(ETMCNTENR2);
168 return 0;
169 case ETMCNTENRn(3):
170 *val = etm_read(ETMCNTENR3);
171 return 0;
172 case ETMCNTRLDEVRn(0):
173 *val = etm_read(ETMCNTRLDEVR0);
174 return 0;
175 case ETMCNTRLDEVRn(1):
176 *val = etm_read(ETMCNTRLDEVR1);
177 return 0;
178 case ETMCNTRLDEVRn(2):
179 *val = etm_read(ETMCNTRLDEVR2);
180 return 0;
181 case ETMCNTRLDEVRn(3):
182 *val = etm_read(ETMCNTRLDEVR3);
183 return 0;
184 case ETMCNTVRn(0):
185 *val = etm_read(ETMCNTVR0);
186 return 0;
187 case ETMCNTVRn(1):
188 *val = etm_read(ETMCNTVR1);
189 return 0;
190 case ETMCNTVRn(2):
191 *val = etm_read(ETMCNTVR2);
192 return 0;
193 case ETMCNTVRn(3):
194 *val = etm_read(ETMCNTVR3);
195 return 0;
196 case ETMSQ12EVR:
197 *val = etm_read(ETMSQ12EVR);
198 return 0;
199 case ETMSQ21EVR:
200 *val = etm_read(ETMSQ21EVR);
201 return 0;
202 case ETMSQ23EVR:
203 *val = etm_read(ETMSQ23EVR);
204 return 0;
205 case ETMSQ31EVR:
206 *val = etm_read(ETMSQ31EVR);
207 return 0;
208 case ETMSQ32EVR:
209 *val = etm_read(ETMSQ32EVR);
210 return 0;
211 case ETMSQ13EVR:
212 *val = etm_read(ETMSQ13EVR);
213 return 0;
214 case ETMSQR:
215 *val = etm_read(ETMSQR);
216 return 0;
217 case ETMEXTOUTEVRn(0):
218 *val = etm_read(ETMEXTOUTEVR0);
219 return 0;
220 case ETMEXTOUTEVRn(1):
221 *val = etm_read(ETMEXTOUTEVR1);
222 return 0;
223 case ETMEXTOUTEVRn(2):
224 *val = etm_read(ETMEXTOUTEVR2);
225 return 0;
226 case ETMEXTOUTEVRn(3):
227 *val = etm_read(ETMEXTOUTEVR3);
228 return 0;
229 case ETMCIDCVRn(0):
230 *val = etm_read(ETMCIDCVR0);
231 return 0;
232 case ETMCIDCVRn(1):
233 *val = etm_read(ETMCIDCVR1);
234 return 0;
235 case ETMCIDCVRn(2):
236 *val = etm_read(ETMCIDCVR2);
237 return 0;
238 case ETMCIDCMR:
239 *val = etm_read(ETMCIDCMR);
240 return 0;
241 case ETMIMPSPEC0:
242 *val = etm_read(ETMIMPSPEC0);
243 return 0;
244 case ETMIMPSPEC1:
245 *val = etm_read(ETMIMPSPEC1);
246 return 0;
247 case ETMIMPSPEC2:
248 *val = etm_read(ETMIMPSPEC2);
249 return 0;
250 case ETMIMPSPEC3:
251 *val = etm_read(ETMIMPSPEC3);
252 return 0;
253 case ETMIMPSPEC4:
254 *val = etm_read(ETMIMPSPEC4);
255 return 0;
256 case ETMIMPSPEC5:
257 *val = etm_read(ETMIMPSPEC5);
258 return 0;
259 case ETMIMPSPEC6:
260 *val = etm_read(ETMIMPSPEC6);
261 return 0;
262 case ETMIMPSPEC7:
263 *val = etm_read(ETMIMPSPEC7);
264 return 0;
265 case ETMSYNCFR:
266 *val = etm_read(ETMSYNCFR);
267 return 0;
268 case ETMIDR:
269 *val = etm_read(ETMIDR);
270 return 0;
271 case ETMCCER:
272 *val = etm_read(ETMCCER);
273 return 0;
274 case ETMEXTINSELR:
275 *val = etm_read(ETMEXTINSELR);
276 return 0;
277 case ETMTESSEICR:
278 *val = etm_read(ETMTESSEICR);
279 return 0;
280 case ETMEIBCR:
281 *val = etm_read(ETMEIBCR);
282 return 0;
283 case ETMTSEVR:
284 *val = etm_read(ETMTSEVR);
285 return 0;
286 case ETMAUXCR:
287 *val = etm_read(ETMAUXCR);
288 return 0;
289 case ETMTRACEIDR:
290 *val = etm_read(ETMTRACEIDR);
291 return 0;
292 case ETMVMIDCVR:
293 *val = etm_read(ETMVMIDCVR);
294 return 0;
295 case ETMOSLSR:
296 *val = etm_read(ETMOSLSR);
297 return 0;
298 case ETMOSSRR:
299 *val = etm_read(ETMOSSRR);
300 return 0;
301 case ETMPDCR:
302 *val = etm_read(ETMPDCR);
303 return 0;
304 case ETMPDSR:
305 *val = etm_read(ETMPDSR);
306 return 0;
307 default:
308 *val = 0;
309 return -EINVAL;
310 }
311}
312
313int etm_writel_cp14(u32 reg, u32 val)
314{
315 switch (reg) {
316 case ETMCR:
317 etm_write(val, ETMCR);
318 break;
319 case ETMTRIGGER:
320 etm_write(val, ETMTRIGGER);
321 break;
322 case ETMSR:
323 etm_write(val, ETMSR);
324 break;
325 case ETMTSSCR:
326 etm_write(val, ETMTSSCR);
327 break;
328 case ETMTEEVR:
329 etm_write(val, ETMTEEVR);
330 break;
331 case ETMTECR1:
332 etm_write(val, ETMTECR1);
333 break;
334 case ETMFFLR:
335 etm_write(val, ETMFFLR);
336 break;
337 case ETMACVRn(0):
338 etm_write(val, ETMACVR0);
339 break;
340 case ETMACVRn(1):
341 etm_write(val, ETMACVR1);
342 break;
343 case ETMACVRn(2):
344 etm_write(val, ETMACVR2);
345 break;
346 case ETMACVRn(3):
347 etm_write(val, ETMACVR3);
348 break;
349 case ETMACVRn(4):
350 etm_write(val, ETMACVR4);
351 break;
352 case ETMACVRn(5):
353 etm_write(val, ETMACVR5);
354 break;
355 case ETMACVRn(6):
356 etm_write(val, ETMACVR6);
357 break;
358 case ETMACVRn(7):
359 etm_write(val, ETMACVR7);
360 break;
361 case ETMACVRn(8):
362 etm_write(val, ETMACVR8);
363 break;
364 case ETMACVRn(9):
365 etm_write(val, ETMACVR9);
366 break;
367 case ETMACVRn(10):
368 etm_write(val, ETMACVR10);
369 break;
370 case ETMACVRn(11):
371 etm_write(val, ETMACVR11);
372 break;
373 case ETMACVRn(12):
374 etm_write(val, ETMACVR12);
375 break;
376 case ETMACVRn(13):
377 etm_write(val, ETMACVR13);
378 break;
379 case ETMACVRn(14):
380 etm_write(val, ETMACVR14);
381 break;
382 case ETMACVRn(15):
383 etm_write(val, ETMACVR15);
384 break;
385 case ETMACTRn(0):
386 etm_write(val, ETMACTR0);
387 break;
388 case ETMACTRn(1):
389 etm_write(val, ETMACTR1);
390 break;
391 case ETMACTRn(2):
392 etm_write(val, ETMACTR2);
393 break;
394 case ETMACTRn(3):
395 etm_write(val, ETMACTR3);
396 break;
397 case ETMACTRn(4):
398 etm_write(val, ETMACTR4);
399 break;
400 case ETMACTRn(5):
401 etm_write(val, ETMACTR5);
402 break;
403 case ETMACTRn(6):
404 etm_write(val, ETMACTR6);
405 break;
406 case ETMACTRn(7):
407 etm_write(val, ETMACTR7);
408 break;
409 case ETMACTRn(8):
410 etm_write(val, ETMACTR8);
411 break;
412 case ETMACTRn(9):
413 etm_write(val, ETMACTR9);
414 break;
415 case ETMACTRn(10):
416 etm_write(val, ETMACTR10);
417 break;
418 case ETMACTRn(11):
419 etm_write(val, ETMACTR11);
420 break;
421 case ETMACTRn(12):
422 etm_write(val, ETMACTR12);
423 break;
424 case ETMACTRn(13):
425 etm_write(val, ETMACTR13);
426 break;
427 case ETMACTRn(14):
428 etm_write(val, ETMACTR14);
429 break;
430 case ETMACTRn(15):
431 etm_write(val, ETMACTR15);
432 break;
433 case ETMCNTRLDVRn(0):
434 etm_write(val, ETMCNTRLDVR0);
435 break;
436 case ETMCNTRLDVRn(1):
437 etm_write(val, ETMCNTRLDVR1);
438 break;
439 case ETMCNTRLDVRn(2):
440 etm_write(val, ETMCNTRLDVR2);
441 break;
442 case ETMCNTRLDVRn(3):
443 etm_write(val, ETMCNTRLDVR3);
444 break;
445 case ETMCNTENRn(0):
446 etm_write(val, ETMCNTENR0);
447 break;
448 case ETMCNTENRn(1):
449 etm_write(val, ETMCNTENR1);
450 break;
451 case ETMCNTENRn(2):
452 etm_write(val, ETMCNTENR2);
453 break;
454 case ETMCNTENRn(3):
455 etm_write(val, ETMCNTENR3);
456 break;
457 case ETMCNTRLDEVRn(0):
458 etm_write(val, ETMCNTRLDEVR0);
459 break;
460 case ETMCNTRLDEVRn(1):
461 etm_write(val, ETMCNTRLDEVR1);
462 break;
463 case ETMCNTRLDEVRn(2):
464 etm_write(val, ETMCNTRLDEVR2);
465 break;
466 case ETMCNTRLDEVRn(3):
467 etm_write(val, ETMCNTRLDEVR3);
468 break;
469 case ETMCNTVRn(0):
470 etm_write(val, ETMCNTVR0);
471 break;
472 case ETMCNTVRn(1):
473 etm_write(val, ETMCNTVR1);
474 break;
475 case ETMCNTVRn(2):
476 etm_write(val, ETMCNTVR2);
477 break;
478 case ETMCNTVRn(3):
479 etm_write(val, ETMCNTVR3);
480 break;
481 case ETMSQ12EVR:
482 etm_write(val, ETMSQ12EVR);
483 break;
484 case ETMSQ21EVR:
485 etm_write(val, ETMSQ21EVR);
486 break;
487 case ETMSQ23EVR:
488 etm_write(val, ETMSQ23EVR);
489 break;
490 case ETMSQ31EVR:
491 etm_write(val, ETMSQ31EVR);
492 break;
493 case ETMSQ32EVR:
494 etm_write(val, ETMSQ32EVR);
495 break;
496 case ETMSQ13EVR:
497 etm_write(val, ETMSQ13EVR);
498 break;
499 case ETMSQR:
500 etm_write(val, ETMSQR);
501 break;
502 case ETMEXTOUTEVRn(0):
503 etm_write(val, ETMEXTOUTEVR0);
504 break;
505 case ETMEXTOUTEVRn(1):
506 etm_write(val, ETMEXTOUTEVR1);
507 break;
508 case ETMEXTOUTEVRn(2):
509 etm_write(val, ETMEXTOUTEVR2);
510 break;
511 case ETMEXTOUTEVRn(3):
512 etm_write(val, ETMEXTOUTEVR3);
513 break;
514 case ETMCIDCVRn(0):
515 etm_write(val, ETMCIDCVR0);
516 break;
517 case ETMCIDCVRn(1):
518 etm_write(val, ETMCIDCVR1);
519 break;
520 case ETMCIDCVRn(2):
521 etm_write(val, ETMCIDCVR2);
522 break;
523 case ETMCIDCMR:
524 etm_write(val, ETMCIDCMR);
525 break;
526 case ETMIMPSPEC0:
527 etm_write(val, ETMIMPSPEC0);
528 break;
529 case ETMIMPSPEC1:
530 etm_write(val, ETMIMPSPEC1);
531 break;
532 case ETMIMPSPEC2:
533 etm_write(val, ETMIMPSPEC2);
534 break;
535 case ETMIMPSPEC3:
536 etm_write(val, ETMIMPSPEC3);
537 break;
538 case ETMIMPSPEC4:
539 etm_write(val, ETMIMPSPEC4);
540 break;
541 case ETMIMPSPEC5:
542 etm_write(val, ETMIMPSPEC5);
543 break;
544 case ETMIMPSPEC6:
545 etm_write(val, ETMIMPSPEC6);
546 break;
547 case ETMIMPSPEC7:
548 etm_write(val, ETMIMPSPEC7);
549 break;
550 case ETMSYNCFR:
551 etm_write(val, ETMSYNCFR);
552 break;
553 case ETMEXTINSELR:
554 etm_write(val, ETMEXTINSELR);
555 break;
556 case ETMTESSEICR:
557 etm_write(val, ETMTESSEICR);
558 break;
559 case ETMEIBCR:
560 etm_write(val, ETMEIBCR);
561 break;
562 case ETMTSEVR:
563 etm_write(val, ETMTSEVR);
564 break;
565 case ETMAUXCR:
566 etm_write(val, ETMAUXCR);
567 break;
568 case ETMTRACEIDR:
569 etm_write(val, ETMTRACEIDR);
570 break;
571 case ETMVMIDCVR:
572 etm_write(val, ETMVMIDCVR);
573 break;
574 case ETMOSLAR:
575 etm_write(val, ETMOSLAR);
576 break;
577 case ETMOSSRR:
578 etm_write(val, ETMOSSRR);
579 break;
580 case ETMPDCR:
581 etm_write(val, ETMPDCR);
582 break;
583 case ETMPDSR:
584 etm_write(val, ETMPDSR);
585 break;
586 default:
587 return -EINVAL;
588 }
589
590 return 0;
591}
diff --git a/drivers/coresight/coresight-etm.h b/drivers/coresight/coresight-etm.h
new file mode 100644
index 000000000000..501c5fac8a45
--- /dev/null
+++ b/drivers/coresight/coresight-etm.h
@@ -0,0 +1,251 @@
1/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#ifndef _CORESIGHT_CORESIGHT_ETM_H
14#define _CORESIGHT_CORESIGHT_ETM_H
15
16#include <linux/spinlock.h>
17#include "coresight-priv.h"
18
19/*
20 * Device registers:
21 * 0x000 - 0x2FC: Trace registers
22 * 0x300 - 0x314: Management registers
23 * 0x318 - 0xEFC: Trace registers
24 *
25 * Coresight registers
26 * 0xF00 - 0xF9C: Management registers
27 * 0xFA0 - 0xFA4: Management registers in PFTv1.0
28 * Trace registers in PFTv1.1
29 * 0xFA8 - 0xFFC: Management registers
30 */
31
32/* Trace registers (0x000-0x2FC) */
33#define ETMCR 0x000
34#define ETMCCR 0x004
35#define ETMTRIGGER 0x008
36#define ETMSR 0x010
37#define ETMSCR 0x014
38#define ETMTSSCR 0x018
39#define ETMTECR2 0x01c
40#define ETMTEEVR 0x020
41#define ETMTECR1 0x024
42#define ETMFFLR 0x02c
43#define ETMACVRn(n) (0x040 + (n * 4))
44#define ETMACTRn(n) (0x080 + (n * 4))
45#define ETMCNTRLDVRn(n) (0x140 + (n * 4))
46#define ETMCNTENRn(n) (0x150 + (n * 4))
47#define ETMCNTRLDEVRn(n) (0x160 + (n * 4))
48#define ETMCNTVRn(n) (0x170 + (n * 4))
49#define ETMSQ12EVR 0x180
50#define ETMSQ21EVR 0x184
51#define ETMSQ23EVR 0x188
52#define ETMSQ31EVR 0x18c
53#define ETMSQ32EVR 0x190
54#define ETMSQ13EVR 0x194
55#define ETMSQR 0x19c
56#define ETMEXTOUTEVRn(n) (0x1a0 + (n * 4))
57#define ETMCIDCVRn(n) (0x1b0 + (n * 4))
58#define ETMCIDCMR 0x1bc
59#define ETMIMPSPEC0 0x1c0
60#define ETMIMPSPEC1 0x1c4
61#define ETMIMPSPEC2 0x1c8
62#define ETMIMPSPEC3 0x1cc
63#define ETMIMPSPEC4 0x1d0
64#define ETMIMPSPEC5 0x1d4
65#define ETMIMPSPEC6 0x1d8
66#define ETMIMPSPEC7 0x1dc
67#define ETMSYNCFR 0x1e0
68#define ETMIDR 0x1e4
69#define ETMCCER 0x1e8
70#define ETMEXTINSELR 0x1ec
71#define ETMTESSEICR 0x1f0
72#define ETMEIBCR 0x1f4
73#define ETMTSEVR 0x1f8
74#define ETMAUXCR 0x1fc
75#define ETMTRACEIDR 0x200
76#define ETMVMIDCVR 0x240
77/* Management registers (0x300-0x314) */
78#define ETMOSLAR 0x300
79#define ETMOSLSR 0x304
80#define ETMOSSRR 0x308
81#define ETMPDCR 0x310
82#define ETMPDSR 0x314
83#define ETM_MAX_ADDR_CMP 16
84#define ETM_MAX_CNTR 4
85#define ETM_MAX_CTXID_CMP 3
86
87/* Register definition */
88/* ETMCR - 0x00 */
89#define ETMCR_PWD_DWN BIT(0)
90#define ETMCR_STALL_MODE BIT(7)
91#define ETMCR_ETM_PRG BIT(10)
92#define ETMCR_ETM_EN BIT(11)
93#define ETMCR_CYC_ACC BIT(12)
94#define ETMCR_CTXID_SIZE (BIT(14)|BIT(15))
95#define ETMCR_TIMESTAMP_EN BIT(28)
96/* ETMCCR - 0x04 */
97#define ETMCCR_FIFOFULL BIT(23)
98/* ETMPDCR - 0x310 */
99#define ETMPDCR_PWD_UP BIT(3)
100/* ETMTECR1 - 0x024 */
101#define ETMTECR1_ADDR_COMP_1 BIT(0)
102#define ETMTECR1_INC_EXC BIT(24)
103#define ETMTECR1_START_STOP BIT(25)
104/* ETMCCER - 0x1E8 */
105#define ETMCCER_TIMESTAMP BIT(22)
106
107#define ETM_MODE_EXCLUDE BIT(0)
108#define ETM_MODE_CYCACC BIT(1)
109#define ETM_MODE_STALL BIT(2)
110#define ETM_MODE_TIMESTAMP BIT(3)
111#define ETM_MODE_CTXID BIT(4)
112#define ETM_MODE_ALL 0x1f
113
114#define ETM_SQR_MASK 0x3
115#define ETM_TRACEID_MASK 0x3f
116#define ETM_EVENT_MASK 0x1ffff
117#define ETM_SYNC_MASK 0xfff
118#define ETM_ALL_MASK 0xffffffff
119
120#define ETMSR_PROG_BIT 1
121#define ETM_SEQ_STATE_MAX_VAL (0x2)
122#define PORT_SIZE_MASK (GENMASK(21, 21) | GENMASK(6, 4))
123
124#define ETM_HARD_WIRE_RES_A /* Hard wired, always true */ \
125 ((0x0f << 0) | \
126 /* Resource index A */ \
127 (0x06 << 4))
128
129#define ETM_ADD_COMP_0 /* Single addr comparator 1 */ \
130 ((0x00 << 7) | \
131 /* Resource index B */ \
132 (0x00 << 11))
133
134#define ETM_EVENT_NOT_A BIT(14) /* NOT(A) */
135
136#define ETM_DEFAULT_EVENT_VAL (ETM_HARD_WIRE_RES_A | \
137 ETM_ADD_COMP_0 | \
138 ETM_EVENT_NOT_A)
139/**
140 * struct etm_drvdata - specifics associated to an ETM component
141 * @base: memory mapped base address for this component.
142 * @dev: the device entity associated to this component.
143 * @csdev: component vitals needed by the framework.
144 * @clk: the clock this component is associated to.
145 * @spinlock: only one at a time pls.
146 * @cpu: the cpu this component is affined to.
147 * @port_size: port size as reported by ETMCR bit 4-6 and 21.
148 * @arch: ETM/PTM version number.
149 * @use_cp14: true if management registers need to be accessed via CP14.
150 * @enable: is this ETM/PTM currently tracing.
151 * @sticky_enable: true if ETM base configuration has been done.
152 * @boot_enable:true if we should start tracing at boot time.
153 * @os_unlock: true if access to management registers is allowed.
154 * @nr_addr_cmp:Number of pairs of address comparators as found in ETMCCR.
155 * @nr_cntr: Number of counters as found in ETMCCR bit 13-15.
156 * @nr_ext_inp: Number of external input as found in ETMCCR bit 17-19.
157 * @nr_ext_out: Number of external output as found in ETMCCR bit 20-22.
158 * @nr_ctxid_cmp: Number of contextID comparators as found in ETMCCR bit 24-25.
159 * @etmccr: value of register ETMCCR.
160 * @etmccer: value of register ETMCCER.
161 * @traceid: value of the current ID for this component.
162 * @mode: controls various modes supported by this ETM/PTM.
163 * @ctrl: used in conjunction with @mode.
164 * @trigger_event: setting for register ETMTRIGGER.
165 * @startstop_ctrl: setting for register ETMTSSCR.
166 * @enable_event: setting for register ETMTEEVR.
167 * @enable_ctrl1: setting for register ETMTECR1.
168 * @fifofull_level: setting for register ETMFFLR.
169 * @addr_idx: index for the address comparator selection.
170 * @addr_val: value for address comparator register.
171 * @addr_acctype: access type for address comparator register.
172 * @addr_type: current status of the comparator register.
173 * @cntr_idx: index for the counter register selection.
174 * @cntr_rld_val: reload value of a counter register.
175 * @cntr_event: control for counter enable register.
176 * @cntr_rld_event: value for counter reload event register.
177 * @cntr_val: counter value register.
178 * @seq_12_event: event causing the transition from 1 to 2.
179 * @seq_21_event: event causing the transition from 2 to 1.
180 * @seq_23_event: event causing the transition from 2 to 3.
181 * @seq_31_event: event causing the transition from 3 to 1.
182 * @seq_32_event: event causing the transition from 3 to 2.
183 * @seq_13_event: event causing the transition from 1 to 3.
184 * @seq_curr_state: current value of the sequencer register.
185 * @ctxid_idx: index for the context ID registers.
186 * @ctxid_val: value for the context ID to trigger on.
187 * @ctxid_mask: mask applicable to all the context IDs.
188 * @sync_freq: Synchronisation frequency.
189 * @timestamp_event: Defines an event that requests the insertion
190 of a timestamp into the trace stream.
191 */
192struct etm_drvdata {
193 void __iomem *base;
194 struct device *dev;
195 struct coresight_device *csdev;
196 struct clk *clk;
197 spinlock_t spinlock;
198 int cpu;
199 int port_size;
200 u8 arch;
201 bool use_cp14;
202 bool enable;
203 bool sticky_enable;
204 bool boot_enable;
205 bool os_unlock;
206 u8 nr_addr_cmp;
207 u8 nr_cntr;
208 u8 nr_ext_inp;
209 u8 nr_ext_out;
210 u8 nr_ctxid_cmp;
211 u32 etmccr;
212 u32 etmccer;
213 u32 traceid;
214 u32 mode;
215 u32 ctrl;
216 u32 trigger_event;
217 u32 startstop_ctrl;
218 u32 enable_event;
219 u32 enable_ctrl1;
220 u32 fifofull_level;
221 u8 addr_idx;
222 u32 addr_val[ETM_MAX_ADDR_CMP];
223 u32 addr_acctype[ETM_MAX_ADDR_CMP];
224 u32 addr_type[ETM_MAX_ADDR_CMP];
225 u8 cntr_idx;
226 u32 cntr_rld_val[ETM_MAX_CNTR];
227 u32 cntr_event[ETM_MAX_CNTR];
228 u32 cntr_rld_event[ETM_MAX_CNTR];
229 u32 cntr_val[ETM_MAX_CNTR];
230 u32 seq_12_event;
231 u32 seq_21_event;
232 u32 seq_23_event;
233 u32 seq_31_event;
234 u32 seq_32_event;
235 u32 seq_13_event;
236 u32 seq_curr_state;
237 u8 ctxid_idx;
238 u32 ctxid_val[ETM_MAX_CTXID_CMP];
239 u32 ctxid_mask;
240 u32 sync_freq;
241 u32 timestamp_event;
242};
243
244enum etm_addr_type {
245 ETM_ADDR_TYPE_NONE,
246 ETM_ADDR_TYPE_SINGLE,
247 ETM_ADDR_TYPE_RANGE,
248 ETM_ADDR_TYPE_START,
249 ETM_ADDR_TYPE_STOP,
250};
251#endif
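The banked trace registers above are addressed with simple stride-4 macros; a quick userspace check of the arithmetic, just to show how an index maps to a register offset:

#include <stdio.h>

#define ETMACVRn(n)	(0x040 + (n * 4))	/* as defined in coresight-etm.h above */
#define ETMCNTVRn(n)	(0x170 + (n * 4))

int main(void)
{
	printf("ETMACVR2  -> %#x\n", ETMACVRn(2));	/* 0x48 */
	printf("ETMCNTVR3 -> %#x\n", ETMCNTVRn(3));	/* 0x17c */
	return 0;
}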
diff --git a/drivers/coresight/coresight-etm3x.c b/drivers/coresight/coresight-etm3x.c
new file mode 100644
index 000000000000..d9e3ed6aa857
--- /dev/null
+++ b/drivers/coresight/coresight-etm3x.c
@@ -0,0 +1,1928 @@
1/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/io.h>
19#include <linux/err.h>
20#include <linux/fs.h>
21#include <linux/slab.h>
22#include <linux/delay.h>
23#include <linux/smp.h>
24#include <linux/sysfs.h>
25#include <linux/stat.h>
26#include <linux/clk.h>
27#include <linux/cpu.h>
28#include <linux/of.h>
29#include <linux/coresight.h>
30#include <linux/amba/bus.h>
31#include <linux/seq_file.h>
32#include <linux/uaccess.h>
33#include <asm/sections.h>
34
35#include "coresight-etm.h"
36
37#ifdef CONFIG_CORESIGHT_SOURCE_ETM_DEFAULT_ENABLE
38static int boot_enable = 1;
39#else
40static int boot_enable;
41#endif
42module_param_named(
43 boot_enable, boot_enable, int, S_IRUGO
44);
45
46/* The number of ETM/PTM currently registered */
47static int etm_count;
48static struct etm_drvdata *etmdrvdata[NR_CPUS];
49
50static inline void etm_writel(struct etm_drvdata *drvdata,
51 u32 val, u32 off)
52{
53 if (drvdata->use_cp14) {
54 if (etm_writel_cp14(off, val)) {
55 dev_err(drvdata->dev,
56 "invalid CP14 access to ETM reg: %#x", off);
57 }
58 } else {
59 writel_relaxed(val, drvdata->base + off);
60 }
61}
62
63static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off)
64{
65 u32 val;
66
67 if (drvdata->use_cp14) {
68 if (etm_readl_cp14(off, &val)) {
69 dev_err(drvdata->dev,
70 "invalid CP14 access to ETM reg: %#x", off);
71 }
72 } else {
73 val = readl_relaxed(drvdata->base + off);
74 }
75
76 return val;
77}
78
79/*
80 * Memory mapped writes to clear os lock are not supported on some processors
81 * and OS lock must be unlocked before any memory mapped access on such
82 * processors, otherwise memory mapped reads/writes will be invalid.
83 */
84static void etm_os_unlock(void *info)
85{
86 struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
87 /* Writing any value to ETMOSLAR unlocks the trace registers */
88 etm_writel(drvdata, 0x0, ETMOSLAR);
89 isb();
90}
91
92static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
93{
94 u32 etmcr;
95
96 /* Ensure pending cp14 accesses complete before setting pwrdwn */
97 mb();
98 isb();
99 etmcr = etm_readl(drvdata, ETMCR);
100 etmcr |= ETMCR_PWD_DWN;
101 etm_writel(drvdata, etmcr, ETMCR);
102}
103
104static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
105{
106 u32 etmcr;
107
108 etmcr = etm_readl(drvdata, ETMCR);
109 etmcr &= ~ETMCR_PWD_DWN;
110 etm_writel(drvdata, etmcr, ETMCR);
111 /* Ensure pwrup completes before subsequent cp14 accesses */
112 mb();
113 isb();
114}
115
116static void etm_set_pwrup(struct etm_drvdata *drvdata)
117{
118 u32 etmpdcr;
119
120 etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
121 etmpdcr |= ETMPDCR_PWD_UP;
122 writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
123 /* Ensure pwrup completes before subsequent cp14 accesses */
124 mb();
125 isb();
126}
127
128static void etm_clr_pwrup(struct etm_drvdata *drvdata)
129{
130 u32 etmpdcr;
131
132 /* Ensure pending cp14 accesses complete before clearing pwrup */
133 mb();
134 isb();
135 etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
136 etmpdcr &= ~ETMPDCR_PWD_UP;
137 writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
138}
139
140/**
141 * coresight_timeout_etm - loop until a bit has changed to a specific state.
142 * @drvdata: etm's private data structure.
143 * @offset: address of a register, starting from @addr.
144 * @position: the position of the bit of interest.
145 * @value: the value the bit should have.
146 *
147 * Basically the same as @coresight_timeout except for the register access
148 * method where we have to account for CP14 configurations.
149
150 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
151 * TIMEOUT_US has elapsed, which ever happens first.
152 */
153
154static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset,
155 int position, int value)
156{
157 int i;
158 u32 val;
159
160 for (i = TIMEOUT_US; i > 0; i--) {
161 val = etm_readl(drvdata, offset);
162 /* Waiting on the bit to go from 0 to 1 */
163 if (value) {
164 if (val & BIT(position))
165 return 0;
166 /* Waiting on the bit to go from 1 to 0 */
167 } else {
168 if (!(val & BIT(position)))
169 return 0;
170 }
171
172 /*
173 * Delay is arbitrary - the specification doesn't say how long
174 * we are expected to wait. Extra check required to make sure
175 * we don't wait needlessly on the last iteration.
176 */
177 if (i - 1)
178 udelay(1);
179 }
180
181 return -EAGAIN;
182}
183
184
185static void etm_set_prog(struct etm_drvdata *drvdata)
186{
187 u32 etmcr;
188
189 etmcr = etm_readl(drvdata, ETMCR);
190 etmcr |= ETMCR_ETM_PRG;
191 etm_writel(drvdata, etmcr, ETMCR);
192 /*
193 * Recommended by spec for cp14 accesses to ensure etmcr write is
194 * complete before polling etmsr
195 */
196 isb();
197 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
198 dev_err(drvdata->dev,
199 "timeout observed when probing at offset %#x\n", ETMSR);
200 }
201}
202
203static void etm_clr_prog(struct etm_drvdata *drvdata)
204{
205 u32 etmcr;
206
207 etmcr = etm_readl(drvdata, ETMCR);
208 etmcr &= ~ETMCR_ETM_PRG;
209 etm_writel(drvdata, etmcr, ETMCR);
210 /*
211 * Recommended by spec for cp14 accesses to ensure etmcr write is
212 * complete before polling etmsr
213 */
214 isb();
215 if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
216 dev_err(drvdata->dev,
217 "timeout observed when probing at offset %#x\n", ETMSR);
218 }
219}
220
221static void etm_set_default(struct etm_drvdata *drvdata)
222{
223 int i;
224
225 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
226 drvdata->enable_event = ETM_HARD_WIRE_RES_A;
227
228 drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL;
229 drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL;
230 drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL;
231 drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL;
232 drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL;
233 drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL;
234 drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL;
235
236 for (i = 0; i < drvdata->nr_cntr; i++) {
237 drvdata->cntr_rld_val[i] = 0x0;
238 drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL;
239 drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL;
240 drvdata->cntr_val[i] = 0x0;
241 }
242
243 drvdata->seq_curr_state = 0x0;
244 drvdata->ctxid_idx = 0x0;
245 for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
246 drvdata->ctxid_val[i] = 0x0;
247 drvdata->ctxid_mask = 0x0;
248}
249
250static void etm_enable_hw(void *info)
251{
252 int i;
253 u32 etmcr;
254 struct etm_drvdata *drvdata = info;
255
256 CS_UNLOCK(drvdata->base);
257
258 /* Turn engine on */
259 etm_clr_pwrdwn(drvdata);
260 /* Apply power to trace registers */
261 etm_set_pwrup(drvdata);
262 /* Make sure all registers are accessible */
263 etm_os_unlock(drvdata);
264
265 etm_set_prog(drvdata);
266
267 etmcr = etm_readl(drvdata, ETMCR);
268 etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
269 etmcr |= drvdata->port_size;
270 etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
271 etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
272 etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
273 etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
274 etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
275 etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
276 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
277 etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
278 etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
279 }
280 for (i = 0; i < drvdata->nr_cntr; i++) {
281 etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
282 etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
283 etm_writel(drvdata, drvdata->cntr_rld_event[i],
284 ETMCNTRLDEVRn(i));
285 etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
286 }
287 etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
288 etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
289 etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
290 etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
291 etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
292 etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
293 etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
294 for (i = 0; i < drvdata->nr_ext_out; i++)
295 etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
296 for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
297 etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
298 etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
299 etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
300 /* No external input selected */
301 etm_writel(drvdata, 0x0, ETMEXTINSELR);
302 etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
303 /* No auxiliary control selected */
304 etm_writel(drvdata, 0x0, ETMAUXCR);
305 etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
306 /* No VMID comparator value selected */
307 etm_writel(drvdata, 0x0, ETMVMIDCVR);
308
309 /* Ensures trace output is enabled from this ETM */
310 etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);
311
312 etm_clr_prog(drvdata);
313 CS_LOCK(drvdata->base);
314
315 dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
316}
317
318static int etm_trace_id_simple(struct etm_drvdata *drvdata)
319{
320 if (!drvdata->enable)
321 return drvdata->traceid;
322
323 return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
324}
325
326static int etm_trace_id(struct coresight_device *csdev)
327{
328 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
329 unsigned long flags;
330 int trace_id = -1;
331
332 if (!drvdata->enable)
333 return drvdata->traceid;
334
335 if (clk_prepare_enable(drvdata->clk))
336 goto out;
337
338 spin_lock_irqsave(&drvdata->spinlock, flags);
339
340 CS_UNLOCK(drvdata->base);
341 trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
342 CS_LOCK(drvdata->base);
343
344 spin_unlock_irqrestore(&drvdata->spinlock, flags);
345 clk_disable_unprepare(drvdata->clk);
346out:
347 return trace_id;
348}
349
350static int etm_enable(struct coresight_device *csdev)
351{
352 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
353 int ret;
354
355 ret = clk_prepare_enable(drvdata->clk);
356 if (ret)
357 goto err_clk;
358
359 spin_lock(&drvdata->spinlock);
360
361 /*
362 * Configure the ETM only if the CPU is online. If it isn't online
363 * hw configuration will take place when 'CPU_STARTING' is received
364 * in @etm_cpu_callback.
365 */
366 if (cpu_online(drvdata->cpu)) {
367 ret = smp_call_function_single(drvdata->cpu,
368 etm_enable_hw, drvdata, 1);
369 if (ret)
370 goto err;
371 }
372
373 drvdata->enable = true;
374 drvdata->sticky_enable = true;
375
376 spin_unlock(&drvdata->spinlock);
377
378 dev_info(drvdata->dev, "ETM tracing enabled\n");
379 return 0;
380err:
381 spin_unlock(&drvdata->spinlock);
382 clk_disable_unprepare(drvdata->clk);
383err_clk:
384 return ret;
385}
386
387static void etm_disable_hw(void *info)
388{
389 int i;
390 struct etm_drvdata *drvdata = info;
391
392 CS_UNLOCK(drvdata->base);
393 etm_set_prog(drvdata);
394
395 /* Program trace enable to low by using always false event */
396 etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);
397
398 /* Read back sequencer and counters for post trace analysis */
399 drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
400
401 for (i = 0; i < drvdata->nr_cntr; i++)
402 drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));
403
404 etm_set_pwrdwn(drvdata);
405 CS_LOCK(drvdata->base);
406
407 dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
408}
409
410static void etm_disable(struct coresight_device *csdev)
411{
412 struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
413
414 /*
415	 * Taking the hotplug lock here prevents the clocks from being
416	 * disabled while tracing is left on (crash scenario) should a user
417	 * disable arrive after the cpu online mask shows the cpu as offline
418	 * but before the DYING hotplug callback is serviced by the ETM driver.
419 */
420 get_online_cpus();
421 spin_lock(&drvdata->spinlock);
422
423 /*
424 * Executing etm_disable_hw on the cpu whose ETM is being disabled
425 * ensures that register writes occur when cpu is powered.
426 */
427 smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
428 drvdata->enable = false;
429
430 spin_unlock(&drvdata->spinlock);
431 put_online_cpus();
432
433 clk_disable_unprepare(drvdata->clk);
434
435 dev_info(drvdata->dev, "ETM tracing disabled\n");
436}
437
438static const struct coresight_ops_source etm_source_ops = {
439 .trace_id = etm_trace_id,
440 .enable = etm_enable,
441 .disable = etm_disable,
442};
443
444static const struct coresight_ops etm_cs_ops = {
445 .source_ops = &etm_source_ops,
446};
447
448static ssize_t nr_addr_cmp_show(struct device *dev,
449 struct device_attribute *attr, char *buf)
450{
451 unsigned long val;
452 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
453
454 val = drvdata->nr_addr_cmp;
455 return sprintf(buf, "%#lx\n", val);
456}
457static DEVICE_ATTR_RO(nr_addr_cmp);
458
459static ssize_t nr_cntr_show(struct device *dev,
460 struct device_attribute *attr, char *buf)
461{
	unsigned long val;
462 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
463
464 val = drvdata->nr_cntr;
465 return sprintf(buf, "%#lx\n", val);
466}
467static DEVICE_ATTR_RO(nr_cntr);
468
469static ssize_t nr_ctxid_cmp_show(struct device *dev,
470 struct device_attribute *attr, char *buf)
471{
472 unsigned long val;
473 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
474
475 val = drvdata->nr_ctxid_cmp;
476 return sprintf(buf, "%#lx\n", val);
477}
478static DEVICE_ATTR_RO(nr_ctxid_cmp);
479
480static ssize_t etmsr_show(struct device *dev,
481 struct device_attribute *attr, char *buf)
482{
483 int ret;
484 unsigned long flags, val;
485 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
486
487 ret = clk_prepare_enable(drvdata->clk);
488 if (ret)
489 return ret;
490
491 spin_lock_irqsave(&drvdata->spinlock, flags);
492 CS_UNLOCK(drvdata->base);
493
494 val = etm_readl(drvdata, ETMSR);
495
496 CS_LOCK(drvdata->base);
497 spin_unlock_irqrestore(&drvdata->spinlock, flags);
498 clk_disable_unprepare(drvdata->clk);
499
500 return sprintf(buf, "%#lx\n", val);
501}
502static DEVICE_ATTR_RO(etmsr);
503
504static ssize_t reset_store(struct device *dev,
505 struct device_attribute *attr,
506 const char *buf, size_t size)
507{
508 int i, ret;
509 unsigned long val;
510 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
511
512 ret = kstrtoul(buf, 16, &val);
513 if (ret)
514 return ret;
515
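	/* Writing a non-zero value restores the default trace configuration. */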
516 if (val) {
517 spin_lock(&drvdata->spinlock);
518 drvdata->mode = ETM_MODE_EXCLUDE;
519 drvdata->ctrl = 0x0;
520 drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL;
521 drvdata->startstop_ctrl = 0x0;
522 drvdata->addr_idx = 0x0;
523 for (i = 0; i < drvdata->nr_addr_cmp; i++) {
524 drvdata->addr_val[i] = 0x0;
525 drvdata->addr_acctype[i] = 0x0;
526 drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE;
527 }
528 drvdata->cntr_idx = 0x0;
529
530 etm_set_default(drvdata);
531 spin_unlock(&drvdata->spinlock);
532 }
533
534 return size;
535}
536static DEVICE_ATTR_WO(reset);
537
538static ssize_t mode_show(struct device *dev,
539 struct device_attribute *attr, char *buf)
540{
541 unsigned long val;
542 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
543
544 val = drvdata->mode;
545 return sprintf(buf, "%#lx\n", val);
546}
547
548static ssize_t mode_store(struct device *dev,
549 struct device_attribute *attr,
550 const char *buf, size_t size)
551{
552 int ret;
553 unsigned long val;
554 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
555
556 ret = kstrtoul(buf, 16, &val);
557 if (ret)
558 return ret;
559
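	/* Translate the generic mode bits into ETMCR/ETMTECR1 settings. */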
560 spin_lock(&drvdata->spinlock);
561 drvdata->mode = val & ETM_MODE_ALL;
562
563 if (drvdata->mode & ETM_MODE_EXCLUDE)
564 drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC;
565 else
566 drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC;
567
568 if (drvdata->mode & ETM_MODE_CYCACC)
569 drvdata->ctrl |= ETMCR_CYC_ACC;
570 else
571 drvdata->ctrl &= ~ETMCR_CYC_ACC;
572
573	if (drvdata->mode & ETM_MODE_STALL) {
574		if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) {
575			dev_warn(drvdata->dev, "stall mode not supported\n");
			spin_unlock(&drvdata->spinlock);
576			return -EINVAL;
577		}
578		drvdata->ctrl |= ETMCR_STALL_MODE;
579	} else
580		drvdata->ctrl &= ~ETMCR_STALL_MODE;
581
582	if (drvdata->mode & ETM_MODE_TIMESTAMP) {
583		if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) {
584			dev_warn(drvdata->dev, "timestamp not supported\n");
			spin_unlock(&drvdata->spinlock);
585			return -EINVAL;
586		}
587		drvdata->ctrl |= ETMCR_TIMESTAMP_EN;
588	} else
589		drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN;
590
591 if (drvdata->mode & ETM_MODE_CTXID)
592 drvdata->ctrl |= ETMCR_CTXID_SIZE;
593 else
594 drvdata->ctrl &= ~ETMCR_CTXID_SIZE;
595 spin_unlock(&drvdata->spinlock);
596
597 return size;
598}
599static DEVICE_ATTR_RW(mode);
600
601static ssize_t trigger_event_show(struct device *dev,
602 struct device_attribute *attr, char *buf)
603{
604 unsigned long val;
605 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
606
607 val = drvdata->trigger_event;
608 return sprintf(buf, "%#lx\n", val);
609}
610
611static ssize_t trigger_event_store(struct device *dev,
612 struct device_attribute *attr,
613 const char *buf, size_t size)
614{
615 int ret;
616 unsigned long val;
617 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
618
619 ret = kstrtoul(buf, 16, &val);
620 if (ret)
621 return ret;
622
623 drvdata->trigger_event = val & ETM_EVENT_MASK;
624
625 return size;
626}
627static DEVICE_ATTR_RW(trigger_event);
628
629static ssize_t enable_event_show(struct device *dev,
630 struct device_attribute *attr, char *buf)
631{
632 unsigned long val;
633 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
634
635 val = drvdata->enable_event;
636 return sprintf(buf, "%#lx\n", val);
637}
638
639static ssize_t enable_event_store(struct device *dev,
640 struct device_attribute *attr,
641 const char *buf, size_t size)
642{
643 int ret;
644 unsigned long val;
645 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
646
647 ret = kstrtoul(buf, 16, &val);
648 if (ret)
649 return ret;
650
651 drvdata->enable_event = val & ETM_EVENT_MASK;
652
653 return size;
654}
655static DEVICE_ATTR_RW(enable_event);
656
657static ssize_t fifofull_level_show(struct device *dev,
658 struct device_attribute *attr, char *buf)
659{
660 unsigned long val;
661 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
662
663 val = drvdata->fifofull_level;
664 return sprintf(buf, "%#lx\n", val);
665}
666
667static ssize_t fifofull_level_store(struct device *dev,
668 struct device_attribute *attr,
669 const char *buf, size_t size)
670{
671 int ret;
672 unsigned long val;
673 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
674
675 ret = kstrtoul(buf, 16, &val);
676 if (ret)
677 return ret;
678
679 drvdata->fifofull_level = val;
680
681 return size;
682}
683static DEVICE_ATTR_RW(fifofull_level);
684
685static ssize_t addr_idx_show(struct device *dev,
686 struct device_attribute *attr, char *buf)
687{
688 unsigned long val;
689 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
690
691 val = drvdata->addr_idx;
692 return sprintf(buf, "%#lx\n", val);
693}
694
695static ssize_t addr_idx_store(struct device *dev,
696 struct device_attribute *attr,
697 const char *buf, size_t size)
698{
699 int ret;
700 unsigned long val;
701 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
702
703 ret = kstrtoul(buf, 16, &val);
704 if (ret)
705 return ret;
706
707 if (val >= drvdata->nr_addr_cmp)
708 return -EINVAL;
709
710 /*
711 * Use spinlock to ensure index doesn't change while it gets
712 * dereferenced multiple times within a spinlock block elsewhere.
713 */
714 spin_lock(&drvdata->spinlock);
715 drvdata->addr_idx = val;
716 spin_unlock(&drvdata->spinlock);
717
718 return size;
719}
720static DEVICE_ATTR_RW(addr_idx);
721
722static ssize_t addr_single_show(struct device *dev,
723 struct device_attribute *attr, char *buf)
724{
725 u8 idx;
726 unsigned long val;
727 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
728
729 spin_lock(&drvdata->spinlock);
730 idx = drvdata->addr_idx;
731 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
732 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
733 spin_unlock(&drvdata->spinlock);
734 return -EINVAL;
735 }
736
737 val = drvdata->addr_val[idx];
738 spin_unlock(&drvdata->spinlock);
739
740 return sprintf(buf, "%#lx\n", val);
741}
742
743static ssize_t addr_single_store(struct device *dev,
744 struct device_attribute *attr,
745 const char *buf, size_t size)
746{
747 u8 idx;
748 int ret;
749 unsigned long val;
750 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
751
752 ret = kstrtoul(buf, 16, &val);
753 if (ret)
754 return ret;
755
756 spin_lock(&drvdata->spinlock);
757 idx = drvdata->addr_idx;
758 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
759 drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
760 spin_unlock(&drvdata->spinlock);
761 return -EINVAL;
762 }
763
764 drvdata->addr_val[idx] = val;
765 drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
766 spin_unlock(&drvdata->spinlock);
767
768 return size;
769}
770static DEVICE_ATTR_RW(addr_single);
771
772static ssize_t addr_range_show(struct device *dev,
773 struct device_attribute *attr, char *buf)
774{
775 u8 idx;
776 unsigned long val1, val2;
777 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
778
779 spin_lock(&drvdata->spinlock);
780 idx = drvdata->addr_idx;
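	/* Range comparators work on even/odd pairs; idx must be the even half. */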
781 if (idx % 2 != 0) {
782 spin_unlock(&drvdata->spinlock);
783 return -EPERM;
784 }
785 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
786 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
787 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
788 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
789 spin_unlock(&drvdata->spinlock);
790 return -EPERM;
791 }
792
793 val1 = drvdata->addr_val[idx];
794 val2 = drvdata->addr_val[idx + 1];
795 spin_unlock(&drvdata->spinlock);
796
797 return sprintf(buf, "%#lx %#lx\n", val1, val2);
798}
799
800static ssize_t addr_range_store(struct device *dev,
801 struct device_attribute *attr,
802 const char *buf, size_t size)
803{
804 u8 idx;
805 unsigned long val1, val2;
806 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
807
808 if (sscanf(buf, "%lx %lx", &val1, &val2) != 2)
809 return -EINVAL;
810 /* Lower address comparator cannot have a higher address value */
811 if (val1 > val2)
812 return -EINVAL;
813
814 spin_lock(&drvdata->spinlock);
815 idx = drvdata->addr_idx;
816 if (idx % 2 != 0) {
817 spin_unlock(&drvdata->spinlock);
818 return -EPERM;
819 }
820 if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
821 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
822 (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
823 drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
824 spin_unlock(&drvdata->spinlock);
825 return -EPERM;
826 }
827
828 drvdata->addr_val[idx] = val1;
829 drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
830 drvdata->addr_val[idx + 1] = val2;
831 drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
832 drvdata->enable_ctrl1 |= (1 << (idx/2));
833 spin_unlock(&drvdata->spinlock);
834
835 return size;
836}
837static DEVICE_ATTR_RW(addr_range);
838
839static ssize_t addr_start_show(struct device *dev,
840 struct device_attribute *attr, char *buf)
841{
842 u8 idx;
843 unsigned long val;
844 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
845
846 spin_lock(&drvdata->spinlock);
847 idx = drvdata->addr_idx;
848 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
849 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
850 spin_unlock(&drvdata->spinlock);
851 return -EPERM;
852 }
853
854 val = drvdata->addr_val[idx];
855 spin_unlock(&drvdata->spinlock);
856
857 return sprintf(buf, "%#lx\n", val);
858}
859
860static ssize_t addr_start_store(struct device *dev,
861 struct device_attribute *attr,
862 const char *buf, size_t size)
863{
864 u8 idx;
865 int ret;
866 unsigned long val;
867 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
868
869 ret = kstrtoul(buf, 16, &val);
870 if (ret)
871 return ret;
872
873 spin_lock(&drvdata->spinlock);
874 idx = drvdata->addr_idx;
875 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
876 drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) {
877 spin_unlock(&drvdata->spinlock);
878 return -EPERM;
879 }
880
881 drvdata->addr_val[idx] = val;
882 drvdata->addr_type[idx] = ETM_ADDR_TYPE_START;
883 drvdata->startstop_ctrl |= (1 << idx);
884 drvdata->enable_ctrl1 |= BIT(25);
885 spin_unlock(&drvdata->spinlock);
886
887 return size;
888}
889static DEVICE_ATTR_RW(addr_start);
890
891static ssize_t addr_stop_show(struct device *dev,
892 struct device_attribute *attr, char *buf)
893{
894 u8 idx;
895 unsigned long val;
896 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
897
898 spin_lock(&drvdata->spinlock);
899 idx = drvdata->addr_idx;
900 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
901 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
902 spin_unlock(&drvdata->spinlock);
903 return -EPERM;
904 }
905
906 val = drvdata->addr_val[idx];
907 spin_unlock(&drvdata->spinlock);
908
909 return sprintf(buf, "%#lx\n", val);
910}
911
912static ssize_t addr_stop_store(struct device *dev,
913 struct device_attribute *attr,
914 const char *buf, size_t size)
915{
916 u8 idx;
917 int ret;
918 unsigned long val;
919 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
920
921 ret = kstrtoul(buf, 16, &val);
922 if (ret)
923 return ret;
924
925 spin_lock(&drvdata->spinlock);
926 idx = drvdata->addr_idx;
927 if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
928 drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
929 spin_unlock(&drvdata->spinlock);
930 return -EPERM;
931 }
932
933 drvdata->addr_val[idx] = val;
934 drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP;
935 drvdata->startstop_ctrl |= (1 << (idx + 16));
936 drvdata->enable_ctrl1 |= ETMTECR1_START_STOP;
937 spin_unlock(&drvdata->spinlock);
938
939 return size;
940}
941static DEVICE_ATTR_RW(addr_stop);
942
943static ssize_t addr_acctype_show(struct device *dev,
944 struct device_attribute *attr, char *buf)
945{
946 unsigned long val;
947 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
948
949 spin_lock(&drvdata->spinlock);
950 val = drvdata->addr_acctype[drvdata->addr_idx];
951 spin_unlock(&drvdata->spinlock);
952
953 return sprintf(buf, "%#lx\n", val);
954}
955
956static ssize_t addr_acctype_store(struct device *dev,
957 struct device_attribute *attr,
958 const char *buf, size_t size)
959{
960 int ret;
961 unsigned long val;
962 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
963
964 ret = kstrtoul(buf, 16, &val);
965 if (ret)
966 return ret;
967
968 spin_lock(&drvdata->spinlock);
969 drvdata->addr_acctype[drvdata->addr_idx] = val;
970 spin_unlock(&drvdata->spinlock);
971
972 return size;
973}
974static DEVICE_ATTR_RW(addr_acctype);
975
976static ssize_t cntr_idx_show(struct device *dev,
977 struct device_attribute *attr, char *buf)
978{
979 unsigned long val;
980 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
981
982 val = drvdata->cntr_idx;
983 return sprintf(buf, "%#lx\n", val);
984}
985
986static ssize_t cntr_idx_store(struct device *dev,
987 struct device_attribute *attr,
988 const char *buf, size_t size)
989{
990 int ret;
991 unsigned long val;
992 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
993
994 ret = kstrtoul(buf, 16, &val);
995 if (ret)
996 return ret;
997
998 if (val >= drvdata->nr_cntr)
999 return -EINVAL;
1000 /*
1001 * Use spinlock to ensure index doesn't change while it gets
1002 * dereferenced multiple times within a spinlock block elsewhere.
1003 */
1004 spin_lock(&drvdata->spinlock);
1005 drvdata->cntr_idx = val;
1006 spin_unlock(&drvdata->spinlock);
1007
1008 return size;
1009}
1010static DEVICE_ATTR_RW(cntr_idx);
1011
1012static ssize_t cntr_rld_val_show(struct device *dev,
1013 struct device_attribute *attr, char *buf)
1014{
1015 unsigned long val;
1016 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1017
1018 spin_lock(&drvdata->spinlock);
1019 val = drvdata->cntr_rld_val[drvdata->cntr_idx];
1020 spin_unlock(&drvdata->spinlock);
1021
1022 return sprintf(buf, "%#lx\n", val);
1023}
1024
1025static ssize_t cntr_rld_val_store(struct device *dev,
1026 struct device_attribute *attr,
1027 const char *buf, size_t size)
1028{
1029 int ret;
1030 unsigned long val;
1031 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1032
1033 ret = kstrtoul(buf, 16, &val);
1034 if (ret)
1035 return ret;
1036
1037 spin_lock(&drvdata->spinlock);
1038 drvdata->cntr_rld_val[drvdata->cntr_idx] = val;
1039 spin_unlock(&drvdata->spinlock);
1040
1041 return size;
1042}
1043static DEVICE_ATTR_RW(cntr_rld_val);
1044
1045static ssize_t cntr_event_show(struct device *dev,
1046 struct device_attribute *attr, char *buf)
1047{
1048 unsigned long val;
1049 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1050
1051 spin_lock(&drvdata->spinlock);
1052 val = drvdata->cntr_event[drvdata->cntr_idx];
1053 spin_unlock(&drvdata->spinlock);
1054
1055 return sprintf(buf, "%#lx\n", val);
1056}
1057
1058static ssize_t cntr_event_store(struct device *dev,
1059 struct device_attribute *attr,
1060 const char *buf, size_t size)
1061{
1062 int ret;
1063 unsigned long val;
1064 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1065
1066 ret = kstrtoul(buf, 16, &val);
1067 if (ret)
1068 return ret;
1069
1070 spin_lock(&drvdata->spinlock);
1071 drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1072 spin_unlock(&drvdata->spinlock);
1073
1074 return size;
1075}
1076static DEVICE_ATTR_RW(cntr_event);
1077
1078static ssize_t cntr_rld_event_show(struct device *dev,
1079 struct device_attribute *attr, char *buf)
1080{
1081 unsigned long val;
1082 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1083
1084 spin_lock(&drvdata->spinlock);
1085 val = drvdata->cntr_rld_event[drvdata->cntr_idx];
1086 spin_unlock(&drvdata->spinlock);
1087
1088 return sprintf(buf, "%#lx\n", val);
1089}
1090
1091static ssize_t cntr_rld_event_store(struct device *dev,
1092 struct device_attribute *attr,
1093 const char *buf, size_t size)
1094{
1095 int ret;
1096 unsigned long val;
1097 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1098
1099 ret = kstrtoul(buf, 16, &val);
1100 if (ret)
1101 return ret;
1102
1103 spin_lock(&drvdata->spinlock);
1104 drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK;
1105 spin_unlock(&drvdata->spinlock);
1106
1107 return size;
1108}
1109static DEVICE_ATTR_RW(cntr_rld_event);
1110
1111static ssize_t cntr_val_show(struct device *dev,
1112 struct device_attribute *attr, char *buf)
1113{
1114 int i, ret = 0;
1115 u32 val;
1116 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1117
1118 if (!drvdata->enable) {
1119 spin_lock(&drvdata->spinlock);
1120 for (i = 0; i < drvdata->nr_cntr; i++)
1121			ret += sprintf(buf + ret, "counter %d: %x\n",
1122				       i, drvdata->cntr_val[i]);
1123 spin_unlock(&drvdata->spinlock);
1124 return ret;
1125 }
1126
1127 for (i = 0; i < drvdata->nr_cntr; i++) {
1128 val = etm_readl(drvdata, ETMCNTVRn(i));
1129		ret += sprintf(buf + ret, "counter %d: %x\n", i, val);
1130 }
1131
1132 return ret;
1133}
1134
1135static ssize_t cntr_val_store(struct device *dev,
1136 struct device_attribute *attr,
1137 const char *buf, size_t size)
1138{
1139 int ret;
1140 unsigned long val;
1141 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1142
1143 ret = kstrtoul(buf, 16, &val);
1144 if (ret)
1145 return ret;
1146
1147 spin_lock(&drvdata->spinlock);
1148 drvdata->cntr_val[drvdata->cntr_idx] = val;
1149 spin_unlock(&drvdata->spinlock);
1150
1151 return size;
1152}
1153static DEVICE_ATTR_RW(cntr_val);
1154
1155static ssize_t seq_12_event_show(struct device *dev,
1156 struct device_attribute *attr, char *buf)
1157{
1158 unsigned long val;
1159 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1160
1161 val = drvdata->seq_12_event;
1162 return sprintf(buf, "%#lx\n", val);
1163}
1164
1165static ssize_t seq_12_event_store(struct device *dev,
1166 struct device_attribute *attr,
1167 const char *buf, size_t size)
1168{
1169 int ret;
1170 unsigned long val;
1171 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1172
1173 ret = kstrtoul(buf, 16, &val);
1174 if (ret)
1175 return ret;
1176
1177 drvdata->seq_12_event = val & ETM_EVENT_MASK;
1178 return size;
1179}
1180static DEVICE_ATTR_RW(seq_12_event);
1181
1182static ssize_t seq_21_event_show(struct device *dev,
1183 struct device_attribute *attr, char *buf)
1184{
1185 unsigned long val;
1186 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1187
1188 val = drvdata->seq_21_event;
1189 return sprintf(buf, "%#lx\n", val);
1190}
1191
1192static ssize_t seq_21_event_store(struct device *dev,
1193 struct device_attribute *attr,
1194 const char *buf, size_t size)
1195{
1196 int ret;
1197 unsigned long val;
1198 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1199
1200 ret = kstrtoul(buf, 16, &val);
1201 if (ret)
1202 return ret;
1203
1204 drvdata->seq_21_event = val & ETM_EVENT_MASK;
1205 return size;
1206}
1207static DEVICE_ATTR_RW(seq_21_event);
1208
1209static ssize_t seq_23_event_show(struct device *dev,
1210 struct device_attribute *attr, char *buf)
1211{
1212 unsigned long val;
1213 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1214
1215 val = drvdata->seq_23_event;
1216 return sprintf(buf, "%#lx\n", val);
1217}
1218
1219static ssize_t seq_23_event_store(struct device *dev,
1220 struct device_attribute *attr,
1221 const char *buf, size_t size)
1222{
1223 int ret;
1224 unsigned long val;
1225 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1226
1227 ret = kstrtoul(buf, 16, &val);
1228 if (ret)
1229 return ret;
1230
1231 drvdata->seq_23_event = val & ETM_EVENT_MASK;
1232 return size;
1233}
1234static DEVICE_ATTR_RW(seq_23_event);
1235
1236static ssize_t seq_31_event_show(struct device *dev,
1237 struct device_attribute *attr, char *buf)
1238{
1239 unsigned long val;
1240 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1241
1242 val = drvdata->seq_31_event;
1243 return sprintf(buf, "%#lx\n", val);
1244}
1245
1246static ssize_t seq_31_event_store(struct device *dev,
1247 struct device_attribute *attr,
1248 const char *buf, size_t size)
1249{
1250 int ret;
1251 unsigned long val;
1252 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1253
1254 ret = kstrtoul(buf, 16, &val);
1255 if (ret)
1256 return ret;
1257
1258 drvdata->seq_31_event = val & ETM_EVENT_MASK;
1259 return size;
1260}
1261static DEVICE_ATTR_RW(seq_31_event);
1262
1263static ssize_t seq_32_event_show(struct device *dev,
1264 struct device_attribute *attr, char *buf)
1265{
1266 unsigned long val;
1267 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1268
1269 val = drvdata->seq_32_event;
1270 return sprintf(buf, "%#lx\n", val);
1271}
1272
1273static ssize_t seq_32_event_store(struct device *dev,
1274 struct device_attribute *attr,
1275 const char *buf, size_t size)
1276{
1277 int ret;
1278 unsigned long val;
1279 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1280
1281 ret = kstrtoul(buf, 16, &val);
1282 if (ret)
1283 return ret;
1284
1285 drvdata->seq_32_event = val & ETM_EVENT_MASK;
1286 return size;
1287}
1288static DEVICE_ATTR_RW(seq_32_event);
1289
1290static ssize_t seq_13_event_show(struct device *dev,
1291 struct device_attribute *attr, char *buf)
1292{
1293 unsigned long val;
1294 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1295
1296 val = drvdata->seq_13_event;
1297 return sprintf(buf, "%#lx\n", val);
1298}
1299
1300static ssize_t seq_13_event_store(struct device *dev,
1301 struct device_attribute *attr,
1302 const char *buf, size_t size)
1303{
1304 int ret;
1305 unsigned long val;
1306 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1307
1308 ret = kstrtoul(buf, 16, &val);
1309 if (ret)
1310 return ret;
1311
1312 drvdata->seq_13_event = val & ETM_EVENT_MASK;
1313 return size;
1314}
1315static DEVICE_ATTR_RW(seq_13_event);
1316
1317static ssize_t seq_curr_state_show(struct device *dev,
1318 struct device_attribute *attr, char *buf)
1319{
1320 int ret;
1321 unsigned long val, flags;
1322 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1323
1324 if (!drvdata->enable) {
1325 val = drvdata->seq_curr_state;
1326 goto out;
1327 }
1328
1329 ret = clk_prepare_enable(drvdata->clk);
1330 if (ret)
1331 return ret;
1332
1333 spin_lock_irqsave(&drvdata->spinlock, flags);
1334
1335 CS_UNLOCK(drvdata->base);
1336 val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
1337 CS_LOCK(drvdata->base);
1338
1339 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1340 clk_disable_unprepare(drvdata->clk);
1341out:
1342 return sprintf(buf, "%#lx\n", val);
1343}
1344
1345static ssize_t seq_curr_state_store(struct device *dev,
1346 struct device_attribute *attr,
1347 const char *buf, size_t size)
1348{
1349 int ret;
1350 unsigned long val;
1351 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1352
1353 ret = kstrtoul(buf, 16, &val);
1354 if (ret)
1355 return ret;
1356
1357 if (val > ETM_SEQ_STATE_MAX_VAL)
1358 return -EINVAL;
1359
1360 drvdata->seq_curr_state = val;
1361
1362 return size;
1363}
1364static DEVICE_ATTR_RW(seq_curr_state);
1365
1366static ssize_t ctxid_idx_show(struct device *dev,
1367 struct device_attribute *attr, char *buf)
1368{
1369 unsigned long val;
1370 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1371
1372 val = drvdata->ctxid_idx;
1373 return sprintf(buf, "%#lx\n", val);
1374}
1375
1376static ssize_t ctxid_idx_store(struct device *dev,
1377 struct device_attribute *attr,
1378 const char *buf, size_t size)
1379{
1380 int ret;
1381 unsigned long val;
1382 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1383
1384 ret = kstrtoul(buf, 16, &val);
1385 if (ret)
1386 return ret;
1387
1388 if (val >= drvdata->nr_ctxid_cmp)
1389 return -EINVAL;
1390
1391 /*
1392 * Use spinlock to ensure index doesn't change while it gets
1393 * dereferenced multiple times within a spinlock block elsewhere.
1394 */
1395 spin_lock(&drvdata->spinlock);
1396 drvdata->ctxid_idx = val;
1397 spin_unlock(&drvdata->spinlock);
1398
1399 return size;
1400}
1401static DEVICE_ATTR_RW(ctxid_idx);
1402
1403static ssize_t ctxid_val_show(struct device *dev,
1404 struct device_attribute *attr, char *buf)
1405{
1406 unsigned long val;
1407 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1408
1409 spin_lock(&drvdata->spinlock);
1410 val = drvdata->ctxid_val[drvdata->ctxid_idx];
1411 spin_unlock(&drvdata->spinlock);
1412
1413 return sprintf(buf, "%#lx\n", val);
1414}
1415
1416static ssize_t ctxid_val_store(struct device *dev,
1417 struct device_attribute *attr,
1418 const char *buf, size_t size)
1419{
1420 int ret;
1421 unsigned long val;
1422 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1423
1424 ret = kstrtoul(buf, 16, &val);
1425 if (ret)
1426 return ret;
1427
1428 spin_lock(&drvdata->spinlock);
1429 drvdata->ctxid_val[drvdata->ctxid_idx] = val;
1430 spin_unlock(&drvdata->spinlock);
1431
1432 return size;
1433}
1434static DEVICE_ATTR_RW(ctxid_val);
1435
1436static ssize_t ctxid_mask_show(struct device *dev,
1437 struct device_attribute *attr, char *buf)
1438{
1439 unsigned long val;
1440 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1441
1442 val = drvdata->ctxid_mask;
1443 return sprintf(buf, "%#lx\n", val);
1444}
1445
1446static ssize_t ctxid_mask_store(struct device *dev,
1447 struct device_attribute *attr,
1448 const char *buf, size_t size)
1449{
1450 int ret;
1451 unsigned long val;
1452 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1453
1454 ret = kstrtoul(buf, 16, &val);
1455 if (ret)
1456 return ret;
1457
1458 drvdata->ctxid_mask = val;
1459 return size;
1460}
1461static DEVICE_ATTR_RW(ctxid_mask);
1462
1463static ssize_t sync_freq_show(struct device *dev,
1464 struct device_attribute *attr, char *buf)
1465{
1466 unsigned long val;
1467 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1468
1469 val = drvdata->sync_freq;
1470 return sprintf(buf, "%#lx\n", val);
1471}
1472
1473static ssize_t sync_freq_store(struct device *dev,
1474 struct device_attribute *attr,
1475 const char *buf, size_t size)
1476{
1477 int ret;
1478 unsigned long val;
1479 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1480
1481 ret = kstrtoul(buf, 16, &val);
1482 if (ret)
1483 return ret;
1484
1485 drvdata->sync_freq = val & ETM_SYNC_MASK;
1486 return size;
1487}
1488static DEVICE_ATTR_RW(sync_freq);
1489
1490static ssize_t timestamp_event_show(struct device *dev,
1491 struct device_attribute *attr, char *buf)
1492{
1493 unsigned long val;
1494 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1495
1496 val = drvdata->timestamp_event;
1497 return sprintf(buf, "%#lx\n", val);
1498}
1499
1500static ssize_t timestamp_event_store(struct device *dev,
1501 struct device_attribute *attr,
1502 const char *buf, size_t size)
1503{
1504 int ret;
1505 unsigned long val;
1506 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1507
1508 ret = kstrtoul(buf, 16, &val);
1509 if (ret)
1510 return ret;
1511
1512 drvdata->timestamp_event = val & ETM_EVENT_MASK;
1513 return size;
1514}
1515static DEVICE_ATTR_RW(timestamp_event);
1516
1517static ssize_t status_show(struct device *dev,
1518 struct device_attribute *attr, char *buf)
1519{
1520 int ret;
1521 unsigned long flags;
1522 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1523
1524 ret = clk_prepare_enable(drvdata->clk);
1525 if (ret)
1526 return ret;
1527
1528 spin_lock_irqsave(&drvdata->spinlock, flags);
1529
1530 CS_UNLOCK(drvdata->base);
1531 ret = sprintf(buf,
1532 "ETMCCR: 0x%08x\n"
1533 "ETMCCER: 0x%08x\n"
1534 "ETMSCR: 0x%08x\n"
1535 "ETMIDR: 0x%08x\n"
1536 "ETMCR: 0x%08x\n"
1537 "ETMTRACEIDR: 0x%08x\n"
1538 "Enable event: 0x%08x\n"
1539 "Enable start/stop: 0x%08x\n"
1540 "Enable control: CR1 0x%08x CR2 0x%08x\n"
1541 "CPU affinity: %d\n",
1542 drvdata->etmccr, drvdata->etmccer,
1543 etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
1544 etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
1545 etm_readl(drvdata, ETMTEEVR),
1546 etm_readl(drvdata, ETMTSSCR),
1547 etm_readl(drvdata, ETMTECR1),
1548 etm_readl(drvdata, ETMTECR2),
1549 drvdata->cpu);
1550 CS_LOCK(drvdata->base);
1551
1552 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1553 clk_disable_unprepare(drvdata->clk);
1554
1555 return ret;
1556}
1557static DEVICE_ATTR_RO(status);
1558
1559static ssize_t traceid_show(struct device *dev,
1560 struct device_attribute *attr, char *buf)
1561{
1562 int ret;
1563 unsigned long val, flags;
1564 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1565
1566 if (!drvdata->enable) {
1567 val = drvdata->traceid;
1568 goto out;
1569 }
1570
1571 ret = clk_prepare_enable(drvdata->clk);
1572 if (ret)
1573 return ret;
1574
1575 spin_lock_irqsave(&drvdata->spinlock, flags);
1576 CS_UNLOCK(drvdata->base);
1577
1578 val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
1579
1580 CS_LOCK(drvdata->base);
1581 spin_unlock_irqrestore(&drvdata->spinlock, flags);
1582 clk_disable_unprepare(drvdata->clk);
1583out:
1584 return sprintf(buf, "%#lx\n", val);
1585}
1586
1587static ssize_t traceid_store(struct device *dev,
1588 struct device_attribute *attr,
1589 const char *buf, size_t size)
1590{
1591 int ret;
1592 unsigned long val;
1593 struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);
1594
1595 ret = kstrtoul(buf, 16, &val);
1596 if (ret)
1597 return ret;
1598
1599 drvdata->traceid = val & ETM_TRACEID_MASK;
1600 return size;
1601}
1602static DEVICE_ATTR_RW(traceid);
1603
1604static struct attribute *coresight_etm_attrs[] = {
1605 &dev_attr_nr_addr_cmp.attr,
1606 &dev_attr_nr_cntr.attr,
1607 &dev_attr_nr_ctxid_cmp.attr,
1608 &dev_attr_etmsr.attr,
1609 &dev_attr_reset.attr,
1610 &dev_attr_mode.attr,
1611 &dev_attr_trigger_event.attr,
1612 &dev_attr_enable_event.attr,
1613 &dev_attr_fifofull_level.attr,
1614 &dev_attr_addr_idx.attr,
1615 &dev_attr_addr_single.attr,
1616 &dev_attr_addr_range.attr,
1617 &dev_attr_addr_start.attr,
1618 &dev_attr_addr_stop.attr,
1619 &dev_attr_addr_acctype.attr,
1620 &dev_attr_cntr_idx.attr,
1621 &dev_attr_cntr_rld_val.attr,
1622 &dev_attr_cntr_event.attr,
1623 &dev_attr_cntr_rld_event.attr,
1624 &dev_attr_cntr_val.attr,
1625 &dev_attr_seq_12_event.attr,
1626 &dev_attr_seq_21_event.attr,
1627 &dev_attr_seq_23_event.attr,
1628 &dev_attr_seq_31_event.attr,
1629 &dev_attr_seq_32_event.attr,
1630 &dev_attr_seq_13_event.attr,
1631 &dev_attr_seq_curr_state.attr,
1632 &dev_attr_ctxid_idx.attr,
1633 &dev_attr_ctxid_val.attr,
1634 &dev_attr_ctxid_mask.attr,
1635 &dev_attr_sync_freq.attr,
1636 &dev_attr_timestamp_event.attr,
1637 &dev_attr_status.attr,
1638 &dev_attr_traceid.attr,
1639 NULL,
1640};
1641ATTRIBUTE_GROUPS(coresight_etm);
1642
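/*
 * CPU hotplug notifier: unlock and reprogram the ETM when its CPU comes up
 * (CPU_STARTING/CPU_ONLINE) and save sequencer/counter state before the CPU
 * goes down (CPU_DYING).
 */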
1643static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
1644 void *hcpu)
1645{
1646 unsigned int cpu = (unsigned long)hcpu;
1647
1648 if (!etmdrvdata[cpu])
1649 goto out;
1650
1651 switch (action & (~CPU_TASKS_FROZEN)) {
1652 case CPU_STARTING:
1653 spin_lock(&etmdrvdata[cpu]->spinlock);
1654 if (!etmdrvdata[cpu]->os_unlock) {
1655 etm_os_unlock(etmdrvdata[cpu]);
1656 etmdrvdata[cpu]->os_unlock = true;
1657 }
1658
1659 if (etmdrvdata[cpu]->enable)
1660 etm_enable_hw(etmdrvdata[cpu]);
1661 spin_unlock(&etmdrvdata[cpu]->spinlock);
1662 break;
1663
1664 case CPU_ONLINE:
1665 if (etmdrvdata[cpu]->boot_enable &&
1666 !etmdrvdata[cpu]->sticky_enable)
1667 coresight_enable(etmdrvdata[cpu]->csdev);
1668 break;
1669
1670 case CPU_DYING:
1671 spin_lock(&etmdrvdata[cpu]->spinlock);
1672 if (etmdrvdata[cpu]->enable)
1673 etm_disable_hw(etmdrvdata[cpu]);
1674 spin_unlock(&etmdrvdata[cpu]->spinlock);
1675 break;
1676 }
1677out:
1678 return NOTIFY_OK;
1679}
1680
1681static struct notifier_block etm_cpu_notifier = {
1682 .notifier_call = etm_cpu_callback,
1683};
1684
1685static bool etm_arch_supported(u8 arch)
1686{
1687 switch (arch) {
1688 case ETM_ARCH_V3_3:
1689 break;
1690 case ETM_ARCH_V3_5:
1691 break;
1692 case PFT_ARCH_V1_0:
1693 break;
1694 case PFT_ARCH_V1_1:
1695 break;
1696 default:
1697 return false;
1698 }
1699 return true;
1700}
1701
1702static void etm_init_arch_data(void *info)
1703{
1704 u32 etmidr;
1705 u32 etmccr;
1706 struct etm_drvdata *drvdata = info;
1707
1708 CS_UNLOCK(drvdata->base);
1709
1710 /* First dummy read */
1711 (void)etm_readl(drvdata, ETMPDSR);
1712 /* Provide power to ETM: ETMPDCR[3] == 1 */
1713 etm_set_pwrup(drvdata);
1714 /*
1715 * Clear power down bit since when this bit is set writes to
1716 * certain registers might be ignored.
1717 */
1718 etm_clr_pwrdwn(drvdata);
1719 /*
1720 * Set prog bit. It will be set from reset but this is included to
1721 * ensure it is set
1722 */
1723 etm_set_prog(drvdata);
1724
1725 /* Find all capabilities */
1726 etmidr = etm_readl(drvdata, ETMIDR);
1727 drvdata->arch = BMVAL(etmidr, 4, 11);
1728 drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;
1729
1730 drvdata->etmccer = etm_readl(drvdata, ETMCCER);
1731 etmccr = etm_readl(drvdata, ETMCCR);
1732 drvdata->etmccr = etmccr;
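	/* ETMCCR[3:0] holds the number of address comparator pairs, hence x2. */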
1733 drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
1734 drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
1735 drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
1736 drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
1737 drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);
1738
1739 etm_set_pwrdwn(drvdata);
1740 etm_clr_pwrup(drvdata);
1741 CS_LOCK(drvdata->base);
1742}
1743
1744static void etm_init_default_data(struct etm_drvdata *drvdata)
1745{
1746 static int etm3x_traceid;
1747
1748	u32 flags = (1 << 0 | /* instruction execute */
1749		     3 << 3 | /* ARM instruction */
1750		     0 << 5 | /* No data value comparison */
1751		     0 << 7 | /* No exact match */
1752 0 << 8 | /* Ignore context ID */
1753 0 << 10); /* Security ignored */
1754
1755 /*
1756	 * Initial configuration only - guarantees that sources handled by
1757	 * this driver have a unique ID at startup time, but not with respect
1758	 * to other types of sources. For that we lean on the core
1759	 * framework.
1760 */
1761 drvdata->traceid = etm3x_traceid++;
1762 drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
1763 drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
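	/* By default trace the kernel text section, from _stext to _etext. */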
1764 if (drvdata->nr_addr_cmp >= 2) {
1765 drvdata->addr_val[0] = (u32) _stext;
1766 drvdata->addr_val[1] = (u32) _etext;
1767 drvdata->addr_acctype[0] = flags;
1768 drvdata->addr_acctype[1] = flags;
1769 drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
1770 drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
1771 }
1772
1773 etm_set_default(drvdata);
1774}
1775
1776static int etm_probe(struct amba_device *adev, const struct amba_id *id)
1777{
1778 int ret;
1779 void __iomem *base;
1780 struct device *dev = &adev->dev;
1781 struct coresight_platform_data *pdata = NULL;
1782 struct etm_drvdata *drvdata;
1783 struct resource *res = &adev->res;
1784 struct coresight_desc *desc;
1785 struct device_node *np = adev->dev.of_node;
1786
1787 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
1788 if (!desc)
1789 return -ENOMEM;
1790
1791 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
1792 if (!drvdata)
1793 return -ENOMEM;
1794
1795 if (np) {
1796 pdata = of_get_coresight_platform_data(dev, np);
1797 if (IS_ERR(pdata))
1798 return PTR_ERR(pdata);
1799
1800 adev->dev.platform_data = pdata;
1801 drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
1802 }
1803
1804 drvdata->dev = &adev->dev;
1805 dev_set_drvdata(dev, drvdata);
1806
1807 /* Validity for the resource is already checked by the AMBA core */
1808 base = devm_ioremap_resource(dev, res);
1809 if (IS_ERR(base))
1810 return PTR_ERR(base);
1811
1812 drvdata->base = base;
1813
1814 spin_lock_init(&drvdata->spinlock);
1815
1816 drvdata->clk = adev->pclk;
1817 ret = clk_prepare_enable(drvdata->clk);
1818 if (ret)
1819 return ret;
1820
1821 drvdata->cpu = pdata ? pdata->cpu : 0;
1822
1823 get_online_cpus();
1824 etmdrvdata[drvdata->cpu] = drvdata;
1825
1826 if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
1827 drvdata->os_unlock = true;
1828
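	/*
	 * Read the capability registers on the CPU this ETM is bound to;
	 * with CP14 access the registers are only reachable from that core.
	 */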
1829 if (smp_call_function_single(drvdata->cpu,
1830 etm_init_arch_data, drvdata, 1))
1831 dev_err(dev, "ETM arch init failed\n");
1832
1833 if (!etm_count++)
1834 register_hotcpu_notifier(&etm_cpu_notifier);
1835
1836 put_online_cpus();
1837
1838	if (!etm_arch_supported(drvdata->arch)) {
1839 ret = -EINVAL;
1840 goto err_arch_supported;
1841 }
1842 etm_init_default_data(drvdata);
1843
1844 clk_disable_unprepare(drvdata->clk);
1845
1846 desc->type = CORESIGHT_DEV_TYPE_SOURCE;
1847 desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
1848 desc->ops = &etm_cs_ops;
1849 desc->pdata = pdata;
1850 desc->dev = dev;
1851 desc->groups = coresight_etm_groups;
1852 drvdata->csdev = coresight_register(desc);
1853 if (IS_ERR(drvdata->csdev)) {
1854 ret = PTR_ERR(drvdata->csdev);
1855 goto err_arch_supported;
1856 }
1857
1858 dev_info(dev, "ETM initialized\n");
1859
1860 if (boot_enable) {
1861 coresight_enable(drvdata->csdev);
1862 drvdata->boot_enable = true;
1863 }
1864
1865 return 0;
1866
1867err_arch_supported:
1868 clk_disable_unprepare(drvdata->clk);
1869 if (--etm_count == 0)
1870 unregister_hotcpu_notifier(&etm_cpu_notifier);
1871 return ret;
1872}
1873
1874static int etm_remove(struct amba_device *adev)
1875{
1876 struct etm_drvdata *drvdata = amba_get_drvdata(adev);
1877
1878 coresight_unregister(drvdata->csdev);
1879 if (--etm_count == 0)
1880 unregister_hotcpu_notifier(&etm_cpu_notifier);
1881
1882 return 0;
1883}
1884
1885static struct amba_id etm_ids[] = {
1886 { /* ETM 3.3 */
1887 .id = 0x0003b921,
1888 .mask = 0x0003ffff,
1889 },
1890 { /* ETM 3.5 */
1891 .id = 0x0003b956,
1892 .mask = 0x0003ffff,
1893 },
1894 { /* PTM 1.0 */
1895 .id = 0x0003b950,
1896 .mask = 0x0003ffff,
1897 },
1898 { /* PTM 1.1 */
1899 .id = 0x0003b95f,
1900 .mask = 0x0003ffff,
1901 },
1902 { 0, 0},
1903};
1904
1905static struct amba_driver etm_driver = {
1906 .drv = {
1907 .name = "coresight-etm3x",
1908 .owner = THIS_MODULE,
1909 },
1910 .probe = etm_probe,
1911 .remove = etm_remove,
1912 .id_table = etm_ids,
1913};
1914
1915static int __init etm_init(void)
1916{
1917 return amba_driver_register(&etm_driver);
1918}
1919module_init(etm_init);
1920
1921static void __exit etm_exit(void)
1922{
1923 amba_driver_unregister(&etm_driver);
1924}
1925module_exit(etm_exit);
1926
1927MODULE_LICENSE("GPL v2");
1928MODULE_DESCRIPTION("CoreSight Program Flow Trace driver");
diff --git a/drivers/coresight/coresight-funnel.c b/drivers/coresight/coresight-funnel.c
new file mode 100644
index 000000000000..2108edffe1f4
--- /dev/null
+++ b/drivers/coresight/coresight-funnel.c
@@ -0,0 +1,268 @@
1/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/err.h>
19#include <linux/fs.h>
20#include <linux/slab.h>
21#include <linux/clk.h>
22#include <linux/coresight.h>
23#include <linux/amba/bus.h>
24
25#include "coresight-priv.h"
26
27#define FUNNEL_FUNCTL 0x000
28#define FUNNEL_PRICTL 0x004
29
30#define FUNNEL_HOLDTIME_MASK 0xf00
31#define FUNNEL_HOLDTIME_SHFT 0x8
32#define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT)
33
34/**
35 * struct funnel_drvdata - specifics associated to a funnel component
36 * @base: memory mapped base address for this component.
37 * @dev: the device entity associated to this component.
38 * @csdev: component vitals needed by the framework.
39 * @clk: the clock this component is associated to.
40 * @priority: port selection order.
41 */
42struct funnel_drvdata {
43 void __iomem *base;
44 struct device *dev;
45 struct coresight_device *csdev;
46 struct clk *clk;
47 unsigned long priority;
48};
49
50static void funnel_enable_hw(struct funnel_drvdata *drvdata, int port)
51{
52 u32 functl;
53
54 CS_UNLOCK(drvdata->base);
55
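	/* Program the hold time and turn on the requested input port. */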
56 functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
57 functl &= ~FUNNEL_HOLDTIME_MASK;
58 functl |= FUNNEL_HOLDTIME;
59 functl |= (1 << port);
60 writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
61 writel_relaxed(drvdata->priority, drvdata->base + FUNNEL_PRICTL);
62
63 CS_LOCK(drvdata->base);
64}
65
66static int funnel_enable(struct coresight_device *csdev, int inport,
67 int outport)
68{
69 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
70 int ret;
71
72 ret = clk_prepare_enable(drvdata->clk);
73 if (ret)
74 return ret;
75
76 funnel_enable_hw(drvdata, inport);
77
78 dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport);
79 return 0;
80}
81
82static void funnel_disable_hw(struct funnel_drvdata *drvdata, int inport)
83{
84 u32 functl;
85
86 CS_UNLOCK(drvdata->base);
87
88 functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
89 functl &= ~(1 << inport);
90 writel_relaxed(functl, drvdata->base + FUNNEL_FUNCTL);
91
92 CS_LOCK(drvdata->base);
93}
94
95static void funnel_disable(struct coresight_device *csdev, int inport,
96 int outport)
97{
98 struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
99
100 funnel_disable_hw(drvdata, inport);
101
102 clk_disable_unprepare(drvdata->clk);
103
104 dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport);
105}
106
107static const struct coresight_ops_link funnel_link_ops = {
108 .enable = funnel_enable,
109 .disable = funnel_disable,
110};
111
112static const struct coresight_ops funnel_cs_ops = {
113 .link_ops = &funnel_link_ops,
114};
115
116static ssize_t priority_show(struct device *dev,
117 struct device_attribute *attr, char *buf)
118{
119 struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
120 unsigned long val = drvdata->priority;
121
122 return sprintf(buf, "%#lx\n", val);
123}
124
125static ssize_t priority_store(struct device *dev,
126 struct device_attribute *attr,
127 const char *buf, size_t size)
128{
129 int ret;
130 unsigned long val;
131 struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
132
133 ret = kstrtoul(buf, 16, &val);
134 if (ret)
135 return ret;
136
137 drvdata->priority = val;
138 return size;
139}
140static DEVICE_ATTR_RW(priority);
141
142static u32 get_funnel_ctrl_hw(struct funnel_drvdata *drvdata)
143{
144 u32 functl;
145
146 CS_UNLOCK(drvdata->base);
147 functl = readl_relaxed(drvdata->base + FUNNEL_FUNCTL);
148 CS_LOCK(drvdata->base);
149
150 return functl;
151}
152
153static ssize_t funnel_ctrl_show(struct device *dev,
154 struct device_attribute *attr, char *buf)
155{
156 int ret;
157 u32 val;
158 struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent);
159
160 ret = clk_prepare_enable(drvdata->clk);
161 if (ret)
162 return ret;
163
164 val = get_funnel_ctrl_hw(drvdata);
165 clk_disable_unprepare(drvdata->clk);
166
167 return sprintf(buf, "%#x\n", val);
168}
169static DEVICE_ATTR_RO(funnel_ctrl);
170
171static struct attribute *coresight_funnel_attrs[] = {
172 &dev_attr_funnel_ctrl.attr,
173 &dev_attr_priority.attr,
174 NULL,
175};
176ATTRIBUTE_GROUPS(coresight_funnel);
177
178static int funnel_probe(struct amba_device *adev, const struct amba_id *id)
179{
180 void __iomem *base;
181 struct device *dev = &adev->dev;
182 struct coresight_platform_data *pdata = NULL;
183 struct funnel_drvdata *drvdata;
184 struct resource *res = &adev->res;
185 struct coresight_desc *desc;
186 struct device_node *np = adev->dev.of_node;
187
188 if (np) {
189 pdata = of_get_coresight_platform_data(dev, np);
190 if (IS_ERR(pdata))
191 return PTR_ERR(pdata);
192 adev->dev.platform_data = pdata;
193 }
194
195 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
196 if (!drvdata)
197 return -ENOMEM;
198
199 drvdata->dev = &adev->dev;
200 dev_set_drvdata(dev, drvdata);
201
202 /* Validity for the resource is already checked by the AMBA core */
203 base = devm_ioremap_resource(dev, res);
204 if (IS_ERR(base))
205 return PTR_ERR(base);
206
207 drvdata->base = base;
208
209 drvdata->clk = adev->pclk;
210
211 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
212 if (!desc)
213 return -ENOMEM;
214
215 desc->type = CORESIGHT_DEV_TYPE_LINK;
216 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG;
217 desc->ops = &funnel_cs_ops;
218 desc->pdata = pdata;
219 desc->dev = dev;
220 desc->groups = coresight_funnel_groups;
221 drvdata->csdev = coresight_register(desc);
222 if (IS_ERR(drvdata->csdev))
223 return PTR_ERR(drvdata->csdev);
224
225 dev_info(dev, "FUNNEL initialized\n");
226 return 0;
227}
228
229static int funnel_remove(struct amba_device *adev)
230{
231 struct funnel_drvdata *drvdata = amba_get_drvdata(adev);
232
233 coresight_unregister(drvdata->csdev);
234 return 0;
235}
236
237static struct amba_id funnel_ids[] = {
238 {
239 .id = 0x0003b908,
240 .mask = 0x0003ffff,
241 },
242 { 0, 0},
243};
244
245static struct amba_driver funnel_driver = {
246 .drv = {
247 .name = "coresight-funnel",
248 .owner = THIS_MODULE,
249 },
250 .probe = funnel_probe,
251 .remove = funnel_remove,
252 .id_table = funnel_ids,
253};
254
255static int __init funnel_init(void)
256{
257 return amba_driver_register(&funnel_driver);
258}
259module_init(funnel_init);
260
261static void __exit funnel_exit(void)
262{
263 amba_driver_unregister(&funnel_driver);
264}
265module_exit(funnel_exit);
266
267MODULE_LICENSE("GPL v2");
268MODULE_DESCRIPTION("CoreSight Funnel driver");
diff --git a/drivers/coresight/coresight-priv.h b/drivers/coresight/coresight-priv.h
new file mode 100644
index 000000000000..7b3372fca4f6
--- /dev/null
+++ b/drivers/coresight/coresight-priv.h
@@ -0,0 +1,63 @@
1/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#ifndef _CORESIGHT_PRIV_H
14#define _CORESIGHT_PRIV_H
15
16#include <linux/bitops.h>
17#include <linux/io.h>
18#include <linux/coresight.h>
19
20/*
21 * Coresight management registers (0xf00-0xfcc)
22 * 0xfa0 - 0xfa4: Management registers in PFTv1.0
23 * Trace registers in PFTv1.1
24 */
25#define CORESIGHT_ITCTRL 0xf00
26#define CORESIGHT_CLAIMSET 0xfa0
27#define CORESIGHT_CLAIMCLR 0xfa4
28#define CORESIGHT_LAR 0xfb0
29#define CORESIGHT_LSR 0xfb4
30#define CORESIGHT_AUTHSTATUS 0xfb8
31#define CORESIGHT_DEVID 0xfc8
32#define CORESIGHT_DEVTYPE 0xfcc
33
34#define TIMEOUT_US 100
35#define BMVAL(val, lsb, msb) ((val & GENMASK(msb, lsb)) >> lsb)
36
37static inline void CS_LOCK(void __iomem *addr)
38{
39 do {
40 /* Wait for things to settle */
41 mb();
42 writel_relaxed(0x0, addr + CORESIGHT_LAR);
43 } while (0);
44}
45
46static inline void CS_UNLOCK(void __iomem *addr)
47{
48 do {
49 writel_relaxed(CORESIGHT_UNLOCK, addr + CORESIGHT_LAR);
50 /* Make sure everyone has seen this */
51 mb();
52 } while (0);
53}
54
55#ifdef CONFIG_CORESIGHT_SOURCE_ETM3X
56extern int etm_readl_cp14(u32 off, unsigned int *val);
57extern int etm_writel_cp14(u32 off, u32 val);
58#else
59static inline int etm_readl_cp14(u32 off, unsigned int *val) { return 0; }
60static inline int etm_writel_cp14(u32 off, u32 val) { return 0; }
61#endif
62
63#endif
diff --git a/drivers/coresight/coresight-replicator.c b/drivers/coresight/coresight-replicator.c
new file mode 100644
index 000000000000..a2dfcf903551
--- /dev/null
+++ b/drivers/coresight/coresight-replicator.c
@@ -0,0 +1,137 @@
1/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/device.h>
17#include <linux/platform_device.h>
18#include <linux/io.h>
19#include <linux/err.h>
20#include <linux/slab.h>
21#include <linux/clk.h>
22#include <linux/of.h>
23#include <linux/coresight.h>
24
25#include "coresight-priv.h"
26
27/**
28 * struct replicator_drvdata - specifics associated to a replicator component
29 * @dev: the device entity associated with this component
30 * @csdev: component vitals needed by the framework
31 */
32struct replicator_drvdata {
33 struct device *dev;
34 struct coresight_device *csdev;
35};
36
37static int replicator_enable(struct coresight_device *csdev, int inport,
38 int outport)
39{
40 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
41
42 dev_info(drvdata->dev, "REPLICATOR enabled\n");
43 return 0;
44}
45
46static void replicator_disable(struct coresight_device *csdev, int inport,
47 int outport)
48{
49 struct replicator_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
50
51 dev_info(drvdata->dev, "REPLICATOR disabled\n");
52}
53
54static const struct coresight_ops_link replicator_link_ops = {
55 .enable = replicator_enable,
56 .disable = replicator_disable,
57};
58
59static const struct coresight_ops replicator_cs_ops = {
60 .link_ops = &replicator_link_ops,
61};
62
63static int replicator_probe(struct platform_device *pdev)
64{
65 struct device *dev = &pdev->dev;
66 struct coresight_platform_data *pdata = NULL;
67 struct replicator_drvdata *drvdata;
68 struct coresight_desc *desc;
69 struct device_node *np = pdev->dev.of_node;
70
71 if (np) {
72 pdata = of_get_coresight_platform_data(dev, np);
73 if (IS_ERR(pdata))
74 return PTR_ERR(pdata);
75 pdev->dev.platform_data = pdata;
76 }
77
78 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
79 if (!drvdata)
80 return -ENOMEM;
81
82 drvdata->dev = &pdev->dev;
83 platform_set_drvdata(pdev, drvdata);
84
85 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
86 if (!desc)
87 return -ENOMEM;
88
89 desc->type = CORESIGHT_DEV_TYPE_LINK;
90 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
91 desc->ops = &replicator_cs_ops;
92 desc->pdata = pdev->dev.platform_data;
93 desc->dev = &pdev->dev;
94 drvdata->csdev = coresight_register(desc);
95 if (IS_ERR(drvdata->csdev))
96 return PTR_ERR(drvdata->csdev);
97
98 dev_info(dev, "REPLICATOR initialized\n");
99 return 0;
100}
101
102static int replicator_remove(struct platform_device *pdev)
103{
104 struct replicator_drvdata *drvdata = platform_get_drvdata(pdev);
105
106 coresight_unregister(drvdata->csdev);
107 return 0;
108}
109
110static struct of_device_id replicator_match[] = {
111 {.compatible = "arm,coresight-replicator"},
112 {}
113};
114
115static struct platform_driver replicator_driver = {
116 .probe = replicator_probe,
117 .remove = replicator_remove,
118 .driver = {
119 .name = "coresight-replicator",
120 .of_match_table = replicator_match,
121 },
122};
123
124static int __init replicator_init(void)
125{
126 return platform_driver_register(&replicator_driver);
127}
128module_init(replicator_init);
129
130static void __exit replicator_exit(void)
131{
132 platform_driver_unregister(&replicator_driver);
133}
134module_exit(replicator_exit);
135
136MODULE_LICENSE("GPL v2");
137MODULE_DESCRIPTION("CoreSight Replicator driver");
diff --git a/drivers/coresight/coresight-tmc.c b/drivers/coresight/coresight-tmc.c
new file mode 100644
index 000000000000..ce2c293f1707
--- /dev/null
+++ b/drivers/coresight/coresight-tmc.c
@@ -0,0 +1,776 @@
1/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/io.h>
19#include <linux/err.h>
20#include <linux/fs.h>
21#include <linux/miscdevice.h>
22#include <linux/uaccess.h>
23#include <linux/slab.h>
24#include <linux/dma-mapping.h>
25#include <linux/spinlock.h>
26#include <linux/clk.h>
27#include <linux/of.h>
28#include <linux/coresight.h>
29#include <linux/amba/bus.h>
30
31#include "coresight-priv.h"
32
33#define TMC_RSZ 0x004
34#define TMC_STS 0x00c
35#define TMC_RRD 0x010
36#define TMC_RRP 0x014
37#define TMC_RWP 0x018
38#define TMC_TRG 0x01c
39#define TMC_CTL 0x020
40#define TMC_RWD 0x024
41#define TMC_MODE 0x028
42#define TMC_LBUFLEVEL 0x02c
43#define TMC_CBUFLEVEL 0x030
44#define TMC_BUFWM 0x034
45#define TMC_RRPHI 0x038
46#define TMC_RWPHI 0x03c
47#define TMC_AXICTL 0x110
48#define TMC_DBALO 0x118
49#define TMC_DBAHI 0x11c
50#define TMC_FFSR 0x300
51#define TMC_FFCR 0x304
52#define TMC_PSCR 0x308
53#define TMC_ITMISCOP0 0xee0
54#define TMC_ITTRFLIN 0xee8
55#define TMC_ITATBDATA0 0xeec
56#define TMC_ITATBCTR2 0xef0
57#define TMC_ITATBCTR1 0xef4
58#define TMC_ITATBCTR0 0xef8
59
60/* register description */
61/* TMC_CTL - 0x020 */
62#define TMC_CTL_CAPT_EN BIT(0)
63/* TMC_STS - 0x00C */
64#define TMC_STS_TRIGGERED BIT(1)
65/* TMC_AXICTL - 0x110 */
66#define TMC_AXICTL_PROT_CTL_B0 BIT(0)
67#define TMC_AXICTL_PROT_CTL_B1 BIT(1)
68#define TMC_AXICTL_SCT_GAT_MODE BIT(7)
69#define TMC_AXICTL_WR_BURST_LEN 0xF00
70/* TMC_FFCR - 0x304 */
71#define TMC_FFCR_EN_FMT BIT(0)
72#define TMC_FFCR_EN_TI BIT(1)
73#define TMC_FFCR_FON_FLIN BIT(4)
74#define TMC_FFCR_FON_TRIG_EVT BIT(5)
75#define TMC_FFCR_FLUSHMAN BIT(6)
76#define TMC_FFCR_TRIGON_TRIGIN BIT(8)
77#define TMC_FFCR_STOP_ON_FLUSH BIT(12)
78
79#define TMC_STS_TRIGGERED_BIT 2
80#define TMC_FFCR_FLUSHMAN_BIT 6
81
82enum tmc_config_type {
83 TMC_CONFIG_TYPE_ETB,
84 TMC_CONFIG_TYPE_ETR,
85 TMC_CONFIG_TYPE_ETF,
86};
87
88enum tmc_mode {
89 TMC_MODE_CIRCULAR_BUFFER,
90 TMC_MODE_SOFTWARE_FIFO,
91 TMC_MODE_HARDWARE_FIFO,
92};
93
94enum tmc_mem_intf_width {
95 TMC_MEM_INTF_WIDTH_32BITS = 0x2,
96 TMC_MEM_INTF_WIDTH_64BITS = 0x3,
97 TMC_MEM_INTF_WIDTH_128BITS = 0x4,
98 TMC_MEM_INTF_WIDTH_256BITS = 0x5,
99};
100
101/**
102 * struct tmc_drvdata - specifics associated with a TMC component
103 * @base: memory mapped base address for this component.
104 * @dev: the device entity associated with this component.
105 * @csdev: component vitals needed by the framework.
106 * @miscdev: specifics to handle "/dev/xyz.tmc" entry.
107 * @clk: the clock this component is associated with.
108 * @spinlock: serialises configuration and buffer accesses.
109 * @read_count: manages preparation of buffer for reading.
109 * @reading: true while user space is draining the buffer.
110 * @buf: area of memory where trace data get sent.
111 * @paddr: DMA start location in RAM.
112 * @vaddr: virtual representation of @paddr.
113 * @size: @buf size.
114 * @enable: this TMC is being used.
115 * @config_type: TMC variant, must be of type @tmc_config_type.
116 * @trigger_cntr: amount of words to store after a trigger.
117 */
118struct tmc_drvdata {
119 void __iomem *base;
120 struct device *dev;
121 struct coresight_device *csdev;
122 struct miscdevice miscdev;
123 struct clk *clk;
124 spinlock_t spinlock;
125 int read_count;
126 bool reading;
127 char *buf;
128 dma_addr_t paddr;
129 void __iomem *vaddr;
130 u32 size;
131 bool enable;
132 enum tmc_config_type config_type;
133 u32 trigger_cntr;
134};
135
136static void tmc_wait_for_ready(struct tmc_drvdata *drvdata)
137{
138 /* Ensure formatter, unformatter and hardware fifo are empty */
139 if (coresight_timeout(drvdata->base,
140 TMC_STS, TMC_STS_TRIGGERED_BIT, 1)) {
141 dev_err(drvdata->dev,
142 "timeout observed when probing at offset %#x\n",
143 TMC_STS);
144 }
145}
146
147static void tmc_flush_and_stop(struct tmc_drvdata *drvdata)
148{
149 u32 ffcr;
150
151 ffcr = readl_relaxed(drvdata->base + TMC_FFCR);
152 ffcr |= TMC_FFCR_STOP_ON_FLUSH;
153 writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
154 ffcr |= TMC_FFCR_FLUSHMAN;
155 writel_relaxed(ffcr, drvdata->base + TMC_FFCR);
156 /* Ensure flush completes */
157 if (coresight_timeout(drvdata->base,
158 TMC_FFCR, TMC_FFCR_FLUSHMAN_BIT, 0)) {
159 dev_err(drvdata->dev,
160 "timeout observed when probing at offset %#x\n",
161 TMC_FFCR);
162 }
163
164 tmc_wait_for_ready(drvdata);
165}
166
167static void tmc_enable_hw(struct tmc_drvdata *drvdata)
168{
169 writel_relaxed(TMC_CTL_CAPT_EN, drvdata->base + TMC_CTL);
170}
171
172static void tmc_disable_hw(struct tmc_drvdata *drvdata)
173{
174 writel_relaxed(0x0, drvdata->base + TMC_CTL);
175}
176
177static void tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
178{
179 /* Zero out the memory to help with debug */
180 memset(drvdata->buf, 0, drvdata->size);
181
182 CS_UNLOCK(drvdata->base);
183
184 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
185 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
186 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
187 TMC_FFCR_TRIGON_TRIGIN,
188 drvdata->base + TMC_FFCR);
189
190 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
191 tmc_enable_hw(drvdata);
192
193 CS_LOCK(drvdata->base);
194}
195
196static void tmc_etr_enable_hw(struct tmc_drvdata *drvdata)
197{
198 u32 axictl;
199
200 /* Zero out the memory to help with debug */
201 memset(drvdata->vaddr, 0, drvdata->size);
202
203 CS_UNLOCK(drvdata->base);
204
205 writel_relaxed(drvdata->size / 4, drvdata->base + TMC_RSZ);
206 writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
207
208 axictl = readl_relaxed(drvdata->base + TMC_AXICTL);
209 axictl |= TMC_AXICTL_WR_BURST_LEN;
210 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
211 axictl &= ~TMC_AXICTL_SCT_GAT_MODE;
212 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
213 axictl = (axictl &
214 ~(TMC_AXICTL_PROT_CTL_B0 | TMC_AXICTL_PROT_CTL_B1)) |
215 TMC_AXICTL_PROT_CTL_B1;
216 writel_relaxed(axictl, drvdata->base + TMC_AXICTL);
217
218 writel_relaxed(drvdata->paddr, drvdata->base + TMC_DBALO);
219 writel_relaxed(0x0, drvdata->base + TMC_DBAHI);
220 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
221 TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
222 TMC_FFCR_TRIGON_TRIGIN,
223 drvdata->base + TMC_FFCR);
224 writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
225 tmc_enable_hw(drvdata);
226
227 CS_LOCK(drvdata->base);
228}
229
230static void tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
231{
232 CS_UNLOCK(drvdata->base);
233
234 writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
235 writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
236 drvdata->base + TMC_FFCR);
237 writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
238 tmc_enable_hw(drvdata);
239
240 CS_LOCK(drvdata->base);
241}
242
243static int tmc_enable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
244{
245 int ret;
246 unsigned long flags;
247
248 ret = clk_prepare_enable(drvdata->clk);
249 if (ret)
250 return ret;
251
252 spin_lock_irqsave(&drvdata->spinlock, flags);
253 if (drvdata->reading) {
254 spin_unlock_irqrestore(&drvdata->spinlock, flags);
255 clk_disable_unprepare(drvdata->clk);
256 return -EBUSY;
257 }
258
259 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
260 tmc_etb_enable_hw(drvdata);
261 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
262 tmc_etr_enable_hw(drvdata);
263 } else {
264 if (mode == TMC_MODE_CIRCULAR_BUFFER)
265 tmc_etb_enable_hw(drvdata);
266 else
267 tmc_etf_enable_hw(drvdata);
268 }
269 drvdata->enable = true;
270 spin_unlock_irqrestore(&drvdata->spinlock, flags);
271
272 dev_info(drvdata->dev, "TMC enabled\n");
273 return 0;
274}
275
276static int tmc_enable_sink(struct coresight_device *csdev)
277{
278 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
279
280 return tmc_enable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
281}
282
283static int tmc_enable_link(struct coresight_device *csdev, int inport,
284 int outport)
285{
286 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
287
288 return tmc_enable(drvdata, TMC_MODE_HARDWARE_FIFO);
289}
290
291static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
292{
293 enum tmc_mem_intf_width memwidth;
294 u8 memwords;
295 char *bufp;
296 u32 read_data;
297 int i;
298
299 memwidth = BMVAL(readl_relaxed(drvdata->base + CORESIGHT_DEVID), 8, 10);
300 if (memwidth == TMC_MEM_INTF_WIDTH_32BITS)
301 memwords = 1;
302 else if (memwidth == TMC_MEM_INTF_WIDTH_64BITS)
303 memwords = 2;
304 else if (memwidth == TMC_MEM_INTF_WIDTH_128BITS)
305 memwords = 4;
306 else
307 memwords = 8;
308
309 bufp = drvdata->buf;
310 while (1) {
311 for (i = 0; i < memwords; i++) {
312 read_data = readl_relaxed(drvdata->base + TMC_RRD);
313 if (read_data == 0xFFFFFFFF)
314 return;
315 memcpy(bufp, &read_data, 4);
316 bufp += 4;
317 }
318 }
319}
320
321static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
322{
323 CS_UNLOCK(drvdata->base);
324
325 tmc_flush_and_stop(drvdata);
326 tmc_etb_dump_hw(drvdata);
327 tmc_disable_hw(drvdata);
328
329 CS_LOCK(drvdata->base);
330}
331
332static void tmc_etr_dump_hw(struct tmc_drvdata *drvdata)
333{
334 u32 rwp, val;
335
336 rwp = readl_relaxed(drvdata->base + TMC_RWP);
337 val = readl_relaxed(drvdata->base + TMC_STS);
338
339 /* If the buffer wrapped around, the oldest data starts at the write pointer */
340 if (val & BIT(0))
341 drvdata->buf = drvdata->vaddr + rwp - drvdata->paddr;
342 else
343 drvdata->buf = drvdata->vaddr;
344}
345
346static void tmc_etr_disable_hw(struct tmc_drvdata *drvdata)
347{
348 CS_UNLOCK(drvdata->base);
349
350 tmc_flush_and_stop(drvdata);
351 tmc_etr_dump_hw(drvdata);
352 tmc_disable_hw(drvdata);
353
354 CS_LOCK(drvdata->base);
355}
356
357static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
358{
359 CS_UNLOCK(drvdata->base);
360
361 tmc_flush_and_stop(drvdata);
362 tmc_disable_hw(drvdata);
363
364 CS_LOCK(drvdata->base);
365}
366
367static void tmc_disable(struct tmc_drvdata *drvdata, enum tmc_mode mode)
368{
369 unsigned long flags;
370
371 spin_lock_irqsave(&drvdata->spinlock, flags);
372 if (drvdata->reading)
373 goto out;
374
375 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
376 tmc_etb_disable_hw(drvdata);
377 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
378 tmc_etr_disable_hw(drvdata);
379 } else {
380 if (mode == TMC_MODE_CIRCULAR_BUFFER)
381 tmc_etb_disable_hw(drvdata);
382 else
383 tmc_etf_disable_hw(drvdata);
384 }
385out:
386 drvdata->enable = false;
387 spin_unlock_irqrestore(&drvdata->spinlock, flags);
388
389 clk_disable_unprepare(drvdata->clk);
390
391 dev_info(drvdata->dev, "TMC disabled\n");
392}
393
394static void tmc_disable_sink(struct coresight_device *csdev)
395{
396 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
397
398 tmc_disable(drvdata, TMC_MODE_CIRCULAR_BUFFER);
399}
400
401static void tmc_disable_link(struct coresight_device *csdev, int inport,
402 int outport)
403{
404 struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
405
406 tmc_disable(drvdata, TMC_MODE_HARDWARE_FIFO);
407}
408
409static const struct coresight_ops_sink tmc_sink_ops = {
410 .enable = tmc_enable_sink,
411 .disable = tmc_disable_sink,
412};
413
414static const struct coresight_ops_link tmc_link_ops = {
415 .enable = tmc_enable_link,
416 .disable = tmc_disable_link,
417};
418
419static const struct coresight_ops tmc_etb_cs_ops = {
420 .sink_ops = &tmc_sink_ops,
421};
422
423static const struct coresight_ops tmc_etr_cs_ops = {
424 .sink_ops = &tmc_sink_ops,
425};
426
427static const struct coresight_ops tmc_etf_cs_ops = {
428 .sink_ops = &tmc_sink_ops,
429 .link_ops = &tmc_link_ops,
430};
431
432static int tmc_read_prepare(struct tmc_drvdata *drvdata)
433{
434 int ret;
435 unsigned long flags;
436 enum tmc_mode mode;
437
438 spin_lock_irqsave(&drvdata->spinlock, flags);
439 if (!drvdata->enable)
440 goto out;
441
442 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
443 tmc_etb_disable_hw(drvdata);
444 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
445 tmc_etr_disable_hw(drvdata);
446 } else {
447 mode = readl_relaxed(drvdata->base + TMC_MODE);
448 if (mode == TMC_MODE_CIRCULAR_BUFFER) {
449 tmc_etb_disable_hw(drvdata);
450 } else {
451 ret = -ENODEV;
452 goto err;
453 }
454 }
455out:
456 drvdata->reading = true;
457 spin_unlock_irqrestore(&drvdata->spinlock, flags);
458
459 dev_info(drvdata->dev, "TMC read start\n");
460 return 0;
461err:
462 spin_unlock_irqrestore(&drvdata->spinlock, flags);
463 return ret;
464}
465
466static void tmc_read_unprepare(struct tmc_drvdata *drvdata)
467{
468 unsigned long flags;
469 enum tmc_mode mode;
470
471 spin_lock_irqsave(&drvdata->spinlock, flags);
472 if (!drvdata->enable)
473 goto out;
474
475 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
476 tmc_etb_enable_hw(drvdata);
477 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
478 tmc_etr_enable_hw(drvdata);
479 } else {
480 mode = readl_relaxed(drvdata->base + TMC_MODE);
481 if (mode == TMC_MODE_CIRCULAR_BUFFER)
482 tmc_etb_enable_hw(drvdata);
483 }
484out:
485 drvdata->reading = false;
486 spin_unlock_irqrestore(&drvdata->spinlock, flags);
487
488 dev_info(drvdata->dev, "TMC read end\n");
489}
490
491static int tmc_open(struct inode *inode, struct file *file)
492{
493 struct tmc_drvdata *drvdata = container_of(file->private_data,
494 struct tmc_drvdata, miscdev);
495 int ret = 0;
496
497 if (drvdata->read_count++)
498 goto out;
499
500 ret = tmc_read_prepare(drvdata);
501 if (ret)
502 return ret;
503out:
504 nonseekable_open(inode, file);
505
506 dev_dbg(drvdata->dev, "%s: successfully opened\n", __func__);
507 return 0;
508}
509
510static ssize_t tmc_read(struct file *file, char __user *data, size_t len,
511 loff_t *ppos)
512{
513 struct tmc_drvdata *drvdata = container_of(file->private_data,
514 struct tmc_drvdata, miscdev);
515 char *bufp = drvdata->buf + *ppos;
516
517 if (*ppos + len > drvdata->size)
518 len = drvdata->size - *ppos;
519
520 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
521 if (bufp == (char *)(drvdata->vaddr + drvdata->size))
522 bufp = drvdata->vaddr;
523 else if (bufp > (char *)(drvdata->vaddr + drvdata->size))
524 bufp -= drvdata->size;
525 if ((bufp + len) > (char *)(drvdata->vaddr + drvdata->size))
526 len = (char *)(drvdata->vaddr + drvdata->size) - bufp;
527 }
528
529 if (copy_to_user(data, bufp, len)) {
530 dev_dbg(drvdata->dev, "%s: copy_to_user failed\n", __func__);
531 return -EFAULT;
532 }
533
534 *ppos += len;
535
536 dev_dbg(drvdata->dev, "%s: %zu bytes copied, %d bytes left\n",
537 __func__, len, (int) (drvdata->size - *ppos));
538 return len;
539}
540
541static int tmc_release(struct inode *inode, struct file *file)
542{
543 struct tmc_drvdata *drvdata = container_of(file->private_data,
544 struct tmc_drvdata, miscdev);
545
546 if (--drvdata->read_count) {
547 if (drvdata->read_count < 0) {
548 dev_err(drvdata->dev, "mismatched close\n");
549 drvdata->read_count = 0;
550 }
551 goto out;
552 }
553
554 tmc_read_unprepare(drvdata);
555out:
556 dev_dbg(drvdata->dev, "%s: released\n", __func__);
557 return 0;
558}
559
560static const struct file_operations tmc_fops = {
561 .owner = THIS_MODULE,
562 .open = tmc_open,
563 .read = tmc_read,
564 .release = tmc_release,
565 .llseek = no_llseek,
566};
567
568static ssize_t trigger_cntr_show(struct device *dev,
569 struct device_attribute *attr, char *buf)
570{
571 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
572 unsigned long val = drvdata->trigger_cntr;
573
574 return sprintf(buf, "%#lx\n", val);
575}
576
577static ssize_t trigger_cntr_store(struct device *dev,
578 struct device_attribute *attr,
579 const char *buf, size_t size)
580{
581 int ret;
582 unsigned long val;
583 struct tmc_drvdata *drvdata = dev_get_drvdata(dev->parent);
584
585 ret = kstrtoul(buf, 16, &val);
586 if (ret)
587 return ret;
588
589 drvdata->trigger_cntr = val;
590 return size;
591}
592static DEVICE_ATTR_RW(trigger_cntr);
593
594static struct attribute *coresight_etb_attrs[] = {
595 &dev_attr_trigger_cntr.attr,
596 NULL,
597};
598ATTRIBUTE_GROUPS(coresight_etb);
599
600static struct attribute *coresight_etr_attrs[] = {
601 &dev_attr_trigger_cntr.attr,
602 NULL,
603};
604ATTRIBUTE_GROUPS(coresight_etr);
605
606static struct attribute *coresight_etf_attrs[] = {
607 &dev_attr_trigger_cntr.attr,
608 NULL,
609};
610ATTRIBUTE_GROUPS(coresight_etf);
611
612static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
613{
614 int ret = 0;
615 u32 devid;
616 void __iomem *base;
617 struct device *dev = &adev->dev;
618 struct coresight_platform_data *pdata = NULL;
619 struct tmc_drvdata *drvdata;
620 struct resource *res = &adev->res;
621 struct coresight_desc *desc;
622 struct device_node *np = adev->dev.of_node;
623
624 if (np) {
625 pdata = of_get_coresight_platform_data(dev, np);
626 if (IS_ERR(pdata))
627 return PTR_ERR(pdata);
628 adev->dev.platform_data = pdata;
629 }
630
631 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
632 if (!drvdata)
633 return -ENOMEM;
634
635 drvdata->dev = &adev->dev;
636 dev_set_drvdata(dev, drvdata);
637
638 /* Validity for the resource is already checked by the AMBA core */
639 base = devm_ioremap_resource(dev, res);
640 if (IS_ERR(base))
641 return PTR_ERR(base);
642
643 drvdata->base = base;
644
645 spin_lock_init(&drvdata->spinlock);
646
647 drvdata->clk = adev->pclk;
648 ret = clk_prepare_enable(drvdata->clk);
649 if (ret)
650 return ret;
651
652 devid = readl_relaxed(drvdata->base + CORESIGHT_DEVID);
653 drvdata->config_type = BMVAL(devid, 6, 7);
654
655 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
656 if (np)
657 ret = of_property_read_u32(np,
658 "arm,buffer-size",
659 &drvdata->size);
660 if (!np || ret)
661 drvdata->size = SZ_1M;
662 } else {
663 drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
664 }
665
666 clk_disable_unprepare(drvdata->clk);
667
668 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
669 drvdata->vaddr = dma_alloc_coherent(dev, drvdata->size,
670 &drvdata->paddr, GFP_KERNEL);
671 if (!drvdata->vaddr)
672 return -ENOMEM;
673
674 memset(drvdata->vaddr, 0, drvdata->size);
675 drvdata->buf = drvdata->vaddr;
676 } else {
677 drvdata->buf = devm_kzalloc(dev, drvdata->size, GFP_KERNEL);
678 if (!drvdata->buf)
679 return -ENOMEM;
680 }
681
682 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
683 if (!desc) {
684 ret = -ENOMEM;
685 goto err_devm_kzalloc;
686 }
687
688 desc->pdata = pdata;
689 desc->dev = dev;
690 desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_BUFFER;
691
692 if (drvdata->config_type == TMC_CONFIG_TYPE_ETB) {
693 desc->type = CORESIGHT_DEV_TYPE_SINK;
694 desc->ops = &tmc_etb_cs_ops;
695 desc->groups = coresight_etb_groups;
696 } else if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
697 desc->type = CORESIGHT_DEV_TYPE_SINK;
698 desc->ops = &tmc_etr_cs_ops;
699 desc->groups = coresight_etr_groups;
700 } else {
701 desc->type = CORESIGHT_DEV_TYPE_LINKSINK;
702 desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_FIFO;
703 desc->ops = &tmc_etf_cs_ops;
704 desc->groups = coresight_etf_groups;
705 }
706
707 drvdata->csdev = coresight_register(desc);
708 if (IS_ERR(drvdata->csdev)) {
709 ret = PTR_ERR(drvdata->csdev);
710 goto err_devm_kzalloc;
711 }
712
713 drvdata->miscdev.name = pdata->name;
714 drvdata->miscdev.minor = MISC_DYNAMIC_MINOR;
715 drvdata->miscdev.fops = &tmc_fops;
716 ret = misc_register(&drvdata->miscdev);
717 if (ret)
718 goto err_misc_register;
719
720 dev_info(dev, "TMC initialized\n");
721 return 0;
722
723err_misc_register:
724 coresight_unregister(drvdata->csdev);
725err_devm_kzalloc:
726 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
727 dma_free_coherent(dev, drvdata->size,
728 drvdata->vaddr, drvdata->paddr);
729 return ret;
730}
731
732static int tmc_remove(struct amba_device *adev)
733{
734 struct tmc_drvdata *drvdata = amba_get_drvdata(adev);
735
736 misc_deregister(&drvdata->miscdev);
737 coresight_unregister(drvdata->csdev);
738 if (drvdata->config_type == TMC_CONFIG_TYPE_ETR)
739 dma_free_coherent(drvdata->dev, drvdata->size,
740 drvdata->vaddr, drvdata->paddr);
741
742 return 0;
743}
744
745static struct amba_id tmc_ids[] = {
746 {
747 .id = 0x0003b961,
748 .mask = 0x0003ffff,
749 },
750 { 0, 0},
751};
752
753static struct amba_driver tmc_driver = {
754 .drv = {
755 .name = "coresight-tmc",
756 .owner = THIS_MODULE,
757 },
758 .probe = tmc_probe,
759 .remove = tmc_remove,
760 .id_table = tmc_ids,
761};
762
763static int __init tmc_init(void)
764{
765 return amba_driver_register(&tmc_driver);
766}
767module_init(tmc_init);
768
769static void __exit tmc_exit(void)
770{
771 amba_driver_unregister(&tmc_driver);
772}
773module_exit(tmc_exit);
774
775MODULE_LICENSE("GPL v2");
776MODULE_DESCRIPTION("CoreSight Trace Memory Controller driver");
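
A user-space sketch of draining the trace buffer through the misc device registered by tmc_probe(), once the sink has been activated through the core's "enable_sink" sysfs attribute and a session captured (not part of this patch). The kernel-doc above uses "/dev/xyz.tmc" as a placeholder; the real node name comes from the platform data, so the path below is an assumption.

/* Sketch only -- the device path is a placeholder, not a guaranteed name. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[4096];
	ssize_t n;
	int fd = open("/dev/xyz.tmc", O_RDONLY);	/* assumed node name */
	FILE *out = fopen("trace.bin", "w");

	if (fd < 0 || !out) {
		perror("tmc dump");
		return 1;
	}

	/* tmc_read() returns 0 once the whole buffer has been copied */
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		fwrite(buf, 1, (size_t)n, out);

	fclose(out);
	close(fd);
	return 0;
}
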
diff --git a/drivers/coresight/coresight-tpiu.c b/drivers/coresight/coresight-tpiu.c
new file mode 100644
index 000000000000..ae101082791a
--- /dev/null
+++ b/drivers/coresight/coresight-tpiu.c
@@ -0,0 +1,217 @@
1/* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/device.h>
17#include <linux/io.h>
18#include <linux/err.h>
19#include <linux/slab.h>
20#include <linux/clk.h>
21#include <linux/coresight.h>
22#include <linux/amba/bus.h>
23
24#include "coresight-priv.h"
25
26#define TPIU_SUPP_PORTSZ 0x000
27#define TPIU_CURR_PORTSZ 0x004
28#define TPIU_SUPP_TRIGMODES 0x100
29#define TPIU_TRIG_CNTRVAL 0x104
30#define TPIU_TRIG_MULT 0x108
31#define TPIU_SUPP_TESTPATM 0x200
32#define TPIU_CURR_TESTPATM 0x204
33#define TPIU_TEST_PATREPCNTR 0x208
34#define TPIU_FFSR 0x300
35#define TPIU_FFCR 0x304
36#define TPIU_FSYNC_CNTR 0x308
37#define TPIU_EXTCTL_INPORT 0x400
38#define TPIU_EXTCTL_OUTPORT 0x404
39#define TPIU_ITTRFLINACK 0xee4
40#define TPIU_ITTRFLIN 0xee8
41#define TPIU_ITATBDATA0 0xeec
42#define TPIU_ITATBCTR2 0xef0
43#define TPIU_ITATBCTR1 0xef4
44#define TPIU_ITATBCTR0 0xef8
45
46/** register definition **/
47/* FFCR - 0x304 */
48#define FFCR_FON_MAN BIT(6)
49
50/**
51 * @base: memory mapped base address for this component.
52 * @dev: the device entity associated to this component.
53 * @csdev: component vitals needed by the framework.
54 * @clk: the clock this component is associated to.
55 */
56struct tpiu_drvdata {
57 void __iomem *base;
58 struct device *dev;
59 struct coresight_device *csdev;
60 struct clk *clk;
61};
62
63static void tpiu_enable_hw(struct tpiu_drvdata *drvdata)
64{
65 CS_UNLOCK(drvdata->base);
66
67 /* TODO: fill this up */
68
69 CS_LOCK(drvdata->base);
70}
71
72static int tpiu_enable(struct coresight_device *csdev)
73{
74 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
75 int ret;
76
77 ret = clk_prepare_enable(drvdata->clk);
78 if (ret)
79 return ret;
80
81 tpiu_enable_hw(drvdata);
82
83 dev_info(drvdata->dev, "TPIU enabled\n");
84 return 0;
85}
86
87static void tpiu_disable_hw(struct tpiu_drvdata *drvdata)
88{
89 CS_UNLOCK(drvdata->base);
90
91 /* Clear formatter control register */
92 writel_relaxed(0x0, drvdata->base + TPIU_FFCR);
93 /* Generate manual flush */
94 writel_relaxed(FFCR_FON_MAN, drvdata->base + TPIU_FFCR);
95
96 CS_LOCK(drvdata->base);
97}
98
99static void tpiu_disable(struct coresight_device *csdev)
100{
101 struct tpiu_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
102
103 tpiu_disable_hw(drvdata);
104
105 clk_disable_unprepare(drvdata->clk);
106
107 dev_info(drvdata->dev, "TPIU disabled\n");
108}
109
110static const struct coresight_ops_sink tpiu_sink_ops = {
111 .enable = tpiu_enable,
112 .disable = tpiu_disable,
113};
114
115static const struct coresight_ops tpiu_cs_ops = {
116 .sink_ops = &tpiu_sink_ops,
117};
118
119static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
120{
121 int ret;
122 void __iomem *base;
123 struct device *dev = &adev->dev;
124 struct coresight_platform_data *pdata = NULL;
125 struct tpiu_drvdata *drvdata;
126 struct resource *res = &adev->res;
127 struct coresight_desc *desc;
128 struct device_node *np = adev->dev.of_node;
129
130 if (np) {
131 pdata = of_get_coresight_platform_data(dev, np);
132 if (IS_ERR(pdata))
133 return PTR_ERR(pdata);
134 adev->dev.platform_data = pdata;
135 }
136
137 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
138 if (!drvdata)
139 return -ENOMEM;
140
141 drvdata->dev = &adev->dev;
142 dev_set_drvdata(dev, drvdata);
143
144 /* Validity for the resource is already checked by the AMBA core */
145 base = devm_ioremap_resource(dev, res);
146 if (IS_ERR(base))
147 return PTR_ERR(base);
148
149 drvdata->base = base;
150
151 drvdata->clk = adev->pclk;
152 ret = clk_prepare_enable(drvdata->clk);
153 if (ret)
154 return ret;
155
156 /* Disable tpiu to support older devices */
157 tpiu_disable_hw(drvdata);
158
159 clk_disable_unprepare(drvdata->clk);
160
161 desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
162 if (!desc)
163 return -ENOMEM;
164
165 desc->type = CORESIGHT_DEV_TYPE_SINK;
166 desc->subtype.sink_subtype = CORESIGHT_DEV_SUBTYPE_SINK_PORT;
167 desc->ops = &tpiu_cs_ops;
168 desc->pdata = pdata;
169 desc->dev = dev;
170 drvdata->csdev = coresight_register(desc);
171 if (IS_ERR(drvdata->csdev))
172 return PTR_ERR(drvdata->csdev);
173
174 dev_info(dev, "TPIU initialized\n");
175 return 0;
176}
177
178static int tpiu_remove(struct amba_device *adev)
179{
180 struct tpiu_drvdata *drvdata = amba_get_drvdata(adev);
181
182 coresight_unregister(drvdata->csdev);
183 return 0;
184}
185
186static struct amba_id tpiu_ids[] = {
187 {
188 .id = 0x0003b912,
189 .mask = 0x0003ffff,
190 },
191 { 0, 0},
192};
193
194static struct amba_driver tpiu_driver = {
195 .drv = {
196 .name = "coresight-tpiu",
197 .owner = THIS_MODULE,
198 },
199 .probe = tpiu_probe,
200 .remove = tpiu_remove,
201 .id_table = tpiu_ids,
202};
203
204static int __init tpiu_init(void)
205{
206 return amba_driver_register(&tpiu_driver);
207}
208module_init(tpiu_init);
209
210static void __exit tpiu_exit(void)
211{
212 amba_driver_unregister(&tpiu_driver);
213}
214module_exit(tpiu_exit);
215
216MODULE_LICENSE("GPL v2");
217MODULE_DESCRIPTION("CoreSight Trace Port Interface Unit driver");
diff --git a/drivers/coresight/coresight.c b/drivers/coresight/coresight.c
new file mode 100644
index 000000000000..6e0181f84425
--- /dev/null
+++ b/drivers/coresight/coresight.c
@@ -0,0 +1,717 @@
1/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/kernel.h>
14#include <linux/module.h>
15#include <linux/init.h>
16#include <linux/types.h>
17#include <linux/device.h>
18#include <linux/io.h>
19#include <linux/err.h>
20#include <linux/export.h>
21#include <linux/slab.h>
22#include <linux/mutex.h>
23#include <linux/clk.h>
24#include <linux/coresight.h>
25#include <linux/of_platform.h>
26#include <linux/delay.h>
27
28#include "coresight-priv.h"
29
30static DEFINE_MUTEX(coresight_mutex);
31
32static int coresight_id_match(struct device *dev, void *data)
33{
34 int trace_id, i_trace_id;
35 struct coresight_device *csdev, *i_csdev;
36
37 csdev = data;
38 i_csdev = to_coresight_device(dev);
39
40 /*
41 * No need to care about oneself and components that are not
42 * sources or not enabled
43 */
44 if (i_csdev == csdev || !i_csdev->enable ||
45 i_csdev->type != CORESIGHT_DEV_TYPE_SOURCE)
46 return 0;
47
48 /* Get the trace ID for both components */
49 trace_id = source_ops(csdev)->trace_id(csdev);
50 i_trace_id = source_ops(i_csdev)->trace_id(i_csdev);
51
52 /* A single match means the trace ID is already in use */
53 if (trace_id == i_trace_id)
54 return 1;
55
56 return 0;
57}
58
59static int coresight_source_is_unique(struct coresight_device *csdev)
60{
61 int trace_id = source_ops(csdev)->trace_id(csdev);
62
63 /* this shouldn't happen */
64 if (trace_id < 0)
65 return 0;
66
67 return !bus_for_each_dev(&coresight_bustype, NULL,
68 csdev, coresight_id_match);
69}
70
71static int coresight_find_link_inport(struct coresight_device *csdev)
72{
73 int i;
74 struct coresight_device *parent;
75 struct coresight_connection *conn;
76
77 parent = container_of(csdev->path_link.next,
78 struct coresight_device, path_link);
79
80 for (i = 0; i < parent->nr_outport; i++) {
81 conn = &parent->conns[i];
82 if (conn->child_dev == csdev)
83 return conn->child_port;
84 }
85
86 dev_err(&csdev->dev, "couldn't find inport, parent: %s, child: %s\n",
87 dev_name(&parent->dev), dev_name(&csdev->dev));
88
89 return 0;
90}
91
92static int coresight_find_link_outport(struct coresight_device *csdev)
93{
94 int i;
95 struct coresight_device *child;
96 struct coresight_connection *conn;
97
98 child = container_of(csdev->path_link.prev,
99 struct coresight_device, path_link);
100
101 for (i = 0; i < csdev->nr_outport; i++) {
102 conn = &csdev->conns[i];
103 if (conn->child_dev == child)
104 return conn->outport;
105 }
106
107 dev_err(&csdev->dev, "couldn't find outport, parent: %s, child: %s\n",
108 dev_name(&csdev->dev), dev_name(&child->dev));
109
110 return 0;
111}
112
113static int coresight_enable_sink(struct coresight_device *csdev)
114{
115 int ret;
116
117 if (!csdev->enable) {
118 if (sink_ops(csdev)->enable) {
119 ret = sink_ops(csdev)->enable(csdev);
120 if (ret)
121 return ret;
122 }
123 csdev->enable = true;
124 }
125
126 atomic_inc(csdev->refcnt);
127
128 return 0;
129}
130
131static void coresight_disable_sink(struct coresight_device *csdev)
132{
133 if (atomic_dec_return(csdev->refcnt) == 0) {
134 if (sink_ops(csdev)->disable) {
135 sink_ops(csdev)->disable(csdev);
136 csdev->enable = false;
137 }
138 }
139}
140
141static int coresight_enable_link(struct coresight_device *csdev)
142{
143 int ret;
144 int link_subtype;
145 int refport, inport, outport;
146
147 inport = coresight_find_link_inport(csdev);
148 outport = coresight_find_link_outport(csdev);
149 link_subtype = csdev->subtype.link_subtype;
150
151 if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
152 refport = inport;
153 else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
154 refport = outport;
155 else
156 refport = 0;
157
158 if (atomic_inc_return(&csdev->refcnt[refport]) == 1) {
159 if (link_ops(csdev)->enable) {
160 ret = link_ops(csdev)->enable(csdev, inport, outport);
161 if (ret)
162 return ret;
163 }
164 }
165
166 csdev->enable = true;
167
168 return 0;
169}
170
171static void coresight_disable_link(struct coresight_device *csdev)
172{
173 int i, nr_conns;
174 int link_subtype;
175 int refport, inport, outport;
176
177 inport = coresight_find_link_inport(csdev);
178 outport = coresight_find_link_outport(csdev);
179 link_subtype = csdev->subtype.link_subtype;
180
181 if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG) {
182 refport = inport;
183 nr_conns = csdev->nr_inport;
184 } else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT) {
185 refport = outport;
186 nr_conns = csdev->nr_outport;
187 } else {
188 refport = 0;
189 nr_conns = 1;
190 }
191
192 if (atomic_dec_return(&csdev->refcnt[refport]) == 0) {
193 if (link_ops(csdev)->disable)
194 link_ops(csdev)->disable(csdev, inport, outport);
195 }
196
197 for (i = 0; i < nr_conns; i++)
198 if (atomic_read(&csdev->refcnt[i]) != 0)
199 return;
200
201 csdev->enable = false;
202}
203
204static int coresight_enable_source(struct coresight_device *csdev)
205{
206 int ret;
207
208 if (!coresight_source_is_unique(csdev)) {
209 dev_warn(&csdev->dev, "traceID %d not unique\n",
210 source_ops(csdev)->trace_id(csdev));
211 return -EINVAL;
212 }
213
214 if (!csdev->enable) {
215 if (source_ops(csdev)->enable) {
216 ret = source_ops(csdev)->enable(csdev);
217 if (ret)
218 return ret;
219 }
220 csdev->enable = true;
221 }
222
223 atomic_inc(csdev->refcnt);
224
225 return 0;
226}
227
228static void coresight_disable_source(struct coresight_device *csdev)
229{
230 if (atomic_dec_return(csdev->refcnt) == 0) {
231 if (source_ops(csdev)->disable) {
232 source_ops(csdev)->disable(csdev);
233 csdev->enable = false;
234 }
235 }
236}
237
238static int coresight_enable_path(struct list_head *path)
239{
240 int ret = 0;
241 struct coresight_device *cd;
242
243 list_for_each_entry(cd, path, path_link) {
244 if (cd == list_first_entry(path, struct coresight_device,
245 path_link)) {
246 ret = coresight_enable_sink(cd);
247 } else if (list_is_last(&cd->path_link, path)) {
248 /*
249 * Don't enable the source just yet - this needs to
250 * happen at the very end, when all links and the sink
251 * along the path have been configured properly.
252 */
253 ;
254 } else {
255 ret = coresight_enable_link(cd);
256 }
257 if (ret)
258 goto err;
259 }
260
261 return 0;
262err:
263 list_for_each_entry_continue_reverse(cd, path, path_link) {
264 if (cd == list_first_entry(path, struct coresight_device,
265 path_link)) {
266 coresight_disable_sink(cd);
267 } else if (list_is_last(&cd->path_link, path)) {
268 ;
269 } else {
270 coresight_disable_link(cd);
271 }
272 }
273
274 return ret;
275}
276
277static int coresight_disable_path(struct list_head *path)
278{
279 struct coresight_device *cd;
280
281 list_for_each_entry_reverse(cd, path, path_link) {
282 if (cd == list_first_entry(path, struct coresight_device,
283 path_link)) {
284 coresight_disable_sink(cd);
285 } else if (list_is_last(&cd->path_link, path)) {
286 /*
287 * The source has already been stopped, no need
288 * to do it again here.
289 */
290 ;
291 } else {
292 coresight_disable_link(cd);
293 }
294 }
295
296 return 0;
297}
298
299static int coresight_build_paths(struct coresight_device *csdev,
300 struct list_head *path,
301 bool enable)
302{
303 int i, ret = -EINVAL;
304 struct coresight_connection *conn;
305
306 list_add(&csdev->path_link, path);
307
308 if (csdev->type == CORESIGHT_DEV_TYPE_SINK && csdev->activated) {
309 if (enable)
310 ret = coresight_enable_path(path);
311 else
312 ret = coresight_disable_path(path);
313 } else {
314 for (i = 0; i < csdev->nr_outport; i++) {
315 conn = &csdev->conns[i];
316 if (coresight_build_paths(conn->child_dev,
317 path, enable) == 0)
318 ret = 0;
319 }
320 }
321
322 if (list_first_entry(path, struct coresight_device, path_link) != csdev)
323 dev_err(&csdev->dev, "wrong device in %s\n", __func__);
324
325 list_del(&csdev->path_link);
326
327 return ret;
328}
329
330int coresight_enable(struct coresight_device *csdev)
331{
332 int ret = 0;
333 LIST_HEAD(path);
334
335 mutex_lock(&coresight_mutex);
336 if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
337 ret = -EINVAL;
338 dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
339 goto out;
340 }
341 if (csdev->enable)
342 goto out;
343
344 if (coresight_build_paths(csdev, &path, true)) {
345 dev_err(&csdev->dev, "building path(s) failed\n");
346 goto out;
347 }
348
349 if (coresight_enable_source(csdev))
350 dev_err(&csdev->dev, "source enable failed\n");
351out:
352 mutex_unlock(&coresight_mutex);
353 return ret;
354}
355EXPORT_SYMBOL_GPL(coresight_enable);
356
357void coresight_disable(struct coresight_device *csdev)
358{
359 LIST_HEAD(path);
360
361 mutex_lock(&coresight_mutex);
362 if (csdev->type != CORESIGHT_DEV_TYPE_SOURCE) {
363 dev_err(&csdev->dev, "wrong device type in %s\n", __func__);
364 goto out;
365 }
366 if (!csdev->enable)
367 goto out;
368
369 coresight_disable_source(csdev);
370 if (coresight_build_paths(csdev, &path, false))
371 dev_err(&csdev->dev, "releasing path(s) failed\n");
372
373out:
374 mutex_unlock(&coresight_mutex);
375}
376EXPORT_SYMBOL_GPL(coresight_disable);
377
378static ssize_t enable_sink_show(struct device *dev,
379 struct device_attribute *attr, char *buf)
380{
381 struct coresight_device *csdev = to_coresight_device(dev);
382
383 return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->activated);
384}
385
386static ssize_t enable_sink_store(struct device *dev,
387 struct device_attribute *attr,
388 const char *buf, size_t size)
389{
390 int ret;
391 unsigned long val;
392 struct coresight_device *csdev = to_coresight_device(dev);
393
394 ret = kstrtoul(buf, 10, &val);
395 if (ret)
396 return ret;
397
398 if (val)
399 csdev->activated = true;
400 else
401 csdev->activated = false;
402
403 return size;
404
405}
406static DEVICE_ATTR_RW(enable_sink);
407
408static ssize_t enable_source_show(struct device *dev,
409 struct device_attribute *attr, char *buf)
410{
411 struct coresight_device *csdev = to_coresight_device(dev);
412
413 return scnprintf(buf, PAGE_SIZE, "%u\n", (unsigned)csdev->enable);
414}
415
416static ssize_t enable_source_store(struct device *dev,
417 struct device_attribute *attr,
418 const char *buf, size_t size)
419{
420 int ret = 0;
421 unsigned long val;
422 struct coresight_device *csdev = to_coresight_device(dev);
423
424 ret = kstrtoul(buf, 10, &val);
425 if (ret)
426 return ret;
427
428 if (val) {
429 ret = coresight_enable(csdev);
430 if (ret)
431 return ret;
432 } else {
433 coresight_disable(csdev);
434 }
435
436 return size;
437}
438static DEVICE_ATTR_RW(enable_source);
439
440static struct attribute *coresight_sink_attrs[] = {
441 &dev_attr_enable_sink.attr,
442 NULL,
443};
444ATTRIBUTE_GROUPS(coresight_sink);
445
446static struct attribute *coresight_source_attrs[] = {
447 &dev_attr_enable_source.attr,
448 NULL,
449};
450ATTRIBUTE_GROUPS(coresight_source);
451
452static struct device_type coresight_dev_type[] = {
453 {
454 .name = "none",
455 },
456 {
457 .name = "sink",
458 .groups = coresight_sink_groups,
459 },
460 {
461 .name = "link",
462 },
463 {
464 .name = "linksink",
465 .groups = coresight_sink_groups,
466 },
467 {
468 .name = "source",
469 .groups = coresight_source_groups,
470 },
471};
472
473static void coresight_device_release(struct device *dev)
474{
475 struct coresight_device *csdev = to_coresight_device(dev);
476
477 kfree(csdev);
478}
479
480static int coresight_orphan_match(struct device *dev, void *data)
481{
482 int i;
483 bool still_orphan = false;
484 struct coresight_device *csdev, *i_csdev;
485 struct coresight_connection *conn;
486
487 csdev = data;
488 i_csdev = to_coresight_device(dev);
489
490 /* No need to check oneself */
491 if (csdev == i_csdev)
492 return 0;
493
494 /* Move on to another component if no connection is orphan */
495 if (!i_csdev->orphan)
496 return 0;
497 /*
498 * Cycle through all the connections of that component. If we find
499 * an orphan connection whose name matches @csdev, link it.
500 */
501 for (i = 0; i < i_csdev->nr_outport; i++) {
502 conn = &i_csdev->conns[i];
503
504 /* We have found at least one orphan connection */
505 if (conn->child_dev == NULL) {
506 /* Does it match this newly added device? */
507 if (!strcmp(dev_name(&csdev->dev), conn->child_name))
508 conn->child_dev = csdev;
509 } else {
510 /* Too bad, this component still has an orphan */
511 still_orphan = true;
512 }
513 }
514
515 i_csdev->orphan = still_orphan;
516
517 /*
518 * Returning '0' ensures that all known components on the
519 * bus will be checked.
520 */
521 return 0;
522}
523
524static void coresight_fixup_orphan_conns(struct coresight_device *csdev)
525{
526 /*
527 * No need to check for a return value as orphan connection(s)
528 * are hooked-up with each newly added component.
529 */
530 bus_for_each_dev(&coresight_bustype, NULL,
531 csdev, coresight_orphan_match);
532}
533
534
535static int coresight_name_match(struct device *dev, void *data)
536{
537 char *to_match;
538 struct coresight_device *i_csdev;
539
540 to_match = data;
541 i_csdev = to_coresight_device(dev);
542
543 if (!strcmp(to_match, dev_name(&i_csdev->dev)))
544 return 1;
545
546 return 0;
547}
548
549static void coresight_fixup_device_conns(struct coresight_device *csdev)
550{
551 int i;
552 struct device *dev = NULL;
553 struct coresight_connection *conn;
554
555 for (i = 0; i < csdev->nr_outport; i++) {
556 conn = &csdev->conns[i];
557 dev = bus_find_device(&coresight_bustype, NULL,
558 (void *)conn->child_name,
559 coresight_name_match);
560
561 if (dev) {
562 conn->child_dev = to_coresight_device(dev);
563 } else {
564 csdev->orphan = true;
565 conn->child_dev = NULL;
566 }
567 }
568}
569
570/**
571 * coresight_timeout - loop until a bit has changed to a specific state.
572 * @addr: base address of the area of interest.
573 * @offset: offset of the register of interest, relative to @addr.
574 * @position: the position of the bit of interest.
575 * @value: the value the bit should have.
576 *
577 * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if
578 * TIMEOUT_US has elapsed, whichever happens first.
579 */
580
581int coresight_timeout(void __iomem *addr, u32 offset, int position, int value)
582{
583 int i;
584 u32 val;
585
586 for (i = TIMEOUT_US; i > 0; i--) {
587 val = __raw_readl(addr + offset);
588 /* waiting on the bit to go from 0 to 1 */
589 if (value) {
590 if (val & BIT(position))
591 return 0;
592 /* waiting on the bit to go from 1 to 0 */
593 } else {
594 if (!(val & BIT(position)))
595 return 0;
596 }
597
598 /*
599 * Delay is arbitrary - the specification doesn't say how long
600 * we are expected to wait. Extra check required to make sure
601 * we don't wait needlessly on the last iteration.
602 */
603 if (i - 1)
604 udelay(1);
605 }
606
607 return -EAGAIN;
608}
609
610struct bus_type coresight_bustype = {
611 .name = "coresight",
612};
613
614static int __init coresight_init(void)
615{
616 return bus_register(&coresight_bustype);
617}
618postcore_initcall(coresight_init);
619
620struct coresight_device *coresight_register(struct coresight_desc *desc)
621{
622 int i;
623 int ret;
624 int link_subtype;
625 int nr_refcnts = 1;
626 atomic_t *refcnts = NULL;
627 struct coresight_device *csdev;
628 struct coresight_connection *conns;
629
630 csdev = kzalloc(sizeof(*csdev), GFP_KERNEL);
631 if (!csdev) {
632 ret = -ENOMEM;
633 goto err_kzalloc_csdev;
634 }
635
636 if (desc->type == CORESIGHT_DEV_TYPE_LINK ||
637 desc->type == CORESIGHT_DEV_TYPE_LINKSINK) {
638 link_subtype = desc->subtype.link_subtype;
639
640 if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_MERG)
641 nr_refcnts = desc->pdata->nr_inport;
642 else if (link_subtype == CORESIGHT_DEV_SUBTYPE_LINK_SPLIT)
643 nr_refcnts = desc->pdata->nr_outport;
644 }
645
646 refcnts = kcalloc(nr_refcnts, sizeof(*refcnts), GFP_KERNEL);
647 if (!refcnts) {
648 ret = -ENOMEM;
649 goto err_kzalloc_refcnts;
650 }
651
652 csdev->refcnt = refcnts;
653
654 csdev->nr_inport = desc->pdata->nr_inport;
655 csdev->nr_outport = desc->pdata->nr_outport;
656 conns = kcalloc(csdev->nr_outport, sizeof(*conns), GFP_KERNEL);
657 if (!conns) {
658 ret = -ENOMEM;
659 goto err_kzalloc_conns;
660 }
661
662 for (i = 0; i < csdev->nr_outport; i++) {
663 conns[i].outport = desc->pdata->outports[i];
664 conns[i].child_name = desc->pdata->child_names[i];
665 conns[i].child_port = desc->pdata->child_ports[i];
666 }
667
668 csdev->conns = conns;
669
670 csdev->type = desc->type;
671 csdev->subtype = desc->subtype;
672 csdev->ops = desc->ops;
673 csdev->orphan = false;
674
675 csdev->dev.type = &coresight_dev_type[desc->type];
676 csdev->dev.groups = desc->groups;
677 csdev->dev.parent = desc->dev;
678 csdev->dev.release = coresight_device_release;
679 csdev->dev.bus = &coresight_bustype;
680 dev_set_name(&csdev->dev, "%s", desc->pdata->name);
681
682 ret = device_register(&csdev->dev);
683 if (ret)
684 goto err_device_register;
685
686 mutex_lock(&coresight_mutex);
687
688 coresight_fixup_device_conns(csdev);
689 coresight_fixup_orphan_conns(csdev);
690
691 mutex_unlock(&coresight_mutex);
692
693 return csdev;
694
695err_device_register:
696 kfree(conns);
697err_kzalloc_conns:
698 kfree(refcnts);
699err_kzalloc_refcnts:
700 kfree(csdev);
701err_kzalloc_csdev:
702 return ERR_PTR(ret);
703}
704EXPORT_SYMBOL_GPL(coresight_register);
705
706void coresight_unregister(struct coresight_device *csdev)
707{
708 mutex_lock(&coresight_mutex);
709
710 kfree(csdev->conns);
711 device_unregister(&csdev->dev);
712
713 mutex_unlock(&coresight_mutex);
714}
715EXPORT_SYMBOL_GPL(coresight_unregister);
716
717MODULE_LICENSE("GPL v2");
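
A kernel-side sketch (not part of this patch) of how another driver could drive a trace session with the coresight_enable()/coresight_disable() pair exported above; my_start_session() is a made-up name, and csdev is assumed to be the handle coresight_register() returned for the source.

/* Sketch only -- my_start_session() is hypothetical. */
#include <linux/coresight.h>
#include <linux/device.h>
#include <linux/printk.h>

static int my_start_session(struct coresight_device *csdev)
{
	int ret;

	/* Builds a path to an activated sink, then enables the source */
	ret = coresight_enable(csdev);
	if (ret) {
		pr_err("coresight: enabling %s failed (%d)\n",
		       dev_name(&csdev->dev), ret);
		return ret;
	}

	/* ... run the workload being traced ... */

	/* Disables the source, then releases the links and the sink */
	coresight_disable(csdev);
	return 0;
}
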
diff --git a/drivers/coresight/of_coresight.c b/drivers/coresight/of_coresight.c
new file mode 100644
index 000000000000..5030c0734508
--- /dev/null
+++ b/drivers/coresight/of_coresight.c
@@ -0,0 +1,204 @@
1/* Copyright (c) 2012, The Linux Foundation. All rights reserved.
2 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
13#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/err.h>
16#include <linux/slab.h>
17#include <linux/clk.h>
18#include <linux/of.h>
19#include <linux/of_address.h>
20#include <linux/of_graph.h>
21#include <linux/of_platform.h>
22#include <linux/platform_device.h>
23#include <linux/amba/bus.h>
24#include <linux/coresight.h>
25#include <asm/smp_plat.h>
26
27
28static int of_dev_node_match(struct device *dev, void *data)
29{
30 return dev->of_node == data;
31}
32
33static struct device *
34of_coresight_get_endpoint_device(struct device_node *endpoint)
35{
36 struct device *dev = NULL;
37
38 /*
39 * If we have a non-configurable replicator, it will be found on the
40 * platform bus.
41 */
42 dev = bus_find_device(&platform_bus_type, NULL,
43 endpoint, of_dev_node_match);
44 if (dev)
45 return dev;
46
47 /*
48 * We have a configurable component - circle through the AMBA bus
49 * looking for the device that matches the endpoint node.
50 */
51 return bus_find_device(&amba_bustype, NULL,
52 endpoint, of_dev_node_match);
53}
54
55static struct device_node *of_get_coresight_endpoint(
56 const struct device_node *parent, struct device_node *prev)
57{
58 struct device_node *node = of_graph_get_next_endpoint(parent, prev);
59
60 of_node_put(prev);
61 return node;
62}
63
64static void of_coresight_get_ports(struct device_node *node,
65 int *nr_inport, int *nr_outport)
66{
67 struct device_node *ep = NULL;
68 int in = 0, out = 0;
69
70 do {
71 ep = of_get_coresight_endpoint(node, ep);
72 if (!ep)
73 break;
74
75 if (of_property_read_bool(ep, "slave-mode"))
76 in++;
77 else
78 out++;
79
80 } while (ep);
81
82 *nr_inport = in;
83 *nr_outport = out;
84}
85
86static int of_coresight_alloc_memory(struct device *dev,
87 struct coresight_platform_data *pdata)
88{
89 /* List of output ports on this component */
90 pdata->outports = devm_kzalloc(dev, pdata->nr_outport *
91 sizeof(*pdata->outports),
92 GFP_KERNEL);
93 if (!pdata->outports)
94 return -ENOMEM;
95
96 /* Children connected to this component via @outport */
97 pdata->child_names = devm_kzalloc(dev, pdata->nr_outport *
98 sizeof(*pdata->child_names),
99 GFP_KERNEL);
100 if (!pdata->child_names)
101 return -ENOMEM;
102
103 /* Port number on the child this component is connected to */
104 pdata->child_ports = devm_kzalloc(dev, pdata->nr_outport *
105 sizeof(*pdata->child_ports),
106 GFP_KERNEL);
107 if (!pdata->child_ports)
108 return -ENOMEM;
109
110 return 0;
111}
112
113struct coresight_platform_data *of_get_coresight_platform_data(
114 struct device *dev, struct device_node *node)
115{
116 int i = 0, ret = 0;
117 struct coresight_platform_data *pdata;
118 struct of_endpoint endpoint, rendpoint;
119 struct device *rdev;
120 struct device_node *cpu;
121 struct device_node *ep = NULL;
122 struct device_node *rparent = NULL;
123 struct device_node *rport = NULL;
124
125 pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
126 if (!pdata)
127 return ERR_PTR(-ENOMEM);
128
129 /* Use device name as debugfs handle */
130 pdata->name = dev_name(dev);
131
132 /* Get the number of input and output ports for this component */
133 of_coresight_get_ports(node, &pdata->nr_inport, &pdata->nr_outport);
134
135 if (pdata->nr_outport) {
136 ret = of_coresight_alloc_memory(dev, pdata);
137 if (ret)
138 return ERR_PTR(ret);
139
140 /* Iterate through each port to discover topology */
141 do {
142 /* Get a handle on a port */
143 ep = of_get_coresight_endpoint(node, ep);
144 if (!ep)
145 break;
146
147 /*
148 * No need to deal with input ports here; processing the
149 * output ports will deal with them.
150 */
151 if (of_find_property(ep, "slave-mode", NULL))
152 continue;
153
154 /* Get a handle on the local endpoint */
155 ret = of_graph_parse_endpoint(ep, &endpoint);
156
157 if (ret)
158 continue;
159
160 /* The local out port number */
161 pdata->outports[i] = endpoint.id;
162
163 /*
164 * Get a handle on the remote port and parent
165 * attached to it.
166 */
167 rparent = of_graph_get_remote_port_parent(ep);
168 rport = of_graph_get_remote_port(ep);
169
170 if (!rparent || !rport)
171 continue;
172
173 if (of_graph_parse_endpoint(rport, &rendpoint))
174 continue;
175
176 rdev = of_coresight_get_endpoint_device(rparent);
177 if (!rdev)
178 continue;
179
180 pdata->child_names[i] = dev_name(rdev);
181 pdata->child_ports[i] = rendpoint.id;
182
183 i++;
184 } while (ep);
185 }
186
187 /* Affinity defaults to CPU0 */
188 pdata->cpu = 0;
189 cpu = of_parse_phandle(node, "cpu", 0);
190 if (cpu) {
191 const u32 *mpidr;
192 int len, index;
193
194 mpidr = of_get_property(cpu, "reg", &len);
195 if (mpidr && len == 4) {
196 index = get_logical_index(be32_to_cpup(mpidr));
197 if (index != -EINVAL)
198 pdata->cpu = index;
199 }
200 }
201
202 return pdata;
203}
204EXPORT_SYMBOL_GPL(of_get_coresight_platform_data);
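
A small sketch (not part of this patch) of what a component driver can do with the platform data returned above, mirroring the probe routines earlier in this series; dump_connections() is a made-up debugging helper.

/* Sketch only -- dump_connections() is hypothetical. */
#include <linux/coresight.h>
#include <linux/device.h>

static void dump_connections(struct device *dev,
			     struct coresight_platform_data *pdata)
{
	int i;

	dev_dbg(dev, "%s: %d inport(s), %d outport(s), affine to cpu%d\n",
		pdata->name, pdata->nr_inport, pdata->nr_outport, pdata->cpu);

	/* One entry per output port: local port -> child device:port */
	for (i = 0; i < pdata->nr_outport; i++)
		dev_dbg(dev, "  outport %d -> %s:%d\n", pdata->outports[i],
			pdata->child_names[i], pdata->child_ports[i]);
}
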
diff --git a/drivers/extcon/extcon-class.c b/drivers/extcon/extcon-class.c
index 4c2f2c543bb7..043dcd9946c9 100644
--- a/drivers/extcon/extcon-class.c
+++ b/drivers/extcon/extcon-class.c
@@ -29,6 +29,7 @@
 #include <linux/fs.h>
 #include <linux/err.h>
 #include <linux/extcon.h>
+#include <linux/of.h>
 #include <linux/slab.h>
 #include <linux/sysfs.h>
 #include <linux/of.h>
@@ -997,13 +998,16 @@ struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
 		return ERR_PTR(-ENODEV);
 	}
 
-	edev = extcon_get_extcon_dev(node->name);
-	if (!edev) {
-		dev_err(dev, "unable to get extcon device : %s\n", node->name);
-		return ERR_PTR(-ENODEV);
+	mutex_lock(&extcon_dev_list_lock);
+	list_for_each_entry(edev, &extcon_dev_list, entry) {
+		if (edev->dev.parent && edev->dev.parent->of_node == node) {
+			mutex_unlock(&extcon_dev_list_lock);
+			return edev;
+		}
 	}
+	mutex_unlock(&extcon_dev_list_lock);
 
-	return edev;
+	return ERR_PTR(-EPROBE_DEFER);
 }
 #else
 struct extcon_dev *extcon_get_edev_by_phandle(struct device *dev, int index)
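
A consumer-side sketch (not part of this patch): with the lookup above, a driver that probes before its extcon supplier now gets -EPROBE_DEFER instead of a hard -ENODEV and can simply propagate it; my_probe() is a made-up example.

/* Sketch only -- my_probe() is hypothetical. */
#include <linux/err.h>
#include <linux/extcon.h>
#include <linux/platform_device.h>

static int my_probe(struct platform_device *pdev)
{
	struct extcon_dev *edev;

	edev = extcon_get_edev_by_phandle(&pdev->dev, 0);
	if (IS_ERR(edev))
		return PTR_ERR(edev);	/* -EPROBE_DEFER retries probe later */

	/* ... register extcon interest/notifiers here ... */
	return 0;
}
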
diff --git a/drivers/extcon/extcon-max14577.c b/drivers/extcon/extcon-max14577.c
index dfc2fd370eb3..c1bf0cf747b0 100644
--- a/drivers/extcon/extcon-max14577.c
+++ b/drivers/extcon/extcon-max14577.c
@@ -1,7 +1,7 @@
 /*
  * extcon-max14577.c - MAX14577/77836 extcon driver to support MUIC
  *
- * Copyright (C) 2013,2014 Samsung Electrnoics
+ * Copyright (C) 2013,2014 Samsung Electronics
  * Chanwoo Choi <cw00.choi@samsung.com>
  * Krzysztof Kozlowski <k.kozlowski@samsung.com>
  *
diff --git a/drivers/extcon/extcon-max77693.c b/drivers/extcon/extcon-max77693.c
index 5bf5c5e66f16..740a14d35072 100644
--- a/drivers/extcon/extcon-max77693.c
+++ b/drivers/extcon/extcon-max77693.c
@@ -232,7 +232,7 @@ static const char *max77693_extcon_cable[] = {
 	[EXTCON_CABLE_JIG_USB_ON]	= "JIG-USB-ON",
 	[EXTCON_CABLE_JIG_USB_OFF]	= "JIG-USB-OFF",
 	[EXTCON_CABLE_JIG_UART_OFF]	= "JIG-UART-OFF",
-	[EXTCON_CABLE_JIG_UART_ON]	= "Dock-Car",
+	[EXTCON_CABLE_JIG_UART_ON]	= "JIG-UART-ON",
 	[EXTCON_CABLE_DOCK_SMART]	= "Dock-Smart",
 	[EXTCON_CABLE_DOCK_DESK]	= "Dock-Desk",
 	[EXTCON_CABLE_DOCK_AUDIO]	= "Dock-Audio",
@@ -532,9 +532,6 @@ static int max77693_muic_dock_handler(struct max77693_muic_info *info,
 		extcon_set_cable_state(info->edev, "Dock-Smart", attached);
 		extcon_set_cable_state(info->edev, "MHL", attached);
 		goto out;
-	case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:	/* Dock-Car */
-		strcpy(dock_name, "Dock-Car");
-		break;
 	case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE:	/* Dock-Desk */
 		strcpy(dock_name, "Dock-Desk");
 		break;
@@ -669,6 +666,11 @@ static int max77693_muic_jig_handler(struct max77693_muic_info *info,
669 strcpy(cable_name, "JIG-UART-OFF"); 666 strcpy(cable_name, "JIG-UART-OFF");
670 path = CONTROL1_SW_UART; 667 path = CONTROL1_SW_UART;
671 break; 668 break;
669 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* ADC_JIG_UART_ON */
670 /* PATH:AP_UART */
671 strcpy(cable_name, "JIG-UART-ON");
672 path = CONTROL1_SW_UART;
673 break;
672 default: 674 default:
673 dev_err(info->dev, "failed to detect %s jig cable\n", 675 dev_err(info->dev, "failed to detect %s jig cable\n",
674 attached ? "attached" : "detached"); 676 attached ? "attached" : "detached");
@@ -708,13 +710,13 @@ static int max77693_muic_adc_handler(struct max77693_muic_info *info)
708 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF: 710 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_OFF:
709 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON: 711 case MAX77693_MUIC_ADC_FACTORY_MODE_USB_ON:
710 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF: 712 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_OFF:
713 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON:
711 /* JIG */ 714 /* JIG */
712 ret = max77693_muic_jig_handler(info, cable_type, attached); 715 ret = max77693_muic_jig_handler(info, cable_type, attached);
713 if (ret < 0) 716 if (ret < 0)
714 return ret; 717 return ret;
715 break; 718 break;
716 case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */ 719 case MAX77693_MUIC_ADC_RESERVED_ACC_3: /* Dock-Smart */
717 case MAX77693_MUIC_ADC_FACTORY_MODE_UART_ON: /* Dock-Car */
718 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */ 720 case MAX77693_MUIC_ADC_AUDIO_MODE_REMOTE: /* Dock-Desk */
719 case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */ 721 case MAX77693_MUIC_ADC_AV_CABLE_NOLOAD: /* Dock-Audio */
720 /* 722 /*
diff --git a/drivers/hv/channel_mgmt.c b/drivers/hv/channel_mgmt.c
index 191a6a3ae6ca..2c59f030546b 100644
--- a/drivers/hv/channel_mgmt.c
+++ b/drivers/hv/channel_mgmt.c
@@ -216,9 +216,16 @@ static void vmbus_process_rescind_offer(struct work_struct *work)
216 unsigned long flags; 216 unsigned long flags;
217 struct vmbus_channel *primary_channel; 217 struct vmbus_channel *primary_channel;
218 struct vmbus_channel_relid_released msg; 218 struct vmbus_channel_relid_released msg;
219 struct device *dev;
220
221 if (channel->device_obj) {
222 dev = get_device(&channel->device_obj->device);
223 if (dev) {
224 vmbus_device_unregister(channel->device_obj);
225 put_device(dev);
226 }
227 }
219 228
220 if (channel->device_obj)
221 vmbus_device_unregister(channel->device_obj);
222 memset(&msg, 0, sizeof(struct vmbus_channel_relid_released)); 229 memset(&msg, 0, sizeof(struct vmbus_channel_relid_released));
223 msg.child_relid = channel->offermsg.child_relid; 230 msg.child_relid = channel->offermsg.child_relid;
224 msg.header.msgtype = CHANNELMSG_RELID_RELEASED; 231 msg.header.msgtype = CHANNELMSG_RELID_RELEASED;
diff --git a/drivers/hv/hv_balloon.c b/drivers/hv/hv_balloon.c
index 5e90c5d771a7..b958ded8ac7e 100644
--- a/drivers/hv/hv_balloon.c
+++ b/drivers/hv/hv_balloon.c
@@ -1087,10 +1087,12 @@ static void balloon_up(struct work_struct *dummy)
1087 struct dm_balloon_response *bl_resp; 1087 struct dm_balloon_response *bl_resp;
1088 int alloc_unit; 1088 int alloc_unit;
1089 int ret; 1089 int ret;
1090 bool alloc_error = false; 1090 bool alloc_error;
1091 bool done = false; 1091 bool done = false;
1092 int i; 1092 int i;
1093 1093
1094 /* The host balloons pages in 2M granularity. */
1095 WARN_ON_ONCE(num_pages % PAGES_IN_2M != 0);
1094 1096
1095 /* 1097 /*
1096 * We will attempt 2M allocations. However, if we fail to 1098 * We will attempt 2M allocations. However, if we fail to
@@ -1107,16 +1109,18 @@ static void balloon_up(struct work_struct *dummy)
1107 1109
1108 1110
1109 num_pages -= num_ballooned; 1111 num_pages -= num_ballooned;
1112 alloc_error = false;
1110 num_ballooned = alloc_balloon_pages(&dm_device, num_pages, 1113 num_ballooned = alloc_balloon_pages(&dm_device, num_pages,
1111 bl_resp, alloc_unit, 1114 bl_resp, alloc_unit,
1112 &alloc_error); 1115 &alloc_error);
1113 1116
1114 if ((alloc_error) && (alloc_unit != 1)) { 1117 if (alloc_unit != 1 && num_ballooned == 0) {
1115 alloc_unit = 1; 1118 alloc_unit = 1;
1116 continue; 1119 continue;
1117 } 1120 }
1118 1121
1119 if ((alloc_error) || (num_ballooned == num_pages)) { 1122 if ((alloc_unit == 1 && alloc_error) ||
1123 (num_ballooned == num_pages)) {
1120 bl_resp->more_pages = 0; 1124 bl_resp->more_pages = 0;
1121 done = true; 1125 done = true;
1122 dm_device.state = DM_INITIALIZED; 1126 dm_device.state = DM_INITIALIZED;
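The reworked balloon_up() loop above only falls back from 2M allocation units to single pages when an entire 2M pass returns nothing, and only treats an allocation error as terminal once alloc_unit is already 1. A minimal userspace sketch of that control flow, with a stub allocator standing in for alloc_balloon_pages(); all names and numbers here are illustrative, not the driver's API:

#include <stdbool.h>
#include <stdio.h>

/* Stub allocator: a few 2M-sized chunks succeed, then only 4K pages do. */
static int fake_alloc(int want, int unit, bool *error)
{
    static int big_allocs_left = 2;

    if (unit > 1) {
        if (big_allocs_left-- > 0)
            return unit;                /* one 2M chunk, in 4K pages */
        *error = true;
        return 0;
    }
    *error = false;
    return want < 128 ? want : 128;     /* single pages trickle in */
}

int main(void)
{
    int num_pages = 1536;   /* pages the host still wants ballooned */
    int alloc_unit = 512;   /* 2M expressed in 4K pages */
    bool done = false;

    while (!done) {
        bool alloc_error = false;
        int got = fake_alloc(num_pages, alloc_unit, &alloc_error);

        num_pages -= got;

        /* A whole 2M attempt produced nothing: retry with single pages. */
        if (alloc_unit != 1 && got == 0) {
            alloc_unit = 1;
            continue;
        }

        /* Done when 4K allocations also fail, or the request is satisfied. */
        if ((alloc_unit == 1 && alloc_error) || num_pages == 0)
            done = true;

        printf("ballooned %d pages, %d remaining\n", got, num_pages);
    }
    return 0;
}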
diff --git a/drivers/hv/hv_kvp.c b/drivers/hv/hv_kvp.c
index 521c14625b3a..beb8105c0e7b 100644
--- a/drivers/hv/hv_kvp.c
+++ b/drivers/hv/hv_kvp.c
@@ -350,6 +350,7 @@ kvp_send_key(struct work_struct *dummy)
350 __u8 pool = kvp_transaction.kvp_msg->kvp_hdr.pool; 350 __u8 pool = kvp_transaction.kvp_msg->kvp_hdr.pool;
351 __u32 val32; 351 __u32 val32;
352 __u64 val64; 352 __u64 val64;
353 int rc;
353 354
354 msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC); 355 msg = kzalloc(sizeof(*msg) + sizeof(struct hv_kvp_msg) , GFP_ATOMIC);
355 if (!msg) 356 if (!msg)
@@ -446,7 +447,13 @@ kvp_send_key(struct work_struct *dummy)
446 } 447 }
447 448
448 msg->len = sizeof(struct hv_kvp_msg); 449 msg->len = sizeof(struct hv_kvp_msg);
449 cn_netlink_send(msg, 0, 0, GFP_ATOMIC); 450 rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
451 if (rc) {
452 pr_debug("KVP: failed to communicate to the daemon: %d\n", rc);
453 if (cancel_delayed_work_sync(&kvp_work))
454 kvp_respond_to_host(message, HV_E_FAIL);
455 }
456
450 kfree(msg); 457 kfree(msg);
451 458
452 return; 459 return;
diff --git a/drivers/hv/hv_snapshot.c b/drivers/hv/hv_snapshot.c
index 34f14fddb666..9d5e0d1efdb5 100644
--- a/drivers/hv/hv_snapshot.c
+++ b/drivers/hv/hv_snapshot.c
@@ -28,7 +28,7 @@
28#define VSS_MINOR 0 28#define VSS_MINOR 0
29#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR) 29#define VSS_VERSION (VSS_MAJOR << 16 | VSS_MINOR)
30 30
31 31#define VSS_USERSPACE_TIMEOUT (msecs_to_jiffies(10 * 1000))
32 32
33/* 33/*
34 * Global state maintained for transaction that is being processed. 34 * Global state maintained for transaction that is being processed.
@@ -55,12 +55,24 @@ static const char vss_name[] = "vss_kernel_module";
55static __u8 *recv_buffer; 55static __u8 *recv_buffer;
56 56
57static void vss_send_op(struct work_struct *dummy); 57static void vss_send_op(struct work_struct *dummy);
58static void vss_timeout_func(struct work_struct *dummy);
59
60static DECLARE_DELAYED_WORK(vss_timeout_work, vss_timeout_func);
58static DECLARE_WORK(vss_send_op_work, vss_send_op); 61static DECLARE_WORK(vss_send_op_work, vss_send_op);
59 62
60/* 63/*
61 * Callback when data is received from user mode. 64 * Callback when data is received from user mode.
62 */ 65 */
63 66
67static void vss_timeout_func(struct work_struct *dummy)
68{
69 /*
70 * Timeout waiting for userspace component to reply happened.
71 */
72 pr_warn("VSS: timeout waiting for daemon to reply\n");
73 vss_respond_to_host(HV_E_FAIL);
74}
75
64static void 76static void
65vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp) 77vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
66{ 78{
@@ -76,13 +88,15 @@ vss_cn_callback(struct cn_msg *msg, struct netlink_skb_parms *nsp)
76 return; 88 return;
77 89
78 } 90 }
79 vss_respond_to_host(vss_msg->error); 91 if (cancel_delayed_work_sync(&vss_timeout_work))
92 vss_respond_to_host(vss_msg->error);
80} 93}
81 94
82 95
83static void vss_send_op(struct work_struct *dummy) 96static void vss_send_op(struct work_struct *dummy)
84{ 97{
85 int op = vss_transaction.msg->vss_hdr.operation; 98 int op = vss_transaction.msg->vss_hdr.operation;
99 int rc;
86 struct cn_msg *msg; 100 struct cn_msg *msg;
87 struct hv_vss_msg *vss_msg; 101 struct hv_vss_msg *vss_msg;
88 102
@@ -98,7 +112,12 @@ static void vss_send_op(struct work_struct *dummy)
98 vss_msg->vss_hdr.operation = op; 112 vss_msg->vss_hdr.operation = op;
99 msg->len = sizeof(struct hv_vss_msg); 113 msg->len = sizeof(struct hv_vss_msg);
100 114
101 cn_netlink_send(msg, 0, 0, GFP_ATOMIC); 115 rc = cn_netlink_send(msg, 0, 0, GFP_ATOMIC);
116 if (rc) {
117 pr_warn("VSS: failed to communicate to the daemon: %d\n", rc);
118 if (cancel_delayed_work_sync(&vss_timeout_work))
119 vss_respond_to_host(HV_E_FAIL);
120 }
102 kfree(msg); 121 kfree(msg);
103 122
104 return; 123 return;
@@ -223,6 +242,8 @@ void hv_vss_onchannelcallback(void *context)
223 case VSS_OP_FREEZE: 242 case VSS_OP_FREEZE:
224 case VSS_OP_THAW: 243 case VSS_OP_THAW:
225 schedule_work(&vss_send_op_work); 244 schedule_work(&vss_send_op_work);
245 schedule_delayed_work(&vss_timeout_work,
246 VSS_USERSPACE_TIMEOUT);
226 return; 247 return;
227 248
228 case VSS_OP_HOT_BACKUP: 249 case VSS_OP_HOT_BACKUP:
@@ -277,5 +298,6 @@ hv_vss_init(struct hv_util_service *srv)
277void hv_vss_deinit(void) 298void hv_vss_deinit(void)
278{ 299{
279 cn_del_callback(&vss_id); 300 cn_del_callback(&vss_id);
301 cancel_delayed_work_sync(&vss_timeout_work);
280 cancel_work_sync(&vss_send_op_work); 302 cancel_work_sync(&vss_send_op_work);
281} 303}
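The new VSS path arms a 10-second timeout alongside every request to the userspace daemon, and exactly one of "reply arrived" and "timeout fired" is allowed to answer the host; on the kernel side, cancel_delayed_work_sync() returning true is that claim. A small userspace analogue of the single-responder idea, with an atomic flag standing in for the work-cancellation race (compile with -pthread; names and timings are illustrative only):

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

/* Whoever flips this from false to true owns the response to the host. */
static atomic_bool responded;

static void respond_to_host(const char *why)
{
    printf("responding to host: %s\n", why);
}

/* Plays the role of the delayed work: fires if the daemon stays silent. */
static void *timeout_fn(void *arg)
{
    (void)arg;
    sleep(2);                           /* shortened stand-in for the 10s timeout */
    if (!atomic_exchange(&responded, true))
        respond_to_host("timeout, fail the freeze/thaw");
    return NULL;
}

/* Plays the role of vss_cn_callback(): the daemon's netlink reply. */
static void daemon_reply(int error)
{
    if (!atomic_exchange(&responded, true))
        respond_to_host(error ? "daemon reported an error" : "daemon ok");
    /* else: the timeout already failed the transaction; drop the reply */
}

int main(void)
{
    pthread_t t;

    pthread_create(&t, NULL, timeout_fn, NULL); /* schedule_delayed_work() */
    sleep(1);                                   /* daemon takes a moment   */
    daemon_reply(0);                            /* reply beats the timeout */
    pthread_join(t, NULL);                      /* like the _sync cleanup  */
    return 0;
}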
diff --git a/drivers/misc/atmel-ssc.c b/drivers/misc/atmel-ssc.c
index 5305ac8dfb3e..e11a0bd6c66e 100644
--- a/drivers/misc/atmel-ssc.c
+++ b/drivers/misc/atmel-ssc.c
@@ -57,7 +57,7 @@ struct ssc_device *ssc_request(unsigned int ssc_num)
57 ssc->user++; 57 ssc->user++;
58 spin_unlock(&user_lock); 58 spin_unlock(&user_lock);
59 59
60 clk_prepare_enable(ssc->clk); 60 clk_prepare(ssc->clk);
61 61
62 return ssc; 62 return ssc;
63} 63}
@@ -77,7 +77,7 @@ void ssc_free(struct ssc_device *ssc)
77 spin_unlock(&user_lock); 77 spin_unlock(&user_lock);
78 78
79 if (disable_clk) 79 if (disable_clk)
80 clk_disable_unprepare(ssc->clk); 80 clk_unprepare(ssc->clk);
81} 81}
82EXPORT_SYMBOL(ssc_free); 82EXPORT_SYMBOL(ssc_free);
83 83
diff --git a/drivers/misc/carma/Kconfig b/drivers/misc/carma/Kconfig
index c90370ed712b..295882bfb14e 100644
--- a/drivers/misc/carma/Kconfig
+++ b/drivers/misc/carma/Kconfig
@@ -1,7 +1,6 @@
1config CARMA_FPGA 1config CARMA_FPGA
2 tristate "CARMA DATA-FPGA Access Driver" 2 tristate "CARMA DATA-FPGA Access Driver"
3 depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA 3 depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
4 select VIDEOBUF_DMA_SG
5 default n 4 default n
6 help 5 help
7 Say Y here to include support for communicating with the data 6 Say Y here to include support for communicating with the data
@@ -9,8 +8,7 @@ config CARMA_FPGA
9 8
10config CARMA_FPGA_PROGRAM 9config CARMA_FPGA_PROGRAM
11 tristate "CARMA DATA-FPGA Programmer" 10 tristate "CARMA DATA-FPGA Programmer"
12 depends on FSL_SOC && PPC_83xx && MEDIA_SUPPORT && HAS_DMA && FSL_DMA 11 depends on FSL_SOC && PPC_83xx && HAS_DMA && FSL_DMA
13 select VIDEOBUF_DMA_SG
14 default n 12 default n
15 help 13 help
16 Say Y here to include support for programming the data processing 14 Say Y here to include support for programming the data processing
diff --git a/drivers/misc/carma/carma-fpga-program.c b/drivers/misc/carma/carma-fpga-program.c
index 6fa52f71f51c..06166ac000e0 100644
--- a/drivers/misc/carma/carma-fpga-program.c
+++ b/drivers/misc/carma/carma-fpga-program.c
@@ -19,6 +19,7 @@
19#include <linux/fsldma.h> 19#include <linux/fsldma.h>
20#include <linux/interrupt.h> 20#include <linux/interrupt.h>
21#include <linux/highmem.h> 21#include <linux/highmem.h>
22#include <linux/vmalloc.h>
22#include <linux/kernel.h> 23#include <linux/kernel.h>
23#include <linux/module.h> 24#include <linux/module.h>
24#include <linux/mutex.h> 25#include <linux/mutex.h>
@@ -30,8 +31,6 @@
30#include <linux/fs.h> 31#include <linux/fs.h>
31#include <linux/io.h> 32#include <linux/io.h>
32 33
33#include <media/videobuf-dma-sg.h>
34
35/* MPC8349EMDS specific get_immrbase() */ 34/* MPC8349EMDS specific get_immrbase() */
36#include <sysdev/fsl_soc.h> 35#include <sysdev/fsl_soc.h>
37 36
@@ -67,14 +66,79 @@ struct fpga_dev {
67 /* FPGA Bitfile */ 66 /* FPGA Bitfile */
68 struct mutex lock; 67 struct mutex lock;
69 68
70 struct videobuf_dmabuf vb; 69 void *vaddr;
71 bool vb_allocated; 70 struct scatterlist *sglist;
71 int sglen;
72 int nr_pages;
73 bool buf_allocated;
72 74
73 /* max size and written bytes */ 75 /* max size and written bytes */
74 size_t fw_size; 76 size_t fw_size;
75 size_t bytes; 77 size_t bytes;
76}; 78};
77 79
80static int fpga_dma_init(struct fpga_dev *priv, int nr_pages)
81{
82 struct page *pg;
83 int i;
84
85 priv->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
86 if (NULL == priv->vaddr) {
87 pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
88 return -ENOMEM;
89 }
90
91 pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
92 (unsigned long)priv->vaddr,
93 nr_pages << PAGE_SHIFT);
94
95 memset(priv->vaddr, 0, nr_pages << PAGE_SHIFT);
96 priv->nr_pages = nr_pages;
97
98 priv->sglist = vzalloc(priv->nr_pages * sizeof(*priv->sglist));
99 if (NULL == priv->sglist)
100 goto vzalloc_err;
101
102 sg_init_table(priv->sglist, priv->nr_pages);
103 for (i = 0; i < priv->nr_pages; i++) {
104 pg = vmalloc_to_page(priv->vaddr + i * PAGE_SIZE);
105 if (NULL == pg)
106 goto vmalloc_to_page_err;
107 sg_set_page(&priv->sglist[i], pg, PAGE_SIZE, 0);
108 }
109 return 0;
110
111vmalloc_to_page_err:
112 vfree(priv->sglist);
113 priv->sglist = NULL;
114vzalloc_err:
115 vfree(priv->vaddr);
116 priv->vaddr = NULL;
117 return -ENOMEM;
118}
119
120static int fpga_dma_map(struct fpga_dev *priv)
121{
122 priv->sglen = dma_map_sg(priv->dev, priv->sglist,
123 priv->nr_pages, DMA_TO_DEVICE);
124
125 if (0 == priv->sglen) {
126 pr_warn("%s: dma_map_sg failed\n", __func__);
127 return -ENOMEM;
128 }
129 return 0;
130}
131
132static int fpga_dma_unmap(struct fpga_dev *priv)
133{
134 if (!priv->sglen)
135 return 0;
136
137 dma_unmap_sg(priv->dev, priv->sglist, priv->sglen, DMA_TO_DEVICE);
138 priv->sglen = 0;
139 return 0;
140}
141
78/* 142/*
79 * FPGA Bitfile Helpers 143 * FPGA Bitfile Helpers
80 */ 144 */
@@ -87,8 +151,9 @@ struct fpga_dev {
87 */ 151 */
88static void fpga_drop_firmware_data(struct fpga_dev *priv) 152static void fpga_drop_firmware_data(struct fpga_dev *priv)
89{ 153{
90 videobuf_dma_free(&priv->vb); 154 vfree(priv->sglist);
91 priv->vb_allocated = false; 155 vfree(priv->vaddr);
156 priv->buf_allocated = false;
92 priv->bytes = 0; 157 priv->bytes = 0;
93} 158}
94 159
@@ -427,7 +492,7 @@ static noinline int fpga_program_cpu(struct fpga_dev *priv)
427 dev_dbg(priv->dev, "enabled the controller\n"); 492 dev_dbg(priv->dev, "enabled the controller\n");
428 493
429 /* Write each chunk of the FPGA bitfile to FPGA programmer */ 494 /* Write each chunk of the FPGA bitfile to FPGA programmer */
430 ret = fpga_program_block(priv, priv->vb.vaddr, priv->bytes); 495 ret = fpga_program_block(priv, priv->vaddr, priv->bytes);
431 if (ret) 496 if (ret)
432 goto out_disable_controller; 497 goto out_disable_controller;
433 498
@@ -463,7 +528,6 @@ out_disable_controller:
463 */ 528 */
464static noinline int fpga_program_dma(struct fpga_dev *priv) 529static noinline int fpga_program_dma(struct fpga_dev *priv)
465{ 530{
466 struct videobuf_dmabuf *vb = &priv->vb;
467 struct dma_chan *chan = priv->chan; 531 struct dma_chan *chan = priv->chan;
468 struct dma_async_tx_descriptor *tx; 532 struct dma_async_tx_descriptor *tx;
469 size_t num_pages, len, avail = 0; 533 size_t num_pages, len, avail = 0;
@@ -505,7 +569,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
505 } 569 }
506 570
507 /* Map the buffer for DMA */ 571 /* Map the buffer for DMA */
508 ret = videobuf_dma_map(priv->dev, &priv->vb); 572 ret = fpga_dma_map(priv);
509 if (ret) { 573 if (ret) {
510 dev_err(priv->dev, "Unable to map buffer for DMA\n"); 574 dev_err(priv->dev, "Unable to map buffer for DMA\n");
511 goto out_free_table; 575 goto out_free_table;
@@ -525,7 +589,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
525 goto out_dma_unmap; 589 goto out_dma_unmap;
526 } 590 }
527 591
528 ret = fsl_dma_external_start(chan, 1) 592 ret = fsl_dma_external_start(chan, 1);
529 if (ret) { 593 if (ret) {
530 dev_err(priv->dev, "DMA external control setup failed\n"); 594 dev_err(priv->dev, "DMA external control setup failed\n");
531 goto out_dma_unmap; 595 goto out_dma_unmap;
@@ -534,7 +598,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
534 /* setup and submit the DMA transaction */ 598 /* setup and submit the DMA transaction */
535 599
536 tx = dmaengine_prep_dma_sg(chan, table.sgl, num_pages, 600 tx = dmaengine_prep_dma_sg(chan, table.sgl, num_pages,
537 vb->sglist, vb->sglen, 0); 601 priv->sglist, priv->sglen, 0);
538 if (!tx) { 602 if (!tx) {
539 dev_err(priv->dev, "Unable to prep DMA transaction\n"); 603 dev_err(priv->dev, "Unable to prep DMA transaction\n");
540 ret = -ENOMEM; 604 ret = -ENOMEM;
@@ -572,7 +636,7 @@ static noinline int fpga_program_dma(struct fpga_dev *priv)
572out_disable_controller: 636out_disable_controller:
573 fpga_programmer_disable(priv); 637 fpga_programmer_disable(priv);
574out_dma_unmap: 638out_dma_unmap:
575 videobuf_dma_unmap(priv->dev, vb); 639 fpga_dma_unmap(priv);
576out_free_table: 640out_free_table:
577 sg_free_table(&table); 641 sg_free_table(&table);
578out_return: 642out_return:
@@ -702,12 +766,12 @@ static int fpga_open(struct inode *inode, struct file *filp)
702 priv->bytes = 0; 766 priv->bytes = 0;
703 767
704 /* Check if we have already allocated a buffer */ 768 /* Check if we have already allocated a buffer */
705 if (priv->vb_allocated) 769 if (priv->buf_allocated)
706 return 0; 770 return 0;
707 771
708 /* Allocate a buffer to hold enough data for the bitfile */ 772 /* Allocate a buffer to hold enough data for the bitfile */
709 nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE); 773 nr_pages = DIV_ROUND_UP(priv->fw_size, PAGE_SIZE);
710 ret = videobuf_dma_init_kernel(&priv->vb, DMA_TO_DEVICE, nr_pages); 774 ret = fpga_dma_init(priv, nr_pages);
711 if (ret) { 775 if (ret) {
712 dev_err(priv->dev, "unable to allocate data buffer\n"); 776 dev_err(priv->dev, "unable to allocate data buffer\n");
713 mutex_unlock(&priv->lock); 777 mutex_unlock(&priv->lock);
@@ -715,7 +779,7 @@ static int fpga_open(struct inode *inode, struct file *filp)
715 return ret; 779 return ret;
716 } 780 }
717 781
718 priv->vb_allocated = true; 782 priv->buf_allocated = true;
719 return 0; 783 return 0;
720} 784}
721 785
@@ -738,7 +802,7 @@ static ssize_t fpga_write(struct file *filp, const char __user *buf,
738 return -ENOSPC; 802 return -ENOSPC;
739 803
740 count = min_t(size_t, priv->fw_size - priv->bytes, count); 804 count = min_t(size_t, priv->fw_size - priv->bytes, count);
741 if (copy_from_user(priv->vb.vaddr + priv->bytes, buf, count)) 805 if (copy_from_user(priv->vaddr + priv->bytes, buf, count))
742 return -EFAULT; 806 return -EFAULT;
743 807
744 priv->bytes += count; 808 priv->bytes += count;
@@ -749,20 +813,19 @@ static ssize_t fpga_read(struct file *filp, char __user *buf, size_t count,
749 loff_t *f_pos) 813 loff_t *f_pos)
750{ 814{
751 struct fpga_dev *priv = filp->private_data; 815 struct fpga_dev *priv = filp->private_data;
752 return simple_read_from_buffer(buf, count, ppos, 816 return simple_read_from_buffer(buf, count, f_pos,
753 priv->vb.vaddr, priv->bytes); 817 priv->vaddr, priv->bytes);
754} 818}
755 819
756static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin) 820static loff_t fpga_llseek(struct file *filp, loff_t offset, int origin)
757{ 821{
758 struct fpga_dev *priv = filp->private_data; 822 struct fpga_dev *priv = filp->private_data;
759 loff_t newpos;
760 823
761 /* only read-only opens are allowed to seek */ 824 /* only read-only opens are allowed to seek */
762 if ((filp->f_flags & O_ACCMODE) != O_RDONLY) 825 if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
763 return -EINVAL; 826 return -EINVAL;
764 827
765 return fixed_size_llseek(file, offset, origin, priv->fw_size); 828 return fixed_size_llseek(filp, offset, origin, priv->fw_size);
766} 829}
767 830
768static const struct file_operations fpga_fops = { 831static const struct file_operations fpga_fops = {
@@ -953,7 +1016,6 @@ static int fpga_of_probe(struct platform_device *op)
953 priv->dev = &op->dev; 1016 priv->dev = &op->dev;
954 mutex_init(&priv->lock); 1017 mutex_init(&priv->lock);
955 init_completion(&priv->completion); 1018 init_completion(&priv->completion);
956 videobuf_dma_init(&priv->vb);
957 1019
958 dev_set_drvdata(priv->dev, priv); 1020 dev_set_drvdata(priv->dev, priv);
959 dma_cap_zero(mask); 1021 dma_cap_zero(mask);
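The open-coded replacement for videobuf above walks the vmalloc'd buffer page by page and records each backing page in a scatterlist before dma_map_sg(). A loose userspace analogue of that "describe one contiguous buffer as page-sized segments" step, using an iovec array where the driver uses a scatterlist; this is an illustration only, not the kernel API:

#define _POSIX_C_SOURCE 200112L
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

/* Describe buf as nr_pages page-sized segments, one iovec per page. */
static struct iovec *build_page_list(void *buf, int nr_pages, long page_size)
{
    struct iovec *list = calloc(nr_pages, sizeof(*list));
    int i;

    if (!list)
        return NULL;

    for (i = 0; i < nr_pages; i++) {
        list[i].iov_base = (char *)buf + (long)i * page_size;
        list[i].iov_len = page_size;
    }
    return list;
}

int main(void)
{
    long page_size = sysconf(_SC_PAGESIZE);
    int nr_pages = 4;
    void *buf;
    struct iovec *list;

    if (posix_memalign(&buf, page_size, nr_pages * page_size))
        return 1;
    memset(buf, 0, nr_pages * page_size);

    list = build_page_list(buf, nr_pages, page_size);
    if (!list)
        return 1;

    /* e.g. hand the segment list to writev() for gathered I/O */
    printf("described %d pages of %ld bytes each\n", nr_pages, page_size);

    free(list);
    free(buf);
    return 0;
}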
diff --git a/drivers/misc/carma/carma-fpga.c b/drivers/misc/carma/carma-fpga.c
index cdf2d7b902bb..68cdfe151bdb 100644
--- a/drivers/misc/carma/carma-fpga.c
+++ b/drivers/misc/carma/carma-fpga.c
@@ -98,6 +98,7 @@
98#include <linux/seq_file.h> 98#include <linux/seq_file.h>
99#include <linux/highmem.h> 99#include <linux/highmem.h>
100#include <linux/debugfs.h> 100#include <linux/debugfs.h>
101#include <linux/vmalloc.h>
101#include <linux/kernel.h> 102#include <linux/kernel.h>
102#include <linux/module.h> 103#include <linux/module.h>
103#include <linux/poll.h> 104#include <linux/poll.h>
@@ -105,8 +106,6 @@
105#include <linux/kref.h> 106#include <linux/kref.h>
106#include <linux/io.h> 107#include <linux/io.h>
107 108
108#include <media/videobuf-dma-sg.h>
109
110/* system controller registers */ 109/* system controller registers */
111#define SYS_IRQ_SOURCE_CTL 0x24 110#define SYS_IRQ_SOURCE_CTL 0x24
112#define SYS_IRQ_OUTPUT_EN 0x28 111#define SYS_IRQ_OUTPUT_EN 0x28
@@ -142,7 +141,10 @@ struct fpga_info {
142 141
143struct data_buf { 142struct data_buf {
144 struct list_head entry; 143 struct list_head entry;
145 struct videobuf_dmabuf vb; 144 void *vaddr;
145 struct scatterlist *sglist;
146 int sglen;
147 int nr_pages;
146 size_t size; 148 size_t size;
147}; 149};
148 150
@@ -207,6 +209,68 @@ static void fpga_device_release(struct kref *ref)
207 * Data Buffer Allocation Helpers 209 * Data Buffer Allocation Helpers
208 */ 210 */
209 211
212static int carma_dma_init(struct data_buf *buf, int nr_pages)
213{
214 struct page *pg;
215 int i;
216
217 buf->vaddr = vmalloc_32(nr_pages << PAGE_SHIFT);
218 if (NULL == buf->vaddr) {
219 pr_debug("vmalloc_32(%d pages) failed\n", nr_pages);
220 return -ENOMEM;
221 }
222
223 pr_debug("vmalloc is at addr 0x%08lx, size=%d\n",
224 (unsigned long)buf->vaddr,
225 nr_pages << PAGE_SHIFT);
226
227 memset(buf->vaddr, 0, nr_pages << PAGE_SHIFT);
228 buf->nr_pages = nr_pages;
229
230 buf->sglist = vzalloc(buf->nr_pages * sizeof(*buf->sglist));
231 if (NULL == buf->sglist)
232 goto vzalloc_err;
233
234 sg_init_table(buf->sglist, buf->nr_pages);
235 for (i = 0; i < buf->nr_pages; i++) {
236 pg = vmalloc_to_page(buf->vaddr + i * PAGE_SIZE);
237 if (NULL == pg)
238 goto vmalloc_to_page_err;
239 sg_set_page(&buf->sglist[i], pg, PAGE_SIZE, 0);
240 }
241 return 0;
242
243vmalloc_to_page_err:
244 vfree(buf->sglist);
245 buf->sglist = NULL;
246vzalloc_err:
247 vfree(buf->vaddr);
248 buf->vaddr = NULL;
249 return -ENOMEM;
250}
251
252static int carma_dma_map(struct device *dev, struct data_buf *buf)
253{
254 buf->sglen = dma_map_sg(dev, buf->sglist,
255 buf->nr_pages, DMA_FROM_DEVICE);
256
257 if (0 == buf->sglen) {
258 pr_warn("%s: dma_map_sg failed\n", __func__);
259 return -ENOMEM;
260 }
261 return 0;
262}
263
264static int carma_dma_unmap(struct device *dev, struct data_buf *buf)
265{
266 if (!buf->sglen)
267 return 0;
268
269 dma_unmap_sg(dev, buf->sglist, buf->sglen, DMA_FROM_DEVICE);
270 buf->sglen = 0;
271 return 0;
272}
273
210/** 274/**
211 * data_free_buffer() - free a single data buffer and all allocated memory 275 * data_free_buffer() - free a single data buffer and all allocated memory
212 * @buf: the buffer to free 276 * @buf: the buffer to free
@@ -221,7 +285,8 @@ static void data_free_buffer(struct data_buf *buf)
221 return; 285 return;
222 286
223 /* free all memory */ 287 /* free all memory */
224 videobuf_dma_free(&buf->vb); 288 vfree(buf->sglist);
289 vfree(buf->vaddr);
225 kfree(buf); 290 kfree(buf);
226} 291}
227 292
@@ -230,7 +295,7 @@ static void data_free_buffer(struct data_buf *buf)
230 * @bytes: the number of bytes required 295 * @bytes: the number of bytes required
231 * 296 *
232 * This allocates all space needed for a data buffer. It must be mapped before 297 * This allocates all space needed for a data buffer. It must be mapped before
233 * use in a DMA transaction using videobuf_dma_map(). 298 * use in a DMA transaction using carma_dma_map().
234 * 299 *
235 * Returns NULL on failure 300 * Returns NULL on failure
236 */ 301 */
@@ -252,9 +317,8 @@ static struct data_buf *data_alloc_buffer(const size_t bytes)
252 INIT_LIST_HEAD(&buf->entry); 317 INIT_LIST_HEAD(&buf->entry);
253 buf->size = bytes; 318 buf->size = bytes;
254 319
255 /* allocate the videobuf */ 320 /* allocate the buffer */
256 videobuf_dma_init(&buf->vb); 321 ret = carma_dma_init(buf, nr_pages);
257 ret = videobuf_dma_init_kernel(&buf->vb, DMA_FROM_DEVICE, nr_pages);
258 if (ret) 322 if (ret)
259 goto out_free_buf; 323 goto out_free_buf;
260 324
@@ -285,13 +349,13 @@ static void data_free_buffers(struct fpga_device *priv)
285 349
286 list_for_each_entry_safe(buf, tmp, &priv->free, entry) { 350 list_for_each_entry_safe(buf, tmp, &priv->free, entry) {
287 list_del_init(&buf->entry); 351 list_del_init(&buf->entry);
288 videobuf_dma_unmap(priv->dev, &buf->vb); 352 carma_dma_unmap(priv->dev, buf);
289 data_free_buffer(buf); 353 data_free_buffer(buf);
290 } 354 }
291 355
292 list_for_each_entry_safe(buf, tmp, &priv->used, entry) { 356 list_for_each_entry_safe(buf, tmp, &priv->used, entry) {
293 list_del_init(&buf->entry); 357 list_del_init(&buf->entry);
294 videobuf_dma_unmap(priv->dev, &buf->vb); 358 carma_dma_unmap(priv->dev, buf);
295 data_free_buffer(buf); 359 data_free_buffer(buf);
296 } 360 }
297 361
@@ -330,7 +394,7 @@ static int data_alloc_buffers(struct fpga_device *priv)
330 break; 394 break;
331 395
332 /* map it for DMA */ 396 /* map it for DMA */
333 ret = videobuf_dma_map(priv->dev, &buf->vb); 397 ret = carma_dma_map(priv->dev, buf);
334 if (ret) { 398 if (ret) {
335 data_free_buffer(buf); 399 data_free_buffer(buf);
336 break; 400 break;
@@ -634,8 +698,8 @@ static int data_submit_dma(struct fpga_device *priv, struct data_buf *buf)
634 dma_addr_t dst, src; 698 dma_addr_t dst, src;
635 unsigned long dma_flags = 0; 699 unsigned long dma_flags = 0;
636 700
637 dst_sg = buf->vb.sglist; 701 dst_sg = buf->sglist;
638 dst_nents = buf->vb.sglen; 702 dst_nents = buf->sglen;
639 703
640 src_sg = priv->corl_table.sgl; 704 src_sg = priv->corl_table.sgl;
641 src_nents = priv->corl_nents; 705 src_nents = priv->corl_nents;
@@ -1134,7 +1198,7 @@ static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
1134 spin_unlock_irq(&priv->lock); 1198 spin_unlock_irq(&priv->lock);
1135 1199
1136 /* Buffers are always mapped: unmap it */ 1200 /* Buffers are always mapped: unmap it */
1137 videobuf_dma_unmap(priv->dev, &dbuf->vb); 1201 carma_dma_unmap(priv->dev, dbuf);
1138 1202
1139 /* save the buffer for later */ 1203 /* save the buffer for later */
1140 reader->buf = dbuf; 1204 reader->buf = dbuf;
@@ -1143,7 +1207,7 @@ static ssize_t data_read(struct file *filp, char __user *ubuf, size_t count,
1143have_buffer: 1207have_buffer:
1144 /* Get the number of bytes available */ 1208 /* Get the number of bytes available */
1145 avail = dbuf->size - reader->buf_start; 1209 avail = dbuf->size - reader->buf_start;
1146 data = dbuf->vb.vaddr + reader->buf_start; 1210 data = dbuf->vaddr + reader->buf_start;
1147 1211
1148 /* Get the number of bytes we can transfer */ 1212 /* Get the number of bytes we can transfer */
1149 count = min(count, avail); 1213 count = min(count, avail);
@@ -1171,7 +1235,7 @@ have_buffer:
1171 * If it fails, we pretend that the read never happed and return 1235 * If it fails, we pretend that the read never happed and return
1172 * -EFAULT to userspace. The read will be retried. 1236 * -EFAULT to userspace. The read will be retried.
1173 */ 1237 */
1174 ret = videobuf_dma_map(priv->dev, &dbuf->vb); 1238 ret = carma_dma_map(priv->dev, dbuf);
1175 if (ret) { 1239 if (ret) {
1176 dev_err(priv->dev, "unable to remap buffer for DMA\n"); 1240 dev_err(priv->dev, "unable to remap buffer for DMA\n");
1177 return -EFAULT; 1241 return -EFAULT;
@@ -1203,7 +1267,7 @@ out_unlock:
1203 spin_unlock_irq(&priv->lock); 1267 spin_unlock_irq(&priv->lock);
1204 1268
1205 if (drop_buffer) { 1269 if (drop_buffer) {
1206 videobuf_dma_unmap(priv->dev, &dbuf->vb); 1270 carma_dma_unmap(priv->dev, dbuf);
1207 data_free_buffer(dbuf); 1271 data_free_buffer(dbuf);
1208 } 1272 }
1209 1273
diff --git a/drivers/misc/fuse/Makefile b/drivers/misc/fuse/Makefile
deleted file mode 100644
index 0679c4febc89..000000000000
--- a/drivers/misc/fuse/Makefile
+++ /dev/null
@@ -1 +0,0 @@
1obj-$(CONFIG_ARCH_TEGRA) += tegra/
diff --git a/drivers/misc/genwqe/card_utils.c b/drivers/misc/genwqe/card_utils.c
index 7cb3b7e41739..1ca94e6fa8fb 100644
--- a/drivers/misc/genwqe/card_utils.c
+++ b/drivers/misc/genwqe/card_utils.c
@@ -590,6 +590,8 @@ int genwqe_user_vmap(struct genwqe_dev *cd, struct dma_mapping *m, void *uaddr,
590 m->nr_pages, 590 m->nr_pages,
591 1, /* write by caller */ 591 1, /* write by caller */
592 m->page_list); /* ptrs to pages */ 592 m->page_list); /* ptrs to pages */
593 if (rc < 0)
594 goto fail_get_user_pages;
593 595
594 /* assumption: get_user_pages can be killed by signals. */ 596 /* assumption: get_user_pages can be killed by signals. */
595 if (rc < m->nr_pages) { 597 if (rc < m->nr_pages) {
diff --git a/drivers/misc/mei/amthif.c b/drivers/misc/mei/amthif.c
index 6cdce8477f57..79f53941779d 100644
--- a/drivers/misc/mei/amthif.c
+++ b/drivers/misc/mei/amthif.c
@@ -262,6 +262,7 @@ out:
262static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb) 262static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
263{ 263{
264 struct mei_msg_hdr mei_hdr; 264 struct mei_msg_hdr mei_hdr;
265 struct mei_cl *cl;
265 int ret; 266 int ret;
266 267
267 if (!dev || !cb) 268 if (!dev || !cb)
@@ -277,8 +278,9 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
277 dev->iamthif_msg_buf_size = cb->request_buffer.size; 278 dev->iamthif_msg_buf_size = cb->request_buffer.size;
278 memcpy(dev->iamthif_msg_buf, cb->request_buffer.data, 279 memcpy(dev->iamthif_msg_buf, cb->request_buffer.data,
279 cb->request_buffer.size); 280 cb->request_buffer.size);
281 cl = &dev->iamthif_cl;
280 282
281 ret = mei_cl_flow_ctrl_creds(&dev->iamthif_cl); 283 ret = mei_cl_flow_ctrl_creds(cl);
282 if (ret < 0) 284 if (ret < 0)
283 return ret; 285 return ret;
284 286
@@ -292,8 +294,8 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
292 mei_hdr.msg_complete = 1; 294 mei_hdr.msg_complete = 1;
293 } 295 }
294 296
295 mei_hdr.host_addr = dev->iamthif_cl.host_client_id; 297 mei_hdr.host_addr = cl->host_client_id;
296 mei_hdr.me_addr = dev->iamthif_cl.me_client_id; 298 mei_hdr.me_addr = cl->me_client_id;
297 mei_hdr.reserved = 0; 299 mei_hdr.reserved = 0;
298 mei_hdr.internal = 0; 300 mei_hdr.internal = 0;
299 dev->iamthif_msg_buf_index += mei_hdr.length; 301 dev->iamthif_msg_buf_index += mei_hdr.length;
@@ -302,7 +304,7 @@ static int mei_amthif_send_cmd(struct mei_device *dev, struct mei_cl_cb *cb)
302 return ret; 304 return ret;
303 305
304 if (mei_hdr.msg_complete) { 306 if (mei_hdr.msg_complete) {
305 if (mei_cl_flow_ctrl_reduce(&dev->iamthif_cl)) 307 if (mei_cl_flow_ctrl_reduce(cl))
306 return -EIO; 308 return -EIO;
307 dev->iamthif_flow_control_pending = true; 309 dev->iamthif_flow_control_pending = true;
308 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL; 310 dev->iamthif_state = MEI_IAMTHIF_FLOW_CONTROL;
@@ -360,8 +362,7 @@ int mei_amthif_write(struct mei_device *dev, struct mei_cl_cb *cb)
360void mei_amthif_run_next_cmd(struct mei_device *dev) 362void mei_amthif_run_next_cmd(struct mei_device *dev)
361{ 363{
362 struct mei_cl_cb *cb; 364 struct mei_cl_cb *cb;
363 struct mei_cl_cb *next; 365 int ret;
364 int status;
365 366
366 if (!dev) 367 if (!dev)
367 return; 368 return;
@@ -376,16 +377,14 @@ void mei_amthif_run_next_cmd(struct mei_device *dev)
376 377
377 dev_dbg(dev->dev, "complete amthif cmd_list cb.\n"); 378 dev_dbg(dev->dev, "complete amthif cmd_list cb.\n");
378 379
379 list_for_each_entry_safe(cb, next, &dev->amthif_cmd_list.list, list) { 380 cb = list_first_entry_or_null(&dev->amthif_cmd_list.list,
380 list_del(&cb->list); 381 typeof(*cb), list);
381 if (!cb->cl) 382 if (!cb)
382 continue; 383 return;
383 status = mei_amthif_send_cmd(dev, cb); 384 list_del(&cb->list);
384 if (status) 385 ret = mei_amthif_send_cmd(dev, cb);
385 dev_warn(dev->dev, "amthif write failed status = %d\n", 386 if (ret)
386 status); 387 dev_warn(dev->dev, "amthif write failed status = %d\n", ret);
387 break;
388 }
389} 388}
390 389
391 390
@@ -536,9 +535,6 @@ int mei_amthif_irq_read_msg(struct mei_device *dev,
536 cb = dev->iamthif_current_cb; 535 cb = dev->iamthif_current_cb;
537 dev->iamthif_current_cb = NULL; 536 dev->iamthif_current_cb = NULL;
538 537
539 if (!cb->cl)
540 return -ENODEV;
541
542 dev->iamthif_stall_timer = 0; 538 dev->iamthif_stall_timer = 0;
543 cb->buf_idx = dev->iamthif_msg_buf_index; 539 cb->buf_idx = dev->iamthif_msg_buf_index;
544 cb->read_time = jiffies; 540 cb->read_time = jiffies;
diff --git a/drivers/misc/mei/bus.c b/drivers/misc/mei/bus.c
index 4d20d60ca38d..b3a72bca5242 100644
--- a/drivers/misc/mei/bus.c
+++ b/drivers/misc/mei/bus.c
@@ -140,7 +140,7 @@ static struct device_type mei_cl_device_type = {
140 .release = mei_cl_dev_release, 140 .release = mei_cl_dev_release,
141}; 141};
142 142
143static struct mei_cl *mei_bus_find_mei_cl_by_uuid(struct mei_device *dev, 143struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev,
144 uuid_le uuid) 144 uuid_le uuid)
145{ 145{
146 struct mei_cl *cl; 146 struct mei_cl *cl;
@@ -160,7 +160,7 @@ struct mei_cl_device *mei_cl_add_device(struct mei_device *dev,
160 struct mei_cl *cl; 160 struct mei_cl *cl;
161 int status; 161 int status;
162 162
163 cl = mei_bus_find_mei_cl_by_uuid(dev, uuid); 163 cl = mei_cl_bus_find_cl_by_uuid(dev, uuid);
164 if (cl == NULL) 164 if (cl == NULL)
165 return NULL; 165 return NULL;
166 166
diff --git a/drivers/misc/mei/client.c b/drivers/misc/mei/client.c
index bc9ba5359bc6..1382d551d7ed 100644
--- a/drivers/misc/mei/client.c
+++ b/drivers/misc/mei/client.c
@@ -146,7 +146,7 @@ static void __mei_io_list_flush(struct mei_cl_cb *list,
146 146
147 /* enable removing everything if no cl is specified */ 147 /* enable removing everything if no cl is specified */
148 list_for_each_entry_safe(cb, next, &list->list, list) { 148 list_for_each_entry_safe(cb, next, &list->list, list) {
149 if (!cl || (cb->cl && mei_cl_cmp_id(cl, cb->cl))) { 149 if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
150 list_del(&cb->list); 150 list_del(&cb->list);
151 if (free) 151 if (free)
152 mei_io_cb_free(cb); 152 mei_io_cb_free(cb);
diff --git a/drivers/misc/mei/debugfs.c b/drivers/misc/mei/debugfs.c
index ce1566715f80..b60b4263cf0f 100644
--- a/drivers/misc/mei/debugfs.c
+++ b/drivers/misc/mei/debugfs.c
@@ -34,7 +34,7 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
34 int pos = 0; 34 int pos = 0;
35 int ret; 35 int ret;
36 36
37#define HDR " |id|addr| UUID |con|msg len|sb|\n" 37#define HDR " |id|fix| UUID |con|msg len|sb|\n"
38 38
39 mutex_lock(&dev->device_lock); 39 mutex_lock(&dev->device_lock);
40 40
@@ -56,12 +56,8 @@ static ssize_t mei_dbgfs_read_meclients(struct file *fp, char __user *ubuf,
56 56
57 list_for_each_entry(me_cl, &dev->me_clients, list) { 57 list_for_each_entry(me_cl, &dev->me_clients, list) {
58 58
59 /* skip me clients that cannot be connected */
60 if (me_cl->props.max_number_of_connections == 0)
61 continue;
62
63 pos += scnprintf(buf + pos, bufsz - pos, 59 pos += scnprintf(buf + pos, bufsz - pos,
64 "%2d|%2d|%4d|%pUl|%3d|%7d|%2d|\n", 60 "%2d|%2d|%3d|%pUl|%3d|%7d|%2d|\n",
65 i++, me_cl->client_id, 61 i++, me_cl->client_id,
66 me_cl->props.fixed_address, 62 me_cl->props.fixed_address,
67 &me_cl->props.protocol_name, 63 &me_cl->props.protocol_name,
diff --git a/drivers/misc/mei/hbm.c b/drivers/misc/mei/hbm.c
index 49a2653d91a5..239d7f5d6a92 100644
--- a/drivers/misc/mei/hbm.c
+++ b/drivers/misc/mei/hbm.c
@@ -562,17 +562,17 @@ int mei_hbm_cl_disconnect_rsp(struct mei_device *dev, struct mei_cl *cl)
562 * mei_hbm_cl_disconnect_res - update the client state according 562 * mei_hbm_cl_disconnect_res - update the client state according
563 * disconnect response 563 * disconnect response
564 * 564 *
565 * @dev: the device structure
565 * @cl: mei host client 566 * @cl: mei host client
566 * @cmd: disconnect client response host bus message 567 * @cmd: disconnect client response host bus message
567 */ 568 */
568static void mei_hbm_cl_disconnect_res(struct mei_cl *cl, 569static void mei_hbm_cl_disconnect_res(struct mei_device *dev, struct mei_cl *cl,
569 struct mei_hbm_cl_cmd *cmd) 570 struct mei_hbm_cl_cmd *cmd)
570{ 571{
571 struct hbm_client_connect_response *rs = 572 struct hbm_client_connect_response *rs =
572 (struct hbm_client_connect_response *)cmd; 573 (struct hbm_client_connect_response *)cmd;
573 574
574 dev_dbg(cl->dev->dev, "hbm: disconnect response cl:host=%02d me=%02d status=%d\n", 575 cl_dbg(dev, cl, "hbm: disconnect response status=%d\n", rs->status);
575 rs->me_addr, rs->host_addr, rs->status);
576 576
577 if (rs->status == MEI_CL_DISCONN_SUCCESS) 577 if (rs->status == MEI_CL_DISCONN_SUCCESS)
578 cl->state = MEI_FILE_DISCONNECTED; 578 cl->state = MEI_FILE_DISCONNECTED;
@@ -598,17 +598,17 @@ int mei_hbm_cl_connect_req(struct mei_device *dev, struct mei_cl *cl)
598 * mei_hbm_cl_connect_res - update the client state according 598 * mei_hbm_cl_connect_res - update the client state according
599 * connection response 599 * connection response
600 * 600 *
601 * @dev: the device structure
601 * @cl: mei host client 602 * @cl: mei host client
602 * @cmd: connect client response host bus message 603 * @cmd: connect client response host bus message
603 */ 604 */
604static void mei_hbm_cl_connect_res(struct mei_cl *cl, 605static void mei_hbm_cl_connect_res(struct mei_device *dev, struct mei_cl *cl,
605 struct mei_hbm_cl_cmd *cmd) 606 struct mei_hbm_cl_cmd *cmd)
606{ 607{
607 struct hbm_client_connect_response *rs = 608 struct hbm_client_connect_response *rs =
608 (struct hbm_client_connect_response *)cmd; 609 (struct hbm_client_connect_response *)cmd;
609 610
610 dev_dbg(cl->dev->dev, "hbm: connect response cl:host=%02d me=%02d status=%s\n", 611 cl_dbg(dev, cl, "hbm: connect response status=%s\n",
611 rs->me_addr, rs->host_addr,
612 mei_cl_conn_status_str(rs->status)); 612 mei_cl_conn_status_str(rs->status));
613 613
614 if (rs->status == MEI_CL_CONN_SUCCESS) 614 if (rs->status == MEI_CL_CONN_SUCCESS)
@@ -637,11 +637,6 @@ static void mei_hbm_cl_res(struct mei_device *dev,
637 list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) { 637 list_for_each_entry_safe(cb, next, &dev->ctrl_rd_list.list, list) {
638 638
639 cl = cb->cl; 639 cl = cb->cl;
640 /* this should not happen */
641 if (WARN_ON(!cl)) {
642 list_del_init(&cb->list);
643 continue;
644 }
645 640
646 if (cb->fop_type != fop_type) 641 if (cb->fop_type != fop_type)
647 continue; 642 continue;
@@ -657,10 +652,10 @@ static void mei_hbm_cl_res(struct mei_device *dev,
657 652
658 switch (fop_type) { 653 switch (fop_type) {
659 case MEI_FOP_CONNECT: 654 case MEI_FOP_CONNECT:
660 mei_hbm_cl_connect_res(cl, rs); 655 mei_hbm_cl_connect_res(dev, cl, rs);
661 break; 656 break;
662 case MEI_FOP_DISCONNECT: 657 case MEI_FOP_DISCONNECT:
663 mei_hbm_cl_disconnect_res(cl, rs); 658 mei_hbm_cl_disconnect_res(dev, cl, rs);
664 break; 659 break;
665 default: 660 default:
666 return; 661 return;
@@ -811,8 +806,6 @@ int mei_hbm_dispatch(struct mei_device *dev, struct mei_msg_hdr *hdr)
811 return -EPROTO; 806 return -EPROTO;
812 } 807 }
813 808
814 dev->hbm_state = MEI_HBM_STARTED;
815
816 if (mei_hbm_enum_clients_req(dev)) { 809 if (mei_hbm_enum_clients_req(dev)) {
817 dev_err(dev->dev, "hbm: start: failed to send enumeration request\n"); 810 dev_err(dev->dev, "hbm: start: failed to send enumeration request\n");
818 return -EIO; 811 return -EIO;
diff --git a/drivers/misc/mei/hbm.h b/drivers/misc/mei/hbm.h
index b7cd3d857fd5..2544db7d1649 100644
--- a/drivers/misc/mei/hbm.h
+++ b/drivers/misc/mei/hbm.h
@@ -26,17 +26,17 @@ struct mei_cl;
26 * 26 *
27 * @MEI_HBM_IDLE : protocol not started 27 * @MEI_HBM_IDLE : protocol not started
28 * @MEI_HBM_STARTING : start request message was sent 28 * @MEI_HBM_STARTING : start request message was sent
29 * @MEI_HBM_STARTED : start reply message was received
30 * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent 29 * @MEI_HBM_ENUM_CLIENTS : enumeration request was sent
31 * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties 30 * @MEI_HBM_CLIENT_PROPERTIES : acquiring clients properties
31 * @MEI_HBM_STARTED : enumeration was completed
32 * @MEI_HBM_STOPPED : stopping exchange 32 * @MEI_HBM_STOPPED : stopping exchange
33 */ 33 */
34enum mei_hbm_state { 34enum mei_hbm_state {
35 MEI_HBM_IDLE = 0, 35 MEI_HBM_IDLE = 0,
36 MEI_HBM_STARTING, 36 MEI_HBM_STARTING,
37 MEI_HBM_STARTED,
38 MEI_HBM_ENUM_CLIENTS, 37 MEI_HBM_ENUM_CLIENTS,
39 MEI_HBM_CLIENT_PROPERTIES, 38 MEI_HBM_CLIENT_PROPERTIES,
39 MEI_HBM_STARTED,
40 MEI_HBM_STOPPED, 40 MEI_HBM_STOPPED,
41}; 41};
42 42
diff --git a/drivers/misc/mei/hw-me-regs.h b/drivers/misc/mei/hw-me-regs.h
index c5feafdd58a8..9eb7ed70ace2 100644
--- a/drivers/misc/mei/hw-me-regs.h
+++ b/drivers/misc/mei/hw-me-regs.h
@@ -117,14 +117,18 @@
117#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */ 117#define MEI_DEV_ID_WPT_LP 0x9CBA /* Wildcat Point LP */
118#define MEI_DEV_ID_WPT_LP_2 0x9CBB /* Wildcat Point LP 2 */ 118#define MEI_DEV_ID_WPT_LP_2 0x9CBB /* Wildcat Point LP 2 */
119 119
120/* Host Firmware Status Registers in PCI Config Space */
121#define PCI_CFG_HFS_1 0x40
122#define PCI_CFG_HFS_2 0x48
123
124/* 120/*
125 * MEI HW Section 121 * MEI HW Section
126 */ 122 */
127 123
124/* Host Firmware Status Registers in PCI Config Space */
125#define PCI_CFG_HFS_1 0x40
126#define PCI_CFG_HFS_2 0x48
127#define PCI_CFG_HFS_3 0x60
128#define PCI_CFG_HFS_4 0x64
129#define PCI_CFG_HFS_5 0x68
130#define PCI_CFG_HFS_6 0x6C
131
128/* MEI registers */ 132/* MEI registers */
129/* H_CB_WW - Host Circular Buffer (CB) Write Window register */ 133/* H_CB_WW - Host Circular Buffer (CB) Write Window register */
130#define H_CB_WW 0 134#define H_CB_WW 0
diff --git a/drivers/misc/mei/hw-me.c b/drivers/misc/mei/hw-me.c
index 4f2fd6fc1e23..ff2755062b44 100644
--- a/drivers/misc/mei/hw-me.c
+++ b/drivers/misc/mei/hw-me.c
@@ -270,10 +270,10 @@ static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
270static void mei_me_host_set_ready(struct mei_device *dev) 270static void mei_me_host_set_ready(struct mei_device *dev)
271{ 271{
272 struct mei_me_hw *hw = to_me_hw(dev); 272 struct mei_me_hw *hw = to_me_hw(dev);
273 u32 hcsr = mei_hcsr_read(hw);
273 274
274 hw->host_hw_state = mei_hcsr_read(hw); 275 hcsr |= H_IE | H_IG | H_RDY;
275 hw->host_hw_state |= H_IE | H_IG | H_RDY; 276 mei_hcsr_set(hw, hcsr);
276 mei_hcsr_set(hw, hw->host_hw_state);
277} 277}
278 278
279/** 279/**
@@ -285,9 +285,9 @@ static void mei_me_host_set_ready(struct mei_device *dev)
285static bool mei_me_host_is_ready(struct mei_device *dev) 285static bool mei_me_host_is_ready(struct mei_device *dev)
286{ 286{
287 struct mei_me_hw *hw = to_me_hw(dev); 287 struct mei_me_hw *hw = to_me_hw(dev);
288 u32 hcsr = mei_hcsr_read(hw);
288 289
289 hw->host_hw_state = mei_hcsr_read(hw); 290 return (hcsr & H_RDY) == H_RDY;
290 return (hw->host_hw_state & H_RDY) == H_RDY;
291} 291}
292 292
293/** 293/**
@@ -299,9 +299,9 @@ static bool mei_me_host_is_ready(struct mei_device *dev)
299static bool mei_me_hw_is_ready(struct mei_device *dev) 299static bool mei_me_hw_is_ready(struct mei_device *dev)
300{ 300{
301 struct mei_me_hw *hw = to_me_hw(dev); 301 struct mei_me_hw *hw = to_me_hw(dev);
302 u32 mecsr = mei_me_mecsr_read(hw);
302 303
303 hw->me_hw_state = mei_me_mecsr_read(hw); 304 return (mecsr & ME_RDY_HRA) == ME_RDY_HRA;
304 return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
305} 305}
306 306
307/** 307/**
@@ -356,12 +356,13 @@ static int mei_me_hw_start(struct mei_device *dev)
356static unsigned char mei_hbuf_filled_slots(struct mei_device *dev) 356static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
357{ 357{
358 struct mei_me_hw *hw = to_me_hw(dev); 358 struct mei_me_hw *hw = to_me_hw(dev);
359 u32 hcsr;
359 char read_ptr, write_ptr; 360 char read_ptr, write_ptr;
360 361
361 hw->host_hw_state = mei_hcsr_read(hw); 362 hcsr = mei_hcsr_read(hw);
362 363
363 read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8); 364 read_ptr = (char) ((hcsr & H_CBRP) >> 8);
364 write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16); 365 write_ptr = (char) ((hcsr & H_CBWP) >> 16);
365 366
366 return (unsigned char) (write_ptr - read_ptr); 367 return (unsigned char) (write_ptr - read_ptr);
367} 368}
@@ -474,13 +475,14 @@ static int mei_me_write_message(struct mei_device *dev,
474static int mei_me_count_full_read_slots(struct mei_device *dev) 475static int mei_me_count_full_read_slots(struct mei_device *dev)
475{ 476{
476 struct mei_me_hw *hw = to_me_hw(dev); 477 struct mei_me_hw *hw = to_me_hw(dev);
478 u32 me_csr;
477 char read_ptr, write_ptr; 479 char read_ptr, write_ptr;
478 unsigned char buffer_depth, filled_slots; 480 unsigned char buffer_depth, filled_slots;
479 481
480 hw->me_hw_state = mei_me_mecsr_read(hw); 482 me_csr = mei_me_mecsr_read(hw);
481 buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24); 483 buffer_depth = (unsigned char)((me_csr & ME_CBD_HRA) >> 24);
482 read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8); 484 read_ptr = (char) ((me_csr & ME_CBRP_HRA) >> 8);
483 write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16); 485 write_ptr = (char) ((me_csr & ME_CBWP_HRA) >> 16);
484 filled_slots = (unsigned char) (write_ptr - read_ptr); 486 filled_slots = (unsigned char) (write_ptr - read_ptr);
485 487
486 /* check for overflow */ 488 /* check for overflow */
@@ -833,6 +835,14 @@ static bool mei_me_fw_type_sps(struct pci_dev *pdev)
833 .fw_status.status[0] = PCI_CFG_HFS_1, \ 835 .fw_status.status[0] = PCI_CFG_HFS_1, \
834 .fw_status.status[1] = PCI_CFG_HFS_2 836 .fw_status.status[1] = PCI_CFG_HFS_2
835 837
838#define MEI_CFG_PCH8_HFS \
839 .fw_status.count = 6, \
840 .fw_status.status[0] = PCI_CFG_HFS_1, \
841 .fw_status.status[1] = PCI_CFG_HFS_2, \
842 .fw_status.status[2] = PCI_CFG_HFS_3, \
843 .fw_status.status[3] = PCI_CFG_HFS_4, \
844 .fw_status.status[4] = PCI_CFG_HFS_5, \
845 .fw_status.status[5] = PCI_CFG_HFS_6
836 846
837/* ICH Legacy devices */ 847/* ICH Legacy devices */
838const struct mei_cfg mei_me_legacy_cfg = { 848const struct mei_cfg mei_me_legacy_cfg = {
@@ -856,9 +866,14 @@ const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
856 MEI_CFG_FW_NM, 866 MEI_CFG_FW_NM,
857}; 867};
858 868
859/* PCH Lynx Point with quirk for SPS Firmware exclusion */ 869/* PCH8 Lynx Point and newer devices */
860const struct mei_cfg mei_me_lpt_cfg = { 870const struct mei_cfg mei_me_pch8_cfg = {
861 MEI_CFG_PCH_HFS, 871 MEI_CFG_PCH8_HFS,
872};
873
874/* PCH8 Lynx Point with quirk for SPS Firmware exclusion */
875const struct mei_cfg mei_me_pch8_sps_cfg = {
876 MEI_CFG_PCH8_HFS,
862 MEI_CFG_FW_SPS, 877 MEI_CFG_FW_SPS,
863}; 878};
864 879
diff --git a/drivers/misc/mei/hw-me.h b/drivers/misc/mei/hw-me.h
index e6a59a62573a..d6567af44377 100644
--- a/drivers/misc/mei/hw-me.h
+++ b/drivers/misc/mei/hw-me.h
@@ -51,18 +51,11 @@ struct mei_cfg {
51 * 51 *
52 * @cfg: per device generation config and ops 52 * @cfg: per device generation config and ops
53 * @mem_addr: io memory address 53 * @mem_addr: io memory address
54 * @host_hw_state: cached host state
55 * @me_hw_state: cached me (fw) state
56 * @pg_state: power gating state 54 * @pg_state: power gating state
57 */ 55 */
58struct mei_me_hw { 56struct mei_me_hw {
59 const struct mei_cfg *cfg; 57 const struct mei_cfg *cfg;
60 void __iomem *mem_addr; 58 void __iomem *mem_addr;
61 /*
62 * hw states of host and fw(ME)
63 */
64 u32 host_hw_state;
65 u32 me_hw_state;
66 enum mei_pg_state pg_state; 59 enum mei_pg_state pg_state;
67}; 60};
68 61
@@ -72,7 +65,8 @@ extern const struct mei_cfg mei_me_legacy_cfg;
72extern const struct mei_cfg mei_me_ich_cfg; 65extern const struct mei_cfg mei_me_ich_cfg;
73extern const struct mei_cfg mei_me_pch_cfg; 66extern const struct mei_cfg mei_me_pch_cfg;
74extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg; 67extern const struct mei_cfg mei_me_pch_cpt_pbg_cfg;
75extern const struct mei_cfg mei_me_lpt_cfg; 68extern const struct mei_cfg mei_me_pch8_cfg;
69extern const struct mei_cfg mei_me_pch8_sps_cfg;
76 70
77struct mei_device *mei_me_dev_init(struct pci_dev *pdev, 71struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
78 const struct mei_cfg *cfg); 72 const struct mei_cfg *cfg);
diff --git a/drivers/misc/mei/hw-txe.c b/drivers/misc/mei/hw-txe.c
index c5e1902e493f..618ea721aca8 100644
--- a/drivers/misc/mei/hw-txe.c
+++ b/drivers/misc/mei/hw-txe.c
@@ -700,11 +700,10 @@ static int mei_txe_write(struct mei_device *dev,
700 mei_txe_input_ready_interrupt_enable(dev); 700 mei_txe_input_ready_interrupt_enable(dev);
701 701
702 if (!mei_txe_is_input_ready(dev)) { 702 if (!mei_txe_is_input_ready(dev)) {
703 struct mei_fw_status fw_status; 703 char fw_sts_str[MEI_FW_STATUS_STR_SZ];
704 704
705 mei_fw_status(dev, &fw_status); 705 mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
706 dev_err(dev->dev, "Input is not ready " FW_STS_FMT "\n", 706 dev_err(dev->dev, "Input is not ready %s\n", fw_sts_str);
707 FW_STS_PRM(fw_status));
708 return -EAGAIN; 707 return -EAGAIN;
709 } 708 }
710 709
diff --git a/drivers/misc/mei/init.c b/drivers/misc/mei/init.c
index 7901d076c127..9306219d5675 100644
--- a/drivers/misc/mei/init.c
+++ b/drivers/misc/mei/init.c
@@ -54,6 +54,35 @@ const char *mei_pg_state_str(enum mei_pg_state state)
54#undef MEI_PG_STATE 54#undef MEI_PG_STATE
55} 55}
56 56
57/**
58 * mei_fw_status2str - convert fw status registers to printable string
59 *
60 * @fw_status: firmware status
61 * @buf: string buffer at minimal size MEI_FW_STATUS_STR_SZ
62 * @len: buffer len must be >= MEI_FW_STATUS_STR_SZ
63 *
64 * Return: number of bytes written or -EINVAL if buffer is to small
65 */
66ssize_t mei_fw_status2str(struct mei_fw_status *fw_status,
67 char *buf, size_t len)
68{
69 ssize_t cnt = 0;
70 int i;
71
72 buf[0] = '\0';
73
74 if (len < MEI_FW_STATUS_STR_SZ)
75 return -EINVAL;
76
77 for (i = 0; i < fw_status->count; i++)
78 cnt += scnprintf(buf + cnt, len - cnt, "%08X ",
79 fw_status->status[i]);
80
81 /* drop last space */
82 buf[cnt] = '\0';
83 return cnt;
84}
85EXPORT_SYMBOL_GPL(mei_fw_status2str);
57 86
58/** 87/**
59 * mei_cancel_work - Cancel mei background jobs 88 * mei_cancel_work - Cancel mei background jobs
@@ -86,12 +115,11 @@ int mei_reset(struct mei_device *dev)
86 state != MEI_DEV_DISABLED && 115 state != MEI_DEV_DISABLED &&
87 state != MEI_DEV_POWER_DOWN && 116 state != MEI_DEV_POWER_DOWN &&
88 state != MEI_DEV_POWER_UP) { 117 state != MEI_DEV_POWER_UP) {
89 struct mei_fw_status fw_status; 118 char fw_sts_str[MEI_FW_STATUS_STR_SZ];
90 119
91 mei_fw_status(dev, &fw_status); 120 mei_fw_status_str(dev, fw_sts_str, MEI_FW_STATUS_STR_SZ);
92 dev_warn(dev->dev, 121 dev_warn(dev->dev, "unexpected reset: dev_state = %s fw status = %s\n",
93 "unexpected reset: dev_state = %s " FW_STS_FMT "\n", 122 mei_dev_state_str(state), fw_sts_str);
94 mei_dev_state_str(state), FW_STS_PRM(fw_status));
95 } 123 }
96 124
97 /* we're already in reset, cancel the init timer 125 /* we're already in reset, cancel the init timer
diff --git a/drivers/misc/mei/interrupt.c b/drivers/misc/mei/interrupt.c
index 20c6c511f438..711cddfa9c99 100644
--- a/drivers/misc/mei/interrupt.c
+++ b/drivers/misc/mei/interrupt.c
@@ -44,8 +44,6 @@ void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
44 list_for_each_entry_safe(cb, next, &compl_list->list, list) { 44 list_for_each_entry_safe(cb, next, &compl_list->list, list) {
45 cl = cb->cl; 45 cl = cb->cl;
46 list_del(&cb->list); 46 list_del(&cb->list);
47 if (!cl)
48 continue;
49 47
50 dev_dbg(dev->dev, "completing call back.\n"); 48 dev_dbg(dev->dev, "completing call back.\n");
51 if (cl == &dev->iamthif_cl) 49 if (cl == &dev->iamthif_cl)
@@ -105,7 +103,7 @@ static int mei_cl_irq_read_msg(struct mei_device *dev,
105 103
106 list_for_each_entry_safe(cb, next, &dev->read_list.list, list) { 104 list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
107 cl = cb->cl; 105 cl = cb->cl;
108 if (!cl || !mei_cl_is_reading(cl, mei_hdr)) 106 if (!mei_cl_is_reading(cl, mei_hdr))
109 continue; 107 continue;
110 108
111 cl->reading_state = MEI_READING; 109 cl->reading_state = MEI_READING;
@@ -449,8 +447,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
449 list = &dev->write_waiting_list; 447 list = &dev->write_waiting_list;
450 list_for_each_entry_safe(cb, next, &list->list, list) { 448 list_for_each_entry_safe(cb, next, &list->list, list) {
451 cl = cb->cl; 449 cl = cb->cl;
452 if (cl == NULL)
453 continue;
454 450
455 cl->status = 0; 451 cl->status = 0;
456 list_del(&cb->list); 452 list_del(&cb->list);
@@ -489,10 +485,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
489 dev_dbg(dev->dev, "complete control write list cb.\n"); 485 dev_dbg(dev->dev, "complete control write list cb.\n");
490 list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) { 486 list_for_each_entry_safe(cb, next, &dev->ctrl_wr_list.list, list) {
491 cl = cb->cl; 487 cl = cb->cl;
492 if (!cl) {
493 list_del(&cb->list);
494 return -ENODEV;
495 }
496 switch (cb->fop_type) { 488 switch (cb->fop_type) {
497 case MEI_FOP_DISCONNECT: 489 case MEI_FOP_DISCONNECT:
498 /* send disconnect message */ 490 /* send disconnect message */
@@ -530,8 +522,6 @@ int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
530 dev_dbg(dev->dev, "complete write list cb.\n"); 522 dev_dbg(dev->dev, "complete write list cb.\n");
531 list_for_each_entry_safe(cb, next, &dev->write_list.list, list) { 523 list_for_each_entry_safe(cb, next, &dev->write_list.list, list) {
532 cl = cb->cl; 524 cl = cb->cl;
533 if (cl == NULL)
534 continue;
535 if (cl == &dev->iamthif_cl) 525 if (cl == &dev->iamthif_cl)
536 ret = mei_amthif_irq_write(cl, cb, cmpl_list); 526 ret = mei_amthif_irq_write(cl, cb, cmpl_list);
537 else 527 else
diff --git a/drivers/misc/mei/main.c b/drivers/misc/mei/main.c
index beedc91f03a6..ae56ba6ca0e3 100644
--- a/drivers/misc/mei/main.c
+++ b/drivers/misc/mei/main.c
@@ -631,6 +631,44 @@ out:
631 return mask; 631 return mask;
632} 632}
633 633
634/**
635 * fw_status_show - mei device attribute show method
636 *
637 * @device: device pointer
638 * @attr: attribute pointer
639 * @buf: char out buffer
640 *
641 * Return: number of the bytes printed into buf or error
642 */
643static ssize_t fw_status_show(struct device *device,
644 struct device_attribute *attr, char *buf)
645{
646 struct mei_device *dev = dev_get_drvdata(device);
647 struct mei_fw_status fw_status;
648 int err, i;
649 ssize_t cnt = 0;
650
651 mutex_lock(&dev->device_lock);
652 err = mei_fw_status(dev, &fw_status);
653 mutex_unlock(&dev->device_lock);
654 if (err) {
655 dev_err(device, "read fw_status error = %d\n", err);
656 return err;
657 }
658
659 for (i = 0; i < fw_status.count; i++)
660 cnt += scnprintf(buf + cnt, PAGE_SIZE - cnt, "%08X\n",
661 fw_status.status[i]);
662 return cnt;
663}
664static DEVICE_ATTR_RO(fw_status);
665
666static struct attribute *mei_attrs[] = {
667 &dev_attr_fw_status.attr,
668 NULL
669};
670ATTRIBUTE_GROUPS(mei);
671
634/* 672/*
635 * file operations structure will be used for mei char device. 673 * file operations structure will be used for mei char device.
636 */ 674 */
@@ -710,8 +748,9 @@ int mei_register(struct mei_device *dev, struct device *parent)
710 goto err_dev_add; 748 goto err_dev_add;
711 } 749 }
712 750
713 clsdev = device_create(mei_class, parent, devno, 751 clsdev = device_create_with_groups(mei_class, parent, devno,
714 NULL, "mei%d", dev->minor); 752 dev, mei_groups,
753 "mei%d", dev->minor);
715 754
716 if (IS_ERR(clsdev)) { 755 if (IS_ERR(clsdev)) {
717 dev_err(parent, "unable to create device %d:%d\n", 756 dev_err(parent, "unable to create device %d:%d\n",
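With fw_status exposed as a read-only class-device attribute, the registers can be read from userspace as one 8-digit hex value per line. A minimal reader follows; the /sys/class/mei/mei0 path is assumed as the typical location of the first device and is not part of the patch:

#include <stdio.h>

int main(void)
{
    /* Assumed typical path for the first MEI device; adjust as needed. */
    const char *path = "/sys/class/mei/mei0/fw_status";
    char line[16];
    FILE *f = fopen(path, "r");
    int i = 0;

    if (!f) {
        perror(path);
        return 1;
    }
    /* fw_status_show() emits one 8-digit hex register per line. */
    while (fgets(line, sizeof(line), f))
        printf("HFS_%d: %s", ++i, line);
    fclose(f);
    return 0;
}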
diff --git a/drivers/misc/mei/mei_dev.h b/drivers/misc/mei/mei_dev.h
index 61b04d7646f1..3dad74a8d496 100644
--- a/drivers/misc/mei/mei_dev.h
+++ b/drivers/misc/mei/mei_dev.h
@@ -152,7 +152,10 @@ struct mei_msg_data {
 };
 
 /* Maximum number of processed FW status registers */
-#define MEI_FW_STATUS_MAX 2
+#define MEI_FW_STATUS_MAX 6
+/* Minimal buffer for FW status string (8 bytes in dw + space or '\0') */
+#define MEI_FW_STATUS_STR_SZ (MEI_FW_STATUS_MAX * (8 + 1))
+
 
 /*
  * struct mei_fw_status - storage of FW status data
@@ -349,6 +352,7 @@ void mei_cl_bus_rx_event(struct mei_cl *cl);
 void mei_cl_bus_remove_devices(struct mei_device *dev);
 int mei_cl_bus_init(void);
 void mei_cl_bus_exit(void);
+struct mei_cl *mei_cl_bus_find_cl_by_uuid(struct mei_device *dev, uuid_le uuid);
 
 
 /**
@@ -804,11 +808,6 @@ static inline int mei_fw_status(struct mei_device *dev,
 	return dev->ops->fw_status(dev, fw_status);
 }
 
-#define FW_STS_FMT "%08X %08X"
-#define FW_STS_PRM(fw_status) \
-	(fw_status).count > 0 ? (fw_status).status[0] : 0xDEADBEEF, \
-	(fw_status).count > 1 ? (fw_status).status[1] : 0xDEADBEEF
-
 bool mei_hbuf_acquire(struct mei_device *dev);
 
 bool mei_write_is_idle(struct mei_device *dev);
@@ -832,4 +831,32 @@ void mei_deregister(struct mei_device *dev);
 	(hdr)->host_addr, (hdr)->me_addr, \
 	(hdr)->length, (hdr)->internal, (hdr)->msg_complete
 
+ssize_t mei_fw_status2str(struct mei_fw_status *fw_sts, char *buf, size_t len);
+/**
+ * mei_fw_status_str - fetch and convert fw status registers to printable string
+ *
+ * @dev: the device structure
+ * @buf: string buffer at minimal size MEI_FW_STATUS_STR_SZ
+ * @len: buffer len must be >= MEI_FW_STATUS_STR_SZ
+ *
+ * Return: number of bytes written or < 0 on failure
+ */
+static inline ssize_t mei_fw_status_str(struct mei_device *dev,
+					char *buf, size_t len)
+{
+	struct mei_fw_status fw_status;
+	int ret;
+
+	buf[0] = '\0';
+
+	ret = mei_fw_status(dev, &fw_status);
+	if (ret)
+		return ret;
+
+	ret = mei_fw_status2str(&fw_status, buf, MEI_FW_STATUS_STR_SZ);
+
+	return ret;
+}
+
+
 #endif
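MEI_FW_STATUS_STR_SZ budgets 8 hex digits plus one separator (or the trailing '\0') for each of up to MEI_FW_STATUS_MAX registers. A standalone sketch of that sizing rule; fw_status2str() below is a hypothetical stand-in for mei_fw_status2str(), not the kernel implementation, and the register values are invented:

#include <stdio.h>
#include <inttypes.h>
#include <stddef.h>

/* Sizing rule mirrored from mei_dev.h: 8 hex digits plus a space or '\0'. */
#define FW_STATUS_MAX		6
#define FW_STATUS_STR_SZ	(FW_STATUS_MAX * (8 + 1))

/* Hypothetical stand-in for mei_fw_status2str(): space-separated hex words. */
static size_t fw_status2str(const uint32_t *regs, size_t count,
			    char *buf, size_t len)
{
	size_t i, cnt = 0;

	buf[0] = '\0';
	for (i = 0; i < count && i < FW_STATUS_MAX && cnt < len; i++)
		cnt += (size_t)snprintf(buf + cnt, len - cnt,
					"%08" PRIX32 " ", regs[i]);
	return cnt;
}

int main(void)
{
	uint32_t regs[FW_STATUS_MAX] = { 0x1E000255, 0x60002306 };	/* example values */
	char buf[FW_STATUS_STR_SZ];

	fw_status2str(regs, 2, buf, sizeof(buf));
	printf("%s\n", buf);	/* prints "1E000255 60002306 " */
	return 0;
}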
diff --git a/drivers/misc/mei/nfc.c b/drivers/misc/mei/nfc.c
index 622654323177..60ca9240368e 100644
--- a/drivers/misc/mei/nfc.c
+++ b/drivers/misc/mei/nfc.c
@@ -117,8 +117,6 @@ struct mei_nfc_dev {
 	u16 recv_req_id;
 };
 
-static struct mei_nfc_dev nfc_dev;
-
 /* UUIDs for NFC F/W clients */
 const uuid_le mei_nfc_guid = UUID_LE(0x0bb17a78, 0x2a8e, 0x4c50,
 				     0x94, 0xd4, 0x50, 0x26,
@@ -138,6 +136,9 @@ static const uuid_le mei_nfc_info_guid = UUID_LE(0xd2de1625, 0x382d, 0x417d,
 
 static void mei_nfc_free(struct mei_nfc_dev *ndev)
 {
+	if (!ndev)
+		return;
+
 	if (ndev->cl) {
 		list_del(&ndev->cl->device_link);
 		mei_cl_unlink(ndev->cl);
@@ -150,7 +151,7 @@ static void mei_nfc_free(struct mei_nfc_dev *ndev)
 		kfree(ndev->cl_info);
 	}
 
-	memset(ndev, 0, sizeof(struct mei_nfc_dev));
+	kfree(ndev);
 }
 
 static int mei_nfc_build_bus_name(struct mei_nfc_dev *ndev)
@@ -319,9 +320,10 @@ err:
 static int mei_nfc_enable(struct mei_cl_device *cldev)
 {
 	struct mei_device *dev;
-	struct mei_nfc_dev *ndev = &nfc_dev;
+	struct mei_nfc_dev *ndev;
 	int ret;
 
+	ndev = (struct mei_nfc_dev *)cldev->priv_data;
 	dev = ndev->cl->dev;
 
 	ret = mei_nfc_connect(ndev);
@@ -479,15 +481,25 @@ err:
 
 int mei_nfc_host_init(struct mei_device *dev)
 {
-	struct mei_nfc_dev *ndev = &nfc_dev;
+	struct mei_nfc_dev *ndev;
 	struct mei_cl *cl_info, *cl = NULL;
 	struct mei_me_client *me_cl;
 	int ret;
 
-	/* already initialized */
-	if (ndev->cl_info)
+
+	/* in case of internal reset bail out
+	 * as the device is already setup
+	 */
+	cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
+	if (cl)
 		return 0;
 
+	ndev = kzalloc(sizeof(struct mei_nfc_dev), GFP_KERNEL);
+	if (!ndev) {
+		ret = -ENOMEM;
+		goto err;
+	}
+
 	ndev->cl_info = mei_cl_allocate(dev);
 	ndev->cl = mei_cl_allocate(dev);
 
@@ -550,9 +562,31 @@ err:
 
 void mei_nfc_host_exit(struct mei_device *dev)
 {
-	struct mei_nfc_dev *ndev = &nfc_dev;
+	struct mei_nfc_dev *ndev;
+	struct mei_cl *cl;
+	struct mei_cl_device *cldev;
+
+	cl = mei_cl_bus_find_cl_by_uuid(dev, mei_nfc_guid);
+	if (!cl)
+		return;
+
+	cldev = cl->device;
+	if (!cldev)
+		return;
 
-	cancel_work_sync(&ndev->init_work);
+	ndev = (struct mei_nfc_dev *)cldev->priv_data;
+	if (ndev)
+		cancel_work_sync(&ndev->init_work);
+
+	cldev->priv_data = NULL;
+
+	mutex_lock(&dev->device_lock);
+	/* Need to remove the device here
+	 * since mei_nfc_free will unlink the clients
+	 */
+	mei_cl_remove_device(cldev);
+	mei_nfc_free(ndev);
+	mutex_unlock(&dev->device_lock);
 }
 
 
diff --git a/drivers/misc/mei/pci-me.c b/drivers/misc/mei/pci-me.c
index cf20d397068a..bd3039ab8f98 100644
--- a/drivers/misc/mei/pci-me.c
+++ b/drivers/misc/mei/pci-me.c
@@ -76,12 +76,12 @@ static const struct pci_device_id mei_me_pci_tbl[] = {
 	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_1, mei_me_pch_cfg)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_2, mei_me_pch_cfg)},
 	{MEI_PCI_DEVICE(MEI_DEV_ID_PPT_3, mei_me_pch_cfg)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_lpt_cfg)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_lpt_cfg)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch_cfg)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_lpt_cfg)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch_cfg)},
-	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_H, mei_me_pch8_sps_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_W, mei_me_pch8_sps_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_LP, mei_me_pch8_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_LPT_HR, mei_me_pch8_sps_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP, mei_me_pch8_cfg)},
+	{MEI_PCI_DEVICE(MEI_DEV_ID_WPT_LP_2, mei_me_pch8_cfg)},
 
 	/* required last entry */
 	{0, }
diff --git a/drivers/misc/mei/pci-txe.c b/drivers/misc/mei/pci-txe.c
index 1f572deacf54..c86e2ddbe30a 100644
--- a/drivers/misc/mei/pci-txe.c
+++ b/drivers/misc/mei/pci-txe.c
@@ -37,6 +37,7 @@
 
 static const struct pci_device_id mei_txe_pci_tbl[] = {
 	{PCI_VDEVICE(INTEL, 0x0F18)}, /* Baytrail */
+	{PCI_VDEVICE(INTEL, 0x2298)}, /* Cherrytrail */
 
 	{0, }
 };
diff --git a/drivers/misc/mei/wd.c b/drivers/misc/mei/wd.c
index b836dfffceb5..b1d892cea94d 100644
--- a/drivers/misc/mei/wd.c
+++ b/drivers/misc/mei/wd.c
@@ -270,15 +270,18 @@ static int mei_wd_ops_stop(struct watchdog_device *wd_dev)
 static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
 {
 	struct mei_device *dev;
+	struct mei_cl *cl;
 	int ret;
 
 	dev = watchdog_get_drvdata(wd_dev);
 	if (!dev)
 		return -ENODEV;
 
+	cl = &dev->wd_cl;
+
 	mutex_lock(&dev->device_lock);
 
-	if (dev->wd_cl.state != MEI_FILE_CONNECTED) {
+	if (cl->state != MEI_FILE_CONNECTED) {
 		dev_err(dev->dev, "wd: not connected.\n");
 		ret = -ENODEV;
 		goto end;
@@ -286,12 +289,12 @@ static int mei_wd_ops_ping(struct watchdog_device *wd_dev)
 
 	dev->wd_state = MEI_WD_RUNNING;
 
-	ret = mei_cl_flow_ctrl_creds(&dev->wd_cl);
+	ret = mei_cl_flow_ctrl_creds(cl);
 	if (ret < 0)
 		goto end;
+
 	/* Check if we can send the ping to HW*/
 	if (ret && mei_hbuf_acquire(dev)) {
-
 		dev_dbg(dev->dev, "wd: sending ping\n");
 
 		ret = mei_wd_send(dev);
diff --git a/drivers/misc/pch_phub.c b/drivers/misc/pch_phub.c
index 956597321d2a..9a17a9bab8d6 100644
--- a/drivers/misc/pch_phub.c
+++ b/drivers/misc/pch_phub.c
@@ -158,6 +158,7 @@ static void pch_phub_read_modify_write_reg(struct pch_phub_reg *chip,
 	iowrite32(((ioread32(reg_addr) & ~mask)) | data, reg_addr);
 }
 
+#ifdef CONFIG_PM
 /* pch_phub_save_reg_conf - saves register configuration */
 static void pch_phub_save_reg_conf(struct pci_dev *pdev)
 {
@@ -280,6 +281,7 @@ static void pch_phub_restore_reg_conf(struct pci_dev *pdev)
 	if ((chip->ioh_type == 2) || (chip->ioh_type == 4))
 		iowrite32(chip->funcsel_reg, p + FUNCSEL_REG_OFFSET);
 }
+#endif
 
 /**
  * pch_phub_read_serial_rom() - Reading Serial ROM
diff --git a/drivers/parport/parport_pc.c b/drivers/parport/parport_pc.c
index 7ae20b7a56bc..53d15b30636a 100644
--- a/drivers/parport/parport_pc.c
+++ b/drivers/parport/parport_pc.c
@@ -3339,13 +3339,14 @@ static void __exit parport_pc_exit(void)
 	while (!list_empty(&ports_list)) {
 		struct parport_pc_private *priv;
 		struct parport *port;
+		struct device *dev;
 		priv = list_entry(ports_list.next,
 				  struct parport_pc_private, list);
 		port = priv->port;
-		if (port->dev && port->dev->bus == &platform_bus_type)
-			platform_device_unregister(
-				to_platform_device(port->dev));
+		dev = port->dev;
 		parport_pc_unregister_port(port);
+		if (dev && dev->bus == &platform_bus_type)
+			platform_device_unregister(to_platform_device(dev));
 	}
 }
 
diff --git a/drivers/pcmcia/sa1111_badge4.c b/drivers/pcmcia/sa1111_badge4.c
index 4d206f4dd67b..12f0dd091477 100644
--- a/drivers/pcmcia/sa1111_badge4.c
+++ b/drivers/pcmcia/sa1111_badge4.c
@@ -132,7 +132,7 @@ static struct pcmcia_low_level badge4_pcmcia_ops = {
 	.nr		= 2,
 };
 
-int pcmcia_badge4_init(struct device *dev)
+int pcmcia_badge4_init(struct sa1111_dev *dev)
 {
 	int ret = -ENODEV;
 
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 7bae7e549d8b..80b8e9d05275 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -203,10 +203,10 @@ static int pcmcia_probe(struct sa1111_dev *dev)
 	sa1111_writel(PCCR_S0_FLT | PCCR_S1_FLT, base + PCCR);
 
 #ifdef CONFIG_SA1100_BADGE4
-	pcmcia_badge4_init(&dev->dev);
+	pcmcia_badge4_init(dev);
 #endif
 #ifdef CONFIG_SA1100_JORNADA720
-	pcmcia_jornada720_init(&dev->dev);
+	pcmcia_jornada720_init(dev);
 #endif
 #ifdef CONFIG_ARCH_LUBBOCK
 	pcmcia_lubbock_init(dev);
diff --git a/drivers/pcmcia/sa1111_generic.h b/drivers/pcmcia/sa1111_generic.h
index f6376e34a7e4..e74ecfdc1b26 100644
--- a/drivers/pcmcia/sa1111_generic.h
+++ b/drivers/pcmcia/sa1111_generic.h
@@ -18,8 +18,8 @@ int sa1111_pcmcia_add(struct sa1111_dev *dev, struct pcmcia_low_level *ops,
 extern void sa1111_pcmcia_socket_state(struct soc_pcmcia_socket *, struct pcmcia_state *);
 extern int sa1111_pcmcia_configure_socket(struct soc_pcmcia_socket *, const socket_state_t *);
 
-extern int pcmcia_badge4_init(struct device *);
-extern int pcmcia_jornada720_init(struct device *);
+extern int pcmcia_badge4_init(struct sa1111_dev *);
+extern int pcmcia_jornada720_init(struct sa1111_dev *);
 extern int pcmcia_lubbock_init(struct sa1111_dev *);
 extern int pcmcia_neponset_init(struct sa1111_dev *);
 
diff --git a/drivers/pcmcia/sa1111_jornada720.c b/drivers/pcmcia/sa1111_jornada720.c
index 40e040314503..c2c30580c83f 100644
--- a/drivers/pcmcia/sa1111_jornada720.c
+++ b/drivers/pcmcia/sa1111_jornada720.c
@@ -92,10 +92,9 @@ static struct pcmcia_low_level jornada720_pcmcia_ops = {
 	.nr		= 2,
 };
 
-int pcmcia_jornada720_init(struct device *dev)
+int pcmcia_jornada720_init(struct sa1111_dev *sadev)
 {
 	int ret = -ENODEV;
-	struct sa1111_dev *sadev = SA1111_DEV(dev);
 
 	if (machine_is_jornada720()) {
 		unsigned int pin = GPIO_A0 | GPIO_A1 | GPIO_A2 | GPIO_A3;
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 60fa6278fbce..6276f13e9e12 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -56,12 +56,12 @@ static ssize_t map_name_show(struct uio_mem *mem, char *buf)
 
 static ssize_t map_addr_show(struct uio_mem *mem, char *buf)
 {
-	return sprintf(buf, "0x%llx\n", (unsigned long long)mem->addr);
+	return sprintf(buf, "%pa\n", &mem->addr);
 }
 
 static ssize_t map_size_show(struct uio_mem *mem, char *buf)
 {
-	return sprintf(buf, "0x%lx\n", mem->size);
+	return sprintf(buf, "%pa\n", &mem->size);
 }
 
 static ssize_t map_offset_show(struct uio_mem *mem, char *buf)
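Both show methods now use %pa, the kernel's vsnprintf extension that takes a phys_addr_t or resource_size_t by reference and prints it at the width configured for the platform, so no per-call casts are needed. An illustrative kernel-style fragment (not a standalone program; the variable names are invented):

	phys_addr_t base = 0x80000000;
	resource_size_t size = 0x1000;

	/* %pa dereferences the pointer and sizes the output to phys_addr_t. */
	pr_info("region at %pa, size %pa\n", &base, &size);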
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 1de6df87bfa3..049a884a756f 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -253,7 +253,7 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
 
 	count = 0;
 	err = usb_interrupt_msg(dev->udev, usb_rcvintpipe(dev->udev,
-				dev->ep[EP_STATUS]), buf, size, &count, 100);
+				dev->ep[EP_STATUS]), buf, size, &count, 1000);
 	if (err < 0) {
 		pr_err("Failed to read 1-wire data from 0x%x: err=%d.\n",
 		       dev->ep[EP_STATUS], err);
diff --git a/drivers/w1/slaves/w1_bq27000.c b/drivers/w1/slaves/w1_bq27000.c
index afbefed5f2c9..caafb1722783 100644
--- a/drivers/w1/slaves/w1_bq27000.c
+++ b/drivers/w1/slaves/w1_bq27000.c
@@ -88,7 +88,7 @@ static struct w1_family_ops w1_bq27000_fops = {
 };
 
 static struct w1_family w1_bq27000_family = {
-	.fid = 1,
+	.fid = W1_FAMILY_BQ27000,
 	.fops = &w1_bq27000_fops,
 };
 
@@ -111,7 +111,7 @@ module_exit(w1_bq27000_exit);
 
 module_param(F_ID, int, S_IRUSR);
 MODULE_PARM_DESC(F_ID, "1-wire slave FID for BQ device");
-
+MODULE_ALIAS("w1-family-" __stringify(W1_FAMILY_BQ27000));
 MODULE_LICENSE("GPL");
 MODULE_AUTHOR("Texas Instruments Ltd");
 MODULE_DESCRIPTION("HDQ/1-wire slave driver bq27000 battery monitor chip");
diff --git a/drivers/w1/w1.c b/drivers/w1/w1.c
index 592f7edc671e..181f41cb960b 100644
--- a/drivers/w1/w1.c
+++ b/drivers/w1/w1.c
@@ -727,7 +727,7 @@ int w1_attach_slave_device(struct w1_master *dev, struct w1_reg_num *rn)
 
 	/* slave modules need to be loaded in a context with unlocked mutex */
 	mutex_unlock(&dev->mutex);
-	request_module("w1-family-0x%0x", rn->family);
+	request_module("w1-family-0x%02x", rn->family);
 	mutex_lock(&dev->mutex);
 
 	spin_lock(&w1_flock);
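Module autoloading matches the requested name against MODULE_ALIAS strings verbatim, so the zero-padded "%02x" form is what lines up with an alias built by stringifying a 0x01-style family id; the old "%0x" produced an unpadded digit. A small standalone illustration of the two formats:

#include <stdio.h>

int main(void)
{
	unsigned int family = 0x01;	/* e.g. the bq27000 family id */
	char broken[32], fixed[32];

	/* "%0x" carries no field width, so nothing is zero-padded. */
	snprintf(broken, sizeof(broken), "w1-family-0x%0x", family);
	/* "%02x" yields the form the MODULE_ALIAS string is built from. */
	snprintf(fixed, sizeof(fixed), "w1-family-0x%02x", family);

	printf("%s vs %s\n", broken, fixed);
	/* prints: w1-family-0x1 vs w1-family-0x01 */
	return 0;
}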
diff --git a/drivers/w1/w1_family.h b/drivers/w1/w1_family.h
index 0d18365b61ad..ed5dcb80a1f7 100644
--- a/drivers/w1/w1_family.h
+++ b/drivers/w1/w1_family.h
@@ -27,6 +27,7 @@
 #include <linux/atomic.h>
 
 #define W1_FAMILY_DEFAULT	0
+#define W1_FAMILY_BQ27000	0x01
 #define W1_FAMILY_SMEM_01	0x01
 #define W1_FAMILY_SMEM_81	0x81
 #define W1_THERM_DS18S20	0x10
diff --git a/drivers/w1/w1_netlink.c b/drivers/w1/w1_netlink.c
index dd9656237274..881597a191b8 100644
--- a/drivers/w1/w1_netlink.c
+++ b/drivers/w1/w1_netlink.c
@@ -598,7 +598,7 @@ static void w1_cn_callback(struct cn_msg *cn, struct netlink_skb_parms *nsp)
 	msg = (struct w1_netlink_msg *)(cn + 1);
 	if (node_count) {
 		int size;
-		u16 reply_size = sizeof(*cn) + cn->len + slave_len;
+		int reply_size = sizeof(*cn) + cn->len + slave_len;
 		if (cn->flags & W1_CN_BUNDLE) {
 			/* bundling duplicats some of the messages */
 			reply_size += 2 * cmd_count * (sizeof(struct cn_msg) +
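Widening reply_size from u16 to int matters once bundling is enabled: the per-command overhead accumulated above can push the total past 65535, where a u16 silently wraps. A minimal standalone demonstration of that wraparound (the sizes are made up, not taken from the driver):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint16_t reply_u16 = 60000;	/* hypothetical accumulated size */
	int reply_int = 60000;

	reply_u16 += 10000;	/* wraps modulo 65536 -> 4464 */
	reply_int += 10000;	/* stays 70000 */

	printf("u16: %u, int: %d\n", (unsigned)reply_u16, reply_int);
	return 0;
}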