aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2018-08-16 13:16:08 -0400
committerLinus Torvalds <torvalds@linux-foundation.org>2018-08-16 13:16:08 -0400
commit9502f0d1d9059988ca4edc566f81ba864568f39e (patch)
treec41d948db1be2abba32ce6cd64597e5e7752909e
parentea93102f32244e3f45c8b26260be77ed0cc1d16c (diff)
parent2bb7005696e2246baa88772341ca032ff09a63cb (diff)
Merge tag 'mailbox-v4.19' of git://git.linaro.org/landing-teams/working/fujitsu/integration
Pull mailbox updates from Jassi Brar: - xgene: potential null pointer fix - omap: switch to spdx license and use of_device_get_match_data() to match data - ti-msgmgr: cleanup and optimisation. New TI specific feature - secure proxy thread. - mediatek: add driver for CMDQ controller. - nxp: add driver for MU controller * tag 'mailbox-v4.19' of git://git.linaro.org/landing-teams/working/fujitsu/integration: mailbox: Add support for i.MX messaging unit dt-bindings: mailbox: imx-mu: add generic MU channel support dt-bindings: arm: fsl: add mu binding doc mailbox: add MODULE_LICENSE() for mtk-cmdq-mailbox.c mailbox: mediatek: Add Mediatek CMDQ driver dt-bindings: soc: Add documentation for the MediaTek GCE unit mailbox: ti-msgmgr: Add support for Secure Proxy dt-bindings: mailbox: Add support for secure proxy threads mailbox: ti-msgmgr: Move the memory region name to descriptor mailbox: ti-msgmgr: Change message count mask to be descriptor based mailbox: ti-msgmgr: Allocate Rx channel resources only on request mailbox: ti-msgmgr: Get rid of unused structure members mailbox/omap: use of_device_get_match_data() to get match data mailbox/omap: switch to SPDX license identifier mailbox: xgene-slimpro: Fix potential NULL pointer dereference
-rw-r--r--Documentation/devicetree/bindings/mailbox/fsl,mu.txt54
-rw-r--r--Documentation/devicetree/bindings/mailbox/mtk-gce.txt57
-rw-r--r--Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt50
-rw-r--r--drivers/mailbox/Kconfig16
-rw-r--r--drivers/mailbox/Makefile4
-rw-r--r--drivers/mailbox/imx-mailbox.c358
-rw-r--r--drivers/mailbox/mailbox-xgene-slimpro.c6
-rw-r--r--drivers/mailbox/mtk-cmdq-mailbox.c571
-rw-r--r--drivers/mailbox/omap-mailbox.c31
-rw-r--r--drivers/mailbox/ti-msgmgr.c353
-rw-r--r--include/dt-bindings/gce/mt8173-gce.h44
-rw-r--r--include/linux/mailbox/mtk-cmdq-mailbox.h77
-rw-r--r--include/linux/omap-mailbox.h5
13 files changed, 1536 insertions, 90 deletions
diff --git a/Documentation/devicetree/bindings/mailbox/fsl,mu.txt b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
new file mode 100644
index 000000000000..f3cf77eb5ab4
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/fsl,mu.txt
@@ -0,0 +1,54 @@
1NXP i.MX Messaging Unit (MU)
2--------------------------------------------------------------------
3
4The Messaging Unit module enables two processors within the SoC to
5communicate and coordinate by passing messages (e.g. data, status
6and control) through the MU interface. The MU also provides the ability
7for one processor to signal the other processor using interrupts.
8
9Because the MU manages the messaging between processors, the MU uses
10different clocks (from each side of the different peripheral buses).
11Therefore, the MU must synchronize the accesses from one side to the
12other. The MU accomplishes synchronization using two sets of matching
13registers (Processor A-facing, Processor B-facing).
14
15Messaging Unit Device Node:
16=============================
17
18Required properties:
19-------------------
20- compatible : should be "fsl,<chip>-mu", the supported chips include
21 imx6sx, imx7s, imx8qxp, imx8qm.
22 The "fsl,imx6sx-mu" compatible is seen as generic and should
23 be included together with SoC specific compatible.
24- reg : Should contain the registers location and length
25- interrupts : Interrupt number. The interrupt specifier format depends
26 on the interrupt controller parent.
27- #mbox-cells: Must be 2.
28 <&phandle type channel>
29 phandle : Label name of controller
30 type : Channel type
31 channel : Channel number
32
		This MU supports 4 types of unidirectional channels; each type
		has 4 channels, giving a total of 16 channels. The following
		types are supported:
36 0 - TX channel with 32bit transmit register and IRQ transmit
37 acknowledgment support.
38 1 - RX channel with 32bit receive register and IRQ support
39 2 - TX doorbell channel. Without own register and no ACK support.
40 3 - RX doorbell channel.
41
42Optional properties:
43-------------------
44- clocks : phandle to the input clock.
45- fsl,mu-side-b : Should be set for side B MU.
46
47Examples:
48--------
49lsio_mu0: mailbox@5d1b0000 {
50 compatible = "fsl,imx8qxp-mu";
51 reg = <0x0 0x5d1b0000 0x0 0x10000>;
52 interrupts = <GIC_SPI 176 IRQ_TYPE_LEVEL_HIGH>;
53 #mbox-cells = <2>;
54};
diff --git a/Documentation/devicetree/bindings/mailbox/mtk-gce.txt b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
new file mode 100644
index 000000000000..7d72b21c9e94
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/mtk-gce.txt
@@ -0,0 +1,57 @@
1MediaTek GCE
2===============
3
4The Global Command Engine (GCE) is used to help read/write registers with
5critical time limitation, such as updating display configuration during the
6vblank. The GCE can be used to implement the Command Queue (CMDQ) driver.
7
8CMDQ driver uses mailbox framework for communication. Please refer to
9mailbox.txt for generic information about mailbox device-tree bindings.
10
11Required properties:
12- compatible: Must be "mediatek,mt8173-gce"
13- reg: Address range of the GCE unit
14- interrupts: The interrupt signal from the GCE block
15- clock: Clocks according to the common clock binding
16- clock-names: Must be "gce" to stand for GCE clock
17- #mbox-cells: Should be 3.
18 <&phandle channel priority atomic_exec>
19 phandle: Label name of a gce node.
20 channel: Channel of mailbox. Be equal to the thread id of GCE.
21 priority: Priority of GCE thread.
	atomic_exec: whether GCE should process continuous packets of
		commands atomically.
24
25Required properties for a client device:
- mboxes: Clients use the mailbox to communicate with GCE; each client should
	have this property with a list of phandle and mailbox specifier pairs.
28- mediatek,gce-subsys: u32, specify the sub-system id which is corresponding
29 to the register address.
30
Some values of these properties are defined in 'dt-bindings/gce/mt8173-gce.h',
such as sub-system ids, thread priorities and event ids.
33
34Example:
35
36 gce: gce@10212000 {
37 compatible = "mediatek,mt8173-gce";
38 reg = <0 0x10212000 0 0x1000>;
39 interrupts = <GIC_SPI 135 IRQ_TYPE_LEVEL_LOW>;
40 clocks = <&infracfg CLK_INFRA_GCE>;
41 clock-names = "gce";
42 thread-num = CMDQ_THR_MAX_COUNT;
43 #mbox-cells = <3>;
44 };
45
46Example for a client device:
47
48 mmsys: clock-controller@14000000 {
49 compatible = "mediatek,mt8173-mmsys";
50 mboxes = <&gce 0 CMDQ_THR_PRIO_LOWEST 1>,
51 <&gce 1 CMDQ_THR_PRIO_LOWEST 1>;
52 mediatek,gce-subsys = <SUBSYS_1400XXXX>;
53 mutex-event-eof = <CMDQ_EVENT_MUTEX0_STREAM_EOF
54 CMDQ_EVENT_MUTEX1_STREAM_EOF>;
55
56 ...
57 };
diff --git a/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt b/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt
new file mode 100644
index 000000000000..6c9c7daf0f5c
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/ti,secure-proxy.txt
@@ -0,0 +1,50 @@
1Texas Instruments' Secure Proxy
2========================================
3
4The Texas Instruments' secure proxy is a mailbox controller that has
5configurable queues selectable at SoC(System on Chip) integration. The
6Message manager is broken up into different address regions that are
7called "threads" or "proxies" - each instance is unidirectional and is
8instantiated at SoC integration level by system controller to indicate
9receive or transmit path.
10
11Message Manager Device Node:
12===========================
13Required properties:
14--------------------
15- compatible: Shall be "ti,am654-secure-proxy"
16- reg-names target_data - Map the proxy data region
17 rt - Map the realtime status region
18 scfg - Map the configuration region
19- reg: Contains the register map per reg-names.
20- #mbox-cells Shall be 1 and shall refer to the transfer path
21 called thread.
22- interrupt-names: Contains interrupt names matching the rx transfer path
23 for a given SoC. Receive interrupts shall be of the
24 format: "rx_<PID>".
25- interrupts: Contains the interrupt information corresponding to
26 interrupt-names property.
27
28Example(AM654):
29------------
30
31 secure_proxy: mailbox@32c00000 {
32 compatible = "ti,am654-secure-proxy";
33 #mbox-cells = <1>;
34 reg-names = "target_data", "rt", "scfg";
35 reg = <0x0 0x32c00000 0x0 0x100000>,
36 <0x0 0x32400000 0x0 0x100000>,
37 <0x0 0x32800000 0x0 0x100000>;
38 interrupt-names = "rx_011";
39 interrupts = <GIC_SPI 32 IRQ_TYPE_LEVEL_HIGH>;
40 };
41
42 dmsc: dmsc {
43 [...]
44 mbox-names = "rx", "tx";
45 # RX Thread ID is 11
46 # TX Thread ID is 13
47 mboxes= <&secure_proxy 11>,
48 <&secure_proxy 13>;
49 [...]
50 };
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index e63d29a95e76..841c005d8ebb 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -15,6 +15,12 @@ config ARM_MHU
15 The controller has 3 mailbox channels, the last of which can be 15 The controller has 3 mailbox channels, the last of which can be
16 used in Secure mode only. 16 used in Secure mode only.
17 17
18config IMX_MBOX
19 tristate "i.MX Mailbox"
20 depends on ARCH_MXC || COMPILE_TEST
21 help
22 Mailbox implementation for i.MX Messaging Unit (MU).
23
18config PLATFORM_MHU 24config PLATFORM_MHU
19 tristate "Platform MHU Mailbox" 25 tristate "Platform MHU Mailbox"
20 depends on OF 26 depends on OF
@@ -189,4 +195,14 @@ config STM32_IPCC
189 Mailbox implementation for STMicroelectonics STM32 family chips 195 Mailbox implementation for STMicroelectonics STM32 family chips
190 with hardware for Inter-Processor Communication Controller (IPCC) 196 with hardware for Inter-Processor Communication Controller (IPCC)
191 between processors. Say Y here if you want to have this support. 197 between processors. Say Y here if you want to have this support.
198
199config MTK_CMDQ_MBOX
200 tristate "MediaTek CMDQ Mailbox Support"
201 depends on ARCH_MEDIATEK || COMPILE_TEST
202 select MTK_INFRACFG
203 help
204 Say yes here to add support for the MediaTek Command Queue (CMDQ)
205 mailbox driver. The CMDQ is used to help read/write registers with
206 critical time limitation, such as updating display configuration
207 during the vblank.
192endif 208endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 4d501bea7863..c818b5d011ae 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -7,6 +7,8 @@ obj-$(CONFIG_MAILBOX_TEST) += mailbox-test.o
7 7
8obj-$(CONFIG_ARM_MHU) += arm_mhu.o 8obj-$(CONFIG_ARM_MHU) += arm_mhu.o
9 9
10obj-$(CONFIG_IMX_MBOX) += imx-mailbox.o
11
10obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o 12obj-$(CONFIG_PLATFORM_MHU) += platform_mhu.o
11 13
12obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o 14obj-$(CONFIG_PL320_MBOX) += pl320-ipc.o
@@ -40,3 +42,5 @@ obj-$(CONFIG_QCOM_APCS_IPC) += qcom-apcs-ipc-mailbox.o
40obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o 42obj-$(CONFIG_TEGRA_HSP_MBOX) += tegra-hsp.o
41 43
42obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o 44obj-$(CONFIG_STM32_IPCC) += stm32-ipcc.o
45
46obj-$(CONFIG_MTK_CMDQ_MBOX) += mtk-cmdq-mailbox.o
diff --git a/drivers/mailbox/imx-mailbox.c b/drivers/mailbox/imx-mailbox.c
new file mode 100644
index 000000000000..363d35d5e49d
--- /dev/null
+++ b/drivers/mailbox/imx-mailbox.c
@@ -0,0 +1,358 @@
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (c) 2018 Pengutronix, Oleksij Rempel <o.rempel@pengutronix.de>
4 */
5
6#include <linux/clk.h>
7#include <linux/interrupt.h>
8#include <linux/io.h>
9#include <linux/kernel.h>
10#include <linux/mailbox_controller.h>
11#include <linux/module.h>
12#include <linux/of_device.h>
13#include <linux/slab.h>
14
15/* Transmit Register */
16#define IMX_MU_xTRn(x) (0x00 + 4 * (x))
17/* Receive Register */
18#define IMX_MU_xRRn(x) (0x10 + 4 * (x))
19/* Status Register */
20#define IMX_MU_xSR 0x20
21#define IMX_MU_xSR_GIPn(x) BIT(28 + (3 - (x)))
22#define IMX_MU_xSR_RFn(x) BIT(24 + (3 - (x)))
23#define IMX_MU_xSR_TEn(x) BIT(20 + (3 - (x)))
24#define IMX_MU_xSR_BRDIP BIT(9)
25
26/* Control Register */
27#define IMX_MU_xCR 0x24
28/* General Purpose Interrupt Enable */
29#define IMX_MU_xCR_GIEn(x) BIT(28 + (3 - (x)))
30/* Receive Interrupt Enable */
31#define IMX_MU_xCR_RIEn(x) BIT(24 + (3 - (x)))
32/* Transmit Interrupt Enable */
33#define IMX_MU_xCR_TIEn(x) BIT(20 + (3 - (x)))
34/* General Purpose Interrupt Request */
35#define IMX_MU_xCR_GIRn(x) BIT(16 + (3 - (x)))
36
37#define IMX_MU_CHANS 16
38#define IMX_MU_CHAN_NAME_SIZE 20
39
/* Channel types; together with an index 0..3 they select one of 16 channels. */
enum imx_mu_chan_type {
	IMX_MU_TYPE_TX,		/* Tx */
	IMX_MU_TYPE_RX,		/* Rx */
	IMX_MU_TYPE_TXDB,	/* Tx doorbell */
	IMX_MU_TYPE_RXDB,	/* Rx doorbell */
};

/* Per-channel state, stored in mbox_chan->con_priv. */
struct imx_mu_con_priv {
	unsigned int idx;			/* register index within the type (0..3) */
	char irq_desc[IMX_MU_CHAN_NAME_SIZE];	/* name passed to request_irq() */
	enum imx_mu_chan_type type;
	struct mbox_chan *chan;			/* back-pointer to the owning channel */
	struct tasklet_struct txdb_tasklet;	/* defers txdone for Tx doorbell channels */
};

/* Controller state: one MU block exposing IMX_MU_CHANS static channels. */
struct imx_mu_priv {
	struct device *dev;
	void __iomem *base;		/* MMIO base of the MU register block */
	spinlock_t xcr_lock;		/* control register lock */

	struct mbox_controller mbox;
	struct mbox_chan mbox_chans[IMX_MU_CHANS];

	struct imx_mu_con_priv con_priv[IMX_MU_CHANS];
	struct clk *clk;		/* optional; NULL when the DT has no clock */
	int irq;			/* single IRQ line shared by all channels */

	bool side_b;			/* set from "fsl,mu-side-b"; side B skips HW init */
};
69
70static struct imx_mu_priv *to_imx_mu_priv(struct mbox_controller *mbox)
71{
72 return container_of(mbox, struct imx_mu_priv, mbox);
73}
74
75static void imx_mu_write(struct imx_mu_priv *priv, u32 val, u32 offs)
76{
77 iowrite32(val, priv->base + offs);
78}
79
80static u32 imx_mu_read(struct imx_mu_priv *priv, u32 offs)
81{
82 return ioread32(priv->base + offs);
83}
84
/*
 * Atomic read-modify-write of the MU control register (xCR).
 * @clr bits are cleared first, then @set bits are OR'd in, so a bit in
 * both masks ends up set. xcr_lock serialises concurrent IRQ enable /
 * disable from the ISR and the channel ops. Returns the value written.
 */
static u32 imx_mu_xcr_rmw(struct imx_mu_priv *priv, u32 set, u32 clr)
{
	unsigned long flags;
	u32 val;

	spin_lock_irqsave(&priv->xcr_lock, flags);
	val = imx_mu_read(priv, IMX_MU_xCR);
	val &= ~clr;
	val |= set;
	imx_mu_write(priv, val, IMX_MU_xCR);
	spin_unlock_irqrestore(&priv->xcr_lock, flags);

	return val;
}
99
100static void imx_mu_txdb_tasklet(unsigned long data)
101{
102 struct imx_mu_con_priv *cp = (struct imx_mu_con_priv *)data;
103
104 mbox_chan_txdone(cp->chan, 0);
105}
106
/*
 * Shared interrupt handler, registered once per active channel with the
 * channel as dev_id. Status (xSR) and enable (xCR) bits for a given
 * event share the same bit positions, so ANDing them isolates "pending
 * AND enabled" for this channel only; other channels' handlers pick up
 * their own events on the same line.
 */
static irqreturn_t imx_mu_isr(int irq, void *p)
{
	struct mbox_chan *chan = p;
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	u32 val, ctrl, dat;

	ctrl = imx_mu_read(priv, IMX_MU_xCR);
	val = imx_mu_read(priv, IMX_MU_xSR);

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		/* transmit register empty, with Tx IRQ enabled for this idx */
		val &= IMX_MU_xSR_TEn(cp->idx) &
			(ctrl & IMX_MU_xCR_TIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RX:
		/* receive register full, with Rx IRQ enabled for this idx */
		val &= IMX_MU_xSR_RFn(cp->idx) &
			(ctrl & IMX_MU_xCR_RIEn(cp->idx));
		break;
	case IMX_MU_TYPE_RXDB:
		/* general-purpose (doorbell) interrupt pending and enabled */
		val &= IMX_MU_xSR_GIPn(cp->idx) &
			(ctrl & IMX_MU_xCR_GIEn(cp->idx));
		break;
	default:
		break;
	}

	if (!val)
		return IRQ_NONE;	/* event is not for this channel */

	if (val == IMX_MU_xSR_TEn(cp->idx)) {
		/* Tx complete: mask Tx-empty IRQ until the next send */
		imx_mu_xcr_rmw(priv, 0, IMX_MU_xCR_TIEn(cp->idx));
		mbox_chan_txdone(chan, 0);
	} else if (val == IMX_MU_xSR_RFn(cp->idx)) {
		dat = imx_mu_read(priv, IMX_MU_xRRn(cp->idx));
		mbox_chan_received_data(chan, (void *)&dat);
	} else if (val == IMX_MU_xSR_GIPn(cp->idx)) {
		/* GIP bits are write-1-to-clear in xSR */
		imx_mu_write(priv, IMX_MU_xSR_GIPn(cp->idx), IMX_MU_xSR);
		mbox_chan_received_data(chan, NULL);
	} else {
		dev_warn_ratelimited(priv->dev, "Not handled interrupt\n");
		return IRQ_NONE;
	}

	return IRQ_HANDLED;
}
153
/*
 * Mailbox .send_data callback.
 * TX: write the 32-bit payload into this channel's transmit register and
 *     unmask the Tx-empty IRQ so the ISR can report txdone.
 * TXDB: raise a general-purpose interrupt request toward the remote side;
 *     there is no ACK, so txdone is completed from the tasklet.
 * Any other channel type cannot transmit and returns -EINVAL.
 */
static int imx_mu_send_data(struct mbox_chan *chan, void *data)
{
	struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
	struct imx_mu_con_priv *cp = chan->con_priv;
	u32 *arg = data;

	switch (cp->type) {
	case IMX_MU_TYPE_TX:
		imx_mu_write(priv, *arg, IMX_MU_xTRn(cp->idx));
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_TIEn(cp->idx), 0);
		break;
	case IMX_MU_TYPE_TXDB:
		imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIRn(cp->idx), 0);
		tasklet_schedule(&cp->txdb_tasklet);
		break;
	default:
		dev_warn_ratelimited(priv->dev, "Send data on wrong channel type: %d\n", cp->type);
		return -EINVAL;
	}

	return 0;
}
176
177static int imx_mu_startup(struct mbox_chan *chan)
178{
179 struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
180 struct imx_mu_con_priv *cp = chan->con_priv;
181 int ret;
182
183 if (cp->type == IMX_MU_TYPE_TXDB) {
184 /* Tx doorbell don't have ACK support */
185 tasklet_init(&cp->txdb_tasklet, imx_mu_txdb_tasklet,
186 (unsigned long)cp);
187 return 0;
188 }
189
190 ret = request_irq(priv->irq, imx_mu_isr, IRQF_SHARED, cp->irq_desc,
191 chan);
192 if (ret) {
193 dev_err(priv->dev,
194 "Unable to acquire IRQ %d\n", priv->irq);
195 return ret;
196 }
197
198 switch (cp->type) {
199 case IMX_MU_TYPE_RX:
200 imx_mu_xcr_rmw(priv, IMX_MU_xCR_RIEn(cp->idx), 0);
201 break;
202 case IMX_MU_TYPE_RXDB:
203 imx_mu_xcr_rmw(priv, IMX_MU_xCR_GIEn(cp->idx), 0);
204 break;
205 default:
206 break;
207 }
208
209 return 0;
210}
211
212static void imx_mu_shutdown(struct mbox_chan *chan)
213{
214 struct imx_mu_priv *priv = to_imx_mu_priv(chan->mbox);
215 struct imx_mu_con_priv *cp = chan->con_priv;
216
217 if (cp->type == IMX_MU_TYPE_TXDB)
218 tasklet_kill(&cp->txdb_tasklet);
219
220 imx_mu_xcr_rmw(priv, 0,
221 IMX_MU_xCR_TIEn(cp->idx) | IMX_MU_xCR_RIEn(cp->idx));
222
223 free_irq(priv->irq, chan);
224}
225
/* Mailbox framework callbacks for the i.MX MU controller. */
static const struct mbox_chan_ops imx_mu_ops = {
	.send_data = imx_mu_send_data,
	.startup = imx_mu_startup,
	.shutdown = imx_mu_shutdown,
};
231
232static struct mbox_chan * imx_mu_xlate(struct mbox_controller *mbox,
233 const struct of_phandle_args *sp)
234{
235 u32 type, idx, chan;
236
237 if (sp->args_count != 2) {
238 dev_err(mbox->dev, "Invalid argument count %d\n", sp->args_count);
239 return ERR_PTR(-EINVAL);
240 }
241
242 type = sp->args[0]; /* channel type */
243 idx = sp->args[1]; /* index */
244 chan = type * 4 + idx;
245
246 if (chan >= mbox->num_chans) {
247 dev_err(mbox->dev, "Not supported channel number: %d. (type: %d, idx: %d)\n", chan, type, idx);
248 return ERR_PTR(-EINVAL);
249 }
250
251 return &mbox->chans[chan];
252}
253
254static void imx_mu_init_generic(struct imx_mu_priv *priv)
255{
256 if (priv->side_b)
257 return;
258
259 /* Set default MU configuration */
260 imx_mu_write(priv, 0, IMX_MU_xCR);
261}
262
263static int imx_mu_probe(struct platform_device *pdev)
264{
265 struct device *dev = &pdev->dev;
266 struct device_node *np = dev->of_node;
267 struct resource *iomem;
268 struct imx_mu_priv *priv;
269 unsigned int i;
270 int ret;
271
272 priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
273 if (!priv)
274 return -ENOMEM;
275
276 priv->dev = dev;
277
278 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
279 priv->base = devm_ioremap_resource(&pdev->dev, iomem);
280 if (IS_ERR(priv->base))
281 return PTR_ERR(priv->base);
282
283 priv->irq = platform_get_irq(pdev, 0);
284 if (priv->irq < 0)
285 return priv->irq;
286
287 priv->clk = devm_clk_get(dev, NULL);
288 if (IS_ERR(priv->clk)) {
289 if (PTR_ERR(priv->clk) != -ENOENT)
290 return PTR_ERR(priv->clk);
291
292 priv->clk = NULL;
293 }
294
295 ret = clk_prepare_enable(priv->clk);
296 if (ret) {
297 dev_err(dev, "Failed to enable clock\n");
298 return ret;
299 }
300
301 for (i = 0; i < IMX_MU_CHANS; i++) {
302 struct imx_mu_con_priv *cp = &priv->con_priv[i];
303
304 cp->idx = i % 4;
305 cp->type = i >> 2;
306 cp->chan = &priv->mbox_chans[i];
307 priv->mbox_chans[i].con_priv = cp;
308 snprintf(cp->irq_desc, sizeof(cp->irq_desc),
309 "imx_mu_chan[%i-%i]", cp->type, cp->idx);
310 }
311
312 priv->side_b = of_property_read_bool(np, "fsl,mu-side-b");
313
314 spin_lock_init(&priv->xcr_lock);
315
316 priv->mbox.dev = dev;
317 priv->mbox.ops = &imx_mu_ops;
318 priv->mbox.chans = priv->mbox_chans;
319 priv->mbox.num_chans = IMX_MU_CHANS;
320 priv->mbox.of_xlate = imx_mu_xlate;
321 priv->mbox.txdone_irq = true;
322
323 platform_set_drvdata(pdev, priv);
324
325 imx_mu_init_generic(priv);
326
327 return mbox_controller_register(&priv->mbox);
328}
329
/* Driver removal: unregister the controller and release the clock. */
static int imx_mu_remove(struct platform_device *pdev)
{
	struct imx_mu_priv *priv = platform_get_drvdata(pdev);

	mbox_controller_unregister(&priv->mbox);
	clk_disable_unprepare(priv->clk);

	return 0;
}

/* "fsl,imx6sx-mu" is the generic compatible shared by all supported SoCs. */
static const struct of_device_id imx_mu_dt_ids[] = {
	{ .compatible = "fsl,imx6sx-mu" },
	{ },
};
MODULE_DEVICE_TABLE(of, imx_mu_dt_ids);

static struct platform_driver imx_mu_driver = {
	.probe		= imx_mu_probe,
	.remove		= imx_mu_remove,
	.driver = {
		.name	= "imx_mu",
		.of_match_table = imx_mu_dt_ids,
	},
};
module_platform_driver(imx_mu_driver);

MODULE_AUTHOR("Oleksij Rempel <o.rempel@pengutronix.de>");
MODULE_DESCRIPTION("Message Unit driver for i.MX");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index a7040163dd43..b8b2b3533f46 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -195,9 +195,9 @@ static int slimpro_mbox_probe(struct platform_device *pdev)
195 platform_set_drvdata(pdev, ctx); 195 platform_set_drvdata(pdev, ctx);
196 196
197 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); 197 regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
198 mb_base = devm_ioremap(&pdev->dev, regs->start, resource_size(regs)); 198 mb_base = devm_ioremap_resource(&pdev->dev, regs);
199 if (!mb_base) 199 if (IS_ERR(mb_base))
200 return -ENOMEM; 200 return PTR_ERR(mb_base);
201 201
202 /* Setup mailbox links */ 202 /* Setup mailbox links */
203 for (i = 0; i < MBOX_CNT; i++) { 203 for (i = 0; i < MBOX_CNT; i++) {
diff --git a/drivers/mailbox/mtk-cmdq-mailbox.c b/drivers/mailbox/mtk-cmdq-mailbox.c
new file mode 100644
index 000000000000..aec46d5d3506
--- /dev/null
+++ b/drivers/mailbox/mtk-cmdq-mailbox.c
@@ -0,0 +1,571 @@
1// SPDX-License-Identifier: GPL-2.0
2//
3// Copyright (c) 2018 MediaTek Inc.
4
5#include <linux/bitops.h>
6#include <linux/clk.h>
7#include <linux/clk-provider.h>
8#include <linux/dma-mapping.h>
9#include <linux/errno.h>
10#include <linux/interrupt.h>
11#include <linux/iopoll.h>
12#include <linux/kernel.h>
13#include <linux/module.h>
14#include <linux/platform_device.h>
15#include <linux/mailbox_controller.h>
16#include <linux/mailbox/mtk-cmdq-mailbox.h>
17#include <linux/of_device.h>
18
19#define CMDQ_OP_CODE_MASK (0xff << CMDQ_OP_CODE_SHIFT)
20#define CMDQ_IRQ_MASK 0xffff
21#define CMDQ_NUM_CMD(t) (t->cmd_buf_size / CMDQ_INST_SIZE)
22
23#define CMDQ_CURR_IRQ_STATUS 0x10
24#define CMDQ_THR_SLOT_CYCLES 0x30
25#define CMDQ_THR_BASE 0x100
26#define CMDQ_THR_SIZE 0x80
27#define CMDQ_THR_WARM_RESET 0x00
28#define CMDQ_THR_ENABLE_TASK 0x04
29#define CMDQ_THR_SUSPEND_TASK 0x08
30#define CMDQ_THR_CURR_STATUS 0x0c
31#define CMDQ_THR_IRQ_STATUS 0x10
32#define CMDQ_THR_IRQ_ENABLE 0x14
33#define CMDQ_THR_CURR_ADDR 0x20
34#define CMDQ_THR_END_ADDR 0x24
35#define CMDQ_THR_WAIT_TOKEN 0x30
36#define CMDQ_THR_PRIORITY 0x40
37
38#define CMDQ_THR_ACTIVE_SLOT_CYCLES 0x3200
39#define CMDQ_THR_ENABLED 0x1
40#define CMDQ_THR_DISABLED 0x0
41#define CMDQ_THR_SUSPEND 0x1
42#define CMDQ_THR_RESUME 0x0
43#define CMDQ_THR_STATUS_SUSPENDED BIT(1)
44#define CMDQ_THR_DO_WARM_RESET BIT(0)
45#define CMDQ_THR_IRQ_DONE 0x1
46#define CMDQ_THR_IRQ_ERROR 0x12
47#define CMDQ_THR_IRQ_EN (CMDQ_THR_IRQ_ERROR | CMDQ_THR_IRQ_DONE)
48#define CMDQ_THR_IS_WAITING BIT(31)
49
50#define CMDQ_JUMP_BY_OFFSET 0x10000000
51#define CMDQ_JUMP_BY_PA 0x10000001
52
/* One GCE hardware thread; each backs exactly one mailbox channel. */
struct cmdq_thread {
	struct mbox_chan *chan;
	void __iomem *base;		/* this thread's register window */
	struct list_head task_busy_list; /* tasks queued on this thread, in order */
	u32 priority;			/* from the 2nd mbox specifier cell */
	bool atomic_exec;		/* from the 3rd mbox specifier cell */
};

/* One submitted packet while it is owned by the hardware queue. */
struct cmdq_task {
	struct cmdq *cmdq;
	struct list_head list_entry;	/* link in thread->task_busy_list */
	dma_addr_t pa_base;		/* DMA address of the command buffer */
	struct cmdq_thread *thread;
	struct cmdq_pkt *pkt; /* the packet sent from mailbox client */
};

/* Controller state for one GCE block. */
struct cmdq {
	struct mbox_controller mbox;
	void __iomem *base;
	u32 irq;
	u32 thread_nr;			/* number of HW threads / channels */
	struct cmdq_thread *thread;	/* array of thread_nr threads */
	struct clk *clock;		/* the "gce" clock */
	bool suspended;			/* set in suspend; sends then WARN */
};
78
/*
 * Ask a GCE thread to suspend and poll (atomically, up to 10us) until
 * the hardware acknowledges. A thread that is already disabled counts
 * as suspended. Returns 0 on success, -EFAULT on timeout.
 */
static int cmdq_thread_suspend(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 status;

	writel(CMDQ_THR_SUSPEND, thread->base + CMDQ_THR_SUSPEND_TASK);

	/* If already disabled, treat as suspended successful. */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return 0;

	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_STATUS,
			status, status & CMDQ_THR_STATUS_SUSPENDED, 0, 10)) {
		dev_err(cmdq->mbox.dev, "suspend GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}
98
/* Clear the suspend request so the GCE thread resumes execution. */
static void cmdq_thread_resume(struct cmdq_thread *thread)
{
	writel(CMDQ_THR_RESUME, thread->base + CMDQ_THR_SUSPEND_TASK);
}

/*
 * One-time hardware init: program the per-thread active slot cycle
 * budget. The clock is enabled only around the register write.
 */
static void cmdq_init(struct cmdq *cmdq)
{
	WARN_ON(clk_enable(cmdq->clock) < 0);
	writel(CMDQ_THR_ACTIVE_SLOT_CYCLES, cmdq->base + CMDQ_THR_SLOT_CYCLES);
	clk_disable(cmdq->clock);
}

/*
 * Warm-reset one GCE thread and poll (atomically, up to 10us) for the
 * reset bit to self-clear. Returns 0 on success, -EFAULT on timeout.
 */
static int cmdq_thread_reset(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	u32 warm_reset;

	writel(CMDQ_THR_DO_WARM_RESET, thread->base + CMDQ_THR_WARM_RESET);
	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_WARM_RESET,
			warm_reset, !(warm_reset & CMDQ_THR_DO_WARM_RESET),
			0, 10)) {
		dev_err(cmdq->mbox.dev, "reset GCE thread 0x%x failed\n",
			(u32)(thread->base - cmdq->base));
		return -EFAULT;
	}

	return 0;
}

/* Stop a thread: reset it, then clear its enable bit. */
static void cmdq_thread_disable(struct cmdq *cmdq, struct cmdq_thread *thread)
{
	cmdq_thread_reset(cmdq, thread);
	writel(CMDQ_THR_DISABLED, thread->base + CMDQ_THR_ENABLE_TASK);
}

/* notify GCE to re-fetch commands by setting GCE thread PC */
static void cmdq_thread_invalidate_fetched_data(struct cmdq_thread *thread)
{
	/* rewriting the PC with its own value forces a re-fetch */
	writel(readl(thread->base + CMDQ_THR_CURR_ADDR),
	       thread->base + CMDQ_THR_CURR_ADDR);
}
139
/*
 * Append a task to a running thread by patching the previous task's
 * final instruction into a JUMP to this task's DMA address. The CPU
 * sync/patch/sync sequence keeps the command buffer coherent for the
 * device, and the PC rewrite makes GCE re-fetch so it sees the jump.
 * Caller must have the previous task already on task_busy_list.
 */
static void cmdq_task_insert_into_thread(struct cmdq_task *task)
{
	struct device *dev = task->cmdq->mbox.dev;
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *prev_task = list_last_entry(
			&thread->task_busy_list, typeof(*task), list_entry);
	u64 *prev_task_base = prev_task->pkt->va_base;

	/* let previous task jump to this task */
	dma_sync_single_for_cpu(dev, prev_task->pa_base,
				prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);
	prev_task_base[CMDQ_NUM_CMD(prev_task->pkt) - 1] =
		(u64)CMDQ_JUMP_BY_PA << 32 | task->pa_base;
	dma_sync_single_for_device(dev, prev_task->pa_base,
				   prev_task->pkt->cmd_buf_size, DMA_TO_DEVICE);

	cmdq_thread_invalidate_fetched_data(thread);
}
158
159static bool cmdq_command_is_wfe(u64 cmd)
160{
161 u64 wfe_option = CMDQ_WFE_UPDATE | CMDQ_WFE_WAIT | CMDQ_WFE_WAIT_VALUE;
162 u64 wfe_op = (u64)(CMDQ_CODE_WFE << CMDQ_OP_CODE_SHIFT) << 32;
163 u64 wfe_mask = (u64)CMDQ_OP_CODE_MASK << 32 | 0xffffffff;
164
165 return ((cmd & wfe_mask) == (wfe_op | wfe_option));
166}
167
/* we assume tasks in the same display GCE thread are waiting the same event. */
static void cmdq_task_remove_wfe(struct cmdq_task *task)
{
	struct device *dev = task->cmdq->mbox.dev;
	u64 *base = task->pkt->va_base;
	int i;

	/* Patch every WFE into a no-op jump so the task never blocks. */
	dma_sync_single_for_cpu(dev, task->pa_base, task->pkt->cmd_buf_size,
				DMA_TO_DEVICE);
	for (i = 0; i < CMDQ_NUM_CMD(task->pkt); i++)
		if (cmdq_command_is_wfe(base[i]))
			base[i] = (u64)CMDQ_JUMP_BY_OFFSET << 32 |
				  CMDQ_JUMP_PASS;
	dma_sync_single_for_device(dev, task->pa_base, task->pkt->cmd_buf_size,
				   DMA_TO_DEVICE);
}

/* True while the GCE thread is blocked waiting on an event token. */
static bool cmdq_thread_is_in_wfe(struct cmdq_thread *thread)
{
	return readl(thread->base + CMDQ_THR_WAIT_TOKEN) & CMDQ_THR_IS_WAITING;
}

/*
 * Busy-wait (atomically, up to 20us) for the thread PC to reach end_pa;
 * logs an error on timeout but does not propagate it.
 */
static void cmdq_thread_wait_end(struct cmdq_thread *thread,
				 unsigned long end_pa)
{
	struct device *dev = thread->chan->mbox->dev;
	unsigned long curr_pa;

	if (readl_poll_timeout_atomic(thread->base + CMDQ_THR_CURR_ADDR,
			curr_pa, curr_pa == end_pa, 1, 20))
		dev_err(dev, "GCE thread cannot run to end.\n");
}
200
/*
 * Complete one task: invoke the client's async callback with the given
 * status and unlink the task from its thread's busy list. The caller
 * still owns (and must free) the task memory.
 */
static void cmdq_task_exec_done(struct cmdq_task *task, enum cmdq_cb_status sta)
{
	struct cmdq_task_cb *cb = &task->pkt->async_cb;
	struct cmdq_cb_data data;

	WARN_ON(cb->cb == (cmdq_async_flush_cb)NULL);
	data.sta = sta;
	data.data = cb->data;
	cb->cb(data);

	list_del(&task->list_entry);
}

/*
 * Error recovery for the task the thread faulted on: suspend the
 * thread, repoint its PC at the next queued task (if any) and resume.
 */
static void cmdq_task_handle_error(struct cmdq_task *task)
{
	struct cmdq_thread *thread = task->thread;
	struct cmdq_task *next_task;

	dev_err(task->cmdq->mbox.dev, "task 0x%p error\n", task);
	WARN_ON(cmdq_thread_suspend(task->cmdq, thread) < 0);
	next_task = list_first_entry_or_null(&thread->task_busy_list,
			struct cmdq_task, list_entry);
	if (next_task)
		writel(next_task->pa_base, thread->base + CMDQ_THR_CURR_ADDR);
	cmdq_thread_resume(thread);
}
227
/*
 * Per-thread IRQ work, called under the channel lock: ack the thread's
 * IRQ flags, locate the task the PC currently points into, retire every
 * task queued before it as completed (on error, report the faulting one
 * and re-arm the thread), and power the thread down once its queue
 * drains.
 */
static void cmdq_thread_irq_handler(struct cmdq *cmdq,
				    struct cmdq_thread *thread)
{
	struct cmdq_task *task, *tmp, *curr_task = NULL;
	u32 curr_pa, irq_flag, task_end_pa;
	bool err;

	irq_flag = readl(thread->base + CMDQ_THR_IRQ_STATUS);
	writel(~irq_flag, thread->base + CMDQ_THR_IRQ_STATUS);

	/*
	 * When ISR call this function, another CPU core could run
	 * "release task" right before we acquire the spin lock, and thus
	 * reset / disable this GCE thread, so we need to check the enable
	 * bit of this GCE thread.
	 */
	if (!(readl(thread->base + CMDQ_THR_ENABLE_TASK) & CMDQ_THR_ENABLED))
		return;

	if (irq_flag & CMDQ_THR_IRQ_ERROR)
		err = true;
	else if (irq_flag & CMDQ_THR_IRQ_DONE)
		err = false;
	else
		return;	/* neither done nor error: nothing to do */

	curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);

	list_for_each_entry_safe(task, tmp, &thread->task_busy_list,
				 list_entry) {
		/* PC inside [pa_base, end) means this task is executing */
		task_end_pa = task->pa_base + task->pkt->cmd_buf_size;
		if (curr_pa >= task->pa_base && curr_pa < task_end_pa)
			curr_task = task;

		if (!curr_task || curr_pa == task_end_pa - CMDQ_INST_SIZE) {
			/* task fully executed (PC on its final instruction) */
			cmdq_task_exec_done(task, CMDQ_CB_NORMAL);
			kfree(task);
		} else if (err) {
			cmdq_task_exec_done(task, CMDQ_CB_ERROR);
			cmdq_task_handle_error(curr_task);
			kfree(task);
		}

		if (curr_task)
			break;
	}

	if (list_empty(&thread->task_busy_list)) {
		cmdq_thread_disable(cmdq, thread);
		clk_disable(cmdq->clock);
	}
}
280
/*
 * Top-level GCE interrupt: bits in CURR_IRQ_STATUS are active-low (a
 * cleared bit means that thread has a pending IRQ; all-ones means no
 * work). Fan out to the per-thread handler, taking each channel's lock
 * to synchronise against concurrent senders.
 */
static irqreturn_t cmdq_irq_handler(int irq, void *dev)
{
	struct cmdq *cmdq = dev;
	unsigned long irq_status, flags = 0L;
	int bit;

	irq_status = readl(cmdq->base + CMDQ_CURR_IRQ_STATUS) & CMDQ_IRQ_MASK;
	if (!(irq_status ^ CMDQ_IRQ_MASK))
		return IRQ_NONE;

	for_each_clear_bit(bit, &irq_status, fls(CMDQ_IRQ_MASK)) {
		struct cmdq_thread *thread = &cmdq->thread[bit];

		spin_lock_irqsave(&thread->chan->lock, flags);
		cmdq_thread_irq_handler(cmdq, thread);
		spin_unlock_irqrestore(&thread->chan->lock, flags);
	}

	return IRQ_HANDLED;
}
301
/*
 * System suspend: flag the controller suspended, warn if any thread
 * still has queued tasks, then drop the clock prepare count. Only
 * clk_unprepare() here — the enable count is balanced elsewhere per
 * thread activity.
 */
static int cmdq_suspend(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);
	struct cmdq_thread *thread;
	int i;
	bool task_running = false;

	cmdq->suspended = true;

	for (i = 0; i < cmdq->thread_nr; i++) {
		thread = &cmdq->thread[i];
		if (!list_empty(&thread->task_busy_list)) {
			task_running = true;
			break;
		}
	}

	if (task_running)
		dev_warn(dev, "exist running task(s) in suspend\n");

	clk_unprepare(cmdq->clock);

	return 0;
}

/* System resume: re-prepare the clock and clear the suspended flag. */
static int cmdq_resume(struct device *dev)
{
	struct cmdq *cmdq = dev_get_drvdata(dev);

	WARN_ON(clk_prepare(cmdq->clock) < 0);
	cmdq->suspended = false;
	return 0;
}
335
336static int cmdq_remove(struct platform_device *pdev)
337{
338 struct cmdq *cmdq = platform_get_drvdata(pdev);
339
340 mbox_controller_unregister(&cmdq->mbox);
341 clk_unprepare(cmdq->clock);
342
343 if (cmdq->mbox.chans)
344 devm_kfree(&pdev->dev, cmdq->mbox.chans);
345
346 if (cmdq->thread)
347 devm_kfree(&pdev->dev, cmdq->thread);
348
349 devm_kfree(&pdev->dev, cmdq);
350
351 return 0;
352}
353
354static int cmdq_mbox_send_data(struct mbox_chan *chan, void *data)
355{
356 struct cmdq_pkt *pkt = (struct cmdq_pkt *)data;
357 struct cmdq_thread *thread = (struct cmdq_thread *)chan->con_priv;
358 struct cmdq *cmdq = dev_get_drvdata(chan->mbox->dev);
359 struct cmdq_task *task;
360 unsigned long curr_pa, end_pa;
361
362 /* Client should not flush new tasks if suspended. */
363 WARN_ON(cmdq->suspended);
364
365 task = kzalloc(sizeof(*task), GFP_ATOMIC);
366 task->cmdq = cmdq;
367 INIT_LIST_HEAD(&task->list_entry);
368 task->pa_base = pkt->pa_base;
369 task->thread = thread;
370 task->pkt = pkt;
371
372 if (list_empty(&thread->task_busy_list)) {
373 WARN_ON(clk_enable(cmdq->clock) < 0);
374 WARN_ON(cmdq_thread_reset(cmdq, thread) < 0);
375
376 writel(task->pa_base, thread->base + CMDQ_THR_CURR_ADDR);
377 writel(task->pa_base + pkt->cmd_buf_size,
378 thread->base + CMDQ_THR_END_ADDR);
379 writel(thread->priority, thread->base + CMDQ_THR_PRIORITY);
380 writel(CMDQ_THR_IRQ_EN, thread->base + CMDQ_THR_IRQ_ENABLE);
381 writel(CMDQ_THR_ENABLED, thread->base + CMDQ_THR_ENABLE_TASK);
382 } else {
383 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
384 curr_pa = readl(thread->base + CMDQ_THR_CURR_ADDR);
385 end_pa = readl(thread->base + CMDQ_THR_END_ADDR);
386
387 /*
388 * Atomic execution should remove the following wfe, i.e. only
389 * wait event at first task, and prevent to pause when running.
390 */
391 if (thread->atomic_exec) {
392 /* GCE is executing if command is not WFE */
393 if (!cmdq_thread_is_in_wfe(thread)) {
394 cmdq_thread_resume(thread);
395 cmdq_thread_wait_end(thread, end_pa);
396 WARN_ON(cmdq_thread_suspend(cmdq, thread) < 0);
397 /* set to this task directly */
398 writel(task->pa_base,
399 thread->base + CMDQ_THR_CURR_ADDR);
400 } else {
401 cmdq_task_insert_into_thread(task);
402 cmdq_task_remove_wfe(task);
403 smp_mb(); /* modify jump before enable thread */
404 }
405 } else {
406 /* check boundary */
407 if (curr_pa == end_pa - CMDQ_INST_SIZE ||
408 curr_pa == end_pa) {
409 /* set to this task directly */
410 writel(task->pa_base,
411 thread->base + CMDQ_THR_CURR_ADDR);
412 } else {
413 cmdq_task_insert_into_thread(task);
414 smp_mb(); /* modify jump before enable thread */
415 }
416 }
417 writel(task->pa_base + pkt->cmd_buf_size,
418 thread->base + CMDQ_THR_END_ADDR);
419 cmdq_thread_resume(thread);
420 }
421 list_move_tail(&task->list_entry, &thread->task_busy_list);
422
423 return 0;
424}
425
/*
 * cmdq_mbox_startup() - mailbox .startup callback.
 *
 * No per-channel setup is required here; the hardware thread is
 * programmed lazily when the first packet is sent.
 */
static int cmdq_mbox_startup(struct mbox_chan *chan)
{
	return 0;
}
430
/*
 * cmdq_mbox_shutdown() - mailbox .shutdown callback.
 * Intentionally empty: no per-channel resources are allocated at
 * startup, so there is nothing to release here.
 */
static void cmdq_mbox_shutdown(struct mbox_chan *chan)
{
}
434
/*
 * Channel callbacks for the mailbox framework. TX completion is
 * reported by ACK (see the txdone_irq/txdone_poll setup in probe),
 * so no last_tx_done/peek_data hooks are provided.
 */
static const struct mbox_chan_ops cmdq_mbox_chan_ops = {
	.send_data = cmdq_mbox_send_data,
	.startup = cmdq_mbox_startup,
	.shutdown = cmdq_mbox_shutdown,
};
440
441static struct mbox_chan *cmdq_xlate(struct mbox_controller *mbox,
442 const struct of_phandle_args *sp)
443{
444 int ind = sp->args[0];
445 struct cmdq_thread *thread;
446
447 if (ind >= mbox->num_chans)
448 return ERR_PTR(-EINVAL);
449
450 thread = (struct cmdq_thread *)mbox->chans[ind].con_priv;
451 thread->priority = sp->args[1];
452 thread->atomic_exec = (sp->args[2] != 0);
453 thread->chan = &mbox->chans[ind];
454
455 return &mbox->chans[ind];
456}
457
458static int cmdq_probe(struct platform_device *pdev)
459{
460 struct device *dev = &pdev->dev;
461 struct resource *res;
462 struct cmdq *cmdq;
463 int err, i;
464
465 cmdq = devm_kzalloc(dev, sizeof(*cmdq), GFP_KERNEL);
466 if (!cmdq)
467 return -ENOMEM;
468
469 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
470 cmdq->base = devm_ioremap_resource(dev, res);
471 if (IS_ERR(cmdq->base)) {
472 dev_err(dev, "failed to ioremap gce\n");
473 return PTR_ERR(cmdq->base);
474 }
475
476 cmdq->irq = platform_get_irq(pdev, 0);
477 if (!cmdq->irq) {
478 dev_err(dev, "failed to get irq\n");
479 return -EINVAL;
480 }
481 err = devm_request_irq(dev, cmdq->irq, cmdq_irq_handler, IRQF_SHARED,
482 "mtk_cmdq", cmdq);
483 if (err < 0) {
484 dev_err(dev, "failed to register ISR (%d)\n", err);
485 return err;
486 }
487
488 dev_dbg(dev, "cmdq device: addr:0x%p, va:0x%p, irq:%d\n",
489 dev, cmdq->base, cmdq->irq);
490
491 cmdq->clock = devm_clk_get(dev, "gce");
492 if (IS_ERR(cmdq->clock)) {
493 dev_err(dev, "failed to get gce clk\n");
494 return PTR_ERR(cmdq->clock);
495 }
496
497 cmdq->thread_nr = (u32)(unsigned long)of_device_get_match_data(dev);
498 cmdq->mbox.dev = dev;
499 cmdq->mbox.chans = devm_kcalloc(dev, cmdq->thread_nr,
500 sizeof(*cmdq->mbox.chans), GFP_KERNEL);
501 if (!cmdq->mbox.chans)
502 return -ENOMEM;
503
504 cmdq->mbox.num_chans = cmdq->thread_nr;
505 cmdq->mbox.ops = &cmdq_mbox_chan_ops;
506 cmdq->mbox.of_xlate = cmdq_xlate;
507
508 /* make use of TXDONE_BY_ACK */
509 cmdq->mbox.txdone_irq = false;
510 cmdq->mbox.txdone_poll = false;
511
512 cmdq->thread = devm_kcalloc(dev, cmdq->thread_nr,
513 sizeof(*cmdq->thread), GFP_KERNEL);
514 if (!cmdq->thread)
515 return -ENOMEM;
516
517 for (i = 0; i < cmdq->thread_nr; i++) {
518 cmdq->thread[i].base = cmdq->base + CMDQ_THR_BASE +
519 CMDQ_THR_SIZE * i;
520 INIT_LIST_HEAD(&cmdq->thread[i].task_busy_list);
521 cmdq->mbox.chans[i].con_priv = (void *)&cmdq->thread[i];
522 }
523
524 err = mbox_controller_register(&cmdq->mbox);
525 if (err < 0) {
526 dev_err(dev, "failed to register mailbox: %d\n", err);
527 return err;
528 }
529
530 platform_set_drvdata(pdev, cmdq);
531 WARN_ON(clk_prepare(cmdq->clock) < 0);
532
533 cmdq_init(cmdq);
534
535 return 0;
536}
537
/* System sleep hooks: warn about queued work and gate the GCE clock. */
static const struct dev_pm_ops cmdq_pm_ops = {
	.suspend = cmdq_suspend,
	.resume = cmdq_resume,
};
542
/*
 * .data carries the per-SoC GCE thread count (16 on MT8173), retrieved
 * in probe via of_device_get_match_data() and cast back to an integer.
 */
static const struct of_device_id cmdq_of_ids[] = {
	{.compatible = "mediatek,mt8173-gce", .data = (void *)16},
	{}
};
547
/* Platform driver glue for the MediaTek GCE command-queue mailbox. */
static struct platform_driver cmdq_drv = {
	.probe = cmdq_probe,
	.remove = cmdq_remove,
	.driver = {
		.name = "mtk_cmdq",
		.pm = &cmdq_pm_ops,
		.of_match_table = cmdq_of_ids,
	}
};
557
/* Module init: register the platform driver (at subsys_initcall time). */
static int __init cmdq_drv_init(void)
{
	return platform_driver_register(&cmdq_drv);
}
562
/* Module exit: unregister the platform driver. */
static void __exit cmdq_drv_exit(void)
{
	platform_driver_unregister(&cmdq_drv);
}
567
568subsys_initcall(cmdq_drv_init);
569module_exit(cmdq_drv_exit);
570
571MODULE_LICENSE("GPL v2");
diff --git a/drivers/mailbox/omap-mailbox.c b/drivers/mailbox/omap-mailbox.c
index e1e2c085e68e..db66e952a871 100644
--- a/drivers/mailbox/omap-mailbox.c
+++ b/drivers/mailbox/omap-mailbox.c
@@ -1,3 +1,4 @@
1// SPDX-License-Identifier: GPL-2.0
1/* 2/*
2 * OMAP mailbox driver 3 * OMAP mailbox driver
3 * 4 *
@@ -6,15 +7,6 @@
6 * 7 *
7 * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com> 8 * Contact: Hiroshi DOYU <Hiroshi.DOYU@nokia.com>
8 * Suman Anna <s-anna@ti.com> 9 * Suman Anna <s-anna@ti.com>
9 *
10 * This program is free software; you can redistribute it and/or
11 * modify it under the terms of the GNU General Public License
12 * version 2 as published by the Free Software Foundation.
13 *
14 * This program is distributed in the hope that it will be useful, but
15 * WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
17 * General Public License for more details.
18 */ 10 */
19 11
20#include <linux/interrupt.h> 12#include <linux/interrupt.h>
@@ -77,6 +69,10 @@ struct omap_mbox_queue {
77 bool full; 69 bool full;
78}; 70};
79 71
72struct omap_mbox_match_data {
73 u32 intr_type;
74};
75
80struct omap_mbox_device { 76struct omap_mbox_device {
81 struct device *dev; 77 struct device *dev;
82 struct mutex cfg_lock; 78 struct mutex cfg_lock;
@@ -646,18 +642,21 @@ static const struct dev_pm_ops omap_mbox_pm_ops = {
646 SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume) 642 SET_SYSTEM_SLEEP_PM_OPS(omap_mbox_suspend, omap_mbox_resume)
647}; 643};
648 644
645static const struct omap_mbox_match_data omap2_data = { MBOX_INTR_CFG_TYPE1 };
646static const struct omap_mbox_match_data omap4_data = { MBOX_INTR_CFG_TYPE2 };
647
649static const struct of_device_id omap_mailbox_of_match[] = { 648static const struct of_device_id omap_mailbox_of_match[] = {
650 { 649 {
651 .compatible = "ti,omap2-mailbox", 650 .compatible = "ti,omap2-mailbox",
652 .data = (void *)MBOX_INTR_CFG_TYPE1, 651 .data = &omap2_data,
653 }, 652 },
654 { 653 {
655 .compatible = "ti,omap3-mailbox", 654 .compatible = "ti,omap3-mailbox",
656 .data = (void *)MBOX_INTR_CFG_TYPE1, 655 .data = &omap2_data,
657 }, 656 },
658 { 657 {
659 .compatible = "ti,omap4-mailbox", 658 .compatible = "ti,omap4-mailbox",
660 .data = (void *)MBOX_INTR_CFG_TYPE2, 659 .data = &omap4_data,
661 }, 660 },
662 { 661 {
663 /* end */ 662 /* end */
@@ -700,7 +699,7 @@ static int omap_mbox_probe(struct platform_device *pdev)
700 struct omap_mbox_fifo *fifo; 699 struct omap_mbox_fifo *fifo;
701 struct device_node *node = pdev->dev.of_node; 700 struct device_node *node = pdev->dev.of_node;
702 struct device_node *child; 701 struct device_node *child;
703 const struct of_device_id *match; 702 const struct omap_mbox_match_data *match_data;
704 u32 intr_type, info_count; 703 u32 intr_type, info_count;
705 u32 num_users, num_fifos; 704 u32 num_users, num_fifos;
706 u32 tmp[3]; 705 u32 tmp[3];
@@ -712,10 +711,10 @@ static int omap_mbox_probe(struct platform_device *pdev)
712 return -ENODEV; 711 return -ENODEV;
713 } 712 }
714 713
715 match = of_match_device(omap_mailbox_of_match, &pdev->dev); 714 match_data = of_device_get_match_data(&pdev->dev);
716 if (!match) 715 if (!match_data)
717 return -ENODEV; 716 return -ENODEV;
718 intr_type = (u32)match->data; 717 intr_type = match_data->intr_type;
719 718
720 if (of_property_read_u32(node, "ti,mbox-num-users", &num_users)) 719 if (of_property_read_u32(node, "ti,mbox-num-users", &num_users))
721 return -ENODEV; 720 return -ENODEV;
diff --git a/drivers/mailbox/ti-msgmgr.c b/drivers/mailbox/ti-msgmgr.c
index 5d04738c3c8a..5bceafbf6699 100644
--- a/drivers/mailbox/ti-msgmgr.c
+++ b/drivers/mailbox/ti-msgmgr.c
@@ -25,6 +25,17 @@
25#define Q_STATE_OFFSET(queue) ((queue) * 0x4) 25#define Q_STATE_OFFSET(queue) ((queue) * 0x4)
26#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000) 26#define Q_STATE_ENTRY_COUNT_MASK (0xFFF000)
27 27
28#define SPROXY_THREAD_OFFSET(tid) (0x1000 * (tid))
29#define SPROXY_THREAD_DATA_OFFSET(tid, reg) \
30 (SPROXY_THREAD_OFFSET(tid) + ((reg) * 0x4) + 0x4)
31
32#define SPROXY_THREAD_STATUS_OFFSET(tid) (SPROXY_THREAD_OFFSET(tid))
33
34#define SPROXY_THREAD_STATUS_COUNT_MASK (0xFF)
35
36#define SPROXY_THREAD_CTRL_OFFSET(tid) (0x1000 + SPROXY_THREAD_OFFSET(tid))
37#define SPROXY_THREAD_CTRL_DIR_MASK (0x1 << 31)
38
28/** 39/**
29 * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor 40 * struct ti_msgmgr_valid_queue_desc - SoC valid queues meant for this processor
30 * @queue_id: Queue Number for this path 41 * @queue_id: Queue Number for this path
@@ -42,14 +53,18 @@ struct ti_msgmgr_valid_queue_desc {
42 * @queue_count: Number of Queues 53 * @queue_count: Number of Queues
43 * @max_message_size: Message size in bytes 54 * @max_message_size: Message size in bytes
44 * @max_messages: Number of messages 55 * @max_messages: Number of messages
45 * @q_slices: Number of queue engines
46 * @q_proxies: Number of queue proxies per page
47 * @data_first_reg: First data register for proxy data region 56 * @data_first_reg: First data register for proxy data region
48 * @data_last_reg: Last data register for proxy data region 57 * @data_last_reg: Last data register for proxy data region
58 * @status_cnt_mask: Mask for getting the status value
59 * @status_err_mask: Mask for getting the error value, if applicable
49 * @tx_polled: Do I need to use polled mechanism for tx 60 * @tx_polled: Do I need to use polled mechanism for tx
50 * @tx_poll_timeout_ms: Timeout in ms if polled 61 * @tx_poll_timeout_ms: Timeout in ms if polled
51 * @valid_queues: List of Valid queues that the processor can access 62 * @valid_queues: List of Valid queues that the processor can access
63 * @data_region_name: Name of the proxy data region
64 * @status_region_name: Name of the proxy status region
65 * @ctrl_region_name: Name of the proxy control region
52 * @num_valid_queues: Number of valid queues 66 * @num_valid_queues: Number of valid queues
67 * @is_sproxy: Is this an Secure Proxy instance?
53 * 68 *
54 * This structure is used in of match data to describe how integration 69 * This structure is used in of match data to describe how integration
55 * for a specific compatible SoC is done. 70 * for a specific compatible SoC is done.
@@ -58,14 +73,18 @@ struct ti_msgmgr_desc {
58 u8 queue_count; 73 u8 queue_count;
59 u8 max_message_size; 74 u8 max_message_size;
60 u8 max_messages; 75 u8 max_messages;
61 u8 q_slices;
62 u8 q_proxies;
63 u8 data_first_reg; 76 u8 data_first_reg;
64 u8 data_last_reg; 77 u8 data_last_reg;
78 u32 status_cnt_mask;
79 u32 status_err_mask;
65 bool tx_polled; 80 bool tx_polled;
66 int tx_poll_timeout_ms; 81 int tx_poll_timeout_ms;
67 const struct ti_msgmgr_valid_queue_desc *valid_queues; 82 const struct ti_msgmgr_valid_queue_desc *valid_queues;
83 const char *data_region_name;
84 const char *status_region_name;
85 const char *ctrl_region_name;
68 int num_valid_queues; 86 int num_valid_queues;
87 bool is_sproxy;
69}; 88};
70 89
71/** 90/**
@@ -78,6 +97,7 @@ struct ti_msgmgr_desc {
78 * @queue_buff_start: First register of Data Buffer 97 * @queue_buff_start: First register of Data Buffer
79 * @queue_buff_end: Last (or confirmation) register of Data buffer 98 * @queue_buff_end: Last (or confirmation) register of Data buffer
80 * @queue_state: Queue status register 99 * @queue_state: Queue status register
100 * @queue_ctrl: Queue Control register
81 * @chan: Mailbox channel 101 * @chan: Mailbox channel
82 * @rx_buff: Receive buffer pointer allocated at probe, max_message_size 102 * @rx_buff: Receive buffer pointer allocated at probe, max_message_size
83 */ 103 */
@@ -90,6 +110,7 @@ struct ti_queue_inst {
90 void __iomem *queue_buff_start; 110 void __iomem *queue_buff_start;
91 void __iomem *queue_buff_end; 111 void __iomem *queue_buff_end;
92 void __iomem *queue_state; 112 void __iomem *queue_state;
113 void __iomem *queue_ctrl;
93 struct mbox_chan *chan; 114 struct mbox_chan *chan;
94 u32 *rx_buff; 115 u32 *rx_buff;
95}; 116};
@@ -100,6 +121,7 @@ struct ti_queue_inst {
100 * @desc: Description of the SoC integration 121 * @desc: Description of the SoC integration
101 * @queue_proxy_region: Queue proxy region where queue buffers are located 122 * @queue_proxy_region: Queue proxy region where queue buffers are located
102 * @queue_state_debug_region: Queue status register regions 123 * @queue_state_debug_region: Queue status register regions
124 * @queue_ctrl_region: Queue Control register regions
103 * @num_valid_queues: Number of valid queues defined for the processor 125 * @num_valid_queues: Number of valid queues defined for the processor
104 * Note: other queues are probably reserved for other processors 126 * Note: other queues are probably reserved for other processors
105 * in the SoC. 127 * in the SoC.
@@ -112,6 +134,7 @@ struct ti_msgmgr_inst {
112 const struct ti_msgmgr_desc *desc; 134 const struct ti_msgmgr_desc *desc;
113 void __iomem *queue_proxy_region; 135 void __iomem *queue_proxy_region;
114 void __iomem *queue_state_debug_region; 136 void __iomem *queue_state_debug_region;
137 void __iomem *queue_ctrl_region;
115 u8 num_valid_queues; 138 u8 num_valid_queues;
116 struct ti_queue_inst *qinsts; 139 struct ti_queue_inst *qinsts;
117 struct mbox_controller mbox; 140 struct mbox_controller mbox;
@@ -120,25 +143,54 @@ struct ti_msgmgr_inst {
120 143
121/** 144/**
122 * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages 145 * ti_msgmgr_queue_get_num_messages() - Get the number of pending messages
146 * @d: Description of message manager
123 * @qinst: Queue instance for which we check the number of pending messages 147 * @qinst: Queue instance for which we check the number of pending messages
124 * 148 *
125 * Return: number of messages pending in the queue (0 == no pending messages) 149 * Return: number of messages pending in the queue (0 == no pending messages)
126 */ 150 */
127static inline int ti_msgmgr_queue_get_num_messages(struct ti_queue_inst *qinst) 151static inline int
152ti_msgmgr_queue_get_num_messages(const struct ti_msgmgr_desc *d,
153 struct ti_queue_inst *qinst)
128{ 154{
129 u32 val; 155 u32 val;
156 u32 status_cnt_mask = d->status_cnt_mask;
130 157
131 /* 158 /*
132 * We cannot use relaxed operation here - update may happen 159 * We cannot use relaxed operation here - update may happen
133 * real-time. 160 * real-time.
134 */ 161 */
135 val = readl(qinst->queue_state) & Q_STATE_ENTRY_COUNT_MASK; 162 val = readl(qinst->queue_state) & status_cnt_mask;
136 val >>= __ffs(Q_STATE_ENTRY_COUNT_MASK); 163 val >>= __ffs(status_cnt_mask);
137 164
138 return val; 165 return val;
139} 166}
140 167
141/** 168/**
169 * ti_msgmgr_queue_is_error() - Check to see if there is queue error
170 * @d: Description of message manager
171 * @qinst: Queue instance for which we check the number of pending messages
172 *
173 * Return: true if error, else false
174 */
175static inline bool ti_msgmgr_queue_is_error(const struct ti_msgmgr_desc *d,
176 struct ti_queue_inst *qinst)
177{
178 u32 val;
179
180 /* Msgmgr has no error detection */
181 if (!d->is_sproxy)
182 return false;
183
184 /*
185 * We cannot use relaxed operation here - update may happen
186 * real-time.
187 */
188 val = readl(qinst->queue_state) & d->status_err_mask;
189
190 return val ? true : false;
191}
192
193/**
142 * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue 194 * ti_msgmgr_queue_rx_interrupt() - Interrupt handler for receive Queue
143 * @irq: Interrupt number 195 * @irq: Interrupt number
144 * @p: Channel Pointer 196 * @p: Channel Pointer
@@ -171,8 +223,14 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
171 return IRQ_NONE; 223 return IRQ_NONE;
172 } 224 }
173 225
226 desc = inst->desc;
227 if (ti_msgmgr_queue_is_error(desc, qinst)) {
228 dev_err(dev, "Error on Rx channel %s\n", qinst->name);
229 return IRQ_NONE;
230 }
231
174 /* Do I actually have messages to read? */ 232 /* Do I actually have messages to read? */
175 msg_count = ti_msgmgr_queue_get_num_messages(qinst); 233 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
176 if (!msg_count) { 234 if (!msg_count) {
177 /* Shared IRQ? */ 235 /* Shared IRQ? */
178 dev_dbg(dev, "Spurious event - 0 pending data!\n"); 236 dev_dbg(dev, "Spurious event - 0 pending data!\n");
@@ -185,7 +243,6 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
185 * of how many bytes I should be reading. Let the client figure this 243 * of how many bytes I should be reading. Let the client figure this
186 * out.. I just read the full message and pass it on.. 244 * out.. I just read the full message and pass it on..
187 */ 245 */
188 desc = inst->desc;
189 message.len = desc->max_message_size; 246 message.len = desc->max_message_size;
190 message.buf = (u8 *)qinst->rx_buff; 247 message.buf = (u8 *)qinst->rx_buff;
191 248
@@ -228,12 +285,20 @@ static irqreturn_t ti_msgmgr_queue_rx_interrupt(int irq, void *p)
228static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan) 285static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
229{ 286{
230 struct ti_queue_inst *qinst = chan->con_priv; 287 struct ti_queue_inst *qinst = chan->con_priv;
288 struct device *dev = chan->mbox->dev;
289 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
290 const struct ti_msgmgr_desc *desc = inst->desc;
231 int msg_count; 291 int msg_count;
232 292
233 if (qinst->is_tx) 293 if (qinst->is_tx)
234 return false; 294 return false;
235 295
236 msg_count = ti_msgmgr_queue_get_num_messages(qinst); 296 if (ti_msgmgr_queue_is_error(desc, qinst)) {
297 dev_err(dev, "Error on channel %s\n", qinst->name);
298 return false;
299 }
300
301 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
237 302
238 return msg_count ? true : false; 303 return msg_count ? true : false;
239} 304}
@@ -247,12 +312,25 @@ static bool ti_msgmgr_queue_peek_data(struct mbox_chan *chan)
247static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan) 312static bool ti_msgmgr_last_tx_done(struct mbox_chan *chan)
248{ 313{
249 struct ti_queue_inst *qinst = chan->con_priv; 314 struct ti_queue_inst *qinst = chan->con_priv;
315 struct device *dev = chan->mbox->dev;
316 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
317 const struct ti_msgmgr_desc *desc = inst->desc;
250 int msg_count; 318 int msg_count;
251 319
252 if (!qinst->is_tx) 320 if (!qinst->is_tx)
253 return false; 321 return false;
254 322
255 msg_count = ti_msgmgr_queue_get_num_messages(qinst); 323 if (ti_msgmgr_queue_is_error(desc, qinst)) {
324 dev_err(dev, "Error on channel %s\n", qinst->name);
325 return false;
326 }
327
328 msg_count = ti_msgmgr_queue_get_num_messages(desc, qinst);
329
330 if (desc->is_sproxy) {
331 /* In secure proxy, msg_count indicates how many we can send */
332 return msg_count ? true : false;
333 }
256 334
257 /* if we have any messages pending.. */ 335 /* if we have any messages pending.. */
258 return msg_count ? false : true; 336 return msg_count ? false : true;
@@ -282,6 +360,11 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
282 } 360 }
283 desc = inst->desc; 361 desc = inst->desc;
284 362
363 if (ti_msgmgr_queue_is_error(desc, qinst)) {
364 dev_err(dev, "Error on channel %s\n", qinst->name);
365 return false;
366 }
367
285 if (desc->max_message_size < message->len) { 368 if (desc->max_message_size < message->len) {
286 dev_err(dev, "Queue %s message length %zu > max %d\n", 369 dev_err(dev, "Queue %s message length %zu > max %d\n",
287 qinst->name, message->len, desc->max_message_size); 370 qinst->name, message->len, desc->max_message_size);
@@ -315,6 +398,53 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
315} 398}
316 399
317/** 400/**
401 * ti_msgmgr_queue_rx_irq_req() - RX IRQ request
402 * @dev: device pointer
403 * @d: descriptor for ti_msgmgr
404 * @qinst: Queue instance
405 * @chan: Channel pointer
406 */
407static int ti_msgmgr_queue_rx_irq_req(struct device *dev,
408 const struct ti_msgmgr_desc *d,
409 struct ti_queue_inst *qinst,
410 struct mbox_chan *chan)
411{
412 int ret = 0;
413 char of_rx_irq_name[7];
414 struct device_node *np;
415
416 snprintf(of_rx_irq_name, sizeof(of_rx_irq_name),
417 "rx_%03d", d->is_sproxy ? qinst->proxy_id : qinst->queue_id);
418
419 /* Get the IRQ if not found */
420 if (qinst->irq < 0) {
421 np = of_node_get(dev->of_node);
422 if (!np)
423 return -ENODATA;
424 qinst->irq = of_irq_get_byname(np, of_rx_irq_name);
425 of_node_put(np);
426
427 if (qinst->irq < 0) {
428 dev_err(dev,
429 "QID %d PID %d:No IRQ[%s]: %d\n",
430 qinst->queue_id, qinst->proxy_id,
431 of_rx_irq_name, qinst->irq);
432 return qinst->irq;
433 }
434 }
435
436 /* With the expectation that the IRQ might be shared in SoC */
437 ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt,
438 IRQF_SHARED, qinst->name, chan);
439 if (ret) {
440 dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n",
441 qinst->irq, qinst->name, ret);
442 }
443
444 return ret;
445}
446
447/**
318 * ti_msgmgr_queue_startup() - Startup queue 448 * ti_msgmgr_queue_startup() - Startup queue
319 * @chan: Channel pointer 449 * @chan: Channel pointer
320 * 450 *
@@ -322,19 +452,39 @@ static int ti_msgmgr_send_data(struct mbox_chan *chan, void *data)
322 */ 452 */
323static int ti_msgmgr_queue_startup(struct mbox_chan *chan) 453static int ti_msgmgr_queue_startup(struct mbox_chan *chan)
324{ 454{
325 struct ti_queue_inst *qinst = chan->con_priv;
326 struct device *dev = chan->mbox->dev; 455 struct device *dev = chan->mbox->dev;
456 struct ti_msgmgr_inst *inst = dev_get_drvdata(dev);
457 struct ti_queue_inst *qinst = chan->con_priv;
458 const struct ti_msgmgr_desc *d = inst->desc;
327 int ret; 459 int ret;
460 int msg_count;
461
462 /*
463 * If sproxy is starting and can send messages, we are a Tx thread,
464 * else Rx
465 */
466 if (d->is_sproxy) {
467 qinst->is_tx = (readl(qinst->queue_ctrl) &
468 SPROXY_THREAD_CTRL_DIR_MASK) ? false : true;
469
470 msg_count = ti_msgmgr_queue_get_num_messages(d, qinst);
471
472 if (!msg_count && qinst->is_tx) {
473 dev_err(dev, "%s: Cannot transmit with 0 credits!\n",
474 qinst->name);
475 return -EINVAL;
476 }
477 }
328 478
329 if (!qinst->is_tx) { 479 if (!qinst->is_tx) {
330 /* 480 /* Allocate usage buffer for rx */
331 * With the expectation that the IRQ might be shared in SoC 481 qinst->rx_buff = kzalloc(d->max_message_size, GFP_KERNEL);
332 */ 482 if (!qinst->rx_buff)
333 ret = request_irq(qinst->irq, ti_msgmgr_queue_rx_interrupt, 483 return -ENOMEM;
334 IRQF_SHARED, qinst->name, chan); 484 /* Request IRQ */
485 ret = ti_msgmgr_queue_rx_irq_req(dev, d, qinst, chan);
335 if (ret) { 486 if (ret) {
336 dev_err(dev, "Unable to get IRQ %d on %s(res=%d)\n", 487 kfree(qinst->rx_buff);
337 qinst->irq, qinst->name, ret);
338 return ret; 488 return ret;
339 } 489 }
340 } 490 }
@@ -350,8 +500,10 @@ static void ti_msgmgr_queue_shutdown(struct mbox_chan *chan)
350{ 500{
351 struct ti_queue_inst *qinst = chan->con_priv; 501 struct ti_queue_inst *qinst = chan->con_priv;
352 502
353 if (!qinst->is_tx) 503 if (!qinst->is_tx) {
354 free_irq(qinst->irq, chan); 504 free_irq(qinst->irq, chan);
505 kfree(qinst->rx_buff);
506 }
355} 507}
356 508
357/** 509/**
@@ -368,20 +520,38 @@ static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
368 struct ti_msgmgr_inst *inst; 520 struct ti_msgmgr_inst *inst;
369 int req_qid, req_pid; 521 int req_qid, req_pid;
370 struct ti_queue_inst *qinst; 522 struct ti_queue_inst *qinst;
371 int i; 523 const struct ti_msgmgr_desc *d;
524 int i, ncells;
372 525
373 inst = container_of(mbox, struct ti_msgmgr_inst, mbox); 526 inst = container_of(mbox, struct ti_msgmgr_inst, mbox);
374 if (WARN_ON(!inst)) 527 if (WARN_ON(!inst))
375 return ERR_PTR(-EINVAL); 528 return ERR_PTR(-EINVAL);
376 529
377 /* #mbox-cells is 2 */ 530 d = inst->desc;
378 if (p->args_count != 2) { 531
379 dev_err(inst->dev, "Invalid arguments in dt[%d] instead of 2\n", 532 if (d->is_sproxy)
380 p->args_count); 533 ncells = 1;
534 else
535 ncells = 2;
536 if (p->args_count != ncells) {
537 dev_err(inst->dev, "Invalid arguments in dt[%d]. Must be %d\n",
538 p->args_count, ncells);
381 return ERR_PTR(-EINVAL); 539 return ERR_PTR(-EINVAL);
382 } 540 }
383 req_qid = p->args[0]; 541 if (ncells == 1) {
384 req_pid = p->args[1]; 542 req_qid = 0;
543 req_pid = p->args[0];
544 } else {
545 req_qid = p->args[0];
546 req_pid = p->args[1];
547 }
548
549 if (d->is_sproxy) {
550 if (req_pid > d->num_valid_queues)
551 goto err;
552 qinst = &inst->qinsts[req_pid];
553 return qinst->chan;
554 }
385 555
386 for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues; 556 for (qinst = inst->qinsts, i = 0; i < inst->num_valid_queues;
387 i++, qinst++) { 557 i++, qinst++) {
@@ -389,6 +559,7 @@ static struct mbox_chan *ti_msgmgr_of_xlate(struct mbox_controller *mbox,
389 return qinst->chan; 559 return qinst->chan;
390 } 560 }
391 561
562err:
392 dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %s\n", 563 dev_err(inst->dev, "Queue ID %d, Proxy ID %d is wrong on %s\n",
393 req_qid, req_pid, p->np->name); 564 req_qid, req_pid, p->np->name);
394 return ERR_PTR(-ENOENT); 565 return ERR_PTR(-ENOENT);
@@ -415,6 +586,8 @@ static int ti_msgmgr_queue_setup(int idx, struct device *dev,
415 struct ti_queue_inst *qinst, 586 struct ti_queue_inst *qinst,
416 struct mbox_chan *chan) 587 struct mbox_chan *chan)
417{ 588{
589 char *dir;
590
418 qinst->proxy_id = qd->proxy_id; 591 qinst->proxy_id = qd->proxy_id;
419 qinst->queue_id = qd->queue_id; 592 qinst->queue_id = qd->queue_id;
420 593
@@ -424,40 +597,43 @@ static int ti_msgmgr_queue_setup(int idx, struct device *dev,
424 return -ERANGE; 597 return -ERANGE;
425 } 598 }
426 599
427 qinst->is_tx = qd->is_tx; 600 if (d->is_sproxy) {
428 snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d", 601 qinst->queue_buff_start = inst->queue_proxy_region +
429 dev_name(dev), qinst->is_tx ? "tx" : "rx", qinst->queue_id, 602 SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
430 qinst->proxy_id); 603 d->data_first_reg);
431 604 qinst->queue_buff_end = inst->queue_proxy_region +
432 if (!qinst->is_tx) { 605 SPROXY_THREAD_DATA_OFFSET(qinst->proxy_id,
433 char of_rx_irq_name[7]; 606 d->data_last_reg);
434 607 qinst->queue_state = inst->queue_state_debug_region +
435 snprintf(of_rx_irq_name, sizeof(of_rx_irq_name), 608 SPROXY_THREAD_STATUS_OFFSET(qinst->proxy_id);
436 "rx_%03d", qinst->queue_id); 609 qinst->queue_ctrl = inst->queue_ctrl_region +
437 610 SPROXY_THREAD_CTRL_OFFSET(qinst->proxy_id);
438 qinst->irq = of_irq_get_byname(np, of_rx_irq_name); 611
439 if (qinst->irq < 0) { 612 /* XXX: DONOT read registers here!.. Some may be unusable */
440 dev_crit(dev, 613 dir = "thr";
441 "[%d]QID %d PID %d:No IRQ[%s]: %d\n", 614 snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d",
442 idx, qinst->queue_id, qinst->proxy_id, 615 dev_name(dev), dir, qinst->proxy_id);
443 of_rx_irq_name, qinst->irq); 616 } else {
444 return qinst->irq; 617 qinst->queue_buff_start = inst->queue_proxy_region +
445 } 618 Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
446 /* Allocate usage buffer for rx */ 619 d->data_first_reg);
447 qinst->rx_buff = devm_kzalloc(dev, 620 qinst->queue_buff_end = inst->queue_proxy_region +
448 d->max_message_size, GFP_KERNEL); 621 Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id,
449 if (!qinst->rx_buff) 622 d->data_last_reg);
450 return -ENOMEM; 623 qinst->queue_state =
624 inst->queue_state_debug_region +
625 Q_STATE_OFFSET(qinst->queue_id);
626 qinst->is_tx = qd->is_tx;
627 dir = qinst->is_tx ? "tx" : "rx";
628 snprintf(qinst->name, sizeof(qinst->name), "%s %s_%03d_%03d",
629 dev_name(dev), dir, qinst->queue_id, qinst->proxy_id);
451 } 630 }
452 631
453 qinst->queue_buff_start = inst->queue_proxy_region +
454 Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_first_reg);
455 qinst->queue_buff_end = inst->queue_proxy_region +
456 Q_DATA_OFFSET(qinst->proxy_id, qinst->queue_id, d->data_last_reg);
457 qinst->queue_state = inst->queue_state_debug_region +
458 Q_STATE_OFFSET(qinst->queue_id);
459 qinst->chan = chan; 632 qinst->chan = chan;
460 633
634 /* Setup an error value for IRQ - Lazy allocation */
635 qinst->irq = -EINVAL;
636
461 chan->con_priv = qinst; 637 chan->con_priv = qinst;
462 638
463 dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n", 639 dev_dbg(dev, "[%d] qidx=%d pidx=%d irq=%d q_s=%p q_e = %p\n",
@@ -494,19 +670,37 @@ static const struct ti_msgmgr_desc k2g_desc = {
494 .queue_count = 64, 670 .queue_count = 64,
495 .max_message_size = 64, 671 .max_message_size = 64,
496 .max_messages = 128, 672 .max_messages = 128,
497 .q_slices = 1, 673 .data_region_name = "queue_proxy_region",
498 .q_proxies = 1, 674 .status_region_name = "queue_state_debug_region",
499 .data_first_reg = 16, 675 .data_first_reg = 16,
500 .data_last_reg = 31, 676 .data_last_reg = 31,
677 .status_cnt_mask = Q_STATE_ENTRY_COUNT_MASK,
501 .tx_polled = false, 678 .tx_polled = false,
502 .valid_queues = k2g_valid_queues, 679 .valid_queues = k2g_valid_queues,
503 .num_valid_queues = ARRAY_SIZE(k2g_valid_queues), 680 .num_valid_queues = ARRAY_SIZE(k2g_valid_queues),
681 .is_sproxy = false,
682};
683
684static const struct ti_msgmgr_desc am654_desc = {
685 .queue_count = 190,
686 .num_valid_queues = 190,
687 .max_message_size = 60,
688 .data_region_name = "target_data",
689 .status_region_name = "rt",
690 .ctrl_region_name = "scfg",
691 .data_first_reg = 0,
692 .data_last_reg = 14,
693 .status_cnt_mask = SPROXY_THREAD_STATUS_COUNT_MASK,
694 .tx_polled = false,
695 .is_sproxy = true,
504}; 696};
505 697
506static const struct of_device_id ti_msgmgr_of_match[] = { 698static const struct of_device_id ti_msgmgr_of_match[] = {
507 {.compatible = "ti,k2g-message-manager", .data = &k2g_desc}, 699 {.compatible = "ti,k2g-message-manager", .data = &k2g_desc},
700 {.compatible = "ti,am654-secure-proxy", .data = &am654_desc},
508 { /* Sentinel */ } 701 { /* Sentinel */ }
509}; 702};
703
510MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match); 704MODULE_DEVICE_TABLE(of, ti_msgmgr_of_match);
511 705
512static int ti_msgmgr_probe(struct platform_device *pdev) 706static int ti_msgmgr_probe(struct platform_device *pdev)
@@ -546,17 +740,25 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
546 inst->desc = desc; 740 inst->desc = desc;
547 741
548 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 742 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
549 "queue_proxy_region"); 743 desc->data_region_name);
550 inst->queue_proxy_region = devm_ioremap_resource(dev, res); 744 inst->queue_proxy_region = devm_ioremap_resource(dev, res);
551 if (IS_ERR(inst->queue_proxy_region)) 745 if (IS_ERR(inst->queue_proxy_region))
552 return PTR_ERR(inst->queue_proxy_region); 746 return PTR_ERR(inst->queue_proxy_region);
553 747
554 res = platform_get_resource_byname(pdev, IORESOURCE_MEM, 748 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
555 "queue_state_debug_region"); 749 desc->status_region_name);
556 inst->queue_state_debug_region = devm_ioremap_resource(dev, res); 750 inst->queue_state_debug_region = devm_ioremap_resource(dev, res);
557 if (IS_ERR(inst->queue_state_debug_region)) 751 if (IS_ERR(inst->queue_state_debug_region))
558 return PTR_ERR(inst->queue_state_debug_region); 752 return PTR_ERR(inst->queue_state_debug_region);
559 753
754 if (desc->is_sproxy) {
755 res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
756 desc->ctrl_region_name);
757 inst->queue_ctrl_region = devm_ioremap_resource(dev, res);
758 if (IS_ERR(inst->queue_ctrl_region))
759 return PTR_ERR(inst->queue_ctrl_region);
760 }
761
560 dev_dbg(dev, "proxy region=%p, queue_state=%p\n", 762 dev_dbg(dev, "proxy region=%p, queue_state=%p\n",
561 inst->queue_proxy_region, inst->queue_state_debug_region); 763 inst->queue_proxy_region, inst->queue_state_debug_region);
562 764
@@ -578,12 +780,29 @@ static int ti_msgmgr_probe(struct platform_device *pdev)
578 return -ENOMEM; 780 return -ENOMEM;
579 inst->chans = chans; 781 inst->chans = chans;
580 782
581 for (i = 0, queue_desc = desc->valid_queues; 783 if (desc->is_sproxy) {
582 i < queue_count; i++, qinst++, chans++, queue_desc++) { 784 struct ti_msgmgr_valid_queue_desc sproxy_desc;
583 ret = ti_msgmgr_queue_setup(i, dev, np, inst, 785
584 desc, queue_desc, qinst, chans); 786 /* All proxies may be valid in Secure Proxy instance */
585 if (ret) 787 for (i = 0; i < queue_count; i++, qinst++, chans++) {
586 return ret; 788 sproxy_desc.queue_id = 0;
789 sproxy_desc.proxy_id = i;
790 ret = ti_msgmgr_queue_setup(i, dev, np, inst,
791 desc, &sproxy_desc, qinst,
792 chans);
793 if (ret)
794 return ret;
795 }
796 } else {
797 /* Only Some proxies are valid in Message Manager */
798 for (i = 0, queue_desc = desc->valid_queues;
799 i < queue_count; i++, qinst++, chans++, queue_desc++) {
800 ret = ti_msgmgr_queue_setup(i, dev, np, inst,
801 desc, queue_desc, qinst,
802 chans);
803 if (ret)
804 return ret;
805 }
587 } 806 }
588 807
589 mbox = &inst->mbox; 808 mbox = &inst->mbox;
diff --git a/include/dt-bindings/gce/mt8173-gce.h b/include/dt-bindings/gce/mt8173-gce.h
new file mode 100644
index 000000000000..ffcf94ba96c6
--- /dev/null
+++ b/include/dt-bindings/gce/mt8173-gce.h
@@ -0,0 +1,44 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2018 MediaTek Inc.
4 * Author: Houlong Wei <houlong.wei@mediatek.com>
5 *
6 */
7
8#ifndef _DT_BINDINGS_GCE_MT8173_H
9#define _DT_BINDINGS_GCE_MT8173_H
10
11/* GCE HW thread priority */
12#define CMDQ_THR_PRIO_LOWEST 0
13#define CMDQ_THR_PRIO_HIGHEST 1
14
15/* GCE SUBSYS */
16#define SUBSYS_1400XXXX 1
17#define SUBSYS_1401XXXX 2
18#define SUBSYS_1402XXXX 3
19
20/* GCE HW EVENT */
21#define CMDQ_EVENT_DISP_OVL0_SOF 11
22#define CMDQ_EVENT_DISP_OVL1_SOF 12
23#define CMDQ_EVENT_DISP_RDMA0_SOF 13
24#define CMDQ_EVENT_DISP_RDMA1_SOF 14
25#define CMDQ_EVENT_DISP_RDMA2_SOF 15
26#define CMDQ_EVENT_DISP_WDMA0_SOF 16
27#define CMDQ_EVENT_DISP_WDMA1_SOF 17
28#define CMDQ_EVENT_DISP_OVL0_EOF 39
29#define CMDQ_EVENT_DISP_OVL1_EOF 40
30#define CMDQ_EVENT_DISP_RDMA0_EOF 41
31#define CMDQ_EVENT_DISP_RDMA1_EOF 42
32#define CMDQ_EVENT_DISP_RDMA2_EOF 43
33#define CMDQ_EVENT_DISP_WDMA0_EOF 44
34#define CMDQ_EVENT_DISP_WDMA1_EOF 45
35#define CMDQ_EVENT_MUTEX0_STREAM_EOF 53
36#define CMDQ_EVENT_MUTEX1_STREAM_EOF 54
37#define CMDQ_EVENT_MUTEX2_STREAM_EOF 55
38#define CMDQ_EVENT_MUTEX3_STREAM_EOF 56
39#define CMDQ_EVENT_MUTEX4_STREAM_EOF 57
40#define CMDQ_EVENT_DISP_RDMA0_UNDERRUN 63
41#define CMDQ_EVENT_DISP_RDMA1_UNDERRUN 64
42#define CMDQ_EVENT_DISP_RDMA2_UNDERRUN 65
43
44#endif
diff --git a/include/linux/mailbox/mtk-cmdq-mailbox.h b/include/linux/mailbox/mtk-cmdq-mailbox.h
new file mode 100644
index 000000000000..ccb73422c2fa
--- /dev/null
+++ b/include/linux/mailbox/mtk-cmdq-mailbox.h
@@ -0,0 +1,77 @@
1/* SPDX-License-Identifier: GPL-2.0 */
2/*
3 * Copyright (c) 2018 MediaTek Inc.
4 *
5 */
6
7#ifndef __MTK_CMDQ_MAILBOX_H__
8#define __MTK_CMDQ_MAILBOX_H__
9
10#include <linux/platform_device.h>
11#include <linux/slab.h>
12#include <linux/types.h>
13
14#define CMDQ_INST_SIZE 8 /* instruction is 64-bit */
15#define CMDQ_SUBSYS_SHIFT 16
16#define CMDQ_OP_CODE_SHIFT 24
17#define CMDQ_JUMP_PASS CMDQ_INST_SIZE
18
19#define CMDQ_WFE_UPDATE BIT(31)
20#define CMDQ_WFE_WAIT BIT(15)
21#define CMDQ_WFE_WAIT_VALUE 0x1
22
23/*
24 * CMDQ_CODE_MASK:
25 * set write mask
26 * format: op mask
27 * CMDQ_CODE_WRITE:
28 * write value into target register
29 * format: op subsys address value
30 * CMDQ_CODE_JUMP:
31 * jump by offset
32 * format: op offset
33 * CMDQ_CODE_WFE:
34 * wait for event and clear
35 * it is just clear if no wait
36 * format: [wait] op event update:1 to_wait:1 wait:1
37 * [clear] op event update:1 to_wait:0 wait:0
38 * CMDQ_CODE_EOC:
39 * end of command
40 * format: op irq_flag
41 */
42enum cmdq_code {
43 CMDQ_CODE_MASK = 0x02,
44 CMDQ_CODE_WRITE = 0x04,
45 CMDQ_CODE_JUMP = 0x10,
46 CMDQ_CODE_WFE = 0x20,
47 CMDQ_CODE_EOC = 0x40,
48};
49
50enum cmdq_cb_status {
51 CMDQ_CB_NORMAL = 0,
52 CMDQ_CB_ERROR
53};
54
55struct cmdq_cb_data {
56 enum cmdq_cb_status sta;
57 void *data;
58};
59
60typedef void (*cmdq_async_flush_cb)(struct cmdq_cb_data data);
61
62struct cmdq_task_cb {
63 cmdq_async_flush_cb cb;
64 void *data;
65};
66
67struct cmdq_pkt {
68 void *va_base;
69 dma_addr_t pa_base;
70 size_t cmd_buf_size; /* command occupied size */
71 size_t buf_size; /* real buffer size */
72 struct cmdq_task_cb cb;
73 struct cmdq_task_cb async_cb;
74 void *cl;
75};
76
77#endif /* __MTK_CMDQ_MAILBOX_H__ */
diff --git a/include/linux/omap-mailbox.h b/include/linux/omap-mailbox.h
index c726bd833761..6dbcd2da0332 100644
--- a/include/linux/omap-mailbox.h
+++ b/include/linux/omap-mailbox.h
@@ -1,9 +1,6 @@
1/* SPDX-License-Identifier: GPL-2.0 */
1/* 2/*
2 * omap-mailbox: interprocessor communication module for OMAP 3 * omap-mailbox: interprocessor communication module for OMAP
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */ 4 */
8 5
9#ifndef OMAP_MAILBOX_H 6#ifndef OMAP_MAILBOX_H