author     Linus Torvalds <torvalds@linux-foundation.org>  2017-05-01 16:10:03 -0400
committer  Linus Torvalds <torvalds@linux-foundation.org>  2017-05-01 16:10:03 -0400
commit     477d7caeede0e3a933368440fc877b12c25dbb6d (patch)
tree       e35e179a5231dd0501aa9f3868d342bd77e30a96
parent     6fb41cbd7d82f7b9e41db6245f6a46c84ca3083e (diff)
parent     cb710ab1d8a23f68ff8f45aedf3e552bb90e70de (diff)
Merge branch 'mailbox-for-next' of git://git.linaro.org/landing-teams/working/fujitsu/integration
Pull mailbox updates from Jassi Brar:

 - new driver for Broadcom FlexRM controller

 - constify data structures of callback functions in some drivers

 - a few bug fixes uncovered by multi-threaded use of mailbox channels
   in blocking mode

* 'mailbox-for-next' of git://git.linaro.org/landing-teams/working/fujitsu/integration:
  mailbox: handle empty message in tx_tick
  mailbox: skip complete wait event if timer expired
  mailbox: always wait in mbox_send_message for blocking Tx mode
  mailbox: Remove depends on COMPILE_TEST for BCM_FLEXRM_MBOX
  mailbox: check ->last_tx_done for NULL in case of timer-based polling
  dt-bindings: Add DT bindings info for FlexRM ring manager
  mailbox: Add driver for Broadcom FlexRM ring manager
  dt-bindings: mailbox: Update doc with NSP PDC/mailbox support
  mailbox: bcm-pdc: Add Northstar Plus support to PDC driver
  mailbox: constify mbox_chan_ops structures
-rw-r--r--  Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt    59
-rw-r--r--  Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt        6
-rw-r--r--  drivers/mailbox/Kconfig                                                  18
-rw-r--r--  drivers/mailbox/Makefile                                                  2
-rw-r--r--  drivers/mailbox/bcm-flexrm-mailbox.c                                   1595
-rw-r--r--  drivers/mailbox/bcm-pdc-mailbox.c                                        61
-rw-r--r--  drivers/mailbox/hi6220-mailbox.c                                          2
-rw-r--r--  drivers/mailbox/mailbox-xgene-slimpro.c                                   2
-rw-r--r--  drivers/mailbox/mailbox.c                                                19
-rw-r--r--  include/linux/mailbox/brcm-message.h                                     14
10 files changed, 1746 insertions(+), 32 deletions(-)
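Not part of this merge — purely as a hedged illustration of what the blocking-Tx fixes above exercise, a hypothetical mailbox client submitting one SPU request over a FlexRM/PDC channel might look like the sketch below. The function name, timeout, and error handling are assumptions; the mailbox-client and brcm-message calls are the kernel APIs these drivers plug into, and the client device is assumed to carry an "mboxes" property such as the crypto node in the FlexRM binding example further down.

#include <linux/device.h>
#include <linux/err.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/scatterlist.h>

/*
 * Hypothetical client sketch: request channel 0 of the device's "mboxes"
 * property, send one SPU request, and block until the controller reports
 * Tx done. A real client keeps the mbox_client and channel for its lifetime.
 */
static int example_spu_send(struct device *dev,
			    struct scatterlist *src,
			    struct scatterlist *dst)
{
	struct mbox_client cl = {
		.dev = dev,
		.tx_block = true,	/* blocking Tx mode */
		.tx_tout = 1000,	/* ms to wait for Tx done */
	};
	struct brcm_message msg = {
		.type = BRCM_MESSAGE_SPU,
		.spu.src = src,
		.spu.dst = dst,
	};
	struct mbox_chan *chan;
	int ret;

	chan = mbox_request_channel(&cl, 0);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = mbox_send_message(chan, &msg);	/* waits because tx_block is set */
	mbox_free_channel(chan);

	return (ret < 0) ? ret : msg.error;
}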
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt
new file mode 100644
index 000000000000..752ae6b00d26
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/brcm,iproc-flexrm-mbox.txt
@@ -0,0 +1,59 @@
1Broadcom FlexRM Ring Manager
2============================
3The Broadcom FlexRM ring manager provides a set of rings which can be
4used to submit work to offload engines. An SoC may have multiple FlexRM
5hardware blocks. There is one device tree entry per FlexRM block. The
6FlexRM driver will create a mailbox-controller instance for a given FlexRM
7hardware block where each mailbox channel is a separate FlexRM ring.
8
9Required properties:
10--------------------
11- compatible: Should be "brcm,iproc-flexrm-mbox"
12- reg: Specifies base physical address and size of the FlexRM
13 ring registers
14- msi-parent: Phandles (and potential Device IDs) to MSI controllers
15 The FlexRM engine will send MSIs (instead of wired
16 interrupts) to the CPU. There is one MSI for each FlexRM ring.
17 Refer to devicetree/bindings/interrupt-controller/msi.txt
18- #mbox-cells: Specifies the number of cells needed to encode a mailbox
19 channel. This should be 3.
20
21 The 1st cell is the mailbox channel number.
22
23 The 2nd cell contains the MSI completion threshold. This is the
24 number of completion messages for which FlexRM will inject
25 one MSI interrupt to the CPU.
26
27 The 3rd cell contains the MSI timer value, i.e. the time for
28 which FlexRM will wait to accumulate N completion messages,
29 where N is the value specified by the 2nd cell above. If FlexRM
30 does not receive the required number of completion messages in
31 the time specified by this cell, it will inject one MSI interrupt
32 to the CPU provided at least one completion message is available.
33
34Optional properties:
35--------------------
36- dma-coherent: Present if DMA operations made by the FlexRM engine (such
37 as DMA descriptor access, access to buffers pointed to by DMA
38 descriptors and read/write pointer updates to DDR) are
39 cache coherent with the CPU.
40
41Example:
42--------
43crypto_mbox: mbox@67000000 {
44 compatible = "brcm,iproc-flexrm-mbox";
45 reg = <0x67000000 0x200000>;
46 msi-parent = <&gic_its 0x7f00>;
47 #mbox-cells = <3>;
48};
49
50crypto@672c0000 {
51 compatible = "brcm,spu2-v2-crypto";
52 reg = <0x672c0000 0x1000>;
53 mboxes = <&crypto_mbox 0 0x1 0xffff>,
54 <&crypto_mbox 1 0x1 0xffff>,
55 <&crypto_mbox 16 0x1 0xffff>,
56 <&crypto_mbox 17 0x1 0xffff>,
57 <&crypto_mbox 30 0x1 0xffff>,
58 <&crypto_mbox 31 0x1 0xffff>;
59};
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
index 411ccf421584..0f3ee81d92c2 100644
--- a/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
+++ b/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
@@ -1,9 +1,11 @@
 The PDC driver manages data transfer to and from various offload engines
 on some Broadcom SoCs. An SoC may have multiple PDC hardware blocks. There is
-one device tree entry per block.
+one device tree entry per block. On some chips, the PDC functionality is
+handled by the FA2 (Northstar Plus).
 
 Required properties:
-- compatible : Should be "brcm,iproc-pdc-mbox".
+- compatible : Should be "brcm,iproc-pdc-mbox" or "brcm,iproc-fa2-mbox" for
+  FA2/Northstar Plus.
 - reg: Should contain PDC registers location and length.
 - interrupts: Should contain the IRQ line for the PDC.
 - #mbox-cells: 1
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index ceff415f201c..ee1a3d9147ef 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -144,12 +144,22 @@ config XGENE_SLIMPRO_MBOX
 	  want to use the APM X-Gene SLIMpro IPCM support.
 
 config BCM_PDC_MBOX
-	tristate "Broadcom PDC Mailbox"
-	depends on ARM64 || COMPILE_TEST
+	tristate "Broadcom FlexSparx DMA Mailbox"
+	depends on ARCH_BCM_IPROC || COMPILE_TEST
 	depends on HAS_DMA
+	help
+	  Mailbox implementation for the Broadcom FlexSparx DMA ring manager,
+	  which provides access to various offload engines on Broadcom
+	  SoCs, including FA2/FA+ on Northstar Plus and PDC on Northstar 2.
+
+config BCM_FLEXRM_MBOX
+	tristate "Broadcom FlexRM Mailbox"
+	depends on ARM64
+	depends on HAS_DMA
+	select GENERIC_MSI_IRQ_DOMAIN
 	default ARCH_BCM_IPROC
 	help
-	  Mailbox implementation for the Broadcom PDC ring manager,
+	  Mailbox implementation of the Broadcom FlexRM ring manager,
 	  which provides access to various offload engines on Broadcom
-	  SoCs. Say Y here if you want to use the Broadcom PDC.
+	  SoCs. Say Y here if you want to use the Broadcom FlexRM.
 endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 7dde4f609ae8..e2bcb03cd35b 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -30,4 +30,6 @@ obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
 
 obj-$(CONFIG_BCM_PDC_MBOX)	+= bcm-pdc-mailbox.o
 
+obj-$(CONFIG_BCM_FLEXRM_MBOX)	+= bcm-flexrm-mailbox.o
+
 obj-$(CONFIG_TEGRA_HSP_MBOX)	+= tegra-hsp.o
diff --git a/drivers/mailbox/bcm-flexrm-mailbox.c b/drivers/mailbox/bcm-flexrm-mailbox.c
new file mode 100644
index 000000000000..da67882caa7b
--- /dev/null
+++ b/drivers/mailbox/bcm-flexrm-mailbox.c
@@ -0,0 +1,1595 @@
1/* Broadcom FlexRM Mailbox Driver
2 *
3 * Copyright (C) 2017 Broadcom
4 *
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
8 *
9 * Each Broadcom FlexSparx4 offload engine is implemented as an
10 * extension to the Broadcom FlexRM ring manager. The FlexRM ring
11 * manager provides a set of rings which can be used to submit
12 * work to a FlexSparx4 offload engine.
13 *
14 * This driver creates a mailbox controller using a set of FlexRM
15 * rings where each mailbox channel represents a separate FlexRM ring.
16 */
17
18#include <asm/barrier.h>
19#include <asm/byteorder.h>
20#include <linux/delay.h>
21#include <linux/device.h>
22#include <linux/dma-mapping.h>
23#include <linux/dmapool.h>
24#include <linux/err.h>
25#include <linux/idr.h>
26#include <linux/interrupt.h>
27#include <linux/kernel.h>
28#include <linux/mailbox_controller.h>
29#include <linux/mailbox_client.h>
30#include <linux/mailbox/brcm-message.h>
31#include <linux/module.h>
32#include <linux/msi.h>
33#include <linux/of_address.h>
34#include <linux/of_irq.h>
35#include <linux/platform_device.h>
36#include <linux/spinlock.h>
37
38/* ====== FlexRM register defines ===== */
39
40/* FlexRM configuration */
41#define RING_REGS_SIZE 0x10000
42#define RING_DESC_SIZE 8
43#define RING_DESC_INDEX(offset) \
44 ((offset) / RING_DESC_SIZE)
45#define RING_DESC_OFFSET(index) \
46 ((index) * RING_DESC_SIZE)
47#define RING_MAX_REQ_COUNT 1024
48#define RING_BD_ALIGN_ORDER 12
49#define RING_BD_ALIGN_CHECK(addr) \
50 (!((addr) & ((0x1 << RING_BD_ALIGN_ORDER) - 1)))
51#define RING_BD_TOGGLE_INVALID(offset) \
52 (((offset) >> RING_BD_ALIGN_ORDER) & 0x1)
53#define RING_BD_TOGGLE_VALID(offset) \
54 (!RING_BD_TOGGLE_INVALID(offset))
55#define RING_BD_DESC_PER_REQ 32
56#define RING_BD_DESC_COUNT \
57 (RING_MAX_REQ_COUNT * RING_BD_DESC_PER_REQ)
58#define RING_BD_SIZE \
59 (RING_BD_DESC_COUNT * RING_DESC_SIZE)
60#define RING_CMPL_ALIGN_ORDER 13
61#define RING_CMPL_DESC_COUNT RING_MAX_REQ_COUNT
62#define RING_CMPL_SIZE \
63 (RING_CMPL_DESC_COUNT * RING_DESC_SIZE)
64#define RING_VER_MAGIC 0x76303031
65
66/* Per-Ring register offsets */
67#define RING_VER 0x000
68#define RING_BD_START_ADDR 0x004
69#define RING_BD_READ_PTR 0x008
70#define RING_BD_WRITE_PTR 0x00c
71#define RING_BD_READ_PTR_DDR_LS 0x010
72#define RING_BD_READ_PTR_DDR_MS 0x014
73#define RING_CMPL_START_ADDR 0x018
74#define RING_CMPL_WRITE_PTR 0x01c
75#define RING_NUM_REQ_RECV_LS 0x020
76#define RING_NUM_REQ_RECV_MS 0x024
77#define RING_NUM_REQ_TRANS_LS 0x028
78#define RING_NUM_REQ_TRANS_MS 0x02c
79#define RING_NUM_REQ_OUTSTAND 0x030
80#define RING_CONTROL 0x034
81#define RING_FLUSH_DONE 0x038
82#define RING_MSI_ADDR_LS 0x03c
83#define RING_MSI_ADDR_MS 0x040
84#define RING_MSI_CONTROL 0x048
85#define RING_BD_READ_PTR_DDR_CONTROL 0x04c
86#define RING_MSI_DATA_VALUE 0x064
87
88/* Register RING_BD_START_ADDR fields */
89#define BD_LAST_UPDATE_HW_SHIFT 28
90#define BD_LAST_UPDATE_HW_MASK 0x1
91#define BD_START_ADDR_VALUE(pa) \
92 ((u32)((((dma_addr_t)(pa)) >> RING_BD_ALIGN_ORDER) & 0x0fffffff))
93#define BD_START_ADDR_DECODE(val) \
94 ((dma_addr_t)((val) & 0x0fffffff) << RING_BD_ALIGN_ORDER)
95
96/* Register RING_CMPL_START_ADDR fields */
97#define CMPL_START_ADDR_VALUE(pa) \
98 ((u32)((((u64)(pa)) >> RING_CMPL_ALIGN_ORDER) & 0x03ffffff))
99
100/* Register RING_CONTROL fields */
101#define CONTROL_MASK_DISABLE_CONTROL 12
102#define CONTROL_FLUSH_SHIFT 5
103#define CONTROL_ACTIVE_SHIFT 4
104#define CONTROL_RATE_ADAPT_MASK 0xf
105#define CONTROL_RATE_DYNAMIC 0x0
106#define CONTROL_RATE_FAST 0x8
107#define CONTROL_RATE_MEDIUM 0x9
108#define CONTROL_RATE_SLOW 0xa
109#define CONTROL_RATE_IDLE 0xb
110
111/* Register RING_FLUSH_DONE fields */
112#define FLUSH_DONE_MASK 0x1
113
114/* Register RING_MSI_CONTROL fields */
115#define MSI_TIMER_VAL_SHIFT 16
116#define MSI_TIMER_VAL_MASK 0xffff
117#define MSI_ENABLE_SHIFT 15
118#define MSI_ENABLE_MASK 0x1
119#define MSI_COUNT_SHIFT 0
120#define MSI_COUNT_MASK 0x3ff
121
122/* Register RING_BD_READ_PTR_DDR_CONTROL fields */
123#define BD_READ_PTR_DDR_TIMER_VAL_SHIFT 16
124#define BD_READ_PTR_DDR_TIMER_VAL_MASK 0xffff
125#define BD_READ_PTR_DDR_ENABLE_SHIFT 15
126#define BD_READ_PTR_DDR_ENABLE_MASK 0x1
127
128/* ====== FlexRM ring descriptor defines ===== */
129
130/* Completion descriptor format */
131#define CMPL_OPAQUE_SHIFT 0
132#define CMPL_OPAQUE_MASK 0xffff
133#define CMPL_ENGINE_STATUS_SHIFT 16
134#define CMPL_ENGINE_STATUS_MASK 0xffff
135#define CMPL_DME_STATUS_SHIFT 32
136#define CMPL_DME_STATUS_MASK 0xffff
137#define CMPL_RM_STATUS_SHIFT 48
138#define CMPL_RM_STATUS_MASK 0xffff
139
140/* Completion DME status code */
141#define DME_STATUS_MEM_COR_ERR BIT(0)
142#define DME_STATUS_MEM_UCOR_ERR BIT(1)
143#define DME_STATUS_FIFO_UNDERFLOW BIT(2)
144#define DME_STATUS_FIFO_OVERFLOW BIT(3)
145#define DME_STATUS_RRESP_ERR BIT(4)
146#define DME_STATUS_BRESP_ERR BIT(5)
147#define DME_STATUS_ERROR_MASK (DME_STATUS_MEM_COR_ERR | \
148 DME_STATUS_MEM_UCOR_ERR | \
149 DME_STATUS_FIFO_UNDERFLOW | \
150 DME_STATUS_FIFO_OVERFLOW | \
151 DME_STATUS_RRESP_ERR | \
152 DME_STATUS_BRESP_ERR)
153
154/* Completion RM status code */
155#define RM_STATUS_CODE_SHIFT 0
156#define RM_STATUS_CODE_MASK 0x3ff
157#define RM_STATUS_CODE_GOOD 0x0
158#define RM_STATUS_CODE_AE_TIMEOUT 0x3ff
159
160/* General descriptor format */
161#define DESC_TYPE_SHIFT 60
162#define DESC_TYPE_MASK 0xf
163#define DESC_PAYLOAD_SHIFT 0
164#define DESC_PAYLOAD_MASK 0x0fffffffffffffff
165
166/* Null descriptor format */
167#define NULL_TYPE 0
168#define NULL_TOGGLE_SHIFT 58
169#define NULL_TOGGLE_MASK 0x1
170
171/* Header descriptor format */
172#define HEADER_TYPE 1
173#define HEADER_TOGGLE_SHIFT 58
174#define HEADER_TOGGLE_MASK 0x1
175#define HEADER_ENDPKT_SHIFT 57
176#define HEADER_ENDPKT_MASK 0x1
177#define HEADER_STARTPKT_SHIFT 56
178#define HEADER_STARTPKT_MASK 0x1
179#define HEADER_BDCOUNT_SHIFT 36
180#define HEADER_BDCOUNT_MASK 0x1f
181#define HEADER_BDCOUNT_MAX HEADER_BDCOUNT_MASK
182#define HEADER_FLAGS_SHIFT 16
183#define HEADER_FLAGS_MASK 0xffff
184#define HEADER_OPAQUE_SHIFT 0
185#define HEADER_OPAQUE_MASK 0xffff
186
187/* Source (SRC) descriptor format */
188#define SRC_TYPE 2
189#define SRC_LENGTH_SHIFT 44
190#define SRC_LENGTH_MASK 0xffff
191#define SRC_ADDR_SHIFT 0
192#define SRC_ADDR_MASK 0x00000fffffffffff
193
194/* Destination (DST) descriptor format */
195#define DST_TYPE 3
196#define DST_LENGTH_SHIFT 44
197#define DST_LENGTH_MASK 0xffff
198#define DST_ADDR_SHIFT 0
199#define DST_ADDR_MASK 0x00000fffffffffff
200
201/* Immediate (IMM) descriptor format */
202#define IMM_TYPE 4
203#define IMM_DATA_SHIFT 0
204#define IMM_DATA_MASK 0x0fffffffffffffff
205
206/* Next pointer (NPTR) descriptor format */
207#define NPTR_TYPE 5
208#define NPTR_TOGGLE_SHIFT 58
209#define NPTR_TOGGLE_MASK 0x1
210#define NPTR_ADDR_SHIFT 0
211#define NPTR_ADDR_MASK 0x00000fffffffffff
212
213/* Mega source (MSRC) descriptor format */
214#define MSRC_TYPE 6
215#define MSRC_LENGTH_SHIFT 44
216#define MSRC_LENGTH_MASK 0xffff
217#define MSRC_ADDR_SHIFT 0
218#define MSRC_ADDR_MASK 0x00000fffffffffff
219
220/* Mega destination (MDST) descriptor format */
221#define MDST_TYPE 7
222#define MDST_LENGTH_SHIFT 44
223#define MDST_LENGTH_MASK 0xffff
224#define MDST_ADDR_SHIFT 0
225#define MDST_ADDR_MASK 0x00000fffffffffff
226
227/* Source with tlast (SRCT) descriptor format */
228#define SRCT_TYPE 8
229#define SRCT_LENGTH_SHIFT 44
230#define SRCT_LENGTH_MASK 0xffff
231#define SRCT_ADDR_SHIFT 0
232#define SRCT_ADDR_MASK 0x00000fffffffffff
233
234/* Destination with tlast (DSTT) descriptor format */
235#define DSTT_TYPE 9
236#define DSTT_LENGTH_SHIFT 44
237#define DSTT_LENGTH_MASK 0xffff
238#define DSTT_ADDR_SHIFT 0
239#define DSTT_ADDR_MASK 0x00000fffffffffff
240
241/* Immediate with tlast (IMMT) descriptor format */
242#define IMMT_TYPE 10
243#define IMMT_DATA_SHIFT 0
244#define IMMT_DATA_MASK 0x0fffffffffffffff
245
246/* Descriptor helper macros */
247#define DESC_DEC(_d, _s, _m) (((_d) >> (_s)) & (_m))
248#define DESC_ENC(_d, _v, _s, _m) \
249 do { \
250 (_d) &= ~((u64)(_m) << (_s)); \
251 (_d) |= (((u64)(_v) & (_m)) << (_s)); \
252 } while (0)
253
254/* ====== FlexRM data structures ===== */
255
256struct flexrm_ring {
257 /* Unprotected members */
258 int num;
259 struct flexrm_mbox *mbox;
260 void __iomem *regs;
261 bool irq_requested;
262 unsigned int irq;
263 unsigned int msi_timer_val;
264 unsigned int msi_count_threshold;
265 struct ida requests_ida;
266 struct brcm_message *requests[RING_MAX_REQ_COUNT];
267 void *bd_base;
268 dma_addr_t bd_dma_base;
269 u32 bd_write_offset;
270 void *cmpl_base;
271 dma_addr_t cmpl_dma_base;
272 /* Protected members */
273 spinlock_t lock;
274 struct brcm_message *last_pending_msg;
275 u32 cmpl_read_offset;
276};
277
278struct flexrm_mbox {
279 struct device *dev;
280 void __iomem *regs;
281 u32 num_rings;
282 struct flexrm_ring *rings;
283 struct dma_pool *bd_pool;
284 struct dma_pool *cmpl_pool;
285 struct mbox_controller controller;
286};
287
288/* ====== FlexRM ring descriptor helper routines ===== */
289
290static u64 flexrm_read_desc(void *desc_ptr)
291{
292 return le64_to_cpu(*((u64 *)desc_ptr));
293}
294
295static void flexrm_write_desc(void *desc_ptr, u64 desc)
296{
297 *((u64 *)desc_ptr) = cpu_to_le64(desc);
298}
299
300static u32 flexrm_cmpl_desc_to_reqid(u64 cmpl_desc)
301{
302 return (u32)(cmpl_desc & CMPL_OPAQUE_MASK);
303}
304
305static int flexrm_cmpl_desc_to_error(u64 cmpl_desc)
306{
307 u32 status;
308
309 status = DESC_DEC(cmpl_desc, CMPL_DME_STATUS_SHIFT,
310 CMPL_DME_STATUS_MASK);
311 if (status & DME_STATUS_ERROR_MASK)
312 return -EIO;
313
314 status = DESC_DEC(cmpl_desc, CMPL_RM_STATUS_SHIFT,
315 CMPL_RM_STATUS_MASK);
316 status &= RM_STATUS_CODE_MASK;
317 if (status == RM_STATUS_CODE_AE_TIMEOUT)
318 return -ETIMEDOUT;
319
320 return 0;
321}
322
323static bool flexrm_is_next_table_desc(void *desc_ptr)
324{
325 u64 desc = flexrm_read_desc(desc_ptr);
326 u32 type = DESC_DEC(desc, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
327
328 return (type == NPTR_TYPE) ? true : false;
329}
330
331static u64 flexrm_next_table_desc(u32 toggle, dma_addr_t next_addr)
332{
333 u64 desc = 0;
334
335 DESC_ENC(desc, NPTR_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
336 DESC_ENC(desc, toggle, NPTR_TOGGLE_SHIFT, NPTR_TOGGLE_MASK);
337 DESC_ENC(desc, next_addr, NPTR_ADDR_SHIFT, NPTR_ADDR_MASK);
338
339 return desc;
340}
341
342static u64 flexrm_null_desc(u32 toggle)
343{
344 u64 desc = 0;
345
346 DESC_ENC(desc, NULL_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
347 DESC_ENC(desc, toggle, NULL_TOGGLE_SHIFT, NULL_TOGGLE_MASK);
348
349 return desc;
350}
351
352static u32 flexrm_estimate_header_desc_count(u32 nhcnt)
353{
354 u32 hcnt = nhcnt / HEADER_BDCOUNT_MAX;
355
356 if (!(nhcnt % HEADER_BDCOUNT_MAX))
357 hcnt += 1;
358
359 return hcnt;
360}
361
362static void flexrm_flip_header_toogle(void *desc_ptr)
363{
364 u64 desc = flexrm_read_desc(desc_ptr);
365
366 if (desc & ((u64)0x1 << HEADER_TOGGLE_SHIFT))
367 desc &= ~((u64)0x1 << HEADER_TOGGLE_SHIFT);
368 else
369 desc |= ((u64)0x1 << HEADER_TOGGLE_SHIFT);
370
371 flexrm_write_desc(desc_ptr, desc);
372}
373
374static u64 flexrm_header_desc(u32 toggle, u32 startpkt, u32 endpkt,
375 u32 bdcount, u32 flags, u32 opaque)
376{
377 u64 desc = 0;
378
379 DESC_ENC(desc, HEADER_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
380 DESC_ENC(desc, toggle, HEADER_TOGGLE_SHIFT, HEADER_TOGGLE_MASK);
381 DESC_ENC(desc, startpkt, HEADER_STARTPKT_SHIFT, HEADER_STARTPKT_MASK);
382 DESC_ENC(desc, endpkt, HEADER_ENDPKT_SHIFT, HEADER_ENDPKT_MASK);
383 DESC_ENC(desc, bdcount, HEADER_BDCOUNT_SHIFT, HEADER_BDCOUNT_MASK);
384 DESC_ENC(desc, flags, HEADER_FLAGS_SHIFT, HEADER_FLAGS_MASK);
385 DESC_ENC(desc, opaque, HEADER_OPAQUE_SHIFT, HEADER_OPAQUE_MASK);
386
387 return desc;
388}
389
390static void flexrm_enqueue_desc(u32 nhpos, u32 nhcnt, u32 reqid,
391 u64 desc, void **desc_ptr, u32 *toggle,
392 void *start_desc, void *end_desc)
393{
394 u64 d;
395 u32 nhavail, _toggle, _startpkt, _endpkt, _bdcount;
396
397 /* Sanity check */
398 if (nhcnt <= nhpos)
399 return;
400
401 /*
402 * Each request or packet starts with a HEADER descriptor followed
403 * by one or more non-HEADER descriptors (SRC, SRCT, MSRC, DST,
404 * DSTT, MDST, IMM, and IMMT). The number of non-HEADER descriptors
405 * following a HEADER descriptor is represented by BDCOUNT field
406 * of HEADER descriptor. The max value of BDCOUNT field is 31 which
407 * means we can only have 31 non-HEADER descriptors following one
408 * HEADER descriptor.
409 *
410 * In general use, number of non-HEADER descriptors can easily go
411 * beyond 31. To tackle this situation, we have packet (or request)
412 * extension bits (STARTPKT and ENDPKT) in the HEADER descriptor.
413 *
414 * To use packet extension, the first HEADER descriptor of request
415 * (or packet) will have STARTPKT=1 and ENDPKT=0. The intermediate
416 * HEADER descriptors will have STARTPKT=0 and ENDPKT=0. The last
417 * HEADER descriptor will have STARTPKT=0 and ENDPKT=1. Also, the
418 * TOGGLE bit of the first HEADER will be set to invalid state to
419 * ensure that FlexRM does not start fetching descriptors till all
420 * descriptors are enqueued. The user of this function will flip
421 * the TOGGLE bit of the first HEADER after all descriptors are
422 * enqueued.
423 */
424
425 if ((nhpos % HEADER_BDCOUNT_MAX == 0) && (nhcnt - nhpos)) {
426 /* Prepare the header descriptor */
427 nhavail = (nhcnt - nhpos);
428 _toggle = (nhpos == 0) ? !(*toggle) : (*toggle);
429 _startpkt = (nhpos == 0) ? 0x1 : 0x0;
430 _endpkt = (nhavail <= HEADER_BDCOUNT_MAX) ? 0x1 : 0x0;
431 _bdcount = (nhavail <= HEADER_BDCOUNT_MAX) ?
432 nhavail : HEADER_BDCOUNT_MAX;
433 if (nhavail <= HEADER_BDCOUNT_MAX)
434 _bdcount = nhavail;
435 else
436 _bdcount = HEADER_BDCOUNT_MAX;
437 d = flexrm_header_desc(_toggle, _startpkt, _endpkt,
438 _bdcount, 0x0, reqid);
439
440 /* Write header descriptor */
441 flexrm_write_desc(*desc_ptr, d);
442
443 /* Point to next descriptor */
444 *desc_ptr += sizeof(desc);
445 if (*desc_ptr == end_desc)
446 *desc_ptr = start_desc;
447
448 /* Skip next pointer descriptors */
449 while (flexrm_is_next_table_desc(*desc_ptr)) {
450 *toggle = (*toggle) ? 0 : 1;
451 *desc_ptr += sizeof(desc);
452 if (*desc_ptr == end_desc)
453 *desc_ptr = start_desc;
454 }
455 }
456
457 /* Write desired descriptor */
458 flexrm_write_desc(*desc_ptr, desc);
459
460 /* Point to next descriptor */
461 *desc_ptr += sizeof(desc);
462 if (*desc_ptr == end_desc)
463 *desc_ptr = start_desc;
464
465 /* Skip next pointer descriptors */
466 while (flexrm_is_next_table_desc(*desc_ptr)) {
467 *toggle = (*toggle) ? 0 : 1;
468 *desc_ptr += sizeof(desc);
469 if (*desc_ptr == end_desc)
470 *desc_ptr = start_desc;
471 }
472}
473
474static u64 flexrm_src_desc(dma_addr_t addr, unsigned int length)
475{
476 u64 desc = 0;
477
478 DESC_ENC(desc, SRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
479 DESC_ENC(desc, length, SRC_LENGTH_SHIFT, SRC_LENGTH_MASK);
480 DESC_ENC(desc, addr, SRC_ADDR_SHIFT, SRC_ADDR_MASK);
481
482 return desc;
483}
484
485static u64 flexrm_msrc_desc(dma_addr_t addr, unsigned int length_div_16)
486{
487 u64 desc = 0;
488
489 DESC_ENC(desc, MSRC_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
490 DESC_ENC(desc, length_div_16, MSRC_LENGTH_SHIFT, MSRC_LENGTH_MASK);
491 DESC_ENC(desc, addr, MSRC_ADDR_SHIFT, MSRC_ADDR_MASK);
492
493 return desc;
494}
495
496static u64 flexrm_dst_desc(dma_addr_t addr, unsigned int length)
497{
498 u64 desc = 0;
499
500 DESC_ENC(desc, DST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
501 DESC_ENC(desc, length, DST_LENGTH_SHIFT, DST_LENGTH_MASK);
502 DESC_ENC(desc, addr, DST_ADDR_SHIFT, DST_ADDR_MASK);
503
504 return desc;
505}
506
507static u64 flexrm_mdst_desc(dma_addr_t addr, unsigned int length_div_16)
508{
509 u64 desc = 0;
510
511 DESC_ENC(desc, MDST_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
512 DESC_ENC(desc, length_div_16, MDST_LENGTH_SHIFT, MDST_LENGTH_MASK);
513 DESC_ENC(desc, addr, MDST_ADDR_SHIFT, MDST_ADDR_MASK);
514
515 return desc;
516}
517
518static u64 flexrm_imm_desc(u64 data)
519{
520 u64 desc = 0;
521
522 DESC_ENC(desc, IMM_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
523 DESC_ENC(desc, data, IMM_DATA_SHIFT, IMM_DATA_MASK);
524
525 return desc;
526}
527
528static u64 flexrm_srct_desc(dma_addr_t addr, unsigned int length)
529{
530 u64 desc = 0;
531
532 DESC_ENC(desc, SRCT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
533 DESC_ENC(desc, length, SRCT_LENGTH_SHIFT, SRCT_LENGTH_MASK);
534 DESC_ENC(desc, addr, SRCT_ADDR_SHIFT, SRCT_ADDR_MASK);
535
536 return desc;
537}
538
539static u64 flexrm_dstt_desc(dma_addr_t addr, unsigned int length)
540{
541 u64 desc = 0;
542
543 DESC_ENC(desc, DSTT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
544 DESC_ENC(desc, length, DSTT_LENGTH_SHIFT, DSTT_LENGTH_MASK);
545 DESC_ENC(desc, addr, DSTT_ADDR_SHIFT, DSTT_ADDR_MASK);
546
547 return desc;
548}
549
550static u64 flexrm_immt_desc(u64 data)
551{
552 u64 desc = 0;
553
554 DESC_ENC(desc, IMMT_TYPE, DESC_TYPE_SHIFT, DESC_TYPE_MASK);
555 DESC_ENC(desc, data, IMMT_DATA_SHIFT, IMMT_DATA_MASK);
556
557 return desc;
558}
559
560static bool flexrm_spu_sanity_check(struct brcm_message *msg)
561{
562 struct scatterlist *sg;
563
564 if (!msg->spu.src || !msg->spu.dst)
565 return false;
566 for (sg = msg->spu.src; sg; sg = sg_next(sg)) {
567 if (sg->length & 0xf) {
568 if (sg->length > SRC_LENGTH_MASK)
569 return false;
570 } else {
571 if (sg->length > (MSRC_LENGTH_MASK * 16))
572 return false;
573 }
574 }
575 for (sg = msg->spu.dst; sg; sg = sg_next(sg)) {
576 if (sg->length & 0xf) {
577 if (sg->length > DST_LENGTH_MASK)
578 return false;
579 } else {
580 if (sg->length > (MDST_LENGTH_MASK * 16))
581 return false;
582 }
583 }
584
585 return true;
586}
587
588static u32 flexrm_spu_estimate_nonheader_desc_count(struct brcm_message *msg)
589{
590 u32 cnt = 0;
591 unsigned int dst_target = 0;
592 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
593
594 while (src_sg || dst_sg) {
595 if (src_sg) {
596 cnt++;
597 dst_target = src_sg->length;
598 src_sg = sg_next(src_sg);
599 } else
600 dst_target = UINT_MAX;
601
602 while (dst_target && dst_sg) {
603 cnt++;
604 if (dst_sg->length < dst_target)
605 dst_target -= dst_sg->length;
606 else
607 dst_target = 0;
608 dst_sg = sg_next(dst_sg);
609 }
610 }
611
612 return cnt;
613}
614
615static int flexrm_spu_dma_map(struct device *dev, struct brcm_message *msg)
616{
617 int rc;
618
619 rc = dma_map_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
620 DMA_TO_DEVICE);
621 if (rc < 0)
622 return rc;
623
624 rc = dma_map_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
625 DMA_FROM_DEVICE);
626 if (rc < 0) {
627 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
628 DMA_TO_DEVICE);
629 return rc;
630 }
631
632 return 0;
633}
634
635static void flexrm_spu_dma_unmap(struct device *dev, struct brcm_message *msg)
636{
637 dma_unmap_sg(dev, msg->spu.dst, sg_nents(msg->spu.dst),
638 DMA_FROM_DEVICE);
639 dma_unmap_sg(dev, msg->spu.src, sg_nents(msg->spu.src),
640 DMA_TO_DEVICE);
641}
642
643static void *flexrm_spu_write_descs(struct brcm_message *msg, u32 nhcnt,
644 u32 reqid, void *desc_ptr, u32 toggle,
645 void *start_desc, void *end_desc)
646{
647 u64 d;
648 u32 nhpos = 0;
649 void *orig_desc_ptr = desc_ptr;
650 unsigned int dst_target = 0;
651 struct scatterlist *src_sg = msg->spu.src, *dst_sg = msg->spu.dst;
652
653 while (src_sg || dst_sg) {
654 if (src_sg) {
655 if (sg_dma_len(src_sg) & 0xf)
656 d = flexrm_src_desc(sg_dma_address(src_sg),
657 sg_dma_len(src_sg));
658 else
659 d = flexrm_msrc_desc(sg_dma_address(src_sg),
660 sg_dma_len(src_sg)/16);
661 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
662 d, &desc_ptr, &toggle,
663 start_desc, end_desc);
664 nhpos++;
665 dst_target = sg_dma_len(src_sg);
666 src_sg = sg_next(src_sg);
667 } else
668 dst_target = UINT_MAX;
669
670 while (dst_target && dst_sg) {
671 if (sg_dma_len(dst_sg) & 0xf)
672 d = flexrm_dst_desc(sg_dma_address(dst_sg),
673 sg_dma_len(dst_sg));
674 else
675 d = flexrm_mdst_desc(sg_dma_address(dst_sg),
676 sg_dma_len(dst_sg)/16);
677 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
678 d, &desc_ptr, &toggle,
679 start_desc, end_desc);
680 nhpos++;
681 if (sg_dma_len(dst_sg) < dst_target)
682 dst_target -= sg_dma_len(dst_sg);
683 else
684 dst_target = 0;
685 dst_sg = sg_next(dst_sg);
686 }
687 }
688
689 /* Null descriptor with invalid toggle bit */
690 flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
691
692 /* Ensure that descriptors have been written to memory */
693 wmb();
694
695 /* Flip toggle bit in header */
696 flexrm_flip_header_toogle(orig_desc_ptr);
697
698 return desc_ptr;
699}
700
701static bool flexrm_sba_sanity_check(struct brcm_message *msg)
702{
703 u32 i;
704
705 if (!msg->sba.cmds || !msg->sba.cmds_count)
706 return false;
707
708 for (i = 0; i < msg->sba.cmds_count; i++) {
709 if (((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
710 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C)) &&
711 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT))
712 return false;
713 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) &&
714 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
715 return false;
716 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C) &&
717 (msg->sba.cmds[i].data_len > SRCT_LENGTH_MASK))
718 return false;
719 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP) &&
720 (msg->sba.cmds[i].resp_len > DSTT_LENGTH_MASK))
721 return false;
722 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT) &&
723 (msg->sba.cmds[i].data_len > DSTT_LENGTH_MASK))
724 return false;
725 }
726
727 return true;
728}
729
730static u32 flexrm_sba_estimate_nonheader_desc_count(struct brcm_message *msg)
731{
732 u32 i, cnt;
733
734 cnt = 0;
735 for (i = 0; i < msg->sba.cmds_count; i++) {
736 cnt++;
737
738 if ((msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_B) ||
739 (msg->sba.cmds[i].flags & BRCM_SBA_CMD_TYPE_C))
740 cnt++;
741
742 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_RESP)
743 cnt++;
744
745 if (msg->sba.cmds[i].flags & BRCM_SBA_CMD_HAS_OUTPUT)
746 cnt++;
747 }
748
749 return cnt;
750}
751
752static void *flexrm_sba_write_descs(struct brcm_message *msg, u32 nhcnt,
753 u32 reqid, void *desc_ptr, u32 toggle,
754 void *start_desc, void *end_desc)
755{
756 u64 d;
757 u32 i, nhpos = 0;
758 struct brcm_sba_command *c;
759 void *orig_desc_ptr = desc_ptr;
760
761 /* Convert SBA commands into descriptors */
762 for (i = 0; i < msg->sba.cmds_count; i++) {
763 c = &msg->sba.cmds[i];
764
765 if ((c->flags & BRCM_SBA_CMD_HAS_RESP) &&
766 (c->flags & BRCM_SBA_CMD_HAS_OUTPUT)) {
767 /* Destination response descriptor */
768 d = flexrm_dst_desc(c->resp, c->resp_len);
769 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
770 d, &desc_ptr, &toggle,
771 start_desc, end_desc);
772 nhpos++;
773 } else if (c->flags & BRCM_SBA_CMD_HAS_RESP) {
774 /* Destination response with tlast descriptor */
775 d = flexrm_dstt_desc(c->resp, c->resp_len);
776 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
777 d, &desc_ptr, &toggle,
778 start_desc, end_desc);
779 nhpos++;
780 }
781
782 if (c->flags & BRCM_SBA_CMD_HAS_OUTPUT) {
783 /* Destination with tlast descriptor */
784 d = flexrm_dstt_desc(c->data, c->data_len);
785 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
786 d, &desc_ptr, &toggle,
787 start_desc, end_desc);
788 nhpos++;
789 }
790
791 if (c->flags & BRCM_SBA_CMD_TYPE_B) {
792 /* Command as immediate descriptor */
793 d = flexrm_imm_desc(c->cmd);
794 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
795 d, &desc_ptr, &toggle,
796 start_desc, end_desc);
797 nhpos++;
798 } else {
799 /* Command as immediate descriptor with tlast */
800 d = flexrm_immt_desc(c->cmd);
801 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
802 d, &desc_ptr, &toggle,
803 start_desc, end_desc);
804 nhpos++;
805 }
806
807 if ((c->flags & BRCM_SBA_CMD_TYPE_B) ||
808 (c->flags & BRCM_SBA_CMD_TYPE_C)) {
809 /* Source with tlast descriptor */
810 d = flexrm_srct_desc(c->data, c->data_len);
811 flexrm_enqueue_desc(nhpos, nhcnt, reqid,
812 d, &desc_ptr, &toggle,
813 start_desc, end_desc);
814 nhpos++;
815 }
816 }
817
818 /* Null descriptor with invalid toggle bit */
819 flexrm_write_desc(desc_ptr, flexrm_null_desc(!toggle));
820
821 /* Ensure that descriptors have been written to memory */
822 wmb();
823
824 /* Flip toggle bit in header */
825 flexrm_flip_header_toogle(orig_desc_ptr);
826
827 return desc_ptr;
828}
829
830static bool flexrm_sanity_check(struct brcm_message *msg)
831{
832 if (!msg)
833 return false;
834
835 switch (msg->type) {
836 case BRCM_MESSAGE_SPU:
837 return flexrm_spu_sanity_check(msg);
838 case BRCM_MESSAGE_SBA:
839 return flexrm_sba_sanity_check(msg);
840 default:
841 return false;
842 };
843}
844
845static u32 flexrm_estimate_nonheader_desc_count(struct brcm_message *msg)
846{
847 if (!msg)
848 return 0;
849
850 switch (msg->type) {
851 case BRCM_MESSAGE_SPU:
852 return flexrm_spu_estimate_nonheader_desc_count(msg);
853 case BRCM_MESSAGE_SBA:
854 return flexrm_sba_estimate_nonheader_desc_count(msg);
855 default:
856 return 0;
857 };
858}
859
860static int flexrm_dma_map(struct device *dev, struct brcm_message *msg)
861{
862 if (!dev || !msg)
863 return -EINVAL;
864
865 switch (msg->type) {
866 case BRCM_MESSAGE_SPU:
867 return flexrm_spu_dma_map(dev, msg);
868 default:
869 break;
870 }
871
872 return 0;
873}
874
875static void flexrm_dma_unmap(struct device *dev, struct brcm_message *msg)
876{
877 if (!dev || !msg)
878 return;
879
880 switch (msg->type) {
881 case BRCM_MESSAGE_SPU:
882 flexrm_spu_dma_unmap(dev, msg);
883 break;
884 default:
885 break;
886 }
887}
888
889static void *flexrm_write_descs(struct brcm_message *msg, u32 nhcnt,
890 u32 reqid, void *desc_ptr, u32 toggle,
891 void *start_desc, void *end_desc)
892{
893 if (!msg || !desc_ptr || !start_desc || !end_desc)
894 return ERR_PTR(-ENOTSUPP);
895
896 if ((desc_ptr < start_desc) || (end_desc <= desc_ptr))
897 return ERR_PTR(-ERANGE);
898
899 switch (msg->type) {
900 case BRCM_MESSAGE_SPU:
901 return flexrm_spu_write_descs(msg, nhcnt, reqid,
902 desc_ptr, toggle,
903 start_desc, end_desc);
904 case BRCM_MESSAGE_SBA:
905 return flexrm_sba_write_descs(msg, nhcnt, reqid,
906 desc_ptr, toggle,
907 start_desc, end_desc);
908 default:
909 return ERR_PTR(-ENOTSUPP);
910 };
911}
912
913/* ====== FlexRM driver helper routines ===== */
914
915static int flexrm_new_request(struct flexrm_ring *ring,
916 struct brcm_message *batch_msg,
917 struct brcm_message *msg)
918{
919 void *next;
920 unsigned long flags;
921 u32 val, count, nhcnt;
922 u32 read_offset, write_offset;
923 bool exit_cleanup = false;
924 int ret = 0, reqid;
925
926 /* Do sanity check on message */
927 if (!flexrm_sanity_check(msg))
928 return -EIO;
929 msg->error = 0;
930
931 /* If no requests possible then save data pointer and goto done. */
932 reqid = ida_simple_get(&ring->requests_ida, 0,
933 RING_MAX_REQ_COUNT, GFP_KERNEL);
934 if (reqid < 0) {
935 spin_lock_irqsave(&ring->lock, flags);
936 if (batch_msg)
937 ring->last_pending_msg = batch_msg;
938 else
939 ring->last_pending_msg = msg;
940 spin_unlock_irqrestore(&ring->lock, flags);
941 return 0;
942 }
943 ring->requests[reqid] = msg;
944
945 /* Do DMA mappings for the message */
946 ret = flexrm_dma_map(ring->mbox->dev, msg);
947 if (ret < 0) {
948 ring->requests[reqid] = NULL;
949 ida_simple_remove(&ring->requests_ida, reqid);
950 return ret;
951 }
952
953 /* If last_pending_msg is already set then goto done with error */
954 spin_lock_irqsave(&ring->lock, flags);
955 if (ring->last_pending_msg)
956 ret = -ENOSPC;
957 spin_unlock_irqrestore(&ring->lock, flags);
958 if (ret < 0) {
959 dev_warn(ring->mbox->dev, "no space in ring %d\n", ring->num);
960 exit_cleanup = true;
961 goto exit;
962 }
963
964 /* Determine current HW BD read offset */
965 read_offset = readl_relaxed(ring->regs + RING_BD_READ_PTR);
966 val = readl_relaxed(ring->regs + RING_BD_START_ADDR);
967 read_offset *= RING_DESC_SIZE;
968 read_offset += (u32)(BD_START_ADDR_DECODE(val) - ring->bd_dma_base);
969
970 /*
971 * Number of required descriptors = number of non-header descriptors +
972 * number of header descriptors +
973 * 1x null descriptor
974 */
975 nhcnt = flexrm_estimate_nonheader_desc_count(msg);
976 count = flexrm_estimate_header_desc_count(nhcnt) + nhcnt + 1;
977
978 /* Check for available descriptor space. */
979 write_offset = ring->bd_write_offset;
980 while (count) {
981 if (!flexrm_is_next_table_desc(ring->bd_base + write_offset))
982 count--;
983 write_offset += RING_DESC_SIZE;
984 if (write_offset == RING_BD_SIZE)
985 write_offset = 0x0;
986 if (write_offset == read_offset)
987 break;
988 }
989 if (count) {
990 spin_lock_irqsave(&ring->lock, flags);
991 if (batch_msg)
992 ring->last_pending_msg = batch_msg;
993 else
994 ring->last_pending_msg = msg;
995 spin_unlock_irqrestore(&ring->lock, flags);
996 ret = 0;
997 exit_cleanup = true;
998 goto exit;
999 }
1000
1001 /* Write descriptors to ring */
1002 next = flexrm_write_descs(msg, nhcnt, reqid,
1003 ring->bd_base + ring->bd_write_offset,
1004 RING_BD_TOGGLE_VALID(ring->bd_write_offset),
1005 ring->bd_base, ring->bd_base + RING_BD_SIZE);
1006 if (IS_ERR(next)) {
1007 ret = PTR_ERR(next);
1008 exit_cleanup = true;
1009 goto exit;
1010 }
1011
1012 /* Save ring BD write offset */
1013 ring->bd_write_offset = (unsigned long)(next - ring->bd_base);
1014
1015exit:
1016 /* Update error status in message */
1017 msg->error = ret;
1018
1019 /* Cleanup if we failed */
1020 if (exit_cleanup) {
1021 flexrm_dma_unmap(ring->mbox->dev, msg);
1022 ring->requests[reqid] = NULL;
1023 ida_simple_remove(&ring->requests_ida, reqid);
1024 }
1025
1026 return ret;
1027}
1028
1029static int flexrm_process_completions(struct flexrm_ring *ring)
1030{
1031 u64 desc;
1032 int err, count = 0;
1033 unsigned long flags;
1034 struct brcm_message *msg = NULL;
1035 u32 reqid, cmpl_read_offset, cmpl_write_offset;
1036 struct mbox_chan *chan = &ring->mbox->controller.chans[ring->num];
1037
1038 spin_lock_irqsave(&ring->lock, flags);
1039
1040 /* Check last_pending_msg */
1041 if (ring->last_pending_msg) {
1042 msg = ring->last_pending_msg;
1043 ring->last_pending_msg = NULL;
1044 }
1045
1046 /*
1047 * Get current completion read and write offset
1048 *
1049 * Note: We should read the completion write pointer at least once
1050 * after we get an MSI interrupt because HW maintains internal
1051 * MSI status which will allow the next MSI interrupt only after
1052 * the completion write pointer is read.
1053 */
1054 cmpl_write_offset = readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1055 cmpl_write_offset *= RING_DESC_SIZE;
1056 cmpl_read_offset = ring->cmpl_read_offset;
1057 ring->cmpl_read_offset = cmpl_write_offset;
1058
1059 spin_unlock_irqrestore(&ring->lock, flags);
1060
1061 /* If last_pending_msg was set then queue it back */
1062 if (msg)
1063 mbox_send_message(chan, msg);
1064
1065 /* For each completed request notify mailbox clients */
1066 reqid = 0;
1067 while (cmpl_read_offset != cmpl_write_offset) {
1068 /* Dequeue next completion descriptor */
1069 desc = *((u64 *)(ring->cmpl_base + cmpl_read_offset));
1070
1071 /* Next read offset */
1072 cmpl_read_offset += RING_DESC_SIZE;
1073 if (cmpl_read_offset == RING_CMPL_SIZE)
1074 cmpl_read_offset = 0;
1075
1076 /* Decode error from completion descriptor */
1077 err = flexrm_cmpl_desc_to_error(desc);
1078 if (err < 0) {
1079 dev_warn(ring->mbox->dev,
1080 "got completion desc=0x%lx with error %d",
1081 (unsigned long)desc, err);
1082 }
1083
1084 /* Determine request id from completion descriptor */
1085 reqid = flexrm_cmpl_desc_to_reqid(desc);
1086
1087 /* Determine message pointer based on reqid */
1088 msg = ring->requests[reqid];
1089 if (!msg) {
1090 dev_warn(ring->mbox->dev,
1091 "null msg pointer for completion desc=0x%lx",
1092 (unsigned long)desc);
1093 continue;
1094 }
1095
1096 /* Release reqid for recycling */
1097 ring->requests[reqid] = NULL;
1098 ida_simple_remove(&ring->requests_ida, reqid);
1099
1100 /* Unmap DMA mappings */
1101 flexrm_dma_unmap(ring->mbox->dev, msg);
1102
1103 /* Give-back message to mailbox client */
1104 msg->error = err;
1105 mbox_chan_received_data(chan, msg);
1106
1107 /* Increment number of completions processed */
1108 count++;
1109 }
1110
1111 return count;
1112}
1113
1114/* ====== FlexRM interrupt handler ===== */
1115
1116static irqreturn_t flexrm_irq_event(int irq, void *dev_id)
1117{
1118 /* We only have MSI for completions so just wake up the IRQ thread */
1119 /* Ring related errors will be informed via completion descriptors */
1120
1121 return IRQ_WAKE_THREAD;
1122}
1123
1124static irqreturn_t flexrm_irq_thread(int irq, void *dev_id)
1125{
1126 flexrm_process_completions(dev_id);
1127
1128 return IRQ_HANDLED;
1129}
1130
1131/* ====== FlexRM mailbox callbacks ===== */
1132
1133static int flexrm_send_data(struct mbox_chan *chan, void *data)
1134{
1135 int i, rc;
1136 struct flexrm_ring *ring = chan->con_priv;
1137 struct brcm_message *msg = data;
1138
1139 if (msg->type == BRCM_MESSAGE_BATCH) {
1140 for (i = msg->batch.msgs_queued;
1141 i < msg->batch.msgs_count; i++) {
1142 rc = flexrm_new_request(ring, msg,
1143 &msg->batch.msgs[i]);
1144 if (rc) {
1145 msg->error = rc;
1146 return rc;
1147 }
1148 msg->batch.msgs_queued++;
1149 }
1150 return 0;
1151 }
1152
1153 return flexrm_new_request(ring, NULL, data);
1154}
1155
1156static bool flexrm_peek_data(struct mbox_chan *chan)
1157{
1158 int cnt = flexrm_process_completions(chan->con_priv);
1159
1160 return (cnt > 0) ? true : false;
1161}
1162
1163static int flexrm_startup(struct mbox_chan *chan)
1164{
1165 u64 d;
1166 u32 val, off;
1167 int ret = 0;
1168 dma_addr_t next_addr;
1169 struct flexrm_ring *ring = chan->con_priv;
1170
1171 /* Allocate BD memory */
1172 ring->bd_base = dma_pool_alloc(ring->mbox->bd_pool,
1173 GFP_KERNEL, &ring->bd_dma_base);
1174 if (!ring->bd_base) {
1175 dev_err(ring->mbox->dev, "can't allocate BD memory\n");
1176 ret = -ENOMEM;
1177 goto fail;
1178 }
1179
1180 /* Configure next table pointer entries in BD memory */
1181 for (off = 0; off < RING_BD_SIZE; off += RING_DESC_SIZE) {
1182 next_addr = off + RING_DESC_SIZE;
1183 if (next_addr == RING_BD_SIZE)
1184 next_addr = 0;
1185 next_addr += ring->bd_dma_base;
1186 if (RING_BD_ALIGN_CHECK(next_addr))
1187 d = flexrm_next_table_desc(RING_BD_TOGGLE_VALID(off),
1188 next_addr);
1189 else
1190 d = flexrm_null_desc(RING_BD_TOGGLE_INVALID(off));
1191 flexrm_write_desc(ring->bd_base + off, d);
1192 }
1193
1194 /* Allocate completion memory */
1195 ring->cmpl_base = dma_pool_alloc(ring->mbox->cmpl_pool,
1196 GFP_KERNEL, &ring->cmpl_dma_base);
1197 if (!ring->cmpl_base) {
1198 dev_err(ring->mbox->dev, "can't allocate completion memory\n");
1199 ret = -ENOMEM;
1200 goto fail_free_bd_memory;
1201 }
1202 memset(ring->cmpl_base, 0, RING_CMPL_SIZE);
1203
1204 /* Request IRQ */
1205 if (ring->irq == UINT_MAX) {
1206 dev_err(ring->mbox->dev, "ring IRQ not available\n");
1207 ret = -ENODEV;
1208 goto fail_free_cmpl_memory;
1209 }
1210 ret = request_threaded_irq(ring->irq,
1211 flexrm_irq_event,
1212 flexrm_irq_thread,
1213 0, dev_name(ring->mbox->dev), ring);
1214 if (ret) {
1215 dev_err(ring->mbox->dev, "failed to request ring IRQ\n");
1216 goto fail_free_cmpl_memory;
1217 }
1218 ring->irq_requested = true;
1219
1220 /* Disable/inactivate ring */
1221 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1222
1223 /* Program BD start address */
1224 val = BD_START_ADDR_VALUE(ring->bd_dma_base);
1225 writel_relaxed(val, ring->regs + RING_BD_START_ADDR);
1226
1227 /* BD write pointer will be same as HW write pointer */
1228 ring->bd_write_offset =
1229 readl_relaxed(ring->regs + RING_BD_WRITE_PTR);
1230 ring->bd_write_offset *= RING_DESC_SIZE;
1231
1232 /* Program completion start address */
1233 val = CMPL_START_ADDR_VALUE(ring->cmpl_dma_base);
1234 writel_relaxed(val, ring->regs + RING_CMPL_START_ADDR);
1235
1236 /* Ensure last pending message is cleared */
1237 ring->last_pending_msg = NULL;
1238
1239 /* Completion read pointer will be same as HW write pointer */
1240 ring->cmpl_read_offset =
1241 readl_relaxed(ring->regs + RING_CMPL_WRITE_PTR);
1242 ring->cmpl_read_offset *= RING_DESC_SIZE;
1243
1244 /* Read ring Tx, Rx, and Outstanding counts to clear */
1245 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_LS);
1246 readl_relaxed(ring->regs + RING_NUM_REQ_RECV_MS);
1247 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_LS);
1248 readl_relaxed(ring->regs + RING_NUM_REQ_TRANS_MS);
1249 readl_relaxed(ring->regs + RING_NUM_REQ_OUTSTAND);
1250
1251 /* Configure RING_MSI_CONTROL */
1252 val = 0;
1253 val |= (ring->msi_timer_val << MSI_TIMER_VAL_SHIFT);
1254 val |= BIT(MSI_ENABLE_SHIFT);
1255 val |= (ring->msi_count_threshold & MSI_COUNT_MASK) << MSI_COUNT_SHIFT;
1256 writel_relaxed(val, ring->regs + RING_MSI_CONTROL);
1257
1258 /* Enable/activate ring */
1259 val = BIT(CONTROL_ACTIVE_SHIFT);
1260 writel_relaxed(val, ring->regs + RING_CONTROL);
1261
1262 return 0;
1263
1264fail_free_cmpl_memory:
1265 dma_pool_free(ring->mbox->cmpl_pool,
1266 ring->cmpl_base, ring->cmpl_dma_base);
1267 ring->cmpl_base = NULL;
1268fail_free_bd_memory:
1269 dma_pool_free(ring->mbox->bd_pool,
1270 ring->bd_base, ring->bd_dma_base);
1271 ring->bd_base = NULL;
1272fail:
1273 return ret;
1274}
1275
1276static void flexrm_shutdown(struct mbox_chan *chan)
1277{
1278 u32 reqid;
1279 unsigned int timeout;
1280 struct brcm_message *msg;
1281 struct flexrm_ring *ring = chan->con_priv;
1282
1283 /* Disable/inactivate ring */
1284 writel_relaxed(0x0, ring->regs + RING_CONTROL);
1285
1286 /* Flush ring with timeout of 1s */
1287 timeout = 1000;
1288 writel_relaxed(BIT(CONTROL_FLUSH_SHIFT),
1289 ring->regs + RING_CONTROL);
1290 do {
1291 if (readl_relaxed(ring->regs + RING_FLUSH_DONE) &
1292 FLUSH_DONE_MASK)
1293 break;
1294 mdelay(1);
1295 } while (timeout--);
1296
1297 /* Abort all in-flight requests */
1298 for (reqid = 0; reqid < RING_MAX_REQ_COUNT; reqid++) {
1299 msg = ring->requests[reqid];
1300 if (!msg)
1301 continue;
1302
1303 /* Release reqid for recycling */
1304 ring->requests[reqid] = NULL;
1305 ida_simple_remove(&ring->requests_ida, reqid);
1306
1307 /* Unmap DMA mappings */
1308 flexrm_dma_unmap(ring->mbox->dev, msg);
1309
1310 /* Give-back message to mailbox client */
1311 msg->error = -EIO;
1312 mbox_chan_received_data(chan, msg);
1313 }
1314
1315 /* Release IRQ */
1316 if (ring->irq_requested) {
1317 free_irq(ring->irq, ring);
1318 ring->irq_requested = false;
1319 }
1320
1321 /* Free-up completion descriptor ring */
1322 if (ring->cmpl_base) {
1323 dma_pool_free(ring->mbox->cmpl_pool,
1324 ring->cmpl_base, ring->cmpl_dma_base);
1325 ring->cmpl_base = NULL;
1326 }
1327
1328 /* Free-up BD descriptor ring */
1329 if (ring->bd_base) {
1330 dma_pool_free(ring->mbox->bd_pool,
1331 ring->bd_base, ring->bd_dma_base);
1332 ring->bd_base = NULL;
1333 }
1334}
1335
1336static bool flexrm_last_tx_done(struct mbox_chan *chan)
1337{
1338 bool ret;
1339 unsigned long flags;
1340 struct flexrm_ring *ring = chan->con_priv;
1341
1342 spin_lock_irqsave(&ring->lock, flags);
1343 ret = (ring->last_pending_msg) ? false : true;
1344 spin_unlock_irqrestore(&ring->lock, flags);
1345
1346 return ret;
1347}
1348
1349static const struct mbox_chan_ops flexrm_mbox_chan_ops = {
1350 .send_data = flexrm_send_data,
1351 .startup = flexrm_startup,
1352 .shutdown = flexrm_shutdown,
1353 .last_tx_done = flexrm_last_tx_done,
1354 .peek_data = flexrm_peek_data,
1355};
1356
1357static struct mbox_chan *flexrm_mbox_of_xlate(struct mbox_controller *cntlr,
1358 const struct of_phandle_args *pa)
1359{
1360 struct mbox_chan *chan;
1361 struct flexrm_ring *ring;
1362
1363 if (pa->args_count < 3)
1364 return ERR_PTR(-EINVAL);
1365
1366 if (pa->args[0] >= cntlr->num_chans)
1367 return ERR_PTR(-ENOENT);
1368
1369 if (pa->args[1] > MSI_COUNT_MASK)
1370 return ERR_PTR(-EINVAL);
1371
1372 if (pa->args[2] > MSI_TIMER_VAL_MASK)
1373 return ERR_PTR(-EINVAL);
1374
1375 chan = &cntlr->chans[pa->args[0]];
1376 ring = chan->con_priv;
1377 ring->msi_count_threshold = pa->args[1];
1378 ring->msi_timer_val = pa->args[2];
1379
1380 return chan;
1381}
1382
1383/* ====== FlexRM platform driver ===== */
1384
1385static void flexrm_mbox_msi_write(struct msi_desc *desc, struct msi_msg *msg)
1386{
1387 struct device *dev = msi_desc_to_dev(desc);
1388 struct flexrm_mbox *mbox = dev_get_drvdata(dev);
1389 struct flexrm_ring *ring = &mbox->rings[desc->platform.msi_index];
1390
1391 /* Configure per-Ring MSI registers */
1392 writel_relaxed(msg->address_lo, ring->regs + RING_MSI_ADDR_LS);
1393 writel_relaxed(msg->address_hi, ring->regs + RING_MSI_ADDR_MS);
1394 writel_relaxed(msg->data, ring->regs + RING_MSI_DATA_VALUE);
1395}
1396
1397static int flexrm_mbox_probe(struct platform_device *pdev)
1398{
1399 int index, ret = 0;
1400 void __iomem *regs;
1401 void __iomem *regs_end;
1402 struct msi_desc *desc;
1403 struct resource *iomem;
1404 struct flexrm_ring *ring;
1405 struct flexrm_mbox *mbox;
1406 struct device *dev = &pdev->dev;
1407
1408 /* Allocate driver mailbox struct */
1409 mbox = devm_kzalloc(dev, sizeof(*mbox), GFP_KERNEL);
1410 if (!mbox) {
1411 ret = -ENOMEM;
1412 goto fail;
1413 }
1414 mbox->dev = dev;
1415 platform_set_drvdata(pdev, mbox);
1416
1417 /* Get resource for registers */
1418 iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1419 if (!iomem || (resource_size(iomem) < RING_REGS_SIZE)) {
1420 ret = -ENODEV;
1421 goto fail;
1422 }
1423
1424 /* Map registers of all rings */
1425 mbox->regs = devm_ioremap_resource(&pdev->dev, iomem);
1426 if (IS_ERR(mbox->regs)) {
1427 ret = PTR_ERR(mbox->regs);
1428 dev_err(&pdev->dev, "Failed to remap mailbox regs: %d\n", ret);
1429 goto fail;
1430 }
1431 regs_end = mbox->regs + resource_size(iomem);
1432
1433 /* Scan and count available rings */
1434 mbox->num_rings = 0;
1435 for (regs = mbox->regs; regs < regs_end; regs += RING_REGS_SIZE) {
1436 if (readl_relaxed(regs + RING_VER) == RING_VER_MAGIC)
1437 mbox->num_rings++;
1438 }
1439 if (!mbox->num_rings) {
1440 ret = -ENODEV;
1441 goto fail;
1442 }
1443
1444 /* Allocate driver ring structs */
1445 ring = devm_kcalloc(dev, mbox->num_rings, sizeof(*ring), GFP_KERNEL);
1446 if (!ring) {
1447 ret = -ENOMEM;
1448 goto fail;
1449 }
1450 mbox->rings = ring;
1451
1452 /* Initialize members of driver ring structs */
1453 regs = mbox->regs;
1454 for (index = 0; index < mbox->num_rings; index++) {
1455 ring = &mbox->rings[index];
1456 ring->num = index;
1457 ring->mbox = mbox;
1458 while ((regs < regs_end) &&
1459 (readl_relaxed(regs + RING_VER) != RING_VER_MAGIC))
1460 regs += RING_REGS_SIZE;
1461 if (regs_end <= regs) {
1462 ret = -ENODEV;
1463 goto fail;
1464 }
1465 ring->regs = regs;
1466 regs += RING_REGS_SIZE;
1467 ring->irq = UINT_MAX;
1468 ring->irq_requested = false;
1469 ring->msi_timer_val = MSI_TIMER_VAL_MASK;
1470 ring->msi_count_threshold = 0x1;
1471 ida_init(&ring->requests_ida);
1472 memset(ring->requests, 0, sizeof(ring->requests));
1473 ring->bd_base = NULL;
1474 ring->bd_dma_base = 0;
1475 ring->cmpl_base = NULL;
1476 ring->cmpl_dma_base = 0;
1477 spin_lock_init(&ring->lock);
1478 ring->last_pending_msg = NULL;
1479 ring->cmpl_read_offset = 0;
1480 }
1481
1482 /* FlexRM is capable of 40-bit physical addresses only */
1483 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(40));
1484 if (ret) {
1485 ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
1486 if (ret)
1487 goto fail;
1488 }
1489
1490 /* Create DMA pool for ring BD memory */
1491 mbox->bd_pool = dma_pool_create("bd", dev, RING_BD_SIZE,
1492 1 << RING_BD_ALIGN_ORDER, 0);
1493 if (!mbox->bd_pool) {
1494 ret = -ENOMEM;
1495 goto fail;
1496 }
1497
1498 /* Create DMA pool for ring completion memory */
1499 mbox->cmpl_pool = dma_pool_create("cmpl", dev, RING_CMPL_SIZE,
1500 1 << RING_CMPL_ALIGN_ORDER, 0);
1501 if (!mbox->cmpl_pool) {
1502 ret = -ENOMEM;
1503 goto fail_destroy_bd_pool;
1504 }
1505
1506 /* Allocate platform MSIs for each ring */
1507 ret = platform_msi_domain_alloc_irqs(dev, mbox->num_rings,
1508 flexrm_mbox_msi_write);
1509 if (ret)
1510 goto fail_destroy_cmpl_pool;
1511
1512 /* Save allocated IRQ numbers for each ring */
1513 for_each_msi_entry(desc, dev) {
1514 ring = &mbox->rings[desc->platform.msi_index];
1515 ring->irq = desc->irq;
1516 }
1517
1518 /* Initialize mailbox controller */
1519 mbox->controller.txdone_irq = false;
1520 mbox->controller.txdone_poll = true;
1521 mbox->controller.txpoll_period = 1;
1522 mbox->controller.ops = &flexrm_mbox_chan_ops;
1523 mbox->controller.dev = dev;
1524 mbox->controller.num_chans = mbox->num_rings;
1525 mbox->controller.of_xlate = flexrm_mbox_of_xlate;
1526 mbox->controller.chans = devm_kcalloc(dev, mbox->num_rings,
1527 sizeof(*mbox->controller.chans), GFP_KERNEL);
1528 if (!mbox->controller.chans) {
1529 ret = -ENOMEM;
1530 goto fail_free_msis;
1531 }
1532 for (index = 0; index < mbox->num_rings; index++)
1533 mbox->controller.chans[index].con_priv = &mbox->rings[index];
1534
1535 /* Register mailbox controller */
1536 ret = mbox_controller_register(&mbox->controller);
1537 if (ret)
1538 goto fail_free_msis;
1539
1540 dev_info(dev, "registered flexrm mailbox with %d channels\n",
1541 mbox->controller.num_chans);
1542
1543 return 0;
1544
1545fail_free_msis:
1546 platform_msi_domain_free_irqs(dev);
1547fail_destroy_cmpl_pool:
1548 dma_pool_destroy(mbox->cmpl_pool);
1549fail_destroy_bd_pool:
1550 dma_pool_destroy(mbox->bd_pool);
1551fail:
1552 return ret;
1553}
1554
1555static int flexrm_mbox_remove(struct platform_device *pdev)
1556{
1557 int index;
1558 struct device *dev = &pdev->dev;
1559 struct flexrm_ring *ring;
1560 struct flexrm_mbox *mbox = platform_get_drvdata(pdev);
1561
1562 mbox_controller_unregister(&mbox->controller);
1563
1564 platform_msi_domain_free_irqs(dev);
1565
1566 dma_pool_destroy(mbox->cmpl_pool);
1567 dma_pool_destroy(mbox->bd_pool);
1568
1569 for (index = 0; index < mbox->num_rings; index++) {
1570 ring = &mbox->rings[index];
1571 ida_destroy(&ring->requests_ida);
1572 }
1573
1574 return 0;
1575}
1576
1577static const struct of_device_id flexrm_mbox_of_match[] = {
1578 { .compatible = "brcm,iproc-flexrm-mbox", },
1579 {},
1580};
1581MODULE_DEVICE_TABLE(of, flexrm_mbox_of_match);
1582
1583static struct platform_driver flexrm_mbox_driver = {
1584 .driver = {
1585 .name = "brcm-flexrm-mbox",
1586 .of_match_table = flexrm_mbox_of_match,
1587 },
1588 .probe = flexrm_mbox_probe,
1589 .remove = flexrm_mbox_remove,
1590};
1591module_platform_driver(flexrm_mbox_driver);
1592
1593MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
1594MODULE_DESCRIPTION("Broadcom FlexRM mailbox driver");
1595MODULE_LICENSE("GPL v2");
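For reference, not part of the commit: a standalone user-space sketch that copies the DESC_ENC()/DESC_DEC() helpers and the HEADER field shift/mask values from bcm-flexrm-mailbox.c above, to show how a header descriptor packs its fields and how the request id (OPAQUE field) is recovered. The program and the example values are illustrative assumptions only, not driver code.

#include <stdint.h>
#include <stdio.h>

/* Copies of the driver's descriptor helper macros */
#define DESC_DEC(_d, _s, _m)	(((_d) >> (_s)) & (_m))
#define DESC_ENC(_d, _v, _s, _m) \
	do { \
		(_d) &= ~((uint64_t)(_m) << (_s)); \
		(_d) |= (((uint64_t)(_v) & (_m)) << (_s)); \
	} while (0)

int main(void)
{
	uint64_t d = 0;

	/* Pack a HEADER descriptor: type=1, toggle=1, startpkt=1, endpkt=0,
	 * bdcount=31, flags=0, opaque (request id) = 0x12 */
	DESC_ENC(d, 1, 60, 0xf);	/* DESC_TYPE_SHIFT/MASK       */
	DESC_ENC(d, 1, 58, 0x1);	/* HEADER_TOGGLE_SHIFT/MASK   */
	DESC_ENC(d, 0, 57, 0x1);	/* HEADER_ENDPKT_SHIFT/MASK   */
	DESC_ENC(d, 1, 56, 0x1);	/* HEADER_STARTPKT_SHIFT/MASK */
	DESC_ENC(d, 31, 36, 0x1f);	/* HEADER_BDCOUNT_SHIFT/MASK  */
	DESC_ENC(d, 0, 16, 0xffff);	/* HEADER_FLAGS_SHIFT/MASK    */
	DESC_ENC(d, 0x12, 0, 0xffff);	/* HEADER_OPAQUE_SHIFT/MASK   */

	printf("header desc = 0x%016llx\n", (unsigned long long)d);
	printf("reqid (opaque) = 0x%llx\n",
	       (unsigned long long)DESC_DEC(d, 0, 0xffff));
	return 0;
}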
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
index 2aeb034d5fb9..4fe7be0bdd11 100644
--- a/drivers/mailbox/bcm-pdc-mailbox.c
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -18,7 +18,8 @@
  * Broadcom PDC Mailbox Driver
  * The PDC provides a ring based programming interface to one or more hardware
  * offload engines. For example, the PDC driver works with both SPU-M and SPU2
- * cryptographic offload hardware. In some chips the PDC is referred to as MDE.
+ * cryptographic offload hardware. In some chips the PDC is referred to as MDE,
+ * and in others the FA2/FA+ hardware is used with this PDC driver.
  *
  * The PDC driver registers with the Linux mailbox framework as a mailbox
  * controller, once for each PDC instance. Ring 0 for each PDC is registered as
@@ -108,6 +109,7 @@
 #define PDC_INTMASK_OFFSET   0x24
 #define PDC_INTSTATUS_OFFSET 0x20
 #define PDC_RCVLAZY0_OFFSET  (0x30 + 4 * PDC_RINGSET)
+#define FA_RCVLAZY0_OFFSET   0x100
 
 /*
  * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata
@@ -162,6 +164,11 @@
 /* Maximum size buffer the DMA engine can handle */
 #define PDC_DMA_BUF_MAX 16384
 
+enum pdc_hw {
+	FA_HW,		/* FA2/FA+ hardware (i.e. Northstar Plus) */
+	PDC_HW		/* PDC/MDE hardware (i.e. Northstar 2, Pegasus) */
+};
+
 struct pdc_dma_map {
 	void *ctx;	/* opaque context associated with frame */
 };
@@ -211,13 +218,13 @@ struct pdc_regs {
211 u32 gptimer; /* 0x028 */ 218 u32 gptimer; /* 0x028 */
212 219
213 u32 PAD; 220 u32 PAD;
214 u32 intrcvlazy_0; /* 0x030 */ 221 u32 intrcvlazy_0; /* 0x030 (Only in PDC, not FA2) */
215 u32 intrcvlazy_1; /* 0x034 */ 222 u32 intrcvlazy_1; /* 0x034 (Only in PDC, not FA2) */
216 u32 intrcvlazy_2; /* 0x038 */ 223 u32 intrcvlazy_2; /* 0x038 (Only in PDC, not FA2) */
217 u32 intrcvlazy_3; /* 0x03c */ 224 u32 intrcvlazy_3; /* 0x03c (Only in PDC, not FA2) */
218 225
219 u32 PAD[48]; 226 u32 PAD[48];
220 u32 removed_intrecvlazy; /* 0x100 */ 227 u32 fa_intrecvlazy; /* 0x100 (Only in FA2, not PDC) */
221 u32 flowctlthresh; /* 0x104 */ 228 u32 flowctlthresh; /* 0x104 */
222 u32 wrrthresh; /* 0x108 */ 229 u32 wrrthresh; /* 0x108 */
223 u32 gmac_idle_cnt_thresh; /* 0x10c */ 230 u32 gmac_idle_cnt_thresh; /* 0x10c */
@@ -243,7 +250,7 @@ struct pdc_regs {
243 u32 serdes_status1; /* 0x1b0 */ 250 u32 serdes_status1; /* 0x1b0 */
244 u32 PAD[11]; /* 0x1b4-1dc */ 251 u32 PAD[11]; /* 0x1b4-1dc */
245 u32 clk_ctl_st; /* 0x1e0 */ 252 u32 clk_ctl_st; /* 0x1e0 */
246 u32 hw_war; /* 0x1e4 */ 253 u32 hw_war; /* 0x1e4 (Only in PDC, not FA2) */
247 u32 pwrctl; /* 0x1e8 */ 254 u32 pwrctl; /* 0x1e8 */
248 u32 PAD[5]; 255 u32 PAD[5];
249 256
@@ -410,6 +417,9 @@ struct pdc_state {
410 u32 txnobuf; /* unable to create tx descriptor */ 417 u32 txnobuf; /* unable to create tx descriptor */
411 u32 rxnobuf; /* unable to create rx descriptor */ 418 u32 rxnobuf; /* unable to create rx descriptor */
412 u32 rx_oflow; /* count of rx overflows */ 419 u32 rx_oflow; /* count of rx overflows */
420
421 /* hardware type - FA2 or PDC/MDE */
422 enum pdc_hw hw_type;
413}; 423};
414 424
415/* Global variables */ 425/* Global variables */
@@ -1396,7 +1406,13 @@ static int pdc_interrupts_init(struct pdc_state *pdcs)
1396 1406
1397 /* interrupt configuration */ 1407 /* interrupt configuration */
1398 iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET); 1408 iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET);
1399 iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET); 1409
1410 if (pdcs->hw_type == FA_HW)
1411 iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
1412 FA_RCVLAZY0_OFFSET);
1413 else
1414 iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase +
1415 PDC_RCVLAZY0_OFFSET);
1400 1416
1401 /* read irq from device tree */ 1417 /* read irq from device tree */
1402 pdcs->pdc_irq = irq_of_parse_and_map(dn, 0); 1418 pdcs->pdc_irq = irq_of_parse_and_map(dn, 0);
@@ -1465,6 +1481,17 @@ static int pdc_mb_init(struct pdc_state *pdcs)
1465 return 0; 1481 return 0;
1466} 1482}
1467 1483
1484/* Device tree API */
1485static const int pdc_hw = PDC_HW;
1486static const int fa_hw = FA_HW;
1487
1488static const struct of_device_id pdc_mbox_of_match[] = {
1489 {.compatible = "brcm,iproc-pdc-mbox", .data = &pdc_hw},
1490 {.compatible = "brcm,iproc-fa2-mbox", .data = &fa_hw},
1491 { /* sentinel */ }
1492};
1493MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
1494
1468/** 1495/**
1469 * pdc_dt_read() - Read application-specific data from device tree. 1496 * pdc_dt_read() - Read application-specific data from device tree.
1470 * @pdev: Platform device 1497 * @pdev: Platform device
@@ -1481,6 +1508,8 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
1481{ 1508{
1482 struct device *dev = &pdev->dev; 1509 struct device *dev = &pdev->dev;
1483 struct device_node *dn = pdev->dev.of_node; 1510 struct device_node *dn = pdev->dev.of_node;
1511 const struct of_device_id *match;
1512 const int *hw_type;
1484 int err; 1513 int err;
1485 1514
1486 err = of_property_read_u32(dn, "brcm,rx-status-len", 1515 err = of_property_read_u32(dn, "brcm,rx-status-len",
@@ -1492,6 +1521,14 @@ static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs)
1492 1521
1493 pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr"); 1522 pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr");
1494 1523
1524 pdcs->hw_type = PDC_HW;
1525
1526 match = of_match_device(of_match_ptr(pdc_mbox_of_match), dev);
1527 if (match != NULL) {
1528 hw_type = match->data;
1529 pdcs->hw_type = *hw_type;
1530 }
1531
1495 return 0; 1532 return 0;
1496} 1533}
1497 1534
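Each compatible string in the new match table carries a pointer to its hardware type in .data, so pdc_dt_read() only has to dereference the matched entry and otherwise keeps the PDC_HW default. On kernels of this vintage the same lookup could also be written with of_device_get_match_data(); a sketch under that assumption:

#include <linux/of_device.h>

/* Illustrative alternative to the explicit of_match_device() lookup above. */
static void pdc_set_hw_type(struct platform_device *pdev, struct pdc_state *pdcs)
{
	const int *hw_type = of_device_get_match_data(&pdev->dev);

	pdcs->hw_type = hw_type ? *hw_type : PDC_HW;
}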
@@ -1525,7 +1562,7 @@ static int pdc_probe(struct platform_device *pdev)
1525 pdcs->pdc_idx = pdcg.num_spu; 1562 pdcs->pdc_idx = pdcg.num_spu;
1526 pdcg.num_spu++; 1563 pdcg.num_spu++;
1527 1564
1528 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); 1565 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
1529 if (err) { 1566 if (err) {
1530 dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err); 1567 dev_warn(dev, "PDC device cannot perform DMA. Error %d.", err);
1531 goto cleanup; 1568 goto cleanup;
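The DMA mask is widened from 32 to 39 bits, presumably to match what the DMA engine can address on the newly supported SoCs; as before, failing to set the mask aborts the probe. A common defensive variant (illustrative only, not what pdc_probe() does) falls back to 32 bits instead of bailing out:

	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(39));
	if (err)
		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err) {
		dev_err(dev, "no usable DMA configuration, error %d\n", err);
		goto cleanup;
	}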
@@ -1611,12 +1648,6 @@ static int pdc_remove(struct platform_device *pdev)
1611 return 0; 1648 return 0;
1612} 1649}
1613 1650
1614static const struct of_device_id pdc_mbox_of_match[] = {
1615 {.compatible = "brcm,iproc-pdc-mbox"},
1616 { /* sentinel */ }
1617};
1618MODULE_DEVICE_TABLE(of, pdc_mbox_of_match);
1619
1620static struct platform_driver pdc_mbox_driver = { 1651static struct platform_driver pdc_mbox_driver = {
1621 .probe = pdc_probe, 1652 .probe = pdc_probe,
1622 .remove = pdc_remove, 1653 .remove = pdc_remove,
diff --git a/drivers/mailbox/hi6220-mailbox.c b/drivers/mailbox/hi6220-mailbox.c
index 613722db5daf..519376d3534c 100644
--- a/drivers/mailbox/hi6220-mailbox.c
+++ b/drivers/mailbox/hi6220-mailbox.c
@@ -221,7 +221,7 @@ static void hi6220_mbox_shutdown(struct mbox_chan *chan)
221 mbox->irq_map_chan[mchan->ack_irq] = NULL; 221 mbox->irq_map_chan[mchan->ack_irq] = NULL;
222} 222}
223 223
224static struct mbox_chan_ops hi6220_mbox_ops = { 224static const struct mbox_chan_ops hi6220_mbox_ops = {
225 .send_data = hi6220_mbox_send_data, 225 .send_data = hi6220_mbox_send_data,
226 .startup = hi6220_mbox_startup, 226 .startup = hi6220_mbox_startup,
227 .shutdown = hi6220_mbox_shutdown, 227 .shutdown = hi6220_mbox_shutdown,
diff --git a/drivers/mailbox/mailbox-xgene-slimpro.c b/drivers/mailbox/mailbox-xgene-slimpro.c
index dd2afbca51c9..a7040163dd43 100644
--- a/drivers/mailbox/mailbox-xgene-slimpro.c
+++ b/drivers/mailbox/mailbox-xgene-slimpro.c
@@ -174,7 +174,7 @@ static void slimpro_mbox_shutdown(struct mbox_chan *chan)
174 devm_free_irq(mb_chan->dev, mb_chan->irq, mb_chan); 174 devm_free_irq(mb_chan->dev, mb_chan->irq, mb_chan);
175} 175}
176 176
177static struct mbox_chan_ops slimpro_mbox_ops = { 177static const struct mbox_chan_ops slimpro_mbox_ops = {
178 .send_data = slimpro_mbox_send_data, 178 .send_data = slimpro_mbox_send_data,
179 .startup = slimpro_mbox_startup, 179 .startup = slimpro_mbox_startup,
180 .shutdown = slimpro_mbox_shutdown, 180 .shutdown = slimpro_mbox_shutdown,
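The two hunks above follow the same pattern: struct mbox_controller stores its ops as a const pointer, so an ops table that is fully initialised at build time can itself be marked const and placed in read-only data. The resulting shape, with hypothetical callback names:

static const struct mbox_chan_ops demo_mbox_ops = {
	.send_data = demo_send_data,	/* hypothetical callbacks */
	.startup   = demo_startup,
	.shutdown  = demo_shutdown,
};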
diff --git a/drivers/mailbox/mailbox.c b/drivers/mailbox/mailbox.c
index 4671f8a12872..9dfbf7ea10a2 100644
--- a/drivers/mailbox/mailbox.c
+++ b/drivers/mailbox/mailbox.c
@@ -103,11 +103,14 @@ static void tx_tick(struct mbox_chan *chan, int r)
103 /* Submit next message */ 103 /* Submit next message */
104 msg_submit(chan); 104 msg_submit(chan);
105 105
106 if (!mssg)
107 return;
108
106 /* Notify the client */ 109 /* Notify the client */
107 if (mssg && chan->cl->tx_done) 110 if (chan->cl->tx_done)
108 chan->cl->tx_done(chan->cl, mssg, r); 111 chan->cl->tx_done(chan->cl, mssg, r);
109 112
110 if (chan->cl->tx_block) 113 if (r != -ETIME && chan->cl->tx_block)
111 complete(&chan->tx_complete); 114 complete(&chan->tx_complete);
112} 115}
113 116
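Two fixes meet in this hunk: tx_tick() may now be called with no completed message, in which case there is nothing to report to the client, and when r is -ETIME (the blocking wait in mbox_send_message() has already expired, see the hunks below) the completion is not signalled, since that would leave a stale completion to be consumed by the following transfer. Clients still learn about the failure through tx_done; a hypothetical callback:

/* Hypothetical client callback: r is 0 on success or a negative errno,
 * e.g. -ETIME when the core's blocking wait timed out.
 */
static void demo_tx_done(struct mbox_client *cl, void *mssg, int r)
{
	if (r)
		dev_warn(cl->dev, "message %p not sent: %d\n", mssg, r);
}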
@@ -260,7 +263,7 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
260 263
261 msg_submit(chan); 264 msg_submit(chan);
262 265
263 if (chan->cl->tx_block && chan->active_req) { 266 if (chan->cl->tx_block) {
264 unsigned long wait; 267 unsigned long wait;
265 int ret; 268 int ret;
266 269
@@ -271,8 +274,8 @@ int mbox_send_message(struct mbox_chan *chan, void *mssg)
271 274
272 ret = wait_for_completion_timeout(&chan->tx_complete, wait); 275 ret = wait_for_completion_timeout(&chan->tx_complete, wait);
273 if (ret == 0) { 276 if (ret == 0) {
274 t = -EIO; 277 t = -ETIME;
275 tx_tick(chan, -EIO); 278 tx_tick(chan, t);
276 } 279 }
277 } 280 }
278 281
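Dropping the chan->active_req test means a blocking sender now waits even when its message was merely queued behind another rather than submitted immediately, and a timeout is reported as -ETIME instead of -EIO. From a client's point of view only two fields select this behaviour; illustrative values below, with "dev" standing in for the caller's device:

	struct mbox_client cl = {
		.dev      = dev,
		.tx_block = true,	/* mbox_send_message() sleeps until the transfer completes */
		.tx_tout  = 500,	/* give up after 500 ms; the call then returns -ETIME */
	};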
@@ -453,6 +456,12 @@ int mbox_controller_register(struct mbox_controller *mbox)
453 txdone = TXDONE_BY_ACK; 456 txdone = TXDONE_BY_ACK;
454 457
455 if (txdone == TXDONE_BY_POLL) { 458 if (txdone == TXDONE_BY_POLL) {
459
460 if (!mbox->ops->last_tx_done) {
461 dev_err(mbox->dev, "last_tx_done method is absent\n");
462 return -EINVAL;
463 }
464
456 hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC, 465 hrtimer_init(&mbox->poll_hrt, CLOCK_MONOTONIC,
457 HRTIMER_MODE_REL); 466 HRTIMER_MODE_REL);
458 mbox->poll_hrt.function = txdone_hrtimer; 467 mbox->poll_hrt.function = txdone_hrtimer;
diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h
index 6b55c938b401..c20b4843fc2d 100644
--- a/include/linux/mailbox/brcm-message.h
+++ b/include/linux/mailbox/brcm-message.h
@@ -16,6 +16,7 @@
16 16
17enum brcm_message_type { 17enum brcm_message_type {
18 BRCM_MESSAGE_UNKNOWN = 0, 18 BRCM_MESSAGE_UNKNOWN = 0,
19 BRCM_MESSAGE_BATCH,
19 BRCM_MESSAGE_SPU, 20 BRCM_MESSAGE_SPU,
20 BRCM_MESSAGE_SBA, 21 BRCM_MESSAGE_SBA,
21 BRCM_MESSAGE_MAX, 22 BRCM_MESSAGE_MAX,
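BRCM_MESSAGE_BATCH lets a client hand the controller an array of messages in one call; the batch fields added in the next hunk name the array (msgs, msgs_count), while msgs_queued — by my reading of the new FlexRM driver, so treat this as an assumption — records how many of them the controller has accepted, letting the client resubmit the remainder if a ring fills up. A sketch under that assumption, with spu_msgs/num_msgs/chan standing in for the caller's state:

	/* Illustrative batch submission; msgs_queued semantics are assumed. */
	struct brcm_message batch_msg = {
		.type  = BRCM_MESSAGE_BATCH,
		.batch = {
			.msgs        = spu_msgs,	/* array of already-prepared messages */
			.msgs_queued = 0,		/* advanced by the controller as it queues */
			.msgs_count  = num_msgs,
		},
	};

	ret = mbox_send_message(chan, &batch_msg);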
@@ -23,24 +24,29 @@ enum brcm_message_type {
23 24
24struct brcm_sba_command { 25struct brcm_sba_command {
25 u64 cmd; 26 u64 cmd;
27 u64 *cmd_dma;
28 dma_addr_t cmd_dma_addr;
26#define BRCM_SBA_CMD_TYPE_A BIT(0) 29#define BRCM_SBA_CMD_TYPE_A BIT(0)
27#define BRCM_SBA_CMD_TYPE_B BIT(1) 30#define BRCM_SBA_CMD_TYPE_B BIT(1)
28#define BRCM_SBA_CMD_TYPE_C BIT(2) 31#define BRCM_SBA_CMD_TYPE_C BIT(2)
29#define BRCM_SBA_CMD_HAS_RESP BIT(3) 32#define BRCM_SBA_CMD_HAS_RESP BIT(3)
30#define BRCM_SBA_CMD_HAS_OUTPUT BIT(4) 33#define BRCM_SBA_CMD_HAS_OUTPUT BIT(4)
31 u64 flags; 34 u64 flags;
32 dma_addr_t input;
33 size_t input_len;
34 dma_addr_t resp; 35 dma_addr_t resp;
35 size_t resp_len; 36 size_t resp_len;
36 dma_addr_t output; 37 dma_addr_t data;
37 size_t output_len; 38 size_t data_len;
38}; 39};
39 40
40struct brcm_message { 41struct brcm_message {
41 enum brcm_message_type type; 42 enum brcm_message_type type;
42 union { 43 union {
43 struct { 44 struct {
45 struct brcm_message *msgs;
46 unsigned int msgs_queued;
47 unsigned int msgs_count;
48 } batch;
49 struct {
44 struct scatterlist *src; 50 struct scatterlist *src;
45 struct scatterlist *dst; 51 struct scatterlist *dst;
46 } spu; 52 } spu;