author		Linus Torvalds <torvalds@linux-foundation.org>	2016-08-01 07:23:29 -0400
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-08-01 07:23:29 -0400
commit		77d9ada23f207ec3d6258985c882f4fb653693f1 (patch)
tree		84d28d876721d3063ff1b33ab4b16c86ceafb5a7
parent		07f00f06ba9a5533d6650d46d3e938f6cbeee97e (diff)
parent		a68b216676e89fa959a23b583b56f9ce7df81b37 (diff)
Merge branch 'mailbox-for-next' of git://git.linaro.org/landing-teams/working/fujitsu/integration
Pull mailbox updates from Jassi Brar:
"Broadcom:
- New PDC controller driver and bindings
Misc:
- PL320 - Convert from 'raw' IO to 'relaxed' version
- Test - fix dangling pointer"
* 'mailbox-for-next' of git://git.linaro.org/landing-teams/working/fujitsu/integration:
mailbox: Fix format and type mismatches in Broadcom PDC driver
mailbox: Add Broadcom PDC mailbox driver
dt-bindings: add bindings documentation for PDC driver.
mailbox: pl320: remove __raw IO
mailbox: mailbox-test: set tdev->signal to NULL after freeing
-rw-r--r--	Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt	23
-rw-r--r--	drivers/mailbox/Kconfig	9
-rw-r--r--	drivers/mailbox/Makefile	2
-rw-r--r--	drivers/mailbox/bcm-pdc-mailbox.c	1531
-rw-r--r--	drivers/mailbox/mailbox-test.c	1
-rw-r--r--	drivers/mailbox/pl320-ipc.c	46
-rw-r--r--	include/linux/mailbox/brcm-message.h	56
7 files changed, 1645 insertions(+), 23 deletions(-)
diff --git a/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt b/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
new file mode 100644
index 000000000000..411ccf421584
--- /dev/null
+++ b/Documentation/devicetree/bindings/mailbox/brcm,iproc-pdc-mbox.txt
@@ -0,0 +1,23 @@
1 | The PDC driver manages data transfer to and from various offload engines
2 | on some Broadcom SoCs. An SoC may have multiple PDC hardware blocks. There is
3 | one device tree entry per block.
4 |
5 | Required properties:
6 | - compatible : Should be "brcm,iproc-pdc-mbox".
7 | - reg: Should contain PDC registers location and length.
8 | - interrupts: Should contain the IRQ line for the PDC.
9 | - #mbox-cells: 1
10 | - brcm,rx-status-len: Length of metadata preceding received frames, in bytes.
11 |
12 | Optional properties:
13 | - brcm,use-bcm-hdr: present if a BCM header precedes each frame.
14 |
15 | Example:
16 | 	pdc0: iproc-pdc0@0x612c0000 {
17 | 		compatible = "brcm,iproc-pdc-mbox";
18 | 		reg = <0 0x612c0000 0 0x445>;	/* PDC FS0 regs */
19 | 		interrupts = <GIC_SPI 187 IRQ_TYPE_LEVEL_HIGH>;
20 | 		#mbox-cells = <1>;	/* one cell per mailbox channel */
21 | 		brcm,rx-status-len = <32>;
22 | 		brcm,use-bcm-hdr;
23 | 	};
diff --git a/drivers/mailbox/Kconfig b/drivers/mailbox/Kconfig
index 5305923752d2..97c372908e78 100644
--- a/drivers/mailbox/Kconfig
+++ b/drivers/mailbox/Kconfig
@@ -123,4 +123,13 @@ config XGENE_SLIMPRO_MBOX
123 | 	  It is used to send short messages between ARM64-bit cores and
124 | 	  the SLIMpro Management Engine, primarily for PM. Say Y here if you
125 | 	  want to use the APM X-Gene SLIMpro IPCM support.
126 |
127 | config BCM_PDC_MBOX
128 | 	tristate "Broadcom PDC Mailbox"
129 | 	depends on ARM64 || COMPILE_TEST
130 | 	default ARCH_BCM_IPROC
131 | 	help
132 | 	  Mailbox implementation for the Broadcom PDC ring manager,
133 | 	  which provides access to various offload engines on Broadcom
134 | 	  SoCs. Say Y here if you want to use the Broadcom PDC.
135 | endif
diff --git a/drivers/mailbox/Makefile b/drivers/mailbox/Makefile
index 0be3e742bb7d..66c38e300dfc 100644
--- a/drivers/mailbox/Makefile
+++ b/drivers/mailbox/Makefile
@@ -25,3 +25,5 @@ obj-$(CONFIG_TI_MESSAGE_MANAGER) += ti-msgmgr.o
25 | obj-$(CONFIG_XGENE_SLIMPRO_MBOX) += mailbox-xgene-slimpro.o
26 |
27 | obj-$(CONFIG_HI6220_MBOX) += hi6220-mailbox.o
28 |
29 | obj-$(CONFIG_BCM_PDC_MBOX) += bcm-pdc-mailbox.o
diff --git a/drivers/mailbox/bcm-pdc-mailbox.c b/drivers/mailbox/bcm-pdc-mailbox.c
new file mode 100644
index 000000000000..cbe0c1ee4ba9
--- /dev/null
+++ b/drivers/mailbox/bcm-pdc-mailbox.c
@@ -0,0 +1,1531 @@
1 | /* | ||
2 | * Copyright 2016 Broadcom | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License, version 2, as | ||
6 | * published by the Free Software Foundation (the "GPL"). | ||
7 | * | ||
8 | * This program is distributed in the hope that it will be useful, but | ||
9 | * WITHOUT ANY WARRANTY; without even the implied warranty of | ||
10 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | ||
11 | * General Public License version 2 (GPLv2) for more details. | ||
12 | * | ||
13 | * You should have received a copy of the GNU General Public License | ||
14 | * version 2 (GPLv2) along with this source code. | ||
15 | */ | ||
16 | |||
17 | /* | ||
18 | * Broadcom PDC Mailbox Driver | ||
19 | * The PDC provides a ring based programming interface to one or more hardware | ||
20 | * offload engines. For example, the PDC driver works with both SPU-M and SPU2 | ||
21 | * cryptographic offload hardware. In some chips the PDC is referred to as MDE. | ||
22 | * | ||
23 | * The PDC driver registers with the Linux mailbox framework as a mailbox | ||
24 | * controller, once for each PDC instance. Ring 0 for each PDC is registered as | ||
25 | * a mailbox channel. The PDC driver uses interrupts to determine when data | ||
26 | * transfers to and from an offload engine are complete. The PDC driver uses | ||
27 | * threaded IRQs so that response messages are handled outside of interrupt | ||
28 | * context. | ||
29 | * | ||
30 | * The PDC driver allows multiple messages to be pending in the descriptor | ||
31 | * rings. The tx_msg_start descriptor index indicates where the last message | ||
32 | * starts. The txin_numd value at this index indicates how many descriptor | ||
33 | * indexes make up the message. Similar state is kept on the receive side. When | ||
34 | * an rx interrupt indicates a response is ready, the PDC driver processes numd | ||
35 | * descriptors from the tx and rx ring, thus processing one response at a time. | ||
36 | */ | ||
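/*
 * Worked example of the bookkeeping above (editor's sketch, using only
 * fields defined in this file): suppose a request needs three tx
 * descriptors and is posted when txout == 10. The driver records
 * tx_msg_start = 10 and txin_numd[10] = 3, and fills descriptors
 * 10..12. When the response interrupt fires, pdc_receive() reclaims
 * txin_numd[txin] == 3 descriptors starting at txin == 10 before
 * moving on to the next pending message.
 */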
37 | |||
38 | #include <linux/errno.h> | ||
39 | #include <linux/module.h> | ||
40 | #include <linux/init.h> | ||
41 | #include <linux/slab.h> | ||
42 | #include <linux/debugfs.h> | ||
43 | #include <linux/interrupt.h> | ||
44 | #include <linux/wait.h> | ||
45 | #include <linux/platform_device.h> | ||
46 | #include <linux/io.h> | ||
47 | #include <linux/of.h> | ||
48 | #include <linux/of_device.h> | ||
49 | #include <linux/of_address.h> | ||
50 | #include <linux/of_irq.h> | ||
51 | #include <linux/mailbox_controller.h> | ||
52 | #include <linux/mailbox/brcm-message.h> | ||
53 | #include <linux/scatterlist.h> | ||
54 | #include <linux/dma-direction.h> | ||
55 | #include <linux/dma-mapping.h> | ||
56 | #include <linux/dmapool.h> | ||
57 | |||
58 | #define PDC_SUCCESS 0 | ||
59 | |||
60 | #define RING_ENTRY_SIZE sizeof(struct dma64dd) | ||
61 | |||
62 | /* # entries in PDC dma ring */ | ||
63 | #define PDC_RING_ENTRIES 128 | ||
64 | #define PDC_RING_SIZE (PDC_RING_ENTRIES * RING_ENTRY_SIZE) | ||
65 | /* Rings are 8k aligned */ | ||
66 | #define RING_ALIGN_ORDER 13 | ||
67 | #define RING_ALIGN BIT(RING_ALIGN_ORDER) | ||
68 | |||
69 | #define RX_BUF_ALIGN_ORDER 5 | ||
70 | #define RX_BUF_ALIGN BIT(RX_BUF_ALIGN_ORDER) | ||
71 | |||
72 | /* descriptor bumping macros */ | ||
73 | #define XXD(x, max_mask) ((x) & (max_mask)) | ||
74 | #define TXD(x, max_mask) XXD((x), (max_mask)) | ||
75 | #define RXD(x, max_mask) XXD((x), (max_mask)) | ||
76 | #define NEXTTXD(i, max_mask) TXD((i) + 1, (max_mask)) | ||
77 | #define PREVTXD(i, max_mask) TXD((i) - 1, (max_mask)) | ||
78 | #define NEXTRXD(i, max_mask) RXD((i) + 1, (max_mask)) | ||
79 | #define PREVRXD(i, max_mask) RXD((i) - 1, (max_mask)) | ||
80 | #define NTXDACTIVE(h, t, max_mask) TXD((t) - (h), (max_mask)) | ||
81 | #define NRXDACTIVE(h, t, max_mask) RXD((t) - (h), (max_mask)) | ||
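/*
 * Example of the wraparound arithmetic (editor's note): with the
 * driver's 128-entry rings, max_mask = ntxpost = 127. If txin = 120
 * and txout = 5, NTXDACTIVE(120, 5, 127) = (5 - 120) & 127 = 13
 * descriptors in flight. Since availability is computed as
 * ntxpost - NTXDACTIVE(...), one slot always stays empty so a full
 * ring is distinguishable from an empty one.
 */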
82 | |||
83 | /* Length of BCM header at start of SPU msg, in bytes */ | ||
84 | #define BCM_HDR_LEN 8 | ||
85 | |||
86 | /* | ||
87 | * PDC driver reserves ringset 0 on each SPU for its own use. The driver does | ||
88 | * not currently support use of multiple ringsets on a single PDC engine. | ||
89 | */ | ||
90 | #define PDC_RINGSET 0 | ||
91 | |||
92 | /* | ||
93 | * Interrupt mask and status definitions. Enable interrupts for tx and rx on | ||
94 | * ring 0 | ||
95 | */ | ||
96 | #define PDC_XMTINT_0 (24 + PDC_RINGSET) | ||
97 | #define PDC_RCVINT_0 (16 + PDC_RINGSET) | ||
98 | #define PDC_XMTINTEN_0 BIT(PDC_XMTINT_0) | ||
99 | #define PDC_RCVINTEN_0 BIT(PDC_RCVINT_0) | ||
100 | #define PDC_INTMASK (PDC_XMTINTEN_0 | PDC_RCVINTEN_0) | ||
101 | #define PDC_LAZY_FRAMECOUNT 1 | ||
102 | #define PDC_LAZY_TIMEOUT 10000 | ||
103 | #define PDC_LAZY_INT (PDC_LAZY_TIMEOUT | (PDC_LAZY_FRAMECOUNT << 24)) | ||
104 | #define PDC_INTMASK_OFFSET 0x24 | ||
105 | #define PDC_INTSTATUS_OFFSET 0x20 | ||
106 | #define PDC_RCVLAZY0_OFFSET (0x30 + 4 * PDC_RINGSET) | ||
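/*
 * Editor's note on the encoding above: PDC_LAZY_INT packs the timeout
 * into the low bits and the frame count at bit 24, so its value is
 * 10000 | (1 << 24) = 0x01002710. With PDC_LAZY_FRAMECOUNT = 1, the
 * hardware raises an interrupt after every received frame.
 */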
107 | |||
108 | /* | ||
109 | * For SPU2, configure MDE_CKSUM_CONTROL to write 17 bytes of metadata | ||
110 | * before frame | ||
111 | */ | ||
112 | #define PDC_SPU2_RESP_HDR_LEN 17 | ||
113 | #define PDC_CKSUM_CTRL BIT(27) | ||
114 | #define PDC_CKSUM_CTRL_OFFSET 0x400 | ||
115 | |||
116 | #define PDC_SPUM_RESP_HDR_LEN 32 | ||
117 | |||
118 | /* | ||
119 | * Sets the following bits for write to transmit control reg: | ||
120 | * 0 - XmtEn - enable activity on the tx channel | ||
121 | * 11 - PtyChkDisable - parity check is disabled | ||
122 | * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory | ||
123 | */ | ||
124 | #define PDC_TX_CTL 0x000C0801 | ||
125 | |||
126 | /* | ||
127 | * Sets the following bits for write to receive control reg: | ||
128 | * 0 - RcvEn - enable activity on the rx channel | ||
129 | * 7:1 - RcvOffset - size in bytes of status region at start of rx frame buf | ||
130 | * 9 - SepRxHdrDescEn - place start of new frames only in descriptors | ||
131 | * that have StartOfFrame set | ||
132 | * 10 - OflowContinue - on rx FIFO overflow, clear rx fifo, discard all | ||
133 | * remaining bytes in current frame, report error | ||
134 | * in rx frame status for current frame | ||
135 | * 11 - PtyChkDisable - parity check is disabled | ||
136 | * 20:18 - BurstLen = 3 -> 2^7 = 128 byte data reads from memory | ||
137 | */ | ||
138 | #define PDC_RX_CTL 0x000C0E01 | ||
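/*
 * Editor's note: pdc_hw_init() adds (rx_status_len << 1) to this base
 * value so the status length lands in the RcvOffset field (bits 7:1).
 * For example, with the 32-byte SPU-M status header:
 * 0x000C0E01 + (32 << 1) = 0x000C0E41, i.e. RcvOffset = 32.
 */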
139 | |||
140 | #define CRYPTO_D64_RS0_CD_MASK ((PDC_RING_ENTRIES * RING_ENTRY_SIZE) - 1) | ||
141 | |||
142 | /* descriptor flags */ | ||
143 | #define D64_CTRL1_EOT BIT(28) /* end of descriptor table */ | ||
144 | #define D64_CTRL1_IOC BIT(29) /* interrupt on complete */ | ||
145 | #define D64_CTRL1_EOF BIT(30) /* end of frame */ | ||
146 | #define D64_CTRL1_SOF BIT(31) /* start of frame */ | ||
147 | |||
148 | #define RX_STATUS_OVERFLOW 0x00800000 | ||
149 | #define RX_STATUS_LEN 0x0000FFFF | ||
150 | |||
151 | #define PDC_TXREGS_OFFSET 0x200 | ||
152 | #define PDC_RXREGS_OFFSET 0x220 | ||
153 | |||
154 | /* Maximum size buffer the DMA engine can handle */ | ||
155 | #define PDC_DMA_BUF_MAX 16384 | ||
156 | |||
157 | struct pdc_dma_map { | ||
158 | void *ctx; /* opaque context associated with frame */ | ||
159 | }; | ||
160 | |||
161 | /* dma descriptor */ | ||
162 | struct dma64dd { | ||
163 | u32 ctrl1; /* misc control bits */ | ||
164 | u32 ctrl2; /* buffer count and address extension */ | ||
165 | u32 addrlow; /* memory address of the data buffer, bits 31:0 */
166 | u32 addrhigh; /* memory address of the data buffer, bits 63:32 */
167 | }; | ||
168 | |||
169 | /* dma registers per channel(xmt or rcv) */ | ||
170 | struct dma64_regs { | ||
171 | u32 control; /* enable, et al */ | ||
172 | u32 ptr; /* last descriptor posted to chip */ | ||
173 | u32 addrlow; /* descriptor ring base address low 32-bits */ | ||
174 | u32 addrhigh; /* descriptor ring base address bits 63:32 */ | ||
175 | u32 status0; /* last rx descriptor written by hw */ | ||
176 | u32 status1; /* driver does not use */ | ||
177 | }; | ||
178 | |||
179 | /* cpp contortions to concatenate w/arg prescan */ | ||
180 | #ifndef PAD | ||
181 | #define _PADLINE(line) pad ## line | ||
182 | #define _XSTR(line) _PADLINE(line) | ||
183 | #define PAD _XSTR(__LINE__) | ||
184 | #endif /* PAD */ | ||
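/*
 * Example expansion (editor's note): if "u32 PAD[2];" falls on source
 * line 189, the preprocessor pastes __LINE__ to yield "u32 pad189[2];",
 * so repeated PAD members in one struct get unique field names.
 */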
185 | |||
186 | /* dma registers. matches hw layout. */ | ||
187 | struct dma64 { | ||
188 | struct dma64_regs dmaxmt; /* dma tx */ | ||
189 | u32 PAD[2]; | ||
190 | struct dma64_regs dmarcv; /* dma rx */ | ||
191 | u32 PAD[2]; | ||
192 | }; | ||
193 | |||
194 | /* PDC registers */ | ||
195 | struct pdc_regs { | ||
196 | u32 devcontrol; /* 0x000 */ | ||
197 | u32 devstatus; /* 0x004 */ | ||
198 | u32 PAD; | ||
199 | u32 biststatus; /* 0x00c */ | ||
200 | u32 PAD[4]; | ||
201 | u32 intstatus; /* 0x020 */ | ||
202 | u32 intmask; /* 0x024 */ | ||
203 | u32 gptimer; /* 0x028 */ | ||
204 | |||
205 | u32 PAD; | ||
206 | u32 intrcvlazy_0; /* 0x030 */ | ||
207 | u32 intrcvlazy_1; /* 0x034 */ | ||
208 | u32 intrcvlazy_2; /* 0x038 */ | ||
209 | u32 intrcvlazy_3; /* 0x03c */ | ||
210 | |||
211 | u32 PAD[48]; | ||
212 | u32 removed_intrecvlazy; /* 0x100 */ | ||
213 | u32 flowctlthresh; /* 0x104 */ | ||
214 | u32 wrrthresh; /* 0x108 */ | ||
215 | u32 gmac_idle_cnt_thresh; /* 0x10c */ | ||
216 | |||
217 | u32 PAD[4]; | ||
218 | u32 ifioaccessaddr; /* 0x120 */ | ||
219 | u32 ifioaccessbyte; /* 0x124 */ | ||
220 | u32 ifioaccessdata; /* 0x128 */ | ||
221 | |||
222 | u32 PAD[21]; | ||
223 | u32 phyaccess; /* 0x180 */ | ||
224 | u32 PAD; | ||
225 | u32 phycontrol; /* 0x188 */ | ||
226 | u32 txqctl; /* 0x18c */ | ||
227 | u32 rxqctl; /* 0x190 */ | ||
228 | u32 gpioselect; /* 0x194 */ | ||
229 | u32 gpio_output_en; /* 0x198 */ | ||
230 | u32 PAD; /* 0x19c */ | ||
231 | u32 txq_rxq_mem_ctl; /* 0x1a0 */ | ||
232 | u32 memory_ecc_status; /* 0x1a4 */ | ||
233 | u32 serdes_ctl; /* 0x1a8 */ | ||
234 | u32 serdes_status0; /* 0x1ac */ | ||
235 | u32 serdes_status1; /* 0x1b0 */ | ||
236 | u32 PAD[11]; /* 0x1b4-1dc */ | ||
237 | u32 clk_ctl_st; /* 0x1e0 */ | ||
238 | u32 hw_war; /* 0x1e4 */ | ||
239 | u32 pwrctl; /* 0x1e8 */ | ||
240 | u32 PAD[5]; | ||
241 | |||
242 | #define PDC_NUM_DMA_RINGS 4 | ||
243 | struct dma64 dmaregs[PDC_NUM_DMA_RINGS]; /* 0x0200 - 0x2fc */ | ||
244 | |||
245 | /* more registers follow, but we don't use them */ | ||
246 | }; | ||
247 | |||
248 | /* structure for allocating/freeing DMA rings */ | ||
249 | struct pdc_ring_alloc { | ||
250 | dma_addr_t dmabase; /* DMA address of start of ring */ | ||
251 | void *vbase; /* base kernel virtual address of ring */ | ||
252 | u32 size; /* ring allocation size in bytes */ | ||
253 | }; | ||
254 | |||
255 | /* PDC state structure */ | ||
256 | struct pdc_state { | ||
257 | /* synchronize access to this PDC state structure */ | ||
258 | spinlock_t pdc_lock; | ||
259 | |||
260 | /* Index of the PDC whose state is in this structure instance */ | ||
261 | u8 pdc_idx; | ||
262 | |||
263 | /* Platform device for this PDC instance */ | ||
264 | struct platform_device *pdev; | ||
265 | |||
266 | /* | ||
267 | * Each PDC instance has a mailbox controller. PDC receives request | ||
268 | * messages through mailboxes, and sends response messages through the | ||
269 | * mailbox framework. | ||
270 | */ | ||
271 | struct mbox_controller mbc; | ||
272 | |||
273 | unsigned int pdc_irq; | ||
274 | |||
275 | /* | ||
276 | * Last interrupt status read from PDC device. Saved in interrupt | ||
277 | * handler so the handler can clear the interrupt in the device, | ||
278 | * and the interrupt thread called later can know which interrupt | ||
279 | * bits are active. | ||
280 | */ | ||
281 | unsigned long intstatus; | ||
282 | |||
283 | /* Number of bytes of receive status prior to each rx frame */ | ||
284 | u32 rx_status_len; | ||
285 | /* Whether a BCM header is prepended to each frame */ | ||
286 | bool use_bcm_hdr; | ||
287 | /* Sum of length of BCM header and rx status header */ | ||
288 | u32 pdc_resp_hdr_len; | ||
289 | |||
290 | /* The base virtual address of DMA hw registers */ | ||
291 | void __iomem *pdc_reg_vbase; | ||
292 | |||
293 | /* Pool for allocation of DMA rings */ | ||
294 | struct dma_pool *ring_pool; | ||
295 | |||
296 | /* Pool for allocation of metadata buffers for response messages */ | ||
297 | struct dma_pool *rx_buf_pool; | ||
298 | |||
299 | /* | ||
300 | * The base virtual address of DMA tx/rx descriptor rings. Corresponding | ||
301 | * DMA address and size of ring allocation. | ||
302 | */ | ||
303 | struct pdc_ring_alloc tx_ring_alloc; | ||
304 | struct pdc_ring_alloc rx_ring_alloc; | ||
305 | |||
306 | struct pdc_regs *regs; /* start of PDC registers */ | ||
307 | |||
308 | struct dma64_regs *txregs_64; /* dma tx engine registers */ | ||
309 | struct dma64_regs *rxregs_64; /* dma rx engine registers */ | ||
310 | |||
311 | /* | ||
312 | * Arrays of PDC_RING_ENTRIES descriptors | ||
313 | * To use multiple ringsets, this needs to be extended | ||
314 | */ | ||
315 | struct dma64dd *txd_64; /* tx descriptor ring */ | ||
316 | struct dma64dd *rxd_64; /* rx descriptor ring */ | ||
317 | |||
318 | /* descriptor ring sizes */ | ||
319 | u32 ntxd; /* # tx descriptors */ | ||
320 | u32 nrxd; /* # rx descriptors */ | ||
321 | u32 nrxpost; /* # rx buffers to keep posted */ | ||
322 | u32 ntxpost; /* max number of tx buffers that can be posted */ | ||
323 | |||
324 | /* | ||
325 | * Index of next tx descriptor to reclaim. That is, the descriptor | ||
326 | * index of the oldest tx buffer for which the host has yet to process | ||
327 | * the corresponding response. | ||
328 | */ | ||
329 | u32 txin; | ||
330 | |||
331 | /* | ||
332 | * Index of the first transmit descriptor for the sequence of
333 | * message fragments currently under construction. Used to build up
334 | * the txin_numd count for a message. Updated to txout when the host
335 | * starts a new sequence of tx buffers for a new message.
336 | */ | ||
337 | u32 tx_msg_start; | ||
338 | |||
339 | /* Index of next tx descriptor to post. */ | ||
340 | u32 txout; | ||
341 | |||
342 | /* | ||
343 | * Number of tx descriptors associated with the message that starts | ||
344 | * at this tx descriptor index. | ||
345 | */ | ||
346 | u32 txin_numd[PDC_RING_ENTRIES]; | ||
347 | |||
348 | /* | ||
349 | * Index of next rx descriptor to reclaim. This is the index of | ||
350 | * the next descriptor whose data has yet to be processed by the host. | ||
351 | */ | ||
352 | u32 rxin; | ||
353 | |||
354 | /* | ||
355 | * Index of the first receive descriptor for the sequence of | ||
356 | * message fragments currently under construction. Used to build up | ||
357 | * the rxin_numd count for a message. Updated to rxout when the host | ||
358 | * starts a new sequence of rx buffers for a new message. | ||
359 | */ | ||
360 | u32 rx_msg_start; | ||
361 | |||
362 | /* | ||
363 | * Saved value of current hardware rx descriptor index. | ||
364 | * The last rx buffer written by the hw is the index previous to | ||
365 | * this one. | ||
366 | */ | ||
367 | u32 last_rx_curr; | ||
368 | |||
369 | /* Index of next rx descriptor to post. */ | ||
370 | u32 rxout; | ||
371 | |||
372 | /* | ||
373 | * opaque context associated with frame that starts at each | ||
374 | * rx ring index. | ||
375 | */ | ||
376 | void *rxp_ctx[PDC_RING_ENTRIES]; | ||
377 | |||
378 | /* | ||
379 | * Scatterlists used to form request and reply frames beginning at a | ||
380 | * given ring index. Retained in order to unmap each sg after reply | ||
381 | * is processed | ||
382 | */ | ||
383 | struct scatterlist *src_sg[PDC_RING_ENTRIES]; | ||
384 | struct scatterlist *dst_sg[PDC_RING_ENTRIES]; | ||
385 | |||
386 | /* | ||
387 | * Number of rx descriptors associated with the message that starts | ||
388 | * at this descriptor index. Not set for every index. For example, | ||
389 | * if descriptor index i points to a scatterlist with 4 entries, then | ||
390 | * the next three descriptor indexes don't have a value set. | ||
391 | */ | ||
392 | u32 rxin_numd[PDC_RING_ENTRIES]; | ||
393 | |||
394 | void *resp_hdr[PDC_RING_ENTRIES]; | ||
395 | dma_addr_t resp_hdr_daddr[PDC_RING_ENTRIES]; | ||
396 | |||
397 | struct dentry *debugfs_stats; /* debug FS stats file for this PDC */ | ||
398 | |||
399 | /* counters */ | ||
400 | u32 pdc_requests; /* number of request messages submitted */ | ||
401 | u32 pdc_replies; /* number of reply messages received */ | ||
402 | u32 txnobuf; /* count of tx ring full */ | ||
403 | u32 rxnobuf; /* count of rx ring full */ | ||
404 | u32 rx_oflow; /* count of rx overflows */ | ||
405 | }; | ||
406 | |||
407 | /* Global variables */ | ||
408 | |||
409 | struct pdc_globals { | ||
410 | /* Actual number of SPUs in hardware, as reported by device tree */ | ||
411 | u32 num_spu; | ||
412 | }; | ||
413 | |||
414 | static struct pdc_globals pdcg; | ||
415 | |||
416 | /* top level debug FS directory for PDC driver */ | ||
417 | static struct dentry *debugfs_dir; | ||
418 | |||
419 | static ssize_t pdc_debugfs_read(struct file *filp, char __user *ubuf, | ||
420 | size_t count, loff_t *offp) | ||
421 | { | ||
422 | struct pdc_state *pdcs; | ||
423 | char *buf; | ||
424 | ssize_t ret, out_offset, out_count; | ||
425 | |||
426 | out_count = 512; | ||
427 | |||
428 | buf = kmalloc(out_count, GFP_KERNEL); | ||
429 | if (!buf) | ||
430 | return -ENOMEM; | ||
431 | |||
432 | pdcs = filp->private_data; | ||
433 | out_offset = 0; | ||
434 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
435 | "SPU %u stats:\n", pdcs->pdc_idx); | ||
436 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
437 | "PDC requests............%u\n", | ||
438 | pdcs->pdc_requests); | ||
439 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
440 | "PDC responses...........%u\n", | ||
441 | pdcs->pdc_replies); | ||
442 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
443 | "Tx err ring full........%u\n", | ||
444 | pdcs->txnobuf); | ||
445 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
446 | "Rx err ring full........%u\n", | ||
447 | pdcs->rxnobuf); | ||
448 | out_offset += snprintf(buf + out_offset, out_count - out_offset, | ||
449 | "Receive overflow........%u\n", | ||
450 | pdcs->rx_oflow); | ||
451 | |||
452 | if (out_offset > out_count) | ||
453 | out_offset = out_count; | ||
454 | |||
455 | ret = simple_read_from_buffer(ubuf, count, offp, buf, out_offset); | ||
456 | kfree(buf); | ||
457 | return ret; | ||
458 | } | ||
459 | |||
460 | static const struct file_operations pdc_debugfs_stats = { | ||
461 | .owner = THIS_MODULE, | ||
462 | .open = simple_open, | ||
463 | .read = pdc_debugfs_read, | ||
464 | }; | ||
465 | |||
466 | /** | ||
467 | * pdc_setup_debugfs() - Create the debug FS directories. If the top-level | ||
468 | * directory has not yet been created, create it now. Create a stats file in | ||
469 | * this directory for a SPU. | ||
470 | * @pdcs: PDC state structure | ||
471 | */ | ||
472 | void pdc_setup_debugfs(struct pdc_state *pdcs) | ||
473 | { | ||
474 | char spu_stats_name[16]; | ||
475 | |||
476 | if (!debugfs_initialized()) | ||
477 | return; | ||
478 | |||
479 | snprintf(spu_stats_name, 16, "pdc%d_stats", pdcs->pdc_idx); | ||
480 | if (!debugfs_dir) | ||
481 | debugfs_dir = debugfs_create_dir(KBUILD_MODNAME, NULL); | ||
482 | |||
483 | pdcs->debugfs_stats = debugfs_create_file(spu_stats_name, S_IRUSR, | ||
484 | debugfs_dir, pdcs, | ||
485 | &pdc_debugfs_stats); | ||
486 | } | ||
487 | |||
488 | void pdc_free_debugfs(void) | ||
489 | { | ||
490 | if (debugfs_dir && simple_empty(debugfs_dir)) { | ||
491 | debugfs_remove_recursive(debugfs_dir); | ||
492 | debugfs_dir = NULL; | ||
493 | } | ||
494 | } | ||
495 | |||
496 | /** | ||
497 | * pdc_build_rxd() - Build DMA descriptor to receive SPU result. | ||
498 | * @pdcs: PDC state for SPU that will generate result | ||
499 | * @dma_addr: DMA address of buffer that descriptor is being built for | ||
500 | * @buf_len: Length of the receive buffer, in bytes | ||
501 | * @flags: Flags to be stored in descriptor | ||
502 | */ | ||
503 | static inline void | ||
504 | pdc_build_rxd(struct pdc_state *pdcs, dma_addr_t dma_addr, | ||
505 | u32 buf_len, u32 flags) | ||
506 | { | ||
507 | struct device *dev = &pdcs->pdev->dev; | ||
508 | |||
509 | dev_dbg(dev, | ||
510 | "Writing rx descriptor for PDC %u at index %u with length %u. flags %#x\n", | ||
511 | pdcs->pdc_idx, pdcs->rxout, buf_len, flags); | ||
512 | |||
513 | iowrite32(lower_32_bits(dma_addr), | ||
514 | (void *)&pdcs->rxd_64[pdcs->rxout].addrlow); | ||
515 | iowrite32(upper_32_bits(dma_addr), | ||
516 | (void *)&pdcs->rxd_64[pdcs->rxout].addrhigh); | ||
517 | iowrite32(flags, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl1); | ||
518 | iowrite32(buf_len, (void *)&pdcs->rxd_64[pdcs->rxout].ctrl2); | ||
519 | /* bump ring index and return */ | ||
520 | pdcs->rxout = NEXTRXD(pdcs->rxout, pdcs->nrxpost); | ||
521 | } | ||
522 | |||
523 | /** | ||
524 | * pdc_build_txd() - Build a DMA descriptor to transmit a SPU request to | ||
525 | * hardware. | ||
526 | * @pdcs: PDC state for the SPU that will process this request | ||
527 | * @dma_addr: DMA address of packet to be transmitted | ||
528 | * @buf_len: Length of tx buffer, in bytes | ||
529 | * @flags: Flags to be stored in descriptor | ||
530 | */ | ||
531 | static inline void | ||
532 | pdc_build_txd(struct pdc_state *pdcs, dma_addr_t dma_addr, u32 buf_len, | ||
533 | u32 flags) | ||
534 | { | ||
535 | struct device *dev = &pdcs->pdev->dev; | ||
536 | |||
537 | dev_dbg(dev, | ||
538 | "Writing tx descriptor for PDC %u at index %u with length %u, flags %#x\n", | ||
539 | pdcs->pdc_idx, pdcs->txout, buf_len, flags); | ||
540 | |||
541 | iowrite32(lower_32_bits(dma_addr), | ||
542 | (void *)&pdcs->txd_64[pdcs->txout].addrlow); | ||
543 | iowrite32(upper_32_bits(dma_addr), | ||
544 | (void *)&pdcs->txd_64[pdcs->txout].addrhigh); | ||
545 | iowrite32(flags, (void *)&pdcs->txd_64[pdcs->txout].ctrl1); | ||
546 | iowrite32(buf_len, (void *)&pdcs->txd_64[pdcs->txout].ctrl2); | ||
547 | |||
548 | /* bump ring index and return */ | ||
549 | pdcs->txout = NEXTTXD(pdcs->txout, pdcs->ntxpost); | ||
550 | } | ||
551 | |||
552 | /** | ||
553 | * pdc_receive() - Receive a response message from a given SPU. | ||
554 | * @pdcs: PDC state for the SPU to receive from | ||
555 | * @mssg: mailbox message to be returned to client | ||
556 | * | ||
557 | * When the return code indicates success, the response message is available in | ||
558 | * the receive buffers provided prior to submission of the request. | ||
559 | * | ||
560 | * Input: | ||
561 | * pdcs - PDC state structure for the SPU to be polled | ||
562 | * mssg - mailbox message to be returned to client. This function sets the | ||
563 | * context pointer on the message to help the client associate the | ||
564 | * response with a request. | ||
565 | * | ||
566 | * Return: PDC_SUCCESS if one or more receive descriptors was processed | ||
567 | * -EAGAIN indicates that no response message is available | ||
568 | * -EIO an error occurred | ||
569 | */ | ||
570 | static int | ||
571 | pdc_receive(struct pdc_state *pdcs, struct brcm_message *mssg) | ||
572 | { | ||
573 | struct device *dev = &pdcs->pdev->dev; | ||
574 | u32 len, rx_status; | ||
575 | u32 num_frags; | ||
576 | int i; | ||
577 | u8 *resp_hdr; /* virtual addr of start of resp message DMA header */ | ||
578 | u32 frags_rdy; /* number of fragments ready to read */ | ||
579 | u32 rx_idx; /* ring index of start of receive frame */ | ||
580 | dma_addr_t resp_hdr_daddr; | ||
581 | |||
582 | spin_lock(&pdcs->pdc_lock); | ||
583 | |||
584 | /* | ||
585 | * return if a complete response message is not yet ready. | ||
586 | * rxin_numd[rxin] is the number of fragments in the next msg | ||
587 | * to read. | ||
588 | */ | ||
589 | frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, pdcs->nrxpost); | ||
590 | if ((frags_rdy == 0) || (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) { | ||
591 | /* See if the hw has written more fragments than we know */ | ||
592 | pdcs->last_rx_curr = | ||
593 | (ioread32((void *)&pdcs->rxregs_64->status0) & | ||
594 | CRYPTO_D64_RS0_CD_MASK) / RING_ENTRY_SIZE; | ||
595 | frags_rdy = NRXDACTIVE(pdcs->rxin, pdcs->last_rx_curr, | ||
596 | pdcs->nrxpost); | ||
597 | if ((frags_rdy == 0) || | ||
598 | (frags_rdy < pdcs->rxin_numd[pdcs->rxin])) { | ||
599 | /* No response ready */ | ||
600 | spin_unlock(&pdcs->pdc_lock); | ||
601 | return -EAGAIN; | ||
602 | } | ||
603 | /* can't read descriptors/data until write index is read */ | ||
604 | rmb(); | ||
605 | } | ||
606 | |||
607 | num_frags = pdcs->txin_numd[pdcs->txin]; | ||
608 | dma_unmap_sg(dev, pdcs->src_sg[pdcs->txin], | ||
609 | sg_nents(pdcs->src_sg[pdcs->txin]), DMA_TO_DEVICE); | ||
610 | |||
611 | for (i = 0; i < num_frags; i++) | ||
612 | pdcs->txin = NEXTTXD(pdcs->txin, pdcs->ntxpost); | ||
613 | |||
614 | dev_dbg(dev, "PDC %u reclaimed %d tx descriptors", | ||
615 | pdcs->pdc_idx, num_frags); | ||
616 | |||
617 | rx_idx = pdcs->rxin; | ||
618 | num_frags = pdcs->rxin_numd[rx_idx]; | ||
619 | /* Return opaque context with result */ | ||
620 | mssg->ctx = pdcs->rxp_ctx[rx_idx]; | ||
621 | pdcs->rxp_ctx[rx_idx] = NULL; | ||
622 | resp_hdr = pdcs->resp_hdr[rx_idx]; | ||
623 | resp_hdr_daddr = pdcs->resp_hdr_daddr[rx_idx]; | ||
624 | dma_unmap_sg(dev, pdcs->dst_sg[rx_idx], | ||
625 | sg_nents(pdcs->dst_sg[rx_idx]), DMA_FROM_DEVICE); | ||
626 | |||
627 | for (i = 0; i < num_frags; i++) | ||
628 | pdcs->rxin = NEXTRXD(pdcs->rxin, pdcs->nrxpost); | ||
629 | |||
630 | spin_unlock(&pdcs->pdc_lock); | ||
631 | |||
632 | dev_dbg(dev, "PDC %u reclaimed %d rx descriptors", | ||
633 | pdcs->pdc_idx, num_frags); | ||
634 | |||
635 | dev_dbg(dev, | ||
636 | "PDC %u txin %u, txout %u, rxin %u, rxout %u, last_rx_curr %u\n", | ||
637 | pdcs->pdc_idx, pdcs->txin, pdcs->txout, pdcs->rxin, | ||
638 | pdcs->rxout, pdcs->last_rx_curr); | ||
639 | |||
640 | if (pdcs->pdc_resp_hdr_len == PDC_SPUM_RESP_HDR_LEN) { | ||
641 | /* | ||
642 | * For SPU-M, get length of response msg and rx overflow status. | ||
643 | */ | ||
644 | rx_status = *((u32 *)resp_hdr); | ||
645 | len = rx_status & RX_STATUS_LEN; | ||
646 | dev_dbg(dev, | ||
647 | "SPU response length %u bytes", len); | ||
648 | if (unlikely(((rx_status & RX_STATUS_OVERFLOW) || (!len)))) { | ||
649 | if (rx_status & RX_STATUS_OVERFLOW) { | ||
650 | dev_err_ratelimited(dev, | ||
651 | "crypto receive overflow"); | ||
652 | pdcs->rx_oflow++; | ||
653 | } else { | ||
654 | dev_info_ratelimited(dev, "crypto rx len = 0"); | ||
655 | } | ||
656 | return -EIO; | ||
657 | } | ||
658 | } | ||
659 | |||
660 | dma_pool_free(pdcs->rx_buf_pool, resp_hdr, resp_hdr_daddr); | ||
661 | |||
662 | pdcs->pdc_replies++; | ||
663 | /* if we read one or more rx descriptors, claim success */ | ||
664 | if (num_frags > 0) | ||
665 | return PDC_SUCCESS; | ||
666 | else | ||
667 | return -EIO; | ||
668 | } | ||
669 | |||
670 | /** | ||
671 | * pdc_tx_list_sg_add() - Add the buffers in a scatterlist to the transmit | ||
672 | * descriptors for a given SPU. The scatterlist buffers contain the data for a | ||
673 | * SPU request message. | ||
674 | * @pdcs: PDC state for the SPU that will process this request
675 | * @sg: Scatterlist whose buffers contain part of the SPU request | ||
676 | * | ||
677 | * If a scatterlist buffer is larger than PDC_DMA_BUF_MAX, multiple descriptors | ||
678 | * are written for that buffer, each <= PDC_DMA_BUF_MAX bytes in length.
679 | * | ||
680 | * Return: PDC_SUCCESS if successful | ||
681 | * < 0 otherwise | ||
682 | */ | ||
683 | static int pdc_tx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg) | ||
684 | { | ||
685 | u32 flags = 0; | ||
686 | u32 eot; | ||
687 | u32 tx_avail; | ||
688 | |||
689 | /* | ||
690 | * Num descriptors needed. Conservatively assume we need a descriptor | ||
691 | * for every entry in sg. | ||
692 | */ | ||
693 | u32 num_desc; | ||
694 | u32 desc_w = 0; /* Number of tx descriptors written */ | ||
695 | u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */ | ||
696 | dma_addr_t databufptr; /* DMA address to put in descriptor */ | ||
697 | |||
698 | num_desc = (u32)sg_nents(sg); | ||
699 | |||
700 | /* check whether enough tx descriptors are available */ | ||
701 | tx_avail = pdcs->ntxpost - NTXDACTIVE(pdcs->txin, pdcs->txout, | ||
702 | pdcs->ntxpost); | ||
703 | if (unlikely(num_desc > tx_avail)) { | ||
704 | pdcs->txnobuf++; | ||
705 | return -ENOSPC; | ||
706 | } | ||
707 | |||
708 | /* build tx descriptors */ | ||
709 | if (pdcs->tx_msg_start == pdcs->txout) { | ||
710 | /* Start of frame */ | ||
711 | pdcs->txin_numd[pdcs->tx_msg_start] = 0; | ||
712 | pdcs->src_sg[pdcs->txout] = sg; | ||
713 | flags = D64_CTRL1_SOF; | ||
714 | } | ||
715 | |||
716 | while (sg) { | ||
717 | if (unlikely(pdcs->txout == (pdcs->ntxd - 1))) | ||
718 | eot = D64_CTRL1_EOT; | ||
719 | else | ||
720 | eot = 0; | ||
721 | |||
722 | /* | ||
723 | * If sg buffer larger than PDC limit, split across | ||
724 | * multiple descriptors | ||
725 | */ | ||
726 | bufcnt = sg_dma_len(sg); | ||
727 | databufptr = sg_dma_address(sg); | ||
728 | while (bufcnt > PDC_DMA_BUF_MAX) { | ||
729 | pdc_build_txd(pdcs, databufptr, PDC_DMA_BUF_MAX, | ||
730 | flags | eot); | ||
731 | desc_w++; | ||
732 | bufcnt -= PDC_DMA_BUF_MAX; | ||
733 | databufptr += PDC_DMA_BUF_MAX; | ||
734 | if (unlikely(pdcs->txout == (pdcs->ntxd - 1))) | ||
735 | eot = D64_CTRL1_EOT; | ||
736 | else | ||
737 | eot = 0; | ||
738 | } | ||
739 | sg = sg_next(sg); | ||
740 | if (!sg) | ||
741 | /* Writing last descriptor for frame */ | ||
742 | flags |= (D64_CTRL1_EOF | D64_CTRL1_IOC); | ||
743 | pdc_build_txd(pdcs, databufptr, bufcnt, flags | eot); | ||
744 | desc_w++; | ||
745 | /* Clear start of frame after first descriptor */ | ||
746 | flags &= ~D64_CTRL1_SOF; | ||
747 | } | ||
748 | pdcs->txin_numd[pdcs->tx_msg_start] += desc_w; | ||
749 | |||
750 | return PDC_SUCCESS; | ||
751 | } | ||
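/*
 * Worked example of the split above (editor's note): one 40000-byte
 * scatterlist entry becomes three tx descriptors of 16384, 16384 and
 * 7232 bytes (PDC_DMA_BUF_MAX = 16384), with EOF and IOC set only on
 * the final descriptor of the frame.
 */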
752 | |||
753 | /** | ||
754 | * pdc_tx_list_final() - Initiate DMA transfer of last frame written to tx | ||
755 | * ring. | ||
756 | * @pdcs: PDC state for SPU to process the request | ||
757 | * | ||
758 | * Sets the index of the last descriptor written in both the rx and tx ring. | ||
759 | * | ||
760 | * Return: PDC_SUCCESS | ||
761 | */ | ||
762 | static int pdc_tx_list_final(struct pdc_state *pdcs) | ||
763 | { | ||
764 | /* | ||
765 | * write barrier to ensure all register writes are complete | ||
766 | * before chip starts to process new request | ||
767 | */ | ||
768 | wmb(); | ||
769 | iowrite32(pdcs->rxout << 4, (void *)&pdcs->rxregs_64->ptr); | ||
770 | iowrite32(pdcs->txout << 4, (void *)&pdcs->txregs_64->ptr); | ||
771 | pdcs->pdc_requests++; | ||
772 | |||
773 | return PDC_SUCCESS; | ||
774 | } | ||
775 | |||
776 | /** | ||
777 | * pdc_rx_list_init() - Start a new receive descriptor list for a given PDC. | ||
778 | * @pdcs: PDC state for SPU handling request | ||
779 | * @dst_sg: scatterlist providing rx buffers for response to be returned to | ||
780 | * mailbox client | ||
781 | * @ctx: Opaque context for this request | ||
782 | * | ||
783 | * Posts a single receive descriptor to hold the metadata that precedes a | ||
784 | * response. For example, with SPU-M, the metadata is a 32-byte DMA header and | ||
785 | * an 8-byte BCM header. Moves the msg_start descriptor indexes for both tx and | ||
786 | * rx to indicate the start of a new message. | ||
787 | * | ||
788 | * Return: PDC_SUCCESS if successful | ||
789 | * < 0 if an error (e.g., rx ring is full) | ||
790 | */ | ||
791 | static int pdc_rx_list_init(struct pdc_state *pdcs, struct scatterlist *dst_sg, | ||
792 | void *ctx) | ||
793 | { | ||
794 | u32 flags = 0; | ||
795 | u32 rx_avail; | ||
796 | u32 rx_pkt_cnt = 1; /* Adding a single rx buffer */ | ||
797 | dma_addr_t daddr; | ||
798 | void *vaddr; | ||
799 | |||
800 | rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout, | ||
801 | pdcs->nrxpost); | ||
802 | if (unlikely(rx_pkt_cnt > rx_avail)) { | ||
803 | pdcs->rxnobuf++; | ||
804 | return -ENOSPC; | ||
805 | } | ||
806 | |||
807 | /* allocate a buffer for the dma rx status */ | ||
808 | vaddr = dma_pool_zalloc(pdcs->rx_buf_pool, GFP_ATOMIC, &daddr); | ||
809 | if (!vaddr) | ||
810 | return -ENOMEM; | ||
811 | |||
812 | /* | ||
813 | * Update msg_start indexes for both tx and rx to indicate the start | ||
814 | * of a new sequence of descriptor indexes that contain the fragments | ||
815 | * of the same message. | ||
816 | */ | ||
817 | pdcs->rx_msg_start = pdcs->rxout; | ||
818 | pdcs->tx_msg_start = pdcs->txout; | ||
819 | |||
820 | /* This is always the first descriptor in the receive sequence */ | ||
821 | flags = D64_CTRL1_SOF; | ||
822 | pdcs->rxin_numd[pdcs->rx_msg_start] = 1; | ||
823 | |||
824 | if (unlikely(pdcs->rxout == (pdcs->nrxd - 1))) | ||
825 | flags |= D64_CTRL1_EOT; | ||
826 | |||
827 | pdcs->rxp_ctx[pdcs->rxout] = ctx; | ||
828 | pdcs->dst_sg[pdcs->rxout] = dst_sg; | ||
829 | pdcs->resp_hdr[pdcs->rxout] = vaddr; | ||
830 | pdcs->resp_hdr_daddr[pdcs->rxout] = daddr; | ||
831 | pdc_build_rxd(pdcs, daddr, pdcs->pdc_resp_hdr_len, flags); | ||
832 | return PDC_SUCCESS; | ||
833 | } | ||
834 | |||
835 | /** | ||
836 | * pdc_rx_list_sg_add() - Add the buffers in a scatterlist to the receive | ||
837 | * descriptors for a given SPU. The caller must have already DMA mapped the | ||
838 | * scatterlist. | ||
839 | * @pdcs: PDC state for the SPU that will process this request
840 | * @sg: Scatterlist whose buffers are added to the receive ring | ||
841 | * | ||
842 | * If a receive buffer in the scatterlist is larger than PDC_DMA_BUF_MAX, | ||
843 | * multiple receive descriptors are written, each with a buffer <= | ||
844 | * PDC_DMA_BUF_MAX. | ||
845 | * | ||
846 | * Return: PDC_SUCCESS if successful | ||
847 | * < 0 otherwise (e.g., receive ring is full) | ||
848 | */ | ||
849 | static int pdc_rx_list_sg_add(struct pdc_state *pdcs, struct scatterlist *sg) | ||
850 | { | ||
851 | u32 flags = 0; | ||
852 | u32 rx_avail; | ||
853 | |||
854 | /* | ||
855 | * Num descriptors needed. Conservatively assume we need a descriptor | ||
856 | * for every entry from our starting point in the scatterlist. | ||
857 | */ | ||
858 | u32 num_desc; | ||
859 | u32 desc_w = 0; /* Number of rx descriptors written */
860 | u32 bufcnt; /* Number of bytes of buffer pointed to by descriptor */ | ||
861 | dma_addr_t databufptr; /* DMA address to put in descriptor */ | ||
862 | |||
863 | num_desc = (u32)sg_nents(sg); | ||
864 | |||
865 | rx_avail = pdcs->nrxpost - NRXDACTIVE(pdcs->rxin, pdcs->rxout, | ||
866 | pdcs->nrxpost); | ||
867 | if (unlikely(num_desc > rx_avail)) { | ||
868 | pdcs->rxnobuf++; | ||
869 | return -ENOSPC; | ||
870 | } | ||
871 | |||
872 | while (sg) { | ||
873 | if (unlikely(pdcs->rxout == (pdcs->nrxd - 1))) | ||
874 | flags = D64_CTRL1_EOT; | ||
875 | else | ||
876 | flags = 0; | ||
877 | |||
878 | /* | ||
879 | * If sg buffer larger than PDC limit, split across | ||
880 | * multiple descriptors | ||
881 | */ | ||
882 | bufcnt = sg_dma_len(sg); | ||
883 | databufptr = sg_dma_address(sg); | ||
884 | while (bufcnt > PDC_DMA_BUF_MAX) { | ||
885 | pdc_build_rxd(pdcs, databufptr, PDC_DMA_BUF_MAX, flags); | ||
886 | desc_w++; | ||
887 | bufcnt -= PDC_DMA_BUF_MAX; | ||
888 | databufptr += PDC_DMA_BUF_MAX; | ||
889 | if (unlikely(pdcs->rxout == (pdcs->nrxd - 1))) | ||
890 | flags = D64_CTRL1_EOT; | ||
891 | else | ||
892 | flags = 0; | ||
893 | } | ||
894 | pdc_build_rxd(pdcs, databufptr, bufcnt, flags); | ||
895 | desc_w++; | ||
896 | sg = sg_next(sg); | ||
897 | } | ||
898 | pdcs->rxin_numd[pdcs->rx_msg_start] += desc_w; | ||
899 | |||
900 | return PDC_SUCCESS; | ||
901 | } | ||
902 | |||
903 | /** | ||
904 | * pdc_irq_handler() - Interrupt handler called in interrupt context. | ||
905 | * @irq: Interrupt number that has fired | ||
906 | * @cookie: PDC state for DMA engine that generated the interrupt | ||
907 | * | ||
908 | * We have to clear the device interrupt status flags here. So cache the | ||
909 | * status for later use in the thread function. Other than that, just return | ||
910 | * WAKE_THREAD to invoke the thread function. | ||
911 | * | ||
912 | * Return: IRQ_WAKE_THREAD if interrupt is ours | ||
913 | * IRQ_NONE otherwise | ||
914 | */ | ||
915 | static irqreturn_t pdc_irq_handler(int irq, void *cookie) | ||
916 | { | ||
917 | struct pdc_state *pdcs = cookie; | ||
918 | u32 intstatus = ioread32(pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET); | ||
919 | |||
920 | if (intstatus & PDC_XMTINTEN_0) | ||
921 | set_bit(PDC_XMTINT_0, &pdcs->intstatus); | ||
922 | if (intstatus & PDC_RCVINTEN_0) | ||
923 | set_bit(PDC_RCVINT_0, &pdcs->intstatus); | ||
924 | |||
925 | /* Clear interrupt flags in device */ | ||
926 | iowrite32(intstatus, pdcs->pdc_reg_vbase + PDC_INTSTATUS_OFFSET); | ||
927 | |||
928 | /* Wakeup IRQ thread */ | ||
929 | if (pdcs && (irq == pdcs->pdc_irq) && (intstatus & PDC_INTMASK)) | ||
930 | return IRQ_WAKE_THREAD; | ||
931 | |||
932 | return IRQ_NONE; | ||
933 | } | ||
934 | |||
935 | /** | ||
936 | * pdc_irq_thread() - Function invoked on deferred thread when a DMA tx has | ||
937 | * completed or data is available to receive. | ||
938 | * @irq: Interrupt number | ||
939 | * @cookie: PDC state for PDC that generated the interrupt | ||
940 | * | ||
941 | * On DMA tx complete, notify the mailbox client. On DMA rx complete, process | ||
942 | * as many SPU response messages as are available and send each to the mailbox | ||
943 | * client. | ||
944 | * | ||
945 | * Return: IRQ_HANDLED if we recognized and handled the interrupt | ||
946 | * IRQ_NONE otherwise | ||
947 | */ | ||
948 | static irqreturn_t pdc_irq_thread(int irq, void *cookie) | ||
949 | { | ||
950 | struct pdc_state *pdcs = cookie; | ||
951 | struct mbox_controller *mbc; | ||
952 | struct mbox_chan *chan; | ||
953 | bool tx_int; | ||
954 | bool rx_int; | ||
955 | int rx_status; | ||
956 | struct brcm_message mssg; | ||
957 | |||
958 | tx_int = test_and_clear_bit(PDC_XMTINT_0, &pdcs->intstatus); | ||
959 | rx_int = test_and_clear_bit(PDC_RCVINT_0, &pdcs->intstatus); | ||
960 | |||
961 | if (pdcs && (tx_int || rx_int)) { | ||
962 | dev_dbg(&pdcs->pdev->dev, | ||
963 | "%s() got irq %d with tx_int %s, rx_int %s", | ||
964 | __func__, irq, | ||
965 | tx_int ? "set" : "clear", rx_int ? "set" : "clear"); | ||
966 | |||
967 | mbc = &pdcs->mbc; | ||
968 | chan = &mbc->chans[0]; | ||
969 | |||
970 | if (tx_int) { | ||
971 | dev_dbg(&pdcs->pdev->dev, "%s(): tx done", __func__); | ||
972 | /* only one frame in flight at a time */ | ||
973 | mbox_chan_txdone(chan, PDC_SUCCESS); | ||
974 | } | ||
975 | if (rx_int) { | ||
976 | while (1) { | ||
977 | /* Could be many frames ready */ | ||
978 | memset(&mssg, 0, sizeof(mssg)); | ||
979 | mssg.type = BRCM_MESSAGE_SPU; | ||
980 | rx_status = pdc_receive(pdcs, &mssg); | ||
981 | if (rx_status >= 0) { | ||
982 | dev_dbg(&pdcs->pdev->dev, | ||
983 | "%s(): invoking client rx cb", | ||
984 | __func__); | ||
985 | mbox_chan_received_data(chan, &mssg); | ||
986 | } else { | ||
987 | dev_dbg(&pdcs->pdev->dev, | ||
988 | "%s(): no SPU response available", | ||
989 | __func__); | ||
990 | break; | ||
991 | } | ||
992 | } | ||
993 | } | ||
994 | return IRQ_HANDLED; | ||
995 | } | ||
996 | return IRQ_NONE; | ||
997 | } | ||
998 | |||
999 | /** | ||
1000 | * pdc_ring_init() - Allocate DMA rings and initialize constant fields of | ||
1001 | * descriptors in one ringset. | ||
1002 | * @pdcs: PDC instance state | ||
1003 | * @ringset: index of ringset being used | ||
1004 | * | ||
1005 | * Return: PDC_SUCCESS if ring initialized | ||
1006 | * < 0 otherwise | ||
1007 | */ | ||
1008 | static int pdc_ring_init(struct pdc_state *pdcs, int ringset) | ||
1009 | { | ||
1010 | int i; | ||
1011 | int err = PDC_SUCCESS; | ||
1012 | struct dma64 *dma_reg; | ||
1013 | struct device *dev = &pdcs->pdev->dev; | ||
1014 | struct pdc_ring_alloc tx; | ||
1015 | struct pdc_ring_alloc rx; | ||
1016 | |||
1017 | /* Allocate tx ring */ | ||
1018 | tx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &tx.dmabase); | ||
1019 | if (!tx.vbase) { | ||
1020 | err = -ENOMEM; | ||
1021 | goto done; | ||
1022 | } | ||
1023 | |||
1024 | /* Allocate rx ring */ | ||
1025 | rx.vbase = dma_pool_zalloc(pdcs->ring_pool, GFP_KERNEL, &rx.dmabase); | ||
1026 | if (!rx.vbase) { | ||
1027 | err = -ENOMEM; | ||
1028 | goto fail_dealloc; | ||
1029 | } | ||
1030 | |||
1031 | dev_dbg(dev, " - base DMA addr of tx ring %pad", &tx.dmabase); | ||
1032 | dev_dbg(dev, " - base virtual addr of tx ring %p", tx.vbase); | ||
1033 | dev_dbg(dev, " - base DMA addr of rx ring %pad", &rx.dmabase); | ||
1034 | dev_dbg(dev, " - base virtual addr of rx ring %p", rx.vbase); | ||
1035 | |||
1036 | /* lock after ring allocation to avoid scheduling while atomic */ | ||
1037 | spin_lock(&pdcs->pdc_lock); | ||
1038 | |||
1039 | memcpy(&pdcs->tx_ring_alloc, &tx, sizeof(tx)); | ||
1040 | memcpy(&pdcs->rx_ring_alloc, &rx, sizeof(rx)); | ||
1041 | |||
1042 | pdcs->rxin = 0; | ||
1043 | pdcs->rx_msg_start = 0; | ||
1044 | pdcs->last_rx_curr = 0; | ||
1045 | pdcs->rxout = 0; | ||
1046 | pdcs->txin = 0; | ||
1047 | pdcs->tx_msg_start = 0; | ||
1048 | pdcs->txout = 0; | ||
1049 | |||
1050 | /* Set descriptor array base addresses */ | ||
1051 | pdcs->txd_64 = (struct dma64dd *)pdcs->tx_ring_alloc.vbase; | ||
1052 | pdcs->rxd_64 = (struct dma64dd *)pdcs->rx_ring_alloc.vbase; | ||
1053 | |||
1054 | /* Tell device the base DMA address of each ring */ | ||
1055 | dma_reg = &pdcs->regs->dmaregs[ringset]; | ||
1056 | iowrite32(lower_32_bits(pdcs->tx_ring_alloc.dmabase), | ||
1057 | (void *)&dma_reg->dmaxmt.addrlow); | ||
1058 | iowrite32(upper_32_bits(pdcs->tx_ring_alloc.dmabase), | ||
1059 | (void *)&dma_reg->dmaxmt.addrhigh); | ||
1060 | |||
1061 | iowrite32(lower_32_bits(pdcs->rx_ring_alloc.dmabase), | ||
1062 | (void *)&dma_reg->dmarcv.addrlow); | ||
1063 | iowrite32(upper_32_bits(pdcs->rx_ring_alloc.dmabase), | ||
1064 | (void *)&dma_reg->dmarcv.addrhigh); | ||
1065 | |||
1066 | /* Initialize descriptors */ | ||
1067 | for (i = 0; i < PDC_RING_ENTRIES; i++) { | ||
1068 | /* Every tx descriptor can be used for start of frame. */ | ||
1069 | if (i != pdcs->ntxpost) { | ||
1070 | iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF, | ||
1071 | (void *)&pdcs->txd_64[i].ctrl1); | ||
1072 | } else { | ||
1073 | /* Last descriptor in ringset. Set End of Table. */ | ||
1074 | iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOF | | ||
1075 | D64_CTRL1_EOT, | ||
1076 | (void *)&pdcs->txd_64[i].ctrl1); | ||
1077 | } | ||
1078 | |||
1079 | /* Every rx descriptor can be used for start of frame */ | ||
1080 | if (i != pdcs->nrxpost) { | ||
1081 | iowrite32(D64_CTRL1_SOF, | ||
1082 | (void *)&pdcs->rxd_64[i].ctrl1); | ||
1083 | } else { | ||
1084 | /* Last descriptor in ringset. Set End of Table. */ | ||
1085 | iowrite32(D64_CTRL1_SOF | D64_CTRL1_EOT, | ||
1086 | (void *)&pdcs->rxd_64[i].ctrl1); | ||
1087 | } | ||
1088 | } | ||
1089 | spin_unlock(&pdcs->pdc_lock); | ||
1090 | return PDC_SUCCESS; | ||
1091 | |||
1092 | fail_dealloc: | ||
1093 | dma_pool_free(pdcs->ring_pool, tx.vbase, tx.dmabase); | ||
1094 | done: | ||
1095 | return err; | ||
1096 | } | ||
1097 | |||
1098 | static void pdc_ring_free(struct pdc_state *pdcs) | ||
1099 | { | ||
1100 | if (pdcs->tx_ring_alloc.vbase) { | ||
1101 | dma_pool_free(pdcs->ring_pool, pdcs->tx_ring_alloc.vbase, | ||
1102 | pdcs->tx_ring_alloc.dmabase); | ||
1103 | pdcs->tx_ring_alloc.vbase = NULL; | ||
1104 | } | ||
1105 | |||
1106 | if (pdcs->rx_ring_alloc.vbase) { | ||
1107 | dma_pool_free(pdcs->ring_pool, pdcs->rx_ring_alloc.vbase, | ||
1108 | pdcs->rx_ring_alloc.dmabase); | ||
1109 | pdcs->rx_ring_alloc.vbase = NULL; | ||
1110 | } | ||
1111 | } | ||
1112 | |||
1113 | /** | ||
1114 | * pdc_send_data() - mailbox send_data function | ||
1115 | * @chan: The mailbox channel on which the data is sent. The channel | ||
1116 | * corresponds to a DMA ringset. | ||
1117 | * @data: The mailbox message to be sent. The message must be a | ||
1118 | * brcm_message structure. | ||
1119 | * | ||
1120 | * This function is registered as the send_data function for the mailbox | ||
1121 | * controller. From the destination scatterlist in the mailbox message, it | ||
1122 | * creates a sequence of receive descriptors in the rx ring. From the source | ||
1123 | * scatterlist, it creates a sequence of transmit descriptors in the tx ring. | ||
1124 | * After creating the descriptors, it writes the rx ptr and tx ptr registers to | ||
1125 | * initiate the DMA transfer. | ||
1126 | * | ||
1127 | * This function does the DMA map and unmap of the src and dst scatterlists in | ||
1128 | * the mailbox message. | ||
1129 | * | ||
1130 | * Return: 0 if successful | ||
1131 | * -ENOTSUPP if the mailbox message is a type this driver does not | ||
1132 | * support | ||
1133 | * < 0 if an error | ||
1134 | */ | ||
1135 | static int pdc_send_data(struct mbox_chan *chan, void *data) | ||
1136 | { | ||
1137 | struct pdc_state *pdcs = chan->con_priv; | ||
1138 | struct device *dev = &pdcs->pdev->dev; | ||
1139 | struct brcm_message *mssg = data; | ||
1140 | int err = PDC_SUCCESS; | ||
1141 | int src_nent; | ||
1142 | int dst_nent; | ||
1143 | int nent; | ||
1144 | |||
1145 | if (mssg->type != BRCM_MESSAGE_SPU) | ||
1146 | return -ENOTSUPP; | ||
1147 | |||
1148 | src_nent = sg_nents(mssg->spu.src); | ||
1149 | if (src_nent) { | ||
1150 | nent = dma_map_sg(dev, mssg->spu.src, src_nent, DMA_TO_DEVICE); | ||
1151 | if (nent == 0) | ||
1152 | return -EIO; | ||
1153 | } | ||
1154 | |||
1155 | dst_nent = sg_nents(mssg->spu.dst); | ||
1156 | if (dst_nent) { | ||
1157 | nent = dma_map_sg(dev, mssg->spu.dst, dst_nent, | ||
1158 | DMA_FROM_DEVICE); | ||
1159 | if (nent == 0) { | ||
1160 | dma_unmap_sg(dev, mssg->spu.src, src_nent, | ||
1161 | DMA_TO_DEVICE); | ||
1162 | return -EIO; | ||
1163 | } | ||
1164 | } | ||
1165 | |||
1166 | spin_lock(&pdcs->pdc_lock); | ||
1167 | |||
1168 | /* Create rx descriptors to catch the SPU response */
1169 | err = pdc_rx_list_init(pdcs, mssg->spu.dst, mssg->ctx); | ||
1170 | err |= pdc_rx_list_sg_add(pdcs, mssg->spu.dst); | ||
1171 | |||
1172 | /* Create tx descriptors to submit SPU request */ | ||
1173 | err |= pdc_tx_list_sg_add(pdcs, mssg->spu.src); | ||
1174 | err |= pdc_tx_list_final(pdcs); /* initiate transfer */ | ||
1175 | |||
1176 | spin_unlock(&pdcs->pdc_lock); | ||
1177 | |||
1178 | if (err) | ||
1179 | dev_err(&pdcs->pdev->dev, | ||
1180 | "%s failed with error %d", __func__, err); | ||
1181 | |||
1182 | return err; | ||
1183 | } | ||
1184 | |||
1185 | static int pdc_startup(struct mbox_chan *chan) | ||
1186 | { | ||
1187 | return pdc_ring_init(chan->con_priv, PDC_RINGSET); | ||
1188 | } | ||
1189 | |||
1190 | static void pdc_shutdown(struct mbox_chan *chan) | ||
1191 | { | ||
1192 | struct pdc_state *pdcs = chan->con_priv; | ||
1193 | |||
1194 | if (pdcs) | ||
1195 | dev_dbg(&pdcs->pdev->dev, | ||
1196 | "Shutdown mailbox channel for PDC %u", pdcs->pdc_idx); | ||
1197 | |||
1198 | pdc_ring_free(pdcs); | ||
1199 | } | ||
1200 | |||
1201 | /** | ||
1202 | * pdc_hw_init() - Use the given initialization parameters to initialize the | ||
1203 | * state for one of the PDCs. | ||
1204 | * @pdcs: state of the PDC | ||
1205 | */ | ||
1206 | static | ||
1207 | void pdc_hw_init(struct pdc_state *pdcs) | ||
1208 | { | ||
1209 | struct platform_device *pdev; | ||
1210 | struct device *dev; | ||
1211 | struct dma64 *dma_reg; | ||
1212 | int ringset = PDC_RINGSET; | ||
1213 | |||
1214 | pdev = pdcs->pdev; | ||
1215 | dev = &pdev->dev; | ||
1216 | |||
1217 | dev_dbg(dev, "PDC %u initial values:", pdcs->pdc_idx); | ||
1218 | dev_dbg(dev, "state structure: %p", | ||
1219 | pdcs); | ||
1220 | dev_dbg(dev, " - base virtual addr of hw regs %p", | ||
1221 | pdcs->pdc_reg_vbase); | ||
1222 | |||
1223 | /* initialize data structures */ | ||
1224 | pdcs->regs = (struct pdc_regs *)pdcs->pdc_reg_vbase; | ||
1225 | pdcs->txregs_64 = (struct dma64_regs *) | ||
1226 | (void *)(((u8 *)pdcs->pdc_reg_vbase) + | ||
1227 | PDC_TXREGS_OFFSET + (sizeof(struct dma64) * ringset)); | ||
1228 | pdcs->rxregs_64 = (struct dma64_regs *) | ||
1229 | (void *)(((u8 *)pdcs->pdc_reg_vbase) + | ||
1230 | PDC_RXREGS_OFFSET + (sizeof(struct dma64) * ringset)); | ||
1231 | |||
1232 | pdcs->ntxd = PDC_RING_ENTRIES; | ||
1233 | pdcs->nrxd = PDC_RING_ENTRIES; | ||
1234 | pdcs->ntxpost = PDC_RING_ENTRIES - 1; | ||
1235 | pdcs->nrxpost = PDC_RING_ENTRIES - 1; | ||
1236 | pdcs->regs->intmask = 0; | ||
1237 | |||
1238 | dma_reg = &pdcs->regs->dmaregs[ringset]; | ||
1239 | iowrite32(0, (void *)&dma_reg->dmaxmt.ptr); | ||
1240 | iowrite32(0, (void *)&dma_reg->dmarcv.ptr); | ||
1241 | |||
1242 | iowrite32(PDC_TX_CTL, (void *)&dma_reg->dmaxmt.control); | ||
1243 | |||
1244 | iowrite32(PDC_RX_CTL + (pdcs->rx_status_len << 1), | ||
1245 | (void *)&dma_reg->dmarcv.control); | ||
1246 | |||
1247 | if (pdcs->pdc_resp_hdr_len == PDC_SPU2_RESP_HDR_LEN) | ||
1248 | iowrite32(PDC_CKSUM_CTRL, | ||
1249 | pdcs->pdc_reg_vbase + PDC_CKSUM_CTRL_OFFSET); | ||
1250 | } | ||
1251 | |||
1252 | /** | ||
1253 | * pdc_rx_buf_pool_create() - Create a pool of receive buffers used to catch
1254 | * the metadata header returned with each response message.
1255 | * @pdcs: PDC state structure | ||
1256 | * | ||
1257 | * The metadata is not returned to the mailbox client. So the PDC driver | ||
1258 | * manages these buffers. | ||
1259 | * | ||
1260 | * Return: PDC_SUCCESS | ||
1261 | * -ENOMEM if pool creation fails | ||
1262 | */ | ||
1263 | static int pdc_rx_buf_pool_create(struct pdc_state *pdcs) | ||
1264 | { | ||
1265 | struct platform_device *pdev; | ||
1266 | struct device *dev; | ||
1267 | |||
1268 | pdev = pdcs->pdev; | ||
1269 | dev = &pdev->dev; | ||
1270 | |||
1271 | pdcs->pdc_resp_hdr_len = pdcs->rx_status_len; | ||
1272 | if (pdcs->use_bcm_hdr) | ||
1273 | pdcs->pdc_resp_hdr_len += BCM_HDR_LEN; | ||
1274 | |||
1275 | pdcs->rx_buf_pool = dma_pool_create("pdc rx bufs", dev, | ||
1276 | pdcs->pdc_resp_hdr_len, | ||
1277 | RX_BUF_ALIGN, 0); | ||
1278 | if (!pdcs->rx_buf_pool) | ||
1279 | return -ENOMEM; | ||
1280 | |||
1281 | return PDC_SUCCESS; | ||
1282 | } | ||
1283 | |||
1284 | /** | ||
1285 | * pdc_interrupts_init() - Initialize the interrupt configuration for a PDC and | ||
1286 | * specify a threaded IRQ handler for deferred handling of interrupts outside of | ||
1287 | * interrupt context. | ||
1288 | * @pdcs: PDC state | ||
1289 | * | ||
1290 | * Set the interrupt mask for transmit and receive done. | ||
1291 | * Set the lazy interrupt frame count to generate an interrupt for just one pkt. | ||
1292 | * | ||
1293 | * Return: PDC_SUCCESS | ||
1294 | * <0 if threaded irq request fails | ||
1295 | */ | ||
1296 | static int pdc_interrupts_init(struct pdc_state *pdcs) | ||
1297 | { | ||
1298 | struct platform_device *pdev = pdcs->pdev; | ||
1299 | struct device *dev = &pdev->dev; | ||
1300 | struct device_node *dn = pdev->dev.of_node; | ||
1301 | int err; | ||
1302 | |||
1303 | pdcs->intstatus = 0; | ||
1304 | |||
1305 | /* interrupt configuration */ | ||
1306 | iowrite32(PDC_INTMASK, pdcs->pdc_reg_vbase + PDC_INTMASK_OFFSET); | ||
1307 | iowrite32(PDC_LAZY_INT, pdcs->pdc_reg_vbase + PDC_RCVLAZY0_OFFSET); | ||
1308 | |||
1309 | /* read irq from device tree */ | ||
1310 | pdcs->pdc_irq = irq_of_parse_and_map(dn, 0); | ||
1311 | dev_dbg(dev, "pdc device %s irq %u for pdcs %p", | ||
1312 | dev_name(dev), pdcs->pdc_irq, pdcs); | ||
1313 | err = devm_request_threaded_irq(dev, pdcs->pdc_irq, | ||
1314 | pdc_irq_handler, | ||
1315 | pdc_irq_thread, 0, dev_name(dev), pdcs); | ||
1316 | if (err) { | ||
1317 | dev_err(dev, "threaded tx IRQ %u request failed with err %d\n", | ||
1318 | pdcs->pdc_irq, err); | ||
1319 | return err; | ||
1320 | } | ||
1321 | return PDC_SUCCESS; | ||
1322 | } | ||
1323 | |||
1324 | static const struct mbox_chan_ops pdc_mbox_chan_ops = { | ||
1325 | .send_data = pdc_send_data, | ||
1326 | .startup = pdc_startup, | ||
1327 | .shutdown = pdc_shutdown | ||
1328 | }; | ||
1329 | |||
1330 | /** | ||
1331 | * pdc_mb_init() - Initialize the mailbox controller. | ||
1332 | * @pdcs: PDC state | ||
1333 | * | ||
1334 | * Each PDC is a mailbox controller. Each ringset is a mailbox channel. The | ||
1335 | * kernel driver uses only one ringset and thus one mailbox channel. The PDC | ||
1336 | * uses the transmit-complete interrupt to determine when a mailbox message | ||
1337 | * has been successfully transmitted. | ||
1338 | * | ||
1339 | * Return: 0 on success | ||
1340 | * < 0 if there is an allocation or registration failure | ||
1341 | */ | ||
1342 | static int pdc_mb_init(struct pdc_state *pdcs) | ||
1343 | { | ||
1344 | struct device *dev = &pdcs->pdev->dev; | ||
1345 | struct mbox_controller *mbc; | ||
1346 | int chan_index; | ||
1347 | int err; | ||
1348 | |||
1349 | mbc = &pdcs->mbc; | ||
1350 | mbc->dev = dev; | ||
1351 | mbc->ops = &pdc_mbox_chan_ops; | ||
1352 | mbc->num_chans = 1; | ||
1353 | mbc->chans = devm_kcalloc(dev, mbc->num_chans, sizeof(*mbc->chans), | ||
1354 | GFP_KERNEL); | ||
1355 | if (!mbc->chans) | ||
1356 | return -ENOMEM; | ||
1357 | |||
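/*
 * txdone_irq tells the mailbox core that this controller signals
 * completion from its transmit-complete interrupt path (by calling
 * mbox_chan_txdone()), so the core need not poll for tx-done.
 */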
1358 | mbc->txdone_irq = true; | ||
1359 | mbc->txdone_poll = false; | ||
1360 | for (chan_index = 0; chan_index < mbc->num_chans; chan_index++) | ||
1361 | mbc->chans[chan_index].con_priv = pdcs; | ||
1362 | |||
1363 | /* Register mailbox controller */ | ||
1364 | err = mbox_controller_register(mbc); | ||
1365 | if (err) { | ||
1366 | dev_crit(dev, | ||
1367 | "Failed to register PDC mailbox controller. Error %d.", | ||
1368 | err); | ||
1369 | return err; | ||
1370 | } | ||
1371 | return 0; | ||
1372 | } | ||
1373 | |||
1374 | /** | ||
1375 | * pdc_dt_read() - Read application-specific data from device tree. | ||
1376 | * @pdev: Platform device | ||
1377 | * @pdcs: PDC state | ||
1378 | * | ||
1379 | * Reads the number of bytes of receive status that precede each received frame. | ||
1380 | * Reads whether transmitted and received frames should be preceded by an | ||
1381 | * 8-byte BCM header. | ||
1382 | * | ||
1383 | * Return: 0 if successful | ||
1384 | * < 0 if the required brcm,rx-status-len property is missing | ||
1385 | */ | ||
1386 | static int pdc_dt_read(struct platform_device *pdev, struct pdc_state *pdcs) | ||
1387 | { | ||
1388 | struct device *dev = &pdev->dev; | ||
1389 | struct device_node *dn = pdev->dev.of_node; | ||
1390 | int err; | ||
1391 | |||
1392 | err = of_property_read_u32(dn, "brcm,rx-status-len", | ||
1393 | &pdcs->rx_status_len); | ||
1394 | if (err < 0) { | ||
1395 | dev_err(dev, "failed to read brcm,rx-status-len property\n"); | ||
1396 | return err; | ||
1397 | } | ||
1398 | |||
1399 | pdcs->use_bcm_hdr = of_property_read_bool(dn, "brcm,use-bcm-hdr"); | ||
1400 | |||
1401 | return 0; | ||
1402 | } | ||
1403 | |||
1404 | /** | ||
1405 | * pdc_probe() - Probe function for PDC driver. | ||
1406 | * @pdev: PDC platform device | ||
1407 | * | ||
1408 | * Reserve and map register regions defined in device tree. | ||
1409 | * Allocate and initialize tx and rx DMA rings. | ||
1410 | * Initialize a mailbox controller for each PDC. | ||
1411 | * | ||
1412 | * Return: 0 if successful | ||
1413 | * < 0 if an error | ||
1414 | */ | ||
1415 | static int pdc_probe(struct platform_device *pdev) | ||
1416 | { | ||
1417 | int err = 0; | ||
1418 | struct device *dev = &pdev->dev; | ||
1419 | struct resource *pdc_regs; | ||
1420 | struct pdc_state *pdcs; | ||
1421 | |||
1422 | /* PDC state for one SPU */ | ||
1423 | pdcs = devm_kzalloc(dev, sizeof(*pdcs), GFP_KERNEL); | ||
1424 | if (!pdcs) { | ||
1425 | err = -ENOMEM; | ||
1426 | goto cleanup; | ||
1427 | } | ||
1428 | |||
1429 | spin_lock_init(&pdcs->pdc_lock); | ||
1430 | pdcs->pdev = pdev; | ||
1431 | platform_set_drvdata(pdev, pdcs); | ||
1432 | pdcs->pdc_idx = pdcg.num_spu; | ||
1433 | pdcg.num_spu++; | ||
1434 | |||
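/*
 * Limit streaming and coherent DMA to 32-bit addresses, on the
 * assumption that the PDC descriptors carry 32-bit DMA addresses.
 */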
1435 | err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); | ||
1436 | if (err) { | ||
1437 | dev_warn(dev, "PDC device cannot perform DMA. Error %d.\n", err); | ||
1438 | goto cleanup; | ||
1439 | } | ||
1440 | |||
1441 | /* Create DMA pool for tx ring */ | ||
1442 | pdcs->ring_pool = dma_pool_create("pdc rings", dev, PDC_RING_SIZE, | ||
1443 | RING_ALIGN, 0); | ||
1444 | if (!pdcs->ring_pool) { | ||
1445 | err = -ENOMEM; | ||
1446 | goto cleanup; | ||
1447 | } | ||
1448 | |||
1449 | err = pdc_dt_read(pdev, pdcs); | ||
1450 | if (err) | ||
1451 | goto cleanup_ring_pool; | ||
1452 | |||
1453 | pdc_regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1454 | if (!pdc_regs) { | ||
1455 | err = -ENODEV; | ||
1456 | goto cleanup_ring_pool; | ||
1457 | } | ||
1458 | dev_dbg(dev, "PDC register region res.start = %pa, res.end = %pa\n", | ||
1459 | &pdc_regs->start, &pdc_regs->end); | ||
1460 | |||
1461 | pdcs->pdc_reg_vbase = devm_ioremap_resource(dev, pdc_regs); | ||
1462 | if (IS_ERR(pdcs->pdc_reg_vbase)) { | ||
1463 | err = PTR_ERR(pdcs->pdc_reg_vbase); | ||
1464 | dev_err(dev, "Failed to map registers: %d\n", err); | ||
1465 | goto cleanup_ring_pool; | ||
1466 | } | ||
1467 | |||
1468 | /* create rx buffer pool after dt read to know how big buffers are */ | ||
1469 | err = pdc_rx_buf_pool_create(pdcs); | ||
1470 | if (err) | ||
1471 | goto cleanup_ring_pool; | ||
1472 | |||
1473 | pdc_hw_init(pdcs); | ||
1474 | |||
1475 | err = pdc_interrupts_init(pdcs); | ||
1476 | if (err) | ||
1477 | goto cleanup_buf_pool; | ||
1478 | |||
1479 | /* Initialize mailbox controller */ | ||
1480 | err = pdc_mb_init(pdcs); | ||
1481 | if (err) | ||
1482 | goto cleanup_buf_pool; | ||
1483 | |||
1484 | pdcs->debugfs_stats = NULL; | ||
1485 | pdc_setup_debugfs(pdcs); | ||
1486 | |||
1487 | dev_dbg(dev, "pdc_probe() successful\n"); | ||
1488 | return PDC_SUCCESS; | ||
1489 | |||
1490 | cleanup_buf_pool: | ||
1491 | dma_pool_destroy(pdcs->rx_buf_pool); | ||
1492 | |||
1493 | cleanup_ring_pool: | ||
1494 | dma_pool_destroy(pdcs->ring_pool); | ||
1495 | |||
1496 | cleanup: | ||
1497 | return err; | ||
1498 | } | ||
1499 | |||
1500 | static int pdc_remove(struct platform_device *pdev) | ||
1501 | { | ||
1502 | struct pdc_state *pdcs = platform_get_drvdata(pdev); | ||
1503 | |||
1504 | pdc_free_debugfs(); | ||
1505 | |||
1506 | mbox_controller_unregister(&pdcs->mbc); | ||
1507 | |||
1508 | dma_pool_destroy(pdcs->rx_buf_pool); | ||
1509 | dma_pool_destroy(pdcs->ring_pool); | ||
1510 | return 0; | ||
1511 | } | ||
1512 | |||
1513 | static const struct of_device_id pdc_mbox_of_match[] = { | ||
1514 | {.compatible = "brcm,iproc-pdc-mbox"}, | ||
1515 | { /* sentinel */ } | ||
1516 | }; | ||
1517 | MODULE_DEVICE_TABLE(of, pdc_mbox_of_match); | ||
1518 | |||
1519 | static struct platform_driver pdc_mbox_driver = { | ||
1520 | .probe = pdc_probe, | ||
1521 | .remove = pdc_remove, | ||
1522 | .driver = { | ||
1523 | .name = "brcm-iproc-pdc-mbox", | ||
1524 | .of_match_table = of_match_ptr(pdc_mbox_of_match), | ||
1525 | }, | ||
1526 | }; | ||
1527 | module_platform_driver(pdc_mbox_driver); | ||
1528 | |||
1529 | MODULE_AUTHOR("Rob Rice <rob.rice@broadcom.com>"); | ||
1530 | MODULE_DESCRIPTION("Broadcom PDC mailbox driver"); | ||
1531 | MODULE_LICENSE("GPL v2"); | ||
diff --git a/drivers/mailbox/mailbox-test.c b/drivers/mailbox/mailbox-test.c index 58d04726cdd7..9ca96e9db6bf 100644 --- a/drivers/mailbox/mailbox-test.c +++ b/drivers/mailbox/mailbox-test.c | |||
@@ -133,6 +133,7 @@ static ssize_t mbox_test_message_write(struct file *filp, | |||
133 | out: | 133 | out: |
134 | kfree(tdev->signal); | 134 | kfree(tdev->signal); |
135 | kfree(tdev->message); | 135 | kfree(tdev->message); |
136 | tdev->signal = NULL; | ||
136 | 137 | ||
137 | return ret < 0 ? ret : count; | 138 | return ret < 0 ? ret : count; |
138 | } | 139 | } |
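The added line fixes the dangling pointer noted in the merge summary: tdev->signal is freed on every write's exit path, so without the reset a later write could kfree() the stale pointer a second time. Resetting it makes repeated cleanup safe, since kfree(NULL) is a no-op:

	kfree(tdev->signal);
	tdev->signal = NULL;	/* a later kfree(tdev->signal) is now harmless */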
diff --git a/drivers/mailbox/pl320-ipc.c b/drivers/mailbox/pl320-ipc.c index f80acb36ff07..2dbed87094d7 100644 --- a/drivers/mailbox/pl320-ipc.c +++ b/drivers/mailbox/pl320-ipc.c | |||
@@ -58,29 +58,29 @@ static ATOMIC_NOTIFIER_HEAD(ipc_notifier); | |||
58 | 58 | ||
59 | static inline void set_destination(int source, int mbox) | 59 | static inline void set_destination(int source, int mbox) |
60 | { | 60 | { |
61 | __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox)); | 61 | writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDSET(mbox)); |
62 | __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox)); | 62 | writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMSET(mbox)); |
63 | } | 63 | } |
64 | 64 | ||
65 | static inline void clear_destination(int source, int mbox) | 65 | static inline void clear_destination(int source, int mbox) |
66 | { | 66 | { |
67 | __raw_writel(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox)); | 67 | writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxDCLEAR(mbox)); |
68 | __raw_writel(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox)); | 68 | writel_relaxed(CHAN_MASK(source), ipc_base + IPCMxMCLEAR(mbox)); |
69 | } | 69 | } |
70 | 70 | ||
71 | static void __ipc_send(int mbox, u32 *data) | 71 | static void __ipc_send(int mbox, u32 *data) |
72 | { | 72 | { |
73 | int i; | 73 | int i; |
74 | for (i = 0; i < 7; i++) | 74 | for (i = 0; i < 7; i++) |
75 | __raw_writel(data[i], ipc_base + IPCMxDR(mbox, i)); | 75 | writel_relaxed(data[i], ipc_base + IPCMxDR(mbox, i)); |
76 | __raw_writel(0x1, ipc_base + IPCMxSEND(mbox)); | 76 | writel_relaxed(0x1, ipc_base + IPCMxSEND(mbox)); |
77 | } | 77 | } |
78 | 78 | ||
79 | static u32 __ipc_rcv(int mbox, u32 *data) | 79 | static u32 __ipc_rcv(int mbox, u32 *data) |
80 | { | 80 | { |
81 | int i; | 81 | int i; |
82 | for (i = 0; i < 7; i++) | 82 | for (i = 0; i < 7; i++) |
83 | data[i] = __raw_readl(ipc_base + IPCMxDR(mbox, i)); | 83 | data[i] = readl_relaxed(ipc_base + IPCMxDR(mbox, i)); |
84 | return data[1]; | 84 | return data[1]; |
85 | } | 85 | } |
86 | 86 | ||
@@ -112,15 +112,15 @@ static irqreturn_t ipc_handler(int irq, void *dev) | |||
112 | u32 irq_stat; | 112 | u32 irq_stat; |
113 | u32 data[7]; | 113 | u32 data[7]; |
114 | 114 | ||
115 | irq_stat = __raw_readl(ipc_base + IPCMMIS(1)); | 115 | irq_stat = readl_relaxed(ipc_base + IPCMMIS(1)); |
116 | if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) { | 116 | if (irq_stat & MBOX_MASK(IPC_TX_MBOX)) { |
117 | __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX)); | 117 | writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX)); |
118 | complete(&ipc_completion); | 118 | complete(&ipc_completion); |
119 | } | 119 | } |
120 | if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) { | 120 | if (irq_stat & MBOX_MASK(IPC_RX_MBOX)) { |
121 | __ipc_rcv(IPC_RX_MBOX, data); | 121 | __ipc_rcv(IPC_RX_MBOX, data); |
122 | atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1); | 122 | atomic_notifier_call_chain(&ipc_notifier, data[0], data + 1); |
123 | __raw_writel(2, ipc_base + IPCMxSEND(IPC_RX_MBOX)); | 123 | writel_relaxed(2, ipc_base + IPCMxSEND(IPC_RX_MBOX)); |
124 | } | 124 | } |
125 | 125 | ||
126 | return IRQ_HANDLED; | 126 | return IRQ_HANDLED; |
@@ -146,7 +146,7 @@ static int pl320_probe(struct amba_device *adev, const struct amba_id *id) | |||
146 | if (ipc_base == NULL) | 146 | if (ipc_base == NULL) |
147 | return -ENOMEM; | 147 | return -ENOMEM; |
148 | 148 | ||
149 | __raw_writel(0, ipc_base + IPCMxSEND(IPC_TX_MBOX)); | 149 | writel_relaxed(0, ipc_base + IPCMxSEND(IPC_TX_MBOX)); |
150 | 150 | ||
151 | ipc_irq = adev->irq[0]; | 151 | ipc_irq = adev->irq[0]; |
152 | ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL); | 152 | ret = request_irq(ipc_irq, ipc_handler, 0, dev_name(&adev->dev), NULL); |
@@ -154,20 +154,20 @@ static int pl320_probe(struct amba_device *adev, const struct amba_id *id) | |||
154 | goto err; | 154 | goto err; |
155 | 155 | ||
156 | /* Init slow mailbox */ | 156 | /* Init slow mailbox */ |
157 | __raw_writel(CHAN_MASK(A9_SOURCE), | 157 | writel_relaxed(CHAN_MASK(A9_SOURCE), |
158 | ipc_base + IPCMxSOURCE(IPC_TX_MBOX)); | 158 | ipc_base + IPCMxSOURCE(IPC_TX_MBOX)); |
159 | __raw_writel(CHAN_MASK(M3_SOURCE), | 159 | writel_relaxed(CHAN_MASK(M3_SOURCE), |
160 | ipc_base + IPCMxDSET(IPC_TX_MBOX)); | 160 | ipc_base + IPCMxDSET(IPC_TX_MBOX)); |
161 | __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE), | 161 | writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE), |
162 | ipc_base + IPCMxMSET(IPC_TX_MBOX)); | 162 | ipc_base + IPCMxMSET(IPC_TX_MBOX)); |
163 | 163 | ||
164 | /* Init receive mailbox */ | 164 | /* Init receive mailbox */ |
165 | __raw_writel(CHAN_MASK(M3_SOURCE), | 165 | writel_relaxed(CHAN_MASK(M3_SOURCE), |
166 | ipc_base + IPCMxSOURCE(IPC_RX_MBOX)); | 166 | ipc_base + IPCMxSOURCE(IPC_RX_MBOX)); |
167 | __raw_writel(CHAN_MASK(A9_SOURCE), | 167 | writel_relaxed(CHAN_MASK(A9_SOURCE), |
168 | ipc_base + IPCMxDSET(IPC_RX_MBOX)); | 168 | ipc_base + IPCMxDSET(IPC_RX_MBOX)); |
169 | __raw_writel(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE), | 169 | writel_relaxed(CHAN_MASK(M3_SOURCE) | CHAN_MASK(A9_SOURCE), |
170 | ipc_base + IPCMxMSET(IPC_RX_MBOX)); | 170 | ipc_base + IPCMxMSET(IPC_RX_MBOX)); |
171 | 171 | ||
172 | return 0; | 172 | return 0; |
173 | err: | 173 | err: |
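The conversion above replaces the __raw MMIO accessors with their _relaxed counterparts. Neither form inserts the memory barriers that plain readl()/writel() carry, but the relaxed accessors also perform the little-endian byte-swap that the raw forms omit, making them the portable choice when no ordering against DMA is required. A simplified sketch of the distinction (the real definitions are per-architecture):

	__raw_writel(v, addr);   /* native-endian store, no barrier        */
	writel_relaxed(v, addr); /* little-endian store, no barrier        */
	writel(v, addr);         /* little-endian store plus write barrier */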
diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h new file mode 100644 index 000000000000..6b55c938b401 --- /dev/null +++ b/include/linux/mailbox/brcm-message.h | |||
@@ -0,0 +1,56 @@ | |||
1 | /* | ||
2 | * Copyright (C) 2016 Broadcom | ||
3 | * | ||
4 | * This program is free software; you can redistribute it and/or modify | ||
5 | * it under the terms of the GNU General Public License version 2 as | ||
6 | * published by the Free Software Foundation. | ||
7 | * | ||
8 | * Common header for Broadcom mailbox messages which is shared across | ||
9 | * Broadcom SoCs and Broadcom mailbox client drivers. | ||
10 | */ | ||
11 | |||
12 | #ifndef _LINUX_BRCM_MESSAGE_H_ | ||
13 | #define _LINUX_BRCM_MESSAGE_H_ | ||
14 | |||
15 | #include <linux/scatterlist.h> | ||
16 | |||
17 | enum brcm_message_type { | ||
18 | BRCM_MESSAGE_UNKNOWN = 0, | ||
19 | BRCM_MESSAGE_SPU, | ||
20 | BRCM_MESSAGE_SBA, | ||
21 | BRCM_MESSAGE_MAX, | ||
22 | }; | ||
23 | |||
24 | struct brcm_sba_command { | ||
25 | u64 cmd; | ||
26 | #define BRCM_SBA_CMD_TYPE_A BIT(0) | ||
27 | #define BRCM_SBA_CMD_TYPE_B BIT(1) | ||
28 | #define BRCM_SBA_CMD_TYPE_C BIT(2) | ||
29 | #define BRCM_SBA_CMD_HAS_RESP BIT(3) | ||
30 | #define BRCM_SBA_CMD_HAS_OUTPUT BIT(4) | ||
31 | u64 flags; | ||
32 | dma_addr_t input; | ||
33 | size_t input_len; | ||
34 | dma_addr_t resp; | ||
35 | size_t resp_len; | ||
36 | dma_addr_t output; | ||
37 | size_t output_len; | ||
38 | }; | ||
39 | |||
40 | struct brcm_message { | ||
41 | enum brcm_message_type type; | ||
42 | union { | ||
43 | struct { | ||
44 | struct scatterlist *src; | ||
45 | struct scatterlist *dst; | ||
46 | } spu; | ||
47 | struct { | ||
48 | struct brcm_sba_command *cmds; | ||
49 | unsigned int cmds_count; | ||
50 | } sba; | ||
51 | }; | ||
52 | void *ctx; | ||
53 | int error; | ||
54 | }; | ||
55 | |||
56 | #endif /* _LINUX_BRCM_MESSAGE_H_ */ | ||
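As a usage illustration for the new header, the sketch below shows a hypothetical client submitting a BRCM_MESSAGE_SPU request over a PDC channel through the standard mailbox client API. It assumes the client device's DT node has an mboxes phandle pointing at the PDC; demo_spu_submit() and its parameters are illustrative, not part of this series:

	#include <linux/err.h>
	#include <linux/mailbox_client.h>
	#include <linux/mailbox/brcm-message.h>

	static int demo_spu_submit(struct device *dev, struct scatterlist *src,
				   struct scatterlist *dst)
	{
		struct mbox_client cl = {
			.dev = dev,
			.tx_block = true,	/* sleep until the PDC signals tx-done */
			.tx_tout = 1000,	/* timeout in milliseconds */
		};
		struct brcm_message msg = { };
		struct mbox_chan *chan;
		int rc;

		/* index 0 of this device's "mboxes" DT property */
		chan = mbox_request_channel(&cl, 0);
		if (IS_ERR(chan))
			return PTR_ERR(chan);

		msg.type = BRCM_MESSAGE_SPU;
		msg.spu.src = src;
		msg.spu.dst = dst;

		rc = mbox_send_message(chan, &msg);	/* >= 0 on success */
		mbox_free_channel(chan);
		return rc < 0 ? rc : 0;
	}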