authorVinod Koul <vinod.koul@intel.com>2018-04-09 23:25:07 -0400
committerVinod Koul <vinod.koul@intel.com>2018-04-09 23:25:07 -0400
commit62065132eec7dc1e5e8ea7a11d26786935d84856 (patch)
tree5d9897098a08cd02e45219b744e1f68c81fb94f2
parent36ebe2b98e969d80aa0eab2a37404871173b41db (diff)
parent6a28ba26f8c8557d93fe7c8a82888fb1420b81a9 (diff)
Merge branch 'topic/dw_axi' into for-linus
-rw-r--r--Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt41
-rw-r--r--MAINTAINERS6
-rw-r--r--drivers/dma/Kconfig10
-rw-r--r--drivers/dma/Makefile1
-rw-r--r--drivers/dma/dw-axi-dmac/Makefile1
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c1008
-rw-r--r--drivers/dma/dw-axi-dmac/dw-axi-dmac.h334
7 files changed, 1401 insertions, 0 deletions
diff --git a/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
new file mode 100644
index 000000000000..f237b7928283
--- /dev/null
+++ b/Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
@@ -0,0 +1,41 @@
1Synopsys DesignWare AXI DMA Controller
2
3Required properties:
4- compatible: "snps,axi-dma-1.01a"
5- reg: Address range of the DMAC registers. This should include
6 all of the per-channel registers.
7- interrupts: Should contain the DMAC interrupt number.
8- interrupt-parent: Should be the phandle for the interrupt controller
9 that services interrupts for this device.
10- dma-channels: Number of channels supported by hardware.
11- snps,dma-masters: Number of AXI masters supported by the hardware.
12- snps,data-width: Maximum AXI data width supported by hardware.
13 (0 - 8bits, 1 - 16bits, 2 - 32bits, ..., 6 - 512bits)
14- snps,priority: Priority of channel. Array size is equal to the number of
15 dma-channels. Priority value must be programmed within [0:dma-channels-1]
16 range. (0 - minimum priority)
17- snps,block-size: Maximum block size supported by the controller channel.
18 Array size is equal to the number of dma-channels.
19
20Optional properties:
21- snps,axi-max-burst-len: Restrict the master AXI burst length to the value
22 specified in this property. If this property is missing, the maximum AXI
23 burst length supported by the DMAC is used. Allowed range: [1:256].
24
25Example:
26
27dmac: dma-controller@80000 {
28 compatible = "snps,axi-dma-1.01a";
29 reg = <0x80000 0x400>;
30 clocks = <&core_clk>, <&cfgr_clk>;
31 clock-names = "core-clk", "cfgr-clk";
32 interrupt-parent = <&intc>;
33 interrupts = <27>;
34
35 dma-channels = <4>;
36 snps,dma-masters = <2>;
37 snps,data-width = <3>;
38 snps,block-size = <4096 4096 4096 4096>;
39 snps,priority = <0 1 2 3>;
40 snps,axi-max-burst-len = <16>;
41};
diff --git a/MAINTAINERS b/MAINTAINERS
index 3bdc260e36b7..b31bfdb8a09e 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -13336,6 +13336,12 @@ S: Maintained
13336F: drivers/gpio/gpio-dwapb.c
13337F: Documentation/devicetree/bindings/gpio/snps-dwapb-gpio.txt
13338
13339SYNOPSYS DESIGNWARE AXI DMAC DRIVER
13340M: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
13341S: Maintained
13342F: drivers/dma/dw-axi-dmac/
13343F: Documentation/devicetree/bindings/dma/snps,dw-axi-dmac.txt
13344
13345SYNOPSYS DESIGNWARE DMAC DRIVER
13346M: Viresh Kumar <vireshk@kernel.org>
13347R: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index 27df3e2837fd..c36272aa7c09 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -187,6 +187,16 @@ config DMA_SUN6I
187 help
188 Support for the DMA engine first found in Allwinner A31 SoCs.
189
190config DW_AXI_DMAC
191 tristate "Synopsys DesignWare AXI DMA support"
192 depends on OF || COMPILE_TEST
193 select DMA_ENGINE
194 select DMA_VIRTUAL_CHANNELS
195 help
196 Enable support for Synopsys DesignWare AXI DMA controller.
197 NOTE: This driver hasn't been tested on a 64 bit platform because
198 no 64 bit platform with the Synopsys DW AXI DMAC was available.
199
200config EP93XX_DMA
201 bool "Cirrus Logic EP93xx DMA support"
202 depends on ARCH_EP93XX || COMPILE_TEST
diff --git a/drivers/dma/Makefile b/drivers/dma/Makefile
index b9dca8a0e142..c242a5e8906b 100644
--- a/drivers/dma/Makefile
+++ b/drivers/dma/Makefile
@@ -28,6 +28,7 @@ obj-$(CONFIG_DMA_OMAP) += omap-dma.o
28obj-$(CONFIG_DMA_SA11X0) += sa11x0-dma.o
29obj-$(CONFIG_DMA_SUN4I) += sun4i-dma.o
30obj-$(CONFIG_DMA_SUN6I) += sun6i-dma.o
31obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac/
32obj-$(CONFIG_DW_DMAC_CORE) += dw/
33obj-$(CONFIG_EP93XX_DMA) += ep93xx_dma.o
34obj-$(CONFIG_FSL_DMA) += fsldma.o
diff --git a/drivers/dma/dw-axi-dmac/Makefile b/drivers/dma/dw-axi-dmac/Makefile
new file mode 100644
index 000000000000..4bfa462005be
--- /dev/null
+++ b/drivers/dma/dw-axi-dmac/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_DW_AXI_DMAC) += dw-axi-dmac-platform.o
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
new file mode 100644
index 000000000000..c4eb55e3011c
--- /dev/null
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac-platform.c
@@ -0,0 +1,1008 @@
1// SPDX-License-Identifier: GPL-2.0
2// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
3
4/*
5 * Synopsys DesignWare AXI DMA Controller driver.
6 *
7 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
8 */
9
10#include <linux/bitops.h>
11#include <linux/delay.h>
12#include <linux/device.h>
13#include <linux/dmaengine.h>
14#include <linux/dmapool.h>
15#include <linux/err.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/kernel.h>
19#include <linux/module.h>
20#include <linux/of.h>
21#include <linux/platform_device.h>
22#include <linux/pm_runtime.h>
23#include <linux/property.h>
24#include <linux/types.h>
25
26#include "dw-axi-dmac.h"
27#include "../dmaengine.h"
28#include "../virt-dma.h"
29
30/*
31 * The set of bus widths supported by the DMA controller. DW AXI DMAC supports
32 * master data bus width up to 512 bits (for both AXI master interfaces), but
33 * it depends on the IP block configuration.
34 */
35#define AXI_DMA_BUSWIDTHS \
36 (DMA_SLAVE_BUSWIDTH_1_BYTE | \
37 DMA_SLAVE_BUSWIDTH_2_BYTES | \
38 DMA_SLAVE_BUSWIDTH_4_BYTES | \
39 DMA_SLAVE_BUSWIDTH_8_BYTES | \
40 DMA_SLAVE_BUSWIDTH_16_BYTES | \
41 DMA_SLAVE_BUSWIDTH_32_BYTES | \
42 DMA_SLAVE_BUSWIDTH_64_BYTES)
43
44static inline void
45axi_dma_iowrite32(struct axi_dma_chip *chip, u32 reg, u32 val)
46{
47 iowrite32(val, chip->regs + reg);
48}
49
50static inline u32 axi_dma_ioread32(struct axi_dma_chip *chip, u32 reg)
51{
52 return ioread32(chip->regs + reg);
53}
54
55static inline void
56axi_chan_iowrite32(struct axi_dma_chan *chan, u32 reg, u32 val)
57{
58 iowrite32(val, chan->chan_regs + reg);
59}
60
61static inline u32 axi_chan_ioread32(struct axi_dma_chan *chan, u32 reg)
62{
63 return ioread32(chan->chan_regs + reg);
64}
65
66static inline void
67axi_chan_iowrite64(struct axi_dma_chan *chan, u32 reg, u64 val)
68{
69 /*
70 * We split one 64 bit write into two 32 bit writes as some HW doesn't
71 * support 64 bit access.
72 */
73 iowrite32(lower_32_bits(val), chan->chan_regs + reg);
74 iowrite32(upper_32_bits(val), chan->chan_regs + reg + 4);
75}
76
77static inline void axi_dma_disable(struct axi_dma_chip *chip)
78{
79 u32 val;
80
81 val = axi_dma_ioread32(chip, DMAC_CFG);
82 val &= ~DMAC_EN_MASK;
83 axi_dma_iowrite32(chip, DMAC_CFG, val);
84}
85
86static inline void axi_dma_enable(struct axi_dma_chip *chip)
87{
88 u32 val;
89
90 val = axi_dma_ioread32(chip, DMAC_CFG);
91 val |= DMAC_EN_MASK;
92 axi_dma_iowrite32(chip, DMAC_CFG, val);
93}
94
95static inline void axi_dma_irq_disable(struct axi_dma_chip *chip)
96{
97 u32 val;
98
99 val = axi_dma_ioread32(chip, DMAC_CFG);
100 val &= ~INT_EN_MASK;
101 axi_dma_iowrite32(chip, DMAC_CFG, val);
102}
103
104static inline void axi_dma_irq_enable(struct axi_dma_chip *chip)
105{
106 u32 val;
107
108 val = axi_dma_ioread32(chip, DMAC_CFG);
109 val |= INT_EN_MASK;
110 axi_dma_iowrite32(chip, DMAC_CFG, val);
111}
112
113static inline void axi_chan_irq_disable(struct axi_dma_chan *chan, u32 irq_mask)
114{
115 u32 val;
116
117 if (likely(irq_mask == DWAXIDMAC_IRQ_ALL)) {
118 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, DWAXIDMAC_IRQ_NONE);
119 } else {
120 val = axi_chan_ioread32(chan, CH_INTSTATUS_ENA);
121 val &= ~irq_mask;
122 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, val);
123 }
124}
125
126static inline void axi_chan_irq_set(struct axi_dma_chan *chan, u32 irq_mask)
127{
128 axi_chan_iowrite32(chan, CH_INTSTATUS_ENA, irq_mask);
129}
130
131static inline void axi_chan_irq_sig_set(struct axi_dma_chan *chan, u32 irq_mask)
132{
133 axi_chan_iowrite32(chan, CH_INTSIGNAL_ENA, irq_mask);
134}
135
136static inline void axi_chan_irq_clear(struct axi_dma_chan *chan, u32 irq_mask)
137{
138 axi_chan_iowrite32(chan, CH_INTCLEAR, irq_mask);
139}
140
141static inline u32 axi_chan_irq_read(struct axi_dma_chan *chan)
142{
143 return axi_chan_ioread32(chan, CH_INTSTATUS);
144}
145
146static inline void axi_chan_disable(struct axi_dma_chan *chan)
147{
148 u32 val;
149
150 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
151 val &= ~(BIT(chan->id) << DMAC_CHAN_EN_SHIFT);
152 val |= BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
153 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
154}
155
156static inline void axi_chan_enable(struct axi_dma_chan *chan)
157{
158 u32 val;
159
160 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
161 val |= BIT(chan->id) << DMAC_CHAN_EN_SHIFT |
162 BIT(chan->id) << DMAC_CHAN_EN_WE_SHIFT;
163 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
164}
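/*
 * Editor's note: DMAC_CHEN pairs every channel enable bit with a
 * write-enable bit (DMAC_CHAN_EN_WE_SHIFT). The hardware only latches
 * enable bits whose write-enable bit is also set, so a single write
 * can toggle one channel without disturbing the others.
 */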
165
166static inline bool axi_chan_is_hw_enable(struct axi_dma_chan *chan)
167{
168 u32 val;
169
170 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
171
172 return !!(val & (BIT(chan->id) << DMAC_CHAN_EN_SHIFT));
173}
174
175static void axi_dma_hw_init(struct axi_dma_chip *chip)
176{
177 u32 i;
178
179 for (i = 0; i < chip->dw->hdata->nr_channels; i++) {
180 axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
181 axi_chan_disable(&chip->dw->chan[i]);
182 }
183}
184
185static u32 axi_chan_get_xfer_width(struct axi_dma_chan *chan, dma_addr_t src,
186 dma_addr_t dst, size_t len)
187{
188 u32 max_width = chan->chip->dw->hdata->m_data_width;
189
190 return __ffs(src | dst | len | BIT(max_width));
191}
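/*
 * Editor's note (worked example): __ffs(src | dst | len | BIT(max_width))
 * returns the lowest set bit, i.e. the widest power-of-two unit that src,
 * dst and len are all aligned to, capped at max_width. E.g. for
 * src = 0x1000, dst = 0x2008, len = 0x100 and max_width = 3 this gives
 * __ffs(0x3108 | 0x8) = 3, i.e. DWAXIDMAC_TRANS_WIDTH_64 (8-byte units).
 */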
192
193static inline const char *axi_chan_name(struct axi_dma_chan *chan)
194{
195 return dma_chan_name(&chan->vc.chan);
196}
197
198static struct axi_dma_desc *axi_desc_get(struct axi_dma_chan *chan)
199{
200 struct dw_axi_dma *dw = chan->chip->dw;
201 struct axi_dma_desc *desc;
202 dma_addr_t phys;
203
204 desc = dma_pool_zalloc(dw->desc_pool, GFP_NOWAIT, &phys);
205 if (unlikely(!desc)) {
206 dev_err(chan2dev(chan), "%s: not enough descriptors available\n",
207 axi_chan_name(chan));
208 return NULL;
209 }
210
211 atomic_inc(&chan->descs_allocated);
212 INIT_LIST_HEAD(&desc->xfer_list);
213 desc->vd.tx.phys = phys;
214 desc->chan = chan;
215
216 return desc;
217}
218
219static void axi_desc_put(struct axi_dma_desc *desc)
220{
221 struct axi_dma_chan *chan = desc->chan;
222 struct dw_axi_dma *dw = chan->chip->dw;
223 struct axi_dma_desc *child, *_next;
224 unsigned int descs_put = 0;
225
226 list_for_each_entry_safe(child, _next, &desc->xfer_list, xfer_list) {
227 list_del(&child->xfer_list);
228 dma_pool_free(dw->desc_pool, child, child->vd.tx.phys);
229 descs_put++;
230 }
231
232 dma_pool_free(dw->desc_pool, desc, desc->vd.tx.phys);
233 descs_put++;
234
235 atomic_sub(descs_put, &chan->descs_allocated);
236 dev_vdbg(chan2dev(chan), "%s: %d descs put, %d still allocated\n",
237 axi_chan_name(chan), descs_put,
238 atomic_read(&chan->descs_allocated));
239}
240
241static void vchan_desc_put(struct virt_dma_desc *vdesc)
242{
243 axi_desc_put(vd_to_axi_desc(vdesc));
244}
245
246static enum dma_status
247dma_chan_tx_status(struct dma_chan *dchan, dma_cookie_t cookie,
248 struct dma_tx_state *txstate)
249{
250 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
251 enum dma_status ret;
252
253 ret = dma_cookie_status(dchan, cookie, txstate);
254
255 if (chan->is_paused && ret == DMA_IN_PROGRESS)
256 ret = DMA_PAUSED;
257
258 return ret;
259}
260
261static void write_desc_llp(struct axi_dma_desc *desc, dma_addr_t adr)
262{
263 desc->lli.llp = cpu_to_le64(adr);
264}
265
266static void write_chan_llp(struct axi_dma_chan *chan, dma_addr_t adr)
267{
268 axi_chan_iowrite64(chan, CH_LLP, adr);
269}
270
271/* Called in chan locked context */
272static void axi_chan_block_xfer_start(struct axi_dma_chan *chan,
273 struct axi_dma_desc *first)
274{
275 u32 priority = chan->chip->dw->hdata->priority[chan->id];
276 u32 reg, irq_mask;
277 u8 lms = 0; /* Select AXI0 master for LLI fetching */
278
279 if (unlikely(axi_chan_is_hw_enable(chan))) {
280 dev_err(chan2dev(chan), "%s is non-idle!\n",
281 axi_chan_name(chan));
282
283 return;
284 }
285
286 axi_dma_enable(chan->chip);
287
288 reg = (DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_DST_MULTBLK_TYPE_POS |
289 DWAXIDMAC_MBLK_TYPE_LL << CH_CFG_L_SRC_MULTBLK_TYPE_POS);
290 axi_chan_iowrite32(chan, CH_CFG_L, reg);
291
292 reg = (DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC << CH_CFG_H_TT_FC_POS |
293 priority << CH_CFG_H_PRIORITY_POS |
294 DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_DST_POS |
295 DWAXIDMAC_HS_SEL_HW << CH_CFG_H_HS_SEL_SRC_POS);
296 axi_chan_iowrite32(chan, CH_CFG_H, reg);
297
298 write_chan_llp(chan, first->vd.tx.phys | lms);
299
300 irq_mask = DWAXIDMAC_IRQ_DMA_TRF | DWAXIDMAC_IRQ_ALL_ERR;
301 axi_chan_irq_sig_set(chan, irq_mask);
302
303 /* Generate 'suspend' status but don't generate interrupt */
304 irq_mask |= DWAXIDMAC_IRQ_SUSPENDED;
305 axi_chan_irq_set(chan, irq_mask);
306
307 axi_chan_enable(chan);
308}
309
310static void axi_chan_start_first_queued(struct axi_dma_chan *chan)
311{
312 struct axi_dma_desc *desc;
313 struct virt_dma_desc *vd;
314
315 vd = vchan_next_desc(&chan->vc);
316 if (!vd)
317 return;
318
319 desc = vd_to_axi_desc(vd);
320 dev_vdbg(chan2dev(chan), "%s: started %u\n", axi_chan_name(chan),
321 vd->tx.cookie);
322 axi_chan_block_xfer_start(chan, desc);
323}
324
325static void dma_chan_issue_pending(struct dma_chan *dchan)
326{
327 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
328 unsigned long flags;
329
330 spin_lock_irqsave(&chan->vc.lock, flags);
331 if (vchan_issue_pending(&chan->vc))
332 axi_chan_start_first_queued(chan);
333 spin_unlock_irqrestore(&chan->vc.lock, flags);
334}
335
336static int dma_chan_alloc_chan_resources(struct dma_chan *dchan)
337{
338 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
339
340 /* ASSERT: channel is idle */
341 if (axi_chan_is_hw_enable(chan)) {
342 dev_err(chan2dev(chan), "%s is non-idle!\n",
343 axi_chan_name(chan));
344 return -EBUSY;
345 }
346
347 dev_vdbg(dchan2dev(dchan), "%s: allocating\n", axi_chan_name(chan));
348
349 pm_runtime_get(chan->chip->dev);
350
351 return 0;
352}
353
354static void dma_chan_free_chan_resources(struct dma_chan *dchan)
355{
356 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
357
358 /* ASSERT: channel is idle */
359 if (axi_chan_is_hw_enable(chan))
360 dev_err(dchan2dev(dchan), "%s is non-idle!\n",
361 axi_chan_name(chan));
362
363 axi_chan_disable(chan);
364 axi_chan_irq_disable(chan, DWAXIDMAC_IRQ_ALL);
365
366 vchan_free_chan_resources(&chan->vc);
367
368 dev_vdbg(dchan2dev(dchan),
369 "%s: free resources, descriptor still allocated: %u\n",
370 axi_chan_name(chan), atomic_read(&chan->descs_allocated));
371
372 pm_runtime_put(chan->chip->dev);
373}
374
375/*
376 * If the DW_axi_dmac sees the CHx_CTL.ShadowReg_Or_LLI_Last bit of the
377 * fetched LLI set to 1, it treats the current block as the final block
378 * of the transfer and completes the DMA transfer operation at the end
379 * of that block transfer.
380 */
381static void set_desc_last(struct axi_dma_desc *desc)
382{
383 u32 val;
384
385 val = le32_to_cpu(desc->lli.ctl_hi);
386 val |= CH_CTL_H_LLI_LAST;
387 desc->lli.ctl_hi = cpu_to_le32(val);
388}
389
390static void write_desc_sar(struct axi_dma_desc *desc, dma_addr_t adr)
391{
392 desc->lli.sar = cpu_to_le64(adr);
393}
394
395static void write_desc_dar(struct axi_dma_desc *desc, dma_addr_t adr)
396{
397 desc->lli.dar = cpu_to_le64(adr);
398}
399
400static void set_desc_src_master(struct axi_dma_desc *desc)
401{
402 u32 val;
403
404 /* Select AXI0 for source master */
405 val = le32_to_cpu(desc->lli.ctl_lo);
406 val &= ~CH_CTL_L_SRC_MAST;
407 desc->lli.ctl_lo = cpu_to_le32(val);
408}
409
410static void set_desc_dest_master(struct axi_dma_desc *desc)
411{
412 u32 val;
413
414 /* Select AXI1 for destination master if available */
415 val = le32_to_cpu(desc->lli.ctl_lo);
416 if (desc->chan->chip->dw->hdata->nr_masters > 1)
417 val |= CH_CTL_L_DST_MAST;
418 else
419 val &= ~CH_CTL_L_DST_MAST;
420
421 desc->lli.ctl_lo = cpu_to_le32(val);
422}
423
424static struct dma_async_tx_descriptor *
425dma_chan_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst_adr,
426 dma_addr_t src_adr, size_t len, unsigned long flags)
427{
428 struct axi_dma_desc *first = NULL, *desc = NULL, *prev = NULL;
429 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
430 size_t block_ts, max_block_ts, xfer_len;
431 u32 xfer_width, reg;
432 u8 lms = 0; /* Select AXI0 master for LLI fetching */
433
434 dev_dbg(chan2dev(chan), "%s: memcpy: src: %pad dst: %pad length: %zd flags: %#lx",
435 axi_chan_name(chan), &src_adr, &dst_adr, len, flags);
436
437 max_block_ts = chan->chip->dw->hdata->block_size[chan->id];
438
439 while (len) {
440 xfer_len = len;
441
442 /*
443 * Take care of the alignment.
444 * Source and destination widths could actually differ, but we
445 * keep them the same for simplicity.
446 */
447 xfer_width = axi_chan_get_xfer_width(chan, src_adr, dst_adr, xfer_len);
448
449 /*
450 * block_ts is the number of data units of xfer_width size to
451 * be transferred in one DMA block transfer.
452 * The BLOCK_TS register must be set to block_ts - 1.
453 */
454 block_ts = xfer_len >> xfer_width;
455 if (block_ts > max_block_ts) {
456 block_ts = max_block_ts;
457 xfer_len = max_block_ts << xfer_width;
458 }
459
460 desc = axi_desc_get(chan);
461 if (unlikely(!desc))
462 goto err_desc_get;
463
464 write_desc_sar(desc, src_adr);
465 write_desc_dar(desc, dst_adr);
466 desc->lli.block_ts_lo = cpu_to_le32(block_ts - 1);
467
468 reg = CH_CTL_H_LLI_VALID;
469 if (chan->chip->dw->hdata->restrict_axi_burst_len) {
470 u32 burst_len = chan->chip->dw->hdata->axi_rw_burst_len;
471
472 reg |= (CH_CTL_H_ARLEN_EN |
473 burst_len << CH_CTL_H_ARLEN_POS |
474 CH_CTL_H_AWLEN_EN |
475 burst_len << CH_CTL_H_AWLEN_POS);
476 }
477 desc->lli.ctl_hi = cpu_to_le32(reg);
478
479 reg = (DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_DST_MSIZE_POS |
480 DWAXIDMAC_BURST_TRANS_LEN_4 << CH_CTL_L_SRC_MSIZE_POS |
481 xfer_width << CH_CTL_L_DST_WIDTH_POS |
482 xfer_width << CH_CTL_L_SRC_WIDTH_POS |
483 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_DST_INC_POS |
484 DWAXIDMAC_CH_CTL_L_INC << CH_CTL_L_SRC_INC_POS);
485 desc->lli.ctl_lo = cpu_to_le32(reg);
486
487 set_desc_src_master(desc);
488 set_desc_dest_master(desc);
489
490 /* Manage transfer list (xfer_list) */
491 if (!first) {
492 first = desc;
493 } else {
494 list_add_tail(&desc->xfer_list, &first->xfer_list);
495 write_desc_llp(prev, desc->vd.tx.phys | lms);
496 }
497 prev = desc;
498
499 /* update the length and addresses for the next loop cycle */
500 len -= xfer_len;
501 dst_adr += xfer_len;
502 src_adr += xfer_len;
503 }
504
505 /* Total len of src/dest sg == 0, so no descriptors were allocated */
506 if (unlikely(!first))
507 return NULL;
508
509 /* Set end-of-link on the last link descriptor of the list */
510 set_desc_last(desc);
511
512 return vchan_tx_prep(&chan->vc, &first->vd, flags);
513
514err_desc_get:
515 axi_desc_put(first);
516 return NULL;
517}
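/*
 * Editor's note (worked example, assuming max_block_ts = 4096 and fully
 * 8-byte-aligned addresses): xfer_width = 3, so one LLI moves at most
 * 4096 << 3 = 32 KiB. A 100 KiB memcpy is therefore split into a chain
 * of four linked descriptors: 32 KiB + 32 KiB + 32 KiB + 4 KiB, with
 * only the last one marked CH_CTL_H_LLI_LAST.
 */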
518
519static void axi_chan_dump_lli(struct axi_dma_chan *chan,
520 struct axi_dma_desc *desc)
521{
522 dev_err(dchan2dev(&chan->vc.chan),
523 "SAR: 0x%llx DAR: 0x%llx LLP: 0x%llx BTS 0x%x CTL: 0x%x:%08x",
524 le64_to_cpu(desc->lli.sar),
525 le64_to_cpu(desc->lli.dar),
526 le64_to_cpu(desc->lli.llp),
527 le32_to_cpu(desc->lli.block_ts_lo),
528 le32_to_cpu(desc->lli.ctl_hi),
529 le32_to_cpu(desc->lli.ctl_lo));
530}
531
532static void axi_chan_list_dump_lli(struct axi_dma_chan *chan,
533 struct axi_dma_desc *desc_head)
534{
535 struct axi_dma_desc *desc;
536
537 axi_chan_dump_lli(chan, desc_head);
538 list_for_each_entry(desc, &desc_head->xfer_list, xfer_list)
539 axi_chan_dump_lli(chan, desc);
540}
541
542static noinline void axi_chan_handle_err(struct axi_dma_chan *chan, u32 status)
543{
544 struct virt_dma_desc *vd;
545 unsigned long flags;
546
547 spin_lock_irqsave(&chan->vc.lock, flags);
548
549 axi_chan_disable(chan);
550
551 /* The bad descriptor is currently at the head of the vc list */
552 vd = vchan_next_desc(&chan->vc);
553 /* Remove the bad descriptor from the issued list */
554 list_del(&vd->node);
555
556 /* WARN about bad descriptor */
557 dev_err(chan2dev(chan),
558 "Bad descriptor submitted for %s, cookie: %d, irq: 0x%08x\n",
559 axi_chan_name(chan), vd->tx.cookie, status);
560 axi_chan_list_dump_lli(chan, vd_to_axi_desc(vd));
561
562 vchan_cookie_complete(vd);
563
564 /* Try to restart the controller */
565 axi_chan_start_first_queued(chan);
566
567 spin_unlock_irqrestore(&chan->vc.lock, flags);
568}
569
570static void axi_chan_block_xfer_complete(struct axi_dma_chan *chan)
571{
572 struct virt_dma_desc *vd;
573 unsigned long flags;
574
575 spin_lock_irqsave(&chan->vc.lock, flags);
576 if (unlikely(axi_chan_is_hw_enable(chan))) {
577 dev_err(chan2dev(chan), "BUG: %s caught DWAXIDMAC_IRQ_DMA_TRF, but channel not idle!\n",
578 axi_chan_name(chan));
579 axi_chan_disable(chan);
580 }
581
582 /* The completed descriptor is currently at the head of the vc list */
583 vd = vchan_next_desc(&chan->vc);
584 /* Remove the completed descriptor from issued list before completing */
585 list_del(&vd->node);
586 vchan_cookie_complete(vd);
587
588 /* Submit queued descriptors after processing the completed ones */
589 axi_chan_start_first_queued(chan);
590
591 spin_unlock_irqrestore(&chan->vc.lock, flags);
592}
593
594static irqreturn_t dw_axi_dma_interrupt(int irq, void *dev_id)
595{
596 struct axi_dma_chip *chip = dev_id;
597 struct dw_axi_dma *dw = chip->dw;
598 struct axi_dma_chan *chan;
599
600 u32 status, i;
601
602 /* Disable DMAC interrupts. We'll enable them after processing channels */
603 axi_dma_irq_disable(chip);
604
605 /* Poll, clear and process every channel's interrupt status */
606 for (i = 0; i < dw->hdata->nr_channels; i++) {
607 chan = &dw->chan[i];
608 status = axi_chan_irq_read(chan);
609 axi_chan_irq_clear(chan, status);
610
611 dev_vdbg(chip->dev, "%s %u IRQ status: 0x%08x\n",
612 axi_chan_name(chan), i, status);
613
614 if (status & DWAXIDMAC_IRQ_ALL_ERR)
615 axi_chan_handle_err(chan, status);
616 else if (status & DWAXIDMAC_IRQ_DMA_TRF)
617 axi_chan_block_xfer_complete(chan);
618 }
619
620 /* Re-enable interrupts */
621 axi_dma_irq_enable(chip);
622
623 return IRQ_HANDLED;
624}
625
626static int dma_chan_terminate_all(struct dma_chan *dchan)
627{
628 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
629 unsigned long flags;
630 LIST_HEAD(head);
631
632 spin_lock_irqsave(&chan->vc.lock, flags);
633
634 axi_chan_disable(chan);
635
636 vchan_get_all_descriptors(&chan->vc, &head);
637
638 /*
639 * As vchan_dma_desc_free_list() can access the desc_allocated
640 * list, we need to call it while holding vc.lock.
641 */
642 vchan_dma_desc_free_list(&chan->vc, &head);
643
644 spin_unlock_irqrestore(&chan->vc.lock, flags);
645
646 dev_vdbg(dchan2dev(dchan), "terminated: %s\n", axi_chan_name(chan));
647
648 return 0;
649}
650
651static int dma_chan_pause(struct dma_chan *dchan)
652{
653 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
654 unsigned long flags;
655 unsigned int timeout = 20; /* timeout iterations */
656 u32 val;
657
658 spin_lock_irqsave(&chan->vc.lock, flags);
659
660 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
661 val |= BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT |
662 BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT;
663 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
664
665 do {
666 if (axi_chan_irq_read(chan) & DWAXIDMAC_IRQ_SUSPENDED)
667 break;
668
669 udelay(2);
670 } while (--timeout);
671
672 axi_chan_irq_clear(chan, DWAXIDMAC_IRQ_SUSPENDED);
673
674 chan->is_paused = true;
675
676 spin_unlock_irqrestore(&chan->vc.lock, flags);
677
678 return timeout ? 0 : -EAGAIN;
679}
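/*
 * Editor's note: the poll above budgets roughly 20 * 2 us = 40 us for
 * the channel to raise DWAXIDMAC_IRQ_SUSPENDED before the pause is
 * reported as failed with -EAGAIN.
 */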
680
681/* Called in chan locked context */
682static inline void axi_chan_resume(struct axi_dma_chan *chan)
683{
684 u32 val;
685
686 val = axi_dma_ioread32(chan->chip, DMAC_CHEN);
687 val &= ~(BIT(chan->id) << DMAC_CHAN_SUSP_SHIFT);
688 val |= (BIT(chan->id) << DMAC_CHAN_SUSP_WE_SHIFT);
689 axi_dma_iowrite32(chan->chip, DMAC_CHEN, val);
690
691 chan->is_paused = false;
692}
693
694static int dma_chan_resume(struct dma_chan *dchan)
695{
696 struct axi_dma_chan *chan = dchan_to_axi_dma_chan(dchan);
697 unsigned long flags;
698
699 spin_lock_irqsave(&chan->vc.lock, flags);
700
701 if (chan->is_paused)
702 axi_chan_resume(chan);
703
704 spin_unlock_irqrestore(&chan->vc.lock, flags);
705
706 return 0;
707}
708
709static int axi_dma_suspend(struct axi_dma_chip *chip)
710{
711 axi_dma_irq_disable(chip);
712 axi_dma_disable(chip);
713
714 clk_disable_unprepare(chip->core_clk);
715 clk_disable_unprepare(chip->cfgr_clk);
716
717 return 0;
718}
719
720static int axi_dma_resume(struct axi_dma_chip *chip)
721{
722 int ret;
723
724 ret = clk_prepare_enable(chip->cfgr_clk);
725 if (ret < 0)
726 return ret;
727
728 ret = clk_prepare_enable(chip->core_clk);
729 if (ret < 0)
730 return ret;
731
732 axi_dma_enable(chip);
733 axi_dma_irq_enable(chip);
734
735 return 0;
736}
737
738static int __maybe_unused axi_dma_runtime_suspend(struct device *dev)
739{
740 struct axi_dma_chip *chip = dev_get_drvdata(dev);
741
742 return axi_dma_suspend(chip);
743}
744
745static int __maybe_unused axi_dma_runtime_resume(struct device *dev)
746{
747 struct axi_dma_chip *chip = dev_get_drvdata(dev);
748
749 return axi_dma_resume(chip);
750}
751
752static int parse_device_properties(struct axi_dma_chip *chip)
753{
754 struct device *dev = chip->dev;
755 u32 tmp, carr[DMAC_MAX_CHANNELS];
756 int ret;
757
758 ret = device_property_read_u32(dev, "dma-channels", &tmp);
759 if (ret)
760 return ret;
761 if (tmp == 0 || tmp > DMAC_MAX_CHANNELS)
762 return -EINVAL;
763
764 chip->dw->hdata->nr_channels = tmp;
765
766 ret = device_property_read_u32(dev, "snps,dma-masters", &tmp);
767 if (ret)
768 return ret;
769 if (tmp == 0 || tmp > DMAC_MAX_MASTERS)
770 return -EINVAL;
771
772 chip->dw->hdata->nr_masters = tmp;
773
774 ret = device_property_read_u32(dev, "snps,data-width", &tmp);
775 if (ret)
776 return ret;
777 if (tmp > DWAXIDMAC_TRANS_WIDTH_MAX)
778 return -EINVAL;
779
780 chip->dw->hdata->m_data_width = tmp;
781
782 ret = device_property_read_u32_array(dev, "snps,block-size", carr,
783 chip->dw->hdata->nr_channels);
784 if (ret)
785 return ret;
786 for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
787 if (carr[tmp] == 0 || carr[tmp] > DMAC_MAX_BLK_SIZE)
788 return -EINVAL;
789
790 chip->dw->hdata->block_size[tmp] = carr[tmp];
791 }
792
793 ret = device_property_read_u32_array(dev, "snps,priority", carr,
794 chip->dw->hdata->nr_channels);
795 if (ret)
796 return ret;
797 /* Priority value must be programmed within [0:nr_channels-1] range */
798 for (tmp = 0; tmp < chip->dw->hdata->nr_channels; tmp++) {
799 if (carr[tmp] >= chip->dw->hdata->nr_channels)
800 return -EINVAL;
801
802 chip->dw->hdata->priority[tmp] = carr[tmp];
803 }
804
805 /* snps,axi-max-burst-len is an optional property */
806 ret = device_property_read_u32(dev, "snps,axi-max-burst-len", &tmp);
807 if (!ret) {
808 if (tmp > DWAXIDMAC_ARWLEN_MAX + 1)
809 return -EINVAL;
810 if (tmp < DWAXIDMAC_ARWLEN_MIN + 1)
811 return -EINVAL;
812
813 chip->dw->hdata->restrict_axi_burst_len = true;
814 chip->dw->hdata->axi_rw_burst_len = tmp - 1;
815 }
816
817 return 0;
818}
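/*
 * Editor's note (worked example): snps,axi-max-burst-len = <16> passes
 * the [DWAXIDMAC_ARWLEN_MIN + 1, DWAXIDMAC_ARWLEN_MAX + 1] = [1, 256]
 * range check and is stored as axi_rw_burst_len = 15, matching the AXI
 * AxLEN "burst length minus one" encoding written into CH_CTL_H.
 */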
819
820static int dw_probe(struct platform_device *pdev)
821{
822 struct axi_dma_chip *chip;
823 struct resource *mem;
824 struct dw_axi_dma *dw;
825 struct dw_axi_dma_hcfg *hdata;
826 u32 i;
827 int ret;
828
829 chip = devm_kzalloc(&pdev->dev, sizeof(*chip), GFP_KERNEL);
830 if (!chip)
831 return -ENOMEM;
832
833 dw = devm_kzalloc(&pdev->dev, sizeof(*dw), GFP_KERNEL);
834 if (!dw)
835 return -ENOMEM;
836
837 hdata = devm_kzalloc(&pdev->dev, sizeof(*hdata), GFP_KERNEL);
838 if (!hdata)
839 return -ENOMEM;
840
841 chip->dw = dw;
842 chip->dev = &pdev->dev;
843 chip->dw->hdata = hdata;
844
845 chip->irq = platform_get_irq(pdev, 0);
846 if (chip->irq < 0)
847 return chip->irq;
848
849 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
850 chip->regs = devm_ioremap_resource(chip->dev, mem);
851 if (IS_ERR(chip->regs))
852 return PTR_ERR(chip->regs);
853
854 chip->core_clk = devm_clk_get(chip->dev, "core-clk");
855 if (IS_ERR(chip->core_clk))
856 return PTR_ERR(chip->core_clk);
857
858 chip->cfgr_clk = devm_clk_get(chip->dev, "cfgr-clk");
859 if (IS_ERR(chip->cfgr_clk))
860 return PTR_ERR(chip->cfgr_clk);
861
862 ret = parse_device_properties(chip);
863 if (ret)
864 return ret;
865
866 dw->chan = devm_kcalloc(chip->dev, hdata->nr_channels,
867 sizeof(*dw->chan), GFP_KERNEL);
868 if (!dw->chan)
869 return -ENOMEM;
870
871 ret = devm_request_irq(chip->dev, chip->irq, dw_axi_dma_interrupt,
872 IRQF_SHARED, KBUILD_MODNAME, chip);
873 if (ret)
874 return ret;
875
876 /* LLI addresses must be aligned to a 64-byte boundary */
877 dw->desc_pool = dmam_pool_create(KBUILD_MODNAME, chip->dev,
878 sizeof(struct axi_dma_desc), 64, 0);
879 if (!dw->desc_pool) {
880 dev_err(chip->dev, "No memory for descriptors dma pool\n");
881 return -ENOMEM;
882 }
883
884 INIT_LIST_HEAD(&dw->dma.channels);
885 for (i = 0; i < hdata->nr_channels; i++) {
886 struct axi_dma_chan *chan = &dw->chan[i];
887
888 chan->chip = chip;
889 chan->id = i;
890 chan->chan_regs = chip->regs + COMMON_REG_LEN + i * CHAN_REG_LEN;
891 atomic_set(&chan->descs_allocated, 0);
892
893 chan->vc.desc_free = vchan_desc_put;
894 vchan_init(&chan->vc, &dw->dma);
895 }
896
897 /* Set capabilities */
898 dma_cap_set(DMA_MEMCPY, dw->dma.cap_mask);
899
900 /* DMA capabilities */
901 dw->dma.chancnt = hdata->nr_channels;
902 dw->dma.src_addr_widths = AXI_DMA_BUSWIDTHS;
903 dw->dma.dst_addr_widths = AXI_DMA_BUSWIDTHS;
904 dw->dma.directions = BIT(DMA_MEM_TO_MEM);
905 dw->dma.residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;
906
907 dw->dma.dev = chip->dev;
908 dw->dma.device_tx_status = dma_chan_tx_status;
909 dw->dma.device_issue_pending = dma_chan_issue_pending;
910 dw->dma.device_terminate_all = dma_chan_terminate_all;
911 dw->dma.device_pause = dma_chan_pause;
912 dw->dma.device_resume = dma_chan_resume;
913
914 dw->dma.device_alloc_chan_resources = dma_chan_alloc_chan_resources;
915 dw->dma.device_free_chan_resources = dma_chan_free_chan_resources;
916
917 dw->dma.device_prep_dma_memcpy = dma_chan_prep_dma_memcpy;
918
919 platform_set_drvdata(pdev, chip);
920
921 pm_runtime_enable(chip->dev);
922
923 /*
924 * We can't just call pm_runtime_get() here instead of
925 * pm_runtime_get_noresume() + axi_dma_resume() because the
926 * driver must also work without Runtime PM.
927 */
928 pm_runtime_get_noresume(chip->dev);
929 ret = axi_dma_resume(chip);
930 if (ret < 0)
931 goto err_pm_disable;
932
933 axi_dma_hw_init(chip);
934
935 pm_runtime_put(chip->dev);
936
937 ret = dma_async_device_register(&dw->dma);
938 if (ret)
939 goto err_pm_disable;
940
941 dev_info(chip->dev, "DesignWare AXI DMA Controller, %d channels\n",
942 dw->hdata->nr_channels);
943
944 return 0;
945
946err_pm_disable:
947 pm_runtime_disable(chip->dev);
948
949 return ret;
950}
951
952static int dw_remove(struct platform_device *pdev)
953{
954 struct axi_dma_chip *chip = platform_get_drvdata(pdev);
955 struct dw_axi_dma *dw = chip->dw;
956 struct axi_dma_chan *chan, *_chan;
957 u32 i;
958
959 /* Enable clocks before accessing the registers */
960 clk_prepare_enable(chip->cfgr_clk);
961 clk_prepare_enable(chip->core_clk);
962 axi_dma_irq_disable(chip);
963 for (i = 0; i < dw->hdata->nr_channels; i++) {
964 axi_chan_disable(&chip->dw->chan[i]);
965 axi_chan_irq_disable(&chip->dw->chan[i], DWAXIDMAC_IRQ_ALL);
966 }
967 axi_dma_disable(chip);
968
969 pm_runtime_disable(chip->dev);
970 axi_dma_suspend(chip);
971
972 devm_free_irq(chip->dev, chip->irq, chip);
973
974 list_for_each_entry_safe(chan, _chan, &dw->dma.channels,
975 vc.chan.device_node) {
976 list_del(&chan->vc.chan.device_node);
977 tasklet_kill(&chan->vc.task);
978 }
979
980 dma_async_device_unregister(&dw->dma);
981
982 return 0;
983}
984
985static const struct dev_pm_ops dw_axi_dma_pm_ops = {
986 SET_RUNTIME_PM_OPS(axi_dma_runtime_suspend, axi_dma_runtime_resume, NULL)
987};
988
989static const struct of_device_id dw_dma_of_id_table[] = {
990 { .compatible = "snps,axi-dma-1.01a" },
991 {}
992};
993MODULE_DEVICE_TABLE(of, dw_dma_of_id_table);
994
995static struct platform_driver dw_driver = {
996 .probe = dw_probe,
997 .remove = dw_remove,
998 .driver = {
999 .name = KBUILD_MODNAME,
1000 .of_match_table = of_match_ptr(dw_dma_of_id_table),
1001 .pm = &dw_axi_dma_pm_ops,
1002 },
1003};
1004module_platform_driver(dw_driver);
1005
1006MODULE_LICENSE("GPL v2");
1007MODULE_DESCRIPTION("Synopsys DesignWare AXI DMA Controller platform driver");
1008MODULE_AUTHOR("Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>");
diff --git a/drivers/dma/dw-axi-dmac/dw-axi-dmac.h b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
new file mode 100644
index 000000000000..f8888dc0b8dc
--- /dev/null
+++ b/drivers/dma/dw-axi-dmac/dw-axi-dmac.h
@@ -0,0 +1,334 @@
1// SPDX-License-Identifier: GPL-2.0
2// (C) 2017-2018 Synopsys, Inc. (www.synopsys.com)
3
4/*
5 * Synopsys DesignWare AXI DMA Controller driver.
6 *
7 * Author: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
8 */
9
10#ifndef _AXI_DMA_PLATFORM_H
11#define _AXI_DMA_PLATFORM_H
12
13#include <linux/bitops.h>
14#include <linux/clk.h>
15#include <linux/device.h>
16#include <linux/dmaengine.h>
17#include <linux/types.h>
18
19#include "../virt-dma.h"
20
21#define DMAC_MAX_CHANNELS 8
22#define DMAC_MAX_MASTERS 2
23#define DMAC_MAX_BLK_SIZE 0x200000
24
25struct dw_axi_dma_hcfg {
26 u32 nr_channels;
27 u32 nr_masters;
28 u32 m_data_width;
29 u32 block_size[DMAC_MAX_CHANNELS];
30 u32 priority[DMAC_MAX_CHANNELS];
31 /* maximum supported axi burst length */
32 u32 axi_rw_burst_len;
33 bool restrict_axi_burst_len;
34};
35
36struct axi_dma_chan {
37 struct axi_dma_chip *chip;
38 void __iomem *chan_regs;
39 u8 id;
40 atomic_t descs_allocated;
41
42 struct virt_dma_chan vc;
43
44 /* these other elements are all protected by vc.lock */
45 bool is_paused;
46};
47
48struct dw_axi_dma {
49 struct dma_device dma;
50 struct dw_axi_dma_hcfg *hdata;
51 struct dma_pool *desc_pool;
52
53 /* channels */
54 struct axi_dma_chan *chan;
55};
56
57struct axi_dma_chip {
58 struct device *dev;
59 int irq;
60 void __iomem *regs;
61 struct clk *core_clk;
62 struct clk *cfgr_clk;
63 struct dw_axi_dma *dw;
64};
65
66/* LLI == Linked List Item */
67struct __packed axi_dma_lli {
68 __le64 sar;
69 __le64 dar;
70 __le32 block_ts_lo;
71 __le32 block_ts_hi;
72 __le64 llp;
73 __le32 ctl_lo;
74 __le32 ctl_hi;
75 __le32 sstat;
76 __le32 dstat;
77 __le32 status_lo;
78 __le32 status_hi;
79 __le32 reserved_lo;
80 __le32 reserved_hi;
81};
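/*
 * Editor's note: the packed LLI is exactly 64 bytes (2 x 8 for sar/dar,
 * 2 x 4 for block_ts, 8 for llp, 8 x 4 for the remaining words), which
 * is why dw_probe() creates the descriptor dma_pool with 64-byte
 * alignment for the hardware LLI fetches.
 */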
82
83struct axi_dma_desc {
84 struct axi_dma_lli lli;
85
86 struct virt_dma_desc vd;
87 struct axi_dma_chan *chan;
88 struct list_head xfer_list;
89};
90
91static inline struct device *dchan2dev(struct dma_chan *dchan)
92{
93 return &dchan->dev->device;
94}
95
96static inline struct device *chan2dev(struct axi_dma_chan *chan)
97{
98 return &chan->vc.chan.dev->device;
99}
100
101static inline struct axi_dma_desc *vd_to_axi_desc(struct virt_dma_desc *vd)
102{
103 return container_of(vd, struct axi_dma_desc, vd);
104}
105
106static inline struct axi_dma_chan *vc_to_axi_dma_chan(struct virt_dma_chan *vc)
107{
108 return container_of(vc, struct axi_dma_chan, vc);
109}
110
111static inline struct axi_dma_chan *dchan_to_axi_dma_chan(struct dma_chan *dchan)
112{
113 return vc_to_axi_dma_chan(to_virt_chan(dchan));
114}
115
116
117#define COMMON_REG_LEN 0x100
118#define CHAN_REG_LEN 0x100
119
120/* Common registers offset */
121#define DMAC_ID 0x000 /* R DMAC ID */
122#define DMAC_COMPVER 0x008 /* R DMAC Component Version */
123#define DMAC_CFG 0x010 /* R/W DMAC Configuration */
124#define DMAC_CHEN 0x018 /* R/W DMAC Channel Enable */
125#define DMAC_CHEN_L 0x018 /* R/W DMAC Channel Enable 00-31 */
126#define DMAC_CHEN_H 0x01C /* R/W DMAC Channel Enable 32-63 */
127#define DMAC_INTSTATUS 0x030 /* R DMAC Interrupt Status */
128#define DMAC_COMMON_INTCLEAR 0x038 /* W DMAC Interrupt Clear */
129#define DMAC_COMMON_INTSTATUS_ENA 0x040 /* R DMAC Interrupt Status Enable */
130#define DMAC_COMMON_INTSIGNAL_ENA 0x048 /* R/W DMAC Interrupt Signal Enable */
131#define DMAC_COMMON_INTSTATUS 0x050 /* R DMAC Interrupt Status */
132#define DMAC_RESET 0x058 /* R DMAC Reset Register1 */
133
134/* DMA channel registers offset */
135#define CH_SAR 0x000 /* R/W Chan Source Address */
136#define CH_DAR 0x008 /* R/W Chan Destination Address */
137#define CH_BLOCK_TS 0x010 /* R/W Chan Block Transfer Size */
138#define CH_CTL 0x018 /* R/W Chan Control */
139#define CH_CTL_L 0x018 /* R/W Chan Control 00-31 */
140#define CH_CTL_H 0x01C /* R/W Chan Control 32-63 */
141#define CH_CFG 0x020 /* R/W Chan Configuration */
142#define CH_CFG_L 0x020 /* R/W Chan Configuration 00-31 */
143#define CH_CFG_H 0x024 /* R/W Chan Configuration 32-63 */
144#define CH_LLP 0x028 /* R/W Chan Linked List Pointer */
145#define CH_STATUS 0x030 /* R Chan Status */
146#define CH_SWHSSRC 0x038 /* R/W Chan SW Handshake Source */
147#define CH_SWHSDST 0x040 /* R/W Chan SW Handshake Destination */
148#define CH_BLK_TFR_RESUMEREQ 0x048 /* W Chan Block Transfer Resume Req */
149#define CH_AXI_ID 0x050 /* R/W Chan AXI ID */
150#define CH_AXI_QOS 0x058 /* R/W Chan AXI QOS */
151#define CH_SSTAT 0x060 /* R Chan Source Status */
152#define CH_DSTAT 0x068 /* R Chan Destination Status */
153#define CH_SSTATAR 0x070 /* R/W Chan Source Status Fetch Addr */
154#define CH_DSTATAR 0x078 /* R/W Chan Destination Status Fetch Addr */
155#define CH_INTSTATUS_ENA 0x080 /* R/W Chan Interrupt Status Enable */
156#define CH_INTSTATUS 0x088 /* R/W Chan Interrupt Status */
157#define CH_INTSIGNAL_ENA 0x090 /* R/W Chan Interrupt Signal Enable */
158#define CH_INTCLEAR 0x098 /* W Chan Interrupt Clear */
159
160
161/* DMAC_CFG */
162#define DMAC_EN_POS 0
163#define DMAC_EN_MASK BIT(DMAC_EN_POS)
164
165#define INT_EN_POS 1
166#define INT_EN_MASK BIT(INT_EN_POS)
167
168#define DMAC_CHAN_EN_SHIFT 0
169#define DMAC_CHAN_EN_WE_SHIFT 8
170
171#define DMAC_CHAN_SUSP_SHIFT 16
172#define DMAC_CHAN_SUSP_WE_SHIFT 24
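/*
 * Editor's note: DMAC_CHEN thus packs four byte-wide fields for up to
 * eight channels: enable (bits 0-7), enable write-enable (8-15),
 * suspend (16-23) and suspend write-enable (24-31).
 */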
173
174/* CH_CTL_H */
175#define CH_CTL_H_ARLEN_EN BIT(6)
176#define CH_CTL_H_ARLEN_POS 7
177#define CH_CTL_H_AWLEN_EN BIT(15)
178#define CH_CTL_H_AWLEN_POS 16
179
180enum {
181 DWAXIDMAC_ARWLEN_1 = 0,
182 DWAXIDMAC_ARWLEN_2 = 1,
183 DWAXIDMAC_ARWLEN_4 = 3,
184 DWAXIDMAC_ARWLEN_8 = 7,
185 DWAXIDMAC_ARWLEN_16 = 15,
186 DWAXIDMAC_ARWLEN_32 = 31,
187 DWAXIDMAC_ARWLEN_64 = 63,
188 DWAXIDMAC_ARWLEN_128 = 127,
189 DWAXIDMAC_ARWLEN_256 = 255,
190 DWAXIDMAC_ARWLEN_MIN = DWAXIDMAC_ARWLEN_1,
191 DWAXIDMAC_ARWLEN_MAX = DWAXIDMAC_ARWLEN_256
192};
193
194#define CH_CTL_H_LLI_LAST BIT(30)
195#define CH_CTL_H_LLI_VALID BIT(31)
196
197/* CH_CTL_L */
198#define CH_CTL_L_LAST_WRITE_EN BIT(30)
199
200#define CH_CTL_L_DST_MSIZE_POS 18
201#define CH_CTL_L_SRC_MSIZE_POS 14
202
203enum {
204 DWAXIDMAC_BURST_TRANS_LEN_1 = 0,
205 DWAXIDMAC_BURST_TRANS_LEN_4,
206 DWAXIDMAC_BURST_TRANS_LEN_8,
207 DWAXIDMAC_BURST_TRANS_LEN_16,
208 DWAXIDMAC_BURST_TRANS_LEN_32,
209 DWAXIDMAC_BURST_TRANS_LEN_64,
210 DWAXIDMAC_BURST_TRANS_LEN_128,
211 DWAXIDMAC_BURST_TRANS_LEN_256,
212 DWAXIDMAC_BURST_TRANS_LEN_512,
213 DWAXIDMAC_BURST_TRANS_LEN_1024
214};
215
216#define CH_CTL_L_DST_WIDTH_POS 11
217#define CH_CTL_L_SRC_WIDTH_POS 8
218
219#define CH_CTL_L_DST_INC_POS 6
220#define CH_CTL_L_SRC_INC_POS 4
221enum {
222 DWAXIDMAC_CH_CTL_L_INC = 0,
223 DWAXIDMAC_CH_CTL_L_NOINC
224};
225
226#define CH_CTL_L_DST_MAST BIT(2)
227#define CH_CTL_L_SRC_MAST BIT(0)
228
229/* CH_CFG_H */
230#define CH_CFG_H_PRIORITY_POS 17
231#define CH_CFG_H_HS_SEL_DST_POS 4
232#define CH_CFG_H_HS_SEL_SRC_POS 3
233enum {
234 DWAXIDMAC_HS_SEL_HW = 0,
235 DWAXIDMAC_HS_SEL_SW
236};
237
238#define CH_CFG_H_TT_FC_POS 0
239enum {
240 DWAXIDMAC_TT_FC_MEM_TO_MEM_DMAC = 0,
241 DWAXIDMAC_TT_FC_MEM_TO_PER_DMAC,
242 DWAXIDMAC_TT_FC_PER_TO_MEM_DMAC,
243 DWAXIDMAC_TT_FC_PER_TO_PER_DMAC,
244 DWAXIDMAC_TT_FC_PER_TO_MEM_SRC,
245 DWAXIDMAC_TT_FC_PER_TO_PER_SRC,
246 DWAXIDMAC_TT_FC_MEM_TO_PER_DST,
247 DWAXIDMAC_TT_FC_PER_TO_PER_DST
248};
249
250/* CH_CFG_L */
251#define CH_CFG_L_DST_MULTBLK_TYPE_POS 2
252#define CH_CFG_L_SRC_MULTBLK_TYPE_POS 0
253enum {
254 DWAXIDMAC_MBLK_TYPE_CONTIGUOUS = 0,
255 DWAXIDMAC_MBLK_TYPE_RELOAD,
256 DWAXIDMAC_MBLK_TYPE_SHADOW_REG,
257 DWAXIDMAC_MBLK_TYPE_LL
258};
259
260/**
261 * DW AXI DMA channel interrupts
262 *
263 * @DWAXIDMAC_IRQ_NONE: Bitmask with no interrupt set
264 * @DWAXIDMAC_IRQ_BLOCK_TRF: Block transfer complete
265 * @DWAXIDMAC_IRQ_DMA_TRF: DMA transfer complete
266 * @DWAXIDMAC_IRQ_SRC_TRAN: Source transaction complete
267 * @DWAXIDMAC_IRQ_DST_TRAN: Destination transaction complete
268 * @DWAXIDMAC_IRQ_SRC_DEC_ERR: Source decode error
269 * @DWAXIDMAC_IRQ_DST_DEC_ERR: Destination decode error
270 * @DWAXIDMAC_IRQ_SRC_SLV_ERR: Source slave error
271 * @DWAXIDMAC_IRQ_DST_SLV_ERR: Destination slave error
272 * @DWAXIDMAC_IRQ_LLI_RD_DEC_ERR: LLI read decode error
273 * @DWAXIDMAC_IRQ_LLI_WR_DEC_ERR: LLI write decode error
274 * @DWAXIDMAC_IRQ_LLI_RD_SLV_ERR: LLI read slave error
275 * @DWAXIDMAC_IRQ_LLI_WR_SLV_ERR: LLI write slave error
276 * @DWAXIDMAC_IRQ_INVALID_ERR: LLI invalid error or Shadow register error
277 * @DWAXIDMAC_IRQ_MULTIBLKTYPE_ERR: Slave Interface Multiblock type error
278 * @DWAXIDMAC_IRQ_DEC_ERR: Slave Interface decode error
279 * @DWAXIDMAC_IRQ_WR2RO_ERR: Slave Interface write to read only error
280 * @DWAXIDMAC_IRQ_RD2RWO_ERR: Slave Interface read to write only error
281 * @DWAXIDMAC_IRQ_WRONCHEN_ERR: Slave Interface write to channel error
282 * @DWAXIDMAC_IRQ_SHADOWREG_ERR: Slave Interface shadow reg error
283 * @DWAXIDMAC_IRQ_WRONHOLD_ERR: Slave Interface hold error
284 * @DWAXIDMAC_IRQ_LOCK_CLEARED: Lock Cleared Status
285 * @DWAXIDMAC_IRQ_SRC_SUSPENDED: Source Suspended Status
286 * @DWAXIDMAC_IRQ_SUSPENDED: Channel Suspended Status
287 * @DWAXIDMAC_IRQ_DISABLED: Channel Disabled Status
288 * @DWAXIDMAC_IRQ_ABORTED: Channel Aborted Status
289 * @DWAXIDMAC_IRQ_ALL_ERR: Bitmask of all error interrupts
290 * @DWAXIDMAC_IRQ_ALL: Bitmask of all interrupts
291 */
292enum {
293 DWAXIDMAC_IRQ_NONE = 0,
294 DWAXIDMAC_IRQ_BLOCK_TRF = BIT(0),
295 DWAXIDMAC_IRQ_DMA_TRF = BIT(1),
296 DWAXIDMAC_IRQ_SRC_TRAN = BIT(3),
297 DWAXIDMAC_IRQ_DST_TRAN = BIT(4),
298 DWAXIDMAC_IRQ_SRC_DEC_ERR = BIT(5),
299 DWAXIDMAC_IRQ_DST_DEC_ERR = BIT(6),
300 DWAXIDMAC_IRQ_SRC_SLV_ERR = BIT(7),
301 DWAXIDMAC_IRQ_DST_SLV_ERR = BIT(8),
302 DWAXIDMAC_IRQ_LLI_RD_DEC_ERR = BIT(9),
303 DWAXIDMAC_IRQ_LLI_WR_DEC_ERR = BIT(10),
304 DWAXIDMAC_IRQ_LLI_RD_SLV_ERR = BIT(11),
305 DWAXIDMAC_IRQ_LLI_WR_SLV_ERR = BIT(12),
306 DWAXIDMAC_IRQ_INVALID_ERR = BIT(13),
307 DWAXIDMAC_IRQ_MULTIBLKTYPE_ERR = BIT(14),
308 DWAXIDMAC_IRQ_DEC_ERR = BIT(16),
309 DWAXIDMAC_IRQ_WR2RO_ERR = BIT(17),
310 DWAXIDMAC_IRQ_RD2RWO_ERR = BIT(18),
311 DWAXIDMAC_IRQ_WRONCHEN_ERR = BIT(19),
312 DWAXIDMAC_IRQ_SHADOWREG_ERR = BIT(20),
313 DWAXIDMAC_IRQ_WRONHOLD_ERR = BIT(21),
314 DWAXIDMAC_IRQ_LOCK_CLEARED = BIT(27),
315 DWAXIDMAC_IRQ_SRC_SUSPENDED = BIT(28),
316 DWAXIDMAC_IRQ_SUSPENDED = BIT(29),
317 DWAXIDMAC_IRQ_DISABLED = BIT(30),
318 DWAXIDMAC_IRQ_ABORTED = BIT(31),
319 DWAXIDMAC_IRQ_ALL_ERR = (GENMASK(21, 16) | GENMASK(14, 5)),
320 DWAXIDMAC_IRQ_ALL = GENMASK(31, 0)
321};
322
323enum {
324 DWAXIDMAC_TRANS_WIDTH_8 = 0,
325 DWAXIDMAC_TRANS_WIDTH_16,
326 DWAXIDMAC_TRANS_WIDTH_32,
327 DWAXIDMAC_TRANS_WIDTH_64,
328 DWAXIDMAC_TRANS_WIDTH_128,
329 DWAXIDMAC_TRANS_WIDTH_256,
330 DWAXIDMAC_TRANS_WIDTH_512,
331 DWAXIDMAC_TRANS_WIDTH_MAX = DWAXIDMAC_TRANS_WIDTH_512
332};
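/*
 * Editor's note: these encodings are what the snps,data-width DT
 * property carries; e.g. <3> in the binding example above selects
 * DWAXIDMAC_TRANS_WIDTH_64, a 64-bit master data bus.
 */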
333
334#endif /* _AXI_DMA_PLATFORM_H */