author    Linus Torvalds <torvalds@linux-foundation.org>  2016-05-19 14:47:18 -0400
committer Linus Torvalds <torvalds@linux-foundation.org>  2016-05-19 14:47:18 -0400
commit    a0d3c7c5c07cfbe00ab89438ddf82482f5a99422 (patch)
tree      560def78af776bef5d0d0202580c4be0fc6219c6 /drivers/dma/xilinx
parent    ec67b14c1be4ebe4cf08f06746a8d0313ab85432 (diff)
parent    f9114a54c1d828abbe87ac446a2da49d9720203f (diff)
Merge tag 'dmaengine-4.7-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul:
 "This time round the update brings in following changes:

   - new tegra driver for ADMA device

   - support for Xilinx AXI Direct Memory Access Engine and Xilinx AXI
     Central Direct Memory Access Engine and few updates to this driver

   - new cyclic capability to sun6i and few updates

   - slave-sg support in bcm2835

   - updates to many drivers like designware, hsu, mv_xor, pxa, edma,
     qcom_hidma & bam"

* tag 'dmaengine-4.7-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (84 commits)
  dmaengine: ioatdma: disable relaxed ordering for ioatdma
  dmaengine: of_dma: approximate an average distribution
  dmaengine: core: Use IS_ENABLED() instead of checking for built-in or module
  dmaengine: edma: Re-evaluate errors when ccerr is triggered w/o error event
  dmaengine: qcom_hidma: add support for object hierarchy
  dmaengine: qcom_hidma: add debugfs hooks
  dmaengine: qcom_hidma: implement lower level hardware interface
  dmaengine: vdma: Add clock support
  Documentation: DT: vdma: Add clock support for dmas
  dmaengine: vdma: Add config structure to differentiate dmas
  MAINTAINERS: Update Tegra DMA maintainers
  dmaengine: tegra-adma: Add support for Tegra210 ADMA
  Documentation: DT: Add binding documentation for NVIDIA ADMA
  dmaengine: vdma: Add Support for Xilinx AXI Central Direct Memory Access Engine
  Documentation: DT: vdma: update binding doc for AXI CDMA
  dmaengine: vdma: Add Support for Xilinx AXI Direct Memory Access Engine
  Documentation: DT: vdma: update binding doc for AXI DMA
  dmaengine: vdma: Rename xilinx_vdma_ prefix to xilinx_dma
  dmaengine: slave means at least one of DMA_SLAVE, DMA_CYCLIC
  dmaengine: mv_xor: Allow selecting mv_xor for mvebu only compatible SoC
  ...
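
For reference, a minimal client-side sketch of how a peripheral driver consumes a dmaengine slave channel like the ones this update extends. The channel name "tx", the FIFO address and the function name are illustrative assumptions, not taken from this merge:

#include <linux/dmaengine.h>
#include <linux/err.h>

/* Hypothetical consumer: queue one MEM_TO_DEV transfer on a named channel. */
static int example_issue_tx(struct device *dev, dma_addr_t buf_dma,
			    size_t len, dma_addr_t fifo_addr)
{
	struct dma_slave_config cfg = {
		.direction	= DMA_MEM_TO_DEV,
		.dst_addr	= fifo_addr,	/* assumed device FIFO address */
		.dst_addr_width	= DMA_SLAVE_BUSWIDTH_4_BYTES,
	};
	struct dma_async_tx_descriptor *desc;
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "tx");	/* "tx" is an example DT name */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		goto err;

	desc = dmaengine_prep_slave_single(chan, buf_dma, len, DMA_MEM_TO_DEV,
					   DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
	if (!desc) {
		ret = -ENOMEM;
		goto err;
	}

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);	/* ends up in the driver's issue_pending */
	return 0;			/* a real client keeps chan until remove() */

err:
	dma_release_channel(chan);
	return ret;
}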
Diffstat (limited to 'drivers/dma/xilinx')
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c  1663
1 file changed, 1296 insertions(+), 367 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index ef67f278e076..df9118540b91 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -16,6 +16,15 @@
16 * video device (S2MM). Initialization, status, interrupt and management 16 * video device (S2MM). Initialization, status, interrupt and management
17 * registers are accessed through an AXI4-Lite slave interface. 17 * registers are accessed through an AXI4-Lite slave interface.
18 * 18 *
19 * The AXI Direct Memory Access (AXI DMA) core is a soft Xilinx IP core that
20 * provides high-bandwidth one dimensional direct memory access between memory
21 * and AXI4-Stream target peripherals. It supports one receive and one
22 * transmit channel, both of them optional at synthesis time.
23 *
24 * The AXI CDMA, is a soft IP, which provides high-bandwidth Direct Memory
25 * Access (DMA) between a memory-mapped source address and a memory-mapped
26 * destination address.
27 *
19 * This program is free software: you can redistribute it and/or modify 28 * This program is free software: you can redistribute it and/or modify
20 * it under the terms of the GNU General Public License as published by 29 * it under the terms of the GNU General Public License as published by
21 * the Free Software Foundation, either version 2 of the License, or 30 * the Free Software Foundation, either version 2 of the License, or
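
Since the CDMA core described above does plain memory-to-memory copies, it is presumably exposed to clients through the generic DMA_MEMCPY capability; a hedged sketch of such a client (function name and error handling are illustrative only):

#include <linux/dmaengine.h>

/* Illustrative only: copy 'len' bytes between two already DMA-mapped buffers. */
static int example_memcpy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;

	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);

	chan = dma_request_channel(mask, NULL, NULL);	/* any memcpy-capable channel */
	if (!chan)
		return -ENODEV;

	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len, DMA_CTRL_ACK);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	dmaengine_submit(tx);
	dma_async_issue_pending(chan);
	return 0;
}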
@@ -35,116 +44,138 @@
35#include <linux/of_platform.h> 44#include <linux/of_platform.h>
36#include <linux/of_irq.h> 45#include <linux/of_irq.h>
37#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/clk.h>
38 48
39#include "../dmaengine.h" 49#include "../dmaengine.h"
40 50
41/* Register/Descriptor Offsets */ 51/* Register/Descriptor Offsets */
42#define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000 52#define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000
43#define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030 53#define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030
44#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 54#define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050
45#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 55#define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0
46 56
47/* Control Registers */ 57/* Control Registers */
48#define XILINX_VDMA_REG_DMACR 0x0000 58#define XILINX_DMA_REG_DMACR 0x0000
49#define XILINX_VDMA_DMACR_DELAY_MAX 0xff 59#define XILINX_DMA_DMACR_DELAY_MAX 0xff
50#define XILINX_VDMA_DMACR_DELAY_SHIFT 24 60#define XILINX_DMA_DMACR_DELAY_SHIFT 24
51#define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff 61#define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff
52#define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16 62#define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16
53#define XILINX_VDMA_DMACR_ERR_IRQ BIT(14) 63#define XILINX_DMA_DMACR_ERR_IRQ BIT(14)
54#define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13) 64#define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13)
55#define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12) 65#define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12)
56#define XILINX_VDMA_DMACR_MASTER_SHIFT 8 66#define XILINX_DMA_DMACR_MASTER_SHIFT 8
57#define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5 67#define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5
58#define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4) 68#define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4)
59#define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3) 69#define XILINX_DMA_DMACR_GENLOCK_EN BIT(3)
60#define XILINX_VDMA_DMACR_RESET BIT(2) 70#define XILINX_DMA_DMACR_RESET BIT(2)
61#define XILINX_VDMA_DMACR_CIRC_EN BIT(1) 71#define XILINX_DMA_DMACR_CIRC_EN BIT(1)
62#define XILINX_VDMA_DMACR_RUNSTOP BIT(0) 72#define XILINX_DMA_DMACR_RUNSTOP BIT(0)
63#define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) 73#define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5)
64 74
65#define XILINX_VDMA_REG_DMASR 0x0004 75#define XILINX_DMA_REG_DMASR 0x0004
66#define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15) 76#define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15)
67#define XILINX_VDMA_DMASR_ERR_IRQ BIT(14) 77#define XILINX_DMA_DMASR_ERR_IRQ BIT(14)
68#define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13) 78#define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13)
69#define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12) 79#define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12)
70#define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11) 80#define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11)
71#define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10) 81#define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10)
72#define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9) 82#define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9)
73#define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8) 83#define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8)
74#define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7) 84#define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7)
75#define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6) 85#define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6)
76#define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5) 86#define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5)
77#define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4) 87#define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4)
78#define XILINX_VDMA_DMASR_IDLE BIT(1) 88#define XILINX_DMA_DMASR_IDLE BIT(1)
79#define XILINX_VDMA_DMASR_HALTED BIT(0) 89#define XILINX_DMA_DMASR_HALTED BIT(0)
80#define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24) 90#define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24)
81#define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) 91#define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16)
82 92
83#define XILINX_VDMA_REG_CURDESC 0x0008 93#define XILINX_DMA_REG_CURDESC 0x0008
84#define XILINX_VDMA_REG_TAILDESC 0x0010 94#define XILINX_DMA_REG_TAILDESC 0x0010
85#define XILINX_VDMA_REG_REG_INDEX 0x0014 95#define XILINX_DMA_REG_REG_INDEX 0x0014
86#define XILINX_VDMA_REG_FRMSTORE 0x0018 96#define XILINX_DMA_REG_FRMSTORE 0x0018
87#define XILINX_VDMA_REG_THRESHOLD 0x001c 97#define XILINX_DMA_REG_THRESHOLD 0x001c
88#define XILINX_VDMA_REG_FRMPTR_STS 0x0024 98#define XILINX_DMA_REG_FRMPTR_STS 0x0024
89#define XILINX_VDMA_REG_PARK_PTR 0x0028 99#define XILINX_DMA_REG_PARK_PTR 0x0028
90#define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8 100#define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8
91#define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0 101#define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0
92#define XILINX_VDMA_REG_VDMA_VERSION 0x002c 102#define XILINX_DMA_REG_VDMA_VERSION 0x002c
93 103
94/* Register Direct Mode Registers */ 104/* Register Direct Mode Registers */
95#define XILINX_VDMA_REG_VSIZE 0x0000 105#define XILINX_DMA_REG_VSIZE 0x0000
96#define XILINX_VDMA_REG_HSIZE 0x0004 106#define XILINX_DMA_REG_HSIZE 0x0004
97 107
98#define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008 108#define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008
99#define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 109#define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24
100#define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 110#define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0
101 111
102#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) 112#define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n))
113#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
103 114
104/* HW specific definitions */ 115/* HW specific definitions */
105#define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2 116#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2
106 117
107#define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \ 118#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
108 (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \ 119 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
109 XILINX_VDMA_DMASR_DLY_CNT_IRQ | \ 120 XILINX_DMA_DMASR_DLY_CNT_IRQ | \
110 XILINX_VDMA_DMASR_ERR_IRQ) 121 XILINX_DMA_DMASR_ERR_IRQ)
111 122
112#define XILINX_VDMA_DMASR_ALL_ERR_MASK \ 123#define XILINX_DMA_DMASR_ALL_ERR_MASK \
113 (XILINX_VDMA_DMASR_EOL_LATE_ERR | \ 124 (XILINX_DMA_DMASR_EOL_LATE_ERR | \
114 XILINX_VDMA_DMASR_SOF_LATE_ERR | \ 125 XILINX_DMA_DMASR_SOF_LATE_ERR | \
115 XILINX_VDMA_DMASR_SG_DEC_ERR | \ 126 XILINX_DMA_DMASR_SG_DEC_ERR | \
116 XILINX_VDMA_DMASR_SG_SLV_ERR | \ 127 XILINX_DMA_DMASR_SG_SLV_ERR | \
117 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ 128 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
118 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ 129 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
119 XILINX_VDMA_DMASR_DMA_DEC_ERR | \ 130 XILINX_DMA_DMASR_DMA_DEC_ERR | \
120 XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \ 131 XILINX_DMA_DMASR_DMA_SLAVE_ERR | \
121 XILINX_VDMA_DMASR_DMA_INT_ERR) 132 XILINX_DMA_DMASR_DMA_INT_ERR)
122 133
123/* 134/*
124 * Recoverable errors are DMA Internal error, SOF Early, EOF Early 135 * Recoverable errors are DMA Internal error, SOF Early, EOF Early
125 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC 136 * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC
126 * is enabled in the h/w system. 137 * is enabled in the h/w system.
127 */ 138 */
128#define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \ 139#define XILINX_DMA_DMASR_ERR_RECOVER_MASK \
129 (XILINX_VDMA_DMASR_SOF_LATE_ERR | \ 140 (XILINX_DMA_DMASR_SOF_LATE_ERR | \
130 XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ 141 XILINX_DMA_DMASR_EOF_EARLY_ERR | \
131 XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ 142 XILINX_DMA_DMASR_SOF_EARLY_ERR | \
132 XILINX_VDMA_DMASR_DMA_INT_ERR) 143 XILINX_DMA_DMASR_DMA_INT_ERR)
133 144
134/* Axi VDMA Flush on Fsync bits */ 145/* Axi VDMA Flush on Fsync bits */
135#define XILINX_VDMA_FLUSH_S2MM 3 146#define XILINX_DMA_FLUSH_S2MM 3
136#define XILINX_VDMA_FLUSH_MM2S 2 147#define XILINX_DMA_FLUSH_MM2S 2
137#define XILINX_VDMA_FLUSH_BOTH 1 148#define XILINX_DMA_FLUSH_BOTH 1
138 149
139/* Delay loop counter to prevent hardware failure */ 150/* Delay loop counter to prevent hardware failure */
140#define XILINX_VDMA_LOOP_COUNT 1000000 151#define XILINX_DMA_LOOP_COUNT 1000000
152
153/* AXI DMA Specific Registers/Offsets */
154#define XILINX_DMA_REG_SRCDSTADDR 0x18
155#define XILINX_DMA_REG_BTT 0x28
156
157/* AXI DMA Specific Masks/Bit fields */
158#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
159#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
160#define XILINX_DMA_CR_COALESCE_SHIFT 16
161#define XILINX_DMA_BD_SOP BIT(27)
162#define XILINX_DMA_BD_EOP BIT(26)
163#define XILINX_DMA_COALESCE_MAX 255
164#define XILINX_DMA_NUM_APP_WORDS 5
165
166/* AXI CDMA Specific Registers/Offsets */
167#define XILINX_CDMA_REG_SRCADDR 0x18
168#define XILINX_CDMA_REG_DSTADDR 0x20
169
170/* AXI CDMA Specific Masks */
171#define XILINX_CDMA_CR_SGMODE BIT(3)
141 172
142/** 173/**
143 * struct xilinx_vdma_desc_hw - Hardware Descriptor 174 * struct xilinx_vdma_desc_hw - Hardware Descriptor
144 * @next_desc: Next Descriptor Pointer @0x00 175 * @next_desc: Next Descriptor Pointer @0x00
145 * @pad1: Reserved @0x04 176 * @pad1: Reserved @0x04
146 * @buf_addr: Buffer address @0x08 177 * @buf_addr: Buffer address @0x08
147 * @pad2: Reserved @0x0C 178 * @buf_addr_msb: MSB of Buffer address @0x0C
148 * @vsize: Vertical Size @0x10 179 * @vsize: Vertical Size @0x10
149 * @hsize: Horizontal Size @0x14 180 * @hsize: Horizontal Size @0x14
150 * @stride: Number of bytes between the first 181 * @stride: Number of bytes between the first
@@ -154,13 +185,59 @@ struct xilinx_vdma_desc_hw {
154 u32 next_desc; 185 u32 next_desc;
155 u32 pad1; 186 u32 pad1;
156 u32 buf_addr; 187 u32 buf_addr;
157 u32 pad2; 188 u32 buf_addr_msb;
158 u32 vsize; 189 u32 vsize;
159 u32 hsize; 190 u32 hsize;
160 u32 stride; 191 u32 stride;
161} __aligned(64); 192} __aligned(64);
162 193
163/** 194/**
195 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
196 * @next_desc: Next Descriptor Pointer @0x00
197 * @pad1: Reserved @0x04
198 * @buf_addr: Buffer address @0x08
199 * @pad2: Reserved @0x0C
200 * @pad3: Reserved @0x10
201 * @pad4: Reserved @0x14
202 * @control: Control field @0x18
203 * @status: Status field @0x1C
204 * @app: APP Fields @0x20 - 0x30
205 */
206struct xilinx_axidma_desc_hw {
207 u32 next_desc;
208 u32 pad1;
209 u32 buf_addr;
210 u32 pad2;
211 u32 pad3;
212 u32 pad4;
213 u32 control;
214 u32 status;
215 u32 app[XILINX_DMA_NUM_APP_WORDS];
216} __aligned(64);
217
218/**
219 * struct xilinx_cdma_desc_hw - Hardware Descriptor
220 * @next_desc: Next Descriptor Pointer @0x00
221 * @pad1: Reserved @0x04
222 * @src_addr: Source address @0x08
223 * @pad2: Reserved @0x0C
224 * @dest_addr: Destination address @0x10
225 * @pad3: Reserved @0x14
226 * @control: Control field @0x18
227 * @status: Status field @0x1C
228 */
229struct xilinx_cdma_desc_hw {
230 u32 next_desc;
231 u32 pad1;
232 u32 src_addr;
233 u32 pad2;
234 u32 dest_addr;
235 u32 pad3;
236 u32 control;
237 u32 status;
238} __aligned(64);
239
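
A hedged sketch of how one of the xilinx_axidma_desc_hw descriptors above would be populated for a single-packet transfer; the helper name is made up, and in the driver the descriptors are actually filled in the prep routines:

/* Illustrative only: one buffer descriptor carrying a whole packet. */
static void example_fill_bd(struct xilinx_axidma_desc_hw *hw,
			    dma_addr_t buf, size_t len, dma_addr_t next_phys)
{
	hw->next_desc = lower_32_bits(next_phys);	/* physical address of next BD */
	hw->buf_addr  = lower_32_bits(buf);		/* assumes a 32-bit DMA address */
	hw->control   = len & XILINX_DMA_MAX_TRANS_LEN;	/* BTT lives in control[22:0] */
	hw->control  |= XILINX_DMA_BD_SOP | XILINX_DMA_BD_EOP;	/* start + end of packet */
}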
240/**
164 * struct xilinx_vdma_tx_segment - Descriptor segment 241 * struct xilinx_vdma_tx_segment - Descriptor segment
165 * @hw: Hardware descriptor 242 * @hw: Hardware descriptor
166 * @node: Node in the descriptor segments list 243 * @node: Node in the descriptor segments list
@@ -173,19 +250,43 @@ struct xilinx_vdma_tx_segment {
173} __aligned(64); 250} __aligned(64);
174 251
175/** 252/**
176 * struct xilinx_vdma_tx_descriptor - Per Transaction structure 253 * struct xilinx_axidma_tx_segment - Descriptor segment
254 * @hw: Hardware descriptor
255 * @node: Node in the descriptor segments list
256 * @phys: Physical address of segment
257 */
258struct xilinx_axidma_tx_segment {
259 struct xilinx_axidma_desc_hw hw;
260 struct list_head node;
261 dma_addr_t phys;
262} __aligned(64);
263
264/**
265 * struct xilinx_cdma_tx_segment - Descriptor segment
266 * @hw: Hardware descriptor
267 * @node: Node in the descriptor segments list
268 * @phys: Physical address of segment
269 */
270struct xilinx_cdma_tx_segment {
271 struct xilinx_cdma_desc_hw hw;
272 struct list_head node;
273 dma_addr_t phys;
274} __aligned(64);
275
276/**
277 * struct xilinx_dma_tx_descriptor - Per Transaction structure
177 * @async_tx: Async transaction descriptor 278 * @async_tx: Async transaction descriptor
178 * @segments: TX segments list 279 * @segments: TX segments list
179 * @node: Node in the channel descriptors list 280 * @node: Node in the channel descriptors list
180 */ 281 */
181struct xilinx_vdma_tx_descriptor { 282struct xilinx_dma_tx_descriptor {
182 struct dma_async_tx_descriptor async_tx; 283 struct dma_async_tx_descriptor async_tx;
183 struct list_head segments; 284 struct list_head segments;
184 struct list_head node; 285 struct list_head node;
185}; 286};
186 287
187/** 288/**
188 * struct xilinx_vdma_chan - Driver specific VDMA channel structure 289 * struct xilinx_dma_chan - Driver specific DMA channel structure
189 * @xdev: Driver specific device structure 290 * @xdev: Driver specific device structure
190 * @ctrl_offset: Control registers offset 291 * @ctrl_offset: Control registers offset
191 * @desc_offset: TX descriptor registers offset 292 * @desc_offset: TX descriptor registers offset
@@ -207,9 +308,14 @@ struct xilinx_vdma_tx_descriptor {
207 * @config: Device configuration info 308 * @config: Device configuration info
208 * @flush_on_fsync: Flush on Frame sync 309 * @flush_on_fsync: Flush on Frame sync
209 * @desc_pendingcount: Descriptor pending count 310 * @desc_pendingcount: Descriptor pending count
311 * @ext_addr: Indicates 64 bit addressing is supported by dma channel
312 * @desc_submitcount: Descriptor h/w submitted count
313 * @residue: Residue for AXI DMA
314 * @seg_v: Statically allocated segments base
315 * @start_transfer: Differentiate b/w DMA IP's transfer
210 */ 316 */
211struct xilinx_vdma_chan { 317struct xilinx_dma_chan {
212 struct xilinx_vdma_device *xdev; 318 struct xilinx_dma_device *xdev;
213 u32 ctrl_offset; 319 u32 ctrl_offset;
214 u32 desc_offset; 320 u32 desc_offset;
215 spinlock_t lock; 321 spinlock_t lock;
@@ -230,73 +336,122 @@ struct xilinx_vdma_chan {
230 struct xilinx_vdma_config config; 336 struct xilinx_vdma_config config;
231 bool flush_on_fsync; 337 bool flush_on_fsync;
232 u32 desc_pendingcount; 338 u32 desc_pendingcount;
339 bool ext_addr;
340 u32 desc_submitcount;
341 u32 residue;
342 struct xilinx_axidma_tx_segment *seg_v;
343 void (*start_transfer)(struct xilinx_dma_chan *chan);
344};
345
346struct xilinx_dma_config {
347 enum xdma_ip_type dmatype;
348 int (*clk_init)(struct platform_device *pdev, struct clk **axi_clk,
349 struct clk **tx_clk, struct clk **txs_clk,
350 struct clk **rx_clk, struct clk **rxs_clk);
233}; 351};
234 352
235/** 353/**
236 * struct xilinx_vdma_device - VDMA device structure 354 * struct xilinx_dma_device - DMA device structure
237 * @regs: I/O mapped base address 355 * @regs: I/O mapped base address
238 * @dev: Device Structure 356 * @dev: Device Structure
239 * @common: DMA device structure 357 * @common: DMA device structure
240 * @chan: Driver specific VDMA channel 358 * @chan: Driver specific DMA channel
241 * @has_sg: Specifies whether Scatter-Gather is present or not 359 * @has_sg: Specifies whether Scatter-Gather is present or not
242 * @flush_on_fsync: Flush on frame sync 360 * @flush_on_fsync: Flush on frame sync
361 * @ext_addr: Indicates 64 bit addressing is supported by dma device
362 * @pdev: Platform device structure pointer
363 * @dma_config: DMA config structure
364 * @axi_clk: DMA Axi4-lite interace clock
365 * @tx_clk: DMA mm2s clock
366 * @txs_clk: DMA mm2s stream clock
367 * @rx_clk: DMA s2mm clock
368 * @rxs_clk: DMA s2mm stream clock
243 */ 369 */
244struct xilinx_vdma_device { 370struct xilinx_dma_device {
245 void __iomem *regs; 371 void __iomem *regs;
246 struct device *dev; 372 struct device *dev;
247 struct dma_device common; 373 struct dma_device common;
248 struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE]; 374 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
249 bool has_sg; 375 bool has_sg;
250 u32 flush_on_fsync; 376 u32 flush_on_fsync;
377 bool ext_addr;
378 struct platform_device *pdev;
379 const struct xilinx_dma_config *dma_config;
380 struct clk *axi_clk;
381 struct clk *tx_clk;
382 struct clk *txs_clk;
383 struct clk *rx_clk;
384 struct clk *rxs_clk;
251}; 385};
252 386
253/* Macros */ 387/* Macros */
254#define to_xilinx_chan(chan) \ 388#define to_xilinx_chan(chan) \
255 container_of(chan, struct xilinx_vdma_chan, common) 389 container_of(chan, struct xilinx_dma_chan, common)
256#define to_vdma_tx_descriptor(tx) \ 390#define to_dma_tx_descriptor(tx) \
257 container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx) 391 container_of(tx, struct xilinx_dma_tx_descriptor, async_tx)
258#define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ 392#define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \
259 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ 393 readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \
260 cond, delay_us, timeout_us) 394 cond, delay_us, timeout_us)
261 395
262/* IO accessors */ 396/* IO accessors */
263static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg) 397static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg)
264{ 398{
265 return ioread32(chan->xdev->regs + reg); 399 return ioread32(chan->xdev->regs + reg);
266} 400}
267 401
268static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value) 402static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value)
269{ 403{
270 iowrite32(value, chan->xdev->regs + reg); 404 iowrite32(value, chan->xdev->regs + reg);
271} 405}
272 406
273static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg, 407static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg,
274 u32 value) 408 u32 value)
275{ 409{
276 vdma_write(chan, chan->desc_offset + reg, value); 410 dma_write(chan, chan->desc_offset + reg, value);
277} 411}
278 412
279static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg) 413static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg)
280{ 414{
281 return vdma_read(chan, chan->ctrl_offset + reg); 415 return dma_read(chan, chan->ctrl_offset + reg);
282} 416}
283 417
284static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg, 418static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg,
285 u32 value) 419 u32 value)
286{ 420{
287 vdma_write(chan, chan->ctrl_offset + reg, value); 421 dma_write(chan, chan->ctrl_offset + reg, value);
288} 422}
289 423
290static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg, 424static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg,
291 u32 clr) 425 u32 clr)
292{ 426{
293 vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr); 427 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr);
294} 428}
295 429
296static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg, 430static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg,
297 u32 set) 431 u32 set)
298{ 432{
299 vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set); 433 dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set);
434}
435
436/**
437 * vdma_desc_write_64 - 64-bit descriptor write
438 * @chan: Driver specific VDMA channel
439 * @reg: Register to write
440 * @value_lsb: lower address of the descriptor.
441 * @value_msb: upper address of the descriptor.
442 *
443 * Since vdma driver is trying to write to a register offset which is not a
444 * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits
445 * instead of a single 64 bit register write.
446 */
447static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
448 u32 value_lsb, u32 value_msb)
449{
450 /* Write the lsb 32 bits*/
451 writel(value_lsb, chan->xdev->regs + chan->desc_offset + reg);
452
453 /* Write the msb 32 bits */
454 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
300} 455}
301 456
302/* ----------------------------------------------------------------------------- 457/* -----------------------------------------------------------------------------
@@ -305,16 +460,59 @@ static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
305 460
306/** 461/**
307 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment 462 * xilinx_vdma_alloc_tx_segment - Allocate transaction segment
308 * @chan: Driver specific VDMA channel 463 * @chan: Driver specific DMA channel
309 * 464 *
310 * Return: The allocated segment on success and NULL on failure. 465 * Return: The allocated segment on success and NULL on failure.
311 */ 466 */
312static struct xilinx_vdma_tx_segment * 467static struct xilinx_vdma_tx_segment *
313xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan) 468xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
314{ 469{
315 struct xilinx_vdma_tx_segment *segment; 470 struct xilinx_vdma_tx_segment *segment;
316 dma_addr_t phys; 471 dma_addr_t phys;
317 472
473 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
474 if (!segment)
475 return NULL;
476
477 segment->phys = phys;
478
479 return segment;
480}
481
482/**
483 * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
484 * @chan: Driver specific DMA channel
485 *
486 * Return: The allocated segment on success and NULL on failure.
487 */
488static struct xilinx_cdma_tx_segment *
489xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
490{
491 struct xilinx_cdma_tx_segment *segment;
492 dma_addr_t phys;
493
494 segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
495 if (!segment)
496 return NULL;
497
498 memset(segment, 0, sizeof(*segment));
499 segment->phys = phys;
500
501 return segment;
502}
503
504/**
505 * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
506 * @chan: Driver specific DMA channel
507 *
508 * Return: The allocated segment on success and NULL on failure.
509 */
510static struct xilinx_axidma_tx_segment *
511xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
512{
513 struct xilinx_axidma_tx_segment *segment;
514 dma_addr_t phys;
515
318 segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); 516 segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
319 if (!segment) 517 if (!segment)
320 return NULL; 518 return NULL;
@@ -326,26 +524,48 @@ xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
326} 524}
327 525
328/** 526/**
527 * xilinx_dma_free_tx_segment - Free transaction segment
528 * @chan: Driver specific DMA channel
529 * @segment: DMA transaction segment
530 */
531static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
532 struct xilinx_axidma_tx_segment *segment)
533{
534 dma_pool_free(chan->desc_pool, segment, segment->phys);
535}
536
537/**
538 * xilinx_cdma_free_tx_segment - Free transaction segment
539 * @chan: Driver specific DMA channel
540 * @segment: DMA transaction segment
541 */
542static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
543 struct xilinx_cdma_tx_segment *segment)
544{
545 dma_pool_free(chan->desc_pool, segment, segment->phys);
546}
547
548/**
329 * xilinx_vdma_free_tx_segment - Free transaction segment 549 * xilinx_vdma_free_tx_segment - Free transaction segment
330 * @chan: Driver specific VDMA channel 550 * @chan: Driver specific DMA channel
331 * @segment: VDMA transaction segment 551 * @segment: DMA transaction segment
332 */ 552 */
333static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan, 553static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan,
334 struct xilinx_vdma_tx_segment *segment) 554 struct xilinx_vdma_tx_segment *segment)
335{ 555{
336 dma_pool_free(chan->desc_pool, segment, segment->phys); 556 dma_pool_free(chan->desc_pool, segment, segment->phys);
337} 557}
338 558
339/** 559/**
340 * xilinx_vdma_tx_descriptor - Allocate transaction descriptor 560 * xilinx_dma_tx_descriptor - Allocate transaction descriptor
341 * @chan: Driver specific VDMA channel 561 * @chan: Driver specific DMA channel
342 * 562 *
343 * Return: The allocated descriptor on success and NULL on failure. 563 * Return: The allocated descriptor on success and NULL on failure.
344 */ 564 */
345static struct xilinx_vdma_tx_descriptor * 565static struct xilinx_dma_tx_descriptor *
346xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan) 566xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan)
347{ 567{
348 struct xilinx_vdma_tx_descriptor *desc; 568 struct xilinx_dma_tx_descriptor *desc;
349 569
350 desc = kzalloc(sizeof(*desc), GFP_KERNEL); 570 desc = kzalloc(sizeof(*desc), GFP_KERNEL);
351 if (!desc) 571 if (!desc)
@@ -357,22 +577,38 @@ xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
357} 577}
358 578
359/** 579/**
360 * xilinx_vdma_free_tx_descriptor - Free transaction descriptor 580 * xilinx_dma_free_tx_descriptor - Free transaction descriptor
361 * @chan: Driver specific VDMA channel 581 * @chan: Driver specific DMA channel
362 * @desc: VDMA transaction descriptor 582 * @desc: DMA transaction descriptor
363 */ 583 */
364static void 584static void
365xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan, 585xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
366 struct xilinx_vdma_tx_descriptor *desc) 586 struct xilinx_dma_tx_descriptor *desc)
367{ 587{
368 struct xilinx_vdma_tx_segment *segment, *next; 588 struct xilinx_vdma_tx_segment *segment, *next;
589 struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
590 struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
369 591
370 if (!desc) 592 if (!desc)
371 return; 593 return;
372 594
373 list_for_each_entry_safe(segment, next, &desc->segments, node) { 595 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
374 list_del(&segment->node); 596 list_for_each_entry_safe(segment, next, &desc->segments, node) {
375 xilinx_vdma_free_tx_segment(chan, segment); 597 list_del(&segment->node);
598 xilinx_vdma_free_tx_segment(chan, segment);
599 }
600 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
601 list_for_each_entry_safe(cdma_segment, cdma_next,
602 &desc->segments, node) {
603 list_del(&cdma_segment->node);
604 xilinx_cdma_free_tx_segment(chan, cdma_segment);
605 }
606 } else {
607 list_for_each_entry_safe(axidma_segment, axidma_next,
608 &desc->segments, node) {
609 list_del(&axidma_segment->node);
610 xilinx_dma_free_tx_segment(chan, axidma_segment);
611 }
376 } 612 }
377 613
378 kfree(desc); 614 kfree(desc);
@@ -381,60 +617,62 @@ xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
381/* Required functions */ 617/* Required functions */
382 618
383/** 619/**
384 * xilinx_vdma_free_desc_list - Free descriptors list 620 * xilinx_dma_free_desc_list - Free descriptors list
385 * @chan: Driver specific VDMA channel 621 * @chan: Driver specific DMA channel
386 * @list: List to parse and delete the descriptor 622 * @list: List to parse and delete the descriptor
387 */ 623 */
388static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan, 624static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan,
389 struct list_head *list) 625 struct list_head *list)
390{ 626{
391 struct xilinx_vdma_tx_descriptor *desc, *next; 627 struct xilinx_dma_tx_descriptor *desc, *next;
392 628
393 list_for_each_entry_safe(desc, next, list, node) { 629 list_for_each_entry_safe(desc, next, list, node) {
394 list_del(&desc->node); 630 list_del(&desc->node);
395 xilinx_vdma_free_tx_descriptor(chan, desc); 631 xilinx_dma_free_tx_descriptor(chan, desc);
396 } 632 }
397} 633}
398 634
399/** 635/**
400 * xilinx_vdma_free_descriptors - Free channel descriptors 636 * xilinx_dma_free_descriptors - Free channel descriptors
401 * @chan: Driver specific VDMA channel 637 * @chan: Driver specific DMA channel
402 */ 638 */
403static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan) 639static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan)
404{ 640{
405 unsigned long flags; 641 unsigned long flags;
406 642
407 spin_lock_irqsave(&chan->lock, flags); 643 spin_lock_irqsave(&chan->lock, flags);
408 644
409 xilinx_vdma_free_desc_list(chan, &chan->pending_list); 645 xilinx_dma_free_desc_list(chan, &chan->pending_list);
410 xilinx_vdma_free_desc_list(chan, &chan->done_list); 646 xilinx_dma_free_desc_list(chan, &chan->done_list);
411 xilinx_vdma_free_desc_list(chan, &chan->active_list); 647 xilinx_dma_free_desc_list(chan, &chan->active_list);
412 648
413 spin_unlock_irqrestore(&chan->lock, flags); 649 spin_unlock_irqrestore(&chan->lock, flags);
414} 650}
415 651
416/** 652/**
417 * xilinx_vdma_free_chan_resources - Free channel resources 653 * xilinx_dma_free_chan_resources - Free channel resources
418 * @dchan: DMA channel 654 * @dchan: DMA channel
419 */ 655 */
420static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan) 656static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
421{ 657{
422 struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); 658 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
423 659
424 dev_dbg(chan->dev, "Free all channel resources.\n"); 660 dev_dbg(chan->dev, "Free all channel resources.\n");
425 661
426 xilinx_vdma_free_descriptors(chan); 662 xilinx_dma_free_descriptors(chan);
663 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
664 xilinx_dma_free_tx_segment(chan, chan->seg_v);
427 dma_pool_destroy(chan->desc_pool); 665 dma_pool_destroy(chan->desc_pool);
428 chan->desc_pool = NULL; 666 chan->desc_pool = NULL;
429} 667}
430 668
431/** 669/**
432 * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors 670 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
433 * @chan: Driver specific VDMA channel 671 * @chan: Driver specific DMA channel
434 */ 672 */
435static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan) 673static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
436{ 674{
437 struct xilinx_vdma_tx_descriptor *desc, *next; 675 struct xilinx_dma_tx_descriptor *desc, *next;
438 unsigned long flags; 676 unsigned long flags;
439 677
440 spin_lock_irqsave(&chan->lock, flags); 678 spin_lock_irqsave(&chan->lock, flags);
@@ -457,32 +695,32 @@ static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
457 695
458 /* Run any dependencies, then free the descriptor */ 696 /* Run any dependencies, then free the descriptor */
459 dma_run_dependencies(&desc->async_tx); 697 dma_run_dependencies(&desc->async_tx);
460 xilinx_vdma_free_tx_descriptor(chan, desc); 698 xilinx_dma_free_tx_descriptor(chan, desc);
461 } 699 }
462 700
463 spin_unlock_irqrestore(&chan->lock, flags); 701 spin_unlock_irqrestore(&chan->lock, flags);
464} 702}
465 703
466/** 704/**
467 * xilinx_vdma_do_tasklet - Schedule completion tasklet 705 * xilinx_dma_do_tasklet - Schedule completion tasklet
468 * @data: Pointer to the Xilinx VDMA channel structure 706 * @data: Pointer to the Xilinx DMA channel structure
469 */ 707 */
470static void xilinx_vdma_do_tasklet(unsigned long data) 708static void xilinx_dma_do_tasklet(unsigned long data)
471{ 709{
472 struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data; 710 struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data;
473 711
474 xilinx_vdma_chan_desc_cleanup(chan); 712 xilinx_dma_chan_desc_cleanup(chan);
475} 713}
476 714
477/** 715/**
478 * xilinx_vdma_alloc_chan_resources - Allocate channel resources 716 * xilinx_dma_alloc_chan_resources - Allocate channel resources
479 * @dchan: DMA channel 717 * @dchan: DMA channel
480 * 718 *
481 * Return: '0' on success and failure value on error 719 * Return: '0' on success and failure value on error
482 */ 720 */
483static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan) 721static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
484{ 722{
485 struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); 723 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
486 724
487 /* Has this channel already been allocated? */ 725 /* Has this channel already been allocated? */
488 if (chan->desc_pool) 726 if (chan->desc_pool)
@@ -492,10 +730,26 @@ static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
492 * We need the descriptor to be aligned to 64bytes 730 * We need the descriptor to be aligned to 64bytes
493 * for meeting Xilinx VDMA specification requirement. 731 * for meeting Xilinx VDMA specification requirement.
494 */ 732 */
495 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", 733 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
496 chan->dev, 734 chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
497 sizeof(struct xilinx_vdma_tx_segment), 735 chan->dev,
498 __alignof__(struct xilinx_vdma_tx_segment), 0); 736 sizeof(struct xilinx_axidma_tx_segment),
737 __alignof__(struct xilinx_axidma_tx_segment),
738 0);
739 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
740 chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
741 chan->dev,
742 sizeof(struct xilinx_cdma_tx_segment),
743 __alignof__(struct xilinx_cdma_tx_segment),
744 0);
745 } else {
746 chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
747 chan->dev,
748 sizeof(struct xilinx_vdma_tx_segment),
749 __alignof__(struct xilinx_vdma_tx_segment),
750 0);
751 }
752
499 if (!chan->desc_pool) { 753 if (!chan->desc_pool) {
500 dev_err(chan->dev, 754 dev_err(chan->dev,
501 "unable to allocate channel %d descriptor pool\n", 755 "unable to allocate channel %d descriptor pool\n",
@@ -503,110 +757,160 @@ static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
503 return -ENOMEM; 757 return -ENOMEM;
504 } 758 }
505 759
760 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
761 /*
762 * For AXI DMA case after submitting a pending_list, keep
763 * an extra segment allocated so that the "next descriptor"
764 * pointer on the tail descriptor always points to a
765 * valid descriptor, even when paused after reaching taildesc.
766 * This way, it is possible to issue additional
767 * transfers without halting and restarting the channel.
768 */
769 chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
770
506 dma_cookie_init(dchan); 771 dma_cookie_init(dchan);
772
773 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
774 /* For AXI DMA resetting once channel will reset the
775 * other channel as well so enable the interrupts here.
776 */
777 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
778 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
779 }
780
781 if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
782 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
783 XILINX_CDMA_CR_SGMODE);
784
507 return 0; 785 return 0;
508} 786}
509 787
510/** 788/**
511 * xilinx_vdma_tx_status - Get VDMA transaction status 789 * xilinx_dma_tx_status - Get DMA transaction status
512 * @dchan: DMA channel 790 * @dchan: DMA channel
513 * @cookie: Transaction identifier 791 * @cookie: Transaction identifier
514 * @txstate: Transaction state 792 * @txstate: Transaction state
515 * 793 *
516 * Return: DMA transaction status 794 * Return: DMA transaction status
517 */ 795 */
518static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan, 796static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
519 dma_cookie_t cookie, 797 dma_cookie_t cookie,
520 struct dma_tx_state *txstate) 798 struct dma_tx_state *txstate)
521{ 799{
522 return dma_cookie_status(dchan, cookie, txstate); 800 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
801 struct xilinx_dma_tx_descriptor *desc;
802 struct xilinx_axidma_tx_segment *segment;
803 struct xilinx_axidma_desc_hw *hw;
804 enum dma_status ret;
805 unsigned long flags;
806 u32 residue = 0;
807
808 ret = dma_cookie_status(dchan, cookie, txstate);
809 if (ret == DMA_COMPLETE || !txstate)
810 return ret;
811
812 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
813 spin_lock_irqsave(&chan->lock, flags);
814
815 desc = list_last_entry(&chan->active_list,
816 struct xilinx_dma_tx_descriptor, node);
817 if (chan->has_sg) {
818 list_for_each_entry(segment, &desc->segments, node) {
819 hw = &segment->hw;
820 residue += (hw->control - hw->status) &
821 XILINX_DMA_MAX_TRANS_LEN;
822 }
823 }
824 spin_unlock_irqrestore(&chan->lock, flags);
825
826 chan->residue = residue;
827 dma_set_residue(txstate, chan->residue);
828 }
829
830 return ret;
523} 831}
524 832
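
The residue accumulated in xilinx_dma_tx_status() above is what a client later reads back through the standard status call; a small consumer-side sketch (function name hypothetical):

/* Illustrative only: how many bytes of a submitted transfer are still pending. */
static u32 example_bytes_left(struct dma_chan *chan, dma_cookie_t cookie)
{
	struct dma_tx_state state;

	if (dmaengine_tx_status(chan, cookie, &state) == DMA_COMPLETE)
		return 0;

	return state.residue;	/* filled via dma_set_residue() in tx_status */
}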
525/** 833/**
526 * xilinx_vdma_is_running - Check if VDMA channel is running 834 * xilinx_dma_is_running - Check if DMA channel is running
527 * @chan: Driver specific VDMA channel 835 * @chan: Driver specific DMA channel
528 * 836 *
529 * Return: '1' if running, '0' if not. 837 * Return: '1' if running, '0' if not.
530 */ 838 */
531static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan) 839static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan)
532{ 840{
533 return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & 841 return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
534 XILINX_VDMA_DMASR_HALTED) && 842 XILINX_DMA_DMASR_HALTED) &&
535 (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & 843 (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) &
536 XILINX_VDMA_DMACR_RUNSTOP); 844 XILINX_DMA_DMACR_RUNSTOP);
537} 845}
538 846
539/** 847/**
540 * xilinx_vdma_is_idle - Check if VDMA channel is idle 848 * xilinx_dma_is_idle - Check if DMA channel is idle
541 * @chan: Driver specific VDMA channel 849 * @chan: Driver specific DMA channel
542 * 850 *
543 * Return: '1' if idle, '0' if not. 851 * Return: '1' if idle, '0' if not.
544 */ 852 */
545static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan) 853static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan)
546{ 854{
547 return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & 855 return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) &
548 XILINX_VDMA_DMASR_IDLE; 856 XILINX_DMA_DMASR_IDLE;
549} 857}
550 858
551/** 859/**
552 * xilinx_vdma_halt - Halt VDMA channel 860 * xilinx_dma_halt - Halt DMA channel
553 * @chan: Driver specific VDMA channel 861 * @chan: Driver specific DMA channel
554 */ 862 */
555static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) 863static void xilinx_dma_halt(struct xilinx_dma_chan *chan)
556{ 864{
557 int err; 865 int err;
558 u32 val; 866 u32 val;
559 867
560 vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); 868 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
561 869
562 /* Wait for the hardware to halt */ 870 /* Wait for the hardware to halt */
563 err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, 871 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
564 (val & XILINX_VDMA_DMASR_HALTED), 0, 872 (val & XILINX_DMA_DMASR_HALTED), 0,
565 XILINX_VDMA_LOOP_COUNT); 873 XILINX_DMA_LOOP_COUNT);
566 874
567 if (err) { 875 if (err) {
568 dev_err(chan->dev, "Cannot stop channel %p: %x\n", 876 dev_err(chan->dev, "Cannot stop channel %p: %x\n",
569 chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); 877 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
570 chan->err = true; 878 chan->err = true;
571 } 879 }
572
573 return;
574} 880}
575 881
576/** 882/**
577 * xilinx_vdma_start - Start VDMA channel 883 * xilinx_dma_start - Start DMA channel
578 * @chan: Driver specific VDMA channel 884 * @chan: Driver specific DMA channel
579 */ 885 */
580static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) 886static void xilinx_dma_start(struct xilinx_dma_chan *chan)
581{ 887{
582 int err; 888 int err;
583 u32 val; 889 u32 val;
584 890
585 vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); 891 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP);
586 892
587 /* Wait for the hardware to start */ 893 /* Wait for the hardware to start */
588 err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, 894 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val,
589 !(val & XILINX_VDMA_DMASR_HALTED), 0, 895 !(val & XILINX_DMA_DMASR_HALTED), 0,
590 XILINX_VDMA_LOOP_COUNT); 896 XILINX_DMA_LOOP_COUNT);
591 897
592 if (err) { 898 if (err) {
593 dev_err(chan->dev, "Cannot start channel %p: %x\n", 899 dev_err(chan->dev, "Cannot start channel %p: %x\n",
594 chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); 900 chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
595 901
596 chan->err = true; 902 chan->err = true;
597 } 903 }
598
599 return;
600} 904}
601 905
602/** 906/**
603 * xilinx_vdma_start_transfer - Starts VDMA transfer 907 * xilinx_vdma_start_transfer - Starts VDMA transfer
604 * @chan: Driver specific channel struct pointer 908 * @chan: Driver specific channel struct pointer
605 */ 909 */
606static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) 910static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
607{ 911{
608 struct xilinx_vdma_config *config = &chan->config; 912 struct xilinx_vdma_config *config = &chan->config;
609 struct xilinx_vdma_tx_descriptor *desc, *tail_desc; 913 struct xilinx_dma_tx_descriptor *desc, *tail_desc;
610 u32 reg; 914 u32 reg;
611 struct xilinx_vdma_tx_segment *tail_segment; 915 struct xilinx_vdma_tx_segment *tail_segment;
612 916
@@ -618,16 +922,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
618 return; 922 return;
619 923
620 desc = list_first_entry(&chan->pending_list, 924 desc = list_first_entry(&chan->pending_list,
621 struct xilinx_vdma_tx_descriptor, node); 925 struct xilinx_dma_tx_descriptor, node);
622 tail_desc = list_last_entry(&chan->pending_list, 926 tail_desc = list_last_entry(&chan->pending_list,
623 struct xilinx_vdma_tx_descriptor, node); 927 struct xilinx_dma_tx_descriptor, node);
624 928
625 tail_segment = list_last_entry(&tail_desc->segments, 929 tail_segment = list_last_entry(&tail_desc->segments,
626 struct xilinx_vdma_tx_segment, node); 930 struct xilinx_vdma_tx_segment, node);
627 931
628 /* If it is SG mode and hardware is busy, cannot submit */ 932 /* If it is SG mode and hardware is busy, cannot submit */
629 if (chan->has_sg && xilinx_vdma_is_running(chan) && 933 if (chan->has_sg && xilinx_dma_is_running(chan) &&
630 !xilinx_vdma_is_idle(chan)) { 934 !xilinx_dma_is_idle(chan)) {
631 dev_dbg(chan->dev, "DMA controller still busy\n"); 935 dev_dbg(chan->dev, "DMA controller still busy\n");
632 return; 936 return;
633 } 937 }
@@ -637,19 +941,19 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
637 * done, start new transfers 941 * done, start new transfers
638 */ 942 */
639 if (chan->has_sg) 943 if (chan->has_sg)
640 vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, 944 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
641 desc->async_tx.phys); 945 desc->async_tx.phys);
642 946
643 /* Configure the hardware using info in the config structure */ 947 /* Configure the hardware using info in the config structure */
644 reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); 948 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
645 949
646 if (config->frm_cnt_en) 950 if (config->frm_cnt_en)
647 reg |= XILINX_VDMA_DMACR_FRAMECNT_EN; 951 reg |= XILINX_DMA_DMACR_FRAMECNT_EN;
648 else 952 else
649 reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN; 953 reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN;
650 954
651 /* Configure channel to allow number frame buffers */ 955 /* Configure channel to allow number frame buffers */
652 vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE, 956 dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE,
653 chan->desc_pendingcount); 957 chan->desc_pendingcount);
654 958
655 /* 959 /*
@@ -657,45 +961,53 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
657 * In direct register mode, if not parking, enable circular mode 961 * In direct register mode, if not parking, enable circular mode
658 */ 962 */
659 if (chan->has_sg || !config->park) 963 if (chan->has_sg || !config->park)
660 reg |= XILINX_VDMA_DMACR_CIRC_EN; 964 reg |= XILINX_DMA_DMACR_CIRC_EN;
661 965
662 if (config->park) 966 if (config->park)
663 reg &= ~XILINX_VDMA_DMACR_CIRC_EN; 967 reg &= ~XILINX_DMA_DMACR_CIRC_EN;
664 968
665 vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg); 969 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
666 970
667 if (config->park && (config->park_frm >= 0) && 971 if (config->park && (config->park_frm >= 0) &&
668 (config->park_frm < chan->num_frms)) { 972 (config->park_frm < chan->num_frms)) {
669 if (chan->direction == DMA_MEM_TO_DEV) 973 if (chan->direction == DMA_MEM_TO_DEV)
670 vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, 974 dma_write(chan, XILINX_DMA_REG_PARK_PTR,
671 config->park_frm << 975 config->park_frm <<
672 XILINX_VDMA_PARK_PTR_RD_REF_SHIFT); 976 XILINX_DMA_PARK_PTR_RD_REF_SHIFT);
673 else 977 else
674 vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, 978 dma_write(chan, XILINX_DMA_REG_PARK_PTR,
675 config->park_frm << 979 config->park_frm <<
676 XILINX_VDMA_PARK_PTR_WR_REF_SHIFT); 980 XILINX_DMA_PARK_PTR_WR_REF_SHIFT);
677 } 981 }
678 982
679 /* Start the hardware */ 983 /* Start the hardware */
680 xilinx_vdma_start(chan); 984 xilinx_dma_start(chan);
681 985
682 if (chan->err) 986 if (chan->err)
683 return; 987 return;
684 988
685 /* Start the transfer */ 989 /* Start the transfer */
686 if (chan->has_sg) { 990 if (chan->has_sg) {
687 vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, 991 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
688 tail_segment->phys); 992 tail_segment->phys);
689 } else { 993 } else {
690 struct xilinx_vdma_tx_segment *segment, *last = NULL; 994 struct xilinx_vdma_tx_segment *segment, *last = NULL;
691 int i = 0; 995 int i = 0;
692 996
693 list_for_each_entry(desc, &chan->pending_list, node) { 997 if (chan->desc_submitcount < chan->num_frms)
694 segment = list_first_entry(&desc->segments, 998 i = chan->desc_submitcount;
695 struct xilinx_vdma_tx_segment, node); 999
696 vdma_desc_write(chan, 1000 list_for_each_entry(segment, &desc->segments, node) {
1001 if (chan->ext_addr)
1002 vdma_desc_write_64(chan,
1003 XILINX_VDMA_REG_START_ADDRESS_64(i++),
1004 segment->hw.buf_addr,
1005 segment->hw.buf_addr_msb);
1006 else
1007 vdma_desc_write(chan,
697 XILINX_VDMA_REG_START_ADDRESS(i++), 1008 XILINX_VDMA_REG_START_ADDRESS(i++),
698 segment->hw.buf_addr); 1009 segment->hw.buf_addr);
1010
699 last = segment; 1011 last = segment;
700 } 1012 }
701 1013
@@ -703,10 +1015,164 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
703 return; 1015 return;
704 1016
705 /* HW expects these parameters to be same for one transaction */ 1017 /* HW expects these parameters to be same for one transaction */
706 vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize); 1018 vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize);
707 vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE, 1019 vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE,
708 last->hw.stride); 1020 last->hw.stride);
709 vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize); 1021 vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize);
1022 }
1023
1024 if (!chan->has_sg) {
1025 list_del(&desc->node);
1026 list_add_tail(&desc->node, &chan->active_list);
1027 chan->desc_submitcount++;
1028 chan->desc_pendingcount--;
1029 if (chan->desc_submitcount == chan->num_frms)
1030 chan->desc_submitcount = 0;
1031 } else {
1032 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1033 chan->desc_pendingcount = 0;
1034 }
1035}
1036
1037/**
1038 * xilinx_cdma_start_transfer - Starts cdma transfer
1039 * @chan: Driver specific channel struct pointer
1040 */
1041static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1042{
1043 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1044 struct xilinx_cdma_tx_segment *tail_segment;
1045 u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
1046
1047 if (chan->err)
1048 return;
1049
1050 if (list_empty(&chan->pending_list))
1051 return;
1052
1053 head_desc = list_first_entry(&chan->pending_list,
1054 struct xilinx_dma_tx_descriptor, node);
1055 tail_desc = list_last_entry(&chan->pending_list,
1056 struct xilinx_dma_tx_descriptor, node);
1057 tail_segment = list_last_entry(&tail_desc->segments,
1058 struct xilinx_cdma_tx_segment, node);
1059
1060 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1061 ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1062 ctrl_reg |= chan->desc_pendingcount <<
1063 XILINX_DMA_CR_COALESCE_SHIFT;
1064 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
1065 }
1066
1067 if (chan->has_sg) {
1068 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1069 head_desc->async_tx.phys);
1070
1071 /* Update tail ptr register which will start the transfer */
1072 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1073 tail_segment->phys);
1074 } else {
1075 /* In simple mode */
1076 struct xilinx_cdma_tx_segment *segment;
1077 struct xilinx_cdma_desc_hw *hw;
1078
1079 segment = list_first_entry(&head_desc->segments,
1080 struct xilinx_cdma_tx_segment,
1081 node);
1082
1083 hw = &segment->hw;
1084
1085 dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
1086 dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
1087
1088 /* Start the transfer */
1089 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1090 hw->control & XILINX_DMA_MAX_TRANS_LEN);
1091 }
1092
1093 list_splice_tail_init(&chan->pending_list, &chan->active_list);
1094 chan->desc_pendingcount = 0;
1095}
1096
1097/**
1098 * xilinx_dma_start_transfer - Starts DMA transfer
1099 * @chan: Driver specific channel struct pointer
1100 */
1101static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1102{
1103 struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
1104 struct xilinx_axidma_tx_segment *tail_segment, *old_head, *new_head;
1105 u32 reg;
1106
1107 if (chan->err)
1108 return;
1109
1110 if (list_empty(&chan->pending_list))
1111 return;
1112
1113 /* If it is SG mode and hardware is busy, cannot submit */
1114 if (chan->has_sg && xilinx_dma_is_running(chan) &&
1115 !xilinx_dma_is_idle(chan)) {
1116 dev_dbg(chan->dev, "DMA controller still busy\n");
1117 return;
1118 }
1119
1120 head_desc = list_first_entry(&chan->pending_list,
1121 struct xilinx_dma_tx_descriptor, node);
1122 tail_desc = list_last_entry(&chan->pending_list,
1123 struct xilinx_dma_tx_descriptor, node);
1124 tail_segment = list_last_entry(&tail_desc->segments,
1125 struct xilinx_axidma_tx_segment, node);
1126
1127 old_head = list_first_entry(&head_desc->segments,
1128 struct xilinx_axidma_tx_segment, node);
1129 new_head = chan->seg_v;
1130 /* Copy Buffer Descriptor fields. */
1131 new_head->hw = old_head->hw;
1132
1133 /* Swap and save new reserve */
1134 list_replace_init(&old_head->node, &new_head->node);
1135 chan->seg_v = old_head;
1136
1137 tail_segment->hw.next_desc = chan->seg_v->phys;
1138 head_desc->async_tx.phys = new_head->phys;
1139
1140 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1141
1142 if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
1143 reg &= ~XILINX_DMA_CR_COALESCE_MAX;
1144 reg |= chan->desc_pendingcount <<
1145 XILINX_DMA_CR_COALESCE_SHIFT;
1146 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1147 }
1148
1149 if (chan->has_sg)
1150 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1151 head_desc->async_tx.phys);
1152
1153 xilinx_dma_start(chan);
1154
1155 if (chan->err)
1156 return;
1157
1158 /* Start the transfer */
1159 if (chan->has_sg) {
1160 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1161 tail_segment->phys);
1162 } else {
1163 struct xilinx_axidma_tx_segment *segment;
1164 struct xilinx_axidma_desc_hw *hw;
1165
1166 segment = list_first_entry(&head_desc->segments,
1167 struct xilinx_axidma_tx_segment,
1168 node);
1169 hw = &segment->hw;
1170
1171 dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
1172
1173 /* Start the transfer */
1174 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
1175 hw->control & XILINX_DMA_MAX_TRANS_LEN);
710 } 1176 }
711 1177
712 list_splice_tail_init(&chan->pending_list, &chan->active_list); 1178 list_splice_tail_init(&chan->pending_list, &chan->active_list);
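
Because the simple-mode path above masks the byte count with XILINX_DMA_MAX_TRANS_LEN before writing BTT, anything longer has to be carved into chunks no bigger than that; a minimal sketch of the arithmetic (not the driver's actual prep code):

/* Illustrative only: walk a long buffer in BTT-sized chunks. */
static void example_walk_chunks(size_t len)
{
	size_t done = 0;

	while (done < len) {
		size_t copy = min_t(size_t, len - done,
				    XILINX_DMA_MAX_TRANS_LEN);

		/* one hardware segment / one BTT programming per chunk */
		done += copy;
	}
}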
@@ -714,28 +1180,28 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
714} 1180}
715 1181
716/** 1182/**
717 * xilinx_vdma_issue_pending - Issue pending transactions 1183 * xilinx_dma_issue_pending - Issue pending transactions
718 * @dchan: DMA channel 1184 * @dchan: DMA channel
719 */ 1185 */
720static void xilinx_vdma_issue_pending(struct dma_chan *dchan) 1186static void xilinx_dma_issue_pending(struct dma_chan *dchan)
721{ 1187{
722 struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); 1188 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
723 unsigned long flags; 1189 unsigned long flags;
724 1190
725 spin_lock_irqsave(&chan->lock, flags); 1191 spin_lock_irqsave(&chan->lock, flags);
726 xilinx_vdma_start_transfer(chan); 1192 chan->start_transfer(chan);
727 spin_unlock_irqrestore(&chan->lock, flags); 1193 spin_unlock_irqrestore(&chan->lock, flags);
728} 1194}
729 1195
730/** 1196/**
731 * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete 1197 * xilinx_dma_complete_descriptor - Mark the active descriptor as complete
732 * @chan : xilinx DMA channel 1198 * @chan : xilinx DMA channel
733 * 1199 *
734 * CONTEXT: hardirq 1200 * CONTEXT: hardirq
735 */ 1201 */
736static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) 1202static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
737{ 1203{
738 struct xilinx_vdma_tx_descriptor *desc, *next; 1204 struct xilinx_dma_tx_descriptor *desc, *next;
739 1205
740 /* This function was invoked with lock held */ 1206 /* This function was invoked with lock held */
741 if (list_empty(&chan->active_list)) 1207 if (list_empty(&chan->active_list))
@@ -749,27 +1215,27 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
749} 1215}
750 1216
751/** 1217/**
752 * xilinx_vdma_reset - Reset VDMA channel 1218 * xilinx_dma_reset - Reset DMA channel
753 * @chan: Driver specific VDMA channel 1219 * @chan: Driver specific DMA channel
754 * 1220 *
755 * Return: '0' on success and failure value on error 1221 * Return: '0' on success and failure value on error
756 */ 1222 */
757static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) 1223static int xilinx_dma_reset(struct xilinx_dma_chan *chan)
758{ 1224{
759 int err; 1225 int err;
760 u32 tmp; 1226 u32 tmp;
761 1227
762 vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET); 1228 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET);
763 1229
764 /* Wait for the hardware to finish reset */ 1230 /* Wait for the hardware to finish reset */
765 err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp, 1231 err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp,
766 !(tmp & XILINX_VDMA_DMACR_RESET), 0, 1232 !(tmp & XILINX_DMA_DMACR_RESET), 0,
767 XILINX_VDMA_LOOP_COUNT); 1233 XILINX_DMA_LOOP_COUNT);
768 1234
769 if (err) { 1235 if (err) {
770 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", 1236 dev_err(chan->dev, "reset timeout, cr %x, sr %x\n",
771 vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR), 1237 dma_ctrl_read(chan, XILINX_DMA_REG_DMACR),
772 vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); 1238 dma_ctrl_read(chan, XILINX_DMA_REG_DMASR));
773 return -ETIMEDOUT; 1239 return -ETIMEDOUT;
774 } 1240 }
775 1241
@@ -779,48 +1245,48 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
779} 1245}
780 1246
781/** 1247/**
782 * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts 1248 * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts
783 * @chan: Driver specific VDMA channel 1249 * @chan: Driver specific DMA channel
784 * 1250 *
785 * Return: '0' on success and failure value on error 1251 * Return: '0' on success and failure value on error
786 */ 1252 */
787static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan) 1253static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan)
788{ 1254{
789 int err; 1255 int err;
790 1256
791 /* Reset VDMA */ 1257 /* Reset VDMA */
792 err = xilinx_vdma_reset(chan); 1258 err = xilinx_dma_reset(chan);
793 if (err) 1259 if (err)
794 return err; 1260 return err;
795 1261
796 /* Enable interrupts */ 1262 /* Enable interrupts */
797 vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, 1263 dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
798 XILINX_VDMA_DMAXR_ALL_IRQ_MASK); 1264 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
799 1265
800 return 0; 1266 return 0;
801} 1267}
802 1268
803/** 1269/**
804 * xilinx_vdma_irq_handler - VDMA Interrupt handler 1270 * xilinx_dma_irq_handler - DMA Interrupt handler
805 * @irq: IRQ number 1271 * @irq: IRQ number
806 * @data: Pointer to the Xilinx VDMA channel structure 1272 * @data: Pointer to the Xilinx DMA channel structure
807 * 1273 *
808 * Return: IRQ_HANDLED/IRQ_NONE 1274 * Return: IRQ_HANDLED/IRQ_NONE
809 */ 1275 */
810static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) 1276static irqreturn_t xilinx_dma_irq_handler(int irq, void *data)
811{ 1277{
812 struct xilinx_vdma_chan *chan = data; 1278 struct xilinx_dma_chan *chan = data;
813 u32 status; 1279 u32 status;
814 1280
815 /* Read the status and ack the interrupts. */ 1281 /* Read the status and ack the interrupts. */
816 status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR); 1282 status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR);
817 if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK)) 1283 if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK))
818 return IRQ_NONE; 1284 return IRQ_NONE;
819 1285
820 vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, 1286 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
821 status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK); 1287 status & XILINX_DMA_DMAXR_ALL_IRQ_MASK);
822 1288
823 if (status & XILINX_VDMA_DMASR_ERR_IRQ) { 1289 if (status & XILINX_DMA_DMASR_ERR_IRQ) {
824 /* 1290 /*
825 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the 1291 * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the
826 * error is recoverable, ignore it. Otherwise flag the error. 1292 * error is recoverable, ignore it. Otherwise flag the error.
@@ -828,22 +1294,23 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
828 * Only recoverable errors can be cleared in the DMASR register, 1294 * Only recoverable errors can be cleared in the DMASR register,
829 * make sure not to write 1 to the other error bits. 1295 * make sure not to write 1 to the other error bits.
830 */ 1296 */
831 u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK; 1297 u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK;
832 vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, 1298
833 errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK); 1299 dma_ctrl_write(chan, XILINX_DMA_REG_DMASR,
1300 errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK);
834 1301
835 if (!chan->flush_on_fsync || 1302 if (!chan->flush_on_fsync ||
836 (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) { 1303 (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) {
837 dev_err(chan->dev, 1304 dev_err(chan->dev,
838 "Channel %p has errors %x, cdr %x tdr %x\n", 1305 "Channel %p has errors %x, cdr %x tdr %x\n",
839 chan, errors, 1306 chan, errors,
840 vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC), 1307 dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC),
841 vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC)); 1308 dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC));
842 chan->err = true; 1309 chan->err = true;
843 } 1310 }
844 } 1311 }
845 1312
846 if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) { 1313 if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) {
847 /* 1314 /*
848 * Device takes too long to do the transfer when user requires 1315 * Device takes too long to do the transfer when user requires
849 * responsiveness. 1316 * responsiveness.
@@ -851,10 +1318,10 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
851 dev_dbg(chan->dev, "Inter-packet latency too long\n"); 1318 dev_dbg(chan->dev, "Inter-packet latency too long\n");
852 } 1319 }
853 1320
854 if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) { 1321 if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) {
855 spin_lock(&chan->lock); 1322 spin_lock(&chan->lock);
856 xilinx_vdma_complete_descriptor(chan); 1323 xilinx_dma_complete_descriptor(chan);
857 xilinx_vdma_start_transfer(chan); 1324 chan->start_transfer(chan);
858 spin_unlock(&chan->lock); 1325 spin_unlock(&chan->lock);
859 } 1326 }
860 1327
@@ -867,11 +1334,13 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
867 * @chan: Driver specific dma channel 1334 * @chan: Driver specific dma channel
868 * @desc: dma transaction descriptor 1335 * @desc: dma transaction descriptor
869 */ 1336 */
870static void append_desc_queue(struct xilinx_vdma_chan *chan, 1337static void append_desc_queue(struct xilinx_dma_chan *chan,
871 struct xilinx_vdma_tx_descriptor *desc) 1338 struct xilinx_dma_tx_descriptor *desc)
872{ 1339{
873 struct xilinx_vdma_tx_segment *tail_segment; 1340 struct xilinx_vdma_tx_segment *tail_segment;
874 struct xilinx_vdma_tx_descriptor *tail_desc; 1341 struct xilinx_dma_tx_descriptor *tail_desc;
1342 struct xilinx_axidma_tx_segment *axidma_tail_segment;
1343 struct xilinx_cdma_tx_segment *cdma_tail_segment;
875 1344
876 if (list_empty(&chan->pending_list)) 1345 if (list_empty(&chan->pending_list))
877 goto append; 1346 goto append;
@@ -881,10 +1350,23 @@ static void append_desc_queue(struct xilinx_vdma_chan *chan,
881 * that already exists in memory. 1350 * that already exists in memory.
882 */ 1351 */
883 tail_desc = list_last_entry(&chan->pending_list, 1352 tail_desc = list_last_entry(&chan->pending_list,
884 struct xilinx_vdma_tx_descriptor, node); 1353 struct xilinx_dma_tx_descriptor, node);
885 tail_segment = list_last_entry(&tail_desc->segments, 1354 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
886 struct xilinx_vdma_tx_segment, node); 1355 tail_segment = list_last_entry(&tail_desc->segments,
887 tail_segment->hw.next_desc = (u32)desc->async_tx.phys; 1356 struct xilinx_vdma_tx_segment,
1357 node);
1358 tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1359 } else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
1360 cdma_tail_segment = list_last_entry(&tail_desc->segments,
1361 struct xilinx_cdma_tx_segment,
1362 node);
1363 cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1364 } else {
1365 axidma_tail_segment = list_last_entry(&tail_desc->segments,
1366 struct xilinx_axidma_tx_segment,
1367 node);
1368 axidma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
1369 }
888 1370
889 /* 1371 /*
890 * Add the software descriptor and all children to the list 1372 * Add the software descriptor and all children to the list
@@ -894,22 +1376,23 @@ append:
894 list_add_tail(&desc->node, &chan->pending_list); 1376 list_add_tail(&desc->node, &chan->pending_list);
895 chan->desc_pendingcount++; 1377 chan->desc_pendingcount++;
896 1378
897 if (unlikely(chan->desc_pendingcount > chan->num_frms)) { 1379 if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
1380 && unlikely(chan->desc_pendingcount > chan->num_frms)) {
898 dev_dbg(chan->dev, "desc pendingcount is too high\n"); 1381 dev_dbg(chan->dev, "desc pendingcount is too high\n");
899 chan->desc_pendingcount = chan->num_frms; 1382 chan->desc_pendingcount = chan->num_frms;
900 } 1383 }
901} 1384}
902 1385
903/** 1386/**
904 * xilinx_vdma_tx_submit - Submit DMA transaction 1387 * xilinx_dma_tx_submit - Submit DMA transaction
905 * @tx: Async transaction descriptor 1388 * @tx: Async transaction descriptor
906 * 1389 *
907 * Return: cookie value on success and failure value on error 1390 * Return: cookie value on success and failure value on error
908 */ 1391 */
909static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx) 1392static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
910{ 1393{
911 struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx); 1394 struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx);
912 struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan); 1395 struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan);
913 dma_cookie_t cookie; 1396 dma_cookie_t cookie;
914 unsigned long flags; 1397 unsigned long flags;
915 int err; 1398 int err;
@@ -919,7 +1402,7 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
919 * If reset fails, need to hard reset the system. 1402 * If reset fails, need to hard reset the system.
920 * Channel is no longer functional 1403 * Channel is no longer functional
921 */ 1404 */
922 err = xilinx_vdma_chan_reset(chan); 1405 err = xilinx_dma_chan_reset(chan);
923 if (err < 0) 1406 if (err < 0)
924 return err; 1407 return err;
925 } 1408 }
@@ -950,8 +1433,8 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
950 struct dma_interleaved_template *xt, 1433 struct dma_interleaved_template *xt,
951 unsigned long flags) 1434 unsigned long flags)
952{ 1435{
953 struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); 1436 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
954 struct xilinx_vdma_tx_descriptor *desc; 1437 struct xilinx_dma_tx_descriptor *desc;
955 struct xilinx_vdma_tx_segment *segment, *prev = NULL; 1438 struct xilinx_vdma_tx_segment *segment, *prev = NULL;
956 struct xilinx_vdma_desc_hw *hw; 1439 struct xilinx_vdma_desc_hw *hw;
957 1440
@@ -965,12 +1448,12 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
965 return NULL; 1448 return NULL;
966 1449
967 /* Allocate a transaction descriptor. */ 1450 /* Allocate a transaction descriptor. */
968 desc = xilinx_vdma_alloc_tx_descriptor(chan); 1451 desc = xilinx_dma_alloc_tx_descriptor(chan);
969 if (!desc) 1452 if (!desc)
970 return NULL; 1453 return NULL;
971 1454
972 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); 1455 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
973 desc->async_tx.tx_submit = xilinx_vdma_tx_submit; 1456 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
974 async_tx_ack(&desc->async_tx); 1457 async_tx_ack(&desc->async_tx);
975 1458
976 /* Allocate the link descriptor from DMA pool */ 1459 /* Allocate the link descriptor from DMA pool */
@@ -983,14 +1466,25 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
983 hw->vsize = xt->numf; 1466 hw->vsize = xt->numf;
984 hw->hsize = xt->sgl[0].size; 1467 hw->hsize = xt->sgl[0].size;
985 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << 1468 hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) <<
986 XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT; 1469 XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT;
987 hw->stride |= chan->config.frm_dly << 1470 hw->stride |= chan->config.frm_dly <<
988 XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT; 1471 XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT;
989 1472
990 if (xt->dir != DMA_MEM_TO_DEV) 1473 if (xt->dir != DMA_MEM_TO_DEV) {
991 hw->buf_addr = xt->dst_start; 1474 if (chan->ext_addr) {
992 else 1475 hw->buf_addr = lower_32_bits(xt->dst_start);
993 hw->buf_addr = xt->src_start; 1476 hw->buf_addr_msb = upper_32_bits(xt->dst_start);
1477 } else {
1478 hw->buf_addr = xt->dst_start;
1479 }
1480 } else {
1481 if (chan->ext_addr) {
1482 hw->buf_addr = lower_32_bits(xt->src_start);
1483 hw->buf_addr_msb = upper_32_bits(xt->src_start);
1484 } else {
1485 hw->buf_addr = xt->src_start;
1486 }
1487 }
994 1488
995 /* Insert the segment into the descriptor segments list. */ 1489 /* Insert the segment into the descriptor segments list. */
996 list_add_tail(&segment->node, &desc->segments); 1490 list_add_tail(&segment->node, &desc->segments);
@@ -1005,29 +1499,194 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1005 return &desc->async_tx; 1499 return &desc->async_tx;
1006 1500
1007error: 1501error:
1008 xilinx_vdma_free_tx_descriptor(chan, desc); 1502 xilinx_dma_free_tx_descriptor(chan, desc);
1503 return NULL;
1504}
1505
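As a rough client-side sketch of the interleaved path above (not part of this patch; the function name and frame geometry parameters are illustrative), a video driver would typically describe one frame per descriptor through the generic dmaengine template:

#include <linux/dmaengine.h>
#include <linux/slab.h>

static int queue_one_frame(struct dma_chan *chan, dma_addr_t frame,
                           size_t width_bytes, size_t height,
                           size_t stride_bytes)
{
        struct dma_interleaved_template *xt;
        struct dma_async_tx_descriptor *txd;

        /* sgl[] is a flexible array member; reserve room for one chunk */
        xt = kzalloc(sizeof(*xt) + sizeof(struct data_chunk), GFP_KERNEL);
        if (!xt)
                return -ENOMEM;

        xt->dir = DMA_MEM_TO_DEV;
        xt->src_start = frame;                       /* becomes hw->buf_addr */
        xt->numf = height;                           /* becomes hw->vsize */
        xt->frame_size = 1;
        xt->sgl[0].size = width_bytes;               /* becomes hw->hsize */
        xt->sgl[0].icg = stride_bytes - width_bytes; /* inter-line gap */

        txd = dmaengine_prep_interleaved_dma(chan, xt, DMA_PREP_INTERRUPT);
        kfree(xt);      /* the template is only read during prep */
        if (!txd)
                return -ENOMEM;

        dmaengine_submit(txd);
        dma_async_issue_pending(chan);
        return 0;
}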
1506/**
1507 * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
1508 * @dchan: DMA channel
1509 * @dma_dst: destination address
1510 * @dma_src: source address
1511 * @len: transfer length
1512 * @flags: transfer ack flags
1513 *
1514 * Return: Async transaction descriptor on success and NULL on failure
1515 */
1516static struct dma_async_tx_descriptor *
1517xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1518 dma_addr_t dma_src, size_t len, unsigned long flags)
1519{
1520 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1521 struct xilinx_dma_tx_descriptor *desc;
1522 struct xilinx_cdma_tx_segment *segment, *prev;
1523 struct xilinx_cdma_desc_hw *hw;
1524
1525 if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
1526 return NULL;
1527
1528 desc = xilinx_dma_alloc_tx_descriptor(chan);
1529 if (!desc)
1530 return NULL;
1531
1532 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1533 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1534
1535 /* Allocate the link descriptor from DMA pool */
1536 segment = xilinx_cdma_alloc_tx_segment(chan);
1537 if (!segment)
1538 goto error;
1539
1540 hw = &segment->hw;
1541 hw->control = len;
1542 hw->src_addr = dma_src;
1543 hw->dest_addr = dma_dst;
1544
1545 /* Fill the previous next descriptor with current */
1546 prev = list_last_entry(&desc->segments,
1547 struct xilinx_cdma_tx_segment, node);
1548 prev->hw.next_desc = segment->phys;
1549
1550 /* Insert the segment into the descriptor segments list. */
1551 list_add_tail(&segment->node, &desc->segments);
1552
1553 prev = segment;
1554
1555 /* Link the last hardware descriptor with the first. */
1556 segment = list_first_entry(&desc->segments,
1557 struct xilinx_cdma_tx_segment, node);
1558 desc->async_tx.phys = segment->phys;
1559 prev->hw.next_desc = segment->phys;
1560
1561 return &desc->async_tx;
1562
1563error:
1564 xilinx_dma_free_tx_descriptor(chan, desc);
1565 return NULL;
1566}
1567
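The new memcpy capability is reachable through the stock dmaengine helpers; a minimal sketch (hypothetical function, error handling trimmed, and len still bounded by XILINX_DMA_MAX_TRANS_LEN as checked above):

#include <linux/dmaengine.h>

static int cdma_copy_example(dma_addr_t dst, dma_addr_t src, size_t len)
{
        dma_cap_mask_t mask;
        struct dma_chan *chan;
        struct dma_async_tx_descriptor *txd;

        dma_cap_zero(mask);
        dma_cap_set(DMA_MEMCPY, mask);
        chan = dma_request_channel(mask, NULL, NULL);
        if (!chan)
                return -ENODEV;

        txd = dmaengine_prep_dma_memcpy(chan, dst, src, len,
                                        DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txd) {
                dma_release_channel(chan);
                return -ENOMEM;
        }

        dmaengine_submit(txd);
        dma_async_issue_pending(chan);
        /* ... wait for the transfer to complete, then ... */
        dma_release_channel(chan);
        return 0;
}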
1568/**
1569 * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
1570 * @dchan: DMA channel
1571 * @sgl: scatterlist to transfer to/from
 1572 * @sg_len: number of entries in @sgl
1573 * @direction: DMA direction
1574 * @flags: transfer ack flags
1575 * @context: APP words of the descriptor
1576 *
1577 * Return: Async transaction descriptor on success and NULL on failure
1578 */
1579static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1580 struct dma_chan *dchan, struct scatterlist *sgl, unsigned int sg_len,
1581 enum dma_transfer_direction direction, unsigned long flags,
1582 void *context)
1583{
1584 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1585 struct xilinx_dma_tx_descriptor *desc;
1586 struct xilinx_axidma_tx_segment *segment = NULL, *prev = NULL;
1587 u32 *app_w = (u32 *)context;
1588 struct scatterlist *sg;
1589 size_t copy;
1590 size_t sg_used;
1591 unsigned int i;
1592
1593 if (!is_slave_direction(direction))
1594 return NULL;
1595
1596 /* Allocate a transaction descriptor. */
1597 desc = xilinx_dma_alloc_tx_descriptor(chan);
1598 if (!desc)
1599 return NULL;
1600
1601 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1602 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1603
1604 /* Build transactions using information in the scatter gather list */
1605 for_each_sg(sgl, sg, sg_len, i) {
1606 sg_used = 0;
1607
1608 /* Loop until the entire scatterlist entry is used */
1609 while (sg_used < sg_dma_len(sg)) {
1610 struct xilinx_axidma_desc_hw *hw;
1611
1612 /* Get a free segment */
1613 segment = xilinx_axidma_alloc_tx_segment(chan);
1614 if (!segment)
1615 goto error;
1616
1617 /*
1618 * Calculate the maximum number of bytes to transfer,
1619 * making sure it is less than the hw limit
1620 */
1621 copy = min_t(size_t, sg_dma_len(sg) - sg_used,
1622 XILINX_DMA_MAX_TRANS_LEN);
1623 hw = &segment->hw;
1624
1625 /* Fill in the descriptor */
1626 hw->buf_addr = sg_dma_address(sg) + sg_used;
1627
1628 hw->control = copy;
1629
1630 if (chan->direction == DMA_MEM_TO_DEV) {
1631 if (app_w)
1632 memcpy(hw->app, app_w, sizeof(u32) *
1633 XILINX_DMA_NUM_APP_WORDS);
1634 }
1635
1636 if (prev)
1637 prev->hw.next_desc = segment->phys;
1638
1639 prev = segment;
1640 sg_used += copy;
1641
1642 /*
1643 * Insert the segment into the descriptor segments
1644 * list.
1645 */
1646 list_add_tail(&segment->node, &desc->segments);
1647 }
1648 }
1649
1650 segment = list_first_entry(&desc->segments,
1651 struct xilinx_axidma_tx_segment, node);
1652 desc->async_tx.phys = segment->phys;
1653 prev->hw.next_desc = segment->phys;
1654
 1655 /* For MEM_TO_DEV transfers, set SOP on the first BD and EOP on the last */
1656 if (chan->direction == DMA_MEM_TO_DEV) {
1657 segment->hw.control |= XILINX_DMA_BD_SOP;
1658 segment = list_last_entry(&desc->segments,
1659 struct xilinx_axidma_tx_segment,
1660 node);
1661 segment->hw.control |= XILINX_DMA_BD_EOP;
1662 }
1663
1664 return &desc->async_tx;
1665
1666error:
1667 xilinx_dma_free_tx_descriptor(chan, desc);
1009 return NULL; 1668 return NULL;
1010} 1669}
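For the AXI DMA slave path, a device-to-memory client built on the generic API could look roughly like the sketch below; the "rx" channel name, the single-buffer setup and the completion handling are placeholders, not defined by this patch:

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/err.h>

static int axidma_rx_example(struct device *dev, void *buf, size_t len)
{
        struct dma_chan *chan;
        struct scatterlist sg;
        struct dma_async_tx_descriptor *txd;
        int ret = 0;

        chan = dma_request_chan(dev, "rx");     /* matches "dma-names" in DT */
        if (IS_ERR(chan))
                return PTR_ERR(chan);

        sg_init_one(&sg, buf, len);
        if (!dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE)) {
                ret = -EIO;
                goto release;
        }

        txd = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
                                      DMA_PREP_INTERRUPT | DMA_CTRL_ACK);
        if (!txd) {
                ret = -ENOMEM;
                goto unmap;
        }

        dmaengine_submit(txd);
        dma_async_issue_pending(chan);
        /* ... wait for the completion callback before reading buf ... */
unmap:
        dma_unmap_sg(dev, &sg, 1, DMA_FROM_DEVICE);
release:
        dma_release_channel(chan);
        return ret;
}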
1011 1670
1012/** 1671/**
1013 * xilinx_vdma_terminate_all - Halt the channel and free descriptors 1672 * xilinx_dma_terminate_all - Halt the channel and free descriptors
1014 * @chan: Driver specific VDMA Channel pointer 1673 * @chan: Driver specific DMA Channel pointer
1015 */ 1674 */
1016static int xilinx_vdma_terminate_all(struct dma_chan *dchan) 1675static int xilinx_dma_terminate_all(struct dma_chan *dchan)
1017{ 1676{
1018 struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); 1677 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1019 1678
1020 /* Halt the DMA engine */ 1679 /* Halt the DMA engine */
1021 xilinx_vdma_halt(chan); 1680 xilinx_dma_halt(chan);
1022 1681
1023 /* Remove and free all of the descriptors in the lists */ 1682 /* Remove and free all of the descriptors in the lists */
1024 xilinx_vdma_free_descriptors(chan); 1683 xilinx_dma_free_descriptors(chan);
1025 1684
1026 return 0; 1685 return 0;
1027} 1686}
1028 1687
1029/** 1688/**
1030 * xilinx_vdma_channel_set_config - Configure VDMA channel 1689 * xilinx_dma_channel_set_config - Configure VDMA channel
1031 * Run-time configuration for Axi VDMA, supports: 1690 * Run-time configuration for Axi VDMA, supports:
1032 * . halt the channel 1691 * . halt the channel
1033 * . configure interrupt coalescing and inter-packet delay threshold 1692 * . configure interrupt coalescing and inter-packet delay threshold
@@ -1042,13 +1701,13 @@ static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
1042int xilinx_vdma_channel_set_config(struct dma_chan *dchan, 1701int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
1043 struct xilinx_vdma_config *cfg) 1702 struct xilinx_vdma_config *cfg)
1044{ 1703{
1045 struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); 1704 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1046 u32 dmacr; 1705 u32 dmacr;
1047 1706
1048 if (cfg->reset) 1707 if (cfg->reset)
1049 return xilinx_vdma_chan_reset(chan); 1708 return xilinx_dma_chan_reset(chan);
1050 1709
1051 dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); 1710 dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1052 1711
1053 chan->config.frm_dly = cfg->frm_dly; 1712 chan->config.frm_dly = cfg->frm_dly;
1054 chan->config.park = cfg->park; 1713 chan->config.park = cfg->park;
@@ -1058,8 +1717,8 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
1058 chan->config.master = cfg->master; 1717 chan->config.master = cfg->master;
1059 1718
1060 if (cfg->gen_lock && chan->genlock) { 1719 if (cfg->gen_lock && chan->genlock) {
1061 dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN; 1720 dmacr |= XILINX_DMA_DMACR_GENLOCK_EN;
1062 dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT; 1721 dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT;
1063 } 1722 }
1064 1723
1065 chan->config.frm_cnt_en = cfg->frm_cnt_en; 1724 chan->config.frm_cnt_en = cfg->frm_cnt_en;
@@ -1071,21 +1730,21 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan,
1071 chan->config.coalesc = cfg->coalesc; 1730 chan->config.coalesc = cfg->coalesc;
1072 chan->config.delay = cfg->delay; 1731 chan->config.delay = cfg->delay;
1073 1732
1074 if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) { 1733 if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) {
1075 dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT; 1734 dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT;
1076 chan->config.coalesc = cfg->coalesc; 1735 chan->config.coalesc = cfg->coalesc;
1077 } 1736 }
1078 1737
1079 if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) { 1738 if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) {
1080 dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT; 1739 dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT;
1081 chan->config.delay = cfg->delay; 1740 chan->config.delay = cfg->delay;
1082 } 1741 }
1083 1742
1084 /* FSync Source selection */ 1743 /* FSync Source selection */
1085 dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK; 1744 dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK;
1086 dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT; 1745 dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT;
1087 1746
1088 vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr); 1747 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr);
1089 1748
1090 return 0; 1749 return 0;
1091} 1750}
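Since the configuration helper keeps its exported VDMA name, video clients continue to call it directly; a small sketch (the include path and the chosen values are assumptions for illustration, and only fields visible in the function above are used):

#include <linux/dmaengine.h>
#include <linux/dma/xilinx_dma.h>       /* struct xilinx_vdma_config (assumed path) */

static int vdma_configure_example(struct dma_chan *chan)
{
        struct xilinx_vdma_config cfg = {
                .frm_dly   = 0,
                .gen_lock  = 0,
                .coalesc   = 1,         /* frame-count interrupt every frame */
                .ext_fsync = 0,
        };

        return xilinx_vdma_channel_set_config(chan, &cfg);
}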
@@ -1096,14 +1755,14 @@ EXPORT_SYMBOL(xilinx_vdma_channel_set_config);
1096 */ 1755 */
1097 1756
1098/** 1757/**
1099 * xilinx_vdma_chan_remove - Per Channel remove function 1758 * xilinx_dma_chan_remove - Per Channel remove function
1100 * @chan: Driver specific VDMA channel 1759 * @chan: Driver specific DMA channel
1101 */ 1760 */
1102static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) 1761static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan)
1103{ 1762{
1104 /* Disable all interrupts */ 1763 /* Disable all interrupts */
1105 vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, 1764 dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR,
1106 XILINX_VDMA_DMAXR_ALL_IRQ_MASK); 1765 XILINX_DMA_DMAXR_ALL_IRQ_MASK);
1107 1766
1108 if (chan->irq > 0) 1767 if (chan->irq > 0)
1109 free_irq(chan->irq, chan); 1768 free_irq(chan->irq, chan);
@@ -1113,8 +1772,197 @@ static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
1113 list_del(&chan->common.device_node); 1772 list_del(&chan->common.device_node);
1114} 1773}
1115 1774
1775static int axidma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
1776 struct clk **tx_clk, struct clk **rx_clk,
1777 struct clk **sg_clk, struct clk **tmp_clk)
1778{
1779 int err;
1780
1781 *tmp_clk = NULL;
1782
1783 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
1784 if (IS_ERR(*axi_clk)) {
1785 err = PTR_ERR(*axi_clk);
1786 dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
1787 return err;
1788 }
1789
1790 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
1791 if (IS_ERR(*tx_clk))
1792 *tx_clk = NULL;
1793
1794 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
1795 if (IS_ERR(*rx_clk))
1796 *rx_clk = NULL;
1797
1798 *sg_clk = devm_clk_get(&pdev->dev, "m_axi_sg_aclk");
1799 if (IS_ERR(*sg_clk))
1800 *sg_clk = NULL;
1801
1802 err = clk_prepare_enable(*axi_clk);
1803 if (err) {
1804 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
1805 return err;
1806 }
1807
1808 err = clk_prepare_enable(*tx_clk);
1809 if (err) {
1810 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
1811 goto err_disable_axiclk;
1812 }
1813
1814 err = clk_prepare_enable(*rx_clk);
1815 if (err) {
1816 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
1817 goto err_disable_txclk;
1818 }
1819
1820 err = clk_prepare_enable(*sg_clk);
1821 if (err) {
1822 dev_err(&pdev->dev, "failed to enable sg_clk (%u)\n", err);
1823 goto err_disable_rxclk;
1824 }
1825
1826 return 0;
1827
1828err_disable_rxclk:
1829 clk_disable_unprepare(*rx_clk);
1830err_disable_txclk:
1831 clk_disable_unprepare(*tx_clk);
1832err_disable_axiclk:
1833 clk_disable_unprepare(*axi_clk);
1834
1835 return err;
1836}
1837
1838static int axicdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
1839 struct clk **dev_clk, struct clk **tmp_clk,
1840 struct clk **tmp1_clk, struct clk **tmp2_clk)
1841{
1842 int err;
1843
1844 *tmp_clk = NULL;
1845 *tmp1_clk = NULL;
1846 *tmp2_clk = NULL;
1847
1848 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
1849 if (IS_ERR(*axi_clk)) {
1850 err = PTR_ERR(*axi_clk);
1851 dev_err(&pdev->dev, "failed to get axi_clk (%u)\n", err);
1852 return err;
1853 }
1854
1855 *dev_clk = devm_clk_get(&pdev->dev, "m_axi_aclk");
1856 if (IS_ERR(*dev_clk)) {
1857 err = PTR_ERR(*dev_clk);
1858 dev_err(&pdev->dev, "failed to get dev_clk (%u)\n", err);
1859 return err;
1860 }
1861
1862 err = clk_prepare_enable(*axi_clk);
1863 if (err) {
1864 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
1865 return err;
1866 }
1867
1868 err = clk_prepare_enable(*dev_clk);
1869 if (err) {
1870 dev_err(&pdev->dev, "failed to enable dev_clk (%u)\n", err);
1871 goto err_disable_axiclk;
1872 }
1873
1874 return 0;
1875
1876err_disable_axiclk:
1877 clk_disable_unprepare(*axi_clk);
1878
1879 return err;
1880}
1881
1882static int axivdma_clk_init(struct platform_device *pdev, struct clk **axi_clk,
1883 struct clk **tx_clk, struct clk **txs_clk,
1884 struct clk **rx_clk, struct clk **rxs_clk)
1885{
1886 int err;
1887
1888 *axi_clk = devm_clk_get(&pdev->dev, "s_axi_lite_aclk");
1889 if (IS_ERR(*axi_clk)) {
1890 err = PTR_ERR(*axi_clk);
1891 dev_err(&pdev->dev, "failed to get axi_aclk (%u)\n", err);
1892 return err;
1893 }
1894
1895 *tx_clk = devm_clk_get(&pdev->dev, "m_axi_mm2s_aclk");
1896 if (IS_ERR(*tx_clk))
1897 *tx_clk = NULL;
1898
1899 *txs_clk = devm_clk_get(&pdev->dev, "m_axis_mm2s_aclk");
1900 if (IS_ERR(*txs_clk))
1901 *txs_clk = NULL;
1902
1903 *rx_clk = devm_clk_get(&pdev->dev, "m_axi_s2mm_aclk");
1904 if (IS_ERR(*rx_clk))
1905 *rx_clk = NULL;
1906
1907 *rxs_clk = devm_clk_get(&pdev->dev, "s_axis_s2mm_aclk");
1908 if (IS_ERR(*rxs_clk))
1909 *rxs_clk = NULL;
1910
1911 err = clk_prepare_enable(*axi_clk);
1912 if (err) {
1913 dev_err(&pdev->dev, "failed to enable axi_clk (%u)\n", err);
1914 return err;
1915 }
1916
1917 err = clk_prepare_enable(*tx_clk);
1918 if (err) {
1919 dev_err(&pdev->dev, "failed to enable tx_clk (%u)\n", err);
1920 goto err_disable_axiclk;
1921 }
1922
1923 err = clk_prepare_enable(*txs_clk);
1924 if (err) {
1925 dev_err(&pdev->dev, "failed to enable txs_clk (%u)\n", err);
1926 goto err_disable_txclk;
1927 }
1928
1929 err = clk_prepare_enable(*rx_clk);
1930 if (err) {
1931 dev_err(&pdev->dev, "failed to enable rx_clk (%u)\n", err);
1932 goto err_disable_txsclk;
1933 }
1934
1935 err = clk_prepare_enable(*rxs_clk);
1936 if (err) {
1937 dev_err(&pdev->dev, "failed to enable rxs_clk (%u)\n", err);
1938 goto err_disable_rxclk;
1939 }
1940
1941 return 0;
1942
1943err_disable_rxclk:
1944 clk_disable_unprepare(*rx_clk);
1945err_disable_txsclk:
1946 clk_disable_unprepare(*txs_clk);
1947err_disable_txclk:
1948 clk_disable_unprepare(*tx_clk);
1949err_disable_axiclk:
1950 clk_disable_unprepare(*axi_clk);
1951
1952 return err;
1953}
1954
1955static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
1956{
1957 clk_disable_unprepare(xdev->rxs_clk);
1958 clk_disable_unprepare(xdev->rx_clk);
1959 clk_disable_unprepare(xdev->txs_clk);
1960 clk_disable_unprepare(xdev->tx_clk);
1961 clk_disable_unprepare(xdev->axi_clk);
1962}
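One note on the clock helpers above: the mm2s/s2mm/sg clocks are treated as optional because the common clock framework accepts a NULL struct clk pointer as "no clock", so clk_prepare_enable() and clk_disable_unprepare() quietly become no-ops for variants that do not provide them. A minimal illustration of the idiom (clock name reused from the code above):

#include <linux/clk.h>
#include <linux/err.h>

static int get_optional_clk_example(struct device *dev, struct clk **clk)
{
        *clk = devm_clk_get(dev, "m_axi_sg_aclk");
        if (IS_ERR(*clk))
                *clk = NULL;    /* absence of the clock is not an error here */

        /* Safe even when *clk is NULL: the CCF treats it as a no-op */
        return clk_prepare_enable(*clk);
}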
1963
1116/** 1964/**
1117 * xilinx_vdma_chan_probe - Per Channel Probing 1965 * xilinx_dma_chan_probe - Per Channel Probing
 1118 * It gets channel features from the device tree entry and 1966 * It gets channel features from the device tree entry and
 1119 * initializes special channel handling routines 1967 * initializes special channel handling routines
1120 * 1968 *
@@ -1123,10 +1971,10 @@ static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan)
1123 * 1971 *
1124 * Return: '0' on success and failure value on error 1972 * Return: '0' on success and failure value on error
1125 */ 1973 */
1126static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, 1974static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
1127 struct device_node *node) 1975 struct device_node *node)
1128{ 1976{
1129 struct xilinx_vdma_chan *chan; 1977 struct xilinx_dma_chan *chan;
1130 bool has_dre = false; 1978 bool has_dre = false;
1131 u32 value, width; 1979 u32 value, width;
1132 int err; 1980 int err;
@@ -1140,6 +1988,7 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
1140 chan->xdev = xdev; 1988 chan->xdev = xdev;
1141 chan->has_sg = xdev->has_sg; 1989 chan->has_sg = xdev->has_sg;
1142 chan->desc_pendingcount = 0x0; 1990 chan->desc_pendingcount = 0x0;
1991 chan->ext_addr = xdev->ext_addr;
1143 1992
1144 spin_lock_init(&chan->lock); 1993 spin_lock_init(&chan->lock);
1145 INIT_LIST_HEAD(&chan->pending_list); 1994 INIT_LIST_HEAD(&chan->pending_list);
@@ -1169,23 +2018,27 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
1169 chan->direction = DMA_MEM_TO_DEV; 2018 chan->direction = DMA_MEM_TO_DEV;
1170 chan->id = 0; 2019 chan->id = 0;
1171 2020
1172 chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET; 2021 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
1173 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; 2022 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2023 chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
1174 2024
1175 if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || 2025 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
1176 xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S) 2026 xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S)
1177 chan->flush_on_fsync = true; 2027 chan->flush_on_fsync = true;
2028 }
1178 } else if (of_device_is_compatible(node, 2029 } else if (of_device_is_compatible(node,
1179 "xlnx,axi-vdma-s2mm-channel")) { 2030 "xlnx,axi-vdma-s2mm-channel")) {
1180 chan->direction = DMA_DEV_TO_MEM; 2031 chan->direction = DMA_DEV_TO_MEM;
1181 chan->id = 1; 2032 chan->id = 1;
1182 2033
1183 chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET; 2034 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
1184 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; 2035 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2036 chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
1185 2037
1186 if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || 2038 if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
1187 xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM) 2039 xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM)
1188 chan->flush_on_fsync = true; 2040 chan->flush_on_fsync = true;
2041 }
1189 } else { 2042 } else {
1190 dev_err(xdev->dev, "Invalid channel compatible node\n"); 2043 dev_err(xdev->dev, "Invalid channel compatible node\n");
1191 return -EINVAL; 2044 return -EINVAL;
@@ -1193,15 +2046,22 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
1193 2046
1194 /* Request the interrupt */ 2047 /* Request the interrupt */
1195 chan->irq = irq_of_parse_and_map(node, 0); 2048 chan->irq = irq_of_parse_and_map(node, 0);
1196 err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED, 2049 err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED,
1197 "xilinx-vdma-controller", chan); 2050 "xilinx-dma-controller", chan);
1198 if (err) { 2051 if (err) {
1199 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); 2052 dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq);
1200 return err; 2053 return err;
1201 } 2054 }
1202 2055
2056 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2057 chan->start_transfer = xilinx_dma_start_transfer;
2058 else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
2059 chan->start_transfer = xilinx_cdma_start_transfer;
2060 else
2061 chan->start_transfer = xilinx_vdma_start_transfer;
2062
1203 /* Initialize the tasklet */ 2063 /* Initialize the tasklet */
1204 tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet, 2064 tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet,
1205 (unsigned long)chan); 2065 (unsigned long)chan);
1206 2066
1207 /* 2067 /*
@@ -1214,7 +2074,7 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
1214 xdev->chan[chan->id] = chan; 2074 xdev->chan[chan->id] = chan;
1215 2075
1216 /* Reset the channel */ 2076 /* Reset the channel */
1217 err = xilinx_vdma_chan_reset(chan); 2077 err = xilinx_dma_chan_reset(chan);
1218 if (err < 0) { 2078 if (err < 0) {
1219 dev_err(xdev->dev, "Reset channel failed\n"); 2079 dev_err(xdev->dev, "Reset channel failed\n");
1220 return err; 2080 return err;
@@ -1233,28 +2093,54 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev,
1233static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, 2093static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
1234 struct of_dma *ofdma) 2094 struct of_dma *ofdma)
1235{ 2095{
1236 struct xilinx_vdma_device *xdev = ofdma->of_dma_data; 2096 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
1237 int chan_id = dma_spec->args[0]; 2097 int chan_id = dma_spec->args[0];
1238 2098
1239 if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id]) 2099 if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id])
1240 return NULL; 2100 return NULL;
1241 2101
1242 return dma_get_slave_channel(&xdev->chan[chan_id]->common); 2102 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
1243} 2103}
1244 2104
2105static const struct xilinx_dma_config axidma_config = {
2106 .dmatype = XDMA_TYPE_AXIDMA,
2107 .clk_init = axidma_clk_init,
2108};
2109
2110static const struct xilinx_dma_config axicdma_config = {
2111 .dmatype = XDMA_TYPE_CDMA,
2112 .clk_init = axicdma_clk_init,
2113};
2114
2115static const struct xilinx_dma_config axivdma_config = {
2116 .dmatype = XDMA_TYPE_VDMA,
2117 .clk_init = axivdma_clk_init,
2118};
2119
2120static const struct of_device_id xilinx_dma_of_ids[] = {
2121 { .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
2122 { .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
2123 { .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
2124 {}
2125};
2126MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
2127
1245/** 2128/**
1246 * xilinx_vdma_probe - Driver probe function 2129 * xilinx_dma_probe - Driver probe function
1247 * @pdev: Pointer to the platform_device structure 2130 * @pdev: Pointer to the platform_device structure
1248 * 2131 *
1249 * Return: '0' on success and failure value on error 2132 * Return: '0' on success and failure value on error
1250 */ 2133 */
1251static int xilinx_vdma_probe(struct platform_device *pdev) 2134static int xilinx_dma_probe(struct platform_device *pdev)
1252{ 2135{
2136 int (*clk_init)(struct platform_device *, struct clk **, struct clk **,
2137 struct clk **, struct clk **, struct clk **)
2138 = axivdma_clk_init;
1253 struct device_node *node = pdev->dev.of_node; 2139 struct device_node *node = pdev->dev.of_node;
1254 struct xilinx_vdma_device *xdev; 2140 struct xilinx_dma_device *xdev;
1255 struct device_node *child; 2141 struct device_node *child, *np = pdev->dev.of_node;
1256 struct resource *io; 2142 struct resource *io;
1257 u32 num_frames; 2143 u32 num_frames, addr_width;
1258 int i, err; 2144 int i, err;
1259 2145
1260 /* Allocate and initialize the DMA engine structure */ 2146 /* Allocate and initialize the DMA engine structure */
@@ -1263,6 +2149,20 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
1263 return -ENOMEM; 2149 return -ENOMEM;
1264 2150
1265 xdev->dev = &pdev->dev; 2151 xdev->dev = &pdev->dev;
2152 if (np) {
2153 const struct of_device_id *match;
2154
2155 match = of_match_node(xilinx_dma_of_ids, np);
2156 if (match && match->data) {
2157 xdev->dma_config = match->data;
2158 clk_init = xdev->dma_config->clk_init;
2159 }
2160 }
2161
2162 err = clk_init(pdev, &xdev->axi_clk, &xdev->tx_clk, &xdev->txs_clk,
2163 &xdev->rx_clk, &xdev->rxs_clk);
2164 if (err)
2165 return err;
1266 2166
1267 /* Request and map I/O memory */ 2167 /* Request and map I/O memory */
1268 io = platform_get_resource(pdev, IORESOURCE_MEM, 0); 2168 io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1273,46 +2173,77 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
1273 /* Retrieve the DMA engine properties from the device tree */ 2173 /* Retrieve the DMA engine properties from the device tree */
1274 xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); 2174 xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
1275 2175
1276 err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames); 2176 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1277 if (err < 0) { 2177 err = of_property_read_u32(node, "xlnx,num-fstores",
1278 dev_err(xdev->dev, "missing xlnx,num-fstores property\n"); 2178 &num_frames);
1279 return err; 2179 if (err < 0) {
2180 dev_err(xdev->dev,
2181 "missing xlnx,num-fstores property\n");
2182 return err;
2183 }
2184
2185 err = of_property_read_u32(node, "xlnx,flush-fsync",
2186 &xdev->flush_on_fsync);
2187 if (err < 0)
2188 dev_warn(xdev->dev,
2189 "missing xlnx,flush-fsync property\n");
1280 } 2190 }
1281 2191
1282 err = of_property_read_u32(node, "xlnx,flush-fsync", 2192 err = of_property_read_u32(node, "xlnx,addrwidth", &addr_width);
1283 &xdev->flush_on_fsync);
1284 if (err < 0) 2193 if (err < 0)
1285 dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n"); 2194 dev_warn(xdev->dev, "missing xlnx,addrwidth property\n");
2195
2196 if (addr_width > 32)
2197 xdev->ext_addr = true;
2198 else
2199 xdev->ext_addr = false;
2200
2201 /* Set the dma mask bits */
2202 dma_set_mask(xdev->dev, DMA_BIT_MASK(addr_width));
1286 2203
1287 /* Initialize the DMA engine */ 2204 /* Initialize the DMA engine */
1288 xdev->common.dev = &pdev->dev; 2205 xdev->common.dev = &pdev->dev;
1289 2206
1290 INIT_LIST_HEAD(&xdev->common.channels); 2207 INIT_LIST_HEAD(&xdev->common.channels);
1291 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); 2208 if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
1292 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); 2209 dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
2210 dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
2211 }
1293 2212
1294 xdev->common.device_alloc_chan_resources = 2213 xdev->common.device_alloc_chan_resources =
1295 xilinx_vdma_alloc_chan_resources; 2214 xilinx_dma_alloc_chan_resources;
1296 xdev->common.device_free_chan_resources = 2215 xdev->common.device_free_chan_resources =
1297 xilinx_vdma_free_chan_resources; 2216 xilinx_dma_free_chan_resources;
1298 xdev->common.device_prep_interleaved_dma = 2217 xdev->common.device_terminate_all = xilinx_dma_terminate_all;
2218 xdev->common.device_tx_status = xilinx_dma_tx_status;
2219 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2220 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2221 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
 2222 /* Residue calculation is only supported by the AXI DMA */
2223 xdev->common.residue_granularity =
2224 DMA_RESIDUE_GRANULARITY_SEGMENT;
2225 } else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
2226 dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
2227 xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
2228 } else {
2229 xdev->common.device_prep_interleaved_dma =
1299 xilinx_vdma_dma_prep_interleaved; 2230 xilinx_vdma_dma_prep_interleaved;
1300 xdev->common.device_terminate_all = xilinx_vdma_terminate_all; 2231 }
1301 xdev->common.device_tx_status = xilinx_vdma_tx_status;
1302 xdev->common.device_issue_pending = xilinx_vdma_issue_pending;
1303 2232
1304 platform_set_drvdata(pdev, xdev); 2233 platform_set_drvdata(pdev, xdev);
1305 2234
1306 /* Initialize the channels */ 2235 /* Initialize the channels */
1307 for_each_child_of_node(node, child) { 2236 for_each_child_of_node(node, child) {
1308 err = xilinx_vdma_chan_probe(xdev, child); 2237 err = xilinx_dma_chan_probe(xdev, child);
1309 if (err < 0) 2238 if (err < 0)
1310 goto error; 2239 goto disable_clks;
1311 } 2240 }
1312 2241
1313 for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) 2242 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
1314 if (xdev->chan[i]) 2243 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
1315 xdev->chan[i]->num_frms = num_frames; 2244 if (xdev->chan[i])
2245 xdev->chan[i]->num_frms = num_frames;
2246 }
1316 2247
1317 /* Register the DMA engine with the core */ 2248 /* Register the DMA engine with the core */
1318 dma_async_device_register(&xdev->common); 2249 dma_async_device_register(&xdev->common);
@@ -1329,49 +2260,47 @@ static int xilinx_vdma_probe(struct platform_device *pdev)
1329 2260
1330 return 0; 2261 return 0;
1331 2262
2263disable_clks:
2264 xdma_disable_allclks(xdev);
1332error: 2265error:
1333 for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) 2266 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
1334 if (xdev->chan[i]) 2267 if (xdev->chan[i])
1335 xilinx_vdma_chan_remove(xdev->chan[i]); 2268 xilinx_dma_chan_remove(xdev->chan[i]);
1336 2269
1337 return err; 2270 return err;
1338} 2271}
1339 2272
1340/** 2273/**
1341 * xilinx_vdma_remove - Driver remove function 2274 * xilinx_dma_remove - Driver remove function
1342 * @pdev: Pointer to the platform_device structure 2275 * @pdev: Pointer to the platform_device structure
1343 * 2276 *
1344 * Return: Always '0' 2277 * Return: Always '0'
1345 */ 2278 */
1346static int xilinx_vdma_remove(struct platform_device *pdev) 2279static int xilinx_dma_remove(struct platform_device *pdev)
1347{ 2280{
1348 struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev); 2281 struct xilinx_dma_device *xdev = platform_get_drvdata(pdev);
1349 int i; 2282 int i;
1350 2283
1351 of_dma_controller_free(pdev->dev.of_node); 2284 of_dma_controller_free(pdev->dev.of_node);
1352 2285
1353 dma_async_device_unregister(&xdev->common); 2286 dma_async_device_unregister(&xdev->common);
1354 2287
1355 for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) 2288 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
1356 if (xdev->chan[i]) 2289 if (xdev->chan[i])
1357 xilinx_vdma_chan_remove(xdev->chan[i]); 2290 xilinx_dma_chan_remove(xdev->chan[i]);
2291
2292 xdma_disable_allclks(xdev);
1358 2293
1359 return 0; 2294 return 0;
1360} 2295}
1361 2296
1362static const struct of_device_id xilinx_vdma_of_ids[] = {
1363 { .compatible = "xlnx,axi-vdma-1.00.a",},
1364 {}
1365};
1366MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids);
1367
1368static struct platform_driver xilinx_vdma_driver = { 2297static struct platform_driver xilinx_vdma_driver = {
1369 .driver = { 2298 .driver = {
1370 .name = "xilinx-vdma", 2299 .name = "xilinx-vdma",
1371 .of_match_table = xilinx_vdma_of_ids, 2300 .of_match_table = xilinx_dma_of_ids,
1372 }, 2301 },
1373 .probe = xilinx_vdma_probe, 2302 .probe = xilinx_dma_probe,
1374 .remove = xilinx_vdma_remove, 2303 .remove = xilinx_dma_remove,
1375}; 2304};
1376 2305
1377module_platform_driver(xilinx_vdma_driver); 2306module_platform_driver(xilinx_vdma_driver);