author | Kedareswara rao Appana <appana.durga.rao@xilinx.com> | 2016-04-07 01:29:41 -0400
committer | Vinod Koul <vinod.koul@intel.com> | 2016-05-12 02:28:30 -0400
commit | 42c1a2ede4f6a320d6843fc846f5466bff836a0c (patch)
tree | 5977b5bac1444ade26b42e975d898b07f8eec988 /drivers/dma
parent | 2ba4f8abfe450ddf55bf6c79b4650525b37f6962 (diff)
dmaengine: vdma: Rename xilinx_vdma_ prefix to xilinx_dma
This patch renames the xilinx_vdma_ prefix to xilinx_dma
for the APIs and masks that will be shared between the three
DMA IP cores.
Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
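
As a rough illustration of the naming split introduced by this rename (a standalone sketch, not code from the patch): definitions and helpers that the AXI DMA, CDMA and VDMA cores will share use the neutral XILINX_DMA_* / dma_* names, while VDMA-only registers such as the start-address array keep the XILINX_VDMA_ prefix. The dma_ctrl_read() below is a simplified userspace stand-in for the driver helper of the same name (the real one wraps ioread32() on chan->xdev->regs + chan->ctrl_offset); the offsets and bit positions are taken from the patch.

  #include <stdint.h>
  #include <stdio.h>

  /* Shared (core-neutral) names after the rename. */
  #define XILINX_DMA_REG_DMASR              0x0004
  #define XILINX_DMA_DMASR_HALTED           (1U << 0)
  #define XILINX_DMA_DMASR_IDLE             (1U << 1)

  /* VDMA-only register keeps the old prefix. */
  #define XILINX_VDMA_REG_START_ADDRESS(n)  (0x000c + 4 * (n))

  /* Simplified stand-in for dma_ctrl_read(); models a read of a control register. */
  static uint32_t dma_ctrl_read(const uint32_t *regs, uint32_t reg)
  {
          return regs[reg / 4];
  }

  int main(void)
  {
          uint32_t regs[16] = { 0 };
          uint32_t status;

          /* Pretend the channel reported "idle" in the shared status register. */
          regs[XILINX_DMA_REG_DMASR / 4] = XILINX_DMA_DMASR_IDLE;

          status = dma_ctrl_read(regs, XILINX_DMA_REG_DMASR);
          printf("halted=%d idle=%d vdma start-address reg 0 at 0x%04x\n",
                 !!(status & XILINX_DMA_DMASR_HALTED),
                 !!(status & XILINX_DMA_DMASR_IDLE),
                 (unsigned int)XILINX_VDMA_REG_START_ADDRESS(0));
          return 0;
  }

The DMASR check mirrors how the renamed xilinx_dma_is_running()/xilinx_dma_is_idle() helpers in the patch read the status register through the shared accessor.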
Diffstat (limited to 'drivers/dma')
-rw-r--r-- | drivers/dma/xilinx/xilinx_vdma.c | 632 |
1 file changed, 316 insertions, 316 deletions
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 781ab4dbb2b8..57b85af4ed08 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -39,106 +39,106 @@
39 | #include "../dmaengine.h" | 39 | #include "../dmaengine.h" |
40 | 40 | ||
41 | /* Register/Descriptor Offsets */ | 41 | /* Register/Descriptor Offsets */ |
42 | #define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000 | 42 | #define XILINX_DMA_MM2S_CTRL_OFFSET 0x0000 |
43 | #define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030 | 43 | #define XILINX_DMA_S2MM_CTRL_OFFSET 0x0030 |
44 | #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 | 44 | #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 |
45 | #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 | 45 | #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 |
46 | 46 | ||
47 | /* Control Registers */ | 47 | /* Control Registers */ |
48 | #define XILINX_VDMA_REG_DMACR 0x0000 | 48 | #define XILINX_DMA_REG_DMACR 0x0000 |
49 | #define XILINX_VDMA_DMACR_DELAY_MAX 0xff | 49 | #define XILINX_DMA_DMACR_DELAY_MAX 0xff |
50 | #define XILINX_VDMA_DMACR_DELAY_SHIFT 24 | 50 | #define XILINX_DMA_DMACR_DELAY_SHIFT 24 |
51 | #define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff | 51 | #define XILINX_DMA_DMACR_FRAME_COUNT_MAX 0xff |
52 | #define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16 | 52 | #define XILINX_DMA_DMACR_FRAME_COUNT_SHIFT 16 |
53 | #define XILINX_VDMA_DMACR_ERR_IRQ BIT(14) | 53 | #define XILINX_DMA_DMACR_ERR_IRQ BIT(14) |
54 | #define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13) | 54 | #define XILINX_DMA_DMACR_DLY_CNT_IRQ BIT(13) |
55 | #define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12) | 55 | #define XILINX_DMA_DMACR_FRM_CNT_IRQ BIT(12) |
56 | #define XILINX_VDMA_DMACR_MASTER_SHIFT 8 | 56 | #define XILINX_DMA_DMACR_MASTER_SHIFT 8 |
57 | #define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5 | 57 | #define XILINX_DMA_DMACR_FSYNCSRC_SHIFT 5 |
58 | #define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4) | 58 | #define XILINX_DMA_DMACR_FRAMECNT_EN BIT(4) |
59 | #define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3) | 59 | #define XILINX_DMA_DMACR_GENLOCK_EN BIT(3) |
60 | #define XILINX_VDMA_DMACR_RESET BIT(2) | 60 | #define XILINX_DMA_DMACR_RESET BIT(2) |
61 | #define XILINX_VDMA_DMACR_CIRC_EN BIT(1) | 61 | #define XILINX_DMA_DMACR_CIRC_EN BIT(1) |
62 | #define XILINX_VDMA_DMACR_RUNSTOP BIT(0) | 62 | #define XILINX_DMA_DMACR_RUNSTOP BIT(0) |
63 | #define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) | 63 | #define XILINX_DMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) |
64 | 64 | ||
65 | #define XILINX_VDMA_REG_DMASR 0x0004 | 65 | #define XILINX_DMA_REG_DMASR 0x0004 |
66 | #define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15) | 66 | #define XILINX_DMA_DMASR_EOL_LATE_ERR BIT(15) |
67 | #define XILINX_VDMA_DMASR_ERR_IRQ BIT(14) | 67 | #define XILINX_DMA_DMASR_ERR_IRQ BIT(14) |
68 | #define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13) | 68 | #define XILINX_DMA_DMASR_DLY_CNT_IRQ BIT(13) |
69 | #define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12) | 69 | #define XILINX_DMA_DMASR_FRM_CNT_IRQ BIT(12) |
70 | #define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11) | 70 | #define XILINX_DMA_DMASR_SOF_LATE_ERR BIT(11) |
71 | #define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10) | 71 | #define XILINX_DMA_DMASR_SG_DEC_ERR BIT(10) |
72 | #define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9) | 72 | #define XILINX_DMA_DMASR_SG_SLV_ERR BIT(9) |
73 | #define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8) | 73 | #define XILINX_DMA_DMASR_EOF_EARLY_ERR BIT(8) |
74 | #define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7) | 74 | #define XILINX_DMA_DMASR_SOF_EARLY_ERR BIT(7) |
75 | #define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6) | 75 | #define XILINX_DMA_DMASR_DMA_DEC_ERR BIT(6) |
76 | #define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5) | 76 | #define XILINX_DMA_DMASR_DMA_SLAVE_ERR BIT(5) |
77 | #define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4) | 77 | #define XILINX_DMA_DMASR_DMA_INT_ERR BIT(4) |
78 | #define XILINX_VDMA_DMASR_IDLE BIT(1) | 78 | #define XILINX_DMA_DMASR_IDLE BIT(1) |
79 | #define XILINX_VDMA_DMASR_HALTED BIT(0) | 79 | #define XILINX_DMA_DMASR_HALTED BIT(0) |
80 | #define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24) | 80 | #define XILINX_DMA_DMASR_DELAY_MASK GENMASK(31, 24) |
81 | #define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) | 81 | #define XILINX_DMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) |
82 | 82 | ||
83 | #define XILINX_VDMA_REG_CURDESC 0x0008 | 83 | #define XILINX_DMA_REG_CURDESC 0x0008 |
84 | #define XILINX_VDMA_REG_TAILDESC 0x0010 | 84 | #define XILINX_DMA_REG_TAILDESC 0x0010 |
85 | #define XILINX_VDMA_REG_REG_INDEX 0x0014 | 85 | #define XILINX_DMA_REG_REG_INDEX 0x0014 |
86 | #define XILINX_VDMA_REG_FRMSTORE 0x0018 | 86 | #define XILINX_DMA_REG_FRMSTORE 0x0018 |
87 | #define XILINX_VDMA_REG_THRESHOLD 0x001c | 87 | #define XILINX_DMA_REG_THRESHOLD 0x001c |
88 | #define XILINX_VDMA_REG_FRMPTR_STS 0x0024 | 88 | #define XILINX_DMA_REG_FRMPTR_STS 0x0024 |
89 | #define XILINX_VDMA_REG_PARK_PTR 0x0028 | 89 | #define XILINX_DMA_REG_PARK_PTR 0x0028 |
90 | #define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8 | 90 | #define XILINX_DMA_PARK_PTR_WR_REF_SHIFT 8 |
91 | #define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0 | 91 | #define XILINX_DMA_PARK_PTR_RD_REF_SHIFT 0 |
92 | #define XILINX_VDMA_REG_VDMA_VERSION 0x002c | 92 | #define XILINX_DMA_REG_VDMA_VERSION 0x002c |
93 | 93 | ||
94 | /* Register Direct Mode Registers */ | 94 | /* Register Direct Mode Registers */ |
95 | #define XILINX_VDMA_REG_VSIZE 0x0000 | 95 | #define XILINX_DMA_REG_VSIZE 0x0000 |
96 | #define XILINX_VDMA_REG_HSIZE 0x0004 | 96 | #define XILINX_DMA_REG_HSIZE 0x0004 |
97 | 97 | ||
98 | #define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008 | 98 | #define XILINX_DMA_REG_FRMDLY_STRIDE 0x0008 |
99 | #define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 | 99 | #define XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 |
100 | #define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 | 100 | #define XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 |
101 | 101 | ||
102 | #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) | 102 | #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) |
103 | #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) | 103 | #define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) |
104 | 104 | ||
105 | /* HW specific definitions */ | 105 | /* HW specific definitions */ |
106 | #define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2 | 106 | #define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 |
107 | 107 | ||
108 | #define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \ | 108 | #define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ |
109 | (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \ | 109 | (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ |
110 | XILINX_VDMA_DMASR_DLY_CNT_IRQ | \ | 110 | XILINX_DMA_DMASR_DLY_CNT_IRQ | \ |
111 | XILINX_VDMA_DMASR_ERR_IRQ) | 111 | XILINX_DMA_DMASR_ERR_IRQ) |
112 | 112 | ||
113 | #define XILINX_VDMA_DMASR_ALL_ERR_MASK \ | 113 | #define XILINX_DMA_DMASR_ALL_ERR_MASK \ |
114 | (XILINX_VDMA_DMASR_EOL_LATE_ERR | \ | 114 | (XILINX_DMA_DMASR_EOL_LATE_ERR | \ |
115 | XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | 115 | XILINX_DMA_DMASR_SOF_LATE_ERR | \ |
116 | XILINX_VDMA_DMASR_SG_DEC_ERR | \ | 116 | XILINX_DMA_DMASR_SG_DEC_ERR | \ |
117 | XILINX_VDMA_DMASR_SG_SLV_ERR | \ | 117 | XILINX_DMA_DMASR_SG_SLV_ERR | \ |
118 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | 118 | XILINX_DMA_DMASR_EOF_EARLY_ERR | \ |
119 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | 119 | XILINX_DMA_DMASR_SOF_EARLY_ERR | \ |
120 | XILINX_VDMA_DMASR_DMA_DEC_ERR | \ | 120 | XILINX_DMA_DMASR_DMA_DEC_ERR | \ |
121 | XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \ | 121 | XILINX_DMA_DMASR_DMA_SLAVE_ERR | \ |
122 | XILINX_VDMA_DMASR_DMA_INT_ERR) | 122 | XILINX_DMA_DMASR_DMA_INT_ERR) |
123 | 123 | ||
124 | /* | 124 | /* |
125 | * Recoverable errors are DMA Internal error, SOF Early, EOF Early | 125 | * Recoverable errors are DMA Internal error, SOF Early, EOF Early |
126 | * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC | 126 | * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC |
127 | * is enabled in the h/w system. | 127 | * is enabled in the h/w system. |
128 | */ | 128 | */ |
129 | #define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \ | 129 | #define XILINX_DMA_DMASR_ERR_RECOVER_MASK \ |
130 | (XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | 130 | (XILINX_DMA_DMASR_SOF_LATE_ERR | \ |
131 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | 131 | XILINX_DMA_DMASR_EOF_EARLY_ERR | \ |
132 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | 132 | XILINX_DMA_DMASR_SOF_EARLY_ERR | \ |
133 | XILINX_VDMA_DMASR_DMA_INT_ERR) | 133 | XILINX_DMA_DMASR_DMA_INT_ERR) |
134 | 134 | ||
135 | /* Axi VDMA Flush on Fsync bits */ | 135 | /* Axi VDMA Flush on Fsync bits */ |
136 | #define XILINX_VDMA_FLUSH_S2MM 3 | 136 | #define XILINX_DMA_FLUSH_S2MM 3 |
137 | #define XILINX_VDMA_FLUSH_MM2S 2 | 137 | #define XILINX_DMA_FLUSH_MM2S 2 |
138 | #define XILINX_VDMA_FLUSH_BOTH 1 | 138 | #define XILINX_DMA_FLUSH_BOTH 1 |
139 | 139 | ||
140 | /* Delay loop counter to prevent hardware failure */ | 140 | /* Delay loop counter to prevent hardware failure */ |
141 | #define XILINX_VDMA_LOOP_COUNT 1000000 | 141 | #define XILINX_DMA_LOOP_COUNT 1000000 |
142 | 142 | ||
143 | /** | 143 | /** |
144 | * struct xilinx_vdma_desc_hw - Hardware Descriptor | 144 | * struct xilinx_vdma_desc_hw - Hardware Descriptor |
@@ -174,19 +174,19 @@ struct xilinx_vdma_tx_segment {
174 | } __aligned(64); | 174 | } __aligned(64); |
175 | 175 | ||
176 | /** | 176 | /** |
177 | * struct xilinx_vdma_tx_descriptor - Per Transaction structure | 177 | * struct xilinx_dma_tx_descriptor - Per Transaction structure |
178 | * @async_tx: Async transaction descriptor | 178 | * @async_tx: Async transaction descriptor |
179 | * @segments: TX segments list | 179 | * @segments: TX segments list |
180 | * @node: Node in the channel descriptors list | 180 | * @node: Node in the channel descriptors list |
181 | */ | 181 | */ |
182 | struct xilinx_vdma_tx_descriptor { | 182 | struct xilinx_dma_tx_descriptor { |
183 | struct dma_async_tx_descriptor async_tx; | 183 | struct dma_async_tx_descriptor async_tx; |
184 | struct list_head segments; | 184 | struct list_head segments; |
185 | struct list_head node; | 185 | struct list_head node; |
186 | }; | 186 | }; |
187 | 187 | ||
188 | /** | 188 | /** |
189 | * struct xilinx_vdma_chan - Driver specific VDMA channel structure | 189 | * struct xilinx_dma_chan - Driver specific DMA channel structure |
190 | * @xdev: Driver specific device structure | 190 | * @xdev: Driver specific device structure |
191 | * @ctrl_offset: Control registers offset | 191 | * @ctrl_offset: Control registers offset |
192 | * @desc_offset: TX descriptor registers offset | 192 | * @desc_offset: TX descriptor registers offset |
@@ -211,8 +211,8 @@ struct xilinx_vdma_tx_descriptor {
211 | * @ext_addr: Indicates 64 bit addressing is supported by dma channel | 211 | * @ext_addr: Indicates 64 bit addressing is supported by dma channel |
212 | * @desc_submitcount: Descriptor h/w submitted count | 212 | * @desc_submitcount: Descriptor h/w submitted count |
213 | */ | 213 | */ |
214 | struct xilinx_vdma_chan { | 214 | struct xilinx_dma_chan { |
215 | struct xilinx_vdma_device *xdev; | 215 | struct xilinx_dma_device *xdev; |
216 | u32 ctrl_offset; | 216 | u32 ctrl_offset; |
217 | u32 desc_offset; | 217 | u32 desc_offset; |
218 | spinlock_t lock; | 218 | spinlock_t lock; |
@@ -238,20 +238,20 @@ struct xilinx_vdma_chan {
238 | }; | 238 | }; |
239 | 239 | ||
240 | /** | 240 | /** |
241 | * struct xilinx_vdma_device - VDMA device structure | 241 | * struct xilinx_dma_device - DMA device structure |
242 | * @regs: I/O mapped base address | 242 | * @regs: I/O mapped base address |
243 | * @dev: Device Structure | 243 | * @dev: Device Structure |
244 | * @common: DMA device structure | 244 | * @common: DMA device structure |
245 | * @chan: Driver specific VDMA channel | 245 | * @chan: Driver specific DMA channel |
246 | * @has_sg: Specifies whether Scatter-Gather is present or not | 246 | * @has_sg: Specifies whether Scatter-Gather is present or not |
247 | * @flush_on_fsync: Flush on frame sync | 247 | * @flush_on_fsync: Flush on frame sync |
248 | * @ext_addr: Indicates 64 bit addressing is supported by dma device | 248 | * @ext_addr: Indicates 64 bit addressing is supported by dma device |
249 | */ | 249 | */ |
250 | struct xilinx_vdma_device { | 250 | struct xilinx_dma_device { |
251 | void __iomem *regs; | 251 | void __iomem *regs; |
252 | struct device *dev; | 252 | struct device *dev; |
253 | struct dma_device common; | 253 | struct dma_device common; |
254 | struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE]; | 254 | struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; |
255 | bool has_sg; | 255 | bool has_sg; |
256 | u32 flush_on_fsync; | 256 | u32 flush_on_fsync; |
257 | bool ext_addr; | 257 | bool ext_addr; |
@@ -259,51 +259,51 @@ struct xilinx_vdma_device {
259 | 259 | ||
260 | /* Macros */ | 260 | /* Macros */ |
261 | #define to_xilinx_chan(chan) \ | 261 | #define to_xilinx_chan(chan) \ |
262 | container_of(chan, struct xilinx_vdma_chan, common) | 262 | container_of(chan, struct xilinx_dma_chan, common) |
263 | #define to_vdma_tx_descriptor(tx) \ | 263 | #define to_dma_tx_descriptor(tx) \ |
264 | container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx) | 264 | container_of(tx, struct xilinx_dma_tx_descriptor, async_tx) |
265 | #define xilinx_vdma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ | 265 | #define xilinx_dma_poll_timeout(chan, reg, val, cond, delay_us, timeout_us) \ |
266 | readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ | 266 | readl_poll_timeout(chan->xdev->regs + chan->ctrl_offset + reg, val, \ |
267 | cond, delay_us, timeout_us) | 267 | cond, delay_us, timeout_us) |
268 | 268 | ||
269 | /* IO accessors */ | 269 | /* IO accessors */ |
270 | static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg) | 270 | static inline u32 dma_read(struct xilinx_dma_chan *chan, u32 reg) |
271 | { | 271 | { |
272 | return ioread32(chan->xdev->regs + reg); | 272 | return ioread32(chan->xdev->regs + reg); |
273 | } | 273 | } |
274 | 274 | ||
275 | static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value) | 275 | static inline void dma_write(struct xilinx_dma_chan *chan, u32 reg, u32 value) |
276 | { | 276 | { |
277 | iowrite32(value, chan->xdev->regs + reg); | 277 | iowrite32(value, chan->xdev->regs + reg); |
278 | } | 278 | } |
279 | 279 | ||
280 | static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg, | 280 | static inline void vdma_desc_write(struct xilinx_dma_chan *chan, u32 reg, |
281 | u32 value) | 281 | u32 value) |
282 | { | 282 | { |
283 | vdma_write(chan, chan->desc_offset + reg, value); | 283 | dma_write(chan, chan->desc_offset + reg, value); |
284 | } | 284 | } |
285 | 285 | ||
286 | static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg) | 286 | static inline u32 dma_ctrl_read(struct xilinx_dma_chan *chan, u32 reg) |
287 | { | 287 | { |
288 | return vdma_read(chan, chan->ctrl_offset + reg); | 288 | return dma_read(chan, chan->ctrl_offset + reg); |
289 | } | 289 | } |
290 | 290 | ||
291 | static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg, | 291 | static inline void dma_ctrl_write(struct xilinx_dma_chan *chan, u32 reg, |
292 | u32 value) | 292 | u32 value) |
293 | { | 293 | { |
294 | vdma_write(chan, chan->ctrl_offset + reg, value); | 294 | dma_write(chan, chan->ctrl_offset + reg, value); |
295 | } | 295 | } |
296 | 296 | ||
297 | static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg, | 297 | static inline void dma_ctrl_clr(struct xilinx_dma_chan *chan, u32 reg, |
298 | u32 clr) | 298 | u32 clr) |
299 | { | 299 | { |
300 | vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr); | 300 | dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) & ~clr); |
301 | } | 301 | } |
302 | 302 | ||
303 | static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg, | 303 | static inline void dma_ctrl_set(struct xilinx_dma_chan *chan, u32 reg, |
304 | u32 set) | 304 | u32 set) |
305 | { | 305 | { |
306 | vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set); | 306 | dma_ctrl_write(chan, reg, dma_ctrl_read(chan, reg) | set); |
307 | } | 307 | } |
308 | 308 | ||
309 | /** | 309 | /** |
@@ -317,7 +317,7 @@ static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg,
317 | * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits | 317 | * multiple of 64 bits(ex : 0x5c), we are writing as two separate 32 bits |
318 | * instead of a single 64 bit register write. | 318 | * instead of a single 64 bit register write. |
319 | */ | 319 | */ |
320 | static inline void vdma_desc_write_64(struct xilinx_vdma_chan *chan, u32 reg, | 320 | static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg, |
321 | u32 value_lsb, u32 value_msb) | 321 | u32 value_lsb, u32 value_msb) |
322 | { | 322 | { |
323 | /* Write the lsb 32 bits*/ | 323 | /* Write the lsb 32 bits*/ |
@@ -333,12 +333,12 @@ static inline void vdma_desc_write_64(struct xilinx_vdma_chan *chan, u32 reg,
333 | 333 | ||
334 | /** | 334 | /** |
335 | * xilinx_vdma_alloc_tx_segment - Allocate transaction segment | 335 | * xilinx_vdma_alloc_tx_segment - Allocate transaction segment |
336 | * @chan: Driver specific VDMA channel | 336 | * @chan: Driver specific DMA channel |
337 | * | 337 | * |
338 | * Return: The allocated segment on success and NULL on failure. | 338 | * Return: The allocated segment on success and NULL on failure. |
339 | */ | 339 | */ |
340 | static struct xilinx_vdma_tx_segment * | 340 | static struct xilinx_vdma_tx_segment * |
341 | xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan) | 341 | xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan) |
342 | { | 342 | { |
343 | struct xilinx_vdma_tx_segment *segment; | 343 | struct xilinx_vdma_tx_segment *segment; |
344 | dma_addr_t phys; | 344 | dma_addr_t phys; |
@@ -354,25 +354,25 @@ xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan)
354 | 354 | ||
355 | /** | 355 | /** |
356 | * xilinx_vdma_free_tx_segment - Free transaction segment | 356 | * xilinx_vdma_free_tx_segment - Free transaction segment |
357 | * @chan: Driver specific VDMA channel | 357 | * @chan: Driver specific DMA channel |
358 | * @segment: VDMA transaction segment | 358 | * @segment: DMA transaction segment |
359 | */ | 359 | */ |
360 | static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan, | 360 | static void xilinx_vdma_free_tx_segment(struct xilinx_dma_chan *chan, |
361 | struct xilinx_vdma_tx_segment *segment) | 361 | struct xilinx_vdma_tx_segment *segment) |
362 | { | 362 | { |
363 | dma_pool_free(chan->desc_pool, segment, segment->phys); | 363 | dma_pool_free(chan->desc_pool, segment, segment->phys); |
364 | } | 364 | } |
365 | 365 | ||
366 | /** | 366 | /** |
367 | * xilinx_vdma_tx_descriptor - Allocate transaction descriptor | 367 | * xilinx_dma_tx_descriptor - Allocate transaction descriptor |
368 | * @chan: Driver specific VDMA channel | 368 | * @chan: Driver specific DMA channel |
369 | * | 369 | * |
370 | * Return: The allocated descriptor on success and NULL on failure. | 370 | * Return: The allocated descriptor on success and NULL on failure. |
371 | */ | 371 | */ |
372 | static struct xilinx_vdma_tx_descriptor * | 372 | static struct xilinx_dma_tx_descriptor * |
373 | xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan) | 373 | xilinx_dma_alloc_tx_descriptor(struct xilinx_dma_chan *chan) |
374 | { | 374 | { |
375 | struct xilinx_vdma_tx_descriptor *desc; | 375 | struct xilinx_dma_tx_descriptor *desc; |
376 | 376 | ||
377 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | 377 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); |
378 | if (!desc) | 378 | if (!desc) |
@@ -384,13 +384,13 @@ xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan)
384 | } | 384 | } |
385 | 385 | ||
386 | /** | 386 | /** |
387 | * xilinx_vdma_free_tx_descriptor - Free transaction descriptor | 387 | * xilinx_dma_free_tx_descriptor - Free transaction descriptor |
388 | * @chan: Driver specific VDMA channel | 388 | * @chan: Driver specific DMA channel |
389 | * @desc: VDMA transaction descriptor | 389 | * @desc: DMA transaction descriptor |
390 | */ | 390 | */ |
391 | static void | 391 | static void |
392 | xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan, | 392 | xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan, |
393 | struct xilinx_vdma_tx_descriptor *desc) | 393 | struct xilinx_dma_tx_descriptor *desc) |
394 | { | 394 | { |
395 | struct xilinx_vdma_tx_segment *segment, *next; | 395 | struct xilinx_vdma_tx_segment *segment, *next; |
396 | 396 | ||
@@ -408,60 +408,60 @@ xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan,
408 | /* Required functions */ | 408 | /* Required functions */ |
409 | 409 | ||
410 | /** | 410 | /** |
411 | * xilinx_vdma_free_desc_list - Free descriptors list | 411 | * xilinx_dma_free_desc_list - Free descriptors list |
412 | * @chan: Driver specific VDMA channel | 412 | * @chan: Driver specific DMA channel |
413 | * @list: List to parse and delete the descriptor | 413 | * @list: List to parse and delete the descriptor |
414 | */ | 414 | */ |
415 | static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan, | 415 | static void xilinx_dma_free_desc_list(struct xilinx_dma_chan *chan, |
416 | struct list_head *list) | 416 | struct list_head *list) |
417 | { | 417 | { |
418 | struct xilinx_vdma_tx_descriptor *desc, *next; | 418 | struct xilinx_dma_tx_descriptor *desc, *next; |
419 | 419 | ||
420 | list_for_each_entry_safe(desc, next, list, node) { | 420 | list_for_each_entry_safe(desc, next, list, node) { |
421 | list_del(&desc->node); | 421 | list_del(&desc->node); |
422 | xilinx_vdma_free_tx_descriptor(chan, desc); | 422 | xilinx_dma_free_tx_descriptor(chan, desc); |
423 | } | 423 | } |
424 | } | 424 | } |
425 | 425 | ||
426 | /** | 426 | /** |
427 | * xilinx_vdma_free_descriptors - Free channel descriptors | 427 | * xilinx_dma_free_descriptors - Free channel descriptors |
428 | * @chan: Driver specific VDMA channel | 428 | * @chan: Driver specific DMA channel |
429 | */ | 429 | */ |
430 | static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan) | 430 | static void xilinx_dma_free_descriptors(struct xilinx_dma_chan *chan) |
431 | { | 431 | { |
432 | unsigned long flags; | 432 | unsigned long flags; |
433 | 433 | ||
434 | spin_lock_irqsave(&chan->lock, flags); | 434 | spin_lock_irqsave(&chan->lock, flags); |
435 | 435 | ||
436 | xilinx_vdma_free_desc_list(chan, &chan->pending_list); | 436 | xilinx_dma_free_desc_list(chan, &chan->pending_list); |
437 | xilinx_vdma_free_desc_list(chan, &chan->done_list); | 437 | xilinx_dma_free_desc_list(chan, &chan->done_list); |
438 | xilinx_vdma_free_desc_list(chan, &chan->active_list); | 438 | xilinx_dma_free_desc_list(chan, &chan->active_list); |
439 | 439 | ||
440 | spin_unlock_irqrestore(&chan->lock, flags); | 440 | spin_unlock_irqrestore(&chan->lock, flags); |
441 | } | 441 | } |
442 | 442 | ||
443 | /** | 443 | /** |
444 | * xilinx_vdma_free_chan_resources - Free channel resources | 444 | * xilinx_dma_free_chan_resources - Free channel resources |
445 | * @dchan: DMA channel | 445 | * @dchan: DMA channel |
446 | */ | 446 | */ |
447 | static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan) | 447 | static void xilinx_dma_free_chan_resources(struct dma_chan *dchan) |
448 | { | 448 | { |
449 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 449 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
450 | 450 | ||
451 | dev_dbg(chan->dev, "Free all channel resources.\n"); | 451 | dev_dbg(chan->dev, "Free all channel resources.\n"); |
452 | 452 | ||
453 | xilinx_vdma_free_descriptors(chan); | 453 | xilinx_dma_free_descriptors(chan); |
454 | dma_pool_destroy(chan->desc_pool); | 454 | dma_pool_destroy(chan->desc_pool); |
455 | chan->desc_pool = NULL; | 455 | chan->desc_pool = NULL; |
456 | } | 456 | } |
457 | 457 | ||
458 | /** | 458 | /** |
459 | * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors | 459 | * xilinx_dma_chan_desc_cleanup - Clean channel descriptors |
460 | * @chan: Driver specific VDMA channel | 460 | * @chan: Driver specific DMA channel |
461 | */ | 461 | */ |
462 | static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan) | 462 | static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan) |
463 | { | 463 | { |
464 | struct xilinx_vdma_tx_descriptor *desc, *next; | 464 | struct xilinx_dma_tx_descriptor *desc, *next; |
465 | unsigned long flags; | 465 | unsigned long flags; |
466 | 466 | ||
467 | spin_lock_irqsave(&chan->lock, flags); | 467 | spin_lock_irqsave(&chan->lock, flags); |
@@ -484,32 +484,32 @@ static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan)
484 | 484 | ||
485 | /* Run any dependencies, then free the descriptor */ | 485 | /* Run any dependencies, then free the descriptor */ |
486 | dma_run_dependencies(&desc->async_tx); | 486 | dma_run_dependencies(&desc->async_tx); |
487 | xilinx_vdma_free_tx_descriptor(chan, desc); | 487 | xilinx_dma_free_tx_descriptor(chan, desc); |
488 | } | 488 | } |
489 | 489 | ||
490 | spin_unlock_irqrestore(&chan->lock, flags); | 490 | spin_unlock_irqrestore(&chan->lock, flags); |
491 | } | 491 | } |
492 | 492 | ||
493 | /** | 493 | /** |
494 | * xilinx_vdma_do_tasklet - Schedule completion tasklet | 494 | * xilinx_dma_do_tasklet - Schedule completion tasklet |
495 | * @data: Pointer to the Xilinx VDMA channel structure | 495 | * @data: Pointer to the Xilinx DMA channel structure |
496 | */ | 496 | */ |
497 | static void xilinx_vdma_do_tasklet(unsigned long data) | 497 | static void xilinx_dma_do_tasklet(unsigned long data) |
498 | { | 498 | { |
499 | struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data; | 499 | struct xilinx_dma_chan *chan = (struct xilinx_dma_chan *)data; |
500 | 500 | ||
501 | xilinx_vdma_chan_desc_cleanup(chan); | 501 | xilinx_dma_chan_desc_cleanup(chan); |
502 | } | 502 | } |
503 | 503 | ||
504 | /** | 504 | /** |
505 | * xilinx_vdma_alloc_chan_resources - Allocate channel resources | 505 | * xilinx_dma_alloc_chan_resources - Allocate channel resources |
506 | * @dchan: DMA channel | 506 | * @dchan: DMA channel |
507 | * | 507 | * |
508 | * Return: '0' on success and failure value on error | 508 | * Return: '0' on success and failure value on error |
509 | */ | 509 | */ |
510 | static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan) | 510 | static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan) |
511 | { | 511 | { |
512 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 512 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
513 | 513 | ||
514 | /* Has this channel already been allocated? */ | 514 | /* Has this channel already been allocated? */ |
515 | if (chan->desc_pool) | 515 | if (chan->desc_pool) |
@@ -535,14 +535,14 @@ static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan)
535 | } | 535 | } |
536 | 536 | ||
537 | /** | 537 | /** |
538 | * xilinx_vdma_tx_status - Get VDMA transaction status | 538 | * xilinx_dma_tx_status - Get DMA transaction status |
539 | * @dchan: DMA channel | 539 | * @dchan: DMA channel |
540 | * @cookie: Transaction identifier | 540 | * @cookie: Transaction identifier |
541 | * @txstate: Transaction state | 541 | * @txstate: Transaction state |
542 | * | 542 | * |
543 | * Return: DMA transaction status | 543 | * Return: DMA transaction status |
544 | */ | 544 | */ |
545 | static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan, | 545 | static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan, |
546 | dma_cookie_t cookie, | 546 | dma_cookie_t cookie, |
547 | struct dma_tx_state *txstate) | 547 | struct dma_tx_state *txstate) |
548 | { | 548 | { |
@@ -550,73 +550,73 @@ static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan,
550 | } | 550 | } |
551 | 551 | ||
552 | /** | 552 | /** |
553 | * xilinx_vdma_is_running - Check if VDMA channel is running | 553 | * xilinx_dma_is_running - Check if DMA channel is running |
554 | * @chan: Driver specific VDMA channel | 554 | * @chan: Driver specific DMA channel |
555 | * | 555 | * |
556 | * Return: '1' if running, '0' if not. | 556 | * Return: '1' if running, '0' if not. |
557 | */ | 557 | */ |
558 | static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan) | 558 | static bool xilinx_dma_is_running(struct xilinx_dma_chan *chan) |
559 | { | 559 | { |
560 | return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | 560 | return !(dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & |
561 | XILINX_VDMA_DMASR_HALTED) && | 561 | XILINX_DMA_DMASR_HALTED) && |
562 | (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | 562 | (dma_ctrl_read(chan, XILINX_DMA_REG_DMACR) & |
563 | XILINX_VDMA_DMACR_RUNSTOP); | 563 | XILINX_DMA_DMACR_RUNSTOP); |
564 | } | 564 | } |
565 | 565 | ||
566 | /** | 566 | /** |
567 | * xilinx_vdma_is_idle - Check if VDMA channel is idle | 567 | * xilinx_dma_is_idle - Check if DMA channel is idle |
568 | * @chan: Driver specific VDMA channel | 568 | * @chan: Driver specific DMA channel |
569 | * | 569 | * |
570 | * Return: '1' if idle, '0' if not. | 570 | * Return: '1' if idle, '0' if not. |
571 | */ | 571 | */ |
572 | static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan) | 572 | static bool xilinx_dma_is_idle(struct xilinx_dma_chan *chan) |
573 | { | 573 | { |
574 | return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | 574 | return dma_ctrl_read(chan, XILINX_DMA_REG_DMASR) & |
575 | XILINX_VDMA_DMASR_IDLE; | 575 | XILINX_DMA_DMASR_IDLE; |
576 | } | 576 | } |
577 | 577 | ||
578 | /** | 578 | /** |
579 | * xilinx_vdma_halt - Halt VDMA channel | 579 | * xilinx_dma_halt - Halt DMA channel |
580 | * @chan: Driver specific VDMA channel | 580 | * @chan: Driver specific DMA channel |
581 | */ | 581 | */ |
582 | static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) | 582 | static void xilinx_dma_halt(struct xilinx_dma_chan *chan) |
583 | { | 583 | { |
584 | int err; | 584 | int err; |
585 | u32 val; | 585 | u32 val; |
586 | 586 | ||
587 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | 587 | dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); |
588 | 588 | ||
589 | /* Wait for the hardware to halt */ | 589 | /* Wait for the hardware to halt */ |
590 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, | 590 | err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, |
591 | (val & XILINX_VDMA_DMASR_HALTED), 0, | 591 | (val & XILINX_DMA_DMASR_HALTED), 0, |
592 | XILINX_VDMA_LOOP_COUNT); | 592 | XILINX_DMA_LOOP_COUNT); |
593 | 593 | ||
594 | if (err) { | 594 | if (err) { |
595 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", | 595 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", |
596 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | 596 | chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
597 | chan->err = true; | 597 | chan->err = true; |
598 | } | 598 | } |
599 | } | 599 | } |
600 | 600 | ||
601 | /** | 601 | /** |
602 | * xilinx_vdma_start - Start VDMA channel | 602 | * xilinx_dma_start - Start DMA channel |
603 | * @chan: Driver specific VDMA channel | 603 | * @chan: Driver specific DMA channel |
604 | */ | 604 | */ |
605 | static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) | 605 | static void xilinx_dma_start(struct xilinx_dma_chan *chan) |
606 | { | 606 | { |
607 | int err; | 607 | int err; |
608 | u32 val; | 608 | u32 val; |
609 | 609 | ||
610 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | 610 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RUNSTOP); |
611 | 611 | ||
612 | /* Wait for the hardware to start */ | 612 | /* Wait for the hardware to start */ |
613 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMASR, val, | 613 | err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMASR, val, |
614 | !(val & XILINX_VDMA_DMASR_HALTED), 0, | 614 | !(val & XILINX_DMA_DMASR_HALTED), 0, |
615 | XILINX_VDMA_LOOP_COUNT); | 615 | XILINX_DMA_LOOP_COUNT); |
616 | 616 | ||
617 | if (err) { | 617 | if (err) { |
618 | dev_err(chan->dev, "Cannot start channel %p: %x\n", | 618 | dev_err(chan->dev, "Cannot start channel %p: %x\n", |
619 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | 619 | chan, dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
620 | 620 | ||
621 | chan->err = true; | 621 | chan->err = true; |
622 | } | 622 | } |
@@ -626,10 +626,10 @@ static void xilinx_vdma_start(struct xilinx_vdma_chan *chan)
626 | * xilinx_vdma_start_transfer - Starts VDMA transfer | 626 | * xilinx_vdma_start_transfer - Starts VDMA transfer |
627 | * @chan: Driver specific channel struct pointer | 627 | * @chan: Driver specific channel struct pointer |
628 | */ | 628 | */ |
629 | static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | 629 | static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan) |
630 | { | 630 | { |
631 | struct xilinx_vdma_config *config = &chan->config; | 631 | struct xilinx_vdma_config *config = &chan->config; |
632 | struct xilinx_vdma_tx_descriptor *desc, *tail_desc; | 632 | struct xilinx_dma_tx_descriptor *desc, *tail_desc; |
633 | u32 reg; | 633 | u32 reg; |
634 | struct xilinx_vdma_tx_segment *tail_segment; | 634 | struct xilinx_vdma_tx_segment *tail_segment; |
635 | 635 | ||
@@ -641,16 +641,16 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
641 | return; | 641 | return; |
642 | 642 | ||
643 | desc = list_first_entry(&chan->pending_list, | 643 | desc = list_first_entry(&chan->pending_list, |
644 | struct xilinx_vdma_tx_descriptor, node); | 644 | struct xilinx_dma_tx_descriptor, node); |
645 | tail_desc = list_last_entry(&chan->pending_list, | 645 | tail_desc = list_last_entry(&chan->pending_list, |
646 | struct xilinx_vdma_tx_descriptor, node); | 646 | struct xilinx_dma_tx_descriptor, node); |
647 | 647 | ||
648 | tail_segment = list_last_entry(&tail_desc->segments, | 648 | tail_segment = list_last_entry(&tail_desc->segments, |
649 | struct xilinx_vdma_tx_segment, node); | 649 | struct xilinx_vdma_tx_segment, node); |
650 | 650 | ||
651 | /* If it is SG mode and hardware is busy, cannot submit */ | 651 | /* If it is SG mode and hardware is busy, cannot submit */ |
652 | if (chan->has_sg && xilinx_vdma_is_running(chan) && | 652 | if (chan->has_sg && xilinx_dma_is_running(chan) && |
653 | !xilinx_vdma_is_idle(chan)) { | 653 | !xilinx_dma_is_idle(chan)) { |
654 | dev_dbg(chan->dev, "DMA controller still busy\n"); | 654 | dev_dbg(chan->dev, "DMA controller still busy\n"); |
655 | return; | 655 | return; |
656 | } | 656 | } |
@@ -660,19 +660,19 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
660 | * done, start new transfers | 660 | * done, start new transfers |
661 | */ | 661 | */ |
662 | if (chan->has_sg) | 662 | if (chan->has_sg) |
663 | vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, | 663 | dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, |
664 | desc->async_tx.phys); | 664 | desc->async_tx.phys); |
665 | 665 | ||
666 | /* Configure the hardware using info in the config structure */ | 666 | /* Configure the hardware using info in the config structure */ |
667 | reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); | 667 | reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
668 | 668 | ||
669 | if (config->frm_cnt_en) | 669 | if (config->frm_cnt_en) |
670 | reg |= XILINX_VDMA_DMACR_FRAMECNT_EN; | 670 | reg |= XILINX_DMA_DMACR_FRAMECNT_EN; |
671 | else | 671 | else |
672 | reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN; | 672 | reg &= ~XILINX_DMA_DMACR_FRAMECNT_EN; |
673 | 673 | ||
674 | /* Configure channel to allow number frame buffers */ | 674 | /* Configure channel to allow number frame buffers */ |
675 | vdma_ctrl_write(chan, XILINX_VDMA_REG_FRMSTORE, | 675 | dma_ctrl_write(chan, XILINX_DMA_REG_FRMSTORE, |
676 | chan->desc_pendingcount); | 676 | chan->desc_pendingcount); |
677 | 677 | ||
678 | /* | 678 | /* |
@@ -680,34 +680,34 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
680 | * In direct register mode, if not parking, enable circular mode | 680 | * In direct register mode, if not parking, enable circular mode |
681 | */ | 681 | */ |
682 | if (chan->has_sg || !config->park) | 682 | if (chan->has_sg || !config->park) |
683 | reg |= XILINX_VDMA_DMACR_CIRC_EN; | 683 | reg |= XILINX_DMA_DMACR_CIRC_EN; |
684 | 684 | ||
685 | if (config->park) | 685 | if (config->park) |
686 | reg &= ~XILINX_VDMA_DMACR_CIRC_EN; | 686 | reg &= ~XILINX_DMA_DMACR_CIRC_EN; |
687 | 687 | ||
688 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg); | 688 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); |
689 | 689 | ||
690 | if (config->park && (config->park_frm >= 0) && | 690 | if (config->park && (config->park_frm >= 0) && |
691 | (config->park_frm < chan->num_frms)) { | 691 | (config->park_frm < chan->num_frms)) { |
692 | if (chan->direction == DMA_MEM_TO_DEV) | 692 | if (chan->direction == DMA_MEM_TO_DEV) |
693 | vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, | 693 | dma_write(chan, XILINX_DMA_REG_PARK_PTR, |
694 | config->park_frm << | 694 | config->park_frm << |
695 | XILINX_VDMA_PARK_PTR_RD_REF_SHIFT); | 695 | XILINX_DMA_PARK_PTR_RD_REF_SHIFT); |
696 | else | 696 | else |
697 | vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, | 697 | dma_write(chan, XILINX_DMA_REG_PARK_PTR, |
698 | config->park_frm << | 698 | config->park_frm << |
699 | XILINX_VDMA_PARK_PTR_WR_REF_SHIFT); | 699 | XILINX_DMA_PARK_PTR_WR_REF_SHIFT); |
700 | } | 700 | } |
701 | 701 | ||
702 | /* Start the hardware */ | 702 | /* Start the hardware */ |
703 | xilinx_vdma_start(chan); | 703 | xilinx_dma_start(chan); |
704 | 704 | ||
705 | if (chan->err) | 705 | if (chan->err) |
706 | return; | 706 | return; |
707 | 707 | ||
708 | /* Start the transfer */ | 708 | /* Start the transfer */ |
709 | if (chan->has_sg) { | 709 | if (chan->has_sg) { |
710 | vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, | 710 | dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, |
711 | tail_segment->phys); | 711 | tail_segment->phys); |
712 | } else { | 712 | } else { |
713 | struct xilinx_vdma_tx_segment *segment, *last = NULL; | 713 | struct xilinx_vdma_tx_segment *segment, *last = NULL; |
@@ -734,10 +734,10 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
734 | return; | 734 | return; |
735 | 735 | ||
736 | /* HW expects these parameters to be same for one transaction */ | 736 | /* HW expects these parameters to be same for one transaction */ |
737 | vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize); | 737 | vdma_desc_write(chan, XILINX_DMA_REG_HSIZE, last->hw.hsize); |
738 | vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE, | 738 | vdma_desc_write(chan, XILINX_DMA_REG_FRMDLY_STRIDE, |
739 | last->hw.stride); | 739 | last->hw.stride); |
740 | vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize); | 740 | vdma_desc_write(chan, XILINX_DMA_REG_VSIZE, last->hw.vsize); |
741 | } | 741 | } |
742 | 742 | ||
743 | if (!chan->has_sg) { | 743 | if (!chan->has_sg) { |
@@ -754,12 +754,12 @@ static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan)
754 | } | 754 | } |
755 | 755 | ||
756 | /** | 756 | /** |
757 | * xilinx_vdma_issue_pending - Issue pending transactions | 757 | * xilinx_dma_issue_pending - Issue pending transactions |
758 | * @dchan: DMA channel | 758 | * @dchan: DMA channel |
759 | */ | 759 | */ |
760 | static void xilinx_vdma_issue_pending(struct dma_chan *dchan) | 760 | static void xilinx_dma_issue_pending(struct dma_chan *dchan) |
761 | { | 761 | { |
762 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 762 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
763 | unsigned long flags; | 763 | unsigned long flags; |
764 | 764 | ||
765 | spin_lock_irqsave(&chan->lock, flags); | 765 | spin_lock_irqsave(&chan->lock, flags); |
@@ -768,14 +768,14 @@ static void xilinx_vdma_issue_pending(struct dma_chan *dchan)
768 | } | 768 | } |
769 | 769 | ||
770 | /** | 770 | /** |
771 | * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete | 771 | * xilinx_dma_complete_descriptor - Mark the active descriptor as complete |
772 | * @chan : xilinx DMA channel | 772 | * @chan : xilinx DMA channel |
773 | * | 773 | * |
774 | * CONTEXT: hardirq | 774 | * CONTEXT: hardirq |
775 | */ | 775 | */ |
776 | static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) | 776 | static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan) |
777 | { | 777 | { |
778 | struct xilinx_vdma_tx_descriptor *desc, *next; | 778 | struct xilinx_dma_tx_descriptor *desc, *next; |
779 | 779 | ||
780 | /* This function was invoked with lock held */ | 780 | /* This function was invoked with lock held */ |
781 | if (list_empty(&chan->active_list)) | 781 | if (list_empty(&chan->active_list)) |
@@ -789,27 +789,27 @@ static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan)
789 | } | 789 | } |
790 | 790 | ||
791 | /** | 791 | /** |
792 | * xilinx_vdma_reset - Reset VDMA channel | 792 | * xilinx_dma_reset - Reset DMA channel |
793 | * @chan: Driver specific VDMA channel | 793 | * @chan: Driver specific DMA channel |
794 | * | 794 | * |
795 | * Return: '0' on success and failure value on error | 795 | * Return: '0' on success and failure value on error |
796 | */ | 796 | */ |
797 | static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) | 797 | static int xilinx_dma_reset(struct xilinx_dma_chan *chan) |
798 | { | 798 | { |
799 | int err; | 799 | int err; |
800 | u32 tmp; | 800 | u32 tmp; |
801 | 801 | ||
802 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET); | 802 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, XILINX_DMA_DMACR_RESET); |
803 | 803 | ||
804 | /* Wait for the hardware to finish reset */ | 804 | /* Wait for the hardware to finish reset */ |
805 | err = xilinx_vdma_poll_timeout(chan, XILINX_VDMA_REG_DMACR, tmp, | 805 | err = xilinx_dma_poll_timeout(chan, XILINX_DMA_REG_DMACR, tmp, |
806 | !(tmp & XILINX_VDMA_DMACR_RESET), 0, | 806 | !(tmp & XILINX_DMA_DMACR_RESET), 0, |
807 | XILINX_VDMA_LOOP_COUNT); | 807 | XILINX_DMA_LOOP_COUNT); |
808 | 808 | ||
809 | if (err) { | 809 | if (err) { |
810 | dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", | 810 | dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", |
811 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR), | 811 | dma_ctrl_read(chan, XILINX_DMA_REG_DMACR), |
812 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | 812 | dma_ctrl_read(chan, XILINX_DMA_REG_DMASR)); |
813 | return -ETIMEDOUT; | 813 | return -ETIMEDOUT; |
814 | } | 814 | } |
815 | 815 | ||
@@ -819,48 +819,48 @@ static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan)
819 | } | 819 | } |
820 | 820 | ||
821 | /** | 821 | /** |
822 | * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts | 822 | * xilinx_dma_chan_reset - Reset DMA channel and enable interrupts |
823 | * @chan: Driver specific VDMA channel | 823 | * @chan: Driver specific DMA channel |
824 | * | 824 | * |
825 | * Return: '0' on success and failure value on error | 825 | * Return: '0' on success and failure value on error |
826 | */ | 826 | */ |
827 | static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan) | 827 | static int xilinx_dma_chan_reset(struct xilinx_dma_chan *chan) |
828 | { | 828 | { |
829 | int err; | 829 | int err; |
830 | 830 | ||
831 | /* Reset VDMA */ | 831 | /* Reset VDMA */ |
832 | err = xilinx_vdma_reset(chan); | 832 | err = xilinx_dma_reset(chan); |
833 | if (err) | 833 | if (err) |
834 | return err; | 834 | return err; |
835 | 835 | ||
836 | /* Enable interrupts */ | 836 | /* Enable interrupts */ |
837 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, | 837 | dma_ctrl_set(chan, XILINX_DMA_REG_DMACR, |
838 | XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | 838 | XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
839 | 839 | ||
840 | return 0; | 840 | return 0; |
841 | } | 841 | } |
842 | 842 | ||
843 | /** | 843 | /** |
844 | * xilinx_vdma_irq_handler - VDMA Interrupt handler | 844 | * xilinx_dma_irq_handler - DMA Interrupt handler |
845 | * @irq: IRQ number | 845 | * @irq: IRQ number |
846 | * @data: Pointer to the Xilinx VDMA channel structure | 846 | * @data: Pointer to the Xilinx DMA channel structure |
847 | * | 847 | * |
848 | * Return: IRQ_HANDLED/IRQ_NONE | 848 | * Return: IRQ_HANDLED/IRQ_NONE |
849 | */ | 849 | */ |
850 | static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) | 850 | static irqreturn_t xilinx_dma_irq_handler(int irq, void *data) |
851 | { | 851 | { |
852 | struct xilinx_vdma_chan *chan = data; | 852 | struct xilinx_dma_chan *chan = data; |
853 | u32 status; | 853 | u32 status; |
854 | 854 | ||
855 | /* Read the status and ack the interrupts. */ | 855 | /* Read the status and ack the interrupts. */ |
856 | status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR); | 856 | status = dma_ctrl_read(chan, XILINX_DMA_REG_DMASR); |
857 | if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK)) | 857 | if (!(status & XILINX_DMA_DMAXR_ALL_IRQ_MASK)) |
858 | return IRQ_NONE; | 858 | return IRQ_NONE; |
859 | 859 | ||
860 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, | 860 | dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, |
861 | status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | 861 | status & XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
862 | 862 | ||
863 | if (status & XILINX_VDMA_DMASR_ERR_IRQ) { | 863 | if (status & XILINX_DMA_DMASR_ERR_IRQ) { |
864 | /* | 864 | /* |
865 | * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the | 865 | * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the |
866 | * error is recoverable, ignore it. Otherwise flag the error. | 866 | * error is recoverable, ignore it. Otherwise flag the error. |
@@ -868,23 +868,23 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
868 | * Only recoverable errors can be cleared in the DMASR register, | 868 | * Only recoverable errors can be cleared in the DMASR register, |
869 | * make sure not to write to other error bits to 1. | 869 | * make sure not to write to other error bits to 1. |
870 | */ | 870 | */ |
871 | u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK; | 871 | u32 errors = status & XILINX_DMA_DMASR_ALL_ERR_MASK; |
872 | 872 | ||
873 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, | 873 | dma_ctrl_write(chan, XILINX_DMA_REG_DMASR, |
874 | errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK); | 874 | errors & XILINX_DMA_DMASR_ERR_RECOVER_MASK); |
875 | 875 | ||
876 | if (!chan->flush_on_fsync || | 876 | if (!chan->flush_on_fsync || |
877 | (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) { | 877 | (errors & ~XILINX_DMA_DMASR_ERR_RECOVER_MASK)) { |
878 | dev_err(chan->dev, | 878 | dev_err(chan->dev, |
879 | "Channel %p has errors %x, cdr %x tdr %x\n", | 879 | "Channel %p has errors %x, cdr %x tdr %x\n", |
880 | chan, errors, | 880 | chan, errors, |
881 | vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC), | 881 | dma_ctrl_read(chan, XILINX_DMA_REG_CURDESC), |
882 | vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC)); | 882 | dma_ctrl_read(chan, XILINX_DMA_REG_TAILDESC)); |
883 | chan->err = true; | 883 | chan->err = true; |
884 | } | 884 | } |
885 | } | 885 | } |
886 | 886 | ||
887 | if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) { | 887 | if (status & XILINX_DMA_DMASR_DLY_CNT_IRQ) { |
888 | /* | 888 | /* |
889 | * Device takes too long to do the transfer when user requires | 889 | * Device takes too long to do the transfer when user requires |
890 | * responsiveness. | 890 | * responsiveness. |
@@ -892,9 +892,9 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
892 | dev_dbg(chan->dev, "Inter-packet latency too long\n"); | 892 | dev_dbg(chan->dev, "Inter-packet latency too long\n"); |
893 | } | 893 | } |
894 | 894 | ||
895 | if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) { | 895 | if (status & XILINX_DMA_DMASR_FRM_CNT_IRQ) { |
896 | spin_lock(&chan->lock); | 896 | spin_lock(&chan->lock); |
897 | xilinx_vdma_complete_descriptor(chan); | 897 | xilinx_dma_complete_descriptor(chan); |
898 | xilinx_vdma_start_transfer(chan); | 898 | xilinx_vdma_start_transfer(chan); |
899 | spin_unlock(&chan->lock); | 899 | spin_unlock(&chan->lock); |
900 | } | 900 | } |
@@ -908,11 +908,11 @@ static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data)
908 | * @chan: Driver specific dma channel | 908 | * @chan: Driver specific dma channel |
909 | * @desc: dma transaction descriptor | 909 | * @desc: dma transaction descriptor |
910 | */ | 910 | */ |
911 | static void append_desc_queue(struct xilinx_vdma_chan *chan, | 911 | static void append_desc_queue(struct xilinx_dma_chan *chan, |
912 | struct xilinx_vdma_tx_descriptor *desc) | 912 | struct xilinx_dma_tx_descriptor *desc) |
913 | { | 913 | { |
914 | struct xilinx_vdma_tx_segment *tail_segment; | 914 | struct xilinx_vdma_tx_segment *tail_segment; |
915 | struct xilinx_vdma_tx_descriptor *tail_desc; | 915 | struct xilinx_dma_tx_descriptor *tail_desc; |
916 | 916 | ||
917 | if (list_empty(&chan->pending_list)) | 917 | if (list_empty(&chan->pending_list)) |
918 | goto append; | 918 | goto append; |
@@ -922,7 +922,7 @@ static void append_desc_queue(struct xilinx_vdma_chan *chan,
922 | * that already exists in memory. | 922 | * that already exists in memory. |
923 | */ | 923 | */ |
924 | tail_desc = list_last_entry(&chan->pending_list, | 924 | tail_desc = list_last_entry(&chan->pending_list, |
925 | struct xilinx_vdma_tx_descriptor, node); | 925 | struct xilinx_dma_tx_descriptor, node); |
926 | tail_segment = list_last_entry(&tail_desc->segments, | 926 | tail_segment = list_last_entry(&tail_desc->segments, |
927 | struct xilinx_vdma_tx_segment, node); | 927 | struct xilinx_vdma_tx_segment, node); |
928 | tail_segment->hw.next_desc = (u32)desc->async_tx.phys; | 928 | tail_segment->hw.next_desc = (u32)desc->async_tx.phys; |
@@ -943,15 +943,15 @@ append:
943 | } | 943 | } |
944 | 944 | ||
945 | /** | 945 | /** |
946 | * xilinx_vdma_tx_submit - Submit DMA transaction | 946 | * xilinx_dma_tx_submit - Submit DMA transaction |
947 | * @tx: Async transaction descriptor | 947 | * @tx: Async transaction descriptor |
948 | * | 948 | * |
949 | * Return: cookie value on success and failure value on error | 949 | * Return: cookie value on success and failure value on error |
950 | */ | 950 | */ |
951 | static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx) | 951 | static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx) |
952 | { | 952 | { |
953 | struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx); | 953 | struct xilinx_dma_tx_descriptor *desc = to_dma_tx_descriptor(tx); |
954 | struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan); | 954 | struct xilinx_dma_chan *chan = to_xilinx_chan(tx->chan); |
955 | dma_cookie_t cookie; | 955 | dma_cookie_t cookie; |
956 | unsigned long flags; | 956 | unsigned long flags; |
957 | int err; | 957 | int err; |
@@ -961,7 +961,7 @@ static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx)
961 | * If reset fails, need to hard reset the system. | 961 | * If reset fails, need to hard reset the system. |
962 | * Channel is no longer functional | 962 | * Channel is no longer functional |
963 | */ | 963 | */ |
964 | err = xilinx_vdma_chan_reset(chan); | 964 | err = xilinx_dma_chan_reset(chan); |
965 | if (err < 0) | 965 | if (err < 0) |
966 | return err; | 966 | return err; |
967 | } | 967 | } |
@@ -992,8 +992,8 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
992 | struct dma_interleaved_template *xt, | 992 | struct dma_interleaved_template *xt, |
993 | unsigned long flags) | 993 | unsigned long flags) |
994 | { | 994 | { |
995 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 995 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
996 | struct xilinx_vdma_tx_descriptor *desc; | 996 | struct xilinx_dma_tx_descriptor *desc; |
997 | struct xilinx_vdma_tx_segment *segment, *prev = NULL; | 997 | struct xilinx_vdma_tx_segment *segment, *prev = NULL; |
998 | struct xilinx_vdma_desc_hw *hw; | 998 | struct xilinx_vdma_desc_hw *hw; |
999 | 999 | ||
@@ -1007,12 +1007,12 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1007 | return NULL; | 1007 | return NULL; |
1008 | 1008 | ||
1009 | /* Allocate a transaction descriptor. */ | 1009 | /* Allocate a transaction descriptor. */ |
1010 | desc = xilinx_vdma_alloc_tx_descriptor(chan); | 1010 | desc = xilinx_dma_alloc_tx_descriptor(chan); |
1011 | if (!desc) | 1011 | if (!desc) |
1012 | return NULL; | 1012 | return NULL; |
1013 | 1013 | ||
1014 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | 1014 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); |
1015 | desc->async_tx.tx_submit = xilinx_vdma_tx_submit; | 1015 | desc->async_tx.tx_submit = xilinx_dma_tx_submit; |
1016 | async_tx_ack(&desc->async_tx); | 1016 | async_tx_ack(&desc->async_tx); |
1017 | 1017 | ||
1018 | /* Allocate the link descriptor from DMA pool */ | 1018 | /* Allocate the link descriptor from DMA pool */ |
@@ -1025,9 +1025,9 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1025 | hw->vsize = xt->numf; | 1025 | hw->vsize = xt->numf; |
1026 | hw->hsize = xt->sgl[0].size; | 1026 | hw->hsize = xt->sgl[0].size; |
1027 | hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << | 1027 | hw->stride = (xt->sgl[0].icg + xt->sgl[0].size) << |
1028 | XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT; | 1028 | XILINX_DMA_FRMDLY_STRIDE_STRIDE_SHIFT; |
1029 | hw->stride |= chan->config.frm_dly << | 1029 | hw->stride |= chan->config.frm_dly << |
1030 | XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT; | 1030 | XILINX_DMA_FRMDLY_STRIDE_FRMDLY_SHIFT; |
1031 | 1031 | ||
1032 | if (xt->dir != DMA_MEM_TO_DEV) { | 1032 | if (xt->dir != DMA_MEM_TO_DEV) { |
1033 | if (chan->ext_addr) { | 1033 | if (chan->ext_addr) { |
@@ -1058,29 +1058,29 @@ xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan,
1058 | return &desc->async_tx; | 1058 | return &desc->async_tx; |
1059 | 1059 | ||
1060 | error: | 1060 | error: |
1061 | xilinx_vdma_free_tx_descriptor(chan, desc); | 1061 | xilinx_dma_free_tx_descriptor(chan, desc); |
1062 | return NULL; | 1062 | return NULL; |
1063 | } | 1063 | } |
1064 | 1064 | ||
1065 | /** | 1065 | /** |
1066 | * xilinx_vdma_terminate_all - Halt the channel and free descriptors | 1066 | * xilinx_dma_terminate_all - Halt the channel and free descriptors |
1067 | * @chan: Driver specific VDMA Channel pointer | 1067 | * @chan: Driver specific DMA Channel pointer |
1068 | */ | 1068 | */ |
1069 | static int xilinx_vdma_terminate_all(struct dma_chan *dchan) | 1069 | static int xilinx_dma_terminate_all(struct dma_chan *dchan) |
1070 | { | 1070 | { |
1071 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 1071 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
1072 | 1072 | ||
1073 | /* Halt the DMA engine */ | 1073 | /* Halt the DMA engine */ |
1074 | xilinx_vdma_halt(chan); | 1074 | xilinx_dma_halt(chan); |
1075 | 1075 | ||
1076 | /* Remove and free all of the descriptors in the lists */ | 1076 | /* Remove and free all of the descriptors in the lists */ |
1077 | xilinx_vdma_free_descriptors(chan); | 1077 | xilinx_dma_free_descriptors(chan); |
1078 | 1078 | ||
1079 | return 0; | 1079 | return 0; |
1080 | } | 1080 | } |
1081 | 1081 | ||
1082 | /** | 1082 | /** |
1083 | * xilinx_vdma_channel_set_config - Configure VDMA channel | 1083 | * xilinx_dma_channel_set_config - Configure VDMA channel |
1084 | * Run-time configuration for Axi VDMA, supports: | 1084 | * Run-time configuration for Axi VDMA, supports: |
1085 | * . halt the channel | 1085 | * . halt the channel |
1086 | * . configure interrupt coalescing and inter-packet delay threshold | 1086 | * . configure interrupt coalescing and inter-packet delay threshold |
@@ -1095,13 +1095,13 @@ static int xilinx_vdma_terminate_all(struct dma_chan *dchan)
1095 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | 1095 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, |
1096 | struct xilinx_vdma_config *cfg) | 1096 | struct xilinx_vdma_config *cfg) |
1097 | { | 1097 | { |
1098 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | 1098 | struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); |
1099 | u32 dmacr; | 1099 | u32 dmacr; |
1100 | 1100 | ||
1101 | if (cfg->reset) | 1101 | if (cfg->reset) |
1102 | return xilinx_vdma_chan_reset(chan); | 1102 | return xilinx_dma_chan_reset(chan); |
1103 | 1103 | ||
1104 | dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); | 1104 | dmacr = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); |
1105 | 1105 | ||
1106 | chan->config.frm_dly = cfg->frm_dly; | 1106 | chan->config.frm_dly = cfg->frm_dly; |
1107 | chan->config.park = cfg->park; | 1107 | chan->config.park = cfg->park; |
@@ -1111,8 +1111,8 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | |||
1111 | chan->config.master = cfg->master; | 1111 | chan->config.master = cfg->master; |
1112 | 1112 | ||
1113 | if (cfg->gen_lock && chan->genlock) { | 1113 | if (cfg->gen_lock && chan->genlock) { |
1114 | dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN; | 1114 | dmacr |= XILINX_DMA_DMACR_GENLOCK_EN; |
1115 | dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT; | 1115 | dmacr |= cfg->master << XILINX_DMA_DMACR_MASTER_SHIFT; |
1116 | } | 1116 | } |
1117 | 1117 | ||
1118 | chan->config.frm_cnt_en = cfg->frm_cnt_en; | 1118 | chan->config.frm_cnt_en = cfg->frm_cnt_en; |
@@ -1124,21 +1124,21 @@ int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | |||
1124 | chan->config.coalesc = cfg->coalesc; | 1124 | chan->config.coalesc = cfg->coalesc; |
1125 | chan->config.delay = cfg->delay; | 1125 | chan->config.delay = cfg->delay; |
1126 | 1126 | ||
1127 | if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) { | 1127 | if (cfg->coalesc <= XILINX_DMA_DMACR_FRAME_COUNT_MAX) { |
1128 | dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT; | 1128 | dmacr |= cfg->coalesc << XILINX_DMA_DMACR_FRAME_COUNT_SHIFT; |
1129 | chan->config.coalesc = cfg->coalesc; | 1129 | chan->config.coalesc = cfg->coalesc; |
1130 | } | 1130 | } |
1131 | 1131 | ||
1132 | if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) { | 1132 | if (cfg->delay <= XILINX_DMA_DMACR_DELAY_MAX) { |
1133 | dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT; | 1133 | dmacr |= cfg->delay << XILINX_DMA_DMACR_DELAY_SHIFT; |
1134 | chan->config.delay = cfg->delay; | 1134 | chan->config.delay = cfg->delay; |
1135 | } | 1135 | } |
1136 | 1136 | ||
1137 | /* FSync Source selection */ | 1137 | /* FSync Source selection */ |
1138 | dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK; | 1138 | dmacr &= ~XILINX_DMA_DMACR_FSYNCSRC_MASK; |
1139 | dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT; | 1139 | dmacr |= cfg->ext_fsync << XILINX_DMA_DMACR_FSYNCSRC_SHIFT; |
1140 | 1140 | ||
1141 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr); | 1141 | dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, dmacr); |
1142 | 1142 | ||
1143 | return 0; | 1143 | return 0; |
1144 | } | 1144 | } |
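Note that this helper keeps its xilinx_vdma_ name on both sides of the rename: it is exported to clients and is VDMA-specific. A hedged usage sketch with assumed values (interrupt after every frame, no inter-packet delay, internal frame sync); the field names follow the cfg-> accesses in the function above:

	struct xilinx_vdma_config cfg = {
		.frm_cnt_en = 1,
		.coalesc    = 1,	/* raise an interrupt per frame */
		.delay      = 0,	/* disable the delay-count interrupt */
		.ext_fsync  = 0,	/* internal frame-sync source */
	};
	int ret;

	ret = xilinx_vdma_channel_set_config(chan, &cfg);
	if (ret)
		pr_err("VDMA channel config failed: %d\n", ret);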
@@ -1149,14 +1149,14 @@ EXPORT_SYMBOL(xilinx_vdma_channel_set_config); | |||
1149 | */ | 1149 | */ |
1150 | 1150 | ||
1151 | /** | 1151 | /** |
1152 | * xilinx_vdma_chan_remove - Per Channel remove function | 1152 | * xilinx_dma_chan_remove - Per Channel remove function |
1153 | * @chan: Driver specific VDMA channel | 1153 | * @chan: Driver specific DMA channel |
1154 | */ | 1154 | */ |
1155 | static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) | 1155 | static void xilinx_dma_chan_remove(struct xilinx_dma_chan *chan) |
1156 | { | 1156 | { |
1157 | /* Disable all interrupts */ | 1157 | /* Disable all interrupts */ |
1158 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, | 1158 | dma_ctrl_clr(chan, XILINX_DMA_REG_DMACR, |
1159 | XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | 1159 | XILINX_DMA_DMAXR_ALL_IRQ_MASK); |
1160 | 1160 | ||
1161 | if (chan->irq > 0) | 1161 | if (chan->irq > 0) |
1162 | free_irq(chan->irq, chan); | 1162 | free_irq(chan->irq, chan); |
@@ -1167,7 +1167,7 @@ static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) | |||
1167 | } | 1167 | } |
1168 | 1168 | ||
1169 | /** | 1169 | /** |
1170 | * xilinx_vdma_chan_probe - Per Channel Probing | 1170 | * xilinx_dma_chan_probe - Per Channel Probing |
1171 | * It get channel features from the device tree entry and | 1171 | * It get channel features from the device tree entry and |
1172 | * initialize special channel handling routines | 1172 | * initialize special channel handling routines |
1173 | * | 1173 | * |
@@ -1176,10 +1176,10 @@ static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) | |||
1176 | * | 1176 | * |
1177 | * Return: '0' on success and failure value on error | 1177 | * Return: '0' on success and failure value on error |
1178 | */ | 1178 | */ |
1179 | static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | 1179 | static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, |
1180 | struct device_node *node) | 1180 | struct device_node *node) |
1181 | { | 1181 | { |
1182 | struct xilinx_vdma_chan *chan; | 1182 | struct xilinx_dma_chan *chan; |
1183 | bool has_dre = false; | 1183 | bool has_dre = false; |
1184 | u32 value, width; | 1184 | u32 value, width; |
1185 | int err; | 1185 | int err; |
@@ -1223,22 +1223,22 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1223 | chan->direction = DMA_MEM_TO_DEV; | 1223 | chan->direction = DMA_MEM_TO_DEV; |
1224 | chan->id = 0; | 1224 | chan->id = 0; |
1225 | 1225 | ||
1226 | chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET; | 1226 | chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; |
1227 | chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; | 1227 | chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; |
1228 | 1228 | ||
1229 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | 1229 | if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || |
1230 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S) | 1230 | xdev->flush_on_fsync == XILINX_DMA_FLUSH_MM2S) |
1231 | chan->flush_on_fsync = true; | 1231 | chan->flush_on_fsync = true; |
1232 | } else if (of_device_is_compatible(node, | 1232 | } else if (of_device_is_compatible(node, |
1233 | "xlnx,axi-vdma-s2mm-channel")) { | 1233 | "xlnx,axi-vdma-s2mm-channel")) { |
1234 | chan->direction = DMA_DEV_TO_MEM; | 1234 | chan->direction = DMA_DEV_TO_MEM; |
1235 | chan->id = 1; | 1235 | chan->id = 1; |
1236 | 1236 | ||
1237 | chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET; | 1237 | chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; |
1238 | chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; | 1238 | chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; |
1239 | 1239 | ||
1240 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | 1240 | if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH || |
1241 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM) | 1241 | xdev->flush_on_fsync == XILINX_DMA_FLUSH_S2MM) |
1242 | chan->flush_on_fsync = true; | 1242 | chan->flush_on_fsync = true; |
1243 | } else { | 1243 | } else { |
1244 | dev_err(xdev->dev, "Invalid channel compatible node\n"); | 1244 | dev_err(xdev->dev, "Invalid channel compatible node\n"); |
@@ -1247,15 +1247,15 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1247 | 1247 | ||
1248 | /* Request the interrupt */ | 1248 | /* Request the interrupt */ |
1249 | chan->irq = irq_of_parse_and_map(node, 0); | 1249 | chan->irq = irq_of_parse_and_map(node, 0); |
1250 | err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED, | 1250 | err = request_irq(chan->irq, xilinx_dma_irq_handler, IRQF_SHARED, |
1251 | "xilinx-vdma-controller", chan); | 1251 | "xilinx-dma-controller", chan); |
1252 | if (err) { | 1252 | if (err) { |
1253 | dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); | 1253 | dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); |
1254 | return err; | 1254 | return err; |
1255 | } | 1255 | } |
1256 | 1256 | ||
1257 | /* Initialize the tasklet */ | 1257 | /* Initialize the tasklet */ |
1258 | tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet, | 1258 | tasklet_init(&chan->tasklet, xilinx_dma_do_tasklet, |
1259 | (unsigned long)chan); | 1259 | (unsigned long)chan); |
1260 | 1260 | ||
1261 | /* | 1261 | /* |
@@ -1268,7 +1268,7 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1268 | xdev->chan[chan->id] = chan; | 1268 | xdev->chan[chan->id] = chan; |
1269 | 1269 | ||
1270 | /* Reset the channel */ | 1270 | /* Reset the channel */ |
1271 | err = xilinx_vdma_chan_reset(chan); | 1271 | err = xilinx_dma_chan_reset(chan); |
1272 | if (err < 0) { | 1272 | if (err < 0) { |
1273 | dev_err(xdev->dev, "Reset channel failed\n"); | 1273 | dev_err(xdev->dev, "Reset channel failed\n"); |
1274 | return err; | 1274 | return err; |
@@ -1287,25 +1287,25 @@ static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | |||
1287 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, | 1287 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, |
1288 | struct of_dma *ofdma) | 1288 | struct of_dma *ofdma) |
1289 | { | 1289 | { |
1290 | struct xilinx_vdma_device *xdev = ofdma->of_dma_data; | 1290 | struct xilinx_dma_device *xdev = ofdma->of_dma_data; |
1291 | int chan_id = dma_spec->args[0]; | 1291 | int chan_id = dma_spec->args[0]; |
1292 | 1292 | ||
1293 | if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE) | 1293 | if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE) |
1294 | return NULL; | 1294 | return NULL; |
1295 | 1295 | ||
1296 | return dma_get_slave_channel(&xdev->chan[chan_id]->common); | 1296 | return dma_get_slave_channel(&xdev->chan[chan_id]->common); |
1297 | } | 1297 | } |
1298 | 1298 | ||
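of_dma_xilinx_xlate() turns the single cell of a consumer's dmas phandle into a channel index, which per the channel probe code below is 0 for MM2S and 1 for S2MM. A consumer-side sketch; the device pointer and the "vdma0" name are assumptions that must match the consumer's dma-names property:

	/* The name is resolved via the "dmas"/"dma-names" properties; the
	 * phandle argument becomes dma_spec->args[0] above. */
	struct dma_chan *chan;

	chan = dma_request_slave_channel(&pdev->dev, "vdma0");
	if (!chan)
		return -EPROBE_DEFER;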
1299 | /** | 1299 | /** |
1300 | * xilinx_vdma_probe - Driver probe function | 1300 | * xilinx_dma_probe - Driver probe function |
1301 | * @pdev: Pointer to the platform_device structure | 1301 | * @pdev: Pointer to the platform_device structure |
1302 | * | 1302 | * |
1303 | * Return: '0' on success and failure value on error | 1303 | * Return: '0' on success and failure value on error |
1304 | */ | 1304 | */ |
1305 | static int xilinx_vdma_probe(struct platform_device *pdev) | 1305 | static int xilinx_dma_probe(struct platform_device *pdev) |
1306 | { | 1306 | { |
1307 | struct device_node *node = pdev->dev.of_node; | 1307 | struct device_node *node = pdev->dev.of_node; |
1308 | struct xilinx_vdma_device *xdev; | 1308 | struct xilinx_dma_device *xdev; |
1309 | struct device_node *child; | 1309 | struct device_node *child; |
1310 | struct resource *io; | 1310 | struct resource *io; |
1311 | u32 num_frames, addr_width; | 1311 | u32 num_frames, addr_width; |
@@ -1358,25 +1358,25 @@ static int xilinx_vdma_probe(struct platform_device *pdev) | |||
1358 | dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); | 1358 | dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); |
1359 | 1359 | ||
1360 | xdev->common.device_alloc_chan_resources = | 1360 | xdev->common.device_alloc_chan_resources = |
1361 | xilinx_vdma_alloc_chan_resources; | 1361 | xilinx_dma_alloc_chan_resources; |
1362 | xdev->common.device_free_chan_resources = | 1362 | xdev->common.device_free_chan_resources = |
1363 | xilinx_vdma_free_chan_resources; | 1363 | xilinx_dma_free_chan_resources; |
1364 | xdev->common.device_prep_interleaved_dma = | 1364 | xdev->common.device_prep_interleaved_dma = |
1365 | xilinx_vdma_dma_prep_interleaved; | 1365 | xilinx_vdma_dma_prep_interleaved; |
1366 | xdev->common.device_terminate_all = xilinx_vdma_terminate_all; | 1366 | xdev->common.device_terminate_all = xilinx_dma_terminate_all; |
1367 | xdev->common.device_tx_status = xilinx_vdma_tx_status; | 1367 | xdev->common.device_tx_status = xilinx_dma_tx_status; |
1368 | xdev->common.device_issue_pending = xilinx_vdma_issue_pending; | 1368 | xdev->common.device_issue_pending = xilinx_dma_issue_pending; |
1369 | 1369 | ||
1370 | platform_set_drvdata(pdev, xdev); | 1370 | platform_set_drvdata(pdev, xdev); |
1371 | 1371 | ||
1372 | /* Initialize the channels */ | 1372 | /* Initialize the channels */ |
1373 | for_each_child_of_node(node, child) { | 1373 | for_each_child_of_node(node, child) { |
1374 | err = xilinx_vdma_chan_probe(xdev, child); | 1374 | err = xilinx_dma_chan_probe(xdev, child); |
1375 | if (err < 0) | 1375 | if (err < 0) |
1376 | goto error; | 1376 | goto error; |
1377 | } | 1377 | } |
1378 | 1378 | ||
1379 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | 1379 | for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) |
1380 | if (xdev->chan[i]) | 1380 | if (xdev->chan[i]) |
1381 | xdev->chan[i]->num_frms = num_frames; | 1381 | xdev->chan[i]->num_frms = num_frames; |
1382 | 1382 | ||
@@ -1396,48 +1396,48 @@ static int xilinx_vdma_probe(struct platform_device *pdev) | |||
1396 | return 0; | 1396 | return 0; |
1397 | 1397 | ||
1398 | error: | 1398 | error: |
1399 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | 1399 | for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) |
1400 | if (xdev->chan[i]) | 1400 | if (xdev->chan[i]) |
1401 | xilinx_vdma_chan_remove(xdev->chan[i]); | 1401 | xilinx_dma_chan_remove(xdev->chan[i]); |
1402 | 1402 | ||
1403 | return err; | 1403 | return err; |
1404 | } | 1404 | } |
1405 | 1405 | ||
1406 | /** | 1406 | /** |
1407 | * xilinx_vdma_remove - Driver remove function | 1407 | * xilinx_dma_remove - Driver remove function |
1408 | * @pdev: Pointer to the platform_device structure | 1408 | * @pdev: Pointer to the platform_device structure |
1409 | * | 1409 | * |
1410 | * Return: Always '0' | 1410 | * Return: Always '0' |
1411 | */ | 1411 | */ |
1412 | static int xilinx_vdma_remove(struct platform_device *pdev) | 1412 | static int xilinx_dma_remove(struct platform_device *pdev) |
1413 | { | 1413 | { |
1414 | struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev); | 1414 | struct xilinx_dma_device *xdev = platform_get_drvdata(pdev); |
1415 | int i; | 1415 | int i; |
1416 | 1416 | ||
1417 | of_dma_controller_free(pdev->dev.of_node); | 1417 | of_dma_controller_free(pdev->dev.of_node); |
1418 | 1418 | ||
1419 | dma_async_device_unregister(&xdev->common); | 1419 | dma_async_device_unregister(&xdev->common); |
1420 | 1420 | ||
1421 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | 1421 | for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) |
1422 | if (xdev->chan[i]) | 1422 | if (xdev->chan[i]) |
1423 | xilinx_vdma_chan_remove(xdev->chan[i]); | 1423 | xilinx_dma_chan_remove(xdev->chan[i]); |
1424 | 1424 | ||
1425 | return 0; | 1425 | return 0; |
1426 | } | 1426 | } |
1427 | 1427 | ||
1428 | static const struct of_device_id xilinx_vdma_of_ids[] = { | 1428 | static const struct of_device_id xilinx_dma_of_ids[] = { |
1429 | { .compatible = "xlnx,axi-vdma-1.00.a",}, | 1429 | { .compatible = "xlnx,axi-vdma-1.00.a",}, |
1430 | {} | 1430 | {} |
1431 | }; | 1431 | }; |
1432 | MODULE_DEVICE_TABLE(of, xilinx_vdma_of_ids); | 1432 | MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids); |
1433 | 1433 | ||
1434 | static struct platform_driver xilinx_vdma_driver = { | 1434 | static struct platform_driver xilinx_vdma_driver = { |
1435 | .driver = { | 1435 | .driver = { |
1436 | .name = "xilinx-vdma", | 1436 | .name = "xilinx-vdma", |
1437 | .of_match_table = xilinx_vdma_of_ids, | 1437 | .of_match_table = xilinx_dma_of_ids, |
1438 | }, | 1438 | }, |
1439 | .probe = xilinx_vdma_probe, | 1439 | .probe = xilinx_dma_probe, |
1440 | .remove = xilinx_vdma_remove, | 1440 | .remove = xilinx_dma_remove, |
1441 | }; | 1441 | }; |
1442 | 1442 | ||
1443 | module_platform_driver(xilinx_vdma_driver); | 1443 | module_platform_driver(xilinx_vdma_driver); |
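Putting the renamed pieces together, a typical client sequence against this driver might look as follows (all names assumed, error handling trimmed); the comments point at the callbacks wired up in the probe function above:

	chan = dma_request_slave_channel(dev, "vdma0");		/* via of_dma_xilinx_xlate() */
	xilinx_vdma_channel_set_config(chan, &cfg);		/* optional run-time tuning */
	tx = dmaengine_prep_interleaved_dma(chan, xt, DMA_CTRL_ACK);
	dmaengine_submit(tx);					/* xilinx_dma_tx_submit() */
	dma_async_issue_pending(chan);				/* xilinx_dma_issue_pending() */
	/* ... transfers run ... */
	dmaengine_terminate_all(chan);				/* xilinx_dma_terminate_all() */
	dma_release_channel(chan);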