Diffstat (limited to 'drivers/dma/xilinx/xilinx_vdma.c')
-rw-r--r-- | drivers/dma/xilinx/xilinx_vdma.c | 1379 |
1 file changed, 1379 insertions, 0 deletions
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
new file mode 100644
index 000000000000..42a13e8d4607
--- /dev/null
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -0,0 +1,1379 @@
1 | /* | ||
2 | * DMA driver for Xilinx Video DMA Engine | ||
3 | * | ||
4 | * Copyright (C) 2010-2014 Xilinx, Inc. All rights reserved. | ||
5 | * | ||
6 | * Based on the Freescale DMA driver. | ||
7 | * | ||
8 | * Description: | ||
9 | * The AXI Video Direct Memory Access (AXI VDMA) core is a soft Xilinx IP | ||
10 | * core that provides high-bandwidth direct memory access between memory | ||
11 | * and AXI4-Stream type video target peripherals. The core provides efficient | ||
12 | * two-dimensional DMA operations with independent asynchronous read (MM2S) | ||
13 | * and write (S2MM) channel operation. It can be configured to have either | ||
14 | * one channel or two channels. If configured as two channels, one is to | ||
15 | * transmit to the video device (MM2S) and another is to receive from the | ||
16 | * video device (S2MM). Initialization, status, interrupt and management | ||
17 | * registers are accessed through an AXI4-Lite slave interface. | ||
18 | * | ||
19 | * This program is free software: you can redistribute it and/or modify | ||
20 | * it under the terms of the GNU General Public License as published by | ||
21 | * the Free Software Foundation, either version 2 of the License, or | ||
22 | * (at your option) any later version. | ||
23 | */ | ||
24 | |||
25 | #include <linux/amba/xilinx_dma.h> | ||
26 | #include <linux/bitops.h> | ||
27 | #include <linux/dmapool.h> | ||
28 | #include <linux/init.h> | ||
29 | #include <linux/interrupt.h> | ||
30 | #include <linux/io.h> | ||
31 | #include <linux/module.h> | ||
32 | #include <linux/of_address.h> | ||
33 | #include <linux/of_dma.h> | ||
34 | #include <linux/of_platform.h> | ||
35 | #include <linux/of_irq.h> | ||
36 | #include <linux/slab.h> | ||
37 | |||
38 | #include "../dmaengine.h" | ||
39 | |||
40 | /* Register/Descriptor Offsets */ | ||
41 | #define XILINX_VDMA_MM2S_CTRL_OFFSET 0x0000 | ||
42 | #define XILINX_VDMA_S2MM_CTRL_OFFSET 0x0030 | ||
43 | #define XILINX_VDMA_MM2S_DESC_OFFSET 0x0050 | ||
44 | #define XILINX_VDMA_S2MM_DESC_OFFSET 0x00a0 | ||
45 | |||
46 | /* Control Registers */ | ||
47 | #define XILINX_VDMA_REG_DMACR 0x0000 | ||
48 | #define XILINX_VDMA_DMACR_DELAY_MAX 0xff | ||
49 | #define XILINX_VDMA_DMACR_DELAY_SHIFT 24 | ||
50 | #define XILINX_VDMA_DMACR_FRAME_COUNT_MAX 0xff | ||
51 | #define XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT 16 | ||
52 | #define XILINX_VDMA_DMACR_ERR_IRQ BIT(14) | ||
53 | #define XILINX_VDMA_DMACR_DLY_CNT_IRQ BIT(13) | ||
54 | #define XILINX_VDMA_DMACR_FRM_CNT_IRQ BIT(12) | ||
55 | #define XILINX_VDMA_DMACR_MASTER_SHIFT 8 | ||
56 | #define XILINX_VDMA_DMACR_FSYNCSRC_SHIFT 5 | ||
57 | #define XILINX_VDMA_DMACR_FRAMECNT_EN BIT(4) | ||
58 | #define XILINX_VDMA_DMACR_GENLOCK_EN BIT(3) | ||
59 | #define XILINX_VDMA_DMACR_RESET BIT(2) | ||
60 | #define XILINX_VDMA_DMACR_CIRC_EN BIT(1) | ||
61 | #define XILINX_VDMA_DMACR_RUNSTOP BIT(0) | ||
62 | #define XILINX_VDMA_DMACR_FSYNCSRC_MASK GENMASK(6, 5) | ||
63 | |||
64 | #define XILINX_VDMA_REG_DMASR 0x0004 | ||
65 | #define XILINX_VDMA_DMASR_EOL_LATE_ERR BIT(15) | ||
66 | #define XILINX_VDMA_DMASR_ERR_IRQ BIT(14) | ||
67 | #define XILINX_VDMA_DMASR_DLY_CNT_IRQ BIT(13) | ||
68 | #define XILINX_VDMA_DMASR_FRM_CNT_IRQ BIT(12) | ||
69 | #define XILINX_VDMA_DMASR_SOF_LATE_ERR BIT(11) | ||
70 | #define XILINX_VDMA_DMASR_SG_DEC_ERR BIT(10) | ||
71 | #define XILINX_VDMA_DMASR_SG_SLV_ERR BIT(9) | ||
72 | #define XILINX_VDMA_DMASR_EOF_EARLY_ERR BIT(8) | ||
73 | #define XILINX_VDMA_DMASR_SOF_EARLY_ERR BIT(7) | ||
74 | #define XILINX_VDMA_DMASR_DMA_DEC_ERR BIT(6) | ||
75 | #define XILINX_VDMA_DMASR_DMA_SLAVE_ERR BIT(5) | ||
76 | #define XILINX_VDMA_DMASR_DMA_INT_ERR BIT(4) | ||
77 | #define XILINX_VDMA_DMASR_IDLE BIT(1) | ||
78 | #define XILINX_VDMA_DMASR_HALTED BIT(0) | ||
79 | #define XILINX_VDMA_DMASR_DELAY_MASK GENMASK(31, 24) | ||
80 | #define XILINX_VDMA_DMASR_FRAME_COUNT_MASK GENMASK(23, 16) | ||
81 | |||
82 | #define XILINX_VDMA_REG_CURDESC 0x0008 | ||
83 | #define XILINX_VDMA_REG_TAILDESC 0x0010 | ||
84 | #define XILINX_VDMA_REG_REG_INDEX 0x0014 | ||
85 | #define XILINX_VDMA_REG_FRMSTORE 0x0018 | ||
86 | #define XILINX_VDMA_REG_THRESHOLD 0x001c | ||
87 | #define XILINX_VDMA_REG_FRMPTR_STS 0x0024 | ||
88 | #define XILINX_VDMA_REG_PARK_PTR 0x0028 | ||
89 | #define XILINX_VDMA_PARK_PTR_WR_REF_SHIFT 8 | ||
90 | #define XILINX_VDMA_PARK_PTR_RD_REF_SHIFT 0 | ||
91 | #define XILINX_VDMA_REG_VDMA_VERSION 0x002c | ||
92 | |||
93 | /* Register Direct Mode Registers */ | ||
94 | #define XILINX_VDMA_REG_VSIZE 0x0000 | ||
95 | #define XILINX_VDMA_REG_HSIZE 0x0004 | ||
96 | |||
97 | #define XILINX_VDMA_REG_FRMDLY_STRIDE 0x0008 | ||
98 | #define XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT 24 | ||
99 | #define XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT 0 | ||
100 | |||
101 | #define XILINX_VDMA_REG_START_ADDRESS(n) (0x000c + 4 * (n)) | ||
102 | |||
103 | /* HW specific definitions */ | ||
104 | #define XILINX_VDMA_MAX_CHANS_PER_DEVICE 0x2 | ||
105 | |||
106 | #define XILINX_VDMA_DMAXR_ALL_IRQ_MASK \ | ||
107 | (XILINX_VDMA_DMASR_FRM_CNT_IRQ | \ | ||
108 | XILINX_VDMA_DMASR_DLY_CNT_IRQ | \ | ||
109 | XILINX_VDMA_DMASR_ERR_IRQ) | ||
110 | |||
111 | #define XILINX_VDMA_DMASR_ALL_ERR_MASK \ | ||
112 | (XILINX_VDMA_DMASR_EOL_LATE_ERR | \ | ||
113 | XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | ||
114 | XILINX_VDMA_DMASR_SG_DEC_ERR | \ | ||
115 | XILINX_VDMA_DMASR_SG_SLV_ERR | \ | ||
116 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | ||
117 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | ||
118 | XILINX_VDMA_DMASR_DMA_DEC_ERR | \ | ||
119 | XILINX_VDMA_DMASR_DMA_SLAVE_ERR | \ | ||
120 | XILINX_VDMA_DMASR_DMA_INT_ERR) | ||
121 | |||
122 | /* | ||
123 | * Recoverable errors are DMA Internal error, SOF Early, EOF Early | ||
124 | * and SOF Late. They are only recoverable when C_FLUSH_ON_FSYNC | ||
125 | * is enabled in the h/w system. | ||
126 | */ | ||
127 | #define XILINX_VDMA_DMASR_ERR_RECOVER_MASK \ | ||
128 | (XILINX_VDMA_DMASR_SOF_LATE_ERR | \ | ||
129 | XILINX_VDMA_DMASR_EOF_EARLY_ERR | \ | ||
130 | XILINX_VDMA_DMASR_SOF_EARLY_ERR | \ | ||
131 | XILINX_VDMA_DMASR_DMA_INT_ERR) | ||
132 | |||
133 | /* Axi VDMA Flush on Fsync bits */ | ||
134 | #define XILINX_VDMA_FLUSH_S2MM 3 | ||
135 | #define XILINX_VDMA_FLUSH_MM2S 2 | ||
136 | #define XILINX_VDMA_FLUSH_BOTH 1 | ||
137 | |||
138 | /* Delay loop counter to bound polling while waiting on the hardware */ | ||
139 | #define XILINX_VDMA_LOOP_COUNT 1000000 | ||
140 | |||
141 | /** | ||
142 | * struct xilinx_vdma_desc_hw - Hardware Descriptor | ||
143 | * @next_desc: Next Descriptor Pointer @0x00 | ||
144 | * @pad1: Reserved @0x04 | ||
145 | * @buf_addr: Buffer address @0x08 | ||
146 | * @pad2: Reserved @0x0C | ||
147 | * @vsize: Vertical Size @0x10 | ||
148 | * @hsize: Horizontal Size @0x14 | ||
149 | * @stride: Number of bytes between the first | ||
150 | * pixels of each horizontal line @0x18 | ||
151 | */ | ||
152 | struct xilinx_vdma_desc_hw { | ||
153 | u32 next_desc; | ||
154 | u32 pad1; | ||
155 | u32 buf_addr; | ||
156 | u32 pad2; | ||
157 | u32 vsize; | ||
158 | u32 hsize; | ||
159 | u32 stride; | ||
160 | } __aligned(64); | ||
161 | |||
162 | /** | ||
163 | * struct xilinx_vdma_tx_segment - Descriptor segment | ||
164 | * @hw: Hardware descriptor | ||
165 | * @node: Node in the descriptor segments list | ||
166 | * @phys: Physical address of segment | ||
167 | */ | ||
168 | struct xilinx_vdma_tx_segment { | ||
169 | struct xilinx_vdma_desc_hw hw; | ||
170 | struct list_head node; | ||
171 | dma_addr_t phys; | ||
172 | } __aligned(64); | ||
173 | |||
174 | /** | ||
175 | * struct xilinx_vdma_tx_descriptor - Per Transaction structure | ||
176 | * @async_tx: Async transaction descriptor | ||
177 | * @segments: TX segments list | ||
178 | * @node: Node in the channel descriptors list | ||
179 | */ | ||
180 | struct xilinx_vdma_tx_descriptor { | ||
181 | struct dma_async_tx_descriptor async_tx; | ||
182 | struct list_head segments; | ||
183 | struct list_head node; | ||
184 | }; | ||
185 | |||
186 | /** | ||
187 | * struct xilinx_vdma_chan - Driver specific VDMA channel structure | ||
188 | * @xdev: Driver specific device structure | ||
189 | * @ctrl_offset: Control registers offset | ||
190 | * @desc_offset: TX descriptor registers offset | ||
191 | * @lock: Descriptor operation lock | ||
192 | * @pending_list: Descriptors waiting | ||
193 | * @active_desc: Active descriptor | ||
194 | * @allocated_desc: Allocated descriptor | ||
195 | * @done_list: Complete descriptors | ||
196 | * @common: DMA common channel | ||
197 | * @desc_pool: Descriptors pool | ||
198 | * @dev: The dma device | ||
199 | * @irq: Channel IRQ | ||
200 | * @id: Channel ID | ||
201 | * @direction: Transfer direction | ||
202 | * @num_frms: Number of frames | ||
203 | * @has_sg: Support scatter transfers | ||
204 | * @genlock: Support genlock mode | ||
205 | * @err: Channel has errors | ||
206 | * @tasklet: Cleanup work after irq | ||
207 | * @config: Device configuration info | ||
208 | * @flush_on_fsync: Flush on Frame sync | ||
209 | */ | ||
210 | struct xilinx_vdma_chan { | ||
211 | struct xilinx_vdma_device *xdev; | ||
212 | u32 ctrl_offset; | ||
213 | u32 desc_offset; | ||
214 | spinlock_t lock; | ||
215 | struct list_head pending_list; | ||
216 | struct xilinx_vdma_tx_descriptor *active_desc; | ||
217 | struct xilinx_vdma_tx_descriptor *allocated_desc; | ||
218 | struct list_head done_list; | ||
219 | struct dma_chan common; | ||
220 | struct dma_pool *desc_pool; | ||
221 | struct device *dev; | ||
222 | int irq; | ||
223 | int id; | ||
224 | enum dma_transfer_direction direction; | ||
225 | int num_frms; | ||
226 | bool has_sg; | ||
227 | bool genlock; | ||
228 | bool err; | ||
229 | struct tasklet_struct tasklet; | ||
230 | struct xilinx_vdma_config config; | ||
231 | bool flush_on_fsync; | ||
232 | }; | ||
233 | |||
234 | /** | ||
235 | * struct xilinx_vdma_device - VDMA device structure | ||
236 | * @regs: I/O mapped base address | ||
237 | * @dev: Device Structure | ||
238 | * @common: DMA device structure | ||
239 | * @chan: Driver specific VDMA channel | ||
240 | * @has_sg: Specifies whether Scatter-Gather is present or not | ||
241 | * @flush_on_fsync: Flush on frame sync | ||
242 | */ | ||
243 | struct xilinx_vdma_device { | ||
244 | void __iomem *regs; | ||
245 | struct device *dev; | ||
246 | struct dma_device common; | ||
247 | struct xilinx_vdma_chan *chan[XILINX_VDMA_MAX_CHANS_PER_DEVICE]; | ||
248 | bool has_sg; | ||
249 | u32 flush_on_fsync; | ||
250 | }; | ||
251 | |||
252 | /* Macros */ | ||
253 | #define to_xilinx_chan(chan) \ | ||
254 | container_of(chan, struct xilinx_vdma_chan, common) | ||
255 | #define to_vdma_tx_descriptor(tx) \ | ||
256 | container_of(tx, struct xilinx_vdma_tx_descriptor, async_tx) | ||
257 | |||
258 | /* IO accessors */ | ||
259 | static inline u32 vdma_read(struct xilinx_vdma_chan *chan, u32 reg) | ||
260 | { | ||
261 | return ioread32(chan->xdev->regs + reg); | ||
262 | } | ||
263 | |||
264 | static inline void vdma_write(struct xilinx_vdma_chan *chan, u32 reg, u32 value) | ||
265 | { | ||
266 | iowrite32(value, chan->xdev->regs + reg); | ||
267 | } | ||
268 | |||
269 | static inline void vdma_desc_write(struct xilinx_vdma_chan *chan, u32 reg, | ||
270 | u32 value) | ||
271 | { | ||
272 | vdma_write(chan, chan->desc_offset + reg, value); | ||
273 | } | ||
274 | |||
275 | static inline u32 vdma_ctrl_read(struct xilinx_vdma_chan *chan, u32 reg) | ||
276 | { | ||
277 | return vdma_read(chan, chan->ctrl_offset + reg); | ||
278 | } | ||
279 | |||
280 | static inline void vdma_ctrl_write(struct xilinx_vdma_chan *chan, u32 reg, | ||
281 | u32 value) | ||
282 | { | ||
283 | vdma_write(chan, chan->ctrl_offset + reg, value); | ||
284 | } | ||
285 | |||
286 | static inline void vdma_ctrl_clr(struct xilinx_vdma_chan *chan, u32 reg, | ||
287 | u32 clr) | ||
288 | { | ||
289 | vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) & ~clr); | ||
290 | } | ||
291 | |||
292 | static inline void vdma_ctrl_set(struct xilinx_vdma_chan *chan, u32 reg, | ||
293 | u32 set) | ||
294 | { | ||
295 | vdma_ctrl_write(chan, reg, vdma_ctrl_read(chan, reg) | set); | ||
296 | } | ||
297 | |||
298 | /* ----------------------------------------------------------------------------- | ||
299 | * Descriptors and segments alloc and free | ||
300 | */ | ||
301 | |||
302 | /** | ||
303 | * xilinx_vdma_alloc_tx_segment - Allocate transaction segment | ||
304 | * @chan: Driver specific VDMA channel | ||
305 | * | ||
306 | * Return: The allocated segment on success and NULL on failure. | ||
307 | */ | ||
308 | static struct xilinx_vdma_tx_segment * | ||
309 | xilinx_vdma_alloc_tx_segment(struct xilinx_vdma_chan *chan) | ||
310 | { | ||
311 | struct xilinx_vdma_tx_segment *segment; | ||
312 | dma_addr_t phys; | ||
313 | |||
314 | segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); | ||
315 | if (!segment) | ||
316 | return NULL; | ||
317 | |||
318 | memset(segment, 0, sizeof(*segment)); | ||
319 | segment->phys = phys; | ||
320 | |||
321 | return segment; | ||
322 | } | ||
323 | |||
324 | /** | ||
325 | * xilinx_vdma_free_tx_segment - Free transaction segment | ||
326 | * @chan: Driver specific VDMA channel | ||
327 | * @segment: VDMA transaction segment | ||
328 | */ | ||
329 | static void xilinx_vdma_free_tx_segment(struct xilinx_vdma_chan *chan, | ||
330 | struct xilinx_vdma_tx_segment *segment) | ||
331 | { | ||
332 | dma_pool_free(chan->desc_pool, segment, segment->phys); | ||
333 | } | ||
334 | |||
335 | /** | ||
336 | * xilinx_vdma_alloc_tx_descriptor - Allocate transaction descriptor | ||
337 | * @chan: Driver specific VDMA channel | ||
338 | * | ||
339 | * Return: The allocated descriptor on success and NULL on failure. | ||
340 | */ | ||
341 | static struct xilinx_vdma_tx_descriptor * | ||
342 | xilinx_vdma_alloc_tx_descriptor(struct xilinx_vdma_chan *chan) | ||
343 | { | ||
344 | struct xilinx_vdma_tx_descriptor *desc; | ||
345 | unsigned long flags; | ||
346 | |||
347 | if (chan->allocated_desc) | ||
348 | return chan->allocated_desc; | ||
349 | |||
350 | desc = kzalloc(sizeof(*desc), GFP_KERNEL); | ||
351 | if (!desc) | ||
352 | return NULL; | ||
353 | |||
354 | spin_lock_irqsave(&chan->lock, flags); | ||
355 | chan->allocated_desc = desc; | ||
356 | spin_unlock_irqrestore(&chan->lock, flags); | ||
357 | |||
358 | INIT_LIST_HEAD(&desc->segments); | ||
359 | |||
360 | return desc; | ||
361 | } | ||
362 | |||
363 | /** | ||
364 | * xilinx_vdma_free_tx_descriptor - Free transaction descriptor | ||
365 | * @chan: Driver specific VDMA channel | ||
366 | * @desc: VDMA transaction descriptor | ||
367 | */ | ||
368 | static void | ||
369 | xilinx_vdma_free_tx_descriptor(struct xilinx_vdma_chan *chan, | ||
370 | struct xilinx_vdma_tx_descriptor *desc) | ||
371 | { | ||
372 | struct xilinx_vdma_tx_segment *segment, *next; | ||
373 | |||
374 | if (!desc) | ||
375 | return; | ||
376 | |||
377 | list_for_each_entry_safe(segment, next, &desc->segments, node) { | ||
378 | list_del(&segment->node); | ||
379 | xilinx_vdma_free_tx_segment(chan, segment); | ||
380 | } | ||
381 | |||
382 | kfree(desc); | ||
383 | } | ||
384 | |||
385 | /* Required functions */ | ||
386 | |||
387 | /** | ||
388 | * xilinx_vdma_free_desc_list - Free descriptors list | ||
389 | * @chan: Driver specific VDMA channel | ||
390 | * @list: List to parse and delete the descriptor | ||
391 | */ | ||
392 | static void xilinx_vdma_free_desc_list(struct xilinx_vdma_chan *chan, | ||
393 | struct list_head *list) | ||
394 | { | ||
395 | struct xilinx_vdma_tx_descriptor *desc, *next; | ||
396 | |||
397 | list_for_each_entry_safe(desc, next, list, node) { | ||
398 | list_del(&desc->node); | ||
399 | xilinx_vdma_free_tx_descriptor(chan, desc); | ||
400 | } | ||
401 | } | ||
402 | |||
403 | /** | ||
404 | * xilinx_vdma_free_descriptors - Free channel descriptors | ||
405 | * @chan: Driver specific VDMA channel | ||
406 | */ | ||
407 | static void xilinx_vdma_free_descriptors(struct xilinx_vdma_chan *chan) | ||
408 | { | ||
409 | unsigned long flags; | ||
410 | |||
411 | spin_lock_irqsave(&chan->lock, flags); | ||
412 | |||
413 | xilinx_vdma_free_desc_list(chan, &chan->pending_list); | ||
414 | xilinx_vdma_free_desc_list(chan, &chan->done_list); | ||
415 | |||
416 | xilinx_vdma_free_tx_descriptor(chan, chan->active_desc); | ||
417 | chan->active_desc = NULL; | ||
418 | |||
419 | spin_unlock_irqrestore(&chan->lock, flags); | ||
420 | } | ||
421 | |||
422 | /** | ||
423 | * xilinx_vdma_free_chan_resources - Free channel resources | ||
424 | * @dchan: DMA channel | ||
425 | */ | ||
426 | static void xilinx_vdma_free_chan_resources(struct dma_chan *dchan) | ||
427 | { | ||
428 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
429 | |||
430 | dev_dbg(chan->dev, "Free all channel resources.\n"); | ||
431 | |||
432 | xilinx_vdma_free_descriptors(chan); | ||
433 | dma_pool_destroy(chan->desc_pool); | ||
434 | chan->desc_pool = NULL; | ||
435 | } | ||
436 | |||
437 | /** | ||
438 | * xilinx_vdma_chan_desc_cleanup - Clean channel descriptors | ||
439 | * @chan: Driver specific VDMA channel | ||
440 | */ | ||
441 | static void xilinx_vdma_chan_desc_cleanup(struct xilinx_vdma_chan *chan) | ||
442 | { | ||
443 | struct xilinx_vdma_tx_descriptor *desc, *next; | ||
444 | unsigned long flags; | ||
445 | |||
446 | spin_lock_irqsave(&chan->lock, flags); | ||
447 | |||
448 | list_for_each_entry_safe(desc, next, &chan->done_list, node) { | ||
449 | dma_async_tx_callback callback; | ||
450 | void *callback_param; | ||
451 | |||
452 | /* Remove from the list of running transactions */ | ||
453 | list_del(&desc->node); | ||
454 | |||
455 | /* Run the link descriptor callback function */ | ||
456 | callback = desc->async_tx.callback; | ||
457 | callback_param = desc->async_tx.callback_param; | ||
458 | if (callback) { | ||
459 | spin_unlock_irqrestore(&chan->lock, flags); | ||
460 | callback(callback_param); | ||
461 | spin_lock_irqsave(&chan->lock, flags); | ||
462 | } | ||
463 | |||
464 | /* Run any dependencies, then free the descriptor */ | ||
465 | dma_run_dependencies(&desc->async_tx); | ||
466 | xilinx_vdma_free_tx_descriptor(chan, desc); | ||
467 | } | ||
468 | |||
469 | spin_unlock_irqrestore(&chan->lock, flags); | ||
470 | } | ||
471 | |||
472 | /** | ||
473 | * xilinx_vdma_do_tasklet - Schedule completion tasklet | ||
474 | * @data: Pointer to the Xilinx VDMA channel structure | ||
475 | */ | ||
476 | static void xilinx_vdma_do_tasklet(unsigned long data) | ||
477 | { | ||
478 | struct xilinx_vdma_chan *chan = (struct xilinx_vdma_chan *)data; | ||
479 | |||
480 | xilinx_vdma_chan_desc_cleanup(chan); | ||
481 | } | ||
482 | |||
483 | /** | ||
484 | * xilinx_vdma_alloc_chan_resources - Allocate channel resources | ||
485 | * @dchan: DMA channel | ||
486 | * | ||
487 | * Return: '0' on success and failure value on error | ||
488 | */ | ||
489 | static int xilinx_vdma_alloc_chan_resources(struct dma_chan *dchan) | ||
490 | { | ||
491 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
492 | |||
493 | /* Has this channel already been allocated? */ | ||
494 | if (chan->desc_pool) | ||
495 | return 0; | ||
496 | |||
497 | /* | ||
498 | * The descriptor must be aligned to 64 bytes to meet the | ||
499 | * Xilinx VDMA specification requirement. | ||
500 | */ | ||
501 | chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool", | ||
502 | chan->dev, | ||
503 | sizeof(struct xilinx_vdma_tx_segment), | ||
504 | __alignof__(struct xilinx_vdma_tx_segment), 0); | ||
505 | if (!chan->desc_pool) { | ||
506 | dev_err(chan->dev, | ||
507 | "unable to allocate channel %d descriptor pool\n", | ||
508 | chan->id); | ||
509 | return -ENOMEM; | ||
510 | } | ||
511 | |||
512 | dma_cookie_init(dchan); | ||
513 | return 0; | ||
514 | } | ||
515 | |||
516 | /** | ||
517 | * xilinx_vdma_tx_status - Get VDMA transaction status | ||
518 | * @dchan: DMA channel | ||
519 | * @cookie: Transaction identifier | ||
520 | * @txstate: Transaction state | ||
521 | * | ||
522 | * Return: DMA transaction status | ||
523 | */ | ||
524 | static enum dma_status xilinx_vdma_tx_status(struct dma_chan *dchan, | ||
525 | dma_cookie_t cookie, | ||
526 | struct dma_tx_state *txstate) | ||
527 | { | ||
528 | return dma_cookie_status(dchan, cookie, txstate); | ||
529 | } | ||
530 | |||
531 | /** | ||
532 | * xilinx_vdma_is_running - Check if VDMA channel is running | ||
533 | * @chan: Driver specific VDMA channel | ||
534 | * | ||
535 | * Return: '1' if running, '0' if not. | ||
536 | */ | ||
537 | static bool xilinx_vdma_is_running(struct xilinx_vdma_chan *chan) | ||
538 | { | ||
539 | return !(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | ||
540 | XILINX_VDMA_DMASR_HALTED) && | ||
541 | (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | ||
542 | XILINX_VDMA_DMACR_RUNSTOP); | ||
543 | } | ||
544 | |||
545 | /** | ||
546 | * xilinx_vdma_is_idle - Check if VDMA channel is idle | ||
547 | * @chan: Driver specific VDMA channel | ||
548 | * | ||
549 | * Return: '1' if idle, '0' if not. | ||
550 | */ | ||
551 | static bool xilinx_vdma_is_idle(struct xilinx_vdma_chan *chan) | ||
552 | { | ||
553 | return vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | ||
554 | XILINX_VDMA_DMASR_IDLE; | ||
555 | } | ||
556 | |||
557 | /** | ||
558 | * xilinx_vdma_halt - Halt VDMA channel | ||
559 | * @chan: Driver specific VDMA channel | ||
560 | */ | ||
561 | static void xilinx_vdma_halt(struct xilinx_vdma_chan *chan) | ||
562 | { | ||
563 | int loop = XILINX_VDMA_LOOP_COUNT; | ||
564 | |||
565 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | ||
566 | |||
567 | /* Wait for the hardware to halt */ | ||
568 | do { | ||
569 | if (vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | ||
570 | XILINX_VDMA_DMASR_HALTED) | ||
571 | break; | ||
572 | } while (loop--); | ||
573 | |||
574 | if (!loop) { | ||
575 | dev_err(chan->dev, "Cannot stop channel %p: %x\n", | ||
576 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | ||
577 | chan->err = true; | ||
578 | } | ||
579 | |||
580 | return; | ||
581 | } | ||
582 | |||
583 | /** | ||
584 | * xilinx_vdma_start - Start VDMA channel | ||
585 | * @chan: Driver specific VDMA channel | ||
586 | */ | ||
587 | static void xilinx_vdma_start(struct xilinx_vdma_chan *chan) | ||
588 | { | ||
589 | int loop = XILINX_VDMA_LOOP_COUNT; | ||
590 | |||
591 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RUNSTOP); | ||
592 | |||
593 | /* Wait for the hardware to start */ | ||
594 | do { | ||
595 | if (!(vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR) & | ||
596 | XILINX_VDMA_DMASR_HALTED)) | ||
597 | break; | ||
598 | } while (loop--); | ||
599 | |||
600 | if (!loop) { | ||
601 | dev_err(chan->dev, "Cannot start channel %p: %x\n", | ||
602 | chan, vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | ||
603 | |||
604 | chan->err = true; | ||
605 | } | ||
606 | |||
607 | return; | ||
608 | } | ||
609 | |||
610 | /** | ||
611 | * xilinx_vdma_start_transfer - Starts VDMA transfer | ||
612 | * @chan: Driver specific channel struct pointer | ||
613 | */ | ||
614 | static void xilinx_vdma_start_transfer(struct xilinx_vdma_chan *chan) | ||
615 | { | ||
616 | struct xilinx_vdma_config *config = &chan->config; | ||
617 | struct xilinx_vdma_tx_descriptor *desc; | ||
618 | unsigned long flags; | ||
619 | u32 reg; | ||
620 | struct xilinx_vdma_tx_segment *head, *tail = NULL; | ||
621 | |||
622 | if (chan->err) | ||
623 | return; | ||
624 | |||
625 | spin_lock_irqsave(&chan->lock, flags); | ||
626 | |||
627 | /* There's already an active descriptor, bail out. */ | ||
628 | if (chan->active_desc) | ||
629 | goto out_unlock; | ||
630 | |||
631 | if (list_empty(&chan->pending_list)) | ||
632 | goto out_unlock; | ||
633 | |||
634 | desc = list_first_entry(&chan->pending_list, | ||
635 | struct xilinx_vdma_tx_descriptor, node); | ||
636 | |||
637 | /* If it is SG mode and hardware is busy, cannot submit */ | ||
638 | if (chan->has_sg && xilinx_vdma_is_running(chan) && | ||
639 | !xilinx_vdma_is_idle(chan)) { | ||
640 | dev_dbg(chan->dev, "DMA controller still busy\n"); | ||
641 | goto out_unlock; | ||
642 | } | ||
643 | |||
644 | /* | ||
645 | * If hardware is idle, then all descriptors on the running lists are | ||
646 | * done, start new transfers | ||
647 | */ | ||
648 | if (chan->has_sg) { | ||
649 | head = list_first_entry(&desc->segments, | ||
650 | struct xilinx_vdma_tx_segment, node); | ||
651 | tail = list_entry(desc->segments.prev, | ||
652 | struct xilinx_vdma_tx_segment, node); | ||
653 | |||
654 | vdma_ctrl_write(chan, XILINX_VDMA_REG_CURDESC, head->phys); | ||
655 | } | ||
656 | |||
657 | /* Configure the hardware using info in the config structure */ | ||
658 | reg = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); | ||
659 | |||
660 | if (config->frm_cnt_en) | ||
661 | reg |= XILINX_VDMA_DMACR_FRAMECNT_EN; | ||
662 | else | ||
663 | reg &= ~XILINX_VDMA_DMACR_FRAMECNT_EN; | ||
664 | |||
665 | /* | ||
666 | * With SG, start with circular mode, so that BDs can be fetched. | ||
667 | * In direct register mode, if not parking, enable circular mode | ||
668 | */ | ||
669 | if (chan->has_sg || !config->park) | ||
670 | reg |= XILINX_VDMA_DMACR_CIRC_EN; | ||
671 | |||
672 | if (config->park) | ||
673 | reg &= ~XILINX_VDMA_DMACR_CIRC_EN; | ||
674 | |||
675 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, reg); | ||
676 | |||
677 | if (config->park && (config->park_frm >= 0) && | ||
678 | (config->park_frm < chan->num_frms)) { | ||
679 | if (chan->direction == DMA_MEM_TO_DEV) | ||
680 | vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, | ||
681 | config->park_frm << | ||
682 | XILINX_VDMA_PARK_PTR_RD_REF_SHIFT); | ||
683 | else | ||
684 | vdma_write(chan, XILINX_VDMA_REG_PARK_PTR, | ||
685 | config->park_frm << | ||
686 | XILINX_VDMA_PARK_PTR_WR_REF_SHIFT); | ||
687 | } | ||
688 | |||
689 | /* Start the hardware */ | ||
690 | xilinx_vdma_start(chan); | ||
691 | |||
692 | if (chan->err) | ||
693 | goto out_unlock; | ||
694 | |||
695 | /* Start the transfer */ | ||
696 | if (chan->has_sg) { | ||
697 | vdma_ctrl_write(chan, XILINX_VDMA_REG_TAILDESC, tail->phys); | ||
698 | } else { | ||
699 | struct xilinx_vdma_tx_segment *segment, *last = NULL; | ||
700 | int i = 0; | ||
701 | |||
702 | list_for_each_entry(segment, &desc->segments, node) { | ||
703 | vdma_desc_write(chan, | ||
704 | XILINX_VDMA_REG_START_ADDRESS(i++), | ||
705 | segment->hw.buf_addr); | ||
706 | last = segment; | ||
707 | } | ||
708 | |||
709 | if (!last) | ||
710 | goto out_unlock; | ||
711 | |||
712 | /* HW expects these parameters to be the same for one transaction */ | ||
713 | vdma_desc_write(chan, XILINX_VDMA_REG_HSIZE, last->hw.hsize); | ||
714 | vdma_desc_write(chan, XILINX_VDMA_REG_FRMDLY_STRIDE, | ||
715 | last->hw.stride); | ||
716 | vdma_desc_write(chan, XILINX_VDMA_REG_VSIZE, last->hw.vsize); | ||
717 | } | ||
718 | |||
719 | list_del(&desc->node); | ||
720 | chan->active_desc = desc; | ||
721 | |||
722 | out_unlock: | ||
723 | spin_unlock_irqrestore(&chan->lock, flags); | ||
724 | } | ||
725 | |||
726 | /** | ||
727 | * xilinx_vdma_issue_pending - Issue pending transactions | ||
728 | * @dchan: DMA channel | ||
729 | */ | ||
730 | static void xilinx_vdma_issue_pending(struct dma_chan *dchan) | ||
731 | { | ||
732 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
733 | |||
734 | xilinx_vdma_start_transfer(chan); | ||
735 | } | ||
736 | |||
737 | /** | ||
738 | * xilinx_vdma_complete_descriptor - Mark the active descriptor as complete | ||
739 | * @chan: Driver specific VDMA channel | ||
740 | * | ||
741 | * CONTEXT: hardirq | ||
742 | */ | ||
743 | static void xilinx_vdma_complete_descriptor(struct xilinx_vdma_chan *chan) | ||
744 | { | ||
745 | struct xilinx_vdma_tx_descriptor *desc; | ||
746 | unsigned long flags; | ||
747 | |||
748 | spin_lock_irqsave(&chan->lock, flags); | ||
749 | |||
750 | desc = chan->active_desc; | ||
751 | if (!desc) { | ||
752 | dev_dbg(chan->dev, "no running descriptors\n"); | ||
753 | goto out_unlock; | ||
754 | } | ||
755 | |||
756 | dma_cookie_complete(&desc->async_tx); | ||
757 | list_add_tail(&desc->node, &chan->done_list); | ||
758 | |||
759 | chan->active_desc = NULL; | ||
760 | |||
761 | out_unlock: | ||
762 | spin_unlock_irqrestore(&chan->lock, flags); | ||
763 | } | ||
764 | |||
765 | /** | ||
766 | * xilinx_vdma_reset - Reset VDMA channel | ||
767 | * @chan: Driver specific VDMA channel | ||
768 | * | ||
769 | * Return: '0' on success and failure value on error | ||
770 | */ | ||
771 | static int xilinx_vdma_reset(struct xilinx_vdma_chan *chan) | ||
772 | { | ||
773 | int loop = XILINX_VDMA_LOOP_COUNT; | ||
774 | u32 tmp; | ||
775 | |||
776 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, XILINX_VDMA_DMACR_RESET); | ||
777 | |||
778 | tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | ||
779 | XILINX_VDMA_DMACR_RESET; | ||
780 | |||
781 | /* Wait for the hardware to finish reset */ | ||
782 | do { | ||
783 | tmp = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR) & | ||
784 | XILINX_VDMA_DMACR_RESET; | ||
785 | } while (loop-- && tmp); | ||
786 | |||
787 | if (!loop) { | ||
788 | dev_err(chan->dev, "reset timeout, cr %x, sr %x\n", | ||
789 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR), | ||
790 | vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR)); | ||
791 | return -ETIMEDOUT; | ||
792 | } | ||
793 | |||
794 | chan->err = false; | ||
795 | |||
796 | return 0; | ||
797 | } | ||
798 | |||
799 | /** | ||
800 | * xilinx_vdma_chan_reset - Reset VDMA channel and enable interrupts | ||
801 | * @chan: Driver specific VDMA channel | ||
802 | * | ||
803 | * Return: '0' on success and failure value on error | ||
804 | */ | ||
805 | static int xilinx_vdma_chan_reset(struct xilinx_vdma_chan *chan) | ||
806 | { | ||
807 | int err; | ||
808 | |||
809 | /* Reset VDMA */ | ||
810 | err = xilinx_vdma_reset(chan); | ||
811 | if (err) | ||
812 | return err; | ||
813 | |||
814 | /* Enable interrupts */ | ||
815 | vdma_ctrl_set(chan, XILINX_VDMA_REG_DMACR, | ||
816 | XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | ||
817 | |||
818 | return 0; | ||
819 | } | ||
820 | |||
821 | /** | ||
822 | * xilinx_vdma_irq_handler - VDMA Interrupt handler | ||
823 | * @irq: IRQ number | ||
824 | * @data: Pointer to the Xilinx VDMA channel structure | ||
825 | * | ||
826 | * Return: IRQ_HANDLED/IRQ_NONE | ||
827 | */ | ||
828 | static irqreturn_t xilinx_vdma_irq_handler(int irq, void *data) | ||
829 | { | ||
830 | struct xilinx_vdma_chan *chan = data; | ||
831 | u32 status; | ||
832 | |||
833 | /* Read the status and ack the interrupts. */ | ||
834 | status = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMASR); | ||
835 | if (!(status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK)) | ||
836 | return IRQ_NONE; | ||
837 | |||
838 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, | ||
839 | status & XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | ||
840 | |||
841 | if (status & XILINX_VDMA_DMASR_ERR_IRQ) { | ||
842 | /* | ||
843 | * An error occurred. If C_FLUSH_ON_FSYNC is enabled and the | ||
844 | * error is recoverable, ignore it. Otherwise flag the error. | ||
845 | * | ||
846 | * Only recoverable errors can be cleared in the DMASR register; | ||
847 | * make sure not to write other error bits to 1. | ||
848 | */ | ||
849 | u32 errors = status & XILINX_VDMA_DMASR_ALL_ERR_MASK; | ||
850 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMASR, | ||
851 | errors & XILINX_VDMA_DMASR_ERR_RECOVER_MASK); | ||
852 | |||
853 | if (!chan->flush_on_fsync || | ||
854 | (errors & ~XILINX_VDMA_DMASR_ERR_RECOVER_MASK)) { | ||
855 | dev_err(chan->dev, | ||
856 | "Channel %p has errors %x, cdr %x tdr %x\n", | ||
857 | chan, errors, | ||
858 | vdma_ctrl_read(chan, XILINX_VDMA_REG_CURDESC), | ||
859 | vdma_ctrl_read(chan, XILINX_VDMA_REG_TAILDESC)); | ||
860 | chan->err = true; | ||
861 | } | ||
862 | } | ||
863 | |||
864 | if (status & XILINX_VDMA_DMASR_DLY_CNT_IRQ) { | ||
865 | /* | ||
866 | * The device is taking too long to complete the transfer when | ||
867 | * the user requires responsiveness. | ||
868 | */ | ||
869 | dev_dbg(chan->dev, "Inter-packet latency too long\n"); | ||
870 | } | ||
871 | |||
872 | if (status & XILINX_VDMA_DMASR_FRM_CNT_IRQ) { | ||
873 | xilinx_vdma_complete_descriptor(chan); | ||
874 | xilinx_vdma_start_transfer(chan); | ||
875 | } | ||
876 | |||
877 | tasklet_schedule(&chan->tasklet); | ||
878 | return IRQ_HANDLED; | ||
879 | } | ||
880 | |||
881 | /** | ||
882 | * xilinx_vdma_tx_submit - Submit DMA transaction | ||
883 | * @tx: Async transaction descriptor | ||
884 | * | ||
885 | * Return: cookie value on success and failure value on error | ||
886 | */ | ||
887 | static dma_cookie_t xilinx_vdma_tx_submit(struct dma_async_tx_descriptor *tx) | ||
888 | { | ||
889 | struct xilinx_vdma_tx_descriptor *desc = to_vdma_tx_descriptor(tx); | ||
890 | struct xilinx_vdma_chan *chan = to_xilinx_chan(tx->chan); | ||
891 | dma_cookie_t cookie; | ||
892 | unsigned long flags; | ||
893 | int err; | ||
894 | |||
895 | if (chan->err) { | ||
896 | /* | ||
897 | * If the reset fails, the channel is no longer functional and | ||
898 | * only a hard reset of the system can recover it. | ||
899 | */ | ||
900 | err = xilinx_vdma_chan_reset(chan); | ||
901 | if (err < 0) | ||
902 | return err; | ||
903 | } | ||
904 | |||
905 | spin_lock_irqsave(&chan->lock, flags); | ||
906 | |||
907 | cookie = dma_cookie_assign(tx); | ||
908 | |||
909 | /* Append the transaction to the pending transactions queue. */ | ||
910 | list_add_tail(&desc->node, &chan->pending_list); | ||
911 | |||
912 | /* Clear the allocated descriptor; it now lives on the pending list */ | ||
913 | chan->allocated_desc = NULL; | ||
914 | |||
915 | spin_unlock_irqrestore(&chan->lock, flags); | ||
916 | |||
917 | return cookie; | ||
918 | } | ||
919 | |||
920 | /** | ||
921 | * xilinx_vdma_dma_prep_interleaved - prepare a descriptor for an | ||
922 | * interleaved DMA transaction | ||
923 | * @dchan: DMA channel | ||
924 | * @xt: Interleaved template pointer | ||
925 | * @flags: transfer ack flags | ||
926 | * | ||
927 | * Return: Async transaction descriptor on success and NULL on failure | ||
928 | */ | ||
929 | static struct dma_async_tx_descriptor * | ||
930 | xilinx_vdma_dma_prep_interleaved(struct dma_chan *dchan, | ||
931 | struct dma_interleaved_template *xt, | ||
932 | unsigned long flags) | ||
933 | { | ||
934 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
935 | struct xilinx_vdma_tx_descriptor *desc; | ||
936 | struct xilinx_vdma_tx_segment *segment, *prev = NULL; | ||
937 | struct xilinx_vdma_desc_hw *hw; | ||
938 | |||
939 | if (!is_slave_direction(xt->dir)) | ||
940 | return NULL; | ||
941 | |||
942 | if (!xt->numf || !xt->sgl[0].size) | ||
943 | return NULL; | ||
944 | |||
945 | /* Allocate a transaction descriptor. */ | ||
946 | desc = xilinx_vdma_alloc_tx_descriptor(chan); | ||
947 | if (!desc) | ||
948 | return NULL; | ||
949 | |||
950 | dma_async_tx_descriptor_init(&desc->async_tx, &chan->common); | ||
951 | desc->async_tx.tx_submit = xilinx_vdma_tx_submit; | ||
952 | async_tx_ack(&desc->async_tx); | ||
953 | |||
954 | /* Allocate the link descriptor from DMA pool */ | ||
955 | segment = xilinx_vdma_alloc_tx_segment(chan); | ||
956 | if (!segment) | ||
957 | goto error; | ||
958 | |||
959 | /* Fill in the hardware descriptor */ | ||
960 | hw = &segment->hw; | ||
961 | hw->vsize = xt->numf; | ||
962 | hw->hsize = xt->sgl[0].size; | ||
963 | hw->stride = xt->sgl[0].icg << | ||
964 | XILINX_VDMA_FRMDLY_STRIDE_STRIDE_SHIFT; | ||
965 | hw->stride |= chan->config.frm_dly << | ||
966 | XILINX_VDMA_FRMDLY_STRIDE_FRMDLY_SHIFT; | ||
967 | |||
968 | if (xt->dir != DMA_MEM_TO_DEV) | ||
969 | hw->buf_addr = xt->dst_start; | ||
970 | else | ||
971 | hw->buf_addr = xt->src_start; | ||
972 | |||
973 | /* Link the previous descriptor's next pointer to the current segment */ | ||
974 | prev = list_last_entry(&desc->segments, | ||
975 | struct xilinx_vdma_tx_segment, node); | ||
976 | prev->hw.next_desc = segment->phys; | ||
977 | |||
978 | /* Insert the segment into the descriptor segments list. */ | ||
979 | list_add_tail(&segment->node, &desc->segments); | ||
980 | |||
981 | prev = segment; | ||
982 | |||
983 | /* Link the last hardware descriptor with the first. */ | ||
984 | segment = list_first_entry(&desc->segments, | ||
985 | struct xilinx_vdma_tx_segment, node); | ||
986 | prev->hw.next_desc = segment->phys; | ||
987 | |||
988 | return &desc->async_tx; | ||
989 | |||
990 | error: | ||
991 | xilinx_vdma_free_tx_descriptor(chan, desc); | ||
992 | return NULL; | ||
993 | } | ||
994 | |||
995 | /** | ||
996 | * xilinx_vdma_terminate_all - Halt the channel and free descriptors | ||
997 | * @chan: Driver specific VDMA Channel pointer | ||
998 | */ | ||
999 | static void xilinx_vdma_terminate_all(struct xilinx_vdma_chan *chan) | ||
1000 | { | ||
1001 | /* Halt the DMA engine */ | ||
1002 | xilinx_vdma_halt(chan); | ||
1003 | |||
1004 | /* Remove and free all of the descriptors in the lists */ | ||
1005 | xilinx_vdma_free_descriptors(chan); | ||
1006 | } | ||
1007 | |||
1008 | /** | ||
1009 | * xilinx_vdma_channel_set_config - Configure VDMA channel | ||
1010 | * Run-time configuration for Axi VDMA, supports: | ||
1011 | * . halt the channel | ||
1012 | * . configure interrupt coalescing and inter-packet delay threshold | ||
1013 | * . start/stop parking | ||
1014 | * . enable genlock | ||
1015 | * | ||
1016 | * @dchan: DMA channel | ||
1017 | * @cfg: VDMA device configuration pointer | ||
1018 | * | ||
1019 | * Return: '0' on success and failure value on error | ||
1020 | */ | ||
1021 | int xilinx_vdma_channel_set_config(struct dma_chan *dchan, | ||
1022 | struct xilinx_vdma_config *cfg) | ||
1023 | { | ||
1024 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
1025 | u32 dmacr; | ||
1026 | |||
1027 | if (cfg->reset) | ||
1028 | return xilinx_vdma_chan_reset(chan); | ||
1029 | |||
1030 | dmacr = vdma_ctrl_read(chan, XILINX_VDMA_REG_DMACR); | ||
1031 | |||
1032 | chan->config.frm_dly = cfg->frm_dly; | ||
1033 | chan->config.park = cfg->park; | ||
1034 | |||
1035 | /* genlock settings */ | ||
1036 | chan->config.gen_lock = cfg->gen_lock; | ||
1037 | chan->config.master = cfg->master; | ||
1038 | |||
1039 | if (cfg->gen_lock && chan->genlock) { | ||
1040 | dmacr |= XILINX_VDMA_DMACR_GENLOCK_EN; | ||
1041 | dmacr |= cfg->master << XILINX_VDMA_DMACR_MASTER_SHIFT; | ||
1042 | } | ||
1043 | |||
1044 | chan->config.frm_cnt_en = cfg->frm_cnt_en; | ||
1045 | if (cfg->park) | ||
1046 | chan->config.park_frm = cfg->park_frm; | ||
1047 | else | ||
1048 | chan->config.park_frm = -1; | ||
1049 | |||
1050 | chan->config.coalesc = cfg->coalesc; | ||
1051 | chan->config.delay = cfg->delay; | ||
1052 | |||
1053 | if (cfg->coalesc <= XILINX_VDMA_DMACR_FRAME_COUNT_MAX) { | ||
1054 | dmacr |= cfg->coalesc << XILINX_VDMA_DMACR_FRAME_COUNT_SHIFT; | ||
1055 | chan->config.coalesc = cfg->coalesc; | ||
1056 | } | ||
1057 | |||
1058 | if (cfg->delay <= XILINX_VDMA_DMACR_DELAY_MAX) { | ||
1059 | dmacr |= cfg->delay << XILINX_VDMA_DMACR_DELAY_SHIFT; | ||
1060 | chan->config.delay = cfg->delay; | ||
1061 | } | ||
1062 | |||
1063 | /* FSync Source selection */ | ||
1064 | dmacr &= ~XILINX_VDMA_DMACR_FSYNCSRC_MASK; | ||
1065 | dmacr |= cfg->ext_fsync << XILINX_VDMA_DMACR_FSYNCSRC_SHIFT; | ||
1066 | |||
1067 | vdma_ctrl_write(chan, XILINX_VDMA_REG_DMACR, dmacr); | ||
1068 | |||
1069 | return 0; | ||
1070 | } | ||
1071 | EXPORT_SYMBOL(xilinx_vdma_channel_set_config); | ||
1072 | |||
1073 | /** | ||
1074 | * xilinx_vdma_device_control - Configure DMA channel of the device | ||
1075 | * @dchan: DMA Channel pointer | ||
1076 | * @cmd: DMA control command | ||
1077 | * @arg: Channel configuration | ||
1078 | * | ||
1079 | * Return: '0' on success and failure value on error | ||
1080 | */ | ||
1081 | static int xilinx_vdma_device_control(struct dma_chan *dchan, | ||
1082 | enum dma_ctrl_cmd cmd, unsigned long arg) | ||
1083 | { | ||
1084 | struct xilinx_vdma_chan *chan = to_xilinx_chan(dchan); | ||
1085 | |||
1086 | if (cmd != DMA_TERMINATE_ALL) | ||
1087 | return -ENXIO; | ||
1088 | |||
1089 | xilinx_vdma_terminate_all(chan); | ||
1090 | |||
1091 | return 0; | ||
1092 | } | ||
1093 | |||
1094 | /* ----------------------------------------------------------------------------- | ||
1095 | * Probe and remove | ||
1096 | */ | ||
1097 | |||
1098 | /** | ||
1099 | * xilinx_vdma_chan_remove - Per Channel remove function | ||
1100 | * @chan: Driver specific VDMA channel | ||
1101 | */ | ||
1102 | static void xilinx_vdma_chan_remove(struct xilinx_vdma_chan *chan) | ||
1103 | { | ||
1104 | /* Disable all interrupts */ | ||
1105 | vdma_ctrl_clr(chan, XILINX_VDMA_REG_DMACR, | ||
1106 | XILINX_VDMA_DMAXR_ALL_IRQ_MASK); | ||
1107 | |||
1108 | if (chan->irq > 0) | ||
1109 | free_irq(chan->irq, chan); | ||
1110 | |||
1111 | tasklet_kill(&chan->tasklet); | ||
1112 | |||
1113 | list_del(&chan->common.device_node); | ||
1114 | } | ||
1115 | |||
1116 | /** | ||
1117 | * xilinx_vdma_chan_probe - Per Channel Probing | ||
1118 | * It gets channel features from the device tree entry and | ||
1119 | * initializes special channel handling routines | ||
1120 | * | ||
1121 | * @xdev: Driver specific device structure | ||
1122 | * @node: Device node | ||
1123 | * | ||
1124 | * Return: '0' on success and failure value on error | ||
1125 | */ | ||
1126 | static int xilinx_vdma_chan_probe(struct xilinx_vdma_device *xdev, | ||
1127 | struct device_node *node) | ||
1128 | { | ||
1129 | struct xilinx_vdma_chan *chan; | ||
1130 | bool has_dre = false; | ||
1131 | u32 value, width; | ||
1132 | int err; | ||
1133 | |||
1134 | /* Allocate and initialize the channel structure */ | ||
1135 | chan = devm_kzalloc(xdev->dev, sizeof(*chan), GFP_KERNEL); | ||
1136 | if (!chan) | ||
1137 | return -ENOMEM; | ||
1138 | |||
1139 | chan->dev = xdev->dev; | ||
1140 | chan->xdev = xdev; | ||
1141 | chan->has_sg = xdev->has_sg; | ||
1142 | |||
1143 | spin_lock_init(&chan->lock); | ||
1144 | INIT_LIST_HEAD(&chan->pending_list); | ||
1145 | INIT_LIST_HEAD(&chan->done_list); | ||
1146 | |||
1147 | /* Retrieve the channel properties from the device tree */ | ||
1148 | has_dre = of_property_read_bool(node, "xlnx,include-dre"); | ||
1149 | |||
1150 | chan->genlock = of_property_read_bool(node, "xlnx,genlock-mode"); | ||
1151 | |||
1152 | err = of_property_read_u32(node, "xlnx,datawidth", &value); | ||
1153 | if (err) { | ||
1154 | dev_err(xdev->dev, "missing xlnx,datawidth property\n"); | ||
1155 | return err; | ||
1156 | } | ||
1157 | width = value >> 3; /* Convert bits to bytes */ | ||
1158 | |||
1159 | /* If data width is greater than 8 bytes, DRE is not in hw */ | ||
1160 | if (width > 8) | ||
1161 | has_dre = false; | ||
1162 | |||
1163 | if (!has_dre) | ||
1164 | xdev->common.copy_align = fls(width - 1); | ||
1165 | |||
1166 | if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { | ||
1167 | chan->direction = DMA_MEM_TO_DEV; | ||
1168 | chan->id = 0; | ||
1169 | |||
1170 | chan->ctrl_offset = XILINX_VDMA_MM2S_CTRL_OFFSET; | ||
1171 | chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET; | ||
1172 | |||
1173 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | ||
1174 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_MM2S) | ||
1175 | chan->flush_on_fsync = true; | ||
1176 | } else if (of_device_is_compatible(node, | ||
1177 | "xlnx,axi-vdma-s2mm-channel")) { | ||
1178 | chan->direction = DMA_DEV_TO_MEM; | ||
1179 | chan->id = 1; | ||
1180 | |||
1181 | chan->ctrl_offset = XILINX_VDMA_S2MM_CTRL_OFFSET; | ||
1182 | chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET; | ||
1183 | |||
1184 | if (xdev->flush_on_fsync == XILINX_VDMA_FLUSH_BOTH || | ||
1185 | xdev->flush_on_fsync == XILINX_VDMA_FLUSH_S2MM) | ||
1186 | chan->flush_on_fsync = true; | ||
1187 | } else { | ||
1188 | dev_err(xdev->dev, "Invalid channel compatible node\n"); | ||
1189 | return -EINVAL; | ||
1190 | } | ||
1191 | |||
1192 | /* Request the interrupt */ | ||
1193 | chan->irq = irq_of_parse_and_map(node, 0); | ||
1194 | err = request_irq(chan->irq, xilinx_vdma_irq_handler, IRQF_SHARED, | ||
1195 | "xilinx-vdma-controller", chan); | ||
1196 | if (err) { | ||
1197 | dev_err(xdev->dev, "unable to request IRQ %d\n", chan->irq); | ||
1198 | return err; | ||
1199 | } | ||
1200 | |||
1201 | /* Initialize the tasklet */ | ||
1202 | tasklet_init(&chan->tasklet, xilinx_vdma_do_tasklet, | ||
1203 | (unsigned long)chan); | ||
1204 | |||
1205 | /* | ||
1206 | * Initialize the DMA channel and add it to the DMA engine channels | ||
1207 | * list. | ||
1208 | */ | ||
1209 | chan->common.device = &xdev->common; | ||
1210 | |||
1211 | list_add_tail(&chan->common.device_node, &xdev->common.channels); | ||
1212 | xdev->chan[chan->id] = chan; | ||
1213 | |||
1214 | /* Reset the channel */ | ||
1215 | err = xilinx_vdma_chan_reset(chan); | ||
1216 | if (err < 0) { | ||
1217 | dev_err(xdev->dev, "Reset channel failed\n"); | ||
1218 | return err; | ||
1219 | } | ||
1220 | |||
1221 | return 0; | ||
1222 | } | ||
1223 | |||
1224 | /** | ||
1225 | * of_dma_xilinx_xlate - Translation function | ||
1226 | * @dma_spec: Pointer to DMA specifier as found in the device tree | ||
1227 | * @ofdma: Pointer to DMA controller data | ||
1228 | * | ||
1229 | * Return: DMA channel pointer on success and NULL on error | ||
1230 | */ | ||
1231 | static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec, | ||
1232 | struct of_dma *ofdma) | ||
1233 | { | ||
1234 | struct xilinx_vdma_device *xdev = ofdma->of_dma_data; | ||
1235 | int chan_id = dma_spec->args[0]; | ||
1236 | |||
1237 | if (chan_id >= XILINX_VDMA_MAX_CHANS_PER_DEVICE) | ||
1238 | return NULL; | ||
1239 | |||
1240 | return dma_get_slave_channel(&xdev->chan[chan_id]->common); | ||
1241 | } | ||
1242 | |||
1243 | /** | ||
1244 | * xilinx_vdma_probe - Driver probe function | ||
1245 | * @pdev: Pointer to the platform_device structure | ||
1246 | * | ||
1247 | * Return: '0' on success and failure value on error | ||
1248 | */ | ||
1249 | static int xilinx_vdma_probe(struct platform_device *pdev) | ||
1250 | { | ||
1251 | struct device_node *node = pdev->dev.of_node; | ||
1252 | struct xilinx_vdma_device *xdev; | ||
1253 | struct device_node *child; | ||
1254 | struct resource *io; | ||
1255 | u32 num_frames; | ||
1256 | int i, err; | ||
1257 | |||
1258 | /* Allocate and initialize the DMA engine structure */ | ||
1259 | xdev = devm_kzalloc(&pdev->dev, sizeof(*xdev), GFP_KERNEL); | ||
1260 | if (!xdev) | ||
1261 | return -ENOMEM; | ||
1262 | |||
1263 | xdev->dev = &pdev->dev; | ||
1264 | |||
1265 | /* Request and map I/O memory */ | ||
1266 | io = platform_get_resource(pdev, IORESOURCE_MEM, 0); | ||
1267 | xdev->regs = devm_ioremap_resource(&pdev->dev, io); | ||
1268 | if (IS_ERR(xdev->regs)) | ||
1269 | return PTR_ERR(xdev->regs); | ||
1270 | |||
1271 | /* Retrieve the DMA engine properties from the device tree */ | ||
1272 | xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); | ||
1273 | |||
1274 | err = of_property_read_u32(node, "xlnx,num-fstores", &num_frames); | ||
1275 | if (err < 0) { | ||
1276 | dev_err(xdev->dev, "missing xlnx,num-fstores property\n"); | ||
1277 | return err; | ||
1278 | } | ||
1279 | |||
1280 | err = of_property_read_u32(node, "xlnx,flush-fsync", | ||
1281 | &xdev->flush_on_fsync); | ||
1282 | if (err < 0) | ||
1283 | dev_warn(xdev->dev, "missing xlnx,flush-fsync property\n"); | ||
1284 | |||
1285 | /* Initialize the DMA engine */ | ||
1286 | xdev->common.dev = &pdev->dev; | ||
1287 | |||
1288 | INIT_LIST_HEAD(&xdev->common.channels); | ||
1289 | dma_cap_set(DMA_SLAVE, xdev->common.cap_mask); | ||
1290 | dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask); | ||
1291 | |||
1292 | xdev->common.device_alloc_chan_resources = | ||
1293 | xilinx_vdma_alloc_chan_resources; | ||
1294 | xdev->common.device_free_chan_resources = | ||
1295 | xilinx_vdma_free_chan_resources; | ||
1296 | xdev->common.device_prep_interleaved_dma = | ||
1297 | xilinx_vdma_dma_prep_interleaved; | ||
1298 | xdev->common.device_control = xilinx_vdma_device_control; | ||
1299 | xdev->common.device_tx_status = xilinx_vdma_tx_status; | ||
1300 | xdev->common.device_issue_pending = xilinx_vdma_issue_pending; | ||
1301 | |||
1302 | platform_set_drvdata(pdev, xdev); | ||
1303 | |||
1304 | /* Initialize the channels */ | ||
1305 | for_each_child_of_node(node, child) { | ||
1306 | err = xilinx_vdma_chan_probe(xdev, child); | ||
1307 | if (err < 0) | ||
1308 | goto error; | ||
1309 | } | ||
1310 | |||
1311 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | ||
1312 | if (xdev->chan[i]) | ||
1313 | xdev->chan[i]->num_frms = num_frames; | ||
1314 | |||
1315 | /* Register the DMA engine with the core */ | ||
1316 | dma_async_device_register(&xdev->common); | ||
1317 | |||
1318 | err = of_dma_controller_register(node, of_dma_xilinx_xlate, | ||
1319 | xdev); | ||
1320 | if (err < 0) { | ||
1321 | dev_err(&pdev->dev, "Unable to register DMA to DT\n"); | ||
1322 | dma_async_device_unregister(&xdev->common); | ||
1323 | goto error; | ||
1324 | } | ||
1325 | |||
1326 | dev_info(&pdev->dev, "Xilinx AXI VDMA Engine Driver Probed!!\n"); | ||
1327 | |||
1328 | return 0; | ||
1329 | |||
1330 | error: | ||
1331 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | ||
1332 | if (xdev->chan[i]) | ||
1333 | xilinx_vdma_chan_remove(xdev->chan[i]); | ||
1334 | |||
1335 | return err; | ||
1336 | } | ||
1337 | |||
1338 | /** | ||
1339 | * xilinx_vdma_remove - Driver remove function | ||
1340 | * @pdev: Pointer to the platform_device structure | ||
1341 | * | ||
1342 | * Return: Always '0' | ||
1343 | */ | ||
1344 | static int xilinx_vdma_remove(struct platform_device *pdev) | ||
1345 | { | ||
1346 | struct xilinx_vdma_device *xdev = platform_get_drvdata(pdev); | ||
1347 | int i; | ||
1348 | |||
1349 | of_dma_controller_free(pdev->dev.of_node); | ||
1350 | |||
1351 | dma_async_device_unregister(&xdev->common); | ||
1352 | |||
1353 | for (i = 0; i < XILINX_VDMA_MAX_CHANS_PER_DEVICE; i++) | ||
1354 | if (xdev->chan[i]) | ||
1355 | xilinx_vdma_chan_remove(xdev->chan[i]); | ||
1356 | |||
1357 | return 0; | ||
1358 | } | ||
1359 | |||
1360 | static const struct of_device_id xilinx_vdma_of_ids[] = { | ||
1361 | { .compatible = "xlnx,axi-vdma-1.00.a",}, | ||
1362 | {} | ||
1363 | }; | ||
1364 | |||
1365 | static struct platform_driver xilinx_vdma_driver = { | ||
1366 | .driver = { | ||
1367 | .name = "xilinx-vdma", | ||
1368 | .owner = THIS_MODULE, | ||
1369 | .of_match_table = xilinx_vdma_of_ids, | ||
1370 | }, | ||
1371 | .probe = xilinx_vdma_probe, | ||
1372 | .remove = xilinx_vdma_remove, | ||
1373 | }; | ||
1374 | |||
1375 | module_platform_driver(xilinx_vdma_driver); | ||
1376 | |||
1377 | MODULE_AUTHOR("Xilinx, Inc."); | ||
1378 | MODULE_DESCRIPTION("Xilinx VDMA driver"); | ||
1379 | MODULE_LICENSE("GPL v2"); | ||