aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma
diff options
context:
space:
mode:
authorVinod Koul <vinod.koul@intel.com>2016-07-16 10:40:54 -0400
committerVinod Koul <vinod.koul@intel.com>2016-07-16 10:40:54 -0400
commitad31aa8fedafdd0b9854035fe71eb37994c2d2ce (patch)
tree2733f9d27f4ed6229b4bdbe0d43375cf239d8bf7 /drivers/dma
parent3b3fb1a19963a3b735960b0b7e1cce4e53a3e79b (diff)
parent7cdd3587b8628215f377d5d73a39540d94f33dc1 (diff)
Merge branch 'topic/xilinx' into for-linus
Diffstat (limited to 'drivers/dma')
-rw-r--r--drivers/dma/Kconfig18
-rw-r--r--drivers/dma/xilinx/Makefile3
-rw-r--r--drivers/dma/xilinx/xilinx_dma.c (renamed from drivers/dma/xilinx/xilinx_vdma.c)489
-rw-r--r--drivers/dma/xilinx/zynqmp_dma.c1145
4 files changed, 1596 insertions, 59 deletions
diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig
index f6c46d06cef7..739f797b40d9 100644
--- a/drivers/dma/Kconfig
+++ b/drivers/dma/Kconfig
@@ -533,19 +533,31 @@ config XGENE_DMA
533 help 533 help
534 Enable support for the APM X-Gene SoC DMA engine. 534 Enable support for the APM X-Gene SoC DMA engine.
535 535
536config XILINX_VDMA 536config XILINX_DMA
537 tristate "Xilinx AXI VDMA Engine" 537 tristate "Xilinx AXI DMAS Engine"
538 depends on (ARCH_ZYNQ || MICROBLAZE || ARM64) 538 depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
539 select DMA_ENGINE 539 select DMA_ENGINE
540 help 540 help
541 Enable support for Xilinx AXI VDMA Soft IP. 541 Enable support for Xilinx AXI VDMA Soft IP.
542 542
543 This engine provides high-bandwidth direct memory access 543 AXI VDMA engine provides high-bandwidth direct memory access
544 between memory and AXI4-Stream video type target 544 between memory and AXI4-Stream video type target
545 peripherals including peripherals which support AXI4- 545 peripherals including peripherals which support AXI4-
546 Stream Video Protocol. It has two stream interfaces/ 546 Stream Video Protocol. It has two stream interfaces/
547 channels, Memory Mapped to Stream (MM2S) and Stream to 547 channels, Memory Mapped to Stream (MM2S) and Stream to
548 Memory Mapped (S2MM) for the data transfers. 548 Memory Mapped (S2MM) for the data transfers.
549 AXI CDMA engine provides high-bandwidth direct memory access
550 between a memory-mapped source address and a memory-mapped
551 destination address.
552 AXI DMA engine provides high-bandwidth one dimensional direct
553 memory access between memory and AXI4-Stream target peripherals.
554
555config XILINX_ZYNQMP_DMA
556 tristate "Xilinx ZynqMP DMA Engine"
557 depends on (ARCH_ZYNQ || MICROBLAZE || ARM64)
558 select DMA_ENGINE
559 help
560 Enable support for Xilinx ZynqMP DMA controller.
549 561
550config ZX_DMA 562config ZX_DMA
551 tristate "ZTE ZX296702 DMA support" 563 tristate "ZTE ZX296702 DMA support"
diff --git a/drivers/dma/xilinx/Makefile b/drivers/dma/xilinx/Makefile
index 3c4e9f2fea28..9e91f8f5b087 100644
--- a/drivers/dma/xilinx/Makefile
+++ b/drivers/dma/xilinx/Makefile
@@ -1 +1,2 @@
1obj-$(CONFIG_XILINX_VDMA) += xilinx_vdma.o 1obj-$(CONFIG_XILINX_DMA) += xilinx_dma.o
2obj-$(CONFIG_XILINX_ZYNQMP_DMA) += zynqmp_dma.o
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_dma.c
index df9118540b91..4e223d094433 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_dma.c
@@ -45,6 +45,7 @@
45#include <linux/of_irq.h> 45#include <linux/of_irq.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/clk.h> 47#include <linux/clk.h>
48#include <linux/io-64-nonatomic-lo-hi.h>
48 49
49#include "../dmaengine.h" 50#include "../dmaengine.h"
50 51
@@ -113,7 +114,7 @@
113#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n)) 114#define XILINX_VDMA_REG_START_ADDRESS_64(n) (0x000c + 8 * (n))
114 115
115/* HW specific definitions */ 116/* HW specific definitions */
116#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x2 117#define XILINX_DMA_MAX_CHANS_PER_DEVICE 0x20
117 118
118#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \ 119#define XILINX_DMA_DMAXR_ALL_IRQ_MASK \
119 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \ 120 (XILINX_DMA_DMASR_FRM_CNT_IRQ | \
@@ -157,12 +158,25 @@
157/* AXI DMA Specific Masks/Bit fields */ 158/* AXI DMA Specific Masks/Bit fields */
158#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0) 159#define XILINX_DMA_MAX_TRANS_LEN GENMASK(22, 0)
159#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16) 160#define XILINX_DMA_CR_COALESCE_MAX GENMASK(23, 16)
161#define XILINX_DMA_CR_CYCLIC_BD_EN_MASK BIT(4)
160#define XILINX_DMA_CR_COALESCE_SHIFT 16 162#define XILINX_DMA_CR_COALESCE_SHIFT 16
161#define XILINX_DMA_BD_SOP BIT(27) 163#define XILINX_DMA_BD_SOP BIT(27)
162#define XILINX_DMA_BD_EOP BIT(26) 164#define XILINX_DMA_BD_EOP BIT(26)
163#define XILINX_DMA_COALESCE_MAX 255 165#define XILINX_DMA_COALESCE_MAX 255
164#define XILINX_DMA_NUM_APP_WORDS 5 166#define XILINX_DMA_NUM_APP_WORDS 5
165 167
168/* Multi-Channel DMA Descriptor offsets*/
169#define XILINX_DMA_MCRX_CDESC(x) (0x40 + (x-1) * 0x20)
170#define XILINX_DMA_MCRX_TDESC(x) (0x48 + (x-1) * 0x20)
171
172/* Multi-Channel DMA Masks/Shifts */
173#define XILINX_DMA_BD_HSIZE_MASK GENMASK(15, 0)
174#define XILINX_DMA_BD_STRIDE_MASK GENMASK(15, 0)
175#define XILINX_DMA_BD_VSIZE_MASK GENMASK(31, 19)
176#define XILINX_DMA_BD_TDEST_MASK GENMASK(4, 0)
177#define XILINX_DMA_BD_STRIDE_SHIFT 0
178#define XILINX_DMA_BD_VSIZE_SHIFT 19
179
166/* AXI CDMA Specific Registers/Offsets */ 180/* AXI CDMA Specific Registers/Offsets */
167#define XILINX_CDMA_REG_SRCADDR 0x18 181#define XILINX_CDMA_REG_SRCADDR 0x18
168#define XILINX_CDMA_REG_DSTADDR 0x20 182#define XILINX_CDMA_REG_DSTADDR 0x20
@@ -194,22 +208,22 @@ struct xilinx_vdma_desc_hw {
194/** 208/**
195 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA 209 * struct xilinx_axidma_desc_hw - Hardware Descriptor for AXI DMA
196 * @next_desc: Next Descriptor Pointer @0x00 210 * @next_desc: Next Descriptor Pointer @0x00
197 * @pad1: Reserved @0x04 211 * @next_desc_msb: MSB of Next Descriptor Pointer @0x04
198 * @buf_addr: Buffer address @0x08 212 * @buf_addr: Buffer address @0x08
199 * @pad2: Reserved @0x0C 213 * @buf_addr_msb: MSB of Buffer address @0x0C
200 * @pad3: Reserved @0x10 214 * @pad1: Reserved @0x10
201 * @pad4: Reserved @0x14 215 * @pad2: Reserved @0x14
202 * @control: Control field @0x18 216 * @control: Control field @0x18
203 * @status: Status field @0x1C 217 * @status: Status field @0x1C
204 * @app: APP Fields @0x20 - 0x30 218 * @app: APP Fields @0x20 - 0x30
205 */ 219 */
206struct xilinx_axidma_desc_hw { 220struct xilinx_axidma_desc_hw {
207 u32 next_desc; 221 u32 next_desc;
208 u32 pad1; 222 u32 next_desc_msb;
209 u32 buf_addr; 223 u32 buf_addr;
210 u32 pad2; 224 u32 buf_addr_msb;
211 u32 pad3; 225 u32 mcdma_control;
212 u32 pad4; 226 u32 vsize_stride;
213 u32 control; 227 u32 control;
214 u32 status; 228 u32 status;
215 u32 app[XILINX_DMA_NUM_APP_WORDS]; 229 u32 app[XILINX_DMA_NUM_APP_WORDS];
@@ -218,21 +232,21 @@ struct xilinx_axidma_desc_hw {
218/** 232/**
219 * struct xilinx_cdma_desc_hw - Hardware Descriptor 233 * struct xilinx_cdma_desc_hw - Hardware Descriptor
220 * @next_desc: Next Descriptor Pointer @0x00 234 * @next_desc: Next Descriptor Pointer @0x00
221 * @pad1: Reserved @0x04 235 * @next_descmsb: Next Descriptor Pointer MSB @0x04
222 * @src_addr: Source address @0x08 236 * @src_addr: Source address @0x08
223 * @pad2: Reserved @0x0C 237 * @src_addrmsb: Source address MSB @0x0C
224 * @dest_addr: Destination address @0x10 238 * @dest_addr: Destination address @0x10
225 * @pad3: Reserved @0x14 239 * @dest_addrmsb: Destination address MSB @0x14
226 * @control: Control field @0x18 240 * @control: Control field @0x18
227 * @status: Status field @0x1C 241 * @status: Status field @0x1C
228 */ 242 */
229struct xilinx_cdma_desc_hw { 243struct xilinx_cdma_desc_hw {
230 u32 next_desc; 244 u32 next_desc;
231 u32 pad1; 245 u32 next_desc_msb;
232 u32 src_addr; 246 u32 src_addr;
233 u32 pad2; 247 u32 src_addr_msb;
234 u32 dest_addr; 248 u32 dest_addr;
235 u32 pad3; 249 u32 dest_addr_msb;
236 u32 control; 250 u32 control;
237 u32 status; 251 u32 status;
238} __aligned(64); 252} __aligned(64);
@@ -278,11 +292,13 @@ struct xilinx_cdma_tx_segment {
278 * @async_tx: Async transaction descriptor 292 * @async_tx: Async transaction descriptor
279 * @segments: TX segments list 293 * @segments: TX segments list
280 * @node: Node in the channel descriptors list 294 * @node: Node in the channel descriptors list
295 * @cyclic: Check for cyclic transfers.
281 */ 296 */
282struct xilinx_dma_tx_descriptor { 297struct xilinx_dma_tx_descriptor {
283 struct dma_async_tx_descriptor async_tx; 298 struct dma_async_tx_descriptor async_tx;
284 struct list_head segments; 299 struct list_head segments;
285 struct list_head node; 300 struct list_head node;
301 bool cyclic;
286}; 302};
287 303
288/** 304/**
@@ -302,6 +318,7 @@ struct xilinx_dma_tx_descriptor {
302 * @direction: Transfer direction 318 * @direction: Transfer direction
303 * @num_frms: Number of frames 319 * @num_frms: Number of frames
304 * @has_sg: Support scatter transfers 320 * @has_sg: Support scatter transfers
321 * @cyclic: Check for cyclic transfers.
305 * @genlock: Support genlock mode 322 * @genlock: Support genlock mode
306 * @err: Channel has errors 323 * @err: Channel has errors
307 * @tasklet: Cleanup work after irq 324 * @tasklet: Cleanup work after irq
@@ -312,6 +329,7 @@ struct xilinx_dma_tx_descriptor {
312 * @desc_submitcount: Descriptor h/w submitted count 329 * @desc_submitcount: Descriptor h/w submitted count
313 * @residue: Residue for AXI DMA 330 * @residue: Residue for AXI DMA
314 * @seg_v: Statically allocated segments base 331 * @seg_v: Statically allocated segments base
332 * @cyclic_seg_v: Statically allocated segment base for cyclic transfers
315 * @start_transfer: Differentiate b/w DMA IP's transfer 333 * @start_transfer: Differentiate b/w DMA IP's transfer
316 */ 334 */
317struct xilinx_dma_chan { 335struct xilinx_dma_chan {
@@ -330,6 +348,7 @@ struct xilinx_dma_chan {
330 enum dma_transfer_direction direction; 348 enum dma_transfer_direction direction;
331 int num_frms; 349 int num_frms;
332 bool has_sg; 350 bool has_sg;
351 bool cyclic;
333 bool genlock; 352 bool genlock;
334 bool err; 353 bool err;
335 struct tasklet_struct tasklet; 354 struct tasklet_struct tasklet;
@@ -340,7 +359,9 @@ struct xilinx_dma_chan {
340 u32 desc_submitcount; 359 u32 desc_submitcount;
341 u32 residue; 360 u32 residue;
342 struct xilinx_axidma_tx_segment *seg_v; 361 struct xilinx_axidma_tx_segment *seg_v;
362 struct xilinx_axidma_tx_segment *cyclic_seg_v;
343 void (*start_transfer)(struct xilinx_dma_chan *chan); 363 void (*start_transfer)(struct xilinx_dma_chan *chan);
364 u16 tdest;
344}; 365};
345 366
346struct xilinx_dma_config { 367struct xilinx_dma_config {
@@ -357,6 +378,7 @@ struct xilinx_dma_config {
357 * @common: DMA device structure 378 * @common: DMA device structure
358 * @chan: Driver specific DMA channel 379 * @chan: Driver specific DMA channel
359 * @has_sg: Specifies whether Scatter-Gather is present or not 380 * @has_sg: Specifies whether Scatter-Gather is present or not
381 * @mcdma: Specifies whether Multi-Channel is present or not
360 * @flush_on_fsync: Flush on frame sync 382 * @flush_on_fsync: Flush on frame sync
361 * @ext_addr: Indicates 64 bit addressing is supported by dma device 383 * @ext_addr: Indicates 64 bit addressing is supported by dma device
362 * @pdev: Platform device structure pointer 384 * @pdev: Platform device structure pointer
@@ -366,6 +388,8 @@ struct xilinx_dma_config {
366 * @txs_clk: DMA mm2s stream clock 388 * @txs_clk: DMA mm2s stream clock
367 * @rx_clk: DMA s2mm clock 389 * @rx_clk: DMA s2mm clock
368 * @rxs_clk: DMA s2mm stream clock 390 * @rxs_clk: DMA s2mm stream clock
391 * @nr_channels: Number of channels DMA device supports
392 * @chan_id: DMA channel identifier
369 */ 393 */
370struct xilinx_dma_device { 394struct xilinx_dma_device {
371 void __iomem *regs; 395 void __iomem *regs;
@@ -373,6 +397,7 @@ struct xilinx_dma_device {
373 struct dma_device common; 397 struct dma_device common;
374 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE]; 398 struct xilinx_dma_chan *chan[XILINX_DMA_MAX_CHANS_PER_DEVICE];
375 bool has_sg; 399 bool has_sg;
400 bool mcdma;
376 u32 flush_on_fsync; 401 u32 flush_on_fsync;
377 bool ext_addr; 402 bool ext_addr;
378 struct platform_device *pdev; 403 struct platform_device *pdev;
@@ -382,6 +407,8 @@ struct xilinx_dma_device {
382 struct clk *txs_clk; 407 struct clk *txs_clk;
383 struct clk *rx_clk; 408 struct clk *rx_clk;
384 struct clk *rxs_clk; 409 struct clk *rxs_clk;
410 u32 nr_channels;
411 u32 chan_id;
385}; 412};
386 413
387/* Macros */ 414/* Macros */
@@ -454,6 +481,34 @@ static inline void vdma_desc_write_64(struct xilinx_dma_chan *chan, u32 reg,
454 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4); 481 writel(value_msb, chan->xdev->regs + chan->desc_offset + reg + 4);
455} 482}
456 483
484static inline void dma_writeq(struct xilinx_dma_chan *chan, u32 reg, u64 value)
485{
486 lo_hi_writeq(value, chan->xdev->regs + chan->ctrl_offset + reg);
487}
488
489static inline void xilinx_write(struct xilinx_dma_chan *chan, u32 reg,
490 dma_addr_t addr)
491{
492 if (chan->ext_addr)
493 dma_writeq(chan, reg, addr);
494 else
495 dma_ctrl_write(chan, reg, addr);
496}
497
498static inline void xilinx_axidma_buf(struct xilinx_dma_chan *chan,
499 struct xilinx_axidma_desc_hw *hw,
500 dma_addr_t buf_addr, size_t sg_used,
501 size_t period_len)
502{
503 if (chan->ext_addr) {
504 hw->buf_addr = lower_32_bits(buf_addr + sg_used + period_len);
505 hw->buf_addr_msb = upper_32_bits(buf_addr + sg_used +
506 period_len);
507 } else {
508 hw->buf_addr = buf_addr + sg_used + period_len;
509 }
510}
511
457/* ----------------------------------------------------------------------------- 512/* -----------------------------------------------------------------------------
458 * Descriptors and segments alloc and free 513 * Descriptors and segments alloc and free
459 */ 514 */
@@ -491,11 +546,10 @@ xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
491 struct xilinx_cdma_tx_segment *segment; 546 struct xilinx_cdma_tx_segment *segment;
492 dma_addr_t phys; 547 dma_addr_t phys;
493 548
494 segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); 549 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
495 if (!segment) 550 if (!segment)
496 return NULL; 551 return NULL;
497 552
498 memset(segment, 0, sizeof(*segment));
499 segment->phys = phys; 553 segment->phys = phys;
500 554
501 return segment; 555 return segment;
@@ -513,11 +567,10 @@ xilinx_axidma_alloc_tx_segment(struct xilinx_dma_chan *chan)
513 struct xilinx_axidma_tx_segment *segment; 567 struct xilinx_axidma_tx_segment *segment;
514 dma_addr_t phys; 568 dma_addr_t phys;
515 569
516 segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys); 570 segment = dma_pool_zalloc(chan->desc_pool, GFP_ATOMIC, &phys);
517 if (!segment) 571 if (!segment)
518 return NULL; 572 return NULL;
519 573
520 memset(segment, 0, sizeof(*segment));
521 segment->phys = phys; 574 segment->phys = phys;
522 575
523 return segment; 576 return segment;
@@ -660,13 +713,37 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
660 dev_dbg(chan->dev, "Free all channel resources.\n"); 713 dev_dbg(chan->dev, "Free all channel resources.\n");
661 714
662 xilinx_dma_free_descriptors(chan); 715 xilinx_dma_free_descriptors(chan);
663 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) 716 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
717 xilinx_dma_free_tx_segment(chan, chan->cyclic_seg_v);
664 xilinx_dma_free_tx_segment(chan, chan->seg_v); 718 xilinx_dma_free_tx_segment(chan, chan->seg_v);
719 }
665 dma_pool_destroy(chan->desc_pool); 720 dma_pool_destroy(chan->desc_pool);
666 chan->desc_pool = NULL; 721 chan->desc_pool = NULL;
667} 722}
668 723
669/** 724/**
725 * xilinx_dma_chan_handle_cyclic - Cyclic dma callback
726 * @chan: Driver specific dma channel
727 * @desc: dma transaction descriptor
728 * @flags: flags for spin lock
729 */
730static void xilinx_dma_chan_handle_cyclic(struct xilinx_dma_chan *chan,
731 struct xilinx_dma_tx_descriptor *desc,
732 unsigned long *flags)
733{
734 dma_async_tx_callback callback;
735 void *callback_param;
736
737 callback = desc->async_tx.callback;
738 callback_param = desc->async_tx.callback_param;
739 if (callback) {
740 spin_unlock_irqrestore(&chan->lock, *flags);
741 callback(callback_param);
742 spin_lock_irqsave(&chan->lock, *flags);
743 }
744}
745
746/**
670 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors 747 * xilinx_dma_chan_desc_cleanup - Clean channel descriptors
671 * @chan: Driver specific DMA channel 748 * @chan: Driver specific DMA channel
672 */ 749 */
@@ -681,6 +758,11 @@ static void xilinx_dma_chan_desc_cleanup(struct xilinx_dma_chan *chan)
681 dma_async_tx_callback callback; 758 dma_async_tx_callback callback;
682 void *callback_param; 759 void *callback_param;
683 760
761 if (desc->cyclic) {
762 xilinx_dma_chan_handle_cyclic(chan, desc, &flags);
763 break;
764 }
765
684 /* Remove from the list of running transactions */ 766 /* Remove from the list of running transactions */
685 list_del(&desc->node); 767 list_del(&desc->node);
686 768
@@ -757,7 +839,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
757 return -ENOMEM; 839 return -ENOMEM;
758 } 840 }
759 841
760 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) 842 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
761 /* 843 /*
762 * For AXI DMA case after submitting a pending_list, keep 844 * For AXI DMA case after submitting a pending_list, keep
763 * an extra segment allocated so that the "next descriptor" 845 * an extra segment allocated so that the "next descriptor"
@@ -768,6 +850,15 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
768 */ 850 */
769 chan->seg_v = xilinx_axidma_alloc_tx_segment(chan); 851 chan->seg_v = xilinx_axidma_alloc_tx_segment(chan);
770 852
853 /*
854 * For cyclic DMA mode we need to program the tail Descriptor
855 * register with a value which is not a part of the BD chain
856 * so allocating a desc segment during channel allocation for
857 * programming tail descriptor.
858 */
859 chan->cyclic_seg_v = xilinx_axidma_alloc_tx_segment(chan);
860 }
861
771 dma_cookie_init(dchan); 862 dma_cookie_init(dchan);
772 863
773 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 864 if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
@@ -1065,12 +1156,12 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1065 } 1156 }
1066 1157
1067 if (chan->has_sg) { 1158 if (chan->has_sg) {
1068 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1159 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1069 head_desc->async_tx.phys); 1160 head_desc->async_tx.phys);
1070 1161
1071 /* Update tail ptr register which will start the transfer */ 1162 /* Update tail ptr register which will start the transfer */
1072 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1163 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1073 tail_segment->phys); 1164 tail_segment->phys);
1074 } else { 1165 } else {
1075 /* In simple mode */ 1166 /* In simple mode */
1076 struct xilinx_cdma_tx_segment *segment; 1167 struct xilinx_cdma_tx_segment *segment;
@@ -1082,8 +1173,8 @@ static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
1082 1173
1083 hw = &segment->hw; 1174 hw = &segment->hw;
1084 1175
1085 dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr); 1176 xilinx_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
1086 dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr); 1177 xilinx_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
1087 1178
1088 /* Start the transfer */ 1179 /* Start the transfer */
1089 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1180 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -1124,18 +1215,20 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1124 tail_segment = list_last_entry(&tail_desc->segments, 1215 tail_segment = list_last_entry(&tail_desc->segments,
1125 struct xilinx_axidma_tx_segment, node); 1216 struct xilinx_axidma_tx_segment, node);
1126 1217
1127 old_head = list_first_entry(&head_desc->segments, 1218 if (chan->has_sg && !chan->xdev->mcdma) {
1128 struct xilinx_axidma_tx_segment, node); 1219 old_head = list_first_entry(&head_desc->segments,
1129 new_head = chan->seg_v; 1220 struct xilinx_axidma_tx_segment, node);
1130 /* Copy Buffer Descriptor fields. */ 1221 new_head = chan->seg_v;
1131 new_head->hw = old_head->hw; 1222 /* Copy Buffer Descriptor fields. */
1223 new_head->hw = old_head->hw;
1132 1224
1133 /* Swap and save new reserve */ 1225 /* Swap and save new reserve */
1134 list_replace_init(&old_head->node, &new_head->node); 1226 list_replace_init(&old_head->node, &new_head->node);
1135 chan->seg_v = old_head; 1227 chan->seg_v = old_head;
1136 1228
1137 tail_segment->hw.next_desc = chan->seg_v->phys; 1229 tail_segment->hw.next_desc = chan->seg_v->phys;
1138 head_desc->async_tx.phys = new_head->phys; 1230 head_desc->async_tx.phys = new_head->phys;
1231 }
1139 1232
1140 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR); 1233 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1141 1234
@@ -1146,9 +1239,25 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1146 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg); 1239 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1147 } 1240 }
1148 1241
1149 if (chan->has_sg) 1242 if (chan->has_sg && !chan->xdev->mcdma)
1150 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC, 1243 xilinx_write(chan, XILINX_DMA_REG_CURDESC,
1151 head_desc->async_tx.phys); 1244 head_desc->async_tx.phys);
1245
1246 if (chan->has_sg && chan->xdev->mcdma) {
1247 if (chan->direction == DMA_MEM_TO_DEV) {
1248 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1249 head_desc->async_tx.phys);
1250 } else {
1251 if (!chan->tdest) {
1252 dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
1253 head_desc->async_tx.phys);
1254 } else {
1255 dma_ctrl_write(chan,
1256 XILINX_DMA_MCRX_CDESC(chan->tdest),
1257 head_desc->async_tx.phys);
1258 }
1259 }
1260 }
1152 1261
1153 xilinx_dma_start(chan); 1262 xilinx_dma_start(chan);
1154 1263
@@ -1156,9 +1265,27 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1156 return; 1265 return;
1157 1266
1158 /* Start the transfer */ 1267 /* Start the transfer */
1159 if (chan->has_sg) { 1268 if (chan->has_sg && !chan->xdev->mcdma) {
1160 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC, 1269 if (chan->cyclic)
1270 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1271 chan->cyclic_seg_v->phys);
1272 else
1273 xilinx_write(chan, XILINX_DMA_REG_TAILDESC,
1274 tail_segment->phys);
1275 } else if (chan->has_sg && chan->xdev->mcdma) {
1276 if (chan->direction == DMA_MEM_TO_DEV) {
1277 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1161 tail_segment->phys); 1278 tail_segment->phys);
1279 } else {
1280 if (!chan->tdest) {
1281 dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
1282 tail_segment->phys);
1283 } else {
1284 dma_ctrl_write(chan,
1285 XILINX_DMA_MCRX_TDESC(chan->tdest),
1286 tail_segment->phys);
1287 }
1288 }
1162 } else { 1289 } else {
1163 struct xilinx_axidma_tx_segment *segment; 1290 struct xilinx_axidma_tx_segment *segment;
1164 struct xilinx_axidma_desc_hw *hw; 1291 struct xilinx_axidma_desc_hw *hw;
@@ -1168,7 +1295,7 @@ static void xilinx_dma_start_transfer(struct xilinx_dma_chan *chan)
1168 node); 1295 node);
1169 hw = &segment->hw; 1296 hw = &segment->hw;
1170 1297
1171 dma_ctrl_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr); 1298 xilinx_write(chan, XILINX_DMA_REG_SRCDSTADDR, hw->buf_addr);
1172 1299
1173 /* Start the transfer */ 1300 /* Start the transfer */
1174 dma_ctrl_write(chan, XILINX_DMA_REG_BTT, 1301 dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
@@ -1209,7 +1336,8 @@ static void xilinx_dma_complete_descriptor(struct xilinx_dma_chan *chan)
1209 1336
1210 list_for_each_entry_safe(desc, next, &chan->active_list, node) { 1337 list_for_each_entry_safe(desc, next, &chan->active_list, node) {
1211 list_del(&desc->node); 1338 list_del(&desc->node);
1212 dma_cookie_complete(&desc->async_tx); 1339 if (!desc->cyclic)
1340 dma_cookie_complete(&desc->async_tx);
1213 list_add_tail(&desc->node, &chan->done_list); 1341 list_add_tail(&desc->node, &chan->done_list);
1214 } 1342 }
1215} 1343}
@@ -1397,6 +1525,11 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1397 unsigned long flags; 1525 unsigned long flags;
1398 int err; 1526 int err;
1399 1527
1528 if (chan->cyclic) {
1529 xilinx_dma_free_tx_descriptor(chan, desc);
1530 return -EBUSY;
1531 }
1532
1400 if (chan->err) { 1533 if (chan->err) {
1401 /* 1534 /*
1402 * If reset fails, need to hard reset the system. 1535 * If reset fails, need to hard reset the system.
@@ -1414,6 +1547,9 @@ static dma_cookie_t xilinx_dma_tx_submit(struct dma_async_tx_descriptor *tx)
1414 /* Put this transaction onto the tail of the pending queue */ 1547 /* Put this transaction onto the tail of the pending queue */
1415 append_desc_queue(chan, desc); 1548 append_desc_queue(chan, desc);
1416 1549
1550 if (desc->cyclic)
1551 chan->cyclic = true;
1552
1417 spin_unlock_irqrestore(&chan->lock, flags); 1553 spin_unlock_irqrestore(&chan->lock, flags);
1418 1554
1419 return cookie; 1555 return cookie;
@@ -1541,6 +1677,10 @@ xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
1541 hw->control = len; 1677 hw->control = len;
1542 hw->src_addr = dma_src; 1678 hw->src_addr = dma_src;
1543 hw->dest_addr = dma_dst; 1679 hw->dest_addr = dma_dst;
1680 if (chan->ext_addr) {
1681 hw->src_addr_msb = upper_32_bits(dma_src);
1682 hw->dest_addr_msb = upper_32_bits(dma_dst);
1683 }
1544 1684
1545 /* Fill the previous next descriptor with current */ 1685 /* Fill the previous next descriptor with current */
1546 prev = list_last_entry(&desc->segments, 1686 prev = list_last_entry(&desc->segments,
@@ -1623,7 +1763,8 @@ static struct dma_async_tx_descriptor *xilinx_dma_prep_slave_sg(
1623 hw = &segment->hw; 1763 hw = &segment->hw;
1624 1764
1625 /* Fill in the descriptor */ 1765 /* Fill in the descriptor */
1626 hw->buf_addr = sg_dma_address(sg) + sg_used; 1766 xilinx_axidma_buf(chan, hw, sg_dma_address(sg),
1767 sg_used, 0);
1627 1768
1628 hw->control = copy; 1769 hw->control = copy;
1629 1770
@@ -1669,12 +1810,204 @@ error:
1669} 1810}
1670 1811
1671/** 1812/**
1813 * xilinx_dma_prep_dma_cyclic - prepare descriptors for a DMA_SLAVE transaction
1814 * @chan: DMA channel
1815 * @sgl: scatterlist to transfer to/from
1816 * @sg_len: number of entries in @scatterlist
1817 * @direction: DMA direction
1818 * @flags: transfer ack flags
1819 */
1820static struct dma_async_tx_descriptor *xilinx_dma_prep_dma_cyclic(
1821 struct dma_chan *dchan, dma_addr_t buf_addr, size_t buf_len,
1822 size_t period_len, enum dma_transfer_direction direction,
1823 unsigned long flags)
1824{
1825 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1826 struct xilinx_dma_tx_descriptor *desc;
1827 struct xilinx_axidma_tx_segment *segment, *head_segment, *prev = NULL;
1828 size_t copy, sg_used;
1829 unsigned int num_periods;
1830 int i;
1831 u32 reg;
1832
1833 if (!period_len)
1834 return NULL;
1835
1836 num_periods = buf_len / period_len;
1837
1838 if (!num_periods)
1839 return NULL;
1840
1841 if (!is_slave_direction(direction))
1842 return NULL;
1843
1844 /* Allocate a transaction descriptor. */
1845 desc = xilinx_dma_alloc_tx_descriptor(chan);
1846 if (!desc)
1847 return NULL;
1848
1849 chan->direction = direction;
1850 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1851 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1852
1853 for (i = 0; i < num_periods; ++i) {
1854 sg_used = 0;
1855
1856 while (sg_used < period_len) {
1857 struct xilinx_axidma_desc_hw *hw;
1858
1859 /* Get a free segment */
1860 segment = xilinx_axidma_alloc_tx_segment(chan);
1861 if (!segment)
1862 goto error;
1863
1864 /*
1865 * Calculate the maximum number of bytes to transfer,
1866 * making sure it is less than the hw limit
1867 */
1868 copy = min_t(size_t, period_len - sg_used,
1869 XILINX_DMA_MAX_TRANS_LEN);
1870 hw = &segment->hw;
1871 xilinx_axidma_buf(chan, hw, buf_addr, sg_used,
1872 period_len * i);
1873 hw->control = copy;
1874
1875 if (prev)
1876 prev->hw.next_desc = segment->phys;
1877
1878 prev = segment;
1879 sg_used += copy;
1880
1881 /*
1882 * Insert the segment into the descriptor segments
1883 * list.
1884 */
1885 list_add_tail(&segment->node, &desc->segments);
1886 }
1887 }
1888
1889 head_segment = list_first_entry(&desc->segments,
1890 struct xilinx_axidma_tx_segment, node);
1891 desc->async_tx.phys = head_segment->phys;
1892
1893 desc->cyclic = true;
1894 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
1895 reg |= XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
1896 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
1897
1898 segment = list_last_entry(&desc->segments,
1899 struct xilinx_axidma_tx_segment,
1900 node);
1901 segment->hw.next_desc = (u32) head_segment->phys;
1902
1903 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1904 if (direction == DMA_MEM_TO_DEV) {
1905 head_segment->hw.control |= XILINX_DMA_BD_SOP;
1906 segment->hw.control |= XILINX_DMA_BD_EOP;
1907 }
1908
1909 return &desc->async_tx;
1910
1911error:
1912 xilinx_dma_free_tx_descriptor(chan, desc);
1913 return NULL;
1914}
1915
1916/**
1917 * xilinx_dma_prep_interleaved - prepare a descriptor for a
1918 * DMA_SLAVE transaction
1919 * @dchan: DMA channel
1920 * @xt: Interleaved template pointer
1921 * @flags: transfer ack flags
1922 *
1923 * Return: Async transaction descriptor on success and NULL on failure
1924 */
1925static struct dma_async_tx_descriptor *
1926xilinx_dma_prep_interleaved(struct dma_chan *dchan,
1927 struct dma_interleaved_template *xt,
1928 unsigned long flags)
1929{
1930 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
1931 struct xilinx_dma_tx_descriptor *desc;
1932 struct xilinx_axidma_tx_segment *segment;
1933 struct xilinx_axidma_desc_hw *hw;
1934
1935 if (!is_slave_direction(xt->dir))
1936 return NULL;
1937
1938 if (!xt->numf || !xt->sgl[0].size)
1939 return NULL;
1940
1941 if (xt->frame_size != 1)
1942 return NULL;
1943
1944 /* Allocate a transaction descriptor. */
1945 desc = xilinx_dma_alloc_tx_descriptor(chan);
1946 if (!desc)
1947 return NULL;
1948
1949 chan->direction = xt->dir;
1950 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
1951 desc->async_tx.tx_submit = xilinx_dma_tx_submit;
1952
1953 /* Get a free segment */
1954 segment = xilinx_axidma_alloc_tx_segment(chan);
1955 if (!segment)
1956 goto error;
1957
1958 hw = &segment->hw;
1959
1960 /* Fill in the descriptor */
1961 if (xt->dir != DMA_MEM_TO_DEV)
1962 hw->buf_addr = xt->dst_start;
1963 else
1964 hw->buf_addr = xt->src_start;
1965
1966 hw->mcdma_control = chan->tdest & XILINX_DMA_BD_TDEST_MASK;
1967 hw->vsize_stride = (xt->numf << XILINX_DMA_BD_VSIZE_SHIFT) &
1968 XILINX_DMA_BD_VSIZE_MASK;
1969 hw->vsize_stride |= (xt->sgl[0].icg + xt->sgl[0].size) &
1970 XILINX_DMA_BD_STRIDE_MASK;
1971 hw->control = xt->sgl[0].size & XILINX_DMA_BD_HSIZE_MASK;
1972
1973 /*
1974 * Insert the segment into the descriptor segments
1975 * list.
1976 */
1977 list_add_tail(&segment->node, &desc->segments);
1978
1979
1980 segment = list_first_entry(&desc->segments,
1981 struct xilinx_axidma_tx_segment, node);
1982 desc->async_tx.phys = segment->phys;
1983
1984 /* For the last DMA_MEM_TO_DEV transfer, set EOP */
1985 if (xt->dir == DMA_MEM_TO_DEV) {
1986 segment->hw.control |= XILINX_DMA_BD_SOP;
1987 segment = list_last_entry(&desc->segments,
1988 struct xilinx_axidma_tx_segment,
1989 node);
1990 segment->hw.control |= XILINX_DMA_BD_EOP;
1991 }
1992
1993 return &desc->async_tx;
1994
1995error:
1996 xilinx_dma_free_tx_descriptor(chan, desc);
1997 return NULL;
1998}
1999
2000/**
1672 * xilinx_dma_terminate_all - Halt the channel and free descriptors 2001 * xilinx_dma_terminate_all - Halt the channel and free descriptors
1673 * @chan: Driver specific DMA Channel pointer 2002 * @chan: Driver specific DMA Channel pointer
1674 */ 2003 */
1675static int xilinx_dma_terminate_all(struct dma_chan *dchan) 2004static int xilinx_dma_terminate_all(struct dma_chan *dchan)
1676{ 2005{
1677 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan); 2006 struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
2007 u32 reg;
2008
2009 if (chan->cyclic)
2010 xilinx_dma_chan_reset(chan);
1678 2011
1679 /* Halt the DMA engine */ 2012 /* Halt the DMA engine */
1680 xilinx_dma_halt(chan); 2013 xilinx_dma_halt(chan);
@@ -1682,6 +2015,13 @@ static int xilinx_dma_terminate_all(struct dma_chan *dchan)
1682 /* Remove and free all of the descriptors in the lists */ 2015 /* Remove and free all of the descriptors in the lists */
1683 xilinx_dma_free_descriptors(chan); 2016 xilinx_dma_free_descriptors(chan);
1684 2017
2018 if (chan->cyclic) {
2019 reg = dma_ctrl_read(chan, XILINX_DMA_REG_DMACR);
2020 reg &= ~XILINX_DMA_CR_CYCLIC_BD_EN_MASK;
2021 dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, reg);
2022 chan->cyclic = false;
2023 }
2024
1685 return 0; 2025 return 0;
1686} 2026}
1687 2027
@@ -1972,7 +2312,7 @@ static void xdma_disable_allclks(struct xilinx_dma_device *xdev)
1972 * Return: '0' on success and failure value on error 2312 * Return: '0' on success and failure value on error
1973 */ 2313 */
1974static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev, 2314static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
1975 struct device_node *node) 2315 struct device_node *node, int chan_id)
1976{ 2316{
1977 struct xilinx_dma_chan *chan; 2317 struct xilinx_dma_chan *chan;
1978 bool has_dre = false; 2318 bool has_dre = false;
@@ -2014,9 +2354,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2014 if (!has_dre) 2354 if (!has_dre)
2015 xdev->common.copy_align = fls(width - 1); 2355 xdev->common.copy_align = fls(width - 1);
2016 2356
2017 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel")) { 2357 if (of_device_is_compatible(node, "xlnx,axi-vdma-mm2s-channel") ||
2358 of_device_is_compatible(node, "xlnx,axi-dma-mm2s-channel") ||
2359 of_device_is_compatible(node, "xlnx,axi-cdma-channel")) {
2018 chan->direction = DMA_MEM_TO_DEV; 2360 chan->direction = DMA_MEM_TO_DEV;
2019 chan->id = 0; 2361 chan->id = chan_id;
2362 chan->tdest = chan_id;
2020 2363
2021 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET; 2364 chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
2022 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2365 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2027,9 +2370,12 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2027 chan->flush_on_fsync = true; 2370 chan->flush_on_fsync = true;
2028 } 2371 }
2029 } else if (of_device_is_compatible(node, 2372 } else if (of_device_is_compatible(node,
2030 "xlnx,axi-vdma-s2mm-channel")) { 2373 "xlnx,axi-vdma-s2mm-channel") ||
2374 of_device_is_compatible(node,
2375 "xlnx,axi-dma-s2mm-channel")) {
2031 chan->direction = DMA_DEV_TO_MEM; 2376 chan->direction = DMA_DEV_TO_MEM;
2032 chan->id = 1; 2377 chan->id = chan_id;
2378 chan->tdest = chan_id - xdev->nr_channels;
2033 2379
2034 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET; 2380 chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
2035 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2381 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
@@ -2084,6 +2430,32 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
2084} 2430}
2085 2431
2086/** 2432/**
2433 * xilinx_dma_child_probe - Per child node probe
2434 * It get number of dma-channels per child node from
2435 * device-tree and initializes all the channels.
2436 *
2437 * @xdev: Driver specific device structure
2438 * @node: Device node
2439 *
2440 * Return: 0 always.
2441 */
2442static int xilinx_dma_child_probe(struct xilinx_dma_device *xdev,
2443 struct device_node *node) {
2444 int ret, i, nr_channels = 1;
2445
2446 ret = of_property_read_u32(node, "dma-channels", &nr_channels);
2447 if ((ret < 0) && xdev->mcdma)
2448 dev_warn(xdev->dev, "missing dma-channels property\n");
2449
2450 for (i = 0; i < nr_channels; i++)
2451 xilinx_dma_chan_probe(xdev, node, xdev->chan_id++);
2452
2453 xdev->nr_channels += nr_channels;
2454
2455 return 0;
2456}
2457
2458/**
2087 * of_dma_xilinx_xlate - Translation function 2459 * of_dma_xilinx_xlate - Translation function
2088 * @dma_spec: Pointer to DMA specifier as found in the device tree 2460 * @dma_spec: Pointer to DMA specifier as found in the device tree
2089 * @ofdma: Pointer to DMA controller data 2461 * @ofdma: Pointer to DMA controller data
@@ -2096,7 +2468,7 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
2096 struct xilinx_dma_device *xdev = ofdma->of_dma_data; 2468 struct xilinx_dma_device *xdev = ofdma->of_dma_data;
2097 int chan_id = dma_spec->args[0]; 2469 int chan_id = dma_spec->args[0];
2098 2470
2099 if (chan_id >= XILINX_DMA_MAX_CHANS_PER_DEVICE || !xdev->chan[chan_id]) 2471 if (chan_id >= xdev->nr_channels || !xdev->chan[chan_id])
2100 return NULL; 2472 return NULL;
2101 2473
2102 return dma_get_slave_channel(&xdev->chan[chan_id]->common); 2474 return dma_get_slave_channel(&xdev->chan[chan_id]->common);
@@ -2172,6 +2544,8 @@ static int xilinx_dma_probe(struct platform_device *pdev)
2172 2544
2173 /* Retrieve the DMA engine properties from the device tree */ 2545 /* Retrieve the DMA engine properties from the device tree */
2174 xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg"); 2546 xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
2547 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
2548 xdev->mcdma = of_property_read_bool(node, "xlnx,mcdma");
2175 2549
2176 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2550 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2177 err = of_property_read_u32(node, "xlnx,num-fstores", 2551 err = of_property_read_u32(node, "xlnx,num-fstores",
@@ -2218,7 +2592,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
2218 xdev->common.device_tx_status = xilinx_dma_tx_status; 2592 xdev->common.device_tx_status = xilinx_dma_tx_status;
2219 xdev->common.device_issue_pending = xilinx_dma_issue_pending; 2593 xdev->common.device_issue_pending = xilinx_dma_issue_pending;
2220 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) { 2594 if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
2595 dma_cap_set(DMA_CYCLIC, xdev->common.cap_mask);
2221 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg; 2596 xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
2597 xdev->common.device_prep_dma_cyclic =
2598 xilinx_dma_prep_dma_cyclic;
2599 xdev->common.device_prep_interleaved_dma =
2600 xilinx_dma_prep_interleaved;
2222 /* Residue calculation is supported by only AXI DMA */ 2601 /* Residue calculation is supported by only AXI DMA */
2223 xdev->common.residue_granularity = 2602 xdev->common.residue_granularity =
2224 DMA_RESIDUE_GRANULARITY_SEGMENT; 2603 DMA_RESIDUE_GRANULARITY_SEGMENT;
@@ -2234,13 +2613,13 @@ static int xilinx_dma_probe(struct platform_device *pdev)
2234 2613
2235 /* Initialize the channels */ 2614 /* Initialize the channels */
2236 for_each_child_of_node(node, child) { 2615 for_each_child_of_node(node, child) {
2237 err = xilinx_dma_chan_probe(xdev, child); 2616 err = xilinx_dma_child_probe(xdev, child);
2238 if (err < 0) 2617 if (err < 0)
2239 goto disable_clks; 2618 goto disable_clks;
2240 } 2619 }
2241 2620
2242 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) { 2621 if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
2243 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) 2622 for (i = 0; i < xdev->nr_channels; i++)
2244 if (xdev->chan[i]) 2623 if (xdev->chan[i])
2245 xdev->chan[i]->num_frms = num_frames; 2624 xdev->chan[i]->num_frms = num_frames;
2246 } 2625 }
@@ -2263,7 +2642,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
2263disable_clks: 2642disable_clks:
2264 xdma_disable_allclks(xdev); 2643 xdma_disable_allclks(xdev);
2265error: 2644error:
2266 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) 2645 for (i = 0; i < xdev->nr_channels; i++)
2267 if (xdev->chan[i]) 2646 if (xdev->chan[i])
2268 xilinx_dma_chan_remove(xdev->chan[i]); 2647 xilinx_dma_chan_remove(xdev->chan[i]);
2269 2648
@@ -2285,7 +2664,7 @@ static int xilinx_dma_remove(struct platform_device *pdev)
2285 2664
2286 dma_async_device_unregister(&xdev->common); 2665 dma_async_device_unregister(&xdev->common);
2287 2666
2288 for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++) 2667 for (i = 0; i < xdev->nr_channels; i++)
2289 if (xdev->chan[i]) 2668 if (xdev->chan[i])
2290 xilinx_dma_chan_remove(xdev->chan[i]); 2669 xilinx_dma_chan_remove(xdev->chan[i]);
2291 2670
diff --git a/drivers/dma/xilinx/zynqmp_dma.c b/drivers/dma/xilinx/zynqmp_dma.c
new file mode 100644
index 000000000000..f777a5bc0db8
--- /dev/null
+++ b/drivers/dma/xilinx/zynqmp_dma.c
@@ -0,0 +1,1145 @@
1/*
2 * DMA driver for Xilinx ZynqMP DMA Engine
3 *
4 * Copyright (C) 2016 Xilinx, Inc. All rights reserved.
5 *
6 * This program is free software: you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation, either version 2 of the License, or
9 * (at your option) any later version.
10 */
11
12#include <linux/bitops.h>
13#include <linux/dmapool.h>
14#include <linux/dma/xilinx_dma.h>
15#include <linux/init.h>
16#include <linux/interrupt.h>
17#include <linux/io.h>
18#include <linux/module.h>
19#include <linux/of_address.h>
20#include <linux/of_dma.h>
21#include <linux/of_irq.h>
22#include <linux/of_platform.h>
23#include <linux/slab.h>
24#include <linux/clk.h>
25#include <linux/io-64-nonatomic-lo-hi.h>
26
27#include "../dmaengine.h"
28
29/* Register Offsets */
30#define ZYNQMP_DMA_ISR 0x100
31#define ZYNQMP_DMA_IMR 0x104
32#define ZYNQMP_DMA_IER 0x108
33#define ZYNQMP_DMA_IDS 0x10C
34#define ZYNQMP_DMA_CTRL0 0x110
35#define ZYNQMP_DMA_CTRL1 0x114
36#define ZYNQMP_DMA_DATA_ATTR 0x120
37#define ZYNQMP_DMA_DSCR_ATTR 0x124
38#define ZYNQMP_DMA_SRC_DSCR_WRD0 0x128
39#define ZYNQMP_DMA_SRC_DSCR_WRD1 0x12C
40#define ZYNQMP_DMA_SRC_DSCR_WRD2 0x130
41#define ZYNQMP_DMA_SRC_DSCR_WRD3 0x134
42#define ZYNQMP_DMA_DST_DSCR_WRD0 0x138
43#define ZYNQMP_DMA_DST_DSCR_WRD1 0x13C
44#define ZYNQMP_DMA_DST_DSCR_WRD2 0x140
45#define ZYNQMP_DMA_DST_DSCR_WRD3 0x144
46#define ZYNQMP_DMA_SRC_START_LSB 0x158
47#define ZYNQMP_DMA_SRC_START_MSB 0x15C
48#define ZYNQMP_DMA_DST_START_LSB 0x160
49#define ZYNQMP_DMA_DST_START_MSB 0x164
50#define ZYNQMP_DMA_RATE_CTRL 0x18C
51#define ZYNQMP_DMA_IRQ_SRC_ACCT 0x190
52#define ZYNQMP_DMA_IRQ_DST_ACCT 0x194
53#define ZYNQMP_DMA_CTRL2 0x200
54
55/* Interrupt registers bit field definitions */
56#define ZYNQMP_DMA_DONE BIT(10)
57#define ZYNQMP_DMA_AXI_WR_DATA BIT(9)
58#define ZYNQMP_DMA_AXI_RD_DATA BIT(8)
59#define ZYNQMP_DMA_AXI_RD_DST_DSCR BIT(7)
60#define ZYNQMP_DMA_AXI_RD_SRC_DSCR BIT(6)
61#define ZYNQMP_DMA_IRQ_DST_ACCT_ERR BIT(5)
62#define ZYNQMP_DMA_IRQ_SRC_ACCT_ERR BIT(4)
63#define ZYNQMP_DMA_BYTE_CNT_OVRFL BIT(3)
64#define ZYNQMP_DMA_DST_DSCR_DONE BIT(2)
65#define ZYNQMP_DMA_INV_APB BIT(0)
66
67/* Control 0 register bit field definitions */
68#define ZYNQMP_DMA_OVR_FETCH BIT(7)
69#define ZYNQMP_DMA_POINT_TYPE_SG BIT(6)
70#define ZYNQMP_DMA_RATE_CTRL_EN BIT(3)
71
72/* Control 1 register bit field definitions */
73#define ZYNQMP_DMA_SRC_ISSUE GENMASK(4, 0)
74
75/* Data Attribute register bit field definitions */
76#define ZYNQMP_DMA_ARBURST GENMASK(27, 26)
77#define ZYNQMP_DMA_ARCACHE GENMASK(25, 22)
78#define ZYNQMP_DMA_ARCACHE_OFST 22
79#define ZYNQMP_DMA_ARQOS GENMASK(21, 18)
80#define ZYNQMP_DMA_ARQOS_OFST 18
81#define ZYNQMP_DMA_ARLEN GENMASK(17, 14)
82#define ZYNQMP_DMA_ARLEN_OFST 14
83#define ZYNQMP_DMA_AWBURST GENMASK(13, 12)
84#define ZYNQMP_DMA_AWCACHE GENMASK(11, 8)
85#define ZYNQMP_DMA_AWCACHE_OFST 8
86#define ZYNQMP_DMA_AWQOS GENMASK(7, 4)
87#define ZYNQMP_DMA_AWQOS_OFST 4
88#define ZYNQMP_DMA_AWLEN GENMASK(3, 0)
89#define ZYNQMP_DMA_AWLEN_OFST 0
90
91/* Descriptor Attribute register bit field definitions */
92#define ZYNQMP_DMA_AXCOHRNT BIT(8)
93#define ZYNQMP_DMA_AXCACHE GENMASK(7, 4)
94#define ZYNQMP_DMA_AXCACHE_OFST 4
95#define ZYNQMP_DMA_AXQOS GENMASK(3, 0)
96#define ZYNQMP_DMA_AXQOS_OFST 0
97
98/* Control register 2 bit field definitions */
99#define ZYNQMP_DMA_ENABLE BIT(0)
100
101/* Buffer Descriptor definitions */
102#define ZYNQMP_DMA_DESC_CTRL_STOP 0x10
103#define ZYNQMP_DMA_DESC_CTRL_COMP_INT 0x4
104#define ZYNQMP_DMA_DESC_CTRL_SIZE_256 0x2
105#define ZYNQMP_DMA_DESC_CTRL_COHRNT 0x1
106
107/* Interrupt Mask specific definitions */
108#define ZYNQMP_DMA_INT_ERR (ZYNQMP_DMA_AXI_RD_DATA | \
109 ZYNQMP_DMA_AXI_WR_DATA | \
110 ZYNQMP_DMA_AXI_RD_DST_DSCR | \
111 ZYNQMP_DMA_AXI_RD_SRC_DSCR | \
112 ZYNQMP_DMA_INV_APB)
113#define ZYNQMP_DMA_INT_OVRFL (ZYNQMP_DMA_BYTE_CNT_OVRFL | \
114 ZYNQMP_DMA_IRQ_SRC_ACCT_ERR | \
115 ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
116#define ZYNQMP_DMA_INT_DONE (ZYNQMP_DMA_DONE | ZYNQMP_DMA_DST_DSCR_DONE)
117#define ZYNQMP_DMA_INT_EN_DEFAULT_MASK (ZYNQMP_DMA_INT_DONE | \
118 ZYNQMP_DMA_INT_ERR | \
119 ZYNQMP_DMA_INT_OVRFL | \
120 ZYNQMP_DMA_DST_DSCR_DONE)
121
122/* Max number of descriptors per channel */
123#define ZYNQMP_DMA_NUM_DESCS 32
124
125/* Max transfer size per descriptor */
126#define ZYNQMP_DMA_MAX_TRANS_LEN 0x40000000
127
128/* Reset values for data attributes */
129#define ZYNQMP_DMA_AXCACHE_VAL 0xF
130#define ZYNQMP_DMA_ARLEN_RST_VAL 0xF
131#define ZYNQMP_DMA_AWLEN_RST_VAL 0xF
132
133#define ZYNQMP_DMA_SRC_ISSUE_RST_VAL 0x1F
134
135#define ZYNQMP_DMA_IDS_DEFAULT_MASK 0xFFF
136
137/* Bus width in bits */
138#define ZYNQMP_DMA_BUS_WIDTH_64 64
139#define ZYNQMP_DMA_BUS_WIDTH_128 128
140
141#define ZYNQMP_DMA_DESC_SIZE(chan) (chan->desc_size)
142
143#define to_chan(chan) container_of(chan, struct zynqmp_dma_chan, \
144 common)
145#define tx_to_desc(tx) container_of(tx, struct zynqmp_dma_desc_sw, \
146 async_tx)
147
148/**
149 * struct zynqmp_dma_desc_ll - Hw linked list descriptor
150 * @addr: Buffer address
151 * @size: Size of the buffer
152 * @ctrl: Control word
153 * @nxtdscraddr: Next descriptor base address
154 * @rsvd: Reserved field and for Hw internal use.
155 */
156struct zynqmp_dma_desc_ll {
157 u64 addr;
158 u32 size;
159 u32 ctrl;
160 u64 nxtdscraddr;
161 u64 rsvd;
162}; __aligned(64)
163
/**
 * struct zynqmp_dma_desc_sw - Per Transaction structure
 * @src: Source address for simple mode dma
 * @dst: Destination address for simple mode dma
 * @len: Transfer length for simple mode dma
 * @node: Node in the channel descriptor list
 * @tx_list: List of additional sw descriptors chained onto this one
 *           when a transfer needs more than one hw descriptor pair
 * @async_tx: Async transaction descriptor
 * @src_v: Virtual address of the src descriptor
 * @src_p: Physical address of the src descriptor
 * @dst_v: Virtual address of the dst descriptor
 * @dst_p: Physical address of the dst descriptor
 */
struct zynqmp_dma_desc_sw {
	u64 src;
	u64 dst;
	u32 len;
	struct list_head node;
	struct list_head tx_list;
	struct dma_async_tx_descriptor async_tx;
	struct zynqmp_dma_desc_ll *src_v;
	dma_addr_t src_p;
	struct zynqmp_dma_desc_ll *dst_v;
	dma_addr_t dst_p;
};
189
/**
 * struct zynqmp_dma_chan - Driver specific DMA channel structure
 * @zdev: Driver specific device structure
 * @regs: Control registers offset
 * @lock: Descriptor operation lock
 * @pending_list: Descriptors waiting
 * @free_list: Descriptors free
 * @active_list: Descriptors active
 * @sw_desc_pool: SW descriptor pool
 * @done_list: Complete descriptors
 * @common: DMA common channel
 * @desc_pool_v: Statically allocated descriptor base
 * @desc_pool_p: Physical allocated descriptor base
 * @desc_free_cnt: Descriptor available count
 * @dev: The dma device
 * @irq: Channel IRQ
 * @is_dmacoherent: Tells whether dma operations are coherent or not
 * @tasklet: Cleanup work after irq
 * @idle: Channel status; true when no transfer is in flight
 * @desc_size: Size of the low level descriptor
 * @err: Channel has errors
 * @bus_width: Bus width
 * @src_burst_len: Source burst length
 * @dst_burst_len: Dest burst length
 * @clk_main: Pointer to main clock
 * @clk_apb: Pointer to apb clock
 */
struct zynqmp_dma_chan {
	struct zynqmp_dma_device *zdev;
	void __iomem *regs;
	spinlock_t lock;
	struct list_head pending_list;
	struct list_head free_list;
	struct list_head active_list;
	struct zynqmp_dma_desc_sw *sw_desc_pool;
	struct list_head done_list;
	struct dma_chan common;
	void *desc_pool_v;
	dma_addr_t desc_pool_p;
	u32 desc_free_cnt;
	struct device *dev;
	int irq;
	bool is_dmacoherent;
	struct tasklet_struct tasklet;
	bool idle;
	u32 desc_size;
	bool err;
	u32 bus_width;
	u32 src_burst_len;
	u32 dst_burst_len;
	struct clk *clk_main;
	struct clk *clk_apb;
};
243
/**
 * struct zynqmp_dma_device - DMA device structure
 * @dev: Device Structure
 * @common: DMA device structure
 * @chan: Driver specific DMA channel (single pointer, not an array —
 *        presumably one channel per device instance; confirm in probe)
 */
struct zynqmp_dma_device {
	struct device *dev;
	struct dma_device common;
	struct zynqmp_dma_chan *chan;
};
255
/**
 * zynqmp_dma_writeq - Write a 64-bit value to a channel register pair
 * @chan: ZynqMP DMA channel pointer
 * @reg: Offset of the low 32-bit register of the pair
 * @value: 64-bit value (lo_hi_writeq writes the low word first)
 */
static inline void zynqmp_dma_writeq(struct zynqmp_dma_chan *chan, u32 reg,
				     u64 value)
{
	lo_hi_writeq(value, chan->regs + reg);
}
261
262/**
263 * zynqmp_dma_update_desc_to_ctrlr - Updates descriptor to the controller
264 * @chan: ZynqMP DMA DMA channel pointer
265 * @desc: Transaction descriptor pointer
266 */
267static void zynqmp_dma_update_desc_to_ctrlr(struct zynqmp_dma_chan *chan,
268 struct zynqmp_dma_desc_sw *desc)
269{
270 dma_addr_t addr;
271
272 addr = desc->src_p;
273 zynqmp_dma_writeq(chan, ZYNQMP_DMA_SRC_START_LSB, addr);
274 addr = desc->dst_p;
275 zynqmp_dma_writeq(chan, ZYNQMP_DMA_DST_START_LSB, addr);
276}
277
278/**
279 * zynqmp_dma_desc_config_eod - Mark the descriptor as end descriptor
280 * @chan: ZynqMP DMA channel pointer
281 * @desc: Hw descriptor pointer
282 */
283static void zynqmp_dma_desc_config_eod(struct zynqmp_dma_chan *chan,
284 void *desc)
285{
286 struct zynqmp_dma_desc_ll *hw = (struct zynqmp_dma_desc_ll *)desc;
287
288 hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_STOP;
289 hw++;
290 hw->ctrl |= ZYNQMP_DMA_DESC_CTRL_COMP_INT | ZYNQMP_DMA_DESC_CTRL_STOP;
291}
292
293/**
294 * zynqmp_dma_config_sg_ll_desc - Configure the linked list descriptor
295 * @chan: ZynqMP DMA channel pointer
296 * @sdesc: Hw descriptor pointer
297 * @src: Source buffer address
298 * @dst: Destination buffer address
299 * @len: Transfer length
300 * @prev: Previous hw descriptor pointer
301 */
302static void zynqmp_dma_config_sg_ll_desc(struct zynqmp_dma_chan *chan,
303 struct zynqmp_dma_desc_ll *sdesc,
304 dma_addr_t src, dma_addr_t dst, size_t len,
305 struct zynqmp_dma_desc_ll *prev)
306{
307 struct zynqmp_dma_desc_ll *ddesc = sdesc + 1;
308
309 sdesc->size = ddesc->size = len;
310 sdesc->addr = src;
311 ddesc->addr = dst;
312
313 sdesc->ctrl = ddesc->ctrl = ZYNQMP_DMA_DESC_CTRL_SIZE_256;
314 if (chan->is_dmacoherent) {
315 sdesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
316 ddesc->ctrl |= ZYNQMP_DMA_DESC_CTRL_COHRNT;
317 }
318
319 if (prev) {
320 dma_addr_t addr = chan->desc_pool_p +
321 ((uintptr_t)sdesc - (uintptr_t)chan->desc_pool_v);
322 ddesc = prev + 1;
323 prev->nxtdscraddr = addr;
324 ddesc->nxtdscraddr = addr + ZYNQMP_DMA_DESC_SIZE(chan);
325 }
326}
327
/**
 * zynqmp_dma_init - Initialize the channel
 * @chan: ZynqMP DMA channel pointer
 *
 * Masks and acknowledges all interrupts, programs the descriptor and
 * data AXI attributes for coherent operation when required, drains the
 * interrupt accounting counters and marks the channel idle.
 */
static void zynqmp_dma_init(struct zynqmp_dma_chan *chan)
{
	u32 val;

	/* Mask everything, then ack whatever was already latched. */
	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
	val = readl(chan->regs + ZYNQMP_DMA_ISR);
	writel(val, chan->regs + ZYNQMP_DMA_ISR);

	if (chan->is_dmacoherent) {
		/* Coherent + cacheable attributes for descriptor fetches. */
		val = ZYNQMP_DMA_AXCOHRNT;
		val = (val & ~ZYNQMP_DMA_AXCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AXCACHE_OFST);
		writel(val, chan->regs + ZYNQMP_DMA_DSCR_ATTR);
	}

	val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
	if (chan->is_dmacoherent) {
		/* Same cache attributes for the data read/write ports. */
		val = (val & ~ZYNQMP_DMA_ARCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_ARCACHE_OFST);
		val = (val & ~ZYNQMP_DMA_AWCACHE) |
			(ZYNQMP_DMA_AXCACHE_VAL << ZYNQMP_DMA_AWCACHE_OFST);
	}
	writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);

	/* Clearing the interrupt account registers (clear-on-read) */
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
	val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);

	chan->idle = true;
}
362
/**
 * zynqmp_dma_tx_submit - Submit DMA transaction
 * @tx: Async transaction descriptor pointer
 *
 * Assigns a cookie and appends the transaction to the pending list.
 * If the list is not empty, the hardware descriptors of the current
 * tail transaction are re-linked to point at the new one so the engine
 * can walk from one transaction into the next without stopping.
 *
 * Return: cookie value
 */
static dma_cookie_t zynqmp_dma_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct zynqmp_dma_chan *chan = to_chan(tx->chan);
	struct zynqmp_dma_desc_sw *desc, *new;
	dma_cookie_t cookie;

	new = tx_to_desc(tx);
	spin_lock_bh(&chan->lock);
	cookie = dma_cookie_assign(tx);

	if (!list_empty(&chan->pending_list)) {
		desc = list_last_entry(&chan->pending_list,
				     struct zynqmp_dma_desc_sw, node);
		/* A transaction may span several sw descriptors; chain
		 * after the last segment of the tail transaction.
		 */
		if (!list_empty(&desc->tx_list))
			desc = list_last_entry(&desc->tx_list,
				      struct zynqmp_dma_desc_sw, node);
		/* Link hw chains and clear STOP so the engine continues. */
		desc->src_v->nxtdscraddr = new->src_p;
		desc->src_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
		desc->dst_v->nxtdscraddr = new->dst_p;
		desc->dst_v->ctrl &= ~ZYNQMP_DMA_DESC_CTRL_STOP;
	}

	list_add_tail(&new->node, &chan->pending_list);
	spin_unlock_bh(&chan->lock);

	return cookie;
}
396
397/**
398 * zynqmp_dma_get_descriptor - Get the sw descriptor from the pool
399 * @chan: ZynqMP DMA channel pointer
400 *
401 * Return: The sw descriptor
402 */
403static struct zynqmp_dma_desc_sw *
404zynqmp_dma_get_descriptor(struct zynqmp_dma_chan *chan)
405{
406 struct zynqmp_dma_desc_sw *desc;
407
408 spin_lock_bh(&chan->lock);
409 desc = list_first_entry(&chan->free_list,
410 struct zynqmp_dma_desc_sw, node);
411 list_del(&desc->node);
412 spin_unlock_bh(&chan->lock);
413
414 INIT_LIST_HEAD(&desc->tx_list);
415 /* Clear the src and dst descriptor memory */
416 memset((void *)desc->src_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
417 memset((void *)desc->dst_v, 0, ZYNQMP_DMA_DESC_SIZE(chan));
418
419 return desc;
420}
421
422/**
423 * zynqmp_dma_free_descriptor - Issue pending transactions
424 * @chan: ZynqMP DMA channel pointer
425 * @sdesc: Transaction descriptor pointer
426 */
427static void zynqmp_dma_free_descriptor(struct zynqmp_dma_chan *chan,
428 struct zynqmp_dma_desc_sw *sdesc)
429{
430 struct zynqmp_dma_desc_sw *child, *next;
431
432 chan->desc_free_cnt++;
433 list_add_tail(&sdesc->node, &chan->free_list);
434 list_for_each_entry_safe(child, next, &sdesc->tx_list, node) {
435 chan->desc_free_cnt++;
436 list_move_tail(&child->node, &chan->free_list);
437 }
438}
439
440/**
441 * zynqmp_dma_free_desc_list - Free descriptors list
442 * @chan: ZynqMP DMA channel pointer
443 * @list: List to parse and delete the descriptor
444 */
445static void zynqmp_dma_free_desc_list(struct zynqmp_dma_chan *chan,
446 struct list_head *list)
447{
448 struct zynqmp_dma_desc_sw *desc, *next;
449
450 list_for_each_entry_safe(desc, next, list, node)
451 zynqmp_dma_free_descriptor(chan, desc);
452}
453
454/**
455 * zynqmp_dma_alloc_chan_resources - Allocate channel resources
456 * @dchan: DMA channel
457 *
458 * Return: Number of descriptors on success and failure value on error
459 */
460static int zynqmp_dma_alloc_chan_resources(struct dma_chan *dchan)
461{
462 struct zynqmp_dma_chan *chan = to_chan(dchan);
463 struct zynqmp_dma_desc_sw *desc;
464 int i;
465
466 chan->sw_desc_pool = kzalloc(sizeof(*desc) * ZYNQMP_DMA_NUM_DESCS,
467 GFP_KERNEL);
468 if (!chan->sw_desc_pool)
469 return -ENOMEM;
470
471 chan->idle = true;
472 chan->desc_free_cnt = ZYNQMP_DMA_NUM_DESCS;
473
474 INIT_LIST_HEAD(&chan->free_list);
475
476 for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
477 desc = chan->sw_desc_pool + i;
478 dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
479 desc->async_tx.tx_submit = zynqmp_dma_tx_submit;
480 list_add_tail(&desc->node, &chan->free_list);
481 }
482
483 chan->desc_pool_v = dma_zalloc_coherent(chan->dev,
484 (2 * chan->desc_size * ZYNQMP_DMA_NUM_DESCS),
485 &chan->desc_pool_p, GFP_KERNEL);
486 if (!chan->desc_pool_v)
487 return -ENOMEM;
488
489 for (i = 0; i < ZYNQMP_DMA_NUM_DESCS; i++) {
490 desc = chan->sw_desc_pool + i;
491 desc->src_v = (struct zynqmp_dma_desc_ll *) (chan->desc_pool_v +
492 (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2));
493 desc->dst_v = (struct zynqmp_dma_desc_ll *) (desc->src_v + 1);
494 desc->src_p = chan->desc_pool_p +
495 (i * ZYNQMP_DMA_DESC_SIZE(chan) * 2);
496 desc->dst_p = desc->src_p + ZYNQMP_DMA_DESC_SIZE(chan);
497 }
498
499 return ZYNQMP_DMA_NUM_DESCS;
500}
501
/**
 * zynqmp_dma_start - Start DMA channel
 * @chan: ZynqMP DMA channel pointer
 */
static void zynqmp_dma_start(struct zynqmp_dma_chan *chan)
{
	/* Unmask the default interrupt set before enabling the engine. */
	writel(ZYNQMP_DMA_INT_EN_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IER);
	chan->idle = false;
	writel(ZYNQMP_DMA_ENABLE, chan->regs + ZYNQMP_DMA_CTRL2);
}
512
513/**
514 * zynqmp_dma_handle_ovfl_int - Process the overflow interrupt
515 * @chan: ZynqMP DMA channel pointer
516 * @status: Interrupt status value
517 */
518static void zynqmp_dma_handle_ovfl_int(struct zynqmp_dma_chan *chan, u32 status)
519{
520 u32 val;
521
522 if (status & ZYNQMP_DMA_IRQ_DST_ACCT_ERR)
523 val = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);
524 if (status & ZYNQMP_DMA_IRQ_SRC_ACCT_ERR)
525 val = readl(chan->regs + ZYNQMP_DMA_IRQ_SRC_ACCT);
526}
527
528static void zynqmp_dma_config(struct zynqmp_dma_chan *chan)
529{
530 u32 val;
531
532 val = readl(chan->regs + ZYNQMP_DMA_CTRL0);
533 val |= ZYNQMP_DMA_POINT_TYPE_SG;
534 writel(val, chan->regs + ZYNQMP_DMA_CTRL0);
535
536 val = readl(chan->regs + ZYNQMP_DMA_DATA_ATTR);
537 val = (val & ~ZYNQMP_DMA_ARLEN) |
538 (chan->src_burst_len << ZYNQMP_DMA_ARLEN_OFST);
539 val = (val & ~ZYNQMP_DMA_AWLEN) |
540 (chan->dst_burst_len << ZYNQMP_DMA_AWLEN_OFST);
541 writel(val, chan->regs + ZYNQMP_DMA_DATA_ATTR);
542}
543
/**
 * zynqmp_dma_device_config - Zynqmp dma device configuration
 * @dchan: DMA channel
 * @config: DMA device config
 *
 * Return: Always '0'
 */
static int zynqmp_dma_device_config(struct dma_chan *dchan,
				    struct dma_slave_config *config)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	/* Latched here; programmed into DATA_ATTR by zynqmp_dma_config()
	 * when the next transfer starts.
	 */
	chan->src_burst_len = config->src_maxburst;
	chan->dst_burst_len = config->dst_maxburst;

	return 0;
}
559
560/**
561 * zynqmp_dma_start_transfer - Initiate the new transfer
562 * @chan: ZynqMP DMA channel pointer
563 */
564static void zynqmp_dma_start_transfer(struct zynqmp_dma_chan *chan)
565{
566 struct zynqmp_dma_desc_sw *desc;
567
568 if (!chan->idle)
569 return;
570
571 zynqmp_dma_config(chan);
572
573 desc = list_first_entry_or_null(&chan->pending_list,
574 struct zynqmp_dma_desc_sw, node);
575 if (!desc)
576 return;
577
578 list_splice_tail_init(&chan->pending_list, &chan->active_list);
579 zynqmp_dma_update_desc_to_ctrlr(chan, desc);
580 zynqmp_dma_start(chan);
581}
582
583
/**
 * zynqmp_dma_chan_desc_cleanup - Cleanup the completed descriptors
 * @chan: ZynqMP DMA channel
 *
 * Runs the completion callback of every descriptor on the done list and
 * returns the descriptors to the free pool. Called with chan->lock held;
 * the lock is dropped around the callback so user code may submit new
 * transactions from it.
 */
static void zynqmp_dma_chan_desc_cleanup(struct zynqmp_dma_chan *chan)
{
	struct zynqmp_dma_desc_sw *desc, *next;

	list_for_each_entry_safe(desc, next, &chan->done_list, node) {
		dma_async_tx_callback callback;
		void *callback_param;

		list_del(&desc->node);

		callback = desc->async_tx.callback;
		callback_param = desc->async_tx.callback_param;
		if (callback) {
			/* Never run user code with the channel lock held. */
			spin_unlock(&chan->lock);
			callback(callback_param);
			spin_lock(&chan->lock);
		}

		/* Run any dependencies, then free the descriptor */
		zynqmp_dma_free_descriptor(chan, desc);
	}
}
610
611/**
612 * zynqmp_dma_complete_descriptor - Mark the active descriptor as complete
613 * @chan: ZynqMP DMA channel pointer
614 */
615static void zynqmp_dma_complete_descriptor(struct zynqmp_dma_chan *chan)
616{
617 struct zynqmp_dma_desc_sw *desc;
618
619 desc = list_first_entry_or_null(&chan->active_list,
620 struct zynqmp_dma_desc_sw, node);
621 if (!desc)
622 return;
623 list_del(&desc->node);
624 dma_cookie_complete(&desc->async_tx);
625 list_add_tail(&desc->node, &chan->done_list);
626}
627
/**
 * zynqmp_dma_issue_pending - Issue pending transactions
 * @dchan: DMA channel pointer
 */
static void zynqmp_dma_issue_pending(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	spin_lock_bh(&chan->lock);
	/* No-op if busy; the tasklet restarts the queue once idle. */
	zynqmp_dma_start_transfer(chan);
	spin_unlock_bh(&chan->lock);
}
640
/**
 * zynqmp_dma_free_descriptors - Free channel descriptors
 * @chan: ZynqMP DMA channel pointer
 *
 * Moves every descriptor on the active, pending and done lists back to
 * the free pool.
 */
static void zynqmp_dma_free_descriptors(struct zynqmp_dma_chan *chan)
{
	zynqmp_dma_free_desc_list(chan, &chan->active_list);
	zynqmp_dma_free_desc_list(chan, &chan->pending_list);
	zynqmp_dma_free_desc_list(chan, &chan->done_list);
}
651
652/**
653 * zynqmp_dma_free_chan_resources - Free channel resources
654 * @dchan: DMA channel pointer
655 */
656static void zynqmp_dma_free_chan_resources(struct dma_chan *dchan)
657{
658 struct zynqmp_dma_chan *chan = to_chan(dchan);
659
660 spin_lock_bh(&chan->lock);
661 zynqmp_dma_free_descriptors(chan);
662 spin_unlock_bh(&chan->lock);
663 dma_free_coherent(chan->dev,
664 (2 * ZYNQMP_DMA_DESC_SIZE(chan) * ZYNQMP_DMA_NUM_DESCS),
665 chan->desc_pool_v, chan->desc_pool_p);
666 kfree(chan->sw_desc_pool);
667}
668
/**
 * zynqmp_dma_reset - Reset the channel
 * @chan: ZynqMP DMA channel pointer
 *
 * Masks all interrupts, completes and cleans up whatever was in flight,
 * releases every descriptor back to the free pool and re-initializes
 * the channel registers.
 */
static void zynqmp_dma_reset(struct zynqmp_dma_chan *chan)
{
	/* Mask all interrupts before tearing the channel down. */
	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);

	zynqmp_dma_complete_descriptor(chan);
	zynqmp_dma_chan_desc_cleanup(chan);
	zynqmp_dma_free_descriptors(chan);
	zynqmp_dma_init(chan);
}
682
/**
 * zynqmp_dma_irq_handler - ZynqMP DMA Interrupt handler
 * @irq: IRQ number
 * @data: Pointer to the ZynqMP DMA channel structure
 *
 * Return: IRQ_HANDLED/IRQ_NONE
 */
static irqreturn_t zynqmp_dma_irq_handler(int irq, void *data)
{
	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
	u32 isr, imr, status;
	irqreturn_t ret = IRQ_NONE;

	/* Only act on sources that are not masked... */
	isr = readl(chan->regs + ZYNQMP_DMA_ISR);
	imr = readl(chan->regs + ZYNQMP_DMA_IMR);
	status = isr & ~imr;

	/* ...but ack everything that was latched. */
	writel(isr, chan->regs + ZYNQMP_DMA_ISR);
	if (status & ZYNQMP_DMA_INT_DONE) {
		tasklet_schedule(&chan->tasklet);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_DONE)
		chan->idle = true;

	if (status & ZYNQMP_DMA_INT_ERR) {
		/* The tasklet resets the channel when it sees chan->err. */
		chan->err = true;
		tasklet_schedule(&chan->tasklet);
		dev_err(chan->dev, "Channel %p has errors\n", chan);
		ret = IRQ_HANDLED;
	}

	if (status & ZYNQMP_DMA_INT_OVRFL) {
		/* Overflow of the clear-on-read accounting counters. */
		zynqmp_dma_handle_ovfl_int(chan, status);
		dev_info(chan->dev, "Channel %p overflow interrupt\n", chan);
		ret = IRQ_HANDLED;
	}

	return ret;
}
724
/**
 * zynqmp_dma_do_tasklet - Schedule completion tasklet
 * @data: Pointer to the ZynqMP DMA channel structure
 *
 * Handles error recovery, retires as many descriptors as the hardware
 * reports complete, and restarts the queue when the channel is idle.
 */
static void zynqmp_dma_do_tasklet(unsigned long data)
{
	struct zynqmp_dma_chan *chan = (struct zynqmp_dma_chan *)data;
	u32 count;

	spin_lock(&chan->lock);

	if (chan->err) {
		/* Error path: full channel reset, nothing else to do. */
		zynqmp_dma_reset(chan);
		chan->err = false;
		goto unlock;
	}

	/* Clear-on-read count of completed destination descriptors. */
	count = readl(chan->regs + ZYNQMP_DMA_IRQ_DST_ACCT);

	while (count) {
		zynqmp_dma_complete_descriptor(chan);
		zynqmp_dma_chan_desc_cleanup(chan);
		count--;
	}

	if (chan->idle)
		zynqmp_dma_start_transfer(chan);

unlock:
	spin_unlock(&chan->lock);
}
756
/**
 * zynqmp_dma_device_terminate_all - Aborts all transfers on a channel
 * @dchan: DMA channel pointer
 *
 * Return: Always '0'
 */
static int zynqmp_dma_device_terminate_all(struct dma_chan *dchan)
{
	struct zynqmp_dma_chan *chan = to_chan(dchan);

	spin_lock_bh(&chan->lock);
	/* Mask all interrupts, then drop every queued descriptor. */
	writel(ZYNQMP_DMA_IDS_DEFAULT_MASK, chan->regs + ZYNQMP_DMA_IDS);
	zynqmp_dma_free_descriptors(chan);
	spin_unlock_bh(&chan->lock);

	return 0;
}
774
775/**
776 * zynqmp_dma_prep_memcpy - prepare descriptors for memcpy transaction
777 * @dchan: DMA channel
778 * @dma_dst: Destination buffer address
779 * @dma_src: Source buffer address
780 * @len: Transfer length
781 * @flags: transfer ack flags
782 *
783 * Return: Async transaction descriptor on success and NULL on failure
784 */
785static struct dma_async_tx_descriptor *zynqmp_dma_prep_memcpy(
786 struct dma_chan *dchan, dma_addr_t dma_dst,
787 dma_addr_t dma_src, size_t len, ulong flags)
788{
789 struct zynqmp_dma_chan *chan;
790 struct zynqmp_dma_desc_sw *new, *first = NULL;
791 void *desc = NULL, *prev = NULL;
792 size_t copy;
793 u32 desc_cnt;
794
795 chan = to_chan(dchan);
796
797 if (len > ZYNQMP_DMA_MAX_TRANS_LEN)
798 return NULL;
799
800 desc_cnt = DIV_ROUND_UP(len, ZYNQMP_DMA_MAX_TRANS_LEN);
801
802 spin_lock_bh(&chan->lock);
803 if (desc_cnt > chan->desc_free_cnt) {
804 spin_unlock_bh(&chan->lock);
805 dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
806 return NULL;
807 }
808 chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
809 spin_unlock_bh(&chan->lock);
810
811 do {
812 /* Allocate and populate the descriptor */
813 new = zynqmp_dma_get_descriptor(chan);
814
815 copy = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
816 desc = (struct zynqmp_dma_desc_ll *)new->src_v;
817 zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src,
818 dma_dst, copy, prev);
819 prev = desc;
820 len -= copy;
821 dma_src += copy;
822 dma_dst += copy;
823 if (!first)
824 first = new;
825 else
826 list_add_tail(&new->node, &first->tx_list);
827 } while (len);
828
829 zynqmp_dma_desc_config_eod(chan, desc);
830 async_tx_ack(&first->async_tx);
831 first->async_tx.flags = flags;
832 return &first->async_tx;
833}
834
/**
 * zynqmp_dma_prep_sg - prepare descriptors for a memory sg transaction
 * @dchan: DMA channel
 * @dst_sg: Destination scatter list
 * @dst_sg_len: Number of entries in destination scatter list
 * @src_sg: Source scatter list
 * @src_sg_len: Number of entries in source scatter list
 * @flags: transfer ack flags
 *
 * Walks the source and destination scatterlists in lockstep, emitting
 * one hardware linked-list descriptor per contiguous chunk (capped at
 * ZYNQMP_DMA_MAX_TRANS_LEN) until either list is exhausted.
 *
 * Return: Async transaction descriptor on success and NULL on failure
 */
static struct dma_async_tx_descriptor *zynqmp_dma_prep_sg(
			struct dma_chan *dchan, struct scatterlist *dst_sg,
			unsigned int dst_sg_len, struct scatterlist *src_sg,
			unsigned int src_sg_len, unsigned long flags)
{
	struct zynqmp_dma_desc_sw *new, *first = NULL;
	void *desc = NULL, *prev = NULL;
	struct zynqmp_dma_chan *chan = to_chan(dchan);
	size_t len, dst_avail, src_avail;
	dma_addr_t dma_dst, dma_src;
	u32 desc_cnt = 0, i;
	struct scatterlist *sg;

	/*
	 * Worst-case descriptor budget: each source entry may be split
	 * into ZYNQMP_DMA_MAX_TRANS_LEN sized chunks.
	 */
	for_each_sg(src_sg, sg, src_sg_len, i)
		desc_cnt += DIV_ROUND_UP(sg_dma_len(sg),
					 ZYNQMP_DMA_MAX_TRANS_LEN);

	/* Reserve the descriptors up front under the channel lock */
	spin_lock_bh(&chan->lock);
	if (desc_cnt > chan->desc_free_cnt) {
		spin_unlock_bh(&chan->lock);
		dev_dbg(chan->dev, "chan %p descs are not available\n", chan);
		return NULL;
	}
	chan->desc_free_cnt = chan->desc_free_cnt - desc_cnt;
	spin_unlock_bh(&chan->lock);

	dst_avail = sg_dma_len(dst_sg);
	src_avail = sg_dma_len(src_sg);

	/* Run until we are out of scatterlist entries */
	while (true) {
		/*
		 * Allocate and populate the descriptor.
		 * NOTE(review): if len computes to 0 (zero-length sg
		 * entry), this freshly fetched descriptor is skipped via
		 * the goto and never linked or returned — verify it is
		 * reclaimed elsewhere.
		 */
		new = zynqmp_dma_get_descriptor(chan);
		desc = (struct zynqmp_dma_desc_ll *)new->src_v;
		/* Chunk size: bounded by both sg remainders and the HW max */
		len = min_t(size_t, src_avail, dst_avail);
		len = min_t(size_t, len, ZYNQMP_DMA_MAX_TRANS_LEN);
		if (len == 0)
			goto fetch;
		/* Address of the unconsumed tail of the current entries */
		dma_dst = sg_dma_address(dst_sg) + sg_dma_len(dst_sg) -
			dst_avail;
		dma_src = sg_dma_address(src_sg) + sg_dma_len(src_sg) -
			src_avail;

		zynqmp_dma_config_sg_ll_desc(chan, desc, dma_src, dma_dst,
					     len, prev);
		prev = desc;
		dst_avail -= len;
		src_avail -= len;

		if (!first)
			first = new;
		else
			list_add_tail(&new->node, &first->tx_list);
fetch:
		/* Fetch the next dst scatterlist entry */
		if (dst_avail == 0) {
			if (dst_sg_len == 0)
				break;
			dst_sg = sg_next(dst_sg);
			if (dst_sg == NULL)
				break;
			dst_sg_len--;
			dst_avail = sg_dma_len(dst_sg);
		}
		/* Fetch the next src scatterlist entry */
		if (src_avail == 0) {
			if (src_sg_len == 0)
				break;
			src_sg = sg_next(src_sg);
			if (src_sg == NULL)
				break;
			src_sg_len--;
			src_avail = sg_dma_len(src_sg);
		}
	}

	/* Flag the last descriptor so the engine interrupts on completion */
	zynqmp_dma_desc_config_eod(chan, desc);
	first->async_tx.flags = flags;
	return &first->async_tx;
}
926
/**
 * zynqmp_dma_chan_remove - Channel remove function
 * @chan: ZynqMP DMA channel pointer (may be NULL if probe failed early)
 *
 * Tears down a channel in the reverse order of probe: release the IRQ
 * first so no new tasklet can be scheduled, kill any pending tasklet,
 * unlink the channel from the dmaengine device, then gate the clocks.
 */
static void zynqmp_dma_chan_remove(struct zynqmp_dma_chan *chan)
{
	/* Tolerate being called before the channel was allocated */
	if (!chan)
		return;

	devm_free_irq(chan->zdev->dev, chan->irq, chan);
	tasklet_kill(&chan->tasklet);
	list_del(&chan->common.device_node);
	clk_disable_unprepare(chan->clk_apb);
	clk_disable_unprepare(chan->clk_main);
}
942
943/**
944 * zynqmp_dma_chan_probe - Per Channel Probing
945 * @zdev: Driver specific device structure
946 * @pdev: Pointer to the platform_device structure
947 *
948 * Return: '0' on success and failure value on error
949 */
950static int zynqmp_dma_chan_probe(struct zynqmp_dma_device *zdev,
951 struct platform_device *pdev)
952{
953 struct zynqmp_dma_chan *chan;
954 struct resource *res;
955 struct device_node *node = pdev->dev.of_node;
956 int err;
957
958 chan = devm_kzalloc(zdev->dev, sizeof(*chan), GFP_KERNEL);
959 if (!chan)
960 return -ENOMEM;
961 chan->dev = zdev->dev;
962 chan->zdev = zdev;
963
964 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
965 chan->regs = devm_ioremap_resource(&pdev->dev, res);
966 if (IS_ERR(chan->regs))
967 return PTR_ERR(chan->regs);
968
969 chan->bus_width = ZYNQMP_DMA_BUS_WIDTH_64;
970 chan->dst_burst_len = ZYNQMP_DMA_AWLEN_RST_VAL;
971 chan->src_burst_len = ZYNQMP_DMA_ARLEN_RST_VAL;
972 err = of_property_read_u32(node, "xlnx,bus-width", &chan->bus_width);
973 if ((err < 0) && ((chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_64) ||
974 (chan->bus_width != ZYNQMP_DMA_BUS_WIDTH_128))) {
975 dev_err(zdev->dev, "invalid bus-width value");
976 return err;
977 }
978
979 chan->is_dmacoherent = of_property_read_bool(node, "dma-coherent");
980 zdev->chan = chan;
981 tasklet_init(&chan->tasklet, zynqmp_dma_do_tasklet, (ulong)chan);
982 spin_lock_init(&chan->lock);
983 INIT_LIST_HEAD(&chan->active_list);
984 INIT_LIST_HEAD(&chan->pending_list);
985 INIT_LIST_HEAD(&chan->done_list);
986 INIT_LIST_HEAD(&chan->free_list);
987
988 dma_cookie_init(&chan->common);
989 chan->common.device = &zdev->common;
990 list_add_tail(&chan->common.device_node, &zdev->common.channels);
991
992 zynqmp_dma_init(chan);
993 chan->irq = platform_get_irq(pdev, 0);
994 if (chan->irq < 0)
995 return -ENXIO;
996 err = devm_request_irq(&pdev->dev, chan->irq, zynqmp_dma_irq_handler, 0,
997 "zynqmp-dma", chan);
998 if (err)
999 return err;
1000 chan->clk_main = devm_clk_get(&pdev->dev, "clk_main");
1001 if (IS_ERR(chan->clk_main)) {
1002 dev_err(&pdev->dev, "main clock not found.\n");
1003 return PTR_ERR(chan->clk_main);
1004 }
1005
1006 chan->clk_apb = devm_clk_get(&pdev->dev, "clk_apb");
1007 if (IS_ERR(chan->clk_apb)) {
1008 dev_err(&pdev->dev, "apb clock not found.\n");
1009 return PTR_ERR(chan->clk_apb);
1010 }
1011
1012 err = clk_prepare_enable(chan->clk_main);
1013 if (err) {
1014 dev_err(&pdev->dev, "Unable to enable main clock.\n");
1015 return err;
1016 }
1017
1018 err = clk_prepare_enable(chan->clk_apb);
1019 if (err) {
1020 clk_disable_unprepare(chan->clk_main);
1021 dev_err(&pdev->dev, "Unable to enable apb clock.\n");
1022 return err;
1023 }
1024
1025 chan->desc_size = sizeof(struct zynqmp_dma_desc_ll);
1026 chan->idle = true;
1027 return 0;
1028}
1029
1030/**
1031 * of_zynqmp_dma_xlate - Translation function
1032 * @dma_spec: Pointer to DMA specifier as found in the device tree
1033 * @ofdma: Pointer to DMA controller data
1034 *
1035 * Return: DMA channel pointer on success and NULL on error
1036 */
1037static struct dma_chan *of_zynqmp_dma_xlate(struct of_phandle_args *dma_spec,
1038 struct of_dma *ofdma)
1039{
1040 struct zynqmp_dma_device *zdev = ofdma->of_dma_data;
1041
1042 return dma_get_slave_channel(&zdev->chan->common);
1043}
1044
1045/**
1046 * zynqmp_dma_probe - Driver probe function
1047 * @pdev: Pointer to the platform_device structure
1048 *
1049 * Return: '0' on success and failure value on error
1050 */
1051static int zynqmp_dma_probe(struct platform_device *pdev)
1052{
1053 struct zynqmp_dma_device *zdev;
1054 struct dma_device *p;
1055 int ret;
1056
1057 zdev = devm_kzalloc(&pdev->dev, sizeof(*zdev), GFP_KERNEL);
1058 if (!zdev)
1059 return -ENOMEM;
1060
1061 zdev->dev = &pdev->dev;
1062 INIT_LIST_HEAD(&zdev->common.channels);
1063
1064 dma_set_mask(&pdev->dev, DMA_BIT_MASK(44));
1065 dma_cap_set(DMA_SG, zdev->common.cap_mask);
1066 dma_cap_set(DMA_MEMCPY, zdev->common.cap_mask);
1067
1068 p = &zdev->common;
1069 p->device_prep_dma_sg = zynqmp_dma_prep_sg;
1070 p->device_prep_dma_memcpy = zynqmp_dma_prep_memcpy;
1071 p->device_terminate_all = zynqmp_dma_device_terminate_all;
1072 p->device_issue_pending = zynqmp_dma_issue_pending;
1073 p->device_alloc_chan_resources = zynqmp_dma_alloc_chan_resources;
1074 p->device_free_chan_resources = zynqmp_dma_free_chan_resources;
1075 p->device_tx_status = dma_cookie_status;
1076 p->device_config = zynqmp_dma_device_config;
1077 p->dev = &pdev->dev;
1078
1079 platform_set_drvdata(pdev, zdev);
1080
1081 ret = zynqmp_dma_chan_probe(zdev, pdev);
1082 if (ret) {
1083 dev_err(&pdev->dev, "Probing channel failed\n");
1084 goto free_chan_resources;
1085 }
1086
1087 p->dst_addr_widths = BIT(zdev->chan->bus_width / 8);
1088 p->src_addr_widths = BIT(zdev->chan->bus_width / 8);
1089
1090 dma_async_device_register(&zdev->common);
1091
1092 ret = of_dma_controller_register(pdev->dev.of_node,
1093 of_zynqmp_dma_xlate, zdev);
1094 if (ret) {
1095 dev_err(&pdev->dev, "Unable to register DMA to DT\n");
1096 dma_async_device_unregister(&zdev->common);
1097 goto free_chan_resources;
1098 }
1099
1100 dev_info(&pdev->dev, "ZynqMP DMA driver Probe success\n");
1101
1102 return 0;
1103
1104free_chan_resources:
1105 zynqmp_dma_chan_remove(zdev->chan);
1106 return ret;
1107}
1108
/**
 * zynqmp_dma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 *
 * Unregisters the controller from the device tree and from the
 * dmaengine core before tearing down the channel itself, so no new
 * requests can arrive while the channel is being destroyed.
 *
 * Return: Always '0'
 */
static int zynqmp_dma_remove(struct platform_device *pdev)
{
	struct zynqmp_dma_device *zdev = platform_get_drvdata(pdev);

	of_dma_controller_free(pdev->dev.of_node);
	dma_async_device_unregister(&zdev->common);

	zynqmp_dma_chan_remove(zdev->chan);

	return 0;
}
1126
/* Devices this driver binds to, matched against the DT "compatible" string */
static const struct of_device_id zynqmp_dma_of_match[] = {
	{ .compatible = "xlnx,zynqmp-dma-1.0", },
	{}
};
MODULE_DEVICE_TABLE(of, zynqmp_dma_of_match);

/* Platform driver glue: probe/remove per matched device node */
static struct platform_driver zynqmp_dma_driver = {
	.driver = {
		.name = "xilinx-zynqmp-dma",
		.of_match_table = zynqmp_dma_of_match,
	},
	.probe = zynqmp_dma_probe,
	.remove = zynqmp_dma_remove,
};

module_platform_driver(zynqmp_dma_driver);

MODULE_AUTHOR("Xilinx, Inc.");
MODULE_DESCRIPTION("Xilinx ZynqMP DMA driver");