author		Kedareswara rao Appana <appana.durga.rao@xilinx.com>	2016-04-07 01:29:45 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-05-12 02:28:58 -0400
commit		07b0e7d49cbcadebad9d3b986f3298e33286dea2 (patch)
tree		cab6a2083edf13f0db17f8636d46f50a6ca10ef0 /drivers/dma/xilinx
parent		3843dc282e2b48730c4dc669d7d5671331155c2f (diff)
dmaengine: vdma: Add Support for Xilinx AXI Central Direct Memory Access Engine
This patch adds support for the AXI Central Direct Memory Access (AXI CDMA) core to the existing vdma driver. AXI CDMA is a soft Xilinx IP core that provides high-bandwidth Direct Memory Access (DMA) between a memory-mapped source address and a memory-mapped destination address.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/xilinx')
-rw-r--r--	drivers/dma/xilinx/xilinx_vdma.c | 236 +++++++++++++++++++++++++++-
1 file changed, 234 insertions(+), 2 deletions(-)
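For orientation, this is roughly how a DMA engine client would exercise the DMA_MEMCPY capability the patch registers below. This is a minimal sketch, not part of the patch: error handling is trimmed, the buffers are assumed to be already DMA-mapped, and dmaengine_prep_dma_memcpy() is the generic wrapper around the device_prep_dma_memcpy hook (on kernels of this vintage the hook may need to be called through chan->device directly).

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>

/* Illustrative memcpy client; 'dst' and 'src' are pre-mapped DMA addresses. */
static int example_cdma_copy(dma_addr_t dst, dma_addr_t src, size_t len)
{
	struct dma_async_tx_descriptor *tx;
	struct dma_chan *chan;
	dma_cap_mask_t mask;
	dma_cookie_t cookie;

	/* Ask the dmaengine core for any channel advertising DMA_MEMCPY */
	dma_cap_zero(mask);
	dma_cap_set(DMA_MEMCPY, mask);
	chan = dma_request_channel(mask, NULL, NULL);
	if (!chan)
		return -ENODEV;

	/* For a CDMA channel this ends up in xilinx_cdma_prep_memcpy() */
	tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
				       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
	if (!tx) {
		dma_release_channel(chan);
		return -ENOMEM;
	}

	cookie = dmaengine_submit(tx);	/* queues onto the pending list */
	dma_async_issue_pending(chan);	/* kicks the start_transfer hook */

	/* Busy-wait for completion; a real client would use a callback */
	dma_sync_wait(chan, cookie);
	dma_release_channel(chan);
	return 0;
}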
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index 983e4bc88cd8..fb481135f27a 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -21,6 +21,10 @@
  * and AXI4-Stream target peripherals. It supports one receive and one
  * transmit channel, both of them optional at synthesis time.
  *
+ * The AXI CDMA is a soft IP that provides high-bandwidth Direct Memory
+ * Access (DMA) between a memory-mapped source address and a memory-mapped
+ * destination address.
+ *
  * This program is free software: you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
  * the Free Software Foundation, either version 2 of the License, or
@@ -158,6 +162,13 @@
 #define XILINX_DMA_COALESCE_MAX		255
 #define XILINX_DMA_NUM_APP_WORDS	5
 
+/* AXI CDMA Specific Registers/Offsets */
+#define XILINX_CDMA_REG_SRCADDR		0x18
+#define XILINX_CDMA_REG_DSTADDR		0x20
+
+/* AXI CDMA Specific Masks */
+#define XILINX_CDMA_CR_SGMODE		BIT(3)
+
 /**
  * struct xilinx_vdma_desc_hw - Hardware Descriptor
  * @next_desc: Next Descriptor Pointer @0x00
@@ -204,6 +215,28 @@ struct xilinx_axidma_desc_hw {
 } __aligned(64);
 
 /**
+ * struct xilinx_cdma_desc_hw - Hardware Descriptor
+ * @next_desc: Next Descriptor Pointer @0x00
+ * @pad1: Reserved @0x04
+ * @src_addr: Source address @0x08
+ * @pad2: Reserved @0x0C
+ * @dest_addr: Destination address @0x10
+ * @pad3: Reserved @0x14
+ * @control: Control field @0x18
+ * @status: Status field @0x1C
+ */
+struct xilinx_cdma_desc_hw {
+	u32 next_desc;
+	u32 pad1;
+	u32 src_addr;
+	u32 pad2;
+	u32 dest_addr;
+	u32 pad3;
+	u32 control;
+	u32 status;
+} __aligned(64);
+
+/**
  * struct xilinx_vdma_tx_segment - Descriptor segment
  * @hw: Hardware descriptor
  * @node: Node in the descriptor segments list
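A note on the __aligned(64) attribute in the descriptor above, not part of the patch text: the CDMA scatter-gather engine requires transfer descriptors to sit on 16-word (0x40-byte) boundaries, and the dma_pool created later in this patch passes __alignof__() of the segment structure, which __aligned(64) raises to 64. A hypothetical compile-time check for this invariant could look like:

	/* Hypothetical, not in the patch: SG descriptors must be 0x40-aligned */
	BUILD_BUG_ON(sizeof(struct xilinx_cdma_desc_hw) & 0x3f);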
@@ -228,6 +261,18 @@ struct xilinx_axidma_tx_segment {
 } __aligned(64);
 
 /**
+ * struct xilinx_cdma_tx_segment - Descriptor segment
+ * @hw: Hardware descriptor
+ * @node: Node in the descriptor segments list
+ * @phys: Physical address of segment
+ */
+struct xilinx_cdma_tx_segment {
+	struct xilinx_cdma_desc_hw hw;
+	struct list_head node;
+	dma_addr_t phys;
+} __aligned(64);
+
+/**
  * struct xilinx_dma_tx_descriptor - Per Transaction structure
  * @async_tx: Async transaction descriptor
  * @segments: TX segments list
@@ -415,6 +460,28 @@ xilinx_vdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
 }
 
 /**
+ * xilinx_cdma_alloc_tx_segment - Allocate transaction segment
+ * @chan: Driver specific DMA channel
+ *
+ * Return: The allocated segment on success and NULL on failure.
+ */
+static struct xilinx_cdma_tx_segment *
+xilinx_cdma_alloc_tx_segment(struct xilinx_dma_chan *chan)
+{
+	struct xilinx_cdma_tx_segment *segment;
+	dma_addr_t phys;
+
+	segment = dma_pool_alloc(chan->desc_pool, GFP_ATOMIC, &phys);
+	if (!segment)
+		return NULL;
+
+	memset(segment, 0, sizeof(*segment));
+	segment->phys = phys;
+
+	return segment;
+}
+
+/**
  * xilinx_axidma_alloc_tx_segment - Allocate transaction segment
  * @chan: Driver specific DMA channel
 *
@@ -448,6 +515,17 @@ static void xilinx_dma_free_tx_segment(struct xilinx_dma_chan *chan,
 }
 
 /**
+ * xilinx_cdma_free_tx_segment - Free transaction segment
+ * @chan: Driver specific DMA channel
+ * @segment: DMA transaction segment
+ */
+static void xilinx_cdma_free_tx_segment(struct xilinx_dma_chan *chan,
+				struct xilinx_cdma_tx_segment *segment)
+{
+	dma_pool_free(chan->desc_pool, segment, segment->phys);
+}
+
+/**
  * xilinx_vdma_free_tx_segment - Free transaction segment
 * @chan: Driver specific DMA channel
 * @segment: DMA transaction segment
@@ -488,6 +566,7 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
 			      struct xilinx_dma_tx_descriptor *desc)
 {
 	struct xilinx_vdma_tx_segment *segment, *next;
+	struct xilinx_cdma_tx_segment *cdma_segment, *cdma_next;
 	struct xilinx_axidma_tx_segment *axidma_segment, *axidma_next;
 
 	if (!desc)
@@ -498,6 +577,12 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
 			list_del(&segment->node);
 			xilinx_vdma_free_tx_segment(chan, segment);
 		}
+	} else if (chan->xdev->dmatype == XDMA_TYPE_CDMA) {
+		list_for_each_entry_safe(cdma_segment, cdma_next,
+					 &desc->segments, node) {
+			list_del(&cdma_segment->node);
+			xilinx_cdma_free_tx_segment(chan, cdma_segment);
+		}
 	} else {
 		list_for_each_entry_safe(axidma_segment, axidma_next,
 					 &desc->segments, node) {
@@ -631,6 +716,12 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 				   sizeof(struct xilinx_axidma_tx_segment),
 				   __alignof__(struct xilinx_axidma_tx_segment),
 				   0);
+	} else if (chan->xdev->dmatype == XDMA_TYPE_CDMA) {
+		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
+				   chan->dev,
+				   sizeof(struct xilinx_cdma_tx_segment),
+				   __alignof__(struct xilinx_cdma_tx_segment),
+				   0);
 	} else {
 		chan->desc_pool = dma_pool_create("xilinx_vdma_desc_pool",
 				   chan->dev,
@@ -667,6 +758,10 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
 	}
 
+	if ((chan->xdev->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
+			     XILINX_CDMA_CR_SGMODE);
+
 	return 0;
 }
 
@@ -920,6 +1015,66 @@ static void xilinx_vdma_start_transfer(struct xilinx_dma_chan *chan)
 }
 
 /**
+ * xilinx_cdma_start_transfer - Starts cdma transfer
+ * @chan: Driver specific channel struct pointer
+ */
+static void xilinx_cdma_start_transfer(struct xilinx_dma_chan *chan)
+{
+	struct xilinx_dma_tx_descriptor *head_desc, *tail_desc;
+	struct xilinx_cdma_tx_segment *tail_segment;
+	u32 ctrl_reg = dma_read(chan, XILINX_DMA_REG_DMACR);
+
+	if (chan->err)
+		return;
+
+	if (list_empty(&chan->pending_list))
+		return;
+
+	head_desc = list_first_entry(&chan->pending_list,
+				     struct xilinx_dma_tx_descriptor, node);
+	tail_desc = list_last_entry(&chan->pending_list,
+				    struct xilinx_dma_tx_descriptor, node);
+	tail_segment = list_last_entry(&tail_desc->segments,
+				       struct xilinx_cdma_tx_segment, node);
+
+	if (chan->desc_pendingcount <= XILINX_DMA_COALESCE_MAX) {
+		ctrl_reg &= ~XILINX_DMA_CR_COALESCE_MAX;
+		ctrl_reg |= chan->desc_pendingcount <<
+			    XILINX_DMA_CR_COALESCE_SHIFT;
+		dma_ctrl_write(chan, XILINX_DMA_REG_DMACR, ctrl_reg);
+	}
+
+	if (chan->has_sg) {
+		dma_ctrl_write(chan, XILINX_DMA_REG_CURDESC,
+			       head_desc->async_tx.phys);
+
+		/* Update tail ptr register which will start the transfer */
+		dma_ctrl_write(chan, XILINX_DMA_REG_TAILDESC,
+			       tail_segment->phys);
+	} else {
+		/* In simple mode */
+		struct xilinx_cdma_tx_segment *segment;
+		struct xilinx_cdma_desc_hw *hw;
+
+		segment = list_first_entry(&head_desc->segments,
+					   struct xilinx_cdma_tx_segment,
+					   node);
+
+		hw = &segment->hw;
+
+		dma_ctrl_write(chan, XILINX_CDMA_REG_SRCADDR, hw->src_addr);
+		dma_ctrl_write(chan, XILINX_CDMA_REG_DSTADDR, hw->dest_addr);
+
+		/* Start the transfer */
+		dma_ctrl_write(chan, XILINX_DMA_REG_BTT,
+			       hw->control & XILINX_DMA_MAX_TRANS_LEN);
+	}
+
+	list_splice_tail_init(&chan->pending_list, &chan->active_list);
+	chan->desc_pendingcount = 0;
+}
+
+/**
  * xilinx_dma_start_transfer - Starts DMA transfer
  * @chan: Driver specific channel struct pointer
  */
@@ -1165,6 +1320,7 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
 	struct xilinx_vdma_tx_segment *tail_segment;
 	struct xilinx_dma_tx_descriptor *tail_desc;
 	struct xilinx_axidma_tx_segment *axidma_tail_segment;
+	struct xilinx_cdma_tx_segment *cdma_tail_segment;
 
 	if (list_empty(&chan->pending_list))
 		goto append;
@@ -1180,6 +1336,11 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
 					  struct xilinx_vdma_tx_segment,
 					  node);
 		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
+	} else if (chan->xdev->dmatype == XDMA_TYPE_CDMA) {
+		cdma_tail_segment = list_last_entry(&tail_desc->segments,
+						struct xilinx_cdma_tx_segment,
+						node);
+		cdma_tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
 	} else {
 		axidma_tail_segment = list_last_entry(&tail_desc->segments,
 					       struct xilinx_axidma_tx_segment,
@@ -1323,6 +1484,68 @@ error:
 }
 
 /**
+ * xilinx_cdma_prep_memcpy - prepare descriptors for a memcpy transaction
+ * @dchan: DMA channel
+ * @dma_dst: destination address
+ * @dma_src: source address
+ * @len: transfer length
+ * @flags: transfer ack flags
+ *
+ * Return: Async transaction descriptor on success and NULL on failure
+ */
+static struct dma_async_tx_descriptor *
+xilinx_cdma_prep_memcpy(struct dma_chan *dchan, dma_addr_t dma_dst,
+			dma_addr_t dma_src, size_t len, unsigned long flags)
+{
+	struct xilinx_dma_chan *chan = to_xilinx_chan(dchan);
+	struct xilinx_dma_tx_descriptor *desc;
+	struct xilinx_cdma_tx_segment *segment, *prev;
+	struct xilinx_cdma_desc_hw *hw;
+
+	if (!len || len > XILINX_DMA_MAX_TRANS_LEN)
+		return NULL;
+
+	desc = xilinx_dma_alloc_tx_descriptor(chan);
+	if (!desc)
+		return NULL;
+
+	dma_async_tx_descriptor_init(&desc->async_tx, &chan->common);
+	desc->async_tx.tx_submit = xilinx_dma_tx_submit;
+
+	/* Allocate the link descriptor from DMA pool */
+	segment = xilinx_cdma_alloc_tx_segment(chan);
+	if (!segment)
+		goto error;
+
+	hw = &segment->hw;
+	hw->control = len;
+	hw->src_addr = dma_src;
+	hw->dest_addr = dma_dst;
+
+	/* Fill the previous next descriptor with current */
+	prev = list_last_entry(&desc->segments,
+			       struct xilinx_cdma_tx_segment, node);
+	prev->hw.next_desc = segment->phys;
+
+	/* Insert the segment into the descriptor segments list. */
+	list_add_tail(&segment->node, &desc->segments);
+
+	prev = segment;
+
+	/* Link the last hardware descriptor with the first. */
+	segment = list_first_entry(&desc->segments,
+				   struct xilinx_cdma_tx_segment, node);
+	desc->async_tx.phys = segment->phys;
+	prev->hw.next_desc = segment->phys;
+
+	return &desc->async_tx;
+
+error:
+	xilinx_dma_free_tx_descriptor(chan, desc);
+	return NULL;
+}
+
+/**
  * xilinx_dma_prep_slave_sg - prepare descriptors for a DMA_SLAVE transaction
  * @dchan: DMA channel
  * @sgl: scatterlist to transfer to/from
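Not part of the patch, but worth noting for readers following the control flow: the descriptor returned by xilinx_cdma_prep_memcpy() is not started immediately. dmaengine_submit() invokes xilinx_dma_tx_submit(), which only appends the descriptor to the channel's pending list; the hardware is programmed when the client later calls dma_async_issue_pending(), which lands in the xilinx_cdma_start_transfer() routine added earlier and takes either the scatter-gather path or the simple-mode SRCADDR/DSTADDR/BTT path depending on chan->has_sg.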
@@ -1623,6 +1846,8 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 
 	if (xdev->dmatype == XDMA_TYPE_AXIDMA)
 		chan->start_transfer = xilinx_dma_start_transfer;
+	else if (xdev->dmatype == XDMA_TYPE_CDMA)
+		chan->start_transfer = xilinx_cdma_start_transfer;
 	else
 		chan->start_transfer = xilinx_vdma_start_transfer;
 
@@ -1671,6 +1896,8 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
 static const struct of_device_id xilinx_dma_of_ids[] = {
 	{ .compatible = "xlnx,axi-dma-1.00.a",
 	  .data = (void *)XDMA_TYPE_AXIDMA },
+	{ .compatible = "xlnx,axi-cdma-1.00.a",
+	  .data = (void *)XDMA_TYPE_CDMA },
 	{ .compatible = "xlnx,axi-vdma-1.00.a",
 	  .data = (void *)XDMA_TYPE_VDMA },
 	{}
@@ -1741,8 +1968,10 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	xdev->common.dev = &pdev->dev;
 
 	INIT_LIST_HEAD(&xdev->common.channels);
-	dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
-	dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+	if (!(xdev->dmatype == XDMA_TYPE_CDMA)) {
+		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
+		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
+	}
 
 	xdev->common.device_alloc_chan_resources =
 				xilinx_dma_alloc_chan_resources;
@@ -1756,6 +1985,9 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		/* Residue calculation is supported by only AXI DMA */
 		xdev->common.residue_granularity =
 					  DMA_RESIDUE_GRANULARITY_SEGMENT;
+	} else if (xdev->dmatype == XDMA_TYPE_CDMA) {
+		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
+		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
 	} else {
 		xdev->common.device_prep_interleaved_dma =
 					  xilinx_vdma_dma_prep_interleaved;