author    Kedareswara rao Appana <appana.durga.rao@xilinx.com>    2016-05-13 03:03:29 -0400
committer Vinod Koul <vinod.koul@intel.com>    2016-05-13 05:30:18 -0400
commit    fb2366675ec2221241bea83dc4ec57ed33ce3dcd (patch)
tree      6c047434a86bc399c70cb00ff2950bd18b1de208 /drivers/dma/xilinx
parent    07b0e7d49cbcadebad9d3b986f3298e33286dea2 (diff)
dmaengine: vdma: Add config structure to differentiate dmas
This patch adds a config structure to the driver to differentiate between the AXI DMA IPs and to make it easier to add more features (clock support, etc.) to these DMAs.

Signed-off-by: Kedareswara rao Appana <appanad@xilinx.com>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
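For readers less familiar with the pattern this patch moves to, the sketch below (not part of the patch) illustrates the general of_device_id match-data technique: a per-IP config structure attached to each compatible entry via .data and looked up at probe time. The enum values and the xilinx_dma_config struct mirror the driver's; everything prefixed "example_" is a hypothetical name used only for illustration.

/*
 * Minimal, self-contained sketch of the match-data pattern adopted by
 * this patch.  Names prefixed "example_" are illustrative only.
 */
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

enum xdma_ip_type { XDMA_TYPE_AXIDMA, XDMA_TYPE_CDMA, XDMA_TYPE_VDMA };

struct xilinx_dma_config {
	enum xdma_ip_type dmatype;
	/* future per-IP data (e.g. clock handling) can be added here */
};

static const struct xilinx_dma_config example_axidma_config = {
	.dmatype = XDMA_TYPE_AXIDMA,
};

static const struct of_device_id example_of_ids[] = {
	/* one entry per supported IP, each carrying its config via .data */
	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &example_axidma_config },
	{ /* sentinel */ }
};

static int example_probe(struct platform_device *pdev)
{
	const struct of_device_id *match;
	const struct xilinx_dma_config *cfg;

	/* Look up the config attached to the matching compatible string */
	match = of_match_node(example_of_ids, pdev->dev.of_node);
	if (!match || !match->data)
		return -EINVAL;
	cfg = match->data;

	if (cfg->dmatype == XDMA_TYPE_AXIDMA)
		dev_info(&pdev->dev, "probing an AXI DMA instance\n");

	return 0;
}

Keeping the per-IP data behind a const struct (rather than casting an enum into the .data pointer, as the old code did) leaves room to grow the config without touching every match entry.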
Diffstat (limited to 'drivers/dma/xilinx')
-rw-r--r--  drivers/dma/xilinx/xilinx_vdma.c | 83
1 file changed, 51 insertions(+), 32 deletions(-)
diff --git a/drivers/dma/xilinx/xilinx_vdma.c b/drivers/dma/xilinx/xilinx_vdma.c
index fb481135f27a..0f91f02fca2f 100644
--- a/drivers/dma/xilinx/xilinx_vdma.c
+++ b/drivers/dma/xilinx/xilinx_vdma.c
@@ -342,6 +342,10 @@ struct xilinx_dma_chan {
 	void (*start_transfer)(struct xilinx_dma_chan *chan);
 };
 
+struct xilinx_dma_config {
+	enum xdma_ip_type dmatype;
+};
+
 /**
  * struct xilinx_dma_device - DMA device structure
  * @regs: I/O mapped base address
@@ -351,7 +355,7 @@ struct xilinx_dma_chan {
  * @has_sg: Specifies whether Scatter-Gather is present or not
  * @flush_on_fsync: Flush on frame sync
  * @ext_addr: Indicates 64 bit addressing is supported by dma device
- * @dmatype: DMA ip type
+ * @dma_config: DMA config structure
  */
 struct xilinx_dma_device {
 	void __iomem *regs;
@@ -361,7 +365,7 @@ struct xilinx_dma_device {
 	bool has_sg;
 	u32 flush_on_fsync;
 	bool ext_addr;
-	enum xdma_ip_type dmatype;
+	const struct xilinx_dma_config *dma_config;
 };
 
 /* Macros */
@@ -572,12 +576,12 @@ xilinx_dma_free_tx_descriptor(struct xilinx_dma_chan *chan,
 	if (!desc)
 		return;
 
-	if (chan->xdev->dmatype == XDMA_TYPE_VDMA) {
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		list_for_each_entry_safe(segment, next, &desc->segments, node) {
 			list_del(&segment->node);
 			xilinx_vdma_free_tx_segment(chan, segment);
 		}
-	} else if (chan->xdev->dmatype == XDMA_TYPE_CDMA) {
+	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		list_for_each_entry_safe(cdma_segment, cdma_next,
 					 &desc->segments, node) {
 			list_del(&cdma_segment->node);
@@ -640,7 +644,7 @@ static void xilinx_dma_free_chan_resources(struct dma_chan *dchan)
 	dev_dbg(chan->dev, "Free all channel resources.\n");
 
 	xilinx_dma_free_descriptors(chan);
-	if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA)
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
 		xilinx_dma_free_tx_segment(chan, chan->seg_v);
 	dma_pool_destroy(chan->desc_pool);
 	chan->desc_pool = NULL;
@@ -710,13 +714,13 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 	 * We need the descriptor to be aligned to 64bytes
 	 * for meeting Xilinx VDMA specification requirement.
 	 */
-	if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA) {
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		chan->desc_pool = dma_pool_create("xilinx_dma_desc_pool",
 				   chan->dev,
 				   sizeof(struct xilinx_axidma_tx_segment),
 				   __alignof__(struct xilinx_axidma_tx_segment),
 				   0);
-	} else if (chan->xdev->dmatype == XDMA_TYPE_CDMA) {
+	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		chan->desc_pool = dma_pool_create("xilinx_cdma_desc_pool",
 				   chan->dev,
 				   sizeof(struct xilinx_cdma_tx_segment),
@@ -737,7 +741,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 		return -ENOMEM;
 	}
 
-	if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA)
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
 		/*
 		 * For AXI DMA case after submitting a pending_list, keep
 		 * an extra segment allocated so that the "next descriptor"
@@ -750,7 +754,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 
 	dma_cookie_init(dchan);
 
-	if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA) {
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		/* For AXI DMA resetting once channel will reset the
 		 * other channel as well so enable the interrupts here.
 		 */
@@ -758,7 +762,7 @@ static int xilinx_dma_alloc_chan_resources(struct dma_chan *dchan)
 			      XILINX_DMA_DMAXR_ALL_IRQ_MASK);
 	}
 
-	if ((chan->xdev->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
+	if ((chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) && chan->has_sg)
 		dma_ctrl_set(chan, XILINX_DMA_REG_DMACR,
 			     XILINX_CDMA_CR_SGMODE);
 
@@ -789,7 +793,7 @@ static enum dma_status xilinx_dma_tx_status(struct dma_chan *dchan,
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
-	if (chan->xdev->dmatype == XDMA_TYPE_AXIDMA) {
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		spin_lock_irqsave(&chan->lock, flags);
 
 		desc = list_last_entry(&chan->active_list,
@@ -1331,12 +1335,12 @@ static void append_desc_queue(struct xilinx_dma_chan *chan,
 	 */
 	tail_desc = list_last_entry(&chan->pending_list,
 				    struct xilinx_dma_tx_descriptor, node);
-	if (chan->xdev->dmatype == XDMA_TYPE_VDMA) {
+	if (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		tail_segment = list_last_entry(&tail_desc->segments,
 					       struct xilinx_vdma_tx_segment,
 					       node);
 		tail_segment->hw.next_desc = (u32)desc->async_tx.phys;
-	} else if (chan->xdev->dmatype == XDMA_TYPE_CDMA) {
+	} else if (chan->xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		cdma_tail_segment = list_last_entry(&tail_desc->segments,
 						    struct xilinx_cdma_tx_segment,
 						    node);
@@ -1356,8 +1360,8 @@ append:
 	list_add_tail(&desc->node, &chan->pending_list);
 	chan->desc_pendingcount++;
 
-	if (chan->has_sg && (chan->xdev->dmatype == XDMA_TYPE_VDMA) &&
-	    unlikely(chan->desc_pendingcount > chan->num_frms)) {
+	if (chan->has_sg && (chan->xdev->dma_config->dmatype == XDMA_TYPE_VDMA)
+	    && unlikely(chan->desc_pendingcount > chan->num_frms)) {
 		dev_dbg(chan->dev, "desc pendingcount is too high\n");
 		chan->desc_pendingcount = chan->num_frms;
 	}
@@ -1810,7 +1814,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->id = 0;
 
 		chan->ctrl_offset = XILINX_DMA_MM2S_CTRL_OFFSET;
-		if (xdev->dmatype == XDMA_TYPE_VDMA) {
+		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 			chan->desc_offset = XILINX_VDMA_MM2S_DESC_OFFSET;
 
 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
@@ -1823,7 +1827,7 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		chan->id = 1;
 
 		chan->ctrl_offset = XILINX_DMA_S2MM_CTRL_OFFSET;
-		if (xdev->dmatype == XDMA_TYPE_VDMA) {
+		if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 			chan->desc_offset = XILINX_VDMA_S2MM_DESC_OFFSET;
 
 			if (xdev->flush_on_fsync == XILINX_DMA_FLUSH_BOTH ||
@@ -1844,9 +1848,9 @@ static int xilinx_dma_chan_probe(struct xilinx_dma_device *xdev,
 		return err;
 	}
 
-	if (xdev->dmatype == XDMA_TYPE_AXIDMA)
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA)
 		chan->start_transfer = xilinx_dma_start_transfer;
-	else if (xdev->dmatype == XDMA_TYPE_CDMA)
+	else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA)
 		chan->start_transfer = xilinx_cdma_start_transfer;
 	else
 		chan->start_transfer = xilinx_vdma_start_transfer;
@@ -1893,13 +1897,22 @@ static struct dma_chan *of_dma_xilinx_xlate(struct of_phandle_args *dma_spec,
 	return dma_get_slave_channel(&xdev->chan[chan_id]->common);
 }
 
+static const struct xilinx_dma_config axidma_config = {
+	.dmatype = XDMA_TYPE_AXIDMA,
+};
+
+static const struct xilinx_dma_config axicdma_config = {
+	.dmatype = XDMA_TYPE_CDMA,
+};
+
+static const struct xilinx_dma_config axivdma_config = {
+	.dmatype = XDMA_TYPE_VDMA,
+};
+
 static const struct of_device_id xilinx_dma_of_ids[] = {
-	{ .compatible = "xlnx,axi-dma-1.00.a",
-	  .data = (void *)XDMA_TYPE_AXIDMA },
-	{ .compatible = "xlnx,axi-cdma-1.00.a",
-	  .data = (void *)XDMA_TYPE_CDMA },
-	{ .compatible = "xlnx,axi-vdma-1.00.a",
-	  .data = (void *)XDMA_TYPE_VDMA },
+	{ .compatible = "xlnx,axi-dma-1.00.a", .data = &axidma_config },
+	{ .compatible = "xlnx,axi-cdma-1.00.a", .data = &axicdma_config },
+	{ .compatible = "xlnx,axi-vdma-1.00.a", .data = &axivdma_config },
 	{}
 };
 MODULE_DEVICE_TABLE(of, xilinx_dma_of_ids);
@@ -1914,7 +1927,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 {
 	struct device_node *node = pdev->dev.of_node;
 	struct xilinx_dma_device *xdev;
-	struct device_node *child;
+	struct device_node *child, *np = pdev->dev.of_node;
 	struct resource *io;
 	u32 num_frames, addr_width;
 	int i, err;
@@ -1925,7 +1938,13 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		return -ENOMEM;
 
 	xdev->dev = &pdev->dev;
-	xdev->dmatype = (enum xdma_ip_type)of_device_get_match_data(&pdev->dev);
+	if (np) {
+		const struct of_device_id *match;
+
+		match = of_match_node(xilinx_dma_of_ids, np);
+		if (match && match->data)
+			xdev->dma_config = match->data;
+	}
 
 	/* Request and map I/O memory */
 	io = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1936,7 +1955,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	/* Retrieve the DMA engine properties from the device tree */
 	xdev->has_sg = of_property_read_bool(node, "xlnx,include-sg");
 
-	if (xdev->dmatype == XDMA_TYPE_VDMA) {
+	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		err = of_property_read_u32(node, "xlnx,num-fstores",
 					   &num_frames);
 		if (err < 0) {
@@ -1968,7 +1987,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	xdev->common.dev = &pdev->dev;
 
 	INIT_LIST_HEAD(&xdev->common.channels);
-	if (!(xdev->dmatype == XDMA_TYPE_CDMA)) {
+	if (!(xdev->dma_config->dmatype == XDMA_TYPE_CDMA)) {
 		dma_cap_set(DMA_SLAVE, xdev->common.cap_mask);
 		dma_cap_set(DMA_PRIVATE, xdev->common.cap_mask);
 	}
@@ -1980,12 +1999,12 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 	xdev->common.device_terminate_all = xilinx_dma_terminate_all;
 	xdev->common.device_tx_status = xilinx_dma_tx_status;
 	xdev->common.device_issue_pending = xilinx_dma_issue_pending;
-	if (xdev->dmatype == XDMA_TYPE_AXIDMA) {
+	if (xdev->dma_config->dmatype == XDMA_TYPE_AXIDMA) {
 		xdev->common.device_prep_slave_sg = xilinx_dma_prep_slave_sg;
 		/* Residue calculation is supported by only AXI DMA */
 		xdev->common.residue_granularity =
 					      DMA_RESIDUE_GRANULARITY_SEGMENT;
-	} else if (xdev->dmatype == XDMA_TYPE_CDMA) {
+	} else if (xdev->dma_config->dmatype == XDMA_TYPE_CDMA) {
 		dma_cap_set(DMA_MEMCPY, xdev->common.cap_mask);
 		xdev->common.device_prep_dma_memcpy = xilinx_cdma_prep_memcpy;
 	} else {
@@ -2002,7 +2021,7 @@ static int xilinx_dma_probe(struct platform_device *pdev)
 		goto error;
 	}
 
-	if (xdev->dmatype == XDMA_TYPE_VDMA) {
+	if (xdev->dma_config->dmatype == XDMA_TYPE_VDMA) {
 		for (i = 0; i < XILINX_DMA_MAX_CHANS_PER_DEVICE; i++)
 			if (xdev->chan[i])
 				xdev->chan[i]->num_frms = num_frames;