summaryrefslogtreecommitdiffstats
path: root/drivers/dma/xgene-dma.c
diff options
context:
space:
mode:
authorLinus Torvalds <torvalds@linux-foundation.org>2015-11-10 13:05:17 -0500
committerLinus Torvalds <torvalds@linux-foundation.org>2015-11-10 13:05:17 -0500
commit041c79514af9080c75197078283134f538f46b44 (patch)
treed5e465d5967d84adb37d735fddec48ee0509b93c /drivers/dma/xgene-dma.c
parent7d884710bb3635f94dac152ae226ca54a585a223 (diff)
parent34635b1accb99b3c3ad3b35a210be198701aac7e (diff)
Merge tag 'dmaengine-4.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma
Pull dmaengine updates from Vinod Koul: "This time we have a very typical update which is mostly fixes and updates to drivers and no new drivers. - the biggest change is coming from Peter for edma cleanup which even caused some last minute regression, things seem settled now - idma64 and dw updates - ioatdma updates - module autoload fixes for various drivers - scatter gather support for hdmac" * tag 'dmaengine-4.4-rc1' of git://git.infradead.org/users/vkoul/slave-dma: (77 commits) dmaengine: edma: Add dummy driver skeleton for edma3-tptc Revert "ARM: DTS: am33xx: Use the new DT bindings for the eDMA3" Revert "ARM: DTS: am437x: Use the new DT bindings for the eDMA3" dmaengine: dw: some Intel devices has no memcpy support dmaengine: dw: platform: provide platform data for Intel dmaengine: dw: don't override platform data with autocfg dmaengine: hdmac: Add scatter-gathered memset support dmaengine: hdmac: factorise memset descriptor allocation dmaengine: virt-dma: Fix kernel-doc annotations ARM: DTS: am437x: Use the new DT bindings for the eDMA3 ARM: DTS: am33xx: Use the new DT bindings for the eDMA3 dmaengine: edma: New device tree binding dmaengine: Kconfig: edma: Select TI_DMA_CROSSBAR in case of ARCH_OMAP dmaengine: ti-dma-crossbar: Add support for crossbar on AM33xx/AM43xx dmaengine: edma: Merge the of parsing functions dmaengine: edma: Do not allocate memory for edma_rsv_info in case of DT boot dmaengine: edma: Refactor the dma device and channel struct initialization dmaengine: edma: Get qDMA channel information from HW also dmaengine: edma: Merge map_dmach_to_queue into assign_channel_eventq dmaengine: edma: Correct PaRAM access function names (_parm_ to _param_) ...
Diffstat (limited to 'drivers/dma/xgene-dma.c')
-rw-r--r--drivers/dma/xgene-dma.c63
1 files changed, 2 insertions, 61 deletions
diff --git a/drivers/dma/xgene-dma.c b/drivers/dma/xgene-dma.c
index 8d57b1b12e41..9dfa2b0fa5da 100644
--- a/drivers/dma/xgene-dma.c
+++ b/drivers/dma/xgene-dma.c
@@ -547,14 +547,12 @@ static struct xgene_dma_desc_sw *xgene_dma_alloc_descriptor(
547 struct xgene_dma_desc_sw *desc; 547 struct xgene_dma_desc_sw *desc;
548 dma_addr_t phys; 548 dma_addr_t phys;
549 549
550 desc = dma_pool_alloc(chan->desc_pool, GFP_NOWAIT, &phys); 550 desc = dma_pool_zalloc(chan->desc_pool, GFP_NOWAIT, &phys);
551 if (!desc) { 551 if (!desc) {
552 chan_err(chan, "Failed to allocate LDs\n"); 552 chan_err(chan, "Failed to allocate LDs\n");
553 return NULL; 553 return NULL;
554 } 554 }
555 555
556 memset(desc, 0, sizeof(*desc));
557
558 INIT_LIST_HEAD(&desc->tx_list); 556 INIT_LIST_HEAD(&desc->tx_list);
559 desc->tx.phys = phys; 557 desc->tx.phys = phys;
560 desc->tx.tx_submit = xgene_dma_tx_submit; 558 desc->tx.tx_submit = xgene_dma_tx_submit;
@@ -894,60 +892,6 @@ static void xgene_dma_free_chan_resources(struct dma_chan *dchan)
894 chan->desc_pool = NULL; 892 chan->desc_pool = NULL;
895} 893}
896 894
897static struct dma_async_tx_descriptor *xgene_dma_prep_memcpy(
898 struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
899 size_t len, unsigned long flags)
900{
901 struct xgene_dma_desc_sw *first = NULL, *new;
902 struct xgene_dma_chan *chan;
903 size_t copy;
904
905 if (unlikely(!dchan || !len))
906 return NULL;
907
908 chan = to_dma_chan(dchan);
909
910 do {
911 /* Allocate the link descriptor from DMA pool */
912 new = xgene_dma_alloc_descriptor(chan);
913 if (!new)
914 goto fail;
915
916 /* Create the largest transaction possible */
917 copy = min_t(size_t, len, XGENE_DMA_MAX_64B_DESC_BYTE_CNT);
918
919 /* Prepare DMA descriptor */
920 xgene_dma_prep_cpy_desc(chan, new, dst, src, copy);
921
922 if (!first)
923 first = new;
924
925 new->tx.cookie = 0;
926 async_tx_ack(&new->tx);
927
928 /* Update metadata */
929 len -= copy;
930 dst += copy;
931 src += copy;
932
933 /* Insert the link descriptor to the LD ring */
934 list_add_tail(&new->node, &first->tx_list);
935 } while (len);
936
937 new->tx.flags = flags; /* client is in control of this ack */
938 new->tx.cookie = -EBUSY;
939 list_splice(&first->tx_list, &new->tx_list);
940
941 return &new->tx;
942
943fail:
944 if (!first)
945 return NULL;
946
947 xgene_dma_free_desc_list(chan, &first->tx_list);
948 return NULL;
949}
950
951static struct dma_async_tx_descriptor *xgene_dma_prep_sg( 895static struct dma_async_tx_descriptor *xgene_dma_prep_sg(
952 struct dma_chan *dchan, struct scatterlist *dst_sg, 896 struct dma_chan *dchan, struct scatterlist *dst_sg,
953 u32 dst_nents, struct scatterlist *src_sg, 897 u32 dst_nents, struct scatterlist *src_sg,
@@ -1707,7 +1651,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
1707 dma_cap_zero(dma_dev->cap_mask); 1651 dma_cap_zero(dma_dev->cap_mask);
1708 1652
1709 /* Set DMA device capability */ 1653 /* Set DMA device capability */
1710 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1711 dma_cap_set(DMA_SG, dma_dev->cap_mask); 1654 dma_cap_set(DMA_SG, dma_dev->cap_mask);
1712 1655
1713 /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR 1656 /* Basically here, the X-Gene SoC DMA engine channel 0 supports XOR
@@ -1734,7 +1677,6 @@ static void xgene_dma_set_caps(struct xgene_dma_chan *chan,
1734 dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources; 1677 dma_dev->device_free_chan_resources = xgene_dma_free_chan_resources;
1735 dma_dev->device_issue_pending = xgene_dma_issue_pending; 1678 dma_dev->device_issue_pending = xgene_dma_issue_pending;
1736 dma_dev->device_tx_status = xgene_dma_tx_status; 1679 dma_dev->device_tx_status = xgene_dma_tx_status;
1737 dma_dev->device_prep_dma_memcpy = xgene_dma_prep_memcpy;
1738 dma_dev->device_prep_dma_sg = xgene_dma_prep_sg; 1680 dma_dev->device_prep_dma_sg = xgene_dma_prep_sg;
1739 1681
1740 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) { 1682 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
@@ -1787,8 +1729,7 @@ static int xgene_dma_async_register(struct xgene_dma *pdma, int id)
1787 1729
1788 /* DMA capability info */ 1730 /* DMA capability info */
1789 dev_info(pdma->dev, 1731 dev_info(pdma->dev,
1790 "%s: CAPABILITY ( %s%s%s%s)\n", dma_chan_name(&chan->dma_chan), 1732 "%s: CAPABILITY ( %s%s%s)\n", dma_chan_name(&chan->dma_chan),
1791 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "MEMCPY " : "",
1792 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "", 1733 dma_has_cap(DMA_SG, dma_dev->cap_mask) ? "SGCPY " : "",
1793 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "", 1734 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "XOR " : "",
1794 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : ""); 1735 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "PQ " : "");