aboutsummaryrefslogtreecommitdiffstats
path: root/drivers/dma/at_xdmac.c
diff options
context:
space:
mode:
authorMaxime Ripard <maxime.ripard@free-electrons.com>2015-05-18 07:46:16 -0400
committerVinod Koul <vinod.koul@intel.com>2015-06-24 23:52:32 -0400
commitb206d9a23ac71cb905f5fb6e0cd813406f89b678 (patch)
tree885cdd646b644d9a753d2cf3e386c6d49fd3b6a8 /drivers/dma/at_xdmac.c
parentf2704052cb42aabfa19b3f897cbceb70e2e63c45 (diff)
dmaengine: xdmac: Add memset support
The XDMAC supports memset transfers, both over contiguous areas and over discontiguous areas through an LLI. For now, add support for the contiguous case only; scatter-gathered memset will come eventually. Signed-off-by: Maxime Ripard <maxime.ripard@free-electrons.com> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/at_xdmac.c')
-rw-r--r--drivers/dma/at_xdmac.c89
1 files changed, 89 insertions, 0 deletions
diff --git a/drivers/dma/at_xdmac.c b/drivers/dma/at_xdmac.c
index 80e46e571bdd..897e759b0c97 100644
--- a/drivers/dma/at_xdmac.c
+++ b/drivers/dma/at_xdmac.c
@@ -1073,6 +1073,93 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
1073 return &first->tx_dma_desc; 1073 return &first->tx_dma_desc;
1074} 1074}
1075 1075
1076static struct at_xdmac_desc *at_xdmac_memset_create_desc(struct dma_chan *chan,
1077 struct at_xdmac_chan *atchan,
1078 dma_addr_t dst_addr,
1079 size_t len,
1080 int value)
1081{
1082 struct at_xdmac_desc *desc;
1083 unsigned long flags;
1084 size_t ublen;
1085 u32 dwidth;
1086 /*
1087 * WARNING: The channel configuration is set here since there is no
1088 * dmaengine_slave_config call in this case. Moreover we don't know the
1089 * direction, it involves we can't dynamically set the source and dest
1090 * interface so we have to use the same one. Only interface 0 allows EBI
1091 * access. Hopefully we can access DDR through both ports (at least on
1092 * SAMA5D4x), so we can use the same interface for source and dest,
1093 * that solves the fact we don't know the direction.
1094 */
1095 u32 chan_cc = AT_XDMAC_CC_DAM_INCREMENTED_AM
1096 | AT_XDMAC_CC_SAM_INCREMENTED_AM
1097 | AT_XDMAC_CC_DIF(0)
1098 | AT_XDMAC_CC_SIF(0)
1099 | AT_XDMAC_CC_MBSIZE_SIXTEEN
1100 | AT_XDMAC_CC_MEMSET_HW_MODE
1101 | AT_XDMAC_CC_TYPE_MEM_TRAN;
1102
1103 dwidth = at_xdmac_align_width(chan, dst_addr);
1104
1105 if (len >= (AT_XDMAC_MBR_UBC_UBLEN_MAX << dwidth)) {
1106 dev_err(chan2dev(chan),
1107 "%s: Transfer too large, aborting...\n",
1108 __func__);
1109 return NULL;
1110 }
1111
1112 spin_lock_irqsave(&atchan->lock, flags);
1113 desc = at_xdmac_get_desc(atchan);
1114 spin_unlock_irqrestore(&atchan->lock, flags);
1115 if (!desc) {
1116 dev_err(chan2dev(chan), "can't get descriptor\n");
1117 return NULL;
1118 }
1119
1120 chan_cc |= AT_XDMAC_CC_DWIDTH(dwidth);
1121
1122 ublen = len >> dwidth;
1123
1124 desc->lld.mbr_da = dst_addr;
1125 desc->lld.mbr_ds = value;
1126 desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV3
1127 | AT_XDMAC_MBR_UBC_NDEN
1128 | AT_XDMAC_MBR_UBC_NSEN
1129 | ublen;
1130 desc->lld.mbr_cfg = chan_cc;
1131
1132 dev_dbg(chan2dev(chan),
1133 "%s: lld: mbr_da=0x%08x, mbr_ds=0x%08x, mbr_ubc=0x%08x, mbr_cfg=0x%08x\n",
1134 __func__, desc->lld.mbr_da, desc->lld.mbr_ds, desc->lld.mbr_ubc,
1135 desc->lld.mbr_cfg);
1136
1137 return desc;
1138}
1139
1140struct dma_async_tx_descriptor *
1141at_xdmac_prep_dma_memset(struct dma_chan *chan, dma_addr_t dest, int value,
1142 size_t len, unsigned long flags)
1143{
1144 struct at_xdmac_chan *atchan = to_at_xdmac_chan(chan);
1145 struct at_xdmac_desc *desc;
1146
1147 dev_dbg(chan2dev(chan), "%s: dest=0x%08x, len=%d, pattern=0x%x, flags=0x%lx\n",
1148 __func__, dest, len, value, flags);
1149
1150 if (unlikely(!len))
1151 return NULL;
1152
1153 desc = at_xdmac_memset_create_desc(chan, atchan, dest, len, value);
1154 list_add_tail(&desc->desc_node, &desc->descs_list);
1155
1156 desc->tx_dma_desc.cookie = -EBUSY;
1157 desc->tx_dma_desc.flags = flags;
1158 desc->xfer_size = len;
1159
1160 return &desc->tx_dma_desc;
1161}
1162
1076static enum dma_status 1163static enum dma_status
1077at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie, 1164at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
1078 struct dma_tx_state *txstate) 1165 struct dma_tx_state *txstate)
@@ -1599,6 +1686,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
1599 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask); 1686 dma_cap_set(DMA_CYCLIC, atxdmac->dma.cap_mask);
1600 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask); 1687 dma_cap_set(DMA_INTERLEAVE, atxdmac->dma.cap_mask);
1601 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask); 1688 dma_cap_set(DMA_MEMCPY, atxdmac->dma.cap_mask);
1689 dma_cap_set(DMA_MEMSET, atxdmac->dma.cap_mask);
1602 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask); 1690 dma_cap_set(DMA_SLAVE, atxdmac->dma.cap_mask);
1603 /* 1691 /*
1604 * Without DMA_PRIVATE the driver is not able to allocate more than 1692 * Without DMA_PRIVATE the driver is not able to allocate more than
@@ -1613,6 +1701,7 @@ static int at_xdmac_probe(struct platform_device *pdev)
1613 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic; 1701 atxdmac->dma.device_prep_dma_cyclic = at_xdmac_prep_dma_cyclic;
1614 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved; 1702 atxdmac->dma.device_prep_interleaved_dma = at_xdmac_prep_interleaved;
1615 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy; 1703 atxdmac->dma.device_prep_dma_memcpy = at_xdmac_prep_dma_memcpy;
1704 atxdmac->dma.device_prep_dma_memset = at_xdmac_prep_dma_memset;
1616 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg; 1705 atxdmac->dma.device_prep_slave_sg = at_xdmac_prep_slave_sg;
1617 atxdmac->dma.device_config = at_xdmac_device_config; 1706 atxdmac->dma.device_config = at_xdmac_device_config;
1618 atxdmac->dma.device_pause = at_xdmac_device_pause; 1707 atxdmac->dma.device_pause = at_xdmac_device_pause;