summaryrefslogtreecommitdiffstats
path: root/drivers/dma/s3c24xx-dma.c
diff options
context:
space:
mode:
authorVasily Khoruzhick <anarsoul@gmail.com>2014-05-20 16:23:02 -0400
committerVinod Koul <vinod.koul@intel.com>2014-06-01 12:52:51 -0400
commitc3e175e52f82991dbb70a012212b510f7a4b3726 (patch)
treecc60a15736925d643ae6b2a4941bf7656e55b587 /drivers/dma/s3c24xx-dma.c
parent6915f45fb9748ff578025e95506b6aec3734b886 (diff)
dmaengine: s3c24xx-dma: Add cyclic transfer support
Many audio interface drivers require support of cyclic transfers to work correctly — for example, the Samsung ASoC DMA driver. This patch adds support for cyclic transfers to the s3c24xx-dma driver. Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com> Reviewed-by: Heiko Stuebner <heiko@sntech.de> Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma/s3c24xx-dma.c')
-rw-r--r--drivers/dma/s3c24xx-dma.c112
1 file changed, 111 insertions, 1 deletion
diff --git a/drivers/dma/s3c24xx-dma.c b/drivers/dma/s3c24xx-dma.c
index 6528eeda1575..012520c9fd79 100644
--- a/drivers/dma/s3c24xx-dma.c
+++ b/drivers/dma/s3c24xx-dma.c
@@ -164,6 +164,7 @@ struct s3c24xx_sg {
164 * @disrcc: value for source control register 164 * @disrcc: value for source control register
165 * @didstc: value for destination control register 165 * @didstc: value for destination control register
166 * @dcon: base value for dcon register 166 * @dcon: base value for dcon register
167 * @cyclic: indicate cyclic transfer
167 */ 168 */
168struct s3c24xx_txd { 169struct s3c24xx_txd {
169 struct virt_dma_desc vd; 170 struct virt_dma_desc vd;
@@ -173,6 +174,7 @@ struct s3c24xx_txd {
173 u32 disrcc; 174 u32 disrcc;
174 u32 didstc; 175 u32 didstc;
175 u32 dcon; 176 u32 dcon;
177 bool cyclic;
176}; 178};
177 179
178struct s3c24xx_dma_chan; 180struct s3c24xx_dma_chan;
@@ -669,8 +671,10 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
669 /* when more sg's are in this txd, start the next one */ 671 /* when more sg's are in this txd, start the next one */
670 if (!list_is_last(txd->at, &txd->dsg_list)) { 672 if (!list_is_last(txd->at, &txd->dsg_list)) {
671 txd->at = txd->at->next; 673 txd->at = txd->at->next;
674 if (txd->cyclic)
675 vchan_cyclic_callback(&txd->vd);
672 s3c24xx_dma_start_next_sg(s3cchan, txd); 676 s3c24xx_dma_start_next_sg(s3cchan, txd);
673 } else { 677 } else if (!txd->cyclic) {
674 s3cchan->at = NULL; 678 s3cchan->at = NULL;
675 vchan_cookie_complete(&txd->vd); 679 vchan_cookie_complete(&txd->vd);
676 680
@@ -682,6 +686,12 @@ static irqreturn_t s3c24xx_dma_irq(int irq, void *data)
682 s3c24xx_dma_start_next_txd(s3cchan); 686 s3c24xx_dma_start_next_txd(s3cchan);
683 else 687 else
684 s3c24xx_dma_phy_free(s3cchan); 688 s3c24xx_dma_phy_free(s3cchan);
689 } else {
690 vchan_cyclic_callback(&txd->vd);
691
692 /* Cyclic: reset at beginning */
693 txd->at = txd->dsg_list.next;
694 s3c24xx_dma_start_next_sg(s3cchan, txd);
685 } 695 }
686 } 696 }
687 spin_unlock(&s3cchan->vc.lock); 697 spin_unlock(&s3cchan->vc.lock);
@@ -877,6 +887,104 @@ static struct dma_async_tx_descriptor *s3c24xx_dma_prep_memcpy(
877 return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags); 887 return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
878} 888}
879 889
/*
 * s3c24xx_dma_prep_dma_cyclic - prepare a cyclic (circular) slave transaction
 * @chan: DMA channel to prepare for
 * @addr: start address of the memory buffer
 * @size: total buffer size in bytes
 * @period: length of one period in bytes
 * @direction: DMA_MEM_TO_DEV or DMA_DEV_TO_MEM (slave directions only)
 * @flags: descriptor flags passed through to vchan_tx_prep()
 * @context: unused by this implementation
 *
 * Splits the buffer into size/period scatter entries; marking the descriptor
 * cyclic (txd->cyclic) makes the IRQ handler restart from the first entry
 * after the last one completes instead of completing the cookie.
 * Returns the prepared descriptor, or NULL on unsupported direction or OOM.
 */
890static struct dma_async_tx_descriptor *s3c24xx_dma_prep_dma_cyclic(
891 struct dma_chan *chan, dma_addr_t addr, size_t size, size_t period,
892 enum dma_transfer_direction direction, unsigned long flags,
893 void *context)
894{
895 struct s3c24xx_dma_chan *s3cchan = to_s3c24xx_dma_chan(chan);
896 struct s3c24xx_dma_engine *s3cdma = s3cchan->host;
897 const struct s3c24xx_dma_platdata *pdata = s3cdma->pdata;
898 struct s3c24xx_dma_channel *cdata = &pdata->channels[s3cchan->id];
899 struct s3c24xx_txd *txd;
900 struct s3c24xx_sg *dsg;
901 unsigned sg_len;
902 dma_addr_t slave_addr;
903 u32 hwcfg = 0;
904 int i;
905
906 dev_dbg(&s3cdma->pdev->dev,
907 "prepare cyclic transaction of %zu bytes with period %zu from %s\n",
908 size, period, s3cchan->name);
909
910 if (!is_slave_direction(direction)) {
911 dev_err(&s3cdma->pdev->dev,
912 "direction %d unsupported\n", direction);
913 return NULL;
914 }
915
916 txd = s3c24xx_dma_get_txd();
917 if (!txd)
918 return NULL;
919
/* Flag consumed by the IRQ handler to loop back to the first sg entry. */
920 txd->cyclic = 1;
921
922 if (cdata->handshake)
923 txd->dcon |= S3C24XX_DCON_HANDSHAKE;
924
925 switch (cdata->bus) {
926 case S3C24XX_DMA_APB:
927 txd->dcon |= S3C24XX_DCON_SYNC_PCLK;
928 hwcfg |= S3C24XX_DISRCC_LOC_APB;
929 break;
930 case S3C24XX_DMA_AHB:
931 txd->dcon |= S3C24XX_DCON_SYNC_HCLK;
932 hwcfg |= S3C24XX_DISRCC_LOC_AHB;
933 break;
934 }
935
936 /*
937 * Always assume our peripheral destination is a fixed
938 * address in memory.
939 */
940 hwcfg |= S3C24XX_DISRCC_INC_FIXED;
941
942 /*
943 * Individual dma operations are requested by the slave,
944 * so serve only single atomic operations (S3C24XX_DCON_SERV_SINGLE).
945 */
946 txd->dcon |= S3C24XX_DCON_SERV_SINGLE;
947
/* hwcfg describes the peripheral side; the memory side increments. */
948 if (direction == DMA_MEM_TO_DEV) {
949 txd->disrcc = S3C24XX_DISRCC_LOC_AHB |
950 S3C24XX_DISRCC_INC_INCREMENT;
951 txd->didstc = hwcfg;
952 slave_addr = s3cchan->cfg.dst_addr;
953 txd->width = s3cchan->cfg.dst_addr_width;
954 } else {
955 txd->disrcc = hwcfg;
956 txd->didstc = S3C24XX_DIDSTC_LOC_AHB |
957 S3C24XX_DIDSTC_INC_INCREMENT;
958 slave_addr = s3cchan->cfg.src_addr;
959 txd->width = s3cchan->cfg.src_addr_width;
960 }
961
/* Integer division: any trailing remainder is folded into the last entry. */
962 sg_len = size / period;
963
964 for (i = 0; i < sg_len; i++) {
965 dsg = kzalloc(sizeof(*dsg), GFP_NOWAIT);
966 if (!dsg) {
/* Frees the txd and any sg entries already queued on dsg_list. */
967 s3c24xx_dma_free_txd(txd);
968 return NULL;
969 }
970 list_add_tail(&dsg->node, &txd->dsg_list);
971
972 dsg->len = period;
973 /* Check last period length */
/*
 * NOTE(review): when size is not an exact multiple of period, this
 * makes the final entry absorb the remainder (len > period) rather
 * than dropping it — confirm that is the intended behavior.
 */
974 if (i == sg_len - 1)
975 dsg->len = size - period * i;
976 if (direction == DMA_MEM_TO_DEV) {
977 dsg->src_addr = addr + period * i;
978 dsg->dst_addr = slave_addr;
979 } else { /* DMA_DEV_TO_MEM */
980 dsg->src_addr = slave_addr;
981 dsg->dst_addr = addr + period * i;
982 }
983 }
984
985 return vchan_tx_prep(&s3cchan->vc, &txd->vd, flags);
986}
987
880static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg( 988static struct dma_async_tx_descriptor *s3c24xx_dma_prep_slave_sg(
881 struct dma_chan *chan, struct scatterlist *sgl, 989 struct dma_chan *chan, struct scatterlist *sgl,
882 unsigned int sg_len, enum dma_transfer_direction direction, 990 unsigned int sg_len, enum dma_transfer_direction direction,
@@ -1197,6 +1305,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1197 1305
1198 /* Initialize slave engine for SoC internal dedicated peripherals */ 1306 /* Initialize slave engine for SoC internal dedicated peripherals */
1199 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask); 1307 dma_cap_set(DMA_SLAVE, s3cdma->slave.cap_mask);
1308 dma_cap_set(DMA_CYCLIC, s3cdma->slave.cap_mask);
1200 dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask); 1309 dma_cap_set(DMA_PRIVATE, s3cdma->slave.cap_mask);
1201 s3cdma->slave.dev = &pdev->dev; 1310 s3cdma->slave.dev = &pdev->dev;
1202 s3cdma->slave.device_alloc_chan_resources = 1311 s3cdma->slave.device_alloc_chan_resources =
@@ -1206,6 +1315,7 @@ static int s3c24xx_dma_probe(struct platform_device *pdev)
1206 s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status; 1315 s3cdma->slave.device_tx_status = s3c24xx_dma_tx_status;
1207 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending; 1316 s3cdma->slave.device_issue_pending = s3c24xx_dma_issue_pending;
1208 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg; 1317 s3cdma->slave.device_prep_slave_sg = s3c24xx_dma_prep_slave_sg;
1318 s3cdma->slave.device_prep_dma_cyclic = s3c24xx_dma_prep_dma_cyclic;
1209 s3cdma->slave.device_control = s3c24xx_dma_control; 1319 s3cdma->slave.device_control = s3c24xx_dma_control;
1210 1320
1211 /* Register as many memcpy channels as there are physical channels */ 1321 /* Register as many memcpy channels as there are physical channels */