author     Alexander Popov <a13xp0p0v88@gmail.com>   2014-05-15 10:15:32 -0400
committer  Vinod Koul <vinod.koul@intel.com>         2014-05-22 01:07:01 -0400
commit     63da8e0d4f274fdf73b9924e8fd8f64a3d11d24a (patch)
tree       29080d480bb3ae16f12570c26f696553dec3d56d
parent     ba730340f96c01160b5f26f81e8fb38f8cb1821c (diff)
dmaengine: mpc512x: add support for peripheral transfers
Introduce support for slave s/g transfer preparation and the associated
device control callback in the MPC512x DMA controller driver. This adds
data transfers between memory and peripheral I/O to the previously
supported mem-to-mem transfers.

Signed-off-by: Alexander Popov <a13xp0p0v88@gmail.com>
[fixed subsystem name]
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
-rw-r--r--  drivers/dma/mpc512x_dma.c | 244
1 file changed, 239 insertions(+), 5 deletions(-)
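Review note: below is a minimal, hypothetical sketch of how a client driver
could exercise the new slave path through the generic dmaengine API, honoring
the constraints this patch documents (single-entry s/g list, 4-byte FIFO
access register, transfer size a multiple of 4 * maxburst). The device,
channel, FIFO address, and buffer names are illustrative only and are not
part of this commit; error-path unmapping is omitted for brevity.

#include <linux/dmaengine.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Hypothetical client: read `len` bytes from an RX FIFO at `fifo_phys`
 * into `buf` using a channel of this controller. */
static int example_start_rx_dma(struct dma_chan *chan, struct device *dev,
				dma_addr_t fifo_phys, void *buf, size_t len)
{
	struct dma_slave_config cfg = {
		/* Only 4-byte FIFO access registers are supported */
		.src_addr = fifo_phys,
		.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
		.src_maxburst = 16,	/* len must be a multiple of 4 * 16 */
	};
	struct dma_async_tx_descriptor *desc;
	struct scatterlist sg;
	int ret;

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret)
		return ret;

	/* The driver refuses s/g lists with more than one entry */
	sg_init_one(&sg, buf, len);
	if (!dma_map_sg(dev, &sg, 1, DMA_FROM_DEVICE))
		return -ENOMEM;

	desc = dmaengine_prep_slave_sg(chan, &sg, 1, DMA_DEV_TO_MEM,
				       DMA_PREP_INTERRUPT);
	if (!desc)
		return -EINVAL;	/* e.g. an alignment constraint was violated */

	dmaengine_submit(desc);
	dma_async_issue_pending(chan);
	return 0;
}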
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 96104f4eebe8..2ad43738ac8b 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -2,6 +2,7 @@
  * Copyright (C) Freescale Semicondutor, Inc. 2007, 2008.
  * Copyright (C) Semihalf 2009
  * Copyright (C) Ilya Yanok, Emcraft Systems 2010
+ * Copyright (C) Alexander Popov, Promcontroller 2014
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -29,8 +30,18 @@
  */
 
 /*
- * This is initial version of MPC5121 DMA driver. Only memory to memory
- * transfers are supported (tested using dmatest module).
+ * MPC512x and MPC8308 DMA driver. It supports
+ * memory to memory data transfers (tested using dmatest module) and
+ * data transfers between memory and peripheral I/O memory
+ * by means of slave scatter/gather with these limitations:
+ *  - chunked transfers (described by s/g lists with more than one item)
+ *    are refused as long as proper support for scatter/gather is missing;
+ *  - transfers on MPC8308 always start from software as this SoC appears
+ *    not to have external request lines for peripheral flow control;
+ *  - only peripheral devices with 4-byte FIFO access register are supported;
+ *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
+ *    source and destination addresses must be 4-byte aligned
+ *    and transfer size must be aligned on (4 * maxburst) boundary;
  */
 
 #include <linux/module.h>
@@ -189,6 +200,7 @@ struct mpc_dma_desc {
 	dma_addr_t			tcd_paddr;
 	int				error;
 	struct list_head		node;
+	int				will_access_peripheral;
 };
 
 struct mpc_dma_chan {
@@ -201,6 +213,12 @@ struct mpc_dma_chan {
 	struct mpc_dma_tcd		*tcd;
 	dma_addr_t			tcd_paddr;
 
+	/* Settings for access to peripheral FIFO */
+	dma_addr_t			src_per_paddr;
+	u32				src_tcd_nunits;
+	dma_addr_t			dst_per_paddr;
+	u32				dst_tcd_nunits;
+
 	/* Lock for this structure */
 	spinlock_t			lock;
 };
@@ -251,8 +269,23 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
 	struct mpc_dma_desc *mdesc;
 	int cid = mchan->chan.chan_id;
 
-	/* Move all queued descriptors to active list */
-	list_splice_tail_init(&mchan->queued, &mchan->active);
+	while (!list_empty(&mchan->queued)) {
+		mdesc = list_first_entry(&mchan->queued,
+						struct mpc_dma_desc, node);
+		/*
+		 * Grab either several mem-to-mem transfer descriptors
+		 * or one peripheral transfer descriptor,
+		 * don't mix mem-to-mem and peripheral transfer descriptors
+		 * within the same 'active' list.
+		 */
+		if (mdesc->will_access_peripheral) {
+			if (list_empty(&mchan->active))
+				list_move_tail(&mdesc->node, &mchan->active);
+			break;
+		} else {
+			list_move_tail(&mdesc->node, &mchan->active);
+		}
+	}
 
 	/* Chain descriptors into one transaction */
 	list_for_each_entry(mdesc, &mchan->active, node) {
@@ -278,7 +311,17 @@ static void mpc_dma_execute(struct mpc_dma_chan *mchan)
 
 	if (first != prev)
 		mdma->tcd[cid].e_sg = 1;
-	out_8(&mdma->regs->dmassrt, cid);
+
+	if (mdma->is_mpc8308) {
+		/* MPC8308, no request lines, software initiated start */
+		out_8(&mdma->regs->dmassrt, cid);
+	} else if (first->will_access_peripheral) {
+		/* Peripherals involved, start by external request signal */
+		out_8(&mdma->regs->dmaserq, cid);
+	} else {
+		/* Memory to memory transfer, software initiated start */
+		out_8(&mdma->regs->dmassrt, cid);
+	}
 }
 
 /* Handle interrupt on one half of DMA controller (32 channels) */
@@ -596,6 +639,7 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	}
 
 	mdesc->error = 0;
+	mdesc->will_access_peripheral = 0;
 	tcd = mdesc->tcd;
 
 	/* Prepare Transfer Control Descriptor for this transaction */
@@ -643,6 +687,193 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	return &mdesc->desc;
 }
 
+static struct dma_async_tx_descriptor *
+mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
+		unsigned int sg_len, enum dma_transfer_direction direction,
+		unsigned long flags, void *context)
+{
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(chan);
+	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+	struct mpc_dma_desc *mdesc = NULL;
+	dma_addr_t per_paddr;
+	u32 tcd_nunits;
+	struct mpc_dma_tcd *tcd;
+	unsigned long iflags;
+	struct scatterlist *sg;
+	size_t len;
+	int iter, i;
+
+	/* Currently there is no proper support for scatter/gather */
+	if (sg_len != 1)
+		return NULL;
+
+	if (!is_slave_direction(direction))
+		return NULL;
+
+	for_each_sg(sgl, sg, sg_len, i) {
+		spin_lock_irqsave(&mchan->lock, iflags);
+
+		mdesc = list_first_entry(&mchan->free,
+						struct mpc_dma_desc, node);
+		if (!mdesc) {
+			spin_unlock_irqrestore(&mchan->lock, iflags);
+			/* Try to free completed descriptors */
+			mpc_dma_process_completed(mdma);
+			return NULL;
+		}
+
+		list_del(&mdesc->node);
+
+		if (direction == DMA_DEV_TO_MEM) {
+			per_paddr = mchan->src_per_paddr;
+			tcd_nunits = mchan->src_tcd_nunits;
+		} else {
+			per_paddr = mchan->dst_per_paddr;
+			tcd_nunits = mchan->dst_tcd_nunits;
+		}
+
+		spin_unlock_irqrestore(&mchan->lock, iflags);
+
+		if (per_paddr == 0 || tcd_nunits == 0)
+			goto err_prep;
+
+		mdesc->error = 0;
+		mdesc->will_access_peripheral = 1;
+
+		/* Prepare Transfer Control Descriptor for this transaction */
+		tcd = mdesc->tcd;
+
+		memset(tcd, 0, sizeof(struct mpc_dma_tcd));
+
+		if (!IS_ALIGNED(sg_dma_address(sg), 4))
+			goto err_prep;
+
+		if (direction == DMA_DEV_TO_MEM) {
+			tcd->saddr = per_paddr;
+			tcd->daddr = sg_dma_address(sg);
+			tcd->soff = 0;
+			tcd->doff = 4;
+		} else {
+			tcd->saddr = sg_dma_address(sg);
+			tcd->daddr = per_paddr;
+			tcd->soff = 4;
+			tcd->doff = 0;
+		}
+
+		tcd->ssize = MPC_DMA_TSIZE_4;
+		tcd->dsize = MPC_DMA_TSIZE_4;
+
+		len = sg_dma_len(sg);
+		tcd->nbytes = tcd_nunits * 4;
+		if (!IS_ALIGNED(len, tcd->nbytes))
+			goto err_prep;
+
+		iter = len / tcd->nbytes;
+		if (iter >= 1 << 15) {
+			/* len is too big */
+			goto err_prep;
+		}
+		/* citer_linkch contains the high bits of iter */
+		tcd->biter = iter & 0x1ff;
+		tcd->biter_linkch = iter >> 9;
+		tcd->citer = tcd->biter;
+		tcd->citer_linkch = tcd->biter_linkch;
+
+		tcd->e_sg = 0;
+		tcd->d_req = 1;
+
+		/* Place descriptor in prepared list */
+		spin_lock_irqsave(&mchan->lock, iflags);
+		list_add_tail(&mdesc->node, &mchan->prepared);
+		spin_unlock_irqrestore(&mchan->lock, iflags);
+	}
+
+	return &mdesc->desc;
+
+err_prep:
+	/* Put the descriptor back */
+	spin_lock_irqsave(&mchan->lock, iflags);
+	list_add_tail(&mdesc->node, &mchan->free);
+	spin_unlock_irqrestore(&mchan->lock, iflags);
+
+	return NULL;
+}
+
+static int mpc_dma_device_control(struct dma_chan *chan, enum dma_ctrl_cmd cmd,
+							unsigned long arg)
+{
+	struct mpc_dma_chan *mchan;
+	struct mpc_dma *mdma;
+	struct dma_slave_config *cfg;
+	unsigned long flags;
+
+	mchan = dma_chan_to_mpc_dma_chan(chan);
+	switch (cmd) {
+	case DMA_TERMINATE_ALL:
+		/* Disable channel requests */
+		mdma = dma_chan_to_mpc_dma(chan);
+
+		spin_lock_irqsave(&mchan->lock, flags);
+
+		out_8(&mdma->regs->dmacerq, chan->chan_id);
+		list_splice_tail_init(&mchan->prepared, &mchan->free);
+		list_splice_tail_init(&mchan->queued, &mchan->free);
+		list_splice_tail_init(&mchan->active, &mchan->free);
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		return 0;
+
+	case DMA_SLAVE_CONFIG:
+		/*
+		 * Software constraints:
+		 *  - only transfers between a peripheral device and
+		 *    memory are supported;
+		 *  - only peripheral devices with 4-byte FIFO access register
+		 *    are supported;
+		 *  - minimal transfer chunk is 4 bytes and consequently
+		 *    source and destination addresses must be 4-byte aligned
+		 *    and transfer size must be aligned on (4 * maxburst)
+		 *    boundary;
+		 *  - during the transfer RAM address is being incremented by
+		 *    the size of minimal transfer chunk;
+		 *  - peripheral port's address is constant during the transfer.
+		 */
+
+		cfg = (void *)arg;
+
+		if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+		    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
+		    !IS_ALIGNED(cfg->src_addr, 4) ||
+		    !IS_ALIGNED(cfg->dst_addr, 4)) {
+			return -EINVAL;
+		}
+
+		spin_lock_irqsave(&mchan->lock, flags);
+
+		mchan->src_per_paddr = cfg->src_addr;
+		mchan->src_tcd_nunits = cfg->src_maxburst;
+		mchan->dst_per_paddr = cfg->dst_addr;
+		mchan->dst_tcd_nunits = cfg->dst_maxburst;
+
+		/* Apply defaults */
+		if (mchan->src_tcd_nunits == 0)
+			mchan->src_tcd_nunits = 1;
+		if (mchan->dst_tcd_nunits == 0)
+			mchan->dst_tcd_nunits = 1;
+
+		spin_unlock_irqrestore(&mchan->lock, flags);
+
+		return 0;
+
+	default:
+		/* Unknown command */
+		break;
+	}
+
+	return -ENXIO;
+}
+
 static int mpc_dma_probe(struct platform_device *op)
 {
 	struct device_node *dn = op->dev.of_node;
@@ -733,9 +964,12 @@ static int mpc_dma_probe(struct platform_device *op)
 	dma->device_issue_pending = mpc_dma_issue_pending;
 	dma->device_tx_status = mpc_dma_tx_status;
 	dma->device_prep_dma_memcpy = mpc_dma_prep_memcpy;
+	dma->device_prep_slave_sg = mpc_dma_prep_slave_sg;
+	dma->device_control = mpc_dma_device_control;
 
 	INIT_LIST_HEAD(&dma->channels);
 	dma_cap_set(DMA_MEMCPY, dma->cap_mask);
+	dma_cap_set(DMA_SLAVE, dma->cap_mask);
 
 	for (i = 0; i < dma->chancnt; i++) {
 		mchan = &mdma->channels[i];
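Review note: a worked example of the TCD iteration-count encoding used in
mpc_dma_prep_slave_sg() above. The low 9 bits of the major-loop count go
into biter/citer and the remaining bits into the *_linkch fields; the counts
must reassemble to a value below 2^15. The numbers here are illustrative,
not from the patch.

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t maxburst = 16;          /* from dma_slave_config maxburst */
	uint32_t nbytes = 4 * maxburst;  /* minor loop: 64 bytes per request */
	uint32_t len = 1024 * 1024;      /* transfer size, multiple of nbytes */
	uint32_t iter = len / nbytes;    /* 16384 major-loop iterations */

	uint32_t biter = iter & 0x1ff;      /* low 9 bits  -> 0  */
	uint32_t linkch = iter >> 9;        /* high bits   -> 32 */

	/* Reassembles to the original count: 0 | (32 << 9) == 16384 */
	printf("iter=%u biter=%u linkch=%u\n", iter, biter, linkch);
	return 0;
}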