author		Mario Six <mario.six@gdsys.cc>		2016-03-18 09:57:20 -0400
committer	Vinod Koul <vinod.koul@intel.com>	2016-04-04 12:50:10 -0400
commit		899ed9dd4f2d007dfad66cd074b8ff26a0894ae8 (patch)
tree		1e221969517af86e79f973284408baa62c8f2fe4 /drivers/dma
parent		237ec70903bcf50768138b6c663c67ef1f946cc8 (diff)
dmaengine: mpc512x: Implement additional chunk sizes for DMA transfers
This patch extends the capabilities of the driver to handle DMA transfers
to and from devices with access widths of 1, 2, 4, 16 (for MPC512x), and
32 bytes.

Signed-off-by: Mario Six <mario.six@gdsys.cc>
Signed-off-by: Vinod Koul <vinod.koul@intel.com>
Diffstat (limited to 'drivers/dma')
-rw-r--r--	drivers/dma/mpc512x_dma.c	112
1 file changed, 76 insertions(+), 36 deletions(-)
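As an illustration of what the patch enables on the client side, here is a minimal, hypothetical slave-configuration sketch using the generic dmaengine API. The channel name "rx", the FIFO address, and the maxburst value are invented for illustration; the relevant part is requesting DMA_SLAVE_BUSWIDTH_16_BYTES, which mpc_dma_device_config() rejected before this change.

/* Hypothetical dmaengine client: names and addresses are illustrative only. */
#include <linux/dmaengine.h>
#include <linux/err.h>

static int example_setup_rx(struct device *dev, struct dma_chan **out)
{
	struct dma_slave_config cfg = {
		/* 16-byte FIFO access: only accepted on MPC512x after this patch */
		.src_addr	= 0x80001000,	/* made-up FIFO address, 16-byte aligned */
		.src_addr_width	= DMA_SLAVE_BUSWIDTH_16_BYTES,
		.src_maxburst	= 4,	/* transfer lengths must align to 16 * 4 = 64 bytes */
	};
	struct dma_chan *chan;
	int ret;

	chan = dma_request_chan(dev, "rx");	/* channel name is illustrative */
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	ret = dmaengine_slave_config(chan, &cfg);
	if (ret) {
		dma_release_channel(chan);
		return ret;
	}

	*out = chan;
	return 0;
}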
diff --git a/drivers/dma/mpc512x_dma.c b/drivers/dma/mpc512x_dma.c
index 3a9104a1041c..1a161a8d68f3 100644
--- a/drivers/dma/mpc512x_dma.c
+++ b/drivers/dma/mpc512x_dma.c
@@ -3,6 +3,7 @@
  * Copyright (C) Semihalf 2009
  * Copyright (C) Ilya Yanok, Emcraft Systems 2010
  * Copyright (C) Alexander Popov, Promcontroller 2014
+ * Copyright (C) Mario Six, Guntermann & Drunck GmbH, 2016
  *
  * Written by Piotr Ziecik <kosmo@semihalf.com>. Hardware description
  * (defines, structures and comments) was taken from MPC5121 DMA driver
@@ -26,18 +27,19 @@
  */
 
 /*
- * MPC512x and MPC8308 DMA driver. It supports
- * memory to memory data transfers (tested using dmatest module) and
- * data transfers between memory and peripheral I/O memory
- * by means of slave scatter/gather with these limitations:
- *  - chunked transfers (described by s/g lists with more than one item)
- *    are refused as long as proper support for scatter/gather is missing;
- *  - transfers on MPC8308 always start from software as this SoC appears
- *    not to have external request lines for peripheral flow control;
- *  - only peripheral devices with 4-byte FIFO access register are supported;
- *  - minimal memory <-> I/O memory transfer chunk is 4 bytes and consequently
- *    source and destination addresses must be 4-byte aligned
- *    and transfer size must be aligned on (4 * maxburst) boundary;
+ * MPC512x and MPC8308 DMA driver. It supports memory to memory data transfers
+ * (tested using dmatest module) and data transfers between memory and
+ * peripheral I/O memory by means of slave scatter/gather with these
+ * limitations:
+ *  - chunked transfers (described by s/g lists with more than one item) are
+ *    refused as long as proper support for scatter/gather is missing
+ *  - transfers on MPC8308 always start from software as this SoC does not have
+ *    external request lines for peripheral flow control
+ *  - memory <-> I/O memory transfer chunks of sizes of 1, 2, 4, 16 (for
+ *    MPC512x), and 32 bytes are supported, and, consequently, source
+ *    addresses and destination addresses must be aligned accordingly;
+ *    furthermore, for MPC512x SoCs, the transfer size must be aligned on
+ *    (chunk size * maxburst)
  */
 
 #include <linux/module.h>
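The alignment rule stated in the updated comment can be made concrete with a small, self-contained arithmetic sketch. The chunk size, maxburst, and transfer length below are illustrative values, not taken from the patch.

/* Spelling out the "(chunk size * maxburst)" alignment rule for one case. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int chunk = 16;	/* bytes per FIFO access (MPC512x only) */
	unsigned int maxburst = 4;	/* burst length requested by the client */
	unsigned int len = 192;		/* proposed scatter/gather entry length */

	/* Addresses must be chunk-aligned; the transfer length must be a
	 * multiple of chunk * maxburst (the per-request byte count). */
	unsigned int nbytes = chunk * maxburst;		/* 64 */
	bool ok = (len % nbytes) == 0;			/* 192 % 64 == 0 -> accepted */

	printf("nbytes=%u, len=%u -> %s\n", nbytes, len, ok ? "accepted" : "rejected");
	return 0;
}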
@@ -213,8 +215,10 @@ struct mpc_dma_chan {
 	/* Settings for access to peripheral FIFO */
 	dma_addr_t src_per_paddr;
 	u32 src_tcd_nunits;
+	u8 swidth;
 	dma_addr_t dst_per_paddr;
 	u32 dst_tcd_nunits;
+	u8 dwidth;
 
 	/* Lock for this structure */
 	spinlock_t lock;
@@ -684,6 +688,15 @@ mpc_dma_prep_memcpy(struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
 	return &mdesc->desc;
 }
 
+inline u8 buswidth_to_dmatsize(u8 buswidth)
+{
+	u8 res;
+
+	for (res = 0; buswidth > 1; buswidth /= 2)
+		res++;
+	return res;
+}
+
 static struct dma_async_tx_descriptor *
 mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 		unsigned int sg_len, enum dma_transfer_direction direction,
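The buswidth_to_dmatsize() helper added above is an integer log2 of the access width in bytes, which matches the transfer-size encoding written into the TCD ssize/dsize fields (the old code passed the hard-coded MPC_DMA_TSIZE_4 for the fixed 4-byte case). The following standalone sketch mirrors the helper and checks the mapping for the supported widths; the size-code values in the comments reflect that encoding rather than anything spelled out in this patch.

/* Standalone check of the width -> TCD size-code mapping (integer log2). */
#include <assert.h>
#include <stdint.h>

static uint8_t buswidth_to_dmatsize(uint8_t buswidth)
{
	uint8_t res;

	for (res = 0; buswidth > 1; buswidth /= 2)
		res++;
	return res;
}

int main(void)
{
	assert(buswidth_to_dmatsize(1)  == 0);	/* 1-byte access */
	assert(buswidth_to_dmatsize(2)  == 1);	/* 2-byte access */
	assert(buswidth_to_dmatsize(4)  == 2);	/* 4-byte access, the old MPC_DMA_TSIZE_4 case */
	assert(buswidth_to_dmatsize(16) == 4);	/* 16-byte access (MPC512x only) */
	assert(buswidth_to_dmatsize(32) == 5);	/* 32-byte access */
	return 0;
}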
@@ -742,27 +755,32 @@ mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 
 		memset(tcd, 0, sizeof(struct mpc_dma_tcd));
 
-		if (!IS_ALIGNED(sg_dma_address(sg), 4))
-			goto err_prep;
-
 		if (direction == DMA_DEV_TO_MEM) {
 			tcd->saddr = per_paddr;
 			tcd->daddr = sg_dma_address(sg);
+
+			if (!IS_ALIGNED(sg_dma_address(sg), mchan->dwidth))
+				goto err_prep;
+
 			tcd->soff = 0;
-			tcd->doff = 4;
+			tcd->doff = mchan->dwidth;
 		} else {
 			tcd->saddr = sg_dma_address(sg);
 			tcd->daddr = per_paddr;
-			tcd->soff = 4;
+
+			if (!IS_ALIGNED(sg_dma_address(sg), mchan->swidth))
+				goto err_prep;
+
+			tcd->soff = mchan->swidth;
 			tcd->doff = 0;
 		}
 
-		tcd->ssize = MPC_DMA_TSIZE_4;
-		tcd->dsize = MPC_DMA_TSIZE_4;
+		tcd->ssize = buswidth_to_dmatsize(mchan->swidth);
+		tcd->dsize = buswidth_to_dmatsize(mchan->dwidth);
 
 		if (mdma->is_mpc8308) {
 			tcd->nbytes = sg_dma_len(sg);
-			if (!IS_ALIGNED(tcd->nbytes, 4))
+			if (!IS_ALIGNED(tcd->nbytes, mchan->swidth))
 				goto err_prep;
 
 			/* No major loops for MPC8303 */
@@ -770,7 +788,7 @@ mpc_dma_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
 			tcd->citer = 1;
 		} else {
 			len = sg_dma_len(sg);
-			tcd->nbytes = tcd_nunits * 4;
+			tcd->nbytes = tcd_nunits * tcd->ssize;
 			if (!IS_ALIGNED(len, tcd->nbytes))
 				goto err_prep;
 
@@ -806,40 +824,62 @@ err_prep:
 	return NULL;
 }
 
+inline bool is_buswidth_valid(u8 buswidth, bool is_mpc8308)
+{
+	switch (buswidth) {
+	case 16:
+		if (is_mpc8308)
+			return false;
+	case 1:
+	case 2:
+	case 4:
+	case 32:
+		break;
+	default:
+		return false;
+	}
+
+	return true;
+}
+
 static int mpc_dma_device_config(struct dma_chan *chan,
 				 struct dma_slave_config *cfg)
 {
 	struct mpc_dma_chan *mchan = dma_chan_to_mpc_dma_chan(chan);
+	struct mpc_dma *mdma = dma_chan_to_mpc_dma(&mchan->chan);
 	unsigned long flags;
 
 	/*
 	 * Software constraints:
-	 *  - only transfers between a peripheral device and
-	 *    memory are supported;
-	 *  - only peripheral devices with 4-byte FIFO access register
-	 *    are supported;
-	 *  - minimal transfer chunk is 4 bytes and consequently
-	 *    source and destination addresses must be 4-byte aligned
-	 *    and transfer size must be aligned on (4 * maxburst)
-	 *    boundary;
-	 *  - during the transfer RAM address is being incremented by
-	 *    the size of minimal transfer chunk;
-	 *  - peripheral port's address is constant during the transfer.
+	 *  - only transfers between a peripheral device and memory are
+	 *    supported
+	 *  - transfer chunk sizes of 1, 2, 4, 16 (for MPC512x), and 32 bytes
+	 *    are supported, and, consequently, source addresses and
+	 *    destination addresses must be aligned accordingly; furthermore,
+	 *    for MPC512x SoCs, the transfer size must be aligned on (chunk
+	 *    size * maxburst)
+	 *  - during the transfer, the RAM address is incremented by the size
+	 *    of transfer chunk
+	 *  - the peripheral port's address is constant during the transfer.
 	 */
 
-	if (cfg->src_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-	    cfg->dst_addr_width != DMA_SLAVE_BUSWIDTH_4_BYTES ||
-	    !IS_ALIGNED(cfg->src_addr, 4) ||
-	    !IS_ALIGNED(cfg->dst_addr, 4)) {
+	if (!IS_ALIGNED(cfg->src_addr, cfg->src_addr_width) ||
+	    !IS_ALIGNED(cfg->dst_addr, cfg->dst_addr_width)) {
 		return -EINVAL;
 	}
 
+	if (!is_buswidth_valid(cfg->src_addr_width, mdma->is_mpc8308) ||
+	    !is_buswidth_valid(cfg->dst_addr_width, mdma->is_mpc8308))
+		return -EINVAL;
+
 	spin_lock_irqsave(&mchan->lock, flags);
 
 	mchan->src_per_paddr = cfg->src_addr;
 	mchan->src_tcd_nunits = cfg->src_maxburst;
+	mchan->swidth = cfg->src_addr_width;
 	mchan->dst_per_paddr = cfg->dst_addr;
 	mchan->dst_tcd_nunits = cfg->dst_maxburst;
+	mchan->dwidth = cfg->dst_addr_width;
 
 	/* Apply defaults */
 	if (mchan->src_tcd_nunits == 0)